Coverage for drivers/blktap2.py : 39%

1#!/usr/bin/python3
2#
3# Copyright (C) Citrix Systems Inc.
4#
5# This program is free software; you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License as published
7# by the Free Software Foundation; version 2.1 only.
8#
9# This program is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with this program; if not, write to the Free Software Foundation, Inc.,
16# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17#
18# blktap2: blktap/tapdisk management layer
19#
20import grp
21import os
22import re
23import stat
24import time
25import copy
26from lock import Lock
27import util
28import xmlrpc.client
29import http.client
30import errno
31import signal
32import subprocess
33import syslog as _syslog
34import glob
35import json
36import xs_errors
37import XenAPI # pylint: disable=import-error
38import scsiutil
39from syslog import openlog, syslog
40from stat import * # S_ISBLK(), ...
41import nfs
43import resetvdis
44import vhdutil
45import lvhdutil
47import VDI as sm
49# For RRDD Plugin Registration
50from xmlrpc.client import ServerProxy, Transport
51from socket import socket, AF_UNIX, SOCK_STREAM
53try:
54 from linstorvolumemanager import log_drbd_openers
55 LINSTOR_AVAILABLE = True
56except ImportError:
57 LINSTOR_AVAILABLE = False
59PLUGIN_TAP_PAUSE = "tapdisk-pause"
61SOCKPATH = "/var/xapi/xcp-rrdd"
63NUM_PAGES_PER_RING = 32 * 11
64MAX_FULL_RINGS = 8
65POOL_NAME_KEY = "mem-pool"
66POOL_SIZE_KEY = "mem-pool-size-rings"
68ENABLE_MULTIPLE_ATTACH = "/etc/xensource/allow_multiple_vdi_attach"
69NO_MULTIPLE_ATTACH = not (os.path.exists(ENABLE_MULTIPLE_ATTACH))
72def locking(excType, override=True):
73 def locking2(op):
74 def wrapper(self, *args):
75 self.lock.acquire()
76 try:
77 try:
78 ret = op(self, * args)
79 except (util.CommandException, util.SMException, XenAPI.Failure) as e:
80 util.logException("BLKTAP2:%s" % op)
81 msg = str(e)
82 if isinstance(e, util.CommandException):
83 msg = "Command %s failed (%s): %s" % \
84 (e.cmd, e.code, e.reason)
85 if override:
86 raise xs_errors.XenError(excType, opterr=msg)
87 else:
88 raise
89 except:
90 util.logException("BLKTAP2:%s" % op)
91 raise
92 finally:
93 self.lock.release()
94 return ret
95 return wrapper
96 return locking2
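# Illustrative sketch, not part of the original module: how locking() is meant
# to be applied. It assumes the decorated object carries a `self.lock` with
# acquire()/release() (as the VDI class further down does); the class and
# method names here are hypothetical.
#
#     class Example(object):
#         def __init__(self, uuid):
#             self.lock = Lock("vdi", uuid)
#
#         @locking("VDIUnavailable")
#         def do_work(self, arg):
#             # CommandException/SMException/XenAPI.Failure raised in here are
#             # logged and re-raised as xs_errors.XenError("VDIUnavailable")
#             return arg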
99class RetryLoop(object):
101 def __init__(self, backoff, limit):
102 self.backoff = backoff
103 self.limit = limit
105 def __call__(self, f):
107 def loop(*__t, **__d):
108 attempt = 0
110 while True:
111 attempt += 1
113 try:
114 return f( * __t, ** __d)
116 except self.TransientFailure as e:
117 e = e.exception
119 if attempt >= self.limit:
120 raise e
122 time.sleep(self.backoff)
124 return loop
126 class TransientFailure(Exception):
127 def __init__(self, exception):
128 self.exception = exception
131def retried(**args):
132 return RetryLoop( ** args)
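# Illustrative sketch, not part of the original module: @retried builds a
# RetryLoop, and callees signal a retryable error by raising
# RetryLoop.TransientFailure around the real exception; after `limit` attempts
# the wrapped exception is re-raised. The function and exception names below
# are hypothetical.
#
#     @retried(backoff=.5, limit=3)
#     def flaky_call():
#         try:
#             return do_something()
#         except SomeTransientError as e:
#             raise RetryLoop.TransientFailure(e)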
135class TapCtl(object):
136 """Tapdisk IPC utility calls."""
138 PATH = "/usr/sbin/tap-ctl"
140 def __init__(self, cmd, p):
141 self.cmd = cmd
142 self._p = p
143 self.stdout = p.stdout
145 class CommandFailure(Exception):
146 """TapCtl cmd failure."""
148 def __init__(self, cmd, **info):
149 self.cmd = cmd
150 self.info = info
152 def __str__(self):
153 items = self.info.items()
154 info = ", ".join("%s=%s" % item
155 for item in items)
156 return "%s failed: %s" % (self.cmd, info)
158 # Trying to get a non-existent attribute throws an AttributeError
159 # exception
160 def __getattr__(self, key):
161 if key in self.info:
162 return self.info[key]
163 return object.__getattribute__(self, key)
165 @property
166 def has_status(self):
167 return 'status' in self.info
169 @property
170 def has_signal(self):
171 return 'signal' in self.info
173 # Retrieves the error code returned by the command. If the error code
174 # was not supplied at object-construction time, zero is returned.
175 def get_error_code(self):
176 key = 'status'
177 if key in self.info:
178 return self.info[key]
179 else:
180 return 0
182 @classmethod
183 def __mkcmd_real(cls, args):
184 return [cls.PATH] + [str(x) for x in args]
186 __next_mkcmd = __mkcmd_real
188 @classmethod
189 def _mkcmd(cls, args):
191 __next_mkcmd = cls.__next_mkcmd
192 cls.__next_mkcmd = cls.__mkcmd_real
194 return __next_mkcmd(args)
196 @classmethod
197 def _call(cls, args, quiet=False, input=None, text_mode=True):
198 """
199 Spawn a tap-ctl process. Return a TapCtl invocation.
200 Raises a TapCtl.CommandFailure if subprocess creation failed.
201 """
202 cmd = cls._mkcmd(args)
204 if not quiet:
205 util.SMlog(cmd)
206 try:
207 p = subprocess.Popen(cmd,
208 stdin=subprocess.PIPE,
209 stdout=subprocess.PIPE,
210 stderr=subprocess.PIPE,
211 close_fds=True,
212 universal_newlines=text_mode)
213 if input:
214 p.stdin.write(input)
215 p.stdin.close()
216 except OSError as e:
217 raise cls.CommandFailure(cmd, errno=e.errno)
219 return cls(cmd, p)
221 def _errmsg(self):
222 output = map(str.rstrip, self._p.stderr)
223 return "; ".join(output)
225 def _wait(self, quiet=False):
226 """
227 Reap the child tap-ctl process of this invocation.
228 Raises a TapCtl.CommandFailure on non-zero exit status.
229 """
230 status = self._p.wait()
231 if not quiet:
232 util.SMlog(" = %d" % status)
234 if status == 0:
235 return
237 info = {'errmsg': self._errmsg(),
238 'pid': self._p.pid}
240 if status < 0:
241 info['signal'] = -status
242 else:
243 info['status'] = status
245 raise self.CommandFailure(self.cmd, ** info)
247 @classmethod
248 def _pread(cls, args, quiet=False, input=None, text_mode=True):
249 """
250 Spawn a tap-ctl invocation and read a single line.
251 """
252 tapctl = cls._call(args=args, quiet=quiet, input=input,
253 text_mode=text_mode)
255 output = tapctl.stdout.readline().rstrip()
257 tapctl._wait(quiet)
258 return output
260 @staticmethod
261 def _maybe(opt, parm):
262 if parm is not None:
263 return [opt, parm]
264 return []
266 @classmethod
267 def __list(cls, minor=None, pid=None, _type=None, path=None):
268 args = ["list"]
269 args += cls._maybe("-m", minor)
270 args += cls._maybe("-p", pid)
271 args += cls._maybe("-t", _type)
272 args += cls._maybe("-f", path)
274 tapctl = cls._call(args, True)
276 for stdout_line in tapctl.stdout:
277 # FIXME: tap-ctl writes error messages to stdout and
278 # confuses this parser
279 if stdout_line == "blktap kernel module not installed\n":
280 # This isn't pretty but (a) neither is confusing stdout/stderr
281 # and at least causes the error to describe the fix
282 raise Exception("blktap kernel module not installed: try 'modprobe blktap'")
283 row = {}
285 for field in stdout_line.rstrip().split(' ', 3):
286 bits = field.split('=')
287 if len(bits) == 2:
288 key, val = field.split('=')
290 if key in ('pid', 'minor'):
291 row[key] = int(val, 10)
293 elif key in ('state'):
294 row[key] = int(val, 0x10)
296 else:
297 row[key] = val
298 else:
299 util.SMlog("Ignoring unexpected tap-ctl output: %s" % repr(field))
300 yield row
302 tapctl._wait(True)
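# Illustrative sketch, not part of the original module: __list() splits each
# tap-ctl list line into at most four key=value fields, parsing pid/minor in
# base 10 and state in base 16. A line such as (values hypothetical)
#
#     pid=1234 minor=2 state=0 args=vhd:/path/to/image.vhd
#
# would therefore yield the row
#
#     {'pid': 1234, 'minor': 2, 'state': 0, 'args': 'vhd:/path/to/image.vhd'}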
304 @classmethod
305 @retried(backoff=.5, limit=10)
306 def list(cls, **args):
308 # FIXME. We typically get an EPROTO when uevents interleave
309 # with SM ops and a tapdisk shuts down under our feet. Should
310 # be fixed in SM.
312 try:
313 return list(cls.__list( ** args))
315 except cls.CommandFailure as e:
316 transient = [errno.EPROTO, errno.ENOENT]
317 if e.has_status and e.status in transient:
318 raise RetryLoop.TransientFailure(e)
319 raise
321 @classmethod
322 def allocate(cls, devpath=None):
323 args = ["allocate"]
324 args += cls._maybe("-d", devpath)
325 return cls._pread(args)
327 @classmethod
328 def free(cls, minor):
329 args = ["free", "-m", minor]
330 cls._pread(args)
332 @classmethod
333 @retried(backoff=.5, limit=10)
334 def spawn(cls):
335 args = ["spawn"]
336 try:
337 pid = cls._pread(args)
338 return int(pid)
339 except cls.CommandFailure as ce:
340 # intermittent failures to spawn. CA-292268
341 if ce.status == 1:
342 raise RetryLoop.TransientFailure(ce)
343 raise
345 @classmethod
346 def attach(cls, pid, minor):
347 args = ["attach", "-p", pid, "-m", minor]
348 cls._pread(args)
350 @classmethod
351 def detach(cls, pid, minor):
352 args = ["detach", "-p", pid, "-m", minor]
353 cls._pread(args)
355 @classmethod
356 def _load_key(cls, key_hash, vdi_uuid):
357 import plugins
359 return plugins.load_key(key_hash, vdi_uuid)
361 @classmethod
362 def open(cls, pid, minor, _type, _file, options):
363 params = Tapdisk.Arg(_type, _file)
364 args = ["open", "-p", pid, "-m", minor, '-a', str(params)]
365 text_mode = True
366 input = None
367 if options.get("rdonly"):
368 args.append('-R')
369 if options.get("lcache"):
370 args.append("-r")
371 if options.get("existing_prt") is not None:
372 args.append("-e")
373 args.append(str(options["existing_prt"]))
374 if options.get("secondary"):
375 args.append("-2")
376 args.append(options["secondary"])
377 if options.get("standby"):
378 args.append("-s")
379 if options.get("timeout"):
380 args.append("-t")
381 args.append(str(options["timeout"]))
382 if not options.get("o_direct", True):
383 args.append("-D")
384 if options.get('cbtlog'):
385 args.extend(['-C', options['cbtlog']])
386 if options.get('key_hash'):
387 key_hash = options['key_hash']
388 vdi_uuid = options['vdi_uuid']
389 key = cls._load_key(key_hash, vdi_uuid)
391 if not key:
392 raise util.SMException("No key found with key hash {}".format(key_hash))
393 input = key
394 text_mode = False
395 args.append('-E')
397 cls._pread(args=args, input=input, text_mode=text_mode)
399 @classmethod
400 def close(cls, pid, minor, force=False):
401 args = ["close", "-p", pid, "-m", minor, "-t", "120"]
402 if force:
403 args += ["-f"]
404 cls._pread(args)
406 @classmethod
407 def pause(cls, pid, minor):
408 args = ["pause", "-p", pid, "-m", minor]
409 cls._pread(args)
411 @classmethod
412 def unpause(cls, pid, minor, _type=None, _file=None, mirror=None,
413 cbtlog=None):
414 args = ["unpause", "-p", pid, "-m", minor]
415 if mirror:
416 args.extend(["-2", mirror])
417 if _type and _file:
418 params = Tapdisk.Arg(_type, _file)
419 args += ["-a", str(params)]
420 if cbtlog:
421 args.extend(["-c", cbtlog])
422 cls._pread(args)
424 @classmethod
425 def shutdown(cls, pid):
426 # TODO: This should be a real tap-ctl command
427 os.kill(pid, signal.SIGTERM)
428 os.waitpid(pid, 0)
430 @classmethod
431 def stats(cls, pid, minor):
432 args = ["stats", "-p", pid, "-m", minor]
433 return cls._pread(args, quiet=True)
435 @classmethod
436 def major(cls):
437 args = ["major"]
438 major = cls._pread(args)
439 return int(major)
442class TapdiskExists(Exception):
443 """Tapdisk already running."""
445 def __init__(self, tapdisk):
446 self.tapdisk = tapdisk
448 def __str__(self):
449 return "%s already running" % self.tapdisk
452class TapdiskNotRunning(Exception):
453 """No such Tapdisk."""
455 def __init__(self, **attrs):
456 self.attrs = attrs
458 def __str__(self):
459 items = iter(self.attrs.items())
460 attrs = ", ".join("%s=%s" % attr
461 for attr in items)
462 return "No such Tapdisk(%s)" % attrs
465class TapdiskNotUnique(Exception):
466 """More than one tapdisk on one path."""
468 def __init__(self, tapdisks):
469 self.tapdisks = tapdisks
471 def __str__(self):
472 tapdisks = map(str, self.tapdisks)
473 return "Found multiple tapdisks: %s" % tapdisks
476class TapdiskFailed(Exception):
477 """Tapdisk launch failure."""
479 def __init__(self, arg, err):
480 self.arg = arg
481 self.err = err
483 def __str__(self):
484 return "Tapdisk(%s): %s" % (self.arg, self.err)
486 def get_error(self):
487 return self.err
490class TapdiskInvalidState(Exception):
491 """Tapdisk pause/unpause failure"""
493 def __init__(self, tapdisk):
494 self.tapdisk = tapdisk
496 def __str__(self):
497 return str(self.tapdisk)
500def mkdirs(path, mode=0o777):
501 if not os.path.exists(path):
502 parent, subdir = os.path.split(path)
503 assert parent != path
504 try:
505 if parent:
506 mkdirs(parent, mode)
507 if subdir:
508 os.mkdir(path, mode)
509 except OSError as e:
510 if e.errno != errno.EEXIST:
511 raise
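# Illustrative sketch, not part of the original module: mkdirs() creates any
# missing parents and tolerates EEXIST races, much like
# os.makedirs(path, mode, exist_ok=True). The path below is hypothetical.
#
#     mkdirs("/var/run/blktap/some/subdir", 0o700)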
514class KObject(object):
516 SYSFS_CLASSTYPE = None
518 def sysfs_devname(self):
519 raise NotImplementedError("sysfs_devname is undefined")
522class Attribute(object):
524 SYSFS_NODENAME = None
526 def __init__(self, path):
527 self.path = path
529 @classmethod
530 def from_kobject(cls, kobj):
531 path = "%s/%s" % (kobj.sysfs_path(), cls.SYSFS_NODENAME)
532 return cls(path)
534 class NoSuchAttribute(Exception):
535 def __init__(self, name):
536 self.name = name
538 def __str__(self):
539 return "No such attribute: %s" % self.name
541 def _open(self, mode='r'):
542 try:
543 return open(self.path, mode)
544 except IOError as e:
545 if e.errno == errno.ENOENT:
546 raise self.NoSuchAttribute(self)
547 raise
549 def readline(self):
550 f = self._open('r')
551 s = f.readline().rstrip()
552 f.close()
553 return s
555 def writeline(self, val):
556 f = self._open('w')
557 f.write(val)
558 f.close()
561class ClassDevice(KObject):
563 @classmethod
564 def sysfs_class_path(cls):
565 return "/sys/class/%s" % cls.SYSFS_CLASSTYPE
567 def sysfs_path(self):
568 return "%s/%s" % (self.sysfs_class_path(),
569 self.sysfs_devname())
572class Blktap(ClassDevice):
574 DEV_BASEDIR = '/dev/xen/blktap-2'
576 SYSFS_CLASSTYPE = "blktap2"
578 def __init__(self, minor):
579 self.minor = minor
580 self._pool = None
581 self._task = None
583 @classmethod
584 def allocate(cls):
585 # FIXME. Should rather go into init.
586 mkdirs(cls.DEV_BASEDIR)
588 devname = TapCtl.allocate()
589 minor = Tapdisk._parse_minor(devname)
590 return cls(minor)
592 def free(self):
593 TapCtl.free(self.minor)
595 def __str__(self):
596 return "%s(minor=%d)" % (self.__class__.__name__, self.minor)
598 def sysfs_devname(self):
599 return "blktap!blktap%d" % self.minor
601 class Pool(Attribute):
602 SYSFS_NODENAME = "pool"
604 def get_pool_attr(self):
605 if not self._pool:
606 self._pool = self.Pool.from_kobject(self)
607 return self._pool
609 def get_pool_name(self):
610 return self.get_pool_attr().readline()
612 def set_pool_name(self, name):
613 self.get_pool_attr().writeline(name)
615 def set_pool_size(self, pages):
616 self.get_pool().set_size(pages)
618 def get_pool(self):
619 return BlktapControl.get_pool(self.get_pool_name())
621 def set_pool(self, pool):
622 self.set_pool_name(pool.name)
624 class Task(Attribute):
625 SYSFS_NODENAME = "task"
627 def get_task_attr(self):
628 if not self._task:
629 self._task = self.Task.from_kobject(self)
630 return self._task
632 def get_task_pid(self):
633 pid = self.get_task_attr().readline()
634 try:
635 return int(pid)
636 except ValueError:
637 return None
639 def find_tapdisk(self):
640 pid = self.get_task_pid()
641 if pid is None:
642 return None
644 return Tapdisk.find(pid=pid, minor=self.minor)
646 def get_tapdisk(self):
647 tapdisk = self.find_tapdisk()
648 if not tapdisk:
649 raise TapdiskNotRunning(minor=self.minor)
650 return tapdisk
653class Tapdisk(object):
655 TYPES = ['aio', 'vhd']
657 def __init__(self, pid, minor, _type, path, state):
658 self.pid = pid
659 self.minor = minor
660 self.type = _type
661 self.path = path
662 self.state = state
663 self._dirty = False
664 self._blktap = None
666 def __str__(self):
667 state = self.pause_state()
668 return "Tapdisk(%s, pid=%d, minor=%s, state=%s)" % \
669 (self.get_arg(), self.pid, self.minor, state)
671 @classmethod
672 def list(cls, **args):
674 for row in TapCtl.list( ** args):
676 args = {'pid': None,
677 'minor': None,
678 'state': None,
679 '_type': None,
680 'path': None}
682 for key, val in row.items():
683 if key in args:
684 args[key] = val
686 if 'args' in row:
687 image = Tapdisk.Arg.parse(row['args'])
688 args['_type'] = image.type
689 args['path'] = image.path
691 if None in args.values():
692 continue
694 yield Tapdisk( ** args)
696 @classmethod
697 def find(cls, **args):
699 found = list(cls.list( ** args))
701 if len(found) > 1:
702 raise TapdiskNotUnique(found)
704 if found:
705 return found[0]
707 return None
709 @classmethod
710 def find_by_path(cls, path):
711 return cls.find(path=path)
713 @classmethod
714 def find_by_minor(cls, minor):
715 return cls.find(minor=minor)
717 @classmethod
718 def get(cls, **attrs):
720 tapdisk = cls.find( ** attrs)
722 if not tapdisk:
723 raise TapdiskNotRunning( ** attrs)
725 return tapdisk
727 @classmethod
728 def from_path(cls, path):
729 return cls.get(path=path)
731 @classmethod
732 def from_minor(cls, minor):
733 return cls.get(minor=minor)
735 @classmethod
736 def __from_blktap(cls, blktap):
737 tapdisk = cls.from_minor(minor=blktap.minor)
738 tapdisk._blktap = blktap
739 return tapdisk
741 def get_blktap(self):
742 if not self._blktap:
743 self._blktap = Blktap(self.minor)
744 return self._blktap
746 class Arg:
748 def __init__(self, _type, path):
749 self.type = _type
750 self.path = path
752 def __str__(self):
753 return "%s:%s" % (self.type, self.path)
755 @classmethod
756 def parse(cls, arg):
758 try:
759 _type, path = arg.split(":", 1)
760 except ValueError:
761 raise cls.InvalidArgument(arg)
763 if _type not in Tapdisk.TYPES:
764 raise cls.InvalidType(_type)
766 return cls(_type, path)
768 class InvalidType(Exception):
769 def __init__(self, _type):
770 self.type = _type
772 def __str__(self):
773 return "Not a Tapdisk type: %s" % self.type
775 class InvalidArgument(Exception):
776 def __init__(self, arg):
777 self.arg = arg
779 def __str__(self):
780 return "Not a Tapdisk image: %s" % self.arg
782 def get_arg(self):
783 return self.Arg(self.type, self.path)
785 def get_devpath(self):
786 return "%s/tapdev%d" % (Blktap.DEV_BASEDIR, self.minor)
788 @classmethod
789 def launch_from_arg(cls, arg):
790 arg = cls.Arg.parse(arg)
791 return cls.launch(arg.path, arg.type, False)
793 @classmethod
794 def cgclassify(cls, pid):
796 # We don't provide any <controllers>:<path>
797 # so cgclassify uses /etc/cgrules.conf which
798 # we have configured in the spec file.
799 cmd = ["cgclassify", str(pid)]
800 try:
801 util.pread2(cmd)
802 except util.CommandException as e:
803 util.logException(e)
805 @classmethod
806 def spawn(cls):
807 return TapCtl.spawn()
809 @classmethod
810 def launch_on_tap(cls, blktap, path, _type, options):
812 tapdisk = cls.find_by_path(path)
813 if tapdisk:
814 raise TapdiskExists(tapdisk)
816 minor = blktap.minor
818 try:
819 pid = cls.spawn()
820 cls.cgclassify(pid)
821 try:
822 TapCtl.attach(pid, minor)
824 try:
825 retry_open = 0
826 while True:
827 try:
828 TapCtl.open(pid, minor, _type, path, options)
829 except TapCtl.CommandFailure as e:
830 err = (
831 'status' in e.info and e.info['status']
832 ) or None
833 if err in (errno.EIO, errno.EROFS, errno.EAGAIN):
834 if retry_open < 5:
835 retry_open += 1
836 time.sleep(1)
837 continue
838 if LINSTOR_AVAILABLE and err == errno.EROFS:
839 log_drbd_openers(path)
840 break
841 try:
842 tapdisk = cls.__from_blktap(blktap)
843 node = '/sys/dev/block/%d:%d' % (tapdisk.major(), tapdisk.minor)
844 util.set_scheduler_sysfs_node(node, ['none', 'noop'])
845 return tapdisk
846 except:
847 TapCtl.close(pid, minor)
848 raise
850 except:
851 TapCtl.detach(pid, minor)
852 raise
854 except:
855 try:
856 TapCtl.shutdown(pid)
857 except:
858 # Best effort to shutdown
859 pass
860 raise
862 except TapCtl.CommandFailure as ctl:
863 util.logException(ctl)
864 if ('/dev/xapi/cd/' in path and
865 'status' in ctl.info and
866 ctl.info['status'] == 123): # ENOMEDIUM (No medium found)
867 raise xs_errors.XenError('TapdiskDriveEmpty')
868 else:
869 raise TapdiskFailed(cls.Arg(_type, path), ctl)
871 @classmethod
872 def launch(cls, path, _type, rdonly):
873 blktap = Blktap.allocate()
874 try:
875 return cls.launch_on_tap(blktap, path, _type, {"rdonly": rdonly})
876 except:
877 blktap.free()
878 raise
880 def shutdown(self, force=False):
882 TapCtl.close(self.pid, self.minor, force)
884 TapCtl.detach(self.pid, self.minor)
886 self.get_blktap().free()
888 def pause(self):
890 if not self.is_running():
891 raise TapdiskInvalidState(self)
893 TapCtl.pause(self.pid, self.minor)
895 self._set_dirty()
897 def unpause(self, _type=None, path=None, mirror=None, cbtlog=None):
899 if not self.is_paused():
900 raise TapdiskInvalidState(self)
902 # FIXME: should the arguments be optional?
903 if _type is None:
904 _type = self.type
905 if path is None:
906 path = self.path
908 TapCtl.unpause(self.pid, self.minor, _type, path, mirror=mirror,
909 cbtlog=cbtlog)
911 self._set_dirty()
913 def stats(self):
914 return json.loads(TapCtl.stats(self.pid, self.minor))
915 #
916 # NB. dirty/refresh: reload attributes on next access
917 #
919 def _set_dirty(self):
920 self._dirty = True
922 def _refresh(self, __get):
923 t = self.from_minor(__get('minor'))
924 self.__init__(t.pid, t.minor, t.type, t.path, t.state)
926 def __getattribute__(self, name):
927 def __get(name):
928 # NB. avoid recursion
929 return object.__getattribute__(self, name)
931 if __get('_dirty') and \
932 name in ['minor', 'type', 'path', 'state']:
933 self._refresh(__get)
934 self._dirty = False
936 return __get(name)
938 class PauseState:
939 RUNNING = 'R'
940 PAUSING = 'r'
941 PAUSED = 'P'
943 class Flags:
944 DEAD = 0x0001
945 CLOSED = 0x0002
946 QUIESCE_REQUESTED = 0x0004
947 QUIESCED = 0x0008
948 PAUSE_REQUESTED = 0x0010
949 PAUSED = 0x0020
950 SHUTDOWN_REQUESTED = 0x0040
951 LOCKING = 0x0080
952 RETRY_NEEDED = 0x0100
953 LOG_DROPPED = 0x0200
955 PAUSE_MASK = PAUSE_REQUESTED | PAUSED
957 def is_paused(self):
958 return not not (self.state & self.Flags.PAUSED)
960 def is_running(self):
961 return not (self.state & self.Flags.PAUSE_MASK)
963 def pause_state(self):
964 if self.state & self.Flags.PAUSED:
965 return self.PauseState.PAUSED
967 if self.state & self.Flags.PAUSE_REQUESTED:
968 return self.PauseState.PAUSING
970 return self.PauseState.RUNNING
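# Illustrative worked example, not part of the original module: with
# state = Flags.PAUSE_REQUESTED | Flags.PAUSED (0x0030), is_paused() is True,
# is_running() is False and pause_state() returns PauseState.PAUSED; with only
# Flags.PAUSE_REQUESTED set (0x0010), pause_state() returns PauseState.PAUSING.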
972 @staticmethod
973 def _parse_minor(devpath):
974 regex = r'%s/(blktap|tapdev)(\d+)$' % Blktap.DEV_BASEDIR
975 pattern = re.compile(regex)
976 groups = pattern.search(devpath)
977 if not groups:
978 raise Exception("malformed tap device: '%s' (%s) " % (devpath, regex))
980 minor = groups.group(2)
981 return int(minor)
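# Illustrative sketch, not part of the original module: _parse_minor() accepts
# both device-node and blktap-style names under DEV_BASEDIR (paths below are
# hypothetical).
#
#     Tapdisk._parse_minor("/dev/xen/blktap-2/tapdev5")   # -> 5
#     Tapdisk._parse_minor("/dev/xen/blktap-2/blktap5")   # -> 5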
983 _major = None
985 @classmethod
986 def major(cls):
987 if cls._major:
988 return cls._major
990 devices = open("/proc/devices")
991 for line in devices:
993 row = line.rstrip().split(' ')
994 if len(row) != 2:
995 continue
997 major, name = row
998 if name != 'tapdev':
999 continue
1001 cls._major = int(major)
1002 break
1004 devices.close()
1005 return cls._major
1008class VDI(object):
1009 """SR.vdi driver decorator for blktap2"""
1011 CONF_KEY_ALLOW_CACHING = "vdi_allow_caching"
1012 CONF_KEY_MODE_ON_BOOT = "vdi_on_boot"
1013 CONF_KEY_CACHE_SR = "local_cache_sr"
1014 CONF_KEY_O_DIRECT = "o_direct"
1015 LOCK_CACHE_SETUP = "cachesetup"
1017 ATTACH_DETACH_RETRY_SECS = 120
1019 # number of seconds on top of NFS timeo mount option the tapdisk should
1020 # wait before reporting errors. This is to allow a retry to succeed in case
1021 # packets were lost the first time around, which prevented the NFS client
1022 # from returning before the timeo is reached even if the NFS server did
1023 # come back earlier
1024 TAPDISK_TIMEOUT_MARGIN = 30
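# Illustrative worked example, not part of the original module: with an NFS
# "timeo" mount option of 100 deciseconds, activate() below computes
# int((100 + 5) / 10) = 10 seconds and passes
# 10 + TAPDISK_TIMEOUT_MARGIN = 40 seconds to tapdisk via the "timeout" option.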
1026 def __init__(self, uuid, target, driver_info):
1027 self.target = self.TargetDriver(target, driver_info)
1028 self._vdi_uuid = uuid
1029 self._session = target.session
1030 self.xenstore_data = scsiutil.update_XS_SCSIdata(uuid, scsiutil.gen_synthetic_page_data(uuid))
1031 self.__o_direct = None
1032 self.__o_direct_reason = None
1033 self.lock = Lock("vdi", uuid)
1034 self.tap = None
1036 def get_o_direct_capability(self, options):
1037 """Returns True/False based on licensing and caching_params"""
1038 if self.__o_direct is not None:
1039 return self.__o_direct, self.__o_direct_reason
1041 if util.read_caching_is_restricted(self._session):
1042 self.__o_direct = True
1043 self.__o_direct_reason = "LICENSE_RESTRICTION"
1044 elif not ((self.target.vdi.sr.handles("nfs") or self.target.vdi.sr.handles("ext") or self.target.vdi.sr.handles("smb"))):
1045 self.__o_direct = True
1046 self.__o_direct_reason = "SR_NOT_SUPPORTED"
1047 elif not (options.get("rdonly") or self.target.vdi.parent):
1048 util.SMlog(self.target.vdi)
1049 self.__o_direct = True
1050 self.__o_direct_reason = "NO_RO_IMAGE"
1051 elif options.get("rdonly") and not self.target.vdi.parent:
1052 self.__o_direct = True
1053 self.__o_direct_reason = "RO_WITH_NO_PARENT"
1054 elif options.get(self.CONF_KEY_O_DIRECT):
1055 self.__o_direct = True
1056 self.__o_direct_reason = "SR_OVERRIDE"
1058 if self.__o_direct is None:
1059 self.__o_direct = False
1060 self.__o_direct_reason = ""
1062 return self.__o_direct, self.__o_direct_reason
1064 @classmethod
1065 def from_cli(cls, uuid):
1066 import VDI as sm
1068 session = XenAPI.xapi_local()
1069 session.xenapi.login_with_password('root', '', '', 'SM')
1071 target = sm.VDI.from_uuid(session, uuid)
1072 driver_info = target.sr.srcmd.driver_info
1074 session.xenapi.session.logout()
1076 return cls(uuid, target, driver_info)
1078 @staticmethod
1079 def _tap_type(vdi_type):
1080 """Map a VDI type (e.g. 'raw') to a tapdisk driver type (e.g. 'aio')"""
1081 return {
1082 'raw': 'aio',
1083 'vhd': 'vhd',
1084 'iso': 'aio', # for ISO SR
1085 'aio': 'aio', # for LVHD
1086 'file': 'aio',
1087 'phy': 'aio'
1088 }[vdi_type]
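# Illustrative sketch, not part of the original module: per the mapping above,
# a 'vhd' VDI is served by the 'vhd' tapdisk driver, while 'raw', 'iso', 'aio',
# 'file' and 'phy' VDIs all use the 'aio' driver.
#
#     VDI._tap_type('raw')   # -> 'aio'
#     VDI._tap_type('vhd')   # -> 'vhd'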
1090 def get_tap_type(self):
1091 vdi_type = self.target.get_vdi_type()
1092 return VDI._tap_type(vdi_type)
1094 def get_phy_path(self):
1095 return self.target.get_vdi_path()
1097 class UnexpectedVDIType(Exception):
1099 def __init__(self, vdi_type, target):
1100 self.vdi_type = vdi_type
1101 self.target = target
1103 def __str__(self):
1104 return \
1105 "Target %s has unexpected VDI type '%s'" % \
1106 (type(self.target), self.vdi_type)
1108 VDI_PLUG_TYPE = {'phy': 'phy', # for NETAPP
1109 'raw': 'phy',
1110 'aio': 'tap', # for LVHD raw nodes
1111 'iso': 'tap', # for ISOSR
1112 'file': 'tap',
1113 'vhd': 'tap'}
1115 def tap_wanted(self):
1116 # 1. Let the target vdi_type decide
1118 vdi_type = self.target.get_vdi_type()
1120 try:
1121 plug_type = self.VDI_PLUG_TYPE[vdi_type]
1122 except KeyError:
1123 raise self.UnexpectedVDIType(vdi_type,
1124 self.target.vdi)
1126 if plug_type == 'tap':
1127 return True
1128 elif self.target.vdi.sr.handles('udev'):
1129 return True
1130 # 2. Otherwise, there may be more reasons
1131 #
1132 # .. TBD
1134 return False
1136 class TargetDriver:
1137 """Safe target driver access."""
1138 # NB. *Must* test caps for optional calls. Some targets
1139 # actually implement some slots, but do not enable them. Just
1140 # try/except would risk breaking compatibility.
1142 def __init__(self, vdi, driver_info):
1143 self.vdi = vdi
1144 self._caps = driver_info['capabilities']
1146 def has_cap(self, cap):
1147 """Determine if target has given capability"""
1148 return cap in self._caps
1150 def attach(self, sr_uuid, vdi_uuid):
1151 #assert self.has_cap("VDI_ATTACH")
1152 return self.vdi.attach(sr_uuid, vdi_uuid)
1154 def detach(self, sr_uuid, vdi_uuid):
1155 #assert self.has_cap("VDI_DETACH")
1156 self.vdi.detach(sr_uuid, vdi_uuid)
1158 def activate(self, sr_uuid, vdi_uuid):
1159 if self.has_cap("VDI_ACTIVATE"):
1160 return self.vdi.activate(sr_uuid, vdi_uuid)
1162 def deactivate(self, sr_uuid, vdi_uuid):
1163 if self.has_cap("VDI_DEACTIVATE"):
1164 self.vdi.deactivate(sr_uuid, vdi_uuid)
1165 #def resize(self, sr_uuid, vdi_uuid, size):
1166 # return self.vdi.resize(sr_uuid, vdi_uuid, size)
1168 def get_vdi_type(self):
1169 _type = self.vdi.vdi_type
1170 if not _type:
1171 _type = self.vdi.sr.sr_vditype
1172 if not _type:
1173 raise VDI.UnexpectedVDIType(_type, self.vdi)
1174 return _type
1176 def get_vdi_path(self):
1177 return self.vdi.path
1179 class Link(object):
1180 """Relink a node under a common name"""
1181 # NB. We have to provide the device node path during
1182 # VDI.attach, but currently do not allocate the tapdisk minor
1183 # before VDI.activate. Therefore those link steps where we
1184 # relink existing devices under deterministic path names.
1186 BASEDIR = None
1188 def _mklink(self, target):
1189 raise NotImplementedError("_mklink is not defined")
1191 def _equals(self, target):
1192 raise NotImplementedError("_equals is not defined")
1194 def __init__(self, path):
1195 self._path = path
1197 @classmethod
1198 def from_name(cls, name):
1199 path = "%s/%s" % (cls.BASEDIR, name)
1200 return cls(path)
1202 @classmethod
1203 def from_uuid(cls, sr_uuid, vdi_uuid):
1204 name = "%s/%s" % (sr_uuid, vdi_uuid)
1205 return cls.from_name(name)
1207 def path(self):
1208 return self._path
1210 def stat(self):
1211 return os.stat(self.path())
1213 def mklink(self, target):
1215 path = self.path()
1216 util.SMlog("%s -> %s" % (self, target))
1218 mkdirs(os.path.dirname(path))
1219 try:
1220 self._mklink(target)
1221 except OSError as e:
1222 # We do unlink during teardown, but have to stay
1223 # idempotent. However, a *wrong* target should never
1224 # be seen.
1225 if e.errno != errno.EEXIST:
1226 raise
1227 assert self._equals(target), "'%s' not equal to '%s'" % (path, target)
1229 def unlink(self):
1230 try:
1231 os.unlink(self.path())
1232 except OSError as e:
1233 if e.errno != errno.ENOENT:
1234 raise
1236 def __str__(self):
1237 path = self.path()
1238 return "%s(%s)" % (self.__class__.__name__, path)
1240 class SymLink(Link):
1241 """Symlink some file to a common name"""
1243 def readlink(self):
1244 return os.readlink(self.path())
1246 def symlink(self):
1247 return self.path()
1249 def _mklink(self, target):
1250 os.symlink(target, self.path())
1252 def _equals(self, target):
1253 return self.readlink() == target
1255 class DeviceNode(Link):
1256 """Relink a block device node to a common name"""
1258 @classmethod
1259 def _real_stat(cls, target):
1260 """stat() not on @target, but its realpath()"""
1261 _target = os.path.realpath(target)
1262 return os.stat(_target)
1264 @classmethod
1265 def is_block(cls, target):
1266 """Whether @target refers to a block device."""
1267 return S_ISBLK(cls._real_stat(target).st_mode)
1269 def _mklink(self, target):
1271 st = self._real_stat(target)
1272 if not S_ISBLK(st.st_mode):
1273 raise self.NotABlockDevice(target, st)
1275 # set group read for disk group as well as root
1276 os.mknod(self.path(), st.st_mode | stat.S_IRGRP, st.st_rdev)
1277 os.chown(self.path(), st.st_uid, grp.getgrnam("disk").gr_gid)
1279 def _equals(self, target):
1280 target_rdev = self._real_stat(target).st_rdev
1281 return self.stat().st_rdev == target_rdev
1283 def rdev(self):
1284 st = self.stat()
1285 assert S_ISBLK(st.st_mode)
1286 return os.major(st.st_rdev), os.minor(st.st_rdev)
1288 class NotABlockDevice(Exception):
1290 def __init__(self, path, st):
1291 self.path = path
1292 self.st = st
1294 def __str__(self):
1295 return "%s is not a block device: %s" % (self.path, self.st)
1297 class Hybrid(Link):
1299 def __init__(self, path):
1300 VDI.Link.__init__(self, path)
1301 self._devnode = VDI.DeviceNode(path)
1302 self._symlink = VDI.SymLink(path)
1304 def rdev(self):
1305 st = self.stat()
1306 if S_ISBLK(st.st_mode):
1307 return self._devnode.rdev()
1308 raise self._devnode.NotABlockDevice(self.path(), st)
1310 def mklink(self, target):
1311 if self._devnode.is_block(target):
1312 self._obj = self._devnode
1313 else:
1314 self._obj = self._symlink
1315 self._obj.mklink(target)
1317 def _equals(self, target):
1318 return self._obj._equals(target)
1320 class PhyLink(SymLink):
1321 BASEDIR = "/dev/sm/phy"
1322 # NB. Cannot use DeviceNodes, e.g. FileVDIs aren't bdevs.
1324 class NBDLink(SymLink):
1326 BASEDIR = "/run/blktap-control/nbd"
1328 class BackendLink(Hybrid):
1329 BASEDIR = "/dev/sm/backend"
1330 # NB. Could be SymLinks as well, but saving major,minor pairs in
1331 # Links enables neat state capturing when managing Tapdisks. Note
1332 # that we essentially have a tap-ctl list replacement here. For
1333 # now make it a 'Hybrid'. Likely to collapse into a DeviceNode as
1334 # soon as ISOs are tapdisks.
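# Illustrative sketch, not part of the original module: from_uuid() composes
# "<BASEDIR>/<sr_uuid>/<vdi_uuid>", so the three link flavours resolve to paths
# such as (UUIDs hypothetical):
#
#     VDI.PhyLink.from_uuid("sr1", "vdi1").path()      # /dev/sm/phy/sr1/vdi1
#     VDI.BackendLink.from_uuid("sr1", "vdi1").path()  # /dev/sm/backend/sr1/vdi1
#     VDI.NBDLink.from_uuid("sr1", "vdi1").path()      # /run/blktap-control/nbd/sr1/vdi1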
1336 @staticmethod
1337 def _tap_activate(phy_path, vdi_type, sr_uuid, options, pool_size=None):
1339 tapdisk = Tapdisk.find_by_path(phy_path)
1340 if not tapdisk:
1341 blktap = Blktap.allocate()
1342 blktap.set_pool_name(sr_uuid)
1343 if pool_size:
1344 blktap.set_pool_size(pool_size)
1346 try:
1347 tapdisk = \
1348 Tapdisk.launch_on_tap(blktap,
1349 phy_path,
1350 VDI._tap_type(vdi_type),
1351 options)
1352 except:
1353 blktap.free()
1354 raise
1355 util.SMlog("tap.activate: Launched %s" % tapdisk)
1357 else:
1358 util.SMlog("tap.activate: Found %s" % tapdisk)
1360 return tapdisk.get_devpath(), tapdisk
1362 @staticmethod
1363 def _tap_deactivate(minor):
1365 try:
1366 tapdisk = Tapdisk.from_minor(minor)
1367 except TapdiskNotRunning as e:
1368 util.SMlog("tap.deactivate: Warning, %s" % e)
1369 # NB. Should not be here unless the agent refcount
1370 # broke. Also, a clean shutdown should not have leaked
1371 # the recorded minor.
1372 else:
1373 tapdisk.shutdown()
1374 util.SMlog("tap.deactivate: Shut down %s" % tapdisk)
1376 @classmethod
1377 def tap_pause(cls, session, sr_uuid, vdi_uuid, failfast=False):
1378 """
1379 Pauses the tapdisk.
1381 session: a XAPI session
1382 sr_uuid: the UUID of the SR on which VDI lives
1383 vdi_uuid: the UUID of the VDI to pause
1384 failfast: controls whether the VDI lock should be acquired in a
1385 non-blocking manner
1386 """
1387 util.SMlog("Pause request for %s" % vdi_uuid)
1388 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1389 session.xenapi.VDI.add_to_sm_config(vdi_ref, 'paused', 'true')
1390 sm_config = session.xenapi.VDI.get_sm_config(vdi_ref)
1391 for key in [x for x in sm_config.keys() if x.startswith('host_')]:
1392 host_ref = key[len('host_'):]
1393 util.SMlog("Calling tap-pause on host %s" % host_ref)
1394 if not cls.call_pluginhandler(session, host_ref,
1395 sr_uuid, vdi_uuid, "pause", failfast=failfast):
1396 # Failed to pause node
1397 session.xenapi.VDI.remove_from_sm_config(vdi_ref, 'paused')
1398 return False
1399 return True
1401 @classmethod
1402 def tap_unpause(cls, session, sr_uuid, vdi_uuid, secondary=None,
1403 activate_parents=False):
1404 util.SMlog("Unpause request for %s secondary=%s" % (vdi_uuid, secondary))
1405 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1406 sm_config = session.xenapi.VDI.get_sm_config(vdi_ref)
1407 for key in [x for x in sm_config.keys() if x.startswith('host_')]:
1408 host_ref = key[len('host_'):]
1409 util.SMlog("Calling tap-unpause on host %s" % host_ref)
1410 if not cls.call_pluginhandler(session, host_ref,
1411 sr_uuid, vdi_uuid, "unpause", secondary, activate_parents):
1412 # Failed to unpause node
1413 return False
1414 session.xenapi.VDI.remove_from_sm_config(vdi_ref, 'paused')
1415 return True
1417 @classmethod
1418 def tap_refresh(cls, session, sr_uuid, vdi_uuid, activate_parents=False):
1419 util.SMlog("Refresh request for %s" % vdi_uuid)
1420 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1421 sm_config = session.xenapi.VDI.get_sm_config(vdi_ref)
1422 for key in [x for x in sm_config.keys() if x.startswith('host_')]:
1423 host_ref = key[len('host_'):]
1424 util.SMlog("Calling tap-refresh on host %s" % host_ref)
1425 if not cls.call_pluginhandler(session, host_ref,
1426 sr_uuid, vdi_uuid, "refresh", None,
1427 activate_parents=activate_parents):
1428 # Failed to refresh node
1429 return False
1430 return True
1432 @classmethod
1433 def tap_status(cls, session, vdi_uuid):
1434 """Return True if disk is attached, false if it isn't"""
1435 util.SMlog("Disk status request for %s" % vdi_uuid)
1436 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1437 sm_config = session.xenapi.VDI.get_sm_config(vdi_ref)
1438 for key in [x for x in sm_config.keys() if x.startswith('host_')]:
1439 return True
1440 return False
1442 @classmethod
1443 def call_pluginhandler(cls, session, host_ref, sr_uuid, vdi_uuid, action,
1444 secondary=None, activate_parents=False, failfast=False):
1445 """Optionally, activate the parent LV before unpausing"""
1446 try:
1447 args = {"sr_uuid": sr_uuid, "vdi_uuid": vdi_uuid,
1448 "failfast": str(failfast)}
1449 if secondary:
1450 args["secondary"] = secondary
1451 if activate_parents:
1452 args["activate_parents"] = "true"
1453 ret = session.xenapi.host.call_plugin(
1454 host_ref, PLUGIN_TAP_PAUSE, action,
1455 args)
1456 return ret == "True"
1457 except Exception as e:
1458 util.logException("BLKTAP2:call_pluginhandler %s" % e)
1459 return False
1461 def _add_tag(self, vdi_uuid, writable):
1462 util.SMlog("Adding tag to: %s" % vdi_uuid)
1463 attach_mode = "RO"
1464 if writable:
1465 attach_mode = "RW"
1466 vdi_ref = self._session.xenapi.VDI.get_by_uuid(vdi_uuid)
1467 host_ref = self._session.xenapi.host.get_by_uuid(util.get_this_host())
1468 sm_config = self._session.xenapi.VDI.get_sm_config(vdi_ref)
1469 attached_as = util.attached_as(sm_config)
1470 if NO_MULTIPLE_ATTACH and (attached_as == "RW" or \
1471 (attached_as == "RO" and attach_mode == "RW")):
1472 util.SMlog("need to reset VDI %s" % vdi_uuid)
1473 if not resetvdis.reset_vdi(self._session, vdi_uuid, force=False,
1474 term_output=False, writable=writable):
1475 raise util.SMException("VDI %s not detached cleanly" % vdi_uuid)
1476 sm_config = self._session.xenapi.VDI.get_sm_config(vdi_ref)
1477 if 'relinking' in sm_config:
1478 util.SMlog("Relinking key found, back-off and retry" % sm_config)
1479 return False
1480 if 'paused' in sm_config:
1481 util.SMlog("Paused or host_ref key found [%s]" % sm_config)
1482 return False
1483 self._session.xenapi.VDI.add_to_sm_config(
1484 vdi_ref, 'activating', 'True')
1485 host_key = "host_%s" % host_ref
1486 assert host_key not in sm_config
1487 self._session.xenapi.VDI.add_to_sm_config(vdi_ref, host_key,
1488 attach_mode)
1489 sm_config = self._session.xenapi.VDI.get_sm_config(vdi_ref)
1490 if 'paused' in sm_config or 'relinking' in sm_config:
1491 util.SMlog("Found %s key, aborting" % (
1492 'paused' if 'paused' in sm_config else 'relinking'))
1493 self._session.xenapi.VDI.remove_from_sm_config(vdi_ref, host_key)
1494 self._session.xenapi.VDI.remove_from_sm_config(
1495 vdi_ref, 'activating')
1496 return False
1497 util.SMlog("Activate lock succeeded")
1498 return True
1500 def _check_tag(self, vdi_uuid):
1501 vdi_ref = self._session.xenapi.VDI.get_by_uuid(vdi_uuid)
1502 sm_config = self._session.xenapi.VDI.get_sm_config(vdi_ref)
1503 if 'paused' in sm_config:
1504 util.SMlog("Paused key found [%s]" % sm_config)
1505 return False
1506 return True
1508 def _remove_tag(self, vdi_uuid):
1509 vdi_ref = self._session.xenapi.VDI.get_by_uuid(vdi_uuid)
1510 host_ref = self._session.xenapi.host.get_by_uuid(util.get_this_host())
1511 sm_config = self._session.xenapi.VDI.get_sm_config(vdi_ref)
1512 host_key = "host_%s" % host_ref
1513 if host_key in sm_config:
1514 self._session.xenapi.VDI.remove_from_sm_config(vdi_ref, host_key)
1515 util.SMlog("Removed host key %s for %s" % (host_key, vdi_uuid))
1516 else:
1517 util.SMlog("_remove_tag: host key %s not found, ignore" % host_key)
1519 def _get_pool_config(self, pool_name):
1520 pool_info = dict()
1521 vdi_ref = self.target.vdi.sr.srcmd.params.get('vdi_ref')
1522 if not vdi_ref:
1523 # attach_from_config context: HA disks don't need to be in any
1524 # special pool
1525 return pool_info
1527 sr_ref = self.target.vdi.sr.srcmd.params.get('sr_ref')
1528 sr_config = self._session.xenapi.SR.get_other_config(sr_ref)
1529 vdi_config = self._session.xenapi.VDI.get_other_config(vdi_ref)
1530 pool_size_str = sr_config.get(POOL_SIZE_KEY)
1531 pool_name_override = vdi_config.get(POOL_NAME_KEY)
1532 if pool_name_override:
1533 pool_name = pool_name_override
1534 pool_size_override = vdi_config.get(POOL_SIZE_KEY)
1535 if pool_size_override:
1536 pool_size_str = pool_size_override
1537 pool_size = 0
1538 if pool_size_str:
1539 try:
1540 pool_size = int(pool_size_str)
1541 if pool_size < 1 or pool_size > MAX_FULL_RINGS:
1542 raise ValueError("outside of range")
1543 pool_size = NUM_PAGES_PER_RING * pool_size
1544 except ValueError:
1545 util.SMlog("Error: invalid mem-pool-size %s" % pool_size_str)
1546 pool_size = 0
1548 pool_info["mem-pool"] = pool_name
1549 if pool_size:
1550 pool_info["mem-pool-size"] = str(pool_size)
1552 return pool_info
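# Illustrative worked example, not part of the original module: if the SR or
# VDI other-config sets mem-pool-size-rings (POOL_SIZE_KEY) to "4", the code
# above computes 4 * NUM_PAGES_PER_RING = 4 * 352 = 1408 pages, so pool_info
# becomes roughly {"mem-pool": <pool name>, "mem-pool-size": "1408"}.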
1554 def linkNBD(self, sr_uuid, vdi_uuid):
1555 if self.tap:
1556 nbd_path = '/run/blktap-control/nbd%d.%d' % (int(self.tap.pid),
1557 int(self.tap.minor))
1558 VDI.NBDLink.from_uuid(sr_uuid, vdi_uuid).mklink(nbd_path)
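# Illustrative sketch, not part of the original module: for a tapdisk with
# pid 1234 and minor 2 (values hypothetical), linkNBD() links
# /run/blktap-control/nbd/<sr_uuid>/<vdi_uuid> to /run/blktap-control/nbd1234.2,
# and attach() below advertises it as
# "nbd:unix:/run/blktap-control/nbd/<sr_uuid>/<vdi_uuid>:exportname=<vdi_uuid>".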
1560 def attach(self, sr_uuid, vdi_uuid, writable, activate=False, caching_params={}):
1561 """Return/dev/sm/backend symlink path"""
1562 self.xenstore_data.update(self._get_pool_config(sr_uuid))
1563 if not self.target.has_cap("ATOMIC_PAUSE") or activate:
1564 util.SMlog("Attach & activate")
1565 self._attach(sr_uuid, vdi_uuid)
1566 dev_path = self._activate(sr_uuid, vdi_uuid,
1567 {"rdonly": not writable})
1568 self.BackendLink.from_uuid(sr_uuid, vdi_uuid).mklink(dev_path)
1569 self.linkNBD(sr_uuid, vdi_uuid)
1571 # Return backend/ link
1572 back_path = self.BackendLink.from_uuid(sr_uuid, vdi_uuid).path()
1573 if self.tap_wanted():
1574 # Only have NBD if we also have a tap
1575 nbd_path = "nbd:unix:{}:exportname={}".format(
1576 VDI.NBDLink.from_uuid(sr_uuid, vdi_uuid).path(),
1577 vdi_uuid)
1578 else:
1579 nbd_path = ""
1581 options = {"rdonly": not writable}
1582 options.update(caching_params)
1583 o_direct, o_direct_reason = self.get_o_direct_capability(options)
1584 struct = {'params': back_path,
1585 'params_nbd': nbd_path,
1586 'o_direct': o_direct,
1587 'o_direct_reason': o_direct_reason,
1588 'xenstore_data': self.xenstore_data}
1589 util.SMlog('result: %s' % struct)
1591 try:
1592 f = open("%s.attach_info" % back_path, 'a')
1593 f.write(xmlrpc.client.dumps((struct, ), "", True))
1594 f.close()
1595 except:
1596 pass
1598 return xmlrpc.client.dumps((struct, ), "", True)
1600 def activate(self, sr_uuid, vdi_uuid, writable, caching_params):
1601 util.SMlog("blktap2.activate")
1602 options = {"rdonly": not writable}
1603 options.update(caching_params)
1605 sr_ref = self.target.vdi.sr.srcmd.params.get('sr_ref')
1606 sr_other_config = self._session.xenapi.SR.get_other_config(sr_ref)
1607 timeout = nfs.get_nfs_timeout(sr_other_config)
1608 if timeout:
1609 # Note NFS timeout values are in deciseconds
1610 timeout = int((timeout + 5) / 10)
1611 options["timeout"] = timeout + self.TAPDISK_TIMEOUT_MARGIN
1612 for i in range(self.ATTACH_DETACH_RETRY_SECS):
1613 try:
1614 if self._activate_locked(sr_uuid, vdi_uuid, options):
1615 return
1616 except util.SRBusyException:
1617 util.SMlog("SR locked, retrying")
1618 time.sleep(1)
1619 raise util.SMException("VDI %s locked" % vdi_uuid)
1621 @locking("VDIUnavailable")
1622 def _activate_locked(self, sr_uuid, vdi_uuid, options):
1623 """Wraps target.activate and adds a tapdisk"""
1625 #util.SMlog("VDI.activate %s" % vdi_uuid)
1626 if self.tap_wanted():
1627 if not self._add_tag(vdi_uuid, not options["rdonly"]):
1628 return False
1629 # it is possible that while the VDI was paused some of its
1630 # attributes have changed (e.g. its size if it was inflated; or its
1631 # path if it was leaf-coalesced onto a raw LV), so refresh the
1632 # object completely
1633 params = self.target.vdi.sr.srcmd.params
1634 target = sm.VDI.from_uuid(self.target.vdi.session, vdi_uuid)
1635 target.sr.srcmd.params = params
1636 driver_info = target.sr.srcmd.driver_info
1637 self.target = self.TargetDriver(target, driver_info)
1639 try:
1640 util.fistpoint.activate_custom_fn(
1641 "blktap_activate_inject_failure",
1642 lambda: util.inject_failure())
1644 # Attach the physical node
1645 if self.target.has_cap("ATOMIC_PAUSE"): 1645 ↛ 1648line 1645 didn't jump to line 1648, because the condition on line 1645 was never false
1646 self._attach(sr_uuid, vdi_uuid)
1648 vdi_type = self.target.get_vdi_type()
1650 # Take lvchange-p Lock before running
1651 # tap-ctl open
1652 # Needed to avoid race with lvchange -p which is
1653 # now taking the same lock
1654 # This is a fix for CA-155766
1655 if hasattr(self.target.vdi.sr, 'DRIVER_TYPE') and \
1656 self.target.vdi.sr.DRIVER_TYPE == 'lvhd' and \
1657 vdi_type == vhdutil.VDI_TYPE_VHD:
1658 lock = Lock("lvchange-p", lvhdutil.NS_PREFIX_LVM + sr_uuid)
1659 lock.acquire()
1661 # When we attach a static VDI for HA, we cannot communicate with
1662 # xapi, because it has not started yet. These VDIs are raw.
1663 if vdi_type != vhdutil.VDI_TYPE_RAW:
1664 session = self.target.vdi.session
1665 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1666 # pylint: disable=used-before-assignment
1667 sm_config = session.xenapi.VDI.get_sm_config(vdi_ref)
1668 if 'key_hash' in sm_config:
1669 key_hash = sm_config['key_hash']
1670 options['key_hash'] = key_hash
1671 options['vdi_uuid'] = vdi_uuid
1672 util.SMlog('Using key with hash {} for VDI {}'.format(key_hash, vdi_uuid))
1673 # Activate the physical node
1674 dev_path = self._activate(sr_uuid, vdi_uuid, options)
1676 if hasattr(self.target.vdi.sr, 'DRIVER_TYPE') and \
1677 self.target.vdi.sr.DRIVER_TYPE == 'lvhd' and \
1678 self.target.get_vdi_type() == vhdutil.VDI_TYPE_VHD:
1679 lock.release()
1680 except:
1681 util.SMlog("Exception in activate/attach")
1682 if self.tap_wanted():
1683 util.fistpoint.activate_custom_fn(
1684 "blktap_activate_error_handling",
1685 lambda: time.sleep(30))
1686 while True:
1687 try:
1688 self._remove_tag(vdi_uuid)
1689 break
1690 except xmlrpc.client.ProtocolError as e:
1691 # If there's a connection error, keep trying forever.
1692 if e.errcode == http.HTTPStatus.INTERNAL_SERVER_ERROR.value:
1693 continue
1694 else:
1695 util.SMlog('failed to remove tag: %s' % e)
1696 break
1697 except Exception as e:
1698 util.SMlog('failed to remove tag: %s' % e)
1699 break
1700 raise
1701 finally:
1702 vdi_ref = self._session.xenapi.VDI.get_by_uuid(vdi_uuid)
1703 self._session.xenapi.VDI.remove_from_sm_config(
1704 vdi_ref, 'activating')
1705 util.SMlog("Removed activating flag from %s" % vdi_uuid) 1705 ↛ exitline 1705 didn't except from function '_activate_locked', because the raise on line 1700 wasn't executed
1707 # Link result to backend/
1708 self.BackendLink.from_uuid(sr_uuid, vdi_uuid).mklink(dev_path)
1709 self.linkNBD(sr_uuid, vdi_uuid)
1710 return True
1712 def _activate(self, sr_uuid, vdi_uuid, options):
1713 vdi_options = self.target.activate(sr_uuid, vdi_uuid)
1715 dev_path = self.setup_cache(sr_uuid, vdi_uuid, options)
1716 if not dev_path:
1717 phy_path = self.PhyLink.from_uuid(sr_uuid, vdi_uuid).readlink()
1718 # Maybe launch a tapdisk on the physical link
1719 if self.tap_wanted():
1720 vdi_type = self.target.get_vdi_type()
1721 options["o_direct"] = self.get_o_direct_capability(options)[0]
1722 if vdi_options:
1723 options.update(vdi_options)
1724 dev_path, self.tap = self._tap_activate(phy_path, vdi_type,
1725 sr_uuid, options,
1726 self._get_pool_config(sr_uuid).get("mem-pool-size"))
1727 else:
1728 dev_path = phy_path # Just reuse phy
1730 return dev_path
1732 def _attach(self, sr_uuid, vdi_uuid):
1733 attach_info = xmlrpc.client.loads(self.target.attach(sr_uuid, vdi_uuid))[0][0]
1734 params = attach_info['params']
1735 xenstore_data = attach_info['xenstore_data']
1736 phy_path = util.to_plain_string(params)
1737 self.xenstore_data.update(xenstore_data)
1738 # Save it to phy/
1739 self.PhyLink.from_uuid(sr_uuid, vdi_uuid).mklink(phy_path)
1741 def deactivate(self, sr_uuid, vdi_uuid, caching_params):
1742 util.SMlog("blktap2.deactivate")
1743 for i in range(self.ATTACH_DETACH_RETRY_SECS):
1744 try:
1745 if self._deactivate_locked(sr_uuid, vdi_uuid, caching_params):
1746 return
1747 except util.SRBusyException as e:
1748 util.SMlog("SR locked, retrying")
1749 time.sleep(1)
1750 raise util.SMException("VDI %s locked" % vdi_uuid)
1752 @locking("VDIUnavailable")
1753 def _deactivate_locked(self, sr_uuid, vdi_uuid, caching_params):
1754 """Wraps target.deactivate and removes a tapdisk"""
1756 #util.SMlog("VDI.deactivate %s" % vdi_uuid)
1757 if self.tap_wanted() and not self._check_tag(vdi_uuid):
1758 return False
1760 self._deactivate(sr_uuid, vdi_uuid, caching_params)
1761 if self.target.has_cap("ATOMIC_PAUSE"):
1762 self._detach(sr_uuid, vdi_uuid)
1763 if self.tap_wanted():
1764 self._remove_tag(vdi_uuid)
1766 return True
1768 def _resetPhylink(self, sr_uuid, vdi_uuid, path):
1769 self.PhyLink.from_uuid(sr_uuid, vdi_uuid).mklink(path)
1771 def detach(self, sr_uuid, vdi_uuid, deactivate=False, caching_params={}):
1772 if not self.target.has_cap("ATOMIC_PAUSE") or deactivate:
1773 util.SMlog("Deactivate & detach")
1774 self._deactivate(sr_uuid, vdi_uuid, caching_params)
1775 self._detach(sr_uuid, vdi_uuid)
1776 else:
1777 pass # nothing to do
1779 def _deactivate(self, sr_uuid, vdi_uuid, caching_params):
1780 import VDI as sm
1782 # Shutdown tapdisk
1783 back_link = self.BackendLink.from_uuid(sr_uuid, vdi_uuid)
1785 if not util.pathexists(back_link.path()):
1786 util.SMlog("Backend path %s does not exist" % back_link.path())
1787 return
1789 try:
1790 attach_info_path = "%s.attach_info" % (back_link.path())
1791 os.unlink(attach_info_path)
1792 except:
1793 util.SMlog("unlink of attach_info failed")
1795 try:
1796 major, minor = back_link.rdev()
1797 except self.DeviceNode.NotABlockDevice:
1798 pass
1799 else:
1800 if major == Tapdisk.major():
1801 self._tap_deactivate(minor)
1802 self.remove_cache(sr_uuid, vdi_uuid, caching_params)
1804 # Remove the backend link
1805 back_link.unlink()
1806 VDI.NBDLink.from_uuid(sr_uuid, vdi_uuid).unlink()
1808 # Deactivate & detach the physical node
1809 if self.tap_wanted() and self.target.vdi.session is not None:
1810 # it is possible that while the VDI was paused some of its
1811 # attributes have changed (e.g. its size if it was inflated; or its
1812 # path if it was leaf-coalesced onto a raw LV), so refresh the
1813 # object completely
1814 target = sm.VDI.from_uuid(self.target.vdi.session, vdi_uuid)
1815 driver_info = target.sr.srcmd.driver_info
1816 self.target = self.TargetDriver(target, driver_info)
1818 self.target.deactivate(sr_uuid, vdi_uuid)
1820 def _detach(self, sr_uuid, vdi_uuid):
1821 self.target.detach(sr_uuid, vdi_uuid)
1823 # Remove phy/
1824 self.PhyLink.from_uuid(sr_uuid, vdi_uuid).unlink()
1826 def _updateCacheRecord(self, session, vdi_uuid, on_boot, caching):
1827 # Remove existing VDI.sm_config fields
1828 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1829 for key in ["on_boot", "caching"]:
1830 session.xenapi.VDI.remove_from_sm_config(vdi_ref, key)
1831 if not on_boot is None:
1832 session.xenapi.VDI.add_to_sm_config(vdi_ref, 'on_boot', on_boot)
1833 if not caching is None:
1834 session.xenapi.VDI.add_to_sm_config(vdi_ref, 'caching', caching)
1836 def setup_cache(self, sr_uuid, vdi_uuid, params):
1837 if params.get(self.CONF_KEY_ALLOW_CACHING) != "true":
1838 return
1840 util.SMlog("Requested local caching")
1841 if not self.target.has_cap("SR_CACHING"):
1842 util.SMlog("Error: local caching not supported by this SR")
1843 return
1845 scratch_mode = False
1846 if params.get(self.CONF_KEY_MODE_ON_BOOT) == "reset":
1847 scratch_mode = True
1848 util.SMlog("Requested scratch mode")
1849 if not self.target.has_cap("VDI_RESET_ON_BOOT/2"):
1850 util.SMlog("Error: scratch mode not supported by this SR")
1851 return
1853 dev_path = None
1854 local_sr_uuid = params.get(self.CONF_KEY_CACHE_SR)
1855 if not local_sr_uuid:
1856 util.SMlog("ERROR: Local cache SR not specified, not enabling")
1857 return
1858 dev_path = self._setup_cache(self._session, sr_uuid, vdi_uuid,
1859 local_sr_uuid, scratch_mode, params)
1861 if dev_path:
1862 self._updateCacheRecord(self._session, self.target.vdi.uuid,
1863 params.get(self.CONF_KEY_MODE_ON_BOOT),
1864 params.get(self.CONF_KEY_ALLOW_CACHING))
1866 return dev_path
1868 def alert_no_cache(self, session, vdi_uuid, cache_sr_uuid, err):
1869 vm_uuid = None
1870 vm_label = ""
1871 try:
1872 cache_sr_ref = session.xenapi.SR.get_by_uuid(cache_sr_uuid)
1873 cache_sr_rec = session.xenapi.SR.get_record(cache_sr_ref)
1874 cache_sr_label = cache_sr_rec.get("name_label")
1876 host_ref = session.xenapi.host.get_by_uuid(util.get_this_host())
1877 host_rec = session.xenapi.host.get_record(host_ref)
1878 host_label = host_rec.get("name_label")
1880 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1881 vbds = session.xenapi.VBD.get_all_records_where( \
1882 "field \"VDI\" = \"%s\"" % vdi_ref)
1883 for vbd_rec in vbds.values():
1884 vm_ref = vbd_rec.get("VM")
1885 vm_rec = session.xenapi.VM.get_record(vm_ref)
1886 vm_uuid = vm_rec.get("uuid")
1887 vm_label = vm_rec.get("name_label")
1888 except:
1889 util.logException("alert_no_cache")
1891 alert_obj = "SR"
1892 alert_uuid = str(cache_sr_uuid)
1893 alert_str = "No space left in Local Cache SR %s" % cache_sr_uuid
1894 if vm_uuid:
1895 alert_obj = "VM"
1896 alert_uuid = vm_uuid
1897 reason = ""
1898 if err == errno.ENOSPC:
1899 reason = "because there is no space left"
1900 alert_str = "The VM \"%s\" is not using IntelliCache %s on the Local Cache SR (\"%s\") on host \"%s\"" % \
1901 (vm_label, reason, cache_sr_label, host_label)
1903 util.SMlog("Creating alert: (%s, %s, \"%s\")" % \
1904 (alert_obj, alert_uuid, alert_str))
1905 session.xenapi.message.create("No space left in local cache", "3",
1906 alert_obj, alert_uuid, alert_str)
1908 def _setup_cache(self, session, sr_uuid, vdi_uuid, local_sr_uuid,
1909 scratch_mode, options):
1910 import SR
1911 import EXTSR
1912 import NFSSR
1913 from lock import Lock
1914 from FileSR import FileVDI
1916 parent_uuid = vhdutil.getParent(self.target.vdi.path,
1917 FileVDI.extractUuid)
1918 if not parent_uuid:
1919 util.SMlog("ERROR: VDI %s has no parent, not enabling" % \
1920 self.target.vdi.uuid)
1921 return
1923 util.SMlog("Setting up cache")
1924 parent_uuid = parent_uuid.strip()
1925 shared_target = NFSSR.NFSFileVDI(self.target.vdi.sr, parent_uuid)
1927 if shared_target.parent:
1928 util.SMlog("ERROR: Parent VDI %s has parent, not enabling" %
1929 shared_target.uuid)
1930 return
1932 SR.registerSR(EXTSR.EXTSR)
1933 local_sr = SR.SR.from_uuid(session, local_sr_uuid)
1935 lock = Lock(self.LOCK_CACHE_SETUP, parent_uuid)
1936 lock.acquire()
1938 # read cache
1939 read_cache_path = "%s/%s.vhdcache" % (local_sr.path, shared_target.uuid)
1940 if util.pathexists(read_cache_path):
1941 util.SMlog("Read cache node (%s) already exists, not creating" % \
1942 read_cache_path)
1943 else:
1944 try:
1945 vhdutil.snapshot(read_cache_path, shared_target.path, False)
1946 except util.CommandException as e:
1947 util.SMlog("Error creating parent cache: %s" % e)
1948 self.alert_no_cache(session, vdi_uuid, local_sr_uuid, e.code)
1949 return None
1951 # local write node
1952 leaf_size = vhdutil.getSizeVirt(self.target.vdi.path)
1953 local_leaf_path = "%s/%s.vhdcache" % \
1954 (local_sr.path, self.target.vdi.uuid)
1955 if util.pathexists(local_leaf_path):
1956 util.SMlog("Local leaf node (%s) already exists, deleting" % \
1957 local_leaf_path)
1958 os.unlink(local_leaf_path)
1959 try:
1960 vhdutil.snapshot(local_leaf_path, read_cache_path, False,
1961 msize=leaf_size // 1024 // 1024, checkEmpty=False)
1962 except util.CommandException as e:
1963 util.SMlog("Error creating leaf cache: %s" % e)
1964 self.alert_no_cache(session, vdi_uuid, local_sr_uuid, e.code)
1965 return None
1967 local_leaf_size = vhdutil.getSizeVirt(local_leaf_path)
1968 if leaf_size > local_leaf_size:
1969 util.SMlog("Leaf size %d > local leaf cache size %d, resizing" %
1970 (leaf_size, local_leaf_size))
1971 vhdutil.setSizeVirtFast(local_leaf_path, leaf_size)
1973 vdi_type = self.target.get_vdi_type()
1975 prt_tapdisk = Tapdisk.find_by_path(read_cache_path)
1976 if not prt_tapdisk:
1977 parent_options = copy.deepcopy(options)
1978 parent_options["rdonly"] = False
1979 parent_options["lcache"] = True
1981 blktap = Blktap.allocate()
1982 try:
1983 blktap.set_pool_name("lcache-parent-pool-%s" % blktap.minor)
1984 # no need to change pool_size since each parent tapdisk is in
1985 # its own pool
1986 prt_tapdisk = \
1987 Tapdisk.launch_on_tap(blktap, read_cache_path,
1988 'vhd', parent_options)
1989 except:
1990 blktap.free()
1991 raise
1993 secondary = "%s:%s" % (self.target.get_vdi_type(),
1994 self.PhyLink.from_uuid(sr_uuid, vdi_uuid).readlink())
1996 util.SMlog("Parent tapdisk: %s" % prt_tapdisk)
1997 leaf_tapdisk = Tapdisk.find_by_path(local_leaf_path)
1998 if not leaf_tapdisk:
1999 blktap = Blktap.allocate()
2000 child_options = copy.deepcopy(options)
2001 child_options["rdonly"] = False
2002 child_options["lcache"] = False
2003 child_options["existing_prt"] = prt_tapdisk.minor
2004 child_options["secondary"] = secondary
2005 child_options["standby"] = scratch_mode
2006 try:
2007 leaf_tapdisk = \
2008 Tapdisk.launch_on_tap(blktap, local_leaf_path,
2009 'vhd', child_options)
2010 except:
2011 blktap.free()
2012 raise
2014 lock.release()
2016 util.SMlog("Local read cache: %s, local leaf: %s" % \
2017 (read_cache_path, local_leaf_path))
2019 self.tap = leaf_tapdisk
2020 return leaf_tapdisk.get_devpath()
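# Resulting IntelliCache layout produced by _setup_cache() above (summary of
# the code, paths abbreviated): the local SR carries two VHD cache nodes,
#
#   <local_sr.path>/<parent_uuid>.vhdcache  read cache, snapshot of the shared
#                                           parent VDI
#   <local_sr.path>/<vdi_uuid>.vhdcache     local leaf, snapshot of the read
#                                           cache node
#
# served by two tapdisks: a parent tapdisk on the read cache in lcache mode
# with its own "lcache-parent-pool-<minor>" page pool, and a leaf tapdisk
# whose "existing_prt" is the parent's minor and whose "secondary" is
# "<vdi_type>:<physical path>" of the VDI being cached.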
2022 def remove_cache(self, sr_uuid, vdi_uuid, params):
2023 if not self.target.has_cap("SR_CACHING"):
2024 return
2026 caching = params.get(self.CONF_KEY_ALLOW_CACHING) == "true"
2028 local_sr_uuid = params.get(self.CONF_KEY_CACHE_SR)
2029 if caching and not local_sr_uuid:
2030 util.SMlog("ERROR: Local cache SR not specified, ignore")
2031 return
2033 if caching:
2034 self._remove_cache(self._session, local_sr_uuid)
2036 if self._session is not None:
2037 self._updateCacheRecord(self._session, self.target.vdi.uuid, None, None)
2039 def _is_tapdisk_in_use(self, minor):
2040 retVal, links, sockets = util.findRunningProcessOrOpenFile("tapdisk")
2041 if not retVal:
2042 # err on the side of caution
2043 return True
2045 for link in links:
2046 if link.find("tapdev%d" % minor) != -1:
2047 return True
2049 socket_re = re.compile(r'^/.*/nbd\d+\.%d' % minor)
2050 for s in sockets:
2051 if socket_re.match(s):
2052 return True
2054 return False
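# Sketch of what _is_tapdisk_in_use() looks for (example values are
# hypothetical): an open file link containing "tapdev5" for minor 5 (the
# blktap device node, typically /dev/xen/blktap-2/tapdev5), or an NBD unix
# socket whose path matches r'^/.*/nbd\d+\.5'. Either finding keeps the
# parent cache tapdisk alive during _remove_cache(); if the process scan
# itself fails, the tapdisk is conservatively reported as in use.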
2056 def _remove_cache(self, session, local_sr_uuid):
2057 import SR
2058 import EXTSR
2059 import NFSSR
2060 from lock import Lock
2061 from FileSR import FileVDI
2063 parent_uuid = vhdutil.getParent(self.target.vdi.path,
2064 FileVDI.extractUuid)
2065 if not parent_uuid:
2066 util.SMlog("ERROR: No parent for VDI %s, ignore" % \
2067 self.target.vdi.uuid)
2068 return
2070 util.SMlog("Tearing down the cache")
2072 parent_uuid = parent_uuid.strip()
2073 shared_target = NFSSR.NFSFileVDI(self.target.vdi.sr, parent_uuid)
2075 SR.registerSR(EXTSR.EXTSR)
2076 local_sr = SR.SR.from_uuid(session, local_sr_uuid)
2078 lock = Lock(self.LOCK_CACHE_SETUP, parent_uuid)
2079 lock.acquire()
2081 # local write node
2082 local_leaf_path = "%s/%s.vhdcache" % \
2083 (local_sr.path, self.target.vdi.uuid)
2084 if util.pathexists(local_leaf_path):
2085 util.SMlog("Deleting local leaf node %s" % local_leaf_path)
2086 os.unlink(local_leaf_path)
2088 read_cache_path = "%s/%s.vhdcache" % (local_sr.path, shared_target.uuid)
2089 prt_tapdisk = Tapdisk.find_by_path(read_cache_path)
2090 if not prt_tapdisk:
2091 util.SMlog("Parent tapdisk not found")
2092 elif not self._is_tapdisk_in_use(prt_tapdisk.minor):
2093 util.SMlog("Parent tapdisk not in use: shutting down %s" % \
2094 read_cache_path)
2095 try:
2096 prt_tapdisk.shutdown()
2097 except:
2098 util.logException("shutting down parent tapdisk")
2099 else:
2100 util.SMlog("Parent tapdisk still in use: %s" % read_cache_path)
2101 # the parent cache files are removed during the local SR's background
2102 # GC run
2104 lock.release()
2106PythonKeyError = KeyError
2109class UEventHandler(object):
2111 def __init__(self):
2112 self._action = None
2114 class KeyError(PythonKeyError):
2115 def __init__(self, args):
2116 super().__init__(args)
2117 self.key = args[0]
2119 def __str__(self):
2120 return \
2121 "Key '%s' missing in environment. " % self.key + \
2122 "Not called in udev context?"
2124 @classmethod
2125 def getenv(cls, key):
2126 try:
2127 return os.environ[key]
2128 except KeyError as e:
2129 raise cls.KeyError(e.args[0])
2131 def get_action(self):
2132 if not self._action:
2133 self._action = self.getenv('ACTION')
2134 return self._action
2136 class UnhandledEvent(Exception):
2138 def __init__(self, event, handler):
2139 self.event = event
2140 self.handler = handler
2142 def __str__(self):
2143 return "Uevent '%s' not handled by %s" % \
2144 (self.event, self.handler.__class__.__name__)
2146 ACTIONS = {}
2148 def run(self):
2150 action = self.get_action()
2151 try:
2152 fn = self.ACTIONS[action]
2153 except KeyError:
2154 raise self.UnhandledEvent(action, self)
2156 return fn(self)
2158 def __str__(self):
2159 try:
2160 action = self.get_action()
2161 except:
2162 action = None
2163 return "%s[%s]" % (self.__class__.__name__, action)
2166class __BlktapControl(ClassDevice):
2167 SYSFS_CLASSTYPE = "misc"
2169 def __init__(self):
2170 ClassDevice.__init__(self)
2171 self._default_pool = None
2173 def sysfs_devname(self):
2174 return "blktap!control"
2176 class DefaultPool(Attribute):
2177 SYSFS_NODENAME = "default_pool"
2179 def get_default_pool_attr(self):
2180 if not self._default_pool:
2181 self._default_pool = self.DefaultPool.from_kobject(self)
2182 return self._default_pool
2184 def get_default_pool_name(self):
2185 return self.get_default_pool_attr().readline()
2187 def set_default_pool_name(self, name):
2188 self.get_default_pool_attr().writeline(name)
2190 def get_default_pool(self):
2191 return BlktapControl.get_pool(self.get_default_pool_name())
2193 def set_default_pool(self, pool):
2194 self.set_default_pool_name(pool.name)
2196 class NoSuchPool(Exception):
2197 def __init__(self, name):
2198 self.name = name
2200 def __str__(self):
2201 return "No such pool: {}".format(self.name)
2203 def get_pool(self, name):
2204 path = "%s/pools/%s" % (self.sysfs_path(), name)
2206 if not os.path.isdir(path):
2207 raise self.NoSuchPool(name)
2209 return PagePool(path)
2211BlktapControl = __BlktapControl()
2214class PagePool(KObject):
2216 def __init__(self, path):
2217 self.path = path
2218 self._size = None
2220 def sysfs_path(self):
2221 return self.path
2223 class Size(Attribute):
2224 SYSFS_NODENAME = "size"
2226 def get_size_attr(self):
2227 if not self._size:
2228 self._size = self.Size.from_kobject(self)
2229 return self._size
2231 def set_size(self, pages):
2232 pages = str(pages)
2233 self.get_size_attr().writeline(pages)
2235 def get_size(self):
2236 pages = self.get_size_attr().readline()
2237 return int(pages)
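# Usage sketch, assuming a pool name of the form created in _setup_cache();
# this helper is illustrative only and is not part of the driver.
def _example_grow_pool(name, extra_pages):
    # Look up a page pool by name via BlktapControl and grow its sysfs
    # "size" attribute; raises NoSuchPool if the pool directory is absent.
    pool = BlktapControl.get_pool(name)
    pool.set_size(pool.get_size() + extra_pages)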
2240class BusDevice(KObject):
2242 SYSFS_BUSTYPE = None
2244 @classmethod
2245 def sysfs_bus_path(cls):
2246 return "/sys/bus/%s" % cls.SYSFS_BUSTYPE
2248 def sysfs_path(self):
2249 path = "%s/devices/%s" % (self.sysfs_bus_path(),
2250 self.sysfs_devname())
2252 return path
2255class XenbusDevice(BusDevice):
2256 """Xenbus device, in XS and sysfs"""
2258 XBT_NIL = ""
2260 XENBUS_DEVTYPE = None
2262 def __init__(self, domid, devid):
2263 self.domid = int(domid)
2264 self.devid = int(devid)
2265 self._xbt = XenbusDevice.XBT_NIL
2267 import xen.lowlevel.xs # pylint: disable=import-error
2268 self.xs = xen.lowlevel.xs.xs()
2270 def xs_path(self, key=None):
2271 path = "backend/%s/%d/%d" % (self.XENBUS_DEVTYPE,
2272 self.domid,
2273 self.devid)
2274 if key is not None:
2275 path = "%s/%s" % (path, key)
2277 return path
2279 def _log(self, prio, msg):
2280 syslog(prio, msg)
2282 def info(self, msg):
2283 self._log(_syslog.LOG_INFO, msg)
2285 def warn(self, msg):
2286 self._log(_syslog.LOG_WARNING, "WARNING: " + msg)
2288 def _xs_read_path(self, path):
2289 val = self.xs.read(self._xbt, path)
2290 #self.info("read %s = '%s'" % (path, val))
2291 return val
2293 def _xs_write_path(self, path, val):
2294 self.xs.write(self._xbt, path, val)
2295 self.info("wrote %s = '%s'" % (path, val))
2297 def _xs_rm_path(self, path):
2298 self.xs.rm(self._xbt, path)
2299 self.info("removed %s" % path)
2301 def read(self, key):
2302 return self._xs_read_path(self.xs_path(key))
2304 def has_xs_key(self, key):
2305 return self.read(key) is not None
2307 def write(self, key, val):
2308 self._xs_write_path(self.xs_path(key), val)
2310 def rm(self, key):
2311 self._xs_rm_path(self.xs_path(key))
2313 def exists(self):
2314 return self.has_xs_key(None)
2316 def begin(self):
2317 assert(self._xbt == XenbusDevice.XBT_NIL)
2318 self._xbt = self.xs.transaction_start()
2320 def commit(self):
2321 ok = self.xs.transaction_end(self._xbt, 0)
2322 self._xbt = XenbusDevice.XBT_NIL
2323 return ok
2325 def abort(self):
2326 ok = self.xs.transaction_end(self._xbt, 1)
2327 assert(ok == True)
2328 self._xbt = XenbusDevice.XBT_NIL
2330 def create_physical_device(self):
2331 """The standard protocol is: toolstack writes 'params', linux hotplug
2332 script translates this into physical-device=%x:%x"""
2333 if self.has_xs_key("physical-device"):
2334 return
2335 try:
2336 params = self.read("params")
2337 frontend = self.read("frontend")
2338 is_cdrom = self._xs_read_path("%s/device-type" % frontend) == "cdrom"
2339 # We don't have PV drivers for CDROM devices, so we prevent blkback
2340 # from opening the physical-device
2341 if not is_cdrom:
2342 major_minor = os.stat(params).st_rdev
2343 major, minor = divmod(major_minor, 256)
2344 self.write("physical-device", "%x:%x" % (major, minor))
2345 except:
2346 util.logException("BLKTAP2:create_physical_device")
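# Worked example of the translation above (device numbers hypothetical): if
# 'params' names a block device whose st_rdev is 0xfd01, divmod(0xfd01, 256)
# yields (0xfd, 0x01) and the key written is physical-device = "fd:1".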
2348 def signal_hotplug(self, online=True):
2349 xapi_path = "/xapi/%d/hotplug/%s/%d/hotplug" % (self.domid,
2350 self.XENBUS_DEVTYPE,
2351 self.devid)
2352 upstream_path = self.xs_path("hotplug-status")
2353 if online:
2354 self._xs_write_path(xapi_path, "online")
2355 self._xs_write_path(upstream_path, "connected")
2356 else:
2357 self._xs_rm_path(xapi_path)
2358 self._xs_rm_path(upstream_path)
2360 def sysfs_devname(self):
2361 return "%s-%d-%d" % (self.XENBUS_DEVTYPE,
2362 self.domid, self.devid)
2364 def __str__(self):
2365 return self.sysfs_devname()
2367 @classmethod
2368 def find(cls):
2369 pattern = "/sys/bus/%s/devices/%s*" % (cls.SYSFS_BUSTYPE,
2370 cls.XENBUS_DEVTYPE)
2371 for path in glob.glob(pattern):
2373 name = os.path.basename(path)
2374 (_type, domid, devid) = name.split('-')
2376 yield cls(domid, devid)
2379class XenBackendDevice(XenbusDevice):
2380 """Xenbus backend device"""
2381 SYSFS_BUSTYPE = "xen-backend"
2383 @classmethod
2384 def from_xs_path(cls, _path):
2385 (_backend, _type, domid, devid) = _path.split('/')
2387 assert _backend == 'backend'
2388 assert _type == cls.XENBUS_DEVTYPE
2390 domid = int(domid)
2391 devid = int(devid)
2393 return cls(domid, devid)
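# Example (values hypothetical): a xenstore backend path such as
# "backend/vbd/12/51712" parses into cls(domid=12, devid=51712), i.e. the
# blkback instance for domain 12, virtual device 51712 (xvda in the usual
# xen vbd numbering).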
2396class Blkback(XenBackendDevice):
2397 """A blkback VBD"""
2399 XENBUS_DEVTYPE = "vbd"
2401 def __init__(self, domid, devid):
2402 XenBackendDevice.__init__(self, domid, devid)
2403 self._phy = None
2404 self._vdi_uuid = None
2405 self._q_state = None
2406 self._q_events = None
2408 class XenstoreValueError(Exception):
2409 KEY = None
2411 def __init__(self, vbd, _str):
2412 self.vbd = vbd
2413 self.str = _str
2415 def __str__(self):
2416 return "Backend %s " % self.vbd + \
2417 "has %s = %s" % (self.KEY, self.str)
2419 class PhysicalDeviceError(XenstoreValueError):
2420 KEY = "physical-device"
2422 class PhysicalDevice(object):
2424 def __init__(self, major, minor):
2425 self.major = int(major)
2426 self.minor = int(minor)
2428 @classmethod
2429 def from_xbdev(cls, xbdev):
2431 phy = xbdev.read("physical-device")
2433 try:
2434 major, minor = phy.split(':')
2435 major = int(major, 0x10)
2436 minor = int(minor, 0x10)
2437 except Exception as e:
2438 raise xbdev.PhysicalDeviceError(xbdev, phy)
2440 return cls(major, minor)
2442 def makedev(self):
2443 return os.makedev(self.major, self.minor)
2445 def is_tap(self):
2446 return self.major == Tapdisk.major()
2448 def __str__(self):
2449 return "%s:%s" % (self.major, self.minor)
2451 def __eq__(self, other):
2452 return \
2453 self.major == other.major and \
2454 self.minor == other.minor
2456 def get_physical_device(self):
2457 if not self._phy:
2458 self._phy = self.PhysicalDevice.from_xbdev(self)
2459 return self._phy
2461 class QueueEvents(Attribute):
2462 """Blkback sysfs node to select queue-state event
2463 notifications emitted."""
2465 SYSFS_NODENAME = "queue_events"
2467 QUEUE_RUNNING = (1 << 0)
2468 QUEUE_PAUSE_DONE = (1 << 1)
2469 QUEUE_SHUTDOWN_DONE = (1 << 2)
2470 QUEUE_PAUSE_REQUEST = (1 << 3)
2471 QUEUE_SHUTDOWN_REQUEST = (1 << 4)
2473 def get_mask(self):
2474 return int(self.readline(), 0x10)
2476 def set_mask(self, mask):
2477 self.writeline("0x%x" % mask)
2479 def get_queue_events(self):
2480 if not self._q_events:
2481 self._q_events = self.QueueEvents.from_kobject(self)
2482 return self._q_events
2484 def get_vdi_uuid(self):
2485 if not self._vdi_uuid:
2486 self._vdi_uuid = self.read("sm-data/vdi-uuid")
2487 return self._vdi_uuid
2489 def pause_requested(self):
2490 return self.has_xs_key("pause")
2492 def shutdown_requested(self):
2493 return self.has_xs_key("shutdown-request")
2495 def shutdown_done(self):
2496 return self.has_xs_key("shutdown-done")
2498 def running(self):
2499 return self.has_xs_key('queue-0/kthread-pid')
2501 @classmethod
2502 def find_by_physical_device(cls, phy):
2503 for dev in cls.find():
2504 try:
2505 _phy = dev.get_physical_device()
2506 except cls.PhysicalDeviceError:
2507 continue
2509 if _phy == phy:
2510 yield dev
2512 @classmethod
2513 def find_by_tap_minor(cls, minor):
2514 phy = cls.PhysicalDevice(Tapdisk.major(), minor)
2515 return cls.find_by_physical_device(phy)
2517 @classmethod
2518 def find_by_tap(cls, tapdisk):
2519 return cls.find_by_tap_minor(tapdisk.minor)
2521 def has_tap(self):
2523 if not self.can_tap():
2524 return False
2526 phy = self.get_physical_device()
2527 if phy:
2528 return phy.is_tap()
2530 return False
2532 def is_bare_hvm(self):
2533 """File VDIs for bare HVM. These are directly accessible by Qemu."""
2534 try:
2535 self.get_physical_device()
2537 except self.PhysicalDeviceError as e:
2538 vdi_type = self.read("type")
2540 self.info("HVM VDI: type=%s" % vdi_type)
2542 if e.str is not None or vdi_type != 'file':
2543 raise
2545 return True
2547 return False
2549 def can_tap(self):
2550 return not self.is_bare_hvm()
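# Usage sketch: the finders above let callers walk from a tapdisk minor back
# to its blkback consumers. This helper is illustrative only and is not part
# of the driver.
def _example_vbds_using_tap(minor):
    # Same walk that BlkbackEventHandler._tap_should_pause() performs below.
    return [str(vbd) for vbd in Blkback.find_by_tap_minor(minor)]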
2553class BlkbackEventHandler(UEventHandler):
2555 LOG_FACILITY = _syslog.LOG_DAEMON
2557 def __init__(self, ident=None, action=None):
2558 if not ident:
2559 ident = self.__class__.__name__
2561 self.ident = ident
2562 self._vbd = None
2563 self._tapdisk = None
2565 UEventHandler.__init__(self)
2567 def run(self):
2569 self.xs_path = self.getenv('XENBUS_PATH')
2570 openlog(str(self), 0, self.LOG_FACILITY)
2572 UEventHandler.run(self)
2574 def __str__(self):
2576 try:
2577 path = self.xs_path
2578 except:
2579 path = None
2581 try:
2582 action = self.get_action()
2583 except:
2584 action = None
2586 return "%s[%s](%s)" % (self.ident, action, path)
2588 def _log(self, prio, msg):
2589 syslog(prio, msg)
2590 util.SMlog("%s: " % self + msg)
2592 def info(self, msg):
2593 self._log(_syslog.LOG_INFO, msg)
2595 def warn(self, msg):
2596 self._log(_syslog.LOG_WARNING, "WARNING: " + msg)
2598 def error(self, msg):
2599 self._log(_syslog.LOG_ERR, "ERROR: " + msg)
2601 def get_vbd(self):
2602 if not self._vbd:
2603 self._vbd = Blkback.from_xs_path(self.xs_path)
2604 return self._vbd
2606 def get_tapdisk(self):
2607 if not self._tapdisk:
2608 minor = self.get_vbd().get_physical_device().minor
2609 self._tapdisk = Tapdisk.from_minor(minor)
2610 return self._tapdisk
2611 #
2612 # Events
2613 #
2615 def __add(self):
2616 vbd = self.get_vbd()
2617 # Manage blkback transitions
2618 # self._manage_vbd()
2620 vbd.create_physical_device()
2622 vbd.signal_hotplug()
2624 @retried(backoff=.5, limit=10)
2625 def add(self):
2626 try:
2627 self.__add()
2628 except Attribute.NoSuchAttribute as e:
2629 #
2630 # FIXME: KOBJ_ADD is racing backend.probe, which
2631 # registers device attributes. So poll a little.
2632 #
2633 self.warn("%s, still trying." % e)
2634 raise RetryLoop.TransientFailure(e)
2636 def __change(self):
2637 vbd = self.get_vbd()
2639 # 1. Pause or resume tapdisk (if there is one)
2641 if vbd.has_tap():
2642 pass
2643 #self._pause_update_tap()
2645 # 2. Signal Xapi.VBD.pause/resume completion
2647 self._signal_xapi()
2649 def change(self):
2650 vbd = self.get_vbd()
2652 # NB. Beware of spurious change events between shutdown
2653 # completion and device removal. Also, Xapi.VM.migrate will
2654 # hammer a couple extra shutdown-requests into the source VBD.
2656 while True:
2657 vbd.begin()
2659 if not vbd.exists() or \
2660 vbd.shutdown_done():
2661 break
2663 self.__change()
2665 if vbd.commit():
2666 return
2668 vbd.abort()
2669 self.info("spurious uevent, ignored.")
2671 def remove(self):
2672 vbd = self.get_vbd()
2674 vbd.signal_hotplug(False)
2676 ACTIONS = {'add': add,
2677 'change': change,
2678 'remove': remove}
2679 #
2680 # VDI.pause
2681 #
2683 def _tap_should_pause(self):
2684 """Enumerate all VBDs on our tapdisk. Returns true iff any was
2685 paused"""
2687 tapdisk = self.get_tapdisk()
2688 TapState = Tapdisk.PauseState
2690 PAUSED = 'P'
2691 RUNNING = 'R'
2692 PAUSED_SHUTDOWN = 'P,S'
2693 # NB. Shutdown/paused is special. We know it's not going
2694 # to restart again, so it's a RUNNING. Still better than
2695 # backtracking a removed device during Vbd.unplug completion.
2697 next = TapState.RUNNING
2698 vbds = {}
2700 for vbd in Blkback.find_by_tap(tapdisk):
2701 name = str(vbd)
2703 pausing = vbd.pause_requested()
2704 closing = vbd.shutdown_requested()
2705 running = vbd.running()
2707 if pausing:
2708 if closing and not running:
2709 vbds[name] = PAUSED_SHUTDOWN
2710 else:
2711 vbds[name] = PAUSED
2712 next = TapState.PAUSED
2714 else:
2715 vbds[name] = RUNNING
2717 self.info("tapdev%d (%s): %s -> %s"
2718 % (tapdisk.minor, tapdisk.pause_state(),
2719 vbds, next))
2721 return next == TapState.PAUSED
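# Worked example of the state table above (VBD names hypothetical): with two
# VBDs on one tapdisk reported as {"vbd-1-768": 'P', "vbd-2-768": 'R'}, the
# single paused VBD is enough to return True and pause the tap; a VBD marked
# 'P,S' (paused while shutting down, no kthread) deliberately does not count,
# so an unplug in progress does not by itself pause the shared tapdisk.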
2723 def _pause_update_tap(self):
2724 vbd = self.get_vbd()
2726 if self._tap_should_pause():
2727 self._pause_tap()
2728 else:
2729 self._resume_tap()
2731 def _pause_tap(self):
2732 tapdisk = self.get_tapdisk()
2734 if not tapdisk.is_paused():
2735 self.info("pausing %s" % tapdisk)
2736 tapdisk.pause()
2738 def _resume_tap(self):
2739 tapdisk = self.get_tapdisk()
2741 # NB. Raw VDI snapshots. Refresh the physical path and
2742 # type while resuming.
2743 vbd = self.get_vbd()
2744 vdi_uuid = vbd.get_vdi_uuid()
2746 if tapdisk.is_paused():
2747 self.info("loading vdi uuid=%s" % vdi_uuid)
2748 vdi = VDI.from_cli(vdi_uuid)
2749 _type = vdi.get_tap_type()
2750 path = vdi.get_phy_path()
2751 self.info("resuming %s on %s:%s" % (tapdisk, _type, path))
2752 tapdisk.unpause(_type, path)
2753 #
2754 # VBD.pause/shutdown
2755 #
2757 def _manage_vbd(self):
2758 vbd = self.get_vbd()
2759 # NB. Hook into VBD state transitions.
2761 events = vbd.get_queue_events()
2763 mask = 0
2764 mask |= events.QUEUE_PAUSE_DONE # pause/unpause
2765 mask |= events.QUEUE_SHUTDOWN_DONE # shutdown
2766 # TODO: mask |= events.QUEUE_SHUTDOWN_REQUEST, for shutdown=force
2767 # TODO: mask |= events.QUEUE_RUNNING, for ionice updates etc
2769 events.set_mask(mask)
2770 self.info("wrote %s = %#02x" % (events.path, mask))
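# Mask arithmetic for the write above: QUEUE_PAUSE_DONE is (1 << 1) == 0x2 and
# QUEUE_SHUTDOWN_DONE is (1 << 2) == 0x4, so queue_events is set to 0x6 and
# blkback only raises events for pause and shutdown completion.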
2772 def _signal_xapi(self):
2773 vbd = self.get_vbd()
2775 pausing = vbd.pause_requested()
2776 closing = vbd.shutdown_requested()
2777 running = vbd.running()
2779 handled = 0
2781 if pausing and not running:
2782 if not vbd.has_xs_key('pause-done'):
2783 vbd.write('pause-done', '')
2784 handled += 1
2786 if not pausing:
2787 if vbd.has_xs_key('pause-done'):
2788 vbd.rm('pause-done')
2789 handled += 1
2791 if closing and not running:
2792 if not vbd.has_xs_key('shutdown-done'):
2793 vbd.write('shutdown-done', '')
2794 handled += 1
2796 if handled > 1:
2797 self.warn("handled %d events, " % handled +
2798 "pausing=%s closing=%s running=%s" % \
2799 (pausing, closing, running))
2801if __name__ == '__main__': 2801 ↛ 2803line 2801 didn't jump to line 2803, because the condition on line 2801 was never true
2803 import sys
2804 prog = os.path.basename(sys.argv[0])
2806 #
2807 # Simple CLI interface for manual operation
2808 #
2809 # tap.* level calls go down to local Tapdisk()s (by physical path)
2810 # vdi.* level calls run the plugin calls across host boundaries.
2811 #
2813 def usage(stream):
2814 print("usage: %s tap.{list|major}" % prog, file=stream)
2815 print(" %s tap.{launch|find|get|pause|" % prog + \
2816 "unpause|shutdown|stats} {[<tt>:]<path>} | [minor=]<int> | .. }", file=stream)
2817 print(" %s vbd.uevent" % prog, file=stream)
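# Hypothetical invocations matching the dispatch below (paths and minors are
# placeholders):
#
#   blktap2.py tap.major                     # print the tapdev major number
#   blktap2.py tap.launch vhd:/tmp/disk.vhd  # start a tapdisk on a VHD image
#   blktap2.py tap.list                      # one line per running tapdisk
#   blktap2.py tap.pause 2                   # pause the tapdisk on minor 2
#   blktap2.py vbd.uevent                    # run as a blkback uevent handler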
2819 try:
2820 cmd = sys.argv[1]
2821 except IndexError:
2822 usage(sys.stderr)
2823 sys.exit(1)
2825 try:
2826 _class, method = cmd.split('.')
2827 except:
2828 usage(sys.stderr)
2829 sys.exit(1)
2831 #
2832 # Local Tapdisks
2833 #
2835 if cmd == 'tap.major':
2837 print("%d" % Tapdisk.major())
2839 elif cmd == 'tap.launch':
2841 tapdisk = Tapdisk.launch_from_arg(sys.argv[2])
2842 print("Launched %s" % tapdisk, file=sys.stderr)
2844 elif _class == 'tap':
2846 attrs = {}
2847 for item in sys.argv[2:]:
2848 try:
2849 key, val = item.split('=')
2850 attrs[key] = val
2851 continue
2852 except ValueError:
2853 pass
2855 try:
2856 attrs['minor'] = int(item)
2857 continue
2858 except ValueError:
2859 pass
2861 try:
2862 arg = Tapdisk.Arg.parse(item)
2863 attrs['_type'] = arg.type
2864 attrs['path'] = arg.path
2865 continue
2866 except Tapdisk.Arg.InvalidArgument:
2867 pass
2869 attrs['path'] = item
2871 if cmd == 'tap.list':
2873 for tapdisk in Tapdisk.list( ** attrs):
2874 blktap = tapdisk.get_blktap()
2875 print(tapdisk, end=' ')
2876 print("%s: task=%s pool=%s" % \
2877 (blktap,
2878 blktap.get_task_pid(),
2879 blktap.get_pool_name()))
2881 elif cmd == 'tap.vbds':
2882 # Find all Blkback instances for a given tapdisk
2884 for tapdisk in Tapdisk.list( ** attrs):
2885 print("%s:" % tapdisk, end=' ')
2886 for vbd in Blkback.find_by_tap(tapdisk):
2887 print(vbd, end=' ')
2888 print()
2890 else:
2892 if not attrs:
2893 usage(sys.stderr)
2894 sys.exit(1)
2896 try:
2897 tapdisk = Tapdisk.get( ** attrs)
2898 except TypeError:
2899 usage(sys.stderr)
2900 sys.exit(1)
2902 if cmd == 'tap.shutdown':
2903 # Shutdown a running tapdisk, or raise
2904 tapdisk.shutdown()
2905 print("Shut down %s" % tapdisk, file=sys.stderr)
2907 elif cmd == 'tap.pause':
2908 # Pause an unpaused tapdisk, or raise
2909 tapdisk.pause()
2910 print("Paused %s" % tapdisk, file=sys.stderr)
2912 elif cmd == 'tap.unpause':
2913 # Unpause a paused tapdisk, or raise
2914 tapdisk.unpause()
2915 print("Unpaused %s" % tapdisk, file=sys.stderr)
2917 elif cmd == 'tap.stats':
2918 # Gather tapdisk status
2919 stats = tapdisk.stats()
2920 print("%s:" % tapdisk)
2921 print(json.dumps(stats, indent=True))
2923 else:
2924 usage(sys.stderr)
2925 sys.exit(1)
2927 elif cmd == 'vbd.uevent':
2929 hnd = BlkbackEventHandler(cmd)
2931 if not sys.stdin.isatty():
2932 try:
2933 hnd.run()
2934 except Exception as e:
2935 hnd.error("Unhandled Exception: %s" % e)
2937 import traceback
2938 _type, value, tb = sys.exc_info()
2939 trace = traceback.format_exception(_type, value, tb)
2940 for entry in trace:
2941 for line in entry.rstrip().split('\n'):
2942 util.SMlog(line)
2943 else:
2944 hnd.run()
2946 elif cmd == 'vbd.list':
2948 for vbd in Blkback.find():
2949 print(vbd, \
2950 "physical-device=%s" % vbd.get_physical_device(), \
2951 "pause=%s" % vbd.pause_requested())
2953 else:
2954 usage(sys.stderr)
2955 sys.exit(1)