Coverage for drivers/blktap2.py : 39%

1#!/usr/bin/python3
2#
3# Copyright (C) Citrix Systems Inc.
4#
5# This program is free software; you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License as published
7# by the Free Software Foundation; version 2.1 only.
8#
9# This program is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with this program; if not, write to the Free Software Foundation, Inc.,
16# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17#
18# blktap2: blktap/tapdisk management layer
19#
21import os
22import re
23import time
24import copy
25from lock import Lock
26import util
27import xmlrpc.client
28import http.client
29import errno
30import signal
31import subprocess
32import syslog as _syslog
33import glob
34import json
35import xs_errors
36import XenAPI
37import scsiutil
38from syslog import openlog, syslog
39from stat import * # S_ISBLK(), ...
40import nfs
42import resetvdis
43import vhdutil
44import lvhdutil
46import VDI as sm
48# For RRDD Plugin Registration
49from xmlrpc.client import ServerProxy, Transport
50from socket import socket, AF_UNIX, SOCK_STREAM
52PLUGIN_TAP_PAUSE = "tapdisk-pause"
54SOCKPATH = "/var/xapi/xcp-rrdd"
56NUM_PAGES_PER_RING = 32 * 11
57MAX_FULL_RINGS = 8
58POOL_NAME_KEY = "mem-pool"
59POOL_SIZE_KEY = "mem-pool-size-rings"
61ENABLE_MULTIPLE_ATTACH = "/etc/xensource/allow_multiple_vdi_attach"
62NO_MULTIPLE_ATTACH = not (os.path.exists(ENABLE_MULTIPLE_ATTACH))
65def locking(excType, override=True):
66 def locking2(op):
67 def wrapper(self, *args):
68 self.lock.acquire()
69 try:
70 try:
71 ret = op(self, * args)
72 except (util.CommandException, util.SMException, XenAPI.Failure) as e:
73 util.logException("BLKTAP2:%s" % op)
74 msg = str(e)
75 if isinstance(e, util.CommandException):
76 msg = "Command %s failed (%s): %s" % \
77 (e.cmd, e.code, e.reason)
78 if override:
79 raise xs_errors.XenError(excType, opterr=msg)
80 else:
81 raise
82 except:
83 util.logException("BLKTAP2:%s" % op)
84 raise
85 finally:
86 self.lock.release()
87 return ret
88 return wrapper
89 return locking2
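# Illustrative sketch (not part of the original module): locking() is meant to
# wrap methods of objects that carry a `self.lock`, as the VDI class further
# down does. Command, SM and XenAPI failures are logged and, with
# override=True, re-raised as the given XenError.
class _ExampleLocked(object):
    def __init__(self, uuid):
        self.lock = Lock("vdi", uuid)

    @locking("VDIUnavailable")
    def do_work(self, arg):
        # runs with self.lock held; the decorator releases it in its finally block
        return arg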
92class RetryLoop(object):
94 def __init__(self, backoff, limit):
95 self.backoff = backoff
96 self.limit = limit
98 def __call__(self, f):
100 def loop(*__t, **__d):
101 attempt = 0
103 while True:
104 attempt += 1
106 try:
107 return f( * __t, ** __d)
109 except self.TransientFailure as e:
110 e = e.exception
112 if attempt >= self.limit:
113 raise e
115 time.sleep(self.backoff)
117 return loop
119 class TransientFailure(Exception):
120 def __init__(self, exception):
121 self.exception = exception
124def retried(**args):
125 return RetryLoop( ** args)
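# Illustrative sketch (not part of the original module): @retried retries a
# call with a fixed back-off, the way TapCtl.list() and TapCtl.spawn() use it
# below. Only exceptions wrapped in RetryLoop.TransientFailure are retried;
# once the attempt limit is reached the original exception is re-raised.
@retried(backoff=.5, limit=3)
def _example_transient_call(fn):
    try:
        return fn()
    except util.CommandException as e:
        # treat every command failure as transient for the sake of the example
        raise RetryLoop.TransientFailure(e)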
128class TapCtl(object):
129 """Tapdisk IPC utility calls."""
131 PATH = "/usr/sbin/tap-ctl"
133 def __init__(self, cmd, p):
134 self.cmd = cmd
135 self._p = p
136 self.stdout = p.stdout
138 class CommandFailure(Exception):
139 """TapCtl cmd failure."""
141 def __init__(self, cmd, **info):
142 self.cmd = cmd
143 self.info = info
145 def __str__(self):
146 items = self.info.items()
147 info = ", ".join("%s=%s" % item
148 for item in items)
149 return "%s failed: %s" % (self.cmd, info)
151 # Trying to get a non-existent attribute throws an AttributeError
152 # exception
153 def __getattr__(self, key):
154 if key in self.info:
155 return self.info[key]
156 return object.__getattribute__(self, key)
158 @property
159 def has_status(self):
160 return 'status' in self.info
162 @property
163 def has_signal(self):
164 return 'signal' in self.info
166 # Retrieves the error code returned by the command. If the error code
167 # was not supplied at object-construction time, zero is returned.
168 def get_error_code(self):
169 key = 'status'
170 if key in self.info:
171 return self.info[key]
172 else:
173 return 0
175 @classmethod
176 def __mkcmd_real(cls, args):
177 return [cls.PATH] + [str(x) for x in args]
179 __next_mkcmd = __mkcmd_real
181 @classmethod
182 def _mkcmd(cls, args):
184 __next_mkcmd = cls.__next_mkcmd
185 cls.__next_mkcmd = cls.__mkcmd_real
187 return __next_mkcmd(args)
189 @classmethod
190 def _call(cls, args, quiet=False, input=None):
191 """
192 Spawn a tap-ctl process. Return a TapCtl invocation.
193 Raises a TapCtl.CommandFailure if subprocess creation failed.
194 """
195 cmd = cls._mkcmd(args)
197 if not quiet:
198 util.SMlog(cmd)
199 try:
200 p = subprocess.Popen(cmd,
201 stdin=subprocess.PIPE,
202 stdout=subprocess.PIPE,
203 stderr=subprocess.PIPE,
204 close_fds=True)
205 if input:
206 p.stdin.write(input)
207 p.stdin.close()
208 except OSError as e:
209 raise cls.CommandFailure(cmd, errno=e.errno)
211 return cls(cmd, p)
213 def _errmsg(self):
214 output = map(str.rstrip, self._p.stderr)
215 return "; ".join(output)
217 def _wait(self, quiet=False):
218 """
219 Reap the child tap-ctl process of this invocation.
220 Raises a TapCtl.CommandFailure on non-zero exit status.
221 """
222 status = self._p.wait()
223 if not quiet:
224 util.SMlog(" = %d" % status)
226 if status == 0:
227 return
229 info = {'errmsg': self._errmsg(),
230 'pid': self._p.pid}
232 if status < 0:
233 info['signal'] = -status
234 else:
235 info['status'] = status
237 raise self.CommandFailure(self.cmd, ** info)
239 @classmethod
240 def _pread(cls, args, quiet=False, input=None):
241 """
242 Spawn a tap-ctl invocation and read a single line.
243 """
244 tapctl = cls._call(args=args, quiet=quiet, input=input)
246 output = tapctl.stdout.readline().rstrip()
248 tapctl._wait(quiet)
249 return output.decode()
251 @staticmethod
252 def _maybe(opt, parm):
253 if parm is not None:
254 return [opt, parm]
255 return []
257 @classmethod
258 def __list(cls, minor=None, pid=None, _type=None, path=None):
259 args = ["list"]
260 args += cls._maybe("-m", minor)
261 args += cls._maybe("-p", pid)
262 args += cls._maybe("-t", _type)
263 args += cls._maybe("-f", path)
265 tapctl = cls._call(args, True)
267 for stdout_line in tapctl.stdout:
268 decoded_line = stdout_line.decode()
269 # FIXME: tap-ctl writes error messages to stdout and
270 # confuses this parser
271 if decoded_line == "blktap kernel module not installed\n":
272 # This isn't pretty, but neither is confusing stdout and stderr,
273 # and at least the error now describes the fix
274 raise Exception("blktap kernel module not installed: try 'modprobe blktap'")
275 row = {}
277 for field in decoded_line.rstrip().split(' ', 3):
278 bits = field.split('=')
279 if len(bits) == 2:
280 key, val = field.split('=')
282 if key in ('pid', 'minor'):
283 row[key] = int(val, 10)
285 elif key == 'state':
286 row[key] = int(val, 0x10)
288 else:
289 row[key] = val
290 else:
291 util.SMlog("Ignoring unexpected tap-ctl output: %s" % repr(field))
292 yield row
294 tapctl._wait(True)
296 @classmethod
297 @retried(backoff=.5, limit=10)
298 def list(cls, **args):
300 # FIXME. We typically get an EPROTO when uevents interleave
301 # with SM ops and a tapdisk shuts down under our feet. Should
302 # be fixed in SM.
304 try:
305 return list(cls.__list( ** args))
307 except cls.CommandFailure as e:
308 transient = [errno.EPROTO, errno.ENOENT]
309 if e.has_status and e.status in transient:
310 raise RetryLoop.TransientFailure(e)
311 raise
313 @classmethod
314 def allocate(cls, devpath=None):
315 args = ["allocate"]
316 args += cls._maybe("-d", devpath)
317 return cls._pread(args)
319 @classmethod
320 def free(cls, minor):
321 args = ["free", "-m", minor]
322 cls._pread(args)
324 @classmethod
325 @retried(backoff=.5, limit=10)
326 def spawn(cls):
327 args = ["spawn"]
328 try:
329 pid = cls._pread(args)
330 return int(pid)
331 except cls.CommandFailure as ce:
332 # intermittent failures to spawn. CA-292268
333 if ce.status == 1:
334 raise RetryLoop.TransientFailure(ce)
335 raise
337 @classmethod
338 def attach(cls, pid, minor):
339 args = ["attach", "-p", pid, "-m", minor]
340 cls._pread(args)
342 @classmethod
343 def detach(cls, pid, minor):
344 args = ["detach", "-p", pid, "-m", minor]
345 cls._pread(args)
347 @classmethod
348 def _load_key(cls, key_hash, vdi_uuid):
349 import plugins
351 return plugins.load_key(key_hash, vdi_uuid)
353 @classmethod
354 def open(cls, pid, minor, _type, _file, options):
355 params = Tapdisk.Arg(_type, _file)
356 args = ["open", "-p", pid, "-m", minor, '-a', str(params)]
357 input = None
358 if options.get("rdonly"):
359 args.append('-R')
360 if options.get("lcache"):
361 args.append("-r")
362 if options.get("existing_prt") is not None:
363 args.append("-e")
364 args.append(str(options["existing_prt"]))
365 if options.get("secondary"):
366 args.append("-2")
367 args.append(options["secondary"])
368 if options.get("standby"):
369 args.append("-s")
370 if options.get("timeout"):
371 args.append("-t")
372 args.append(str(options["timeout"]))
373 if not options.get("o_direct", True):
374 args.append("-D")
375 if options.get('cbtlog'):
376 args.extend(['-C', options['cbtlog']])
377 if options.get('key_hash'):
378 key_hash = options['key_hash']
379 vdi_uuid = options['vdi_uuid']
380 key = cls._load_key(key_hash, vdi_uuid)
382 if not key:
383 raise util.SMException("No key found with key hash {}".format(key_hash))
384 input = key
385 args.append('-E')
386 cls._pread(args=args, input=input)
388 @classmethod
389 def close(cls, pid, minor, force=False):
390 args = ["close", "-p", pid, "-m", minor, "-t", "120"]
391 if force:
392 args += ["-f"]
393 cls._pread(args)
395 @classmethod
396 def pause(cls, pid, minor):
397 args = ["pause", "-p", pid, "-m", minor]
398 cls._pread(args)
400 @classmethod
401 def unpause(cls, pid, minor, _type=None, _file=None, mirror=None,
402 cbtlog=None):
403 args = ["unpause", "-p", pid, "-m", minor]
404 if mirror:
405 args.extend(["-2", mirror])
406 if _type and _file:
407 params = Tapdisk.Arg(_type, _file)
408 args += ["-a", str(params)]
409 if cbtlog:
410 args.extend(["-c", cbtlog])
411 cls._pread(args)
413 @classmethod
414 def shutdown(cls, pid):
415 # TODO: This should be a real tap-ctl command
416 os.kill(pid, signal.SIGTERM)
417 os.waitpid(pid, 0)
419 @classmethod
420 def stats(cls, pid, minor):
421 args = ["stats", "-p", pid, "-m", minor]
422 return cls._pread(args, quiet=True)
424 @classmethod
425 def major(cls):
426 args = ["major"]
427 major = cls._pread(args)
428 return int(major)
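# Illustrative sketch (not part of the original module): the usual tap-ctl
# call sequence, mirroring what Tapdisk.launch_on_tap() below does. Error
# handling is omitted here; launch_on_tap() shows the full unwind. Assumes the
# blktap kernel module is loaded.
def _example_tapctl_lifecycle(image_path):
    minor = Tapdisk._parse_minor(TapCtl.allocate())
    pid = TapCtl.spawn()
    TapCtl.attach(pid, minor)
    TapCtl.open(pid, minor, 'vhd', image_path, {"rdonly": True})
    # I/O now goes through /dev/xen/blktap-2/tapdev<minor>
    TapCtl.close(pid, minor)
    TapCtl.detach(pid, minor)
    TapCtl.free(minor)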
431class TapdiskExists(Exception):
432 """Tapdisk already running."""
434 def __init__(self, tapdisk):
435 self.tapdisk = tapdisk
437 def __str__(self):
438 return "%s already running" % self.tapdisk
441class TapdiskNotRunning(Exception):
442 """No such Tapdisk."""
444 def __init__(self, **attrs):
445 self.attrs = attrs
447 def __str__(self):
448 items = iter(self.attrs.items())
449 attrs = ", ".join("%s=%s" % attr
450 for attr in items)
451 return "No such Tapdisk(%s)" % attrs
454class TapdiskNotUnique(Exception):
455 """More than one tapdisk on one path."""
457 def __init__(self, tapdisks):
458 self.tapdisks = tapdisks
460 def __str__(self):
461 tapdisks = map(str, self.tapdisks)
462 return "Found multiple tapdisks: %s" % tapdisks
465class TapdiskFailed(Exception):
466 """Tapdisk launch failure."""
468 def __init__(self, arg, err):
469 self.arg = arg
470 self.err = err
472 def __str__(self):
473 return "Tapdisk(%s): %s" % (self.arg, self.err)
475 def get_error(self):
476 return self.err
479class TapdiskInvalidState(Exception):
480 """Tapdisk pause/unpause failure"""
482 def __init__(self, tapdisk):
483 self.tapdisk = tapdisk
485 def __str__(self):
486 return str(self.tapdisk)
489def mkdirs(path, mode=0o777):
490 if not os.path.exists(path):
491 parent, subdir = os.path.split(path)
492 assert parent != path
493 try:
494 if parent:
495 mkdirs(parent, mode)
496 if subdir:
497 os.mkdir(path, mode)
498 except OSError as e:
499 if e.errno != errno.EEXIST:
500 raise
503class KObject(object):
505 SYSFS_CLASSTYPE = None
507 def sysfs_devname(self):
508 raise NotImplementedError("sysfs_devname is undefined")
511class Attribute(object):
513 SYSFS_NODENAME = None
515 def __init__(self, path):
516 self.path = path
518 @classmethod
519 def from_kobject(cls, kobj):
520 path = "%s/%s" % (kobj.sysfs_path(), cls.SYSFS_NODENAME)
521 return cls(path)
523 class NoSuchAttribute(Exception):
524 def __init__(self, name):
525 self.name = name
527 def __str__(self):
528 return "No such attribute: %s" % self.name
530 def _open(self, mode='r'):
531 try:
532 return open(self.path, mode)
533 except IOError as e:
534 if e.errno == errno.ENOENT:
535 raise self.NoSuchAttribute(self)
536 raise
538 def readline(self):
539 f = self._open('r')
540 s = f.readline().rstrip()
541 f.close()
542 return s
544 def writeline(self, val):
545 f = self._open('w')
546 f.write(val)
547 f.close()
550class ClassDevice(KObject):
552 @classmethod
553 def sysfs_class_path(cls):
554 return "/sys/class/%s" % cls.SYSFS_CLASSTYPE
556 def sysfs_path(self):
557 return "%s/%s" % (self.sysfs_class_path(),
558 self.sysfs_devname())
561class Blktap(ClassDevice):
563 DEV_BASEDIR = '/dev/xen/blktap-2'
565 SYSFS_CLASSTYPE = "blktap2"
567 def __init__(self, minor):
568 self.minor = minor
569 self._pool = None
570 self._task = None
572 @classmethod
573 def allocate(cls):
574 # FIXME. Should rather go into init.
575 mkdirs(cls.DEV_BASEDIR)
577 devname = TapCtl.allocate()
578 minor = Tapdisk._parse_minor(devname)
579 return cls(minor)
581 def free(self):
582 TapCtl.free(self.minor)
584 def __str__(self):
585 return "%s(minor=%d)" % (self.__class__.__name__, self.minor)
587 def sysfs_devname(self):
588 return "blktap!blktap%d" % self.minor
590 class Pool(Attribute):
591 SYSFS_NODENAME = "pool"
593 def get_pool_attr(self):
594 if not self._pool:
595 self._pool = self.Pool.from_kobject(self)
596 return self._pool
598 def get_pool_name(self):
599 return self.get_pool_attr().readline()
601 def set_pool_name(self, name):
602 self.get_pool_attr().writeline(name)
604 def set_pool_size(self, pages):
605 self.get_pool().set_size(pages)
607 def get_pool(self):
608 return BlktapControl.get_pool(self.get_pool_name())
610 def set_pool(self, pool):
611 self.set_pool_name(pool.name)
613 class Task(Attribute):
614 SYSFS_NODENAME = "task"
616 def get_task_attr(self):
617 if not self._task:
618 self._task = self.Task.from_kobject(self)
619 return self._task
621 def get_task_pid(self):
622 pid = self.get_task_attr().readline()
623 try:
624 return int(pid)
625 except ValueError:
626 return None
628 def find_tapdisk(self):
629 pid = self.get_task_pid()
630 if pid is None:
631 return None
633 return Tapdisk.find(pid=pid, minor=self.minor)
635 def get_tapdisk(self):
636 tapdisk = self.find_tapdisk()
637 if not tapdisk:
638 raise TapdiskNotRunning(minor=self.minor)
639 return tapdisk
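# Illustrative sketch (not part of the original module): allocating a blktap
# minor, naming its page pool after the SR (as _tap_activate() does below) and
# freeing it again if anything goes wrong.
def _example_blktap_allocate(sr_uuid):
    blktap = Blktap.allocate()
    try:
        blktap.set_pool_name(sr_uuid)
        return blktap
    except Exception:
        blktap.free()
        raise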
642class Tapdisk(object):
644 TYPES = ['aio', 'vhd']
646 def __init__(self, pid, minor, _type, path, state):
647 self.pid = pid
648 self.minor = minor
649 self.type = _type
650 self.path = path
651 self.state = state
652 self._dirty = False
653 self._blktap = None
655 def __str__(self):
656 state = self.pause_state()
657 return "Tapdisk(%s, pid=%d, minor=%s, state=%s)" % \
658 (self.get_arg(), self.pid, self.minor, state)
660 @classmethod
661 def list(cls, **args):
663 for row in TapCtl.list( ** args):
665 args = {'pid': None,
666 'minor': None,
667 'state': None,
668 '_type': None,
669 'path': None}
671 for key, val in row.items():
672 if key in args:
673 args[key] = val
675 if 'args' in row:
676 image = Tapdisk.Arg.parse(row['args'])
677 args['_type'] = image.type
678 args['path'] = image.path
680 if None in args.values():
681 continue
683 yield Tapdisk( ** args)
685 @classmethod
686 def find(cls, **args):
688 found = list(cls.list( ** args))
690 if len(found) > 1:
691 raise TapdiskNotUnique(found)
693 if found:
694 return found[0]
696 return None
698 @classmethod
699 def find_by_path(cls, path):
700 return cls.find(path=path)
702 @classmethod
703 def find_by_minor(cls, minor):
704 return cls.find(minor=minor)
706 @classmethod
707 def get(cls, **attrs):
709 tapdisk = cls.find( ** attrs)
711 if not tapdisk:
712 raise TapdiskNotRunning( ** attrs)
714 return tapdisk
716 @classmethod
717 def from_path(cls, path):
718 return cls.get(path=path)
720 @classmethod
721 def from_minor(cls, minor):
722 return cls.get(minor=minor)
724 @classmethod
725 def __from_blktap(cls, blktap):
726 tapdisk = cls.from_minor(minor=blktap.minor)
727 tapdisk._blktap = blktap
728 return tapdisk
730 def get_blktap(self):
731 if not self._blktap:
732 self._blktap = Blktap(self.minor)
733 return self._blktap
735 class Arg:
737 def __init__(self, _type, path):
738 self.type = _type
739 self.path = path
741 def __str__(self):
742 return "%s:%s" % (self.type, self.path)
744 @classmethod
745 def parse(cls, arg):
747 try:
748 _type, path = arg.split(":", 1)
749 except ValueError:
750 raise cls.InvalidArgument(arg)
752 if _type not in Tapdisk.TYPES:
753 raise cls.InvalidType(_type)
755 return cls(_type, path)
757 class InvalidType(Exception):
758 def __init__(self, _type):
759 self.type = _type
761 def __str__(self):
762 return "Not a Tapdisk type: %s" % self.type
764 class InvalidArgument(Exception):
765 def __init__(self, arg):
766 self.arg = arg
768 def __str__(self):
769 return "Not a Tapdisk image: %s" % self.arg
771 def get_arg(self):
772 return self.Arg(self.type, self.path)
774 def get_devpath(self):
775 return "%s/tapdev%d" % (Blktap.DEV_BASEDIR, self.minor)
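# Illustrative example (not part of the original module): tap-ctl image
# arguments are "<type>:<path>" strings, so
#   Tapdisk.Arg.parse("vhd:/var/run/sr-mount/<sr>/<vdi>.vhd")
# yields type 'vhd' and that path, str() turns it back into the same string,
# and the device node for minor 5 is /dev/xen/blktap-2/tapdev5.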
777 @classmethod
778 def launch_from_arg(cls, arg):
779 arg = cls.Arg.parse(arg)
780 return cls.launch(arg.path, arg.type, False)
782 @classmethod
783 def cgclassify(cls, pid):
785 # We don't provide any <controllers>:<path>
786 # so cgclassify uses /etc/cgrules.conf which
787 # we have configured in the spec file.
788 cmd = ["cgclassify", str(pid)]
789 try:
790 util.pread2(cmd)
791 except util.CommandException as e:
792 util.logException(e)
794 @classmethod
795 def spawn(cls):
796 return TapCtl.spawn()
798 @classmethod
799 def launch_on_tap(cls, blktap, path, _type, options):
801 tapdisk = cls.find_by_path(path)
802 if tapdisk:
803 raise TapdiskExists(tapdisk)
805 minor = blktap.minor
807 try:
808 pid = cls.spawn()
809 cls.cgclassify(pid)
810 try:
811 TapCtl.attach(pid, minor)
813 try:
814 TapCtl.open(pid, minor, _type, path, options)
815 try:
816 tapdisk = cls.__from_blktap(blktap)
817 node = '/sys/dev/block/%d:%d' % (tapdisk.major(), tapdisk.minor)
818 util.set_scheduler_sysfs_node(node, 'noop')
819 return tapdisk
820 except:
821 TapCtl.close(pid, minor)
822 raise
824 except:
825 TapCtl.detach(pid, minor)
826 raise
828 except:
829 TapCtl.shutdown(pid)
830 raise
832 except TapCtl.CommandFailure as ctl:
833 util.logException(ctl)
834 if ('/dev/xapi/cd/' in path and
835 'status' in ctl.info and
836 ctl.info['status'] == 123): # ENOMEDIUM (No medium found)
837 raise xs_errors.XenError('TapdiskDriveEmpty')
838 else:
839 raise TapdiskFailed(cls.Arg(_type, path), ctl)
841 @classmethod
842 def launch(cls, path, _type, rdonly):
843 blktap = Blktap.allocate()
844 try:
845 return cls.launch_on_tap(blktap, path, _type, {"rdonly": rdonly})
846 except:
847 blktap.free()
848 raise
850 def shutdown(self, force=False):
852 TapCtl.close(self.pid, self.minor, force)
854 TapCtl.detach(self.pid, self.minor)
856 self.get_blktap().free()
858 def pause(self):
860 if not self.is_running():
861 raise TapdiskInvalidState(self)
863 TapCtl.pause(self.pid, self.minor)
865 self._set_dirty()
867 def unpause(self, _type=None, path=None, mirror=None, cbtlog=None):
869 if not self.is_paused():
870 raise TapdiskInvalidState(self)
872 # FIXME: should the arguments be optional?
873 if _type is None:
874 _type = self.type
875 if path is None:
876 path = self.path
878 TapCtl.unpause(self.pid, self.minor, _type, path, mirror=mirror,
879 cbtlog=cbtlog)
881 self._set_dirty()
883 def stats(self):
884 return json.loads(TapCtl.stats(self.pid, self.minor))
885 #
886 # NB. dirty/refresh: reload attributes on next access
887 #
889 def _set_dirty(self):
890 self._dirty = True
892 def _refresh(self, __get):
893 t = self.from_minor(__get('minor'))
894 self.__init__(t.pid, t.minor, t.type, t.path, t.state)
896 def __getattribute__(self, name):
897 def __get(name):
898 # NB. avoid recursion
899 return object.__getattribute__(self, name)
901 if __get('_dirty') and \
902 name in ['minor', 'type', 'path', 'state']:
903 self._refresh(__get)
904 self._dirty = False
906 return __get(name)
908 class PauseState:
909 RUNNING = 'R'
910 PAUSING = 'r'
911 PAUSED = 'P'
913 class Flags:
914 DEAD = 0x0001
915 CLOSED = 0x0002
916 QUIESCE_REQUESTED = 0x0004
917 QUIESCED = 0x0008
918 PAUSE_REQUESTED = 0x0010
919 PAUSED = 0x0020
920 SHUTDOWN_REQUESTED = 0x0040
921 LOCKING = 0x0080
922 RETRY_NEEDED = 0x0100
923 LOG_DROPPED = 0x0200
925 PAUSE_MASK = PAUSE_REQUESTED | PAUSED
927 def is_paused(self):
928 return not not (self.state & self.Flags.PAUSED)
930 def is_running(self):
931 return not (self.state & self.Flags.PAUSE_MASK)
933 def pause_state(self):
934 if self.state & self.Flags.PAUSED:
935 return self.PauseState.PAUSED
937 if self.state & self.Flags.PAUSE_REQUESTED:
938 return self.PauseState.PAUSING
940 return self.PauseState.RUNNING
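# Illustrative examples (not part of the original module) of how the state
# bitmask above is interpreted:
#   state 0x0000 -> is_running() True,   pause_state() 'R' (RUNNING)
#   state 0x0010 -> PAUSE_REQUESTED set, pause_state() 'r' (PAUSING)
#   state 0x0020 -> PAUSED set,          pause_state() 'P', is_paused() True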
942 @staticmethod
943 def _parse_minor(devpath):
944 regex = r'%s/(blktap|tapdev)(\d+)$' % Blktap.DEV_BASEDIR
945 pattern = re.compile(regex)
946 groups = pattern.search(devpath)
947 if not groups:
948 raise Exception("malformed tap device: '%s' (%s) " % (devpath, regex))
950 minor = groups.group(2)
951 return int(minor)
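# Illustrative example (not part of the original module): _parse_minor()
# extracts the trailing number from either device naming scheme, e.g.
#   "/dev/xen/blktap-2/tapdev5"  -> 5
#   "/dev/xen/blktap-2/blktap5"  -> 5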
953 _major = None
955 @classmethod
956 def major(cls):
957 if cls._major:
958 return cls._major
960 devices = open("/proc/devices")
961 for line in devices:
963 row = line.rstrip().split(' ')
964 if len(row) != 2:
965 continue
967 major, name = row
968 if name != 'tapdev':
969 continue
971 cls._major = int(major)
972 break
974 devices.close()
975 return cls._major
978class VDI(object):
979 """SR.vdi driver decorator for blktap2"""
981 CONF_KEY_ALLOW_CACHING = "vdi_allow_caching"
982 CONF_KEY_MODE_ON_BOOT = "vdi_on_boot"
983 CONF_KEY_CACHE_SR = "local_cache_sr"
984 CONF_KEY_O_DIRECT = "o_direct"
985 LOCK_CACHE_SETUP = "cachesetup"
987 ATTACH_DETACH_RETRY_SECS = 120
989 # number of seconds on top of NFS timeo mount option the tapdisk should
990 # wait before reporting errors. This is to allow a retry to succeed in case
991 # packets were lost the first time around, which prevented the NFS client
992 # from returning before the timeo is reached even if the NFS server did
993 # come back earlier
994 TAPDISK_TIMEOUT_MARGIN = 30
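# Illustrative example (not part of the original module): with an SR mounted
# using timeo=100 (NFS timeouts are in deciseconds), activate() below computes
# int((100 + 5) / 10) = 10 seconds and passes 10 + TAPDISK_TIMEOUT_MARGIN = 40
# seconds to tapdisk as its request timeout.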
996 def __init__(self, uuid, target, driver_info):
997 self.target = self.TargetDriver(target, driver_info)
998 self._vdi_uuid = uuid
999 self._session = target.session
1000 self.xenstore_data = scsiutil.update_XS_SCSIdata(uuid, scsiutil.gen_synthetic_page_data(uuid))
1001 self.__o_direct = None
1002 self.__o_direct_reason = None
1003 self.lock = Lock("vdi", uuid)
1004 self.tap = None
1006 def get_o_direct_capability(self, options):
1007 """Returns True/False based on licensing and caching_params"""
1008 if self.__o_direct is not None:
1009 return self.__o_direct, self.__o_direct_reason
1011 if util.read_caching_is_restricted(self._session):
1012 self.__o_direct = True
1013 self.__o_direct_reason = "LICENSE_RESTRICTION"
1014 elif not ((self.target.vdi.sr.handles("nfs") or self.target.vdi.sr.handles("ext") or self.target.vdi.sr.handles("smb"))):
1015 self.__o_direct = True
1016 self.__o_direct_reason = "SR_NOT_SUPPORTED"
1017 elif not (options.get("rdonly") or self.target.vdi.parent):
1018 util.SMlog(self.target.vdi)
1019 self.__o_direct = True
1020 self.__o_direct_reason = "NO_RO_IMAGE"
1021 elif options.get("rdonly") and not self.target.vdi.parent:
1022 self.__o_direct = True
1023 self.__o_direct_reason = "RO_WITH_NO_PARENT"
1024 elif options.get(self.CONF_KEY_O_DIRECT):
1025 self.__o_direct = True
1026 self.__o_direct_reason = "SR_OVERRIDE"
1028 if self.__o_direct is None:
1029 self.__o_direct = False
1030 self.__o_direct_reason = ""
1032 return self.__o_direct, self.__o_direct_reason
1034 @classmethod
1035 def from_cli(cls, uuid):
1036 import VDI as sm
1037 import XenAPI
1039 session = XenAPI.xapi_local()
1040 session.xenapi.login_with_password('root', '', '', 'SM')
1042 target = sm.VDI.from_uuid(session, uuid)
1043 driver_info = target.sr.srcmd.driver_info
1045 session.xenapi.session.logout()
1047 return cls(uuid, target, driver_info)
1049 @staticmethod
1050 def _tap_type(vdi_type):
1051 """Map a VDI type (e.g. 'raw') to a tapdisk driver type (e.g. 'aio')"""
1052 return {
1053 'raw': 'aio',
1054 'vhd': 'vhd',
1055 'iso': 'aio', # for ISO SR
1056 'aio': 'aio', # for LVHD
1057 'file': 'aio',
1058 'phy': 'aio'
1059 }[vdi_type]
1061 def get_tap_type(self):
1062 vdi_type = self.target.get_vdi_type()
1063 return VDI._tap_type(vdi_type)
1065 def get_phy_path(self):
1066 return self.target.get_vdi_path()
1068 class UnexpectedVDIType(Exception):
1070 def __init__(self, vdi_type, target):
1071 self.vdi_type = vdi_type
1072 self.target = target
1074 def __str__(self):
1075 return \
1076 "Target %s has unexpected VDI type '%s'" % \
1077 (type(self.target), self.vdi_type)
1079 VDI_PLUG_TYPE = {'phy': 'phy', # for NETAPP
1080 'raw': 'phy',
1081 'aio': 'tap', # for LVHD raw nodes
1082 'iso': 'tap', # for ISOSR
1083 'file': 'tap',
1084 'vhd': 'tap'}
1086 def tap_wanted(self):
1087 # 1. Let the target vdi_type decide
1089 vdi_type = self.target.get_vdi_type()
1091 try:
1092 plug_type = self.VDI_PLUG_TYPE[vdi_type]
1093 except KeyError:
1094 raise self.UnexpectedVDIType(vdi_type,
1095 self.target.vdi)
1097 if plug_type == 'tap':
1098 return True
1099 elif self.target.vdi.sr.handles('udev'):
1100 return True
1101 # 2. Otherwise, there may be more reasons
1102 #
1103 # .. TBD
1105 return False
1107 class TargetDriver:
1108 """Safe target driver access."""
1109 # NB. *Must* test caps for optional calls. Some targets
1110 # actually implement some slots, but do not enable them. Just
1111 # try/except would risk breaking compatibility.
1113 def __init__(self, vdi, driver_info):
1114 self.vdi = vdi
1115 self._caps = driver_info['capabilities']
1117 def has_cap(self, cap):
1118 """Determine if target has given capability"""
1119 return cap in self._caps
1121 def attach(self, sr_uuid, vdi_uuid):
1122 #assert self.has_cap("VDI_ATTACH")
1123 return self.vdi.attach(sr_uuid, vdi_uuid)
1125 def detach(self, sr_uuid, vdi_uuid):
1126 #assert self.has_cap("VDI_DETACH")
1127 self.vdi.detach(sr_uuid, vdi_uuid)
1129 def activate(self, sr_uuid, vdi_uuid):
1130 if self.has_cap("VDI_ACTIVATE"):
1131 return self.vdi.activate(sr_uuid, vdi_uuid)
1133 def deactivate(self, sr_uuid, vdi_uuid):
1134 if self.has_cap("VDI_DEACTIVATE"):
1135 self.vdi.deactivate(sr_uuid, vdi_uuid)
1136 #def resize(self, sr_uuid, vdi_uuid, size):
1137 # return self.vdi.resize(sr_uuid, vdi_uuid, size)
1139 def get_vdi_type(self):
1140 _type = self.vdi.vdi_type
1141 if not _type:
1142 _type = self.vdi.sr.sr_vditype
1143 if not _type:
1144 raise VDI.UnexpectedVDIType(_type, self.vdi)
1145 return _type
1147 def get_vdi_path(self):
1148 return self.vdi.path
1150 class Link(object):
1151 """Relink a node under a common name"""
1152 # NB. We have to provide the device node path during
1153 # VDI.attach, but currently do not allocate the tapdisk minor
1154 # before VDI.activate. Hence these link steps, which relink
1155 # existing devices under deterministic path names.
1157 BASEDIR = None
1159 def _mklink(self, target):
1160 raise NotImplementedError("_mklink is not defined")
1162 def _equals(self, target):
1163 raise NotImplementedError("_equals is not defined")
1165 def __init__(self, path):
1166 self._path = path
1168 @classmethod
1169 def from_name(cls, name):
1170 path = "%s/%s" % (cls.BASEDIR, name)
1171 return cls(path)
1173 @classmethod
1174 def from_uuid(cls, sr_uuid, vdi_uuid):
1175 name = "%s/%s" % (sr_uuid, vdi_uuid)
1176 return cls.from_name(name)
1178 def path(self):
1179 return self._path
1181 def stat(self):
1182 return os.stat(self.path())
1184 def mklink(self, target):
1186 path = self.path()
1187 util.SMlog("%s -> %s" % (self, target))
1189 mkdirs(os.path.dirname(path))
1190 try:
1191 self._mklink(target)
1192 except OSError as e:
1193 # We do unlink during teardown, but have to stay
1194 # idempotent. However, a *wrong* target should never
1195 # be seen.
1196 if e.errno != errno.EEXIST:
1197 raise
1198 assert self._equals(target), "'%s' not equal to '%s'" % (path, target)
1200 def unlink(self):
1201 try:
1202 os.unlink(self.path())
1203 except OSError as e:
1204 if e.errno != errno.ENOENT:
1205 raise
1207 def __str__(self):
1208 path = self.path()
1209 return "%s(%s)" % (self.__class__.__name__, path)
1211 class SymLink(Link):
1212 """Symlink some file to a common name"""
1214 def readlink(self):
1215 return os.readlink(self.path())
1217 def symlink(self):
1218 return self.path()
1220 def _mklink(self, target):
1221 os.symlink(target, self.path())
1223 def _equals(self, target):
1224 return self.readlink() == target
1226 class DeviceNode(Link):
1227 """Relink a block device node to a common name"""
1229 @classmethod
1230 def _real_stat(cls, target):
1231 """stat() not on @target, but its realpath()"""
1232 _target = os.path.realpath(target)
1233 return os.stat(_target)
1235 @classmethod
1236 def is_block(cls, target):
1237 """Whether @target refers to a block device."""
1238 return S_ISBLK(cls._real_stat(target).st_mode)
1240 def _mklink(self, target):
1242 st = self._real_stat(target)
1243 if not S_ISBLK(st.st_mode):
1244 raise self.NotABlockDevice(target, st)
1246 os.mknod(self.path(), st.st_mode, st.st_rdev)
1248 def _equals(self, target):
1249 target_rdev = self._real_stat(target).st_rdev
1250 return self.stat().st_rdev == target_rdev
1252 def rdev(self):
1253 st = self.stat()
1254 assert S_ISBLK(st.st_mode)
1255 return os.major(st.st_rdev), os.minor(st.st_rdev)
1257 class NotABlockDevice(Exception):
1259 def __init__(self, path, st):
1260 self.path = path
1261 self.st = st
1263 def __str__(self):
1264 return "%s is not a block device: %s" % (self.path, self.st)
1266 class Hybrid(Link):
1268 def __init__(self, path):
1269 VDI.Link.__init__(self, path)
1270 self._devnode = VDI.DeviceNode(path)
1271 self._symlink = VDI.SymLink(path)
1273 def rdev(self):
1274 st = self.stat()
1275 if S_ISBLK(st.st_mode):
1276 return self._devnode.rdev()
1277 raise self._devnode.NotABlockDevice(self.path(), st)
1279 def mklink(self, target):
1280 if self._devnode.is_block(target):
1281 self._obj = self._devnode
1282 else:
1283 self._obj = self._symlink
1284 self._obj.mklink(target)
1286 def _equals(self, target):
1287 return self._obj._equals(target)
1289 class PhyLink(SymLink):
1290 BASEDIR = "/dev/sm/phy"
1291 # NB. Cannot use DeviceNodes, e.g. FileVDIs aren't bdevs.
1293 class NBDLink(SymLink):
1295 BASEDIR = "/run/blktap-control/nbd"
1297 class BackendLink(Hybrid):
1298 BASEDIR = "/dev/sm/backend"
1299 # NB. Could be SymLinks as well, but saving major,minor pairs in
1300 # Links enables neat state capturing when managing Tapdisks. Note
1301 # that we essentially have a tap-ctl list replacement here. For
1302 # now make it a 'Hybrid'. Likely to collapse into a DeviceNode as
1303 # soon as ISOs are tapdisks.
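# Illustrative example (not part of the original module) of the link layout
# these classes produce for a VDI <vdi_uuid> in SR <sr_uuid>:
#   PhyLink.from_uuid(...)     -> /dev/sm/phy/<sr_uuid>/<vdi_uuid>
#   BackendLink.from_uuid(...) -> /dev/sm/backend/<sr_uuid>/<vdi_uuid>
#   NBDLink.from_uuid(...)     -> /run/blktap-control/nbd/<sr_uuid>/<vdi_uuid>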
1305 @staticmethod
1306 def _tap_activate(phy_path, vdi_type, sr_uuid, options, pool_size=None):
1308 tapdisk = Tapdisk.find_by_path(phy_path)
1309 if not tapdisk:
1310 blktap = Blktap.allocate()
1311 blktap.set_pool_name(sr_uuid)
1312 if pool_size:
1313 blktap.set_pool_size(pool_size)
1315 try:
1316 tapdisk = \
1317 Tapdisk.launch_on_tap(blktap,
1318 phy_path,
1319 VDI._tap_type(vdi_type),
1320 options)
1321 except:
1322 blktap.free()
1323 raise
1324 util.SMlog("tap.activate: Launched %s" % tapdisk)
1326 else:
1327 util.SMlog("tap.activate: Found %s" % tapdisk)
1329 return tapdisk.get_devpath(), tapdisk
1331 @staticmethod
1332 def _tap_deactivate(minor):
1334 try:
1335 tapdisk = Tapdisk.from_minor(minor)
1336 except TapdiskNotRunning as e:
1337 util.SMlog("tap.deactivate: Warning, %s" % e)
1338 # NB. Should not be here unless the agent refcount
1339 # broke. Also, a clean shutdown should not have leaked
1340 # the recorded minor.
1341 else:
1342 tapdisk.shutdown()
1343 util.SMlog("tap.deactivate: Shut down %s" % tapdisk)
1345 @classmethod
1346 def tap_pause(cls, session, sr_uuid, vdi_uuid, failfast=False):
1347 """
1348 Pauses the tapdisk.
1350 session: a XAPI session
1351 sr_uuid: the UUID of the SR on which VDI lives
1352 vdi_uuid: the UUID of the VDI to pause
1353 failfast: controls whether the VDI lock should be acquired in a
1354 non-blocking manner
1355 """
1356 util.SMlog("Pause request for %s" % vdi_uuid)
1357 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1358 session.xenapi.VDI.add_to_sm_config(vdi_ref, 'paused', 'true')
1359 sm_config = session.xenapi.VDI.get_sm_config(vdi_ref)
1360 for key in [x for x in sm_config.keys() if x.startswith('host_')]:
1361 host_ref = key[len('host_'):]
1362 util.SMlog("Calling tap-pause on host %s" % host_ref)
1363 if not cls.call_pluginhandler(session, host_ref,
1364 sr_uuid, vdi_uuid, "pause", failfast=failfast):
1365 # Failed to pause node
1366 session.xenapi.VDI.remove_from_sm_config(vdi_ref, 'paused')
1367 return False
1368 return True
1370 @classmethod
1371 def tap_unpause(cls, session, sr_uuid, vdi_uuid, secondary=None,
1372 activate_parents=False):
1373 util.SMlog("Unpause request for %s secondary=%s" % (vdi_uuid, secondary))
1374 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1375 sm_config = session.xenapi.VDI.get_sm_config(vdi_ref)
1376 for key in [x for x in sm_config.keys() if x.startswith('host_')]:
1377 host_ref = key[len('host_'):]
1378 util.SMlog("Calling tap-unpause on host %s" % host_ref)
1379 if not cls.call_pluginhandler(session, host_ref,
1380 sr_uuid, vdi_uuid, "unpause", secondary, activate_parents):
1381 # Failed to unpause node
1382 return False
1383 session.xenapi.VDI.remove_from_sm_config(vdi_ref, 'paused')
1384 return True
1386 @classmethod
1387 def tap_refresh(cls, session, sr_uuid, vdi_uuid, activate_parents=False):
1388 util.SMlog("Refresh request for %s" % vdi_uuid)
1389 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1390 sm_config = session.xenapi.VDI.get_sm_config(vdi_ref)
1391 for key in [x for x in sm_config.keys() if x.startswith('host_')]:
1392 host_ref = key[len('host_'):]
1393 util.SMlog("Calling tap-refresh on host %s" % host_ref)
1394 if not cls.call_pluginhandler(session, host_ref,
1395 sr_uuid, vdi_uuid, "refresh", None,
1396 activate_parents=activate_parents):
1397 # Failed to refresh node
1398 return False
1399 return True
1401 @classmethod
1402 def tap_status(cls, session, vdi_uuid):
1403 """Return True if disk is attached, false if it isn't"""
1404 util.SMlog("Disk status request for %s" % vdi_uuid)
1405 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1406 sm_config = session.xenapi.VDI.get_sm_config(vdi_ref)
1407 for key in [x for x in sm_config.keys() if x.startswith('host_')]:
1408 return True
1409 return False
1411 @classmethod
1412 def call_pluginhandler(cls, session, host_ref, sr_uuid, vdi_uuid, action,
1413 secondary=None, activate_parents=False, failfast=False):
1414 """Optionally, activate the parent LV before unpausing"""
1415 try:
1416 args = {"sr_uuid": sr_uuid, "vdi_uuid": vdi_uuid,
1417 "failfast": str(failfast)}
1418 if secondary:
1419 args["secondary"] = secondary
1420 if activate_parents:
1421 args["activate_parents"] = "true"
1422 ret = session.xenapi.host.call_plugin(
1423 host_ref, PLUGIN_TAP_PAUSE, action,
1424 args)
1425 return ret == "True"
1426 except Exception as e:
1427 util.logException("BLKTAP2:call_pluginhandler %s" % e)
1428 return False
1430 def _add_tag(self, vdi_uuid, writable):
1431 util.SMlog("Adding tag to: %s" % vdi_uuid)
1432 attach_mode = "RO"
1433 if writable:
1434 attach_mode = "RW"
1435 vdi_ref = self._session.xenapi.VDI.get_by_uuid(vdi_uuid)
1436 host_ref = self._session.xenapi.host.get_by_uuid(util.get_this_host())
1437 sm_config = self._session.xenapi.VDI.get_sm_config(vdi_ref)
1438 attached_as = util.attached_as(sm_config)
1439 if NO_MULTIPLE_ATTACH and (attached_as == "RW" or \
1440 (attached_as == "RO" and attach_mode == "RW")):
1441 util.SMlog("need to reset VDI %s" % vdi_uuid)
1442 if not resetvdis.reset_vdi(self._session, vdi_uuid, force=False,
1443 term_output=False, writable=writable):
1444 raise util.SMException("VDI %s not detached cleanly" % vdi_uuid)
1445 sm_config = self._session.xenapi.VDI.get_sm_config(vdi_ref)
1446 if 'relinking' in sm_config:
1447 util.SMlog("Relinking key found [%s], back-off and retry" % sm_config)
1448 return False
1449 if 'paused' in sm_config:
1450 util.SMlog("Paused or host_ref key found [%s]" % sm_config)
1451 return False
1452 self._session.xenapi.VDI.add_to_sm_config(
1453 vdi_ref, 'activating', 'True')
1454 host_key = "host_%s" % host_ref
1455 assert host_key not in sm_config
1456 self._session.xenapi.VDI.add_to_sm_config(vdi_ref, host_key,
1457 attach_mode)
1458 sm_config = self._session.xenapi.VDI.get_sm_config(vdi_ref)
1459 if 'paused' in sm_config or 'relinking' in sm_config:
1460 util.SMlog("Found %s key, aborting" % (
1461 'paused' if 'paused' in sm_config else 'relinking'))
1462 self._session.xenapi.VDI.remove_from_sm_config(vdi_ref, host_key)
1463 self._session.xenapi.VDI.remove_from_sm_config(
1464 vdi_ref, 'activating')
1465 return False
1466 util.SMlog("Activate lock succeeded")
1467 return True
1469 def _check_tag(self, vdi_uuid):
1470 vdi_ref = self._session.xenapi.VDI.get_by_uuid(vdi_uuid)
1471 sm_config = self._session.xenapi.VDI.get_sm_config(vdi_ref)
1472 if 'paused' in sm_config:
1473 util.SMlog("Paused key found [%s]" % sm_config)
1474 return False
1475 return True
1477 def _remove_tag(self, vdi_uuid):
1478 vdi_ref = self._session.xenapi.VDI.get_by_uuid(vdi_uuid)
1479 host_ref = self._session.xenapi.host.get_by_uuid(util.get_this_host())
1480 sm_config = self._session.xenapi.VDI.get_sm_config(vdi_ref)
1481 host_key = "host_%s" % host_ref
1482 if host_key in sm_config:
1483 self._session.xenapi.VDI.remove_from_sm_config(vdi_ref, host_key)
1484 util.SMlog("Removed host key %s for %s" % (host_key, vdi_uuid))
1485 else:
1486 util.SMlog("_remove_tag: host key %s not found, ignore" % host_key)
1488 def _get_pool_config(self, pool_name):
1489 pool_info = dict()
1490 vdi_ref = self.target.vdi.sr.srcmd.params.get('vdi_ref')
1491 if not vdi_ref:
1492 # attach_from_config context: HA disks don't need to be in any
1493 # special pool
1494 return pool_info
1496 sr_ref = self.target.vdi.sr.srcmd.params.get('sr_ref')
1497 sr_config = self._session.xenapi.SR.get_other_config(sr_ref)
1498 vdi_config = self._session.xenapi.VDI.get_other_config(vdi_ref)
1499 pool_size_str = sr_config.get(POOL_SIZE_KEY)
1500 pool_name_override = vdi_config.get(POOL_NAME_KEY)
1501 if pool_name_override:
1502 pool_name = pool_name_override
1503 pool_size_override = vdi_config.get(POOL_SIZE_KEY)
1504 if pool_size_override:
1505 pool_size_str = pool_size_override
1506 pool_size = 0
1507 if pool_size_str:
1508 try:
1509 pool_size = int(pool_size_str)
1510 if pool_size < 1 or pool_size > MAX_FULL_RINGS:
1511 raise ValueError("outside of range")
1512 pool_size = NUM_PAGES_PER_RING * pool_size
1513 except ValueError:
1514 util.SMlog("Error: invalid mem-pool-size %s" % pool_size_str)
1515 pool_size = 0
1517 pool_info["mem-pool"] = pool_name
1518 if pool_size:
1519 pool_info["mem-pool-size"] = str(pool_size)
1521 return pool_info
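# Illustrative example (not part of the original module): with the SR's
# other_config (or the VDI override) setting mem-pool-size-rings to 2, the
# pool size becomes NUM_PAGES_PER_RING * 2 = 352 * 2 = 704 pages, so this
# returns something like {"mem-pool": <pool name>, "mem-pool-size": "704"}.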
1523 def linkNBD(self, sr_uuid, vdi_uuid):
1524 if self.tap:
1525 nbd_path = '/run/blktap-control/nbd%d.%d' % (int(self.tap.pid),
1526 int(self.tap.minor))
1527 VDI.NBDLink.from_uuid(sr_uuid, vdi_uuid).mklink(nbd_path)
1529 def attach(self, sr_uuid, vdi_uuid, writable, activate=False, caching_params={}):
1530 """Return/dev/sm/backend symlink path"""
1531 self.xenstore_data.update(self._get_pool_config(sr_uuid))
1532 if not self.target.has_cap("ATOMIC_PAUSE") or activate:
1533 util.SMlog("Attach & activate")
1534 self._attach(sr_uuid, vdi_uuid)
1535 dev_path = self._activate(sr_uuid, vdi_uuid,
1536 {"rdonly": not writable})
1537 self.BackendLink.from_uuid(sr_uuid, vdi_uuid).mklink(dev_path)
1538 self.linkNBD(sr_uuid, vdi_uuid)
1540 # Return backend/ link
1541 back_path = self.BackendLink.from_uuid(sr_uuid, vdi_uuid).path()
1542 if self.tap_wanted():
1543 # Only have NBD if we also have a tap
1544 nbd_path = "nbd:unix:{}:exportname={}".format(
1545 VDI.NBDLink.from_uuid(sr_uuid, vdi_uuid).path(),
1546 vdi_uuid)
1547 else:
1548 nbd_path = ""
1550 options = {"rdonly": not writable}
1551 options.update(caching_params)
1552 o_direct, o_direct_reason = self.get_o_direct_capability(options)
1553 struct = {'params': back_path,
1554 'params_nbd': nbd_path,
1555 'o_direct': o_direct,
1556 'o_direct_reason': o_direct_reason,
1557 'xenstore_data': self.xenstore_data}
1558 util.SMlog('result: %s' % struct)
1560 try:
1561 f = open("%s.attach_info" % back_path, 'a')
1562 f.write(xmlrpc.client.dumps((struct, ), "", True))
1563 f.close()
1564 except:
1565 pass
1567 return xmlrpc.client.dumps((struct, ), "", True)
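# Illustrative example (not part of the original module): the XML-RPC blob
# returned by attach() decodes (via xmlrpc.client.loads(...)[0][0], as
# _attach() does for the target's own result) to a dict along the lines of
#   {'params': '/dev/sm/backend/<sr_uuid>/<vdi_uuid>',
#    'params_nbd': 'nbd:unix:/run/blktap-control/nbd/<sr_uuid>/<vdi_uuid>:exportname=<vdi_uuid>',
#    'o_direct': True, 'o_direct_reason': 'NO_RO_IMAGE',
#    'xenstore_data': {...}}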
1569 def activate(self, sr_uuid, vdi_uuid, writable, caching_params):
1570 util.SMlog("blktap2.activate")
1571 options = {"rdonly": not writable}
1572 options.update(caching_params)
1574 sr_ref = self.target.vdi.sr.srcmd.params.get('sr_ref')
1575 sr_other_config = self._session.xenapi.SR.get_other_config(sr_ref)
1576 timeout = nfs.get_nfs_timeout(sr_other_config)
1577 if timeout:
1578 # Note NFS timeout values are in deciseconds
1579 timeout = int((timeout + 5) / 10)
1580 options["timeout"] = timeout + self.TAPDISK_TIMEOUT_MARGIN
1581 for i in range(self.ATTACH_DETACH_RETRY_SECS):
1582 try:
1583 if self._activate_locked(sr_uuid, vdi_uuid, options):
1584 return
1585 except util.SRBusyException:
1586 util.SMlog("SR locked, retrying")
1587 time.sleep(1)
1588 raise util.SMException("VDI %s locked" % vdi_uuid)
1590 @locking("VDIUnavailable")
1591 def _activate_locked(self, sr_uuid, vdi_uuid, options):
1592 """Wraps target.activate and adds a tapdisk"""
1594 #util.SMlog("VDI.activate %s" % vdi_uuid)
1595 if self.tap_wanted():
1596 if not self._add_tag(vdi_uuid, not options["rdonly"]):
1597 return False
1598 # it is possible that while the VDI was paused some of its
1599 # attributes have changed (e.g. its size if it was inflated; or its
1600 # path if it was leaf-coalesced onto a raw LV), so refresh the
1601 # object completely
1602 params = self.target.vdi.sr.srcmd.params
1603 target = sm.VDI.from_uuid(self.target.vdi.session, vdi_uuid)
1604 target.sr.srcmd.params = params
1605 driver_info = target.sr.srcmd.driver_info
1606 self.target = self.TargetDriver(target, driver_info)
1608 try:
1609 util.fistpoint.activate_custom_fn(
1610 "blktap_activate_inject_failure",
1611 lambda: util.inject_failure())
1613 # Attach the physical node
1614 if self.target.has_cap("ATOMIC_PAUSE"):
1615 self._attach(sr_uuid, vdi_uuid)
1617 vdi_type = self.target.get_vdi_type()
1619 # Take lvchange-p Lock before running
1620 # tap-ctl open
1621 # Needed to avoid race with lvchange -p which is
1622 # now taking the same lock
1623 # This is a fix for CA-155766
1624 if hasattr(self.target.vdi.sr, 'DRIVER_TYPE') and \
1625 self.target.vdi.sr.DRIVER_TYPE == 'lvhd' and \
1626 vdi_type == vhdutil.VDI_TYPE_VHD:
1627 lock = Lock("lvchange-p", lvhdutil.NS_PREFIX_LVM + sr_uuid)
1628 lock.acquire()
1630 # When we attach a static VDI for HA, we cannot communicate with
1631 # xapi, because it has not started yet. These VDIs are raw.
1632 if vdi_type != vhdutil.VDI_TYPE_RAW:
1633 session = self.target.vdi.session
1634 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1635 # pylint: disable=used-before-assignment
1636 sm_config = session.xenapi.VDI.get_sm_config(vdi_ref)
1637 if 'key_hash' in sm_config:
1638 key_hash = sm_config['key_hash']
1639 options['key_hash'] = key_hash
1640 options['vdi_uuid'] = vdi_uuid
1641 util.SMlog('Using key with hash {} for VDI {}'.format(key_hash, vdi_uuid))
1642 # Activate the physical node
1643 dev_path = self._activate(sr_uuid, vdi_uuid, options)
1645 if hasattr(self.target.vdi.sr, 'DRIVER_TYPE') and \
1646 self.target.vdi.sr.DRIVER_TYPE == 'lvhd' and \
1647 self.target.get_vdi_type() == vhdutil.VDI_TYPE_VHD:
1648 lock.release()
1649 except:
1650 util.SMlog("Exception in activate/attach")
1651 if self.tap_wanted():
1652 util.fistpoint.activate_custom_fn(
1653 "blktap_activate_error_handling",
1654 lambda: time.sleep(30))
1655 while True:
1656 try:
1657 self._remove_tag(vdi_uuid)
1658 break
1659 except xmlrpc.client.ProtocolError as e:
1660 # If there's a connection error, keep trying forever.
1661 if e.errcode == http.HTTPStatus.INTERNAL_SERVER_ERROR.value:
1662 continue
1663 else:
1664 util.SMlog('failed to remove tag: %s' % e)
1665 break
1666 except Exception as e:
1667 util.SMlog('failed to remove tag: %s' % e)
1668 break
1669 raise
1670 finally:
1671 vdi_ref = self._session.xenapi.VDI.get_by_uuid(vdi_uuid)
1672 self._session.xenapi.VDI.remove_from_sm_config(
1673 vdi_ref, 'activating')
1674 util.SMlog("Removed activating flag from %s" % vdi_uuid)
1676 # Link result to backend/
1677 self.BackendLink.from_uuid(sr_uuid, vdi_uuid).mklink(dev_path)
1678 self.linkNBD(sr_uuid, vdi_uuid)
1679 return True
1681 def _activate(self, sr_uuid, vdi_uuid, options):
1682 vdi_options = self.target.activate(sr_uuid, vdi_uuid)
1684 dev_path = self.setup_cache(sr_uuid, vdi_uuid, options)
1685 if not dev_path:
1686 phy_path = self.PhyLink.from_uuid(sr_uuid, vdi_uuid).readlink()
1687 # Maybe launch a tapdisk on the physical link
1688 if self.tap_wanted():
1689 vdi_type = self.target.get_vdi_type()
1690 options["o_direct"] = self.get_o_direct_capability(options)[0]
1691 if vdi_options:
1692 options.update(vdi_options)
1693 dev_path, self.tap = self._tap_activate(phy_path, vdi_type,
1694 sr_uuid, options,
1695 self._get_pool_config(sr_uuid).get("mem-pool-size"))
1696 else:
1697 dev_path = phy_path # Just reuse phy
1699 return dev_path
1701 def _attach(self, sr_uuid, vdi_uuid):
1702 attach_info = xmlrpc.client.loads(self.target.attach(sr_uuid, vdi_uuid))[0][0]
1703 params = attach_info['params']
1704 xenstore_data = attach_info['xenstore_data']
1705 phy_path = util.to_plain_string(params)
1706 self.xenstore_data.update(xenstore_data)
1707 # Save it to phy/
1708 self.PhyLink.from_uuid(sr_uuid, vdi_uuid).mklink(phy_path)
1710 def deactivate(self, sr_uuid, vdi_uuid, caching_params):
1711 util.SMlog("blktap2.deactivate")
1712 for i in range(self.ATTACH_DETACH_RETRY_SECS):
1713 try:
1714 if self._deactivate_locked(sr_uuid, vdi_uuid, caching_params):
1715 return
1716 except util.SRBusyException as e:
1717 util.SMlog("SR locked, retrying")
1718 time.sleep(1)
1719 raise util.SMException("VDI %s locked" % vdi_uuid)
1721 @locking("VDIUnavailable")
1722 def _deactivate_locked(self, sr_uuid, vdi_uuid, caching_params):
1723 """Wraps target.deactivate and removes a tapdisk"""
1725 #util.SMlog("VDI.deactivate %s" % vdi_uuid)
1726 if self.tap_wanted() and not self._check_tag(vdi_uuid):
1727 return False
1729 self._deactivate(sr_uuid, vdi_uuid, caching_params)
1730 if self.target.has_cap("ATOMIC_PAUSE"):
1731 self._detach(sr_uuid, vdi_uuid)
1732 if self.tap_wanted():
1733 self._remove_tag(vdi_uuid)
1735 return True
1737 def _resetPhylink(self, sr_uuid, vdi_uuid, path):
1738 self.PhyLink.from_uuid(sr_uuid, vdi_uuid).mklink(path)
1740 def detach(self, sr_uuid, vdi_uuid, deactivate=False, caching_params={}):
1741 if not self.target.has_cap("ATOMIC_PAUSE") or deactivate:
1742 util.SMlog("Deactivate & detach")
1743 self._deactivate(sr_uuid, vdi_uuid, caching_params)
1744 self._detach(sr_uuid, vdi_uuid)
1745 else:
1746 pass # nothing to do
1748 def _deactivate(self, sr_uuid, vdi_uuid, caching_params):
1749 import VDI as sm
1751 # Shutdown tapdisk
1752 back_link = self.BackendLink.from_uuid(sr_uuid, vdi_uuid)
1754 if not util.pathexists(back_link.path()):
1755 util.SMlog("Backend path %s does not exist" % back_link.path())
1756 return
1758 try:
1759 attach_info_path = "%s.attach_info" % (back_link.path())
1760 os.unlink(attach_info_path)
1761 except:
1762 util.SMlog("unlink of attach_info failed")
1764 try:
1765 major, minor = back_link.rdev()
1766 except self.DeviceNode.NotABlockDevice:
1767 pass
1768 else:
1769 if major == Tapdisk.major():
1770 self._tap_deactivate(minor)
1771 self.remove_cache(sr_uuid, vdi_uuid, caching_params)
1773 # Remove the backend link
1774 back_link.unlink()
1775 VDI.NBDLink.from_uuid(sr_uuid, vdi_uuid).unlink()
1777 # Deactivate & detach the physical node
1778 if self.tap_wanted() and self.target.vdi.session is not None:
1779 # it is possible that while the VDI was paused some of its
1780 # attributes have changed (e.g. its size if it was inflated; or its
1781 # path if it was leaf-coalesced onto a raw LV), so refresh the
1782 # object completely
1783 target = sm.VDI.from_uuid(self.target.vdi.session, vdi_uuid)
1784 driver_info = target.sr.srcmd.driver_info
1785 self.target = self.TargetDriver(target, driver_info)
1787 self.target.deactivate(sr_uuid, vdi_uuid)
1789 def _detach(self, sr_uuid, vdi_uuid):
1790 self.target.detach(sr_uuid, vdi_uuid)
1792 # Remove phy/
1793 self.PhyLink.from_uuid(sr_uuid, vdi_uuid).unlink()
1795 def _updateCacheRecord(self, session, vdi_uuid, on_boot, caching):
1796 # Remove existing VDI.sm_config fields
1797 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1798 for key in ["on_boot", "caching"]:
1799 session.xenapi.VDI.remove_from_sm_config(vdi_ref, key)
1800 if not on_boot is None:
1801 session.xenapi.VDI.add_to_sm_config(vdi_ref, 'on_boot', on_boot)
1802 if not caching is None:
1803 session.xenapi.VDI.add_to_sm_config(vdi_ref, 'caching', caching)
1805 def setup_cache(self, sr_uuid, vdi_uuid, params):
1806 if params.get(self.CONF_KEY_ALLOW_CACHING) != "true":
1807 return
1809 util.SMlog("Requested local caching")
1810 if not self.target.has_cap("SR_CACHING"):
1811 util.SMlog("Error: local caching not supported by this SR")
1812 return
1814 scratch_mode = False
1815 if params.get(self.CONF_KEY_MODE_ON_BOOT) == "reset":
1816 scratch_mode = True
1817 util.SMlog("Requested scratch mode")
1818 if not self.target.has_cap("VDI_RESET_ON_BOOT/2"):
1819 util.SMlog("Error: scratch mode not supported by this SR")
1820 return
1822 dev_path = None
1823 local_sr_uuid = params.get(self.CONF_KEY_CACHE_SR)
1824 if not local_sr_uuid:
1825 util.SMlog("ERROR: Local cache SR not specified, not enabling")
1826 return
1827 dev_path = self._setup_cache(self._session, sr_uuid, vdi_uuid,
1828 local_sr_uuid, scratch_mode, params)
1830 if dev_path:
1831 self._updateCacheRecord(self._session, self.target.vdi.uuid,
1832 params.get(self.CONF_KEY_MODE_ON_BOOT),
1833 params.get(self.CONF_KEY_ALLOW_CACHING))
1835 return dev_path
1837 def alert_no_cache(self, session, vdi_uuid, cache_sr_uuid, err):
1838 vm_uuid = None
1839 vm_label = ""
1840 try:
1841 cache_sr_ref = session.xenapi.SR.get_by_uuid(cache_sr_uuid)
1842 cache_sr_rec = session.xenapi.SR.get_record(cache_sr_ref)
1843 cache_sr_label = cache_sr_rec.get("name_label")
1845 host_ref = session.xenapi.host.get_by_uuid(util.get_this_host())
1846 host_rec = session.xenapi.host.get_record(host_ref)
1847 host_label = host_rec.get("name_label")
1849 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1850 vbds = session.xenapi.VBD.get_all_records_where( \
1851 "field \"VDI\" = \"%s\"" % vdi_ref)
1852 for vbd_rec in vbds.values():
1853 vm_ref = vbd_rec.get("VM")
1854 vm_rec = session.xenapi.VM.get_record(vm_ref)
1855 vm_uuid = vm_rec.get("uuid")
1856 vm_label = vm_rec.get("name_label")
1857 except:
1858 util.logException("alert_no_cache")
1860 alert_obj = "SR"
1861 alert_uuid = str(cache_sr_uuid)
1862 alert_str = "No space left in Local Cache SR %s" % cache_sr_uuid
1863 if vm_uuid:
1864 alert_obj = "VM"
1865 alert_uuid = vm_uuid
1866 reason = ""
1867 if err == errno.ENOSPC:
1868 reason = "because there is no space left"
1869 alert_str = "The VM \"%s\" is not using IntelliCache %s on the Local Cache SR (\"%s\") on host \"%s\"" % \
1870 (vm_label, reason, cache_sr_label, host_label)
1872 util.SMlog("Creating alert: (%s, %s, \"%s\")" % \
1873 (alert_obj, alert_uuid, alert_str))
1874 session.xenapi.message.create("No space left in local cache", "3",
1875 alert_obj, alert_uuid, alert_str)
1877 def _setup_cache(self, session, sr_uuid, vdi_uuid, local_sr_uuid,
1878 scratch_mode, options):
1879 import SR
1880 import EXTSR
1881 import NFSSR
1882 import XenAPI
1883 from lock import Lock
1884 from FileSR import FileVDI
1886 parent_uuid = vhdutil.getParent(self.target.vdi.path,
1887 FileVDI.extractUuid)
1888 if not parent_uuid:
1889 util.SMlog("ERROR: VDI %s has no parent, not enabling" % \
1890 self.target.vdi.uuid)
1891 return
1893 util.SMlog("Setting up cache")
1894 parent_uuid = parent_uuid.strip()
1895 shared_target = NFSSR.NFSFileVDI(self.target.vdi.sr, parent_uuid)
1897 if shared_target.parent:
1898 util.SMlog("ERROR: Parent VDI %s has parent, not enabling" %
1899 shared_target.uuid)
1900 return
1902 SR.registerSR(EXTSR.EXTSR)
1903 local_sr = SR.SR.from_uuid(session, local_sr_uuid)
1905 lock = Lock(self.LOCK_CACHE_SETUP, parent_uuid)
1906 lock.acquire()
1908 # read cache
1909 read_cache_path = "%s/%s.vhdcache" % (local_sr.path, shared_target.uuid)
1910 if util.pathexists(read_cache_path):
1911 util.SMlog("Read cache node (%s) already exists, not creating" % \
1912 read_cache_path)
1913 else:
1914 try:
1915 vhdutil.snapshot(read_cache_path, shared_target.path, False)
1916 except util.CommandException as e:
1917 util.SMlog("Error creating parent cache: %s" % e)
1918 self.alert_no_cache(session, vdi_uuid, local_sr_uuid, e.code)
1919 return None
1921 # local write node
1922 leaf_size = vhdutil.getSizeVirt(self.target.vdi.path)
1923 local_leaf_path = "%s/%s.vhdcache" % \
1924 (local_sr.path, self.target.vdi.uuid)
1925 if util.pathexists(local_leaf_path):
1926 util.SMlog("Local leaf node (%s) already exists, deleting" % \
1927 local_leaf_path)
1928 os.unlink(local_leaf_path)
1929 try:
1930 vhdutil.snapshot(local_leaf_path, read_cache_path, False,
1931 msize=leaf_size // 1024 // 1024, checkEmpty=False)
1932 except util.CommandException as e:
1933 util.SMlog("Error creating leaf cache: %s" % e)
1934 self.alert_no_cache(session, vdi_uuid, local_sr_uuid, e.code)
1935 return None
1937 local_leaf_size = vhdutil.getSizeVirt(local_leaf_path)
1938 if leaf_size > local_leaf_size:
1939 util.SMlog("Leaf size %d > local leaf cache size %d, resizing" %
1940 (leaf_size, local_leaf_size))
1941 vhdutil.setSizeVirtFast(local_leaf_path, leaf_size)
1943 vdi_type = self.target.get_vdi_type()
1945 prt_tapdisk = Tapdisk.find_by_path(read_cache_path)
1946 if not prt_tapdisk:
1947 parent_options = copy.deepcopy(options)
1948 parent_options["rdonly"] = False
1949 parent_options["lcache"] = True
1951 blktap = Blktap.allocate()
1952 try:
1953 blktap.set_pool_name("lcache-parent-pool-%s" % blktap.minor)
1954 # no need to change pool_size since each parent tapdisk is in
1955 # its own pool
1956 prt_tapdisk = \
1957 Tapdisk.launch_on_tap(blktap, read_cache_path,
1958 'vhd', parent_options)
1959 except:
1960 blktap.free()
1961 raise
1963 secondary = "%s:%s" % (self.target.get_vdi_type(),
1964 self.PhyLink.from_uuid(sr_uuid, vdi_uuid).readlink())
1966 util.SMlog("Parent tapdisk: %s" % prt_tapdisk)
1967 leaf_tapdisk = Tapdisk.find_by_path(local_leaf_path)
1968 if not leaf_tapdisk:
1969 blktap = Blktap.allocate()
1970 child_options = copy.deepcopy(options)
1971 child_options["rdonly"] = False
1972 child_options["lcache"] = False
1973 child_options["existing_prt"] = prt_tapdisk.minor
1974 child_options["secondary"] = secondary
1975 child_options["standby"] = scratch_mode
1976 try:
1977 leaf_tapdisk = \
1978 Tapdisk.launch_on_tap(blktap, local_leaf_path,
1979 'vhd', child_options)
1980 except:
1981 blktap.free()
1982 raise
1984 lock.release()
1986 util.SMlog("Local read cache: %s, local leaf: %s" % \
1987 (read_cache_path, local_leaf_path))
1989 self.tap = leaf_tapdisk
1990 return leaf_tapdisk.get_devpath()
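# Shape of the IntelliCache chain assembled above (illustrative; assumes the
# local EXT SR is mounted at /var/run/sr-mount/<local_sr_uuid>):
#
#     shared parent VHD on the NFS SR
#       `- <local_sr>/<parent_uuid>.vhdcache   read cache; runs its own tapdisk
#                                              with rdonly=False, lcache=True
#            `- <local_sr>/<vdi_uuid>.vhdcache local leaf; tapdisk launched with
#                                              existing_prt=<parent minor>,
#                                              secondary=<vdi type>:<phy path>,
#                                              standby=<scratch mode>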
1992 def remove_cache(self, sr_uuid, vdi_uuid, params):
1993 if not self.target.has_cap("SR_CACHING"):
1994 return
1996 caching = params.get(self.CONF_KEY_ALLOW_CACHING) == "true"
1998 local_sr_uuid = params.get(self.CONF_KEY_CACHE_SR)
1999 if caching and not local_sr_uuid:
2000 util.SMlog("ERROR: Local cache SR not specified, ignore")
2001 return
2003 if caching:
2004 self._remove_cache(self._session, local_sr_uuid)
2006 if self._session is not None:
2007 self._updateCacheRecord(self._session, self.target.vdi.uuid, None, None)
2009 def _is_tapdisk_in_use(self, minor):
2010 retVal, links, sockets = util.findRunningProcessOrOpenFile("tapdisk")
2011 if not retVal:
2012 # err on the side of caution
2013 return True
2015 for link in links:
2016 if link.find("tapdev%d" % minor) != -1:
2017 return True
2019 socket_re = re.compile(r'^/.*/nbd\d+\.%d' % minor)
2020 for s in sockets:
2021 if socket_re.match(s):
2022 return True
2024 return False
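# Illustrative match for the socket test above (hypothetical path): with
# minor == 5, the pattern r'^/.*/nbd\d+\.5' matches an open NBD socket such as
# "/run/blktap-control/nbd12345.5", so the tapdisk is treated as still in use.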
2026 def _remove_cache(self, session, local_sr_uuid):
2027 import SR
2028 import EXTSR
2029 import NFSSR
2030 import XenAPI
2031 from lock import Lock
2032 from FileSR import FileVDI
2034 parent_uuid = vhdutil.getParent(self.target.vdi.path,
2035 FileVDI.extractUuid)
2036 if not parent_uuid:
2037 util.SMlog("ERROR: No parent for VDI %s, ignore" % \
2038 self.target.vdi.uuid)
2039 return
2041 util.SMlog("Tearing down the cache")
2043 parent_uuid = parent_uuid.strip()
2044 shared_target = NFSSR.NFSFileVDI(self.target.vdi.sr, parent_uuid)
2046 SR.registerSR(EXTSR.EXTSR)
2047 local_sr = SR.SR.from_uuid(session, local_sr_uuid)
2049 lock = Lock(self.LOCK_CACHE_SETUP, parent_uuid)
2050 lock.acquire()
2052 # local write node
2053 local_leaf_path = "%s/%s.vhdcache" % \
2054 (local_sr.path, self.target.vdi.uuid)
2055 if util.pathexists(local_leaf_path):
2056 util.SMlog("Deleting local leaf node %s" % local_leaf_path)
2057 os.unlink(local_leaf_path)
2059 read_cache_path = "%s/%s.vhdcache" % (local_sr.path, shared_target.uuid)
2060 prt_tapdisk = Tapdisk.find_by_path(read_cache_path)
2061 if not prt_tapdisk:
2062 util.SMlog("Parent tapdisk not found")
2063 elif not self._is_tapdisk_in_use(prt_tapdisk.minor):
2064 util.SMlog("Parent tapdisk not in use: shutting down %s" % \
2065 read_cache_path)
2066 try:
2067 prt_tapdisk.shutdown()
2068 except:
2069 util.logException("shutting down parent tapdisk")
2070 else:
2071 util.SMlog("Parent tapdisk still in use: %s" % read_cache_path)
2072 # the parent cache files are removed during the local SR's background
2073 # GC run
2075 lock.release()
2077PythonKeyError = KeyError  # keep a reference to the builtin; UEventHandler defines its own KeyError subclass below
2080class UEventHandler(object):
2082 def __init__(self):
2083 self._action = None
2085 class KeyError(PythonKeyError):
2086 def __str__(self):
2087 return \
2088 "Key '%s' missing in environment. " % self.args[0] + \
2089 "Not called in udev context?"
2091 @classmethod
2092 def getenv(cls, key):
2093 try:
2094 return os.environ[key]
2095 except KeyError as e:
2096 raise cls.KeyError(e.args[0])
2098 def get_action(self):
2099 if not self._action:
2100 self._action = self.getenv('ACTION')
2101 return self._action
2103 class UnhandledEvent(Exception):
2105 def __init__(self, event, handler):
2106 self.event = event
2107 self.handler = handler
2109 def __str__(self):
2110 return "Uevent '%s' not handled by %s" % \
2111 (self.event, self.handler.__class__.__name__)
2113 ACTIONS = {}
2115 def run(self):
2117 action = self.get_action()
2118 try:
2119 fn = self.ACTIONS[action]
2120 except KeyError:
2121 raise self.UnhandledEvent(action, self)
2123 return fn(self)
2125 def __str__(self):
2126 try:
2127 action = self.get_action()
2128 except:
2129 action = None
2130 return "%s[%s]" % (self.__class__.__name__, action)
2133class __BlktapControl(ClassDevice):
2134 SYSFS_CLASSTYPE = "misc"
2136 def __init__(self):
2137 ClassDevice.__init__(self)
2138 self._default_pool = None
2140 def sysfs_devname(self):
2141 return "blktap!control"
2143 class DefaultPool(Attribute):
2144 SYSFS_NODENAME = "default_pool"
2146 def get_default_pool_attr(self):
2147 if not self._default_pool:
2148 self._default_pool = self.DefaultPool.from_kobject(self)
2149 return self._default_pool
2151 def get_default_pool_name(self):
2152 return self.get_default_pool_attr().readline()
2154 def set_default_pool_name(self, name):
2155 self.get_default_pool_attr().writeline(name)
2157 def get_default_pool(self):
2158 return BlktapControl.get_pool(self.get_default_pool_name())
2160 def set_default_pool(self, pool):
2161 self.set_default_pool_name(pool.name)
2163 class NoSuchPool(Exception):
2164 def __init__(self, name):
2165 self.name = name
2167 def __str__(self):
2168 return "No such pool: %s", self.name
2170 def get_pool(self, name):
2171 path = "%s/pools/%s" % (self.sysfs_path(), name)
2173 if not os.path.isdir(path):
2174 raise self.NoSuchPool(name)
2176 return PagePool(path)
2178BlktapControl = __BlktapControl()
2181class PagePool(KObject):
2183 def __init__(self, path):
2184 self.path = path
2185 self._size = None
2187 def sysfs_path(self):
2188 return self.path
2190 class Size(Attribute):
2191 SYSFS_NODENAME = "size"
2193 def get_size_attr(self):
2194 if not self._size:
2195 self._size = self.Size.from_kobject(self)
2196 return self._size
2198 def set_size(self, pages):
2199 pages = str(pages)
2200 self.get_size_attr().writeline(pages)
2202 def get_size(self):
2203 pages = self.get_size_attr().readline()
2204 return int(pages)
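# Illustrative helper (not part of the driver): look up a page pool by name
# through the BlktapControl singleton and grow it if needed.  get_pool()
# raises NoSuchPool when no directory exists under <sysfs>/pools/<name>.
def _example_grow_pool(name, pages):
    try:
        pool = BlktapControl.get_pool(name)
    except BlktapControl.NoSuchPool:
        return False
    if pool.get_size() < pages:
        pool.set_size(pages)
    return True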
2207class BusDevice(KObject):
2209 SYSFS_BUSTYPE = None
2211 @classmethod
2212 def sysfs_bus_path(cls):
2213 return "/sys/bus/%s" % cls.SYSFS_BUSTYPE
2215 def sysfs_path(self):
2216 path = "%s/devices/%s" % (self.sysfs_bus_path(),
2217 self.sysfs_devname())
2219 return path
2222class XenbusDevice(BusDevice):
2223 """Xenbus device, in XS and sysfs"""
2225 XBT_NIL = ""
2227 XENBUS_DEVTYPE = None
2229 def __init__(self, domid, devid):
2230 self.domid = int(domid)
2231 self.devid = int(devid)
2232 self._xbt = XenbusDevice.XBT_NIL
2234 import xen.lowlevel.xs
2235 self.xs = xen.lowlevel.xs.xs()
2237 def xs_path(self, key=None):
2238 path = "backend/%s/%d/%d" % (self.XENBUS_DEVTYPE,
2239 self.domid,
2240 self.devid)
2241 if key is not None:
2242 path = "%s/%s" % (path, key)
2244 return path
2246 def _log(self, prio, msg):
2247 syslog(prio, msg)
2249 def info(self, msg):
2250 self._log(_syslog.LOG_INFO, msg)
2252 def warn(self, msg):
2253 self._log(_syslog.LOG_WARNING, "WARNING: " + msg)
2255 def _xs_read_path(self, path):
2256 val = self.xs.read(self._xbt, path)
2257 #self.info("read %s = '%s'" % (path, val))
2258 return val
2260 def _xs_write_path(self, path, val):
2261 self.xs.write(self._xbt, path, val)
2262 self.info("wrote %s = '%s'" % (path, val))
2264 def _xs_rm_path(self, path):
2265 self.xs.rm(self._xbt, path)
2266 self.info("removed %s" % path)
2268 def read(self, key):
2269 return self._xs_read_path(self.xs_path(key))
2271 def has_xs_key(self, key):
2272 return self.read(key) is not None
2274 def write(self, key, val):
2275 self._xs_write_path(self.xs_path(key), val)
2277 def rm(self, key):
2278 self._xs_rm_path(self.xs_path(key))
2280 def exists(self):
2281 return self.has_xs_key(None)
2283 def begin(self):
2284 assert(self._xbt == XenbusDevice.XBT_NIL)
2285 self._xbt = self.xs.transaction_start()
2287 def commit(self):
2288 ok = self.xs.transaction_end(self._xbt, 0)
2289 self._xbt = XenbusDevice.XBT_NIL
2290 return ok
2292 def abort(self):
2293 ok = self.xs.transaction_end(self._xbt, 1)
2294 assert(ok == True)
2295 self._xbt = XenbusDevice.XBT_NIL
2297 def create_physical_device(self):
2298 """The standard protocol is: toolstack writes 'params', linux hotplug
2299 script translates this into physical-device=%x:%x"""
2300 if self.has_xs_key("physical-device"):
2301 return
2302 try:
2303 params = self.read("params")
2304 frontend = self.read("frontend")
2305 is_cdrom = self._xs_read_path("%s/device-type" % frontend) == "cdrom"
2306 # We don't have PV drivers for CDROM devices, so we prevent blkback
2307 # from opening the physical-device
2308 if not(is_cdrom):
2309 major_minor = os.stat(params).st_rdev
2310 major, minor = divmod(major_minor, 256)
2311 self.write("physical-device", "%x:%x" % (major, minor))
2312 except:
2313 util.logException("BLKTAP2:create_physical_device")
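# Illustrative translation (hypothetical device): if "params" names a block
# device whose st_rdev encodes major 254, minor 2, the code above writes
#     physical-device = "fe:2"
# i.e. hex major:minor, which is what Blkback.PhysicalDevice parses on the
# reading side further below.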
2315 def signal_hotplug(self, online=True):
2316 xapi_path = "/xapi/%d/hotplug/%s/%d/hotplug" % (self.domid,
2317 self.XENBUS_DEVTYPE,
2318 self.devid)
2319 upstream_path = self.xs_path("hotplug-status")
2320 if online:
2321 self._xs_write_path(xapi_path, "online")
2322 self._xs_write_path(upstream_path, "connected")
2323 else:
2324 self._xs_rm_path(xapi_path)
2325 self._xs_rm_path(upstream_path)
2327 def sysfs_devname(self):
2328 return "%s-%d-%d" % (self.XENBUS_DEVTYPE,
2329 self.domid, self.devid)
2331 def __str__(self):
2332 return self.sysfs_devname()
2334 @classmethod
2335 def find(cls):
2336 pattern = "/sys/bus/%s/devices/%s*" % (cls.SYSFS_BUSTYPE,
2337 cls.XENBUS_DEVTYPE)
2338 for path in glob.glob(pattern):
2340 name = os.path.basename(path)
2341 (_type, domid, devid) = name.split('-')
2343 yield cls(domid, devid)
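# Naming used by XenbusDevice subclasses (illustrative values: a vbd backend
# for domid 7, devid 51728):
#
#     xenstore : backend/vbd/7/51728[/<key>]                  (xs_path())
#     sysfs    : /sys/bus/xen-backend/devices/vbd-7-51728     (sysfs_path())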
2346class XenBackendDevice(XenbusDevice):
2347 """Xenbus backend device"""
2348 SYSFS_BUSTYPE = "xen-backend"
2350 @classmethod
2351 def from_xs_path(cls, _path):
2352 (_backend, _type, domid, devid) = _path.split('/')
2354 assert _backend == 'backend'
2355 assert _type == cls.XENBUS_DEVTYPE
2357 domid = int(domid)
2358 devid = int(devid)
2360 return cls(domid, devid)
2363class Blkback(XenBackendDevice):
2364 """A blkback VBD"""
2366 XENBUS_DEVTYPE = "vbd"
2368 def __init__(self, domid, devid):
2369 XenBackendDevice.__init__(self, domid, devid)
2370 self._phy = None
2371 self._vdi_uuid = None
2372 self._q_state = None
2373 self._q_events = None
2375 class XenstoreValueError(Exception):
2376 KEY = None
2378 def __init__(self, vbd, _str):
2379 self.vbd = vbd
2380 self.str = _str
2382 def __str__(self):
2383 return "Backend %s " % self.vbd + \
2384 "has %s = %s" % (self.KEY, self.str)
2386 class PhysicalDeviceError(XenstoreValueError):
2387 KEY = "physical-device"
2389 class PhysicalDevice(object):
2391 def __init__(self, major, minor):
2392 self.major = int(major)
2393 self.minor = int(minor)
2395 @classmethod
2396 def from_xbdev(cls, xbdev):
2398 phy = xbdev.read("physical-device")
2400 try:
2401 major, minor = phy.split(':')
2402 major = int(major, 0x10)
2403 minor = int(minor, 0x10)
2404 except Exception as e:
2405 raise xbdev.PhysicalDeviceError(xbdev, phy)
2407 return cls(major, minor)
2409 def makedev(self):
2410 return os.makedev(self.major, self.minor)
2412 def is_tap(self):
2413 return self.major == Tapdisk.major()
2415 def __str__(self):
2416 return "%s:%s" % (self.major, self.minor)
2418 def __eq__(self, other):
2419 return \
2420 self.major == other.major and \
2421 self.minor == other.minor
2423 def get_physical_device(self):
2424 if not self._phy:
2425 self._phy = self.PhysicalDevice.from_xbdev(self)
2426 return self._phy
2428 class QueueEvents(Attribute):
2429 """Blkback sysfs node to select queue-state event
2430 notifications emitted."""
2432 SYSFS_NODENAME = "queue_events"
2434 QUEUE_RUNNING = (1 << 0)
2435 QUEUE_PAUSE_DONE = (1 << 1)
2436 QUEUE_SHUTDOWN_DONE = (1 << 2)
2437 QUEUE_PAUSE_REQUEST = (1 << 3)
2438 QUEUE_SHUTDOWN_REQUEST = (1 << 4)
2440 def get_mask(self):
2441 return int(self.readline(), 0x10)
2443 def set_mask(self, mask):
2444 self.writeline("0x%x" % mask)
2446 def get_queue_events(self):
2447 if not self._q_events:
2448 self._q_events = self.QueueEvents.from_kobject(self)
2449 return self._q_events
2451 def get_vdi_uuid(self):
2452 if not self._vdi_uuid:
2453 self._vdi_uuid = self.read("sm-data/vdi-uuid")
2454 return self._vdi_uuid
2456 def pause_requested(self):
2457 return self.has_xs_key("pause")
2459 def shutdown_requested(self):
2460 return self.has_xs_key("shutdown-request")
2462 def shutdown_done(self):
2463 return self.has_xs_key("shutdown-done")
2465 def running(self):
2466 return self.has_xs_key('queue-0/kthread-pid')
2468 @classmethod
2469 def find_by_physical_device(cls, phy):
2470 for dev in cls.find():
2471 try:
2472 _phy = dev.get_physical_device()
2473 except cls.PhysicalDeviceError:
2474 continue
2476 if _phy == phy:
2477 yield dev
2479 @classmethod
2480 def find_by_tap_minor(cls, minor):
2481 phy = cls.PhysicalDevice(Tapdisk.major(), minor)
2482 return cls.find_by_physical_device(phy)
2484 @classmethod
2485 def find_by_tap(cls, tapdisk):
2486 return cls.find_by_tap_minor(tapdisk.minor)
2488 def has_tap(self):
2490 if not self.can_tap():
2491 return False
2493 phy = self.get_physical_device()
2494 if phy:
2495 return phy.is_tap()
2497 return False
2499 def is_bare_hvm(self):
2500 """File VDIs for bare HVM. These are directly accessible by Qemu."""
2501 try:
2502 self.get_physical_device()
2504 except self.PhysicalDeviceError as e:
2505 vdi_type = self.read("type")
2507 self.info("HVM VDI: type=%s" % vdi_type)
2509 if e.str is not None or vdi_type != 'file':
2510 raise
2512 return True
2514 return False
2516 def can_tap(self):
2517 return not self.is_bare_hvm()
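# Illustrative helper (not part of the driver): parse a raw xenstore
# "physical-device" value of the form "<major>:<minor>" in hex, the same way
# PhysicalDevice.from_xbdev() does, and report whether it belongs to a
# tapdisk.  The default value is hypothetical.
def _example_is_tap_device(value="fd:3"):
    major, minor = (int(x, 0x10) for x in value.split(':'))
    return Blkback.PhysicalDevice(major, minor).is_tap()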
2520class BlkbackEventHandler(UEventHandler):
2522 LOG_FACILITY = _syslog.LOG_DAEMON
2524 def __init__(self, ident=None, action=None):
2525 if not ident:
2526 ident = self.__class__.__name__
2528 self.ident = ident
2529 self._vbd = None
2530 self._tapdisk = None
2532 UEventHandler.__init__(self)
2534 def run(self):
2536 self.xs_path = self.getenv('XENBUS_PATH')
2537 openlog(str(self), 0, self.LOG_FACILITY)
2539 UEventHandler.run(self)
2541 def __str__(self):
2543 try:
2544 path = self.xs_path
2545 except:
2546 path = None
2548 try:
2549 action = self.get_action()
2550 except:
2551 action = None
2553 return "%s[%s](%s)" % (self.ident, action, path)
2555 def _log(self, prio, msg):
2556 syslog(prio, msg)
2557 util.SMlog("%s: " % self + msg)
2559 def info(self, msg):
2560 self._log(_syslog.LOG_INFO, msg)
2562 def warn(self, msg):
2563 self._log(_syslog.LOG_WARNING, "WARNING: " + msg)
2565 def error(self, msg):
2566 self._log(_syslog.LOG_ERR, "ERROR: " + msg)
2568 def get_vbd(self):
2569 if not self._vbd:
2570 self._vbd = Blkback.from_xs_path(self.xs_path)
2571 return self._vbd
2573 def get_tapdisk(self):
2574 if not self._tapdisk:
2575 minor = self.get_vbd().get_physical_device().minor
2576 self._tapdisk = Tapdisk.from_minor(minor)
2577 return self._tapdisk
2578 #
2579 # Events
2580 #
2582 def __add(self):
2583 vbd = self.get_vbd()
2584 # Manage blkback transitions
2585 # self._manage_vbd()
2587 vbd.create_physical_device()
2589 vbd.signal_hotplug()
2591 @retried(backoff=.5, limit=10)
2592 def add(self):
2593 try:
2594 self.__add()
2595 except Attribute.NoSuchAttribute as e:
2596 #
2597 # FIXME: KOBJ_ADD is racing backend.probe, which
2598 # registers device attributes. So poll a little.
2599 #
2600 self.warn("%s, still trying." % e)
2601 raise RetryLoop.TransientFailure(e)
2603 def __change(self):
2604 vbd = self.get_vbd()
2606 # 1. Pause or resume tapdisk (if there is one)
2608 if vbd.has_tap():
2609 pass
2610 #self._pause_update_tap()
2612 # 2. Signal Xapi.VBD.pause/resume completion
2614 self._signal_xapi()
2616 def change(self):
2617 vbd = self.get_vbd()
2619 # NB. Beware of spurious change events between shutdown
2620 # completion and device removal. Also, Xapi.VM.migrate will
2621 # hammer a couple extra shutdown-requests into the source VBD.
2623 while True:
2624 vbd.begin()
2626 if not vbd.exists() or \
2627 vbd.shutdown_done():
2628 break
2630 self.__change()
2632 if vbd.commit():
2633 return
2635 vbd.abort()
2636 self.info("spurious uevent, ignored.")
2638 def remove(self):
2639 vbd = self.get_vbd()
2641 vbd.signal_hotplug(False)
2643 ACTIONS = {'add': add,
2644 'change': change,
2645 'remove': remove}
2646 #
2647 # VDI.pause
2648 #
2650 def _tap_should_pause(self):
2651 """Enumerate all VBDs on our tapdisk. Returns true iff any was
2652 paused"""
2654 tapdisk = self.get_tapdisk()
2655 TapState = Tapdisk.PauseState
2657 PAUSED = 'P'
2658 RUNNING = 'R'
2659 PAUSED_SHUTDOWN = 'P,S'
2660 # NB. Shutdown/paused is special. We know it's not going
2661 # to restart again, so it's a RUNNING. Still better than
2662 # backtracking a removed device during Vbd.unplug completion.
2664 next = TapState.RUNNING
2665 vbds = {}
2667 for vbd in Blkback.find_by_tap(tapdisk):
2668 name = str(vbd)
2670 pausing = vbd.pause_requested()
2671 closing = vbd.shutdown_requested()
2672 running = vbd.running()
2674 if pausing:
2675 if closing and not running:
2676 vbds[name] = PAUSED_SHUTDOWN
2677 else:
2678 vbds[name] = PAUSED
2679 next = TapState.PAUSED
2681 else:
2682 vbds[name] = RUNNING
2684 self.info("tapdev%d (%s): %s -> %s"
2685 % (tapdisk.minor, tapdisk.pause_state(),
2686 vbds, next))
2688 return next == TapState.PAUSED
2690 def _pause_update_tap(self):
2691 vbd = self.get_vbd()
2693 if self._tap_should_pause():
2694 self._pause_tap()
2695 else:
2696 self._resume_tap()
2698 def _pause_tap(self):
2699 tapdisk = self.get_tapdisk()
2701 if not tapdisk.is_paused():
2702 self.info("pausing %s" % tapdisk)
2703 tapdisk.pause()
2705 def _resume_tap(self):
2706 tapdisk = self.get_tapdisk()
2708 # NB. Raw VDI snapshots. Refresh the physical path and
2709 # type while resuming.
2710 vbd = self.get_vbd()
2711 vdi_uuid = vbd.get_vdi_uuid()
2713 if tapdisk.is_paused():
2714 self.info("loading vdi uuid=%s" % vdi_uuid)
2715 vdi = VDI.from_cli(vdi_uuid)
2716 _type = vdi.get_tap_type()
2717 path = vdi.get_phy_path()
2718 self.info("resuming %s on %s:%s" % (tapdisk, _type, path))
2719 tapdisk.unpause(_type, path)
2720 #
2721 # VBD.pause/shutdown
2722 #
2724 def _manage_vbd(self):
2725 vbd = self.get_vbd()
2726 # NB. Hook into VBD state transitions.
2728 events = vbd.get_queue_events()
2730 mask = 0
2731 mask |= events.QUEUE_PAUSE_DONE # pause/unpause
2732 mask |= events.QUEUE_SHUTDOWN_DONE # shutdown
2733 # TODO: mask |= events.QUEUE_SHUTDOWN_REQUEST, for shutdown=force
2734 # TODO: mask |= events.QUEUE_RUNNING, for ionice updates etc
2736 events.set_mask(mask)
2737 self.info("wrote %s = %#02x" % (events.path, mask))
2739 def _signal_xapi(self):
2740 vbd = self.get_vbd()
2742 pausing = vbd.pause_requested()
2743 closing = vbd.shutdown_requested()
2744 running = vbd.running()
2746 handled = 0
2748 if pausing and not running:
2749 if not vbd.has_xs_key('pause-done'):
2750 vbd.write('pause-done', '')
2751 handled += 1
2753 if not pausing:
2754 if vbd.has_xs_key('pause-done'):
2755 vbd.rm('pause-done')
2756 handled += 1
2758 if closing and not running:
2759 if not vbd.has_xs_key('shutdown-done'):
2760 vbd.write('shutdown-done', '')
2761 handled += 1
2763 if handled > 1:
2764 self.warn("handled %d events, " % handled +
2765 "pausing=%s closing=%s running=%s" % \
2766 (pausing, closing, running))
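# Pause handshake driven by _signal_xapi() above (illustrative sequence; keys
# are relative to the VBD's backend xenstore directory):
#     1. the toolstack writes "pause"        -> pause_requested() becomes True
#     2. blkback quiesces the queue          -> running() becomes False
#     3. this handler writes "pause-done"
#     4. on resume "pause" is removed and the handler removes "pause-done"
# "shutdown-request"/"shutdown-done" follow the same pattern for unplug.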
2768if __name__ == '__main__': 2768 ↛ 2770line 2768 didn't jump to line 2770, because the condition on line 2768 was never true
2770 import sys
2771 prog = os.path.basename(sys.argv[0])
2773 #
2774 # Simple CLI interface for manual operation
2775 #
2776 # tap.* level calls go down to local Tapdisk()s (by physical path)
2777 # vdi.* level calls run the plugin calls across host boundaries.
2778 #
2780 def usage(stream):
2781 print("usage: %s tap.{list|major}" % prog, file=stream)
2782 print(" %s tap.{launch|find|get|pause|" % prog + \
2783 "unpause|shutdown|stats} {[<tt>:]<path>} | [minor=]<int> | .. }", file=stream)
2784 print(" %s vbd.uevent" % prog, file=stream)
2786 try:
2787 cmd = sys.argv[1]
2788 except IndexError:
2789 usage(sys.stderr)
2790 sys.exit(1)
2792 try:
2793 _class, method = cmd.split('.')
2794 except:
2795 usage(sys.stderr)
2796 sys.exit(1)
2798 #
2799 # Local Tapdisks
2800 #
2802 if cmd == 'tap.major':
2804 print("%d" % Tapdisk.major())
2806 elif cmd == 'tap.launch':
2808 tapdisk = Tapdisk.launch_from_arg(sys.argv[2])
2809 print("Launched %s" % tapdisk, file=sys.stderr)
2811 elif _class == 'tap':
2813 attrs = {}
2814 for item in sys.argv[2:]:
2815 try:
2816 key, val = item.split('=')
2817 attrs[key] = val
2818 continue
2819 except ValueError:
2820 pass
2822 try:
2823 attrs['minor'] = int(item)
2824 continue
2825 except ValueError:
2826 pass
2828 try:
2829 arg = Tapdisk.Arg.parse(item)
2830 attrs['_type'] = arg.type
2831 attrs['path'] = arg.path
2832 continue
2833 except Tapdisk.Arg.InvalidArgument:
2834 pass
2836 attrs['path'] = item
2838 if cmd == 'tap.list':
2840 for tapdisk in Tapdisk.list( ** attrs):
2841 blktap = tapdisk.get_blktap()
2842 print(tapdisk, end=' ')
2843 print("%s: task=%s pool=%s" % \
2844 (blktap,
2845 blktap.get_task_pid(),
2846 blktap.get_pool_name()))
2848 elif cmd == 'tap.vbds':
2849 # Find all Blkback instances for a given tapdisk
2851 for tapdisk in Tapdisk.list( ** attrs):
2852 print("%s:" % tapdisk, end=' ')
2853 for vbd in Blkback.find_by_tap(tapdisk):
2854 print(vbd, end=' ')
2855 print()
2857 else:
2859 if not attrs:
2860 usage(sys.stderr)
2861 sys.exit(1)
2863 try:
2864 tapdisk = Tapdisk.get( ** attrs)
2865 except TypeError:
2866 usage(sys.stderr)
2867 sys.exit(1)
2869 if cmd == 'tap.shutdown':
2870 # Shutdown a running tapdisk, or raise
2871 tapdisk.shutdown()
2872 print("Shut down %s" % tapdisk, file=sys.stderr)
2874 elif cmd == 'tap.pause':
2875 # Pause an unpaused tapdisk, or raise
2876 tapdisk.pause()
2877 print("Paused %s" % tapdisk, file=sys.stderr)
2879 elif cmd == 'tap.unpause':
2880 # Unpause a paused tapdisk, or raise
2881 tapdisk.unpause()
2882 print("Unpaused %s" % tapdisk, file=sys.stderr)
2884 elif cmd == 'tap.stats':
2885 # Gather tapdisk status
2886 stats = tapdisk.stats()
2887 print("%s:" % tapdisk)
2888 print(json.dumps(stats, indent=True))
2890 else:
2891 usage(sys.stderr)
2892 sys.exit(1)
2894 elif cmd == 'vbd.uevent':
2896 hnd = BlkbackEventHandler(cmd)
2898 if not sys.stdin.isatty():
2899 try:
2900 hnd.run()
2901 except Exception as e:
2902 hnd.error("Unhandled Exception: %s" % e)
2904 import traceback
2905 _type, value, tb = sys.exc_info()
2906 trace = traceback.format_exception(_type, value, tb)
2907 for entry in trace:
2908 for line in entry.rstrip().split('\n'):
2909 util.SMlog(line)
2910 else:
2911 hnd.run()
2913 elif cmd == 'vbd.list':
2915 for vbd in Blkback.find():
2916 print(vbd, \
2917 "physical-device=%s" % vbd.get_physical_device(), \
2918 "pause=%s" % vbd.pause_requested())
2920 else:
2921 usage(sys.stderr)
2922 sys.exit(1)
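# Example invocations of the CLI above (illustrative paths and minors):
#
#     blktap2.py tap.major                        # print the tapdev block major
#     blktap2.py tap.list                         # tapdisks with pool/task info
#     blktap2.py tap.launch vhd:/path/to/disk.vhd
#     blktap2.py tap.pause 3                      # pause the tapdisk with minor 3
#     blktap2.py vbd.uevent                       # run as a udev event handler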