Coverage for drivers/blktap2.py : 39%

1#!/usr/bin/python3
2#
3# Copyright (C) Citrix Systems Inc.
4#
5# This program is free software; you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License as published
7# by the Free Software Foundation; version 2.1 only.
8#
9# This program is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with this program; if not, write to the Free Software Foundation, Inc.,
16# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17#
18# blktap2: blktap/tapdisk management layer
19#
21import os
22import re
23import time
24import copy
25from lock import Lock
26import util
27import xmlrpc.client
28import http.client
29import errno
30import signal
31import subprocess
32import syslog as _syslog
33import glob
34import json
35import xs_errors
36import XenAPI
37import scsiutil
38from syslog import openlog, syslog
39from stat import * # S_ISBLK(), ...
40import nfs
42import resetvdis
43import vhdutil
44import lvhdutil
46import VDI as sm
48# For RRDD Plugin Registration
49from xmlrpc.client import ServerProxy, Transport
50from socket import socket, AF_UNIX, SOCK_STREAM
52try:
53 from linstorvolumemanager import log_drbd_openers
54 LINSTOR_AVAILABLE = True
55except ImportError:
56 LINSTOR_AVAILABLE = False
58PLUGIN_TAP_PAUSE = "tapdisk-pause"
60SOCKPATH = "/var/xapi/xcp-rrdd"
62NUM_PAGES_PER_RING = 32 * 11
63MAX_FULL_RINGS = 8
64POOL_NAME_KEY = "mem-pool"
65POOL_SIZE_KEY = "mem-pool-size-rings"
67ENABLE_MULTIPLE_ATTACH = "/etc/xensource/allow_multiple_vdi_attach"
68NO_MULTIPLE_ATTACH = not (os.path.exists(ENABLE_MULTIPLE_ATTACH))
71def locking(excType, override=True):
72 def locking2(op):
73 def wrapper(self, *args):
74 self.lock.acquire()
75 try:
76 try:
77 ret = op(self, * args)
78 except (util.CommandException, util.SMException, XenAPI.Failure) as e:
79 util.logException("BLKTAP2:%s" % op)
80 msg = str(e)
81 if isinstance(e, util.CommandException):
82 msg = "Command %s failed (%s): %s" % \
83 (e.cmd, e.code, e.reason)
84 if override:
85 raise xs_errors.XenError(excType, opterr=msg)
86 else:
87 raise
88 except:
89 util.logException("BLKTAP2:%s" % op)
90 raise
91 finally:
92 self.lock.release()  [92 ↛ exit]  line 92 didn't except from function 'wrapper', because the raise on line 85 wasn't executed or the raise on line 87 wasn't executed or the raise on line 90 wasn't executed
93 return ret
94 return wrapper
95 return locking2
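[Illustrative sketch, not part of blktap2.py] How the locking() factory is meant to be used: the decorated method runs under self.lock, and driver exceptions are logged and re-raised as the named XenError. The class and method below are hypothetical; the real call sites later in this file are VDI._activate_locked and VDI._deactivate_locked.

    class ExampleVDI(object):
        def __init__(self, uuid):
            # decorated methods expect a self.lock with acquire()/release()
            self.lock = Lock("vdi", uuid)

        @locking("VDIUnavailable")
        def do_work(self, arg):
            # runs with self.lock held; CommandException, SMException and
            # XenAPI.Failure are logged and re-raised as
            # xs_errors.XenError("VDIUnavailable", opterr=...)
            return arg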
98class RetryLoop(object):
100 def __init__(self, backoff, limit):
101 self.backoff = backoff
102 self.limit = limit
104 def __call__(self, f):
106 def loop(*__t, **__d):
107 attempt = 0
109 while True:
110 attempt += 1
112 try:
113 return f( * __t, ** __d)
115 except self.TransientFailure as e:
116 e = e.exception
118 if attempt >= self.limit:  [118 ↛ 119]  line 118 didn't jump to line 119, because the condition on line 118 was never true
119 raise e
121 time.sleep(self.backoff)
123 return loop
125 class TransientFailure(Exception):
126 def __init__(self, exception):
127 self.exception = exception
130def retried(**args):
131 return RetryLoop( ** args)
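[Illustrative sketch, not part of blktap2.py] retried() simply builds a RetryLoop decorator; a decorated callable opts into a retry by raising RetryLoop.TransientFailure. The helper below is hypothetical; the real call sites are TapCtl.list and TapCtl.spawn.

    @retried(backoff=.5, limit=10)
    def example_flaky_call():
        try:
            return might_fail_transiently()    # hypothetical helper
        except EnvironmentError as e:
            # sleep 0.5s and retry, up to 10 attempts; the final attempt
            # re-raises the wrapped exception
            raise RetryLoop.TransientFailure(e)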
134class TapCtl(object):
135 """Tapdisk IPC utility calls."""
137 PATH = "/usr/sbin/tap-ctl"
139 def __init__(self, cmd, p):
140 self.cmd = cmd
141 self._p = p
142 self.stdout = p.stdout
144 class CommandFailure(Exception):
145 """TapCtl cmd failure."""
147 def __init__(self, cmd, **info):
148 self.cmd = cmd
149 self.info = info
151 def __str__(self):
152 items = self.info.items()
153 info = ", ".join("%s=%s" % item
154 for item in items)
155 return "%s failed: %s" % (self.cmd, info)
157 # Trying to get a non-existent attribute throws an AttributeError
158 # exception
159 def __getattr__(self, key):
160 if key in self.info:  [160 ↛ 162]  line 160 didn't jump to line 162, because the condition on line 160 was never false
161 return self.info[key]
162 return object.__getattribute__(self, key)
164 @property
165 def has_status(self):
166 return 'status' in self.info
168 @property
169 def has_signal(self):
170 return 'signal' in self.info
172 # Retrieves the error code returned by the command. If the error code
173 # was not supplied at object-construction time, zero is returned.
174 def get_error_code(self):
175 key = 'status'
176 if key in self.info:
177 return self.info[key]
178 else:
179 return 0
181 @classmethod
182 def __mkcmd_real(cls, args):
183 return [cls.PATH] + [str(x) for x in args]
185 __next_mkcmd = __mkcmd_real
187 @classmethod
188 def _mkcmd(cls, args):
190 __next_mkcmd = cls.__next_mkcmd
191 cls.__next_mkcmd = cls.__mkcmd_real
193 return __next_mkcmd(args)
195 @classmethod
196 def _call(cls, args, quiet=False, input=None):
197 """
198 Spawn a tap-ctl process. Return a TapCtl invocation.
199 Raises a TapCtl.CommandFailure if subprocess creation failed.
200 """
201 cmd = cls._mkcmd(args)
203 if not quiet:
204 util.SMlog(cmd)
205 try:
206 p = subprocess.Popen(cmd,
207 stdin=subprocess.PIPE,
208 stdout=subprocess.PIPE,
209 stderr=subprocess.PIPE,
210 close_fds=True,
211 universal_newlines=True)
212 if input:
213 p.stdin.write(input)
214 p.stdin.close()
215 except OSError as e:
216 raise cls.CommandFailure(cmd, errno=e.errno)
218 return cls(cmd, p)
220 def _errmsg(self):
221 output = map(str.rstrip, self._p.stderr)
222 return "; ".join(output)
224 def _wait(self, quiet=False):
225 """
226 Reap the child tap-ctl process of this invocation.
227 Raises a TapCtl.CommandFailure on non-zero exit status.
228 """
229 status = self._p.wait()
230 if not quiet:
231 util.SMlog(" = %d" % status)
233 if status == 0:
234 return
236 info = {'errmsg': self._errmsg(),
237 'pid': self._p.pid}
239 if status < 0:
240 info['signal'] = -status
241 else:
242 info['status'] = status
244 raise self.CommandFailure(self.cmd, ** info)
246 @classmethod
247 def _pread(cls, args, quiet=False, input=None):
248 """
249 Spawn a tap-ctl invocation and read a single line.
250 """
251 tapctl = cls._call(args=args, quiet=quiet, input=input)
253 output = tapctl.stdout.readline().rstrip()
255 tapctl._wait(quiet)
256 return output
258 @staticmethod
259 def _maybe(opt, parm):
260 if parm is not None:
261 return [opt, parm]
262 return []
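[Illustrative sketch, not part of blktap2.py] How _maybe() and _mkcmd() combine into the argv that _call() hands to subprocess.Popen; the minor number below is made up.

    args = ["list"] + TapCtl._maybe("-m", 2) + TapCtl._maybe("-f", None)
    # args == ["list", "-m", 2]         (None-valued options are dropped)
    TapCtl._mkcmd(args)
    # == ["/usr/sbin/tap-ctl", "list", "-m", "2"]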
264 @classmethod
265 def __list(cls, minor=None, pid=None, _type=None, path=None):
266 args = ["list"]
267 args += cls._maybe("-m", minor)
268 args += cls._maybe("-p", pid)
269 args += cls._maybe("-t", _type)
270 args += cls._maybe("-f", path)
272 tapctl = cls._call(args, True)
274 for stdout_line in tapctl.stdout:
275 # FIXME: tap-ctl writes error messages to stdout and
276 # confuses this parser
277 if stdout_line == "blktap kernel module not installed\n":  [277 ↛ 280]  line 277 didn't jump to line 280, because the condition on line 277 was never true
278 # This isn't pretty, but neither is the stdout/stderr confusion,
279 # and at least this makes the error message describe the fix
280 raise Exception("blktap kernel module not installed: try 'modprobe blktap'")
281 row = {}
283 for field in stdout_line.rstrip().split(' ', 3):
284 bits = field.split('=')
285 if len(bits) == 2:  [285 ↛ 297]  line 285 didn't jump to line 297, because the condition on line 285 was never false
286 key, val = field.split('=')
288 if key in ('pid', 'minor'):
289 row[key] = int(val, 10)
291 elif key in ('state'):
292 row[key] = int(val, 0x10)
294 else:
295 row[key] = val
296 else:
297 util.SMlog("Ignoring unexpected tap-ctl output: %s" % repr(field))
298 yield row
300 tapctl._wait(True)
302 @classmethod
303 @retried(backoff=.5, limit=10)
304 def list(cls, **args):
306 # FIXME. We typically get an EPROTO when uevents interleave
307 # with SM ops and a tapdisk shuts down under our feet. Should
308 # be fixed in SM.
310 try:
311 return list(cls.__list( ** args))
313 except cls.CommandFailure as e:
314 transient = [errno.EPROTO, errno.ENOENT]
315 if e.has_status and e.status in transient:
316 raise RetryLoop.TransientFailure(e)
317 raise
319 @classmethod
320 def allocate(cls, devpath=None):
321 args = ["allocate"]
322 args += cls._maybe("-d", devpath)
323 return cls._pread(args)
325 @classmethod
326 def free(cls, minor):
327 args = ["free", "-m", minor]
328 cls._pread(args)
330 @classmethod
331 @retried(backoff=.5, limit=10)
332 def spawn(cls):
333 args = ["spawn"]
334 try:
335 pid = cls._pread(args)
336 return int(pid)
337 except cls.CommandFailure as ce:
338 # intermittent failures to spawn. CA-292268
339 if ce.status == 1:
340 raise RetryLoop.TransientFailure(ce)
341 raise
343 @classmethod
344 def attach(cls, pid, minor):
345 args = ["attach", "-p", pid, "-m", minor]
346 cls._pread(args)
348 @classmethod
349 def detach(cls, pid, minor):
350 args = ["detach", "-p", pid, "-m", minor]
351 cls._pread(args)
353 @classmethod
354 def _load_key(cls, key_hash, vdi_uuid):
355 import plugins
357 return plugins.load_key(key_hash, vdi_uuid)
359 @classmethod
360 def open(cls, pid, minor, _type, _file, options):
361 params = Tapdisk.Arg(_type, _file)
362 args = ["open", "-p", pid, "-m", minor, '-a', str(params)]
363 input = None
364 if options.get("rdonly"):
365 args.append('-R')
366 if options.get("lcache"):
367 args.append("-r")
368 if options.get("existing_prt") is not None:
369 args.append("-e")
370 args.append(str(options["existing_prt"]))
371 if options.get("secondary"):
372 args.append("-2")
373 args.append(options["secondary"])
374 if options.get("standby"):
375 args.append("-s")
376 if options.get("timeout"):
377 args.append("-t")
378 args.append(str(options["timeout"]))
379 if not options.get("o_direct", True):
380 args.append("-D")
381 if options.get('cbtlog'):
382 args.extend(['-C', options['cbtlog']])
383 if options.get('key_hash'):
384 key_hash = options['key_hash']
385 vdi_uuid = options['vdi_uuid']
386 key = cls._load_key(key_hash, vdi_uuid)
388 if not key:
389 raise util.SMException("No key found with key hash {}".format(key_hash))
390 input = key
391 args.append('-E')
392 cls._pread(args=args, input=input)
394 @classmethod
395 def close(cls, pid, minor, force=False):
396 args = ["close", "-p", pid, "-m", minor, "-t", "120"]
397 if force:
398 args += ["-f"]
399 cls._pread(args)
401 @classmethod
402 def pause(cls, pid, minor):
403 args = ["pause", "-p", pid, "-m", minor]
404 cls._pread(args)
406 @classmethod
407 def unpause(cls, pid, minor, _type=None, _file=None, mirror=None,
408 cbtlog=None):
409 args = ["unpause", "-p", pid, "-m", minor]
410 if mirror:
411 args.extend(["-2", mirror])
412 if _type and _file:
413 params = Tapdisk.Arg(_type, _file)
414 args += ["-a", str(params)]
415 if cbtlog:
416 args.extend(["-c", cbtlog])
417 cls._pread(args)
419 @classmethod
420 def shutdown(cls, pid):
421 # TODO: This should be a real tap-ctl command
422 os.kill(pid, signal.SIGTERM)
423 os.waitpid(pid, 0)
425 @classmethod
426 def stats(cls, pid, minor):
427 args = ["stats", "-p", pid, "-m", minor]
428 return cls._pread(args, quiet=True)
430 @classmethod
431 def major(cls):
432 args = ["major"]
433 major = cls._pread(args)
434 return int(major)
437class TapdiskExists(Exception):
438 """Tapdisk already running."""
440 def __init__(self, tapdisk):
441 self.tapdisk = tapdisk
443 def __str__(self):
444 return "%s already running" % self.tapdisk
447class TapdiskNotRunning(Exception):
448 """No such Tapdisk."""
450 def __init__(self, **attrs):
451 self.attrs = attrs
453 def __str__(self):
454 items = iter(self.attrs.items())
455 attrs = ", ".join("%s=%s" % attr
456 for attr in items)
457 return "No such Tapdisk(%s)" % attrs
460class TapdiskNotUnique(Exception):
461 """More than one tapdisk on one path."""
463 def __init__(self, tapdisks):
464 self.tapdisks = tapdisks
466 def __str__(self):
467 tapdisks = map(str, self.tapdisks)
468 return "Found multiple tapdisks: %s" % tapdisks
471class TapdiskFailed(Exception):
472 """Tapdisk launch failure."""
474 def __init__(self, arg, err):
475 self.arg = arg
476 self.err = err
478 def __str__(self):
479 return "Tapdisk(%s): %s" % (self.arg, self.err)
481 def get_error(self):
482 return self.err
485class TapdiskInvalidState(Exception):
486 """Tapdisk pause/unpause failure"""
488 def __init__(self, tapdisk):
489 self.tapdisk = tapdisk
491 def __str__(self):
492 return str(self.tapdisk)
495def mkdirs(path, mode=0o777):
496 if not os.path.exists(path):
497 parent, subdir = os.path.split(path)
498 assert parent != path
499 try:
500 if parent:
501 mkdirs(parent, mode)
502 if subdir:
503 os.mkdir(path, mode)
504 except OSError as e:
505 if e.errno != errno.EEXIST:
506 raise
509class KObject(object):
511 SYSFS_CLASSTYPE = None
513 def sysfs_devname(self):
514 raise NotImplementedError("sysfs_devname is undefined")
517class Attribute(object):
519 SYSFS_NODENAME = None
521 def __init__(self, path):
522 self.path = path
524 @classmethod
525 def from_kobject(cls, kobj):
526 path = "%s/%s" % (kobj.sysfs_path(), cls.SYSFS_NODENAME)
527 return cls(path)
529 class NoSuchAttribute(Exception):
530 def __init__(self, name):
531 self.name = name
533 def __str__(self):
534 return "No such attribute: %s" % self.name
536 def _open(self, mode='r'):
537 try:
538 return open(self.path, mode)
539 except IOError as e:
540 if e.errno == errno.ENOENT:
541 raise self.NoSuchAttribute(self)
542 raise
544 def readline(self):
545 f = self._open('r')
546 s = f.readline().rstrip()
547 f.close()
548 return s
550 def writeline(self, val):
551 f = self._open('w')
552 f.write(val)
553 f.close()
556class ClassDevice(KObject):
558 @classmethod
559 def sysfs_class_path(cls):
560 return "/sys/class/%s" % cls.SYSFS_CLASSTYPE
562 def sysfs_path(self):
563 return "%s/%s" % (self.sysfs_class_path(),
564 self.sysfs_devname())
567class Blktap(ClassDevice):
569 DEV_BASEDIR = '/dev/xen/blktap-2'
571 SYSFS_CLASSTYPE = "blktap2"
573 def __init__(self, minor):
574 self.minor = minor
575 self._pool = None
576 self._task = None
578 @classmethod
579 def allocate(cls):
580 # FIXME. Should rather go into init.
581 mkdirs(cls.DEV_BASEDIR)
583 devname = TapCtl.allocate()
584 minor = Tapdisk._parse_minor(devname)
585 return cls(minor)
587 def free(self):
588 TapCtl.free(self.minor)
590 def __str__(self):
591 return "%s(minor=%d)" % (self.__class__.__name__, self.minor)
593 def sysfs_devname(self):
594 return "blktap!blktap%d" % self.minor
596 class Pool(Attribute):
597 SYSFS_NODENAME = "pool"
599 def get_pool_attr(self):
600 if not self._pool:
601 self._pool = self.Pool.from_kobject(self)
602 return self._pool
604 def get_pool_name(self):
605 return self.get_pool_attr().readline()
607 def set_pool_name(self, name):
608 self.get_pool_attr().writeline(name)
610 def set_pool_size(self, pages):
611 self.get_pool().set_size(pages)
613 def get_pool(self):
614 return BlktapControl.get_pool(self.get_pool_name())
616 def set_pool(self, pool):
617 self.set_pool_name(pool.name)
619 class Task(Attribute):
620 SYSFS_NODENAME = "task"
622 def get_task_attr(self):
623 if not self._task:
624 self._task = self.Task.from_kobject(self)
625 return self._task
627 def get_task_pid(self):
628 pid = self.get_task_attr().readline()
629 try:
630 return int(pid)
631 except ValueError:
632 return None
634 def find_tapdisk(self):
635 pid = self.get_task_pid()
636 if pid is None:
637 return None
639 return Tapdisk.find(pid=pid, minor=self.minor)
641 def get_tapdisk(self):
642 tapdisk = self.find_tapdisk()
643 if not tapdisk:
644 raise TapdiskNotRunning(minor=self.minor)
645 return tapdisk
648class Tapdisk(object):
650 TYPES = ['aio', 'vhd']
652 def __init__(self, pid, minor, _type, path, state):
653 self.pid = pid
654 self.minor = minor
655 self.type = _type
656 self.path = path
657 self.state = state
658 self._dirty = False
659 self._blktap = None
661 def __str__(self):
662 state = self.pause_state()
663 return "Tapdisk(%s, pid=%d, minor=%s, state=%s)" % \
664 (self.get_arg(), self.pid, self.minor, state)
666 @classmethod
667 def list(cls, **args):
669 for row in TapCtl.list( ** args):
671 args = {'pid': None,
672 'minor': None,
673 'state': None,
674 '_type': None,
675 'path': None}
677 for key, val in row.items():
678 if key in args:
679 args[key] = val
681 if 'args' in row:  [681 ↛ 686]  line 681 didn't jump to line 686, because the condition on line 681 was never false
682 image = Tapdisk.Arg.parse(row['args'])
683 args['_type'] = image.type
684 args['path'] = image.path
686 if None in args.values():  [686 ↛ 687]  line 686 didn't jump to line 687, because the condition on line 686 was never true
687 continue
689 yield Tapdisk( ** args)
691 @classmethod
692 def find(cls, **args):
694 found = list(cls.list( ** args))
696 if len(found) > 1:
697 raise TapdiskNotUnique(found)
699 if found:
700 return found[0]
702 return None
704 @classmethod
705 def find_by_path(cls, path):
706 return cls.find(path=path)
708 @classmethod
709 def find_by_minor(cls, minor):
710 return cls.find(minor=minor)
712 @classmethod
713 def get(cls, **attrs):
715 tapdisk = cls.find( ** attrs)
717 if not tapdisk:
718 raise TapdiskNotRunning( ** attrs)
720 return tapdisk
722 @classmethod
723 def from_path(cls, path):
724 return cls.get(path=path)
726 @classmethod
727 def from_minor(cls, minor):
728 return cls.get(minor=minor)
730 @classmethod
731 def __from_blktap(cls, blktap):
732 tapdisk = cls.from_minor(minor=blktap.minor)
733 tapdisk._blktap = blktap
734 return tapdisk
736 def get_blktap(self):
737 if not self._blktap:
738 self._blktap = Blktap(self.minor)
739 return self._blktap
741 class Arg:
743 def __init__(self, _type, path):
744 self.type = _type
745 self.path = path
747 def __str__(self):
748 return "%s:%s" % (self.type, self.path)
750 @classmethod
751 def parse(cls, arg):
753 try:
754 _type, path = arg.split(":", 1)
755 except ValueError:
756 raise cls.InvalidArgument(arg)
758 if _type not in Tapdisk.TYPES:  [758 ↛ 759]  line 758 didn't jump to line 759, because the condition on line 758 was never true
759 raise cls.InvalidType(_type)
761 return cls(_type, path)
763 class InvalidType(Exception):
764 def __init__(self, _type):
765 self.type = _type
767 def __str__(self):
768 return "Not a Tapdisk type: %s" % self.type
770 class InvalidArgument(Exception):
771 def __init__(self, arg):
772 self.arg = arg
774 def __str__(self):
775 return "Not a Tapdisk image: %s" % self.arg
777 def get_arg(self):
778 return self.Arg(self.type, self.path)
780 def get_devpath(self):
781 return "%s/tapdev%d" % (Blktap.DEV_BASEDIR, self.minor)
783 @classmethod
784 def launch_from_arg(cls, arg):
785 arg = cls.Arg.parse(arg)
786 return cls.launch(arg.path, arg.type, False)
788 @classmethod
789 def cgclassify(cls, pid):
791 # We don't provide any <controllers>:<path>
792 # so cgclassify uses /etc/cgrules.conf which
793 # we have configured in the spec file.
794 cmd = ["cgclassify", str(pid)]
795 try:
796 util.pread2(cmd)
797 except util.CommandException as e:
798 util.logException(e)
800 @classmethod
801 def spawn(cls):
802 return TapCtl.spawn()
804 @classmethod
805 def launch_on_tap(cls, blktap, path, _type, options):
807 tapdisk = cls.find_by_path(path)
808 if tapdisk:  [808 ↛ 809]  line 808 didn't jump to line 809, because the condition on line 808 was never true
809 raise TapdiskExists(tapdisk)
811 minor = blktap.minor
813 try:
814 pid = cls.spawn()
815 cls.cgclassify(pid)
816 try:
817 TapCtl.attach(pid, minor)
819 try:
820 retry_open = 0
821 while True:
822 try:
823 TapCtl.open(pid, minor, _type, path, options)
824 except TapCtl.CommandFailure as e:
825 err = (
826 'status' in e.info and e.info['status']
827 ) or None
828 if err in (errno.EIO, errno.EROFS, errno.EAGAIN):
829 if retry_open < 5:
830 retry_open += 1
831 time.sleep(1)
832 continue
833 if LINSTOR_AVAILABLE and err == errno.EROFS:
834 log_drbd_openers(path)
835 break
836 try:
837 tapdisk = cls.__from_blktap(blktap)
838 node = '/sys/dev/block/%d:%d' % (tapdisk.major(), tapdisk.minor)
839 util.set_scheduler_sysfs_node(node, 'noop')
840 return tapdisk
841 except:
842 TapCtl.close(pid, minor)
843 raise
845 except:
846 TapCtl.detach(pid, minor)
847 raise
849 except:
850 TapCtl.shutdown(pid)
851 raise
853 except TapCtl.CommandFailure as ctl:
854 util.logException(ctl)
855 if ('/dev/xapi/cd/' in path and
856 'status' in ctl.info and
857 ctl.info['status'] == 123): # ENOMEDIUM (No medium found)
858 raise xs_errors.XenError('TapdiskDriveEmpty')
859 else:
860 raise TapdiskFailed(cls.Arg(_type, path), ctl)
862 @classmethod
863 def launch(cls, path, _type, rdonly):
864 blktap = Blktap.allocate()
865 try:
866 return cls.launch_on_tap(blktap, path, _type, {"rdonly": rdonly})
867 except:
868 blktap.free()
869 raise
871 def shutdown(self, force=False):
873 TapCtl.close(self.pid, self.minor, force)
875 TapCtl.detach(self.pid, self.minor)
877 self.get_blktap().free()
879 def pause(self):
881 if not self.is_running():
882 raise TapdiskInvalidState(self)
884 TapCtl.pause(self.pid, self.minor)
886 self._set_dirty()
888 def unpause(self, _type=None, path=None, mirror=None, cbtlog=None):
890 if not self.is_paused():
891 raise TapdiskInvalidState(self)
893 # FIXME: should the arguments be optional?
894 if _type is None:
895 _type = self.type
896 if path is None:
897 path = self.path
899 TapCtl.unpause(self.pid, self.minor, _type, path, mirror=mirror,
900 cbtlog=cbtlog)
902 self._set_dirty()
904 def stats(self):
905 return json.loads(TapCtl.stats(self.pid, self.minor))
906 #
907 # NB. dirty/refresh: reload attributes on next access
908 #
910 def _set_dirty(self):
911 self._dirty = True
913 def _refresh(self, __get):
914 t = self.from_minor(__get('minor'))
915 self.__init__(t.pid, t.minor, t.type, t.path, t.state)
917 def __getattribute__(self, name):
918 def __get(name):
919 # NB. avoid recursion
920 return object.__getattribute__(self, name)
922 if __get('_dirty') and \  [922 ↛ 924]  line 922 didn't jump to line 924, because the condition on line 922 was never true
923 name in ['minor', 'type', 'path', 'state']:
924 self._refresh(__get)
925 self._dirty = False
927 return __get(name)
929 class PauseState:
930 RUNNING = 'R'
931 PAUSING = 'r'
932 PAUSED = 'P'
934 class Flags:
935 DEAD = 0x0001
936 CLOSED = 0x0002
937 QUIESCE_REQUESTED = 0x0004
938 QUIESCED = 0x0008
939 PAUSE_REQUESTED = 0x0010
940 PAUSED = 0x0020
941 SHUTDOWN_REQUESTED = 0x0040
942 LOCKING = 0x0080
943 RETRY_NEEDED = 0x0100
944 LOG_DROPPED = 0x0200
946 PAUSE_MASK = PAUSE_REQUESTED | PAUSED
948 def is_paused(self):
949 return not not (self.state & self.Flags.PAUSED)
951 def is_running(self):
952 return not (self.state & self.Flags.PAUSE_MASK)
954 def pause_state(self):
955 if self.state & self.Flags.PAUSED: 955 ↛ 956line 955 didn't jump to line 956, because the condition on line 955 was never true
956 return self.PauseState.PAUSED
958 if self.state & self.Flags.PAUSE_REQUESTED: 958 ↛ 959line 958 didn't jump to line 959, because the condition on line 958 was never true
959 return self.PauseState.PAUSING
961 return self.PauseState.RUNNING
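[Illustrative sketch, not part of blktap2.py] The state word is a bitmask of Flags; for example, with state == 0x0030 (PAUSE_REQUESTED | PAUSED):

    state & Tapdisk.Flags.PAUSE_MASK   # -> 0x30 (truthy), so is_running() is False
    state & Tapdisk.Flags.PAUSED       # -> 0x20 (truthy), so is_paused() is True
                                       #    and pause_state() returns 'P'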
963 @staticmethod
964 def _parse_minor(devpath):
965 regex = '%s/(blktap|tapdev)(\d+)$' % Blktap.DEV_BASEDIR
966 pattern = re.compile(regex)
967 groups = pattern.search(devpath)
968 if not groups:
969 raise Exception("malformed tap device: '%s' (%s) " % (devpath, regex))
971 minor = groups.group(2)
972 return int(minor)
974 _major = None
976 @classmethod
977 def major(cls):
978 if cls._major:
979 return cls._major
981 devices = open("/proc/devices")
982 for line in devices:
984 row = line.rstrip().split(' ')
985 if len(row) != 2:
986 continue
988 major, name = row
989 if name != 'tapdev':
990 continue
992 cls._major = int(major)
993 break
995 devices.close()
996 return cls._major
999class VDI(object):
1000 """SR.vdi driver decorator for blktap2"""
1002 CONF_KEY_ALLOW_CACHING = "vdi_allow_caching"
1003 CONF_KEY_MODE_ON_BOOT = "vdi_on_boot"
1004 CONF_KEY_CACHE_SR = "local_cache_sr"
1005 CONF_KEY_O_DIRECT = "o_direct"
1006 LOCK_CACHE_SETUP = "cachesetup"
1008 ATTACH_DETACH_RETRY_SECS = 120
1010 # number of seconds on top of NFS timeo mount option the tapdisk should
1011 # wait before reporting errors. This is to allow a retry to succeed in case
1012 # packets were lost the first time around, which prevented the NFS client
1013 # from returning before the timeo is reached even if the NFS server did
1014 # come back earlier
1015 TAPDISK_TIMEOUT_MARGIN = 30
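[Illustrative sketch, not part of blktap2.py] How the margin is applied in activate() further down, assuming a hypothetical NFS "timeo" mount option of 100 deciseconds:

    timeout = int((100 + 5) / 10)                            # -> 10 seconds
    options["timeout"] = timeout + TAPDISK_TIMEOUT_MARGIN    # -> 40 seconds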
1017 def __init__(self, uuid, target, driver_info):
1018 self.target = self.TargetDriver(target, driver_info)
1019 self._vdi_uuid = uuid
1020 self._session = target.session
1021 self.xenstore_data = scsiutil.update_XS_SCSIdata(uuid, scsiutil.gen_synthetic_page_data(uuid))
1022 self.__o_direct = None
1023 self.__o_direct_reason = None
1024 self.lock = Lock("vdi", uuid)
1025 self.tap = None
1027 def get_o_direct_capability(self, options):
1028 """Returns True/False based on licensing and caching_params"""
1029 if self.__o_direct is not None:  [1029 ↛ 1030]  line 1029 didn't jump to line 1030, because the condition on line 1029 was never true
1030 return self.__o_direct, self.__o_direct_reason
1032 if util.read_caching_is_restricted(self._session):  [1032 ↛ 1033]  line 1032 didn't jump to line 1033, because the condition on line 1032 was never true
1033 self.__o_direct = True
1034 self.__o_direct_reason = "LICENSE_RESTRICTION"
1035 elif not ((self.target.vdi.sr.handles("nfs") or self.target.vdi.sr.handles("ext") or self.target.vdi.sr.handles("smb"))):  [1035 ↛ 1038]  line 1035 didn't jump to line 1038, because the condition on line 1035 was never false
1036 self.__o_direct = True
1037 self.__o_direct_reason = "SR_NOT_SUPPORTED"
1038 elif not (options.get("rdonly") or self.target.vdi.parent):
1039 util.SMlog(self.target.vdi)
1040 self.__o_direct = True
1041 self.__o_direct_reason = "NO_RO_IMAGE"
1042 elif options.get("rdonly") and not self.target.vdi.parent:
1043 self.__o_direct = True
1044 self.__o_direct_reason = "RO_WITH_NO_PARENT"
1045 elif options.get(self.CONF_KEY_O_DIRECT):
1046 self.__o_direct = True
1047 self.__o_direct_reason = "SR_OVERRIDE"
1049 if self.__o_direct is None:  [1049 ↛ 1050]  line 1049 didn't jump to line 1050, because the condition on line 1049 was never true
1050 self.__o_direct = False
1051 self.__o_direct_reason = ""
1053 return self.__o_direct, self.__o_direct_reason
1055 @classmethod
1056 def from_cli(cls, uuid):
1057 import VDI as sm
1058 import XenAPI
1060 session = XenAPI.xapi_local()
1061 session.xenapi.login_with_password('root', '', '', 'SM')
1063 target = sm.VDI.from_uuid(session, uuid)
1064 driver_info = target.sr.srcmd.driver_info
1066 session.xenapi.session.logout()
1068 return cls(uuid, target, driver_info)
1070 @staticmethod
1071 def _tap_type(vdi_type):
1072 """Map a VDI type (e.g. 'raw') to a tapdisk driver type (e.g. 'aio')"""
1073 return {
1074 'raw': 'aio',
1075 'vhd': 'vhd',
1076 'iso': 'aio', # for ISO SR
1077 'aio': 'aio', # for LVHD
1078 'file': 'aio',
1079 'phy': 'aio'
1080 }[vdi_type]
1082 def get_tap_type(self):
1083 vdi_type = self.target.get_vdi_type()
1084 return VDI._tap_type(vdi_type)
1086 def get_phy_path(self):
1087 return self.target.get_vdi_path()
1089 class UnexpectedVDIType(Exception):
1091 def __init__(self, vdi_type, target):
1092 self.vdi_type = vdi_type
1093 self.target = target
1095 def __str__(self):
1096 return \
1097 "Target %s has unexpected VDI type '%s'" % \
1098 (type(self.target), self.vdi_type)
1100 VDI_PLUG_TYPE = {'phy': 'phy', # for NETAPP
1101 'raw': 'phy',
1102 'aio': 'tap', # for LVHD raw nodes
1103 'iso': 'tap', # for ISOSR
1104 'file': 'tap',
1105 'vhd': 'tap'}
1107 def tap_wanted(self):
1108 # 1. Let the target vdi_type decide
1110 vdi_type = self.target.get_vdi_type()
1112 try:
1113 plug_type = self.VDI_PLUG_TYPE[vdi_type]
1114 except KeyError:
1115 raise self.UnexpectedVDIType(vdi_type,
1116 self.target.vdi)
1118 if plug_type == 'tap':  [1118 ↛ 1119]  line 1118 didn't jump to line 1119, because the condition on line 1118 was never true
1119 return True
1120 elif self.target.vdi.sr.handles('udev'):  [1120 ↛ 1126]  line 1120 didn't jump to line 1126, because the condition on line 1120 was never false
1121 return True
1122 # 2. Otherwise, there may be more reasons
1123 #
1124 # .. TBD
1126 return False
1128 class TargetDriver:
1129 """Safe target driver access."""
1130 # NB. *Must* test caps for optional calls. Some targets
1131 # actually implement some slots, but do not enable them. Just
1132 # try/except would risk breaking compatibility.
1134 def __init__(self, vdi, driver_info):
1135 self.vdi = vdi
1136 self._caps = driver_info['capabilities']
1138 def has_cap(self, cap):
1139 """Determine if target has given capability"""
1140 return cap in self._caps
1142 def attach(self, sr_uuid, vdi_uuid):
1143 #assert self.has_cap("VDI_ATTACH")
1144 return self.vdi.attach(sr_uuid, vdi_uuid)
1146 def detach(self, sr_uuid, vdi_uuid):
1147 #assert self.has_cap("VDI_DETACH")
1148 self.vdi.detach(sr_uuid, vdi_uuid)
1150 def activate(self, sr_uuid, vdi_uuid):
1151 if self.has_cap("VDI_ACTIVATE"):
1152 return self.vdi.activate(sr_uuid, vdi_uuid)
1154 def deactivate(self, sr_uuid, vdi_uuid):
1155 if self.has_cap("VDI_DEACTIVATE"):
1156 self.vdi.deactivate(sr_uuid, vdi_uuid)
1157 #def resize(self, sr_uuid, vdi_uuid, size):
1158 # return self.vdi.resize(sr_uuid, vdi_uuid, size)
1160 def get_vdi_type(self):
1161 _type = self.vdi.vdi_type
1162 if not _type:
1163 _type = self.vdi.sr.sr_vditype
1164 if not _type:
1165 raise VDI.UnexpectedVDIType(_type, self.vdi)
1166 return _type
1168 def get_vdi_path(self):
1169 return self.vdi.path
1171 class Link(object):
1172 """Relink a node under a common name"""
1173 # NB. We have to provide the device node path during
1174 # VDI.attach, but currently do not allocate the tapdisk minor
1175 # before VDI.activate. Hence these Link helpers, which relink
1176 # existing devices under deterministic path names.
1178 BASEDIR = None
1180 def _mklink(self, target):
1181 raise NotImplementedError("_mklink is not defined")
1183 def _equals(self, target):
1184 raise NotImplementedError("_equals is not defined")
1186 def __init__(self, path):
1187 self._path = path
1189 @classmethod
1190 def from_name(cls, name):
1191 path = "%s/%s" % (cls.BASEDIR, name)
1192 return cls(path)
1194 @classmethod
1195 def from_uuid(cls, sr_uuid, vdi_uuid):
1196 name = "%s/%s" % (sr_uuid, vdi_uuid)
1197 return cls.from_name(name)
1199 def path(self):
1200 return self._path
1202 def stat(self):
1203 return os.stat(self.path())
1205 def mklink(self, target):
1207 path = self.path()
1208 util.SMlog("%s -> %s" % (self, target))
1210 mkdirs(os.path.dirname(path))
1211 try:
1212 self._mklink(target)
1213 except OSError as e:
1214 # We do unlink during teardown, but have to stay
1215 # idempotent. However, a *wrong* target should never
1216 # be seen.
1217 if e.errno != errno.EEXIST:
1218 raise
1219 assert self._equals(target), "'%s' not equal to '%s'" % (path, target)
1221 def unlink(self):
1222 try:
1223 os.unlink(self.path())
1224 except OSError as e:
1225 if e.errno != errno.ENOENT:
1226 raise
1228 def __str__(self):
1229 path = self.path()
1230 return "%s(%s)" % (self.__class__.__name__, path)
1232 class SymLink(Link):
1233 """Symlink some file to a common name"""
1235 def readlink(self):
1236 return os.readlink(self.path())
1238 def symlink(self):
1239 return self.path()
1241 def _mklink(self, target):
1242 os.symlink(target, self.path())
1244 def _equals(self, target):
1245 return self.readlink() == target
1247 class DeviceNode(Link):
1248 """Relink a block device node to a common name"""
1250 @classmethod
1251 def _real_stat(cls, target):
1252 """stat() not on @target, but its realpath()"""
1253 _target = os.path.realpath(target)
1254 return os.stat(_target)
1256 @classmethod
1257 def is_block(cls, target):
1258 """Whether @target refers to a block device."""
1259 return S_ISBLK(cls._real_stat(target).st_mode)
1261 def _mklink(self, target):
1263 st = self._real_stat(target)
1264 if not S_ISBLK(st.st_mode):
1265 raise self.NotABlockDevice(target, st)
1267 os.mknod(self.path(), st.st_mode, st.st_rdev)
1269 def _equals(self, target):
1270 target_rdev = self._real_stat(target).st_rdev
1271 return self.stat().st_rdev == target_rdev
1273 def rdev(self):
1274 st = self.stat()
1275 assert S_ISBLK(st.st_mode)
1276 return os.major(st.st_rdev), os.minor(st.st_rdev)
1278 class NotABlockDevice(Exception):
1280 def __init__(self, path, st):
1281 self.path = path
1282 self.st = st
1284 def __str__(self):
1285 return "%s is not a block device: %s" % (self.path, self.st)
1287 class Hybrid(Link):
1289 def __init__(self, path):
1290 VDI.Link.__init__(self, path)
1291 self._devnode = VDI.DeviceNode(path)
1292 self._symlink = VDI.SymLink(path)
1294 def rdev(self):
1295 st = self.stat()
1296 if S_ISBLK(st.st_mode):
1297 return self._devnode.rdev()
1298 raise self._devnode.NotABlockDevice(self.path(), st)
1300 def mklink(self, target):
1301 if self._devnode.is_block(target):
1302 self._obj = self._devnode
1303 else:
1304 self._obj = self._symlink
1305 self._obj.mklink(target)
1307 def _equals(self, target):
1308 return self._obj._equals(target)
1310 class PhyLink(SymLink):
1311 BASEDIR = "/dev/sm/phy"
1312 # NB. Cannot use DeviceNodes, e.g. FileVDIs aren't bdevs.
1314 class NBDLink(SymLink):
1316 BASEDIR = "/run/blktap-control/nbd"
1318 class BackendLink(Hybrid):
1319 BASEDIR = "/dev/sm/backend"
1320 # NB. Could be SymLinks as well, but saving major,minor pairs in
1321 # Links enables neat state capturing when managing Tapdisks. Note
1322 # that we essentially have a tap-ctl list replacement here. For
1323 # now make it a 'Hybrid'. Likely to collapse into a DeviceNode as
1324 # soon as ISOs are tapdisks.
1326 @staticmethod
1327 def _tap_activate(phy_path, vdi_type, sr_uuid, options, pool_size=None):
1329 tapdisk = Tapdisk.find_by_path(phy_path)
1330 if not tapdisk:  [1330 ↛ 1331]  line 1330 didn't jump to line 1331, because the condition on line 1330 was never true
1331 blktap = Blktap.allocate()
1332 blktap.set_pool_name(sr_uuid)
1333 if pool_size:
1334 blktap.set_pool_size(pool_size)
1336 try:
1337 tapdisk = \
1338 Tapdisk.launch_on_tap(blktap,
1339 phy_path,
1340 VDI._tap_type(vdi_type),
1341 options)
1342 except:
1343 blktap.free()
1344 raise
1345 util.SMlog("tap.activate: Launched %s" % tapdisk)
1347 else:
1348 util.SMlog("tap.activate: Found %s" % tapdisk)
1350 return tapdisk.get_devpath(), tapdisk
1352 @staticmethod
1353 def _tap_deactivate(minor):
1355 try:
1356 tapdisk = Tapdisk.from_minor(minor)
1357 except TapdiskNotRunning as e:
1358 util.SMlog("tap.deactivate: Warning, %s" % e)
1359 # NB. Should not be here unless the agent refcount
1360 # broke. Also, a clean shutdown should not have leaked
1361 # the recorded minor.
1362 else:
1363 tapdisk.shutdown()
1364 util.SMlog("tap.deactivate: Shut down %s" % tapdisk)
1366 @classmethod
1367 def tap_pause(cls, session, sr_uuid, vdi_uuid, failfast=False):
1368 """
1369 Pauses the tapdisk.
1371 session: a XAPI session
1372 sr_uuid: the UUID of the SR on which VDI lives
1373 vdi_uuid: the UUID of the VDI to pause
1374 failfast: controls whether the VDI lock should be acquired in a
1375 non-blocking manner
1376 """
1377 util.SMlog("Pause request for %s" % vdi_uuid)
1378 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1379 session.xenapi.VDI.add_to_sm_config(vdi_ref, 'paused', 'true')
1380 sm_config = session.xenapi.VDI.get_sm_config(vdi_ref)
1381 for key in [x for x in sm_config.keys() if x.startswith('host_')]:  [1381 ↛ 1382]  line 1381 didn't jump to line 1382, because the loop on line 1381 never started
1382 host_ref = key[len('host_'):]
1383 util.SMlog("Calling tap-pause on host %s" % host_ref)
1384 if not cls.call_pluginhandler(session, host_ref,
1385 sr_uuid, vdi_uuid, "pause", failfast=failfast):
1386 # Failed to pause node
1387 session.xenapi.VDI.remove_from_sm_config(vdi_ref, 'paused')
1388 return False
1389 return True
1391 @classmethod
1392 def tap_unpause(cls, session, sr_uuid, vdi_uuid, secondary=None,
1393 activate_parents=False):
1394 util.SMlog("Unpause request for %s secondary=%s" % (vdi_uuid, secondary))
1395 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1396 sm_config = session.xenapi.VDI.get_sm_config(vdi_ref)
1397 for key in [x for x in sm_config.keys() if x.startswith('host_')]:  [1397 ↛ 1398]  line 1397 didn't jump to line 1398, because the loop on line 1397 never started
1398 host_ref = key[len('host_'):]
1399 util.SMlog("Calling tap-unpause on host %s" % host_ref)
1400 if not cls.call_pluginhandler(session, host_ref,
1401 sr_uuid, vdi_uuid, "unpause", secondary, activate_parents):
1402 # Failed to unpause node
1403 return False
1404 session.xenapi.VDI.remove_from_sm_config(vdi_ref, 'paused')
1405 return True
1407 @classmethod
1408 def tap_refresh(cls, session, sr_uuid, vdi_uuid, activate_parents=False):
1409 util.SMlog("Refresh request for %s" % vdi_uuid)
1410 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1411 sm_config = session.xenapi.VDI.get_sm_config(vdi_ref)
1412 for key in [x for x in sm_config.keys() if x.startswith('host_')]:
1413 host_ref = key[len('host_'):]
1414 util.SMlog("Calling tap-refresh on host %s" % host_ref)
1415 if not cls.call_pluginhandler(session, host_ref,
1416 sr_uuid, vdi_uuid, "refresh", None,
1417 activate_parents=activate_parents):
1418 # Failed to refresh node
1419 return False
1420 return True
1422 @classmethod
1423 def tap_status(cls, session, vdi_uuid):
1424 """Return True if disk is attached, false if it isn't"""
1425 util.SMlog("Disk status request for %s" % vdi_uuid)
1426 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1427 sm_config = session.xenapi.VDI.get_sm_config(vdi_ref)
1428 for key in [x for x in sm_config.keys() if x.startswith('host_')]:  [1428 ↛ 1429]  line 1428 didn't jump to line 1429, because the loop on line 1428 never started
1429 return True
1430 return False
1432 @classmethod
1433 def call_pluginhandler(cls, session, host_ref, sr_uuid, vdi_uuid, action,
1434 secondary=None, activate_parents=False, failfast=False):
1435 """Optionally, activate the parent LV before unpausing"""
1436 try:
1437 args = {"sr_uuid": sr_uuid, "vdi_uuid": vdi_uuid,
1438 "failfast": str(failfast)}
1439 if secondary:
1440 args["secondary"] = secondary
1441 if activate_parents:
1442 args["activate_parents"] = "true"
1443 ret = session.xenapi.host.call_plugin(
1444 host_ref, PLUGIN_TAP_PAUSE, action,
1445 args)
1446 return ret == "True"
1447 except Exception as e:
1448 util.logException("BLKTAP2:call_pluginhandler %s" % e)
1449 return False
1451 def _add_tag(self, vdi_uuid, writable):
1452 util.SMlog("Adding tag to: %s" % vdi_uuid)
1453 attach_mode = "RO"
1454 if writable:  [1454 ↛ 1456]  line 1454 didn't jump to line 1456, because the condition on line 1454 was never false
1455 attach_mode = "RW"
1456 vdi_ref = self._session.xenapi.VDI.get_by_uuid(vdi_uuid)
1457 host_ref = self._session.xenapi.host.get_by_uuid(util.get_this_host())
1458 sm_config = self._session.xenapi.VDI.get_sm_config(vdi_ref)
1459 attached_as = util.attached_as(sm_config)
1460 if NO_MULTIPLE_ATTACH and (attached_as == "RW" or \  [1460 ↛ 1462]  line 1460 didn't jump to line 1462, because the condition on line 1460 was never true
1461 (attached_as == "RO" and attach_mode == "RW")):
1462 util.SMlog("need to reset VDI %s" % vdi_uuid)
1463 if not resetvdis.reset_vdi(self._session, vdi_uuid, force=False,
1464 term_output=False, writable=writable):
1465 raise util.SMException("VDI %s not detached cleanly" % vdi_uuid)
1466 sm_config = self._session.xenapi.VDI.get_sm_config(vdi_ref)
1467 if 'relinking' in sm_config:
1468 util.SMlog("Relinking key found, back-off and retry" % sm_config)
1469 return False
1470 if 'paused' in sm_config:
1471 util.SMlog("Paused or host_ref key found [%s]" % sm_config)
1472 return False
1473 self._session.xenapi.VDI.add_to_sm_config(
1474 vdi_ref, 'activating', 'True')
1475 host_key = "host_%s" % host_ref
1476 assert host_key not in sm_config
1477 self._session.xenapi.VDI.add_to_sm_config(vdi_ref, host_key,
1478 attach_mode)
1479 sm_config = self._session.xenapi.VDI.get_sm_config(vdi_ref)
1480 if 'paused' in sm_config or 'relinking' in sm_config:
1481 util.SMlog("Found %s key, aborting" % (
1482 'paused' if 'paused' in sm_config else 'relinking'))
1483 self._session.xenapi.VDI.remove_from_sm_config(vdi_ref, host_key)
1484 self._session.xenapi.VDI.remove_from_sm_config(
1485 vdi_ref, 'activating')
1486 return False
1487 util.SMlog("Activate lock succeeded")
1488 return True
1490 def _check_tag(self, vdi_uuid):
1491 vdi_ref = self._session.xenapi.VDI.get_by_uuid(vdi_uuid)
1492 sm_config = self._session.xenapi.VDI.get_sm_config(vdi_ref)
1493 if 'paused' in sm_config:
1494 util.SMlog("Paused key found [%s]" % sm_config)
1495 return False
1496 return True
1498 def _remove_tag(self, vdi_uuid):
1499 vdi_ref = self._session.xenapi.VDI.get_by_uuid(vdi_uuid)
1500 host_ref = self._session.xenapi.host.get_by_uuid(util.get_this_host())
1501 sm_config = self._session.xenapi.VDI.get_sm_config(vdi_ref)
1502 host_key = "host_%s" % host_ref
1503 if host_key in sm_config:
1504 self._session.xenapi.VDI.remove_from_sm_config(vdi_ref, host_key)
1505 util.SMlog("Removed host key %s for %s" % (host_key, vdi_uuid))
1506 else:
1507 util.SMlog("_remove_tag: host key %s not found, ignore" % host_key)
1509 def _get_pool_config(self, pool_name):
1510 pool_info = dict()
1511 vdi_ref = self.target.vdi.sr.srcmd.params.get('vdi_ref')
1512 if not vdi_ref:  [1512 ↛ 1515]  line 1512 didn't jump to line 1515, because the condition on line 1512 was never true
1513 # attach_from_config context: HA disks don't need to be in any
1514 # special pool
1515 return pool_info
1517 sr_ref = self.target.vdi.sr.srcmd.params.get('sr_ref')
1518 sr_config = self._session.xenapi.SR.get_other_config(sr_ref)
1519 vdi_config = self._session.xenapi.VDI.get_other_config(vdi_ref)
1520 pool_size_str = sr_config.get(POOL_SIZE_KEY)
1521 pool_name_override = vdi_config.get(POOL_NAME_KEY)
1522 if pool_name_override:  [1522 ↛ 1527]  line 1522 didn't jump to line 1527, because the condition on line 1522 was never false
1523 pool_name = pool_name_override
1524 pool_size_override = vdi_config.get(POOL_SIZE_KEY)
1525 if pool_size_override:  [1525 ↛ 1527]  line 1525 didn't jump to line 1527, because the condition on line 1525 was never false
1526 pool_size_str = pool_size_override
1527 pool_size = 0
1528 if pool_size_str:  [1528 ↛ 1538]  line 1528 didn't jump to line 1538, because the condition on line 1528 was never false
1529 try:
1530 pool_size = int(pool_size_str)
1531 if pool_size < 1 or pool_size > MAX_FULL_RINGS: 1531 ↛ 1532line 1531 didn't jump to line 1532, because the condition on line 1531 was never true
1532 raise ValueError("outside of range")
1533 pool_size = NUM_PAGES_PER_RING * pool_size
1534 except ValueError:
1535 util.SMlog("Error: invalid mem-pool-size %s" % pool_size_str)
1536 pool_size = 0
1538 pool_info["mem-pool"] = pool_name
1539 if pool_size:  [1539 ↛ 1542]  line 1539 didn't jump to line 1542, because the condition on line 1539 was never false
1540 pool_info["mem-pool-size"] = str(pool_size)
1542 return pool_info
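[Illustrative sketch, not part of blktap2.py] With a hypothetical "mem-pool-size-rings" other-config value of 2, the pool size handed to blktap is expressed in pages:

    pool_size = NUM_PAGES_PER_RING * 2        # 32 * 11 * 2 = 704 pages
    # pool_info -> {"mem-pool": <pool name>, "mem-pool-size": "704"}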
1544 def linkNBD(self, sr_uuid, vdi_uuid):
1545 if self.tap:
1546 nbd_path = '/run/blktap-control/nbd%d.%d' % (int(self.tap.pid),
1547 int(self.tap.minor))
1548 VDI.NBDLink.from_uuid(sr_uuid, vdi_uuid).mklink(nbd_path)
1550 def attach(self, sr_uuid, vdi_uuid, writable, activate=False, caching_params={}):
1551 """Return/dev/sm/backend symlink path"""
1552 self.xenstore_data.update(self._get_pool_config(sr_uuid))
1553 if not self.target.has_cap("ATOMIC_PAUSE") or activate:
1554 util.SMlog("Attach & activate")
1555 self._attach(sr_uuid, vdi_uuid)
1556 dev_path = self._activate(sr_uuid, vdi_uuid,
1557 {"rdonly": not writable})
1558 self.BackendLink.from_uuid(sr_uuid, vdi_uuid).mklink(dev_path)
1559 self.linkNBD(sr_uuid, vdi_uuid)
1561 # Return backend/ link
1562 back_path = self.BackendLink.from_uuid(sr_uuid, vdi_uuid).path()
1563 if self.tap_wanted():
1564 # Only have NBD if we also have a tap
1565 nbd_path = "nbd:unix:{}:exportname={}".format(
1566 VDI.NBDLink.from_uuid(sr_uuid, vdi_uuid).path(),
1567 vdi_uuid)
1568 else:
1569 nbd_path = ""
1571 options = {"rdonly": not writable}
1572 options.update(caching_params)
1573 o_direct, o_direct_reason = self.get_o_direct_capability(options)
1574 struct = {'params': back_path,
1575 'params_nbd': nbd_path,
1576 'o_direct': o_direct,
1577 'o_direct_reason': o_direct_reason,
1578 'xenstore_data': self.xenstore_data}
1579 util.SMlog('result: %s' % struct)
1581 try:
1582 f = open("%s.attach_info" % back_path, 'a')
1583 f.write(xmlrpc.client.dumps((struct, ), "", True))
1584 f.close()
1585 except:
1586 pass
1588 return xmlrpc.client.dumps((struct, ), "", True)
1590 def activate(self, sr_uuid, vdi_uuid, writable, caching_params):
1591 util.SMlog("blktap2.activate")
1592 options = {"rdonly": not writable}
1593 options.update(caching_params)
1595 sr_ref = self.target.vdi.sr.srcmd.params.get('sr_ref')
1596 sr_other_config = self._session.xenapi.SR.get_other_config(sr_ref)
1597 timeout = nfs.get_nfs_timeout(sr_other_config)
1598 if timeout:  [1598 ↛ 1602]  line 1598 didn't jump to line 1602, because the condition on line 1598 was never false
1599 # Note NFS timeout values are in deciseconds
1600 timeout = int((timeout + 5) / 10)
1601 options["timeout"] = timeout + self.TAPDISK_TIMEOUT_MARGIN
1602 for i in range(self.ATTACH_DETACH_RETRY_SECS):  [1602 ↛ 1609]  line 1602 didn't jump to line 1609, because the loop on line 1602 didn't complete
1603 try:
1604 if self._activate_locked(sr_uuid, vdi_uuid, options):
1605 return
1606 except util.SRBusyException:
1607 util.SMlog("SR locked, retrying")
1608 time.sleep(1)
1609 raise util.SMException("VDI %s locked" % vdi_uuid)
1611 @locking("VDIUnavailable")
1612 def _activate_locked(self, sr_uuid, vdi_uuid, options):
1613 """Wraps target.activate and adds a tapdisk"""
1615 #util.SMlog("VDI.activate %s" % vdi_uuid)
1616 if self.tap_wanted():  [1616 ↛ 1629]  line 1616 didn't jump to line 1629, because the condition on line 1616 was never false
1617 if not self._add_tag(vdi_uuid, not options["rdonly"]):
1618 return False
1619 # it is possible that while the VDI was paused some of its
1620 # attributes have changed (e.g. its size if it was inflated; or its
1621 # path if it was leaf-coalesced onto a raw LV), so refresh the
1622 # object completely
1623 params = self.target.vdi.sr.srcmd.params
1624 target = sm.VDI.from_uuid(self.target.vdi.session, vdi_uuid)
1625 target.sr.srcmd.params = params
1626 driver_info = target.sr.srcmd.driver_info
1627 self.target = self.TargetDriver(target, driver_info)
1629 try:
1630 util.fistpoint.activate_custom_fn(  [1630 ↛ exit]  line 1630 didn't jump to the function exit
1631 "blktap_activate_inject_failure",
1632 lambda: util.inject_failure())
1634 # Attach the physical node
1635 if self.target.has_cap("ATOMIC_PAUSE"):  [1635 ↛ 1638]  line 1635 didn't jump to line 1638, because the condition on line 1635 was never false
1636 self._attach(sr_uuid, vdi_uuid)
1638 vdi_type = self.target.get_vdi_type()
1640 # Take lvchange-p Lock before running
1641 # tap-ctl open
1642 # Needed to avoid race with lvchange -p which is
1643 # now taking the same lock
1644 # This is a fix for CA-155766
1645 if hasattr(self.target.vdi.sr, 'DRIVER_TYPE') and \  [1645 ↛ 1648]  line 1645 didn't jump to line 1648, because the condition on line 1645 was never true
1646 self.target.vdi.sr.DRIVER_TYPE == 'lvhd' and \
1647 vdi_type == vhdutil.VDI_TYPE_VHD:
1648 lock = Lock("lvchange-p", lvhdutil.NS_PREFIX_LVM + sr_uuid)
1649 lock.acquire()
1651 # When we attach a static VDI for HA, we cannot communicate with
1652 # xapi, because it has not started yet. These VDIs are raw.
1653 if vdi_type != vhdutil.VDI_TYPE_RAW:  [1653 ↛ 1664]  line 1653 didn't jump to line 1664, because the condition on line 1653 was never false
1654 session = self.target.vdi.session
1655 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1656 # pylint: disable=used-before-assignment
1657 sm_config = session.xenapi.VDI.get_sm_config(vdi_ref)
1658 if 'key_hash' in sm_config:  [1658 ↛ 1659]  line 1658 didn't jump to line 1659, because the condition on line 1658 was never true
1659 key_hash = sm_config['key_hash']
1660 options['key_hash'] = key_hash
1661 options['vdi_uuid'] = vdi_uuid
1662 util.SMlog('Using key with hash {} for VDI {}'.format(key_hash, vdi_uuid))
1663 # Activate the physical node
1664 dev_path = self._activate(sr_uuid, vdi_uuid, options)
1666 if hasattr(self.target.vdi.sr, 'DRIVER_TYPE') and \  [1666 ↛ 1669]  line 1666 didn't jump to line 1669, because the condition on line 1666 was never true
1667 self.target.vdi.sr.DRIVER_TYPE == 'lvhd' and \
1668 self.target.get_vdi_type() == vhdutil.VDI_TYPE_VHD:
1669 lock.release()
1670 except:
1671 util.SMlog("Exception in activate/attach")
1672 if self.tap_wanted():
1673 util.fistpoint.activate_custom_fn(
1674 "blktap_activate_error_handling",
1675 lambda: time.sleep(30))
1676 while True:
1677 try:
1678 self._remove_tag(vdi_uuid)
1679 break
1680 except xmlrpc.client.ProtocolError as e:
1681 # If there's a connection error, keep trying forever.
1682 if e.errcode == http.HTTPStatus.INTERNAL_SERVER_ERROR.value:
1683 continue
1684 else:
1685 util.SMlog('failed to remove tag: %s' % e)
1686 break
1687 except Exception as e:
1688 util.SMlog('failed to remove tag: %s' % e)
1689 break
1690 raise
1691 finally:
1692 vdi_ref = self._session.xenapi.VDI.get_by_uuid(vdi_uuid)
1693 self._session.xenapi.VDI.remove_from_sm_config(
1694 vdi_ref, 'activating')
1695 util.SMlog("Removed activating flag from %s" % vdi_uuid) 1695 ↛ exitline 1695 didn't except from function '_activate_locked', because the raise on line 1690 wasn't executed
1697 # Link result to backend/
1698 self.BackendLink.from_uuid(sr_uuid, vdi_uuid).mklink(dev_path)
1699 self.linkNBD(sr_uuid, vdi_uuid)
1700 return True
1702 def _activate(self, sr_uuid, vdi_uuid, options):
1703 vdi_options = self.target.activate(sr_uuid, vdi_uuid)
1705 dev_path = self.setup_cache(sr_uuid, vdi_uuid, options)
1706 if not dev_path:  [1706 ↛ 1720]  line 1706 didn't jump to line 1720, because the condition on line 1706 was never false
1707 phy_path = self.PhyLink.from_uuid(sr_uuid, vdi_uuid).readlink()
1708 # Maybe launch a tapdisk on the physical link
1709 if self.tap_wanted():  [1709 ↛ 1718]  line 1709 didn't jump to line 1718, because the condition on line 1709 was never false
1710 vdi_type = self.target.get_vdi_type()
1711 options["o_direct"] = self.get_o_direct_capability(options)[0]
1712 if vdi_options:  [1712 ↛ 1714]  line 1712 didn't jump to line 1714, because the condition on line 1712 was never false
1713 options.update(vdi_options)
1714 dev_path, self.tap = self._tap_activate(phy_path, vdi_type,
1715 sr_uuid, options,
1716 self._get_pool_config(sr_uuid).get("mem-pool-size"))
1717 else:
1718 dev_path = phy_path # Just reuse phy
1720 return dev_path
1722 def _attach(self, sr_uuid, vdi_uuid):
1723 attach_info = xmlrpc.client.loads(self.target.attach(sr_uuid, vdi_uuid))[0][0]
1724 params = attach_info['params']
1725 xenstore_data = attach_info['xenstore_data']
1726 phy_path = util.to_plain_string(params)
1727 self.xenstore_data.update(xenstore_data)
1728 # Save it to phy/
1729 self.PhyLink.from_uuid(sr_uuid, vdi_uuid).mklink(phy_path)
1731 def deactivate(self, sr_uuid, vdi_uuid, caching_params):
1732 util.SMlog("blktap2.deactivate")
1733 for i in range(self.ATTACH_DETACH_RETRY_SECS):
1734 try:
1735 if self._deactivate_locked(sr_uuid, vdi_uuid, caching_params):
1736 return
1737 except util.SRBusyException as e:
1738 util.SMlog("SR locked, retrying")
1739 time.sleep(1)
1740 raise util.SMException("VDI %s locked" % vdi_uuid)
1742 @locking("VDIUnavailable")
1743 def _deactivate_locked(self, sr_uuid, vdi_uuid, caching_params):
1744 """Wraps target.deactivate and removes a tapdisk"""
1746 #util.SMlog("VDI.deactivate %s" % vdi_uuid)
1747 if self.tap_wanted() and not self._check_tag(vdi_uuid):
1748 return False
1750 self._deactivate(sr_uuid, vdi_uuid, caching_params)
1751 if self.target.has_cap("ATOMIC_PAUSE"):
1752 self._detach(sr_uuid, vdi_uuid)
1753 if self.tap_wanted():
1754 self._remove_tag(vdi_uuid)
1756 return True
1758 def _resetPhylink(self, sr_uuid, vdi_uuid, path):
1759 self.PhyLink.from_uuid(sr_uuid, vdi_uuid).mklink(path)
1761 def detach(self, sr_uuid, vdi_uuid, deactivate=False, caching_params={}):
1762 if not self.target.has_cap("ATOMIC_PAUSE") or deactivate:
1763 util.SMlog("Deactivate & detach")
1764 self._deactivate(sr_uuid, vdi_uuid, caching_params)
1765 self._detach(sr_uuid, vdi_uuid)
1766 else:
1767 pass # nothing to do
1769 def _deactivate(self, sr_uuid, vdi_uuid, caching_params):
1770 import VDI as sm
1772 # Shutdown tapdisk
1773 back_link = self.BackendLink.from_uuid(sr_uuid, vdi_uuid)
1775 if not util.pathexists(back_link.path()):
1776 util.SMlog("Backend path %s does not exist" % back_link.path())
1777 return
1779 try:
1780 attach_info_path = "%s.attach_info" % (back_link.path())
1781 os.unlink(attach_info_path)
1782 except:
1783 util.SMlog("unlink of attach_info failed")
1785 try:
1786 major, minor = back_link.rdev()
1787 except self.DeviceNode.NotABlockDevice:
1788 pass
1789 else:
1790 if major == Tapdisk.major():
1791 self._tap_deactivate(minor)
1792 self.remove_cache(sr_uuid, vdi_uuid, caching_params)
1794 # Remove the backend link
1795 back_link.unlink()
1796 VDI.NBDLink.from_uuid(sr_uuid, vdi_uuid).unlink()
1798 # Deactivate & detach the physical node
1799 if self.tap_wanted() and self.target.vdi.session is not None:
1800 # it is possible that while the VDI was paused some of its
1801 # attributes have changed (e.g. its size if it was inflated; or its
1802 # path if it was leaf-coalesced onto a raw LV), so refresh the
1803 # object completely
1804 target = sm.VDI.from_uuid(self.target.vdi.session, vdi_uuid)
1805 driver_info = target.sr.srcmd.driver_info
1806 self.target = self.TargetDriver(target, driver_info)
1808 self.target.deactivate(sr_uuid, vdi_uuid)
1810 def _detach(self, sr_uuid, vdi_uuid):
1811 self.target.detach(sr_uuid, vdi_uuid)
1813 # Remove phy/
1814 self.PhyLink.from_uuid(sr_uuid, vdi_uuid).unlink()
1816 def _updateCacheRecord(self, session, vdi_uuid, on_boot, caching):
1817 # Remove existing VDI.sm_config fields
1818 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1819 for key in ["on_boot", "caching"]:
1820 session.xenapi.VDI.remove_from_sm_config(vdi_ref, key)
1821 if not on_boot is None:
1822 session.xenapi.VDI.add_to_sm_config(vdi_ref, 'on_boot', on_boot)
1823 if not caching is None:
1824 session.xenapi.VDI.add_to_sm_config(vdi_ref, 'caching', caching)
1826 def setup_cache(self, sr_uuid, vdi_uuid, params):
1827 if params.get(self.CONF_KEY_ALLOW_CACHING) != "true":  [1827 ↛ 1830]  line 1827 didn't jump to line 1830, because the condition on line 1827 was never false
1828 return
1830 util.SMlog("Requested local caching")
1831 if not self.target.has_cap("SR_CACHING"):
1832 util.SMlog("Error: local caching not supported by this SR")
1833 return
1835 scratch_mode = False
1836 if params.get(self.CONF_KEY_MODE_ON_BOOT) == "reset":
1837 scratch_mode = True
1838 util.SMlog("Requested scratch mode")
1839 if not self.target.has_cap("VDI_RESET_ON_BOOT/2"):
1840 util.SMlog("Error: scratch mode not supported by this SR")
1841 return
1843 dev_path = None
1844 local_sr_uuid = params.get(self.CONF_KEY_CACHE_SR)
1845 if not local_sr_uuid:
1846 util.SMlog("ERROR: Local cache SR not specified, not enabling")
1847 return
1848 dev_path = self._setup_cache(self._session, sr_uuid, vdi_uuid,
1849 local_sr_uuid, scratch_mode, params)
1851 if dev_path:
1852 self._updateCacheRecord(self._session, self.target.vdi.uuid,
1853 params.get(self.CONF_KEY_MODE_ON_BOOT),
1854 params.get(self.CONF_KEY_ALLOW_CACHING))
1856 return dev_path
1858 def alert_no_cache(self, session, vdi_uuid, cache_sr_uuid, err):
1859 vm_uuid = None
1860 vm_label = ""
1861 try:
1862 cache_sr_ref = session.xenapi.SR.get_by_uuid(cache_sr_uuid)
1863 cache_sr_rec = session.xenapi.SR.get_record(cache_sr_ref)
1864 cache_sr_label = cache_sr_rec.get("name_label")
1866 host_ref = session.xenapi.host.get_by_uuid(util.get_this_host())
1867 host_rec = session.xenapi.host.get_record(host_ref)
1868 host_label = host_rec.get("name_label")
1870 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1871 vbds = session.xenapi.VBD.get_all_records_where( \
1872 "field \"VDI\" = \"%s\"" % vdi_ref)
1873 for vbd_rec in vbds.values():
1874 vm_ref = vbd_rec.get("VM")
1875 vm_rec = session.xenapi.VM.get_record(vm_ref)
1876 vm_uuid = vm_rec.get("uuid")
1877 vm_label = vm_rec.get("name_label")
1878 except:
1879 util.logException("alert_no_cache")
1881 alert_obj = "SR"
1882 alert_uuid = str(cache_sr_uuid)
1883 alert_str = "No space left in Local Cache SR %s" % cache_sr_uuid
1884 if vm_uuid:
1885 alert_obj = "VM"
1886 alert_uuid = vm_uuid
1887 reason = ""
1888 if err == errno.ENOSPC:
1889 reason = "because there is no space left"
1890 alert_str = "The VM \"%s\" is not using IntelliCache %s on the Local Cache SR (\"%s\") on host \"%s\"" % \
1891 (vm_label, reason, cache_sr_label, host_label)
1893 util.SMlog("Creating alert: (%s, %s, \"%s\")" % \
1894 (alert_obj, alert_uuid, alert_str))
1895 session.xenapi.message.create("No space left in local cache", "3",
1896 alert_obj, alert_uuid, alert_str)
1898 def _setup_cache(self, session, sr_uuid, vdi_uuid, local_sr_uuid,
1899 scratch_mode, options):
1900 import SR
1901 import EXTSR
1902 import NFSSR
1903 import XenAPI
1904 from lock import Lock
1905 from FileSR import FileVDI
1907 parent_uuid = vhdutil.getParent(self.target.vdi.path,
1908 FileVDI.extractUuid)
1909 if not parent_uuid:
1910 util.SMlog("ERROR: VDI %s has no parent, not enabling" % \
1911 self.target.vdi.uuid)
1912 return
1914 util.SMlog("Setting up cache")
1915 parent_uuid = parent_uuid.strip()
1916 shared_target = NFSSR.NFSFileVDI(self.target.vdi.sr, parent_uuid)
1918 if shared_target.parent:
1919 util.SMlog("ERROR: Parent VDI %s has parent, not enabling" %
1920 shared_target.uuid)
1921 return
1923 SR.registerSR(EXTSR.EXTSR)
1924 local_sr = SR.SR.from_uuid(session, local_sr_uuid)
1926 lock = Lock(self.LOCK_CACHE_SETUP, parent_uuid)
1927 lock.acquire()
1929 # read cache
1930 read_cache_path = "%s/%s.vhdcache" % (local_sr.path, shared_target.uuid)
1931 if util.pathexists(read_cache_path):
1932 util.SMlog("Read cache node (%s) already exists, not creating" % \
1933 read_cache_path)
1934 else:
1935 try:
1936 vhdutil.snapshot(read_cache_path, shared_target.path, False)
1937 except util.CommandException as e:
1938 util.SMlog("Error creating parent cache: %s" % e)
1939 self.alert_no_cache(session, vdi_uuid, local_sr_uuid, e.code)
1940 return None
1942 # local write node
1943 leaf_size = vhdutil.getSizeVirt(self.target.vdi.path)
1944 local_leaf_path = "%s/%s.vhdcache" % \
1945 (local_sr.path, self.target.vdi.uuid)
1946 if util.pathexists(local_leaf_path):
1947 util.SMlog("Local leaf node (%s) already exists, deleting" % \
1948 local_leaf_path)
1949 os.unlink(local_leaf_path)
1950 try:
1951 vhdutil.snapshot(local_leaf_path, read_cache_path, False,
1952 msize=leaf_size // 1024 // 1024, checkEmpty=False)
1953 except util.CommandException as e:
1954 util.SMlog("Error creating leaf cache: %s" % e)
1955 self.alert_no_cache(session, vdi_uuid, local_sr_uuid, e.code)
1956 return None
1958 local_leaf_size = vhdutil.getSizeVirt(local_leaf_path)
1959 if leaf_size > local_leaf_size:
1960 util.SMlog("Leaf size %d > local leaf cache size %d, resizing" %
1961 (leaf_size, local_leaf_size))
1962 vhdutil.setSizeVirtFast(local_leaf_path, leaf_size)
1964 vdi_type = self.target.get_vdi_type()
1966 prt_tapdisk = Tapdisk.find_by_path(read_cache_path)
1967 if not prt_tapdisk:
1968 parent_options = copy.deepcopy(options)
1969 parent_options["rdonly"] = False
1970 parent_options["lcache"] = True
1972 blktap = Blktap.allocate()
1973 try:
1974 blktap.set_pool_name("lcache-parent-pool-%s" % blktap.minor)
1975 # no need to change pool_size since each parent tapdisk is in
1976 # its own pool
1977 prt_tapdisk = \
1978 Tapdisk.launch_on_tap(blktap, read_cache_path,
1979 'vhd', parent_options)
1980 except:
1981 blktap.free()
1982 raise
1984 secondary = "%s:%s" % (self.target.get_vdi_type(),
1985 self.PhyLink.from_uuid(sr_uuid, vdi_uuid).readlink())
1987 util.SMlog("Parent tapdisk: %s" % prt_tapdisk)
1988 leaf_tapdisk = Tapdisk.find_by_path(local_leaf_path)
1989 if not leaf_tapdisk:
1990 blktap = Blktap.allocate()
1991 child_options = copy.deepcopy(options)
1992 child_options["rdonly"] = False
1993 child_options["lcache"] = False
1994 child_options["existing_prt"] = prt_tapdisk.minor
1995 child_options["secondary"] = secondary
1996 child_options["standby"] = scratch_mode
1997 try:
1998 leaf_tapdisk = \
1999 Tapdisk.launch_on_tap(blktap, local_leaf_path,
2000 'vhd', child_options)
2001 except:
2002 blktap.free()
2003 raise
2005 lock.release()
2007 util.SMlog("Local read cache: %s, local leaf: %s" % \
2008 (read_cache_path, local_leaf_path))
2010 self.tap = leaf_tapdisk
2011 return leaf_tapdisk.get_devpath()
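To summarize the chain that _setup_cache builds on the local cache SR (a layout sketch only, derived from the paths constructed above):

    # <local_sr.path>/<parent_uuid>.vhdcache   read cache: VHD snapshot of the shared
    #                                          parent, served by its own tapdisk in an
    #                                          "lcache-parent-pool-<minor>" mem-pool
    # <local_sr.path>/<vdi_uuid>.vhdcache      local leaf: VHD child of the read cache,
    #                                          launched with existing_prt=<parent minor>,
    #                                          secondary=<vdi type>:<shared leaf path>,
    #                                          and standby=True in scratch mode
    # The leaf tapdisk's device path is what the caller ends up attaching.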
2013 def remove_cache(self, sr_uuid, vdi_uuid, params):
2014 if not self.target.has_cap("SR_CACHING"):
2015 return
2017 caching = params.get(self.CONF_KEY_ALLOW_CACHING) == "true"
2019 local_sr_uuid = params.get(self.CONF_KEY_CACHE_SR)
2020 if caching and not local_sr_uuid:
2021 util.SMlog("ERROR: Local cache SR not specified, ignore")
2022 return
2024 if caching:
2025 self._remove_cache(self._session, local_sr_uuid)
2027 if self._session is not None:
2028 self._updateCacheRecord(self._session, self.target.vdi.uuid, None, None)
2030 def _is_tapdisk_in_use(self, minor):
2031 retVal, links, sockets = util.findRunningProcessOrOpenFile("tapdisk")
2032 if not retVal:
2033 # err on the side of caution
2034 return True
2036 for link in links:
2037 if link.find("tapdev%d" % minor) != -1:
2038 return True
2040 socket_re = re.compile(r'^/.*/nbd\d+\.%d' % minor)
2041 for s in sockets:
2042 if socket_re.match(s):
2043 return True
2045 return False
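For reference, a hedged example of what _is_tapdisk_in_use(3) treats as "in use" (the concrete paths are illustrative, not taken from the code):

    # - any open-file link containing the substring "tapdev3",
    #   e.g. /dev/xen/blktap-2/tapdev3
    # - any socket path matching r'^/.*/nbd\d+\.3',
    #   e.g. /run/blktap-control/nbd12345.3
    # If the process/fd scan itself fails, the tapdisk is conservatively
    # reported as in use.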
2047 def _remove_cache(self, session, local_sr_uuid):
2048 import SR
2049 import EXTSR
2050 import NFSSR
2051 import XenAPI
2052 from lock import Lock
2053 from FileSR import FileVDI
2055 parent_uuid = vhdutil.getParent(self.target.vdi.path,
2056 FileVDI.extractUuid)
2057 if not parent_uuid:
2058 util.SMlog("ERROR: No parent for VDI %s, ignore" % \
2059 self.target.vdi.uuid)
2060 return
2062 util.SMlog("Tearing down the cache")
2064 parent_uuid = parent_uuid.strip()
2065 shared_target = NFSSR.NFSFileVDI(self.target.vdi.sr, parent_uuid)
2067 SR.registerSR(EXTSR.EXTSR)
2068 local_sr = SR.SR.from_uuid(session, local_sr_uuid)
2070 lock = Lock(self.LOCK_CACHE_SETUP, parent_uuid)
2071 lock.acquire()
2073 # local write node
2074 local_leaf_path = "%s/%s.vhdcache" % \
2075 (local_sr.path, self.target.vdi.uuid)
2076 if util.pathexists(local_leaf_path):
2077 util.SMlog("Deleting local leaf node %s" % local_leaf_path)
2078 os.unlink(local_leaf_path)
2080 read_cache_path = "%s/%s.vhdcache" % (local_sr.path, shared_target.uuid)
2081 prt_tapdisk = Tapdisk.find_by_path(read_cache_path)
2082 if not prt_tapdisk:
2083 util.SMlog("Parent tapdisk not found")
2084 elif not self._is_tapdisk_in_use(prt_tapdisk.minor):
2085 util.SMlog("Parent tapdisk not in use: shutting down %s" % \
2086 read_cache_path)
2087 try:
2088 prt_tapdisk.shutdown()
2089 except:
2090 util.logException("shutting down parent tapdisk")
2091 else:
2092 util.SMlog("Parent tapdisk still in use: %s" % read_cache_path)
2093 # the parent cache files are removed during the local SR's background
2094 # GC run
2096 lock.release()
2098PythonKeyError = KeyError
2101class UEventHandler(object):
2103 def __init__(self):
2104 self._action = None
2106 class KeyError(PythonKeyError):
2107 def __str__(self):
2108 return \
2109 "Key '%s' missing in environment. " % self.args[0] + \
2110 "Not called in udev context?"
2112 @classmethod
2113 def getenv(cls, key):
2114 try:
2115 return os.environ[key]
2116 except KeyError as e:
2117 raise cls.KeyError(e.args[0])
2119 def get_action(self):
2120 if not self._action:
2121 self._action = self.getenv('ACTION')
2122 return self._action
2124 class UnhandledEvent(Exception):
2126 def __init__(self, event, handler):
2127 self.event = event
2128 self.handler = handler
2130 def __str__(self):
2131 return "Uevent '%s' not handled by %s" % \
2132 (self.event, self.handler.__class__.__name__)
2134 ACTIONS = {}
2136 def run(self):
2138 action = self.get_action()
2139 try:
2140 fn = self.ACTIONS[action]
2141 except KeyError:
2142 raise self.UnhandledEvent(action, self)
2144 return fn(self)
2146 def __str__(self):
2147 try:
2148 action = self.get_action()
2149 except:
2150 action = None
2151 return "%s[%s]" % (self.__class__.__name__, action)
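A minimal sketch of how a subclass hooks into this dispatcher (hypothetical example; BlkbackEventHandler further down is the real consumer):

    class ExampleHandler(UEventHandler):          # hypothetical subclass
        def add(self):
            devname = self.getenv('DEVNAME')      # raises UEventHandler.KeyError outside udev
            print("added", devname)

        ACTIONS = {'add': add}

    # Under udev, ACTION (and DEVNAME here) come from the environment; any
    # other ACTION makes run() raise UnhandledEvent.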
2154class __BlktapControl(ClassDevice):
2155 SYSFS_CLASSTYPE = "misc"
2157 def __init__(self):
2158 ClassDevice.__init__(self)
2159 self._default_pool = None
2161 def sysfs_devname(self):
2162 return "blktap!control"
2164 class DefaultPool(Attribute):
2165 SYSFS_NODENAME = "default_pool"
2167 def get_default_pool_attr(self):
2168 if not self._default_pool:
2169 self._default_pool = self.DefaultPool.from_kobject(self)
2170 return self._default_pool
2172 def get_default_pool_name(self):
2173 return self.get_default_pool_attr().readline()
2175 def set_default_pool_name(self, name):
2176 self.get_default_pool_attr().writeline(name)
2178 def get_default_pool(self):
2179 return BlktapControl.get_pool(self.get_default_pool_name())
2181 def set_default_pool(self, pool):
2182 self.set_default_pool_name(pool.name)
2184 class NoSuchPool(Exception):
2185 def __init__(self, name):
2186 self.name = name
2188 def __str__(self):
2189 return "No such pool: %s", self.name
2191 def get_pool(self, name):
2192 path = "%s/pools/%s" % (self.sysfs_path(), name)
2194 if not os.path.isdir(path):
2195 raise self.NoSuchPool(name)
2197 return PagePool(path)
2199BlktapControl = __BlktapControl()
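The sysfs layout behind BlktapControl, for orientation (the /sys/class/misc mapping is assumed from ClassDevice and SYSFS_CLASSTYPE; exact paths may differ):

    # /sys/class/misc/blktap!control/default_pool         <- DefaultPool attribute
    # /sys/class/misc/blktap!control/pools/<name>/size    <- PagePool.Size, in pages
    #
    # BlktapControl.get_pool("default")    -> PagePool wrapping .../pools/default
    # BlktapControl.get_pool("missing")    -> raises NoSuchPool("missing")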
2202class PagePool(KObject):
2204 def __init__(self, path):
2205 self.path = path
2206 self._size = None
2208 def sysfs_path(self):
2209 return self.path
2211 class Size(Attribute):
2212 SYSFS_NODENAME = "size"
2214 def get_size_attr(self):
2215 if not self._size:
2216 self._size = self.Size.from_kobject(self)
2217 return self._size
2219 def set_size(self, pages):
2220 pages = str(pages)
2221 self.get_size_attr().writeline(pages)
2223 def get_size(self):
2224 pages = self.get_size_attr().readline()
2225 return int(pages)
2228class BusDevice(KObject):
2230 SYSFS_BUSTYPE = None
2232 @classmethod
2233 def sysfs_bus_path(cls):
2234 return "/sys/bus/%s" % cls.SYSFS_BUSTYPE
2236 def sysfs_path(self):
2237 path = "%s/devices/%s" % (self.sysfs_bus_path(),
2238 self.sysfs_devname())
2240 return path
2243class XenbusDevice(BusDevice):
2244 """Xenbus device, in XS and sysfs"""
2246 XBT_NIL = ""
2248 XENBUS_DEVTYPE = None
2250 def __init__(self, domid, devid):
2251 self.domid = int(domid)
2252 self.devid = int(devid)
2253 self._xbt = XenbusDevice.XBT_NIL
2255 import xen.lowlevel.xs
2256 self.xs = xen.lowlevel.xs.xs()
2258 def xs_path(self, key=None):
2259 path = "backend/%s/%d/%d" % (self.XENBUS_DEVTYPE,
2260 self.domid,
2261 self.devid)
2262 if key is not None:
2263 path = "%s/%s" % (path, key)
2265 return path
2267 def _log(self, prio, msg):
2268 syslog(prio, msg)
2270 def info(self, msg):
2271 self._log(_syslog.LOG_INFO, msg)
2273 def warn(self, msg):
2274 self._log(_syslog.LOG_WARNING, "WARNING: " + msg)
2276 def _xs_read_path(self, path):
2277 val = self.xs.read(self._xbt, path)
2278 #self.info("read %s = '%s'" % (path, val))
2279 return val
2281 def _xs_write_path(self, path, val):
2282 self.xs.write(self._xbt, path, val)
2283 self.info("wrote %s = '%s'" % (path, val))
2285 def _xs_rm_path(self, path):
2286 self.xs.rm(self._xbt, path)
2287 self.info("removed %s" % path)
2289 def read(self, key):
2290 return self._xs_read_path(self.xs_path(key))
2292 def has_xs_key(self, key):
2293 return self.read(key) is not None
2295 def write(self, key, val):
2296 self._xs_write_path(self.xs_path(key), val)
2298 def rm(self, key):
2299 self._xs_rm_path(self.xs_path(key))
2301 def exists(self):
2302 return self.has_xs_key(None)
2304 def begin(self):
2305 assert(self._xbt == XenbusDevice.XBT_NIL)
2306 self._xbt = self.xs.transaction_start()
2308 def commit(self):
2309 ok = self.xs.transaction_end(self._xbt, 0)
2310 self._xbt = XenbusDevice.XBT_NIL
2311 return ok
2313 def abort(self):
2314 ok = self.xs.transaction_end(self._xbt, 1)
2315 assert(ok == True)
2316 self._xbt = XenbusDevice.XBT_NIL
2318 def create_physical_device(self):
2319 """The standard protocol is: toolstack writes 'params', linux hotplug
2320 script translates this into physical-device=%x:%x"""
2321 if self.has_xs_key("physical-device"):
2322 return
2323 try:
2324 params = self.read("params")
2325 frontend = self.read("frontend")
2326 is_cdrom = self._xs_read_path("%s/device-type" % frontend) == "cdrom"
2327 # We don't have PV drivers for CDROM devices, so we prevent blkback
2328 # from opening the physical-device
2329 if not(is_cdrom):
2330 major_minor = os.stat(params).st_rdev
2331 major, minor = divmod(major_minor, 256)
2332 self.write("physical-device", "%x:%x" % (major, minor))
2333 except:
2334 util.logException("BLKTAP2:create_physical_device")
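A worked example of the params -> physical-device translation above (the device numbers are illustrative):

    import os
    # Suppose xenstore "params" points at a node whose device number is 254:5
    # (e.g. a tapdev).  Then:
    rdev = os.makedev(254, 5)
    major, minor = divmod(rdev, 256)     # -> (254, 5); divmod matches makedev here
                                         #    because the minor is below 256
    print("%x:%x" % (major, minor))      # -> "fe:5", the value written to
                                         #    backend/vbd/<domid>/<devid>/physical-device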
2336 def signal_hotplug(self, online=True):
2337 xapi_path = "/xapi/%d/hotplug/%s/%d/hotplug" % (self.domid,
2338 self.XENBUS_DEVTYPE,
2339 self.devid)
2340 upstream_path = self.xs_path("hotplug-status")
2341 if online:
2342 self._xs_write_path(xapi_path, "online")
2343 self._xs_write_path(upstream_path, "connected")
2344 else:
2345 self._xs_rm_path(xapi_path)
2346 self._xs_rm_path(upstream_path)
2348 def sysfs_devname(self):
2349 return "%s-%d-%d" % (self.XENBUS_DEVTYPE,
2350 self.domid, self.devid)
2352 def __str__(self):
2353 return self.sysfs_devname()
2355 @classmethod
2356 def find(cls):
2357 pattern = "/sys/bus/%s/devices/%s*" % (cls.SYSFS_BUSTYPE,
2358 cls.XENBUS_DEVTYPE)
2359 for path in glob.glob(pattern):
2361 name = os.path.basename(path)
2362 (_type, domid, devid) = name.split('-')
2364 yield cls(domid, devid)
2367class XenBackendDevice(XenbusDevice):
2368 """Xenbus backend device"""
2369 SYSFS_BUSTYPE = "xen-backend"
2371 @classmethod
2372 def from_xs_path(cls, _path):
2373 (_backend, _type, domid, devid) = _path.split('/')
2375 assert _backend == 'backend'
2376 assert _type == cls.XENBUS_DEVTYPE
2378 domid = int(domid)
2379 devid = int(devid)
2381 return cls(domid, devid)
2384class Blkback(XenBackendDevice):
2385 """A blkback VBD"""
2387 XENBUS_DEVTYPE = "vbd"
2389 def __init__(self, domid, devid):
2390 XenBackendDevice.__init__(self, domid, devid)
2391 self._phy = None
2392 self._vdi_uuid = None
2393 self._q_state = None
2394 self._q_events = None
2396 class XenstoreValueError(Exception):
2397 KEY = None
2399 def __init__(self, vbd, _str):
2400 self.vbd = vbd
2401 self.str = _str
2403 def __str__(self):
2404 return "Backend %s " % self.vbd + \
2405 "has %s = %s" % (self.KEY, self.str)
2407 class PhysicalDeviceError(XenstoreValueError):
2408 KEY = "physical-device"
2410 class PhysicalDevice(object):
2412 def __init__(self, major, minor):
2413 self.major = int(major)
2414 self.minor = int(minor)
2416 @classmethod
2417 def from_xbdev(cls, xbdev):
2419 phy = xbdev.read("physical-device")
2421 try:
2422 major, minor = phy.split(':')
2423 major = int(major, 0x10)
2424 minor = int(minor, 0x10)
2425 except Exception as e:
2426 raise xbdev.PhysicalDeviceError(xbdev, phy)
2428 return cls(major, minor)
2430 def makedev(self):
2431 return os.makedev(self.major, self.minor)
2433 def is_tap(self):
2434 return self.major == Tapdisk.major()
2436 def __str__(self):
2437 return "%s:%s" % (self.major, self.minor)
2439 def __eq__(self, other):
2440 return \
2441 self.major == other.major and \
2442 self.minor == other.minor
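Round-tripping the value written by create_physical_device (illustrative values):

    phy = "fe:5"                                   # as read from the "physical-device" key
    major, minor = [int(x, 0x10) for x in phy.split(':')]
    print(major, minor)                            # -> 254 5
    # is_tap() then simply compares major against Tapdisk.major().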
2444 def get_physical_device(self):
2445 if not self._phy:
2446 self._phy = self.PhysicalDevice.from_xbdev(self)
2447 return self._phy
2449 class QueueEvents(Attribute):
2450 """Blkback sysfs node to select queue-state event
2451 notifications emitted."""
2453 SYSFS_NODENAME = "queue_events"
2455 QUEUE_RUNNING = (1 << 0)
2456 QUEUE_PAUSE_DONE = (1 << 1)
2457 QUEUE_SHUTDOWN_DONE = (1 << 2)
2458 QUEUE_PAUSE_REQUEST = (1 << 3)
2459 QUEUE_SHUTDOWN_REQUEST = (1 << 4)
2461 def get_mask(self):
2462 return int(self.readline(), 0x10)
2464 def set_mask(self, mask):
2465 self.writeline("0x%x" % mask)
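The mask is a plain hex bitmap; for example, the combination that _manage_vbd selects further down:

    mask = Blkback.QueueEvents.QUEUE_PAUSE_DONE | Blkback.QueueEvents.QUEUE_SHUTDOWN_DONE
    print("0x%x" % mask)                  # -> "0x6", the string written to the
                                          #    queue_events sysfs node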
2467 def get_queue_events(self):
2468 if not self._q_events:
2469 self._q_events = self.QueueEvents.from_kobject(self)
2470 return self._q_events
2472 def get_vdi_uuid(self):
2473 if not self._vdi_uuid:
2474 self._vdi_uuid = self.read("sm-data/vdi-uuid")
2475 return self._vdi_uuid
2477 def pause_requested(self):
2478 return self.has_xs_key("pause")
2480 def shutdown_requested(self):
2481 return self.has_xs_key("shutdown-request")
2483 def shutdown_done(self):
2484 return self.has_xs_key("shutdown-done")
2486 def running(self):
2487 return self.has_xs_key('queue-0/kthread-pid')
2489 @classmethod
2490 def find_by_physical_device(cls, phy):
2491 for dev in cls.find():
2492 try:
2493 _phy = dev.get_physical_device()
2494 except cls.PhysicalDeviceError:
2495 continue
2497 if _phy == phy:
2498 yield dev
2500 @classmethod
2501 def find_by_tap_minor(cls, minor):
2502 phy = cls.PhysicalDevice(Tapdisk.major(), minor)
2503 return cls.find_by_physical_device(phy)
2505 @classmethod
2506 def find_by_tap(cls, tapdisk):
2507 return cls.find_by_tap_minor(tapdisk.minor)
2509 def has_tap(self):
2511 if not self.can_tap():
2512 return False
2514 phy = self.get_physical_device()
2515 if phy:
2516 return phy.is_tap()
2518 return False
2520 def is_bare_hvm(self):
2521 """File VDIs for bare HVM. These are directly accessible by Qemu."""
2522 try:
2523 self.get_physical_device()
2525 except self.PhysicalDeviceError as e:
2526 vdi_type = self.read("type")
2528 self.info("HVM VDI: type=%s" % vdi_type)
2530 if e.str is not None or vdi_type != 'file':
2531 raise
2533 return True
2535 return False
2537 def can_tap(self):
2538 return not self.is_bare_hvm()
2541class BlkbackEventHandler(UEventHandler):
2543 LOG_FACILITY = _syslog.LOG_DAEMON
2545 def __init__(self, ident=None, action=None):
2546 if not ident:
2547 ident = self.__class__.__name__
2549 self.ident = ident
2550 self._vbd = None
2551 self._tapdisk = None
2553 UEventHandler.__init__(self)
2555 def run(self):
2557 self.xs_path = self.getenv('XENBUS_PATH')
2558 openlog(str(self), 0, self.LOG_FACILITY)
2560 UEventHandler.run(self)
2562 def __str__(self):
2564 try:
2565 path = self.xs_path
2566 except:
2567 path = None
2569 try:
2570 action = self.get_action()
2571 except:
2572 action = None
2574 return "%s[%s](%s)" % (self.ident, action, path)
2576 def _log(self, prio, msg):
2577 syslog(prio, msg)
2578 util.SMlog("%s: " % self + msg)
2580 def info(self, msg):
2581 self._log(_syslog.LOG_INFO, msg)
2583 def warn(self, msg):
2584 self._log(_syslog.LOG_WARNING, "WARNING: " + msg)
2586 def error(self, msg):
2587 self._log(_syslog.LOG_ERR, "ERROR: " + msg)
2589 def get_vbd(self):
2590 if not self._vbd:
2591 self._vbd = Blkback.from_xs_path(self.xs_path)
2592 return self._vbd
2594 def get_tapdisk(self):
2595 if not self._tapdisk:
2596 minor = self.get_vbd().get_physical_device().minor
2597 self._tapdisk = Tapdisk.from_minor(minor)
2598 return self._tapdisk
2599 #
2600 # Events
2601 #
2603 def __add(self):
2604 vbd = self.get_vbd()
2605 # Manage blkback transitions
2606 # self._manage_vbd()
2608 vbd.create_physical_device()
2610 vbd.signal_hotplug()
2612 @retried(backoff=.5, limit=10)
2613 def add(self):
2614 try:
2615 self.__add()
2616 except Attribute.NoSuchAttribute as e:
2617 #
2618 # FIXME: KOBJ_ADD is racing backend.probe, which
2619 # registers device attributes. So poll a little.
2620 #
2621 self.warn("%s, still trying." % e)
2622 raise RetryLoop.TransientFailure(e)
2624 def __change(self):
2625 vbd = self.get_vbd()
2627 # 1. Pause or resume tapdisk (if there is one)
2629 if vbd.has_tap():
2630 pass
2631 #self._pause_update_tap()
2633 # 2. Signal Xapi.VBD.pause/resume completion
2635 self._signal_xapi()
2637 def change(self):
2638 vbd = self.get_vbd()
2640 # NB. Beware of spurious change events between shutdown
2641 # completion and device removal. Also, Xapi.VM.migrate will
2642 # hammer a couple extra shutdown-requests into the source VBD.
2644 while True:
2645 vbd.begin()
2647 if not vbd.exists() or \
2648 vbd.shutdown_done():
2649 break
2651 self.__change()
2653 if vbd.commit():
2654 return
2656 vbd.abort()
2657 self.info("spurious uevent, ignored.")
2659 def remove(self):
2660 vbd = self.get_vbd()
2662 vbd.signal_hotplug(False)
2664 ACTIONS = {'add': add,
2665 'change': change,
2666 'remove': remove}
2667 #
2668 # VDI.pause
2669 #
2671 def _tap_should_pause(self):
2672 """Enumerate all VBDs on our tapdisk. Returns true iff any was
2673 paused"""
2675 tapdisk = self.get_tapdisk()
2676 TapState = Tapdisk.PauseState
2678 PAUSED = 'P'
2679 RUNNING = 'R'
2680 PAUSED_SHUTDOWN = 'P,S'
2681 # NB. Shutdown/paused is special. We know it's not going
2682 # to restart again, so it's a RUNNING. Still better than
2683 # backtracking a removed device during Vbd.unplug completion.
2685 next = TapState.RUNNING
2686 vbds = {}
2688 for vbd in Blkback.find_by_tap(tapdisk):
2689 name = str(vbd)
2691 pausing = vbd.pause_requested()
2692 closing = vbd.shutdown_requested()
2693 running = vbd.running()
2695 if pausing:
2696 if closing and not running:
2697 vbds[name] = PAUSED_SHUTDOWN
2698 else:
2699 vbds[name] = PAUSED
2700 next = TapState.PAUSED
2702 else:
2703 vbds[name] = RUNNING
2705 self.info("tapdev%d (%s): %s -> %s"
2706 % (tapdisk.minor, tapdisk.pause_state(),
2707 vbds, next))
2709 return next == TapState.PAUSED
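The per-VBD classification above, spelled out (P / R / P,S are the tags that appear in the log line):

    # pausing  closing  running   tag     effect on the tapdisk
    #   yes      yes      no      'P,S'   none (VBD is already shutting down)
    #   yes      no       any     'P'     pause
    #   yes      yes      yes     'P'     pause
    #   no       any      any     'R'     none
    #
    # The tapdisk ends up PAUSED iff at least one VBD is tagged 'P'.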
2711 def _pause_update_tap(self):
2712 vbd = self.get_vbd()
2714 if self._tap_should_pause():
2715 self._pause_tap()
2716 else:
2717 self._resume_tap()
2719 def _pause_tap(self):
2720 tapdisk = self.get_tapdisk()
2722 if not tapdisk.is_paused():
2723 self.info("pausing %s" % tapdisk)
2724 tapdisk.pause()
2726 def _resume_tap(self):
2727 tapdisk = self.get_tapdisk()
2729 # NB. Raw VDI snapshots. Refresh the physical path and
2730 # type while resuming.
2731 vbd = self.get_vbd()
2732 vdi_uuid = vbd.get_vdi_uuid()
2734 if tapdisk.is_paused():
2735 self.info("loading vdi uuid=%s" % vdi_uuid)
2736 vdi = VDI.from_cli(vdi_uuid)
2737 _type = vdi.get_tap_type()
2738 path = vdi.get_phy_path()
2739 self.info("resuming %s on %s:%s" % (tapdisk, _type, path))
2740 tapdisk.unpause(_type, path)
2741 #
2742 # VBD.pause/shutdown
2743 #
2745 def _manage_vbd(self):
2746 vbd = self.get_vbd()
2747 # NB. Hook into VBD state transitions.
2749 events = vbd.get_queue_events()
2751 mask = 0
2752 mask |= events.QUEUE_PAUSE_DONE # pause/unpause
2753 mask |= events.QUEUE_SHUTDOWN_DONE # shutdown
2754 # TODO: mask |= events.QUEUE_SHUTDOWN_REQUEST, for shutdown=force
2755 # TODO: mask |= events.QUEUE_RUNNING, for ionice updates etc
2757 events.set_mask(mask)
2758 self.info("wrote %s = %#02x" % (events.path, mask))
2760 def _signal_xapi(self):
2761 vbd = self.get_vbd()
2763 pausing = vbd.pause_requested()
2764 closing = vbd.shutdown_requested()
2765 running = vbd.running()
2767 handled = 0
2769 if pausing and not running:
2770 if not vbd.has_xs_key('pause-done'):
2771 vbd.write('pause-done', '')
2772 handled += 1
2774 if not pausing:
2775 if vbd.has_xs_key('pause-done'):
2776 vbd.rm('pause-done')
2777 handled += 1
2779 if closing and not running:
2780 if not vbd.has_xs_key('shutdown-done'):
2781 vbd.write('shutdown-done', '')
2782 handled += 1
2784 if handled > 1:
2785 self.warn("handled %d events, " % handled +
2786 "pausing=%s closing=%s running=%s" % \
2787 (pausing, closing, running))
2789 if __name__ == '__main__': 2789 ↛ 2791: line 2789 didn't jump to line 2791, because the condition on line 2789 was never true
2791 import sys
2792 prog = os.path.basename(sys.argv[0])
2794 #
2795 # Simple CLI interface for manual operation
2796 #
2797 # tap.* level calls go down to local Tapdisk()s (by physical path)
2798 # vdi.* level calls run the plugin calls across host boundaries.
2799 #
2801 def usage(stream):
2802 print("usage: %s tap.{list|major}" % prog, file=stream)
2803 print(" %s tap.{launch|find|get|pause|" % prog + \
2804 "unpause|shutdown|stats} {[<tt>:]<path>} | [minor=]<int> | .. }", file=stream)
2805 print(" %s vbd.uevent" % prog, file=stream)
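Typical invocations of this maintenance CLI (the output shapes are illustrative):

    # blktap2.py tap.major                         -> print the blktap device major number
    # blktap2.py tap.launch vhd:/path/to/disk.vhd  -> launch a tapdisk and print it
    # blktap2.py tap.list                          -> one line per tapdisk, plus task pid
    #                                                 and mem-pool name
    # blktap2.py tap.pause 3                       -> pause the tapdisk with minor 3
    # blktap2.py vbd.uevent                        -> run as a udev handler for blkback VBDs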
2807 try:
2808 cmd = sys.argv[1]
2809 except IndexError:
2810 usage(sys.stderr)
2811 sys.exit(1)
2813 try:
2814 _class, method = cmd.split('.')
2815 except:
2816 usage(sys.stderr)
2817 sys.exit(1)
2819 #
2820 # Local Tapdisks
2821 #
2823 if cmd == 'tap.major':
2825 print("%d" % Tapdisk.major())
2827 elif cmd == 'tap.launch':
2829 tapdisk = Tapdisk.launch_from_arg(sys.argv[2])
2830 print("Launched %s" % tapdisk, file=sys.stderr)
2832 elif _class == 'tap':
2834 attrs = {}
2835 for item in sys.argv[2:]:
2836 try:
2837 key, val = item.split('=')
2838 attrs[key] = val
2839 continue
2840 except ValueError:
2841 pass
2843 try:
2844 attrs['minor'] = int(item)
2845 continue
2846 except ValueError:
2847 pass
2849 try:
2850 arg = Tapdisk.Arg.parse(item)
2851 attrs['_type'] = arg.type
2852 attrs['path'] = arg.path
2853 continue
2854 except Tapdisk.Arg.InvalidArgument:
2855 pass
2857 attrs['path'] = item
2859 if cmd == 'tap.list':
2861 for tapdisk in Tapdisk.list( ** attrs):
2862 blktap = tapdisk.get_blktap()
2863 print(tapdisk, end=' ')
2864 print("%s: task=%s pool=%s" % \
2865 (blktap,
2866 blktap.get_task_pid(),
2867 blktap.get_pool_name()))
2869 elif cmd == 'tap.vbds':
2870 # Find all Blkback instances for a given tapdisk
2872 for tapdisk in Tapdisk.list( ** attrs):
2873 print("%s:" % tapdisk, end=' ')
2874 for vbd in Blkback.find_by_tap(tapdisk):
2875 print(vbd, end=' ')
2876 print()
2878 else:
2880 if not attrs:
2881 usage(sys.stderr)
2882 sys.exit(1)
2884 try:
2885 tapdisk = Tapdisk.get( ** attrs)
2886 except TypeError:
2887 usage(sys.stderr)
2888 sys.exit(1)
2890 if cmd == 'tap.shutdown':
2891 # Shutdown a running tapdisk, or raise
2892 tapdisk.shutdown()
2893 print("Shut down %s" % tapdisk, file=sys.stderr)
2895 elif cmd == 'tap.pause':
2896 # Pause an unpaused tapdisk, or raise
2897 tapdisk.pause()
2898 print("Paused %s" % tapdisk, file=sys.stderr)
2900 elif cmd == 'tap.unpause':
2901 # Unpause a paused tapdisk, or raise
2902 tapdisk.unpause()
2903 print("Unpaused %s" % tapdisk, file=sys.stderr)
2905 elif cmd == 'tap.stats':
2906 # Gather tapdisk status
2907 stats = tapdisk.stats()
2908 print("%s:" % tapdisk)
2909 print(json.dumps(stats, indent=True))
2911 else:
2912 usage(sys.stderr)
2913 sys.exit(1)
2915 elif cmd == 'vbd.uevent':
2917 hnd = BlkbackEventHandler(cmd)
2919 if not sys.stdin.isatty():
2920 try:
2921 hnd.run()
2922 except Exception as e:
2923 hnd.error("Unhandled Exception: %s" % e)
2925 import traceback
2926 _type, value, tb = sys.exc_info()
2927 trace = traceback.format_exception(_type, value, tb)
2928 for entry in trace:
2929 for line in entry.rstrip().split('\n'):
2930 util.SMlog(line)
2931 else:
2932 hnd.run()
2934 elif cmd == 'vbd.list':
2936 for vbd in Blkback.find():
2937 print(vbd, \
2938 "physical-device=%s" % vbd.get_physical_device(), \
2939 "pause=%s" % vbd.pause_requested())
2941 else:
2942 usage(sys.stderr)
2943 sys.exit(1)