Coverage for drivers/blktap2.py : 39%

1#!/usr/bin/python3
2#
3# Copyright (C) Citrix Systems Inc.
4#
5# This program is free software; you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License as published
7# by the Free Software Foundation; version 2.1 only.
8#
9# This program is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with this program; if not, write to the Free Software Foundation, Inc.,
16# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17#
18# blktap2: blktap/tapdisk management layer
19#
20import grp
21import os
22import re
23import stat
24import time
25import copy
26from lock import Lock
27import util
28import xmlrpc.client
29import http.client
30import errno
31import signal
32import subprocess
33import syslog as _syslog
34import glob
35import json
36import xs_errors
37import XenAPI # pylint: disable=import-error
38import scsiutil
39from syslog import openlog, syslog
40from stat import * # S_ISBLK(), ...
41import nfs
43import resetvdis
44import vhdutil
45import lvhdutil
47import VDI as sm
49# For RRDD Plugin Registration
50from xmlrpc.client import ServerProxy, Transport
51from socket import socket, AF_UNIX, SOCK_STREAM
53try:
54 from linstorvolumemanager import log_drbd_openers
55 LINSTOR_AVAILABLE = True
56except ImportError:
57 LINSTOR_AVAILABLE = False
59PLUGIN_TAP_PAUSE = "tapdisk-pause"
61SOCKPATH = "/var/xapi/xcp-rrdd"
63NUM_PAGES_PER_RING = 32 * 11
64MAX_FULL_RINGS = 8
65POOL_NAME_KEY = "mem-pool"
66POOL_SIZE_KEY = "mem-pool-size-rings"
68ENABLE_MULTIPLE_ATTACH = "/etc/xensource/allow_multiple_vdi_attach"
69NO_MULTIPLE_ATTACH = not (os.path.exists(ENABLE_MULTIPLE_ATTACH))
72def locking(excType, override=True):
73 def locking2(op):
74 def wrapper(self, *args):
75 self.lock.acquire()
76 try:
77 try:
78 ret = op(self, * args)
79 except (util.CommandException, util.SMException, XenAPI.Failure) as e:
80 util.logException("BLKTAP2:%s" % op)
81 msg = str(e)
82 if isinstance(e, util.CommandException):
83 msg = "Command %s failed (%s): %s" % \
84 (e.cmd, e.code, e.reason)
85 if override:
86 raise xs_errors.XenError(excType, opterr=msg)
87 else:
88 raise
89 except:
90 util.logException("BLKTAP2:%s" % op)
91 raise
92 finally:
93 self.lock.release()    [93 ↛ exit: line 93 didn't except from function 'wrapper', because the raises on lines 86, 88 and 91 weren't executed]
94 return ret
95 return wrapper
96 return locking2
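# Usage sketch for locking(): the decorated method must live on an object that
# carries a .lock with acquire()/release() (the VDI class below builds one via
# Lock("vdi", uuid)).  The class here is a hypothetical stand-in; the real
# users are VDI._activate_locked and VDI._deactivate_locked further down.

class _LockingExample(object):
    def __init__(self, vdi_uuid):
        self.lock = Lock("vdi", vdi_uuid)

    @locking("VDIUnavailable")
    def guarded_op(self):
        # Runs with self.lock held; a util.CommandException raised here is
        # logged and re-raised as xs_errors.XenError("VDIUnavailable").
        return True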
99class RetryLoop(object):
101 def __init__(self, backoff, limit):
102 self.backoff = backoff
103 self.limit = limit
105 def __call__(self, f):
107 def loop(*__t, **__d):
108 attempt = 0
110 while True:
111 attempt += 1
113 try:
114 return f( * __t, ** __d)
116 except self.TransientFailure as e:
117 e = e.exception
119 if attempt >= self.limit:    [119 ↛ 120: the condition on line 119 was never true]
120 raise e
122 time.sleep(self.backoff)
124 return loop
126 class TransientFailure(Exception):
127 def __init__(self, exception):
128 self.exception = exception
131def retried(**args):
132 return RetryLoop( ** args)
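# Usage sketch for retried()/RetryLoop: the wrapped callable marks an error as
# retryable by raising RetryLoop.TransientFailure(original_exception); after
# `limit` attempts the original exception is re-raised.  TapCtl.list() and
# TapCtl.spawn() below follow exactly this pattern; the probe function here is
# a hypothetical stand-in.

@retried(backoff=.5, limit=3)
def _flaky_probe():
    try:
        return util.pread2(["/bin/true"])
    except util.CommandException as e:
        # Retried up to 3 times, 0.5s apart, then the CommandException
        # propagates to the caller.
        raise RetryLoop.TransientFailure(e)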
135class TapCtl(object):
136 """Tapdisk IPC utility calls."""
138 PATH = "/usr/sbin/tap-ctl"
140 def __init__(self, cmd, p):
141 self.cmd = cmd
142 self._p = p
143 self.stdout = p.stdout
145 class CommandFailure(Exception):
146 """TapCtl cmd failure."""
148 def __init__(self, cmd, **info):
149 self.cmd = cmd
150 self.info = info
152 def __str__(self):
153 items = self.info.items()
154 info = ", ".join("%s=%s" % item
155 for item in items)
156 return "%s failed: %s" % (self.cmd, info)
158 # Trying to get a non-existent attribute throws an AttributeError
159 # exception
160 def __getattr__(self, key):
161 if key in self.info:    [161 ↛ 163: the condition on line 161 was never false]
162 return self.info[key]
163 return object.__getattribute__(self, key)
165 @property
166 def has_status(self):
167 return 'status' in self.info
169 @property
170 def has_signal(self):
171 return 'signal' in self.info
173 # Retrieves the error code returned by the command. If the error code
174 # was not supplied at object-construction time, zero is returned.
175 def get_error_code(self):
176 key = 'status'
177 if key in self.info:
178 return self.info[key]
179 else:
180 return 0
182 @classmethod
183 def __mkcmd_real(cls, args):
184 return [cls.PATH] + [str(x) for x in args]
186 __next_mkcmd = __mkcmd_real
188 @classmethod
189 def _mkcmd(cls, args):
191 __next_mkcmd = cls.__next_mkcmd
192 cls.__next_mkcmd = cls.__mkcmd_real
194 return __next_mkcmd(args)
196 @classmethod
197 def _call(cls, args, quiet=False, input=None, text_mode=True):
198 """
199 Spawn a tap-ctl process. Return a TapCtl invocation.
200 Raises a TapCtl.CommandFailure if subprocess creation failed.
201 """
202 cmd = cls._mkcmd(args)
204 if not quiet:
205 util.SMlog(cmd)
206 try:
207 p = subprocess.Popen(cmd,
208 stdin=subprocess.PIPE,
209 stdout=subprocess.PIPE,
210 stderr=subprocess.PIPE,
211 close_fds=True,
212 universal_newlines=text_mode)
213 if input:
214 p.stdin.write(input)
215 p.stdin.close()
216 except OSError as e:
217 raise cls.CommandFailure(cmd, errno=e.errno)
219 return cls(cmd, p)
221 def _errmsg(self):
222 output = map(str.rstrip, self._p.stderr)
223 return "; ".join(output)
225 def _wait(self, quiet=False):
226 """
227 Reap the child tap-ctl process of this invocation.
228 Raises a TapCtl.CommandFailure on non-zero exit status.
229 """
230 status = self._p.wait()
231 if not quiet:
232 util.SMlog(" = %d" % status)
234 if status == 0:
235 return
237 info = {'errmsg': self._errmsg(),
238 'pid': self._p.pid}
240 if status < 0:
241 info['signal'] = -status
242 else:
243 info['status'] = status
245 raise self.CommandFailure(self.cmd, ** info)
247 @classmethod
248 def _pread(cls, args, quiet=False, input=None, text_mode=True):
249 """
250 Spawn a tap-ctl invocation and read a single line.
251 """
252 tapctl = cls._call(args=args, quiet=quiet, input=input,
253 text_mode=text_mode)
255 output = tapctl.stdout.readline().rstrip()
257 tapctl._wait(quiet)
258 return output
260 @staticmethod
261 def _maybe(opt, parm):
262 if parm is not None:
263 return [opt, parm]
264 return []
266 @classmethod
267 def __list(cls, minor=None, pid=None, _type=None, path=None):
268 args = ["list"]
269 args += cls._maybe("-m", minor)
270 args += cls._maybe("-p", pid)
271 args += cls._maybe("-t", _type)
272 args += cls._maybe("-f", path)
274 tapctl = cls._call(args, True)
276 for stdout_line in tapctl.stdout:
277 # FIXME: tap-ctl writes error messages to stdout and
278 # confuses this parser
279 if stdout_line == "blktap kernel module not installed\n":    [279 ↛ 282: the condition on line 279 was never true]
280 # This isn't pretty but (a) neither is confusing stdout/stderr
281 # and at least causes the error to describe the fix
282 raise Exception("blktap kernel module not installed: try 'modprobe blktap'")
283 row = {}
285 for field in stdout_line.rstrip().split(' ', 3):
286 bits = field.split('=')
287 if len(bits) == 2:    [287 ↛ 299: the condition on line 287 was never false]
288 key, val = field.split('=')
290 if key in ('pid', 'minor'):
291 row[key] = int(val, 10)
293 elif key in ('state'):
294 row[key] = int(val, 0x10)
296 else:
297 row[key] = val
298 else:
299 util.SMlog("Ignoring unexpected tap-ctl output: %s" % repr(field))
300 yield row
302 tapctl._wait(True)
304 @classmethod
305 @retried(backoff=.5, limit=10)
306 def list(cls, **args):
308 # FIXME. We typically get an EPROTO when uevents interleave
309 # with SM ops and a tapdisk shuts down under our feet. Should
310 # be fixed in SM.
312 try:
313 return list(cls.__list( ** args))
315 except cls.CommandFailure as e:
316 transient = [errno.EPROTO, errno.ENOENT]
317 if e.has_status and e.status in transient:
318 raise RetryLoop.TransientFailure(e)
319 raise
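# Shape of the rows returned by TapCtl.list(), as parsed by __list above:
# 'pid' and 'minor' are ints, 'state' is parsed base 16, everything else
# (e.g. 'args') stays a string.  Values below are illustrative only:
#
#     TapCtl.list(minor=2)
#     # -> [{'pid': 5432, 'minor': 2, 'state': 0,
#     #      'args': 'vhd:/var/run/sr-mount/<sr>/<vdi>.vhd'}]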
321 @classmethod
322 def allocate(cls, devpath=None):
323 args = ["allocate"]
324 args += cls._maybe("-d", devpath)
325 return cls._pread(args)
327 @classmethod
328 def free(cls, minor):
329 args = ["free", "-m", minor]
330 cls._pread(args)
332 @classmethod
333 @retried(backoff=.5, limit=10)
334 def spawn(cls):
335 args = ["spawn"]
336 try:
337 pid = cls._pread(args)
338 return int(pid)
339 except cls.CommandFailure as ce:
340 # intermittent failures to spawn. CA-292268
341 if ce.status == 1:
342 raise RetryLoop.TransientFailure(ce)
343 raise
345 @classmethod
346 def attach(cls, pid, minor):
347 args = ["attach", "-p", pid, "-m", minor]
348 cls._pread(args)
350 @classmethod
351 def detach(cls, pid, minor):
352 args = ["detach", "-p", pid, "-m", minor]
353 cls._pread(args)
355 @classmethod
356 def _load_key(cls, key_hash, vdi_uuid):
357 import plugins
359 return plugins.load_key(key_hash, vdi_uuid)
361 @classmethod
362 def open(cls, pid, minor, _type, _file, options):
363 params = Tapdisk.Arg(_type, _file)
364 args = ["open", "-p", pid, "-m", minor, '-a', str(params)]
365 text_mode = True
366 input = None
367 if options.get("rdonly"):
368 args.append('-R')
369 if options.get("lcache"):
370 args.append("-r")
371 if options.get("existing_prt") is not None:
372 args.append("-e")
373 args.append(str(options["existing_prt"]))
374 if options.get("secondary"):
375 args.append("-2")
376 args.append(options["secondary"])
377 if options.get("standby"):
378 args.append("-s")
379 if options.get("timeout"):
380 args.append("-t")
381 args.append(str(options["timeout"]))
382 if not options.get("o_direct", True):
383 args.append("-D")
384 if options.get('cbtlog'):
385 args.extend(['-C', options['cbtlog']])
386 if options.get('key_hash'):
387 key_hash = options['key_hash']
388 vdi_uuid = options['vdi_uuid']
389 key = cls._load_key(key_hash, vdi_uuid)
391 if not key:
392 raise util.SMException("No key found with key hash {}".format(key_hash))
393 input = key
394 text_mode = False
395 args.append('-E')
397 cls._pread(args=args, input=input, text_mode=text_mode)
399 @classmethod
400 def close(cls, pid, minor, force=False):
401 args = ["close", "-p", pid, "-m", minor, "-t", "120"]
402 if force:
403 args += ["-f"]
404 cls._pread(args)
406 @classmethod
407 def pause(cls, pid, minor):
408 args = ["pause", "-p", pid, "-m", minor]
409 cls._pread(args)
411 @classmethod
412 def unpause(cls, pid, minor, _type=None, _file=None, mirror=None,
413 cbtlog=None):
414 args = ["unpause", "-p", pid, "-m", minor]
415 if mirror:
416 args.extend(["-2", mirror])
417 if _type and _file:
418 params = Tapdisk.Arg(_type, _file)
419 args += ["-a", str(params)]
420 if cbtlog:
421 args.extend(["-c", cbtlog])
422 cls._pread(args)
424 @classmethod
425 def shutdown(cls, pid):
426 # TODO: This should be a real tap-ctl command
427 os.kill(pid, signal.SIGTERM)
428 os.waitpid(pid, 0)
430 @classmethod
431 def stats(cls, pid, minor):
432 args = ["stats", "-p", pid, "-m", minor]
433 return cls._pread(args, quiet=True)
435 @classmethod
436 def major(cls):
437 args = ["major"]
438 major = cls._pread(args)
439 return int(major)
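# The TapCtl primitives above compose into the tapdisk lifecycle that
# Tapdisk.launch_on_tap() implements below: spawn, attach, open, then tear
# down in reverse order.  A compressed sketch with error handling omitted
# (see launch_on_tap for the real sequence, including retries and cleanup):

def _tapdisk_lifecycle_sketch(minor, path):
    pid = TapCtl.spawn()
    TapCtl.attach(pid, minor)
    TapCtl.open(pid, minor, 'vhd', path, {"rdonly": False})
    # ... guest I/O is served via /dev/xen/blktap-2/tapdev<minor> ...
    TapCtl.close(pid, minor)
    TapCtl.detach(pid, minor)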
442class TapdiskExists(Exception):
443 """Tapdisk already running."""
445 def __init__(self, tapdisk):
446 self.tapdisk = tapdisk
448 def __str__(self):
449 return "%s already running" % self.tapdisk
452class TapdiskNotRunning(Exception):
453 """No such Tapdisk."""
455 def __init__(self, **attrs):
456 self.attrs = attrs
458 def __str__(self):
459 items = iter(self.attrs.items())
460 attrs = ", ".join("%s=%s" % attr
461 for attr in items)
462 return "No such Tapdisk(%s)" % attrs
465class TapdiskNotUnique(Exception):
466 """More than one tapdisk on one path."""
468 def __init__(self, tapdisks):
469 self.tapdisks = tapdisks
471 def __str__(self):
472 tapdisks = map(str, self.tapdisks)
473 return "Found multiple tapdisks: %s" % tapdisks
476class TapdiskFailed(Exception):
477 """Tapdisk launch failure."""
479 def __init__(self, arg, err):
480 self.arg = arg
481 self.err = err
483 def __str__(self):
484 return "Tapdisk(%s): %s" % (self.arg, self.err)
486 def get_error(self):
487 return self.err
490class TapdiskInvalidState(Exception):
491 """Tapdisk pause/unpause failure"""
493 def __init__(self, tapdisk):
494 self.tapdisk = tapdisk
496 def __str__(self):
497 return str(self.tapdisk)
500def mkdirs(path, mode=0o777):
501 if not os.path.exists(path):
502 parent, subdir = os.path.split(path)
503 assert parent != path
504 try:
505 if parent:
506 mkdirs(parent, mode)
507 if subdir:
508 os.mkdir(path, mode)
509 except OSError as e:
510 if e.errno != errno.EEXIST:
511 raise
514class KObject(object):
516 SYSFS_CLASSTYPE = None
518 def sysfs_devname(self):
519 raise NotImplementedError("sysfs_devname is undefined")
522class Attribute(object):
524 SYSFS_NODENAME = None
526 def __init__(self, path):
527 self.path = path
529 @classmethod
530 def from_kobject(cls, kobj):
531 path = "%s/%s" % (kobj.sysfs_path(), cls.SYSFS_NODENAME)
532 return cls(path)
534 class NoSuchAttribute(Exception):
535 def __init__(self, name):
536 self.name = name
538 def __str__(self):
539 return "No such attribute: %s" % self.name
541 def _open(self, mode='r'):
542 try:
543 return open(self.path, mode)
544 except IOError as e:
545 if e.errno == errno.ENOENT:
546 raise self.NoSuchAttribute(self)
547 raise
549 def readline(self):
550 f = self._open('r')
551 s = f.readline().rstrip()
552 f.close()
553 return s
555 def writeline(self, val):
556 f = self._open('w')
557 f.write(val)
558 f.close()
561class ClassDevice(KObject):
563 @classmethod
564 def sysfs_class_path(cls):
565 return "/sys/class/%s" % cls.SYSFS_CLASSTYPE
567 def sysfs_path(self):
568 return "%s/%s" % (self.sysfs_class_path(),
569 self.sysfs_devname())
572class Blktap(ClassDevice):
574 DEV_BASEDIR = '/dev/xen/blktap-2'
576 SYSFS_CLASSTYPE = "blktap2"
578 def __init__(self, minor):
579 self.minor = minor
580 self._pool = None
581 self._task = None
583 @classmethod
584 def allocate(cls):
585 # FIXME. Should rather go into init.
586 mkdirs(cls.DEV_BASEDIR)
588 devname = TapCtl.allocate()
589 minor = Tapdisk._parse_minor(devname)
590 return cls(minor)
592 def free(self):
593 TapCtl.free(self.minor)
595 def __str__(self):
596 return "%s(minor=%d)" % (self.__class__.__name__, self.minor)
598 def sysfs_devname(self):
599 return "blktap!blktap%d" % self.minor
601 class Pool(Attribute):
602 SYSFS_NODENAME = "pool"
604 def get_pool_attr(self):
605 if not self._pool:
606 self._pool = self.Pool.from_kobject(self)
607 return self._pool
609 def get_pool_name(self):
610 return self.get_pool_attr().readline()
612 def set_pool_name(self, name):
613 self.get_pool_attr().writeline(name)
615 def set_pool_size(self, pages):
616 self.get_pool().set_size(pages)
618 def get_pool(self):
619 return BlktapControl.get_pool(self.get_pool_name())
621 def set_pool(self, pool):
622 self.set_pool_name(pool.name)
624 class Task(Attribute):
625 SYSFS_NODENAME = "task"
627 def get_task_attr(self):
628 if not self._task:
629 self._task = self.Task.from_kobject(self)
630 return self._task
632 def get_task_pid(self):
633 pid = self.get_task_attr().readline()
634 try:
635 return int(pid)
636 except ValueError:
637 return None
639 def find_tapdisk(self):
640 pid = self.get_task_pid()
641 if pid is None:
642 return None
644 return Tapdisk.find(pid=pid, minor=self.minor)
646 def get_tapdisk(self):
647 tapdisk = self.find_tapdisk()
648 if not tapdisk:
649 raise TapdiskNotRunning(minor=self.minor)
650 return tapdisk
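# How the sysfs helpers above compose for a Blktap instance: ClassDevice
# supplies /sys/class/<SYSFS_CLASSTYPE>, Blktap.sysfs_devname() appends the
# per-minor node, and the Attribute subclasses (Pool, Task) address files
# inside it.  The paths asserted here are what the code above constructs for
# minor 0:

tap0 = Blktap(0)
assert tap0.sysfs_path() == "/sys/class/blktap2/blktap!blktap0"
assert Blktap.Pool.from_kobject(tap0).path == \
    "/sys/class/blktap2/blktap!blktap0/pool"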
653class Tapdisk(object):
655 TYPES = ['aio', 'vhd']
657 def __init__(self, pid, minor, _type, path, state):
658 self.pid = pid
659 self.minor = minor
660 self.type = _type
661 self.path = path
662 self.state = state
663 self._dirty = False
664 self._blktap = None
666 def __str__(self):
667 state = self.pause_state()
668 return "Tapdisk(%s, pid=%d, minor=%s, state=%s)" % \
669 (self.get_arg(), self.pid, self.minor, state)
671 @classmethod
672 def list(cls, **args):
674 for row in TapCtl.list( ** args):
676 args = {'pid': None,
677 'minor': None,
678 'state': None,
679 '_type': None,
680 'path': None}
682 for key, val in row.items():
683 if key in args:
684 args[key] = val
686 if 'args' in row:    [686 ↛ 691: the condition on line 686 was never false]
687 image = Tapdisk.Arg.parse(row['args'])
688 args['_type'] = image.type
689 args['path'] = image.path
691 if None in args.values():    [691 ↛ 692: the condition on line 691 was never true]
692 continue
694 yield Tapdisk( ** args)
696 @classmethod
697 def find(cls, **args):
699 found = list(cls.list( ** args))
701 if len(found) > 1:
702 raise TapdiskNotUnique(found)
704 if found:
705 return found[0]
707 return None
709 @classmethod
710 def find_by_path(cls, path):
711 return cls.find(path=path)
713 @classmethod
714 def find_by_minor(cls, minor):
715 return cls.find(minor=minor)
717 @classmethod
718 def get(cls, **attrs):
720 tapdisk = cls.find( ** attrs)
722 if not tapdisk:
723 raise TapdiskNotRunning( ** attrs)
725 return tapdisk
727 @classmethod
728 def from_path(cls, path):
729 return cls.get(path=path)
731 @classmethod
732 def from_minor(cls, minor):
733 return cls.get(minor=minor)
735 @classmethod
736 def __from_blktap(cls, blktap):
737 tapdisk = cls.from_minor(minor=blktap.minor)
738 tapdisk._blktap = blktap
739 return tapdisk
741 def get_blktap(self):
742 if not self._blktap:
743 self._blktap = Blktap(self.minor)
744 return self._blktap
746 class Arg:
748 def __init__(self, _type, path):
749 self.type = _type
750 self.path = path
752 def __str__(self):
753 return "%s:%s" % (self.type, self.path)
755 @classmethod
756 def parse(cls, arg):
758 try:
759 _type, path = arg.split(":", 1)
760 except ValueError:
761 raise cls.InvalidArgument(arg)
763 if _type not in Tapdisk.TYPES:    [763 ↛ 764: the condition on line 763 was never true]
764 raise cls.InvalidType(_type)
766 return cls(_type, path)
768 class InvalidType(Exception):
769 def __init__(self, _type):
770 self.type = _type
772 def __str__(self):
773 return "Not a Tapdisk type: %s" % self.type
775 class InvalidArgument(Exception):
776 def __init__(self, arg):
777 self.arg = arg
779 def __str__(self):
780 return "Not a Tapdisk image: %s" % self.arg
782 def get_arg(self):
783 return self.Arg(self.type, self.path)
785 def get_devpath(self):
786 return "%s/tapdev%d" % (Blktap.DEV_BASEDIR, self.minor)
788 @classmethod
789 def launch_from_arg(cls, arg):
790 arg = cls.Arg.parse(arg)
791 return cls.launch(arg.path, arg.type, False)
793 @classmethod
794 def cgclassify(cls, pid):
796 # We don't provide any <controllers>:<path>
797 # so cgclassify uses /etc/cgrules.conf which
798 # we have configured in the spec file.
799 cmd = ["cgclassify", str(pid)]
800 try:
801 util.pread2(cmd)
802 except util.CommandException as e:
803 util.logException(e)
805 @classmethod
806 def spawn(cls):
807 return TapCtl.spawn()
809 @classmethod
810 def launch_on_tap(cls, blktap, path, _type, options):
812 tapdisk = cls.find_by_path(path)
813 if tapdisk:    [813 ↛ 814: the condition on line 813 was never true]
814 raise TapdiskExists(tapdisk)
816 minor = blktap.minor
818 try:
819 pid = cls.spawn()
820 cls.cgclassify(pid)
821 try:
822 TapCtl.attach(pid, minor)
824 try:
825 retry_open = 0
826 while True:
827 try:
828 TapCtl.open(pid, minor, _type, path, options)
829 except TapCtl.CommandFailure as e:
830 err = (
831 'status' in e.info and e.info['status']
832 ) or None
833 if err in (errno.EIO, errno.EROFS, errno.EAGAIN):
834 if retry_open < 5:
835 retry_open += 1
836 time.sleep(1)
837 continue
838 if LINSTOR_AVAILABLE and err == errno.EROFS:
839 log_drbd_openers(path)
840 break
841 try:
842 tapdisk = cls.__from_blktap(blktap)
843 node = '/sys/dev/block/%d:%d' % (tapdisk.major(), tapdisk.minor)
844 util.set_scheduler_sysfs_node(node, ['none', 'noop'])
845 return tapdisk
846 except:
847 TapCtl.close(pid, minor)
848 raise
850 except:
851 TapCtl.detach(pid, minor)
852 raise
854 except:
855 try:
856 TapCtl.shutdown(pid)
857 except:
858 # Best effort to shutdown
859 pass
860 raise
862 except TapCtl.CommandFailure as ctl:
863 util.logException(ctl)
864 if ('/dev/xapi/cd/' in path and
865 'status' in ctl.info and
866 ctl.info['status'] == 123): # ENOMEDIUM (No medium found)
867 raise xs_errors.XenError('TapdiskDriveEmpty')
868 else:
869 raise TapdiskFailed(cls.Arg(_type, path), ctl)
871 @classmethod
872 def launch(cls, path, _type, rdonly):
873 blktap = Blktap.allocate()
874 try:
875 return cls.launch_on_tap(blktap, path, _type, {"rdonly": rdonly})
876 except:
877 blktap.free()
878 raise
880 def shutdown(self, force=False):
882 TapCtl.close(self.pid, self.minor, force)
884 TapCtl.detach(self.pid, self.minor)
886 self.get_blktap().free()
888 def pause(self):
890 if not self.is_running():
891 raise TapdiskInvalidState(self)
893 TapCtl.pause(self.pid, self.minor)
895 self._set_dirty()
897 def unpause(self, _type=None, path=None, mirror=None, cbtlog=None):
899 if not self.is_paused():
900 raise TapdiskInvalidState(self)
902 # FIXME: should the arguments be optional?
903 if _type is None:
904 _type = self.type
905 if path is None:
906 path = self.path
908 TapCtl.unpause(self.pid, self.minor, _type, path, mirror=mirror,
909 cbtlog=cbtlog)
911 self._set_dirty()
913 def stats(self):
914 return json.loads(TapCtl.stats(self.pid, self.minor))
915 #
916 # NB. dirty/refresh: reload attributes on next access
917 #
919 def _set_dirty(self):
920 self._dirty = True
922 def _refresh(self, __get):
923 t = self.from_minor(__get('minor'))
924 self.__init__(t.pid, t.minor, t.type, t.path, t.state)
926 def __getattribute__(self, name):
927 def __get(name):
928 # NB. avoid recursion
929 return object.__getattribute__(self, name)
931 if __get('_dirty') and \    [931 ↛ 933: the condition on line 931 was never true]
932 name in ['minor', 'type', 'path', 'state']:
933 self._refresh(__get)
934 self._dirty = False
936 return __get(name)
938 class PauseState:
939 RUNNING = 'R'
940 PAUSING = 'r'
941 PAUSED = 'P'
943 class Flags:
944 DEAD = 0x0001
945 CLOSED = 0x0002
946 QUIESCE_REQUESTED = 0x0004
947 QUIESCED = 0x0008
948 PAUSE_REQUESTED = 0x0010
949 PAUSED = 0x0020
950 SHUTDOWN_REQUESTED = 0x0040
951 LOCKING = 0x0080
952 RETRY_NEEDED = 0x0100
953 LOG_DROPPED = 0x0200
955 PAUSE_MASK = PAUSE_REQUESTED | PAUSED
957 def is_paused(self):
958 return not not (self.state & self.Flags.PAUSED)
960 def is_running(self):
961 return not (self.state & self.Flags.PAUSE_MASK)
963 def pause_state(self):
964 if self.state & self.Flags.PAUSED:    [964 ↛ 965: the condition on line 964 was never true]
965 return self.PauseState.PAUSED
967 if self.state & self.Flags.PAUSE_REQUESTED:    [967 ↛ 968: the condition on line 967 was never true]
968 return self.PauseState.PAUSING
970 return self.PauseState.RUNNING
972 @staticmethod
973 def _parse_minor(devpath):
974 regex = r'%s/(blktap|tapdev)(\d+)$' % Blktap.DEV_BASEDIR
975 pattern = re.compile(regex)
976 groups = pattern.search(devpath)
977 if not groups:
978 raise Exception("malformed tap device: '%s' (%s) " % (devpath, regex))
980 minor = groups.group(2)
981 return int(minor)
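# _parse_minor accepts either node name under Blktap.DEV_BASEDIR, e.g.:
#
#     Tapdisk._parse_minor("/dev/xen/blktap-2/tapdev3")   # -> 3
#     Tapdisk._parse_minor("/dev/xen/blktap-2/blktap3")   # -> 3
#
# Anything else raises the "malformed tap device" exception above.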
983 _major = None
985 @classmethod
986 def major(cls):
987 if cls._major:
988 return cls._major
990 devices = open("/proc/devices")
991 for line in devices:
993 row = line.rstrip().split(' ')
994 if len(row) != 2:
995 continue
997 major, name = row
998 if name != 'tapdev':
999 continue
1001 cls._major = int(major)
1002 break
1004 devices.close()
1005 return cls._major
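# Quick check of the pause-state logic above, assuming a raw `state` value as
# parsed (base 16) by TapCtl.__list:

_state = Tapdisk.Flags.PAUSE_REQUESTED | Tapdisk.Flags.PAUSED
assert _state == Tapdisk.Flags.PAUSE_MASK == 0x0030
# A tapdisk carrying this state reports is_paused() -> True,
# is_running() -> False and pause_state() -> Tapdisk.PauseState.PAUSED ('P').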
1008class VDI(object):
1009 """SR.vdi driver decorator for blktap2"""
1011 CONF_KEY_ALLOW_CACHING = "vdi_allow_caching"
1012 CONF_KEY_MODE_ON_BOOT = "vdi_on_boot"
1013 CONF_KEY_CACHE_SR = "local_cache_sr"
1014 CONF_KEY_O_DIRECT = "o_direct"
1015 LOCK_CACHE_SETUP = "cachesetup"
1017 ATTACH_DETACH_RETRY_SECS = 120
1019 # number of seconds on top of NFS timeo mount option the tapdisk should
1020 # wait before reporting errors. This is to allow a retry to succeed in case
1021 # packets were lost the first time around, which prevented the NFS client
1022 # from returning before the timeo is reached even if the NFS server did
1023 # come back earlier
1024 TAPDISK_TIMEOUT_MARGIN = 30
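# Worked example of the margin, matching the arithmetic in activate() below:
# an NFS SR mounted with timeo=100 (deciseconds) gives
# int((100 + 5) / 10) = 10 seconds, so tapdisk is opened with
# "-t" (10 + TAPDISK_TIMEOUT_MARGIN) = 40 seconds.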
1026 def __init__(self, uuid, target, driver_info):
1027 self.target = self.TargetDriver(target, driver_info)
1028 self._vdi_uuid = uuid
1029 self._session = target.session
1030 self.xenstore_data = scsiutil.update_XS_SCSIdata(uuid, scsiutil.gen_synthetic_page_data(uuid))
1031 self.__o_direct = None
1032 self.__o_direct_reason = None
1033 self.lock = Lock("vdi", uuid)
1034 self.tap = None
1036 def get_o_direct_capability(self, options):
1037 """Returns True/False based on licensing and caching_params"""
1038 if self.__o_direct is not None:    [1038 ↛ 1039: the condition on line 1038 was never true]
1039 return self.__o_direct, self.__o_direct_reason
1041 if util.read_caching_is_restricted(self._session):    [1041 ↛ 1042: the condition on line 1041 was never true]
1042 self.__o_direct = True
1043 self.__o_direct_reason = "LICENSE_RESTRICTION"
1044 elif not ((self.target.vdi.sr.handles("nfs") or self.target.vdi.sr.handles("ext") or self.target.vdi.sr.handles("smb"))):    [1044 ↛ 1047: the condition on line 1044 was never false]
1045 self.__o_direct = True
1046 self.__o_direct_reason = "SR_NOT_SUPPORTED"
1047 elif not (options.get("rdonly") or self.target.vdi.parent):
1048 util.SMlog(self.target.vdi)
1049 self.__o_direct = True
1050 self.__o_direct_reason = "NO_RO_IMAGE"
1051 elif options.get("rdonly") and not self.target.vdi.parent:
1052 self.__o_direct = True
1053 self.__o_direct_reason = "RO_WITH_NO_PARENT"
1054 elif options.get(self.CONF_KEY_O_DIRECT):
1055 self.__o_direct = True
1056 self.__o_direct_reason = "SR_OVERRIDE"
1058 if self.__o_direct is None:    [1058 ↛ 1059: the condition on line 1058 was never true]
1059 self.__o_direct = False
1060 self.__o_direct_reason = ""
1062 return self.__o_direct, self.__o_direct_reason
1064 @classmethod
1065 def from_cli(cls, uuid):
1066 import VDI as sm
1068 session = XenAPI.xapi_local()
1069 session.xenapi.login_with_password('root', '', '', 'SM')
1071 target = sm.VDI.from_uuid(session, uuid)
1072 driver_info = target.sr.srcmd.driver_info
1074 session.xenapi.session.logout()
1076 return cls(uuid, target, driver_info)
1078 @staticmethod
1079 def _tap_type(vdi_type):
1080 """Map a VDI type (e.g. 'raw') to a tapdisk driver type (e.g. 'aio')"""
1081 return {
1082 'raw': 'aio',
1083 'vhd': 'vhd',
1084 'iso': 'aio', # for ISO SR
1085 'aio': 'aio', # for LVHD
1086 'file': 'aio',
1087 'phy': 'aio'
1088 }[vdi_type]
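# Example mappings: VDI._tap_type('raw') -> 'aio', VDI._tap_type('vhd') ->
# 'vhd'; an unknown vdi_type raises KeyError.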
1090 def get_tap_type(self):
1091 vdi_type = self.target.get_vdi_type()
1092 return VDI._tap_type(vdi_type)
1094 def get_phy_path(self):
1095 return self.target.get_vdi_path()
1097 class UnexpectedVDIType(Exception):
1099 def __init__(self, vdi_type, target):
1100 self.vdi_type = vdi_type
1101 self.target = target
1103 def __str__(self):
1104 return \
1105 "Target %s has unexpected VDI type '%s'" % \
1106 (type(self.target), self.vdi_type)
1108 VDI_PLUG_TYPE = {'phy': 'phy', # for NETAPP
1109 'raw': 'phy',
1110 'aio': 'tap', # for LVHD raw nodes
1111 'iso': 'tap', # for ISOSR
1112 'file': 'tap',
1113 'vhd': 'tap'}
1115 def tap_wanted(self):
1116 # 1. Let the target vdi_type decide
1118 vdi_type = self.target.get_vdi_type()
1120 try:
1121 plug_type = self.VDI_PLUG_TYPE[vdi_type]
1122 except KeyError:
1123 raise self.UnexpectedVDIType(vdi_type,
1124 self.target.vdi)
1126 if plug_type == 'tap':    [1126 ↛ 1127: the condition on line 1126 was never true]
1127 return True
1128 elif self.target.vdi.sr.handles('udev'):    [1128 ↛ 1134: the condition on line 1128 was never false]
1129 return True
1130 # 2. Otherwise, there may be more reasons
1131 #
1132 # .. TBD
1134 return False
1136 class TargetDriver:
1137 """Safe target driver access."""
1138 # NB. *Must* test caps for optional calls. Some targets
1139 # actually implement some slots, but do not enable them. Just
1140 # try/except would risk breaking compatibility.
1142 def __init__(self, vdi, driver_info):
1143 self.vdi = vdi
1144 self._caps = driver_info['capabilities']
1146 def has_cap(self, cap):
1147 """Determine if target has given capability"""
1148 return cap in self._caps
1150 def attach(self, sr_uuid, vdi_uuid):
1151 #assert self.has_cap("VDI_ATTACH")
1152 return self.vdi.attach(sr_uuid, vdi_uuid)
1154 def detach(self, sr_uuid, vdi_uuid):
1155 #assert self.has_cap("VDI_DETACH")
1156 self.vdi.detach(sr_uuid, vdi_uuid)
1158 def activate(self, sr_uuid, vdi_uuid):
1159 if self.has_cap("VDI_ACTIVATE"):
1160 return self.vdi.activate(sr_uuid, vdi_uuid)
1162 def deactivate(self, sr_uuid, vdi_uuid):
1163 if self.has_cap("VDI_DEACTIVATE"):
1164 self.vdi.deactivate(sr_uuid, vdi_uuid)
1165 #def resize(self, sr_uuid, vdi_uuid, size):
1166 # return self.vdi.resize(sr_uuid, vdi_uuid, size)
1168 def get_vdi_type(self):
1169 _type = self.vdi.vdi_type
1170 if not _type:
1171 _type = self.vdi.sr.sr_vditype
1172 if not _type:
1173 raise VDI.UnexpectedVDIType(_type, self.vdi)
1174 return _type
1176 def get_vdi_path(self):
1177 return self.vdi.path
1179 class Link(object):
1180 """Relink a node under a common name"""
1181 # NB. We have to provide the device node path during
1182 # VDI.attach, but currently do not allocate the tapdisk minor
1183 # before VDI.activate. Therefore those link steps where we
1184 # relink existing devices under deterministic path names.
1186 BASEDIR = None
1188 def _mklink(self, target):
1189 raise NotImplementedError("_mklink is not defined")
1191 def _equals(self, target):
1192 raise NotImplementedError("_equals is not defined")
1194 def __init__(self, path):
1195 self._path = path
1197 @classmethod
1198 def from_name(cls, name):
1199 path = "%s/%s" % (cls.BASEDIR, name)
1200 return cls(path)
1202 @classmethod
1203 def from_uuid(cls, sr_uuid, vdi_uuid):
1204 name = "%s/%s" % (sr_uuid, vdi_uuid)
1205 return cls.from_name(name)
1207 def path(self):
1208 return self._path
1210 def stat(self):
1211 return os.stat(self.path())
1213 def mklink(self, target):
1215 path = self.path()
1216 util.SMlog("%s -> %s" % (self, target))
1218 mkdirs(os.path.dirname(path))
1219 try:
1220 self._mklink(target)
1221 except OSError as e:
1222 # We do unlink during teardown, but have to stay
1223 # idempotent. However, a *wrong* target should never
1224 # be seen.
1225 if e.errno != errno.EEXIST:
1226 raise
1227 assert self._equals(target), "'%s' not equal to '%s'" % (path, target)
1229 def unlink(self):
1230 try:
1231 os.unlink(self.path())
1232 except OSError as e:
1233 if e.errno != errno.ENOENT:
1234 raise
1236 def __str__(self):
1237 path = self.path()
1238 return "%s(%s)" % (self.__class__.__name__, path)
1240 class SymLink(Link):
1241 """Symlink some file to a common name"""
1243 def readlink(self):
1244 return os.readlink(self.path())
1246 def symlink(self):
1247 return self.path()
1249 def _mklink(self, target):
1250 os.symlink(target, self.path())
1252 def _equals(self, target):
1253 return self.readlink() == target
1255 class DeviceNode(Link):
1256 """Relink a block device node to a common name"""
1258 @classmethod
1259 def _real_stat(cls, target):
1260 """stat() not on @target, but its realpath()"""
1261 _target = os.path.realpath(target)
1262 return os.stat(_target)
1264 @classmethod
1265 def is_block(cls, target):
1266 """Whether @target refers to a block device."""
1267 return S_ISBLK(cls._real_stat(target).st_mode)
1269 def _mklink(self, target):
1271 st = self._real_stat(target)
1272 if not S_ISBLK(st.st_mode):
1273 raise self.NotABlockDevice(target, st)
1275 # set group read for disk group as well as root
1276 os.mknod(self.path(), st.st_mode | stat.S_IRGRP, st.st_rdev)
1277 os.chown(self.path(), st.st_uid, grp.getgrnam("disk").gr_gid)
1279 def _equals(self, target):
1280 target_rdev = self._real_stat(target).st_rdev
1281 return self.stat().st_rdev == target_rdev
1283 def rdev(self):
1284 st = self.stat()
1285 assert S_ISBLK(st.st_mode)
1286 return os.major(st.st_rdev), os.minor(st.st_rdev)
1288 class NotABlockDevice(Exception):
1290 def __init__(self, path, st):
1291 self.path = path
1292 self.st = st
1294 def __str__(self):
1295 return "%s is not a block device: %s" % (self.path, self.st)
1297 class Hybrid(Link):
1299 def __init__(self, path):
1300 VDI.Link.__init__(self, path)
1301 self._devnode = VDI.DeviceNode(path)
1302 self._symlink = VDI.SymLink(path)
1304 def rdev(self):
1305 st = self.stat()
1306 if S_ISBLK(st.st_mode):
1307 return self._devnode.rdev()
1308 raise self._devnode.NotABlockDevice(self.path(), st)
1310 def mklink(self, target):
1311 if self._devnode.is_block(target):
1312 self._obj = self._devnode
1313 else:
1314 self._obj = self._symlink
1315 self._obj.mklink(target)
1317 def _equals(self, target):
1318 return self._obj._equals(target)
1320 class PhyLink(SymLink):
1321 BASEDIR = "/dev/sm/phy"
1322 # NB. Cannot use DeviceNodes, e.g. FileVDIs aren't bdevs.
1324 class NBDLink(SymLink):
1326 BASEDIR = "/run/blktap-control/nbd"
1328 class BackendLink(Hybrid):
1329 BASEDIR = "/dev/sm/backend"
1330 # NB. Could be SymLinks as well, but saving major,minor pairs in
1331 # Links enables neat state capturing when managing Tapdisks. Note
1332 # that we essentially have a tap-ctl list replacement here. For
1333 # now make it a 'Hybrid'. Likely to collapse into a DeviceNode as
1334 # soon as ISOs are tapdisks.
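# Resulting namespace layout for the Link subclasses above, via
# from_uuid(sr_uuid, vdi_uuid) -> "<BASEDIR>/<sr_uuid>/<vdi_uuid>":
#
#     PhyLink      /dev/sm/phy/<sr_uuid>/<vdi_uuid>               (symlink to the physical node)
#     BackendLink  /dev/sm/backend/<sr_uuid>/<vdi_uuid>           (tapdev node or symlink)
#     NBDLink      /run/blktap-control/nbd/<sr_uuid>/<vdi_uuid>   (symlink to the per-tapdisk NBD socket)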
1336 @staticmethod
1337 def _tap_activate(phy_path, vdi_type, sr_uuid, options, pool_size=None):
1339 tapdisk = Tapdisk.find_by_path(phy_path)
1340 if not tapdisk:    [1340 ↛ 1341: the condition on line 1340 was never true]
1341 blktap = Blktap.allocate()
1342 blktap.set_pool_name(sr_uuid)
1343 if pool_size:
1344 blktap.set_pool_size(pool_size)
1346 try:
1347 tapdisk = \
1348 Tapdisk.launch_on_tap(blktap,
1349 phy_path,
1350 VDI._tap_type(vdi_type),
1351 options)
1352 except:
1353 blktap.free()
1354 raise
1355 util.SMlog("tap.activate: Launched %s" % tapdisk)
1357 else:
1358 util.SMlog("tap.activate: Found %s" % tapdisk)
1360 return tapdisk.get_devpath(), tapdisk
1362 @staticmethod
1363 def _tap_deactivate(minor):
1365 try:
1366 tapdisk = Tapdisk.from_minor(minor)
1367 except TapdiskNotRunning as e:
1368 util.SMlog("tap.deactivate: Warning, %s" % e)
1369 # NB. Should not be here unless the agent refcount
1370 # broke. Also, a clean shutdown should not have leaked
1371 # the recorded minor.
1372 else:
1373 tapdisk.shutdown()
1374 util.SMlog("tap.deactivate: Shut down %s" % tapdisk)
1376 @classmethod
1377 def tap_pause(cls, session, sr_uuid, vdi_uuid, failfast=False):
1378 """
1379 Pauses the tapdisk.
1381 session: a XAPI session
1382 sr_uuid: the UUID of the SR on which VDI lives
1383 vdi_uuid: the UUID of the VDI to pause
1384 failfast: controls whether the VDI lock should be acquired in a
1385 non-blocking manner
1386 """
1387 util.SMlog("Pause request for %s" % vdi_uuid)
1388 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1389 session.xenapi.VDI.add_to_sm_config(vdi_ref, 'paused', 'true')
1390 sm_config = session.xenapi.VDI.get_sm_config(vdi_ref)
1391 for key in [x for x in sm_config.keys() if x.startswith('host_')]:    [1391 ↛ 1392: the loop on line 1391 never started]
1392 host_ref = key[len('host_'):]
1393 util.SMlog("Calling tap-pause on host %s" % host_ref)
1394 if not cls.call_pluginhandler(session, host_ref,
1395 sr_uuid, vdi_uuid, "pause", failfast=failfast):
1396 # Failed to pause node
1397 session.xenapi.VDI.remove_from_sm_config(vdi_ref, 'paused')
1398 return False
1399 return True
1401 @classmethod
1402 def tap_unpause(cls, session, sr_uuid, vdi_uuid, secondary=None,
1403 activate_parents=False):
1404 util.SMlog("Unpause request for %s secondary=%s" % (vdi_uuid, secondary))
1405 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1406 sm_config = session.xenapi.VDI.get_sm_config(vdi_ref)
1407 for key in [x for x in sm_config.keys() if x.startswith('host_')]:    [1407 ↛ 1408: the loop on line 1407 never started]
1408 host_ref = key[len('host_'):]
1409 util.SMlog("Calling tap-unpause on host %s" % host_ref)
1410 if not cls.call_pluginhandler(session, host_ref,
1411 sr_uuid, vdi_uuid, "unpause", secondary, activate_parents):
1412 # Failed to unpause node
1413 return False
1414 session.xenapi.VDI.remove_from_sm_config(vdi_ref, 'paused')
1415 return True
1417 @classmethod
1418 def tap_refresh(cls, session, sr_uuid, vdi_uuid, activate_parents=False):
1419 util.SMlog("Refresh request for %s" % vdi_uuid)
1420 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1421 sm_config = session.xenapi.VDI.get_sm_config(vdi_ref)
1422 for key in [x for x in sm_config.keys() if x.startswith('host_')]:
1423 host_ref = key[len('host_'):]
1424 util.SMlog("Calling tap-refresh on host %s" % host_ref)
1425 if not cls.call_pluginhandler(session, host_ref,
1426 sr_uuid, vdi_uuid, "refresh", None,
1427 activate_parents=activate_parents):
1428 # Failed to refresh node
1429 return False
1430 return True
1432 @classmethod
1433 def tap_status(cls, session, vdi_uuid):
1434 """Return True if disk is attached, false if it isn't"""
1435 util.SMlog("Disk status request for %s" % vdi_uuid)
1436 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1437 sm_config = session.xenapi.VDI.get_sm_config(vdi_ref)
1438 for key in [x for x in sm_config.keys() if x.startswith('host_')]:    [1438 ↛ 1439: the loop on line 1438 never started]
1439 return True
1440 return False
1442 @classmethod
1443 def call_pluginhandler(cls, session, host_ref, sr_uuid, vdi_uuid, action,
1444 secondary=None, activate_parents=False, failfast=False):
1445 """Optionally, activate the parent LV before unpausing"""
1446 try:
1447 args = {"sr_uuid": sr_uuid, "vdi_uuid": vdi_uuid,
1448 "failfast": str(failfast)}
1449 if secondary:
1450 args["secondary"] = secondary
1451 if activate_parents:
1452 args["activate_parents"] = "true"
1453 ret = session.xenapi.host.call_plugin(
1454 host_ref, PLUGIN_TAP_PAUSE, action,
1455 args)
1456 return ret == "True"
1457 except Exception as e:
1458 util.logException("BLKTAP2:call_pluginhandler %s" % e)
1459 return False
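# The pause/unpause/refresh helpers above all funnel into the same XAPI call,
# roughly (argument values illustrative):
#
#     session.xenapi.host.call_plugin(host_ref, "tapdisk-pause", "pause",
#         {"sr_uuid": sr_uuid, "vdi_uuid": vdi_uuid, "failfast": "False"})
#
# and treat any return value other than the string "True" as failure.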
1461 def _add_tag(self, vdi_uuid, writable):
1462 util.SMlog("Adding tag to: %s" % vdi_uuid)
1463 attach_mode = "RO"
1464 if writable:    [1464 ↛ 1466: the condition on line 1464 was never false]
1465 attach_mode = "RW"
1466 vdi_ref = self._session.xenapi.VDI.get_by_uuid(vdi_uuid)
1467 host_ref = self._session.xenapi.host.get_by_uuid(util.get_this_host())
1468 sm_config = self._session.xenapi.VDI.get_sm_config(vdi_ref)
1469 attached_as = util.attached_as(sm_config)
1470 if NO_MULTIPLE_ATTACH and (attached_as == "RW" or \    [1470 ↛ 1472: the condition on line 1470 was never true]
1471 (attached_as == "RO" and attach_mode == "RW")):
1472 util.SMlog("need to reset VDI %s" % vdi_uuid)
1473 if not resetvdis.reset_vdi(self._session, vdi_uuid, force=False,
1474 term_output=False, writable=writable):
1475 raise util.SMException("VDI %s not detached cleanly" % vdi_uuid)
1476 sm_config = self._session.xenapi.VDI.get_sm_config(vdi_ref)
1477 if 'relinking' in sm_config:
1478 util.SMlog("Relinking key found, back-off and retry" % sm_config)
1479 return False
1480 if 'paused' in sm_config:
1481 util.SMlog("Paused or host_ref key found [%s]" % sm_config)
1482 return False
1483 self._session.xenapi.VDI.add_to_sm_config(
1484 vdi_ref, 'activating', 'True')
1485 host_key = "host_%s" % host_ref
1486 assert host_key not in sm_config
1487 self._session.xenapi.VDI.add_to_sm_config(vdi_ref, host_key,
1488 attach_mode)
1489 sm_config = self._session.xenapi.VDI.get_sm_config(vdi_ref)
1490 if 'paused' in sm_config or 'relinking' in sm_config:
1491 util.SMlog("Found %s key, aborting" % (
1492 'paused' if 'paused' in sm_config else 'relinking'))
1493 self._session.xenapi.VDI.remove_from_sm_config(vdi_ref, host_key)
1494 self._session.xenapi.VDI.remove_from_sm_config(
1495 vdi_ref, 'activating')
1496 return False
1497 util.SMlog("Activate lock succeeded")
1498 return True
1500 def _check_tag(self, vdi_uuid):
1501 vdi_ref = self._session.xenapi.VDI.get_by_uuid(vdi_uuid)
1502 sm_config = self._session.xenapi.VDI.get_sm_config(vdi_ref)
1503 if 'paused' in sm_config:
1504 util.SMlog("Paused key found [%s]" % sm_config)
1505 return False
1506 return True
1508 def _remove_tag(self, vdi_uuid):
1509 vdi_ref = self._session.xenapi.VDI.get_by_uuid(vdi_uuid)
1510 host_ref = self._session.xenapi.host.get_by_uuid(util.get_this_host())
1511 sm_config = self._session.xenapi.VDI.get_sm_config(vdi_ref)
1512 host_key = "host_%s" % host_ref
1513 if host_key in sm_config:
1514 self._session.xenapi.VDI.remove_from_sm_config(vdi_ref, host_key)
1515 util.SMlog("Removed host key %s for %s" % (host_key, vdi_uuid))
1516 else:
1517 util.SMlog("_remove_tag: host key %s not found, ignore" % host_key)
1519 def _get_pool_config(self, pool_name):
1520 pool_info = dict()
1521 vdi_ref = self.target.vdi.sr.srcmd.params.get('vdi_ref')
1522 if not vdi_ref:    [1522 ↛ 1525: the condition on line 1522 was never true]
1523 # attach_from_config context: HA disks don't need to be in any
1524 # special pool
1525 return pool_info
1527 sr_ref = self.target.vdi.sr.srcmd.params.get('sr_ref')
1528 sr_config = self._session.xenapi.SR.get_other_config(sr_ref)
1529 vdi_config = self._session.xenapi.VDI.get_other_config(vdi_ref)
1530 pool_size_str = sr_config.get(POOL_SIZE_KEY)
1531 pool_name_override = vdi_config.get(POOL_NAME_KEY)
1532 if pool_name_override:    [1532 ↛ 1537: the condition on line 1532 was never false]
1533 pool_name = pool_name_override
1534 pool_size_override = vdi_config.get(POOL_SIZE_KEY)
1535 if pool_size_override:    [1535 ↛ 1537: the condition on line 1535 was never false]
1536 pool_size_str = pool_size_override
1537 pool_size = 0
1538 if pool_size_str:    [1538 ↛ 1548: the condition on line 1538 was never false]
1539 try:
1540 pool_size = int(pool_size_str)
1541 if pool_size < 1 or pool_size > MAX_FULL_RINGS:    [1541 ↛ 1542: the condition on line 1541 was never true]
1542 raise ValueError("outside of range")
1543 pool_size = NUM_PAGES_PER_RING * pool_size
1544 except ValueError:
1545 util.SMlog("Error: invalid mem-pool-size %s" % pool_size_str)
1546 pool_size = 0
1548 pool_info["mem-pool"] = pool_name
1549 if pool_size:    [1549 ↛ 1552: the condition on line 1549 was never false]
1550 pool_info["mem-pool-size"] = str(pool_size)
1552 return pool_info
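# Worked example for the pool sizing above: with the SR or VDI other-config
# key "mem-pool-size-rings" set to 2, pool_size becomes
# NUM_PAGES_PER_RING * 2 = 32 * 11 * 2 = 704 pages, so the method returns
# {"mem-pool": <pool_name>, "mem-pool-size": "704"}; values outside
# 1..MAX_FULL_RINGS (8) are rejected and the size override is dropped.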
1554 def linkNBD(self, sr_uuid, vdi_uuid):
1555 if self.tap:
1556 nbd_path = '/run/blktap-control/nbd%d.%d' % (int(self.tap.pid),
1557 int(self.tap.minor))
1558 VDI.NBDLink.from_uuid(sr_uuid, vdi_uuid).mklink(nbd_path)
1560 def attach(self, sr_uuid, vdi_uuid, writable, activate=False, caching_params={}):
1561 """Return/dev/sm/backend symlink path"""
1562 self.xenstore_data.update(self._get_pool_config(sr_uuid))
1563 if not self.target.has_cap("ATOMIC_PAUSE") or activate:
1564 util.SMlog("Attach & activate")
1565 self._attach(sr_uuid, vdi_uuid)
1566 dev_path = self._activate(sr_uuid, vdi_uuid,
1567 {"rdonly": not writable})
1568 self.BackendLink.from_uuid(sr_uuid, vdi_uuid).mklink(dev_path)
1569 self.linkNBD(sr_uuid, vdi_uuid)
1571 # Return backend/ link
1572 back_path = self.BackendLink.from_uuid(sr_uuid, vdi_uuid).path()
1573 if self.tap_wanted():
1574 # Only have NBD if we also have a tap
1575 nbd_path = "nbd:unix:{}:exportname={}".format(
1576 VDI.NBDLink.from_uuid(sr_uuid, vdi_uuid).path(),
1577 vdi_uuid)
1578 else:
1579 nbd_path = ""
1581 options = {"rdonly": not writable}
1582 options.update(caching_params)
1583 o_direct, o_direct_reason = self.get_o_direct_capability(options)
1584 struct = {'params': back_path,
1585 'params_nbd': nbd_path,
1586 'o_direct': o_direct,
1587 'o_direct_reason': o_direct_reason,
1588 'xenstore_data': self.xenstore_data}
1589 util.SMlog('result: %s' % struct)
1591 try:
1592 f = open("%s.attach_info" % back_path, 'a')
1593 f.write(xmlrpc.client.dumps((struct, ), "", True))
1594 f.close()
1595 except:
1596 pass
1598 return xmlrpc.client.dumps((struct, ), "", True)
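# Shape of the XML-RPC payload returned by attach(), before serialisation
# (values illustrative):
#
#     {'params': '/dev/sm/backend/<sr_uuid>/<vdi_uuid>',
#      'params_nbd': 'nbd:unix:/run/blktap-control/nbd/<sr_uuid>/<vdi_uuid>:exportname=<vdi_uuid>',
#      'o_direct': True,
#      'o_direct_reason': 'NO_RO_IMAGE',
#      'xenstore_data': {...}}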
1600 def activate(self, sr_uuid, vdi_uuid, writable, caching_params):
1601 util.SMlog("blktap2.activate")
1602 options = {"rdonly": not writable}
1603 options.update(caching_params)
1605 sr_ref = self.target.vdi.sr.srcmd.params.get('sr_ref')
1606 sr_other_config = self._session.xenapi.SR.get_other_config(sr_ref)
1607 timeout = nfs.get_nfs_timeout(sr_other_config)
1608 if timeout:    [1608 ↛ 1612: the condition on line 1608 was never false]
1609 # Note NFS timeout values are in deciseconds
1610 timeout = int((timeout + 5) / 10)
1611 options["timeout"] = timeout + self.TAPDISK_TIMEOUT_MARGIN
1612 for i in range(self.ATTACH_DETACH_RETRY_SECS):    [1612 ↛ 1619: the loop on line 1612 didn't complete]
1613 try:
1614 if self._activate_locked(sr_uuid, vdi_uuid, options):
1615 return
1616 except util.SRBusyException:
1617 util.SMlog("SR locked, retrying")
1618 time.sleep(1)
1619 raise util.SMException("VDI %s locked" % vdi_uuid)
1621 @locking("VDIUnavailable")
1622 def _activate_locked(self, sr_uuid, vdi_uuid, options):
1623 """Wraps target.activate and adds a tapdisk"""
1625 #util.SMlog("VDI.activate %s" % vdi_uuid)
1626 refresh = False
1627 if self.tap_wanted():    [1627 ↛ 1632: the condition on line 1627 was never false]
1628 if not self._add_tag(vdi_uuid, not options["rdonly"]):
1629 return False
1630 refresh = True
1632 try:
1633 if refresh:    [1633 ↛ 1644: the condition on line 1633 was never false]
1634 # it is possible that while the VDI was paused some of its
1635 # attributes have changed (e.g. its size if it was inflated; or its
1636 # path if it was leaf-coalesced onto a raw LV), so refresh the
1637 # object completely
1638 params = self.target.vdi.sr.srcmd.params
1639 target = sm.VDI.from_uuid(self.target.vdi.session, vdi_uuid)
1640 target.sr.srcmd.params = params
1641 driver_info = target.sr.srcmd.driver_info
1642 self.target = self.TargetDriver(target, driver_info)
1644 util.fistpoint.activate_custom_fn(    [1644 ↛ exit: line 1644 didn't jump to the function exit]
1645 "blktap_activate_inject_failure",
1646 lambda: util.inject_failure())
1648 # Attach the physical node
1649 if self.target.has_cap("ATOMIC_PAUSE"):    [1649 ↛ 1652: the condition on line 1649 was never false]
1650 self._attach(sr_uuid, vdi_uuid)
1652 vdi_type = self.target.get_vdi_type()
1654 # Take lvchange-p Lock before running
1655 # tap-ctl open
1656 # Needed to avoid race with lvchange -p which is
1657 # now taking the same lock
1658 # This is a fix for CA-155766
1659 if hasattr(self.target.vdi.sr, 'DRIVER_TYPE') and \    [1659 ↛ 1662: the condition on line 1659 was never true]
1660 self.target.vdi.sr.DRIVER_TYPE == 'lvhd' and \
1661 vdi_type == vhdutil.VDI_TYPE_VHD:
1662 lock = Lock("lvchange-p", lvhdutil.NS_PREFIX_LVM + sr_uuid)
1663 lock.acquire()
1665 # When we attach a static VDI for HA, we cannot communicate with
1666 # xapi, because it has not started yet. These VDIs are raw.
1667 if vdi_type != vhdutil.VDI_TYPE_RAW:    [1667 ↛ 1678: the condition on line 1667 was never false]
1668 session = self.target.vdi.session
1669 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1670 # pylint: disable=used-before-assignment
1671 sm_config = session.xenapi.VDI.get_sm_config(vdi_ref)
1672 if 'key_hash' in sm_config:    [1672 ↛ 1673: the condition on line 1672 was never true]
1673 key_hash = sm_config['key_hash']
1674 options['key_hash'] = key_hash
1675 options['vdi_uuid'] = vdi_uuid
1676 util.SMlog('Using key with hash {} for VDI {}'.format(key_hash, vdi_uuid))
1677 # Activate the physical node
1678 dev_path = self._activate(sr_uuid, vdi_uuid, options)
1680 if hasattr(self.target.vdi.sr, 'DRIVER_TYPE') and \    [1680 ↛ 1683: the condition on line 1680 was never true]
1681 self.target.vdi.sr.DRIVER_TYPE == 'lvhd' and \
1682 self.target.get_vdi_type() == vhdutil.VDI_TYPE_VHD:
1683 lock.release()
1684 except:
1685 util.SMlog("Exception in activate/attach")
1686 if self.tap_wanted():
1687 util.fistpoint.activate_custom_fn(
1688 "blktap_activate_error_handling",
1689 lambda: time.sleep(30))
1690 while True:
1691 try:
1692 self._remove_tag(vdi_uuid)
1693 break
1694 except xmlrpc.client.ProtocolError as e:
1695 # If there's a connection error, keep trying forever.
1696 if e.errcode == http.HTTPStatus.INTERNAL_SERVER_ERROR.value:
1697 continue
1698 else:
1699 util.SMlog('failed to remove tag: %s' % e)
1700 break
1701 except Exception as e:
1702 util.SMlog('failed to remove tag: %s' % e)
1703 break
1704 raise
1705 finally:
1706 vdi_ref = self._session.xenapi.VDI.get_by_uuid(vdi_uuid)
1707 self._session.xenapi.VDI.remove_from_sm_config(
1708 vdi_ref, 'activating')
1709 util.SMlog("Removed activating flag from %s" % vdi_uuid) 1709 ↛ exitline 1709 didn't except from function '_activate_locked', because the raise on line 1704 wasn't executed
1711 # Link result to backend/
1712 self.BackendLink.from_uuid(sr_uuid, vdi_uuid).mklink(dev_path)
1713 self.linkNBD(sr_uuid, vdi_uuid)
1714 return True
1716 def _activate(self, sr_uuid, vdi_uuid, options):
1717 vdi_options = self.target.activate(sr_uuid, vdi_uuid)
1719 dev_path = self.setup_cache(sr_uuid, vdi_uuid, options)
1720 if not dev_path:    [1720 ↛ 1734: the condition on line 1720 was never false]
1721 phy_path = self.PhyLink.from_uuid(sr_uuid, vdi_uuid).readlink()
1722 # Maybe launch a tapdisk on the physical link
1723 if self.tap_wanted():    [1723 ↛ 1732: the condition on line 1723 was never false]
1724 vdi_type = self.target.get_vdi_type()
1725 options["o_direct"] = self.get_o_direct_capability(options)[0]
1726 if vdi_options:    [1726 ↛ 1728: the condition on line 1726 was never false]
1727 options.update(vdi_options)
1728 dev_path, self.tap = self._tap_activate(phy_path, vdi_type,
1729 sr_uuid, options,
1730 self._get_pool_config(sr_uuid).get("mem-pool-size"))
1731 else:
1732 dev_path = phy_path # Just reuse phy
1734 return dev_path
1736 def _attach(self, sr_uuid, vdi_uuid):
1737 attach_info = xmlrpc.client.loads(self.target.attach(sr_uuid, vdi_uuid))[0][0]
1738 params = attach_info['params']
1739 xenstore_data = attach_info['xenstore_data']
1740 phy_path = util.to_plain_string(params)
1741 self.xenstore_data.update(xenstore_data)
1742 # Save it to phy/
1743 self.PhyLink.from_uuid(sr_uuid, vdi_uuid).mklink(phy_path)
1745 def deactivate(self, sr_uuid, vdi_uuid, caching_params):
1746 util.SMlog("blktap2.deactivate")
1747 for i in range(self.ATTACH_DETACH_RETRY_SECS):
1748 try:
1749 if self._deactivate_locked(sr_uuid, vdi_uuid, caching_params):
1750 return
1751 except util.SRBusyException as e:
1752 util.SMlog("SR locked, retrying")
1753 time.sleep(1)
1754 raise util.SMException("VDI %s locked" % vdi_uuid)
1756 @locking("VDIUnavailable")
1757 def _deactivate_locked(self, sr_uuid, vdi_uuid, caching_params):
1758 """Wraps target.deactivate and removes a tapdisk"""
1760 #util.SMlog("VDI.deactivate %s" % vdi_uuid)
1761 if self.tap_wanted() and not self._check_tag(vdi_uuid):
1762 return False
1764 self._deactivate(sr_uuid, vdi_uuid, caching_params)
1765 if self.target.has_cap("ATOMIC_PAUSE"):
1766 self._detach(sr_uuid, vdi_uuid)
1767 if self.tap_wanted():
1768 self._remove_tag(vdi_uuid)
1770 return True
1772 def _resetPhylink(self, sr_uuid, vdi_uuid, path):
1773 self.PhyLink.from_uuid(sr_uuid, vdi_uuid).mklink(path)
1775 def detach(self, sr_uuid, vdi_uuid, deactivate=False, caching_params={}):
1776 if not self.target.has_cap("ATOMIC_PAUSE") or deactivate:
1777 util.SMlog("Deactivate & detach")
1778 self._deactivate(sr_uuid, vdi_uuid, caching_params)
1779 self._detach(sr_uuid, vdi_uuid)
1780 else:
1781 pass # nothing to do
1783 def _deactivate(self, sr_uuid, vdi_uuid, caching_params):
1784 import VDI as sm
1786 # Shutdown tapdisk
1787 back_link = self.BackendLink.from_uuid(sr_uuid, vdi_uuid)
1789 if not util.pathexists(back_link.path()):
1790 util.SMlog("Backend path %s does not exist" % back_link.path())
1791 return
1793 try:
1794 attach_info_path = "%s.attach_info" % (back_link.path())
1795 os.unlink(attach_info_path)
1796 except:
1797 util.SMlog("unlink of attach_info failed")
1799 try:
1800 major, minor = back_link.rdev()
1801 except self.DeviceNode.NotABlockDevice:
1802 pass
1803 else:
1804 if major == Tapdisk.major():
1805 self._tap_deactivate(minor)
1806 self.remove_cache(sr_uuid, vdi_uuid, caching_params)
1808 # Remove the backend link
1809 back_link.unlink()
1810 VDI.NBDLink.from_uuid(sr_uuid, vdi_uuid).unlink()
1812 # Deactivate & detach the physical node
1813 if self.tap_wanted() and self.target.vdi.session is not None:
1814 # it is possible that while the VDI was paused some of its
1815 # attributes have changed (e.g. its size if it was inflated; or its
1816 # path if it was leaf-coalesced onto a raw LV), so refresh the
1817 # object completely
1818 target = sm.VDI.from_uuid(self.target.vdi.session, vdi_uuid)
1819 driver_info = target.sr.srcmd.driver_info
1820 self.target = self.TargetDriver(target, driver_info)
1822 self.target.deactivate(sr_uuid, vdi_uuid)
1824 def _detach(self, sr_uuid, vdi_uuid):
1825 self.target.detach(sr_uuid, vdi_uuid)
1827 # Remove phy/
1828 self.PhyLink.from_uuid(sr_uuid, vdi_uuid).unlink()
1830 def _updateCacheRecord(self, session, vdi_uuid, on_boot, caching):
1831 # Remove existing VDI.sm_config fields
1832 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1833 for key in ["on_boot", "caching"]:
1834 session.xenapi.VDI.remove_from_sm_config(vdi_ref, key)
1835 if not on_boot is None:
1836 session.xenapi.VDI.add_to_sm_config(vdi_ref, 'on_boot', on_boot)
1837 if not caching is None:
1838 session.xenapi.VDI.add_to_sm_config(vdi_ref, 'caching', caching)
1840 def setup_cache(self, sr_uuid, vdi_uuid, params):
1841 if params.get(self.CONF_KEY_ALLOW_CACHING) != "true":    [1841 ↛ 1844: the condition on line 1841 was never false]
1842 return
1844 util.SMlog("Requested local caching")
1845 if not self.target.has_cap("SR_CACHING"):
1846 util.SMlog("Error: local caching not supported by this SR")
1847 return
1849 scratch_mode = False
1850 if params.get(self.CONF_KEY_MODE_ON_BOOT) == "reset":
1851 scratch_mode = True
1852 util.SMlog("Requested scratch mode")
1853 if not self.target.has_cap("VDI_RESET_ON_BOOT/2"):
1854 util.SMlog("Error: scratch mode not supported by this SR")
1855 return
1857 dev_path = None
1858 local_sr_uuid = params.get(self.CONF_KEY_CACHE_SR)
1859 if not local_sr_uuid:
1860 util.SMlog("ERROR: Local cache SR not specified, not enabling")
1861 return
1862 dev_path = self._setup_cache(self._session, sr_uuid, vdi_uuid,
1863 local_sr_uuid, scratch_mode, params)
1865 if dev_path:
1866 self._updateCacheRecord(self._session, self.target.vdi.uuid,
1867 params.get(self.CONF_KEY_MODE_ON_BOOT),
1868 params.get(self.CONF_KEY_ALLOW_CACHING))
1870 return dev_path
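# IntelliCache layout produced by _setup_cache() below on the local cache SR
# (paths illustrative): a shared read-cache node snapshotted from the parent,
# "<local_sr.path>/<parent_uuid>.vhdcache", plus a per-VDI local write node,
# "<local_sr.path>/<vdi_uuid>.vhdcache"; the read-cache node gets its own
# tapdisk launched with the "lcache" option (tap-ctl open -r).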
1872 def alert_no_cache(self, session, vdi_uuid, cache_sr_uuid, err):
1873 vm_uuid = None
1874 vm_label = ""
1875 try:
1876 cache_sr_ref = session.xenapi.SR.get_by_uuid(cache_sr_uuid)
1877 cache_sr_rec = session.xenapi.SR.get_record(cache_sr_ref)
1878 cache_sr_label = cache_sr_rec.get("name_label")
1880 host_ref = session.xenapi.host.get_by_uuid(util.get_this_host())
1881 host_rec = session.xenapi.host.get_record(host_ref)
1882 host_label = host_rec.get("name_label")
1884 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
1885 vbds = session.xenapi.VBD.get_all_records_where( \
1886 "field \"VDI\" = \"%s\"" % vdi_ref)
1887 for vbd_rec in vbds.values():
1888 vm_ref = vbd_rec.get("VM")
1889 vm_rec = session.xenapi.VM.get_record(vm_ref)
1890 vm_uuid = vm_rec.get("uuid")
1891 vm_label = vm_rec.get("name_label")
1892 except:
1893 util.logException("alert_no_cache")
1895 alert_obj = "SR"
1896 alert_uuid = str(cache_sr_uuid)
1897 alert_str = "No space left in Local Cache SR %s" % cache_sr_uuid
1898 if vm_uuid:
1899 alert_obj = "VM"
1900 alert_uuid = vm_uuid
1901 reason = ""
1902 if err == errno.ENOSPC:
1903 reason = "because there is no space left"
1904 alert_str = "The VM \"%s\" is not using IntelliCache %s on the Local Cache SR (\"%s\") on host \"%s\"" % \
1905 (vm_label, reason, cache_sr_label, host_label)
1907 util.SMlog("Creating alert: (%s, %s, \"%s\")" % \
1908 (alert_obj, alert_uuid, alert_str))
1909 session.xenapi.message.create("No space left in local cache", "3",
1910 alert_obj, alert_uuid, alert_str)
1912 def _setup_cache(self, session, sr_uuid, vdi_uuid, local_sr_uuid,
1913 scratch_mode, options):
1914 import SR
1915 import EXTSR
1916 import NFSSR
1917 from lock import Lock
1918 from FileSR import FileVDI
1920 parent_uuid = vhdutil.getParent(self.target.vdi.path,
1921 FileVDI.extractUuid)
1922 if not parent_uuid:
1923 util.SMlog("ERROR: VDI %s has no parent, not enabling" % \
1924 self.target.vdi.uuid)
1925 return
1927 util.SMlog("Setting up cache")
1928 parent_uuid = parent_uuid.strip()
1929 shared_target = NFSSR.NFSFileVDI(self.target.vdi.sr, parent_uuid)
1931 if shared_target.parent:
1932 util.SMlog("ERROR: Parent VDI %s has parent, not enabling" %
1933 shared_target.uuid)
1934 return
1936 SR.registerSR(EXTSR.EXTSR)
1937 local_sr = SR.SR.from_uuid(session, local_sr_uuid)
1939 lock = Lock(self.LOCK_CACHE_SETUP, parent_uuid)
1940 lock.acquire()
1942 # read cache
1943 read_cache_path = "%s/%s.vhdcache" % (local_sr.path, shared_target.uuid)
1944 if util.pathexists(read_cache_path):
1945 util.SMlog("Read cache node (%s) already exists, not creating" % \
1946 read_cache_path)
1947 else:
1948 try:
1949 vhdutil.snapshot(read_cache_path, shared_target.path, False)
1950 except util.CommandException as e:
1951 util.SMlog("Error creating parent cache: %s" % e)
1952 self.alert_no_cache(session, vdi_uuid, local_sr_uuid, e.code)
1953 return None
1955 # local write node
1956 leaf_size = vhdutil.getSizeVirt(self.target.vdi.path)
1957 local_leaf_path = "%s/%s.vhdcache" % \
1958 (local_sr.path, self.target.vdi.uuid)
1959 if util.pathexists(local_leaf_path):
1960 util.SMlog("Local leaf node (%s) already exists, deleting" % \
1961 local_leaf_path)
1962 os.unlink(local_leaf_path)
1963 try:
1964 vhdutil.snapshot(local_leaf_path, read_cache_path, False,
1965 msize=leaf_size // 1024 // 1024, checkEmpty=False)
1966 except util.CommandException as e:
1967 util.SMlog("Error creating leaf cache: %s" % e)
1968 self.alert_no_cache(session, vdi_uuid, local_sr_uuid, e.code)
1969 return None
1971 local_leaf_size = vhdutil.getSizeVirt(local_leaf_path)
1972 if leaf_size > local_leaf_size:
1973 util.SMlog("Leaf size %d > local leaf cache size %d, resizing" %
1974 (leaf_size, local_leaf_size))
1975 vhdutil.setSizeVirtFast(local_leaf_path, leaf_size)
1977 vdi_type = self.target.get_vdi_type()
1979 prt_tapdisk = Tapdisk.find_by_path(read_cache_path)
1980 if not prt_tapdisk:
1981 parent_options = copy.deepcopy(options)
1982 parent_options["rdonly"] = False
1983 parent_options["lcache"] = True
1985 blktap = Blktap.allocate()
1986 try:
1987 blktap.set_pool_name("lcache-parent-pool-%s" % blktap.minor)
1988 # no need to change pool_size since each parent tapdisk is in
1989 # its own pool
1990 prt_tapdisk = \
1991 Tapdisk.launch_on_tap(blktap, read_cache_path,
1992 'vhd', parent_options)
1993 except:
1994 blktap.free()
1995 raise
1997 secondary = "%s:%s" % (self.target.get_vdi_type(),
1998 self.PhyLink.from_uuid(sr_uuid, vdi_uuid).readlink())
2000 util.SMlog("Parent tapdisk: %s" % prt_tapdisk)
2001 leaf_tapdisk = Tapdisk.find_by_path(local_leaf_path)
2002 if not leaf_tapdisk:
2003 blktap = Blktap.allocate()
2004 child_options = copy.deepcopy(options)
2005 child_options["rdonly"] = False
2006 child_options["lcache"] = False
2007 child_options["existing_prt"] = prt_tapdisk.minor
2008 child_options["secondary"] = secondary
2009 child_options["standby"] = scratch_mode
2010 try:
2011 leaf_tapdisk = \
2012 Tapdisk.launch_on_tap(blktap, local_leaf_path,
2013 'vhd', child_options)
2014 except:
2015 blktap.free()
2016 raise
2018 lock.release()
2020 util.SMlog("Local read cache: %s, local leaf: %s" % \
2021 (read_cache_path, local_leaf_path))
2023 self.tap = leaf_tapdisk
2024 return leaf_tapdisk.get_devpath()
2026 def remove_cache(self, sr_uuid, vdi_uuid, params):
2027 if not self.target.has_cap("SR_CACHING"):
2028 return
2030 caching = params.get(self.CONF_KEY_ALLOW_CACHING) == "true"
2032 local_sr_uuid = params.get(self.CONF_KEY_CACHE_SR)
2033 if caching and not local_sr_uuid:
2034 util.SMlog("ERROR: Local cache SR not specified, ignore")
2035 return
2037 if caching:
2038 self._remove_cache(self._session, local_sr_uuid)
2040 if self._session is not None:
2041 self._updateCacheRecord(self._session, self.target.vdi.uuid, None, None)
2043 def _is_tapdisk_in_use(self, minor):
2044 retVal, links, sockets = util.findRunningProcessOrOpenFile("tapdisk")
2045 if not retVal:
2046 # err on the side of caution
2047 return True
2049 for link in links:
2050 if link.find("tapdev%d" % minor) != -1:
2051 return True
2053 socket_re = re.compile(r'^/.*/nbd\d+\.%d' % minor)
2054 for s in sockets:
2055 if socket_re.match(s):
2056 return True
2058 return False
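# Editor's note (the socket path is a hypothetical example): for minor 2 the
# pattern above is r'^/.*/nbd\d+\.2', so a socket such as
# /run/blktap-control/nbd1234.2 reported by findRunningProcessOrOpenFile()
# counts as "in use", as does any open link containing "tapdev2".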
2060 def _remove_cache(self, session, local_sr_uuid):
2061 import SR
2062 import EXTSR
2063 import NFSSR
2064 from lock import Lock
2065 from FileSR import FileVDI
2067 parent_uuid = vhdutil.getParent(self.target.vdi.path,
2068 FileVDI.extractUuid)
2069 if not parent_uuid:
2070 util.SMlog("ERROR: No parent for VDI %s, ignore" % \
2071 self.target.vdi.uuid)
2072 return
2074 util.SMlog("Tearing down the cache")
2076 parent_uuid = parent_uuid.strip()
2077 shared_target = NFSSR.NFSFileVDI(self.target.vdi.sr, parent_uuid)
2079 SR.registerSR(EXTSR.EXTSR)
2080 local_sr = SR.SR.from_uuid(session, local_sr_uuid)
2082 lock = Lock(self.LOCK_CACHE_SETUP, parent_uuid)
2083 lock.acquire()
2085 # local write node
2086 local_leaf_path = "%s/%s.vhdcache" % \
2087 (local_sr.path, self.target.vdi.uuid)
2088 if util.pathexists(local_leaf_path):
2089 util.SMlog("Deleting local leaf node %s" % local_leaf_path)
2090 os.unlink(local_leaf_path)
2092 read_cache_path = "%s/%s.vhdcache" % (local_sr.path, shared_target.uuid)
2093 prt_tapdisk = Tapdisk.find_by_path(read_cache_path)
2094 if not prt_tapdisk:
2095 util.SMlog("Parent tapdisk not found")
2096 elif not self._is_tapdisk_in_use(prt_tapdisk.minor):
2097 util.SMlog("Parent tapdisk not in use: shutting down %s" % \
2098 read_cache_path)
2099 try:
2100 prt_tapdisk.shutdown()
2101 except:
2102 util.logException("shutting down parent tapdisk")
2103 else:
2104 util.SMlog("Parent tapdisk still in use: %s" % read_cache_path)
2105 # the parent cache files are removed during the local SR's background
2106 # GC run
2108 lock.release()
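# Editor's note (illustration, not part of the driver): _setup_cache() builds
# a two-node VHD chain per cached VDI on the local cache SR:
#
#   <local_sr.path>/<parent_uuid>.vhdcache   read cache, a VHD snapshot of the
#                                            shared parent; served by a parent
#                                            tapdisk in "lcache" mode, one
#                                            mem-pool per parent
#   <local_sr.path>/<vdi_uuid>.vhdcache      local leaf, a VHD snapshot of the
#                                            read cache; served by a leaf
#                                            tapdisk stacked on the parent via
#                                            "existing_prt" and mirroring
#                                            writes to the "secondary" path
#
# _remove_cache() deletes the leaf and shuts the parent tapdisk down once no
# process holds its tapdev or NBD socket; the read-cache file itself is left
# for the local SR's background GC.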
2110PythonKeyError = KeyError
2113class UEventHandler(object):
2115 def __init__(self):
2116 self._action = None
2118 class KeyError(PythonKeyError):
2119 def __init__(self, key):
2120 super().__init__(key)
2121 self.key = key
2123 def __str__(self):
2124 return \
2125 "Key '%s' missing in environment. " % self.key + \
2126 "Not called in udev context?"
2128 @classmethod
2129 def getenv(cls, key):
2130 try:
2131 return os.environ[key]
2132 except KeyError as e:
2133 raise cls.KeyError(e.args[0])
2135 def get_action(self):
2136 if not self._action:
2137 self._action = self.getenv('ACTION')
2138 return self._action
2140 class UnhandledEvent(Exception):
2142 def __init__(self, event, handler):
2143 self.event = event
2144 self.handler = handler
2146 def __str__(self):
2147 return "Uevent '%s' not handled by %s" % \
2148 (self.event, self.handler.__class__.__name__)
2150 ACTIONS = {}
2152 def run(self):
2154 action = self.get_action()
2155 try:
2156 fn = self.ACTIONS[action]
2157 except KeyError:
2158 raise self.UnhandledEvent(action, self)
2160 return fn(self)
2162 def __str__(self):
2163 try:
2164 action = self.get_action()
2165 except:
2166 action = None
2167 return "%s[%s]" % (self.__class__.__name__, action)
2170class __BlktapControl(ClassDevice):
2171 SYSFS_CLASSTYPE = "misc"
2173 def __init__(self):
2174 ClassDevice.__init__(self)
2175 self._default_pool = None
2177 def sysfs_devname(self):
2178 return "blktap!control"
2180 class DefaultPool(Attribute):
2181 SYSFS_NODENAME = "default_pool"
2183 def get_default_pool_attr(self):
2184 if not self._default_pool:
2185 self._default_pool = self.DefaultPool.from_kobject(self)
2186 return self._default_pool
2188 def get_default_pool_name(self):
2189 return self.get_default_pool_attr().readline()
2191 def set_default_pool_name(self, name):
2192 self.get_default_pool_attr().writeline(name)
2194 def get_default_pool(self):
2195 return BlktapControl.get_pool(self.get_default_pool_name())
2197 def set_default_pool(self, pool):
2198 self.set_default_pool_name(pool.name)
2200 class NoSuchPool(Exception):
2201 def __init__(self, name):
2202 self.name = name
2204 def __str__(self):
2205 return "No such pool: {}".format(self.name)
2207 def get_pool(self, name):
2208 path = "%s/pools/%s" % (self.sysfs_path(), name)
2210 if not os.path.isdir(path):
2211 raise self.NoSuchPool(name)
2213 return PagePool(path)
2215BlktapControl = __BlktapControl()
2218class PagePool(KObject):
2220 def __init__(self, path):
2221 self.path = path
2222 self._size = None
2224 def sysfs_path(self):
2225 return self.path
2227 class Size(Attribute):
2228 SYSFS_NODENAME = "size"
2230 def get_size_attr(self):
2231 if not self._size:
2232 self._size = self.Size.from_kobject(self)
2233 return self._size
2235 def set_size(self, pages):
2236 pages = str(pages)
2237 self.get_size_attr().writeline(pages)
2239 def get_size(self):
2240 pages = self.get_size_attr().readline()
2241 return int(pages)
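# Editor's sketch (not part of the driver; the pool name and page count are
# made-up examples): how the sysfs wrappers above could be used to grow a
# tapdisk page pool. get_pool() raises NoSuchPool when the pool directory is
# absent.
def _sketch_bump_pool_size(name="lcache-parent-pool-0", extra_pages=352):
    try:
        pool = BlktapControl.get_pool(name)
    except BlktapControl.NoSuchPool:
        return None
    pool.set_size(pool.get_size() + extra_pages)  # size is counted in pages
    return pool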
2244class BusDevice(KObject):
2246 SYSFS_BUSTYPE = None
2248 @classmethod
2249 def sysfs_bus_path(cls):
2250 return "/sys/bus/%s" % cls.SYSFS_BUSTYPE
2252 def sysfs_path(self):
2253 path = "%s/devices/%s" % (self.sysfs_bus_path(),
2254 self.sysfs_devname())
2256 return path
2259class XenbusDevice(BusDevice):
2260 """Xenbus device, in XS and sysfs"""
2262 XBT_NIL = ""
2264 XENBUS_DEVTYPE = None
2266 def __init__(self, domid, devid):
2267 self.domid = int(domid)
2268 self.devid = int(devid)
2269 self._xbt = XenbusDevice.XBT_NIL
2271 import xen.lowlevel.xs # pylint: disable=import-error
2272 self.xs = xen.lowlevel.xs.xs()
2274 def xs_path(self, key=None):
2275 path = "backend/%s/%d/%d" % (self.XENBUS_DEVTYPE,
2276 self.domid,
2277 self.devid)
2278 if key is not None:
2279 path = "%s/%s" % (path, key)
2281 return path
2283 def _log(self, prio, msg):
2284 syslog(prio, msg)
2286 def info(self, msg):
2287 self._log(_syslog.LOG_INFO, msg)
2289 def warn(self, msg):
2290 self._log(_syslog.LOG_WARNING, "WARNING: " + msg)
2292 def _xs_read_path(self, path):
2293 val = self.xs.read(self._xbt, path)
2294 #self.info("read %s = '%s'" % (path, val))
2295 return val
2297 def _xs_write_path(self, path, val):
2298 self.xs.write(self._xbt, path, val)
2299 self.info("wrote %s = '%s'" % (path, val))
2301 def _xs_rm_path(self, path):
2302 self.xs.rm(self._xbt, path)
2303 self.info("removed %s" % path)
2305 def read(self, key):
2306 return self._xs_read_path(self.xs_path(key))
2308 def has_xs_key(self, key):
2309 return self.read(key) is not None
2311 def write(self, key, val):
2312 self._xs_write_path(self.xs_path(key), val)
2314 def rm(self, key):
2315 self._xs_rm_path(self.xs_path(key))
2317 def exists(self):
2318 return self.has_xs_key(None)
2320 def begin(self):
2321 assert(self._xbt == XenbusDevice.XBT_NIL)
2322 self._xbt = self.xs.transaction_start()
2324 def commit(self):
2325 ok = self.xs.transaction_end(self._xbt, 0)
2326 self._xbt = XenbusDevice.XBT_NIL
2327 return ok
2329 def abort(self):
2330 ok = self.xs.transaction_end(self._xbt, 1)
2331 assert(ok == True)
2332 self._xbt = XenbusDevice.XBT_NIL
2334 def create_physical_device(self):
2335 """The standard protocol is: toolstack writes 'params', linux hotplug
2336 script translates this into physical-device=%x:%x"""
2337 if self.has_xs_key("physical-device"):
2338 return
2339 try:
2340 params = self.read("params")
2341 frontend = self.read("frontend")
2342 is_cdrom = self._xs_read_path("%s/device-type" % frontend) == "cdrom"
2343 # We don't have PV drivers for CDROM devices, so we prevent blkback
2344 # from opening the physical-device
2345 if not(is_cdrom):
2346 major_minor = os.stat(params).st_rdev
2347 major, minor = divmod(major_minor, 256)
2348 self.write("physical-device", "%x:%x" % (major, minor))
2349 except:
2350 util.logException("BLKTAP2:create_physical_device")
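# Editor's note (hypothetical device): a params node whose st_rdev is 0xfd01
# gives divmod(0xfd01, 256) == (0xfd, 0x01), so the key written is
# physical-device = "fd:1".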
2352 def signal_hotplug(self, online=True):
2353 xapi_path = "/xapi/%d/hotplug/%s/%d/hotplug" % (self.domid,
2354 self.XENBUS_DEVTYPE,
2355 self.devid)
2356 upstream_path = self.xs_path("hotplug-status")
2357 if online:
2358 self._xs_write_path(xapi_path, "online")
2359 self._xs_write_path(upstream_path, "connected")
2360 else:
2361 self._xs_rm_path(xapi_path)
2362 self._xs_rm_path(upstream_path)
2364 def sysfs_devname(self):
2365 return "%s-%d-%d" % (self.XENBUS_DEVTYPE,
2366 self.domid, self.devid)
2368 def __str__(self):
2369 return self.sysfs_devname()
2371 @classmethod
2372 def find(cls):
2373 pattern = "/sys/bus/%s/devices/%s*" % (cls.SYSFS_BUSTYPE,
2374 cls.XENBUS_DEVTYPE)
2375 for path in glob.glob(pattern):
2377 name = os.path.basename(path)
2378 (_type, domid, devid) = name.split('-')
2380 yield cls(domid, devid)
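# Editor's note (the domid/devid values are illustrative): for XENBUS_DEVTYPE
# "vbd", domid 5 and devid 51712, xs_path("params") is
# "backend/vbd/5/51712/params", sysfs_devname() is "vbd-5-51712", and
# signal_hotplug() writes "/xapi/5/hotplug/vbd/51712/hotplug" plus the
# backend's "hotplug-status" key.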
2383class XenBackendDevice(XenbusDevice):
2384 """Xenbus backend device"""
2385 SYSFS_BUSTYPE = "xen-backend"
2387 @classmethod
2388 def from_xs_path(cls, _path):
2389 (_backend, _type, domid, devid) = _path.split('/')
2391 assert _backend == 'backend'
2392 assert _type == cls.XENBUS_DEVTYPE
2394 domid = int(domid)
2395 devid = int(devid)
2397 return cls(domid, devid)
2400class Blkback(XenBackendDevice):
2401 """A blkback VBD"""
2403 XENBUS_DEVTYPE = "vbd"
2405 def __init__(self, domid, devid):
2406 XenBackendDevice.__init__(self, domid, devid)
2407 self._phy = None
2408 self._vdi_uuid = None
2409 self._q_state = None
2410 self._q_events = None
2412 class XenstoreValueError(Exception):
2413 KEY = None
2415 def __init__(self, vbd, _str):
2416 self.vbd = vbd
2417 self.str = _str
2419 def __str__(self):
2420 return "Backend %s " % self.vbd + \
2421 "has %s = %s" % (self.KEY, self.str)
2423 class PhysicalDeviceError(XenstoreValueError):
2424 KEY = "physical-device"
2426 class PhysicalDevice(object):
2428 def __init__(self, major, minor):
2429 self.major = int(major)
2430 self.minor = int(minor)
2432 @classmethod
2433 def from_xbdev(cls, xbdev):
2435 phy = xbdev.read("physical-device")
2437 try:
2438 major, minor = phy.split(':')
2439 major = int(major, 0x10)
2440 minor = int(minor, 0x10)
2441 except Exception as e:
2442 raise xbdev.PhysicalDeviceError(xbdev, phy)
2444 return cls(major, minor)
2446 def makedev(self):
2447 return os.makedev(self.major, self.minor)
2449 def is_tap(self):
2450 return self.major == Tapdisk.major()
2452 def __str__(self):
2453 return "%s:%s" % (self.major, self.minor)
2455 def __eq__(self, other):
2456 return \
2457 self.major == other.major and \
2458 self.minor == other.minor
2460 def get_physical_device(self):
2461 if not self._phy:
2462 self._phy = self.PhysicalDevice.from_xbdev(self)
2463 return self._phy
2465 class QueueEvents(Attribute):
2466 """Blkback sysfs node to select queue-state event
2467 notifications emitted."""
2469 SYSFS_NODENAME = "queue_events"
2471 QUEUE_RUNNING = (1 << 0)
2472 QUEUE_PAUSE_DONE = (1 << 1)
2473 QUEUE_SHUTDOWN_DONE = (1 << 2)
2474 QUEUE_PAUSE_REQUEST = (1 << 3)
2475 QUEUE_SHUTDOWN_REQUEST = (1 << 4)
2477 def get_mask(self):
2478 return int(self.readline(), 0x10)
2480 def set_mask(self, mask):
2481 self.writeline("0x%x" % mask)
2483 def get_queue_events(self):
2484 if not self._q_events:
2485 self._q_events = self.QueueEvents.from_kobject(self)
2486 return self._q_events
2488 def get_vdi_uuid(self):
2489 if not self._vdi_uuid:
2490 self._vdi_uuid = self.read("sm-data/vdi-uuid")
2491 return self._vdi_uuid
2493 def pause_requested(self):
2494 return self.has_xs_key("pause")
2496 def shutdown_requested(self):
2497 return self.has_xs_key("shutdown-request")
2499 def shutdown_done(self):
2500 return self.has_xs_key("shutdown-done")
2502 def running(self):
2503 return self.has_xs_key('queue-0/kthread-pid')
2505 @classmethod
2506 def find_by_physical_device(cls, phy):
2507 for dev in cls.find():
2508 try:
2509 _phy = dev.get_physical_device()
2510 except cls.PhysicalDeviceError:
2511 continue
2513 if _phy == phy:
2514 yield dev
2516 @classmethod
2517 def find_by_tap_minor(cls, minor):
2518 phy = cls.PhysicalDevice(Tapdisk.major(), minor)
2519 return cls.find_by_physical_device(phy)
2521 @classmethod
2522 def find_by_tap(cls, tapdisk):
2523 return cls.find_by_tap_minor(tapdisk.minor)
2525 def has_tap(self):
2527 if not self.can_tap():
2528 return False
2530 phy = self.get_physical_device()
2531 if phy:
2532 return phy.is_tap()
2534 return False
2536 def is_bare_hvm(self):
2537 """File VDIs for bare HVM. These are directly accessible by Qemu."""
2538 try:
2539 self.get_physical_device()
2541 except self.PhysicalDeviceError as e:
2542 vdi_type = self.read("type")
2544 self.info("HVM VDI: type=%s" % vdi_type)
2546 if e.str is not None or vdi_type != 'file':
2547 raise
2549 return True
2551 return False
2553 def can_tap(self):
2554 return not self.is_bare_hvm()
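# Editor's sketch (the "fd:a" value is a made-up example): how the
# PhysicalDevice helper above parses a xenstore "physical-device" entry; the
# fields are hexadecimal, and is_tap() compares the major with Tapdisk.major().
def _sketch_parse_physical_device(value="fd:a"):
    major, minor = value.split(':')
    phy = Blkback.PhysicalDevice(int(major, 0x10), int(minor, 0x10))
    return phy.major, phy.minor  # (253, 10)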
2557class BlkbackEventHandler(UEventHandler):
2559 LOG_FACILITY = _syslog.LOG_DAEMON
2561 def __init__(self, ident=None, action=None):
2562 if not ident:
2563 ident = self.__class__.__name__
2565 self.ident = ident
2566 self._vbd = None
2567 self._tapdisk = None
2569 UEventHandler.__init__(self)
2571 def run(self):
2573 self.xs_path = self.getenv('XENBUS_PATH')
2574 openlog(str(self), 0, self.LOG_FACILITY)
2576 UEventHandler.run(self)
2578 def __str__(self):
2580 try:
2581 path = self.xs_path
2582 except:
2583 path = None
2585 try:
2586 action = self.get_action()
2587 except:
2588 action = None
2590 return "%s[%s](%s)" % (self.ident, action, path)
2592 def _log(self, prio, msg):
2593 syslog(prio, msg)
2594 util.SMlog("%s: " % self + msg)
2596 def info(self, msg):
2597 self._log(_syslog.LOG_INFO, msg)
2599 def warn(self, msg):
2600 self._log(_syslog.LOG_WARNING, "WARNING: " + msg)
2602 def error(self, msg):
2603 self._log(_syslog.LOG_ERR, "ERROR: " + msg)
2605 def get_vbd(self):
2606 if not self._vbd:
2607 self._vbd = Blkback.from_xs_path(self.xs_path)
2608 return self._vbd
2610 def get_tapdisk(self):
2611 if not self._tapdisk:
2612 minor = self.get_vbd().get_physical_device().minor
2613 self._tapdisk = Tapdisk.from_minor(minor)
2614 return self._tapdisk
2615 #
2616 # Events
2617 #
2619 def __add(self):
2620 vbd = self.get_vbd()
2621 # Manage blkback transitions
2622 # self._manage_vbd()
2624 vbd.create_physical_device()
2626 vbd.signal_hotplug()
2628 @retried(backoff=.5, limit=10)
2629 def add(self):
2630 try:
2631 self.__add()
2632 except Attribute.NoSuchAttribute as e:
2633 #
2634 # FIXME: KOBJ_ADD is racing backend.probe, which
2635 # registers device attributes. So poll a little.
2636 #
2637 self.warn("%s, still trying." % e)
2638 raise RetryLoop.TransientFailure(e)
2640 def __change(self):
2641 vbd = self.get_vbd()
2643 # 1. Pause or resume tapdisk (if there is one)
2645 if vbd.has_tap():
2646 pass
2647 #self._pause_update_tap()
2649 # 2. Signal Xapi.VBD.pause/resume completion
2651 self._signal_xapi()
2653 def change(self):
2654 vbd = self.get_vbd()
2656 # NB. Beware of spurious change events between shutdown
2657 # completion and device removal. Also, Xapi.VM.migrate will
2658 # hammer a couple extra shutdown-requests into the source VBD.
2660 while True:
2661 vbd.begin()
2663 if not vbd.exists() or \
2664 vbd.shutdown_done():
2665 break
2667 self.__change()
2669 if vbd.commit():
2670 return
2672 vbd.abort()
2673 self.info("spurious uevent, ignored.")
2675 def remove(self):
2676 vbd = self.get_vbd()
2678 vbd.signal_hotplug(False)
2680 ACTIONS = {'add': add,
2681 'change': change,
2682 'remove': remove}
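# Editor's note (values are hypothetical): a udev event with ACTION=add and
# XENBUS_PATH=backend/vbd/5/51712 is dispatched through ACTIONS to add()
# above, which writes "physical-device" and signals hotplug for that VBD; an
# unknown ACTION raises UnhandledEvent.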
2683 #
2684 # VDI.pause
2685 #
2687 def _tap_should_pause(self):
2688 """Enumerate all VBDs on our tapdisk. Returns true iff any was
2689 paused"""
2691 tapdisk = self.get_tapdisk()
2692 TapState = Tapdisk.PauseState
2694 PAUSED = 'P'
2695 RUNNING = 'R'
2696 PAUSED_SHUTDOWN = 'P,S'
2697 # NB. Shutdown/paused is special: the VBD is not going to
2698 # restart, so we count it as RUNNING here. Still better than
2699 # backtracking a removed device during Vbd.unplug completion.
2701 next = TapState.RUNNING
2702 vbds = {}
2704 for vbd in Blkback.find_by_tap(tapdisk):
2705 name = str(vbd)
2707 pausing = vbd.pause_requested()
2708 closing = vbd.shutdown_requested()
2709 running = vbd.running()
2711 if pausing:
2712 if closing and not running:
2713 vbds[name] = PAUSED_SHUTDOWN
2714 else:
2715 vbds[name] = PAUSED
2716 next = TapState.PAUSED
2718 else:
2719 vbds[name] = RUNNING
2721 self.info("tapdev%d (%s): %s -> %s"
2722 % (tapdisk.minor, tapdisk.pause_state(),
2723 vbds, next))
2725 return next == TapState.PAUSED
2727 def _pause_update_tap(self):
2728 vbd = self.get_vbd()
2730 if self._tap_should_pause():
2731 self._pause_tap()
2732 else:
2733 self._resume_tap()
2735 def _pause_tap(self):
2736 tapdisk = self.get_tapdisk()
2738 if not tapdisk.is_paused():
2739 self.info("pausing %s" % tapdisk)
2740 tapdisk.pause()
2742 def _resume_tap(self):
2743 tapdisk = self.get_tapdisk()
2745 # NB. Raw VDI snapshots. Refresh the physical path and
2746 # type while resuming.
2747 vbd = self.get_vbd()
2748 vdi_uuid = vbd.get_vdi_uuid()
2750 if tapdisk.is_paused():
2751 self.info("loading vdi uuid=%s" % vdi_uuid)
2752 vdi = VDI.from_cli(vdi_uuid)
2753 _type = vdi.get_tap_type()
2754 path = vdi.get_phy_path()
2755 self.info("resuming %s on %s:%s" % (tapdisk, _type, path))
2756 tapdisk.unpause(_type, path)
2757 #
2758 # VBD.pause/shutdown
2759 #
2761 def _manage_vbd(self):
2762 vbd = self.get_vbd()
2763 # NB. Hook into VBD state transitions.
2765 events = vbd.get_queue_events()
2767 mask = 0
2768 mask |= events.QUEUE_PAUSE_DONE # pause/unpause
2769 mask |= events.QUEUE_SHUTDOWN_DONE # shutdown
2770 # TODO: mask |= events.QUEUE_SHUTDOWN_REQUEST, for shutdown=force
2771 # TODO: mask |= events.QUEUE_RUNNING, for ionice updates etc
2773 events.set_mask(mask)
2774 self.info("wrote %s = %#02x" % (events.path, mask))
2776 def _signal_xapi(self):
2777 vbd = self.get_vbd()
2779 pausing = vbd.pause_requested()
2780 closing = vbd.shutdown_requested()
2781 running = vbd.running()
2783 handled = 0
2785 if pausing and not running:
2786 if not vbd.has_xs_key('pause-done'):
2787 vbd.write('pause-done', '')
2788 handled += 1
2790 if not pausing:
2791 if vbd.has_xs_key('pause-done'):
2792 vbd.rm('pause-done')
2793 handled += 1
2795 if closing and not running:
2796 if not vbd.has_xs_key('shutdown-done'):
2797 vbd.write('shutdown-done', '')
2798 handled += 1
2800 if handled > 1:
2801 self.warn("handled %d events, " % handled +
2802 "pausing=%s closing=%s running=%s" % \
2803 (pausing, closing, running))
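# Editor's note: "pause-done" and "shutdown-done" are the xenstore completion
# markers signalled to xapi; they are only written once the VBD queue has
# actually stopped running, and "pause-done" is removed again as soon as the
# pause request goes away.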
2805if __name__ == '__main__': 2805 ↛ 2807line 2805 didn't jump to line 2807, because the condition on line 2805 was never true
2807 import sys
2808 prog = os.path.basename(sys.argv[0])
2810 #
2811 # Simple CLI interface for manual operation
2812 #
2813 # tap.* level calls go down to local Tapdisk()s (by physical path)
2814 # vdi.* level calls run the plugin calls across host boundaries.
2815 #
2817 def usage(stream):
2818 print("usage: %s tap.{list|major}" % prog, file=stream)
2819 print(" %s tap.{launch|find|get|pause|" % prog + \
2820 "unpause|shutdown|stats} {[<tt>:]<path>} | [minor=]<int> | .. }", file=stream)
2821 print(" %s vbd.uevent" % prog, file=stream)
2823 try:
2824 cmd = sys.argv[1]
2825 except IndexError:
2826 usage(sys.stderr)
2827 sys.exit(1)
2829 try:
2830 _class, method = cmd.split('.')
2831 except:
2832 usage(sys.stderr)
2833 sys.exit(1)
2835 #
2836 # Local Tapdisks
2837 #
2839 if cmd == 'tap.major':
2841 print("%d" % Tapdisk.major())
2843 elif cmd == 'tap.launch':
2845 tapdisk = Tapdisk.launch_from_arg(sys.argv[2])
2846 print("Launched %s" % tapdisk, file=sys.stderr)
2848 elif _class == 'tap':
2850 attrs = {}
2851 for item in sys.argv[2:]:
2852 try:
2853 key, val = item.split('=')
2854 attrs[key] = val
2855 continue
2856 except ValueError:
2857 pass
2859 try:
2860 attrs['minor'] = int(item)
2861 continue
2862 except ValueError:
2863 pass
2865 try:
2866 arg = Tapdisk.Arg.parse(item)
2867 attrs['_type'] = arg.type
2868 attrs['path'] = arg.path
2869 continue
2870 except Tapdisk.Arg.InvalidArgument:
2871 pass
2873 attrs['path'] = item
2875 if cmd == 'tap.list':
2877 for tapdisk in Tapdisk.list( ** attrs):
2878 blktap = tapdisk.get_blktap()
2879 print(tapdisk, end=' ')
2880 print("%s: task=%s pool=%s" % \
2881 (blktap,
2882 blktap.get_task_pid(),
2883 blktap.get_pool_name()))
2885 elif cmd == 'tap.vbds':
2886 # Find all Blkback instances for a given tapdisk
2888 for tapdisk in Tapdisk.list( ** attrs):
2889 print("%s:" % tapdisk, end=' ')
2890 for vbd in Blkback.find_by_tap(tapdisk):
2891 print(vbd, end=' ')
2892 print()
2894 else:
2896 if not attrs:
2897 usage(sys.stderr)
2898 sys.exit(1)
2900 try:
2901 tapdisk = Tapdisk.get( ** attrs)
2902 except TypeError:
2903 usage(sys.stderr)
2904 sys.exit(1)
2906 if cmd == 'tap.shutdown':
2907 # Shutdown a running tapdisk, or raise
2908 tapdisk.shutdown()
2909 print("Shut down %s" % tapdisk, file=sys.stderr)
2911 elif cmd == 'tap.pause':
2912 # Pause an unpaused tapdisk, or raise
2913 tapdisk.pause()
2914 print("Paused %s" % tapdisk, file=sys.stderr)
2916 elif cmd == 'tap.unpause':
2917 # Unpause a paused tapdisk, or raise
2918 tapdisk.unpause()
2919 print("Unpaused %s" % tapdisk, file=sys.stderr)
2921 elif cmd == 'tap.stats':
2922 # Gather tapdisk status
2923 stats = tapdisk.stats()
2924 print("%s:" % tapdisk)
2925 print(json.dumps(stats, indent=True))
2927 else:
2928 usage(sys.stderr)
2929 sys.exit(1)
2931 elif cmd == 'vbd.uevent':
2933 hnd = BlkbackEventHandler(cmd)
2935 if not sys.stdin.isatty():
2936 try:
2937 hnd.run()
2938 except Exception as e:
2939 hnd.error("Unhandled Exception: %s" % e)
2941 import traceback
2942 _type, value, tb = sys.exc_info()
2943 trace = traceback.format_exception(_type, value, tb)
2944 for entry in trace:
2945 for line in entry.rstrip().split('\n'):
2946 util.SMlog(line)
2947 else:
2948 hnd.run()
2950 elif cmd == 'vbd.list':
2952 for vbd in Blkback.find():
2953 print(vbd, \
2954 "physical-device=%s" % vbd.get_physical_device(), \
2955 "pause=%s" % vbd.pause_requested())
2957 else:
2958 usage(sys.stderr)
2959 sys.exit(1)
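# Editor's note: example invocations of the CLI above (illustrative; the VHD
# path and minor number are hypothetical):
#
#   blktap2.py tap.major                 print the tapdev block-device major
#   blktap2.py tap.launch vhd:/tmp/disk.vhd
#   blktap2.py tap.list                  list tapdisks with task/pool info
#   blktap2.py tap.pause 2               pause the tapdisk with minor 2
#   blktap2.py vbd.uevent                run as udev handler (reads ACTION,
#                                        XENBUS_PATH from the environment)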