Coverage for drivers/LVHDSR.py : 45%

Hot-keys on this page
r m x p toggle line displays
j k next/prev highlighted chunk
0 (zero) top of page
1 (one) first highlighted chunk
1#!/usr/bin/python3
2#
3# Copyright (C) Citrix Systems Inc.
4#
5# This program is free software; you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License as published
7# by the Free Software Foundation; version 2.1 only.
8#
9# This program is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with this program; if not, write to the Free Software Foundation, Inc.,
16# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17#
18# LVHDSR: VHD on LVM storage repository
19#
21from sm_typing import Dict, List, override
23import SR
24from SR import deviceCheck
25import VDI
26import SRCommand
27import util
28import lvutil
29import lvmcache
30import vhdutil
31import lvhdutil
32import scsiutil
33import os
34import sys
35import time
36import errno
37import xs_errors
38import cleanup
39import blktap2
40from journaler import Journaler
41from lock import Lock
42from refcounter import RefCounter
43from ipc import IPCFlag
44from lvmanager import LVActivator
45import XenAPI # pylint: disable=import-error
46import re
47from srmetadata import ALLOCATION_TAG, NAME_LABEL_TAG, NAME_DESCRIPTION_TAG, \
48 UUID_TAG, IS_A_SNAPSHOT_TAG, SNAPSHOT_OF_TAG, TYPE_TAG, VDI_TYPE_TAG, \
49 READ_ONLY_TAG, MANAGED_TAG, SNAPSHOT_TIME_TAG, METADATA_OF_POOL_TAG, \
50 LVMMetadataHandler, METADATA_OBJECT_TYPE_VDI, \
51 METADATA_OBJECT_TYPE_SR, METADATA_UPDATE_OBJECT_TYPE_TAG
52from metadata import retrieveXMLfromFile, _parseXML
53from xmlrpc.client import DateTime
54import glob
55from constants import CBTLOG_TAG
56from fairlock import Fairlock
57DEV_MAPPER_ROOT = os.path.join('/dev/mapper', lvhdutil.VG_PREFIX)
59geneology: Dict[str, List[str]] = {}
60CAPABILITIES = ["SR_PROBE", "SR_UPDATE", "SR_TRIM",
61 "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH", "VDI_MIRROR",
62 "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "ATOMIC_PAUSE",
63 "VDI_RESET_ON_BOOT/2", "VDI_UPDATE", "VDI_CONFIG_CBT",
64 "VDI_ACTIVATE", "VDI_DEACTIVATE"]
66CONFIGURATION = [['device', 'local device path (required) (e.g. /dev/sda3)']]
68DRIVER_INFO = {
69 'name': 'Local VHD on LVM',
70 'description': 'SR plugin which represents disks as VHD disks on ' + \
71 'Logical Volumes within a locally-attached Volume Group',
72 'vendor': 'XenSource Inc',
73 'copyright': '(C) 2008 XenSource Inc',
74 'driver_version': '1.0',
75 'required_api_version': '1.0',
76 'capabilities': CAPABILITIES,
77 'configuration': CONFIGURATION
78 }
80PARAM_VHD = "vhd"
81PARAM_RAW = "raw"
83OPS_EXCLUSIVE = [
84 "sr_create", "sr_delete", "sr_attach", "sr_detach", "sr_scan",
85 "sr_update", "vdi_create", "vdi_delete", "vdi_resize", "vdi_snapshot",
86 "vdi_clone"]
88# Log if snapshot pauses VM for more than this many seconds
89LONG_SNAPTIME = 60
91class LVHDSR(SR.SR):
92 DRIVER_TYPE = 'lvhd'
94 PROVISIONING_TYPES = ["thin", "thick"]
95 PROVISIONING_DEFAULT = "thick"
96 THIN_PLUGIN = "lvhd-thin"
98 PLUGIN_ON_SLAVE = "on-slave"
100 FLAG_USE_VHD = "use_vhd"
101 MDVOLUME_NAME = "MGT"
103 ALLOCATION_QUANTUM = "allocation_quantum"
104 INITIAL_ALLOCATION = "initial_allocation"
106 LOCK_RETRY_INTERVAL = 3
107 LOCK_RETRY_ATTEMPTS = 10
109 TEST_MODE_KEY = "testmode"
110 TEST_MODE_VHD_FAIL_REPARENT_BEGIN = "vhd_fail_reparent_begin"
111 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR = "vhd_fail_reparent_locator"
112 TEST_MODE_VHD_FAIL_REPARENT_END = "vhd_fail_reparent_end"
113 TEST_MODE_VHD_FAIL_RESIZE_BEGIN = "vhd_fail_resize_begin"
114 TEST_MODE_VHD_FAIL_RESIZE_DATA = "vhd_fail_resize_data"
115 TEST_MODE_VHD_FAIL_RESIZE_METADATA = "vhd_fail_resize_metadata"
116 TEST_MODE_VHD_FAIL_RESIZE_END = "vhd_fail_resize_end"
118 ENV_VAR_VHD_TEST = {
119 TEST_MODE_VHD_FAIL_REPARENT_BEGIN:
120 "VHD_UTIL_TEST_FAIL_REPARENT_BEGIN",
121 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR:
122 "VHD_UTIL_TEST_FAIL_REPARENT_LOCATOR",
123 TEST_MODE_VHD_FAIL_REPARENT_END:
124 "VHD_UTIL_TEST_FAIL_REPARENT_END",
125 TEST_MODE_VHD_FAIL_RESIZE_BEGIN:
126 "VHD_UTIL_TEST_FAIL_RESIZE_BEGIN",
127 TEST_MODE_VHD_FAIL_RESIZE_DATA:
128 "VHD_UTIL_TEST_FAIL_RESIZE_DATA_MOVED",
129 TEST_MODE_VHD_FAIL_RESIZE_METADATA:
130 "VHD_UTIL_TEST_FAIL_RESIZE_METADATA_MOVED",
131 TEST_MODE_VHD_FAIL_RESIZE_END:
132 "VHD_UTIL_TEST_FAIL_RESIZE_END"
133 }
134 testMode = ""
136 legacyMode = True
138 @override
139 @staticmethod
140 def handles(type) -> bool:
141 """Returns True if this SR class understands the given dconf string"""
142 # we can pose as LVMSR or EXTSR for compatibility purposes
143 if __name__ == '__main__':
144 name = sys.argv[0]
145 else:
146 name = __name__
147 if name.endswith("LVMSR"):
148 return type == "lvm"
149 elif name.endswith("EXTSR"):
150 return type == "ext"
151 return type == LVHDSR.DRIVER_TYPE
153 @override
154 def load(self, sr_uuid) -> None:
155 self.ops_exclusive = OPS_EXCLUSIVE
157 self.isMaster = False
158 if 'SRmaster' in self.dconf and self.dconf['SRmaster'] == 'true':
159 self.isMaster = True
161 self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
162 self.sr_vditype = SR.DEFAULT_TAP
163 self.uuid = sr_uuid
164 self.vgname = lvhdutil.VG_PREFIX + self.uuid
165 self.path = os.path.join(lvhdutil.VG_LOCATION, self.vgname)
166 self.mdpath = os.path.join(self.path, self.MDVOLUME_NAME)
167 self.provision = self.PROVISIONING_DEFAULT
169 has_sr_ref = self.srcmd.params.get("sr_ref")
170 if has_sr_ref:
171 self.other_conf = self.session.xenapi.SR.get_other_config(self.sr_ref)
172 else:
173 self.other_conf = None
175 self.lvm_conf = None
176 if self.other_conf:
177 self.lvm_conf = self.other_conf.get('lvm-conf')
179 try:
180 self.lvmCache = lvmcache.LVMCache(self.vgname, self.lvm_conf)
181 except:
182 raise xs_errors.XenError('SRUnavailable', \
183 opterr='Failed to initialise the LVMCache')
184 self.lvActivator = LVActivator(self.uuid, self.lvmCache)
185 self.journaler = Journaler(self.lvmCache)
186 if not has_sr_ref:
187 return # must be a probe call
188 # Test for thick vs thin provisioning conf parameter
189 if 'allocation' in self.dconf: 189 ↛ 190line 189 didn't jump to line 190, because the condition on line 189 was never true
190 if self.dconf['allocation'] in self.PROVISIONING_TYPES:
191 self.provision = self.dconf['allocation']
192 else:
193 raise xs_errors.XenError('InvalidArg', \
194 opterr='Allocation parameter must be one of %s' % self.PROVISIONING_TYPES)
196 if self.other_conf.get(self.TEST_MODE_KEY): 196 ↛ 200line 196 didn't jump to line 200, because the condition on line 196 was never false
197 self.testMode = self.other_conf[self.TEST_MODE_KEY]
198 self._prepareTestMode()
200 self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref)
201 # sm_config flag overrides PBD, if any
202 if self.sm_config.get('allocation') in self.PROVISIONING_TYPES:
203 self.provision = self.sm_config.get('allocation')
205 if self.sm_config.get(self.FLAG_USE_VHD) == "true":
206 self.legacyMode = False
208 if lvutil._checkVG(self.vgname):
209 if self.isMaster and not self.cmd in ["vdi_attach", "vdi_detach", 209 ↛ 212line 209 didn't jump to line 212, because the condition on line 209 was never false
210 "vdi_activate", "vdi_deactivate"]:
211 self._undoAllJournals()
212 if not self.cmd in ["sr_attach", "sr_probe"]:
213 self._checkMetadataVolume()
215 self.mdexists = False
217 # get a VDI -> TYPE map from the storage
218 contains_uuid_regex = \
219 re.compile("^.*[0-9a-f]{8}-(([0-9a-f]{4})-){3}[0-9a-f]{12}.*")
220 self.storageVDIs = {}
222 for key in self.lvmCache.lvs.keys(): 222 ↛ 224line 222 didn't jump to line 224, because the loop on line 222 never started
223 # if the lvname has a uuid in it
224 type = None
225 if contains_uuid_regex.search(key) is not None:
226 if key.startswith(lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD]):
227 type = vhdutil.VDI_TYPE_VHD
228 vdi = key[len(lvhdutil.LV_PREFIX[type]):]
229 elif key.startswith(lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_RAW]):
230 type = vhdutil.VDI_TYPE_RAW
231 vdi = key[len(lvhdutil.LV_PREFIX[type]):]
232 else:
233 continue
235 if type is not None:
236 self.storageVDIs[vdi] = type
238 # check if metadata volume exists
239 try:
240 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME)
241 except:
242 pass
244 @override
245 def cleanup(self) -> None:
246 # we don't need to hold the lock to dec refcounts of activated LVs
247 if not self.lvActivator.deactivateAll(): 247 ↛ 248line 247 didn't jump to line 248, because the condition on line 247 was never true
248 raise util.SMException("failed to deactivate LVs")
250 def updateSRMetadata(self, allocation):
251 try:
252 # Add SR specific SR metadata
253 sr_info = \
254 {ALLOCATION_TAG: allocation,
255 UUID_TAG: self.uuid,
256 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_label(self.sr_ref)),
257 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_description(self.sr_ref))
258 }
260 vdi_info = {}
261 for vdi in self.session.xenapi.SR.get_VDIs(self.sr_ref):
262 vdi_uuid = self.session.xenapi.VDI.get_uuid(vdi)
264 # Create the VDI entry in the SR metadata
265 vdi_info[vdi_uuid] = \
266 {
267 UUID_TAG: vdi_uuid,
268 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi)),
269 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi)),
270 IS_A_SNAPSHOT_TAG: \
271 int(self.session.xenapi.VDI.get_is_a_snapshot(vdi)),
272 SNAPSHOT_OF_TAG: \
273 self.session.xenapi.VDI.get_snapshot_of(vdi),
274 SNAPSHOT_TIME_TAG: \
275 self.session.xenapi.VDI.get_snapshot_time(vdi),
276 TYPE_TAG: \
277 self.session.xenapi.VDI.get_type(vdi),
278 VDI_TYPE_TAG: \
279 self.session.xenapi.VDI.get_sm_config(vdi)['vdi_type'],
280 READ_ONLY_TAG: \
281 int(self.session.xenapi.VDI.get_read_only(vdi)),
282 METADATA_OF_POOL_TAG: \
283 self.session.xenapi.VDI.get_metadata_of_pool(vdi),
284 MANAGED_TAG: \
285 int(self.session.xenapi.VDI.get_managed(vdi))
286 }
287 LVMMetadataHandler(self.mdpath).writeMetadata(sr_info, vdi_info)
289 except Exception as e:
290 raise xs_errors.XenError('MetadataError', \
291 opterr='Error upgrading SR Metadata: %s' % str(e))
293 def syncMetadataAndStorage(self):
294 try:
295 # if a VDI is present in the metadata but not in the storage
296 # then delete it from the metadata
297 vdi_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1]
298 for vdi in list(vdi_info.keys()):
299 update_map = {}
300 if not vdi_info[vdi][UUID_TAG] in set(self.storageVDIs.keys()): 300 ↛ 307line 300 didn't jump to line 307, because the condition on line 300 was never false
301 # delete this from metadata
302 LVMMetadataHandler(self.mdpath). \
303 deleteVdiFromMetadata(vdi_info[vdi][UUID_TAG])
304 else:
305 # search for this in the metadata, compare types
306 # self.storageVDIs is a map of vdi_uuid to vdi_type
307 if vdi_info[vdi][VDI_TYPE_TAG] != \
308 self.storageVDIs[vdi_info[vdi][UUID_TAG]]:
309 # storage type takes authority
310 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] \
311 = METADATA_OBJECT_TYPE_VDI
312 update_map[UUID_TAG] = vdi_info[vdi][UUID_TAG]
313 update_map[VDI_TYPE_TAG] = \
314 self.storageVDIs[vdi_info[vdi][UUID_TAG]]
315 LVMMetadataHandler(self.mdpath) \
316 .updateMetadata(update_map)
317 else:
318 # This should never happen
319 pass
321 except Exception as e:
322 raise xs_errors.XenError('MetadataError', \
323 opterr='Error synching SR Metadata and storage: %s' % str(e))
325 def syncMetadataAndXapi(self):
326 try:
327 # get metadata
328 (sr_info, vdi_info) = \
329 LVMMetadataHandler(self.mdpath, False).getMetadata()
331 # First synch SR parameters
332 self.update(self.uuid)
334 # Now update the VDI information in the metadata if required
335 for vdi_offset in vdi_info.keys():
336 try:
337 vdi_ref = \
338 self.session.xenapi.VDI.get_by_uuid( \
339 vdi_info[vdi_offset][UUID_TAG])
340 except:
341 # may be the VDI is not in XAPI yet dont bother
342 continue
344 new_name_label = util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi_ref))
345 new_name_description = util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi_ref))
347 if vdi_info[vdi_offset][NAME_LABEL_TAG] != new_name_label or \
348 vdi_info[vdi_offset][NAME_DESCRIPTION_TAG] != \
349 new_name_description:
350 update_map = {}
351 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \
352 METADATA_OBJECT_TYPE_VDI
353 update_map[UUID_TAG] = vdi_info[vdi_offset][UUID_TAG]
354 update_map[NAME_LABEL_TAG] = new_name_label
355 update_map[NAME_DESCRIPTION_TAG] = new_name_description
356 LVMMetadataHandler(self.mdpath) \
357 .updateMetadata(update_map)
358 except Exception as e:
359 raise xs_errors.XenError('MetadataError', \
360 opterr='Error synching SR Metadata and XAPI: %s' % str(e))
362 def _checkMetadataVolume(self):
363 util.SMlog("Entering _checkMetadataVolume")
364 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME)
365 if self.isMaster: 365 ↛ 381line 365 didn't jump to line 381, because the condition on line 365 was never false
366 if self.mdexists and self.cmd == "sr_attach":
367 try:
368 # activate the management volume
369 # will be deactivated at detach time
370 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME)
371 self._synchSmConfigWithMetaData()
372 util.SMlog("Sync SR metadata and the state on the storage.")
373 self.syncMetadataAndStorage()
374 self.syncMetadataAndXapi()
375 except Exception as e:
376 util.SMlog("Exception in _checkMetadataVolume, " \
377 "Error: %s." % str(e))
378 elif not self.mdexists and not self.legacyMode: 378 ↛ 381line 378 didn't jump to line 381, because the condition on line 378 was never false
379 self._introduceMetaDataVolume()
381 if self.mdexists:
382 self.legacyMode = False
384 def _synchSmConfigWithMetaData(self):
385 util.SMlog("Synching sm-config with metadata volume")
387 try:
388 # get SR info from metadata
389 sr_info = {}
390 map = {}
391 sr_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[0]
393 if sr_info == {}: 393 ↛ 394line 393 didn't jump to line 394, because the condition on line 393 was never true
394 raise Exception("Failed to get SR information from metadata.")
396 if "allocation" in sr_info: 396 ↛ 400line 396 didn't jump to line 400, because the condition on line 396 was never false
397 self.provision = sr_info.get("allocation")
398 map['allocation'] = sr_info.get("allocation")
399 else:
400 raise Exception("Allocation key not found in SR metadata. "
401 "SR info found: %s" % sr_info)
403 except Exception as e:
404 raise xs_errors.XenError(
405 'MetadataError',
406 opterr='Error reading SR params from '
407 'metadata Volume: %s' % str(e))
408 try:
409 map[self.FLAG_USE_VHD] = 'true'
410 self.session.xenapi.SR.set_sm_config(self.sr_ref, map)
411 except:
412 raise xs_errors.XenError(
413 'MetadataError',
414 opterr='Error updating sm_config key')
416 def _introduceMetaDataVolume(self):
417 util.SMlog("Creating Metadata volume")
418 try:
419 config = {}
420 self.lvmCache.create(self.MDVOLUME_NAME, 4 * 1024 * 1024)
422 # activate the management volume, will be deactivated at detach time
423 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME)
425 name_label = util.to_plain_string( \
426 self.session.xenapi.SR.get_name_label(self.sr_ref))
427 name_description = util.to_plain_string( \
428 self.session.xenapi.SR.get_name_description(self.sr_ref))
429 config[self.FLAG_USE_VHD] = "true"
430 config['allocation'] = self.provision
431 self.session.xenapi.SR.set_sm_config(self.sr_ref, config)
433 # Add the SR metadata
434 self.updateSRMetadata(self.provision)
435 except Exception as e:
436 raise xs_errors.XenError('MetadataError', \
437 opterr='Error introducing Metadata Volume: %s' % str(e))
439 def _removeMetadataVolume(self):
440 if self.mdexists:
441 try:
442 self.lvmCache.remove(self.MDVOLUME_NAME)
443 except:
444 raise xs_errors.XenError('MetadataError', \
445 opterr='Failed to delete MGT Volume')
447 def _refresh_size(self):
448 """
449 Refreshs the size of the backing device.
450 Return true if all paths/devices agree on the same size.
451 """
452 if hasattr(self, 'SCSIid'): 452 ↛ 454line 452 didn't jump to line 454, because the condition on line 452 was never true
453 # LVHDoHBASR, LVHDoISCSISR
454 return scsiutil.refresh_lun_size_by_SCSIid(getattr(self, 'SCSIid'))
455 else:
456 # LVHDSR
457 devices = self.dconf['device'].split(',')
458 scsiutil.refreshdev(devices)
459 return True
461 def _expand_size(self):
462 """
463 Expands the size of the SR by growing into additional availiable
464 space, if extra space is availiable on the backing device.
465 Needs to be called after a successful call of _refresh_size.
466 """
467 currentvgsize = lvutil._getVGstats(self.vgname)['physical_size']
468 # We are comparing PV- with VG-sizes that are aligned. Need a threshold
469 resizethreshold = 100 * 1024 * 1024 # 100MB
470 devices = self.dconf['device'].split(',')
471 totaldevicesize = 0
472 for device in devices:
473 totaldevicesize = totaldevicesize + scsiutil.getsize(device)
474 if totaldevicesize >= (currentvgsize + resizethreshold):
475 try:
476 if hasattr(self, 'SCSIid'): 476 ↛ 478line 476 didn't jump to line 478, because the condition on line 476 was never true
477 # LVHDoHBASR, LVHDoISCSISR might have slaves
478 scsiutil.refresh_lun_size_by_SCSIid_on_slaves(self.session,
479 getattr(self, 'SCSIid'))
480 util.SMlog("LVHDSR._expand_size for %s will resize the pv." %
481 self.uuid)
482 for pv in lvutil.get_pv_for_vg(self.vgname):
483 lvutil.resizePV(pv)
484 except:
485 util.logException("LVHDSR._expand_size for %s failed to resize"
486 " the PV" % self.uuid)
488 @override
489 @deviceCheck
490 def create(self, uuid, size) -> None:
491 util.SMlog("LVHDSR.create for %s" % self.uuid)
492 if not self.isMaster:
493 util.SMlog('sr_create blocked for non-master')
494 raise xs_errors.XenError('LVMMaster')
496 if lvutil._checkVG(self.vgname):
497 raise xs_errors.XenError('SRExists')
499 # Check none of the devices already in use by other PBDs
500 if util.test_hostPBD_devs(self.session, uuid, self.dconf['device']):
501 raise xs_errors.XenError('SRInUse')
503 # Check serial number entry in SR records
504 for dev in self.dconf['device'].split(','):
505 if util.test_scsiserial(self.session, dev):
506 raise xs_errors.XenError('SRInUse')
508 lvutil.createVG(self.dconf['device'], self.vgname)
510 #Update serial number string
511 scsiutil.add_serial_record(self.session, self.sr_ref, \
512 scsiutil.devlist_to_serialstring(self.dconf['device'].split(',')))
514 # since this is an SR.create turn off legacy mode
515 self.session.xenapi.SR.add_to_sm_config(self.sr_ref, \
516 self.FLAG_USE_VHD, 'true')
518 @override
519 def delete(self, uuid) -> None:
520 util.SMlog("LVHDSR.delete for %s" % self.uuid)
521 if not self.isMaster:
522 raise xs_errors.XenError('LVMMaster')
523 cleanup.gc_force(self.session, self.uuid)
525 success = True
526 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'):
527 if util.extractSRFromDevMapper(fileName) != self.uuid:
528 continue
530 if util.doesFileHaveOpenHandles(fileName):
531 util.SMlog("LVHDSR.delete: The dev mapper entry %s has open " \
532 "handles" % fileName)
533 success = False
534 continue
536 # Now attempt to remove the dev mapper entry
537 if not lvutil.removeDevMapperEntry(fileName, False):
538 success = False
539 continue
541 try:
542 lvname = os.path.basename(fileName.replace('-', '/'). \
543 replace('//', '-'))
544 lpath = os.path.join(self.path, lvname)
545 os.unlink(lpath)
546 except OSError as e:
547 if e.errno != errno.ENOENT:
548 util.SMlog("LVHDSR.delete: failed to remove the symlink for " \
549 "file %s. Error: %s" % (fileName, str(e)))
550 success = False
552 if success:
553 try:
554 if util.pathexists(self.path):
555 os.rmdir(self.path)
556 except Exception as e:
557 util.SMlog("LVHDSR.delete: failed to remove the symlink " \
558 "directory %s. Error: %s" % (self.path, str(e)))
559 success = False
561 self._removeMetadataVolume()
562 self.lvmCache.refresh()
563 if len(lvhdutil.getLVInfo(self.lvmCache)) > 0:
564 raise xs_errors.XenError('SRNotEmpty')
566 if not success:
567 raise Exception("LVHDSR delete failed, please refer to the log " \
568 "for details.")
570 lvutil.removeVG(self.dconf['device'], self.vgname)
571 self._cleanup()
573 @override
574 def attach(self, uuid) -> None:
575 util.SMlog("LVHDSR.attach for %s" % self.uuid)
577 self._cleanup(True) # in case of host crashes, if detach wasn't called
579 if not util.match_uuid(self.uuid) or not lvutil._checkVG(self.vgname): 579 ↛ 580line 579 didn't jump to line 580, because the condition on line 579 was never true
580 raise xs_errors.XenError('SRUnavailable', \
581 opterr='no such volume group: %s' % self.vgname)
583 # Refresh the metadata status
584 self._checkMetadataVolume()
586 refreshsizeok = self._refresh_size()
588 if self.isMaster: 588 ↛ 599line 588 didn't jump to line 599, because the condition on line 588 was never false
589 if refreshsizeok: 589 ↛ 593line 589 didn't jump to line 593, because the condition on line 589 was never false
590 self._expand_size()
592 # Update SCSIid string
593 util.SMlog("Calling devlist_to_serial")
594 scsiutil.add_serial_record(
595 self.session, self.sr_ref,
596 scsiutil.devlist_to_serialstring(self.dconf['device'].split(',')))
598 # Test Legacy Mode Flag and update if VHD volumes exist
599 if self.isMaster and self.legacyMode: 599 ↛ 600line 599 didn't jump to line 600, because the condition on line 599 was never true
600 vdiInfo = lvhdutil.getVDIInfo(self.lvmCache)
601 for uuid, info in vdiInfo.items():
602 if info.vdiType == vhdutil.VDI_TYPE_VHD:
603 self.legacyMode = False
604 map = self.session.xenapi.SR.get_sm_config(self.sr_ref)
605 self._introduceMetaDataVolume()
606 break
608 # Set the block scheduler
609 for dev in self.dconf['device'].split(','):
610 self.block_setscheduler(dev)
612 @override
613 def detach(self, uuid) -> None:
614 util.SMlog("LVHDSR.detach for %s" % self.uuid)
615 cleanup.abort(self.uuid)
617 # Do a best effort cleanup of the dev mapper entries
618 # go through all devmapper entries for this VG
619 success = True
620 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'):
621 if util.extractSRFromDevMapper(fileName) != self.uuid: 621 ↛ 622line 621 didn't jump to line 622, because the condition on line 621 was never true
622 continue
624 with Fairlock('devicemapper'):
625 # check if any file has open handles
626 if util.doesFileHaveOpenHandles(fileName):
627 # if yes, log this and signal failure
628 util.SMlog(
629 f"LVHDSR.detach: The dev mapper entry {fileName} has "
630 "open handles")
631 success = False
632 continue
634 # Now attempt to remove the dev mapper entry
635 if not lvutil.removeDevMapperEntry(fileName, False): 635 ↛ 636line 635 didn't jump to line 636, because the condition on line 635 was never true
636 success = False
637 continue
639 # also remove the symlinks from /dev/VG-XenStorage-SRUUID/*
640 try:
641 lvname = os.path.basename(fileName.replace('-', '/'). \
642 replace('//', '-'))
643 lvname = os.path.join(self.path, lvname)
644 util.force_unlink(lvname)
645 except Exception as e:
646 util.SMlog("LVHDSR.detach: failed to remove the symlink for " \
647 "file %s. Error: %s" % (fileName, str(e)))
648 success = False
650 # now remove the directory where the symlinks are
651 # this should pass as the directory should be empty by now
652 if success:
653 try:
654 if util.pathexists(self.path): 654 ↛ 655line 654 didn't jump to line 655, because the condition on line 654 was never true
655 os.rmdir(self.path)
656 except Exception as e:
657 util.SMlog("LVHDSR.detach: failed to remove the symlink " \
658 "directory %s. Error: %s" % (self.path, str(e)))
659 success = False
661 if not success:
662 raise Exception("SR detach failed, please refer to the log " \
663 "for details.")
665 # Don't delete lock files on the master as it will break the locking
666 # between SM and any GC thread that survives through SR.detach.
667 # However, we should still delete lock files on slaves as it is the
668 # only place to do so.
669 self._cleanup(self.isMaster)
671 @override
672 def forget_vdi(self, uuid) -> None:
673 if not self.legacyMode:
674 LVMMetadataHandler(self.mdpath).deleteVdiFromMetadata(uuid)
675 super(LVHDSR, self).forget_vdi(uuid)
677 @override
678 def scan(self, uuid) -> None:
679 activated = True
680 try:
681 lvname = ''
682 util.SMlog("LVHDSR.scan for %s" % self.uuid)
683 if not self.isMaster: 683 ↛ 684line 683 didn't jump to line 684, because the condition on line 683 was never true
684 util.SMlog('sr_scan blocked for non-master')
685 raise xs_errors.XenError('LVMMaster')
687 if self._refresh_size(): 687 ↛ 689line 687 didn't jump to line 689, because the condition on line 687 was never false
688 self._expand_size()
689 self.lvmCache.refresh()
690 cbt_vdis = self.lvmCache.getTagged(CBTLOG_TAG)
691 self._loadvdis()
692 stats = lvutil._getVGstats(self.vgname)
693 self.physical_size = stats['physical_size']
694 self.physical_utilisation = stats['physical_utilisation']
696 # Now check if there are any VDIs in the metadata, which are not in
697 # XAPI
698 if self.mdexists: 698 ↛ 808line 698 didn't jump to line 808, because the condition on line 698 was never false
699 vdiToSnaps: Dict[str, List[str]] = {}
700 # get VDIs from XAPI
701 vdis = self.session.xenapi.SR.get_VDIs(self.sr_ref)
702 vdi_uuids = set([])
703 for vdi in vdis:
704 vdi_uuids.add(self.session.xenapi.VDI.get_uuid(vdi))
706 info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1]
708 for vdi in list(info.keys()):
709 vdi_uuid = info[vdi][UUID_TAG]
710 if bool(int(info[vdi][IS_A_SNAPSHOT_TAG])): 710 ↛ 711line 710 didn't jump to line 711, because the condition on line 710 was never true
711 if info[vdi][SNAPSHOT_OF_TAG] in vdiToSnaps:
712 vdiToSnaps[info[vdi][SNAPSHOT_OF_TAG]].append(vdi_uuid)
713 else:
714 vdiToSnaps[info[vdi][SNAPSHOT_OF_TAG]] = [vdi_uuid]
716 if vdi_uuid not in vdi_uuids: 716 ↛ 717line 716 didn't jump to line 717, because the condition on line 716 was never true
717 util.SMlog("Introduce VDI %s as it is present in " \
718 "metadata and not in XAPI." % vdi_uuid)
719 sm_config = {}
720 sm_config['vdi_type'] = info[vdi][VDI_TYPE_TAG]
721 lvname = "%s%s" % \
722 (lvhdutil.LV_PREFIX[sm_config['vdi_type']], vdi_uuid)
723 self.lvmCache.activateNoRefcount(lvname)
724 activated = True
725 lvPath = os.path.join(self.path, lvname)
727 if info[vdi][VDI_TYPE_TAG] == vhdutil.VDI_TYPE_RAW:
728 size = self.lvmCache.getSize( \
729 lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_RAW] + \
730 vdi_uuid)
731 utilisation = \
732 util.roundup(lvutil.LVM_SIZE_INCREMENT,
733 int(size))
734 else:
735 parent = \
736 vhdutil._getVHDParentNoCheck(lvPath)
738 if parent is not None:
739 sm_config['vhd-parent'] = parent[len( \
740 lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD]):]
741 size = vhdutil.getSizeVirt(lvPath)
742 if self.provision == "thin":
743 utilisation = \
744 util.roundup(lvutil.LVM_SIZE_INCREMENT,
745 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE))
746 else:
747 utilisation = lvhdutil.calcSizeVHDLV(int(size))
749 vdi_ref = self.session.xenapi.VDI.db_introduce(
750 vdi_uuid,
751 info[vdi][NAME_LABEL_TAG],
752 info[vdi][NAME_DESCRIPTION_TAG],
753 self.sr_ref,
754 info[vdi][TYPE_TAG],
755 False,
756 bool(int(info[vdi][READ_ONLY_TAG])),
757 {},
758 vdi_uuid,
759 {},
760 sm_config)
762 self.session.xenapi.VDI.set_managed(vdi_ref,
763 bool(int(info[vdi][MANAGED_TAG])))
764 self.session.xenapi.VDI.set_virtual_size(vdi_ref,
765 str(size))
766 self.session.xenapi.VDI.set_physical_utilisation( \
767 vdi_ref, str(utilisation))
768 self.session.xenapi.VDI.set_is_a_snapshot( \
769 vdi_ref, bool(int(info[vdi][IS_A_SNAPSHOT_TAG])))
770 if bool(int(info[vdi][IS_A_SNAPSHOT_TAG])):
771 self.session.xenapi.VDI.set_snapshot_time( \
772 vdi_ref, DateTime(info[vdi][SNAPSHOT_TIME_TAG]))
773 if info[vdi][TYPE_TAG] == 'metadata':
774 self.session.xenapi.VDI.set_metadata_of_pool( \
775 vdi_ref, info[vdi][METADATA_OF_POOL_TAG])
777 # Update CBT status of disks either just added
778 # or already in XAPI
779 cbt_logname = "%s.%s" % (vdi_uuid, CBTLOG_TAG)
780 if cbt_logname in cbt_vdis: 780 ↛ 781line 780 didn't jump to line 781, because the condition on line 780 was never true
781 vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
782 self.session.xenapi.VDI.set_cbt_enabled(vdi_ref, True)
783 # For existing VDIs, update local state too
784 # Scan in base class SR updates existing VDIs
785 # again based on local states
786 if vdi_uuid in self.vdis:
787 self.vdis[vdi_uuid].cbt_enabled = True
788 cbt_vdis.remove(cbt_logname)
790 # Now set the snapshot statuses correctly in XAPI
791 for srcvdi in vdiToSnaps.keys(): 791 ↛ 792line 791 didn't jump to line 792, because the loop on line 791 never started
792 try:
793 srcref = self.session.xenapi.VDI.get_by_uuid(srcvdi)
794 except:
795 # the source VDI no longer exists, continue
796 continue
798 for snapvdi in vdiToSnaps[srcvdi]:
799 try:
800 # this might fail in cases where its already set
801 snapref = \
802 self.session.xenapi.VDI.get_by_uuid(snapvdi)
803 self.session.xenapi.VDI.set_snapshot_of(snapref, srcref)
804 except Exception as e:
805 util.SMlog("Setting snapshot failed. " \
806 "Error: %s" % str(e))
808 if cbt_vdis: 808 ↛ 819line 808 didn't jump to line 819, because the condition on line 808 was never false
809 # If we have items remaining in this list,
810 # they are cbt_metadata VDI that XAPI doesn't know about
811 # Add them to self.vdis and they'll get added to the DB
812 for cbt_vdi in cbt_vdis: 812 ↛ 813line 812 didn't jump to line 813, because the loop on line 812 never started
813 cbt_uuid = cbt_vdi.split(".")[0]
814 new_vdi = self.vdi(cbt_uuid)
815 new_vdi.ty = "cbt_metadata"
816 new_vdi.cbt_enabled = True
817 self.vdis[cbt_uuid] = new_vdi
819 super(LVHDSR, self).scan(uuid)
820 self._kickGC()
822 finally:
823 if lvname != '' and activated: 823 ↛ 824line 823 didn't jump to line 824, because the condition on line 823 was never true
824 self.lvmCache.deactivateNoRefcount(lvname)
826 @override
827 def update(self, uuid) -> None:
828 if not lvutil._checkVG(self.vgname): 828 ↛ 829line 828 didn't jump to line 829, because the condition on line 828 was never true
829 return
830 self._updateStats(uuid, 0)
832 if self.legacyMode: 832 ↛ 833line 832 didn't jump to line 833, because the condition on line 832 was never true
833 return
835 # synch name_label in metadata with XAPI
836 update_map = {}
837 update_map = {METADATA_UPDATE_OBJECT_TYPE_TAG: \
838 METADATA_OBJECT_TYPE_SR,
839 NAME_LABEL_TAG: util.to_plain_string( \
840 self.session.xenapi.SR.get_name_label(self.sr_ref)),
841 NAME_DESCRIPTION_TAG: util.to_plain_string( \
842 self.session.xenapi.SR.get_name_description(self.sr_ref))
843 }
844 LVMMetadataHandler(self.mdpath).updateMetadata(update_map)
846 def _updateStats(self, uuid, virtAllocDelta):
847 valloc = int(self.session.xenapi.SR.get_virtual_allocation(self.sr_ref))
848 self.virtual_allocation = valloc + virtAllocDelta
849 util.SMlog("Setting virtual_allocation of SR %s to %d" %
850 (uuid, self.virtual_allocation))
851 stats = lvutil._getVGstats(self.vgname)
852 self.physical_size = stats['physical_size']
853 self.physical_utilisation = stats['physical_utilisation']
854 self._db_update()
856 @override
857 @deviceCheck
858 def probe(self) -> str:
859 return lvutil.srlist_toxml(
860 lvutil.scan_srlist(lvhdutil.VG_PREFIX, self.dconf['device']),
861 lvhdutil.VG_PREFIX,
862 ('metadata' in self.srcmd.params['sr_sm_config'] and \
863 self.srcmd.params['sr_sm_config']['metadata'] == 'true'))
865 @override
866 def vdi(self, uuid) -> VDI.VDI:
867 return LVHDVDI(self, uuid)
869 def _loadvdis(self):
870 self.virtual_allocation = 0
871 self.vdiInfo = lvhdutil.getVDIInfo(self.lvmCache)
872 self.allVDIs = {}
874 for uuid, info in self.vdiInfo.items():
875 if uuid.startswith(cleanup.SR.TMP_RENAME_PREFIX): 875 ↛ 876line 875 didn't jump to line 876, because the condition on line 875 was never true
876 continue
877 if info.scanError: 877 ↛ 878line 877 didn't jump to line 878, because the condition on line 877 was never true
878 raise xs_errors.XenError('VDIUnavailable', \
879 opterr='Error scanning VDI %s' % uuid)
880 self.vdis[uuid] = self.allVDIs[uuid] = self.vdi(uuid)
881 if not self.vdis[uuid].hidden: 881 ↛ 874line 881 didn't jump to line 874, because the condition on line 881 was never false
882 self.virtual_allocation += self.vdis[uuid].utilisation
884 for uuid, vdi in self.vdis.items():
885 if vdi.parent: 885 ↛ 886line 885 didn't jump to line 886, because the condition on line 885 was never true
886 if vdi.parent in self.vdis:
887 self.vdis[vdi.parent].read_only = True
888 if vdi.parent in geneology:
889 geneology[vdi.parent].append(uuid)
890 else:
891 geneology[vdi.parent] = [uuid]
893 # Now remove all hidden leaf nodes to avoid introducing records that
894 # will be GC'ed
895 for uuid in list(self.vdis.keys()):
896 if uuid not in geneology and self.vdis[uuid].hidden: 896 ↛ 897line 896 didn't jump to line 897, because the condition on line 896 was never true
897 util.SMlog("Scan found hidden leaf (%s), ignoring" % uuid)
898 del self.vdis[uuid]
900 def _ensureSpaceAvailable(self, amount_needed):
901 space_available = lvutil._getVGstats(self.vgname)['freespace']
902 if (space_available < amount_needed):
903 util.SMlog("Not enough space! free space: %d, need: %d" % \
904 (space_available, amount_needed))
905 raise xs_errors.XenError('SRNoSpace')
907 def _handleInterruptedCloneOps(self):
908 entries = self.journaler.getAll(LVHDVDI.JRN_CLONE)
909 for uuid, val in entries.items(): 909 ↛ 910line 909 didn't jump to line 910, because the loop on line 909 never started
910 util.fistpoint.activate("LVHDRT_clone_vdi_before_undo_clone", self.uuid)
911 self._handleInterruptedCloneOp(uuid, val)
912 util.fistpoint.activate("LVHDRT_clone_vdi_after_undo_clone", self.uuid)
913 self.journaler.remove(LVHDVDI.JRN_CLONE, uuid)
915 def _handleInterruptedCoalesceLeaf(self):
916 entries = self.journaler.getAll(cleanup.VDI.JRN_LEAF)
917 if len(entries) > 0: 917 ↛ 918line 917 didn't jump to line 918, because the condition on line 917 was never true
918 util.SMlog("*** INTERRUPTED COALESCE-LEAF OP DETECTED ***")
919 cleanup.gc_force(self.session, self.uuid)
920 self.lvmCache.refresh()
922 def _handleInterruptedCloneOp(self, origUuid, jval, forceUndo=False):
923 """Either roll back or finalize the interrupted snapshot/clone
924 operation. Rolling back is unsafe if the leaf VHDs have already been
925 in use and written to. However, it is always safe to roll back while
926 we're still in the context of the failed snapshot operation since the
927 VBD is paused for the duration of the operation"""
928 util.SMlog("*** INTERRUPTED CLONE OP: for %s (%s)" % (origUuid, jval))
929 lvs = lvhdutil.getLVInfo(self.lvmCache)
930 baseUuid, clonUuid = jval.split("_")
932 # is there a "base copy" VDI?
933 if not lvs.get(baseUuid):
934 # no base copy: make sure the original is there
935 if lvs.get(origUuid):
936 util.SMlog("*** INTERRUPTED CLONE OP: nothing to do")
937 return
938 raise util.SMException("base copy %s not present, " \
939 "but no original %s found" % (baseUuid, origUuid))
941 if forceUndo:
942 util.SMlog("Explicit revert")
943 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
944 return
946 if not lvs.get(origUuid) or (clonUuid and not lvs.get(clonUuid)):
947 util.SMlog("One or both leaves missing => revert")
948 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
949 return
951 vdis = lvhdutil.getVDIInfo(self.lvmCache)
952 if vdis[origUuid].scanError or (clonUuid and vdis[clonUuid].scanError):
953 util.SMlog("One or both leaves invalid => revert")
954 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
955 return
957 orig = vdis[origUuid]
958 base = vdis[baseUuid]
959 self.lvActivator.activate(baseUuid, base.lvName, False)
960 self.lvActivator.activate(origUuid, orig.lvName, False)
961 if orig.parentUuid != baseUuid:
962 parent = vdis[orig.parentUuid]
963 self.lvActivator.activate(parent.uuid, parent.lvName, False)
964 origPath = os.path.join(self.path, orig.lvName)
965 if not vhdutil.check(origPath):
966 util.SMlog("Orig VHD invalid => revert")
967 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
968 return
970 if clonUuid:
971 clon = vdis[clonUuid]
972 clonPath = os.path.join(self.path, clon.lvName)
973 self.lvActivator.activate(clonUuid, clon.lvName, False)
974 if not vhdutil.check(clonPath):
975 util.SMlog("Clon VHD invalid => revert")
976 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
977 return
979 util.SMlog("Snapshot appears valid, will not roll back")
980 self._completeCloneOp(vdis, origUuid, baseUuid, clonUuid)
982 def _undoCloneOp(self, lvs, origUuid, baseUuid, clonUuid):
983 base = lvs[baseUuid]
984 basePath = os.path.join(self.path, base.name)
986 # make the parent RW
987 if base.readonly:
988 self.lvmCache.setReadonly(base.name, False)
990 ns = lvhdutil.NS_PREFIX_LVM + self.uuid
991 origRefcountBinary = RefCounter.check(origUuid, ns)[1]
992 origRefcountNormal = 0
994 # un-hide the parent
995 if base.vdiType == vhdutil.VDI_TYPE_VHD:
996 self.lvActivator.activate(baseUuid, base.name, False)
997 origRefcountNormal = 1
998 vhdInfo = vhdutil.getVHDInfo(basePath, lvhdutil.extractUuid, False)
999 if base.vdiType == vhdutil.VDI_TYPE_VHD and vhdInfo.hidden:
1000 vhdutil.setHidden(basePath, False)
1001 elif base.vdiType == vhdutil.VDI_TYPE_RAW and base.hidden:
1002 self.lvmCache.setHidden(base.name, False)
1004 # remove the child nodes
1005 if clonUuid and lvs.get(clonUuid):
1006 if lvs[clonUuid].vdiType != vhdutil.VDI_TYPE_VHD:
1007 raise util.SMException("clone %s not VHD" % clonUuid)
1008 self.lvmCache.remove(lvs[clonUuid].name)
1009 if self.lvActivator.get(clonUuid, False):
1010 self.lvActivator.remove(clonUuid, False)
1011 if lvs.get(origUuid):
1012 self.lvmCache.remove(lvs[origUuid].name)
1014 # inflate the parent to fully-allocated size
1015 if base.vdiType == vhdutil.VDI_TYPE_VHD:
1016 fullSize = lvhdutil.calcSizeVHDLV(vhdInfo.sizeVirt)
1017 lvhdutil.inflate(self.journaler, self.uuid, baseUuid, fullSize)
1019 # rename back
1020 origLV = lvhdutil.LV_PREFIX[base.vdiType] + origUuid
1021 self.lvmCache.rename(base.name, origLV)
1022 RefCounter.reset(baseUuid, ns)
1023 if self.lvActivator.get(baseUuid, False):
1024 self.lvActivator.replace(baseUuid, origUuid, origLV, False)
1025 RefCounter.set(origUuid, origRefcountNormal, origRefcountBinary, ns)
1027 # At this stage, tapdisk and SM vdi will be in paused state. Remove
1028 # flag to facilitate vm deactivate
1029 origVdiRef = self.session.xenapi.VDI.get_by_uuid(origUuid)
1030 self.session.xenapi.VDI.remove_from_sm_config(origVdiRef, 'paused')
1032 # update LVM metadata on slaves
1033 slaves = util.get_slaves_attached_on(self.session, [origUuid])
1034 lvhdutil.lvRefreshOnSlaves(self.session, self.uuid, self.vgname,
1035 origLV, origUuid, slaves)
1037 util.SMlog("*** INTERRUPTED CLONE OP: rollback success")
1039 def _completeCloneOp(self, vdis, origUuid, baseUuid, clonUuid):
1040 """Finalize the interrupted snapshot/clone operation. This must not be
1041 called from the live snapshot op context because we attempt to pause/
1042 unpause the VBD here (the VBD is already paused during snapshot, so it
1043 would cause a deadlock)"""
1044 base = vdis[baseUuid]
1045 clon = None
1046 if clonUuid:
1047 clon = vdis[clonUuid]
1049 cleanup.abort(self.uuid)
1051 # make sure the parent is hidden and read-only
1052 if not base.hidden:
1053 if base.vdiType == vhdutil.VDI_TYPE_RAW:
1054 self.lvmCache.setHidden(base.lvName)
1055 else:
1056 basePath = os.path.join(self.path, base.lvName)
1057 vhdutil.setHidden(basePath)
1058 if not base.lvReadonly:
1059 self.lvmCache.setReadonly(base.lvName, True)
1061 # NB: since this snapshot-preserving call is only invoked outside the
1062 # snapshot op context, we assume the LVM metadata on the involved slave
1063 # has by now been refreshed and do not attempt to do it here
1065 # Update the original record
1066 try:
1067 vdi_ref = self.session.xenapi.VDI.get_by_uuid(origUuid)
1068 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
1069 type = self.session.xenapi.VDI.get_type(vdi_ref)
1070 sm_config["vdi_type"] = vhdutil.VDI_TYPE_VHD
1071 sm_config['vhd-parent'] = baseUuid
1072 self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config)
1073 except XenAPI.Failure:
1074 util.SMlog("ERROR updating the orig record")
1076 # introduce the new VDI records
1077 if clonUuid:
1078 try:
1079 clon_vdi = VDI.VDI(self, clonUuid)
1080 clon_vdi.read_only = False
1081 clon_vdi.location = clonUuid
1082 clon_vdi.utilisation = clon.sizeLV
1083 clon_vdi.sm_config = {
1084 "vdi_type": vhdutil.VDI_TYPE_VHD,
1085 "vhd-parent": baseUuid}
1087 if not self.legacyMode:
1088 LVMMetadataHandler(self.mdpath). \
1089 ensureSpaceIsAvailableForVdis(1)
1091 clon_vdi_ref = clon_vdi._db_introduce()
1092 util.SMlog("introduced clon VDI: %s (%s)" % \
1093 (clon_vdi_ref, clonUuid))
1095 vdi_info = {UUID_TAG: clonUuid,
1096 NAME_LABEL_TAG: clon_vdi.label,
1097 NAME_DESCRIPTION_TAG: clon_vdi.description,
1098 IS_A_SNAPSHOT_TAG: 0,
1099 SNAPSHOT_OF_TAG: '',
1100 SNAPSHOT_TIME_TAG: '',
1101 TYPE_TAG: type,
1102 VDI_TYPE_TAG: clon_vdi.sm_config['vdi_type'],
1103 READ_ONLY_TAG: int(clon_vdi.read_only),
1104 MANAGED_TAG: int(clon_vdi.managed),
1105 METADATA_OF_POOL_TAG: ''
1106 }
1108 if not self.legacyMode:
1109 LVMMetadataHandler(self.mdpath).addVdi(vdi_info)
1111 except XenAPI.Failure:
1112 util.SMlog("ERROR introducing the clon record")
1114 try:
1115 base_vdi = VDI.VDI(self, baseUuid) # readonly parent
1116 base_vdi.label = "base copy"
1117 base_vdi.read_only = True
1118 base_vdi.location = baseUuid
1119 base_vdi.size = base.sizeVirt
1120 base_vdi.utilisation = base.sizeLV
1121 base_vdi.managed = False
1122 base_vdi.sm_config = {
1123 "vdi_type": vhdutil.VDI_TYPE_VHD,
1124 "vhd-parent": baseUuid}
1126 if not self.legacyMode:
1127 LVMMetadataHandler(self.mdpath).ensureSpaceIsAvailableForVdis(1)
1129 base_vdi_ref = base_vdi._db_introduce()
1130 util.SMlog("introduced base VDI: %s (%s)" % \
1131 (base_vdi_ref, baseUuid))
1133 vdi_info = {UUID_TAG: baseUuid,
1134 NAME_LABEL_TAG: base_vdi.label,
1135 NAME_DESCRIPTION_TAG: base_vdi.description,
1136 IS_A_SNAPSHOT_TAG: 0,
1137 SNAPSHOT_OF_TAG: '',
1138 SNAPSHOT_TIME_TAG: '',
1139 TYPE_TAG: type,
1140 VDI_TYPE_TAG: base_vdi.sm_config['vdi_type'],
1141 READ_ONLY_TAG: int(base_vdi.read_only),
1142 MANAGED_TAG: int(base_vdi.managed),
1143 METADATA_OF_POOL_TAG: ''
1144 }
1146 if not self.legacyMode:
1147 LVMMetadataHandler(self.mdpath).addVdi(vdi_info)
1148 except XenAPI.Failure:
1149 util.SMlog("ERROR introducing the base record")
1151 util.SMlog("*** INTERRUPTED CLONE OP: complete")
1153 def _undoAllJournals(self):
1154 """Undo all VHD & SM interrupted journaled operations. This call must
1155 be serialized with respect to all operations that create journals"""
1156 # undoing interrupted inflates must be done first, since undoing VHD
1157 # ops might require inflations
1158 self.lock.acquire()
1159 try:
1160 self._undoAllInflateJournals()
1161 self._undoAllVHDJournals()
1162 self._handleInterruptedCloneOps()
1163 self._handleInterruptedCoalesceLeaf()
1164 finally:
1165 self.lock.release()
1166 self.cleanup()
1168 def _undoAllInflateJournals(self):
1169 entries = self.journaler.getAll(lvhdutil.JRN_INFLATE)
1170 if len(entries) == 0:
1171 return
1172 self._loadvdis()
1173 for uuid, val in entries.items():
1174 vdi = self.vdis.get(uuid)
1175 if vdi: 1175 ↛ 1190line 1175 didn't jump to line 1190, because the condition on line 1175 was never false
1176 util.SMlog("Found inflate journal %s, deflating %s to %s" % \
1177 (uuid, vdi.path, val))
1178 if vdi.readonly: 1178 ↛ 1179line 1178 didn't jump to line 1179, because the condition on line 1178 was never true
1179 self.lvmCache.setReadonly(vdi.lvname, False)
1180 self.lvActivator.activate(uuid, vdi.lvname, False)
1181 currSizeLV = self.lvmCache.getSize(vdi.lvname)
1182 util.zeroOut(vdi.path, currSizeLV - vhdutil.VHD_FOOTER_SIZE,
1183 vhdutil.VHD_FOOTER_SIZE)
1184 lvhdutil.deflate(self.lvmCache, vdi.lvname, int(val))
1185 if vdi.readonly: 1185 ↛ 1186line 1185 didn't jump to line 1186, because the condition on line 1185 was never true
1186 self.lvmCache.setReadonly(vdi.lvname, True)
1187 if "true" == self.session.xenapi.SR.get_shared(self.sr_ref): 1187 ↛ 1188line 1187 didn't jump to line 1188, because the condition on line 1187 was never true
1188 lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid,
1189 self.vgname, vdi.lvname, uuid)
1190 self.journaler.remove(lvhdutil.JRN_INFLATE, uuid)
1191 delattr(self, "vdiInfo")
1192 delattr(self, "allVDIs")
1194 def _undoAllVHDJournals(self):
1195 """check if there are VHD journals in existence and revert them"""
1196 journals = lvhdutil.getAllVHDJournals(self.lvmCache)
1197 if len(journals) == 0: 1197 ↛ 1199line 1197 didn't jump to line 1199, because the condition on line 1197 was never false
1198 return
1199 self._loadvdis()
1200 for uuid, jlvName in journals:
1201 vdi = self.vdis[uuid]
1202 util.SMlog("Found VHD journal %s, reverting %s" % (uuid, vdi.path))
1203 self.lvActivator.activate(uuid, vdi.lvname, False)
1204 self.lvmCache.activateNoRefcount(jlvName)
1205 fullSize = lvhdutil.calcSizeVHDLV(vdi.size)
1206 lvhdutil.inflate(self.journaler, self.uuid, vdi.uuid, fullSize)
1207 try:
1208 jFile = os.path.join(self.path, jlvName)
1209 vhdutil.revert(vdi.path, jFile)
1210 except util.CommandException:
1211 util.logException("VHD journal revert")
1212 vhdutil.check(vdi.path)
1213 util.SMlog("VHD revert failed but VHD ok: removing journal")
1214 # Attempt to reclaim unused space
1215 vhdInfo = vhdutil.getVHDInfo(vdi.path, lvhdutil.extractUuid, False)
1216 NewSize = lvhdutil.calcSizeVHDLV(vhdInfo.sizeVirt)
1217 if NewSize < fullSize:
1218 lvhdutil.deflate(self.lvmCache, vdi.lvname, int(NewSize))
1219 lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid,
1220 self.vgname, vdi.lvname, uuid)
1221 self.lvmCache.remove(jlvName)
1222 delattr(self, "vdiInfo")
1223 delattr(self, "allVDIs")
1225 def _updateSlavesPreClone(self, hostRefs, origOldLV):
1226 masterRef = util.get_this_host_ref(self.session)
1227 args = {"vgName": self.vgname,
1228 "action1": "deactivateNoRefcount",
1229 "lvName1": origOldLV}
1230 for hostRef in hostRefs:
1231 if hostRef == masterRef: 1231 ↛ 1232line 1231 didn't jump to line 1232, because the condition on line 1231 was never true
1232 continue
1233 util.SMlog("Deactivate VDI on %s" % hostRef)
1234 rv = self.session.xenapi.host.call_plugin(hostRef, self.PLUGIN_ON_SLAVE, "multi", args)
1235 util.SMlog("call-plugin returned: %s" % rv)
1236 if not rv: 1236 ↛ 1237line 1236 didn't jump to line 1237, because the condition on line 1236 was never true
1237 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)
1239 def _updateSlavesOnClone(self, hostRefs, origOldLV, origLV,
1240 baseUuid, baseLV):
1241 """We need to reactivate the original LV on each slave (note that the
1242 name for the original LV might change), as well as init the refcount
1243 for the base LV"""
1244 args = {"vgName": self.vgname,
1245 "action1": "refresh",
1246 "lvName1": origLV,
1247 "action2": "activate",
1248 "ns2": lvhdutil.NS_PREFIX_LVM + self.uuid,
1249 "lvName2": baseLV,
1250 "uuid2": baseUuid}
1252 masterRef = util.get_this_host_ref(self.session)
1253 for hostRef in hostRefs:
1254 if hostRef == masterRef: 1254 ↛ 1255line 1254 didn't jump to line 1255, because the condition on line 1254 was never true
1255 continue
1256 util.SMlog("Updating %s, %s, %s on slave %s" % \
1257 (origOldLV, origLV, baseLV, hostRef))
1258 rv = self.session.xenapi.host.call_plugin(
1259 hostRef, self.PLUGIN_ON_SLAVE, "multi", args)
1260 util.SMlog("call-plugin returned: %s" % rv)
1261 if not rv: 1261 ↛ 1262line 1261 didn't jump to line 1262, because the condition on line 1261 was never true
1262 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)
1264 def _updateSlavesOnCBTClone(self, hostRefs, cbtlog):
1265 """Reactivate and refresh CBT log file on slaves"""
1266 args = {"vgName": self.vgname,
1267 "action1": "deactivateNoRefcount",
1268 "lvName1": cbtlog,
1269 "action2": "refresh",
1270 "lvName2": cbtlog}
1272 masterRef = util.get_this_host_ref(self.session)
1273 for hostRef in hostRefs:
1274 if hostRef == masterRef: 1274 ↛ 1275line 1274 didn't jump to line 1275, because the condition on line 1274 was never true
1275 continue
1276 util.SMlog("Updating %s on slave %s" % (cbtlog, hostRef))
1277 rv = self.session.xenapi.host.call_plugin(
1278 hostRef, self.PLUGIN_ON_SLAVE, "multi", args)
1279 util.SMlog("call-plugin returned: %s" % rv)
1280 if not rv: 1280 ↛ 1281line 1280 didn't jump to line 1281, because the condition on line 1280 was never true
1281 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)
1283 def _updateSlavesOnRemove(self, hostRefs, baseUuid, baseLV):
1284 """Tell the slave we deleted the base image"""
1285 args = {"vgName": self.vgname,
1286 "action1": "cleanupLockAndRefcount",
1287 "uuid1": baseUuid,
1288 "ns1": lvhdutil.NS_PREFIX_LVM + self.uuid}
1290 masterRef = util.get_this_host_ref(self.session)
1291 for hostRef in hostRefs:
1292 if hostRef == masterRef: 1292 ↛ 1293line 1292 didn't jump to line 1293, because the condition on line 1292 was never true
1293 continue
1294 util.SMlog("Cleaning locks for %s on slave %s" % (baseLV, hostRef))
1295 rv = self.session.xenapi.host.call_plugin(
1296 hostRef, self.PLUGIN_ON_SLAVE, "multi", args)
1297 util.SMlog("call-plugin returned: %s" % rv)
1298 if not rv: 1298 ↛ 1299line 1298 didn't jump to line 1299, because the condition on line 1298 was never true
1299 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)
1301 def _cleanup(self, skipLockCleanup=False):
1302 """delete stale refcounter, flag, and lock files"""
1303 RefCounter.resetAll(lvhdutil.NS_PREFIX_LVM + self.uuid)
1304 IPCFlag(self.uuid).clearAll()
1305 if not skipLockCleanup: 1305 ↛ 1306line 1305 didn't jump to line 1306, because the condition on line 1305 was never true
1306 Lock.cleanupAll(self.uuid)
1307 Lock.cleanupAll(lvhdutil.NS_PREFIX_LVM + self.uuid)
1309 def _prepareTestMode(self):
1310 util.SMlog("Test mode: %s" % self.testMode)
1311 if self.ENV_VAR_VHD_TEST.get(self.testMode): 1311 ↛ 1312line 1311 didn't jump to line 1312, because the condition on line 1311 was never true
1312 os.environ[self.ENV_VAR_VHD_TEST[self.testMode]] = "yes"
1313 util.SMlog("Setting env %s" % self.ENV_VAR_VHD_TEST[self.testMode])
1315 def _kickGC(self):
1316 util.SMlog("Kicking GC")
1317 cleanup.start_gc_service(self.uuid)
1319 def ensureCBTSpace(self):
1320 # Ensure we have space for at least one LV
1321 self._ensureSpaceAvailable(self.journaler.LV_SIZE)
1324class LVHDVDI(VDI.VDI):
1326 JRN_CLONE = "clone" # journal entry type for the clone operation
1328 @override
1329 def load(self, vdi_uuid) -> None:
1330 self.lock = self.sr.lock
1331 self.lvActivator = self.sr.lvActivator
1332 self.loaded = False
1333 self.vdi_type = vhdutil.VDI_TYPE_VHD
1334 if self.sr.legacyMode or util.fistpoint.is_active("xenrt_default_vdi_type_legacy"): 1334 ↛ 1336line 1334 didn't jump to line 1336, because the condition on line 1334 was never false
1335 self.vdi_type = vhdutil.VDI_TYPE_RAW
1336 self.uuid = vdi_uuid
1337 self.location = self.uuid
1338 self.exists = True
1340 if hasattr(self.sr, "vdiInfo") and self.sr.vdiInfo.get(self.uuid):
1341 self._initFromVDIInfo(self.sr.vdiInfo[self.uuid])
1342 if self.parent: 1342 ↛ 1343line 1342 didn't jump to line 1343, because the condition on line 1342 was never true
1343 self.sm_config_override['vhd-parent'] = self.parent
1344 else:
1345 self.sm_config_override['vhd-parent'] = None
1346 return
1348 # scan() didn't run: determine the type of the VDI manually
1349 if self._determineType():
1350 return
1352 # the VDI must be in the process of being created
1353 self.exists = False
1354 if "vdi_sm_config" in self.sr.srcmd.params and \ 1354 ↛ 1356line 1354 didn't jump to line 1356, because the condition on line 1354 was never true
1355 "type" in self.sr.srcmd.params["vdi_sm_config"]:
1356 type = self.sr.srcmd.params["vdi_sm_config"]["type"]
1357 if type == PARAM_RAW:
1358 self.vdi_type = vhdutil.VDI_TYPE_RAW
1359 elif type == PARAM_VHD:
1360 self.vdi_type = vhdutil.VDI_TYPE_VHD
1361 if self.sr.cmd == 'vdi_create' and self.sr.legacyMode:
1362 raise xs_errors.XenError('VDICreate', \
1363 opterr='Cannot create VHD type disk in legacy mode')
1364 else:
1365 raise xs_errors.XenError('VDICreate', opterr='bad type')
1366 self.lvname = "%s%s" % (lvhdutil.LV_PREFIX[self.vdi_type], vdi_uuid)
1367 self.path = os.path.join(self.sr.path, self.lvname)
1369 @override
1370 def create(self, sr_uuid, vdi_uuid, size) -> str:
1371 util.SMlog("LVHDVDI.create for %s" % self.uuid)
1372 if not self.sr.isMaster:
1373 raise xs_errors.XenError('LVMMaster')
1374 if self.exists:
1375 raise xs_errors.XenError('VDIExists')
1377 size = vhdutil.validate_and_round_vhd_size(int(size))
1379 util.SMlog("LVHDVDI.create: type = %s, %s (size=%s)" % \
1380 (self.vdi_type, self.path, size))
1381 lvSize = 0
1382 self.sm_config = self.sr.srcmd.params["vdi_sm_config"]
1383 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
1384 lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT, int(size))
1385 else:
1386 if self.sr.provision == "thin":
1387 lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT,
1388 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE))
1389 elif self.sr.provision == "thick":
1390 lvSize = lvhdutil.calcSizeVHDLV(int(size))
1392 self.sr._ensureSpaceAvailable(lvSize)
1394 try:
1395 self.sr.lvmCache.create(self.lvname, lvSize)
1396 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
1397 self.size = self.sr.lvmCache.getSize(self.lvname)
1398 else:
1399 vhdutil.create(self.path, int(size), False, lvhdutil.MSIZE_MB)
1400 self.size = vhdutil.getSizeVirt(self.path)
1401 self.sr.lvmCache.deactivateNoRefcount(self.lvname)
1402 except util.CommandException as e:
1403 util.SMlog("Unable to create VDI")
1404 self.sr.lvmCache.remove(self.lvname)
1405 raise xs_errors.XenError('VDICreate', opterr='error %d' % e.code)
1407 self.utilisation = lvSize
1408 self.sm_config["vdi_type"] = self.vdi_type
1410 if not self.sr.legacyMode:
1411 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
1413 self.ref = self._db_introduce()
1414 self.sr._updateStats(self.sr.uuid, self.size)
1416 vdi_info = {UUID_TAG: self.uuid,
1417 NAME_LABEL_TAG: util.to_plain_string(self.label),
1418 NAME_DESCRIPTION_TAG: util.to_plain_string(self.description),
1419 IS_A_SNAPSHOT_TAG: 0,
1420 SNAPSHOT_OF_TAG: '',
1421 SNAPSHOT_TIME_TAG: '',
1422 TYPE_TAG: self.ty,
1423 VDI_TYPE_TAG: self.vdi_type,
1424 READ_ONLY_TAG: int(self.read_only),
1425 MANAGED_TAG: int(self.managed),
1426 METADATA_OF_POOL_TAG: ''
1427 }
1429 if not self.sr.legacyMode:
1430 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
1432 return VDI.VDI.get_params(self)
1434 @override
1435 def delete(self, sr_uuid, vdi_uuid, data_only=False) -> None:
1436 util.SMlog("LVHDVDI.delete for %s" % self.uuid)
1437 try:
1438 self._loadThis()
1439 except xs_errors.SRException as e:
1440 # Catch 'VDI doesn't exist' exception
1441 if e.errno == 46:
1442 return super(LVHDVDI, self).delete(sr_uuid, vdi_uuid, data_only)
1443 raise
1445 vdi_ref = self.sr.srcmd.params['vdi_ref']
1446 if not self.session.xenapi.VDI.get_managed(vdi_ref):
1447 raise xs_errors.XenError("VDIDelete", \
1448 opterr="Deleting non-leaf node not permitted")
1450 if not self.hidden:
1451 self._markHidden()
1453 if not data_only:
1454 # Remove from XAPI and delete from MGT
1455 self._db_forget()
1456 else:
1457 # If this is a data_destroy call, don't remove from XAPI db
1458 # Only delete from MGT
1459 if not self.sr.legacyMode:
1460 LVMMetadataHandler(self.sr.mdpath).deleteVdiFromMetadata(self.uuid)
1462 # deactivate here because it might be too late to do it in the "final"
1463 # step: GC might have removed the LV by then
1464 if self.sr.lvActivator.get(self.uuid, False):
1465 self.sr.lvActivator.deactivate(self.uuid, False)
1467 try:
1468 self.sr.lvmCache.remove(self.lvname)
1469 self.sr.lock.cleanup(vdi_uuid, lvhdutil.NS_PREFIX_LVM + sr_uuid)
1470 self.sr.lock.cleanupAll(vdi_uuid)
1471 except xs_errors.SRException as e:
1472 util.SMlog(
1473 "Failed to remove the volume (maybe is leaf coalescing) "
1474 "for %s err:%d" % (self.uuid, e.errno))
1476 self.sr._updateStats(self.sr.uuid, -self.size)
1477 self.sr._kickGC()
1478 return super(LVHDVDI, self).delete(sr_uuid, vdi_uuid, data_only)
1480 @override
1481 def attach(self, sr_uuid, vdi_uuid) -> str:
1482 util.SMlog("LVHDVDI.attach for %s" % self.uuid)
1483 if self.sr.journaler.hasJournals(self.uuid):
1484 raise xs_errors.XenError('VDIUnavailable',
1485 opterr='Interrupted operation detected on this VDI, '
1486 'scan SR first to trigger auto-repair')
1488 writable = ('args' not in self.sr.srcmd.params) or \
1489 (self.sr.srcmd.params['args'][0] == "true")
1490 needInflate = True
1491 if self.vdi_type == vhdutil.VDI_TYPE_RAW or not writable:
1492 needInflate = False
1493 else:
1494 self._loadThis()
1495 if self.utilisation >= lvhdutil.calcSizeVHDLV(self.size):
1496 needInflate = False
1498 if needInflate:
1499 try:
1500 self._prepareThin(True)
1501 except:
1502 util.logException("attach")
1503 raise xs_errors.XenError('LVMProvisionAttach')
1505 try:
1506 return self._attach()
1507 finally:
1508 if not self.sr.lvActivator.deactivateAll():
1509 util.SMlog("Failed to deactivate LVs back (%s)" % self.uuid)
1511 @override
1512 def detach(self, sr_uuid, vdi_uuid) -> None:
1513 util.SMlog("LVHDVDI.detach for %s" % self.uuid)
1514 self._loadThis()
1515 already_deflated = (self.utilisation < \
1516 lvhdutil.calcSizeVHDLV(self.size))
1517 needDeflate = True
1518 if self.vdi_type == vhdutil.VDI_TYPE_RAW or already_deflated:
1519 needDeflate = False
1520 elif self.sr.provision == "thick":
1521 needDeflate = False
1522 # except for snapshots, which are always deflated
1523 if self.sr.srcmd.cmd != 'vdi_detach_from_config':
1524 vdi_ref = self.sr.srcmd.params['vdi_ref']
1525 snap = self.session.xenapi.VDI.get_is_a_snapshot(vdi_ref)
1526 if snap:
1527 needDeflate = True
1529 if needDeflate:
1530 try:
1531 self._prepareThin(False)
1532 except:
1533 util.logException("_prepareThin")
1534 raise xs_errors.XenError('VDIUnavailable', opterr='deflate')
1536 try:
1537 self._detach()
1538 finally:
1539 if not self.sr.lvActivator.deactivateAll():
1540 raise xs_errors.XenError("SMGeneral", opterr="deactivation")
1542 # We only support offline resize
1543 @override
1544 def resize(self, sr_uuid, vdi_uuid, size) -> str:
1545 util.SMlog("LVHDVDI.resize for %s" % self.uuid)
1546 if not self.sr.isMaster:
1547 raise xs_errors.XenError('LVMMaster')
1549 self._loadThis()
1550 if self.hidden:
1551 raise xs_errors.XenError('VDIUnavailable', opterr='hidden VDI')
1553 if size < self.size:
1554 util.SMlog('vdi_resize: shrinking not supported: ' + \
1555 '(current size: %d, new size: %d)' % (self.size, size))
1556 raise xs_errors.XenError('VDISize', opterr='shrinking not allowed')
1558 size = vhdutil.validate_and_round_vhd_size(int(size))
1560 if size == self.size:
1561 return VDI.VDI.get_params(self)
1563 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
1564 lvSizeOld = self.size
1565 lvSizeNew = util.roundup(lvutil.LVM_SIZE_INCREMENT, size)
1566 else:
1567 lvSizeOld = self.utilisation
1568 lvSizeNew = lvhdutil.calcSizeVHDLV(size)
1569 if self.sr.provision == "thin":
1570 # VDI is currently deflated, so keep it deflated
1571 lvSizeNew = lvSizeOld
1572 assert(lvSizeNew >= lvSizeOld)
1573 spaceNeeded = lvSizeNew - lvSizeOld
1574 self.sr._ensureSpaceAvailable(spaceNeeded)
1576 oldSize = self.size
1577 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
1578 self.sr.lvmCache.setSize(self.lvname, lvSizeNew)
1579 self.size = self.sr.lvmCache.getSize(self.lvname)
1580 self.utilisation = self.size
1581 else:
1582 if lvSizeNew != lvSizeOld:
1583 lvhdutil.inflate(self.sr.journaler, self.sr.uuid, self.uuid,
1584 lvSizeNew)
1585 vhdutil.setSizeVirtFast(self.path, size)
1586 self.size = vhdutil.getSizeVirt(self.path)
1587 self.utilisation = self.sr.lvmCache.getSize(self.lvname)
1589 vdi_ref = self.sr.srcmd.params['vdi_ref']
1590 self.session.xenapi.VDI.set_virtual_size(vdi_ref, str(self.size))
1591 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref,
1592 str(self.utilisation))
1593 self.sr._updateStats(self.sr.uuid, self.size - oldSize)
1594 super(LVHDVDI, self).resize_cbt(self.sr.uuid, self.uuid, self.size)
1595 return VDI.VDI.get_params(self)
1597 @override
1598 def clone(self, sr_uuid, vdi_uuid) -> str:
1599 return self._do_snapshot(
1600 sr_uuid, vdi_uuid, VDI.SNAPSHOT_DOUBLE, cloneOp=True)
1602 @override
1603 def compose(self, sr_uuid, vdi1, vdi2) -> None:
1604 util.SMlog("LVHDSR.compose for %s -> %s" % (vdi2, vdi1))
1605 if self.vdi_type != vhdutil.VDI_TYPE_VHD:
1606 raise xs_errors.XenError('Unimplemented')
1608 parent_uuid = vdi1
1609 parent_lvname = lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD] + parent_uuid
1610 assert(self.sr.lvmCache.checkLV(parent_lvname))
1611 parent_path = os.path.join(self.sr.path, parent_lvname)
1613 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
1614 self.sr.lvActivator.activate(parent_uuid, parent_lvname, False)
1616 vhdutil.setParent(self.path, parent_path, False)
1617 vhdutil.setHidden(parent_path)
1618 self.sr.session.xenapi.VDI.set_managed(self.sr.srcmd.params['args'][0], False)
1620 if not blktap2.VDI.tap_refresh(self.session, self.sr.uuid, self.uuid,
1621 True):
1622 raise util.SMException("failed to refresh VDI %s" % self.uuid)
1624 util.SMlog("Compose done")
1626 def reset_leaf(self, sr_uuid, vdi_uuid):
1627 util.SMlog("LVHDSR.reset_leaf for %s" % vdi_uuid)
1628 if self.vdi_type != vhdutil.VDI_TYPE_VHD:
1629 raise xs_errors.XenError('Unimplemented')
1631 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
1633 # safety check
1634 if not vhdutil.hasParent(self.path):
1635 raise util.SMException("ERROR: VDI %s has no parent, " + \
1636 "will not reset contents" % self.uuid)
1638 vhdutil.killData(self.path)
1640 def _attach(self):
1641 self._chainSetActive(True, True, True)
1642 if not util.pathexists(self.path):
1643 raise xs_errors.XenError('VDIUnavailable', \
1644 opterr='Could not find: %s' % self.path)
1646 if not hasattr(self, 'xenstore_data'):
1647 self.xenstore_data = {}
1649 self.xenstore_data.update(scsiutil.update_XS_SCSIdata(self.uuid, \
1650 scsiutil.gen_synthetic_page_data(self.uuid)))
1652 self.xenstore_data['storage-type'] = 'lvm'
1653 self.xenstore_data['vdi-type'] = self.vdi_type
1655 self.attached = True
1656 self.sr.lvActivator.persist()
1657 return VDI.VDI.attach(self, self.sr.uuid, self.uuid)
1659 def _detach(self):
1660 self._chainSetActive(False, True)
1661 self.attached = False
1663 @override
1664 def _do_snapshot(self, sr_uuid, vdi_uuid, snapType,
1665 cloneOp=False, secondary=None, cbtlog=None) -> str:
1666 # If cbt enabled, save file consistency state
1667 if cbtlog is not None:
1668 if blktap2.VDI.tap_status(self.session, vdi_uuid): 1668 ↛ 1669line 1668 didn't jump to line 1669, because the condition on line 1668 was never true
1669 consistency_state = False
1670 else:
1671 consistency_state = True
1672 util.SMlog("Saving log consistency state of %s for vdi: %s" %
1673 (consistency_state, vdi_uuid))
1674 else:
1675 consistency_state = None
1677 pause_time = time.time()
1678 if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid): 1678 ↛ 1679line 1678 didn't jump to line 1679, because the condition on line 1678 was never true
1679 raise util.SMException("failed to pause VDI %s" % vdi_uuid)
1681 snapResult = None
1682 try:
1683 snapResult = self._snapshot(snapType, cloneOp, cbtlog, consistency_state)
1684 except Exception as e1:
1685 try:
1686 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid,
1687 secondary=None)
1688 except Exception as e2:
1689 util.SMlog('WARNING: failed to clean up failed snapshot: '
1690 '%s (error ignored)' % e2)
1691 raise
1692 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary)
1693 unpause_time = time.time()
1694 if (unpause_time - pause_time) > LONG_SNAPTIME: 1694 ↛ 1695line 1694 didn't jump to line 1695, because the condition on line 1694 was never true
1695 util.SMlog('WARNING: snapshot paused VM for %s seconds' %
1696 (unpause_time - pause_time))
1697 return snapResult
1699 def _snapshot(self, snapType, cloneOp=False, cbtlog=None, cbt_consistency=None):
1700 util.SMlog("LVHDVDI._snapshot for %s (type %s)" % (self.uuid, snapType))
1702 if not self.sr.isMaster: 1702 ↛ 1703line 1702 didn't jump to line 1703, because the condition on line 1702 was never true
1703 raise xs_errors.XenError('LVMMaster')
1704 if self.sr.legacyMode: 1704 ↛ 1705line 1704 didn't jump to line 1705, because the condition on line 1704 was never true
1705 raise xs_errors.XenError('Unimplemented', opterr='In legacy mode')
1707 self._loadThis()
1708 if self.hidden: 1708 ↛ 1709line 1708 didn't jump to line 1709, because the condition on line 1708 was never true
1709 raise xs_errors.XenError('VDISnapshot', opterr='hidden VDI')
1711 self.sm_config = self.session.xenapi.VDI.get_sm_config( \
1712 self.sr.srcmd.params['vdi_ref'])
1713 if "type" in self.sm_config and self.sm_config['type'] == 'raw': 1713 ↛ 1714line 1713 didn't jump to line 1714, because the condition on line 1713 was never true
1714 if not util.fistpoint.is_active("testsm_clone_allow_raw"):
1715 raise xs_errors.XenError('Unimplemented', \
1716 opterr='Raw VDI, snapshot or clone not permitted')
1718 # we must activate the entire VHD chain because the real parent could
1719 # theoretically be anywhere in the chain if all VHDs under it are empty
1720 self._chainSetActive(True, False)
1721 if not util.pathexists(self.path): 1721 ↛ 1722line 1721 didn't jump to line 1722, because the condition on line 1721 was never true
1722 raise xs_errors.XenError('VDIUnavailable', \
1723 opterr='VDI unavailable: %s' % (self.path))
1725 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 1725 ↛ 1733line 1725 didn't jump to line 1733, because the condition on line 1725 was never false
1726 depth = vhdutil.getDepth(self.path)
1727 if depth == -1: 1727 ↛ 1728line 1727 didn't jump to line 1728, because the condition on line 1727 was never true
1728 raise xs_errors.XenError('VDIUnavailable', \
1729 opterr='failed to get VHD depth')
1730 elif depth >= vhdutil.MAX_CHAIN_SIZE: 1730 ↛ 1731line 1730 didn't jump to line 1731, because the condition on line 1730 was never true
1731 raise xs_errors.XenError('SnapshotChainTooLong')
1733 self.issnap = self.session.xenapi.VDI.get_is_a_snapshot( \
1734 self.sr.srcmd.params['vdi_ref'])
1736 fullpr = lvhdutil.calcSizeVHDLV(self.size)
1737 thinpr = util.roundup(lvutil.LVM_SIZE_INCREMENT, \
1738 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE))
1739 lvSizeOrig = thinpr
1740 lvSizeClon = thinpr
1742 hostRefs = []
1743 if self.sr.cmd == "vdi_snapshot":
1744 hostRefs = util.get_hosts_attached_on(self.session, [self.uuid])
1745 if hostRefs: 1745 ↛ 1747line 1745 didn't jump to line 1747, because the condition on line 1745 was never false
1746 lvSizeOrig = fullpr
1747 if self.sr.provision == "thick": 1747 ↛ 1753line 1747 didn't jump to line 1753, because the condition on line 1747 was never false
1748 if not self.issnap: 1748 ↛ 1749line 1748 didn't jump to line 1749, because the condition on line 1748 was never true
1749 lvSizeOrig = fullpr
1750 if self.sr.cmd != "vdi_snapshot":
1751 lvSizeClon = fullpr
1753 if (snapType == VDI.SNAPSHOT_SINGLE or 1753 ↛ 1755line 1753 didn't jump to line 1755, because the condition on line 1753 was never true
1754 snapType == VDI.SNAPSHOT_INTERNAL):
1755 lvSizeClon = 0
1757 # the space required must include 2 journal LVs: a clone journal and an
1758 # inflate journal (for the failure handling
1759 size_req = lvSizeOrig + lvSizeClon + 2 * self.sr.journaler.LV_SIZE
1760 lvSizeBase = self.size
1761 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 1761 ↛ 1765line 1761 didn't jump to line 1765, because the condition on line 1761 was never false
1762 lvSizeBase = util.roundup(lvutil.LVM_SIZE_INCREMENT,
1763 vhdutil.getSizePhys(self.path))
1764 size_req -= (self.utilisation - lvSizeBase)
1765 self.sr._ensureSpaceAvailable(size_req)
1767 if hostRefs:
1768 self.sr._updateSlavesPreClone(hostRefs, self.lvname)
1770 baseUuid = util.gen_uuid()
1771 origUuid = self.uuid
1772 clonUuid = ""
1773 if snapType == VDI.SNAPSHOT_DOUBLE: 1773 ↛ 1775line 1773 didn't jump to line 1775, because the condition on line 1773 was never false
1774 clonUuid = util.gen_uuid()
1775 jval = "%s_%s" % (baseUuid, clonUuid)
1776 self.sr.journaler.create(self.JRN_CLONE, origUuid, jval)
1777 util.fistpoint.activate("LVHDRT_clone_vdi_after_create_journal", self.sr.uuid)
1779 try:
1780 # self becomes the "base vdi"
1781 origOldLV = self.lvname
1782 baseLV = lvhdutil.LV_PREFIX[self.vdi_type] + baseUuid
1783 self.sr.lvmCache.rename(self.lvname, baseLV)
1784 self.sr.lvActivator.replace(self.uuid, baseUuid, baseLV, False)
1785 RefCounter.set(baseUuid, 1, 0, lvhdutil.NS_PREFIX_LVM + self.sr.uuid)
1786 self.uuid = baseUuid
1787 self.lvname = baseLV
1788 self.path = os.path.join(self.sr.path, baseLV)
1789 self.label = "base copy"
1790 self.read_only = True
1791 self.location = self.uuid
1792 self.managed = False
1794 # shrink the base copy to the minimum - we do it before creating
1795 # the snapshot volumes to avoid requiring double the space
1796 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 1796 ↛ 1799line 1796 didn't jump to line 1799, because the condition on line 1796 was never false
1797 lvhdutil.deflate(self.sr.lvmCache, self.lvname, lvSizeBase)
1798 self.utilisation = lvSizeBase
1799 util.fistpoint.activate("LVHDRT_clone_vdi_after_shrink_parent", self.sr.uuid)
1801 snapVDI = self._createSnap(origUuid, lvSizeOrig, False)
1802 util.fistpoint.activate("LVHDRT_clone_vdi_after_first_snap", self.sr.uuid)
1803 snapVDI2 = None
1804 if snapType == VDI.SNAPSHOT_DOUBLE: 1804 ↛ 1810line 1804 didn't jump to line 1810, because the condition on line 1804 was never false
1805 snapVDI2 = self._createSnap(clonUuid, lvSizeClon, True)
1806 # If we have CBT enabled on the VDI,
1807 # set CBT status for the new snapshot disk
1808 if cbtlog:
1809 snapVDI2.cbt_enabled = True
1810 util.fistpoint.activate("LVHDRT_clone_vdi_after_second_snap", self.sr.uuid)
1812 # note: it is important to mark the parent hidden only AFTER the
1813 # new VHD children have been created, which are referencing it;
1814 # otherwise we would introduce a race with GC that could reclaim
1815 # the parent before we snapshot it
1816 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 1816 ↛ 1817line 1816 didn't jump to line 1817, because the condition on line 1816 was never true
1817 self.sr.lvmCache.setHidden(self.lvname)
1818 else:
1819 vhdutil.setHidden(self.path)
1820 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_hidden", self.sr.uuid)
1822 # set the base copy to ReadOnly
1823 self.sr.lvmCache.setReadonly(self.lvname, True)
1824 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_ro", self.sr.uuid)
1826 if hostRefs:
1827 self.sr._updateSlavesOnClone(hostRefs, origOldLV,
1828 snapVDI.lvname, self.uuid, self.lvname)
1830 # Update cbt files if user created snapshot (SNAPSHOT_DOUBLE)
1831 if snapType == VDI.SNAPSHOT_DOUBLE and cbtlog:
1832 snapVDI._cbt_snapshot(clonUuid, cbt_consistency)
1833 if hostRefs: 1833 ↛ 1847line 1833 didn't jump to line 1847, because the condition on line 1833 was never false
1834 cbtlog_file = self._get_cbt_logname(snapVDI.uuid)
1835 try:
1836 self.sr._updateSlavesOnCBTClone(hostRefs, cbtlog_file)
1837 except:
1838 alert_name = "VDI_CBT_SNAPSHOT_FAILED"
1839 alert_str = ("Creating CBT snapshot for {} failed"
1840 .format(snapVDI.uuid))
1841 snapVDI._disable_cbt_on_error(alert_name, alert_str)
1842 pass
1844 except (util.SMException, XenAPI.Failure) as e:
1845 util.logException("LVHDVDI._snapshot")
1846 self._failClone(origUuid, jval, str(e))
1847 util.fistpoint.activate("LVHDRT_clone_vdi_before_remove_journal", self.sr.uuid)
1849 self.sr.journaler.remove(self.JRN_CLONE, origUuid)
1851 return self._finishSnapshot(snapVDI, snapVDI2, hostRefs, cloneOp, snapType)
1853 def _createSnap(self, snapUuid, snapSizeLV, isNew):
1854 """Snapshot self and return the snapshot VDI object"""
1855 snapLV = lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD] + snapUuid
1856 snapPath = os.path.join(self.sr.path, snapLV)
1857 self.sr.lvmCache.create(snapLV, int(snapSizeLV))
1858 util.fistpoint.activate("LVHDRT_clone_vdi_after_lvcreate", self.sr.uuid)
1859 if isNew:
1860 RefCounter.set(snapUuid, 1, 0, lvhdutil.NS_PREFIX_LVM + self.sr.uuid)
1861 self.sr.lvActivator.add(snapUuid, snapLV, False)
1862 parentRaw = (self.vdi_type == vhdutil.VDI_TYPE_RAW)
1863 vhdutil.snapshot(snapPath, self.path, parentRaw, lvhdutil.MSIZE_MB)
1864 snapParent = vhdutil.getParent(snapPath, lvhdutil.extractUuid)
1866 snapVDI = LVHDVDI(self.sr, snapUuid)
1867 snapVDI.read_only = False
1868 snapVDI.location = snapUuid
1869 snapVDI.size = self.size
1870 snapVDI.utilisation = snapSizeLV
1871 snapVDI.sm_config = dict()
1872 for key, val in self.sm_config.items(): 1872 ↛ 1873line 1872 didn't jump to line 1873, because the loop on line 1872 never started
1873 if key not in [
1874 "type", "vdi_type", "vhd-parent", "paused", "relinking", "activating"] and \
1875 not key.startswith("host_"):
1876 snapVDI.sm_config[key] = val
1877 snapVDI.sm_config["vdi_type"] = vhdutil.VDI_TYPE_VHD
1878 snapVDI.sm_config["vhd-parent"] = snapParent
1879 snapVDI.lvname = snapLV
1880 return snapVDI
1882 def _finishSnapshot(self, snapVDI, snapVDI2, hostRefs, cloneOp=False, snapType=None):
1883 if snapType is not VDI.SNAPSHOT_INTERNAL: 1883 ↛ 1885line 1883 didn't jump to line 1885, because the condition on line 1883 was never false
1884 self.sr._updateStats(self.sr.uuid, self.size)
1885 basePresent = True
1887 # Verify parent locator field of both children and delete basePath if
1888 # unused
1889 snapParent = snapVDI.sm_config["vhd-parent"]
1890 snap2Parent = ""
1891 if snapVDI2: 1891 ↛ 1893line 1891 didn't jump to line 1893, because the condition on line 1891 was never false
1892 snap2Parent = snapVDI2.sm_config["vhd-parent"]
1893 if snapParent != self.uuid and \ 1893 ↛ 1920line 1893 didn't jump to line 1920, because the condition on line 1893 was never false
1894 (not snapVDI2 or snap2Parent != self.uuid):
1895 util.SMlog("%s != %s != %s => deleting unused base %s" % \
1896 (snapParent, self.uuid, snap2Parent, self.lvname))
1897 RefCounter.put(self.uuid, False, lvhdutil.NS_PREFIX_LVM + self.sr.uuid)
1898 self.sr.lvmCache.remove(self.lvname)
1899 self.sr.lvActivator.remove(self.uuid, False)
1900 if hostRefs:
1901 self.sr._updateSlavesOnRemove(hostRefs, self.uuid, self.lvname)
1902 basePresent = False
1903 else:
1904 # assign the _binary_ refcount of the original VDI to the new base
1905 # VDI (but as the normal refcount, since binary refcounts are only
1906 # for leaf nodes). The normal refcount of the child is not
1907 # transferred to to the base VDI because normal refcounts are
1908 # incremented and decremented individually, and not based on the
1909 # VHD chain (i.e., the child's normal refcount will be decremented
1910 # independently of its parent situation). Add 1 for this clone op.
1911 # Note that we do not need to do protect the refcount operations
1912 # below with per-VDI locking like we do in lvutil because at this
1913 # point we have exclusive access to the VDIs involved. Other SM
1914 # operations are serialized by the Agent or with the SR lock, and
1915 # any coalesce activations are serialized with the SR lock. (The
1916 # coalesce activates the coalesced VDI pair in the beginning, which
1917 # cannot affect the VDIs here because they cannot possibly be
1918 # involved in coalescing at this point, and at the relinkSkip step
1919 # that activates the children, which takes the SR lock.)
1920 ns = lvhdutil.NS_PREFIX_LVM + self.sr.uuid
1921 (cnt, bcnt) = RefCounter.check(snapVDI.uuid, ns)
1922 RefCounter.set(self.uuid, bcnt + 1, 0, ns)
1924 # the "paused" and "host_*" sm-config keys are special and must stay on
1925 # the leaf without being inherited by anyone else
1926 for key in [x for x in self.sm_config.keys() if x == "paused" or x.startswith("host_")]: 1926 ↛ 1927line 1926 didn't jump to line 1927, because the loop on line 1926 never started
1927 snapVDI.sm_config[key] = self.sm_config[key]
1928 del self.sm_config[key]
1930 # Introduce any new VDI records & update the existing one
1931 type = self.session.xenapi.VDI.get_type( \
1932 self.sr.srcmd.params['vdi_ref'])
1933 if snapVDI2: 1933 ↛ 1975line 1933 didn't jump to line 1975, because the condition on line 1933 was never false
1934 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
1935 vdiRef = snapVDI2._db_introduce()
1936 if cloneOp:
1937 vdi_info = {UUID_TAG: snapVDI2.uuid,
1938 NAME_LABEL_TAG: util.to_plain_string( \
1939 self.session.xenapi.VDI.get_name_label( \
1940 self.sr.srcmd.params['vdi_ref'])),
1941 NAME_DESCRIPTION_TAG: util.to_plain_string( \
1942 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])),
1943 IS_A_SNAPSHOT_TAG: 0,
1944 SNAPSHOT_OF_TAG: '',
1945 SNAPSHOT_TIME_TAG: '',
1946 TYPE_TAG: type,
1947 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'],
1948 READ_ONLY_TAG: 0,
1949 MANAGED_TAG: int(snapVDI2.managed),
1950 METADATA_OF_POOL_TAG: ''
1951 }
1952 else:
1953 util.SMlog("snapshot VDI params: %s" % \
1954 self.session.xenapi.VDI.get_snapshot_time(vdiRef))
1955 vdi_info = {UUID_TAG: snapVDI2.uuid,
1956 NAME_LABEL_TAG: util.to_plain_string( \
1957 self.session.xenapi.VDI.get_name_label( \
1958 self.sr.srcmd.params['vdi_ref'])),
1959 NAME_DESCRIPTION_TAG: util.to_plain_string( \
1960 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])),
1961 IS_A_SNAPSHOT_TAG: 1,
1962 SNAPSHOT_OF_TAG: snapVDI.uuid,
1963 SNAPSHOT_TIME_TAG: '',
1964 TYPE_TAG: type,
1965 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'],
1966 READ_ONLY_TAG: 0,
1967 MANAGED_TAG: int(snapVDI2.managed),
1968 METADATA_OF_POOL_TAG: ''
1969 }
1971 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
1972 util.SMlog("vdi_clone: introduced 2nd snap VDI: %s (%s)" % \
1973 (vdiRef, snapVDI2.uuid))
1975 if basePresent: 1975 ↛ 1976line 1975 didn't jump to line 1976, because the condition on line 1975 was never true
1976 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
1977 vdiRef = self._db_introduce()
1978 vdi_info = {UUID_TAG: self.uuid,
1979 NAME_LABEL_TAG: self.label,
1980 NAME_DESCRIPTION_TAG: self.description,
1981 IS_A_SNAPSHOT_TAG: 0,
1982 SNAPSHOT_OF_TAG: '',
1983 SNAPSHOT_TIME_TAG: '',
1984 TYPE_TAG: type,
1985 VDI_TYPE_TAG: self.sm_config['vdi_type'],
1986 READ_ONLY_TAG: 1,
1987 MANAGED_TAG: 0,
1988 METADATA_OF_POOL_TAG: ''
1989 }
1991 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
1992 util.SMlog("vdi_clone: introduced base VDI: %s (%s)" % \
1993 (vdiRef, self.uuid))
1995 # Update the original record
1996 vdi_ref = self.sr.srcmd.params['vdi_ref']
1997 self.session.xenapi.VDI.set_sm_config(vdi_ref, snapVDI.sm_config)
1998 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, \
1999 str(snapVDI.utilisation))
2001 # Return the info on the new snap VDI
2002 snap = snapVDI2
2003 if not snap: 2003 ↛ 2004line 2003 didn't jump to line 2004, because the condition on line 2003 was never true
2004 snap = self
2005 if not basePresent:
2006 # a single-snapshot of an empty VDI will be a noop, resulting
2007 # in no new VDIs, so return the existing one. The GC wouldn't
2008 # normally try to single-snapshot an empty VHD of course, but
2009 # if an external snapshot operation manages to sneak in right
2010 # before a snapshot-coalesce phase, we would get here
2011 snap = snapVDI
2012 return snap.get_params()
2014 def _initFromVDIInfo(self, vdiInfo):
2015 self.vdi_type = vdiInfo.vdiType
2016 self.lvname = vdiInfo.lvName
2017 self.size = vdiInfo.sizeVirt
2018 self.utilisation = vdiInfo.sizeLV
2019 self.hidden = vdiInfo.hidden
2020 if self.hidden: 2020 ↛ 2021line 2020 didn't jump to line 2021, because the condition on line 2020 was never true
2021 self.managed = False
2022 self.active = vdiInfo.lvActive
2023 self.readonly = vdiInfo.lvReadonly
2024 self.parent = vdiInfo.parentUuid
2025 self.path = os.path.join(self.sr.path, self.lvname)
2026 if hasattr(self, "sm_config_override"): 2026 ↛ 2029line 2026 didn't jump to line 2029, because the condition on line 2026 was never false
2027 self.sm_config_override["vdi_type"] = self.vdi_type
2028 else:
2029 self.sm_config_override = {'vdi_type': self.vdi_type}
2030 self.loaded = True
2032 def _initFromLVInfo(self, lvInfo):
2033 self.vdi_type = lvInfo.vdiType
2034 self.lvname = lvInfo.name
2035 self.size = lvInfo.size
2036 self.utilisation = lvInfo.size
2037 self.hidden = lvInfo.hidden
2038 self.active = lvInfo.active
2039 self.readonly = lvInfo.readonly
2040 self.parent = ''
2041 self.path = os.path.join(self.sr.path, self.lvname)
2042 if hasattr(self, "sm_config_override"): 2042 ↛ 2045line 2042 didn't jump to line 2045, because the condition on line 2042 was never false
2043 self.sm_config_override["vdi_type"] = self.vdi_type
2044 else:
2045 self.sm_config_override = {'vdi_type': self.vdi_type}
2046 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 2046 ↛ 2047line 2046 didn't jump to line 2047, because the condition on line 2046 was never true
2047 self.loaded = True
2049 def _initFromVHDInfo(self, vhdInfo):
2050 self.size = vhdInfo.sizeVirt
2051 self.parent = vhdInfo.parentUuid
2052 self.hidden = vhdInfo.hidden
2053 self.loaded = True
2055 def _determineType(self):
2056 """Determine whether this is a raw or a VHD VDI"""
2057 if "vdi_ref" in self.sr.srcmd.params: 2057 ↛ 2070line 2057 didn't jump to line 2070, because the condition on line 2057 was never false
2058 vdi_ref = self.sr.srcmd.params["vdi_ref"]
2059 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
2060 if sm_config.get("vdi_type"): 2060 ↛ 2061line 2060 didn't jump to line 2061, because the condition on line 2060 was never true
2061 self.vdi_type = sm_config["vdi_type"]
2062 prefix = lvhdutil.LV_PREFIX[self.vdi_type]
2063 self.lvname = "%s%s" % (prefix, self.uuid)
2064 self.path = os.path.join(self.sr.path, self.lvname)
2065 self.sm_config_override = sm_config
2066 return True
2068 # LVM commands can be costly, so check the file directly first in case
2069 # the LV is active
2070 found = False
2071 for t in lvhdutil.VDI_TYPES: 2071 ↛ 2072line 2071 didn't jump to line 2072, because the loop on line 2071 never started
2072 lvname = "%s%s" % (lvhdutil.LV_PREFIX[t], self.uuid)
2073 path = os.path.join(self.sr.path, lvname)
2074 if util.pathexists(path):
2075 if found:
2076 raise xs_errors.XenError('VDILoad',
2077 opterr="multiple VDI's: uuid %s" % self.uuid)
2078 found = True
2079 self.vdi_type = t
2080 self.lvname = lvname
2081 self.path = path
2082 if found: 2082 ↛ 2083line 2082 didn't jump to line 2083, because the condition on line 2082 was never true
2083 return True
2085 # now list all LV's
2086 if not lvutil._checkVG(self.sr.vgname): 2086 ↛ 2088line 2086 didn't jump to line 2088, because the condition on line 2086 was never true
2087 # when doing attach_from_config, the VG won't be there yet
2088 return False
2090 lvs = lvhdutil.getLVInfo(self.sr.lvmCache)
2091 if lvs.get(self.uuid):
2092 self._initFromLVInfo(lvs[self.uuid])
2093 return True
2094 return False
2096 def _loadThis(self):
2097 """Load VDI info for this VDI and activate the LV if it's VHD. We
2098 don't do it in VDI.load() because not all VDI operations need it."""
2099 if self.loaded: 2099 ↛ 2100line 2099 didn't jump to line 2100, because the condition on line 2099 was never true
2100 if self.vdi_type == vhdutil.VDI_TYPE_VHD:
2101 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
2102 return
2103 try:
2104 lvs = lvhdutil.getLVInfo(self.sr.lvmCache, self.lvname)
2105 except util.CommandException as e:
2106 raise xs_errors.XenError('VDIUnavailable',
2107 opterr='%s (LV scan error)' % os.strerror(abs(e.code)))
2108 if not lvs.get(self.uuid): 2108 ↛ 2109line 2108 didn't jump to line 2109, because the condition on line 2108 was never true
2109 raise xs_errors.XenError('VDIUnavailable', opterr='LV not found')
2110 self._initFromLVInfo(lvs[self.uuid])
2111 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 2111 ↛ 2118line 2111 didn't jump to line 2118, because the condition on line 2111 was never false
2112 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
2113 vhdInfo = vhdutil.getVHDInfo(self.path, lvhdutil.extractUuid, False)
2114 if not vhdInfo: 2114 ↛ 2115line 2114 didn't jump to line 2115, because the condition on line 2114 was never true
2115 raise xs_errors.XenError('VDIUnavailable', \
2116 opterr='getVHDInfo failed')
2117 self._initFromVHDInfo(vhdInfo)
2118 self.loaded = True
2120 def _chainSetActive(self, active, binary, persistent=False):
2121 if binary: 2121 ↛ 2122line 2121 didn't jump to line 2122, because the condition on line 2121 was never true
2122 (count, bcount) = RefCounter.checkLocked(self.uuid,
2123 lvhdutil.NS_PREFIX_LVM + self.sr.uuid)
2124 if (active and bcount > 0) or (not active and bcount == 0):
2125 return # this is a redundant activation/deactivation call
2127 vdiList = {self.uuid: self.lvname}
2128 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 2128 ↛ 2131line 2128 didn't jump to line 2131, because the condition on line 2128 was never false
2129 vdiList = vhdutil.getParentChain(self.lvname,
2130 lvhdutil.extractUuid, self.sr.vgname)
2131 for uuid, lvName in vdiList.items(): 2131 ↛ 2132line 2131 didn't jump to line 2132, because the loop on line 2131 never started
2132 binaryParam = binary
2133 if uuid != self.uuid:
2134 binaryParam = False # binary param only applies to leaf nodes
2135 if active:
2136 self.sr.lvActivator.activate(uuid, lvName, binaryParam,
2137 persistent)
2138 else:
2139 # just add the LVs for deactivation in the final (cleanup)
2140 # step. The LVs must not have been activated during the current
2141 # operation
2142 self.sr.lvActivator.add(uuid, lvName, binaryParam)
2144 def _failClone(self, uuid, jval, msg):
2145 try:
2146 self.sr._handleInterruptedCloneOp(uuid, jval, True)
2147 self.sr.journaler.remove(self.JRN_CLONE, uuid)
2148 except Exception as e:
2149 util.SMlog('WARNING: failed to clean up failed snapshot: ' \
2150 ' %s (error ignored)' % e)
2151 raise xs_errors.XenError('VDIClone', opterr=msg)
2153 def _markHidden(self):
2154 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
2155 self.sr.lvmCache.setHidden(self.lvname)
2156 else:
2157 vhdutil.setHidden(self.path)
2158 self.hidden = 1
2160 def _prepareThin(self, attach):
2161 origUtilisation = self.sr.lvmCache.getSize(self.lvname)
2162 if self.sr.isMaster:
2163 # the master can prepare the VDI locally
2164 if attach:
2165 lvhdutil.attachThin(self.sr.journaler, self.sr.uuid, self.uuid)
2166 else:
2167 lvhdutil.detachThin(self.session, self.sr.lvmCache,
2168 self.sr.uuid, self.uuid)
2169 else:
2170 fn = "attach"
2171 if not attach:
2172 fn = "detach"
2173 pools = self.session.xenapi.pool.get_all()
2174 master = self.session.xenapi.pool.get_master(pools[0])
2175 rv = self.session.xenapi.host.call_plugin(
2176 master, self.sr.THIN_PLUGIN, fn,
2177 {"srUuid": self.sr.uuid, "vdiUuid": self.uuid})
2178 util.SMlog("call-plugin returned: %s" % rv)
2179 if not rv:
2180 raise Exception('plugin %s failed' % self.sr.THIN_PLUGIN)
2181 # refresh to pick up the size change on this slave
2182 self.sr.lvmCache.activateNoRefcount(self.lvname, True)
2184 self.utilisation = self.sr.lvmCache.getSize(self.lvname)
2185 if origUtilisation != self.utilisation:
2186 vdi_ref = self.sr.srcmd.params['vdi_ref']
2187 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref,
2188 str(self.utilisation))
2189 stats = lvutil._getVGstats(self.sr.vgname)
2190 sr_utilisation = stats['physical_utilisation']
2191 self.session.xenapi.SR.set_physical_utilisation(self.sr.sr_ref,
2192 str(sr_utilisation))
2194 @override
2195 def update(self, sr_uuid, vdi_uuid) -> None:
2196 if self.sr.legacyMode:
2197 return
2199 #Synch the name_label of this VDI on storage with the name_label in XAPI
2200 vdi_ref = self.session.xenapi.VDI.get_by_uuid(self.uuid)
2201 update_map = {}
2202 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \
2203 METADATA_OBJECT_TYPE_VDI
2204 update_map[UUID_TAG] = self.uuid
2205 update_map[NAME_LABEL_TAG] = util.to_plain_string( \
2206 self.session.xenapi.VDI.get_name_label(vdi_ref))
2207 update_map[NAME_DESCRIPTION_TAG] = util.to_plain_string( \
2208 self.session.xenapi.VDI.get_name_description(vdi_ref))
2209 update_map[SNAPSHOT_TIME_TAG] = \
2210 self.session.xenapi.VDI.get_snapshot_time(vdi_ref)
2211 update_map[METADATA_OF_POOL_TAG] = \
2212 self.session.xenapi.VDI.get_metadata_of_pool(vdi_ref)
2213 LVMMetadataHandler(self.sr.mdpath).updateMetadata(update_map)
2215 @override
2216 def _ensure_cbt_space(self) -> None:
2217 self.sr.ensureCBTSpace()
2219 @override
2220 def _create_cbt_log(self) -> str:
2221 logname = self._get_cbt_logname(self.uuid)
2222 self.sr.lvmCache.create(logname, self.sr.journaler.LV_SIZE, CBTLOG_TAG)
2223 logpath = super(LVHDVDI, self)._create_cbt_log()
2224 self.sr.lvmCache.deactivateNoRefcount(logname)
2225 return logpath
2227 @override
2228 def _delete_cbt_log(self) -> None:
2229 logpath = self._get_cbt_logpath(self.uuid)
2230 if self._cbt_log_exists(logpath):
2231 logname = self._get_cbt_logname(self.uuid)
2232 self.sr.lvmCache.remove(logname)
2234 @override
2235 def _rename(self, oldpath, newpath) -> None:
2236 oldname = os.path.basename(oldpath)
2237 newname = os.path.basename(newpath)
2238 self.sr.lvmCache.rename(oldname, newname)
2240 @override
2241 def _activate_cbt_log(self, lv_name) -> bool:
2242 self.sr.lvmCache.refresh()
2243 if not self.sr.lvmCache.is_active(lv_name): 2243 ↛ 2244line 2243 didn't jump to line 2244, because the condition on line 2243 was never true
2244 try:
2245 self.sr.lvmCache.activateNoRefcount(lv_name)
2246 return True
2247 except Exception as e:
2248 util.SMlog("Exception in _activate_cbt_log, "
2249 "Error: %s." % str(e))
2250 raise
2251 else:
2252 return False
2254 @override
2255 def _deactivate_cbt_log(self, lv_name) -> None:
2256 try:
2257 self.sr.lvmCache.deactivateNoRefcount(lv_name)
2258 except Exception as e:
2259 util.SMlog("Exception in _deactivate_cbt_log, Error: %s." % str(e))
2260 raise
2262 @override
2263 def _cbt_log_exists(self, logpath) -> bool:
2264 return lvutil.exists(logpath)
2266if __name__ == '__main__': 2266 ↛ 2267line 2266 didn't jump to line 2267, because the condition on line 2266 was never true
2267 SRCommand.run(LVHDSR, DRIVER_INFO)
2268else:
2269 SR.registerSR(LVHDSR)