Coverage for drivers/LVHDSR.py: 43%

1#!/usr/bin/python3
2#
3# Copyright (C) Citrix Systems Inc.
4#
5# This program is free software; you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License as published
7# by the Free Software Foundation; version 2.1 only.
8#
9# This program is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with this program; if not, write to the Free Software Foundation, Inc.,
16# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17#
18# LVHDSR: VHD on LVM storage repository
19#
21import SR
22from SR import deviceCheck
23import VDI
24import SRCommand
25import util
26import lvutil
27import lvmcache
28import vhdutil
29import lvhdutil
30import scsiutil
31import os
32import sys
33import time
34import errno
35import xs_errors
36import cleanup
37import blktap2
38from journaler import Journaler
39from lock import Lock
40from refcounter import RefCounter
41from ipc import IPCFlag
42from lvmanager import LVActivator
43import XenAPI # pylint: disable=import-error
44import re
45from srmetadata import ALLOCATION_TAG, NAME_LABEL_TAG, NAME_DESCRIPTION_TAG, \
46 UUID_TAG, IS_A_SNAPSHOT_TAG, SNAPSHOT_OF_TAG, TYPE_TAG, VDI_TYPE_TAG, \
47 READ_ONLY_TAG, MANAGED_TAG, SNAPSHOT_TIME_TAG, METADATA_OF_POOL_TAG, \
48 LVMMetadataHandler, METADATA_OBJECT_TYPE_VDI, \
49 METADATA_OBJECT_TYPE_SR, METADATA_UPDATE_OBJECT_TYPE_TAG
50from metadata import retrieveXMLfromFile, _parseXML
51from xmlrpc.client import DateTime
52import glob
53from constants import CBTLOG_TAG
54DEV_MAPPER_ROOT = os.path.join('/dev/mapper', lvhdutil.VG_PREFIX)
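# Illustrative example, assuming lvhdutil.VG_PREFIX == "VG_XenStorage-":
# device-mapper escapes '-' in VG/LV names as '--', so the entries matched by
# glob(DEV_MAPPER_ROOT + '*') in delete()/detach() below typically look like
#   /dev/mapper/VG_XenStorage--<sr_uuid>-VHD--<vdi_uuid>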
56geneology = {}
57CAPABILITIES = ["SR_PROBE", "SR_UPDATE", "SR_TRIM",
58 "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH", "VDI_MIRROR",
59 "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "ATOMIC_PAUSE",
60 "VDI_RESET_ON_BOOT/2", "VDI_UPDATE", "VDI_CONFIG_CBT",
61 "VDI_ACTIVATE", "VDI_DEACTIVATE"]
63CONFIGURATION = [['device', 'local device path (required) (e.g. /dev/sda3)']]
65DRIVER_INFO = {
66 'name': 'Local VHD on LVM',
67 'description': 'SR plugin which represents disks as VHD disks on ' + \
68 'Logical Volumes within a locally-attached Volume Group',
69 'vendor': 'XenSource Inc',
70 'copyright': '(C) 2008 XenSource Inc',
71 'driver_version': '1.0',
72 'required_api_version': '1.0',
73 'capabilities': CAPABILITIES,
74 'configuration': CONFIGURATION
75 }
77PARAM_VHD = "vhd"
78PARAM_RAW = "raw"
80OPS_EXCLUSIVE = [
81 "sr_create", "sr_delete", "sr_attach", "sr_detach", "sr_scan",
82 "sr_update", "vdi_create", "vdi_delete", "vdi_resize", "vdi_snapshot",
83 "vdi_clone"]
85# Log if snapshot pauses VM for more than this many seconds
86LONG_SNAPTIME = 60
88class LVHDSR(SR.SR):
89 DRIVER_TYPE = 'lvhd'
91 PROVISIONING_TYPES = ["thin", "thick"]
92 PROVISIONING_DEFAULT = "thick"
93 THIN_PLUGIN = "lvhd-thin"
95 PLUGIN_ON_SLAVE = "on-slave"
97 FLAG_USE_VHD = "use_vhd"
98 MDVOLUME_NAME = "MGT"
100 ALLOCATION_QUANTUM = "allocation_quantum"
101 INITIAL_ALLOCATION = "initial_allocation"
103 LOCK_RETRY_INTERVAL = 3
104 LOCK_RETRY_ATTEMPTS = 10
106 TEST_MODE_KEY = "testmode"
107 TEST_MODE_VHD_FAIL_REPARENT_BEGIN = "vhd_fail_reparent_begin"
108 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR = "vhd_fail_reparent_locator"
109 TEST_MODE_VHD_FAIL_REPARENT_END = "vhd_fail_reparent_end"
110 TEST_MODE_VHD_FAIL_RESIZE_BEGIN = "vhd_fail_resize_begin"
111 TEST_MODE_VHD_FAIL_RESIZE_DATA = "vhd_fail_resize_data"
112 TEST_MODE_VHD_FAIL_RESIZE_METADATA = "vhd_fail_resize_metadata"
113 TEST_MODE_VHD_FAIL_RESIZE_END = "vhd_fail_resize_end"
115 ENV_VAR_VHD_TEST = {
116 TEST_MODE_VHD_FAIL_REPARENT_BEGIN:
117 "VHD_UTIL_TEST_FAIL_REPARENT_BEGIN",
118 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR:
119 "VHD_UTIL_TEST_FAIL_REPARENT_LOCATOR",
120 TEST_MODE_VHD_FAIL_REPARENT_END:
121 "VHD_UTIL_TEST_FAIL_REPARENT_END",
122 TEST_MODE_VHD_FAIL_RESIZE_BEGIN:
123 "VHD_UTIL_TEST_FAIL_RESIZE_BEGIN",
124 TEST_MODE_VHD_FAIL_RESIZE_DATA:
125 "VHD_UTIL_TEST_FAIL_RESIZE_DATA_MOVED",
126 TEST_MODE_VHD_FAIL_RESIZE_METADATA:
127 "VHD_UTIL_TEST_FAIL_RESIZE_METADATA_MOVED",
128 TEST_MODE_VHD_FAIL_RESIZE_END:
129 "VHD_UTIL_TEST_FAIL_RESIZE_END"
130 }
131 testMode = ""
133 legacyMode = True
135 def handles(type):
136 """Returns True if this SR class understands the given dconf string"""
137 # we can pose as LVMSR or EXTSR for compatibility purposes
138 if __name__ == '__main__':
139 name = sys.argv[0]
140 else:
141 name = __name__
142 if name.endswith("LVMSR"):
143 return type == "lvm"
144 elif name.endswith("EXTSR"):
145 return type == "ext"
146 return type == LVHDSR.DRIVER_TYPE
147 handles = staticmethod(handles)
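# Minimal usage sketch of the dispatch above (assumes the module is loaded
# under its own name rather than via the LVMSR/EXTSR compatibility aliases):
#   LVHDSR.handles("lvhd")  # True
#   LVHDSR.handles("lvm")   # True only when the driver poses as LVMSR
#   LVHDSR.handles("ext")   # True only when the driver poses as EXTSR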
149 def load(self, sr_uuid):
150 self.ops_exclusive = OPS_EXCLUSIVE
152 self.isMaster = False
153 if 'SRmaster' in self.dconf and self.dconf['SRmaster'] == 'true':
154 self.isMaster = True
156 self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
157 self.sr_vditype = SR.DEFAULT_TAP
158 self.uuid = sr_uuid
159 self.vgname = lvhdutil.VG_PREFIX + self.uuid
160 self.path = os.path.join(lvhdutil.VG_LOCATION, self.vgname)
161 self.mdpath = os.path.join(self.path, self.MDVOLUME_NAME)
162 self.provision = self.PROVISIONING_DEFAULT
163 try:
164 self.lvmCache = lvmcache.LVMCache(self.vgname)
165 except:
166 raise xs_errors.XenError('SRUnavailable', \
167 opterr='Failed to initialise the LVMCache')
168 self.lvActivator = LVActivator(self.uuid, self.lvmCache)
169 self.journaler = Journaler(self.lvmCache)
170 if not self.srcmd.params.get("sr_ref"):
171 return # must be a probe call
172 # Test for thick vs thin provisioning conf parameter
173 if 'allocation' in self.dconf:  # coverage: 173 ↛ 174 (condition was never true)
174 if self.dconf['allocation'] in self.PROVISIONING_TYPES:
175 self.provision = self.dconf['allocation']
176 else:
177 raise xs_errors.XenError('InvalidArg', \
178 opterr='Allocation parameter must be one of %s' % self.PROVISIONING_TYPES)
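# For illustration: the allocation mode appears to be read from the PBD
# device-config (self.dconf), e.g. an allocation="thin" entry selects thin
# provisioning; any value outside PROVISIONING_TYPES raises InvalidArg above.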
180 self.other_conf = self.session.xenapi.SR.get_other_config(self.sr_ref)
181 if self.other_conf.get(self.TEST_MODE_KEY):  # coverage: 181 ↛ 185 (condition was never false)
182 self.testMode = self.other_conf[self.TEST_MODE_KEY]
183 self._prepareTestMode()
185 self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref)
186 # sm_config flag overrides PBD, if any
187 if self.sm_config.get('allocation') in self.PROVISIONING_TYPES:
188 self.provision = self.sm_config.get('allocation')
190 if self.sm_config.get(self.FLAG_USE_VHD) == "true":
191 self.legacyMode = False
193 if lvutil._checkVG(self.vgname):
194 if self.isMaster and not self.cmd in ["vdi_attach", "vdi_detach",
195 "vdi_activate", "vdi_deactivate"]:  # coverage: 194 ↛ 197 (condition was never false)
196 self._undoAllJournals()
197 if not self.cmd in ["sr_attach", "sr_probe"]:
198 self._checkMetadataVolume()
200 self.mdexists = False
202 # get a VDI -> TYPE map from the storage
203 contains_uuid_regex = \
204 re.compile("^.*[0-9a-f]{8}-(([0-9a-f]{4})-){3}[0-9a-f]{12}.*")
205 self.storageVDIs = {}
207 for key in self.lvmCache.lvs.keys():  # coverage: 207 ↛ 209 (loop never started)
208 # if the lvname has a uuid in it
209 type = None
210 if contains_uuid_regex.search(key) is not None:
211 if key.startswith(lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD]):
212 type = vhdutil.VDI_TYPE_VHD
213 vdi = key[len(lvhdutil.LV_PREFIX[type]):]
214 elif key.startswith(lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_RAW]):
215 type = vhdutil.VDI_TYPE_RAW
216 vdi = key[len(lvhdutil.LV_PREFIX[type]):]
217 else:
218 continue
220 if type is not None:
221 self.storageVDIs[vdi] = type
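# Example of the resulting map, assuming lvhdutil.LV_PREFIX maps the VHD and
# RAW VDI types to "VHD-" and "LV-" respectively:
#   LV "VHD-<vdi_uuid>" -> self.storageVDIs["<vdi_uuid>"] == vhdutil.VDI_TYPE_VHD
#   LV "LV-<vdi_uuid>"  -> self.storageVDIs["<vdi_uuid>"] == vhdutil.VDI_TYPE_RAW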
223 # check if metadata volume exists
224 try:
225 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME)
226 except:
227 pass
229 def cleanup(self):
230 # we don't need to hold the lock to dec refcounts of activated LVs
231 if not self.lvActivator.deactivateAll():  # coverage: 231 ↛ 232 (condition was never true)
232 raise util.SMException("failed to deactivate LVs")
234 def updateSRMetadata(self, allocation):
235 try:
236 # Add SR specific SR metadata
237 sr_info = \
238 {ALLOCATION_TAG: allocation,
239 UUID_TAG: self.uuid,
240 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_label(self.sr_ref)),
241 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_description(self.sr_ref))
242 }
244 vdi_info = {}
245 for vdi in self.session.xenapi.SR.get_VDIs(self.sr_ref):
246 vdi_uuid = self.session.xenapi.VDI.get_uuid(vdi)
248 # Create the VDI entry in the SR metadata
249 vdi_info[vdi_uuid] = \
250 {
251 UUID_TAG: vdi_uuid,
252 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi)),
253 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi)),
254 IS_A_SNAPSHOT_TAG: \
255 int(self.session.xenapi.VDI.get_is_a_snapshot(vdi)),
256 SNAPSHOT_OF_TAG: \
257 self.session.xenapi.VDI.get_snapshot_of(vdi),
258 SNAPSHOT_TIME_TAG: \
259 self.session.xenapi.VDI.get_snapshot_time(vdi),
260 TYPE_TAG: \
261 self.session.xenapi.VDI.get_type(vdi),
262 VDI_TYPE_TAG: \
263 self.session.xenapi.VDI.get_sm_config(vdi)['vdi_type'],
264 READ_ONLY_TAG: \
265 int(self.session.xenapi.VDI.get_read_only(vdi)),
266 METADATA_OF_POOL_TAG: \
267 self.session.xenapi.VDI.get_metadata_of_pool(vdi),
268 MANAGED_TAG: \
269 int(self.session.xenapi.VDI.get_managed(vdi))
270 }
271 LVMMetadataHandler(self.mdpath).writeMetadata(sr_info, vdi_info)
273 except Exception as e:
274 raise xs_errors.XenError('MetadataError', \
275 opterr='Error upgrading SR Metadata: %s' % str(e))
277 def syncMetadataAndStorage(self):
278 try:
279 # if a VDI is present in the metadata but not in the storage
280 # then delete it from the metadata
281 vdi_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1]
282 for vdi in list(vdi_info.keys()):
283 update_map = {}
284 if not vdi_info[vdi][UUID_TAG] in set(self.storageVDIs.keys()):  # coverage: 284 ↛ 291 (condition was never false)
285 # delete this from metadata
286 LVMMetadataHandler(self.mdpath). \
287 deleteVdiFromMetadata(vdi_info[vdi][UUID_TAG])
288 else:
289 # search for this in the metadata, compare types
290 # self.storageVDIs is a map of vdi_uuid to vdi_type
291 if vdi_info[vdi][VDI_TYPE_TAG] != \
292 self.storageVDIs[vdi_info[vdi][UUID_TAG]]:
293 # storage type takes authority
294 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] \
295 = METADATA_OBJECT_TYPE_VDI
296 update_map[UUID_TAG] = vdi_info[vdi][UUID_TAG]
297 update_map[VDI_TYPE_TAG] = \
298 self.storageVDIs[vdi_info[vdi][UUID_TAG]]
299 LVMMetadataHandler(self.mdpath) \
300 .updateMetadata(update_map)
301 else:
302 # This should never happen
303 pass
305 except Exception as e:
306 raise xs_errors.XenError('MetadataError', \
307 opterr='Error synching SR Metadata and storage: %s' % str(e))
309 def syncMetadataAndXapi(self):
310 try:
311 # get metadata
312 (sr_info, vdi_info) = \
313 LVMMetadataHandler(self.mdpath, False).getMetadata()
315 # First synch SR parameters
316 self.update(self.uuid)
318 # Now update the VDI information in the metadata if required
319 for vdi_offset in vdi_info.keys():
320 try:
321 vdi_ref = \
322 self.session.xenapi.VDI.get_by_uuid( \
323 vdi_info[vdi_offset][UUID_TAG])
324 except:
325 # maybe the VDI is not in XAPI yet, don't bother
326 continue
328 new_name_label = util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi_ref))
329 new_name_description = util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi_ref))
331 if vdi_info[vdi_offset][NAME_LABEL_TAG] != new_name_label or \
332 vdi_info[vdi_offset][NAME_DESCRIPTION_TAG] != \
333 new_name_description:
334 update_map = {}
335 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \
336 METADATA_OBJECT_TYPE_VDI
337 update_map[UUID_TAG] = vdi_info[vdi_offset][UUID_TAG]
338 update_map[NAME_LABEL_TAG] = new_name_label
339 update_map[NAME_DESCRIPTION_TAG] = new_name_description
340 LVMMetadataHandler(self.mdpath) \
341 .updateMetadata(update_map)
342 except Exception as e:
343 raise xs_errors.XenError('MetadataError', \
344 opterr='Error synching SR Metadata and XAPI: %s' % str(e))
346 def _checkMetadataVolume(self):
347 util.SMlog("Entering _checkMetadataVolume")
348 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME)
349 if self.isMaster:  # coverage: 349 ↛ 365 (condition was never false)
350 if self.mdexists and self.cmd == "sr_attach":
351 try:
352 # activate the management volume
353 # will be deactivated at detach time
354 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME)
355 self._synchSmConfigWithMetaData()
356 util.SMlog("Sync SR metadata and the state on the storage.")
357 self.syncMetadataAndStorage()
358 self.syncMetadataAndXapi()
359 except Exception as e:
360 util.SMlog("Exception in _checkMetadataVolume, " \
361 "Error: %s." % str(e))
362 elif not self.mdexists and not self.legacyMode:  # coverage: 362 ↛ 365 (condition was never false)
363 self._introduceMetaDataVolume()
365 if self.mdexists:
366 self.legacyMode = False
368 def _synchSmConfigWithMetaData(self):
369 util.SMlog("Synching sm-config with metadata volume")
371 try:
372 # get SR info from metadata
373 sr_info = {}
374 map = {}
375 sr_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[0]
377 if sr_info == {}:  # coverage: 377 ↛ 378 (condition was never true)
378 raise Exception("Failed to get SR information from metadata.")
380 if "allocation" in sr_info: 380 ↛ 384line 380 didn't jump to line 384, because the condition on line 380 was never false
381 self.provision = sr_info.get("allocation")
382 map['allocation'] = sr_info.get("allocation")
383 else:
384 raise Exception("Allocation key not found in SR metadata. "
385 "SR info found: %s" % sr_info)
387 except Exception as e:
388 raise xs_errors.XenError(
389 'MetadataError',
390 opterr='Error reading SR params from '
391 'metadata Volume: %s' % str(e))
392 try:
393 map[self.FLAG_USE_VHD] = 'true'
394 self.session.xenapi.SR.set_sm_config(self.sr_ref, map)
395 except:
396 raise xs_errors.XenError(
397 'MetadataError',
398 opterr='Error updating sm_config key')
400 def _introduceMetaDataVolume(self):
401 util.SMlog("Creating Metadata volume")
402 try:
403 config = {}
404 self.lvmCache.create(self.MDVOLUME_NAME, 4 * 1024 * 1024)
406 # activate the management volume, will be deactivated at detach time
407 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME)
409 name_label = util.to_plain_string( \
410 self.session.xenapi.SR.get_name_label(self.sr_ref))
411 name_description = util.to_plain_string( \
412 self.session.xenapi.SR.get_name_description(self.sr_ref))
413 config[self.FLAG_USE_VHD] = "true"
414 config['allocation'] = self.provision
415 self.session.xenapi.SR.set_sm_config(self.sr_ref, config)
417 # Add the SR metadata
418 self.updateSRMetadata(self.provision)
419 except Exception as e:
420 raise xs_errors.XenError('MetadataError', \
421 opterr='Error introducing Metadata Volume: %s' % str(e))
423 def _removeMetadataVolume(self):
424 if self.mdexists:
425 try:
426 self.lvmCache.remove(self.MDVOLUME_NAME)
427 except:
428 raise xs_errors.XenError('MetadataError', \
429 opterr='Failed to delete MGT Volume')
431 def _refresh_size(self):
432 """
433 Refreshes the size of the backing device.
434 Returns True if all paths/devices agree on the same size.
435 """
436 if hasattr(self, 'SCSIid'):  # coverage: 436 ↛ 438 (condition was never true)
437 # LVHDoHBASR, LVHDoISCSISR
438 return scsiutil.refresh_lun_size_by_SCSIid(getattr(self, 'SCSIid'))
439 else:
440 # LVHDSR
441 devices = self.dconf['device'].split(',')
442 scsiutil.refreshdev(devices)
443 return True
445 def _expand_size(self):
446 """
447 Expands the size of the SR by growing into additional available
448 space, if extra space is available on the backing device.
449 Needs to be called after a successful call to _refresh_size.
450 """
451 currentvgsize = lvutil._getVGstats(self.vgname)['physical_size']
452 # We are comparing aligned PV sizes with VG sizes, so a threshold is needed
453 resizethreshold = 100 * 1024 * 1024 # 100 MiB
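# Worked example (illustrative): with this 100 MiB threshold, a backing device
# that grew by only 50 MiB is ignored, whereas one that grew by 1 GiB causes
# every PV in the VG to be resized below.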
454 devices = self.dconf['device'].split(',')
455 totaldevicesize = 0
456 for device in devices:
457 totaldevicesize = totaldevicesize + scsiutil.getsize(device)
458 if totaldevicesize >= (currentvgsize + resizethreshold):
459 try:
460 if hasattr(self, 'SCSIid'):  # coverage: 460 ↛ 462 (condition was never true)
461 # LVHDoHBASR, LVHDoISCSISR might have slaves
462 scsiutil.refresh_lun_size_by_SCSIid_on_slaves(self.session,
463 getattr(self, 'SCSIid'))
464 util.SMlog("LVHDSR._expand_size for %s will resize the pv." %
465 self.uuid)
466 for pv in lvutil.get_pv_for_vg(self.vgname):
467 lvutil.resizePV(pv)
468 except:
469 util.logException("LVHDSR._expand_size for %s failed to resize"
470 " the PV" % self.uuid)
472 @deviceCheck
473 def create(self, uuid, size):
474 util.SMlog("LVHDSR.create for %s" % self.uuid)
475 if not self.isMaster:
476 util.SMlog('sr_create blocked for non-master')
477 raise xs_errors.XenError('LVMMaster')
479 if lvutil._checkVG(self.vgname):
480 raise xs_errors.XenError('SRExists')
482 # Check that none of the devices are already in use by other PBDs
483 if util.test_hostPBD_devs(self.session, uuid, self.dconf['device']):
484 raise xs_errors.XenError('SRInUse')
486 # Check serial number entry in SR records
487 for dev in self.dconf['device'].split(','):
488 if util.test_scsiserial(self.session, dev):
489 raise xs_errors.XenError('SRInUse')
491 lvutil.createVG(self.dconf['device'], self.vgname)
493 # Update serial number string
494 scsiutil.add_serial_record(self.session, self.sr_ref, \
495 scsiutil.devlist_to_serialstring(self.dconf['device'].split(',')))
497 # since this is an SR.create, turn off legacy mode
498 self.session.xenapi.SR.add_to_sm_config(self.sr_ref, \
499 self.FLAG_USE_VHD, 'true')
501 def delete(self, uuid):
502 util.SMlog("LVHDSR.delete for %s" % self.uuid)
503 if not self.isMaster:
504 raise xs_errors.XenError('LVMMaster')
505 cleanup.gc_force(self.session, self.uuid)
507 success = True
508 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'):
509 if util.extractSRFromDevMapper(fileName) != self.uuid:
510 continue
512 if util.doesFileHaveOpenHandles(fileName):
513 util.SMlog("LVHDSR.delete: The dev mapper entry %s has open " \
514 "handles" % fileName)
515 success = False
516 continue
518 # Now attempt to remove the dev mapper entry
519 if not lvutil.removeDevMapperEntry(fileName, False):
520 success = False
521 continue
523 try:
524 lvname = os.path.basename(fileName.replace('-', '/'). \
525 replace('//', '-'))
526 lpath = os.path.join(self.path, lvname)
527 os.unlink(lpath)
528 except OSError as e:
529 if e.errno != errno.ENOENT:
530 util.SMlog("LVHDSR.delete: failed to remove the symlink for " \
531 "file %s. Error: %s" % (fileName, str(e)))
532 success = False
534 if success:
535 try:
536 if util.pathexists(self.path):
537 os.rmdir(self.path)
538 except Exception as e:
539 util.SMlog("LVHDSR.delete: failed to remove the symlink " \
540 "directory %s. Error: %s" % (self.path, str(e)))
541 success = False
543 self._removeMetadataVolume()
544 self.lvmCache.refresh()
545 if len(lvhdutil.getLVInfo(self.lvmCache)) > 0:
546 raise xs_errors.XenError('SRNotEmpty')
548 if not success:
549 raise Exception("LVHDSR delete failed, please refer to the log " \
550 "for details.")
552 lvutil.removeVG(self.dconf['device'], self.vgname)
553 self._cleanup()
555 def attach(self, uuid):
556 util.SMlog("LVHDSR.attach for %s" % self.uuid)
558 self._cleanup(True) # in case of host crashes, if detach wasn't called
560 if not util.match_uuid(self.uuid) or not lvutil._checkVG(self.vgname):  # coverage: 560 ↛ 561 (condition was never true)
561 raise xs_errors.XenError('SRUnavailable', \
562 opterr='no such volume group: %s' % self.vgname)
564 # Refresh the metadata status
565 self._checkMetadataVolume()
567 refreshsizeok = self._refresh_size()
569 if self.isMaster:  # coverage: 569 ↛ 580 (condition was never false)
570 if refreshsizeok:  # coverage: 570 ↛ 574 (condition was never false)
571 self._expand_size()
573 # Update SCSIid string
574 util.SMlog("Calling devlist_to_serial")
575 scsiutil.add_serial_record(
576 self.session, self.sr_ref,
577 scsiutil.devlist_to_serialstring(self.dconf['device'].split(',')))
579 # Test Legacy Mode Flag and update if VHD volumes exist
580 if self.isMaster and self.legacyMode:  # coverage: 580 ↛ 581 (condition was never true)
581 vdiInfo = lvhdutil.getVDIInfo(self.lvmCache)
582 for uuid, info in vdiInfo.items():
583 if info.vdiType == vhdutil.VDI_TYPE_VHD:
584 self.legacyMode = False
585 map = self.session.xenapi.SR.get_sm_config(self.sr_ref)
586 self._introduceMetaDataVolume()
587 break
589 # Set the block scheduler
590 for dev in self.dconf['device'].split(','):
591 self.block_setscheduler(dev)
593 def detach(self, uuid):
594 util.SMlog("LVHDSR.detach for %s" % self.uuid)
595 cleanup.abort(self.uuid)
597 # Do a best effort cleanup of the dev mapper entries
598 # go through all devmapper entries for this VG
599 success = True
600 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'):  # coverage: 600 ↛ 601 (loop never started)
601 if util.extractSRFromDevMapper(fileName) != self.uuid:
602 continue
604 # check if any file has open handles
605 if util.doesFileHaveOpenHandles(fileName):
606 # if yes, log this and signal failure
607 util.SMlog("LVHDSR.detach: The dev mapper entry %s has open " \
608 "handles" % fileName)
609 success = False
610 continue
612 # Now attempt to remove the dev mapper entry
613 if not lvutil.removeDevMapperEntry(fileName, False):
614 success = False
615 continue
617 # also remove the symlinks from /dev/VG-XenStorage-SRUUID/*
618 try:
619 lvname = os.path.basename(fileName.replace('-', '/'). \
620 replace('//', '-'))
621 lvname = os.path.join(self.path, lvname)
622 util.force_unlink(lvname)
623 except Exception as e:
624 util.SMlog("LVHDSR.detach: failed to remove the symlink for " \
625 "file %s. Error: %s" % (fileName, str(e)))
626 success = False
628 # now remove the directory where the symlinks are
629 # this should pass as the directory should be empty by now
630 if success:  # coverage: 630 ↛ 639 (condition was never false)
631 try:
632 if util.pathexists(self.path):  # coverage: 632 ↛ 633 (condition was never true)
633 os.rmdir(self.path)
634 except Exception as e:
635 util.SMlog("LVHDSR.detach: failed to remove the symlink " \
636 "directory %s. Error: %s" % (self.path, str(e)))
637 success = False
639 if not success:  # coverage: 639 ↛ 640 (condition was never true)
640 raise Exception("SR detach failed, please refer to the log " \
641 "for details.")
643 # Don't delete lock files on the master as it will break the locking
644 # between SM and any GC thread that survives through SR.detach.
645 # However, we should still delete lock files on slaves as it is the
646 # only place to do so.
647 self._cleanup(self.isMaster)
649 def forget_vdi(self, uuid):
650 if not self.legacyMode:
651 LVMMetadataHandler(self.mdpath).deleteVdiFromMetadata(uuid)
652 super(LVHDSR, self).forget_vdi(uuid)
654 def scan(self, uuid):
655 try:
656 lvname = ''
657 activated = True
658 util.SMlog("LVHDSR.scan for %s" % self.uuid)
659 if not self.isMaster:  # coverage: 659 ↛ 660 (condition was never true)
660 util.SMlog('sr_scan blocked for non-master')
661 raise xs_errors.XenError('LVMMaster')
663 if self._refresh_size():  # coverage: 663 ↛ 665 (condition was never false)
664 self._expand_size()
665 self.lvmCache.refresh()
666 cbt_vdis = self.lvmCache.getTagged(CBTLOG_TAG)
667 self._loadvdis()
668 stats = lvutil._getVGstats(self.vgname)
669 self.physical_size = stats['physical_size']
670 self.physical_utilisation = stats['physical_utilisation']
672 # Now check if there are any VDIs in the metadata, which are not in
673 # XAPI
674 if self.mdexists:  # coverage: 674 ↛ 784 (condition was never false)
675 vdiToSnaps = {}
676 # get VDIs from XAPI
677 vdis = self.session.xenapi.SR.get_VDIs(self.sr_ref)
678 vdi_uuids = set([])
679 for vdi in vdis:
680 vdi_uuids.add(self.session.xenapi.VDI.get_uuid(vdi))
682 Dict = LVMMetadataHandler(self.mdpath, False).getMetadata()[1]
684 for vdi in list(Dict.keys()):
685 vdi_uuid = Dict[vdi][UUID_TAG]
686 if bool(int(Dict[vdi][IS_A_SNAPSHOT_TAG])):  # coverage: 686 ↛ 687 (condition was never true)
687 if Dict[vdi][SNAPSHOT_OF_TAG] in vdiToSnaps:
688 vdiToSnaps[Dict[vdi][SNAPSHOT_OF_TAG]].append(vdi_uuid)
689 else:
690 vdiToSnaps[Dict[vdi][SNAPSHOT_OF_TAG]] = [vdi_uuid]
692 if vdi_uuid not in vdi_uuids:  # coverage: 692 ↛ 693 (condition was never true)
693 util.SMlog("Introduce VDI %s as it is present in " \
694 "metadata and not in XAPI." % vdi_uuid)
695 sm_config = {}
696 sm_config['vdi_type'] = Dict[vdi][VDI_TYPE_TAG]
697 lvname = "%s%s" % \
698 (lvhdutil.LV_PREFIX[sm_config['vdi_type']], vdi_uuid)
699 self.lvmCache.activateNoRefcount(lvname)
700 activated = True
701 lvPath = os.path.join(self.path, lvname)
703 if Dict[vdi][VDI_TYPE_TAG] == vhdutil.VDI_TYPE_RAW:
704 size = self.lvmCache.getSize( \
705 lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_RAW] + \
706 vdi_uuid)
707 utilisation = \
708 util.roundup(lvutil.LVM_SIZE_INCREMENT,
709 int(size))
710 else:
711 parent = \
712 vhdutil._getVHDParentNoCheck(lvPath)
714 if parent is not None:
715 sm_config['vhd-parent'] = parent[len( \
716 lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD]):]
717 size = vhdutil.getSizeVirt(lvPath)
718 if self.provision == "thin":
719 utilisation = \
720 util.roundup(lvutil.LVM_SIZE_INCREMENT,
721 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE))
722 else:
723 utilisation = lvhdutil.calcSizeVHDLV(int(size))
725 vdi_ref = self.session.xenapi.VDI.db_introduce(
726 vdi_uuid,
727 Dict[vdi][NAME_LABEL_TAG],
728 Dict[vdi][NAME_DESCRIPTION_TAG],
729 self.sr_ref,
730 Dict[vdi][TYPE_TAG],
731 False,
732 bool(int(Dict[vdi][READ_ONLY_TAG])),
733 {},
734 vdi_uuid,
735 {},
736 sm_config)
738 self.session.xenapi.VDI.set_managed(vdi_ref,
739 bool(int(Dict[vdi][MANAGED_TAG])))
740 self.session.xenapi.VDI.set_virtual_size(vdi_ref,
741 str(size))
742 self.session.xenapi.VDI.set_physical_utilisation( \
743 vdi_ref, str(utilisation))
744 self.session.xenapi.VDI.set_is_a_snapshot( \
745 vdi_ref, bool(int(Dict[vdi][IS_A_SNAPSHOT_TAG])))
746 if bool(int(Dict[vdi][IS_A_SNAPSHOT_TAG])):
747 self.session.xenapi.VDI.set_snapshot_time( \
748 vdi_ref, DateTime(Dict[vdi][SNAPSHOT_TIME_TAG]))
749 if Dict[vdi][TYPE_TAG] == 'metadata':
750 self.session.xenapi.VDI.set_metadata_of_pool( \
751 vdi_ref, Dict[vdi][METADATA_OF_POOL_TAG])
753 # Update CBT status of disks either just added
754 # or already in XAPI
755 cbt_logname = "%s.%s" % (vdi_uuid, CBTLOG_TAG)
756 if cbt_logname in cbt_vdis:  # coverage: 756 ↛ 757 (condition was never true)
757 vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
758 self.session.xenapi.VDI.set_cbt_enabled(vdi_ref, True)
759 # For existing VDIs, update local state too
760 # Scan in base class SR updates existing VDIs
761 # again based on local states
762 if vdi_uuid in self.vdis:
763 self.vdis[vdi_uuid].cbt_enabled = True
764 cbt_vdis.remove(cbt_logname)
766 # Now set the snapshot statuses correctly in XAPI
767 for srcvdi in vdiToSnaps.keys():  # coverage: 767 ↛ 768 (loop never started)
768 try:
769 srcref = self.session.xenapi.VDI.get_by_uuid(srcvdi)
770 except:
771 # the source VDI no longer exists, continue
772 continue
774 for snapvdi in vdiToSnaps[srcvdi]:
775 try:
776 # this might fail in cases where it's already set
777 snapref = \
778 self.session.xenapi.VDI.get_by_uuid(snapvdi)
779 self.session.xenapi.VDI.set_snapshot_of(snapref, srcref)
780 except Exception as e:
781 util.SMlog("Setting snapshot failed. " \
782 "Error: %s" % str(e))
784 if cbt_vdis:  # coverage: 784 ↛ 795 (condition was never false)
785 # If we have items remaining in this list,
786 # they are cbt_metadata VDIs that XAPI doesn't know about
787 # Add them to self.vdis and they'll get added to the DB
788 for cbt_vdi in cbt_vdis:  # coverage: 788 ↛ 789 (loop never started)
789 cbt_uuid = cbt_vdi.split(".")[0]
790 new_vdi = self.vdi(cbt_uuid)
791 new_vdi.ty = "cbt_metadata"
792 new_vdi.cbt_enabled = True
793 self.vdis[cbt_uuid] = new_vdi
795 super(LVHDSR, self).scan(uuid)
796 self._kickGC()
798 finally:
799 if lvname != '' and activated:  # coverage: 799 ↛ 800 (condition was never true)
800 self.lvmCache.deactivateNoRefcount(lvname)
802 def update(self, uuid):
803 if not lvutil._checkVG(self.vgname):  # coverage: 803 ↛ 804 (condition was never true)
804 return
805 self._updateStats(uuid, 0)
807 if self.legacyMode:  # coverage: 807 ↛ 808 (condition was never true)
808 return
810 # synch name_label in metadata with XAPI
811 update_map = {}
812 update_map = {METADATA_UPDATE_OBJECT_TYPE_TAG: \
813 METADATA_OBJECT_TYPE_SR,
814 NAME_LABEL_TAG: util.to_plain_string( \
815 self.session.xenapi.SR.get_name_label(self.sr_ref)),
816 NAME_DESCRIPTION_TAG: util.to_plain_string( \
817 self.session.xenapi.SR.get_name_description(self.sr_ref))
818 }
819 LVMMetadataHandler(self.mdpath).updateMetadata(update_map)
821 def _updateStats(self, uuid, virtAllocDelta):
822 valloc = int(self.session.xenapi.SR.get_virtual_allocation(self.sr_ref))
823 self.virtual_allocation = valloc + virtAllocDelta
824 util.SMlog("Setting virtual_allocation of SR %s to %d" %
825 (uuid, self.virtual_allocation))
826 stats = lvutil._getVGstats(self.vgname)
827 self.physical_size = stats['physical_size']
828 self.physical_utilisation = stats['physical_utilisation']
829 self._db_update()
831 @deviceCheck
832 def probe(self):
833 return lvutil.srlist_toxml(
834 lvutil.scan_srlist(lvhdutil.VG_PREFIX, self.dconf['device']),
835 lvhdutil.VG_PREFIX,
836 ('metadata' in self.srcmd.params['sr_sm_config'] and \
837 self.srcmd.params['sr_sm_config']['metadata'] == 'true'))
839 def vdi(self, uuid):
840 return LVHDVDI(self, uuid)
842 def _loadvdis(self):
843 self.virtual_allocation = 0
844 self.vdiInfo = lvhdutil.getVDIInfo(self.lvmCache)
845 self.allVDIs = {}
847 for uuid, info in self.vdiInfo.items():
848 if uuid.startswith(cleanup.SR.TMP_RENAME_PREFIX):  # coverage: 848 ↛ 849 (condition was never true)
849 continue
850 if info.scanError:  # coverage: 850 ↛ 851 (condition was never true)
851 raise xs_errors.XenError('VDIUnavailable', \
852 opterr='Error scanning VDI %s' % uuid)
853 self.vdis[uuid] = self.allVDIs[uuid] = self.vdi(uuid)
854 if not self.vdis[uuid].hidden:  # coverage: 854 ↛ 847 (condition was never false)
855 self.virtual_allocation += self.vdis[uuid].utilisation
857 for uuid, vdi in self.vdis.items():
858 if vdi.parent:  # coverage: 858 ↛ 859 (condition was never true)
859 if vdi.parent in self.vdis:
860 self.vdis[vdi.parent].read_only = True
861 if vdi.parent in geneology:
862 geneology[vdi.parent].append(uuid)
863 else:
864 geneology[vdi.parent] = [uuid]
866 # Now remove all hidden leaf nodes to avoid introducing records that
867 # will be GC'ed
868 for uuid in list(self.vdis.keys()):
869 if uuid not in geneology and self.vdis[uuid].hidden:  # coverage: 869 ↛ 870 (condition was never true)
870 util.SMlog("Scan found hidden leaf (%s), ignoring" % uuid)
871 del self.vdis[uuid]
873 def _ensureSpaceAvailable(self, amount_needed):
874 space_available = lvutil._getVGstats(self.vgname)['freespace']
875 if (space_available < amount_needed):
876 util.SMlog("Not enough space! free space: %d, need: %d" % \
877 (space_available, amount_needed))
878 raise xs_errors.XenError('SRNoSpace')
880 def _handleInterruptedCloneOps(self):
881 entries = self.journaler.getAll(LVHDVDI.JRN_CLONE)
882 for uuid, val in entries.items():  # coverage: 882 ↛ 883 (loop never started)
883 util.fistpoint.activate("LVHDRT_clone_vdi_before_undo_clone", self.uuid)
884 self._handleInterruptedCloneOp(uuid, val)
885 util.fistpoint.activate("LVHDRT_clone_vdi_after_undo_clone", self.uuid)
886 self.journaler.remove(LVHDVDI.JRN_CLONE, uuid)
888 def _handleInterruptedCoalesceLeaf(self):
889 entries = self.journaler.getAll(cleanup.VDI.JRN_LEAF)
890 if len(entries) > 0:  # coverage: 890 ↛ 891 (condition was never true)
891 util.SMlog("*** INTERRUPTED COALESCE-LEAF OP DETECTED ***")
892 cleanup.gc_force(self.session, self.uuid)
893 self.lvmCache.refresh()
895 def _handleInterruptedCloneOp(self, origUuid, jval, forceUndo=False):
896 """Either roll back or finalize the interrupted snapshot/clone
897 operation. Rolling back is unsafe if the leaf VHDs have already been
898 in use and written to. However, it is always safe to roll back while
899 we're still in the context of the failed snapshot operation since the
900 VBD is paused for the duration of the operation"""
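# Note on the journal format (illustrative): the journal value is assumed to
# be "<baseUuid>_<clonUuid>" as split below; clonUuid may be empty when the
# interrupted operation created no clone leaf.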
901 util.SMlog("*** INTERRUPTED CLONE OP: for %s (%s)" % (origUuid, jval))
902 lvs = lvhdutil.getLVInfo(self.lvmCache)
903 baseUuid, clonUuid = jval.split("_")
905 # is there a "base copy" VDI?
906 if not lvs.get(baseUuid):
907 # no base copy: make sure the original is there
908 if lvs.get(origUuid):
909 util.SMlog("*** INTERRUPTED CLONE OP: nothing to do")
910 return
911 raise util.SMException("base copy %s not present, " \
912 "but no original %s found" % (baseUuid, origUuid))
914 if forceUndo:
915 util.SMlog("Explicit revert")
916 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
917 return
919 if not lvs.get(origUuid) or (clonUuid and not lvs.get(clonUuid)):
920 util.SMlog("One or both leaves missing => revert")
921 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
922 return
924 vdis = lvhdutil.getVDIInfo(self.lvmCache)
925 if vdis[origUuid].scanError or (clonUuid and vdis[clonUuid].scanError):
926 util.SMlog("One or both leaves invalid => revert")
927 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
928 return
930 orig = vdis[origUuid]
931 base = vdis[baseUuid]
932 self.lvActivator.activate(baseUuid, base.lvName, False)
933 self.lvActivator.activate(origUuid, orig.lvName, False)
934 if orig.parentUuid != baseUuid:
935 parent = vdis[orig.parentUuid]
936 self.lvActivator.activate(parent.uuid, parent.lvName, False)
937 origPath = os.path.join(self.path, orig.lvName)
938 if not vhdutil.check(origPath):
939 util.SMlog("Orig VHD invalid => revert")
940 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
941 return
943 if clonUuid:
944 clon = vdis[clonUuid]
945 clonPath = os.path.join(self.path, clon.lvName)
946 self.lvActivator.activate(clonUuid, clon.lvName, False)
947 if not vhdutil.check(clonPath):
948 util.SMlog("Clon VHD invalid => revert")
949 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
950 return
952 util.SMlog("Snapshot appears valid, will not roll back")
953 self._completeCloneOp(vdis, origUuid, baseUuid, clonUuid)
955 def _undoCloneOp(self, lvs, origUuid, baseUuid, clonUuid):
956 base = lvs[baseUuid]
957 basePath = os.path.join(self.path, base.name)
959 # make the parent RW
960 if base.readonly:
961 self.lvmCache.setReadonly(base.name, False)
963 ns = lvhdutil.NS_PREFIX_LVM + self.uuid
964 origRefcountBinary = RefCounter.check(origUuid, ns)[1]
965 origRefcountNormal = 0
967 # un-hide the parent
968 if base.vdiType == vhdutil.VDI_TYPE_VHD:
969 self.lvActivator.activate(baseUuid, base.name, False)
970 origRefcountNormal = 1
971 vhdInfo = vhdutil.getVHDInfo(basePath, lvhdutil.extractUuid, False)
972 if base.vdiType == vhdutil.VDI_TYPE_VHD and vhdInfo.hidden:
973 vhdutil.setHidden(basePath, False)
974 elif base.vdiType == vhdutil.VDI_TYPE_RAW and base.hidden:
975 self.lvmCache.setHidden(base.name, False)
977 # remove the child nodes
978 if clonUuid and lvs.get(clonUuid):
979 if lvs[clonUuid].vdiType != vhdutil.VDI_TYPE_VHD:
980 raise util.SMException("clone %s not VHD" % clonUuid)
981 self.lvmCache.remove(lvs[clonUuid].name)
982 if self.lvActivator.get(clonUuid, False):
983 self.lvActivator.remove(clonUuid, False)
984 if lvs.get(origUuid):
985 self.lvmCache.remove(lvs[origUuid].name)
987 # inflate the parent to fully-allocated size
988 if base.vdiType == vhdutil.VDI_TYPE_VHD:
989 fullSize = lvhdutil.calcSizeVHDLV(vhdInfo.sizeVirt)
990 lvhdutil.inflate(self.journaler, self.uuid, baseUuid, fullSize)
992 # rename back
993 origLV = lvhdutil.LV_PREFIX[base.vdiType] + origUuid
994 self.lvmCache.rename(base.name, origLV)
995 RefCounter.reset(baseUuid, ns)
996 if self.lvActivator.get(baseUuid, False):
997 self.lvActivator.replace(baseUuid, origUuid, origLV, False)
998 RefCounter.set(origUuid, origRefcountNormal, origRefcountBinary, ns)
1000 # At this stage, tapdisk and SM vdi will be in paused state. Remove
1001 # flag to facilitate vm deactivate
1002 origVdiRef = self.session.xenapi.VDI.get_by_uuid(origUuid)
1003 self.session.xenapi.VDI.remove_from_sm_config(origVdiRef, 'paused')
1005 # update LVM metadata on slaves
1006 slaves = util.get_slaves_attached_on(self.session, [origUuid])
1007 lvhdutil.lvRefreshOnSlaves(self.session, self.uuid, self.vgname,
1008 origLV, origUuid, slaves)
1010 util.SMlog("*** INTERRUPTED CLONE OP: rollback success")
1012 def _completeCloneOp(self, vdis, origUuid, baseUuid, clonUuid):
1013 """Finalize the interrupted snapshot/clone operation. This must not be
1014 called from the live snapshot op context because we attempt to pause/
1015 unpause the VBD here (the VBD is already paused during snapshot, so it
1016 would cause a deadlock)"""
1017 base = vdis[baseUuid]
1018 clon = None
1019 if clonUuid:
1020 clon = vdis[clonUuid]
1022 cleanup.abort(self.uuid)
1024 # make sure the parent is hidden and read-only
1025 if not base.hidden:
1026 if base.vdiType == vhdutil.VDI_TYPE_RAW:
1027 self.lvmCache.setHidden(base.lvName)
1028 else:
1029 basePath = os.path.join(self.path, base.lvName)
1030 vhdutil.setHidden(basePath)
1031 if not base.lvReadonly:
1032 self.lvmCache.setReadonly(base.lvName, True)
1034 # NB: since this snapshot-preserving call is only invoked outside the
1035 # snapshot op context, we assume the LVM metadata on the involved slave
1036 # has by now been refreshed and do not attempt to do it here
1038 # Update the original record
1039 try:
1040 vdi_ref = self.session.xenapi.VDI.get_by_uuid(origUuid)
1041 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
1042 type = self.session.xenapi.VDI.get_type(vdi_ref)
1043 sm_config["vdi_type"] = vhdutil.VDI_TYPE_VHD
1044 sm_config['vhd-parent'] = baseUuid
1045 self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config)
1046 except XenAPI.Failure:
1047 util.SMlog("ERROR updating the orig record")
1049 # introduce the new VDI records
1050 if clonUuid:
1051 try:
1052 clon_vdi = VDI.VDI(self, clonUuid)
1053 clon_vdi.read_only = False
1054 clon_vdi.location = clonUuid
1055 clon_vdi.utilisation = clon.sizeLV
1056 clon_vdi.sm_config = {
1057 "vdi_type": vhdutil.VDI_TYPE_VHD,
1058 "vhd-parent": baseUuid}
1060 if not self.legacyMode:
1061 LVMMetadataHandler(self.mdpath). \
1062 ensureSpaceIsAvailableForVdis(1)
1064 clon_vdi_ref = clon_vdi._db_introduce()
1065 util.SMlog("introduced clon VDI: %s (%s)" % \
1066 (clon_vdi_ref, clonUuid))
1068 vdi_info = {UUID_TAG: clonUuid,
1069 NAME_LABEL_TAG: clon_vdi.label,
1070 NAME_DESCRIPTION_TAG: clon_vdi.description,
1071 IS_A_SNAPSHOT_TAG: 0,
1072 SNAPSHOT_OF_TAG: '',
1073 SNAPSHOT_TIME_TAG: '',
1074 TYPE_TAG: type,
1075 VDI_TYPE_TAG: clon_vdi.sm_config['vdi_type'],
1076 READ_ONLY_TAG: int(clon_vdi.read_only),
1077 MANAGED_TAG: int(clon_vdi.managed),
1078 METADATA_OF_POOL_TAG: ''
1079 }
1081 if not self.legacyMode:
1082 LVMMetadataHandler(self.mdpath).addVdi(vdi_info)
1084 except XenAPI.Failure:
1085 util.SMlog("ERROR introducing the clon record")
1087 try:
1088 base_vdi = VDI.VDI(self, baseUuid) # readonly parent
1089 base_vdi.label = "base copy"
1090 base_vdi.read_only = True
1091 base_vdi.location = baseUuid
1092 base_vdi.size = base.sizeVirt
1093 base_vdi.utilisation = base.sizeLV
1094 base_vdi.managed = False
1095 base_vdi.sm_config = {
1096 "vdi_type": vhdutil.VDI_TYPE_VHD,
1097 "vhd-parent": baseUuid}
1099 if not self.legacyMode:
1100 LVMMetadataHandler(self.mdpath).ensureSpaceIsAvailableForVdis(1)
1102 base_vdi_ref = base_vdi._db_introduce()
1103 util.SMlog("introduced base VDI: %s (%s)" % \
1104 (base_vdi_ref, baseUuid))
1106 vdi_info = {UUID_TAG: baseUuid,
1107 NAME_LABEL_TAG: base_vdi.label,
1108 NAME_DESCRIPTION_TAG: base_vdi.description,
1109 IS_A_SNAPSHOT_TAG: 0,
1110 SNAPSHOT_OF_TAG: '',
1111 SNAPSHOT_TIME_TAG: '',
1112 TYPE_TAG: type,
1113 VDI_TYPE_TAG: base_vdi.sm_config['vdi_type'],
1114 READ_ONLY_TAG: int(base_vdi.read_only),
1115 MANAGED_TAG: int(base_vdi.managed),
1116 METADATA_OF_POOL_TAG: ''
1117 }
1119 if not self.legacyMode:
1120 LVMMetadataHandler(self.mdpath).addVdi(vdi_info)
1121 except XenAPI.Failure:
1122 util.SMlog("ERROR introducing the base record")
1124 util.SMlog("*** INTERRUPTED CLONE OP: complete")
1126 def _undoAllJournals(self):
1127 """Undo all VHD & SM interrupted journaled operations. This call must
1128 be serialized with respect to all operations that create journals"""
1129 # undoing interrupted inflates must be done first, since undoing VHD
1130 # ops might require inflations
1131 self.lock.acquire()
1132 try:
1133 self._undoAllInflateJournals()
1134 self._undoAllVHDJournals()
1135 self._handleInterruptedCloneOps()
1136 self._handleInterruptedCoalesceLeaf()
1137 finally:
1138 self.lock.release()
1139 self.cleanup()
1141 def _undoAllInflateJournals(self):
1142 entries = self.journaler.getAll(lvhdutil.JRN_INFLATE)
1143 if len(entries) == 0:
1144 return
1145 self._loadvdis()
1146 for uuid, val in entries.items():
1147 vdi = self.vdis.get(uuid)
1148 if vdi:  # coverage: 1148 ↛ 1163 (condition was never false)
1149 util.SMlog("Found inflate journal %s, deflating %s to %s" % \
1150 (uuid, vdi.path, val))
1151 if vdi.readonly:  # coverage: 1151 ↛ 1152 (condition was never true)
1152 self.lvmCache.setReadonly(vdi.lvname, False)
1153 self.lvActivator.activate(uuid, vdi.lvname, False)
1154 currSizeLV = self.lvmCache.getSize(vdi.lvname)
1155 util.zeroOut(vdi.path, currSizeLV - vhdutil.VHD_FOOTER_SIZE,
1156 vhdutil.VHD_FOOTER_SIZE)
1157 lvhdutil.deflate(self.lvmCache, vdi.lvname, int(val))
1158 if vdi.readonly:  # coverage: 1158 ↛ 1159 (condition was never true)
1159 self.lvmCache.setReadonly(vdi.lvname, True)
1160 if "true" == self.session.xenapi.SR.get_shared(self.sr_ref): 1160 ↛ 1161line 1160 didn't jump to line 1161, because the condition on line 1160 was never true
1161 lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid,
1162 self.vgname, vdi.lvname, uuid)
1163 self.journaler.remove(lvhdutil.JRN_INFLATE, uuid)
1164 delattr(self, "vdiInfo")
1165 delattr(self, "allVDIs")
1167 def _undoAllVHDJournals(self):
1168 """check if there are VHD journals in existence and revert them"""
1169 journals = lvhdutil.getAllVHDJournals(self.lvmCache)
1170 if len(journals) == 0:  # coverage: 1170 ↛ 1172 (condition was never false)
1171 return
1172 self._loadvdis()
1173 for uuid, jlvName in journals:
1174 vdi = self.vdis[uuid]
1175 util.SMlog("Found VHD journal %s, reverting %s" % (uuid, vdi.path))
1176 self.lvActivator.activate(uuid, vdi.lvname, False)
1177 self.lvmCache.activateNoRefcount(jlvName)
1178 fullSize = lvhdutil.calcSizeVHDLV(vdi.size)
1179 lvhdutil.inflate(self.journaler, self.uuid, vdi.uuid, fullSize)
1180 try:
1181 jFile = os.path.join(self.path, jlvName)
1182 vhdutil.revert(vdi.path, jFile)
1183 except util.CommandException:
1184 util.logException("VHD journal revert")
1185 vhdutil.check(vdi.path)
1186 util.SMlog("VHD revert failed but VHD ok: removing journal")
1187 # Attempt to reclaim unused space
1188 vhdInfo = vhdutil.getVHDInfo(vdi.path, lvhdutil.extractUuid, False)
1189 NewSize = lvhdutil.calcSizeVHDLV(vhdInfo.sizeVirt)
1190 if NewSize < fullSize:
1191 lvhdutil.deflate(self.lvmCache, vdi.lvname, int(NewSize))
1192 lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid,
1193 self.vgname, vdi.lvname, uuid)
1194 self.lvmCache.remove(jlvName)
1195 delattr(self, "vdiInfo")
1196 delattr(self, "allVDIs")
1198 def _updateSlavesPreClone(self, hostRefs, origOldLV):
1199 masterRef = util.get_this_host_ref(self.session)
1200 args = {"vgName": self.vgname,
1201 "action1": "deactivateNoRefcount",
1202 "lvName1": origOldLV}
1203 for hostRef in hostRefs:
1204 if hostRef == masterRef:  # coverage: 1204 ↛ 1205 (condition was never true)
1205 continue
1206 util.SMlog("Deactivate VDI on %s" % hostRef)
1207 rv = self.session.xenapi.host.call_plugin(hostRef, self.PLUGIN_ON_SLAVE, "multi", args)
1208 util.SMlog("call-plugin returned: %s" % rv)
1209 if not rv:  # coverage: 1209 ↛ 1210 (condition was never true)
1210 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)
1212 def _updateSlavesOnClone(self, hostRefs, origOldLV, origLV,
1213 baseUuid, baseLV):
1214 """We need to reactivate the original LV on each slave (note that the
1215 name for the original LV might change), as well as init the refcount
1216 for the base LV"""
1217 args = {"vgName": self.vgname,
1218 "action1": "refresh",
1219 "lvName1": origLV,
1220 "action2": "activate",
1221 "ns2": lvhdutil.NS_PREFIX_LVM + self.uuid,
1222 "lvName2": baseLV,
1223 "uuid2": baseUuid}
1225 masterRef = util.get_this_host_ref(self.session)
1226 for hostRef in hostRefs:
1227 if hostRef == masterRef:  # coverage: 1227 ↛ 1228 (condition was never true)
1228 continue
1229 util.SMlog("Updating %s, %s, %s on slave %s" % \
1230 (origOldLV, origLV, baseLV, hostRef))
1231 rv = self.session.xenapi.host.call_plugin(
1232 hostRef, self.PLUGIN_ON_SLAVE, "multi", args)
1233 util.SMlog("call-plugin returned: %s" % rv)
1234 if not rv:  # coverage: 1234 ↛ 1235 (condition was never true)
1235 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)
1237 def _updateSlavesOnCBTClone(self, hostRefs, cbtlog):
1238 """Reactivate and refresh CBT log file on slaves"""
1239 args = {"vgName": self.vgname,
1240 "action1": "deactivateNoRefcount",
1241 "lvName1": cbtlog,
1242 "action2": "refresh",
1243 "lvName2": cbtlog}
1245 masterRef = util.get_this_host_ref(self.session)
1246 for hostRef in hostRefs:
1247 if hostRef == masterRef:  # coverage: 1247 ↛ 1248 (condition was never true)
1248 continue
1249 util.SMlog("Updating %s on slave %s" % (cbtlog, hostRef))
1250 rv = self.session.xenapi.host.call_plugin(
1251 hostRef, self.PLUGIN_ON_SLAVE, "multi", args)
1252 util.SMlog("call-plugin returned: %s" % rv)
1253 if not rv:  # coverage: 1253 ↛ 1254 (condition was never true)
1254 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)
1256 def _updateSlavesOnRemove(self, hostRefs, baseUuid, baseLV):
1257 """Tell the slave we deleted the base image"""
1258 args = {"vgName": self.vgname,
1259 "action1": "cleanupLockAndRefcount",
1260 "uuid1": baseUuid,
1261 "ns1": lvhdutil.NS_PREFIX_LVM + self.uuid}
1263 masterRef = util.get_this_host_ref(self.session)
1264 for hostRef in hostRefs:
1265 if hostRef == masterRef:  # coverage: 1265 ↛ 1266 (condition was never true)
1266 continue
1267 util.SMlog("Cleaning locks for %s on slave %s" % (baseLV, hostRef))
1268 rv = self.session.xenapi.host.call_plugin(
1269 hostRef, self.PLUGIN_ON_SLAVE, "multi", args)
1270 util.SMlog("call-plugin returned: %s" % rv)
1271 if not rv:  # coverage: 1271 ↛ 1272 (condition was never true)
1272 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)
1274 def _cleanup(self, skipLockCleanup=False):
1275 """delete stale refcounter, flag, and lock files"""
1276 RefCounter.resetAll(lvhdutil.NS_PREFIX_LVM + self.uuid)
1277 IPCFlag(self.uuid).clearAll()
1278 if not skipLockCleanup:  # coverage: 1278 ↛ 1279 (condition was never true)
1279 Lock.cleanupAll(self.uuid)
1280 Lock.cleanupAll(lvhdutil.NS_PREFIX_LVM + self.uuid)
1282 def _prepareTestMode(self):
1283 util.SMlog("Test mode: %s" % self.testMode)
1284 if self.ENV_VAR_VHD_TEST.get(self.testMode):  # coverage: 1284 ↛ 1285 (condition was never true)
1285 os.environ[self.ENV_VAR_VHD_TEST[self.testMode]] = "yes"
1286 util.SMlog("Setting env %s" % self.ENV_VAR_VHD_TEST[self.testMode])
1288 def _kickGC(self):
1289 # don't bother if an instance is already running (this is just an
1290 # optimization to reduce the overhead of forking a new process if we
1291 # don't have to, but the process will check the lock anyway)
1292 lockRunning = Lock(cleanup.LOCK_TYPE_RUNNING, self.uuid)
1293 if not lockRunning.acquireNoblock():  # coverage: 1293 ↛ 1294 (condition was never true)
1294 if cleanup.should_preempt(self.session, self.uuid):
1295 util.SMlog("Aborting currently-running coalesce of garbage VDI")
1296 try:
1297 if not cleanup.abort(self.uuid, soft=True):
1298 util.SMlog("The GC has already been scheduled to "
1299 "re-start")
1300 except util.CommandException as e:
1301 if e.code != errno.ETIMEDOUT:
1302 raise
1303 util.SMlog('failed to abort the GC')
1304 else:
1305 util.SMlog("A GC instance already running, not kicking")
1306 return
1307 else:
1308 lockRunning.release()
1310 util.SMlog("Kicking GC")
1311 cleanup.gc(self.session, self.uuid, True)
1313 def ensureCBTSpace(self):
1314 # Ensure we have space for at least one LV
1315 self._ensureSpaceAvailable(self.journaler.LV_SIZE)
1318class LVHDVDI(VDI.VDI):
1320 JRN_CLONE = "clone" # journal entry type for the clone operation
1322 def load(self, vdi_uuid):
1323 self.lock = self.sr.lock
1324 self.lvActivator = self.sr.lvActivator
1325 self.loaded = False
1326 self.vdi_type = vhdutil.VDI_TYPE_VHD
1327 if self.sr.legacyMode or util.fistpoint.is_active("xenrt_default_vdi_type_legacy"):  # coverage: 1327 ↛ 1329 (condition was never false)
1328 self.vdi_type = vhdutil.VDI_TYPE_RAW
1329 self.uuid = vdi_uuid
1330 self.location = self.uuid
1331 self.exists = True
1333 if hasattr(self.sr, "vdiInfo") and self.sr.vdiInfo.get(self.uuid):
1334 self._initFromVDIInfo(self.sr.vdiInfo[self.uuid])
1335 if self.parent:  # coverage: 1335 ↛ 1336 (condition was never true)
1336 self.sm_config_override['vhd-parent'] = self.parent
1337 else:
1338 self.sm_config_override['vhd-parent'] = None
1339 return
1341 # scan() didn't run: determine the type of the VDI manually
1342 if self._determineType():
1343 return
1345 # the VDI must be in the process of being created
1346 self.exists = False
1347 if "vdi_sm_config" in self.sr.srcmd.params and \ 1347 ↛ 1349line 1347 didn't jump to line 1349, because the condition on line 1347 was never true
1348 "type" in self.sr.srcmd.params["vdi_sm_config"]:
1349 type = self.sr.srcmd.params["vdi_sm_config"]["type"]
1350 if type == PARAM_RAW:
1351 self.vdi_type = vhdutil.VDI_TYPE_RAW
1352 elif type == PARAM_VHD:
1353 self.vdi_type = vhdutil.VDI_TYPE_VHD
1354 if self.sr.cmd == 'vdi_create' and self.sr.legacyMode:
1355 raise xs_errors.XenError('VDICreate', \
1356 opterr='Cannot create VHD type disk in legacy mode')
1357 else:
1358 raise xs_errors.XenError('VDICreate', opterr='bad type')
1359 self.lvname = "%s%s" % (lvhdutil.LV_PREFIX[self.vdi_type], vdi_uuid)
1360 self.path = os.path.join(self.sr.path, self.lvname)
1362 def create(self, sr_uuid, vdi_uuid, size):
1363 util.SMlog("LVHDVDI.create for %s" % self.uuid)
1364 if not self.sr.isMaster:
1365 raise xs_errors.XenError('LVMMaster')
1366 if self.exists:
1367 raise xs_errors.XenError('VDIExists')
1369 size = vhdutil.validate_and_round_vhd_size(int(size))
1371 util.SMlog("LVHDVDI.create: type = %s, %s (size=%s)" % \
1372 (self.vdi_type, self.path, size))
1373 lvSize = 0
1374 self.sm_config = self.sr.srcmd.params["vdi_sm_config"]
1375 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
1376 lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT, int(size))
1377 else:
1378 if self.sr.provision == "thin":
1379 lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT,
1380 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE))
1381 elif self.sr.provision == "thick":
1382 lvSize = lvhdutil.calcSizeVHDLV(int(size))
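# Sizing sketch (illustrative): a RAW VDI gets an LV rounded up to the LVM
# extent increment; a "thin" VHD VDI initially gets only the empty-VHD
# overhead (and is inflated on attach), while a "thick" VHD VDI is allocated
# calcSizeVHDLV(size), i.e. roughly the virtual size plus VHD metadata
# overhead, up front.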
1384 self.sr._ensureSpaceAvailable(lvSize)
1386 try:
1387 self.sr.lvmCache.create(self.lvname, lvSize)
1388 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
1389 self.size = self.sr.lvmCache.getSize(self.lvname)
1390 else:
1391 vhdutil.create(self.path, int(size), False, lvhdutil.MSIZE_MB)
1392 self.size = vhdutil.getSizeVirt(self.path)
1393 self.sr.lvmCache.deactivateNoRefcount(self.lvname)
1394 except util.CommandException as e:
1395 util.SMlog("Unable to create VDI")
1396 self.sr.lvmCache.remove(self.lvname)
1397 raise xs_errors.XenError('VDICreate', opterr='error %d' % e.code)
1399 self.utilisation = lvSize
1400 self.sm_config["vdi_type"] = self.vdi_type
1402 if not self.sr.legacyMode:
1403 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
1405 self.ref = self._db_introduce()
1406 self.sr._updateStats(self.sr.uuid, self.size)
1408 vdi_info = {UUID_TAG: self.uuid,
1409 NAME_LABEL_TAG: util.to_plain_string(self.label),
1410 NAME_DESCRIPTION_TAG: util.to_plain_string(self.description),
1411 IS_A_SNAPSHOT_TAG: 0,
1412 SNAPSHOT_OF_TAG: '',
1413 SNAPSHOT_TIME_TAG: '',
1414 TYPE_TAG: self.ty,
1415 VDI_TYPE_TAG: self.vdi_type,
1416 READ_ONLY_TAG: int(self.read_only),
1417 MANAGED_TAG: int(self.managed),
1418 METADATA_OF_POOL_TAG: ''
1419 }
1421 if not self.sr.legacyMode:
1422 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
1424 return VDI.VDI.get_params(self)
1426 def delete(self, sr_uuid, vdi_uuid, data_only=False):
1427 util.SMlog("LVHDVDI.delete for %s" % self.uuid)
1428 try:
1429 self._loadThis()
1430 except SR.SRException as e:
1431 # Catch 'VDI doesn't exist' exception
1432 if e.errno == 46:
1433 return super(LVHDVDI, self).delete(sr_uuid, vdi_uuid, data_only)
1434 raise
1436 vdi_ref = self.sr.srcmd.params['vdi_ref']
1437 if not self.session.xenapi.VDI.get_managed(vdi_ref):
1438 raise xs_errors.XenError("VDIDelete", \
1439 opterr="Deleting non-leaf node not permitted")
1441 if not self.hidden:
1442 self._markHidden()
1444 if not data_only:
1445 # Remove from XAPI and delete from MGT
1446 self._db_forget()
1447 else:
1448 # If this is a data_destroy call, don't remove from XAPI db
1449 # Only delete from MGT
1450 if not self.sr.legacyMode:
1451 LVMMetadataHandler(self.sr.mdpath).deleteVdiFromMetadata(self.uuid)
1453 # deactivate here because it might be too late to do it in the "final"
1454 # step: GC might have removed the LV by then
1455 if self.sr.lvActivator.get(self.uuid, False):
1456 self.sr.lvActivator.deactivate(self.uuid, False)
1458 try:
1459 self.sr.lvmCache.remove(self.lvname)
1460 self.sr.lock.cleanup(vdi_uuid, lvhdutil.NS_PREFIX_LVM + sr_uuid)
1461 self.sr.lock.cleanupAll(vdi_uuid)
1462 except SR.SRException as e:
1463 util.SMlog(
1464 "Failed to remove the volume (maybe is leaf coalescing) "
1465 "for %s err:%d" % (self.uuid, e.errno))
1467 self.sr._updateStats(self.sr.uuid, -self.size)
1468 self.sr._kickGC()
1469 return super(LVHDVDI, self).delete(sr_uuid, vdi_uuid, data_only)
1471 def attach(self, sr_uuid, vdi_uuid):
1472 util.SMlog("LVHDVDI.attach for %s" % self.uuid)
1473 if self.sr.journaler.hasJournals(self.uuid):
1474 raise xs_errors.XenError('VDIUnavailable',
1475 opterr='Interrupted operation detected on this VDI, '
1476 'scan SR first to trigger auto-repair')
1478 writable = ('args' not in self.sr.srcmd.params) or \
1479 (self.sr.srcmd.params['args'][0] == "true")
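# Only a writable VHD that is not already fully inflated needs inflating
# before attach; RAW VDIs and read-only attaches use the LV as-is.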
1480 needInflate = True
1481 if self.vdi_type == vhdutil.VDI_TYPE_RAW or not writable:
1482 needInflate = False
1483 else:
1484 self._loadThis()
1485 if self.utilisation >= lvhdutil.calcSizeVHDLV(self.size):
1486 needInflate = False
1488 if needInflate:
1489 try:
1490 self._prepareThin(True)
1491 except:
1492 util.logException("attach")
1493 raise xs_errors.XenError('LVMProvisionAttach')
1495 try:
1496 return self._attach()
1497 finally:
1498 if not self.sr.lvActivator.deactivateAll():
1499 util.SMlog("Failed to deactivate LVs back (%s)" % self.uuid)
1501 def detach(self, sr_uuid, vdi_uuid):
1502 util.SMlog("LVHDVDI.detach for %s" % self.uuid)
1503 self._loadThis()
1504 already_deflated = (self.utilisation < \
1505 lvhdutil.calcSizeVHDLV(self.size))
1506 needDeflate = True
1507 if self.vdi_type == vhdutil.VDI_TYPE_RAW or already_deflated:
1508 needDeflate = False
1509 elif self.sr.provision == "thick":
1510 needDeflate = False
1511 # except for snapshots, which are always deflated
1512 if self.sr.srcmd.cmd != 'vdi_detach_from_config':
1513 vdi_ref = self.sr.srcmd.params['vdi_ref']
1514 snap = self.session.xenapi.VDI.get_is_a_snapshot(vdi_ref)
1515 if snap:
1516 needDeflate = True
1518 if needDeflate:
1519 try:
1520 self._prepareThin(False)
1521 except:
1522 util.logException("_prepareThin")
1523 raise xs_errors.XenError('VDIUnavailable', opterr='deflate')
1525 try:
1526 self._detach()
1527 finally:
1528 if not self.sr.lvActivator.deactivateAll():
1529 raise xs_errors.XenError("SMGeneral", opterr="deactivation")
1531 # We only support offline resize
1532 def resize(self, sr_uuid, vdi_uuid, size):
1533 util.SMlog("LVHDVDI.resize for %s" % self.uuid)
1534 if not self.sr.isMaster:
1535 raise xs_errors.XenError('LVMMaster')
1537 self._loadThis()
1538 if self.hidden:
1539 raise xs_errors.XenError('VDIUnavailable', opterr='hidden VDI')
1541 if size < self.size:
1542 util.SMlog('vdi_resize: shrinking not supported: ' + \
1543 '(current size: %d, new size: %d)' % (self.size, size))
1544 raise xs_errors.XenError('VDISize', opterr='shrinking not allowed')
1546 size = vhdutil.validate_and_round_vhd_size(int(size))
1548 if size == self.size:
1549 return VDI.VDI.get_params(self)
1551 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
1552 lvSizeOld = self.size
1553 lvSizeNew = util.roundup(lvutil.LVM_SIZE_INCREMENT, size)
1554 else:
1555 lvSizeOld = self.utilisation
1556 lvSizeNew = lvhdutil.calcSizeVHDLV(size)
1557 if self.sr.provision == "thin":
1558 # VDI is currently deflated, so keep it deflated
1559 lvSizeNew = lvSizeOld
1560 assert(lvSizeNew >= lvSizeOld)
1561 spaceNeeded = lvSizeNew - lvSizeOld
1562 self.sr._ensureSpaceAvailable(spaceNeeded)
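# Illustrative example (numbers hypothetical): growing a thick-provisioned
# VHD VDI from 10 GiB to 20 GiB makes lvSizeNew roughly 20 GiB plus the VHD
# metadata overhead, so only the difference from the current LV size has to
# be free in the VG.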
1564 oldSize = self.size
1565 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
1566 self.sr.lvmCache.setSize(self.lvname, lvSizeNew)
1567 self.size = self.sr.lvmCache.getSize(self.lvname)
1568 self.utilisation = self.size
1569 else:
1570 if lvSizeNew != lvSizeOld:
1571 lvhdutil.inflate(self.sr.journaler, self.sr.uuid, self.uuid,
1572 lvSizeNew)
1573 vhdutil.setSizeVirtFast(self.path, size)
1574 self.size = vhdutil.getSizeVirt(self.path)
1575 self.utilisation = self.sr.lvmCache.getSize(self.lvname)
1577 vdi_ref = self.sr.srcmd.params['vdi_ref']
1578 self.session.xenapi.VDI.set_virtual_size(vdi_ref, str(self.size))
1579 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref,
1580 str(self.utilisation))
1581 self.sr._updateStats(self.sr.uuid, self.size - oldSize)
1582 super(LVHDVDI, self).resize_cbt(self.sr.uuid, self.uuid, self.size)
1583 return VDI.VDI.get_params(self)
1585 def clone(self, sr_uuid, vdi_uuid):
1586 return self._do_snapshot(
1587 sr_uuid, vdi_uuid, VDI.SNAPSHOT_DOUBLE, cloneOp=True)
1589 def compose(self, sr_uuid, vdi1, vdi2):
1590 util.SMlog("LVHDSR.compose for %s -> %s" % (vdi2, vdi1))
1591 if self.vdi_type != vhdutil.VDI_TYPE_VHD:
1592 raise xs_errors.XenError('Unimplemented')
1594 parent_uuid = vdi1
1595 parent_lvname = lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD] + parent_uuid
1596 assert(self.sr.lvmCache.checkLV(parent_lvname))
1597 parent_path = os.path.join(self.sr.path, parent_lvname)
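# Re-parent this VDI onto vdi1: both LVs must be active so vhd-util can
# rewrite the parent locator; the new parent is then hidden and the child
# refreshed in tapdisk below.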
1599 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
1600 self.sr.lvActivator.activate(parent_uuid, parent_lvname, False)
1602 vhdutil.setParent(self.path, parent_path, False)
1603 vhdutil.setHidden(parent_path)
1604 self.sr.session.xenapi.VDI.set_managed(self.sr.srcmd.params['args'][0], False)
1606 if not blktap2.VDI.tap_refresh(self.session, self.sr.uuid, self.uuid,
1607 True):
1608 raise util.SMException("failed to refresh VDI %s" % self.uuid)
1610 util.SMlog("Compose done")
1612 def reset_leaf(self, sr_uuid, vdi_uuid):
1613 util.SMlog("LVHDSR.reset_leaf for %s" % vdi_uuid)
1614 if self.vdi_type != vhdutil.VDI_TYPE_VHD:
1615 raise xs_errors.XenError('Unimplemented')
1617 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
1619 # safety check
1620 if not vhdutil.hasParent(self.path):
1621 raise util.SMException("ERROR: VDI %s has no parent, " + \
1622 "will not reset contents" % self.uuid)
1624 vhdutil.killData(self.path)
1626 def _attach(self):
1627 self._chainSetActive(True, True, True)
1628 if not util.pathexists(self.path):
1629 raise xs_errors.XenError('VDIUnavailable', \
1630 opterr='Could not find: %s' % self.path)
1632 if not hasattr(self, 'xenstore_data'):
1633 self.xenstore_data = {}
1635 self.xenstore_data.update(scsiutil.update_XS_SCSIdata(self.uuid, \
1636 scsiutil.gen_synthetic_page_data(self.uuid)))
1638 self.xenstore_data['storage-type'] = 'lvm'
1639 self.xenstore_data['vdi-type'] = self.vdi_type
1641 self.attached = True
1642 self.sr.lvActivator.persist()
1643 return VDI.VDI.attach(self, self.sr.uuid, self.uuid)
1645 def _detach(self):
1646 self._chainSetActive(False, True)
1647 self.attached = False
1649 def _do_snapshot(self, sr_uuid, vdi_uuid, snapType,
1650 cloneOp=False, secondary=None, cbtlog=None):
1651 # If CBT is enabled, save the file consistency state
1652 if cbtlog is not None:
1653 if blktap2.VDI.tap_status(self.session, vdi_uuid): 1653 ↛ 1654: line 1653 didn't jump to line 1654, because the condition on line 1653 was never true
1654 consistency_state = False
1655 else:
1656 consistency_state = True
1657 util.SMlog("Saving log consistency state of %s for vdi: %s" %
1658 (consistency_state, vdi_uuid))
1659 else:
1660 consistency_state = None
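# Pause the tapdisk so the VHD chain is quiescent while the snapshot is
# taken; every path below unpauses it again.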
1662 pause_time = time.time()
1663 if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid): 1663 ↛ 1664: line 1663 didn't jump to line 1664, because the condition on line 1663 was never true
1664 raise util.SMException("failed to pause VDI %s" % vdi_uuid)
1666 snapResult = None
1667 try:
1668 snapResult = self._snapshot(snapType, cloneOp, cbtlog, consistency_state)
1669 except Exception as e1:
1670 try:
1671 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid,
1672 secondary=None)
1673 except Exception as e2:
1674 util.SMlog('WARNING: failed to clean up failed snapshot: '
1675 '%s (error ignored)' % e2)
1676 raise
1677 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary)
1678 unpause_time = time.time()
1679 if (unpause_time - pause_time) > LONG_SNAPTIME: 1679 ↛ 1680: line 1679 didn't jump to line 1680, because the condition on line 1679 was never true
1680 util.SMlog('WARNING: snapshot paused VM for %s seconds' %
1681 (unpause_time - pause_time))
1682 return snapResult
1684 def _snapshot(self, snapType, cloneOp=False, cbtlog=None, cbt_consistency=None):
1685 util.SMlog("LVHDVDI._snapshot for %s (type %s)" % (self.uuid, snapType))
1687 if not self.sr.isMaster: 1687 ↛ 1688: line 1687 didn't jump to line 1688, because the condition on line 1687 was never true
1688 raise xs_errors.XenError('LVMMaster')
1689 if self.sr.legacyMode: 1689 ↛ 1690: line 1689 didn't jump to line 1690, because the condition on line 1689 was never true
1690 raise xs_errors.XenError('Unimplemented', opterr='In legacy mode')
1692 self._loadThis()
1693 if self.hidden: 1693 ↛ 1694: line 1693 didn't jump to line 1694, because the condition on line 1693 was never true
1694 raise xs_errors.XenError('VDISnapshot', opterr='hidden VDI')
1696 self.sm_config = self.session.xenapi.VDI.get_sm_config( \
1697 self.sr.srcmd.params['vdi_ref'])
1698 if "type" in self.sm_config and self.sm_config['type'] == 'raw': 1698 ↛ 1699line 1698 didn't jump to line 1699, because the condition on line 1698 was never true
1699 if not util.fistpoint.is_active("testsm_clone_allow_raw"):
1700 raise xs_errors.XenError('Unimplemented', \
1701 opterr='Raw VDI, snapshot or clone not permitted')
1703 # we must activate the entire VHD chain because the real parent could
1704 # theoretically be anywhere in the chain if all VHDs under it are empty
1705 self._chainSetActive(True, False)
1706 if not util.pathexists(self.path): 1706 ↛ 1707: line 1706 didn't jump to line 1707, because the condition on line 1706 was never true
1707 raise xs_errors.XenError('VDIUnavailable', \
1708 opterr='VDI unavailable: %s' % (self.path))
1710 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 1710 ↛ 1718: line 1710 didn't jump to line 1718, because the condition on line 1710 was never false
1711 depth = vhdutil.getDepth(self.path)
1712 if depth == -1: 1712 ↛ 1713: line 1712 didn't jump to line 1713, because the condition on line 1712 was never true
1713 raise xs_errors.XenError('VDIUnavailable', \
1714 opterr='failed to get VHD depth')
1715 elif depth >= vhdutil.MAX_CHAIN_SIZE: 1715 ↛ 1716: line 1715 didn't jump to line 1716, because the condition on line 1715 was never true
1716 raise xs_errors.XenError('SnapshotChainTooLong')
1718 self.issnap = self.session.xenapi.VDI.get_is_a_snapshot( \
1719 self.sr.srcmd.params['vdi_ref'])
1721 fullpr = lvhdutil.calcSizeVHDLV(self.size)
1722 thinpr = util.roundup(lvutil.LVM_SIZE_INCREMENT, \
1723 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE))
1724 lvSizeOrig = thinpr
1725 lvSizeClon = thinpr
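# thinpr: LV size for an empty VHD (metadata overhead only); fullpr: LV
# size for a fully inflated VHD of this virtual size. Both new volumes
# start out sized thin and are bumped to full below where needed.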
1727 hostRefs = []
1728 if self.sr.cmd == "vdi_snapshot":
1729 hostRefs = util.get_hosts_attached_on(self.session, [self.uuid])
1730 if hostRefs: 1730 ↛ 1732: line 1730 didn't jump to line 1732, because the condition on line 1730 was never false
1731 lvSizeOrig = fullpr
1732 if self.sr.provision == "thick": 1732 ↛ 1738: line 1732 didn't jump to line 1738, because the condition on line 1732 was never false
1733 if not self.issnap: 1733 ↛ 1734: line 1733 didn't jump to line 1734, because the condition on line 1733 was never true
1734 lvSizeOrig = fullpr
1735 if self.sr.cmd != "vdi_snapshot":
1736 lvSizeClon = fullpr
1738 if (snapType == VDI.SNAPSHOT_SINGLE or 1738 ↛ 1740: line 1738 didn't jump to line 1740, because the condition on line 1738 was never true
1739 snapType == VDI.SNAPSHOT_INTERNAL):
1740 lvSizeClon = 0
1742 # the space required must include 2 journal LVs: a clone journal and an
1743 # inflate journal (for failure handling)
1744 size_req = lvSizeOrig + lvSizeClon + 2 * self.sr.journaler.LV_SIZE
1745 lvSizeBase = self.size
1746 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 1746 ↛ 1750: line 1746 didn't jump to line 1750, because the condition on line 1746 was never false
1747 lvSizeBase = util.roundup(lvutil.LVM_SIZE_INCREMENT,
1748 vhdutil.getSizePhys(self.path))
1749 size_req -= (self.utilisation - lvSizeBase)
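# The base copy is deflated to lvSizeBase further down, so the space that
# will free up is credited before the availability check below.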
1750 self.sr._ensureSpaceAvailable(size_req)
1752 if hostRefs:
1753 self.sr._updateSlavesPreClone(hostRefs, self.lvname)
1755 baseUuid = util.gen_uuid()
1756 origUuid = self.uuid
1757 clonUuid = ""
1758 if snapType == VDI.SNAPSHOT_DOUBLE: 1758 ↛ 1760: line 1758 didn't jump to line 1760, because the condition on line 1758 was never false
1759 clonUuid = util.gen_uuid()
1760 jval = "%s_%s" % (baseUuid, clonUuid)
1761 with lvutil.LvmLockContext():
1762 # This makes multiple LVM calls so take the lock early
1763 self.sr.journaler.create(self.JRN_CLONE, origUuid, jval)
1764 util.fistpoint.activate("LVHDRT_clone_vdi_after_create_journal", self.sr.uuid)
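# The clone journal maps the original UUID to "<baseUuid>_<clonUuid>" so
# that an interrupted clone can be undone later (see _failClone and the
# journal check in attach).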
1766 try:
1767 with lvutil.LvmLockContext():
1768 # self becomes the "base vdi"
1769 origOldLV = self.lvname
1770 baseLV = lvhdutil.LV_PREFIX[self.vdi_type] + baseUuid
1771 self.sr.lvmCache.rename(self.lvname, baseLV)
1772 self.sr.lvActivator.replace(self.uuid, baseUuid, baseLV, False)
1773 RefCounter.set(baseUuid, 1, 0, lvhdutil.NS_PREFIX_LVM + self.sr.uuid)
1774 self.uuid = baseUuid
1775 self.lvname = baseLV
1776 self.path = os.path.join(self.sr.path, baseLV)
1777 self.label = "base copy"
1778 self.read_only = True
1779 self.location = self.uuid
1780 self.managed = False
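# From here on "self" describes the read-only base copy; the original UUID
# lives on as the writable leaf created by _createSnap below.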
1782 # shrink the base copy to the minimum - we do it before creating
1783 # the snapshot volumes to avoid requiring double the space
1784 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 1784 ↛ 1787: line 1784 didn't jump to line 1787, because the condition on line 1784 was never false
1785 lvhdutil.deflate(self.sr.lvmCache, self.lvname, lvSizeBase)
1786 self.utilisation = lvSizeBase
1787 util.fistpoint.activate("LVHDRT_clone_vdi_after_shrink_parent", self.sr.uuid)
1789 snapVDI = self._createSnap(origUuid, lvSizeOrig, False)
1790 util.fistpoint.activate("LVHDRT_clone_vdi_after_first_snap", self.sr.uuid)
1791 snapVDI2 = None
1792 if snapType == VDI.SNAPSHOT_DOUBLE: 1792 ↛ 1798: line 1792 didn't jump to line 1798, because the condition on line 1792 was never false
1793 snapVDI2 = self._createSnap(clonUuid, lvSizeClon, True)
1794 # If we have CBT enabled on the VDI,
1795 # set CBT status for the new snapshot disk
1796 if cbtlog:
1797 snapVDI2.cbt_enabled = True
1798 util.fistpoint.activate("LVHDRT_clone_vdi_after_second_snap", self.sr.uuid)
1800 # note: it is important to mark the parent hidden only AFTER the
1801 # new VHD children have been created, which are referencing it;
1802 # otherwise we would introduce a race with GC that could reclaim
1803 # the parent before we snapshot it
1804 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 1804 ↛ 1805: line 1804 didn't jump to line 1805, because the condition on line 1804 was never true
1805 self.sr.lvmCache.setHidden(self.lvname)
1806 else:
1807 vhdutil.setHidden(self.path)
1808 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_hidden", self.sr.uuid)
1810 # set the base copy to ReadOnly
1811 # Do this outside the LvmLockContext to avoid deadlock
1812 self.sr.lvmCache.setReadonly(self.lvname, True)
1813 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_ro", self.sr.uuid)
1815 if hostRefs:
1816 self.sr._updateSlavesOnClone(hostRefs, origOldLV,
1817 snapVDI.lvname, self.uuid, self.lvname)
1819 # Update cbt files if user created snapshot (SNAPSHOT_DOUBLE)
1820 if snapType == VDI.SNAPSHOT_DOUBLE and cbtlog:
1821 snapVDI._cbt_snapshot(clonUuid, cbt_consistency)
1822 if hostRefs: 1822 ↛ 1836: line 1822 didn't jump to line 1836, because the condition on line 1822 was never false
1823 cbtlog_file = self._get_cbt_logname(snapVDI.uuid)
1824 try:
1825 self.sr._updateSlavesOnCBTClone(hostRefs, cbtlog_file)
1826 except:
1827 alert_name = "VDI_CBT_SNAPSHOT_FAILED"
1828 alert_str = ("Creating CBT snapshot for {} failed"
1829 .format(snapVDI.uuid))
1830 snapVDI._disable_cbt_on_error(alert_name, alert_str)
1831 pass
1833 except (util.SMException, XenAPI.Failure) as e:
1834 util.logException("LVHDVDI._snapshot")
1835 self._failClone(origUuid, jval, str(e))
1836 util.fistpoint.activate("LVHDRT_clone_vdi_before_remove_journal", self.sr.uuid)
1838 with lvutil.LvmLockContext():
1839 # This makes multiple LVM calls so take the lock early
1840 self.sr.journaler.remove(self.JRN_CLONE, origUuid)
1842 return self._finishSnapshot(snapVDI, snapVDI2, hostRefs, cloneOp, snapType)
1844 def _createSnap(self, snapUuid, snapSizeLV, isNew):
1845 """Snapshot self and return the snapshot VDI object"""
1846 snapLV = lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD] + snapUuid
1847 snapPath = os.path.join(self.sr.path, snapLV)
1848 self.sr.lvmCache.create(snapLV, int(snapSizeLV))
1849 util.fistpoint.activate("LVHDRT_clone_vdi_after_lvcreate", self.sr.uuid)
1850 if isNew:
1851 RefCounter.set(snapUuid, 1, 0, lvhdutil.NS_PREFIX_LVM + self.sr.uuid)
1852 self.sr.lvActivator.add(snapUuid, snapLV, False)
1853 parentRaw = (self.vdi_type == vhdutil.VDI_TYPE_RAW)
1854 vhdutil.snapshot(snapPath, self.path, parentRaw, lvhdutil.MSIZE_MB)
1855 snapParent = vhdutil.getParent(snapPath, lvhdutil.extractUuid)
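# The new VHD at snapPath is an empty child whose parent locator points at
# self.path (vhdutil.snapshot handles the case of a RAW parent).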
1857 snapVDI = LVHDVDI(self.sr, snapUuid)
1858 snapVDI.read_only = False
1859 snapVDI.location = snapUuid
1860 snapVDI.size = self.size
1861 snapVDI.utilisation = snapSizeLV
1862 snapVDI.sm_config = dict()
1863 for key, val in self.sm_config.items(): 1863 ↛ 1864: line 1863 didn't jump to line 1864, because the loop on line 1863 never started
1864 if key not in [
1865 "type", "vdi_type", "vhd-parent", "paused", "relinking", "activating"] and \
1866 not key.startswith("host_"):
1867 snapVDI.sm_config[key] = val
1868 snapVDI.sm_config["vdi_type"] = vhdutil.VDI_TYPE_VHD
1869 snapVDI.sm_config["vhd-parent"] = snapParent
1870 snapVDI.lvname = snapLV
1871 return snapVDI
1873 def _finishSnapshot(self, snapVDI, snapVDI2, hostRefs, cloneOp=False, snapType=None):
1874 if snapType is not VDI.SNAPSHOT_INTERNAL: 1874 ↛ 1876: line 1874 didn't jump to line 1876, because the condition on line 1874 was never false
1875 self.sr._updateStats(self.sr.uuid, self.size)
1876 basePresent = True
1878 # Verify the parent locator field of both children and delete the
1879 # base copy if it is unused
1880 snapParent = snapVDI.sm_config["vhd-parent"]
1881 snap2Parent = ""
1882 if snapVDI2: 1882 ↛ 1884: line 1882 didn't jump to line 1884, because the condition on line 1882 was never false
1883 snap2Parent = snapVDI2.sm_config["vhd-parent"]
1884 if snapParent != self.uuid and \ 1884 ↛ 1911: line 1884 didn't jump to line 1911, because the condition on line 1884 was never false
1885 (not snapVDI2 or snap2Parent != self.uuid):
1886 util.SMlog("%s != %s != %s => deleting unused base %s" % \
1887 (snapParent, self.uuid, snap2Parent, self.lvname))
1888 RefCounter.put(self.uuid, False, lvhdutil.NS_PREFIX_LVM + self.sr.uuid)
1889 self.sr.lvmCache.remove(self.lvname)
1890 self.sr.lvActivator.remove(self.uuid, False)
1891 if hostRefs:
1892 self.sr._updateSlavesOnRemove(hostRefs, self.uuid, self.lvname)
1893 basePresent = False
1894 else:
1895 # assign the _binary_ refcount of the original VDI to the new base
1896 # VDI (but as the normal refcount, since binary refcounts are only
1897 # for leaf nodes). The normal refcount of the child is not
1898 # transferred to the base VDI because normal refcounts are
1899 # incremented and decremented individually, and not based on the
1900 # VHD chain (i.e., the child's normal refcount will be decremented
1901 # independently of its parent situation). Add 1 for this clone op.
1902 # Note that we do not need to protect the refcount operations
1903 # below with per-VDI locking like we do in lvutil because at this
1904 # point we have exclusive access to the VDIs involved. Other SM
1905 # operations are serialized by the Agent or with the SR lock, and
1906 # any coalesce activations are serialized with the SR lock. (The
1907 # coalesce activates the coalesced VDI pair in the beginning, which
1908 # cannot affect the VDIs here because they cannot possibly be
1909 # involved in coalescing at this point, and at the relinkSkip step
1910 # that activates the children, which takes the SR lock.)
1911 ns = lvhdutil.NS_PREFIX_LVM + self.sr.uuid
1912 (cnt, bcnt) = RefCounter.check(snapVDI.uuid, ns)
1913 RefCounter.set(self.uuid, bcnt + 1, 0, ns)
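# Illustrative example: if the leaf was attached (binary refcount 1), the
# base copy ends up with a normal refcount of 2: one inherited plus one
# for this clone operation.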
1915 # the "paused" and "host_*" sm-config keys are special and must stay on
1916 # the leaf without being inherited by anyone else
1917 for key in [x for x in self.sm_config.keys() if x == "paused" or x.startswith("host_")]: 1917 ↛ 1918: line 1917 didn't jump to line 1918, because the loop on line 1917 never started
1918 snapVDI.sm_config[key] = self.sm_config[key]
1919 del self.sm_config[key]
1921 # Introduce any new VDI records & update the existing one
1922 type = self.session.xenapi.VDI.get_type( \
1923 self.sr.srcmd.params['vdi_ref'])
1924 if snapVDI2: 1924 ↛ 1966: line 1924 didn't jump to line 1966, because the condition on line 1924 was never false
1925 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
1926 vdiRef = snapVDI2._db_introduce()
1927 if cloneOp:
1928 vdi_info = {UUID_TAG: snapVDI2.uuid,
1929 NAME_LABEL_TAG: util.to_plain_string( \
1930 self.session.xenapi.VDI.get_name_label( \
1931 self.sr.srcmd.params['vdi_ref'])),
1932 NAME_DESCRIPTION_TAG: util.to_plain_string( \
1933 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])),
1934 IS_A_SNAPSHOT_TAG: 0,
1935 SNAPSHOT_OF_TAG: '',
1936 SNAPSHOT_TIME_TAG: '',
1937 TYPE_TAG: type,
1938 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'],
1939 READ_ONLY_TAG: 0,
1940 MANAGED_TAG: int(snapVDI2.managed),
1941 METADATA_OF_POOL_TAG: ''
1942 }
1943 else:
1944 util.SMlog("snapshot VDI params: %s" % \
1945 self.session.xenapi.VDI.get_snapshot_time(vdiRef))
1946 vdi_info = {UUID_TAG: snapVDI2.uuid,
1947 NAME_LABEL_TAG: util.to_plain_string( \
1948 self.session.xenapi.VDI.get_name_label( \
1949 self.sr.srcmd.params['vdi_ref'])),
1950 NAME_DESCRIPTION_TAG: util.to_plain_string( \
1951 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])),
1952 IS_A_SNAPSHOT_TAG: 1,
1953 SNAPSHOT_OF_TAG: snapVDI.uuid,
1954 SNAPSHOT_TIME_TAG: '',
1955 TYPE_TAG: type,
1956 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'],
1957 READ_ONLY_TAG: 0,
1958 MANAGED_TAG: int(snapVDI2.managed),
1959 METADATA_OF_POOL_TAG: ''
1960 }
1962 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
1963 util.SMlog("vdi_clone: introduced 2nd snap VDI: %s (%s)" % \
1964 (vdiRef, snapVDI2.uuid))
1966 if basePresent: 1966 ↛ 1967: line 1966 didn't jump to line 1967, because the condition on line 1966 was never true
1967 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
1968 vdiRef = self._db_introduce()
1969 vdi_info = {UUID_TAG: self.uuid,
1970 NAME_LABEL_TAG: self.label,
1971 NAME_DESCRIPTION_TAG: self.description,
1972 IS_A_SNAPSHOT_TAG: 0,
1973 SNAPSHOT_OF_TAG: '',
1974 SNAPSHOT_TIME_TAG: '',
1975 TYPE_TAG: type,
1976 VDI_TYPE_TAG: self.sm_config['vdi_type'],
1977 READ_ONLY_TAG: 1,
1978 MANAGED_TAG: 0,
1979 METADATA_OF_POOL_TAG: ''
1980 }
1982 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
1983 util.SMlog("vdi_clone: introduced base VDI: %s (%s)" % \
1984 (vdiRef, self.uuid))
1986 # Update the original record
1987 vdi_ref = self.sr.srcmd.params['vdi_ref']
1988 self.session.xenapi.VDI.set_sm_config(vdi_ref, snapVDI.sm_config)
1989 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, \
1990 str(snapVDI.utilisation))
1992 # Return the info on the new snap VDI
1993 snap = snapVDI2
1994 if not snap: 1994 ↛ 1995: line 1994 didn't jump to line 1995, because the condition on line 1994 was never true
1995 snap = self
1996 if not basePresent:
1997 # a single-snapshot of an empty VDI will be a noop, resulting
1998 # in no new VDIs, so return the existing one. The GC wouldn't
1999 # normally try to single-snapshot an empty VHD of course, but
2000 # if an external snapshot operation manages to sneak in right
2001 # before a snapshot-coalesce phase, we would get here
2002 snap = snapVDI
2003 return snap.get_params()
2005 def _initFromVDIInfo(self, vdiInfo):
2006 self.vdi_type = vdiInfo.vdiType
2007 self.lvname = vdiInfo.lvName
2008 self.size = vdiInfo.sizeVirt
2009 self.utilisation = vdiInfo.sizeLV
2010 self.hidden = vdiInfo.hidden
2011 if self.hidden: 2011 ↛ 2012: line 2011 didn't jump to line 2012, because the condition on line 2011 was never true
2012 self.managed = False
2013 self.active = vdiInfo.lvActive
2014 self.readonly = vdiInfo.lvReadonly
2015 self.parent = vdiInfo.parentUuid
2016 self.path = os.path.join(self.sr.path, self.lvname)
2017 if hasattr(self, "sm_config_override"): 2017 ↛ 2020: line 2017 didn't jump to line 2020, because the condition on line 2017 was never false
2018 self.sm_config_override["vdi_type"] = self.vdi_type
2019 else:
2020 self.sm_config_override = {'vdi_type': self.vdi_type}
2021 self.loaded = True
2023 def _initFromLVInfo(self, lvInfo):
2024 self.vdi_type = lvInfo.vdiType
2025 self.lvname = lvInfo.name
2026 self.size = lvInfo.size
2027 self.utilisation = lvInfo.size
2028 self.hidden = lvInfo.hidden
2029 self.active = lvInfo.active
2030 self.readonly = lvInfo.readonly
2031 self.parent = ''
2032 self.path = os.path.join(self.sr.path, self.lvname)
2033 if hasattr(self, "sm_config_override"): 2033 ↛ 2036: line 2033 didn't jump to line 2036, because the condition on line 2033 was never false
2034 self.sm_config_override["vdi_type"] = self.vdi_type
2035 else:
2036 self.sm_config_override = {'vdi_type': self.vdi_type}
2037 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 2037 ↛ 2038: line 2037 didn't jump to line 2038, because the condition on line 2037 was never true
2038 self.loaded = True
2040 def _initFromVHDInfo(self, vhdInfo):
2041 self.size = vhdInfo.sizeVirt
2042 self.parent = vhdInfo.parentUuid
2043 self.hidden = vhdInfo.hidden
2044 self.loaded = True
2046 def _determineType(self):
2047 """Determine whether this is a raw or a VHD VDI"""
2048 if "vdi_ref" in self.sr.srcmd.params: 2048 ↛ 2061line 2048 didn't jump to line 2061, because the condition on line 2048 was never false
2049 vdi_ref = self.sr.srcmd.params["vdi_ref"]
2050 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
2051 if sm_config.get("vdi_type"): 2051 ↛ 2052: line 2051 didn't jump to line 2052, because the condition on line 2051 was never true
2052 self.vdi_type = sm_config["vdi_type"]
2053 prefix = lvhdutil.LV_PREFIX[self.vdi_type]
2054 self.lvname = "%s%s" % (prefix, self.uuid)
2055 self.path = os.path.join(self.sr.path, self.lvname)
2056 self.sm_config_override = sm_config
2057 return True
2059 # LVM commands can be costly, so check the file directly first in case
2060 # the LV is active
2061 found = False
2062 for t in lvhdutil.VDI_TYPES: 2062 ↛ 2063: line 2062 didn't jump to line 2063, because the loop on line 2062 never started
2063 lvname = "%s%s" % (lvhdutil.LV_PREFIX[t], self.uuid)
2064 path = os.path.join(self.sr.path, lvname)
2065 if util.pathexists(path):
2066 if found:
2067 raise xs_errors.XenError('VDILoad',
2068 opterr="multiple VDI's: uuid %s" % self.uuid)
2069 found = True
2070 self.vdi_type = t
2071 self.lvname = lvname
2072 self.path = path
2073 if found: 2073 ↛ 2074: line 2073 didn't jump to line 2074, because the condition on line 2073 was never true
2074 return True
2076 # now list all LVs
2077 if not lvutil._checkVG(self.sr.vgname): 2077 ↛ 2079: line 2077 didn't jump to line 2079, because the condition on line 2077 was never true
2078 # when doing attach_from_config, the VG won't be there yet
2079 return False
2081 lvs = lvhdutil.getLVInfo(self.sr.lvmCache)
2082 if lvs.get(self.uuid):
2083 self._initFromLVInfo(lvs[self.uuid])
2084 return True
2085 return False
2087 def _loadThis(self):
2088 """Load VDI info for this VDI and activate the LV if it's VHD. We
2089 don't do it in VDI.load() because not all VDI operations need it."""
2090 if self.loaded: 2090 ↛ 2091: line 2090 didn't jump to line 2091, because the condition on line 2090 was never true
2091 if self.vdi_type == vhdutil.VDI_TYPE_VHD:
2092 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
2093 return
2094 try:
2095 lvs = lvhdutil.getLVInfo(self.sr.lvmCache, self.lvname)
2096 except util.CommandException as e:
2097 raise xs_errors.XenError('VDIUnavailable',
2098 opterr='%s (LV scan error)' % os.strerror(abs(e.code)))
2099 if not lvs.get(self.uuid): 2099 ↛ 2100: line 2099 didn't jump to line 2100, because the condition on line 2099 was never true
2100 raise xs_errors.XenError('VDIUnavailable', opterr='LV not found')
2101 self._initFromLVInfo(lvs[self.uuid])
2102 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 2102 ↛ 2109: line 2102 didn't jump to line 2109, because the condition on line 2102 was never false
2103 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
2104 vhdInfo = vhdutil.getVHDInfo(self.path, lvhdutil.extractUuid, False)
2105 if not vhdInfo: 2105 ↛ 2106: line 2105 didn't jump to line 2106, because the condition on line 2105 was never true
2106 raise xs_errors.XenError('VDIUnavailable', \
2107 opterr='getVHDInfo failed')
2108 self._initFromVHDInfo(vhdInfo)
2109 self.loaded = True
2111 def _chainSetActive(self, active, binary, persistent=False):
2112 if binary: 2112 ↛ 2113: line 2112 didn't jump to line 2113, because the condition on line 2112 was never true
2113 (count, bcount) = RefCounter.checkLocked(self.uuid,
2114 lvhdutil.NS_PREFIX_LVM + self.sr.uuid)
2115 if (active and bcount > 0) or (not active and bcount == 0):
2116 return # this is a redundant activation/deactivation call
2118 vdiList = {self.uuid: self.lvname}
2119 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 2119 ↛ 2122: line 2119 didn't jump to line 2122, because the condition on line 2119 was never false
2120 vdiList = vhdutil.getParentChain(self.lvname,
2121 lvhdutil.extractUuid, self.sr.vgname)
2122 for uuid, lvName in vdiList.items(): 2122 ↛ 2123: line 2122 didn't jump to line 2123, because the loop on line 2122 never started
2123 binaryParam = binary
2124 if uuid != self.uuid:
2125 binaryParam = False # binary param only applies to leaf nodes
2126 if active:
2127 self.sr.lvActivator.activate(uuid, lvName, binaryParam,
2128 persistent)
2129 else:
2130 # just add the LVs for deactivation in the final (cleanup)
2131 # step. The LVs must not have been activated during the current
2132 # operation
2133 self.sr.lvActivator.add(uuid, lvName, binaryParam)
2135 def _failClone(self, uuid, jval, msg):
2136 try:
2137 self.sr._handleInterruptedCloneOp(uuid, jval, True)
2138 self.sr.journaler.remove(self.JRN_CLONE, uuid)
2139 except Exception as e:
2140 util.SMlog('WARNING: failed to clean up failed snapshot: ' \
2141 ' %s (error ignored)' % e)
2142 raise xs_errors.XenError('VDIClone', opterr=msg)
2144 def _markHidden(self):
2145 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
2146 self.sr.lvmCache.setHidden(self.lvname)
2147 else:
2148 vhdutil.setHidden(self.path)
2149 self.hidden = 1
2151 def _prepareThin(self, attach):
2152 origUtilisation = self.sr.lvmCache.getSize(self.lvname)
2153 if self.sr.isMaster:
2154 # the master can prepare the VDI locally
2155 if attach:
2156 lvhdutil.attachThin(self.sr.journaler, self.sr.uuid, self.uuid)
2157 else:
2158 lvhdutil.detachThin(self.session, self.sr.lvmCache,
2159 self.sr.uuid, self.uuid)
2160 else:
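# LV resizing for thin provisioning is coordinated by the pool master, so
# a slave asks the master to do it via the thin-provisioning plugin and
# then refreshes its local view of the LV size.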
2161 fn = "attach"
2162 if not attach:
2163 fn = "detach"
2164 pools = self.session.xenapi.pool.get_all()
2165 master = self.session.xenapi.pool.get_master(pools[0])
2166 rv = self.session.xenapi.host.call_plugin(
2167 master, self.sr.THIN_PLUGIN, fn,
2168 {"srUuid": self.sr.uuid, "vdiUuid": self.uuid})
2169 util.SMlog("call-plugin returned: %s" % rv)
2170 if not rv:
2171 raise Exception('plugin %s failed' % self.sr.THIN_PLUGIN)
2172 # refresh to pick up the size change on this slave
2173 self.sr.lvmCache.activateNoRefcount(self.lvname, True)
2175 self.utilisation = self.sr.lvmCache.getSize(self.lvname)
2176 if origUtilisation != self.utilisation:
2177 vdi_ref = self.sr.srcmd.params['vdi_ref']
2178 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref,
2179 str(self.utilisation))
2180 stats = lvutil._getVGstats(self.sr.vgname)
2181 sr_utilisation = stats['physical_utilisation']
2182 self.session.xenapi.SR.set_physical_utilisation(self.sr.sr_ref,
2183 str(sr_utilisation))
2185 def update(self, sr_uuid, vdi_uuid):
2186 if self.sr.legacyMode:
2187 return
2189 # Sync the name_label of this VDI on storage with the name_label in XAPI
2190 vdi_ref = self.session.xenapi.VDI.get_by_uuid(self.uuid)
2191 update_map = {}
2192 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \
2193 METADATA_OBJECT_TYPE_VDI
2194 update_map[UUID_TAG] = self.uuid
2195 update_map[NAME_LABEL_TAG] = util.to_plain_string( \
2196 self.session.xenapi.VDI.get_name_label(vdi_ref))
2197 update_map[NAME_DESCRIPTION_TAG] = util.to_plain_string( \
2198 self.session.xenapi.VDI.get_name_description(vdi_ref))
2199 update_map[SNAPSHOT_TIME_TAG] = \
2200 self.session.xenapi.VDI.get_snapshot_time(vdi_ref)
2201 update_map[METADATA_OF_POOL_TAG] = \
2202 self.session.xenapi.VDI.get_metadata_of_pool(vdi_ref)
2203 LVMMetadataHandler(self.sr.mdpath).updateMetadata(update_map)
2205 def _ensure_cbt_space(self):
2206 self.sr.ensureCBTSpace()
2208 def _create_cbt_log(self):
2209 logname = self._get_cbt_logname(self.uuid)
2210 self.sr.lvmCache.create(logname, self.sr.journaler.LV_SIZE, CBTLOG_TAG)
2211 logpath = super(LVHDVDI, self)._create_cbt_log()
2212 self.sr.lvmCache.deactivateNoRefcount(logname)
2213 return logpath
2215 def _delete_cbt_log(self):
2216 logpath = self._get_cbt_logpath(self.uuid)
2217 if self._cbt_log_exists(logpath):
2218 logname = self._get_cbt_logname(self.uuid)
2219 self.sr.lvmCache.remove(logname)
2221 def _rename(self, oldpath, newpath):
2222 oldname = os.path.basename(oldpath)
2223 newname = os.path.basename(newpath)
2224 self.sr.lvmCache.rename(oldname, newname)
2226 def _activate_cbt_log(self, lv_name):
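# Returns True when this call actually activated the LV (so the caller can
# deactivate it afterwards), False when it was already active.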
2227 self.sr.lvmCache.refresh()
2228 if not self.sr.lvmCache.is_active(lv_name): 2228 ↛ 2229: line 2228 didn't jump to line 2229, because the condition on line 2228 was never true
2229 try:
2230 self.sr.lvmCache.activateNoRefcount(lv_name)
2231 return True
2232 except Exception as e:
2233 util.SMlog("Exception in _activate_cbt_log, "
2234 "Error: %s." % str(e))
2235 raise
2236 else:
2237 return False
2239 def _deactivate_cbt_log(self, lv_name):
2240 try:
2241 self.sr.lvmCache.deactivateNoRefcount(lv_name)
2242 except Exception as e:
2243 util.SMlog("Exception in _deactivate_cbt_log, Error: %s." % str(e))
2244 raise
2246 def _cbt_log_exists(self, logpath):
2247 return lvutil.exists(logpath)
2249 if __name__ == '__main__': 2249 ↛ 2250: line 2249 didn't jump to line 2250, because the condition on line 2249 was never true
2250 SRCommand.run(LVHDSR, DRIVER_INFO)
2251 else:
2252 SR.registerSR(LVHDSR)