
1#!/usr/bin/python3 

2# 

3# Copyright (C) Citrix Systems Inc. 

4# 

5# This program is free software; you can redistribute it and/or modify 

6# it under the terms of the GNU Lesser General Public License as published 

7# by the Free Software Foundation; version 2.1 only. 

8# 

9# This program is distributed in the hope that it will be useful, 

10# but WITHOUT ANY WARRANTY; without even the implied warranty of 

11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

12# GNU Lesser General Public License for more details. 

13# 

14# You should have received a copy of the GNU Lesser General Public License 

15# along with this program; if not, write to the Free Software Foundation, Inc., 

16# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 

17# 

18# LVHDSR: VHD on LVM storage repository 

19# 

20 

21from sm_typing import Dict, List, override 

22 

23import SR 

24from SR import deviceCheck 

25import VDI 

26import SRCommand 

27import util 

28import lvutil 

29import lvmcache 

30import vhdutil 

31import lvhdutil 

32import scsiutil 

33import os 

34import sys 

35import time 

36import errno 

37import xs_errors 

38import cleanup 

39import blktap2 

40from journaler import Journaler 

41from lock import Lock 

42from refcounter import RefCounter 

43from ipc import IPCFlag 

44from lvmanager import LVActivator 

45import XenAPI # pylint: disable=import-error 

46import re 

47from srmetadata import ALLOCATION_TAG, NAME_LABEL_TAG, NAME_DESCRIPTION_TAG, \ 

48 UUID_TAG, IS_A_SNAPSHOT_TAG, SNAPSHOT_OF_TAG, TYPE_TAG, VDI_TYPE_TAG, \ 

49 READ_ONLY_TAG, MANAGED_TAG, SNAPSHOT_TIME_TAG, METADATA_OF_POOL_TAG, \ 

50 LVMMetadataHandler, METADATA_OBJECT_TYPE_VDI, \ 

51 METADATA_OBJECT_TYPE_SR, METADATA_UPDATE_OBJECT_TYPE_TAG 

52from metadata import retrieveXMLfromFile, _parseXML 

53from xmlrpc.client import DateTime 

54import glob 

55from constants import CBTLOG_TAG 

56from fairlock import Fairlock 

57DEV_MAPPER_ROOT = os.path.join('/dev/mapper', lvhdutil.VG_PREFIX) 
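# Path prefix of the device-mapper nodes for this driver's volume groups;
# delete() and detach() below glob on this prefix to find the dm entries
# belonging to a given SR.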

58 

59geneology: Dict[str, List[str]] = {} 
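# Maps a parent VDI UUID to the list of its child VDI UUIDs; populated by
# LVHDSR._loadvdis() and used there to spot hidden leaf nodes that the GC
# would otherwise remove.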

60CAPABILITIES = ["SR_PROBE", "SR_UPDATE", "SR_TRIM", 

61 "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH", "VDI_MIRROR", 

62 "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "ATOMIC_PAUSE", 

63 "VDI_RESET_ON_BOOT/2", "VDI_UPDATE", "VDI_CONFIG_CBT", 

64 "VDI_ACTIVATE", "VDI_DEACTIVATE"] 

65 

66CONFIGURATION = [['device', 'local device path (required) (e.g. /dev/sda3)']] 

67 

68DRIVER_INFO = { 

69 'name': 'Local VHD on LVM', 

70 'description': 'SR plugin which represents disks as VHD disks on ' + \ 

71 'Logical Volumes within a locally-attached Volume Group', 

72 'vendor': 'XenSource Inc', 

73 'copyright': '(C) 2008 XenSource Inc', 

74 'driver_version': '1.0', 

75 'required_api_version': '1.0', 

76 'capabilities': CAPABILITIES, 

77 'configuration': CONFIGURATION 

78 } 

79 

80PARAM_VHD = "vhd" 

81PARAM_RAW = "raw" 

82 

83OPS_EXCLUSIVE = [ 

84 "sr_create", "sr_delete", "sr_attach", "sr_detach", "sr_scan", 

85 "sr_update", "vdi_create", "vdi_delete", "vdi_resize", "vdi_snapshot", 

86 "vdi_clone"] 

87 

88# Log if snapshot pauses VM for more than this many seconds 

89LONG_SNAPTIME = 60 

90 

91class LVHDSR(SR.SR): 

92 DRIVER_TYPE = 'lvhd' 

93 

94 PROVISIONING_TYPES = ["thin", "thick"] 

95 PROVISIONING_DEFAULT = "thick" 

96 THIN_PLUGIN = "lvhd-thin" 

97 

98 PLUGIN_ON_SLAVE = "on-slave" 

99 

100 FLAG_USE_VHD = "use_vhd" 

101 MDVOLUME_NAME = "MGT" 

102 

103 ALLOCATION_QUANTUM = "allocation_quantum" 

104 INITIAL_ALLOCATION = "initial_allocation" 

105 

106 LOCK_RETRY_INTERVAL = 3 

107 LOCK_RETRY_ATTEMPTS = 10 

108 

109 TEST_MODE_KEY = "testmode" 

110 TEST_MODE_VHD_FAIL_REPARENT_BEGIN = "vhd_fail_reparent_begin" 

111 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR = "vhd_fail_reparent_locator" 

112 TEST_MODE_VHD_FAIL_REPARENT_END = "vhd_fail_reparent_end" 

113 TEST_MODE_VHD_FAIL_RESIZE_BEGIN = "vhd_fail_resize_begin" 

114 TEST_MODE_VHD_FAIL_RESIZE_DATA = "vhd_fail_resize_data" 

115 TEST_MODE_VHD_FAIL_RESIZE_METADATA = "vhd_fail_resize_metadata" 

116 TEST_MODE_VHD_FAIL_RESIZE_END = "vhd_fail_resize_end" 

117 

118 ENV_VAR_VHD_TEST = { 

119 TEST_MODE_VHD_FAIL_REPARENT_BEGIN: 

120 "VHD_UTIL_TEST_FAIL_REPARENT_BEGIN", 

121 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR: 

122 "VHD_UTIL_TEST_FAIL_REPARENT_LOCATOR", 

123 TEST_MODE_VHD_FAIL_REPARENT_END: 

124 "VHD_UTIL_TEST_FAIL_REPARENT_END", 

125 TEST_MODE_VHD_FAIL_RESIZE_BEGIN: 

126 "VHD_UTIL_TEST_FAIL_RESIZE_BEGIN", 

127 TEST_MODE_VHD_FAIL_RESIZE_DATA: 

128 "VHD_UTIL_TEST_FAIL_RESIZE_DATA_MOVED", 

129 TEST_MODE_VHD_FAIL_RESIZE_METADATA: 

130 "VHD_UTIL_TEST_FAIL_RESIZE_METADATA_MOVED", 

131 TEST_MODE_VHD_FAIL_RESIZE_END: 

132 "VHD_UTIL_TEST_FAIL_RESIZE_END" 

133 } 
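    # testMode is read from the SR's other-config 'testmode' key in load();
    # _prepareTestMode() then exports the matching VHD_UTIL_TEST_* variable,
    # presumably so vhd-util can inject the corresponding failure.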

134 testMode = "" 

135 

136 legacyMode = True 

137 

138 @override 

139 @staticmethod 

140 def handles(type) -> bool: 

141 """Returns True if this SR class understands the given dconf string""" 

142 # we can pose as LVMSR or EXTSR for compatibility purposes 

143 if __name__ == '__main__': 

144 name = sys.argv[0] 

145 else: 

146 name = __name__ 

147 if name.endswith("LVMSR"): 

148 return type == "lvm" 

149 elif name.endswith("EXTSR"): 

150 return type == "ext" 

151 return type == LVHDSR.DRIVER_TYPE 

152 

153 @override 

154 def load(self, sr_uuid) -> None: 

155 self.ops_exclusive = OPS_EXCLUSIVE 

156 

157 self.isMaster = False 

158 if 'SRmaster' in self.dconf and self.dconf['SRmaster'] == 'true': 

159 self.isMaster = True 

160 

161 self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid) 

162 self.sr_vditype = SR.DEFAULT_TAP 

163 self.uuid = sr_uuid 

164 self.vgname = lvhdutil.VG_PREFIX + self.uuid 

165 self.path = os.path.join(lvhdutil.VG_LOCATION, self.vgname) 

166 self.mdpath = os.path.join(self.path, self.MDVOLUME_NAME) 

167 self.provision = self.PROVISIONING_DEFAULT 

168 

169 has_sr_ref = self.srcmd.params.get("sr_ref") 

170 if has_sr_ref: 

171 self.other_conf = self.session.xenapi.SR.get_other_config(self.sr_ref) 

172 else: 

173 self.other_conf = None 

174 

175 self.lvm_conf = None 

176 if self.other_conf: 

177 self.lvm_conf = self.other_conf.get('lvm-conf') 

178 

179 try: 

180 self.lvmCache = lvmcache.LVMCache(self.vgname, self.lvm_conf) 

181 except: 

182 raise xs_errors.XenError('SRUnavailable', \ 

183 opterr='Failed to initialise the LVMCache') 

184 self.lvActivator = LVActivator(self.uuid, self.lvmCache) 

185 self.journaler = Journaler(self.lvmCache) 

186 if not has_sr_ref: 

187 return # must be a probe call 

188 # Test for thick vs thin provisioning conf parameter 

189        if 'allocation' in self.dconf: 

190 if self.dconf['allocation'] in self.PROVISIONING_TYPES: 

191 self.provision = self.dconf['allocation'] 

192 else: 

193 raise xs_errors.XenError('InvalidArg', \ 

194 opterr='Allocation parameter must be one of %s' % self.PROVISIONING_TYPES) 

195 

196        if self.other_conf.get(self.TEST_MODE_KEY): 

197 self.testMode = self.other_conf[self.TEST_MODE_KEY] 

198 self._prepareTestMode() 

199 

200 self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref) 

201 # sm_config flag overrides PBD, if any 

202 if self.sm_config.get('allocation') in self.PROVISIONING_TYPES: 

203 self.provision = self.sm_config.get('allocation') 

204 

205 if self.sm_config.get(self.FLAG_USE_VHD) == "true": 

206 self.legacyMode = False 

207 

208 if lvutil._checkVG(self.vgname): 

209            if self.isMaster and not self.cmd in ["vdi_attach", "vdi_detach", 

210 "vdi_activate", "vdi_deactivate"]: 

211 self._undoAllJournals() 

212 if not self.cmd in ["sr_attach", "sr_probe"]: 

213 self._checkMetadataVolume() 

214 

215 self.mdexists = False 

216 

217 # get a VDI -> TYPE map from the storage 
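        # LV names embed the owning VDI's UUID after a type prefix from
        # lvhdutil.LV_PREFIX; the regex below only detects the embedded UUID,
        # the prefix is what determines the VDI type.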

218 contains_uuid_regex = \ 

219 re.compile("^.*[0-9a-f]{8}-(([0-9a-f]{4})-){3}[0-9a-f]{12}.*") 

220 self.storageVDIs = {} 

221 

222        for key in self.lvmCache.lvs.keys(): 

223 # if the lvname has a uuid in it 

224 type = None 

225 if contains_uuid_regex.search(key) is not None: 

226 if key.startswith(lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD]): 

227 type = vhdutil.VDI_TYPE_VHD 

228 vdi = key[len(lvhdutil.LV_PREFIX[type]):] 

229 elif key.startswith(lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_RAW]): 

230 type = vhdutil.VDI_TYPE_RAW 

231 vdi = key[len(lvhdutil.LV_PREFIX[type]):] 

232 else: 

233 continue 

234 

235 if type is not None: 

236 self.storageVDIs[vdi] = type 

237 

238 # check if metadata volume exists 

239 try: 

240 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME) 

241 except: 

242 pass 

243 

244 @override 

245 def cleanup(self) -> None: 

246 # we don't need to hold the lock to dec refcounts of activated LVs 

247        if not self.lvActivator.deactivateAll(): 

248 raise util.SMException("failed to deactivate LVs") 

249 

250 def updateSRMetadata(self, allocation): 

251 try: 

252 # Add SR specific SR metadata 

253 sr_info = \ 

254 {ALLOCATION_TAG: allocation, 

255 UUID_TAG: self.uuid, 

256 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_label(self.sr_ref)), 

257 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_description(self.sr_ref)) 

258 } 

259 

260 vdi_info = {} 

261 for vdi in self.session.xenapi.SR.get_VDIs(self.sr_ref): 

262 vdi_uuid = self.session.xenapi.VDI.get_uuid(vdi) 

263 

264 # Create the VDI entry in the SR metadata 

265 vdi_info[vdi_uuid] = \ 

266 { 

267 UUID_TAG: vdi_uuid, 

268 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi)), 

269 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi)), 

270 IS_A_SNAPSHOT_TAG: \ 

271 int(self.session.xenapi.VDI.get_is_a_snapshot(vdi)), 

272 SNAPSHOT_OF_TAG: \ 

273 self.session.xenapi.VDI.get_snapshot_of(vdi), 

274 SNAPSHOT_TIME_TAG: \ 

275 self.session.xenapi.VDI.get_snapshot_time(vdi), 

276 TYPE_TAG: \ 

277 self.session.xenapi.VDI.get_type(vdi), 

278 VDI_TYPE_TAG: \ 

279 self.session.xenapi.VDI.get_sm_config(vdi)['vdi_type'], 

280 READ_ONLY_TAG: \ 

281 int(self.session.xenapi.VDI.get_read_only(vdi)), 

282 METADATA_OF_POOL_TAG: \ 

283 self.session.xenapi.VDI.get_metadata_of_pool(vdi), 

284 MANAGED_TAG: \ 

285 int(self.session.xenapi.VDI.get_managed(vdi)) 

286 } 

287 LVMMetadataHandler(self.mdpath).writeMetadata(sr_info, vdi_info) 

288 

289 except Exception as e: 

290 raise xs_errors.XenError('MetadataError', \ 

291 opterr='Error upgrading SR Metadata: %s' % str(e)) 

292 

293 def syncMetadataAndStorage(self): 

294 try: 

295 # if a VDI is present in the metadata but not in the storage 

296 # then delete it from the metadata 

297 vdi_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1] 

298 for vdi in list(vdi_info.keys()): 

299 update_map = {} 

300                if not vdi_info[vdi][UUID_TAG] in set(self.storageVDIs.keys()): 

301 # delete this from metadata 

302 LVMMetadataHandler(self.mdpath). \ 

303 deleteVdiFromMetadata(vdi_info[vdi][UUID_TAG]) 

304 else: 

305 # search for this in the metadata, compare types 

306 # self.storageVDIs is a map of vdi_uuid to vdi_type 

307 if vdi_info[vdi][VDI_TYPE_TAG] != \ 

308 self.storageVDIs[vdi_info[vdi][UUID_TAG]]: 

309 # storage type takes authority 

310 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] \ 

311 = METADATA_OBJECT_TYPE_VDI 

312 update_map[UUID_TAG] = vdi_info[vdi][UUID_TAG] 

313 update_map[VDI_TYPE_TAG] = \ 

314 self.storageVDIs[vdi_info[vdi][UUID_TAG]] 

315 LVMMetadataHandler(self.mdpath) \ 

316 .updateMetadata(update_map) 

317 else: 

318 # This should never happen 

319 pass 

320 

321 except Exception as e: 

322 raise xs_errors.XenError('MetadataError', \ 

323 opterr='Error synching SR Metadata and storage: %s' % str(e)) 

324 

325 def syncMetadataAndXapi(self): 

326 try: 

327 # get metadata 

328 (sr_info, vdi_info) = \ 

329 LVMMetadataHandler(self.mdpath, False).getMetadata() 

330 

331 # First synch SR parameters 

332 self.update(self.uuid) 

333 

334 # Now update the VDI information in the metadata if required 

335 for vdi_offset in vdi_info.keys(): 

336 try: 

337 vdi_ref = \ 

338 self.session.xenapi.VDI.get_by_uuid( \ 

339 vdi_info[vdi_offset][UUID_TAG]) 

340 except: 

341                    # maybe the VDI is not in XAPI yet, don't bother 

342 continue 

343 

344 new_name_label = util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi_ref)) 

345 new_name_description = util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi_ref)) 

346 

347 if vdi_info[vdi_offset][NAME_LABEL_TAG] != new_name_label or \ 

348 vdi_info[vdi_offset][NAME_DESCRIPTION_TAG] != \ 

349 new_name_description: 

350 update_map = {} 

351 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \ 

352 METADATA_OBJECT_TYPE_VDI 

353 update_map[UUID_TAG] = vdi_info[vdi_offset][UUID_TAG] 

354 update_map[NAME_LABEL_TAG] = new_name_label 

355 update_map[NAME_DESCRIPTION_TAG] = new_name_description 

356 LVMMetadataHandler(self.mdpath) \ 

357 .updateMetadata(update_map) 

358 except Exception as e: 

359 raise xs_errors.XenError('MetadataError', \ 

360 opterr='Error synching SR Metadata and XAPI: %s' % str(e)) 

361 

362 def _checkMetadataVolume(self): 

363 util.SMlog("Entering _checkMetadataVolume") 

364 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME) 

365        if self.isMaster: 

366 if self.mdexists and self.cmd == "sr_attach": 

367 try: 

368 # activate the management volume 

369 # will be deactivated at detach time 

370 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME) 

371 self._synchSmConfigWithMetaData() 

372 util.SMlog("Sync SR metadata and the state on the storage.") 

373 self.syncMetadataAndStorage() 

374 self.syncMetadataAndXapi() 

375 except Exception as e: 

376 util.SMlog("Exception in _checkMetadataVolume, " \ 

377 "Error: %s." % str(e)) 

378            elif not self.mdexists and not self.legacyMode: 

379 self._introduceMetaDataVolume() 

380 

381 if self.mdexists: 

382 self.legacyMode = False 

383 

384 def _synchSmConfigWithMetaData(self): 

385 util.SMlog("Synching sm-config with metadata volume") 

386 

387 try: 

388 # get SR info from metadata 

389 sr_info = {} 

390 map = {} 

391 sr_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[0] 

392 

393            if sr_info == {}: 

394 raise Exception("Failed to get SR information from metadata.") 

395 

396            if "allocation" in sr_info: 

397 self.provision = sr_info.get("allocation") 

398 map['allocation'] = sr_info.get("allocation") 

399 else: 

400 raise Exception("Allocation key not found in SR metadata. " 

401 "SR info found: %s" % sr_info) 

402 

403 except Exception as e: 

404 raise xs_errors.XenError( 

405 'MetadataError', 

406 opterr='Error reading SR params from ' 

407 'metadata Volume: %s' % str(e)) 

408 try: 

409 map[self.FLAG_USE_VHD] = 'true' 

410 self.session.xenapi.SR.set_sm_config(self.sr_ref, map) 

411 except: 

412 raise xs_errors.XenError( 

413 'MetadataError', 

414 opterr='Error updating sm_config key') 

415 

416 def _introduceMetaDataVolume(self): 

417 util.SMlog("Creating Metadata volume") 

418 try: 

419 config = {} 

420 self.lvmCache.create(self.MDVOLUME_NAME, 4 * 1024 * 1024) 

421 

422 # activate the management volume, will be deactivated at detach time 

423 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME) 

424 

425 name_label = util.to_plain_string( \ 

426 self.session.xenapi.SR.get_name_label(self.sr_ref)) 

427 name_description = util.to_plain_string( \ 

428 self.session.xenapi.SR.get_name_description(self.sr_ref)) 

429 config[self.FLAG_USE_VHD] = "true" 

430 config['allocation'] = self.provision 

431 self.session.xenapi.SR.set_sm_config(self.sr_ref, config) 

432 

433 # Add the SR metadata 

434 self.updateSRMetadata(self.provision) 

435 except Exception as e: 

436 raise xs_errors.XenError('MetadataError', \ 

437 opterr='Error introducing Metadata Volume: %s' % str(e)) 

438 

439 def _removeMetadataVolume(self): 

440 if self.mdexists: 

441 try: 

442 self.lvmCache.remove(self.MDVOLUME_NAME) 

443 except: 

444 raise xs_errors.XenError('MetadataError', \ 

445 opterr='Failed to delete MGT Volume') 

446 

447 def _refresh_size(self): 

448 """ 

449        Refreshes the size of the backing device. 

450        Returns True if all paths/devices agree on the same size. 

451 """ 

452        if hasattr(self, 'SCSIid'): 

453 # LVHDoHBASR, LVHDoISCSISR 

454 return scsiutil.refresh_lun_size_by_SCSIid(getattr(self, 'SCSIid')) 

455 else: 

456 # LVHDSR 

457 devices = self.dconf['device'].split(',') 

458 scsiutil.refreshdev(devices) 

459 return True 

460 

461 def _expand_size(self): 

462 """ 

463        Expands the size of the SR by growing into additional available 

464        space, if extra space is available on the backing device. 

465 Needs to be called after a successful call of _refresh_size. 

466 """ 

467 currentvgsize = lvutil._getVGstats(self.vgname)['physical_size'] 

468 # We are comparing PV- with VG-sizes that are aligned. Need a threshold 

469 resizethreshold = 100 * 1024 * 1024 # 100MB 

470 devices = self.dconf['device'].split(',') 

471 totaldevicesize = 0 

472 for device in devices: 

473 totaldevicesize = totaldevicesize + scsiutil.getsize(device) 

474 if totaldevicesize >= (currentvgsize + resizethreshold): 

475 try: 

476                if hasattr(self, 'SCSIid'): 

477 # LVHDoHBASR, LVHDoISCSISR might have slaves 

478 scsiutil.refresh_lun_size_by_SCSIid_on_slaves(self.session, 

479 getattr(self, 'SCSIid')) 

480 util.SMlog("LVHDSR._expand_size for %s will resize the pv." % 

481 self.uuid) 

482 for pv in lvutil.get_pv_for_vg(self.vgname): 

483 lvutil.resizePV(pv) 

484 except: 

485 util.logException("LVHDSR._expand_size for %s failed to resize" 

486 " the PV" % self.uuid) 

487 

488 @override 

489 @deviceCheck 

490 def create(self, uuid, size) -> None: 

491 util.SMlog("LVHDSR.create for %s" % self.uuid) 

492 if not self.isMaster: 

493 util.SMlog('sr_create blocked for non-master') 

494 raise xs_errors.XenError('LVMMaster') 

495 

496 if lvutil._checkVG(self.vgname): 

497 raise xs_errors.XenError('SRExists') 

498 

499 # Check none of the devices already in use by other PBDs 

500 if util.test_hostPBD_devs(self.session, uuid, self.dconf['device']): 

501 raise xs_errors.XenError('SRInUse') 

502 

503 # Check serial number entry in SR records 

504 for dev in self.dconf['device'].split(','): 

505 if util.test_scsiserial(self.session, dev): 

506 raise xs_errors.XenError('SRInUse') 

507 

508 lvutil.createVG(self.dconf['device'], self.vgname) 

509 

510 #Update serial number string 

511 scsiutil.add_serial_record(self.session, self.sr_ref, \ 

512 scsiutil.devlist_to_serialstring(self.dconf['device'].split(','))) 

513 

514 # since this is an SR.create turn off legacy mode 

515 self.session.xenapi.SR.add_to_sm_config(self.sr_ref, \ 

516 self.FLAG_USE_VHD, 'true') 

517 

518 @override 

519 def delete(self, uuid) -> None: 

520 util.SMlog("LVHDSR.delete for %s" % self.uuid) 

521 if not self.isMaster: 

522 raise xs_errors.XenError('LVMMaster') 

523 cleanup.gc_force(self.session, self.uuid) 

524 

525 success = True 

526 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'): 

527 if util.extractSRFromDevMapper(fileName) != self.uuid: 

528 continue 

529 

530 if util.doesFileHaveOpenHandles(fileName): 

531 util.SMlog("LVHDSR.delete: The dev mapper entry %s has open " \ 

532 "handles" % fileName) 

533 success = False 

534 continue 

535 

536 # Now attempt to remove the dev mapper entry 

537 if not lvutil.removeDevMapperEntry(fileName, False): 

538 success = False 

539 continue 

540 

541 try: 
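                # device-mapper escapes '-' in VG/LV names as '--'; undo that
                # mangling to recover the plain LV name and remove its symlink
                # under the VG directory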

542 lvname = os.path.basename(fileName.replace('-', '/'). \ 

543 replace('//', '-')) 

544 lpath = os.path.join(self.path, lvname) 

545 os.unlink(lpath) 

546 except OSError as e: 

547 if e.errno != errno.ENOENT: 

548 util.SMlog("LVHDSR.delete: failed to remove the symlink for " \ 

549 "file %s. Error: %s" % (fileName, str(e))) 

550 success = False 

551 

552 if success: 

553 try: 

554 if util.pathexists(self.path): 

555 os.rmdir(self.path) 

556 except Exception as e: 

557 util.SMlog("LVHDSR.delete: failed to remove the symlink " \ 

558 "directory %s. Error: %s" % (self.path, str(e))) 

559 success = False 

560 

561 self._removeMetadataVolume() 

562 self.lvmCache.refresh() 

563 if len(lvhdutil.getLVInfo(self.lvmCache)) > 0: 

564 raise xs_errors.XenError('SRNotEmpty') 

565 

566 if not success: 

567 raise Exception("LVHDSR delete failed, please refer to the log " \ 

568 "for details.") 

569 

570 lvutil.removeVG(self.dconf['device'], self.vgname) 

571 self._cleanup() 

572 

573 @override 

574 def attach(self, uuid) -> None: 

575 util.SMlog("LVHDSR.attach for %s" % self.uuid) 

576 

577 self._cleanup(True) # in case of host crashes, if detach wasn't called 

578 

579        if not util.match_uuid(self.uuid) or not lvutil._checkVG(self.vgname): 

580 raise xs_errors.XenError('SRUnavailable', \ 

581 opterr='no such volume group: %s' % self.vgname) 

582 

583 # Refresh the metadata status 

584 self._checkMetadataVolume() 

585 

586 refreshsizeok = self._refresh_size() 

587 

588        if self.isMaster: 

589            if refreshsizeok: 

590 self._expand_size() 

591 

592 # Update SCSIid string 

593 util.SMlog("Calling devlist_to_serial") 

594 scsiutil.add_serial_record( 

595 self.session, self.sr_ref, 

596 scsiutil.devlist_to_serialstring(self.dconf['device'].split(','))) 

597 

598 # Test Legacy Mode Flag and update if VHD volumes exist 

599        if self.isMaster and self.legacyMode: 

600 vdiInfo = lvhdutil.getVDIInfo(self.lvmCache) 

601 for uuid, info in vdiInfo.items(): 

602 if info.vdiType == vhdutil.VDI_TYPE_VHD: 

603 self.legacyMode = False 

604 map = self.session.xenapi.SR.get_sm_config(self.sr_ref) 

605 self._introduceMetaDataVolume() 

606 break 

607 

608 # Set the block scheduler 

609 for dev in self.dconf['device'].split(','): 

610 self.block_setscheduler(dev) 

611 

612 @override 

613 def detach(self, uuid) -> None: 

614 util.SMlog("LVHDSR.detach for %s" % self.uuid) 

615 cleanup.abort(self.uuid) 

616 

617 # Do a best effort cleanup of the dev mapper entries 

618 # go through all devmapper entries for this VG 

619 success = True 

620 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'): 

621            if util.extractSRFromDevMapper(fileName) != self.uuid: 

622 continue 

623 

624 with Fairlock('devicemapper'): 

625 # check if any file has open handles 

626 if util.doesFileHaveOpenHandles(fileName): 

627 # if yes, log this and signal failure 

628 util.SMlog( 

629 f"LVHDSR.detach: The dev mapper entry {fileName} has " 

630 "open handles") 

631 success = False 

632 continue 

633 

634 # Now attempt to remove the dev mapper entry 

635            if not lvutil.removeDevMapperEntry(fileName, False): 

636 success = False 

637 continue 

638 

639 # also remove the symlinks from /dev/VG-XenStorage-SRUUID/* 

640 try: 

641 lvname = os.path.basename(fileName.replace('-', '/'). \ 

642 replace('//', '-')) 

643 lvname = os.path.join(self.path, lvname) 

644 util.force_unlink(lvname) 

645 except Exception as e: 

646 util.SMlog("LVHDSR.detach: failed to remove the symlink for " \ 

647 "file %s. Error: %s" % (fileName, str(e))) 

648 success = False 

649 

650 # now remove the directory where the symlinks are 

651 # this should pass as the directory should be empty by now 

652 if success: 

653 try: 

654                if util.pathexists(self.path): 

655 os.rmdir(self.path) 

656 except Exception as e: 

657 util.SMlog("LVHDSR.detach: failed to remove the symlink " \ 

658 "directory %s. Error: %s" % (self.path, str(e))) 

659 success = False 

660 

661 if not success: 

662 raise Exception("SR detach failed, please refer to the log " \ 

663 "for details.") 

664 

665 # Don't delete lock files on the master as it will break the locking 

666 # between SM and any GC thread that survives through SR.detach. 

667 # However, we should still delete lock files on slaves as it is the 

668 # only place to do so. 

669 self._cleanup(self.isMaster) 

670 

671 @override 

672 def forget_vdi(self, uuid) -> None: 

673 if not self.legacyMode: 

674 LVMMetadataHandler(self.mdpath).deleteVdiFromMetadata(uuid) 

675 super(LVHDSR, self).forget_vdi(uuid) 

676 

677 @override 

678 def scan(self, uuid) -> None: 

679 activated_lvs = set() 

680 try: 

681 util.SMlog("LVHDSR.scan for %s" % self.uuid) 

682            if not self.isMaster: 

683 util.SMlog('sr_scan blocked for non-master') 

684 raise xs_errors.XenError('LVMMaster') 

685 

686            if self._refresh_size(): 

687 self._expand_size() 

688 self.lvmCache.refresh() 
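            # LVs tagged with CBTLOG_TAG hold changed-block-tracking logs,
            # named '<vdi_uuid>.<CBTLOG_TAG>'; any that remain unmatched after
            # the scan are introduced as cbt_metadata VDIs further below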

689 cbt_vdis = self.lvmCache.getTagged(CBTLOG_TAG) 

690 self._loadvdis() 

691 stats = lvutil._getVGstats(self.vgname) 

692 self.physical_size = stats['physical_size'] 

693 self.physical_utilisation = stats['physical_utilisation'] 

694 

695 # Now check if there are any VDIs in the metadata, which are not in 

696 # XAPI 

697            if self.mdexists: 

698 vdiToSnaps: Dict[str, List[str]] = {} 

699 # get VDIs from XAPI 

700 vdis = self.session.xenapi.SR.get_VDIs(self.sr_ref) 

701 vdi_uuids = set([]) 

702 for vdi in vdis: 

703 vdi_uuids.add(self.session.xenapi.VDI.get_uuid(vdi)) 

704 

705 info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1] 

706 

707 for vdi in list(info.keys()): 

708 vdi_uuid = info[vdi][UUID_TAG] 

709                    if bool(int(info[vdi][IS_A_SNAPSHOT_TAG])): 

710 if info[vdi][SNAPSHOT_OF_TAG] in vdiToSnaps: 

711 vdiToSnaps[info[vdi][SNAPSHOT_OF_TAG]].append(vdi_uuid) 

712 else: 

713 vdiToSnaps[info[vdi][SNAPSHOT_OF_TAG]] = [vdi_uuid] 

714 

715 if vdi_uuid not in vdi_uuids: 

716 util.SMlog("Introduce VDI %s as it is present in " \ 

717 "metadata and not in XAPI." % vdi_uuid) 

718 sm_config = {} 

719 sm_config['vdi_type'] = info[vdi][VDI_TYPE_TAG] 

720 lvname = "%s%s" % \ 

721 (lvhdutil.LV_PREFIX[sm_config['vdi_type']], vdi_uuid) 

722 self.lvActivator.activate( 

723 vdi_uuid, lvname, LVActivator.NORMAL) 

724 activated_lvs.add(vdi_uuid) 

725 lvPath = os.path.join(self.path, lvname) 

726 

727                        if info[vdi][VDI_TYPE_TAG] == vhdutil.VDI_TYPE_RAW: 

728 size = self.lvmCache.getSize( \ 

729 lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_RAW] + \ 

730 vdi_uuid) 

731 utilisation = \ 

732 util.roundup(lvutil.LVM_SIZE_INCREMENT, 

733 int(size)) 

734 else: 

735 parent = \ 

736 vhdutil._getVHDParentNoCheck(lvPath) 

737 

738                            if parent is not None: 

739 sm_config['vhd-parent'] = parent[len( \ 

740 lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD]):] 

741 size = vhdutil.getSizeVirt(lvPath) 

742                            if self.provision == "thin": 

743 utilisation = \ 

744 util.roundup(lvutil.LVM_SIZE_INCREMENT, 

745 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE)) 

746 else: 

747 utilisation = lvhdutil.calcSizeVHDLV(int(size)) 

748 

749 vdi_ref = self.session.xenapi.VDI.db_introduce( 

750 vdi_uuid, 

751 info[vdi][NAME_LABEL_TAG], 

752 info[vdi][NAME_DESCRIPTION_TAG], 

753 self.sr_ref, 

754 info[vdi][TYPE_TAG], 

755 False, 

756 bool(int(info[vdi][READ_ONLY_TAG])), 

757 {}, 

758 vdi_uuid, 

759 {}, 

760 sm_config) 

761 

762 self.session.xenapi.VDI.set_managed(vdi_ref, 

763 bool(int(info[vdi][MANAGED_TAG]))) 

764 self.session.xenapi.VDI.set_virtual_size(vdi_ref, 

765 str(size)) 

766 self.session.xenapi.VDI.set_physical_utilisation( \ 

767 vdi_ref, str(utilisation)) 

768 self.session.xenapi.VDI.set_is_a_snapshot( \ 

769 vdi_ref, bool(int(info[vdi][IS_A_SNAPSHOT_TAG]))) 

770                        if bool(int(info[vdi][IS_A_SNAPSHOT_TAG])): 

771 self.session.xenapi.VDI.set_snapshot_time( \ 

772 vdi_ref, DateTime(info[vdi][SNAPSHOT_TIME_TAG])) 

773                        if info[vdi][TYPE_TAG] == 'metadata': 

774 self.session.xenapi.VDI.set_metadata_of_pool( \ 

775 vdi_ref, info[vdi][METADATA_OF_POOL_TAG]) 

776 

777 # Update CBT status of disks either just added 

778 # or already in XAPI 

779 cbt_logname = "%s.%s" % (vdi_uuid, CBTLOG_TAG) 

780                    if cbt_logname in cbt_vdis: 

781 vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid) 

782 self.session.xenapi.VDI.set_cbt_enabled(vdi_ref, True) 

783 # For existing VDIs, update local state too 

784 # Scan in base class SR updates existing VDIs 

785 # again based on local states 

786 if vdi_uuid in self.vdis: 

787 self.vdis[vdi_uuid].cbt_enabled = True 

788 cbt_vdis.remove(cbt_logname) 

789 

790 # Now set the snapshot statuses correctly in XAPI 

791                for srcvdi in vdiToSnaps.keys(): 

792 try: 

793 srcref = self.session.xenapi.VDI.get_by_uuid(srcvdi) 

794 except: 

795 # the source VDI no longer exists, continue 

796 continue 

797 

798 for snapvdi in vdiToSnaps[srcvdi]: 

799 try: 

800                            # this might fail in cases where it's already set 

801 snapref = \ 

802 self.session.xenapi.VDI.get_by_uuid(snapvdi) 

803 self.session.xenapi.VDI.set_snapshot_of(snapref, srcref) 

804 except Exception as e: 

805 util.SMlog("Setting snapshot failed. " \ 

806 "Error: %s" % str(e)) 

807 

808            if cbt_vdis: 

809 # If we have items remaining in this list, 

810                # they are cbt_metadata VDIs that XAPI doesn't know about 

811 # Add them to self.vdis and they'll get added to the DB 

812                for cbt_vdi in cbt_vdis: 

813 cbt_uuid = cbt_vdi.split(".")[0] 

814 new_vdi = self.vdi(cbt_uuid) 

815 new_vdi.ty = "cbt_metadata" 

816 new_vdi.cbt_enabled = True 

817 self.vdis[cbt_uuid] = new_vdi 

818 

819 super(LVHDSR, self).scan(uuid) 

820 self._kickGC() 

821 

822 finally: 

823 for vdi in activated_lvs: 

824 self.lvActivator.deactivate( 

825 vdi, LVActivator.NORMAL, False) 

826 

827 @override 

828 def update(self, uuid) -> None: 

829        if not lvutil._checkVG(self.vgname): 

830 return 

831 self._updateStats(uuid, 0) 

832 

833        if self.legacyMode: 

834 return 

835 

836 # synch name_label in metadata with XAPI 

837 update_map = {} 

838 update_map = {METADATA_UPDATE_OBJECT_TYPE_TAG: \ 

839 METADATA_OBJECT_TYPE_SR, 

840 NAME_LABEL_TAG: util.to_plain_string( \ 

841 self.session.xenapi.SR.get_name_label(self.sr_ref)), 

842 NAME_DESCRIPTION_TAG: util.to_plain_string( \ 

843 self.session.xenapi.SR.get_name_description(self.sr_ref)) 

844 } 

845 LVMMetadataHandler(self.mdpath).updateMetadata(update_map) 

846 

847 def _updateStats(self, uuid, virtAllocDelta): 

848 valloc = int(self.session.xenapi.SR.get_virtual_allocation(self.sr_ref)) 

849 self.virtual_allocation = valloc + virtAllocDelta 

850 util.SMlog("Setting virtual_allocation of SR %s to %d" % 

851 (uuid, self.virtual_allocation)) 

852 stats = lvutil._getVGstats(self.vgname) 

853 self.physical_size = stats['physical_size'] 

854 self.physical_utilisation = stats['physical_utilisation'] 

855 self._db_update() 

856 

857 @override 

858 @deviceCheck 

859 def probe(self) -> str: 

860 return lvutil.srlist_toxml( 

861 lvutil.scan_srlist(lvhdutil.VG_PREFIX, self.dconf['device']), 

862 lvhdutil.VG_PREFIX, 

863 ('metadata' in self.srcmd.params['sr_sm_config'] and \ 

864 self.srcmd.params['sr_sm_config']['metadata'] == 'true')) 

865 

866 @override 

867 def vdi(self, uuid) -> VDI.VDI: 

868 return LVHDVDI(self, uuid) 

869 

870 def _loadvdis(self): 

871 self.virtual_allocation = 0 

872 self.vdiInfo = lvhdutil.getVDIInfo(self.lvmCache) 

873 self.allVDIs = {} 

874 

875 for uuid, info in self.vdiInfo.items(): 

876            if uuid.startswith(cleanup.SR.TMP_RENAME_PREFIX): 

877 continue 

878            if info.scanError: 

879 raise xs_errors.XenError('VDIUnavailable', \ 

880 opterr='Error scanning VDI %s' % uuid) 

881 self.vdis[uuid] = self.allVDIs[uuid] = self.vdi(uuid) 

882            if not self.vdis[uuid].hidden: 

883 self.virtual_allocation += self.vdis[uuid].utilisation 

884 

885 for uuid, vdi in self.vdis.items(): 

886            if vdi.parent: 

887 if vdi.parent in self.vdis: 

888 self.vdis[vdi.parent].read_only = True 

889 if vdi.parent in geneology: 

890 geneology[vdi.parent].append(uuid) 

891 else: 

892 geneology[vdi.parent] = [uuid] 

893 

894 # Now remove all hidden leaf nodes to avoid introducing records that 

895 # will be GC'ed 

896 for uuid in list(self.vdis.keys()): 

897            if uuid not in geneology and self.vdis[uuid].hidden: 

898 util.SMlog("Scan found hidden leaf (%s), ignoring" % uuid) 

899 del self.vdis[uuid] 

900 

901 def _ensureSpaceAvailable(self, amount_needed): 

902 space_available = lvutil._getVGstats(self.vgname)['freespace'] 

903 if (space_available < amount_needed): 

904 util.SMlog("Not enough space! free space: %d, need: %d" % \ 

905 (space_available, amount_needed)) 

906 raise xs_errors.XenError('SRNoSpace') 

907 

908 def _handleInterruptedCloneOps(self): 

909 entries = self.journaler.getAll(LVHDVDI.JRN_CLONE) 

910        for uuid, val in entries.items(): 

911 util.fistpoint.activate("LVHDRT_clone_vdi_before_undo_clone", self.uuid) 

912 self._handleInterruptedCloneOp(uuid, val) 

913 util.fistpoint.activate("LVHDRT_clone_vdi_after_undo_clone", self.uuid) 

914 self.journaler.remove(LVHDVDI.JRN_CLONE, uuid) 

915 

916 def _handleInterruptedCoalesceLeaf(self): 

917 entries = self.journaler.getAll(cleanup.VDI.JRN_LEAF) 

918        if len(entries) > 0: 

919 util.SMlog("*** INTERRUPTED COALESCE-LEAF OP DETECTED ***") 

920 cleanup.gc_force(self.session, self.uuid) 

921 self.lvmCache.refresh() 

922 

923 def _handleInterruptedCloneOp(self, origUuid, jval, forceUndo=False): 

924 """Either roll back or finalize the interrupted snapshot/clone 

925 operation. Rolling back is unsafe if the leaf VHDs have already been 

926 in use and written to. However, it is always safe to roll back while 

927 we're still in the context of the failed snapshot operation since the 

928 VBD is paused for the duration of the operation""" 

929 util.SMlog("*** INTERRUPTED CLONE OP: for %s (%s)" % (origUuid, jval)) 

930 lvs = lvhdutil.getLVInfo(self.lvmCache) 
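        # the journal value encodes the base-copy UUID and the (possibly empty)
        # clone UUID, separated by '_'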

931 baseUuid, clonUuid = jval.split("_") 

932 

933 # is there a "base copy" VDI? 

934 if not lvs.get(baseUuid): 

935 # no base copy: make sure the original is there 

936 if lvs.get(origUuid): 

937 util.SMlog("*** INTERRUPTED CLONE OP: nothing to do") 

938 return 

939 raise util.SMException("base copy %s not present, " \ 

940 "but no original %s found" % (baseUuid, origUuid)) 

941 

942 if forceUndo: 

943 util.SMlog("Explicit revert") 

944 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) 

945 return 

946 

947 if not lvs.get(origUuid) or (clonUuid and not lvs.get(clonUuid)): 

948 util.SMlog("One or both leaves missing => revert") 

949 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) 

950 return 

951 

952 vdis = lvhdutil.getVDIInfo(self.lvmCache) 

953 if vdis[origUuid].scanError or (clonUuid and vdis[clonUuid].scanError): 

954 util.SMlog("One or both leaves invalid => revert") 

955 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) 

956 return 

957 

958 orig = vdis[origUuid] 

959 base = vdis[baseUuid] 

960 self.lvActivator.activate(baseUuid, base.lvName, False) 

961 self.lvActivator.activate(origUuid, orig.lvName, False) 

962 if orig.parentUuid != baseUuid: 

963 parent = vdis[orig.parentUuid] 

964 self.lvActivator.activate(parent.uuid, parent.lvName, False) 

965 origPath = os.path.join(self.path, orig.lvName) 

966 if not vhdutil.check(origPath): 

967 util.SMlog("Orig VHD invalid => revert") 

968 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) 

969 return 

970 

971 if clonUuid: 

972 clon = vdis[clonUuid] 

973 clonPath = os.path.join(self.path, clon.lvName) 

974 self.lvActivator.activate(clonUuid, clon.lvName, False) 

975 if not vhdutil.check(clonPath): 

976 util.SMlog("Clon VHD invalid => revert") 

977 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) 

978 return 

979 

980 util.SMlog("Snapshot appears valid, will not roll back") 

981 self._completeCloneOp(vdis, origUuid, baseUuid, clonUuid) 

982 

983 def _undoCloneOp(self, lvs, origUuid, baseUuid, clonUuid): 

984 base = lvs[baseUuid] 

985 basePath = os.path.join(self.path, base.name) 

986 

987 # make the parent RW 

988 if base.readonly: 

989 self.lvmCache.setReadonly(base.name, False) 

990 

991 ns = lvhdutil.NS_PREFIX_LVM + self.uuid 

992 origRefcountBinary = RefCounter.check(origUuid, ns)[1] 

993 origRefcountNormal = 0 

994 

995 # un-hide the parent 

996 if base.vdiType == vhdutil.VDI_TYPE_VHD: 

997 self.lvActivator.activate(baseUuid, base.name, False) 

998 origRefcountNormal = 1 

999 vhdInfo = vhdutil.getVHDInfo(basePath, lvhdutil.extractUuid, False) 

1000 if base.vdiType == vhdutil.VDI_TYPE_VHD and vhdInfo.hidden: 

1001 vhdutil.setHidden(basePath, False) 

1002 elif base.vdiType == vhdutil.VDI_TYPE_RAW and base.hidden: 

1003 self.lvmCache.setHidden(base.name, False) 

1004 

1005 # remove the child nodes 

1006 if clonUuid and lvs.get(clonUuid): 

1007 if lvs[clonUuid].vdiType != vhdutil.VDI_TYPE_VHD: 

1008 raise util.SMException("clone %s not VHD" % clonUuid) 

1009 self.lvmCache.remove(lvs[clonUuid].name) 

1010 if self.lvActivator.get(clonUuid, False): 

1011 self.lvActivator.remove(clonUuid, False) 

1012 if lvs.get(origUuid): 

1013 self.lvmCache.remove(lvs[origUuid].name) 

1014 

1015 # inflate the parent to fully-allocated size 

1016 if base.vdiType == vhdutil.VDI_TYPE_VHD: 

1017 fullSize = lvhdutil.calcSizeVHDLV(vhdInfo.sizeVirt) 

1018 lvhdutil.inflate(self.journaler, self.uuid, baseUuid, fullSize) 

1019 

1020 # rename back 

1021 origLV = lvhdutil.LV_PREFIX[base.vdiType] + origUuid 

1022 self.lvmCache.rename(base.name, origLV) 

1023 RefCounter.reset(baseUuid, ns) 

1024 if self.lvActivator.get(baseUuid, False): 

1025 self.lvActivator.replace(baseUuid, origUuid, origLV, False) 

1026 RefCounter.set(origUuid, origRefcountNormal, origRefcountBinary, ns) 

1027 

1028 # At this stage, tapdisk and SM vdi will be in paused state. Remove 

1029 # flag to facilitate vm deactivate 

1030 origVdiRef = self.session.xenapi.VDI.get_by_uuid(origUuid) 

1031 self.session.xenapi.VDI.remove_from_sm_config(origVdiRef, 'paused') 

1032 

1033 # update LVM metadata on slaves 

1034 slaves = util.get_slaves_attached_on(self.session, [origUuid]) 

1035 lvhdutil.lvRefreshOnSlaves(self.session, self.uuid, self.vgname, 

1036 origLV, origUuid, slaves) 

1037 

1038 util.SMlog("*** INTERRUPTED CLONE OP: rollback success") 

1039 

1040 def _completeCloneOp(self, vdis, origUuid, baseUuid, clonUuid): 

1041 """Finalize the interrupted snapshot/clone operation. This must not be 

1042 called from the live snapshot op context because we attempt to pause/ 

1043 unpause the VBD here (the VBD is already paused during snapshot, so it 

1044 would cause a deadlock)""" 

1045 base = vdis[baseUuid] 

1046 clon = None 

1047 if clonUuid: 

1048 clon = vdis[clonUuid] 

1049 

1050 cleanup.abort(self.uuid) 

1051 

1052 # make sure the parent is hidden and read-only 

1053 if not base.hidden: 

1054 if base.vdiType == vhdutil.VDI_TYPE_RAW: 

1055 self.lvmCache.setHidden(base.lvName) 

1056 else: 

1057 basePath = os.path.join(self.path, base.lvName) 

1058 vhdutil.setHidden(basePath) 

1059 if not base.lvReadonly: 

1060 self.lvmCache.setReadonly(base.lvName, True) 

1061 

1062 # NB: since this snapshot-preserving call is only invoked outside the 

1063 # snapshot op context, we assume the LVM metadata on the involved slave 

1064 # has by now been refreshed and do not attempt to do it here 

1065 

1066 # Update the original record 

1067 try: 

1068 vdi_ref = self.session.xenapi.VDI.get_by_uuid(origUuid) 

1069 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

1070 type = self.session.xenapi.VDI.get_type(vdi_ref) 

1071 sm_config["vdi_type"] = vhdutil.VDI_TYPE_VHD 

1072 sm_config['vhd-parent'] = baseUuid 

1073 self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config) 

1074 except XenAPI.Failure: 

1075 util.SMlog("ERROR updating the orig record") 

1076 

1077 # introduce the new VDI records 

1078 if clonUuid: 

1079 try: 

1080 clon_vdi = VDI.VDI(self, clonUuid) 

1081 clon_vdi.read_only = False 

1082 clon_vdi.location = clonUuid 

1083 clon_vdi.utilisation = clon.sizeLV 

1084 clon_vdi.sm_config = { 

1085 "vdi_type": vhdutil.VDI_TYPE_VHD, 

1086 "vhd-parent": baseUuid} 

1087 

1088 if not self.legacyMode: 

1089 LVMMetadataHandler(self.mdpath). \ 

1090 ensureSpaceIsAvailableForVdis(1) 

1091 

1092 clon_vdi_ref = clon_vdi._db_introduce() 

1093 util.SMlog("introduced clon VDI: %s (%s)" % \ 

1094 (clon_vdi_ref, clonUuid)) 

1095 

1096 vdi_info = {UUID_TAG: clonUuid, 

1097 NAME_LABEL_TAG: clon_vdi.label, 

1098 NAME_DESCRIPTION_TAG: clon_vdi.description, 

1099 IS_A_SNAPSHOT_TAG: 0, 

1100 SNAPSHOT_OF_TAG: '', 

1101 SNAPSHOT_TIME_TAG: '', 

1102 TYPE_TAG: type, 

1103 VDI_TYPE_TAG: clon_vdi.sm_config['vdi_type'], 

1104 READ_ONLY_TAG: int(clon_vdi.read_only), 

1105 MANAGED_TAG: int(clon_vdi.managed), 

1106 METADATA_OF_POOL_TAG: '' 

1107 } 

1108 

1109 if not self.legacyMode: 

1110 LVMMetadataHandler(self.mdpath).addVdi(vdi_info) 

1111 

1112 except XenAPI.Failure: 

1113 util.SMlog("ERROR introducing the clon record") 

1114 

1115 try: 

1116 base_vdi = VDI.VDI(self, baseUuid) # readonly parent 

1117 base_vdi.label = "base copy" 

1118 base_vdi.read_only = True 

1119 base_vdi.location = baseUuid 

1120 base_vdi.size = base.sizeVirt 

1121 base_vdi.utilisation = base.sizeLV 

1122 base_vdi.managed = False 

1123 base_vdi.sm_config = { 

1124 "vdi_type": vhdutil.VDI_TYPE_VHD, 

1125 "vhd-parent": baseUuid} 

1126 

1127 if not self.legacyMode: 

1128 LVMMetadataHandler(self.mdpath).ensureSpaceIsAvailableForVdis(1) 

1129 

1130 base_vdi_ref = base_vdi._db_introduce() 

1131 util.SMlog("introduced base VDI: %s (%s)" % \ 

1132 (base_vdi_ref, baseUuid)) 

1133 

1134 vdi_info = {UUID_TAG: baseUuid, 

1135 NAME_LABEL_TAG: base_vdi.label, 

1136 NAME_DESCRIPTION_TAG: base_vdi.description, 

1137 IS_A_SNAPSHOT_TAG: 0, 

1138 SNAPSHOT_OF_TAG: '', 

1139 SNAPSHOT_TIME_TAG: '', 

1140 TYPE_TAG: type, 

1141 VDI_TYPE_TAG: base_vdi.sm_config['vdi_type'], 

1142 READ_ONLY_TAG: int(base_vdi.read_only), 

1143 MANAGED_TAG: int(base_vdi.managed), 

1144 METADATA_OF_POOL_TAG: '' 

1145 } 

1146 

1147 if not self.legacyMode: 

1148 LVMMetadataHandler(self.mdpath).addVdi(vdi_info) 

1149 except XenAPI.Failure: 

1150 util.SMlog("ERROR introducing the base record") 

1151 

1152 util.SMlog("*** INTERRUPTED CLONE OP: complete") 

1153 

1154 def _undoAllJournals(self): 

1155 """Undo all VHD & SM interrupted journaled operations. This call must 

1156 be serialized with respect to all operations that create journals""" 

1157 # undoing interrupted inflates must be done first, since undoing VHD 

1158 # ops might require inflations 

1159 self.lock.acquire() 

1160 try: 

1161 self._undoAllInflateJournals() 

1162 self._undoAllVHDJournals() 

1163 self._handleInterruptedCloneOps() 

1164 self._handleInterruptedCoalesceLeaf() 

1165 finally: 

1166 self.lock.release() 

1167 self.cleanup() 

1168 

1169 def _undoAllInflateJournals(self): 

1170 entries = self.journaler.getAll(lvhdutil.JRN_INFLATE) 

1171 if len(entries) == 0: 

1172 return 

1173 self._loadvdis() 

1174 for uuid, val in entries.items(): 

1175 vdi = self.vdis.get(uuid) 

1176            if vdi: 

1177 util.SMlog("Found inflate journal %s, deflating %s to %s" % \ 

1178 (uuid, vdi.path, val)) 

1179                if vdi.readonly: 

1180 self.lvmCache.setReadonly(vdi.lvname, False) 

1181 self.lvActivator.activate(uuid, vdi.lvname, False) 

1182 currSizeLV = self.lvmCache.getSize(vdi.lvname) 
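                # wipe the VHD footer at the current end of the LV before
                # deflating (presumably so a stale footer is not left behind
                # in the freed space)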

1183 util.zeroOut(vdi.path, currSizeLV - vhdutil.VHD_FOOTER_SIZE, 

1184 vhdutil.VHD_FOOTER_SIZE) 

1185 lvhdutil.deflate(self.lvmCache, vdi.lvname, int(val)) 

1186                if vdi.readonly: 

1187 self.lvmCache.setReadonly(vdi.lvname, True) 

1188                if "true" == self.session.xenapi.SR.get_shared(self.sr_ref): 

1189 lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid, 

1190 self.vgname, vdi.lvname, uuid) 

1191 self.journaler.remove(lvhdutil.JRN_INFLATE, uuid) 

1192 delattr(self, "vdiInfo") 

1193 delattr(self, "allVDIs") 

1194 

1195 def _undoAllVHDJournals(self): 

1196 """check if there are VHD journals in existence and revert them""" 

1197 journals = lvhdutil.getAllVHDJournals(self.lvmCache) 

1198        if len(journals) == 0: 

1199 return 

1200 self._loadvdis() 

1201 for uuid, jlvName in journals: 

1202 vdi = self.vdis[uuid] 

1203 util.SMlog("Found VHD journal %s, reverting %s" % (uuid, vdi.path)) 

1204 self.lvActivator.activate(uuid, vdi.lvname, False) 

1205 self.lvmCache.activateNoRefcount(jlvName) 

1206 fullSize = lvhdutil.calcSizeVHDLV(vdi.size) 

1207 lvhdutil.inflate(self.journaler, self.uuid, vdi.uuid, fullSize) 

1208 try: 

1209 jFile = os.path.join(self.path, jlvName) 

1210 vhdutil.revert(vdi.path, jFile) 

1211 except util.CommandException: 

1212 util.logException("VHD journal revert") 

1213 vhdutil.check(vdi.path) 

1214 util.SMlog("VHD revert failed but VHD ok: removing journal") 

1215 # Attempt to reclaim unused space 

1216 vhdInfo = vhdutil.getVHDInfo(vdi.path, lvhdutil.extractUuid, False) 

1217 NewSize = lvhdutil.calcSizeVHDLV(vhdInfo.sizeVirt) 

1218 if NewSize < fullSize: 

1219 lvhdutil.deflate(self.lvmCache, vdi.lvname, int(NewSize)) 

1220 lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid, 

1221 self.vgname, vdi.lvname, uuid) 

1222 self.lvmCache.remove(jlvName) 

1223 delattr(self, "vdiInfo") 

1224 delattr(self, "allVDIs") 

1225 

1226 def _updateSlavesPreClone(self, hostRefs, origOldLV): 

1227 masterRef = util.get_this_host_ref(self.session) 

1228 args = {"vgName": self.vgname, 

1229 "action1": "deactivateNoRefcount", 

1230 "lvName1": origOldLV} 

1231 for hostRef in hostRefs: 

1232            if hostRef == masterRef: 

1233 continue 

1234 util.SMlog("Deactivate VDI on %s" % hostRef) 

1235 rv = self.session.xenapi.host.call_plugin(hostRef, self.PLUGIN_ON_SLAVE, "multi", args) 

1236 util.SMlog("call-plugin returned: %s" % rv) 

1237            if not rv: 

1238 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE) 

1239 

1240 def _updateSlavesOnClone(self, hostRefs, origOldLV, origLV, 

1241 baseUuid, baseLV): 

1242 """We need to reactivate the original LV on each slave (note that the 

1243 name for the original LV might change), as well as init the refcount 

1244 for the base LV""" 
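        # the on-slave 'multi' plugin takes numbered actions (action1, action2,
        # ...) and appears to apply them in order on the target host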

1245 args = {"vgName": self.vgname, 

1246 "action1": "refresh", 

1247 "lvName1": origLV, 

1248 "action2": "activate", 

1249 "ns2": lvhdutil.NS_PREFIX_LVM + self.uuid, 

1250 "lvName2": baseLV, 

1251 "uuid2": baseUuid} 

1252 

1253 masterRef = util.get_this_host_ref(self.session) 

1254 for hostRef in hostRefs: 

1255            if hostRef == masterRef: 

1256 continue 

1257 util.SMlog("Updating %s, %s, %s on slave %s" % \ 

1258 (origOldLV, origLV, baseLV, hostRef)) 

1259 rv = self.session.xenapi.host.call_plugin( 

1260 hostRef, self.PLUGIN_ON_SLAVE, "multi", args) 

1261 util.SMlog("call-plugin returned: %s" % rv) 

1262            if not rv: 

1263 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE) 

1264 

1265 def _updateSlavesOnCBTClone(self, hostRefs, cbtlog): 

1266 """Reactivate and refresh CBT log file on slaves""" 

1267 args = {"vgName": self.vgname, 

1268 "action1": "deactivateNoRefcount", 

1269 "lvName1": cbtlog, 

1270 "action2": "refresh", 

1271 "lvName2": cbtlog} 

1272 

1273 masterRef = util.get_this_host_ref(self.session) 

1274 for hostRef in hostRefs: 

1275            if hostRef == masterRef: 

1276 continue 

1277 util.SMlog("Updating %s on slave %s" % (cbtlog, hostRef)) 

1278 rv = self.session.xenapi.host.call_plugin( 

1279 hostRef, self.PLUGIN_ON_SLAVE, "multi", args) 

1280 util.SMlog("call-plugin returned: %s" % rv) 

1281            if not rv: 

1282 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE) 

1283 

1284 def _updateSlavesOnRemove(self, hostRefs, baseUuid, baseLV): 

1285 """Tell the slave we deleted the base image""" 

1286 args = {"vgName": self.vgname, 

1287 "action1": "cleanupLockAndRefcount", 

1288 "uuid1": baseUuid, 

1289 "ns1": lvhdutil.NS_PREFIX_LVM + self.uuid} 

1290 

1291 masterRef = util.get_this_host_ref(self.session) 

1292 for hostRef in hostRefs: 

1293            if hostRef == masterRef: 

1294 continue 

1295 util.SMlog("Cleaning locks for %s on slave %s" % (baseLV, hostRef)) 

1296 rv = self.session.xenapi.host.call_plugin( 

1297 hostRef, self.PLUGIN_ON_SLAVE, "multi", args) 

1298 util.SMlog("call-plugin returned: %s" % rv) 

1299            if not rv: 

1300 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE) 

1301 

1302 def _cleanup(self, skipLockCleanup=False): 

1303 """delete stale refcounter, flag, and lock files""" 

1304 RefCounter.resetAll(lvhdutil.NS_PREFIX_LVM + self.uuid) 

1305 IPCFlag(self.uuid).clearAll() 

1306        if not skipLockCleanup: 

1307 Lock.cleanupAll(self.uuid) 

1308 Lock.cleanupAll(lvhdutil.NS_PREFIX_LVM + self.uuid) 

1309 

1310 def _prepareTestMode(self): 

1311 util.SMlog("Test mode: %s" % self.testMode) 

1312        if self.ENV_VAR_VHD_TEST.get(self.testMode): 

1313 os.environ[self.ENV_VAR_VHD_TEST[self.testMode]] = "yes" 

1314 util.SMlog("Setting env %s" % self.ENV_VAR_VHD_TEST[self.testMode]) 

1315 

1316 def _kickGC(self): 

1317 util.SMlog("Kicking GC") 

1318 cleanup.start_gc_service(self.uuid) 

1319 

1320 def ensureCBTSpace(self): 

1321 # Ensure we have space for at least one LV 

1322 self._ensureSpaceAvailable(self.journaler.LV_SIZE) 

1323 

1324 

1325class LVHDVDI(VDI.VDI): 

1326 

1327 JRN_CLONE = "clone" # journal entry type for the clone operation 

1328 

1329 @override 

1330 def load(self, vdi_uuid) -> None: 

1331 self.lock = self.sr.lock 

1332 self.lvActivator = self.sr.lvActivator 

1333 self.loaded = False 

1334 self.vdi_type = vhdutil.VDI_TYPE_VHD 

1335        if self.sr.legacyMode or util.fistpoint.is_active("xenrt_default_vdi_type_legacy"): 

1336 self.vdi_type = vhdutil.VDI_TYPE_RAW 

1337 self.uuid = vdi_uuid 

1338 self.location = self.uuid 

1339 self.exists = True 

1340 

1341 if hasattr(self.sr, "vdiInfo") and self.sr.vdiInfo.get(self.uuid): 

1342 self._initFromVDIInfo(self.sr.vdiInfo[self.uuid]) 

1343            if self.parent: 

1344 self.sm_config_override['vhd-parent'] = self.parent 

1345 else: 

1346 self.sm_config_override['vhd-parent'] = None 

1347 return 

1348 

1349 # scan() didn't run: determine the type of the VDI manually 

1350 if self._determineType(): 

1351 return 

1352 

1353 # the VDI must be in the process of being created 

1354 self.exists = False 

1355        if "vdi_sm_config" in self.sr.srcmd.params and \ 

1356 "type" in self.sr.srcmd.params["vdi_sm_config"]: 

1357 type = self.sr.srcmd.params["vdi_sm_config"]["type"] 

1358 if type == PARAM_RAW: 

1359 self.vdi_type = vhdutil.VDI_TYPE_RAW 

1360 elif type == PARAM_VHD: 

1361 self.vdi_type = vhdutil.VDI_TYPE_VHD 

1362 if self.sr.cmd == 'vdi_create' and self.sr.legacyMode: 

1363 raise xs_errors.XenError('VDICreate', \ 

1364 opterr='Cannot create VHD type disk in legacy mode') 

1365 else: 

1366 raise xs_errors.XenError('VDICreate', opterr='bad type') 

1367 self.lvname = "%s%s" % (lvhdutil.LV_PREFIX[self.vdi_type], vdi_uuid) 

1368 self.path = os.path.join(self.sr.path, self.lvname) 

1369 

1370 @override 

1371 def create(self, sr_uuid, vdi_uuid, size) -> str: 

1372 util.SMlog("LVHDVDI.create for %s" % self.uuid) 

1373 if not self.sr.isMaster: 

1374 raise xs_errors.XenError('LVMMaster') 

1375 if self.exists: 

1376 raise xs_errors.XenError('VDIExists') 

1377 

1378 size = vhdutil.validate_and_round_vhd_size(int(size)) 

1379 

1380 util.SMlog("LVHDVDI.create: type = %s, %s (size=%s)" % \ 

1381 (self.vdi_type, self.path, size)) 

1382 lvSize = 0 

1383 self.sm_config = self.sr.srcmd.params["vdi_sm_config"] 

1384 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1385 lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT, int(size)) 

1386 else: 

1387 if self.sr.provision == "thin": 

1388 lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT, 

1389 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE)) 

1390 elif self.sr.provision == "thick": 

1391 lvSize = lvhdutil.calcSizeVHDLV(int(size)) 
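# Rough sizing sketch (assuming the usual 4 MiB LVM extent increment): a raw
# VDI gets an LV of the requested size rounded up to a whole extent; a
# thin-provisioned VHD starts with only the empty-VHD metadata overhead; a
# thick-provisioned VHD is allocated its full virtual size plus VHD overhead
# up front.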

1392 

1393 self.sr._ensureSpaceAvailable(lvSize) 

1394 

1395 try: 

1396 self.sr.lvmCache.create(self.lvname, lvSize) 

1397 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1398 self.size = self.sr.lvmCache.getSize(self.lvname) 

1399 else: 

1400 vhdutil.create(self.path, int(size), False, lvhdutil.MSIZE_MB) 

1401 self.size = vhdutil.getSizeVirt(self.path) 

1402 self.sr.lvmCache.deactivateNoRefcount(self.lvname) 

1403 except util.CommandException as e: 

1404 util.SMlog("Unable to create VDI") 

1405 self.sr.lvmCache.remove(self.lvname) 

1406 raise xs_errors.XenError('VDICreate', opterr='error %d' % e.code) 

1407 

1408 self.utilisation = lvSize 

1409 self.sm_config["vdi_type"] = self.vdi_type 

1410 

1411 if not self.sr.legacyMode: 

1412 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1) 

1413 

1414 self.ref = self._db_introduce() 

1415 self.sr._updateStats(self.sr.uuid, self.size) 

1416 

1417 vdi_info = {UUID_TAG: self.uuid, 

1418 NAME_LABEL_TAG: util.to_plain_string(self.label), 

1419 NAME_DESCRIPTION_TAG: util.to_plain_string(self.description), 

1420 IS_A_SNAPSHOT_TAG: 0, 

1421 SNAPSHOT_OF_TAG: '', 

1422 SNAPSHOT_TIME_TAG: '', 

1423 TYPE_TAG: self.ty, 

1424 VDI_TYPE_TAG: self.vdi_type, 

1425 READ_ONLY_TAG: int(self.read_only), 

1426 MANAGED_TAG: int(self.managed), 

1427 METADATA_OF_POOL_TAG: '' 

1428 } 

1429 

1430 if not self.sr.legacyMode: 

1431 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info) 

1432 

1433 return VDI.VDI.get_params(self) 

1434 

1435 @override 

1436 def delete(self, sr_uuid, vdi_uuid, data_only=False) -> None: 

1437 util.SMlog("LVHDVDI.delete for %s" % self.uuid) 

1438 try: 

1439 self._loadThis() 

1440 except xs_errors.SRException as e: 

1441 # Catch 'VDI doesn't exist' exception 

1442 if e.errno == 46: 

1443 return super(LVHDVDI, self).delete(sr_uuid, vdi_uuid, data_only) 

1444 raise 

1445 

1446 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1447 if not self.session.xenapi.VDI.get_managed(vdi_ref): 

1448 raise xs_errors.XenError("VDIDelete", \ 

1449 opterr="Deleting non-leaf node not permitted") 

1450 

1451 if not self.hidden: 

1452 self._markHidden() 

1453 

1454 if not data_only: 

1455 # Remove from XAPI and delete from MGT 

1456 self._db_forget() 

1457 else: 

1458 # If this is a data_destroy call, don't remove from XAPI db 

1459 # Only delete from MGT 

1460 if not self.sr.legacyMode: 

1461 LVMMetadataHandler(self.sr.mdpath).deleteVdiFromMetadata(self.uuid) 

1462 

1463 # deactivate here because it might be too late to do it in the "final" 

1464 # step: GC might have removed the LV by then 

1465 if self.sr.lvActivator.get(self.uuid, False): 

1466 self.sr.lvActivator.deactivate(self.uuid, False) 

1467 

1468 try: 

1469 self.sr.lvmCache.remove(self.lvname) 

1470 self.sr.lock.cleanup(vdi_uuid, lvhdutil.NS_PREFIX_LVM + sr_uuid) 

1471 self.sr.lock.cleanupAll(vdi_uuid) 

1472 except xs_errors.SRException as e: 

1473 util.SMlog( 

1474 "Failed to remove the volume (maybe is leaf coalescing) " 

1475 "for %s err:%d" % (self.uuid, e.errno)) 

1476 

1477 self.sr._updateStats(self.sr.uuid, -self.size) 

1478 self.sr._kickGC() 

1479 return super(LVHDVDI, self).delete(sr_uuid, vdi_uuid, data_only) 

1480 

1481 @override 

1482 def attach(self, sr_uuid, vdi_uuid) -> str: 

1483 util.SMlog("LVHDVDI.attach for %s" % self.uuid) 

1484 if self.sr.journaler.hasJournals(self.uuid): 

1485 raise xs_errors.XenError('VDIUnavailable', 

1486 opterr='Interrupted operation detected on this VDI, ' 

1487 'scan SR first to trigger auto-repair') 

1488 

1489 writable = ('args' not in self.sr.srcmd.params) or \ 

1490 (self.sr.srcmd.params['args'][0] == "true") 

1491 needInflate = True 

1492 if self.vdi_type == vhdutil.VDI_TYPE_RAW or not writable: 

1493 needInflate = False 

1494 else: 

1495 self._loadThis() 

1496 if self.utilisation >= lvhdutil.calcSizeVHDLV(self.size): 

1497 needInflate = False 
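# Inflation is only needed when a thin-provisioned VHD is attached read/write:
# the LV is grown to the fully-provisioned size so writes cannot run out of
# physical space while the VDI is attached. Raw VDIs, read-only attaches, and
# already fully inflated LVs skip this step.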

1498 

1499 if needInflate: 

1500 try: 

1501 self._prepareThin(True) 

1502 except: 

1503 util.logException("attach") 

1504 raise xs_errors.XenError('LVMProvisionAttach') 

1505 

1506 try: 

1507 return self._attach() 

1508 finally: 

1509 if not self.sr.lvActivator.deactivateAll(): 

1510 util.SMlog("Failed to deactivate LVs back (%s)" % self.uuid) 

1511 

1512 @override 

1513 def detach(self, sr_uuid, vdi_uuid) -> None: 

1514 util.SMlog("LVHDVDI.detach for %s" % self.uuid) 

1515 self._loadThis() 

1516 already_deflated = (self.utilisation < \ 

1517 lvhdutil.calcSizeVHDLV(self.size)) 

1518 needDeflate = True 

1519 if self.vdi_type == vhdutil.VDI_TYPE_RAW or already_deflated: 

1520 needDeflate = False 

1521 elif self.sr.provision == "thick": 

1522 needDeflate = False 

1523 # except for snapshots, which are always deflated 

1524 if self.sr.srcmd.cmd != 'vdi_detach_from_config': 

1525 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1526 snap = self.session.xenapi.VDI.get_is_a_snapshot(vdi_ref) 

1527 if snap: 

1528 needDeflate = True 
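# Deflation on detach is the inverse of the inflate-on-attach step:
# thin-provisioned VHDs are shrunk back to their physical utilisation.
# Thick-provisioned leaves stay inflated, except snapshots, which are always
# kept deflated.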

1529 

1530 if needDeflate: 

1531 try: 

1532 self._prepareThin(False) 

1533 except: 

1534 util.logException("_prepareThin") 

1535 raise xs_errors.XenError('VDIUnavailable', opterr='deflate') 

1536 

1537 try: 

1538 self._detach() 

1539 finally: 

1540 if not self.sr.lvActivator.deactivateAll(): 

1541 raise xs_errors.XenError("SMGeneral", opterr="deactivation") 

1542 

1543 # We only support offline resize 

1544 @override 

1545 def resize(self, sr_uuid, vdi_uuid, size) -> str: 

1546 util.SMlog("LVHDVDI.resize for %s" % self.uuid) 

1547 if not self.sr.isMaster: 

1548 raise xs_errors.XenError('LVMMaster') 

1549 

1550 self._loadThis() 

1551 if self.hidden: 

1552 raise xs_errors.XenError('VDIUnavailable', opterr='hidden VDI') 

1553 

1554 if size < self.size: 

1555 util.SMlog('vdi_resize: shrinking not supported: ' + \ 

1556 '(current size: %d, new size: %d)' % (self.size, size)) 

1557 raise xs_errors.XenError('VDISize', opterr='shrinking not allowed') 

1558 

1559 size = vhdutil.validate_and_round_vhd_size(int(size)) 

1560 

1561 if size == self.size: 

1562 return VDI.VDI.get_params(self) 

1563 

1564 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1565 lvSizeOld = self.size 

1566 lvSizeNew = util.roundup(lvutil.LVM_SIZE_INCREMENT, size) 

1567 else: 

1568 lvSizeOld = self.utilisation 

1569 lvSizeNew = lvhdutil.calcSizeVHDLV(size) 

1570 if self.sr.provision == "thin": 

1571 # VDI is currently deflated, so keep it deflated 

1572 lvSizeNew = lvSizeOld 
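# For VHD VDIs the LV normally has to grow to the new fully-provisioned size;
# thin-provisioned VDIs are left at their current (deflated) LV size and will
# be inflated on attach instead.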

1573 assert(lvSizeNew >= lvSizeOld) 

1574 spaceNeeded = lvSizeNew - lvSizeOld 

1575 self.sr._ensureSpaceAvailable(spaceNeeded) 

1576 

1577 oldSize = self.size 

1578 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1579 self.sr.lvmCache.setSize(self.lvname, lvSizeNew) 

1580 self.size = self.sr.lvmCache.getSize(self.lvname) 

1581 self.utilisation = self.size 

1582 else: 

1583 if lvSizeNew != lvSizeOld: 

1584 lvhdutil.inflate(self.sr.journaler, self.sr.uuid, self.uuid, 

1585 lvSizeNew) 

1586 vhdutil.setSizeVirtFast(self.path, size) 

1587 self.size = vhdutil.getSizeVirt(self.path) 

1588 self.utilisation = self.sr.lvmCache.getSize(self.lvname) 

1589 

1590 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1591 self.session.xenapi.VDI.set_virtual_size(vdi_ref, str(self.size)) 

1592 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, 

1593 str(self.utilisation)) 

1594 self.sr._updateStats(self.sr.uuid, self.size - oldSize) 

1595 super(LVHDVDI, self).resize_cbt(self.sr.uuid, self.uuid, self.size) 

1596 return VDI.VDI.get_params(self) 

1597 

1598 @override 

1599 def clone(self, sr_uuid, vdi_uuid) -> str: 

1600 return self._do_snapshot( 

1601 sr_uuid, vdi_uuid, VDI.SNAPSHOT_DOUBLE, cloneOp=True) 

1602 

1603 @override 

1604 def compose(self, sr_uuid, vdi1, vdi2) -> None: 

1605 util.SMlog("LVHDSR.compose for %s -> %s" % (vdi2, vdi1)) 

1606 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

1607 raise xs_errors.XenError('Unimplemented') 

1608 

1609 parent_uuid = vdi1 

1610 parent_lvname = lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD] + parent_uuid 

1611 assert(self.sr.lvmCache.checkLV(parent_lvname)) 

1612 parent_path = os.path.join(self.sr.path, parent_lvname) 

1613 

1614 self.sr.lvActivator.activate(self.uuid, self.lvname, False) 

1615 self.sr.lvActivator.activate(parent_uuid, parent_lvname, False) 

1616 

1617 vhdutil.setParent(self.path, parent_path, False) 

1618 vhdutil.setHidden(parent_path) 

1619 self.sr.session.xenapi.VDI.set_managed(self.sr.srcmd.params['args'][0], False) 

1620 

1621 if not blktap2.VDI.tap_refresh(self.session, self.sr.uuid, self.uuid, 

1622 True): 

1623 raise util.SMException("failed to refresh VDI %s" % self.uuid) 

1624 

1625 util.SMlog("Compose done") 

1626 

1627 def reset_leaf(self, sr_uuid, vdi_uuid): 

1628 util.SMlog("LVHDSR.reset_leaf for %s" % vdi_uuid) 

1629 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

1630 raise xs_errors.XenError('Unimplemented') 

1631 

1632 self.sr.lvActivator.activate(self.uuid, self.lvname, False) 

1633 

1634 # safety check 

1635 if not vhdutil.hasParent(self.path): 

1636 raise util.SMException("ERROR: VDI %s has no parent, " + \ 

1637 "will not reset contents" % self.uuid) 

1638 

1639 vhdutil.killData(self.path) 

1640 

1641 def _attach(self): 

1642 self._chainSetActive(True, True, True) 

1643 if not util.pathexists(self.path): 

1644 raise xs_errors.XenError('VDIUnavailable', \ 

1645 opterr='Could not find: %s' % self.path) 

1646 

1647 if not hasattr(self, 'xenstore_data'): 

1648 self.xenstore_data = {} 

1649 

1650 self.xenstore_data.update(scsiutil.update_XS_SCSIdata(self.uuid, \ 

1651 scsiutil.gen_synthetic_page_data(self.uuid))) 

1652 

1653 self.xenstore_data['storage-type'] = 'lvm' 

1654 self.xenstore_data['vdi-type'] = self.vdi_type 

1655 

1656 self.attached = True 

1657 self.sr.lvActivator.persist() 

1658 return VDI.VDI.attach(self, self.sr.uuid, self.uuid) 

1659 

1660 def _detach(self): 

1661 self._chainSetActive(False, True) 

1662 self.attached = False 

1663 

1664 @override 

1665 def _do_snapshot(self, sr_uuid, vdi_uuid, snapType, 

1666 cloneOp=False, secondary=None, cbtlog=None) -> str: 

1667 # If cbt enabled, save file consistency state 

1668 if cbtlog is not None: 

1669 if blktap2.VDI.tap_status(self.session, vdi_uuid): 1669 ↛ 1670 (the condition on line 1669 was never true)

1670 consistency_state = False 

1671 else: 

1672 consistency_state = True 

1673 util.SMlog("Saving log consistency state of %s for vdi: %s" % 

1674 (consistency_state, vdi_uuid)) 

1675 else: 

1676 consistency_state = None 

1677 

1678 pause_time = time.time() 

1679 if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid): 1679 ↛ 1680 (the condition on line 1679 was never true)

1680 raise util.SMException("failed to pause VDI %s" % vdi_uuid) 

1681 

1682 snapResult = None 

1683 try: 

1684 snapResult = self._snapshot(snapType, cloneOp, cbtlog, consistency_state) 

1685 except Exception as e1: 

1686 try: 

1687 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, 

1688 secondary=None) 

1689 except Exception as e2: 

1690 util.SMlog('WARNING: failed to clean up failed snapshot: ' 

1691 '%s (error ignored)' % e2) 

1692 raise 

1693 self.disable_leaf_on_secondary(vdi_uuid, secondary=secondary) 

1694 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary) 

1695 unpause_time = time.time() 

1696 if (unpause_time - pause_time) > LONG_SNAPTIME: 1696 ↛ 1697 (the condition on line 1696 was never true)

1697 util.SMlog('WARNING: snapshot paused VM for %s seconds' % 

1698 (unpause_time - pause_time)) 

1699 return snapResult 

1700 

1701 def _snapshot(self, snapType, cloneOp=False, cbtlog=None, cbt_consistency=None): 

1702 util.SMlog("LVHDVDI._snapshot for %s (type %s)" % (self.uuid, snapType)) 

1703 

1704 if not self.sr.isMaster: 1704 ↛ 1705 (the condition on line 1704 was never true)

1705 raise xs_errors.XenError('LVMMaster') 

1706 if self.sr.legacyMode: 1706 ↛ 1707 (the condition on line 1706 was never true)

1707 raise xs_errors.XenError('Unimplemented', opterr='In legacy mode') 

1708 

1709 self._loadThis() 

1710 if self.hidden: 1710 ↛ 1711 (the condition on line 1710 was never true)

1711 raise xs_errors.XenError('VDISnapshot', opterr='hidden VDI') 

1712 

1713 self.sm_config = self.session.xenapi.VDI.get_sm_config( \ 

1714 self.sr.srcmd.params['vdi_ref']) 

1715 if "type" in self.sm_config and self.sm_config['type'] == 'raw': 1715 ↛ 1716line 1715 didn't jump to line 1716, because the condition on line 1715 was never true

1716 if not util.fistpoint.is_active("testsm_clone_allow_raw"): 

1717 raise xs_errors.XenError('Unimplemented', \ 

1718 opterr='Raw VDI, snapshot or clone not permitted') 

1719 

1720 # we must activate the entire VHD chain because the real parent could 

1721 # theoretically be anywhere in the chain if all VHDs under it are empty 

1722 self._chainSetActive(True, False) 

1723 if not util.pathexists(self.path): 1723 ↛ 1724 (the condition on line 1723 was never true)

1724 raise xs_errors.XenError('VDIUnavailable', \ 

1725 opterr='VDI unavailable: %s' % (self.path)) 

1726 

1727 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 1727 ↛ 1735 (the condition on line 1727 was never false)

1728 depth = vhdutil.getDepth(self.path) 

1729 if depth == -1: 1729 ↛ 1730 (the condition on line 1729 was never true)

1730 raise xs_errors.XenError('VDIUnavailable', \ 

1731 opterr='failed to get VHD depth') 

1732 elif depth >= vhdutil.MAX_CHAIN_SIZE: 1732 ↛ 1733 (the condition on line 1732 was never true)

1733 raise xs_errors.XenError('SnapshotChainTooLong') 

1734 

1735 self.issnap = self.session.xenapi.VDI.get_is_a_snapshot( \ 

1736 self.sr.srcmd.params['vdi_ref']) 

1737 

1738 fullpr = lvhdutil.calcSizeVHDLV(self.size) 

1739 thinpr = util.roundup(lvutil.LVM_SIZE_INCREMENT, \ 

1740 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE)) 

1741 lvSizeOrig = thinpr 

1742 lvSizeClon = thinpr 

1743 

1744 hostRefs = [] 

1745 if self.sr.cmd == "vdi_snapshot": 

1746 hostRefs = util.get_hosts_attached_on(self.session, [self.uuid]) 

1747 if hostRefs: 1747 ↛ 1749 (the condition on line 1747 was never false)

1748 lvSizeOrig = fullpr 

1749 if self.sr.provision == "thick": 1749 ↛ 1755line 1749 didn't jump to line 1755, because the condition on line 1749 was never false

1750 if not self.issnap: 1750 ↛ 1751 (the condition on line 1750 was never true)

1751 lvSizeOrig = fullpr 

1752 if self.sr.cmd != "vdi_snapshot": 

1753 lvSizeClon = fullpr 

1754 

1755 if (snapType == VDI.SNAPSHOT_SINGLE or 1755 ↛ 1757 (the condition on line 1755 was never true)

1756 snapType == VDI.SNAPSHOT_INTERNAL): 

1757 lvSizeClon = 0 

1758 

1759 # the space required must include 2 journal LVs: a clone journal and an 

1760 # inflate journal (for the failure handling) 

1761 size_req = lvSizeOrig + lvSizeClon + 2 * self.sr.journaler.LV_SIZE 

1762 lvSizeBase = self.size 

1763 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 1763 ↛ 1767 (the condition on line 1763 was never false)

1764 lvSizeBase = util.roundup(lvutil.LVM_SIZE_INCREMENT, 

1765 vhdutil.getSizePhys(self.path)) 

1766 size_req -= (self.utilisation - lvSizeBase) 

1767 self.sr._ensureSpaceAvailable(size_req) 
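# In other words: space for the (possibly full-size) original leaf, the
# optional clone leaf, and two journal LVs, minus whatever the existing LV
# gives back when the base copy is deflated to its physical size.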

1768 

1769 if hostRefs: 

1770 self.sr._updateSlavesPreClone(hostRefs, self.lvname) 

1771 

1772 baseUuid = util.gen_uuid() 

1773 origUuid = self.uuid 

1774 clonUuid = "" 

1775 if snapType == VDI.SNAPSHOT_DOUBLE: 1775 ↛ 1777 (the condition on line 1775 was never false)

1776 clonUuid = util.gen_uuid() 

1777 jval = "%s_%s" % (baseUuid, clonUuid) 

1778 self.sr.journaler.create(self.JRN_CLONE, origUuid, jval) 

1779 util.fistpoint.activate("LVHDRT_clone_vdi_after_create_journal", self.sr.uuid) 

1780 

1781 try: 

1782 # self becomes the "base vdi" 

1783 origOldLV = self.lvname 

1784 baseLV = lvhdutil.LV_PREFIX[self.vdi_type] + baseUuid 

1785 self.sr.lvmCache.rename(self.lvname, baseLV) 

1786 self.sr.lvActivator.replace(self.uuid, baseUuid, baseLV, False) 

1787 RefCounter.set(baseUuid, 1, 0, lvhdutil.NS_PREFIX_LVM + self.sr.uuid) 

1788 self.uuid = baseUuid 

1789 self.lvname = baseLV 

1790 self.path = os.path.join(self.sr.path, baseLV) 

1791 self.label = "base copy" 

1792 self.read_only = True 

1793 self.location = self.uuid 

1794 self.managed = False 
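# From this point on, "self" is the hidden, read-only base copy; the original
# UUID lives on as the first snapshot child created below.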

1795 

1796 # shrink the base copy to the minimum - we do it before creating 

1797 # the snapshot volumes to avoid requiring double the space 

1798 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 1798 ↛ 1801 (the condition on line 1798 was never false)

1799 lvhdutil.deflate(self.sr.lvmCache, self.lvname, lvSizeBase) 

1800 self.utilisation = lvSizeBase 

1801 util.fistpoint.activate("LVHDRT_clone_vdi_after_shrink_parent", self.sr.uuid) 

1802 

1803 snapVDI = self._createSnap(origUuid, lvSizeOrig, False) 

1804 util.fistpoint.activate("LVHDRT_clone_vdi_after_first_snap", self.sr.uuid) 

1805 snapVDI2 = None 

1806 if snapType == VDI.SNAPSHOT_DOUBLE: 1806 ↛ 1812 (the condition on line 1806 was never false)

1807 snapVDI2 = self._createSnap(clonUuid, lvSizeClon, True) 

1808 # If we have CBT enabled on the VDI, 

1809 # set CBT status for the new snapshot disk 

1810 if cbtlog: 

1811 snapVDI2.cbt_enabled = True 

1812 util.fistpoint.activate("LVHDRT_clone_vdi_after_second_snap", self.sr.uuid) 

1813 

1814 # note: it is important to mark the parent hidden only AFTER the 

1815 # new VHD children have been created, which are referencing it; 

1816 # otherwise we would introduce a race with GC that could reclaim 

1817 # the parent before we snapshot it 

1818 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 1818 ↛ 1819 (the condition on line 1818 was never true)

1819 self.sr.lvmCache.setHidden(self.lvname) 

1820 else: 

1821 vhdutil.setHidden(self.path) 

1822 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_hidden", self.sr.uuid) 

1823 

1824 # set the base copy to ReadOnly 

1825 self.sr.lvmCache.setReadonly(self.lvname, True) 

1826 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_ro", self.sr.uuid) 

1827 

1828 if hostRefs: 

1829 self.sr._updateSlavesOnClone(hostRefs, origOldLV, 

1830 snapVDI.lvname, self.uuid, self.lvname) 

1831 

1832 # Update cbt files if user created snapshot (SNAPSHOT_DOUBLE) 

1833 if snapType == VDI.SNAPSHOT_DOUBLE and cbtlog: 

1834 snapVDI._cbt_snapshot(clonUuid, cbt_consistency) 

1835 if hostRefs: 1835 ↛ 1849 (the condition on line 1835 was never false)

1836 cbtlog_file = self._get_cbt_logname(snapVDI.uuid) 

1837 try: 

1838 self.sr._updateSlavesOnCBTClone(hostRefs, cbtlog_file) 

1839 except: 

1840 alert_name = "VDI_CBT_SNAPSHOT_FAILED" 

1841 alert_str = ("Creating CBT snapshot for {} failed" 

1842 .format(snapVDI.uuid)) 

1843 snapVDI._disable_cbt_on_error(alert_name, alert_str) 

1844 pass 

1845 

1846 except (util.SMException, XenAPI.Failure) as e: 

1847 util.logException("LVHDVDI._snapshot") 

1848 self._failClone(origUuid, jval, str(e)) 

1849 util.fistpoint.activate("LVHDRT_clone_vdi_before_remove_journal", self.sr.uuid) 

1850 

1851 self.sr.journaler.remove(self.JRN_CLONE, origUuid) 

1852 

1853 return self._finishSnapshot(snapVDI, snapVDI2, hostRefs, cloneOp, snapType) 

1854 

1855 def _createSnap(self, snapUuid, snapSizeLV, isNew): 

1856 """Snapshot self and return the snapshot VDI object""" 

1857 snapLV = lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD] + snapUuid 

1858 snapPath = os.path.join(self.sr.path, snapLV) 

1859 self.sr.lvmCache.create(snapLV, int(snapSizeLV)) 

1860 util.fistpoint.activate("LVHDRT_clone_vdi_after_lvcreate", self.sr.uuid) 

1861 if isNew: 

1862 RefCounter.set(snapUuid, 1, 0, lvhdutil.NS_PREFIX_LVM + self.sr.uuid) 

1863 self.sr.lvActivator.add(snapUuid, snapLV, False) 
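# vhdutil.snapshot writes a new (empty) child VHD into the snapshot LV whose
# parent locator points at self.path; parentRaw tells it the parent is a raw
# LV rather than a VHD.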

1864 parentRaw = (self.vdi_type == vhdutil.VDI_TYPE_RAW) 

1865 vhdutil.snapshot(snapPath, self.path, parentRaw, lvhdutil.MSIZE_MB) 

1866 snapParent = vhdutil.getParent(snapPath, lvhdutil.extractUuid) 

1867 

1868 snapVDI = LVHDVDI(self.sr, snapUuid) 

1869 snapVDI.read_only = False 

1870 snapVDI.location = snapUuid 

1871 snapVDI.size = self.size 

1872 snapVDI.utilisation = snapSizeLV 

1873 snapVDI.sm_config = dict() 

1874 for key, val in self.sm_config.items(): 1874 ↛ 1875 (the loop on line 1874 never started)

1875 if key not in [ 

1876 "type", "vdi_type", "vhd-parent", "paused", "relinking", "activating"] and \ 

1877 not key.startswith("host_"): 

1878 snapVDI.sm_config[key] = val 

1879 snapVDI.sm_config["vdi_type"] = vhdutil.VDI_TYPE_VHD 

1880 snapVDI.sm_config["vhd-parent"] = snapParent 

1881 snapVDI.lvname = snapLV 

1882 return snapVDI 

1883 

1884 def _finishSnapshot(self, snapVDI, snapVDI2, hostRefs, cloneOp=False, snapType=None): 

1885 if snapType is not VDI.SNAPSHOT_INTERNAL: 1885 ↛ 1887 (the condition on line 1885 was never false)

1886 self.sr._updateStats(self.sr.uuid, self.size) 

1887 basePresent = True 

1888 

1889 # Verify parent locator field of both children and delete basePath if 

1890 # unused 

1891 snapParent = snapVDI.sm_config["vhd-parent"] 

1892 snap2Parent = "" 

1893 if snapVDI2: 1893 ↛ 1895 (the condition on line 1893 was never false)

1894 snap2Parent = snapVDI2.sm_config["vhd-parent"] 

1895 if snapParent != self.uuid and \ 1895 ↛ 1922 (the condition on line 1895 was never false)

1896 (not snapVDI2 or snap2Parent != self.uuid): 

1897 util.SMlog("%s != %s != %s => deleting unused base %s" % \ 

1898 (snapParent, self.uuid, snap2Parent, self.lvname)) 

1899 RefCounter.put(self.uuid, False, lvhdutil.NS_PREFIX_LVM + self.sr.uuid) 

1900 self.sr.lvmCache.remove(self.lvname) 

1901 self.sr.lvActivator.remove(self.uuid, False) 

1902 if hostRefs: 

1903 self.sr._updateSlavesOnRemove(hostRefs, self.uuid, self.lvname) 

1904 basePresent = False 

1905 else: 

1906 # assign the _binary_ refcount of the original VDI to the new base 

1907 # VDI (but as the normal refcount, since binary refcounts are only 

1908 # for leaf nodes). The normal refcount of the child is not 

1909 # transferred to the base VDI because normal refcounts are 

1910 # incremented and decremented individually, and not based on the 

1911 # VHD chain (i.e., the child's normal refcount will be decremented 

1912 # independently of its parent situation). Add 1 for this clone op. 

1913 # Note that we do not need to protect the refcount operations 

1914 # below with per-VDI locking like we do in lvutil because at this 

1915 # point we have exclusive access to the VDIs involved. Other SM 

1916 # operations are serialized by the Agent or with the SR lock, and 

1917 # any coalesce activations are serialized with the SR lock. (The 

1918 # coalesce activates the coalesced VDI pair in the beginning, which 

1919 # cannot affect the VDIs here because they cannot possibly be 

1920 # involved in coalescing at this point, and at the relinkSkip step 

1921 # that activates the children, which takes the SR lock.) 

1922 ns = lvhdutil.NS_PREFIX_LVM + self.sr.uuid 

1923 (cnt, bcnt) = RefCounter.check(snapVDI.uuid, ns) 

1924 RefCounter.set(self.uuid, bcnt + 1, 0, ns) 

1925 

1926 # the "paused" and "host_*" sm-config keys are special and must stay on 

1927 # the leaf without being inherited by anyone else 

1928 for key in [x for x in self.sm_config.keys() if x == "paused" or x.startswith("host_")]: 1928 ↛ 1929 (the loop on line 1928 never started)

1929 snapVDI.sm_config[key] = self.sm_config[key] 

1930 del self.sm_config[key] 

1931 

1932 # Introduce any new VDI records & update the existing one 

1933 type = self.session.xenapi.VDI.get_type( \ 

1934 self.sr.srcmd.params['vdi_ref']) 

1935 if snapVDI2: 1935 ↛ 1977 (the condition on line 1935 was never false)

1936 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1) 

1937 vdiRef = snapVDI2._db_introduce() 

1938 if cloneOp: 

1939 vdi_info = {UUID_TAG: snapVDI2.uuid, 

1940 NAME_LABEL_TAG: util.to_plain_string( \ 

1941 self.session.xenapi.VDI.get_name_label( \ 

1942 self.sr.srcmd.params['vdi_ref'])), 

1943 NAME_DESCRIPTION_TAG: util.to_plain_string( \ 

1944 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])), 

1945 IS_A_SNAPSHOT_TAG: 0, 

1946 SNAPSHOT_OF_TAG: '', 

1947 SNAPSHOT_TIME_TAG: '', 

1948 TYPE_TAG: type, 

1949 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'], 

1950 READ_ONLY_TAG: 0, 

1951 MANAGED_TAG: int(snapVDI2.managed), 

1952 METADATA_OF_POOL_TAG: '' 

1953 } 

1954 else: 

1955 util.SMlog("snapshot VDI params: %s" % \ 

1956 self.session.xenapi.VDI.get_snapshot_time(vdiRef)) 

1957 vdi_info = {UUID_TAG: snapVDI2.uuid, 

1958 NAME_LABEL_TAG: util.to_plain_string( \ 

1959 self.session.xenapi.VDI.get_name_label( \ 

1960 self.sr.srcmd.params['vdi_ref'])), 

1961 NAME_DESCRIPTION_TAG: util.to_plain_string( \ 

1962 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])), 

1963 IS_A_SNAPSHOT_TAG: 1, 

1964 SNAPSHOT_OF_TAG: snapVDI.uuid, 

1965 SNAPSHOT_TIME_TAG: '', 

1966 TYPE_TAG: type, 

1967 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'], 

1968 READ_ONLY_TAG: 0, 

1969 MANAGED_TAG: int(snapVDI2.managed), 

1970 METADATA_OF_POOL_TAG: '' 

1971 } 

1972 

1973 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info) 

1974 util.SMlog("vdi_clone: introduced 2nd snap VDI: %s (%s)" % \ 

1975 (vdiRef, snapVDI2.uuid)) 

1976 

1977 if basePresent: 1977 ↛ 1978 (the condition on line 1977 was never true)

1978 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1) 

1979 vdiRef = self._db_introduce() 

1980 vdi_info = {UUID_TAG: self.uuid, 

1981 NAME_LABEL_TAG: self.label, 

1982 NAME_DESCRIPTION_TAG: self.description, 

1983 IS_A_SNAPSHOT_TAG: 0, 

1984 SNAPSHOT_OF_TAG: '', 

1985 SNAPSHOT_TIME_TAG: '', 

1986 TYPE_TAG: type, 

1987 VDI_TYPE_TAG: self.sm_config['vdi_type'], 

1988 READ_ONLY_TAG: 1, 

1989 MANAGED_TAG: 0, 

1990 METADATA_OF_POOL_TAG: '' 

1991 } 

1992 

1993 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info) 

1994 util.SMlog("vdi_clone: introduced base VDI: %s (%s)" % \ 

1995 (vdiRef, self.uuid)) 

1996 

1997 # Update the original record 

1998 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1999 self.session.xenapi.VDI.set_sm_config(vdi_ref, snapVDI.sm_config) 

2000 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, \ 

2001 str(snapVDI.utilisation)) 

2002 

2003 # Return the info on the new snap VDI 

2004 snap = snapVDI2 

2005 if not snap: 2005 ↛ 2006 (the condition on line 2005 was never true)

2006 snap = self 

2007 if not basePresent: 

2008 # a single-snapshot of an empty VDI will be a noop, resulting 

2009 # in no new VDIs, so return the existing one. The GC wouldn't 

2010 # normally try to single-snapshot an empty VHD of course, but 

2011 # if an external snapshot operation manages to sneak in right 

2012 # before a snapshot-coalesce phase, we would get here 

2013 snap = snapVDI 

2014 return snap.get_params() 

2015 

2016 def _initFromVDIInfo(self, vdiInfo): 

2017 self.vdi_type = vdiInfo.vdiType 

2018 self.lvname = vdiInfo.lvName 

2019 self.size = vdiInfo.sizeVirt 

2020 self.utilisation = vdiInfo.sizeLV 

2021 self.hidden = vdiInfo.hidden 

2022 if self.hidden: 2022 ↛ 2023 (the condition on line 2022 was never true)

2023 self.managed = False 

2024 self.active = vdiInfo.lvActive 

2025 self.readonly = vdiInfo.lvReadonly 

2026 self.parent = vdiInfo.parentUuid 

2027 self.path = os.path.join(self.sr.path, self.lvname) 

2028 if hasattr(self, "sm_config_override"): 2028 ↛ 2031line 2028 didn't jump to line 2031, because the condition on line 2028 was never false

2029 self.sm_config_override["vdi_type"] = self.vdi_type 

2030 else: 

2031 self.sm_config_override = {'vdi_type': self.vdi_type} 

2032 self.loaded = True 

2033 

2034 def _initFromLVInfo(self, lvInfo): 

2035 self.vdi_type = lvInfo.vdiType 

2036 self.lvname = lvInfo.name 

2037 self.size = lvInfo.size 

2038 self.utilisation = lvInfo.size 

2039 self.hidden = lvInfo.hidden 

2040 self.active = lvInfo.active 

2041 self.readonly = lvInfo.readonly 

2042 self.parent = '' 

2043 self.path = os.path.join(self.sr.path, self.lvname) 

2044 if hasattr(self, "sm_config_override"): 2044 ↛ 2047line 2044 didn't jump to line 2047, because the condition on line 2044 was never false

2045 self.sm_config_override["vdi_type"] = self.vdi_type 

2046 else: 

2047 self.sm_config_override = {'vdi_type': self.vdi_type} 

2048 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 2048 ↛ 2049 (the condition on line 2048 was never true)

2049 self.loaded = True 

2050 

2051 def _initFromVHDInfo(self, vhdInfo): 

2052 self.size = vhdInfo.sizeVirt 

2053 self.parent = vhdInfo.parentUuid 

2054 self.hidden = vhdInfo.hidden 

2055 self.loaded = True 

2056 

2057 def _determineType(self): 

2058 """Determine whether this is a raw or a VHD VDI""" 

2059 if "vdi_ref" in self.sr.srcmd.params: 2059 ↛ 2072line 2059 didn't jump to line 2072, because the condition on line 2059 was never false

2060 vdi_ref = self.sr.srcmd.params["vdi_ref"] 

2061 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

2062 if sm_config.get("vdi_type"): 2062 ↛ 2063line 2062 didn't jump to line 2063, because the condition on line 2062 was never true

2063 self.vdi_type = sm_config["vdi_type"] 

2064 prefix = lvhdutil.LV_PREFIX[self.vdi_type] 

2065 self.lvname = "%s%s" % (prefix, self.uuid) 

2066 self.path = os.path.join(self.sr.path, self.lvname) 

2067 self.sm_config_override = sm_config 

2068 return True 

2069 

2070 # LVM commands can be costly, so check the file directly first in case 

2071 # the LV is active 

2072 found = False 

2073 for t in lvhdutil.VDI_TYPES: 2073 ↛ 2074 (the loop on line 2073 never started)

2074 lvname = "%s%s" % (lvhdutil.LV_PREFIX[t], self.uuid) 

2075 path = os.path.join(self.sr.path, lvname) 

2076 if util.pathexists(path): 

2077 if found: 

2078 raise xs_errors.XenError('VDILoad', 

2079 opterr="multiple VDI's: uuid %s" % self.uuid) 

2080 found = True 

2081 self.vdi_type = t 

2082 self.lvname = lvname 

2083 self.path = path 

2084 if found: 2084 ↛ 2085 (the condition on line 2084 was never true)

2085 return True 

2086 

2087 # now list all LV's 

2088 if not lvutil._checkVG(self.sr.vgname): 2088 ↛ 2090 (the condition on line 2088 was never true)

2089 # when doing attach_from_config, the VG won't be there yet 

2090 return False 

2091 

2092 lvs = lvhdutil.getLVInfo(self.sr.lvmCache) 

2093 if lvs.get(self.uuid): 

2094 self._initFromLVInfo(lvs[self.uuid]) 

2095 return True 

2096 return False 

2097 

2098 def _loadThis(self): 

2099 """Load VDI info for this VDI and activate the LV if it's VHD. We 

2100 don't do it in VDI.load() because not all VDI operations need it.""" 

2101 if self.loaded: 2101 ↛ 2102 (the condition on line 2101 was never true)

2102 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 

2103 self.sr.lvActivator.activate(self.uuid, self.lvname, False) 

2104 return 

2105 try: 

2106 lvs = lvhdutil.getLVInfo(self.sr.lvmCache, self.lvname) 

2107 except util.CommandException as e: 

2108 raise xs_errors.XenError('VDIUnavailable', 

2109 opterr='%s (LV scan error)' % os.strerror(abs(e.code))) 

2110 if not lvs.get(self.uuid): 2110 ↛ 2111 (the condition on line 2110 was never true)

2111 raise xs_errors.XenError('VDIUnavailable', opterr='LV not found') 

2112 self._initFromLVInfo(lvs[self.uuid]) 

2113 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 2113 ↛ 2120 (the condition on line 2113 was never false)

2114 self.sr.lvActivator.activate(self.uuid, self.lvname, False) 

2115 vhdInfo = vhdutil.getVHDInfo(self.path, lvhdutil.extractUuid, False) 

2116 if not vhdInfo: 2116 ↛ 2117 (the condition on line 2116 was never true)

2117 raise xs_errors.XenError('VDIUnavailable', \ 

2118 opterr='getVHDInfo failed') 

2119 self._initFromVHDInfo(vhdInfo) 

2120 self.loaded = True 

2121 

2122 def _chainSetActive(self, active, binary, persistent=False): 

2123 if binary: 2123 ↛ 2124 (the condition on line 2123 was never true)

2124 (count, bcount) = RefCounter.checkLocked(self.uuid, 

2125 lvhdutil.NS_PREFIX_LVM + self.sr.uuid) 

2126 if (active and bcount > 0) or (not active and bcount == 0): 

2127 return # this is a redundant activation/deactivation call 

2128 

2129 vdiList = {self.uuid: self.lvname} 

2130 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 2130 ↛ 2133 (the condition on line 2130 was never false)

2131 vdiList = vhdutil.getParentChain(self.lvname, 

2132 lvhdutil.extractUuid, self.sr.vgname) 
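# getParentChain returns a {uuid: lvName} map for this VDI and all of its
# ancestors, so the whole VHD chain gets (de)activated together.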

2133 for uuid, lvName in vdiList.items(): 2133 ↛ 2134 (the loop on line 2133 never started)

2134 binaryParam = binary 

2135 if uuid != self.uuid: 

2136 binaryParam = False # binary param only applies to leaf nodes 

2137 if active: 

2138 self.sr.lvActivator.activate(uuid, lvName, binaryParam, 

2139 persistent) 

2140 else: 

2141 # just add the LVs for deactivation in the final (cleanup) 

2142 # step. The LVs must not have been activated during the current 

2143 # operation 

2144 self.sr.lvActivator.add(uuid, lvName, binaryParam) 

2145 

2146 def _failClone(self, uuid, jval, msg): 

2147 try: 

2148 self.sr._handleInterruptedCloneOp(uuid, jval, True) 

2149 self.sr.journaler.remove(self.JRN_CLONE, uuid) 

2150 except Exception as e: 

2151 util.SMlog('WARNING: failed to clean up failed snapshot: ' \ 

2152 ' %s (error ignored)' % e) 

2153 raise xs_errors.XenError('VDIClone', opterr=msg) 

2154 

2155 def _markHidden(self): 

2156 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

2157 self.sr.lvmCache.setHidden(self.lvname) 

2158 else: 

2159 vhdutil.setHidden(self.path) 

2160 self.hidden = 1 

2161 

2162 def _prepareThin(self, attach): 

2163 origUtilisation = self.sr.lvmCache.getSize(self.lvname) 

2164 if self.sr.isMaster: 

2165 # the master can prepare the VDI locally 

2166 if attach: 

2167 lvhdutil.attachThin(self.sr.journaler, self.sr.uuid, self.uuid) 

2168 else: 

2169 lvhdutil.detachThin(self.session, self.sr.lvmCache, 

2170 self.sr.uuid, self.uuid) 

2171 else: 

2172 fn = "attach" 

2173 if not attach: 

2174 fn = "detach" 

2175 pools = self.session.xenapi.pool.get_all() 

2176 master = self.session.xenapi.pool.get_master(pools[0]) 

2177 rv = self.session.xenapi.host.call_plugin( 

2178 master, self.sr.THIN_PLUGIN, fn, 

2179 {"srUuid": self.sr.uuid, "vdiUuid": self.uuid}) 

2180 util.SMlog("call-plugin returned: %s" % rv) 

2181 if not rv: 

2182 raise Exception('plugin %s failed' % self.sr.THIN_PLUGIN) 

2183 # refresh to pick up the size change on this slave 

2184 self.sr.lvmCache.activateNoRefcount(self.lvname, True) 

2185 

2186 self.utilisation = self.sr.lvmCache.getSize(self.lvname) 

2187 if origUtilisation != self.utilisation: 

2188 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

2189 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, 

2190 str(self.utilisation)) 

2191 stats = lvutil._getVGstats(self.sr.vgname) 

2192 sr_utilisation = stats['physical_utilisation'] 

2193 self.session.xenapi.SR.set_physical_utilisation(self.sr.sr_ref, 

2194 str(sr_utilisation)) 

2195 

2196 @override 

2197 def update(self, sr_uuid, vdi_uuid) -> None: 

2198 if self.sr.legacyMode: 

2199 return 

2200 

2201 # Sync the name_label of this VDI on storage with the name_label in XAPI 

2202 vdi_ref = self.session.xenapi.VDI.get_by_uuid(self.uuid) 

2203 update_map = {} 

2204 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \ 

2205 METADATA_OBJECT_TYPE_VDI 

2206 update_map[UUID_TAG] = self.uuid 

2207 update_map[NAME_LABEL_TAG] = util.to_plain_string( \ 

2208 self.session.xenapi.VDI.get_name_label(vdi_ref)) 

2209 update_map[NAME_DESCRIPTION_TAG] = util.to_plain_string( \ 

2210 self.session.xenapi.VDI.get_name_description(vdi_ref)) 

2211 update_map[SNAPSHOT_TIME_TAG] = \ 

2212 self.session.xenapi.VDI.get_snapshot_time(vdi_ref) 

2213 update_map[METADATA_OF_POOL_TAG] = \ 

2214 self.session.xenapi.VDI.get_metadata_of_pool(vdi_ref) 

2215 LVMMetadataHandler(self.sr.mdpath).updateMetadata(update_map) 

2216 

2217 @override 

2218 def _ensure_cbt_space(self) -> None: 

2219 self.sr.ensureCBTSpace() 

2220 

2221 @override 

2222 def _create_cbt_log(self) -> str: 

2223 logname = self._get_cbt_logname(self.uuid) 

2224 self.sr.lvmCache.create(logname, self.sr.journaler.LV_SIZE, CBTLOG_TAG) 
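# The CBT log LV is journal-sized and created with the CBTLOG_TAG tag,
# which appears to be how the rest of the driver tells it apart from VHD/raw
# data volumes.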

2225 logpath = super(LVHDVDI, self)._create_cbt_log() 

2226 self.sr.lvmCache.deactivateNoRefcount(logname) 

2227 return logpath 

2228 

2229 @override 

2230 def _delete_cbt_log(self) -> None: 

2231 logpath = self._get_cbt_logpath(self.uuid) 

2232 if self._cbt_log_exists(logpath): 

2233 logname = self._get_cbt_logname(self.uuid) 

2234 self.sr.lvmCache.remove(logname) 

2235 

2236 @override 

2237 def _rename(self, oldpath, newpath) -> None: 

2238 oldname = os.path.basename(oldpath) 

2239 newname = os.path.basename(newpath) 

2240 self.sr.lvmCache.rename(oldname, newname) 

2241 

2242 @override 

2243 def _activate_cbt_log(self, lv_name) -> bool: 

2244 self.sr.lvmCache.refresh() 

2245 if not self.sr.lvmCache.is_active(lv_name): 2245 ↛ 2246 (the condition on line 2245 was never true)

2246 try: 

2247 self.sr.lvmCache.activateNoRefcount(lv_name) 

2248 return True 

2249 except Exception as e: 

2250 util.SMlog("Exception in _activate_cbt_log, " 

2251 "Error: %s." % str(e)) 

2252 raise 

2253 else: 

2254 return False 

2255 

2256 @override 

2257 def _deactivate_cbt_log(self, lv_name) -> None: 

2258 try: 

2259 self.sr.lvmCache.deactivateNoRefcount(lv_name) 

2260 except Exception as e: 

2261 util.SMlog("Exception in _deactivate_cbt_log, Error: %s." % str(e)) 

2262 raise 

2263 

2264 @override 

2265 def _cbt_log_exists(self, logpath) -> bool: 

2266 return lvutil.exists(logpath) 

2267 

2268if __name__ == '__main__': 2268 ↛ 2269 (the condition on line 2268 was never true)

2269 SRCommand.run(LVHDSR, DRIVER_INFO) 

2270else: 

2271 SR.registerSR(LVHDSR)