1#!/usr/bin/python3 

2# 

3# Copyright (C) Citrix Systems Inc. 

4# 

5# This program is free software; you can redistribute it and/or modify 

6# it under the terms of the GNU Lesser General Public License as published 

7# by the Free Software Foundation; version 2.1 only. 

8# 

9# This program is distributed in the hope that it will be useful, 

10# but WITHOUT ANY WARRANTY; without even the implied warranty of 

11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

12# GNU Lesser General Public License for more details. 

13# 

14# You should have received a copy of the GNU Lesser General Public License 

15# along with this program; if not, write to the Free Software Foundation, Inc., 

16# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 

17# 

18# LVHDSR: VHD on LVM storage repository 

19# 

20 

21from sm_typing import Dict, List, override 

22 

23import SR 

24from SR import deviceCheck 

25import VDI 

26import SRCommand 

27import util 

28import lvutil 

29import lvmcache 

30import vhdutil 

31import lvhdutil 

32import scsiutil 

33import os 

34import sys 

35import time 

36import errno 

37import xs_errors 

38import cleanup 

39import blktap2 

40from journaler import Journaler 

41from lock import Lock 

42from refcounter import RefCounter 

43from ipc import IPCFlag 

44from lvmanager import LVActivator 

45import XenAPI # pylint: disable=import-error 

46import re 

47from srmetadata import ALLOCATION_TAG, NAME_LABEL_TAG, NAME_DESCRIPTION_TAG, \ 

48 UUID_TAG, IS_A_SNAPSHOT_TAG, SNAPSHOT_OF_TAG, TYPE_TAG, VDI_TYPE_TAG, \ 

49 READ_ONLY_TAG, MANAGED_TAG, SNAPSHOT_TIME_TAG, METADATA_OF_POOL_TAG, \ 

50 LVMMetadataHandler, METADATA_OBJECT_TYPE_VDI, \ 

51 METADATA_OBJECT_TYPE_SR, METADATA_UPDATE_OBJECT_TYPE_TAG 

52from metadata import retrieveXMLfromFile, _parseXML 

53from xmlrpc.client import DateTime 

54import glob 

55from constants import CBTLOG_TAG 

56from fairlock import Fairlock 

57DEV_MAPPER_ROOT = os.path.join('/dev/mapper', lvhdutil.VG_PREFIX) 

58 

59geneology: Dict[str, List[str]] = {} 

60CAPABILITIES = ["SR_PROBE", "SR_UPDATE", "SR_TRIM", 

61 "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH", "VDI_MIRROR", 

62 "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "ATOMIC_PAUSE", 

63 "VDI_RESET_ON_BOOT/2", "VDI_UPDATE", "VDI_CONFIG_CBT", 

64 "VDI_ACTIVATE", "VDI_DEACTIVATE"] 

65 

66CONFIGURATION = [['device', 'local device path (required) (e.g. /dev/sda3)']] 

67 

68DRIVER_INFO = { 

69 'name': 'Local VHD on LVM', 

70 'description': 'SR plugin which represents disks as VHD disks on ' + \ 

71 'Logical Volumes within a locally-attached Volume Group', 

72 'vendor': 'XenSource Inc', 

73 'copyright': '(C) 2008 XenSource Inc', 

74 'driver_version': '1.0', 

75 'required_api_version': '1.0', 

76 'capabilities': CAPABILITIES, 

77 'configuration': CONFIGURATION 

78 } 

79 

80PARAM_VHD = "vhd" 

81PARAM_RAW = "raw" 

82 

83OPS_EXCLUSIVE = [ 

84 "sr_create", "sr_delete", "sr_attach", "sr_detach", "sr_scan", 

85 "sr_update", "vdi_create", "vdi_delete", "vdi_resize", "vdi_snapshot", 

86 "vdi_clone"] 

87 

88# Log if snapshot pauses VM for more than this many seconds 

89LONG_SNAPTIME = 60 

90 

91class LVHDSR(SR.SR): 

92 DRIVER_TYPE = 'lvhd' 

93 

94 PROVISIONING_TYPES = ["thin", "thick"] 

95 PROVISIONING_DEFAULT = "thick" 

96 THIN_PLUGIN = "lvhd-thin" 

97 

98 PLUGIN_ON_SLAVE = "on-slave" 

99 

100 FLAG_USE_VHD = "use_vhd" 

101 MDVOLUME_NAME = "MGT" 

102 

103 ALLOCATION_QUANTUM = "allocation_quantum" 

104 INITIAL_ALLOCATION = "initial_allocation" 

105 

106 LOCK_RETRY_INTERVAL = 3 

107 LOCK_RETRY_ATTEMPTS = 10 

108 

109 TEST_MODE_KEY = "testmode" 

110 TEST_MODE_VHD_FAIL_REPARENT_BEGIN = "vhd_fail_reparent_begin" 

111 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR = "vhd_fail_reparent_locator" 

112 TEST_MODE_VHD_FAIL_REPARENT_END = "vhd_fail_reparent_end" 

113 TEST_MODE_VHD_FAIL_RESIZE_BEGIN = "vhd_fail_resize_begin" 

114 TEST_MODE_VHD_FAIL_RESIZE_DATA = "vhd_fail_resize_data" 

115 TEST_MODE_VHD_FAIL_RESIZE_METADATA = "vhd_fail_resize_metadata" 

116 TEST_MODE_VHD_FAIL_RESIZE_END = "vhd_fail_resize_end" 

117 

118 ENV_VAR_VHD_TEST = { 

119 TEST_MODE_VHD_FAIL_REPARENT_BEGIN: 

120 "VHD_UTIL_TEST_FAIL_REPARENT_BEGIN", 

121 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR: 

122 "VHD_UTIL_TEST_FAIL_REPARENT_LOCATOR", 

123 TEST_MODE_VHD_FAIL_REPARENT_END: 

124 "VHD_UTIL_TEST_FAIL_REPARENT_END", 

125 TEST_MODE_VHD_FAIL_RESIZE_BEGIN: 

126 "VHD_UTIL_TEST_FAIL_RESIZE_BEGIN", 

127 TEST_MODE_VHD_FAIL_RESIZE_DATA: 

128 "VHD_UTIL_TEST_FAIL_RESIZE_DATA_MOVED", 

129 TEST_MODE_VHD_FAIL_RESIZE_METADATA: 

130 "VHD_UTIL_TEST_FAIL_RESIZE_METADATA_MOVED", 

131 TEST_MODE_VHD_FAIL_RESIZE_END: 

132 "VHD_UTIL_TEST_FAIL_RESIZE_END" 

133 } 

134 testMode = "" 

135 

136 legacyMode = True 

137 

138 @override 

139 @staticmethod 

140 def handles(type) -> bool: 

141 """Returns True if this SR class understands the given dconf string""" 

142 # we can pose as LVMSR or EXTSR for compatibility purposes 

143 if __name__ == '__main__': 

144 name = sys.argv[0] 

145 else: 

146 name = __name__ 

147 if name.endswith("LVMSR"): 

148 return type == "lvm" 

149 elif name.endswith("EXTSR"): 

150 return type == "ext" 

151 return type == LVHDSR.DRIVER_TYPE 

152 

153 @override 

154 def load(self, sr_uuid) -> None: 
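# Initialise per-SR state (master flag, SR lock, VG name/path, LVM cache, LV activator,
# journaler), pick up provisioning/test-mode/use_vhd settings from dconf, other-config and
# sm-config, replay journals on the master, and build the VDI-type map from the LVs on storage.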

155 self.ops_exclusive = OPS_EXCLUSIVE 

156 

157 self.isMaster = False 

158 if 'SRmaster' in self.dconf and self.dconf['SRmaster'] == 'true': 

159 self.isMaster = True 

160 

161 self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid) 

162 self.sr_vditype = SR.DEFAULT_TAP 

163 self.uuid = sr_uuid 

164 self.vgname = lvhdutil.VG_PREFIX + self.uuid 

165 self.path = os.path.join(lvhdutil.VG_LOCATION, self.vgname) 

166 self.mdpath = os.path.join(self.path, self.MDVOLUME_NAME) 

167 self.provision = self.PROVISIONING_DEFAULT 

168 

169 self.other_conf = None 

170 has_sr_ref = self.srcmd.params.get("sr_ref") 

171 if has_sr_ref: 

172 self.other_conf = self.session.xenapi.SR.get_other_config(self.sr_ref) 

173 

174 self.lvm_conf = None 

175 if self.other_conf: 

176 self.lvm_conf = self.other_conf.get('lvm-conf') 

177 

178 try: 

179 self.lvmCache = lvmcache.LVMCache(self.vgname, self.lvm_conf) 

180 except: 

181 raise xs_errors.XenError('SRUnavailable', \ 

182 opterr='Failed to initialise the LVMCache') 

183 self.lvActivator = LVActivator(self.uuid, self.lvmCache) 

184 self.journaler = Journaler(self.lvmCache) 

185 if not has_sr_ref: 

186 return # must be a probe call 

187 # Test for thick vs thin provisioning conf parameter 

188 if 'allocation' in self.dconf: 

189 if self.dconf['allocation'] in self.PROVISIONING_TYPES: 

190 self.provision = self.dconf['allocation'] 

191 else: 

192 raise xs_errors.XenError('InvalidArg', \ 

193 opterr='Allocation parameter must be one of %s' % self.PROVISIONING_TYPES) 

194 

195 if self.other_conf.get(self.TEST_MODE_KEY): 

196 self.testMode = self.other_conf[self.TEST_MODE_KEY] 

197 self._prepareTestMode() 

198 

199 self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref) 

200 # sm_config flag overrides PBD, if any 

201 if self.sm_config.get('allocation') in self.PROVISIONING_TYPES: 

202 self.provision = self.sm_config.get('allocation') 

203 

204 if self.sm_config.get(self.FLAG_USE_VHD) == "true": 

205 self.legacyMode = False 

206 

207 if lvutil._checkVG(self.vgname): 

208 if self.isMaster and not self.cmd in ["vdi_attach", "vdi_detach", 

209 "vdi_activate", "vdi_deactivate"]: 

210 self._undoAllJournals() 

211 if not self.cmd in ["sr_attach", "sr_probe"]: 

212 self._checkMetadataVolume() 

213 

214 self.mdexists = False 

215 

216 # get a VDI -> TYPE map from the storage 

217 contains_uuid_regex = \ 

218 re.compile("^.*[0-9a-f]{8}-(([0-9a-f]{4})-){3}[0-9a-f]{12}.*") 

219 self.storageVDIs = {} 

220 

221 for key in self.lvmCache.lvs.keys(): 

222 # if the lvname has a uuid in it 

223 type = None 

224 if contains_uuid_regex.search(key) is not None: 

225 if key.startswith(lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD]): 

226 type = vhdutil.VDI_TYPE_VHD 

227 vdi = key[len(lvhdutil.LV_PREFIX[type]):] 

228 elif key.startswith(lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_RAW]): 

229 type = vhdutil.VDI_TYPE_RAW 

230 vdi = key[len(lvhdutil.LV_PREFIX[type]):] 

231 else: 

232 continue 

233 

234 if type is not None: 

235 self.storageVDIs[vdi] = type 

236 

237 # check if metadata volume exists 

238 try: 

239 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME) 

240 except: 

241 pass 

242 

243 @override 

244 def cleanup(self) -> None: 

245 # we don't need to hold the lock to dec refcounts of activated LVs 

246 if not self.lvActivator.deactivateAll(): 

247 raise util.SMException("failed to deactivate LVs") 

248 

249 def updateSRMetadata(self, allocation): 
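# Rebuild the SR-level and per-VDI metadata from current XAPI state and write it
# to the MGT volume.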

250 try: 

251 # Add SR specific SR metadata 

252 sr_info = \ 

253 {ALLOCATION_TAG: allocation, 

254 UUID_TAG: self.uuid, 

255 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_label(self.sr_ref)), 

256 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_description(self.sr_ref)) 

257 } 

258 

259 vdi_info = {} 

260 for vdi in self.session.xenapi.SR.get_VDIs(self.sr_ref): 

261 vdi_uuid = self.session.xenapi.VDI.get_uuid(vdi) 

262 

263 # Create the VDI entry in the SR metadata 

264 vdi_info[vdi_uuid] = \ 

265 { 

266 UUID_TAG: vdi_uuid, 

267 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi)), 

268 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi)), 

269 IS_A_SNAPSHOT_TAG: \ 

270 int(self.session.xenapi.VDI.get_is_a_snapshot(vdi)), 

271 SNAPSHOT_OF_TAG: \ 

272 self.session.xenapi.VDI.get_snapshot_of(vdi), 

273 SNAPSHOT_TIME_TAG: \ 

274 self.session.xenapi.VDI.get_snapshot_time(vdi), 

275 TYPE_TAG: \ 

276 self.session.xenapi.VDI.get_type(vdi), 

277 VDI_TYPE_TAG: \ 

278 self.session.xenapi.VDI.get_sm_config(vdi)['vdi_type'], 

279 READ_ONLY_TAG: \ 

280 int(self.session.xenapi.VDI.get_read_only(vdi)), 

281 METADATA_OF_POOL_TAG: \ 

282 self.session.xenapi.VDI.get_metadata_of_pool(vdi), 

283 MANAGED_TAG: \ 

284 int(self.session.xenapi.VDI.get_managed(vdi)) 

285 } 

286 LVMMetadataHandler(self.mdpath).writeMetadata(sr_info, vdi_info) 

287 

288 except Exception as e: 

289 raise xs_errors.XenError('MetadataError', \ 

290 opterr='Error upgrading SR Metadata: %s' % str(e)) 

291 

292 def syncMetadataAndStorage(self): 
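# Reconcile MGT metadata with the LVs actually on storage: drop metadata entries for
# VDIs that no longer exist, and correct mismatched vdi_type values (storage wins).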

293 try: 

294 # if a VDI is present in the metadata but not in the storage 

295 # then delete it from the metadata 

296 vdi_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1] 

297 for vdi in list(vdi_info.keys()): 

298 update_map = {} 

299 if not vdi_info[vdi][UUID_TAG] in set(self.storageVDIs.keys()): 

300 # delete this from metadata 

301 LVMMetadataHandler(self.mdpath). \ 

302 deleteVdiFromMetadata(vdi_info[vdi][UUID_TAG]) 

303 else: 

304 # search for this in the metadata, compare types 

305 # self.storageVDIs is a map of vdi_uuid to vdi_type 

306 if vdi_info[vdi][VDI_TYPE_TAG] != \ 

307 self.storageVDIs[vdi_info[vdi][UUID_TAG]]: 

308 # storage type takes authority 

309 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] \ 

310 = METADATA_OBJECT_TYPE_VDI 

311 update_map[UUID_TAG] = vdi_info[vdi][UUID_TAG] 

312 update_map[VDI_TYPE_TAG] = \ 

313 self.storageVDIs[vdi_info[vdi][UUID_TAG]] 

314 LVMMetadataHandler(self.mdpath) \ 

315 .updateMetadata(update_map) 

316 else: 

317 # This should never happen 

318 pass 

319 

320 except Exception as e: 

321 raise xs_errors.XenError('MetadataError', \ 

322 opterr='Error synching SR Metadata and storage: %s' % str(e)) 

323 

324 def syncMetadataAndXapi(self): 
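# Propagate name_label/name_description changes from XAPI into the MGT metadata
# for every VDI that XAPI knows about.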

325 try: 

326 # get metadata 

327 (sr_info, vdi_info) = \ 

328 LVMMetadataHandler(self.mdpath, False).getMetadata() 

329 

330 # First synch SR parameters 

331 self.update(self.uuid) 

332 

333 # Now update the VDI information in the metadata if required 

334 for vdi_offset in vdi_info.keys(): 

335 try: 

336 vdi_ref = \ 

337 self.session.xenapi.VDI.get_by_uuid( \ 

338 vdi_info[vdi_offset][UUID_TAG]) 

339 except: 

340 # maybe the VDI is not in XAPI yet, don't bother 

341 continue 

342 

343 new_name_label = util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi_ref)) 

344 new_name_description = util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi_ref)) 

345 

346 if vdi_info[vdi_offset][NAME_LABEL_TAG] != new_name_label or \ 

347 vdi_info[vdi_offset][NAME_DESCRIPTION_TAG] != \ 

348 new_name_description: 

349 update_map = {} 

350 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \ 

351 METADATA_OBJECT_TYPE_VDI 

352 update_map[UUID_TAG] = vdi_info[vdi_offset][UUID_TAG] 

353 update_map[NAME_LABEL_TAG] = new_name_label 

354 update_map[NAME_DESCRIPTION_TAG] = new_name_description 

355 LVMMetadataHandler(self.mdpath) \ 

356 .updateMetadata(update_map) 

357 except Exception as e: 

358 raise xs_errors.XenError('MetadataError', \ 

359 opterr='Error synching SR Metadata and XAPI: %s' % str(e)) 

360 

361 def _checkMetadataVolume(self): 
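# On the master: if the MGT volume exists and this is sr_attach, activate it and run the
# metadata/storage/XAPI sync; if it is missing and we are not in legacy mode, create it.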

362 util.SMlog("Entering _checkMetadataVolume") 

363 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME) 

364 if self.isMaster: 

365 if self.mdexists and self.cmd == "sr_attach": 

366 try: 

367 # activate the management volume 

368 # will be deactivated at detach time 

369 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME) 

370 self._synchSmConfigWithMetaData() 

371 util.SMlog("Sync SR metadata and the state on the storage.") 

372 self.syncMetadataAndStorage() 

373 self.syncMetadataAndXapi() 

374 except Exception as e: 

375 util.SMlog("Exception in _checkMetadataVolume, " \ 

376 "Error: %s." % str(e)) 

377 elif not self.mdexists and not self.legacyMode: 

378 self._introduceMetaDataVolume() 

379 

380 if self.mdexists: 

381 self.legacyMode = False 

382 

383 def _synchSmConfigWithMetaData(self): 

384 util.SMlog("Synching sm-config with metadata volume") 

385 

386 try: 

387 # get SR info from metadata 

388 sr_info = {} 

389 map = {} 

390 sr_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[0] 

391 

392 if sr_info == {}: 

393 raise Exception("Failed to get SR information from metadata.") 

394 

395 if "allocation" in sr_info: 

396 self.provision = sr_info.get("allocation") 

397 map['allocation'] = sr_info.get("allocation") 

398 else: 

399 raise Exception("Allocation key not found in SR metadata. " 

400 "SR info found: %s" % sr_info) 

401 

402 except Exception as e: 

403 raise xs_errors.XenError( 

404 'MetadataError', 

405 opterr='Error reading SR params from ' 

406 'metadata Volume: %s' % str(e)) 

407 try: 

408 map[self.FLAG_USE_VHD] = 'true' 

409 self.session.xenapi.SR.set_sm_config(self.sr_ref, map) 

410 except: 

411 raise xs_errors.XenError( 

412 'MetadataError', 

413 opterr='Error updating sm_config key') 

414 

415 def _introduceMetaDataVolume(self): 

416 util.SMlog("Creating Metadata volume") 

417 try: 

418 config = {} 

419 self.lvmCache.create(self.MDVOLUME_NAME, 4 * 1024 * 1024) 

420 

421 # activate the management volume, will be deactivated at detach time 

422 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME) 

423 

424 name_label = util.to_plain_string( \ 

425 self.session.xenapi.SR.get_name_label(self.sr_ref)) 

426 name_description = util.to_plain_string( \ 

427 self.session.xenapi.SR.get_name_description(self.sr_ref)) 

428 config[self.FLAG_USE_VHD] = "true" 

429 config['allocation'] = self.provision 

430 self.session.xenapi.SR.set_sm_config(self.sr_ref, config) 

431 

432 # Add the SR metadata 

433 self.updateSRMetadata(self.provision) 

434 except Exception as e: 

435 raise xs_errors.XenError('MetadataError', \ 

436 opterr='Error introducing Metadata Volume: %s' % str(e)) 

437 

438 def _removeMetadataVolume(self): 

439 if self.mdexists: 

440 try: 

441 self.lvmCache.remove(self.MDVOLUME_NAME) 

442 except: 

443 raise xs_errors.XenError('MetadataError', \ 

444 opterr='Failed to delete MGT Volume') 

445 

446 def _refresh_size(self): 

447 """ 

448 Refreshes the size of the backing device. 

449 Return true if all paths/devices agree on the same size. 

450 """ 

451 if hasattr(self, 'SCSIid'): 

452 # LVHDoHBASR, LVHDoISCSISR 

453 return scsiutil.refresh_lun_size_by_SCSIid(getattr(self, 'SCSIid')) 

454 else: 

455 # LVHDSR 

456 devices = self.dconf['device'].split(',') 

457 scsiutil.refreshdev(devices) 

458 return True 

459 

460 def _expand_size(self): 

461 """ 

462 Expands the size of the SR by growing into additional available 

463 space, if extra space is available on the backing device. 

464 Needs to be called after a successful call of _refresh_size. 

465 """ 

466 currentvgsize = lvutil._getVGstats(self.vgname)['physical_size'] 

467 # We are comparing PV- with VG-sizes that are aligned. Need a threshold 

468 resizethreshold = 100 * 1024 * 1024 # 100MB 

469 devices = self.dconf['device'].split(',') 

470 totaldevicesize = 0 

471 for device in devices: 

472 totaldevicesize = totaldevicesize + scsiutil.getsize(device) 

473 if totaldevicesize >= (currentvgsize + resizethreshold): 

474 try: 

475 if hasattr(self, 'SCSIid'): 

476 # LVHDoHBASR, LVHDoISCSISR might have slaves 

477 scsiutil.refresh_lun_size_by_SCSIid_on_slaves(self.session, 

478 getattr(self, 'SCSIid')) 

479 util.SMlog("LVHDSR._expand_size for %s will resize the pv." % 

480 self.uuid) 

481 for pv in lvutil.get_pv_for_vg(self.vgname): 

482 lvutil.resizePV(pv) 

483 except: 

484 util.logException("LVHDSR._expand_size for %s failed to resize" 

485 " the PV" % self.uuid) 

486 

487 @override 

488 @deviceCheck 

489 def create(self, uuid, size) -> None: 

490 util.SMlog("LVHDSR.create for %s" % self.uuid) 

491 if not self.isMaster: 

492 util.SMlog('sr_create blocked for non-master') 

493 raise xs_errors.XenError('LVMMaster') 

494 

495 if lvutil._checkVG(self.vgname): 

496 raise xs_errors.XenError('SRExists') 

497 

498 # Check none of the devices already in use by other PBDs 

499 if util.test_hostPBD_devs(self.session, uuid, self.dconf['device']): 

500 raise xs_errors.XenError('SRInUse') 

501 

502 # Check serial number entry in SR records 

503 for dev in self.dconf['device'].split(','): 

504 if util.test_scsiserial(self.session, dev): 

505 raise xs_errors.XenError('SRInUse') 

506 

507 lvutil.createVG(self.dconf['device'], self.vgname) 

508 

509 # Update serial number string 

510 scsiutil.add_serial_record(self.session, self.sr_ref, \ 

511 scsiutil.devlist_to_serialstring(self.dconf['device'].split(','))) 

512 

513 # since this is an SR.create turn off legacy mode 

514 self.session.xenapi.SR.add_to_sm_config(self.sr_ref, \ 

515 self.FLAG_USE_VHD, 'true') 

516 

517 @override 

518 def delete(self, uuid) -> None: 

519 util.SMlog("LVHDSR.delete for %s" % self.uuid) 

520 if not self.isMaster: 

521 raise xs_errors.XenError('LVMMaster') 

522 cleanup.gc_force(self.session, self.uuid) 

523 

524 success = True 

525 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'): 

526 if util.extractSRFromDevMapper(fileName) != self.uuid: 

527 continue 

528 

529 if util.doesFileHaveOpenHandles(fileName): 

530 util.SMlog("LVHDSR.delete: The dev mapper entry %s has open " \ 

531 "handles" % fileName) 

532 success = False 

533 continue 

534 

535 # Now attempt to remove the dev mapper entry 

536 if not lvutil.removeDevMapperEntry(fileName, False): 

537 success = False 

538 continue 

539 

540 try: 

541 lvname = os.path.basename(fileName.replace('-', '/'). \ 

542 replace('//', '-')) 

543 lpath = os.path.join(self.path, lvname) 

544 os.unlink(lpath) 

545 except OSError as e: 

546 if e.errno != errno.ENOENT: 

547 util.SMlog("LVHDSR.delete: failed to remove the symlink for " \ 

548 "file %s. Error: %s" % (fileName, str(e))) 

549 success = False 

550 

551 if success: 

552 try: 

553 if util.pathexists(self.path): 

554 os.rmdir(self.path) 

555 except Exception as e: 

556 util.SMlog("LVHDSR.delete: failed to remove the symlink " \ 

557 "directory %s. Error: %s" % (self.path, str(e))) 

558 success = False 

559 

560 self._removeMetadataVolume() 

561 self.lvmCache.refresh() 

562 if len(lvhdutil.getLVInfo(self.lvmCache)) > 0: 

563 raise xs_errors.XenError('SRNotEmpty') 

564 

565 if not success: 

566 raise Exception("LVHDSR delete failed, please refer to the log " \ 

567 "for details.") 

568 

569 lvutil.removeVG(self.dconf['device'], self.vgname) 

570 self._cleanup() 

571 

572 @override 

573 def attach(self, uuid) -> None: 

574 util.SMlog("LVHDSR.attach for %s" % self.uuid) 

575 

576 self._cleanup(True) # in case of host crashes, if detach wasn't called 

577 

578 if not util.match_uuid(self.uuid) or not lvutil._checkVG(self.vgname): 

579 raise xs_errors.XenError('SRUnavailable', \ 

580 opterr='no such volume group: %s' % self.vgname) 

581 

582 # Refresh the metadata status 

583 self._checkMetadataVolume() 

584 

585 refreshsizeok = self._refresh_size() 

586 

587 if self.isMaster: 

588 if refreshsizeok: 

589 self._expand_size() 

590 

591 # Update SCSIid string 

592 util.SMlog("Calling devlist_to_serial") 

593 scsiutil.add_serial_record( 

594 self.session, self.sr_ref, 

595 scsiutil.devlist_to_serialstring(self.dconf['device'].split(','))) 

596 

597 # Test Legacy Mode Flag and update if VHD volumes exist 

598 if self.isMaster and self.legacyMode: 

599 vdiInfo = lvhdutil.getVDIInfo(self.lvmCache) 

600 for uuid, info in vdiInfo.items(): 

601 if info.vdiType == vhdutil.VDI_TYPE_VHD: 

602 self.legacyMode = False 

603 map = self.session.xenapi.SR.get_sm_config(self.sr_ref) 

604 self._introduceMetaDataVolume() 

605 break 

606 

607 # Set the block scheduler 

608 for dev in self.dconf['device'].split(','): 

609 self.block_setscheduler(dev) 

610 

611 @override 

612 def detach(self, uuid) -> None: 

613 util.SMlog("LVHDSR.detach for %s" % self.uuid) 

614 cleanup.abort(self.uuid) 

615 

616 # Do a best effort cleanup of the dev mapper entries 

617 # go through all devmapper entries for this VG 

618 success = True 

619 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'): 

620 if util.extractSRFromDevMapper(fileName) != self.uuid: 

621 continue 

622 

623 with Fairlock('devicemapper'): 

624 # check if any file has open handles 

625 if util.doesFileHaveOpenHandles(fileName): 

626 # if yes, log this and signal failure 

627 util.SMlog( 

628 f"LVHDSR.detach: The dev mapper entry {fileName} has " 

629 "open handles") 

630 success = False 

631 continue 

632 

633 # Now attempt to remove the dev mapper entry 

634 if not lvutil.removeDevMapperEntry(fileName, False): 

635 success = False 

636 continue 

637 

638 # also remove the symlinks from /dev/VG-XenStorage-SRUUID/* 

639 try: 

640 lvname = os.path.basename(fileName.replace('-', '/'). \ 

641 replace('//', '-')) 

642 lvname = os.path.join(self.path, lvname) 

643 util.force_unlink(lvname) 

644 except Exception as e: 

645 util.SMlog("LVHDSR.detach: failed to remove the symlink for " \ 

646 "file %s. Error: %s" % (fileName, str(e))) 

647 success = False 

648 

649 # now remove the directory where the symlinks are 

650 # this should pass as the directory should be empty by now 

651 if success: 

652 try: 

653 if util.pathexists(self.path): 

654 os.rmdir(self.path) 

655 except Exception as e: 

656 util.SMlog("LVHDSR.detach: failed to remove the symlink " \ 

657 "directory %s. Error: %s" % (self.path, str(e))) 

658 success = False 

659 

660 if not success: 

661 raise Exception("SR detach failed, please refer to the log " \ 

662 "for details.") 

663 

664 # Don't delete lock files on the master as it will break the locking 

665 # between SM and any GC thread that survives through SR.detach. 

666 # However, we should still delete lock files on slaves as it is the 

667 # only place to do so. 

668 self._cleanup(self.isMaster) 

669 

670 @override 

671 def forget_vdi(self, uuid) -> None: 

672 if not self.legacyMode: 

673 LVMMetadataHandler(self.mdpath).deleteVdiFromMetadata(uuid) 

674 super(LVHDSR, self).forget_vdi(uuid) 

675 

676 @override 

677 def scan(self, uuid) -> None: 

678 activated = True 

679 try: 

680 lvname = '' 

681 util.SMlog("LVHDSR.scan for %s" % self.uuid) 

682 if not self.isMaster: 

683 util.SMlog('sr_scan blocked for non-master') 

684 raise xs_errors.XenError('LVMMaster') 

685 

686 if self._refresh_size(): 

687 self._expand_size() 

688 self.lvmCache.refresh() 

689 cbt_vdis = self.lvmCache.getTagged(CBTLOG_TAG) 

690 self._loadvdis() 

691 stats = lvutil._getVGstats(self.vgname) 

692 self.physical_size = stats['physical_size'] 

693 self.physical_utilisation = stats['physical_utilisation'] 

694 

695 # Now check if there are any VDIs in the metadata, which are not in 

696 # XAPI 

697 if self.mdexists: 

698 vdiToSnaps: Dict[str, List[str]] = {} 

699 # get VDIs from XAPI 

700 vdis = self.session.xenapi.SR.get_VDIs(self.sr_ref) 

701 vdi_uuids = set([]) 

702 for vdi in vdis: 

703 vdi_uuids.add(self.session.xenapi.VDI.get_uuid(vdi)) 

704 

705 info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1] 

706 

707 for vdi in list(info.keys()): 

708 vdi_uuid = info[vdi][UUID_TAG] 

709 if bool(int(info[vdi][IS_A_SNAPSHOT_TAG])): 

710 if info[vdi][SNAPSHOT_OF_TAG] in vdiToSnaps: 

711 vdiToSnaps[info[vdi][SNAPSHOT_OF_TAG]].append(vdi_uuid) 

712 else: 

713 vdiToSnaps[info[vdi][SNAPSHOT_OF_TAG]] = [vdi_uuid] 

714 

715 if vdi_uuid not in vdi_uuids: 

716 util.SMlog("Introduce VDI %s as it is present in " \ 

717 "metadata and not in XAPI." % vdi_uuid) 

718 sm_config = {} 

719 sm_config['vdi_type'] = info[vdi][VDI_TYPE_TAG] 

720 lvname = "%s%s" % \ 

721 (lvhdutil.LV_PREFIX[sm_config['vdi_type']], vdi_uuid) 

722 self.lvmCache.activateNoRefcount(lvname) 

723 activated = True 

724 lvPath = os.path.join(self.path, lvname) 

725 

726 if info[vdi][VDI_TYPE_TAG] == vhdutil.VDI_TYPE_RAW: 

727 size = self.lvmCache.getSize( \ 

728 lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_RAW] + \ 

729 vdi_uuid) 

730 utilisation = \ 

731 util.roundup(lvutil.LVM_SIZE_INCREMENT, 

732 int(size)) 

733 else: 

734 parent = \ 

735 vhdutil._getVHDParentNoCheck(lvPath) 

736 

737 if parent is not None: 

738 sm_config['vhd-parent'] = parent[len( \ 

739 lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD]):] 

740 size = vhdutil.getSizeVirt(lvPath) 

741 if self.provision == "thin": 

742 utilisation = \ 

743 util.roundup(lvutil.LVM_SIZE_INCREMENT, 

744 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE)) 

745 else: 

746 utilisation = lvhdutil.calcSizeVHDLV(int(size)) 

747 

748 vdi_ref = self.session.xenapi.VDI.db_introduce( 

749 vdi_uuid, 

750 info[vdi][NAME_LABEL_TAG], 

751 info[vdi][NAME_DESCRIPTION_TAG], 

752 self.sr_ref, 

753 info[vdi][TYPE_TAG], 

754 False, 

755 bool(int(info[vdi][READ_ONLY_TAG])), 

756 {}, 

757 vdi_uuid, 

758 {}, 

759 sm_config) 

760 

761 self.session.xenapi.VDI.set_managed(vdi_ref, 

762 bool(int(info[vdi][MANAGED_TAG]))) 

763 self.session.xenapi.VDI.set_virtual_size(vdi_ref, 

764 str(size)) 

765 self.session.xenapi.VDI.set_physical_utilisation( \ 

766 vdi_ref, str(utilisation)) 

767 self.session.xenapi.VDI.set_is_a_snapshot( \ 

768 vdi_ref, bool(int(info[vdi][IS_A_SNAPSHOT_TAG]))) 

769 if bool(int(info[vdi][IS_A_SNAPSHOT_TAG])): 

770 self.session.xenapi.VDI.set_snapshot_time( \ 

771 vdi_ref, DateTime(info[vdi][SNAPSHOT_TIME_TAG])) 

772 if info[vdi][TYPE_TAG] == 'metadata': 

773 self.session.xenapi.VDI.set_metadata_of_pool( \ 

774 vdi_ref, info[vdi][METADATA_OF_POOL_TAG]) 

775 

776 # Update CBT status of disks either just added 

777 # or already in XAPI 

778 cbt_logname = "%s.%s" % (vdi_uuid, CBTLOG_TAG) 

779 if cbt_logname in cbt_vdis: 

780 vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid) 

781 self.session.xenapi.VDI.set_cbt_enabled(vdi_ref, True) 

782 # For existing VDIs, update local state too 

783 # The scan in the base SR class updates existing VDIs 

784 # again based on their local state 

785 if vdi_uuid in self.vdis: 

786 self.vdis[vdi_uuid].cbt_enabled = True 

787 cbt_vdis.remove(cbt_logname) 

788 

789 # Now set the snapshot statuses correctly in XAPI 

790 for srcvdi in vdiToSnaps.keys(): 

791 try: 

792 srcref = self.session.xenapi.VDI.get_by_uuid(srcvdi) 

793 except: 

794 # the source VDI no longer exists, continue 

795 continue 

796 

797 for snapvdi in vdiToSnaps[srcvdi]: 

798 try: 

799 # this might fail in cases where it's already set 

800 snapref = \ 

801 self.session.xenapi.VDI.get_by_uuid(snapvdi) 

802 self.session.xenapi.VDI.set_snapshot_of(snapref, srcref) 

803 except Exception as e: 

804 util.SMlog("Setting snapshot failed. " \ 

805 "Error: %s" % str(e)) 

806 

807 if cbt_vdis: 

808 # If we have items remaining in this list, 

809 # they are cbt_metadata VDIs that XAPI doesn't know about 

810 # Add them to self.vdis and they'll get added to the DB 

811 for cbt_vdi in cbt_vdis: 

812 cbt_uuid = cbt_vdi.split(".")[0] 

813 new_vdi = self.vdi(cbt_uuid) 

814 new_vdi.ty = "cbt_metadata" 

815 new_vdi.cbt_enabled = True 

816 self.vdis[cbt_uuid] = new_vdi 

817 

818 super(LVHDSR, self).scan(uuid) 

819 self._kickGC() 

820 

821 finally: 

822 if lvname != '' and activated: 

823 self.lvmCache.deactivateNoRefcount(lvname) 

824 

825 @override 

826 def update(self, uuid) -> None: 

827 if not lvutil._checkVG(self.vgname): 

828 return 

829 self._updateStats(uuid, 0) 

830 

831 if self.legacyMode: 

832 return 

833 

834 # synch name_label in metadata with XAPI 

835 update_map = {} 

836 update_map = {METADATA_UPDATE_OBJECT_TYPE_TAG: \ 

837 METADATA_OBJECT_TYPE_SR, 

838 NAME_LABEL_TAG: util.to_plain_string( \ 

839 self.session.xenapi.SR.get_name_label(self.sr_ref)), 

840 NAME_DESCRIPTION_TAG: util.to_plain_string( \ 

841 self.session.xenapi.SR.get_name_description(self.sr_ref)) 

842 } 

843 LVMMetadataHandler(self.mdpath).updateMetadata(update_map) 

844 

845 def _updateStats(self, uuid, virtAllocDelta): 
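# Apply the virtual-allocation delta, refresh physical size/utilisation from the VG
# stats, and push the new values to the XAPI database.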

846 valloc = int(self.session.xenapi.SR.get_virtual_allocation(self.sr_ref)) 

847 self.virtual_allocation = valloc + virtAllocDelta 

848 util.SMlog("Setting virtual_allocation of SR %s to %d" % 

849 (uuid, self.virtual_allocation)) 

850 stats = lvutil._getVGstats(self.vgname) 

851 self.physical_size = stats['physical_size'] 

852 self.physical_utilisation = stats['physical_utilisation'] 

853 self._db_update() 

854 

855 @override 

856 @deviceCheck 

857 def probe(self) -> str: 

858 return lvutil.srlist_toxml( 

859 lvutil.scan_srlist(lvhdutil.VG_PREFIX, self.dconf['device']), 

860 lvhdutil.VG_PREFIX, 

861 ('metadata' in self.srcmd.params['sr_sm_config'] and \ 

862 self.srcmd.params['sr_sm_config']['metadata'] == 'true')) 

863 

864 @override 

865 def vdi(self, uuid) -> VDI.VDI: 

866 return LVHDVDI(self, uuid) 

867 

868 def _loadvdis(self): 
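# Build self.vdis from the LVM scan, accumulate the virtual allocation, record
# parent/child relationships in the global geneology map, and drop hidden leaves
# that are pending garbage collection.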

869 self.virtual_allocation = 0 

870 self.vdiInfo = lvhdutil.getVDIInfo(self.lvmCache) 

871 self.allVDIs = {} 

872 

873 for uuid, info in self.vdiInfo.items(): 

874 if uuid.startswith(cleanup.SR.TMP_RENAME_PREFIX): 

875 continue 

876 if info.scanError: 

877 raise xs_errors.XenError('VDIUnavailable', \ 

878 opterr='Error scanning VDI %s' % uuid) 

879 self.vdis[uuid] = self.allVDIs[uuid] = self.vdi(uuid) 

880 if not self.vdis[uuid].hidden: 

881 self.virtual_allocation += self.vdis[uuid].utilisation 

882 

883 for uuid, vdi in self.vdis.items(): 

884 if vdi.parent: 

885 if vdi.parent in self.vdis: 

886 self.vdis[vdi.parent].read_only = True 

887 if vdi.parent in geneology: 

888 geneology[vdi.parent].append(uuid) 

889 else: 

890 geneology[vdi.parent] = [uuid] 

891 

892 # Now remove all hidden leaf nodes to avoid introducing records that 

893 # will be GC'ed 

894 for uuid in list(self.vdis.keys()): 

895 if uuid not in geneology and self.vdis[uuid].hidden: 

896 util.SMlog("Scan found hidden leaf (%s), ignoring" % uuid) 

897 del self.vdis[uuid] 

898 

899 def _ensureSpaceAvailable(self, amount_needed): 

900 space_available = lvutil._getVGstats(self.vgname)['freespace'] 

901 if (space_available < amount_needed): 

902 util.SMlog("Not enough space! free space: %d, need: %d" % \ 

903 (space_available, amount_needed)) 

904 raise xs_errors.XenError('SRNoSpace') 

905 

906 def _handleInterruptedCloneOps(self): 
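# Process any leftover clone journal entries: undo or complete each interrupted
# snapshot/clone and then remove its journal record.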

907 entries = self.journaler.getAll(LVHDVDI.JRN_CLONE) 

908 for uuid, val in entries.items(): 

909 util.fistpoint.activate("LVHDRT_clone_vdi_before_undo_clone", self.uuid) 

910 self._handleInterruptedCloneOp(uuid, val) 

911 util.fistpoint.activate("LVHDRT_clone_vdi_after_undo_clone", self.uuid) 

912 self.journaler.remove(LVHDVDI.JRN_CLONE, uuid) 

913 

914 def _handleInterruptedCoalesceLeaf(self): 

915 entries = self.journaler.getAll(cleanup.VDI.JRN_LEAF) 

916 if len(entries) > 0: 

917 util.SMlog("*** INTERRUPTED COALESCE-LEAF OP DETECTED ***") 

918 cleanup.gc_force(self.session, self.uuid) 

919 self.lvmCache.refresh() 

920 

921 def _handleInterruptedCloneOp(self, origUuid, jval, forceUndo=False): 

922 """Either roll back or finalize the interrupted snapshot/clone 

923 operation. Rolling back is unsafe if the leaf VHDs have already been 

924 in use and written to. However, it is always safe to roll back while 

925 we're still in the context of the failed snapshot operation since the 

926 VBD is paused for the duration of the operation""" 

927 util.SMlog("*** INTERRUPTED CLONE OP: for %s (%s)" % (origUuid, jval)) 

928 lvs = lvhdutil.getLVInfo(self.lvmCache) 

929 baseUuid, clonUuid = jval.split("_") 

930 

931 # is there a "base copy" VDI? 

932 if not lvs.get(baseUuid): 

933 # no base copy: make sure the original is there 

934 if lvs.get(origUuid): 

935 util.SMlog("*** INTERRUPTED CLONE OP: nothing to do") 

936 return 

937 raise util.SMException("base copy %s not present, " \ 

938 "but no original %s found" % (baseUuid, origUuid)) 

939 

940 if forceUndo: 

941 util.SMlog("Explicit revert") 

942 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) 

943 return 

944 

945 if not lvs.get(origUuid) or (clonUuid and not lvs.get(clonUuid)): 

946 util.SMlog("One or both leaves missing => revert") 

947 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) 

948 return 

949 

950 vdis = lvhdutil.getVDIInfo(self.lvmCache) 

951 if vdis[origUuid].scanError or (clonUuid and vdis[clonUuid].scanError): 

952 util.SMlog("One or both leaves invalid => revert") 

953 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) 

954 return 

955 

956 orig = vdis[origUuid] 

957 base = vdis[baseUuid] 

958 self.lvActivator.activate(baseUuid, base.lvName, False) 

959 self.lvActivator.activate(origUuid, orig.lvName, False) 

960 if orig.parentUuid != baseUuid: 

961 parent = vdis[orig.parentUuid] 

962 self.lvActivator.activate(parent.uuid, parent.lvName, False) 

963 origPath = os.path.join(self.path, orig.lvName) 

964 if not vhdutil.check(origPath): 

965 util.SMlog("Orig VHD invalid => revert") 

966 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) 

967 return 

968 

969 if clonUuid: 

970 clon = vdis[clonUuid] 

971 clonPath = os.path.join(self.path, clon.lvName) 

972 self.lvActivator.activate(clonUuid, clon.lvName, False) 

973 if not vhdutil.check(clonPath): 

974 util.SMlog("Clon VHD invalid => revert") 

975 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) 

976 return 

977 

978 util.SMlog("Snapshot appears valid, will not roll back") 

979 self._completeCloneOp(vdis, origUuid, baseUuid, clonUuid) 

980 

981 def _undoCloneOp(self, lvs, origUuid, baseUuid, clonUuid): 
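# Roll back an interrupted clone: make the base parent writable and visible again,
# delete the partially-created leaves, re-inflate the parent, rename it back to the
# original UUID and restore its refcounts.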

982 base = lvs[baseUuid] 

983 basePath = os.path.join(self.path, base.name) 

984 

985 # make the parent RW 

986 if base.readonly: 

987 self.lvmCache.setReadonly(base.name, False) 

988 

989 ns = lvhdutil.NS_PREFIX_LVM + self.uuid 

990 origRefcountBinary = RefCounter.check(origUuid, ns)[1] 

991 origRefcountNormal = 0 

992 

993 # un-hide the parent 

994 if base.vdiType == vhdutil.VDI_TYPE_VHD: 

995 self.lvActivator.activate(baseUuid, base.name, False) 

996 origRefcountNormal = 1 

997 vhdInfo = vhdutil.getVHDInfo(basePath, lvhdutil.extractUuid, False) 

998 if base.vdiType == vhdutil.VDI_TYPE_VHD and vhdInfo.hidden: 

999 vhdutil.setHidden(basePath, False) 

1000 elif base.vdiType == vhdutil.VDI_TYPE_RAW and base.hidden: 

1001 self.lvmCache.setHidden(base.name, False) 

1002 

1003 # remove the child nodes 

1004 if clonUuid and lvs.get(clonUuid): 

1005 if lvs[clonUuid].vdiType != vhdutil.VDI_TYPE_VHD: 

1006 raise util.SMException("clone %s not VHD" % clonUuid) 

1007 self.lvmCache.remove(lvs[clonUuid].name) 

1008 if self.lvActivator.get(clonUuid, False): 

1009 self.lvActivator.remove(clonUuid, False) 

1010 if lvs.get(origUuid): 

1011 self.lvmCache.remove(lvs[origUuid].name) 

1012 

1013 # inflate the parent to fully-allocated size 

1014 if base.vdiType == vhdutil.VDI_TYPE_VHD: 

1015 fullSize = lvhdutil.calcSizeVHDLV(vhdInfo.sizeVirt) 

1016 lvhdutil.inflate(self.journaler, self.uuid, baseUuid, fullSize) 

1017 

1018 # rename back 

1019 origLV = lvhdutil.LV_PREFIX[base.vdiType] + origUuid 

1020 self.lvmCache.rename(base.name, origLV) 

1021 RefCounter.reset(baseUuid, ns) 

1022 if self.lvActivator.get(baseUuid, False): 

1023 self.lvActivator.replace(baseUuid, origUuid, origLV, False) 

1024 RefCounter.set(origUuid, origRefcountNormal, origRefcountBinary, ns) 

1025 

1026 # At this stage, tapdisk and the SM VDI will be in the paused state. Remove 

1027 # the flag to facilitate VM deactivation 

1028 origVdiRef = self.session.xenapi.VDI.get_by_uuid(origUuid) 

1029 self.session.xenapi.VDI.remove_from_sm_config(origVdiRef, 'paused') 

1030 

1031 # update LVM metadata on slaves 

1032 slaves = util.get_slaves_attached_on(self.session, [origUuid]) 

1033 lvhdutil.lvRefreshOnSlaves(self.session, self.uuid, self.vgname, 

1034 origLV, origUuid, slaves) 

1035 

1036 util.SMlog("*** INTERRUPTED CLONE OP: rollback success") 

1037 

1038 def _completeCloneOp(self, vdis, origUuid, baseUuid, clonUuid): 

1039 """Finalize the interrupted snapshot/clone operation. This must not be 

1040 called from the live snapshot op context because we attempt to pause/ 

1041 unpause the VBD here (the VBD is already paused during snapshot, so it 

1042 would cause a deadlock)""" 

1043 base = vdis[baseUuid] 

1044 clon = None 

1045 if clonUuid: 

1046 clon = vdis[clonUuid] 

1047 

1048 cleanup.abort(self.uuid) 

1049 

1050 # make sure the parent is hidden and read-only 

1051 if not base.hidden: 

1052 if base.vdiType == vhdutil.VDI_TYPE_RAW: 

1053 self.lvmCache.setHidden(base.lvName) 

1054 else: 

1055 basePath = os.path.join(self.path, base.lvName) 

1056 vhdutil.setHidden(basePath) 

1057 if not base.lvReadonly: 

1058 self.lvmCache.setReadonly(base.lvName, True) 

1059 

1060 # NB: since this snapshot-preserving call is only invoked outside the 

1061 # snapshot op context, we assume the LVM metadata on the involved slave 

1062 # has by now been refreshed and do not attempt to do it here 

1063 

1064 # Update the original record 

1065 try: 

1066 vdi_ref = self.session.xenapi.VDI.get_by_uuid(origUuid) 

1067 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

1068 type = self.session.xenapi.VDI.get_type(vdi_ref) 

1069 sm_config["vdi_type"] = vhdutil.VDI_TYPE_VHD 

1070 sm_config['vhd-parent'] = baseUuid 

1071 self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config) 

1072 except XenAPI.Failure: 

1073 util.SMlog("ERROR updating the orig record") 

1074 

1075 # introduce the new VDI records 

1076 if clonUuid: 

1077 try: 

1078 clon_vdi = VDI.VDI(self, clonUuid) 

1079 clon_vdi.read_only = False 

1080 clon_vdi.location = clonUuid 

1081 clon_vdi.utilisation = clon.sizeLV 

1082 clon_vdi.sm_config = { 

1083 "vdi_type": vhdutil.VDI_TYPE_VHD, 

1084 "vhd-parent": baseUuid} 

1085 

1086 if not self.legacyMode: 

1087 LVMMetadataHandler(self.mdpath). \ 

1088 ensureSpaceIsAvailableForVdis(1) 

1089 

1090 clon_vdi_ref = clon_vdi._db_introduce() 

1091 util.SMlog("introduced clon VDI: %s (%s)" % \ 

1092 (clon_vdi_ref, clonUuid)) 

1093 

1094 vdi_info = {UUID_TAG: clonUuid, 

1095 NAME_LABEL_TAG: clon_vdi.label, 

1096 NAME_DESCRIPTION_TAG: clon_vdi.description, 

1097 IS_A_SNAPSHOT_TAG: 0, 

1098 SNAPSHOT_OF_TAG: '', 

1099 SNAPSHOT_TIME_TAG: '', 

1100 TYPE_TAG: type, 

1101 VDI_TYPE_TAG: clon_vdi.sm_config['vdi_type'], 

1102 READ_ONLY_TAG: int(clon_vdi.read_only), 

1103 MANAGED_TAG: int(clon_vdi.managed), 

1104 METADATA_OF_POOL_TAG: '' 

1105 } 

1106 

1107 if not self.legacyMode: 

1108 LVMMetadataHandler(self.mdpath).addVdi(vdi_info) 

1109 

1110 except XenAPI.Failure: 

1111 util.SMlog("ERROR introducing the clon record") 

1112 

1113 try: 

1114 base_vdi = VDI.VDI(self, baseUuid) # readonly parent 

1115 base_vdi.label = "base copy" 

1116 base_vdi.read_only = True 

1117 base_vdi.location = baseUuid 

1118 base_vdi.size = base.sizeVirt 

1119 base_vdi.utilisation = base.sizeLV 

1120 base_vdi.managed = False 

1121 base_vdi.sm_config = { 

1122 "vdi_type": vhdutil.VDI_TYPE_VHD, 

1123 "vhd-parent": baseUuid} 

1124 

1125 if not self.legacyMode: 

1126 LVMMetadataHandler(self.mdpath).ensureSpaceIsAvailableForVdis(1) 

1127 

1128 base_vdi_ref = base_vdi._db_introduce() 

1129 util.SMlog("introduced base VDI: %s (%s)" % \ 

1130 (base_vdi_ref, baseUuid)) 

1131 

1132 vdi_info = {UUID_TAG: baseUuid, 

1133 NAME_LABEL_TAG: base_vdi.label, 

1134 NAME_DESCRIPTION_TAG: base_vdi.description, 

1135 IS_A_SNAPSHOT_TAG: 0, 

1136 SNAPSHOT_OF_TAG: '', 

1137 SNAPSHOT_TIME_TAG: '', 

1138 TYPE_TAG: type, 

1139 VDI_TYPE_TAG: base_vdi.sm_config['vdi_type'], 

1140 READ_ONLY_TAG: int(base_vdi.read_only), 

1141 MANAGED_TAG: int(base_vdi.managed), 

1142 METADATA_OF_POOL_TAG: '' 

1143 } 

1144 

1145 if not self.legacyMode: 

1146 LVMMetadataHandler(self.mdpath).addVdi(vdi_info) 

1147 except XenAPI.Failure: 

1148 util.SMlog("ERROR introducing the base record") 

1149 

1150 util.SMlog("*** INTERRUPTED CLONE OP: complete") 

1151 

1152 def _undoAllJournals(self): 

1153 """Undo all VHD & SM interrupted journaled operations. This call must 

1154 be serialized with respect to all operations that create journals""" 

1155 # undoing interrupted inflates must be done first, since undoing VHD 

1156 # ops might require inflations 

1157 self.lock.acquire() 

1158 try: 

1159 self._undoAllInflateJournals() 

1160 self._undoAllVHDJournals() 

1161 self._handleInterruptedCloneOps() 

1162 self._handleInterruptedCoalesceLeaf() 

1163 finally: 

1164 self.lock.release() 

1165 self.cleanup() 

1166 

1167 def _undoAllInflateJournals(self): 

1168 entries = self.journaler.getAll(lvhdutil.JRN_INFLATE) 

1169 if len(entries) == 0: 

1170 return 

1171 self._loadvdis() 

1172 for uuid, val in entries.items(): 

1173 vdi = self.vdis.get(uuid) 

1174 if vdi: 

1175 util.SMlog("Found inflate journal %s, deflating %s to %s" % \ 

1176 (uuid, vdi.path, val)) 

1177 if vdi.readonly: 

1178 self.lvmCache.setReadonly(vdi.lvname, False) 

1179 self.lvActivator.activate(uuid, vdi.lvname, False) 

1180 currSizeLV = self.lvmCache.getSize(vdi.lvname) 

1181 util.zeroOut(vdi.path, currSizeLV - vhdutil.VHD_FOOTER_SIZE, 

1182 vhdutil.VHD_FOOTER_SIZE) 

1183 lvhdutil.deflate(self.lvmCache, vdi.lvname, int(val)) 

1184 if vdi.readonly: 

1185 self.lvmCache.setReadonly(vdi.lvname, True) 

1186 if "true" == self.session.xenapi.SR.get_shared(self.sr_ref): 

1187 lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid, 

1188 self.vgname, vdi.lvname, uuid) 

1189 self.journaler.remove(lvhdutil.JRN_INFLATE, uuid) 

1190 delattr(self, "vdiInfo") 

1191 delattr(self, "allVDIs") 

1192 

1193 def _undoAllVHDJournals(self): 

1194 """check if there are VHD journals in existence and revert them""" 

1195 journals = lvhdutil.getAllVHDJournals(self.lvmCache) 

1196 if len(journals) == 0: 

1197 return 

1198 self._loadvdis() 

1199 for uuid, jlvName in journals: 

1200 vdi = self.vdis[uuid] 

1201 util.SMlog("Found VHD journal %s, reverting %s" % (uuid, vdi.path)) 

1202 self.lvActivator.activate(uuid, vdi.lvname, False) 

1203 self.lvmCache.activateNoRefcount(jlvName) 

1204 fullSize = lvhdutil.calcSizeVHDLV(vdi.size) 

1205 lvhdutil.inflate(self.journaler, self.uuid, vdi.uuid, fullSize) 

1206 try: 

1207 jFile = os.path.join(self.path, jlvName) 

1208 vhdutil.revert(vdi.path, jFile) 

1209 except util.CommandException: 

1210 util.logException("VHD journal revert") 

1211 vhdutil.check(vdi.path) 

1212 util.SMlog("VHD revert failed but VHD ok: removing journal") 

1213 # Attempt to reclaim unused space 

1214 vhdInfo = vhdutil.getVHDInfo(vdi.path, lvhdutil.extractUuid, False) 

1215 NewSize = lvhdutil.calcSizeVHDLV(vhdInfo.sizeVirt) 

1216 if NewSize < fullSize: 

1217 lvhdutil.deflate(self.lvmCache, vdi.lvname, int(NewSize)) 

1218 lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid, 

1219 self.vgname, vdi.lvname, uuid) 

1220 self.lvmCache.remove(jlvName) 

1221 delattr(self, "vdiInfo") 

1222 delattr(self, "allVDIs") 

1223 

1224 def _updateSlavesPreClone(self, hostRefs, origOldLV): 
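# Ask each listed host other than the master to deactivate the original LV
# (no refcount) ahead of the clone operation.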

1225 masterRef = util.get_this_host_ref(self.session) 

1226 args = {"vgName": self.vgname, 

1227 "action1": "deactivateNoRefcount", 

1228 "lvName1": origOldLV} 

1229 for hostRef in hostRefs: 

1230 if hostRef == masterRef: 

1231 continue 

1232 util.SMlog("Deactivate VDI on %s" % hostRef) 

1233 rv = self.session.xenapi.host.call_plugin(hostRef, self.PLUGIN_ON_SLAVE, "multi", args) 

1234 util.SMlog("call-plugin returned: %s" % rv) 

1235 if not rv: 

1236 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE) 

1237 

1238 def _updateSlavesOnClone(self, hostRefs, origOldLV, origLV, 

1239 baseUuid, baseLV): 

1240 """We need to reactivate the original LV on each slave (note that the 

1241 name for the original LV might change), as well as init the refcount 

1242 for the base LV""" 

1243 args = {"vgName": self.vgname, 

1244 "action1": "refresh", 

1245 "lvName1": origLV, 

1246 "action2": "activate", 

1247 "ns2": lvhdutil.NS_PREFIX_LVM + self.uuid, 

1248 "lvName2": baseLV, 

1249 "uuid2": baseUuid} 

1250 

1251 masterRef = util.get_this_host_ref(self.session) 

1252 for hostRef in hostRefs: 

1253 if hostRef == masterRef: 

1254 continue 

1255 util.SMlog("Updating %s, %s, %s on slave %s" % \ 

1256 (origOldLV, origLV, baseLV, hostRef)) 

1257 rv = self.session.xenapi.host.call_plugin( 

1258 hostRef, self.PLUGIN_ON_SLAVE, "multi", args) 

1259 util.SMlog("call-plugin returned: %s" % rv) 

1260 if not rv: 

1261 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE) 

1262 

1263 def _updateSlavesOnCBTClone(self, hostRefs, cbtlog): 

1264 """Reactivate and refresh CBT log file on slaves""" 

1265 args = {"vgName": self.vgname, 

1266 "action1": "deactivateNoRefcount", 

1267 "lvName1": cbtlog, 

1268 "action2": "refresh", 

1269 "lvName2": cbtlog} 

1270 

1271 masterRef = util.get_this_host_ref(self.session) 

1272 for hostRef in hostRefs: 

1273 if hostRef == masterRef: 

1274 continue 

1275 util.SMlog("Updating %s on slave %s" % (cbtlog, hostRef)) 

1276 rv = self.session.xenapi.host.call_plugin( 

1277 hostRef, self.PLUGIN_ON_SLAVE, "multi", args) 

1278 util.SMlog("call-plugin returned: %s" % rv) 

1279 if not rv: 

1280 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE) 

1281 

1282 def _updateSlavesOnRemove(self, hostRefs, baseUuid, baseLV): 

1283 """Tell the slave we deleted the base image""" 

1284 args = {"vgName": self.vgname, 

1285 "action1": "cleanupLockAndRefcount", 

1286 "uuid1": baseUuid, 

1287 "ns1": lvhdutil.NS_PREFIX_LVM + self.uuid} 

1288 

1289 masterRef = util.get_this_host_ref(self.session) 

1290 for hostRef in hostRefs: 

1291 if hostRef == masterRef: 

1292 continue 

1293 util.SMlog("Cleaning locks for %s on slave %s" % (baseLV, hostRef)) 

1294 rv = self.session.xenapi.host.call_plugin( 

1295 hostRef, self.PLUGIN_ON_SLAVE, "multi", args) 

1296 util.SMlog("call-plugin returned: %s" % rv) 

1297 if not rv: 

1298 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE) 

1299 

1300 def _cleanup(self, skipLockCleanup=False): 

1301 """delete stale refcounter, flag, and lock files""" 

1302 RefCounter.resetAll(lvhdutil.NS_PREFIX_LVM + self.uuid) 

1303 IPCFlag(self.uuid).clearAll() 

1304 if not skipLockCleanup: 

1305 Lock.cleanupAll(self.uuid) 

1306 Lock.cleanupAll(lvhdutil.NS_PREFIX_LVM + self.uuid) 

1307 

1308 def _prepareTestMode(self): 

1309 util.SMlog("Test mode: %s" % self.testMode) 

1310 if self.ENV_VAR_VHD_TEST.get(self.testMode): 

1311 os.environ[self.ENV_VAR_VHD_TEST[self.testMode]] = "yes" 

1312 util.SMlog("Setting env %s" % self.ENV_VAR_VHD_TEST[self.testMode]) 

1313 

1314 def _kickGC(self): 

1315 util.SMlog("Kicking GC") 

1316 cleanup.start_gc_service(self.uuid) 

1317 

1318 def ensureCBTSpace(self): 

1319 # Ensure we have space for at least one LV 

1320 self._ensureSpaceAvailable(self.journaler.LV_SIZE) 

1321 

1322 

1323class LVHDVDI(VDI.VDI): 

1324 

1325 JRN_CLONE = "clone" # journal entry type for the clone operation 

1326 

1327 @override 

1328 def load(self, vdi_uuid) -> None: 
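# Work out the VDI type, LV name and path: use the SR's cached scan info when present,
# otherwise probe the storage; for a VDI that is still being created, take the type
# from vdi_sm_config.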

1329 self.lock = self.sr.lock 

1330 self.lvActivator = self.sr.lvActivator 

1331 self.loaded = False 

1332 self.vdi_type = vhdutil.VDI_TYPE_VHD 

1333 if self.sr.legacyMode or util.fistpoint.is_active("xenrt_default_vdi_type_legacy"): 

1334 self.vdi_type = vhdutil.VDI_TYPE_RAW 

1335 self.uuid = vdi_uuid 

1336 self.location = self.uuid 

1337 self.exists = True 

1338 

1339 if hasattr(self.sr, "vdiInfo") and self.sr.vdiInfo.get(self.uuid): 

1340 self._initFromVDIInfo(self.sr.vdiInfo[self.uuid]) 

1341 if self.parent: 

1342 self.sm_config_override['vhd-parent'] = self.parent 

1343 else: 

1344 self.sm_config_override['vhd-parent'] = None 

1345 return 

1346 

1347 # scan() didn't run: determine the type of the VDI manually 

1348 if self._determineType(): 

1349 return 

1350 

1351 # the VDI must be in the process of being created 

1352 self.exists = False 

1353 if "vdi_sm_config" in self.sr.srcmd.params and \ 

1354 "type" in self.sr.srcmd.params["vdi_sm_config"]: 

1355 type = self.sr.srcmd.params["vdi_sm_config"]["type"] 

1356 if type == PARAM_RAW: 

1357 self.vdi_type = vhdutil.VDI_TYPE_RAW 

1358 elif type == PARAM_VHD: 

1359 self.vdi_type = vhdutil.VDI_TYPE_VHD 

1360 if self.sr.cmd == 'vdi_create' and self.sr.legacyMode: 

1361 raise xs_errors.XenError('VDICreate', \ 

1362 opterr='Cannot create VHD type disk in legacy mode') 

1363 else: 

1364 raise xs_errors.XenError('VDICreate', opterr='bad type') 

1365 self.lvname = "%s%s" % (lvhdutil.LV_PREFIX[self.vdi_type], vdi_uuid) 

1366 self.path = os.path.join(self.sr.path, self.lvname) 

1367 

1368 @override 

1369 def create(self, sr_uuid, vdi_uuid, size) -> str: 

1370 util.SMlog("LVHDVDI.create for %s" % self.uuid) 

1371 if not self.sr.isMaster: 

1372 raise xs_errors.XenError('LVMMaster') 

1373 if self.exists: 

1374 raise xs_errors.XenError('VDIExists') 

1375 

1376 size = vhdutil.validate_and_round_vhd_size(int(size)) 

1377 

1378 util.SMlog("LVHDVDI.create: type = %s, %s (size=%s)" % \ 

1379 (self.vdi_type, self.path, size)) 

1380 lvSize = 0 

1381 self.sm_config = self.sr.srcmd.params["vdi_sm_config"] 

1382 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1383 lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT, int(size)) 

1384 else: 

1385 if self.sr.provision == "thin": 

1386 lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT, 

1387 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE)) 

1388 elif self.sr.provision == "thick": 

1389 lvSize = lvhdutil.calcSizeVHDLV(int(size)) 

1390 
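# Sizing summary (descriptive; exact values come from the helpers above):
# a RAW VDI gets an LV matching the requested virtual size rounded up to
# the LVM extent size; a VHD VDI under "thick" provisioning reserves the
# virtual size plus VHD metadata overhead via lvhdutil.calcSizeVHDLV();
# under "thin" provisioning only the empty-VHD overhead is allocated and
# the LV is inflated later, at attach time.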

1391 self.sr._ensureSpaceAvailable(lvSize) 

1392 

1393 try: 

1394 self.sr.lvmCache.create(self.lvname, lvSize) 

1395 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1396 self.size = self.sr.lvmCache.getSize(self.lvname) 

1397 else: 

1398 vhdutil.create(self.path, int(size), False, lvhdutil.MSIZE_MB) 

1399 self.size = vhdutil.getSizeVirt(self.path) 

1400 self.sr.lvmCache.deactivateNoRefcount(self.lvname) 

1401 except util.CommandException as e: 

1402 util.SMlog("Unable to create VDI") 

1403 self.sr.lvmCache.remove(self.lvname) 

1404 raise xs_errors.XenError('VDICreate', opterr='error %d' % e.code) 

1405 

1406 self.utilisation = lvSize 

1407 self.sm_config["vdi_type"] = self.vdi_type 

1408 

1409 if not self.sr.legacyMode: 

1410 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1) 

1411 

1412 self.ref = self._db_introduce() 

1413 self.sr._updateStats(self.sr.uuid, self.size) 

1414 

1415 vdi_info = {UUID_TAG: self.uuid, 

1416 NAME_LABEL_TAG: util.to_plain_string(self.label), 

1417 NAME_DESCRIPTION_TAG: util.to_plain_string(self.description), 

1418 IS_A_SNAPSHOT_TAG: 0, 

1419 SNAPSHOT_OF_TAG: '', 

1420 SNAPSHOT_TIME_TAG: '', 

1421 TYPE_TAG: self.ty, 

1422 VDI_TYPE_TAG: self.vdi_type, 

1423 READ_ONLY_TAG: int(self.read_only), 

1424 MANAGED_TAG: int(self.managed), 

1425 METADATA_OF_POOL_TAG: '' 

1426 } 

1427 

1428 if not self.sr.legacyMode: 

1429 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info) 

1430 

1431 return VDI.VDI.get_params(self) 

1432 

1433 @override 

1434 def delete(self, sr_uuid, vdi_uuid, data_only=False) -> None: 

1435 util.SMlog("LVHDVDI.delete for %s" % self.uuid) 

1436 try: 

1437 self._loadThis() 

1438 except xs_errors.SRException as e: 

1439 # Catch 'VDI doesn't exist' exception 

1440 if e.errno == 46: 

1441 return super(LVHDVDI, self).delete(sr_uuid, vdi_uuid, data_only) 

1442 raise 

1443 

1444 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1445 if not self.session.xenapi.VDI.get_managed(vdi_ref): 

1446 raise xs_errors.XenError("VDIDelete", \ 

1447 opterr="Deleting non-leaf node not permitted") 

1448 

1449 if not self.hidden: 

1450 self._markHidden() 

1451 

1452 if not data_only: 

1453 # Remove from XAPI and delete from MGT 

1454 self._db_forget() 

1455 else: 

1456 # If this is a data_destroy call, don't remove from XAPI db 

1457 # Only delete from MGT 

1458 if not self.sr.legacyMode: 

1459 LVMMetadataHandler(self.sr.mdpath).deleteVdiFromMetadata(self.uuid) 

1460 

1461 # deactivate here because it might be too late to do it in the "final" 

1462 # step: GC might have removed the LV by then 

1463 if self.sr.lvActivator.get(self.uuid, False): 

1464 self.sr.lvActivator.deactivate(self.uuid, False) 

1465 

1466 try: 

1467 self.sr.lvmCache.remove(self.lvname) 

1468 self.sr.lock.cleanup(vdi_uuid, lvhdutil.NS_PREFIX_LVM + sr_uuid) 

1469 self.sr.lock.cleanupAll(vdi_uuid) 

1470 except xs_errors.SRException as e: 

1471 util.SMlog( 

1472 "Failed to remove the volume (maybe it is leaf coalescing) " 

1473 "for %s err:%d" % (self.uuid, e.errno)) 

1474 

1475 self.sr._updateStats(self.sr.uuid, -self.size) 

1476 self.sr._kickGC() 

1477 return super(LVHDVDI, self).delete(sr_uuid, vdi_uuid, data_only) 

1478 

1479 @override 

1480 def attach(self, sr_uuid, vdi_uuid) -> str: 

1481 util.SMlog("LVHDVDI.attach for %s" % self.uuid) 

1482 if self.sr.journaler.hasJournals(self.uuid): 

1483 raise xs_errors.XenError('VDIUnavailable', 

1484 opterr='Interrupted operation detected on this VDI, ' 

1485 'scan SR first to trigger auto-repair') 

1486 

1487 writable = ('args' not in self.sr.srcmd.params) or \ 

1488 (self.sr.srcmd.params['args'][0] == "true") 

1489 needInflate = True 

1490 if self.vdi_type == vhdutil.VDI_TYPE_RAW or not writable: 

1491 needInflate = False 

1492 else: 

1493 self._loadThis() 

1494 if self.utilisation >= lvhdutil.calcSizeVHDLV(self.size): 

1495 needInflate = False 

1496 
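# Inflate-on-attach: a writable attach of a thin-provisioned VHD first
# grows the LV to its full provisioned size (via _prepareThin below) so
# the guest cannot run out of backing space mid-write; RAW VDIs and
# read-only attaches skip this step.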

1497 if needInflate: 

1498 try: 

1499 self._prepareThin(True) 

1500 except: 

1501 util.logException("attach") 

1502 raise xs_errors.XenError('LVMProvisionAttach') 

1503 

1504 try: 

1505 return self._attach() 

1506 finally: 

1507 if not self.sr.lvActivator.deactivateAll(): 

1508 util.SMlog("Failed to deactivate LVs back (%s)" % self.uuid) 

1509 

1510 @override 

1511 def detach(self, sr_uuid, vdi_uuid) -> None: 

1512 util.SMlog("LVHDVDI.detach for %s" % self.uuid) 

1513 self._loadThis() 

1514 already_deflated = (self.utilisation < \ 

1515 lvhdutil.calcSizeVHDLV(self.size)) 

1516 needDeflate = True 

1517 if self.vdi_type == vhdutil.VDI_TYPE_RAW or already_deflated: 

1518 needDeflate = False 

1519 elif self.sr.provision == "thick": 

1520 needDeflate = False 

1521 # except for snapshots, which are always deflated 

1522 if self.sr.srcmd.cmd != 'vdi_detach_from_config': 

1523 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1524 snap = self.session.xenapi.VDI.get_is_a_snapshot(vdi_ref) 

1525 if snap: 

1526 needDeflate = True 

1527 
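# Deflate-on-detach applies to thin-provisioned VHDs and to snapshots;
# thick-provisioned, non-snapshot leaves keep their fully inflated LV.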

1528 if needDeflate: 

1529 try: 

1530 self._prepareThin(False) 

1531 except: 

1532 util.logException("_prepareThin") 

1533 raise xs_errors.XenError('VDIUnavailable', opterr='deflate') 

1534 

1535 try: 

1536 self._detach() 

1537 finally: 

1538 if not self.sr.lvActivator.deactivateAll(): 

1539 raise xs_errors.XenError("SMGeneral", opterr="deactivation") 

1540 

1541 # We only support offline resize 

1542 @override 

1543 def resize(self, sr_uuid, vdi_uuid, size) -> str: 

1544 util.SMlog("LVHDVDI.resize for %s" % self.uuid) 

1545 if not self.sr.isMaster: 

1546 raise xs_errors.XenError('LVMMaster') 

1547 

1548 self._loadThis() 

1549 if self.hidden: 

1550 raise xs_errors.XenError('VDIUnavailable', opterr='hidden VDI') 

1551 

1552 if size < self.size: 

1553 util.SMlog('vdi_resize: shrinking not supported: ' + \ 

1554 '(current size: %d, new size: %d)' % (self.size, size)) 

1555 raise xs_errors.XenError('VDISize', opterr='shrinking not allowed') 

1556 

1557 size = vhdutil.validate_and_round_vhd_size(int(size)) 

1558 

1559 if size == self.size: 

1560 return VDI.VDI.get_params(self) 

1561 

1562 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1563 lvSizeOld = self.size 

1564 lvSizeNew = util.roundup(lvutil.LVM_SIZE_INCREMENT, size) 

1565 else: 

1566 lvSizeOld = self.utilisation 

1567 lvSizeNew = lvhdutil.calcSizeVHDLV(size) 

1568 if self.sr.provision == "thin": 

1569 # VDI is currently deflated, so keep it deflated 

1570 lvSizeNew = lvSizeOld 

1571 assert(lvSizeNew >= lvSizeOld) 

1572 spaceNeeded = lvSizeNew - lvSizeOld 

1573 self.sr._ensureSpaceAvailable(spaceNeeded) 
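# Only the difference between the new and old LV size must be free in the
# VG; for a thin-provisioned VHD the LV is left at its current size and
# only the virtual size recorded in the VHD header is changed below.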

1574 

1575 oldSize = self.size 

1576 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1577 self.sr.lvmCache.setSize(self.lvname, lvSizeNew) 

1578 self.size = self.sr.lvmCache.getSize(self.lvname) 

1579 self.utilisation = self.size 

1580 else: 

1581 if lvSizeNew != lvSizeOld: 

1582 lvhdutil.inflate(self.sr.journaler, self.sr.uuid, self.uuid, 

1583 lvSizeNew) 

1584 vhdutil.setSizeVirtFast(self.path, size) 

1585 self.size = vhdutil.getSizeVirt(self.path) 

1586 self.utilisation = self.sr.lvmCache.getSize(self.lvname) 

1587 

1588 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1589 self.session.xenapi.VDI.set_virtual_size(vdi_ref, str(self.size)) 

1590 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, 

1591 str(self.utilisation)) 

1592 self.sr._updateStats(self.sr.uuid, self.size - oldSize) 

1593 super(LVHDVDI, self).resize_cbt(self.sr.uuid, self.uuid, self.size) 

1594 return VDI.VDI.get_params(self) 

1595 

1596 @override 

1597 def clone(self, sr_uuid, vdi_uuid) -> str: 

1598 return self._do_snapshot( 

1599 sr_uuid, vdi_uuid, VDI.SNAPSHOT_DOUBLE, cloneOp=True) 

1600 

1601 @override 

1602 def compose(self, sr_uuid, vdi1, vdi2) -> None: 

1603 util.SMlog("LVHDSR.compose for %s -> %s" % (vdi2, vdi1)) 

1604 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

1605 raise xs_errors.XenError('Unimplemented') 

1606 

1607 parent_uuid = vdi1 

1608 parent_lvname = lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD] + parent_uuid 

1609 assert(self.sr.lvmCache.checkLV(parent_lvname)) 

1610 parent_path = os.path.join(self.sr.path, parent_lvname) 

1611 

1612 self.sr.lvActivator.activate(self.uuid, self.lvname, False) 

1613 self.sr.lvActivator.activate(parent_uuid, parent_lvname, False) 

1614 

1615 vhdutil.setParent(self.path, parent_path, False) 

1616 vhdutil.setHidden(parent_path) 

1617 self.sr.session.xenapi.VDI.set_managed(self.sr.srcmd.params['args'][0], False) 

1618 

1619 if not blktap2.VDI.tap_refresh(self.session, self.sr.uuid, self.uuid, 

1620 True): 

1621 raise util.SMException("failed to refresh VDI %s" % self.uuid) 

1622 

1623 util.SMlog("Compose done") 

1624 

1625 def reset_leaf(self, sr_uuid, vdi_uuid): 

1626 util.SMlog("LVHDSR.reset_leaf for %s" % vdi_uuid) 

1627 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

1628 raise xs_errors.XenError('Unimplemented') 

1629 

1630 self.sr.lvActivator.activate(self.uuid, self.lvname, False) 

1631 

1632 # safety check 

1633 if not vhdutil.hasParent(self.path): 

1634 raise util.SMException(("ERROR: VDI %s has no parent, " 

1635 "will not reset contents") % self.uuid) 

1636 

1637 vhdutil.killData(self.path) 

1638 

1639 def _attach(self): 

1640 self._chainSetActive(True, True, True) 

1641 if not util.pathexists(self.path): 

1642 raise xs_errors.XenError('VDIUnavailable', \ 

1643 opterr='Could not find: %s' % self.path) 

1644 

1645 if not hasattr(self, 'xenstore_data'): 

1646 self.xenstore_data = {} 

1647 

1648 self.xenstore_data.update(scsiutil.update_XS_SCSIdata(self.uuid, \ 

1649 scsiutil.gen_synthetic_page_data(self.uuid))) 

1650 

1651 self.xenstore_data['storage-type'] = 'lvm' 

1652 self.xenstore_data['vdi-type'] = self.vdi_type 

1653 

1654 self.attached = True 

1655 self.sr.lvActivator.persist() 

1656 return VDI.VDI.attach(self, self.sr.uuid, self.uuid) 

1657 

1658 def _detach(self): 

1659 self._chainSetActive(False, True) 

1660 self.attached = False 

1661 

1662 @override 

1663 def _do_snapshot(self, sr_uuid, vdi_uuid, snapType, 

1664 cloneOp=False, secondary=None, cbtlog=None) -> str: 

1665 # If cbt enabled, save file consistency state 

1666 if cbtlog is not None: 

1667 if blktap2.VDI.tap_status(self.session, vdi_uuid):

1668 consistency_state = False 

1669 else: 

1670 consistency_state = True 

1671 util.SMlog("Saving log consistency state of %s for vdi: %s" % 

1672 (consistency_state, vdi_uuid)) 

1673 else: 

1674 consistency_state = None 

1675 

1676 pause_time = time.time() 

1677 if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid):

1678 raise util.SMException("failed to pause VDI %s" % vdi_uuid) 

1679 

1680 snapResult = None 

1681 try: 

1682 snapResult = self._snapshot(snapType, cloneOp, cbtlog, consistency_state) 

1683 except Exception as e1: 

1684 try: 

1685 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, 

1686 secondary=None) 

1687 except Exception as e2: 

1688 util.SMlog('WARNING: failed to clean up failed snapshot: ' 

1689 '%s (error ignored)' % e2) 

1690 raise 

1691 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary) 

1692 unpause_time = time.time() 

1693 if (unpause_time - pause_time) > LONG_SNAPTIME:

1694 util.SMlog('WARNING: snapshot paused VM for %s seconds' % 

1695 (unpause_time - pause_time)) 

1696 return snapResult 

1697 

1698 def _snapshot(self, snapType, cloneOp=False, cbtlog=None, cbt_consistency=None): 

1699 util.SMlog("LVHDVDI._snapshot for %s (type %s)" % (self.uuid, snapType)) 

1700 

1701 if not self.sr.isMaster:

1702 raise xs_errors.XenError('LVMMaster') 

1703 if self.sr.legacyMode:

1704 raise xs_errors.XenError('Unimplemented', opterr='In legacy mode') 

1705 

1706 self._loadThis() 

1707 if self.hidden:

1708 raise xs_errors.XenError('VDISnapshot', opterr='hidden VDI') 

1709 

1710 self.sm_config = self.session.xenapi.VDI.get_sm_config( \ 

1711 self.sr.srcmd.params['vdi_ref']) 

1712 if "type" in self.sm_config and self.sm_config['type'] == 'raw':

1713 if not util.fistpoint.is_active("testsm_clone_allow_raw"): 

1714 raise xs_errors.XenError('Unimplemented', \ 

1715 opterr='Raw VDI, snapshot or clone not permitted') 

1716 

1717 # we must activate the entire VHD chain because the real parent could 

1718 # theoretically be anywhere in the chain if all VHDs under it are empty 

1719 self._chainSetActive(True, False) 

1720 if not util.pathexists(self.path):

1721 raise xs_errors.XenError('VDIUnavailable', \ 

1722 opterr='VDI unavailable: %s' % (self.path)) 

1723 

1724 if self.vdi_type == vhdutil.VDI_TYPE_VHD:

1725 depth = vhdutil.getDepth(self.path) 

1726 if depth == -1:

1727 raise xs_errors.XenError('VDIUnavailable', \ 

1728 opterr='failed to get VHD depth') 

1729 elif depth >= vhdutil.MAX_CHAIN_SIZE:

1730 raise xs_errors.XenError('SnapshotChainTooLong') 

1731 

1732 self.issnap = self.session.xenapi.VDI.get_is_a_snapshot( \ 

1733 self.sr.srcmd.params['vdi_ref']) 

1734 

1735 fullpr = lvhdutil.calcSizeVHDLV(self.size) 

1736 thinpr = util.roundup(lvutil.LVM_SIZE_INCREMENT, \ 

1737 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE)) 

1738 lvSizeOrig = thinpr 

1739 lvSizeClon = thinpr 

1740 

1741 hostRefs = [] 

1742 if self.sr.cmd == "vdi_snapshot": 

1743 hostRefs = util.get_hosts_attached_on(self.session, [self.uuid]) 

1744 if hostRefs:

1745 lvSizeOrig = fullpr 

1746 if self.sr.provision == "thick":

1747 if not self.issnap:

1748 lvSizeOrig = fullpr 

1749 if self.sr.cmd != "vdi_snapshot": 

1750 lvSizeClon = fullpr 

1751 

1752 if (snapType == VDI.SNAPSHOT_SINGLE or

1753 snapType == VDI.SNAPSHOT_INTERNAL): 

1754 lvSizeClon = 0 

1755 

1756 # the space required must include 2 journal LVs: a clone journal and an 

1757 # inflate journal (for the failure handling 

1758 size_req = lvSizeOrig + lvSizeClon + 2 * self.sr.journaler.LV_SIZE 

1759 lvSizeBase = self.size 

1760 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 1760 ↛ 1764line 1760 didn't jump to line 1764, because the condition on line 1760 was never false

1761 lvSizeBase = util.roundup(lvutil.LVM_SIZE_INCREMENT, 

1762 vhdutil.getSizePhys(self.path)) 

1763 size_req -= (self.utilisation - lvSizeBase) 

1764 self.sr._ensureSpaceAvailable(size_req) 
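# Deflating the base copy (done further down, before the snapshot LVs are
# created) frees (utilisation - lvSizeBase) bytes, which is why that
# amount is subtracted from the space that must be available in the VG.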

1765 

1766 if hostRefs: 

1767 self.sr._updateSlavesPreClone(hostRefs, self.lvname) 

1768 

1769 baseUuid = util.gen_uuid() 

1770 origUuid = self.uuid 

1771 clonUuid = "" 

1772 if snapType == VDI.SNAPSHOT_DOUBLE:

1773 clonUuid = util.gen_uuid() 

1774 jval = "%s_%s" % (baseUuid, clonUuid) 

1775 self.sr.journaler.create(self.JRN_CLONE, origUuid, jval) 

1776 util.fistpoint.activate("LVHDRT_clone_vdi_after_create_journal", self.sr.uuid) 

1777 

1778 try: 

1779 # self becomes the "base vdi" 

1780 origOldLV = self.lvname 

1781 baseLV = lvhdutil.LV_PREFIX[self.vdi_type] + baseUuid 

1782 self.sr.lvmCache.rename(self.lvname, baseLV) 

1783 self.sr.lvActivator.replace(self.uuid, baseUuid, baseLV, False) 

1784 RefCounter.set(baseUuid, 1, 0, lvhdutil.NS_PREFIX_LVM + self.sr.uuid) 

1785 self.uuid = baseUuid 

1786 self.lvname = baseLV 

1787 self.path = os.path.join(self.sr.path, baseLV) 

1788 self.label = "base copy" 

1789 self.read_only = True 

1790 self.location = self.uuid 

1791 self.managed = False 

1792 

1793 # shrink the base copy to the minimum - we do it before creating 

1794 # the snapshot volumes to avoid requiring double the space 

1795 if self.vdi_type == vhdutil.VDI_TYPE_VHD:

1796 lvhdutil.deflate(self.sr.lvmCache, self.lvname, lvSizeBase) 

1797 self.utilisation = lvSizeBase 

1798 util.fistpoint.activate("LVHDRT_clone_vdi_after_shrink_parent", self.sr.uuid) 

1799 

1800 snapVDI = self._createSnap(origUuid, lvSizeOrig, False) 

1801 util.fistpoint.activate("LVHDRT_clone_vdi_after_first_snap", self.sr.uuid) 

1802 snapVDI2 = None 

1803 if snapType == VDI.SNAPSHOT_DOUBLE:

1804 snapVDI2 = self._createSnap(clonUuid, lvSizeClon, True) 

1805 # If we have CBT enabled on the VDI, 

1806 # set CBT status for the new snapshot disk 

1807 if cbtlog: 

1808 snapVDI2.cbt_enabled = True 

1809 util.fistpoint.activate("LVHDRT_clone_vdi_after_second_snap", self.sr.uuid) 

1810 

1811 # note: it is important to mark the parent hidden only AFTER the 

1812 # new VHD children have been created, which are referencing it; 

1813 # otherwise we would introduce a race with GC that could reclaim 

1814 # the parent before we snapshot it 

1815 if self.vdi_type == vhdutil.VDI_TYPE_RAW:

1816 self.sr.lvmCache.setHidden(self.lvname) 

1817 else: 

1818 vhdutil.setHidden(self.path) 

1819 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_hidden", self.sr.uuid) 

1820 

1821 # set the base copy to ReadOnly 

1822 self.sr.lvmCache.setReadonly(self.lvname, True) 

1823 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_ro", self.sr.uuid) 

1824 

1825 if hostRefs: 

1826 self.sr._updateSlavesOnClone(hostRefs, origOldLV, 

1827 snapVDI.lvname, self.uuid, self.lvname) 

1828 

1829 # Update CBT files if the user created a snapshot (SNAPSHOT_DOUBLE) 

1830 if snapType == VDI.SNAPSHOT_DOUBLE and cbtlog: 

1831 snapVDI._cbt_snapshot(clonUuid, cbt_consistency) 

1832 if hostRefs:

1833 cbtlog_file = self._get_cbt_logname(snapVDI.uuid) 

1834 try: 

1835 self.sr._updateSlavesOnCBTClone(hostRefs, cbtlog_file) 

1836 except: 

1837 alert_name = "VDI_CBT_SNAPSHOT_FAILED" 

1838 alert_str = ("Creating CBT snapshot for {} failed" 

1839 .format(snapVDI.uuid)) 

1840 snapVDI._disable_cbt_on_error(alert_name, alert_str) 

1841 pass 

1842 

1843 except (util.SMException, XenAPI.Failure) as e: 

1844 util.logException("LVHDVDI._snapshot") 

1845 self._failClone(origUuid, jval, str(e)) 

1846 util.fistpoint.activate("LVHDRT_clone_vdi_before_remove_journal", self.sr.uuid) 

1847 

1848 self.sr.journaler.remove(self.JRN_CLONE, origUuid) 

1849 

1850 return self._finishSnapshot(snapVDI, snapVDI2, hostRefs, cloneOp, snapType) 

1851 

1852 def _createSnap(self, snapUuid, snapSizeLV, isNew): 

1853 """Snapshot self and return the snapshot VDI object""" 

1854 snapLV = lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD] + snapUuid 

1855 snapPath = os.path.join(self.sr.path, snapLV) 

1856 self.sr.lvmCache.create(snapLV, int(snapSizeLV)) 

1857 util.fistpoint.activate("LVHDRT_clone_vdi_after_lvcreate", self.sr.uuid) 

1858 if isNew: 

1859 RefCounter.set(snapUuid, 1, 0, lvhdutil.NS_PREFIX_LVM + self.sr.uuid) 

1860 self.sr.lvActivator.add(snapUuid, snapLV, False) 

1861 parentRaw = (self.vdi_type == vhdutil.VDI_TYPE_RAW) 

1862 vhdutil.snapshot(snapPath, self.path, parentRaw, lvhdutil.MSIZE_MB) 

1863 snapParent = vhdutil.getParent(snapPath, lvhdutil.extractUuid) 

1864 

1865 snapVDI = LVHDVDI(self.sr, snapUuid) 

1866 snapVDI.read_only = False 

1867 snapVDI.location = snapUuid 

1868 snapVDI.size = self.size 

1869 snapVDI.utilisation = snapSizeLV 

1870 snapVDI.sm_config = dict() 

1871 for key, val in self.sm_config.items():

1872 if key not in [ 

1873 "type", "vdi_type", "vhd-parent", "paused", "relinking", "activating"] and \ 

1874 not key.startswith("host_"): 

1875 snapVDI.sm_config[key] = val 

1876 snapVDI.sm_config["vdi_type"] = vhdutil.VDI_TYPE_VHD 

1877 snapVDI.sm_config["vhd-parent"] = snapParent 

1878 snapVDI.lvname = snapLV 

1879 return snapVDI 

1880 

1881 def _finishSnapshot(self, snapVDI, snapVDI2, hostRefs, cloneOp=False, snapType=None): 

1882 if snapType is not VDI.SNAPSHOT_INTERNAL:

1883 self.sr._updateStats(self.sr.uuid, self.size) 

1884 basePresent = True 

1885 

1886 # Verify parent locator field of both children and delete basePath if 

1887 # unused 

1888 snapParent = snapVDI.sm_config["vhd-parent"] 

1889 snap2Parent = "" 

1890 if snapVDI2:

1891 snap2Parent = snapVDI2.sm_config["vhd-parent"] 

1892 if snapParent != self.uuid and \

1893 (not snapVDI2 or snap2Parent != self.uuid): 

1894 util.SMlog("%s != %s != %s => deleting unused base %s" % \ 

1895 (snapParent, self.uuid, snap2Parent, self.lvname)) 

1896 RefCounter.put(self.uuid, False, lvhdutil.NS_PREFIX_LVM + self.sr.uuid) 

1897 self.sr.lvmCache.remove(self.lvname) 

1898 self.sr.lvActivator.remove(self.uuid, False) 

1899 if hostRefs: 

1900 self.sr._updateSlavesOnRemove(hostRefs, self.uuid, self.lvname) 

1901 basePresent = False 

1902 else: 

1903 # assign the _binary_ refcount of the original VDI to the new base 

1904 # VDI (but as the normal refcount, since binary refcounts are only 

1905 # for leaf nodes). The normal refcount of the child is not 

1906 # transferred to the base VDI because normal refcounts are 

1907 # incremented and decremented individually, and not based on the 

1908 # VHD chain (i.e., the child's normal refcount will be decremented 

1909 # independently of its parent situation). Add 1 for this clone op. 

1910 # Note that we do not need to protect the refcount operations 

1911 # below with per-VDI locking like we do in lvutil because at this 

1912 # point we have exclusive access to the VDIs involved. Other SM 

1913 # operations are serialized by the Agent or with the SR lock, and 

1914 # any coalesce activations are serialized with the SR lock. (The 

1915 # coalesce activates the coalesced VDI pair in the beginning, which 

1916 # cannot affect the VDIs here because they cannot possibly be 

1917 # involved in coalescing at this point, and at the relinkSkip step 

1918 # that activates the children, which takes the SR lock.) 

1919 ns = lvhdutil.NS_PREFIX_LVM + self.sr.uuid 

1920 (cnt, bcnt) = RefCounter.check(snapVDI.uuid, ns) 

1921 RefCounter.set(self.uuid, bcnt + 1, 0, ns) 

1922 

1923 # the "paused" and "host_*" sm-config keys are special and must stay on 

1924 # the leaf without being inherited by anyone else 

1925 for key in [x for x in self.sm_config.keys() if x == "paused" or x.startswith("host_")]:

1926 snapVDI.sm_config[key] = self.sm_config[key] 

1927 del self.sm_config[key] 

1928 

1929 # Introduce any new VDI records & update the existing one 

1930 type = self.session.xenapi.VDI.get_type( \ 

1931 self.sr.srcmd.params['vdi_ref']) 

1932 if snapVDI2:

1933 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1) 

1934 vdiRef = snapVDI2._db_introduce() 

1935 if cloneOp: 

1936 vdi_info = {UUID_TAG: snapVDI2.uuid, 

1937 NAME_LABEL_TAG: util.to_plain_string( \ 

1938 self.session.xenapi.VDI.get_name_label( \ 

1939 self.sr.srcmd.params['vdi_ref'])), 

1940 NAME_DESCRIPTION_TAG: util.to_plain_string( \ 

1941 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])), 

1942 IS_A_SNAPSHOT_TAG: 0, 

1943 SNAPSHOT_OF_TAG: '', 

1944 SNAPSHOT_TIME_TAG: '', 

1945 TYPE_TAG: type, 

1946 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'], 

1947 READ_ONLY_TAG: 0, 

1948 MANAGED_TAG: int(snapVDI2.managed), 

1949 METADATA_OF_POOL_TAG: '' 

1950 } 

1951 else: 

1952 util.SMlog("snapshot VDI params: %s" % \ 

1953 self.session.xenapi.VDI.get_snapshot_time(vdiRef)) 

1954 vdi_info = {UUID_TAG: snapVDI2.uuid, 

1955 NAME_LABEL_TAG: util.to_plain_string( \ 

1956 self.session.xenapi.VDI.get_name_label( \ 

1957 self.sr.srcmd.params['vdi_ref'])), 

1958 NAME_DESCRIPTION_TAG: util.to_plain_string( \ 

1959 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])), 

1960 IS_A_SNAPSHOT_TAG: 1, 

1961 SNAPSHOT_OF_TAG: snapVDI.uuid, 

1962 SNAPSHOT_TIME_TAG: '', 

1963 TYPE_TAG: type, 

1964 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'], 

1965 READ_ONLY_TAG: 0, 

1966 MANAGED_TAG: int(snapVDI2.managed), 

1967 METADATA_OF_POOL_TAG: '' 

1968 } 

1969 

1970 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info) 

1971 util.SMlog("vdi_clone: introduced 2nd snap VDI: %s (%s)" % \ 

1972 (vdiRef, snapVDI2.uuid)) 

1973 

1974 if basePresent:

1975 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1) 

1976 vdiRef = self._db_introduce() 

1977 vdi_info = {UUID_TAG: self.uuid, 

1978 NAME_LABEL_TAG: self.label, 

1979 NAME_DESCRIPTION_TAG: self.description, 

1980 IS_A_SNAPSHOT_TAG: 0, 

1981 SNAPSHOT_OF_TAG: '', 

1982 SNAPSHOT_TIME_TAG: '', 

1983 TYPE_TAG: type, 

1984 VDI_TYPE_TAG: self.sm_config['vdi_type'], 

1985 READ_ONLY_TAG: 1, 

1986 MANAGED_TAG: 0, 

1987 METADATA_OF_POOL_TAG: '' 

1988 } 

1989 

1990 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info) 

1991 util.SMlog("vdi_clone: introduced base VDI: %s (%s)" % \ 

1992 (vdiRef, self.uuid)) 

1993 

1994 # Update the original record 

1995 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1996 self.session.xenapi.VDI.set_sm_config(vdi_ref, snapVDI.sm_config) 

1997 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, \ 

1998 str(snapVDI.utilisation)) 

1999 

2000 # Return the info on the new snap VDI 

2001 snap = snapVDI2 

2002 if not snap:

2003 snap = self 

2004 if not basePresent: 

2005 # a single-snapshot of an empty VDI will be a noop, resulting 

2006 # in no new VDIs, so return the existing one. The GC wouldn't 

2007 # normally try to single-snapshot an empty VHD of course, but 

2008 # if an external snapshot operation manages to sneak in right 

2009 # before a snapshot-coalesce phase, we would get here 

2010 snap = snapVDI 

2011 return snap.get_params() 

2012 

2013 def _initFromVDIInfo(self, vdiInfo): 

2014 self.vdi_type = vdiInfo.vdiType 

2015 self.lvname = vdiInfo.lvName 

2016 self.size = vdiInfo.sizeVirt 

2017 self.utilisation = vdiInfo.sizeLV 

2018 self.hidden = vdiInfo.hidden 

2019 if self.hidden:

2020 self.managed = False 

2021 self.active = vdiInfo.lvActive 

2022 self.readonly = vdiInfo.lvReadonly 

2023 self.parent = vdiInfo.parentUuid 

2024 self.path = os.path.join(self.sr.path, self.lvname) 

2025 if hasattr(self, "sm_config_override"):

2026 self.sm_config_override["vdi_type"] = self.vdi_type 

2027 else: 

2028 self.sm_config_override = {'vdi_type': self.vdi_type} 

2029 self.loaded = True 

2030 

2031 def _initFromLVInfo(self, lvInfo): 

2032 self.vdi_type = lvInfo.vdiType 

2033 self.lvname = lvInfo.name 

2034 self.size = lvInfo.size 

2035 self.utilisation = lvInfo.size 

2036 self.hidden = lvInfo.hidden 

2037 self.active = lvInfo.active 

2038 self.readonly = lvInfo.readonly 

2039 self.parent = '' 

2040 self.path = os.path.join(self.sr.path, self.lvname) 

2041 if hasattr(self, "sm_config_override"):

2042 self.sm_config_override["vdi_type"] = self.vdi_type 

2043 else: 

2044 self.sm_config_override = {'vdi_type': self.vdi_type} 

2045 if self.vdi_type == vhdutil.VDI_TYPE_RAW:

2046 self.loaded = True 

2047 

2048 def _initFromVHDInfo(self, vhdInfo): 

2049 self.size = vhdInfo.sizeVirt 

2050 self.parent = vhdInfo.parentUuid 

2051 self.hidden = vhdInfo.hidden 

2052 self.loaded = True 

2053 

2054 def _determineType(self): 

2055 """Determine whether this is a raw or a VHD VDI""" 

2056 if "vdi_ref" in self.sr.srcmd.params:

2057 vdi_ref = self.sr.srcmd.params["vdi_ref"] 

2058 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

2059 if sm_config.get("vdi_type"):

2060 self.vdi_type = sm_config["vdi_type"] 

2061 prefix = lvhdutil.LV_PREFIX[self.vdi_type] 

2062 self.lvname = "%s%s" % (prefix, self.uuid) 

2063 self.path = os.path.join(self.sr.path, self.lvname) 

2064 self.sm_config_override = sm_config 

2065 return True 

2066 

2067 # LVM commands can be costly, so check the file directly first in case 

2068 # the LV is active 

2069 found = False 

2070 for t in lvhdutil.VDI_TYPES:

2071 lvname = "%s%s" % (lvhdutil.LV_PREFIX[t], self.uuid) 

2072 path = os.path.join(self.sr.path, lvname) 

2073 if util.pathexists(path): 

2074 if found: 

2075 raise xs_errors.XenError('VDILoad', 

2076 opterr="multiple VDIs: uuid %s" % self.uuid) 

2077 found = True 

2078 self.vdi_type = t 

2079 self.lvname = lvname 

2080 self.path = path 

2081 if found:

2082 return True 

2083 

2084 # now list all LVs 

2085 if not lvutil._checkVG(self.sr.vgname):

2086 # when doing attach_from_config, the VG won't be there yet 

2087 return False 

2088 

2089 lvs = lvhdutil.getLVInfo(self.sr.lvmCache) 

2090 if lvs.get(self.uuid): 

2091 self._initFromLVInfo(lvs[self.uuid]) 

2092 return True 

2093 return False 

2094 

2095 def _loadThis(self): 

2096 """Load VDI info for this VDI and activate the LV if it's VHD. We 

2097 don't do it in VDI.load() because not all VDI operations need it.""" 

2098 if self.loaded:

2099 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 

2100 self.sr.lvActivator.activate(self.uuid, self.lvname, False) 

2101 return 

2102 try: 

2103 lvs = lvhdutil.getLVInfo(self.sr.lvmCache, self.lvname) 

2104 except util.CommandException as e: 

2105 raise xs_errors.XenError('VDIUnavailable', 

2106 opterr='%s (LV scan error)' % os.strerror(abs(e.code))) 

2107 if not lvs.get(self.uuid):

2108 raise xs_errors.XenError('VDIUnavailable', opterr='LV not found') 

2109 self._initFromLVInfo(lvs[self.uuid]) 

2110 if self.vdi_type == vhdutil.VDI_TYPE_VHD:

2111 self.sr.lvActivator.activate(self.uuid, self.lvname, False) 

2112 vhdInfo = vhdutil.getVHDInfo(self.path, lvhdutil.extractUuid, False) 

2113 if not vhdInfo:

2114 raise xs_errors.XenError('VDIUnavailable', \ 

2115 opterr='getVHDInfo failed') 

2116 self._initFromVHDInfo(vhdInfo) 

2117 self.loaded = True 

2118 

2119 def _chainSetActive(self, active, binary, persistent=False): 

2120 if binary:

2121 (count, bcount) = RefCounter.checkLocked(self.uuid, 

2122 lvhdutil.NS_PREFIX_LVM + self.sr.uuid) 

2123 if (active and bcount > 0) or (not active and bcount == 0): 

2124 return # this is a redundant activation/deactivation call 

2125 

2126 vdiList = {self.uuid: self.lvname} 

2127 if self.vdi_type == vhdutil.VDI_TYPE_VHD:

2128 vdiList = vhdutil.getParentChain(self.lvname, 

2129 lvhdutil.extractUuid, self.sr.vgname) 

2130 for uuid, lvName in vdiList.items():

2131 binaryParam = binary 

2132 if uuid != self.uuid: 

2133 binaryParam = False # binary param only applies to leaf nodes 

2134 if active: 

2135 self.sr.lvActivator.activate(uuid, lvName, binaryParam, 

2136 persistent) 

2137 else: 

2138 # just add the LVs for deactivation in the final (cleanup) 

2139 # step. The LVs must not have been activated during the current 

2140 # operation 

2141 self.sr.lvActivator.add(uuid, lvName, binaryParam) 

2142 

2143 def _failClone(self, uuid, jval, msg): 

2144 try: 

2145 self.sr._handleInterruptedCloneOp(uuid, jval, True) 

2146 self.sr.journaler.remove(self.JRN_CLONE, uuid) 

2147 except Exception as e: 

2148 util.SMlog('WARNING: failed to clean up failed snapshot: ' \ 

2149 ' %s (error ignored)' % e) 

2150 raise xs_errors.XenError('VDIClone', opterr=msg) 

2151 

2152 def _markHidden(self): 

2153 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

2154 self.sr.lvmCache.setHidden(self.lvname) 

2155 else: 

2156 vhdutil.setHidden(self.path) 

2157 self.hidden = 1 

2158 

2159 def _prepareThin(self, attach): 

2160 origUtilisation = self.sr.lvmCache.getSize(self.lvname) 

2161 if self.sr.isMaster: 

2162 # the master can prepare the VDI locally 

2163 if attach: 

2164 lvhdutil.attachThin(self.sr.journaler, self.sr.uuid, self.uuid) 

2165 else: 

2166 lvhdutil.detachThin(self.session, self.sr.lvmCache, 

2167 self.sr.uuid, self.uuid) 

2168 else: 
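# On a slave, the inflate/deflate is requested from the pool master via
# the THIN_PLUGIN host plugin; this host only triggers the operation and
# afterwards re-reads the LV to pick up the size change.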

2169 fn = "attach" 

2170 if not attach: 

2171 fn = "detach" 

2172 pools = self.session.xenapi.pool.get_all() 

2173 master = self.session.xenapi.pool.get_master(pools[0]) 

2174 rv = self.session.xenapi.host.call_plugin( 

2175 master, self.sr.THIN_PLUGIN, fn, 

2176 {"srUuid": self.sr.uuid, "vdiUuid": self.uuid}) 

2177 util.SMlog("call-plugin returned: %s" % rv) 

2178 if not rv: 

2179 raise Exception('plugin %s failed' % self.sr.THIN_PLUGIN) 

2180 # refresh to pick up the size change on this slave 

2181 self.sr.lvmCache.activateNoRefcount(self.lvname, True) 

2182 

2183 self.utilisation = self.sr.lvmCache.getSize(self.lvname) 

2184 if origUtilisation != self.utilisation: 

2185 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

2186 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, 

2187 str(self.utilisation)) 

2188 stats = lvutil._getVGstats(self.sr.vgname) 

2189 sr_utilisation = stats['physical_utilisation'] 

2190 self.session.xenapi.SR.set_physical_utilisation(self.sr.sr_ref, 

2191 str(sr_utilisation)) 

2192 

2193 @override 

2194 def update(self, sr_uuid, vdi_uuid) -> None: 

2195 if self.sr.legacyMode: 

2196 return 

2197 

2198 # Sync the name_label of this VDI on storage with the name_label in XAPI 

2199 vdi_ref = self.session.xenapi.VDI.get_by_uuid(self.uuid) 

2200 update_map = {} 

2201 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \ 

2202 METADATA_OBJECT_TYPE_VDI 

2203 update_map[UUID_TAG] = self.uuid 

2204 update_map[NAME_LABEL_TAG] = util.to_plain_string( \ 

2205 self.session.xenapi.VDI.get_name_label(vdi_ref)) 

2206 update_map[NAME_DESCRIPTION_TAG] = util.to_plain_string( \ 

2207 self.session.xenapi.VDI.get_name_description(vdi_ref)) 

2208 update_map[SNAPSHOT_TIME_TAG] = \ 

2209 self.session.xenapi.VDI.get_snapshot_time(vdi_ref) 

2210 update_map[METADATA_OF_POOL_TAG] = \ 

2211 self.session.xenapi.VDI.get_metadata_of_pool(vdi_ref) 

2212 LVMMetadataHandler(self.sr.mdpath).updateMetadata(update_map) 

2213 

2214 @override 

2215 def _ensure_cbt_space(self) -> None: 

2216 self.sr.ensureCBTSpace() 

2217 

2218 @override 

2219 def _create_cbt_log(self) -> str: 

2220 logname = self._get_cbt_logname(self.uuid) 

2221 self.sr.lvmCache.create(logname, self.sr.journaler.LV_SIZE, CBTLOG_TAG) 
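# The CBT log gets its own small LV (journal-sized, tagged with
# CBTLOG_TAG); the base class then initialises the log file and the LV is
# deactivated again until it is needed.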

2222 logpath = super(LVHDVDI, self)._create_cbt_log() 

2223 self.sr.lvmCache.deactivateNoRefcount(logname) 

2224 return logpath 

2225 

2226 @override 

2227 def _delete_cbt_log(self) -> None: 

2228 logpath = self._get_cbt_logpath(self.uuid) 

2229 if self._cbt_log_exists(logpath): 

2230 logname = self._get_cbt_logname(self.uuid) 

2231 self.sr.lvmCache.remove(logname) 

2232 

2233 @override 

2234 def _rename(self, oldpath, newpath) -> None: 

2235 oldname = os.path.basename(oldpath) 

2236 newname = os.path.basename(newpath) 

2237 self.sr.lvmCache.rename(oldname, newname) 

2238 

2239 @override 

2240 def _activate_cbt_log(self, lv_name) -> bool: 

2241 self.sr.lvmCache.refresh() 

2242 if not self.sr.lvmCache.is_active(lv_name):

2243 try: 

2244 self.sr.lvmCache.activateNoRefcount(lv_name) 

2245 return True 

2246 except Exception as e: 

2247 util.SMlog("Exception in _activate_cbt_log, " 

2248 "Error: %s." % str(e)) 

2249 raise 

2250 else: 

2251 return False 

2252 

2253 @override 

2254 def _deactivate_cbt_log(self, lv_name) -> None: 

2255 try: 

2256 self.sr.lvmCache.deactivateNoRefcount(lv_name) 

2257 except Exception as e: 

2258 util.SMlog("Exception in _deactivate_cbt_log, Error: %s." % str(e)) 

2259 raise 

2260 

2261 @override 

2262 def _cbt_log_exists(self, logpath) -> bool: 

2263 return lvutil.exists(logpath) 

2264 

2265if __name__ == '__main__':

2266 SRCommand.run(LVHDSR, DRIVER_INFO) 

2267else: 

2268 SR.registerSR(LVHDSR)