#!/usr/bin/python3
#
# Copyright (C) Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; version 2.1 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# LVHDSR: VHD on LVM storage repository
#

from sm_typing import Dict, List, override

import SR
from SR import deviceCheck
import VDI
import SRCommand
import util
import lvutil
import lvmcache
import vhdutil
import lvhdutil
import scsiutil
import os
import sys
import time
import errno
import xs_errors
import cleanup
import blktap2
from journaler import Journaler
from lock import Lock
from refcounter import RefCounter
from ipc import IPCFlag
from lvmanager import LVActivator
import XenAPI  # pylint: disable=import-error
import re
from srmetadata import ALLOCATION_TAG, NAME_LABEL_TAG, NAME_DESCRIPTION_TAG, \
    UUID_TAG, IS_A_SNAPSHOT_TAG, SNAPSHOT_OF_TAG, TYPE_TAG, VDI_TYPE_TAG, \
    READ_ONLY_TAG, MANAGED_TAG, SNAPSHOT_TIME_TAG, METADATA_OF_POOL_TAG, \
    LVMMetadataHandler, METADATA_OBJECT_TYPE_VDI, \
    METADATA_OBJECT_TYPE_SR, METADATA_UPDATE_OBJECT_TYPE_TAG
from metadata import retrieveXMLfromFile, _parseXML
from xmlrpc.client import DateTime
import glob
from constants import CBTLOG_TAG
from fairlock import Fairlock

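# Root under which the device-mapper entries for the LVHD volume groups appear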
DEV_MAPPER_ROOT = os.path.join('/dev/mapper', lvhdutil.VG_PREFIX)

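# geneology maps a parent VDI UUID to the UUIDs of its children; it is
# populated by _loadvdis() during a scan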
geneology: Dict[str, List[str]] = {}
CAPABILITIES = ["SR_PROBE", "SR_UPDATE", "SR_TRIM",
        "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH", "VDI_MIRROR",
        "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "ATOMIC_PAUSE",
        "VDI_RESET_ON_BOOT/2", "VDI_UPDATE", "VDI_CONFIG_CBT",
        "VDI_ACTIVATE", "VDI_DEACTIVATE"]

CONFIGURATION = [['device', 'local device path (required) (e.g. /dev/sda3)']]

DRIVER_INFO = {
    'name': 'Local VHD on LVM',
    'description': 'SR plugin which represents disks as VHD disks on ' + \
            'Logical Volumes within a locally-attached Volume Group',
    'vendor': 'XenSource Inc',
    'copyright': '(C) 2008 XenSource Inc',
    'driver_version': '1.0',
    'required_api_version': '1.0',
    'capabilities': CAPABILITIES,
    'configuration': CONFIGURATION
    }

PARAM_VHD = "vhd"
PARAM_RAW = "raw"

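# SR operations that the backend serializes against all other operations
# on this SR (installed as self.ops_exclusive in load())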
OPS_EXCLUSIVE = [
        "sr_create", "sr_delete", "sr_attach", "sr_detach", "sr_scan",
        "sr_update", "vdi_create", "vdi_delete", "vdi_resize", "vdi_snapshot",
        "vdi_clone"]

# Log if snapshot pauses VM for more than this many seconds
LONG_SNAPTIME = 60

class LVHDSR(SR.SR):
    DRIVER_TYPE = 'lvhd'

    PROVISIONING_TYPES = ["thin", "thick"]
    PROVISIONING_DEFAULT = "thick"
    THIN_PLUGIN = "lvhd-thin"

    PLUGIN_ON_SLAVE = "on-slave"

    FLAG_USE_VHD = "use_vhd"
    MDVOLUME_NAME = "MGT"

    ALLOCATION_QUANTUM = "allocation_quantum"
    INITIAL_ALLOCATION = "initial_allocation"

    LOCK_RETRY_INTERVAL = 3
    LOCK_RETRY_ATTEMPTS = 10

    TEST_MODE_KEY = "testmode"
    TEST_MODE_VHD_FAIL_REPARENT_BEGIN = "vhd_fail_reparent_begin"
    TEST_MODE_VHD_FAIL_REPARENT_LOCATOR = "vhd_fail_reparent_locator"
    TEST_MODE_VHD_FAIL_REPARENT_END = "vhd_fail_reparent_end"
    TEST_MODE_VHD_FAIL_RESIZE_BEGIN = "vhd_fail_resize_begin"
    TEST_MODE_VHD_FAIL_RESIZE_DATA = "vhd_fail_resize_data"
    TEST_MODE_VHD_FAIL_RESIZE_METADATA = "vhd_fail_resize_metadata"
    TEST_MODE_VHD_FAIL_RESIZE_END = "vhd_fail_resize_end"

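    # Maps a testmode name (set in the SR's other-config under "testmode")
    # to the environment variable that makes vhd-util inject the matching
    # failure; see _prepareTestMode()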
    ENV_VAR_VHD_TEST = {
            TEST_MODE_VHD_FAIL_REPARENT_BEGIN:
                "VHD_UTIL_TEST_FAIL_REPARENT_BEGIN",
            TEST_MODE_VHD_FAIL_REPARENT_LOCATOR:
                "VHD_UTIL_TEST_FAIL_REPARENT_LOCATOR",
            TEST_MODE_VHD_FAIL_REPARENT_END:
                "VHD_UTIL_TEST_FAIL_REPARENT_END",
            TEST_MODE_VHD_FAIL_RESIZE_BEGIN:
                "VHD_UTIL_TEST_FAIL_RESIZE_BEGIN",
            TEST_MODE_VHD_FAIL_RESIZE_DATA:
                "VHD_UTIL_TEST_FAIL_RESIZE_DATA_MOVED",
            TEST_MODE_VHD_FAIL_RESIZE_METADATA:
                "VHD_UTIL_TEST_FAIL_RESIZE_METADATA_MOVED",
            TEST_MODE_VHD_FAIL_RESIZE_END:
                "VHD_UTIL_TEST_FAIL_RESIZE_END"
    }
    testMode = ""

    legacyMode = True

    @override
    @staticmethod
    def handles(type) -> bool:
        """Returns True if this SR class understands the given dconf string"""
        # we can pose as LVMSR or EXTSR for compatibility purposes
        if __name__ == '__main__':
            name = sys.argv[0]
        else:
            name = __name__
        if name.endswith("LVMSR"):
            return type == "lvm"
        elif name.endswith("EXTSR"):
            return type == "ext"
        return type == LVHDSR.DRIVER_TYPE

    @override
    def load(self, sr_uuid) -> None:
        self.ops_exclusive = OPS_EXCLUSIVE

        self.isMaster = False
        if 'SRmaster' in self.dconf and self.dconf['SRmaster'] == 'true':
            self.isMaster = True

        self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
        self.sr_vditype = SR.DEFAULT_TAP
        self.uuid = sr_uuid
        self.vgname = lvhdutil.VG_PREFIX + self.uuid
        self.path = os.path.join(lvhdutil.VG_LOCATION, self.vgname)
        self.mdpath = os.path.join(self.path, self.MDVOLUME_NAME)
        self.provision = self.PROVISIONING_DEFAULT

        has_sr_ref = self.srcmd.params.get("sr_ref")
        if has_sr_ref:
            self.other_conf = self.session.xenapi.SR.get_other_config(self.sr_ref)
        else:
            self.other_conf = None

        self.lvm_conf = None
        if self.other_conf:
            self.lvm_conf = self.other_conf.get('lvm-conf')

        try:
            self.lvmCache = lvmcache.LVMCache(self.vgname, self.lvm_conf)
        except:
            raise xs_errors.XenError('SRUnavailable', \
                    opterr='Failed to initialise the LVMCache')
        self.lvActivator = LVActivator(self.uuid, self.lvmCache)
        self.journaler = Journaler(self.lvmCache)
        if not has_sr_ref:
            return  # must be a probe call
        # Test for thick vs thin provisioning conf parameter
        if 'allocation' in self.dconf:
            if self.dconf['allocation'] in self.PROVISIONING_TYPES:
                self.provision = self.dconf['allocation']
            else:
                raise xs_errors.XenError('InvalidArg', \
                        opterr='Allocation parameter must be one of %s' % self.PROVISIONING_TYPES)

        if self.other_conf.get(self.TEST_MODE_KEY):
            self.testMode = self.other_conf[self.TEST_MODE_KEY]
            self._prepareTestMode()

        self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref)
        # sm_config flag overrides PBD, if any
        if self.sm_config.get('allocation') in self.PROVISIONING_TYPES:
            self.provision = self.sm_config.get('allocation')

        if self.sm_config.get(self.FLAG_USE_VHD) == "true":
            self.legacyMode = False

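        # If the VG is present, replay any interrupted journaled operations
        # (master only) and check the metadata volume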
        if lvutil._checkVG(self.vgname):
            if self.isMaster and not self.cmd in ["vdi_attach", "vdi_detach",
                    "vdi_activate", "vdi_deactivate"]:
                self._undoAllJournals()
            if not self.cmd in ["sr_attach", "sr_probe"]:
                self._checkMetadataVolume()

        self.mdexists = False

        # get a VDI -> TYPE map from the storage
        contains_uuid_regex = \
            re.compile("^.*[0-9a-f]{8}-(([0-9a-f]{4})-){3}[0-9a-f]{12}.*")
        self.storageVDIs = {}

        for key in self.lvmCache.lvs.keys():
            # if the lvname has a uuid in it
            type = None
            vdi = None
            if contains_uuid_regex.search(key) is not None:
                if key.startswith(lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD]):
                    type = vhdutil.VDI_TYPE_VHD
                    vdi = key[len(lvhdutil.LV_PREFIX[type]):]
                elif key.startswith(lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_RAW]):
                    type = vhdutil.VDI_TYPE_RAW
                    vdi = key[len(lvhdutil.LV_PREFIX[type]):]
                else:
                    continue

                if type is not None:
                    self.storageVDIs[vdi] = type

        # check if metadata volume exists
        try:
            self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME)
        except:
            pass

    @override
    def cleanup(self) -> None:
        # we don't need to hold the lock to dec refcounts of activated LVs
        if not self.lvActivator.deactivateAll():
            raise util.SMException("failed to deactivate LVs")

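    # Write the SR-level information and one entry per VDI (both pulled from
    # XAPI) into the MGT metadata volume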
    def updateSRMetadata(self, allocation):
        try:
            # Add SR specific SR metadata
            sr_info = \
                {ALLOCATION_TAG: allocation,
                 UUID_TAG: self.uuid,
                 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_label(self.sr_ref)),
                 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_description(self.sr_ref))
                 }

            vdi_info = {}
            for vdi in self.session.xenapi.SR.get_VDIs(self.sr_ref):
                vdi_uuid = self.session.xenapi.VDI.get_uuid(vdi)

                # Create the VDI entry in the SR metadata
                vdi_info[vdi_uuid] = \
                    {
                     UUID_TAG: vdi_uuid,
                     NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi)),
                     NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi)),
                     IS_A_SNAPSHOT_TAG: \
                         int(self.session.xenapi.VDI.get_is_a_snapshot(vdi)),
                     SNAPSHOT_OF_TAG: \
                         self.session.xenapi.VDI.get_snapshot_of(vdi),
                     SNAPSHOT_TIME_TAG: \
                         self.session.xenapi.VDI.get_snapshot_time(vdi),
                     TYPE_TAG: \
                         self.session.xenapi.VDI.get_type(vdi),
                     VDI_TYPE_TAG: \
                         self.session.xenapi.VDI.get_sm_config(vdi)['vdi_type'],
                     READ_ONLY_TAG: \
                         int(self.session.xenapi.VDI.get_read_only(vdi)),
                     METADATA_OF_POOL_TAG: \
                         self.session.xenapi.VDI.get_metadata_of_pool(vdi),
                     MANAGED_TAG: \
                         int(self.session.xenapi.VDI.get_managed(vdi))
                    }
            LVMMetadataHandler(self.mdpath).writeMetadata(sr_info, vdi_info)

        except Exception as e:
            raise xs_errors.XenError('MetadataError', \
                    opterr='Error upgrading SR Metadata: %s' % str(e))

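    # Reconcile the MGT metadata with what is actually on the storage; the
    # storage is authoritative for both VDI existence and vdi_type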
    def syncMetadataAndStorage(self):
        try:
            # if a VDI is present in the metadata but not in the storage
            # then delete it from the metadata
            vdi_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1]
            for vdi in list(vdi_info.keys()):
                update_map = {}
                if not vdi_info[vdi][UUID_TAG] in set(self.storageVDIs.keys()):
                    # delete this from metadata
                    LVMMetadataHandler(self.mdpath). \
                        deleteVdiFromMetadata(vdi_info[vdi][UUID_TAG])
                else:
                    # search for this in the metadata, compare types
                    # self.storageVDIs is a map of vdi_uuid to vdi_type
                    if vdi_info[vdi][VDI_TYPE_TAG] != \
                            self.storageVDIs[vdi_info[vdi][UUID_TAG]]:
                        # storage type takes authority
                        update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] \
                            = METADATA_OBJECT_TYPE_VDI
                        update_map[UUID_TAG] = vdi_info[vdi][UUID_TAG]
                        update_map[VDI_TYPE_TAG] = \
                            self.storageVDIs[vdi_info[vdi][UUID_TAG]]
                        LVMMetadataHandler(self.mdpath) \
                            .updateMetadata(update_map)
                    else:
                        # This should never happen
                        pass

        except Exception as e:
            raise xs_errors.XenError('MetadataError', \
                    opterr='Error synching SR Metadata and storage: %s' % str(e))

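    # Push name_label/name_description changes from XAPI down into the MGT
    # metadata for every VDI that XAPI knows about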
    def syncMetadataAndXapi(self):
        try:
            # get metadata
            (sr_info, vdi_info) = \
                LVMMetadataHandler(self.mdpath, False).getMetadata()

            # First synch SR parameters
            self.update(self.uuid)

            # Now update the VDI information in the metadata if required
            for vdi_offset in vdi_info.keys():
                try:
                    vdi_ref = \
                        self.session.xenapi.VDI.get_by_uuid( \
                            vdi_info[vdi_offset][UUID_TAG])
                except:
                    # maybe the VDI is not in XAPI yet, don't bother
                    continue

                new_name_label = util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi_ref))
                new_name_description = util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi_ref))

                if vdi_info[vdi_offset][NAME_LABEL_TAG] != new_name_label or \
                        vdi_info[vdi_offset][NAME_DESCRIPTION_TAG] != \
                        new_name_description:
                    update_map = {}
                    update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \
                        METADATA_OBJECT_TYPE_VDI
                    update_map[UUID_TAG] = vdi_info[vdi_offset][UUID_TAG]
                    update_map[NAME_LABEL_TAG] = new_name_label
                    update_map[NAME_DESCRIPTION_TAG] = new_name_description
                    LVMMetadataHandler(self.mdpath) \
                        .updateMetadata(update_map)
        except Exception as e:
            raise xs_errors.XenError('MetadataError', \
                    opterr='Error synching SR Metadata and XAPI: %s' % str(e))

    def _checkMetadataVolume(self):
        util.SMlog("Entering _checkMetadataVolume")
        self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME)
        if self.isMaster:
            if self.mdexists and self.cmd == "sr_attach":
                try:
                    # activate the management volume
                    # will be deactivated at detach time
                    self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME)
                    self._synchSmConfigWithMetaData()
                    util.SMlog("Sync SR metadata and the state on the storage.")
                    self.syncMetadataAndStorage()
                    self.syncMetadataAndXapi()
                except Exception as e:
                    util.SMlog("Exception in _checkMetadataVolume, " \
                               "Error: %s." % str(e))
            elif not self.mdexists and not self.legacyMode:
                self._introduceMetaDataVolume()

        if self.mdexists:
            self.legacyMode = False

    def _synchSmConfigWithMetaData(self):
        util.SMlog("Synching sm-config with metadata volume")

        try:
            # get SR info from metadata
            sr_info = {}
            map = {}
            sr_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[0]

            if sr_info == {}:
                raise Exception("Failed to get SR information from metadata.")

            if "allocation" in sr_info:
                self.provision = sr_info.get("allocation")
                map['allocation'] = sr_info.get("allocation")
            else:
                raise Exception("Allocation key not found in SR metadata. "
                                "SR info found: %s" % sr_info)

        except Exception as e:
            raise xs_errors.XenError(
                'MetadataError',
                opterr='Error reading SR params from '
                'metadata Volume: %s' % str(e))
        try:
            map[self.FLAG_USE_VHD] = 'true'
            self.session.xenapi.SR.set_sm_config(self.sr_ref, map)
        except:
            raise xs_errors.XenError(
                'MetadataError',
                opterr='Error updating sm_config key')

    def _introduceMetaDataVolume(self):
        util.SMlog("Creating Metadata volume")
        try:
            config = {}
            self.lvmCache.create(self.MDVOLUME_NAME, 4 * 1024 * 1024)

            # activate the management volume, will be deactivated at detach time
            self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME)

            name_label = util.to_plain_string( \
                self.session.xenapi.SR.get_name_label(self.sr_ref))
            name_description = util.to_plain_string( \
                self.session.xenapi.SR.get_name_description(self.sr_ref))
            config[self.FLAG_USE_VHD] = "true"
            config['allocation'] = self.provision
            self.session.xenapi.SR.set_sm_config(self.sr_ref, config)

            # Add the SR metadata
            self.updateSRMetadata(self.provision)
        except Exception as e:
            raise xs_errors.XenError('MetadataError', \
                    opterr='Error introducing Metadata Volume: %s' % str(e))

    def _removeMetadataVolume(self):
        if self.mdexists:
            try:
                self.lvmCache.remove(self.MDVOLUME_NAME)
            except:
                raise xs_errors.XenError('MetadataError', \
                        opterr='Failed to delete MGT Volume')

    def _refresh_size(self):
        """
        Refreshes the size of the backing device.
        Returns True if all paths/devices agree on the same size.
        """
        if hasattr(self, 'SCSIid'):
            # LVHDoHBASR, LVHDoISCSISR
            return scsiutil.refresh_lun_size_by_SCSIid(getattr(self, 'SCSIid'))
        else:
            # LVHDSR
            devices = self.dconf['device'].split(',')
            scsiutil.refreshdev(devices)
            return True

    def _expand_size(self):
        """
        Expands the size of the SR by growing into additional available
        space, if extra space is available on the backing device.
        Needs to be called after a successful call of _refresh_size.
        """
        currentvgsize = lvutil._getVGstats(self.vgname)['physical_size']
        # We are comparing PV- with VG-sizes that are aligned. Need a threshold
        resizethreshold = 100 * 1024 * 1024  # 100MB
        devices = self.dconf['device'].split(',')
        totaldevicesize = 0
        for device in devices:
            totaldevicesize = totaldevicesize + scsiutil.getsize(device)
        if totaldevicesize >= (currentvgsize + resizethreshold):
            try:
                if hasattr(self, 'SCSIid'):
                    # LVHDoHBASR, LVHDoISCSISR might have slaves
                    scsiutil.refresh_lun_size_by_SCSIid_on_slaves(self.session,
                            getattr(self, 'SCSIid'))
                util.SMlog("LVHDSR._expand_size for %s will resize the pv." %
                           self.uuid)
                for pv in lvutil.get_pv_for_vg(self.vgname):
                    lvutil.resizePV(pv)
            except:
                util.logException("LVHDSR._expand_size for %s failed to resize"
                                  " the PV" % self.uuid)

    @override
    @deviceCheck
    def create(self, uuid, size) -> None:
        util.SMlog("LVHDSR.create for %s" % self.uuid)
        if not self.isMaster:
            util.SMlog('sr_create blocked for non-master')
            raise xs_errors.XenError('LVMMaster')

        if lvutil._checkVG(self.vgname):
            raise xs_errors.XenError('SRExists')

        # Check that none of the devices is already in use by another PBD
        if util.test_hostPBD_devs(self.session, uuid, self.dconf['device']):
            raise xs_errors.XenError('SRInUse')

        # Check serial number entry in SR records
        for dev in self.dconf['device'].split(','):
            if util.test_scsiserial(self.session, dev):
                raise xs_errors.XenError('SRInUse')

        lvutil.createVG(self.dconf['device'], self.vgname)

        # Update serial number string
        scsiutil.add_serial_record(self.session, self.sr_ref, \
                scsiutil.devlist_to_serialstring(self.dconf['device'].split(',')))

        # since this is an SR.create turn off legacy mode
        self.session.xenapi.SR.add_to_sm_config(self.sr_ref, \
                self.FLAG_USE_VHD, 'true')

    @override
    def delete(self, uuid) -> None:
        util.SMlog("LVHDSR.delete for %s" % self.uuid)
        if not self.isMaster:
            raise xs_errors.XenError('LVMMaster')
        cleanup.gc_force(self.session, self.uuid)

        success = True
        for fileName in glob.glob(DEV_MAPPER_ROOT + '*'):
            if util.extractSRFromDevMapper(fileName) != self.uuid:
                continue

            if util.doesFileHaveOpenHandles(fileName):
                util.SMlog("LVHDSR.delete: The dev mapper entry %s has open " \
                           "handles" % fileName)
                success = False
                continue

            # Now attempt to remove the dev mapper entry
            if not lvutil.removeDevMapperEntry(fileName, False):
                success = False
                continue

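            # Device-mapper names escape '-' in the VG/LV name by doubling it;
            # undo that escaping to recover the LV name, then remove the
            # corresponding symlink from the VG directory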
            try:
                lvname = os.path.basename(fileName.replace('-', '/'). \
                        replace('//', '-'))
                lpath = os.path.join(self.path, lvname)
                os.unlink(lpath)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    util.SMlog("LVHDSR.delete: failed to remove the symlink for " \
                               "file %s. Error: %s" % (fileName, str(e)))
                    success = False

        if success:
            try:
                if util.pathexists(self.path):
                    os.rmdir(self.path)
            except Exception as e:
                util.SMlog("LVHDSR.delete: failed to remove the symlink " \
                           "directory %s. Error: %s" % (self.path, str(e)))
                success = False

        self._removeMetadataVolume()
        self.lvmCache.refresh()
        if len(lvhdutil.getLVInfo(self.lvmCache)) > 0:
            raise xs_errors.XenError('SRNotEmpty')

        if not success:
            raise Exception("LVHDSR delete failed, please refer to the log " \
                            "for details.")

        lvutil.removeVG(self.dconf['device'], self.vgname)
        self._cleanup()

    @override
    def attach(self, uuid) -> None:
        util.SMlog("LVHDSR.attach for %s" % self.uuid)

        self._cleanup(True)  # in case of host crashes, if detach wasn't called

        if not util.match_uuid(self.uuid) or not lvutil._checkVG(self.vgname):
            raise xs_errors.XenError('SRUnavailable', \
                    opterr='no such volume group: %s' % self.vgname)

        # Refresh the metadata status
        self._checkMetadataVolume()

        refreshsizeok = self._refresh_size()

        if self.isMaster:
            if refreshsizeok:
                self._expand_size()

            # Update SCSIid string
            util.SMlog("Calling devlist_to_serial")
            scsiutil.add_serial_record(
                self.session, self.sr_ref,
                scsiutil.devlist_to_serialstring(self.dconf['device'].split(',')))

        # Test Legacy Mode Flag and update if VHD volumes exist
        if self.isMaster and self.legacyMode:
            vdiInfo = lvhdutil.getVDIInfo(self.lvmCache)
            for uuid, info in vdiInfo.items():
                if info.vdiType == vhdutil.VDI_TYPE_VHD:
                    self.legacyMode = False
                    map = self.session.xenapi.SR.get_sm_config(self.sr_ref)
                    self._introduceMetaDataVolume()
                    break

        # Set the block scheduler
        for dev in self.dconf['device'].split(','):
            self.block_setscheduler(dev)

    @override
    def detach(self, uuid) -> None:
        util.SMlog("LVHDSR.detach for %s" % self.uuid)
        cleanup.abort(self.uuid)

        # Do a best effort cleanup of the dev mapper entries
        # go through all devmapper entries for this VG
        success = True
        for fileName in glob.glob(DEV_MAPPER_ROOT + '*'):
            if util.extractSRFromDevMapper(fileName) != self.uuid:
                continue

            with Fairlock('devicemapper'):
                # check if any file has open handles
                if util.doesFileHaveOpenHandles(fileName):
                    # if yes, log this and signal failure
                    util.SMlog(
                        f"LVHDSR.detach: The dev mapper entry {fileName} has "
                        "open handles")
                    success = False
                    continue

            # Now attempt to remove the dev mapper entry
            if not lvutil.removeDevMapperEntry(fileName, False):
                success = False
                continue

            # also remove the symlinks from /dev/VG-XenStorage-SRUUID/*
            try:
                lvname = os.path.basename(fileName.replace('-', '/'). \
                        replace('//', '-'))
                lvname = os.path.join(self.path, lvname)
                util.force_unlink(lvname)
            except Exception as e:
                util.SMlog("LVHDSR.detach: failed to remove the symlink for " \
                           "file %s. Error: %s" % (fileName, str(e)))
                success = False

        # now remove the directory where the symlinks are
        # this should pass as the directory should be empty by now
        if success:
            try:
                if util.pathexists(self.path):
                    os.rmdir(self.path)
            except Exception as e:
                util.SMlog("LVHDSR.detach: failed to remove the symlink " \
                           "directory %s. Error: %s" % (self.path, str(e)))
                success = False

        if not success:
            raise Exception("SR detach failed, please refer to the log " \
                            "for details.")

        # Don't delete lock files on the master as it will break the locking
        # between SM and any GC thread that survives through SR.detach.
        # However, we should still delete lock files on slaves as it is the
        # only place to do so.
        self._cleanup(self.isMaster)

    @override
    def forget_vdi(self, uuid) -> None:
        if not self.legacyMode:
            LVMMetadataHandler(self.mdpath).deleteVdiFromMetadata(uuid)
        super(LVHDSR, self).forget_vdi(uuid)

    @override
    def scan(self, uuid) -> None:
        activated_lvs = set()
        try:
            util.SMlog("LVHDSR.scan for %s" % self.uuid)
            if not self.isMaster:
                util.SMlog('sr_scan blocked for non-master')
                raise xs_errors.XenError('LVMMaster')

            if self._refresh_size():
                self._expand_size()
            self.lvmCache.refresh()
            cbt_vdis = self.lvmCache.getTagged(CBTLOG_TAG)
            self._loadvdis()
            stats = lvutil._getVGstats(self.vgname)
            self.physical_size = stats['physical_size']
            self.physical_utilisation = stats['physical_utilisation']

            # Now check if there are any VDIs in the metadata, which are not in
            # XAPI
            if self.mdexists:
                vdiToSnaps: Dict[str, List[str]] = {}
                # get VDIs from XAPI
                vdis = self.session.xenapi.SR.get_VDIs(self.sr_ref)
                vdi_uuids = set([])
                for vdi in vdis:
                    vdi_uuids.add(self.session.xenapi.VDI.get_uuid(vdi))

                info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1]

                for vdi in list(info.keys()):
                    vdi_uuid = info[vdi][UUID_TAG]
                    if bool(int(info[vdi][IS_A_SNAPSHOT_TAG])):
                        if info[vdi][SNAPSHOT_OF_TAG] in vdiToSnaps:
                            vdiToSnaps[info[vdi][SNAPSHOT_OF_TAG]].append(vdi_uuid)
                        else:
                            vdiToSnaps[info[vdi][SNAPSHOT_OF_TAG]] = [vdi_uuid]

                    if vdi_uuid not in vdi_uuids:
                        util.SMlog("Introduce VDI %s as it is present in " \
                                   "metadata and not in XAPI." % vdi_uuid)
                        sm_config = {}
                        sm_config['vdi_type'] = info[vdi][VDI_TYPE_TAG]
                        lvname = "%s%s" % \
                            (lvhdutil.LV_PREFIX[sm_config['vdi_type']], vdi_uuid)
                        self.lvActivator.activate(
                            vdi_uuid, lvname, LVActivator.NORMAL)
                        activated_lvs.add(vdi_uuid)
                        lvPath = os.path.join(self.path, lvname)

                        if info[vdi][VDI_TYPE_TAG] == vhdutil.VDI_TYPE_RAW:
                            size = self.lvmCache.getSize( \
                                lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_RAW] + \
                                vdi_uuid)
                            utilisation = \
                                util.roundup(lvutil.LVM_SIZE_INCREMENT,
                                             int(size))
                        else:
                            parent = \
                                vhdutil._getVHDParentNoCheck(lvPath)

                            if parent is not None:
                                sm_config['vhd-parent'] = parent[len( \
                                    lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD]):]
                            size = vhdutil.getSizeVirt(lvPath)
                            if self.provision == "thin":
                                utilisation = \
                                    util.roundup(lvutil.LVM_SIZE_INCREMENT,
                                        vhdutil.calcOverheadEmpty(lvhdutil.MSIZE))
                            else:
                                utilisation = lvhdutil.calcSizeVHDLV(int(size))

                        vdi_ref = self.session.xenapi.VDI.db_introduce(
                                        vdi_uuid,
                                        info[vdi][NAME_LABEL_TAG],
                                        info[vdi][NAME_DESCRIPTION_TAG],
                                        self.sr_ref,
                                        info[vdi][TYPE_TAG],
                                        False,
                                        bool(int(info[vdi][READ_ONLY_TAG])),
                                        {},
                                        vdi_uuid,
                                        {},
                                        sm_config)

                        self.session.xenapi.VDI.set_managed(vdi_ref,
                                bool(int(info[vdi][MANAGED_TAG])))
                        self.session.xenapi.VDI.set_virtual_size(vdi_ref,
                                str(size))
                        self.session.xenapi.VDI.set_physical_utilisation( \
                            vdi_ref, str(utilisation))
                        self.session.xenapi.VDI.set_is_a_snapshot( \
                            vdi_ref, bool(int(info[vdi][IS_A_SNAPSHOT_TAG])))
                        if bool(int(info[vdi][IS_A_SNAPSHOT_TAG])):
                            self.session.xenapi.VDI.set_snapshot_time( \
                                vdi_ref, DateTime(info[vdi][SNAPSHOT_TIME_TAG]))
                        if info[vdi][TYPE_TAG] == 'metadata':
                            self.session.xenapi.VDI.set_metadata_of_pool( \
                                vdi_ref, info[vdi][METADATA_OF_POOL_TAG])

                    # Update CBT status of disks either just added
                    # or already in XAPI
                    cbt_logname = "%s.%s" % (vdi_uuid, CBTLOG_TAG)
                    if cbt_logname in cbt_vdis:
                        vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
                        self.session.xenapi.VDI.set_cbt_enabled(vdi_ref, True)
                        # For existing VDIs, update local state too
                        # Scan in base class SR updates existing VDIs
                        # again based on local states
                        if vdi_uuid in self.vdis:
                            self.vdis[vdi_uuid].cbt_enabled = True
                        cbt_vdis.remove(cbt_logname)

                # Now set the snapshot statuses correctly in XAPI
                for srcvdi in vdiToSnaps.keys():
                    try:
                        srcref = self.session.xenapi.VDI.get_by_uuid(srcvdi)
                    except:
                        # the source VDI no longer exists, continue
                        continue

                    for snapvdi in vdiToSnaps[srcvdi]:
                        try:
                            # this might fail in cases where it's already set
                            snapref = \
                                self.session.xenapi.VDI.get_by_uuid(snapvdi)
                            self.session.xenapi.VDI.set_snapshot_of(snapref, srcref)
                        except Exception as e:
                            util.SMlog("Setting snapshot failed. " \
                                       "Error: %s" % str(e))

            if cbt_vdis:
                # If we have items remaining in this list,
                # they are cbt_metadata VDI that XAPI doesn't know about
                # Add them to self.vdis and they'll get added to the DB
                for cbt_vdi in cbt_vdis:
                    cbt_uuid = cbt_vdi.split(".")[0]
                    new_vdi = self.vdi(cbt_uuid)
                    new_vdi.ty = "cbt_metadata"
                    new_vdi.cbt_enabled = True
                    self.vdis[cbt_uuid] = new_vdi

            super(LVHDSR, self).scan(uuid)
            self._kickGC()

        finally:
            for vdi in activated_lvs:
                self.lvActivator.deactivate(
                    vdi, LVActivator.NORMAL, False)

    @override
    def update(self, uuid) -> None:
        if not lvutil._checkVG(self.vgname):
            return
        self._updateStats(uuid, 0)

        if self.legacyMode:
            return

        # synch name_label in metadata with XAPI
        update_map = {METADATA_UPDATE_OBJECT_TYPE_TAG: \
                          METADATA_OBJECT_TYPE_SR,
                      NAME_LABEL_TAG: util.to_plain_string( \
                          self.session.xenapi.SR.get_name_label(self.sr_ref)),
                      NAME_DESCRIPTION_TAG: util.to_plain_string( \
                          self.session.xenapi.SR.get_name_description(self.sr_ref))
                      }
        LVMMetadataHandler(self.mdpath).updateMetadata(update_map)

    def _updateStats(self, uuid, virtAllocDelta):
        valloc = int(self.session.xenapi.SR.get_virtual_allocation(self.sr_ref))
        self.virtual_allocation = valloc + virtAllocDelta
        util.SMlog("Setting virtual_allocation of SR %s to %d" %
                   (uuid, self.virtual_allocation))
        stats = lvutil._getVGstats(self.vgname)
        self.physical_size = stats['physical_size']
        self.physical_utilisation = stats['physical_utilisation']
        self._db_update()

    @override
    @deviceCheck
    def probe(self) -> str:
        return lvutil.srlist_toxml(
                lvutil.scan_srlist(lvhdutil.VG_PREFIX, self.dconf['device']),
                lvhdutil.VG_PREFIX,
                ('metadata' in self.srcmd.params['sr_sm_config'] and \
                 self.srcmd.params['sr_sm_config']['metadata'] == 'true'))

    @override
    def vdi(self, uuid) -> VDI.VDI:
        return LVHDVDI(self, uuid)

    def _loadvdis(self):
        self.virtual_allocation = 0
        self.vdiInfo = lvhdutil.getVDIInfo(self.lvmCache)
        self.allVDIs = {}

        for uuid, info in self.vdiInfo.items():
            if uuid.startswith(cleanup.SR.TMP_RENAME_PREFIX):
                continue
            if info.scanError:
                raise xs_errors.XenError('VDIUnavailable', \
                        opterr='Error scanning VDI %s' % uuid)
            self.vdis[uuid] = self.allVDIs[uuid] = self.vdi(uuid)
            if not self.vdis[uuid].hidden:
                self.virtual_allocation += self.vdis[uuid].utilisation

        for uuid, vdi in self.vdis.items():
            if vdi.parent:
                if vdi.parent in self.vdis:
                    self.vdis[vdi.parent].read_only = True
                if vdi.parent in geneology:
                    geneology[vdi.parent].append(uuid)
                else:
                    geneology[vdi.parent] = [uuid]

        # Now remove all hidden leaf nodes to avoid introducing records that
        # will be GC'ed
        for uuid in list(self.vdis.keys()):
            if uuid not in geneology and self.vdis[uuid].hidden:
                util.SMlog("Scan found hidden leaf (%s), ignoring" % uuid)
                del self.vdis[uuid]

    def _ensureSpaceAvailable(self, amount_needed):
        space_available = lvutil._getVGstats(self.vgname)['freespace']
        if (space_available < amount_needed):
            util.SMlog("Not enough space! free space: %d, need: %d" % \
                       (space_available, amount_needed))
            raise xs_errors.XenError('SRNoSpace')

    def _handleInterruptedCloneOps(self):
        entries = self.journaler.getAll(LVHDVDI.JRN_CLONE)
        for uuid, val in entries.items():
            util.fistpoint.activate("LVHDRT_clone_vdi_before_undo_clone", self.uuid)
            self._handleInterruptedCloneOp(uuid, val)
            util.fistpoint.activate("LVHDRT_clone_vdi_after_undo_clone", self.uuid)
            self.journaler.remove(LVHDVDI.JRN_CLONE, uuid)

    def _handleInterruptedCoalesceLeaf(self):
        entries = self.journaler.getAll(cleanup.VDI.JRN_LEAF)
        if len(entries) > 0:
            util.SMlog("*** INTERRUPTED COALESCE-LEAF OP DETECTED ***")
            cleanup.gc_force(self.session, self.uuid)
            self.lvmCache.refresh()

    def _handleInterruptedCloneOp(self, origUuid, jval, forceUndo=False):
        """Either roll back or finalize the interrupted snapshot/clone
        operation. Rolling back is unsafe if the leaf VHDs have already been
        in use and written to. However, it is always safe to roll back while
        we're still in the context of the failed snapshot operation since the
        VBD is paused for the duration of the operation"""
        util.SMlog("*** INTERRUPTED CLONE OP: for %s (%s)" % (origUuid, jval))
        lvs = lvhdutil.getLVInfo(self.lvmCache)
        baseUuid, clonUuid = jval.split("_")

        # is there a "base copy" VDI?
        if not lvs.get(baseUuid):
            # no base copy: make sure the original is there
            if lvs.get(origUuid):
                util.SMlog("*** INTERRUPTED CLONE OP: nothing to do")
                return
            raise util.SMException("base copy %s not present, " \
                    "but no original %s found" % (baseUuid, origUuid))

        if forceUndo:
            util.SMlog("Explicit revert")
            self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
            return

        if not lvs.get(origUuid) or (clonUuid and not lvs.get(clonUuid)):
            util.SMlog("One or both leaves missing => revert")
            self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
            return

        vdis = lvhdutil.getVDIInfo(self.lvmCache)
        if vdis[origUuid].scanError or (clonUuid and vdis[clonUuid].scanError):
            util.SMlog("One or both leaves invalid => revert")
            self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
            return

        orig = vdis[origUuid]
        base = vdis[baseUuid]
        self.lvActivator.activate(baseUuid, base.lvName, False)
        self.lvActivator.activate(origUuid, orig.lvName, False)
        if orig.parentUuid != baseUuid:
            parent = vdis[orig.parentUuid]
            self.lvActivator.activate(parent.uuid, parent.lvName, False)
        origPath = os.path.join(self.path, orig.lvName)
        if not vhdutil.check(origPath):
            util.SMlog("Orig VHD invalid => revert")
            self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
            return

        if clonUuid:
            clon = vdis[clonUuid]
            clonPath = os.path.join(self.path, clon.lvName)
            self.lvActivator.activate(clonUuid, clon.lvName, False)
            if not vhdutil.check(clonPath):
                util.SMlog("Clon VHD invalid => revert")
                self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
                return

        util.SMlog("Snapshot appears valid, will not roll back")
        self._completeCloneOp(vdis, origUuid, baseUuid, clonUuid)

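    # Roll an interrupted clone back: delete the partially created leaves,
    # un-hide the base copy and rename it back to the original VDI's name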
    def _undoCloneOp(self, lvs, origUuid, baseUuid, clonUuid):
        base = lvs[baseUuid]
        basePath = os.path.join(self.path, base.name)

        # make the parent RW
        if base.readonly:
            self.lvmCache.setReadonly(base.name, False)

        ns = lvhdutil.NS_PREFIX_LVM + self.uuid
        origRefcountBinary = RefCounter.check(origUuid, ns)[1]
        origRefcountNormal = 0

        # un-hide the parent
        if base.vdiType == vhdutil.VDI_TYPE_VHD:
            self.lvActivator.activate(baseUuid, base.name, False)
            origRefcountNormal = 1
            vhdInfo = vhdutil.getVHDInfo(basePath, lvhdutil.extractUuid, False)
            if vhdInfo.hidden:
                vhdutil.setHidden(basePath, False)
        elif base.vdiType == vhdutil.VDI_TYPE_RAW and base.hidden:
            self.lvmCache.setHidden(base.name, False)

        # remove the child nodes
        if clonUuid and lvs.get(clonUuid):
            if lvs[clonUuid].vdiType != vhdutil.VDI_TYPE_VHD:
                raise util.SMException("clone %s not VHD" % clonUuid)
            self.lvmCache.remove(lvs[clonUuid].name)
            if self.lvActivator.get(clonUuid, False):
                self.lvActivator.remove(clonUuid, False)
        if lvs.get(origUuid):
            self.lvmCache.remove(lvs[origUuid].name)

        # inflate the parent to fully-allocated size
        if base.vdiType == vhdutil.VDI_TYPE_VHD:
            fullSize = lvhdutil.calcSizeVHDLV(vhdInfo.sizeVirt)
            lvhdutil.inflate(self.journaler, self.uuid, baseUuid, fullSize)

        # rename back
        origLV = lvhdutil.LV_PREFIX[base.vdiType] + origUuid
        self.lvmCache.rename(base.name, origLV)
        RefCounter.reset(baseUuid, ns)
        if self.lvActivator.get(baseUuid, False):
            self.lvActivator.replace(baseUuid, origUuid, origLV, False)
        RefCounter.set(origUuid, origRefcountNormal, origRefcountBinary, ns)

        # At this stage, tapdisk and SM vdi will be in paused state. Remove
        # flag to facilitate vm deactivate
        origVdiRef = self.session.xenapi.VDI.get_by_uuid(origUuid)
        self.session.xenapi.VDI.remove_from_sm_config(origVdiRef, 'paused')

        # update LVM metadata on slaves
        slaves = util.get_slaves_attached_on(self.session, [origUuid])
        lvhdutil.lvRefreshOnSlaves(self.session, self.uuid, self.vgname,
                origLV, origUuid, slaves)

        util.SMlog("*** INTERRUPTED CLONE OP: rollback success")

    def _completeCloneOp(self, vdis, origUuid, baseUuid, clonUuid):
        """Finalize the interrupted snapshot/clone operation. This must not be
        called from the live snapshot op context because we attempt to pause/
        unpause the VBD here (the VBD is already paused during snapshot, so it
        would cause a deadlock)"""
        base = vdis[baseUuid]
        clon = None
        if clonUuid:
            clon = vdis[clonUuid]

        cleanup.abort(self.uuid)

        # make sure the parent is hidden and read-only
        if not base.hidden:
            if base.vdiType == vhdutil.VDI_TYPE_RAW:
                self.lvmCache.setHidden(base.lvName)
            else:
                basePath = os.path.join(self.path, base.lvName)
                vhdutil.setHidden(basePath)
        if not base.lvReadonly:
            self.lvmCache.setReadonly(base.lvName, True)

        # NB: since this snapshot-preserving call is only invoked outside the
        # snapshot op context, we assume the LVM metadata on the involved slave
        # has by now been refreshed and do not attempt to do it here

        # Update the original record
        try:
            vdi_ref = self.session.xenapi.VDI.get_by_uuid(origUuid)
            sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
            type = self.session.xenapi.VDI.get_type(vdi_ref)
            sm_config["vdi_type"] = vhdutil.VDI_TYPE_VHD
            sm_config['vhd-parent'] = baseUuid
            self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config)
        except XenAPI.Failure:
            util.SMlog("ERROR updating the orig record")

        # introduce the new VDI records
        if clonUuid:
            try:
                clon_vdi = VDI.VDI(self, clonUuid)
                clon_vdi.read_only = False
                clon_vdi.location = clonUuid
                clon_vdi.utilisation = clon.sizeLV
                clon_vdi.sm_config = {
                        "vdi_type": vhdutil.VDI_TYPE_VHD,
                        "vhd-parent": baseUuid}

                if not self.legacyMode:
                    LVMMetadataHandler(self.mdpath). \
                        ensureSpaceIsAvailableForVdis(1)

                clon_vdi_ref = clon_vdi._db_introduce()
                util.SMlog("introduced clon VDI: %s (%s)" % \
                           (clon_vdi_ref, clonUuid))

                vdi_info = {UUID_TAG: clonUuid,
                            NAME_LABEL_TAG: clon_vdi.label,
                            NAME_DESCRIPTION_TAG: clon_vdi.description,
                            IS_A_SNAPSHOT_TAG: 0,
                            SNAPSHOT_OF_TAG: '',
                            SNAPSHOT_TIME_TAG: '',
                            TYPE_TAG: type,
                            VDI_TYPE_TAG: clon_vdi.sm_config['vdi_type'],
                            READ_ONLY_TAG: int(clon_vdi.read_only),
                            MANAGED_TAG: int(clon_vdi.managed),
                            METADATA_OF_POOL_TAG: ''
                            }

                if not self.legacyMode:
                    LVMMetadataHandler(self.mdpath).addVdi(vdi_info)

            except XenAPI.Failure:
                util.SMlog("ERROR introducing the clon record")

        try:
            base_vdi = VDI.VDI(self, baseUuid)  # readonly parent
            base_vdi.label = "base copy"
            base_vdi.read_only = True
            base_vdi.location = baseUuid
            base_vdi.size = base.sizeVirt
            base_vdi.utilisation = base.sizeLV
            base_vdi.managed = False
            base_vdi.sm_config = {
                    "vdi_type": vhdutil.VDI_TYPE_VHD,
                    "vhd-parent": baseUuid}

            if not self.legacyMode:
                LVMMetadataHandler(self.mdpath).ensureSpaceIsAvailableForVdis(1)

            base_vdi_ref = base_vdi._db_introduce()
            util.SMlog("introduced base VDI: %s (%s)" % \
                       (base_vdi_ref, baseUuid))

            vdi_info = {UUID_TAG: baseUuid,
                        NAME_LABEL_TAG: base_vdi.label,
                        NAME_DESCRIPTION_TAG: base_vdi.description,
                        IS_A_SNAPSHOT_TAG: 0,
                        SNAPSHOT_OF_TAG: '',
                        SNAPSHOT_TIME_TAG: '',
                        TYPE_TAG: type,
                        VDI_TYPE_TAG: base_vdi.sm_config['vdi_type'],
                        READ_ONLY_TAG: int(base_vdi.read_only),
                        MANAGED_TAG: int(base_vdi.managed),
                        METADATA_OF_POOL_TAG: ''
                        }

            if not self.legacyMode:
                LVMMetadataHandler(self.mdpath).addVdi(vdi_info)
        except XenAPI.Failure:
            util.SMlog("ERROR introducing the base record")

        util.SMlog("*** INTERRUPTED CLONE OP: complete")

    def _undoAllJournals(self):
        """Undo all VHD & SM interrupted journaled operations. This call must
        be serialized with respect to all operations that create journals"""
        # undoing interrupted inflates must be done first, since undoing VHD
        # ops might require inflations
        self.lock.acquire()
        try:
            self._undoAllInflateJournals()
            self._undoAllVHDJournals()
            self._handleInterruptedCloneOps()
            self._handleInterruptedCoalesceLeaf()
        finally:
            self.lock.release()
            self.cleanup()

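    # An inflate journal records the LV size from before the inflate: zero
    # out the stale VHD footer at the inflated end, then deflate the LV back
    # to the journaled size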
    def _undoAllInflateJournals(self):
        entries = self.journaler.getAll(lvhdutil.JRN_INFLATE)
        if len(entries) == 0:
            return
        self._loadvdis()
        for uuid, val in entries.items():
            vdi = self.vdis.get(uuid)
            if vdi:
                util.SMlog("Found inflate journal %s, deflating %s to %s" % \
                           (uuid, vdi.path, val))
                if vdi.readonly:
                    self.lvmCache.setReadonly(vdi.lvname, False)
                self.lvActivator.activate(uuid, vdi.lvname, False)
                currSizeLV = self.lvmCache.getSize(vdi.lvname)
                util.zeroOut(vdi.path, currSizeLV - vhdutil.VHD_FOOTER_SIZE,
                             vhdutil.VHD_FOOTER_SIZE)
                lvhdutil.deflate(self.lvmCache, vdi.lvname, int(val))
                if vdi.readonly:
                    self.lvmCache.setReadonly(vdi.lvname, True)
                if "true" == self.session.xenapi.SR.get_shared(self.sr_ref):
                    lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid,
                            self.vgname, vdi.lvname, uuid)
            self.journaler.remove(lvhdutil.JRN_INFLATE, uuid)
        delattr(self, "vdiInfo")
        delattr(self, "allVDIs")

    def _undoAllVHDJournals(self):
        """check if there are VHD journals in existence and revert them"""
        journals = lvhdutil.getAllVHDJournals(self.lvmCache)
        if len(journals) == 0:
            return
        self._loadvdis()
        for uuid, jlvName in journals:
            vdi = self.vdis[uuid]
            util.SMlog("Found VHD journal %s, reverting %s" % (uuid, vdi.path))
            self.lvActivator.activate(uuid, vdi.lvname, False)
            self.lvmCache.activateNoRefcount(jlvName)
            fullSize = lvhdutil.calcSizeVHDLV(vdi.size)
            lvhdutil.inflate(self.journaler, self.uuid, vdi.uuid, fullSize)
            try:
                jFile = os.path.join(self.path, jlvName)
                vhdutil.revert(vdi.path, jFile)
            except util.CommandException:
                util.logException("VHD journal revert")
                vhdutil.check(vdi.path)
                util.SMlog("VHD revert failed but VHD ok: removing journal")
            # Attempt to reclaim unused space
            vhdInfo = vhdutil.getVHDInfo(vdi.path, lvhdutil.extractUuid, False)
            NewSize = lvhdutil.calcSizeVHDLV(vhdInfo.sizeVirt)
            if NewSize < fullSize:
                lvhdutil.deflate(self.lvmCache, vdi.lvname, int(NewSize))
            lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid,
                    self.vgname, vdi.lvname, uuid)
            self.lvmCache.remove(jlvName)
        delattr(self, "vdiInfo")
        delattr(self, "allVDIs")

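    # Ask each slave, via the on-slave plugin's "multi" call, to deactivate
    # the original LV before the clone operation renames it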
    def _updateSlavesPreClone(self, hostRefs, origOldLV):
        masterRef = util.get_this_host_ref(self.session)
        args = {"vgName": self.vgname,
                "action1": "deactivateNoRefcount",
                "lvName1": origOldLV}
        for hostRef in hostRefs:
            if hostRef == masterRef:
                continue
            util.SMlog("Deactivate VDI on %s" % hostRef)
            rv = self.session.xenapi.host.call_plugin(hostRef, self.PLUGIN_ON_SLAVE, "multi", args)
            util.SMlog("call-plugin returned: %s" % rv)
            if not rv:
                raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)

    def _updateSlavesOnClone(self, hostRefs, origOldLV, origLV,
                             baseUuid, baseLV):
        """We need to reactivate the original LV on each slave (note that the
        name for the original LV might change), as well as init the refcount
        for the base LV"""
        args = {"vgName": self.vgname,
                "action1": "refresh",
                "lvName1": origLV,
                "action2": "activate",
                "ns2": lvhdutil.NS_PREFIX_LVM + self.uuid,
                "lvName2": baseLV,
                "uuid2": baseUuid}

        masterRef = util.get_this_host_ref(self.session)
        for hostRef in hostRefs:
            if hostRef == masterRef:
                continue
            util.SMlog("Updating %s, %s, %s on slave %s" % \
                       (origOldLV, origLV, baseLV, hostRef))
            rv = self.session.xenapi.host.call_plugin(
                hostRef, self.PLUGIN_ON_SLAVE, "multi", args)
            util.SMlog("call-plugin returned: %s" % rv)
            if not rv:
                raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)

    def _updateSlavesOnCBTClone(self, hostRefs, cbtlog):
        """Reactivate and refresh CBT log file on slaves"""
        args = {"vgName": self.vgname,
                "action1": "deactivateNoRefcount",
                "lvName1": cbtlog,
                "action2": "refresh",
                "lvName2": cbtlog}

        masterRef = util.get_this_host_ref(self.session)
        for hostRef in hostRefs:
            if hostRef == masterRef:
                continue
            util.SMlog("Updating %s on slave %s" % (cbtlog, hostRef))
            rv = self.session.xenapi.host.call_plugin(
                hostRef, self.PLUGIN_ON_SLAVE, "multi", args)
            util.SMlog("call-plugin returned: %s" % rv)
            if not rv:
                raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)

    def _updateSlavesOnRemove(self, hostRefs, baseUuid, baseLV):
        """Tell the slave we deleted the base image"""
        args = {"vgName": self.vgname,
                "action1": "cleanupLockAndRefcount",
                "uuid1": baseUuid,
                "ns1": lvhdutil.NS_PREFIX_LVM + self.uuid}

        masterRef = util.get_this_host_ref(self.session)
        for hostRef in hostRefs:
            if hostRef == masterRef:
                continue
            util.SMlog("Cleaning locks for %s on slave %s" % (baseLV, hostRef))
            rv = self.session.xenapi.host.call_plugin(
                hostRef, self.PLUGIN_ON_SLAVE, "multi", args)
            util.SMlog("call-plugin returned: %s" % rv)
            if not rv:
                raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)

    def _cleanup(self, skipLockCleanup=False):
        """delete stale refcounter, flag, and lock files"""
        RefCounter.resetAll(lvhdutil.NS_PREFIX_LVM + self.uuid)
        IPCFlag(self.uuid).clearAll()
        if not skipLockCleanup:
            Lock.cleanupAll(self.uuid)
            Lock.cleanupAll(lvhdutil.NS_PREFIX_LVM + self.uuid)

    def _prepareTestMode(self):
        util.SMlog("Test mode: %s" % self.testMode)
        if self.ENV_VAR_VHD_TEST.get(self.testMode):
            os.environ[self.ENV_VAR_VHD_TEST[self.testMode]] = "yes"
            util.SMlog("Setting env %s" % self.ENV_VAR_VHD_TEST[self.testMode])

    def _kickGC(self):
        util.SMlog("Kicking GC")
        cleanup.start_gc_service(self.uuid)

    def ensureCBTSpace(self):
        # Ensure we have space for at least one LV
        self._ensureSpaceAvailable(self.journaler.LV_SIZE)

class LVHDVDI(VDI.VDI):

    JRN_CLONE = "clone"  # journal entry type for the clone operation

    @override
    def load(self, vdi_uuid) -> None:
        self.lock = self.sr.lock
        self.lvActivator = self.sr.lvActivator
        self.loaded = False
        self.vdi_type = vhdutil.VDI_TYPE_VHD
        if self.sr.legacyMode or util.fistpoint.is_active("xenrt_default_vdi_type_legacy"):
            self.vdi_type = vhdutil.VDI_TYPE_RAW
        self.uuid = vdi_uuid
        self.location = self.uuid
        self.exists = True

        if hasattr(self.sr, "vdiInfo") and self.sr.vdiInfo.get(self.uuid):
            self._initFromVDIInfo(self.sr.vdiInfo[self.uuid])
            if self.parent:
                self.sm_config_override['vhd-parent'] = self.parent
            else:
                self.sm_config_override['vhd-parent'] = None
            return

        # scan() didn't run: determine the type of the VDI manually
        if self._determineType():
            return

        # the VDI must be in the process of being created
        self.exists = False
        if "vdi_sm_config" in self.sr.srcmd.params and \
                "type" in self.sr.srcmd.params["vdi_sm_config"]:
            type = self.sr.srcmd.params["vdi_sm_config"]["type"]
            if type == PARAM_RAW:
                self.vdi_type = vhdutil.VDI_TYPE_RAW
            elif type == PARAM_VHD:
                self.vdi_type = vhdutil.VDI_TYPE_VHD
                if self.sr.cmd == 'vdi_create' and self.sr.legacyMode:
                    raise xs_errors.XenError('VDICreate', \
                            opterr='Cannot create VHD type disk in legacy mode')
            else:
                raise xs_errors.XenError('VDICreate', opterr='bad type')
        self.lvname = "%s%s" % (lvhdutil.LV_PREFIX[self.vdi_type], vdi_uuid)
        self.path = os.path.join(self.sr.path, self.lvname)

1371 @override 

1372 def create(self, sr_uuid, vdi_uuid, size) -> str: 

1373 util.SMlog("LVHDVDI.create for %s" % self.uuid) 

1374 if not self.sr.isMaster: 

1375 raise xs_errors.XenError('LVMMaster') 

1376 if self.exists: 

1377 raise xs_errors.XenError('VDIExists') 

1378 

1379 size = vhdutil.validate_and_round_vhd_size(int(size)) 

1380 

1381 util.SMlog("LVHDVDI.create: type = %s, %s (size=%s)" % \ 

1382 (self.vdi_type, self.path, size)) 

1383 lvSize = 0 

1384 self.sm_config = self.sr.srcmd.params["vdi_sm_config"] 

1385 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1386 lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT, int(size)) 

1387 else: 

1388 if self.sr.provision == "thin": 

1389 lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT, 

1390 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE)) 

1391 elif self.sr.provision == "thick": 

1392 lvSize = lvhdutil.calcSizeVHDLV(int(size)) 

1393 

1394 self.sr._ensureSpaceAvailable(lvSize) 

1395 

1396 try: 

1397 self.sr.lvmCache.create(self.lvname, lvSize) 

1398 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1399 self.size = self.sr.lvmCache.getSize(self.lvname) 

1400 else: 

1401 vhdutil.create(self.path, int(size), False, lvhdutil.MSIZE_MB) 

1402 self.size = vhdutil.getSizeVirt(self.path) 

1403 self.sr.lvmCache.deactivateNoRefcount(self.lvname) 

1404 except util.CommandException as e: 

1405 util.SMlog("Unable to create VDI") 

1406 self.sr.lvmCache.remove(self.lvname) 

1407 raise xs_errors.XenError('VDICreate', opterr='error %d' % e.code) 

1408 

1409 self.utilisation = lvSize 

1410 self.sm_config["vdi_type"] = self.vdi_type 

1411 

1412 if not self.sr.legacyMode: 

1413 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1) 

1414 

1415 self.ref = self._db_introduce() 

1416 self.sr._updateStats(self.sr.uuid, self.size) 

1417 

1418 vdi_info = {UUID_TAG: self.uuid, 

1419 NAME_LABEL_TAG: util.to_plain_string(self.label), 

1420 NAME_DESCRIPTION_TAG: util.to_plain_string(self.description), 

1421 IS_A_SNAPSHOT_TAG: 0, 

1422 SNAPSHOT_OF_TAG: '', 

1423 SNAPSHOT_TIME_TAG: '', 

1424 TYPE_TAG: self.ty, 

1425 VDI_TYPE_TAG: self.vdi_type, 

1426 READ_ONLY_TAG: int(self.read_only), 

1427 MANAGED_TAG: int(self.managed), 

1428 METADATA_OF_POOL_TAG: '' 

1429 } 

1430 

1431 if not self.sr.legacyMode: 

1432 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info) 

1433 

1434 return VDI.VDI.get_params(self) 

1435 
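
# Editor's sketch of the LV sizing rule used by create() above: raw VDIs
# round the virtual size up to the LVM extent, thin VHDs start with only
# the empty-VHD overhead, and thick VHDs reserve the fully inflated size.
# The increment and the two size formulas below are invented stand-ins for
# lvutil.LVM_SIZE_INCREMENT, vhdutil.calcOverheadEmpty and
# lvhdutil.calcSizeVHDLV.
def sketch_lv_size(virt_size, vdi_is_raw, provision,
                   increment=4 * 1024 * 1024):
    def roundup(align, n):  # same contract as util.roundup
        return ((n + align - 1) // align) * align

    empty_vhd_overhead = 2 * 1024 * 1024          # hypothetical value
    full_vhd_size = virt_size + virt_size // 100  # hypothetical ~1% metadata

    if vdi_is_raw:
        return roundup(increment, virt_size)
    if provision == "thin":
        return roundup(increment, empty_vhd_overhead)
    return roundup(increment, full_vhd_size)      # "thick"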

1436 @override 

1437 def delete(self, sr_uuid, vdi_uuid, data_only=False) -> None: 

1438 util.SMlog("LVHDVDI.delete for %s" % self.uuid) 

1439 try: 

1440 self._loadThis() 

1441 except xs_errors.SRException as e: 

1442 # Catch 'VDI doesn't exist' exception 

1443 if e.errno == 46: 

1444 return super(LVHDVDI, self).delete(sr_uuid, vdi_uuid, data_only) 

1445 raise 

1446 

1447 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1448 if not self.session.xenapi.VDI.get_managed(vdi_ref): 

1449 raise xs_errors.XenError("VDIDelete", \ 

1450 opterr="Deleting non-leaf node not permitted") 

1451 

1452 if not self.hidden: 

1453 self._markHidden() 

1454 

1455 if not data_only: 

1456 # Remove from XAPI and delete from MGT 

1457 self._db_forget() 

1458 else: 

1459 # If this is a data_destroy call, don't remove from XAPI db 

1460 # Only delete from MGT 

1461 if not self.sr.legacyMode: 

1462 LVMMetadataHandler(self.sr.mdpath).deleteVdiFromMetadata(self.uuid) 

1463 

1464 # deactivate here because it might be too late to do it in the "final" 

1465 # step: GC might have removed the LV by then 

1466 if self.sr.lvActivator.get(self.uuid, False): 

1467 self.sr.lvActivator.deactivate(self.uuid, False) 

1468 

1469 try: 

1470 self.sr.lvmCache.remove(self.lvname) 

1471 self.sr.lock.cleanup(vdi_uuid, lvhdutil.NS_PREFIX_LVM + sr_uuid) 

1472 self.sr.lock.cleanupAll(vdi_uuid) 

1473 except xs_errors.SRException as e: 

1474 util.SMlog( 

1475 "Failed to remove the volume (maybe is leaf coalescing) " 

1476 "for %s err:%d" % (self.uuid, e.errno)) 

1477 

1478 self.sr._updateStats(self.sr.uuid, -self.size) 

1479 self.sr._kickGC() 

1480 return super(LVHDVDI, self).delete(sr_uuid, vdi_uuid, data_only) 

1481 
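
# Editor's note: delete() above never coalesces or zeroes data itself; it
# marks the leaf hidden, removes the LV along with its refcounter and lock
# state, and relies on the asynchronously kicked GC (the cleanup module)
# to reclaim the rest of the VHD chain.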

1482 @override 

1483 def attach(self, sr_uuid, vdi_uuid) -> str: 

1484 util.SMlog("LVHDVDI.attach for %s" % self.uuid) 

1485 if self.sr.journaler.hasJournals(self.uuid): 

1486 raise xs_errors.XenError('VDIUnavailable', 

1487 opterr='Interrupted operation detected on this VDI, ' 

1488 'scan SR first to trigger auto-repair') 

1489 

1490 writable = ('args' not in self.sr.srcmd.params) or \ 

1491 (self.sr.srcmd.params['args'][0] == "true") 

1492 needInflate = True 

1493 if self.vdi_type == vhdutil.VDI_TYPE_RAW or not writable: 

1494 needInflate = False 

1495 else: 

1496 self._loadThis() 

1497 if self.utilisation >= lvhdutil.calcSizeVHDLV(self.size): 

1498 needInflate = False 

1499 

1500 if needInflate: 

1501 try: 

1502 self._prepareThin(True) 

1503 except: 

1504 util.logException("attach") 

1505 raise xs_errors.XenError('LVMProvisionAttach') 

1506 

1507 try: 

1508 return self._attach() 

1509 finally: 

1510 if not self.sr.lvActivator.deactivateAll(): 

1511 util.SMlog("Failed to deactivate LVs back (%s)" % self.uuid) 

1512 
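
# Editor's sketch (illustrative only) of the inflate decision in attach():
# raw volumes and read-only attaches are never inflated; a writable VHD is
# inflated only if its LV is still smaller than the fully-provisioned size
# (lvhdutil.calcSizeVHDLV in the real code, a plain parameter here).
def sketch_need_inflate(vdi_type, writable, utilisation, full_lv_size):
    if vdi_type == "raw" or not writable:
        return False
    return utilisation < full_lv_size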

1513 @override 

1514 def detach(self, sr_uuid, vdi_uuid) -> None: 

1515 util.SMlog("LVHDVDI.detach for %s" % self.uuid) 

1516 self._loadThis() 

1517 already_deflated = (self.utilisation < \ 

1518 lvhdutil.calcSizeVHDLV(self.size)) 

1519 needDeflate = True 

1520 if self.vdi_type == vhdutil.VDI_TYPE_RAW or already_deflated: 

1521 needDeflate = False 

1522 elif self.sr.provision == "thick": 

1523 needDeflate = False 

1524 # except for snapshots, which are always deflated 

1525 if self.sr.srcmd.cmd != 'vdi_detach_from_config': 

1526 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1527 snap = self.session.xenapi.VDI.get_is_a_snapshot(vdi_ref) 

1528 if snap: 

1529 needDeflate = True 

1530 

1531 if needDeflate: 

1532 try: 

1533 self._prepareThin(False) 

1534 except: 

1535 util.logException("_prepareThin") 

1536 raise xs_errors.XenError('VDIUnavailable', opterr='deflate') 

1537 

1538 try: 

1539 self._detach() 

1540 finally: 

1541 if not self.sr.lvActivator.deactivateAll(): 

1542 raise xs_errors.XenError("SMGeneral", opterr="deactivation") 

1543 

1544 # We only support offline resize 

1545 @override 

1546 def resize(self, sr_uuid, vdi_uuid, size) -> str: 

1547 util.SMlog("LVHDVDI.resize for %s" % self.uuid) 

1548 if not self.sr.isMaster: 

1549 raise xs_errors.XenError('LVMMaster') 

1550 

1551 self._loadThis() 

1552 if self.hidden: 

1553 raise xs_errors.XenError('VDIUnavailable', opterr='hidden VDI') 

1554 

1555 if size < self.size: 

1556 util.SMlog('vdi_resize: shrinking not supported: ' + \ 

1557 '(current size: %d, new size: %d)' % (self.size, size)) 

1558 raise xs_errors.XenError('VDISize', opterr='shrinking not allowed') 

1559 

1560 size = vhdutil.validate_and_round_vhd_size(int(size)) 

1561 

1562 if size == self.size: 

1563 return VDI.VDI.get_params(self) 

1564 

1565 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1566 lvSizeOld = self.size 

1567 lvSizeNew = util.roundup(lvutil.LVM_SIZE_INCREMENT, size) 

1568 else: 

1569 lvSizeOld = self.utilisation 

1570 lvSizeNew = lvhdutil.calcSizeVHDLV(size) 

1571 if self.sr.provision == "thin": 

1572 # VDI is currently deflated, so keep it deflated 

1573 lvSizeNew = lvSizeOld 

1574 assert(lvSizeNew >= lvSizeOld) 

1575 spaceNeeded = lvSizeNew - lvSizeOld 

1576 self.sr._ensureSpaceAvailable(spaceNeeded) 

1577 

1578 oldSize = self.size 

1579 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1580 self.sr.lvmCache.setSize(self.lvname, lvSizeNew) 

1581 self.size = self.sr.lvmCache.getSize(self.lvname) 

1582 self.utilisation = self.size 

1583 else: 

1584 if lvSizeNew != lvSizeOld: 

1585 lvhdutil.inflate(self.sr.journaler, self.sr.uuid, self.uuid, 

1586 lvSizeNew) 

1587 vhdutil.setSizeVirtFast(self.path, size) 

1588 self.size = vhdutil.getSizeVirt(self.path) 

1589 self.utilisation = self.sr.lvmCache.getSize(self.lvname) 

1590 

1591 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1592 self.session.xenapi.VDI.set_virtual_size(vdi_ref, str(self.size)) 

1593 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, 

1594 str(self.utilisation)) 

1595 self.sr._updateStats(self.sr.uuid, self.size - oldSize) 

1596 super(LVHDVDI, self).resize_cbt(self.sr.uuid, self.uuid, self.size) 

1597 return VDI.VDI.get_params(self) 

1598 
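
# Editor's sketch of the space reservation in resize(): only the delta
# between the old and new LV size is needed, and a thin-provisioned VHD
# keeps its current (deflated) LV size, so its delta is zero.
def sketch_resize_delta(lv_size_old, lv_size_new, is_vhd, provision):
    if is_vhd and provision == "thin":
        lv_size_new = lv_size_old  # stay deflated; inflation happens on attach
    assert lv_size_new >= lv_size_old  # shrinking was rejected earlier
    return lv_size_new - lv_size_old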

1599 @override 

1600 def clone(self, sr_uuid, vdi_uuid) -> str: 

1601 return self._do_snapshot( 

1602 sr_uuid, vdi_uuid, VDI.SNAPSHOT_DOUBLE, cloneOp=True) 

1603 

1604 @override 

1605 def compose(self, sr_uuid, vdi1, vdi2) -> None: 

1606 util.SMlog("LVHDSR.compose for %s -> %s" % (vdi2, vdi1)) 

1607 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

1608 raise xs_errors.XenError('Unimplemented') 

1609 

1610 parent_uuid = vdi1 

1611 parent_lvname = lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD] + parent_uuid 

1612 assert(self.sr.lvmCache.checkLV(parent_lvname)) 

1613 parent_path = os.path.join(self.sr.path, parent_lvname) 

1614 

1615 self.sr.lvActivator.activate(self.uuid, self.lvname, False) 

1616 self.sr.lvActivator.activate(parent_uuid, parent_lvname, False) 

1617 

1618 vhdutil.setParent(self.path, parent_path, False) 

1619 vhdutil.setHidden(parent_path) 

1620 self.sr.session.xenapi.VDI.set_managed(self.sr.srcmd.params['args'][0], False) 

1621 

1622 if not blktap2.VDI.tap_refresh(self.session, self.sr.uuid, self.uuid, 

1623 True): 

1624 raise util.SMException("failed to refresh VDI %s" % self.uuid) 

1625 

1626 util.SMlog("Compose done") 

1627 

1628 def reset_leaf(self, sr_uuid, vdi_uuid): 

1629 util.SMlog("LVHDSR.reset_leaf for %s" % vdi_uuid) 

1630 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

1631 raise xs_errors.XenError('Unimplemented') 

1632 

1633 self.sr.lvActivator.activate(self.uuid, self.lvname, False) 

1634 

1635 # safety check 

1636 if not vhdutil.hasParent(self.path): 

1637 raise util.SMException(("ERROR: VDI %s has no parent, " + \

1638 "will not reset contents") % self.uuid)

1639 

1640 vhdutil.killData(self.path) 

1641 

1642 def _attach(self): 

1643 self._chainSetActive(True, True, True) 

1644 if not util.pathexists(self.path): 

1645 raise xs_errors.XenError('VDIUnavailable', \ 

1646 opterr='Could not find: %s' % self.path) 

1647 

1648 if not hasattr(self, 'xenstore_data'): 

1649 self.xenstore_data = {} 

1650 

1651 self.xenstore_data.update(scsiutil.update_XS_SCSIdata(self.uuid, \ 

1652 scsiutil.gen_synthetic_page_data(self.uuid))) 

1653 

1654 self.xenstore_data['storage-type'] = 'lvm' 

1655 self.xenstore_data['vdi-type'] = self.vdi_type 

1656 

1657 self.attached = True 

1658 self.sr.lvActivator.persist() 

1659 return VDI.VDI.attach(self, self.sr.uuid, self.uuid) 

1660 

1661 def _detach(self): 

1662 self._chainSetActive(False, True) 

1663 self.attached = False 

1664 

1665 @override 

1666 def _do_snapshot(self, sr_uuid, vdi_uuid, snapType, 

1667 cloneOp=False, secondary=None, cbtlog=None) -> str: 

1668 # If cbt enabled, save file consistency state 

1669 if cbtlog is not None: 

1670 if blktap2.VDI.tap_status(self.session, vdi_uuid): 1670 ↛ 1671 (line 1670 didn't jump to line 1671, because the condition on line 1670 was never true)

1671 consistency_state = False 

1672 else: 

1673 consistency_state = True 

1674 util.SMlog("Saving log consistency state of %s for vdi: %s" % 

1675 (consistency_state, vdi_uuid)) 

1676 else: 

1677 consistency_state = None 

1678 

1679 pause_time = time.time() 

1680 if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid): 1680 ↛ 1681 (line 1680 didn't jump to line 1681, because the condition on line 1680 was never true)

1681 raise util.SMException("failed to pause VDI %s" % vdi_uuid) 

1682 

1683 snapResult = None 

1684 try: 

1685 snapResult = self._snapshot(snapType, cloneOp, cbtlog, consistency_state) 

1686 except Exception as e1: 

1687 try: 

1688 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, 

1689 secondary=None) 

1690 except Exception as e2: 

1691 util.SMlog('WARNING: failed to clean up failed snapshot: ' 

1692 '%s (error ignored)' % e2) 

1693 raise 

1694 self.disable_leaf_on_secondary(vdi_uuid, secondary=secondary) 

1695 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary) 

1696 unpause_time = time.time() 

1697 if (unpause_time - pause_time) > LONG_SNAPTIME: 1697 ↛ 1698 (line 1697 didn't jump to line 1698, because the condition on line 1697 was never true)

1698 util.SMlog('WARNING: snapshot paused VM for %s seconds' % 

1699 (unpause_time - pause_time)) 

1700 return snapResult 

1701 
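
# Editor's sketch of the pause/work/unpause bracket that _do_snapshot()
# builds around _snapshot(): a failed snapshot still attempts an unpause
# (errors from that cleanup are swallowed so the original failure wins),
# and unusually long pauses are reported. The callables are hypothetical.
import time

def sketch_pause_bracket(pause, unpause, work, warn_after=60):
    start = time.time()
    if not pause():
        raise RuntimeError("failed to pause VDI")
    try:
        result = work()
    except Exception:
        try:
            unpause()
        except Exception:
            pass  # ignored: the snapshot error is re-raised below
        raise
    unpause()
    elapsed = time.time() - start
    if elapsed > warn_after:
        print("WARNING: snapshot paused VM for %s seconds" % elapsed)
    return result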

1702 def _snapshot(self, snapType, cloneOp=False, cbtlog=None, cbt_consistency=None): 

1703 util.SMlog("LVHDVDI._snapshot for %s (type %s)" % (self.uuid, snapType)) 

1704 

1705 if not self.sr.isMaster: 1705 ↛ 1706 (line 1705 didn't jump to line 1706, because the condition on line 1705 was never true)

1706 raise xs_errors.XenError('LVMMaster') 

1707 if self.sr.legacyMode: 1707 ↛ 1708 (line 1707 didn't jump to line 1708, because the condition on line 1707 was never true)

1708 raise xs_errors.XenError('Unimplemented', opterr='In legacy mode') 

1709 

1710 self._loadThis() 

1711 if self.hidden: 1711 ↛ 1712 (line 1711 didn't jump to line 1712, because the condition on line 1711 was never true)

1712 raise xs_errors.XenError('VDISnapshot', opterr='hidden VDI') 

1713 

1714 self.sm_config = self.session.xenapi.VDI.get_sm_config( \ 

1715 self.sr.srcmd.params['vdi_ref']) 

1716 if "type" in self.sm_config and self.sm_config['type'] == 'raw': 1716 ↛ 1717line 1716 didn't jump to line 1717, because the condition on line 1716 was never true

1717 if not util.fistpoint.is_active("testsm_clone_allow_raw"): 

1718 raise xs_errors.XenError('Unimplemented', \ 

1719 opterr='Raw VDI, snapshot or clone not permitted') 

1720 

1721 # we must activate the entire VHD chain because the real parent could 

1722 # theoretically be anywhere in the chain if all VHDs under it are empty 

1723 self._chainSetActive(True, False) 

1724 if not util.pathexists(self.path): 1724 ↛ 1725 (line 1724 didn't jump to line 1725, because the condition on line 1724 was never true)

1725 raise xs_errors.XenError('VDIUnavailable', \ 

1726 opterr='VDI unavailable: %s' % (self.path)) 

1727 

1728 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 1728 ↛ 1736 (line 1728 didn't jump to line 1736, because the condition on line 1728 was never false)

1729 depth = vhdutil.getDepth(self.path) 

1730 if depth == -1: 1730 ↛ 1731 (line 1730 didn't jump to line 1731, because the condition on line 1730 was never true)

1731 raise xs_errors.XenError('VDIUnavailable', \ 

1732 opterr='failed to get VHD depth') 

1733 elif depth >= vhdutil.MAX_CHAIN_SIZE: 1733 ↛ 1734 (line 1733 didn't jump to line 1734, because the condition on line 1733 was never true)

1734 raise xs_errors.XenError('SnapshotChainTooLong') 

1735 

1736 self.issnap = self.session.xenapi.VDI.get_is_a_snapshot( \ 

1737 self.sr.srcmd.params['vdi_ref']) 

1738 

1739 fullpr = lvhdutil.calcSizeVHDLV(self.size) 

1740 thinpr = util.roundup(lvutil.LVM_SIZE_INCREMENT, \ 

1741 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE)) 

1742 lvSizeOrig = thinpr 

1743 lvSizeClon = thinpr 

1744 

1745 hostRefs = [] 

1746 if self.sr.cmd == "vdi_snapshot": 

1747 hostRefs = util.get_hosts_attached_on(self.session, [self.uuid]) 

1748 if hostRefs: 1748 ↛ 1750 (line 1748 didn't jump to line 1750, because the condition on line 1748 was never false)

1749 lvSizeOrig = fullpr 

1750 if self.sr.provision == "thick": 1750 ↛ 1756 (line 1750 didn't jump to line 1756, because the condition on line 1750 was never false)

1751 if not self.issnap: 1751 ↛ 1752 (line 1751 didn't jump to line 1752, because the condition on line 1751 was never true)

1752 lvSizeOrig = fullpr 

1753 if self.sr.cmd != "vdi_snapshot": 

1754 lvSizeClon = fullpr 

1755 

1756 if (snapType == VDI.SNAPSHOT_SINGLE or 1756 ↛ 1758 (line 1756 didn't jump to line 1758, because the condition on line 1756 was never true)

1757 snapType == VDI.SNAPSHOT_INTERNAL): 

1758 lvSizeClon = 0 

1759 

1760 # the space required must include 2 journal LVs: a clone journal and an 

1761 # inflate journal (for the failure handling)

1762 size_req = lvSizeOrig + lvSizeClon + 2 * self.sr.journaler.LV_SIZE 

1763 lvSizeBase = self.size 

1764 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 1764 ↛ 1768 (line 1764 didn't jump to line 1768, because the condition on line 1764 was never false)

1765 lvSizeBase = util.roundup(lvutil.LVM_SIZE_INCREMENT, 

1766 vhdutil.getSizePhys(self.path)) 

1767 size_req -= (self.utilisation - lvSizeBase) 

1768 self.sr._ensureSpaceAvailable(size_req) 

1769 

1770 if hostRefs: 

1771 self.sr._updateSlavesPreClone(hostRefs, self.lvname) 

1772 

1773 baseUuid = util.gen_uuid() 

1774 origUuid = self.uuid 

1775 clonUuid = "" 

1776 if snapType == VDI.SNAPSHOT_DOUBLE: 1776 ↛ 1778 (line 1776 didn't jump to line 1778, because the condition on line 1776 was never false)

1777 clonUuid = util.gen_uuid() 

1778 jval = "%s_%s" % (baseUuid, clonUuid) 

1779 self.sr.journaler.create(self.JRN_CLONE, origUuid, jval) 

1780 util.fistpoint.activate("LVHDRT_clone_vdi_after_create_journal", self.sr.uuid) 

1781 

1782 try: 

1783 # self becomes the "base vdi" 

1784 origOldLV = self.lvname 

1785 baseLV = lvhdutil.LV_PREFIX[self.vdi_type] + baseUuid 

1786 self.sr.lvmCache.rename(self.lvname, baseLV) 

1787 self.sr.lvActivator.replace(self.uuid, baseUuid, baseLV, False) 

1788 RefCounter.set(baseUuid, 1, 0, lvhdutil.NS_PREFIX_LVM + self.sr.uuid) 

1789 self.uuid = baseUuid 

1790 self.lvname = baseLV 

1791 self.path = os.path.join(self.sr.path, baseLV) 

1792 self.label = "base copy" 

1793 self.read_only = True 

1794 self.location = self.uuid 

1795 self.managed = False 

1796 

1797 # shrink the base copy to the minimum - we do it before creating 

1798 # the snapshot volumes to avoid requiring double the space 

1799 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 1799 ↛ 1802 (line 1799 didn't jump to line 1802, because the condition on line 1799 was never false)

1800 lvhdutil.deflate(self.sr.lvmCache, self.lvname, lvSizeBase) 

1801 self.utilisation = lvSizeBase 

1802 util.fistpoint.activate("LVHDRT_clone_vdi_after_shrink_parent", self.sr.uuid) 

1803 

1804 snapVDI = self._createSnap(origUuid, lvSizeOrig, False) 

1805 util.fistpoint.activate("LVHDRT_clone_vdi_after_first_snap", self.sr.uuid) 

1806 snapVDI2 = None 

1807 if snapType == VDI.SNAPSHOT_DOUBLE: 1807 ↛ 1813 (line 1807 didn't jump to line 1813, because the condition on line 1807 was never false)

1808 snapVDI2 = self._createSnap(clonUuid, lvSizeClon, True) 

1809 # If we have CBT enabled on the VDI, 

1810 # set CBT status for the new snapshot disk 

1811 if cbtlog: 

1812 snapVDI2.cbt_enabled = True 

1813 util.fistpoint.activate("LVHDRT_clone_vdi_after_second_snap", self.sr.uuid) 

1814 

1815 # note: it is important to mark the parent hidden only AFTER the 

1816 # new VHD children have been created, which are referencing it; 

1817 # otherwise we would introduce a race with GC that could reclaim 

1818 # the parent before we snapshot it 

1819 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 1819 ↛ 1820 (line 1819 didn't jump to line 1820, because the condition on line 1819 was never true)

1820 self.sr.lvmCache.setHidden(self.lvname) 

1821 else: 

1822 vhdutil.setHidden(self.path) 

1823 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_hidden", self.sr.uuid) 

1824 

1825 # set the base copy to ReadOnly 

1826 self.sr.lvmCache.setReadonly(self.lvname, True) 

1827 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_ro", self.sr.uuid) 

1828 

1829 if hostRefs: 

1830 self.sr._updateSlavesOnClone(hostRefs, origOldLV, 

1831 snapVDI.lvname, self.uuid, self.lvname) 

1832 

1833 # Update cbt files if user created snapshot (SNAPSHOT_DOUBLE) 

1834 if snapType == VDI.SNAPSHOT_DOUBLE and cbtlog: 

1835 snapVDI._cbt_snapshot(clonUuid, cbt_consistency) 

1836 if hostRefs: 1836 ↛ 1850 (line 1836 didn't jump to line 1850, because the condition on line 1836 was never false)

1837 cbtlog_file = self._get_cbt_logname(snapVDI.uuid) 

1838 try: 

1839 self.sr._updateSlavesOnCBTClone(hostRefs, cbtlog_file) 

1840 except: 

1841 alert_name = "VDI_CBT_SNAPSHOT_FAILED" 

1842 alert_str = ("Creating CBT snapshot for {} failed" 

1843 .format(snapVDI.uuid)) 

1844 snapVDI._disable_cbt_on_error(alert_name, alert_str) 

1845 pass 

1846 

1847 except (util.SMException, XenAPI.Failure) as e: 

1848 util.logException("LVHDVDI._snapshot") 

1849 self._failClone(origUuid, jval, str(e)) 

1850 util.fistpoint.activate("LVHDRT_clone_vdi_before_remove_journal", self.sr.uuid) 

1851 

1852 self.sr.journaler.remove(self.JRN_CLONE, origUuid) 

1853 

1854 return self._finishSnapshot(snapVDI, snapVDI2, hostRefs, cloneOp, snapType) 

1855 
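
# Editor's sketch of the space check in _snapshot(): reserve both snapshot
# LVs plus two journal LVs, minus what deflating the base copy down to its
# physical VHD size will give back. All sizes are plain byte counts here.
def sketch_snapshot_space_req(lv_size_orig, lv_size_clone, journal_lv_size,
                              base_utilisation, base_deflated_size):
    size_req = lv_size_orig + lv_size_clone + 2 * journal_lv_size
    # the base copy is deflated before the snapshot LVs are created,
    # so the reclaimed space offsets the reservation
    size_req -= (base_utilisation - base_deflated_size)
    return size_req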

1856 def _createSnap(self, snapUuid, snapSizeLV, isNew): 

1857 """Snapshot self and return the snapshot VDI object""" 

1858 snapLV = lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD] + snapUuid 

1859 snapPath = os.path.join(self.sr.path, snapLV) 

1860 self.sr.lvmCache.create(snapLV, int(snapSizeLV)) 

1861 util.fistpoint.activate("LVHDRT_clone_vdi_after_lvcreate", self.sr.uuid) 

1862 if isNew: 

1863 RefCounter.set(snapUuid, 1, 0, lvhdutil.NS_PREFIX_LVM + self.sr.uuid) 

1864 self.sr.lvActivator.add(snapUuid, snapLV, False) 

1865 parentRaw = (self.vdi_type == vhdutil.VDI_TYPE_RAW) 

1866 vhdutil.snapshot(snapPath, self.path, parentRaw, lvhdutil.MSIZE_MB) 

1867 snapParent = vhdutil.getParent(snapPath, lvhdutil.extractUuid) 

1868 

1869 snapVDI = LVHDVDI(self.sr, snapUuid) 

1870 snapVDI.read_only = False 

1871 snapVDI.location = snapUuid 

1872 snapVDI.size = self.size 

1873 snapVDI.utilisation = snapSizeLV 

1874 snapVDI.sm_config = dict() 

1875 for key, val in self.sm_config.items(): 1875 ↛ 1876 (line 1875 didn't jump to line 1876, because the loop on line 1875 never started)

1876 if key not in [ 

1877 "type", "vdi_type", "vhd-parent", "paused", "relinking", "activating"] and \ 

1878 not key.startswith("host_"): 

1879 snapVDI.sm_config[key] = val 

1880 snapVDI.sm_config["vdi_type"] = vhdutil.VDI_TYPE_VHD 

1881 snapVDI.sm_config["vhd-parent"] = snapParent 

1882 snapVDI.lvname = snapLV 

1883 return snapVDI 

1884 

1885 def _finishSnapshot(self, snapVDI, snapVDI2, hostRefs, cloneOp=False, snapType=None): 

1886 if snapType is not VDI.SNAPSHOT_INTERNAL: 1886 ↛ 1888 (line 1886 didn't jump to line 1888, because the condition on line 1886 was never false)

1887 self.sr._updateStats(self.sr.uuid, self.size) 

1888 basePresent = True 

1889 

1890 # Verify parent locator field of both children and delete basePath if 

1891 # unused 

1892 snapParent = snapVDI.sm_config["vhd-parent"] 

1893 snap2Parent = "" 

1894 if snapVDI2: 1894 ↛ 1896 (line 1894 didn't jump to line 1896, because the condition on line 1894 was never false)

1895 snap2Parent = snapVDI2.sm_config["vhd-parent"] 

1896 if snapParent != self.uuid and \ 1896 ↛ 1923 (line 1896 didn't jump to line 1923, because the condition on line 1896 was never false)

1897 (not snapVDI2 or snap2Parent != self.uuid): 

1898 util.SMlog("%s != %s != %s => deleting unused base %s" % \ 

1899 (snapParent, self.uuid, snap2Parent, self.lvname)) 

1900 RefCounter.put(self.uuid, False, lvhdutil.NS_PREFIX_LVM + self.sr.uuid) 

1901 self.sr.lvmCache.remove(self.lvname) 

1902 self.sr.lvActivator.remove(self.uuid, False) 

1903 if hostRefs: 

1904 self.sr._updateSlavesOnRemove(hostRefs, self.uuid, self.lvname) 

1905 basePresent = False 

1906 else: 

1907 # assign the _binary_ refcount of the original VDI to the new base 

1908 # VDI (but as the normal refcount, since binary refcounts are only 

1909 # for leaf nodes). The normal refcount of the child is not 

1910 # transferred to the base VDI because normal refcounts are

1911 # incremented and decremented individually, and not based on the 

1912 # VHD chain (i.e., the child's normal refcount will be decremented 

1913 # independently of its parent situation). Add 1 for this clone op. 

1914 # Note that we do not need to protect the refcount operations

1915 # below with per-VDI locking like we do in lvutil because at this 

1916 # point we have exclusive access to the VDIs involved. Other SM 

1917 # operations are serialized by the Agent or with the SR lock, and 

1918 # any coalesce activations are serialized with the SR lock. (The 

1919 # coalesce activates the coalesced VDI pair in the beginning, which 

1920 # cannot affect the VDIs here because they cannot possibly be 

1921 # involved in coalescing at this point, and at the relinkSkip step 

1922 # that activates the children, which takes the SR lock.) 

1923 ns = lvhdutil.NS_PREFIX_LVM + self.sr.uuid 

1924 (cnt, bcnt) = RefCounter.check(snapVDI.uuid, ns) 

1925 RefCounter.set(self.uuid, bcnt + 1, 0, ns) 

1926 

1927 # the "paused" and "host_*" sm-config keys are special and must stay on 

1928 # the leaf without being inherited by anyone else 

1929 for key in [x for x in self.sm_config.keys() if x == "paused" or x.startswith("host_")]: 1929 ↛ 1930 (line 1929 didn't jump to line 1930, because the loop on line 1929 never started)

1930 snapVDI.sm_config[key] = self.sm_config[key] 

1931 del self.sm_config[key] 

1932 

1933 # Introduce any new VDI records & update the existing one 

1934 type = self.session.xenapi.VDI.get_type( \ 

1935 self.sr.srcmd.params['vdi_ref']) 

1936 if snapVDI2: 1936 ↛ 1978 (line 1936 didn't jump to line 1978, because the condition on line 1936 was never false)

1937 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1) 

1938 vdiRef = snapVDI2._db_introduce() 

1939 if cloneOp: 

1940 vdi_info = {UUID_TAG: snapVDI2.uuid, 

1941 NAME_LABEL_TAG: util.to_plain_string( \ 

1942 self.session.xenapi.VDI.get_name_label( \ 

1943 self.sr.srcmd.params['vdi_ref'])), 

1944 NAME_DESCRIPTION_TAG: util.to_plain_string( \ 

1945 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])), 

1946 IS_A_SNAPSHOT_TAG: 0, 

1947 SNAPSHOT_OF_TAG: '', 

1948 SNAPSHOT_TIME_TAG: '', 

1949 TYPE_TAG: type, 

1950 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'], 

1951 READ_ONLY_TAG: 0, 

1952 MANAGED_TAG: int(snapVDI2.managed), 

1953 METADATA_OF_POOL_TAG: '' 

1954 } 

1955 else: 

1956 util.SMlog("snapshot VDI params: %s" % \ 

1957 self.session.xenapi.VDI.get_snapshot_time(vdiRef)) 

1958 vdi_info = {UUID_TAG: snapVDI2.uuid, 

1959 NAME_LABEL_TAG: util.to_plain_string( \ 

1960 self.session.xenapi.VDI.get_name_label( \ 

1961 self.sr.srcmd.params['vdi_ref'])), 

1962 NAME_DESCRIPTION_TAG: util.to_plain_string( \ 

1963 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])), 

1964 IS_A_SNAPSHOT_TAG: 1, 

1965 SNAPSHOT_OF_TAG: snapVDI.uuid, 

1966 SNAPSHOT_TIME_TAG: '', 

1967 TYPE_TAG: type, 

1968 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'], 

1969 READ_ONLY_TAG: 0, 

1970 MANAGED_TAG: int(snapVDI2.managed), 

1971 METADATA_OF_POOL_TAG: '' 

1972 } 

1973 

1974 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info) 

1975 util.SMlog("vdi_clone: introduced 2nd snap VDI: %s (%s)" % \ 

1976 (vdiRef, snapVDI2.uuid)) 

1977 

1978 if basePresent: 1978 ↛ 1979 (line 1978 didn't jump to line 1979, because the condition on line 1978 was never true)

1979 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1) 

1980 vdiRef = self._db_introduce() 

1981 vdi_info = {UUID_TAG: self.uuid, 

1982 NAME_LABEL_TAG: self.label, 

1983 NAME_DESCRIPTION_TAG: self.description, 

1984 IS_A_SNAPSHOT_TAG: 0, 

1985 SNAPSHOT_OF_TAG: '', 

1986 SNAPSHOT_TIME_TAG: '', 

1987 TYPE_TAG: type, 

1988 VDI_TYPE_TAG: self.sm_config['vdi_type'], 

1989 READ_ONLY_TAG: 1, 

1990 MANAGED_TAG: 0, 

1991 METADATA_OF_POOL_TAG: '' 

1992 } 

1993 

1994 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info) 

1995 util.SMlog("vdi_clone: introduced base VDI: %s (%s)" % \ 

1996 (vdiRef, self.uuid)) 

1997 

1998 # Update the original record 

1999 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

2000 self.session.xenapi.VDI.set_sm_config(vdi_ref, snapVDI.sm_config) 

2001 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, \ 

2002 str(snapVDI.utilisation)) 

2003 

2004 # Return the info on the new snap VDI 

2005 snap = snapVDI2 

2006 if not snap: 2006 ↛ 2007 (line 2006 didn't jump to line 2007, because the condition on line 2006 was never true)

2007 snap = self 

2008 if not basePresent: 

2009 # a single-snapshot of an empty VDI will be a noop, resulting 

2010 # in no new VDIs, so return the existing one. The GC wouldn't 

2011 # normally try to single-snapshot an empty VHD of course, but 

2012 # if an external snapshot operation manages to sneak in right 

2013 # before a snapshot-coalesce phase, we would get here 

2014 snap = snapVDI 

2015 return snap.get_params() 

2016 
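
# Editor's sketch of the refcount hand-off described in the long comment
# inside _finishSnapshot(): the leaf's binary count (plus one for the clone
# op itself) becomes the base copy's normal count, because the base is no
# longer a leaf. The RefCounter calls are modelled as plain callables.
def sketch_transfer_refcount(check, set_count, leaf_uuid, base_uuid, ns):
    cnt, bcnt = check(leaf_uuid, ns)       # (normal, binary) counts of the leaf
    set_count(base_uuid, bcnt + 1, 0, ns)  # normal := binary + 1, binary := 0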

2017 def _initFromVDIInfo(self, vdiInfo): 

2018 self.vdi_type = vdiInfo.vdiType 

2019 self.lvname = vdiInfo.lvName 

2020 self.size = vdiInfo.sizeVirt 

2021 self.utilisation = vdiInfo.sizeLV 

2022 self.hidden = vdiInfo.hidden 

2023 if self.hidden: 2023 ↛ 2024 (line 2023 didn't jump to line 2024, because the condition on line 2023 was never true)

2024 self.managed = False 

2025 self.active = vdiInfo.lvActive 

2026 self.readonly = vdiInfo.lvReadonly 

2027 self.parent = vdiInfo.parentUuid 

2028 self.path = os.path.join(self.sr.path, self.lvname) 

2029 if hasattr(self, "sm_config_override"): 2029 ↛ 2032 (line 2029 didn't jump to line 2032, because the condition on line 2029 was never false)

2030 self.sm_config_override["vdi_type"] = self.vdi_type 

2031 else: 

2032 self.sm_config_override = {'vdi_type': self.vdi_type} 

2033 self.loaded = True 

2034 

2035 def _initFromLVInfo(self, lvInfo): 

2036 self.vdi_type = lvInfo.vdiType 

2037 self.lvname = lvInfo.name 

2038 self.size = lvInfo.size 

2039 self.utilisation = lvInfo.size 

2040 self.hidden = lvInfo.hidden 

2041 self.active = lvInfo.active 

2042 self.readonly = lvInfo.readonly 

2043 self.parent = '' 

2044 self.path = os.path.join(self.sr.path, self.lvname) 

2045 if hasattr(self, "sm_config_override"): 2045 ↛ 2048 (line 2045 didn't jump to line 2048, because the condition on line 2045 was never false)

2046 self.sm_config_override["vdi_type"] = self.vdi_type 

2047 else: 

2048 self.sm_config_override = {'vdi_type': self.vdi_type} 

2049 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 2049 ↛ 2050 (line 2049 didn't jump to line 2050, because the condition on line 2049 was never true)

2050 self.loaded = True 

2051 

2052 def _initFromVHDInfo(self, vhdInfo): 

2053 self.size = vhdInfo.sizeVirt 

2054 self.parent = vhdInfo.parentUuid 

2055 self.hidden = vhdInfo.hidden 

2056 self.loaded = True 

2057 

2058 def _determineType(self): 

2059 """Determine whether this is a raw or a VHD VDI""" 

2060 if "vdi_ref" in self.sr.srcmd.params: 2060 ↛ 2073line 2060 didn't jump to line 2073, because the condition on line 2060 was never false

2061 vdi_ref = self.sr.srcmd.params["vdi_ref"] 

2062 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

2063 if sm_config.get("vdi_type"): 2063 ↛ 2064 (line 2063 didn't jump to line 2064, because the condition on line 2063 was never true)

2064 self.vdi_type = sm_config["vdi_type"] 

2065 prefix = lvhdutil.LV_PREFIX[self.vdi_type] 

2066 self.lvname = "%s%s" % (prefix, self.uuid) 

2067 self.path = os.path.join(self.sr.path, self.lvname) 

2068 self.sm_config_override = sm_config 

2069 return True 

2070 

2071 # LVM commands can be costly, so check the file directly first in case 

2072 # the LV is active 

2073 found = False 

2074 for t in lvhdutil.VDI_TYPES: 2074 ↛ 2075 (line 2074 didn't jump to line 2075, because the loop on line 2074 never started)

2075 lvname = "%s%s" % (lvhdutil.LV_PREFIX[t], self.uuid) 

2076 path = os.path.join(self.sr.path, lvname) 

2077 if util.pathexists(path): 

2078 if found: 

2079 raise xs_errors.XenError('VDILoad', 

2080 opterr="multiple VDI's: uuid %s" % self.uuid) 

2081 found = True 

2082 self.vdi_type = t 

2083 self.lvname = lvname 

2084 self.path = path 

2085 if found: 2085 ↛ 2086 (line 2085 didn't jump to line 2086, because the condition on line 2085 was never true)

2086 return True 

2087 

2088 # now list all LVs

2089 if not lvutil._checkVG(self.sr.vgname): 2089 ↛ 2091 (line 2089 didn't jump to line 2091, because the condition on line 2089 was never true)

2090 # when doing attach_from_config, the VG won't be there yet 

2091 return False 

2092 

2093 lvs = lvhdutil.getLVInfo(self.sr.lvmCache) 

2094 if lvs.get(self.uuid): 

2095 self._initFromLVInfo(lvs[self.uuid]) 

2096 return True 

2097 return False 

2098 
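
# Editor's note: _determineType() probes in cost order: first the vdi_ref's
# sm_config (no LVM access), then a direct path check under the VG for each
# known LV prefix (which only works if the LV happens to be active), and
# finally a full LVM listing via lvhdutil.getLVInfo() as the most expensive
# fallback.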

2099 def _loadThis(self): 

2100 """Load VDI info for this VDI and activate the LV if it's VHD. We 

2101 don't do it in VDI.load() because not all VDI operations need it.""" 

2102 if self.loaded: 2102 ↛ 2103 (line 2102 didn't jump to line 2103, because the condition on line 2102 was never true)

2103 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 

2104 self.sr.lvActivator.activate(self.uuid, self.lvname, False) 

2105 return 

2106 try: 

2107 lvs = lvhdutil.getLVInfo(self.sr.lvmCache, self.lvname) 

2108 except util.CommandException as e: 

2109 raise xs_errors.XenError('VDIUnavailable', 

2110 opterr='%s (LV scan error)' % os.strerror(abs(e.code))) 

2111 if not lvs.get(self.uuid): 2111 ↛ 2112 (line 2111 didn't jump to line 2112, because the condition on line 2111 was never true)

2112 raise xs_errors.XenError('VDIUnavailable', opterr='LV not found') 

2113 self._initFromLVInfo(lvs[self.uuid]) 

2114 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 2114 ↛ 2121 (line 2114 didn't jump to line 2121, because the condition on line 2114 was never false)

2115 self.sr.lvActivator.activate(self.uuid, self.lvname, False) 

2116 vhdInfo = vhdutil.getVHDInfo(self.path, lvhdutil.extractUuid, False) 

2117 if not vhdInfo: 2117 ↛ 2118 (line 2117 didn't jump to line 2118, because the condition on line 2117 was never true)

2118 raise xs_errors.XenError('VDIUnavailable', \ 

2119 opterr='getVHDInfo failed') 

2120 self._initFromVHDInfo(vhdInfo) 

2121 self.loaded = True 

2122 

2123 def _chainSetActive(self, active, binary, persistent=False): 

2124 if binary: 2124 ↛ 2125 (line 2124 didn't jump to line 2125, because the condition on line 2124 was never true)

2125 (count, bcount) = RefCounter.checkLocked(self.uuid, 

2126 lvhdutil.NS_PREFIX_LVM + self.sr.uuid) 

2127 if (active and bcount > 0) or (not active and bcount == 0): 

2128 return # this is a redundant activation/deactivation call 

2129 

2130 vdiList = {self.uuid: self.lvname} 

2131 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 2131 ↛ 2134 (line 2131 didn't jump to line 2134, because the condition on line 2131 was never false)

2132 vdiList = vhdutil.getParentChain(self.lvname, 

2133 lvhdutil.extractUuid, self.sr.vgname) 

2134 for uuid, lvName in vdiList.items(): 2134 ↛ 2135 (line 2134 didn't jump to line 2135, because the loop on line 2134 never started)

2135 binaryParam = binary 

2136 if uuid != self.uuid: 

2137 binaryParam = False # binary param only applies to leaf nodes 

2138 if active: 

2139 self.sr.lvActivator.activate(uuid, lvName, binaryParam, 

2140 persistent) 

2141 else: 

2142 # just add the LVs for deactivation in the final (cleanup) 

2143 # step. The LVs must not have been activated during the current 

2144 # operation 

2145 self.sr.lvActivator.add(uuid, lvName, binaryParam) 

2146 
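
# Editor's sketch of _chainSetActive() above: the binary (leaf-attach)
# refcount applies only to the leaf itself; every ancestor in the VHD chain
# gets a plain refcounted activation, and on deactivation the LVs are merely
# queued for the final cleanup step. The activator is a stand-in object with
# the same activate()/add() shape as LVActivator.
def sketch_chain_set_active(activator, chain, leaf_uuid, active,
                            binary, persistent=False):
    for uuid, lv_name in chain.items():  # chain: {uuid: lv_name}, leaf included
        use_binary = binary and uuid == leaf_uuid
        if active:
            activator.activate(uuid, lv_name, use_binary, persistent)
        else:
            activator.add(uuid, lv_name, use_binary)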

2147 def _failClone(self, uuid, jval, msg): 

2148 try: 

2149 self.sr._handleInterruptedCloneOp(uuid, jval, True) 

2150 self.sr.journaler.remove(self.JRN_CLONE, uuid) 

2151 except Exception as e: 

2152 util.SMlog('WARNING: failed to clean up failed snapshot: ' \ 

2153 ' %s (error ignored)' % e) 

2154 raise xs_errors.XenError('VDIClone', opterr=msg) 

2155 

2156 def _markHidden(self): 

2157 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

2158 self.sr.lvmCache.setHidden(self.lvname) 

2159 else: 

2160 vhdutil.setHidden(self.path) 

2161 self.hidden = 1 

2162 

2163 def _prepareThin(self, attach): 

2164 origUtilisation = self.sr.lvmCache.getSize(self.lvname) 

2165 if self.sr.isMaster: 

2166 # the master can prepare the VDI locally 

2167 if attach: 

2168 lvhdutil.attachThin(self.sr.journaler, self.sr.uuid, self.uuid) 

2169 else: 

2170 lvhdutil.detachThin(self.session, self.sr.lvmCache, 

2171 self.sr.uuid, self.uuid) 

2172 else: 

2173 fn = "attach" 

2174 if not attach: 

2175 fn = "detach" 

2176 pools = self.session.xenapi.pool.get_all() 

2177 master = self.session.xenapi.pool.get_master(pools[0]) 

2178 rv = self.session.xenapi.host.call_plugin( 

2179 master, self.sr.THIN_PLUGIN, fn, 

2180 {"srUuid": self.sr.uuid, "vdiUuid": self.uuid}) 

2181 util.SMlog("call-plugin returned: %s" % rv) 

2182 if not rv: 

2183 raise Exception('plugin %s failed' % self.sr.THIN_PLUGIN) 

2184 # refresh to pick up the size change on this slave 

2185 self.sr.lvmCache.activateNoRefcount(self.lvname, True) 

2186 

2187 self.utilisation = self.sr.lvmCache.getSize(self.lvname) 

2188 if origUtilisation != self.utilisation: 

2189 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

2190 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, 

2191 str(self.utilisation)) 

2192 stats = lvutil._getVGstats(self.sr.vgname) 

2193 sr_utilisation = stats['physical_utilisation'] 

2194 self.session.xenapi.SR.set_physical_utilisation(self.sr.sr_ref, 

2195 str(sr_utilisation)) 

2196 

2197 @override 

2198 def update(self, sr_uuid, vdi_uuid) -> None: 

2199 if self.sr.legacyMode: 

2200 return 

2201 

2202 # Sync the name_label of this VDI on storage with the name_label in XAPI

2203 vdi_ref = self.session.xenapi.VDI.get_by_uuid(self.uuid) 

2204 update_map = {} 

2205 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \ 

2206 METADATA_OBJECT_TYPE_VDI 

2207 update_map[UUID_TAG] = self.uuid 

2208 update_map[NAME_LABEL_TAG] = util.to_plain_string( \ 

2209 self.session.xenapi.VDI.get_name_label(vdi_ref)) 

2210 update_map[NAME_DESCRIPTION_TAG] = util.to_plain_string( \ 

2211 self.session.xenapi.VDI.get_name_description(vdi_ref)) 

2212 update_map[SNAPSHOT_TIME_TAG] = \ 

2213 self.session.xenapi.VDI.get_snapshot_time(vdi_ref) 

2214 update_map[METADATA_OF_POOL_TAG] = \ 

2215 self.session.xenapi.VDI.get_metadata_of_pool(vdi_ref) 

2216 LVMMetadataHandler(self.sr.mdpath).updateMetadata(update_map) 

2217 

2218 @override 

2219 def _ensure_cbt_space(self) -> None: 

2220 self.sr.ensureCBTSpace() 

2221 

2222 @override 

2223 def _create_cbt_log(self) -> str: 

2224 logname = self._get_cbt_logname(self.uuid) 

2225 self.sr.lvmCache.create(logname, self.sr.journaler.LV_SIZE, CBTLOG_TAG) 

2226 logpath = super(LVHDVDI, self)._create_cbt_log() 

2227 self.sr.lvmCache.deactivateNoRefcount(logname) 

2228 return logpath 

2229 

2230 @override 

2231 def _delete_cbt_log(self) -> None: 

2232 logpath = self._get_cbt_logpath(self.uuid) 

2233 if self._cbt_log_exists(logpath): 

2234 logname = self._get_cbt_logname(self.uuid) 

2235 self.sr.lvmCache.remove(logname) 

2236 

2237 @override 

2238 def _rename(self, oldpath, newpath) -> None: 

2239 oldname = os.path.basename(oldpath) 

2240 newname = os.path.basename(newpath) 

2241 self.sr.lvmCache.rename(oldname, newname) 

2242 

2243 @override 

2244 def _activate_cbt_log(self, lv_name) -> bool: 

2245 self.sr.lvmCache.refresh() 

2246 if not self.sr.lvmCache.is_active(lv_name): 2246 ↛ 2247 (line 2246 didn't jump to line 2247, because the condition on line 2246 was never true)

2247 try: 

2248 self.sr.lvmCache.activateNoRefcount(lv_name) 

2249 return True 

2250 except Exception as e: 

2251 util.SMlog("Exception in _activate_cbt_log, " 

2252 "Error: %s." % str(e)) 

2253 raise 

2254 else: 

2255 return False 

2256 

2257 @override 

2258 def _deactivate_cbt_log(self, lv_name) -> None: 

2259 try: 

2260 self.sr.lvmCache.deactivateNoRefcount(lv_name) 

2261 except Exception as e: 

2262 util.SMlog("Exception in _deactivate_cbt_log, Error: %s." % str(e)) 

2263 raise 

2264 

2265 @override 

2266 def _cbt_log_exists(self, logpath) -> bool: 

2267 return lvutil.exists(logpath) 

2268 

2269if __name__ == '__main__': 2269 ↛ 2270 (line 2269 didn't jump to line 2270, because the condition on line 2269 was never true)

2270 SRCommand.run(LVHDSR, DRIVER_INFO) 

2271else: 

2272 SR.registerSR(LVHDSR)