1 #!/usr/bin/python3

2 #

3 # Copyright (C) Citrix Systems Inc.

4 #

5 # This program is free software; you can redistribute it and/or modify

6 # it under the terms of the GNU Lesser General Public License as published

7 # by the Free Software Foundation; version 2.1 only.

8 #

9 # This program is distributed in the hope that it will be useful,

10 # but WITHOUT ANY WARRANTY; without even the implied warranty of

11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the

12 # GNU Lesser General Public License for more details.

13 #

14 # You should have received a copy of the GNU Lesser General Public License

15 # along with this program; if not, write to the Free Software Foundation, Inc.,

16 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

17 #

18 # LVHDSR: VHD on LVM storage repository

19 #

20

21 import SR

22 from SR import deviceCheck

23 import VDI

24 import SRCommand

25 import util

26 import lvutil

27 import lvmcache

28 import vhdutil

29 import lvhdutil

30 import scsiutil

31 import os

32 import sys

33 import time

34 import errno

35 import xs_errors

36 import cleanup

37 import blktap2

38 from journaler import Journaler

39 from lock import Lock

40 from refcounter import RefCounter

41 from ipc import IPCFlag

42 from lvmanager import LVActivator

43 import XenAPI # pylint: disable=import-error

44 import re

45 from srmetadata import ALLOCATION_TAG, NAME_LABEL_TAG, NAME_DESCRIPTION_TAG, \

46 UUID_TAG, IS_A_SNAPSHOT_TAG, SNAPSHOT_OF_TAG, TYPE_TAG, VDI_TYPE_TAG, \

47 READ_ONLY_TAG, MANAGED_TAG, SNAPSHOT_TIME_TAG, METADATA_OF_POOL_TAG, \

48 LVMMetadataHandler, METADATA_OBJECT_TYPE_VDI, \

49 METADATA_OBJECT_TYPE_SR, METADATA_UPDATE_OBJECT_TYPE_TAG

50 from metadata import retrieveXMLfromFile, _parseXML

51 from xmlrpc.client import DateTime

52 import glob

53 from constants import CBTLOG_TAG

54 from fairlock import Fairlock

55 DEV_MAPPER_ROOT = os.path.join('/dev/mapper', lvhdutil.VG_PREFIX)

56

57 geneology = {}

58 CAPABILITIES = ["SR_PROBE", "SR_UPDATE", "SR_TRIM",

59 "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH", "VDI_MIRROR",

60 "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "ATOMIC_PAUSE",

61 "VDI_RESET_ON_BOOT/2", "VDI_UPDATE", "VDI_CONFIG_CBT",

62 "VDI_ACTIVATE", "VDI_DEACTIVATE"]

63

64 CONFIGURATION = [['device', 'local device path (required) (e.g. /dev/sda3)']]

65

66 DRIVER_INFO = {

67 'name': 'Local VHD on LVM',

68 'description': 'SR plugin which represents disks as VHD disks on ' + \

69 'Logical Volumes within a locally-attached Volume Group',

70 'vendor': 'XenSource Inc',

71 'copyright': '(C) 2008 XenSource Inc',

72 'driver_version': '1.0',

73 'required_api_version': '1.0',

74 'capabilities': CAPABILITIES,

75 'configuration': CONFIGURATION

76 }

77

78 PARAM_VHD = "vhd"

79 PARAM_RAW = "raw"

80

81 OPS_EXCLUSIVE = [

82 "sr_create", "sr_delete", "sr_attach", "sr_detach", "sr_scan",

83 "sr_update", "vdi_create", "vdi_delete", "vdi_resize", "vdi_snapshot",

84 "vdi_clone"]

85

86 # Log if snapshot pauses VM for more than this many seconds

87 LONG_SNAPTIME = 60

88

89 class LVHDSR(SR.SR):

90 DRIVER_TYPE = 'lvhd' 

91 

92 PROVISIONING_TYPES = ["thin", "thick"] 

93 PROVISIONING_DEFAULT = "thick" 

94 THIN_PLUGIN = "lvhd-thin" 

95 

96 PLUGIN_ON_SLAVE = "on-slave" 

97 

98 FLAG_USE_VHD = "use_vhd" 

99 MDVOLUME_NAME = "MGT" 

100 

101 ALLOCATION_QUANTUM = "allocation_quantum" 

102 INITIAL_ALLOCATION = "initial_allocation" 

103 

104 LOCK_RETRY_INTERVAL = 3 

105 LOCK_RETRY_ATTEMPTS = 10 

106 

107 TEST_MODE_KEY = "testmode" 

108 TEST_MODE_VHD_FAIL_REPARENT_BEGIN = "vhd_fail_reparent_begin" 

109 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR = "vhd_fail_reparent_locator" 

110 TEST_MODE_VHD_FAIL_REPARENT_END = "vhd_fail_reparent_end" 

111 TEST_MODE_VHD_FAIL_RESIZE_BEGIN = "vhd_fail_resize_begin" 

112 TEST_MODE_VHD_FAIL_RESIZE_DATA = "vhd_fail_resize_data" 

113 TEST_MODE_VHD_FAIL_RESIZE_METADATA = "vhd_fail_resize_metadata" 

114 TEST_MODE_VHD_FAIL_RESIZE_END = "vhd_fail_resize_end" 

115 
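# Maps each test-mode name to the vhd-util environment variable that enables the corresponding failure injection.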

116 ENV_VAR_VHD_TEST = { 

117 TEST_MODE_VHD_FAIL_REPARENT_BEGIN: 

118 "VHD_UTIL_TEST_FAIL_REPARENT_BEGIN", 

119 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR: 

120 "VHD_UTIL_TEST_FAIL_REPARENT_LOCATOR", 

121 TEST_MODE_VHD_FAIL_REPARENT_END: 

122 "VHD_UTIL_TEST_FAIL_REPARENT_END", 

123 TEST_MODE_VHD_FAIL_RESIZE_BEGIN: 

124 "VHD_UTIL_TEST_FAIL_RESIZE_BEGIN", 

125 TEST_MODE_VHD_FAIL_RESIZE_DATA: 

126 "VHD_UTIL_TEST_FAIL_RESIZE_DATA_MOVED", 

127 TEST_MODE_VHD_FAIL_RESIZE_METADATA: 

128 "VHD_UTIL_TEST_FAIL_RESIZE_METADATA_MOVED", 

129 TEST_MODE_VHD_FAIL_RESIZE_END: 

130 "VHD_UTIL_TEST_FAIL_RESIZE_END" 

131 } 

132 testMode = "" 

133 

134 legacyMode = True 

135 

136 def handles(type): 

137 """Returns True if this SR class understands the given dconf string""" 

138 # we can pose as LVMSR or EXTSR for compatibility purposes 

139 if __name__ == '__main__': 

140 name = sys.argv[0] 

141 else: 

142 name = __name__ 

143 if name.endswith("LVMSR"): 

144 return type == "lvm" 

145 elif name.endswith("EXTSR"): 

146 return type == "ext" 

147 return type == LVHDSR.DRIVER_TYPE 

148 handles = staticmethod(handles) 

149 
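# Initialise SR state: locking, LVM cache, LV activator and journaler; determine the provisioning mode and whether the SR is still running in legacy mode.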

150 def load(self, sr_uuid): 

151 self.ops_exclusive = OPS_EXCLUSIVE 

152 

153 self.isMaster = False 

154 if 'SRmaster' in self.dconf and self.dconf['SRmaster'] == 'true': 

155 self.isMaster = True 

156 

157 self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid) 

158 self.sr_vditype = SR.DEFAULT_TAP 

159 self.uuid = sr_uuid 

160 self.vgname = lvhdutil.VG_PREFIX + self.uuid 

161 self.path = os.path.join(lvhdutil.VG_LOCATION, self.vgname) 

162 self.mdpath = os.path.join(self.path, self.MDVOLUME_NAME) 

163 self.provision = self.PROVISIONING_DEFAULT 

164 

165 self.other_conf = None 

166 has_sr_ref = self.srcmd.params.get("sr_ref") 

167 if has_sr_ref: 

168 self.other_conf = self.session.xenapi.SR.get_other_config(self.sr_ref) 

169 

170 self.lvm_conf = None 

171 if self.other_conf: 

172 self.lvm_conf = self.other_conf.get('lvm-conf') 

173 

174 try: 

175 self.lvmCache = lvmcache.LVMCache(self.vgname, self.lvm_conf) 

176 except: 

177 raise xs_errors.XenError('SRUnavailable', \ 

178 opterr='Failed to initialise the LVMCache') 

179 self.lvActivator = LVActivator(self.uuid, self.lvmCache) 

180 self.journaler = Journaler(self.lvmCache) 

181 if not has_sr_ref: 

182 return # must be a probe call 

183 # Test for thick vs thin provisioning conf parameter 

184 if 'allocation' in self.dconf: 184 ↛ 185 (condition never true)

185 if self.dconf['allocation'] in self.PROVISIONING_TYPES: 

186 self.provision = self.dconf['allocation'] 

187 else: 

188 raise xs_errors.XenError('InvalidArg', \ 

189 opterr='Allocation parameter must be one of %s' % self.PROVISIONING_TYPES) 

190 

191 if self.other_conf.get(self.TEST_MODE_KEY): 191 ↛ 195 (condition never false)

192 self.testMode = self.other_conf[self.TEST_MODE_KEY] 

193 self._prepareTestMode() 

194 

195 self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref) 

196 # sm_config flag overrides PBD, if any 

197 if self.sm_config.get('allocation') in self.PROVISIONING_TYPES: 

198 self.provision = self.sm_config.get('allocation') 

199 

200 if self.sm_config.get(self.FLAG_USE_VHD) == "true": 

201 self.legacyMode = False 

202 

203 if lvutil._checkVG(self.vgname): 

204 if self.isMaster and not self.cmd in ["vdi_attach", "vdi_detach", 204 ↛ 207 (condition never false)

205 "vdi_activate", "vdi_deactivate"]: 

206 self._undoAllJournals() 

207 if not self.cmd in ["sr_attach", "sr_probe"]: 

208 self._checkMetadataVolume() 

209 

210 self.mdexists = False 

211 

212 # get a VDI -> TYPE map from the storage 

213 contains_uuid_regex = \ 

214 re.compile("^.*[0-9a-f]{8}-(([0-9a-f]{4})-){3}[0-9a-f]{12}.*") 

215 self.storageVDIs = {} 

216 

217 for key in self.lvmCache.lvs.keys(): 217 ↛ 219 (loop never started)

218 # if the lvname has a uuid in it 

219 type = None 

220 if contains_uuid_regex.search(key) is not None: 

221 if key.startswith(lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD]): 

222 type = vhdutil.VDI_TYPE_VHD 

223 vdi = key[len(lvhdutil.LV_PREFIX[type]):] 

224 elif key.startswith(lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_RAW]): 

225 type = vhdutil.VDI_TYPE_RAW 

226 vdi = key[len(lvhdutil.LV_PREFIX[type]):] 

227 else: 

228 continue 

229 

230 if type is not None: 

231 self.storageVDIs[vdi] = type 

232 

233 # check if metadata volume exists 

234 try: 

235 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME) 

236 except: 

237 pass 

238 

239 def cleanup(self): 

240 # we don't need to hold the lock to dec refcounts of activated LVs 

241 if not self.lvActivator.deactivateAll(): 241 ↛ 242 (condition never true)

242 raise util.SMException("failed to deactivate LVs") 

243 

244 def updateSRMetadata(self, allocation): 

245 try: 

246 # Add SR specific SR metadata 

247 sr_info = \ 

248 {ALLOCATION_TAG: allocation, 

249 UUID_TAG: self.uuid, 

250 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_label(self.sr_ref)), 

251 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_description(self.sr_ref)) 

252 } 

253 

254 vdi_info = {} 

255 for vdi in self.session.xenapi.SR.get_VDIs(self.sr_ref): 

256 vdi_uuid = self.session.xenapi.VDI.get_uuid(vdi) 

257 

258 # Create the VDI entry in the SR metadata 

259 vdi_info[vdi_uuid] = \ 

260 { 

261 UUID_TAG: vdi_uuid, 

262 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi)), 

263 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi)), 

264 IS_A_SNAPSHOT_TAG: \ 

265 int(self.session.xenapi.VDI.get_is_a_snapshot(vdi)), 

266 SNAPSHOT_OF_TAG: \ 

267 self.session.xenapi.VDI.get_snapshot_of(vdi), 

268 SNAPSHOT_TIME_TAG: \ 

269 self.session.xenapi.VDI.get_snapshot_time(vdi), 

270 TYPE_TAG: \ 

271 self.session.xenapi.VDI.get_type(vdi), 

272 VDI_TYPE_TAG: \ 

273 self.session.xenapi.VDI.get_sm_config(vdi)['vdi_type'], 

274 READ_ONLY_TAG: \ 

275 int(self.session.xenapi.VDI.get_read_only(vdi)), 

276 METADATA_OF_POOL_TAG: \ 

277 self.session.xenapi.VDI.get_metadata_of_pool(vdi), 

278 MANAGED_TAG: \ 

279 int(self.session.xenapi.VDI.get_managed(vdi)) 

280 } 

281 LVMMetadataHandler(self.mdpath).writeMetadata(sr_info, vdi_info) 

282 

283 except Exception as e: 

284 raise xs_errors.XenError('MetadataError', \ 

285 opterr='Error upgrading SR Metadata: %s' % str(e)) 

286 

287 def syncMetadataAndStorage(self): 

288 try: 

289 # if a VDI is present in the metadata but not in the storage 

290 # then delete it from the metadata 

291 vdi_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1] 

292 for vdi in list(vdi_info.keys()): 

293 update_map = {} 

294 if not vdi_info[vdi][UUID_TAG] in set(self.storageVDIs.keys()): 294 ↛ 301 (condition never false)

295 # delete this from metadata 

296 LVMMetadataHandler(self.mdpath). \ 

297 deleteVdiFromMetadata(vdi_info[vdi][UUID_TAG]) 

298 else: 

299 # search for this in the metadata, compare types 

300 # self.storageVDIs is a map of vdi_uuid to vdi_type 

301 if vdi_info[vdi][VDI_TYPE_TAG] != \ 

302 self.storageVDIs[vdi_info[vdi][UUID_TAG]]: 

303 # storage type takes authority 

304 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] \ 

305 = METADATA_OBJECT_TYPE_VDI 

306 update_map[UUID_TAG] = vdi_info[vdi][UUID_TAG] 

307 update_map[VDI_TYPE_TAG] = \ 

308 self.storageVDIs[vdi_info[vdi][UUID_TAG]] 

309 LVMMetadataHandler(self.mdpath) \ 

310 .updateMetadata(update_map) 

311 else: 

312 # This should never happen 

313 pass 

314 

315 except Exception as e: 

316 raise xs_errors.XenError('MetadataError', \ 

317 opterr='Error synching SR Metadata and storage: %s' % str(e)) 

318 

319 def syncMetadataAndXapi(self): 

320 try: 

321 # get metadata 

322 (sr_info, vdi_info) = \ 

323 LVMMetadataHandler(self.mdpath, False).getMetadata() 

324 

325 # First synch SR parameters 

326 self.update(self.uuid) 

327 

328 # Now update the VDI information in the metadata if required 

329 for vdi_offset in vdi_info.keys(): 

330 try: 

331 vdi_ref = \ 

332 self.session.xenapi.VDI.get_by_uuid( \ 

333 vdi_info[vdi_offset][UUID_TAG]) 

334 except: 

335 # the VDI may not be in XAPI yet; don't bother

336 continue 

337 

338 new_name_label = util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi_ref)) 

339 new_name_description = util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi_ref)) 

340 

341 if vdi_info[vdi_offset][NAME_LABEL_TAG] != new_name_label or \ 

342 vdi_info[vdi_offset][NAME_DESCRIPTION_TAG] != \ 

343 new_name_description: 

344 update_map = {} 

345 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \ 

346 METADATA_OBJECT_TYPE_VDI 

347 update_map[UUID_TAG] = vdi_info[vdi_offset][UUID_TAG] 

348 update_map[NAME_LABEL_TAG] = new_name_label 

349 update_map[NAME_DESCRIPTION_TAG] = new_name_description 

350 LVMMetadataHandler(self.mdpath) \ 

351 .updateMetadata(update_map) 

352 except Exception as e: 

353 raise xs_errors.XenError('MetadataError', \ 

354 opterr='Error synching SR Metadata and XAPI: %s' % str(e)) 

355 

356 def _checkMetadataVolume(self): 

357 util.SMlog("Entering _checkMetadataVolume") 

358 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME) 

359 if self.isMaster: 359 ↛ 375 (condition never false)

360 if self.mdexists and self.cmd == "sr_attach": 

361 try: 

362 # activate the management volume 

363 # will be deactivated at detach time 

364 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME) 

365 self._synchSmConfigWithMetaData() 

366 util.SMlog("Sync SR metadata and the state on the storage.") 

367 self.syncMetadataAndStorage() 

368 self.syncMetadataAndXapi() 

369 except Exception as e: 

370 util.SMlog("Exception in _checkMetadataVolume, " \ 

371 "Error: %s." % str(e)) 

372 elif not self.mdexists and not self.legacyMode: 372 ↛ 375 (condition never false)

373 self._introduceMetaDataVolume() 

374 

375 if self.mdexists: 

376 self.legacyMode = False 

377 

378 def _synchSmConfigWithMetaData(self): 

379 util.SMlog("Synching sm-config with metadata volume") 

380 

381 try: 

382 # get SR info from metadata 

383 sr_info = {} 

384 map = {} 

385 sr_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[0] 

386 

387 if sr_info == {}: 387 ↛ 388 (condition never true)

388 raise Exception("Failed to get SR information from metadata.") 

389 

390 if "allocation" in sr_info: 390 ↛ 394line 390 didn't jump to line 394, because the condition on line 390 was never false

391 self.provision = sr_info.get("allocation") 

392 map['allocation'] = sr_info.get("allocation") 

393 else: 

394 raise Exception("Allocation key not found in SR metadata. " 

395 "SR info found: %s" % sr_info) 

396 

397 except Exception as e: 

398 raise xs_errors.XenError( 

399 'MetadataError', 

400 opterr='Error reading SR params from ' 

401 'metadata Volume: %s' % str(e)) 

402 try: 

403 map[self.FLAG_USE_VHD] = 'true' 

404 self.session.xenapi.SR.set_sm_config(self.sr_ref, map) 

405 except: 

406 raise xs_errors.XenError( 

407 'MetadataError', 

408 opterr='Error updating sm_config key') 

409 

410 def _introduceMetaDataVolume(self): 

411 util.SMlog("Creating Metadata volume") 

412 try: 

413 config = {} 

414 self.lvmCache.create(self.MDVOLUME_NAME, 4 * 1024 * 1024) 

415 

416 # activate the management volume, will be deactivated at detach time 

417 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME) 

418 

419 name_label = util.to_plain_string( \ 

420 self.session.xenapi.SR.get_name_label(self.sr_ref)) 

421 name_description = util.to_plain_string( \ 

422 self.session.xenapi.SR.get_name_description(self.sr_ref)) 

423 config[self.FLAG_USE_VHD] = "true" 

424 config['allocation'] = self.provision 

425 self.session.xenapi.SR.set_sm_config(self.sr_ref, config) 

426 

427 # Add the SR metadata 

428 self.updateSRMetadata(self.provision) 

429 except Exception as e: 

430 raise xs_errors.XenError('MetadataError', \ 

431 opterr='Error introducing Metadata Volume: %s' % str(e)) 

432 

433 def _removeMetadataVolume(self): 

434 if self.mdexists: 

435 try: 

436 self.lvmCache.remove(self.MDVOLUME_NAME) 

437 except: 

438 raise xs_errors.XenError('MetadataError', \ 

439 opterr='Failed to delete MGT Volume') 

440 

441 def _refresh_size(self): 

442 """ 

443 Refreshes the size of the backing device.

444 Returns True if all paths/devices agree on the same size.

445 """ 

446 if hasattr(self, 'SCSIid'): 446 ↛ 448 (condition never true)

447 # LVHDoHBASR, LVHDoISCSISR 

448 return scsiutil.refresh_lun_size_by_SCSIid(getattr(self, 'SCSIid')) 

449 else: 

450 # LVHDSR 

451 devices = self.dconf['device'].split(',') 

452 scsiutil.refreshdev(devices) 

453 return True 

454 

455 def _expand_size(self): 

456 """ 

457 Expands the size of the SR by growing into additional available

458 space, if extra space is available on the backing device.

459 Needs to be called after a successful call of _refresh_size. 

460 """ 

461 currentvgsize = lvutil._getVGstats(self.vgname)['physical_size'] 

462 # We are comparing PV- with VG-sizes that are aligned. Need a threshold 

463 resizethreshold = 100 * 1024 * 1024 # 100MB 

464 devices = self.dconf['device'].split(',') 

465 totaldevicesize = 0 

466 for device in devices: 

467 totaldevicesize = totaldevicesize + scsiutil.getsize(device) 

468 if totaldevicesize >= (currentvgsize + resizethreshold): 

469 try: 

470 if hasattr(self, 'SCSIid'): 470 ↛ 472 (condition never true)

471 # LVHDoHBASR, LVHDoISCSISR might have slaves 

472 scsiutil.refresh_lun_size_by_SCSIid_on_slaves(self.session, 

473 getattr(self, 'SCSIid')) 

474 util.SMlog("LVHDSR._expand_size for %s will resize the pv." % 

475 self.uuid) 

476 for pv in lvutil.get_pv_for_vg(self.vgname): 

477 lvutil.resizePV(pv) 

478 except: 

479 util.logException("LVHDSR._expand_size for %s failed to resize" 

480 " the PV" % self.uuid) 

481 

482 @deviceCheck 

483 def create(self, uuid, size): 

484 util.SMlog("LVHDSR.create for %s" % self.uuid) 

485 if not self.isMaster: 

486 util.SMlog('sr_create blocked for non-master') 

487 raise xs_errors.XenError('LVMMaster') 

488 

489 if lvutil._checkVG(self.vgname): 

490 raise xs_errors.XenError('SRExists') 

491 

492 # Check none of the devices already in use by other PBDs 

493 if util.test_hostPBD_devs(self.session, uuid, self.dconf['device']): 

494 raise xs_errors.XenError('SRInUse') 

495 

496 # Check serial number entry in SR records 

497 for dev in self.dconf['device'].split(','): 

498 if util.test_scsiserial(self.session, dev): 

499 raise xs_errors.XenError('SRInUse') 

500 

501 lvutil.createVG(self.dconf['device'], self.vgname) 

502 

503 #Update serial number string 

504 scsiutil.add_serial_record(self.session, self.sr_ref, \ 

505 scsiutil.devlist_to_serialstring(self.dconf['device'].split(','))) 

506 

507 # since this is an SR.create turn off legacy mode 

508 self.session.xenapi.SR.add_to_sm_config(self.sr_ref, \ 

509 self.FLAG_USE_VHD, 'true') 

510 

511 def delete(self, uuid): 

512 util.SMlog("LVHDSR.delete for %s" % self.uuid) 

513 if not self.isMaster: 

514 raise xs_errors.XenError('LVMMaster') 

515 cleanup.gc_force(self.session, self.uuid) 

516 

517 success = True 
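# Best-effort removal of the dev mapper entries and /dev symlinks belonging to this VG before tearing it down.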

518 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'): 

519 if util.extractSRFromDevMapper(fileName) != self.uuid: 

520 continue 

521 

522 if util.doesFileHaveOpenHandles(fileName): 

523 util.SMlog("LVHDSR.delete: The dev mapper entry %s has open " \ 

524 "handles" % fileName) 

525 success = False 

526 continue 

527 

528 # Now attempt to remove the dev mapper entry 

529 if not lvutil.removeDevMapperEntry(fileName, False): 

530 success = False 

531 continue 

532 

533 try: 

534 lvname = os.path.basename(fileName.replace('-', '/'). \ 

535 replace('//', '-')) 

536 lpath = os.path.join(self.path, lvname) 

537 os.unlink(lpath) 

538 except OSError as e: 

539 if e.errno != errno.ENOENT: 

540 util.SMlog("LVHDSR.delete: failed to remove the symlink for " \ 

541 "file %s. Error: %s" % (fileName, str(e))) 

542 success = False 

543 

544 if success: 

545 try: 

546 if util.pathexists(self.path): 

547 os.rmdir(self.path) 

548 except Exception as e: 

549 util.SMlog("LVHDSR.delete: failed to remove the symlink " \ 

550 "directory %s. Error: %s" % (self.path, str(e))) 

551 success = False 

552 

553 self._removeMetadataVolume() 

554 self.lvmCache.refresh() 

555 if len(lvhdutil.getLVInfo(self.lvmCache)) > 0: 

556 raise xs_errors.XenError('SRNotEmpty') 

557 

558 if not success: 

559 raise Exception("LVHDSR delete failed, please refer to the log " \ 

560 "for details.") 

561 

562 lvutil.removeVG(self.dconf['device'], self.vgname) 

563 self._cleanup() 

564 

565 def attach(self, uuid): 

566 util.SMlog("LVHDSR.attach for %s" % self.uuid) 

567 

568 self._cleanup(True) # in case of host crashes, if detach wasn't called 

569 

570 if not util.match_uuid(self.uuid) or not lvutil._checkVG(self.vgname): 570 ↛ 571 (condition never true)

571 raise xs_errors.XenError('SRUnavailable', \ 

572 opterr='no such volume group: %s' % self.vgname) 

573 

574 # Refresh the metadata status 

575 self._checkMetadataVolume() 

576 

577 refreshsizeok = self._refresh_size() 

578 

579 if self.isMaster: 579 ↛ 590 (condition never false)

580 if refreshsizeok: 580 ↛ 584 (condition never false)

581 self._expand_size() 

582 

583 # Update SCSIid string 

584 util.SMlog("Calling devlist_to_serial") 

585 scsiutil.add_serial_record( 

586 self.session, self.sr_ref, 

587 scsiutil.devlist_to_serialstring(self.dconf['device'].split(','))) 

588 

589 # Test Legacy Mode Flag and update if VHD volumes exist 

590 if self.isMaster and self.legacyMode: 590 ↛ 591 (condition never true)

591 vdiInfo = lvhdutil.getVDIInfo(self.lvmCache) 

592 for uuid, info in vdiInfo.items(): 

593 if info.vdiType == vhdutil.VDI_TYPE_VHD: 

594 self.legacyMode = False 

595 map = self.session.xenapi.SR.get_sm_config(self.sr_ref) 

596 self._introduceMetaDataVolume() 

597 break 

598 

599 # Set the block scheduler 

600 for dev in self.dconf['device'].split(','): 

601 self.block_setscheduler(dev) 

602 

603 def detach(self, uuid): 

604 util.SMlog("LVHDSR.detach for %s" % self.uuid) 

605 cleanup.abort(self.uuid) 

606 

607 # Do a best effort cleanup of the dev mapper entries 

608 # go through all devmapper entries for this VG 

609 success = True 

610 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'): 

611 if util.extractSRFromDevMapper(fileName) != self.uuid: 611 ↛ 612 (condition never true)

612 continue 

613 

614 with Fairlock('devicemapper'): 

615 # check if any file has open handles 

616 if util.doesFileHaveOpenHandles(fileName): 

617 # if yes, log this and signal failure 

618 util.SMlog( 

619 f"LVHDSR.detach: The dev mapper entry {fileName} has " 

620 "open handles") 

621 success = False 

622 continue 

623 

624 # Now attempt to remove the dev mapper entry 

625 if not lvutil.removeDevMapperEntry(fileName, False): 625 ↛ 626 (condition never true)

626 success = False 

627 continue 

628 

629 # also remove the symlinks from /dev/VG-XenStorage-SRUUID/* 

630 try: 

631 lvname = os.path.basename(fileName.replace('-', '/'). \ 

632 replace('//', '-')) 

633 lvname = os.path.join(self.path, lvname) 

634 util.force_unlink(lvname) 

635 except Exception as e: 

636 util.SMlog("LVHDSR.detach: failed to remove the symlink for " \ 

637 "file %s. Error: %s" % (fileName, str(e))) 

638 success = False 

639 

640 # now remove the directory where the symlinks are 

641 # this should pass as the directory should be empty by now 

642 if success: 

643 try: 

644 if util.pathexists(self.path): 644 ↛ 645 (condition never true)

645 os.rmdir(self.path) 

646 except Exception as e: 

647 util.SMlog("LVHDSR.detach: failed to remove the symlink " \ 

648 "directory %s. Error: %s" % (self.path, str(e))) 

649 success = False 

650 

651 if not success: 

652 raise Exception("SR detach failed, please refer to the log " \ 

653 "for details.") 

654 

655 # Don't delete lock files on the master as it will break the locking 

656 # between SM and any GC thread that survives through SR.detach. 

657 # However, we should still delete lock files on slaves as it is the 

658 # only place to do so. 

659 self._cleanup(self.isMaster) 

660 

661 def forget_vdi(self, uuid): 

662 if not self.legacyMode: 

663 LVMMetadataHandler(self.mdpath).deleteVdiFromMetadata(uuid) 

664 super(LVHDSR, self).forget_vdi(uuid) 

665 

666 def scan(self, uuid): 

667 activated = True 

668 try: 

669 lvname = '' 

670 util.SMlog("LVHDSR.scan for %s" % self.uuid) 

671 if not self.isMaster: 671 ↛ 672 (condition never true)

672 util.SMlog('sr_scan blocked for non-master') 

673 raise xs_errors.XenError('LVMMaster') 

674 

675 if self._refresh_size(): 675 ↛ 677 (condition never false)

676 self._expand_size() 

677 self.lvmCache.refresh() 

678 cbt_vdis = self.lvmCache.getTagged(CBTLOG_TAG) 

679 self._loadvdis() 

680 stats = lvutil._getVGstats(self.vgname) 

681 self.physical_size = stats['physical_size'] 

682 self.physical_utilisation = stats['physical_utilisation'] 

683 

684 # Now check if there are any VDIs in the metadata, which are not in 

685 # XAPI 

686 if self.mdexists: 686 ↛ 796 (condition never false)

687 vdiToSnaps = {} 

688 # get VDIs from XAPI 

689 vdis = self.session.xenapi.SR.get_VDIs(self.sr_ref) 

690 vdi_uuids = set([]) 

691 for vdi in vdis: 

692 vdi_uuids.add(self.session.xenapi.VDI.get_uuid(vdi)) 

693 

694 Dict = LVMMetadataHandler(self.mdpath, False).getMetadata()[1] 

695 

696 for vdi in list(Dict.keys()): 

697 vdi_uuid = Dict[vdi][UUID_TAG] 

698 if bool(int(Dict[vdi][IS_A_SNAPSHOT_TAG])): 698 ↛ 699 (condition never true)

699 if Dict[vdi][SNAPSHOT_OF_TAG] in vdiToSnaps: 

700 vdiToSnaps[Dict[vdi][SNAPSHOT_OF_TAG]].append(vdi_uuid) 

701 else: 

702 vdiToSnaps[Dict[vdi][SNAPSHOT_OF_TAG]] = [vdi_uuid] 

703 

704 if vdi_uuid not in vdi_uuids: 704 ↛ 705 (condition never true)

705 util.SMlog("Introduce VDI %s as it is present in " \ 

706 "metadata and not in XAPI." % vdi_uuid) 

707 sm_config = {} 

708 sm_config['vdi_type'] = Dict[vdi][VDI_TYPE_TAG] 

709 lvname = "%s%s" % \ 

710 (lvhdutil.LV_PREFIX[sm_config['vdi_type']], vdi_uuid) 

711 self.lvmCache.activateNoRefcount(lvname) 

712 activated = True 

713 lvPath = os.path.join(self.path, lvname) 

714 

715 if Dict[vdi][VDI_TYPE_TAG] == vhdutil.VDI_TYPE_RAW: 

716 size = self.lvmCache.getSize( \ 

717 lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_RAW] + \ 

718 vdi_uuid) 

719 utilisation = \ 

720 util.roundup(lvutil.LVM_SIZE_INCREMENT, 

721 int(size)) 

722 else: 

723 parent = \ 

724 vhdutil._getVHDParentNoCheck(lvPath) 

725 

726 if parent is not None: 

727 sm_config['vhd-parent'] = parent[len( \ 

728 lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD]):] 

729 size = vhdutil.getSizeVirt(lvPath) 

730 if self.provision == "thin": 

731 utilisation = \ 

732 util.roundup(lvutil.LVM_SIZE_INCREMENT, 

733 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE)) 

734 else: 

735 utilisation = lvhdutil.calcSizeVHDLV(int(size)) 

736 

737 vdi_ref = self.session.xenapi.VDI.db_introduce( 

738 vdi_uuid, 

739 Dict[vdi][NAME_LABEL_TAG], 

740 Dict[vdi][NAME_DESCRIPTION_TAG], 

741 self.sr_ref, 

742 Dict[vdi][TYPE_TAG], 

743 False, 

744 bool(int(Dict[vdi][READ_ONLY_TAG])), 

745 {}, 

746 vdi_uuid, 

747 {}, 

748 sm_config) 

749 

750 self.session.xenapi.VDI.set_managed(vdi_ref, 

751 bool(int(Dict[vdi][MANAGED_TAG]))) 

752 self.session.xenapi.VDI.set_virtual_size(vdi_ref, 

753 str(size)) 

754 self.session.xenapi.VDI.set_physical_utilisation( \ 

755 vdi_ref, str(utilisation)) 

756 self.session.xenapi.VDI.set_is_a_snapshot( \ 

757 vdi_ref, bool(int(Dict[vdi][IS_A_SNAPSHOT_TAG]))) 

758 if bool(int(Dict[vdi][IS_A_SNAPSHOT_TAG])): 

759 self.session.xenapi.VDI.set_snapshot_time( \ 

760 vdi_ref, DateTime(Dict[vdi][SNAPSHOT_TIME_TAG])) 

761 if Dict[vdi][TYPE_TAG] == 'metadata': 

762 self.session.xenapi.VDI.set_metadata_of_pool( \ 

763 vdi_ref, Dict[vdi][METADATA_OF_POOL_TAG]) 

764 

765 # Update CBT status of disks either just added 

766 # or already in XAPI 

767 cbt_logname = "%s.%s" % (vdi_uuid, CBTLOG_TAG) 

768 if cbt_logname in cbt_vdis: 768 ↛ 769 (condition never true)

769 vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid) 

770 self.session.xenapi.VDI.set_cbt_enabled(vdi_ref, True) 

771 # For existing VDIs, update local state too 

772 # Scan in base class SR updates existing VDIs 

773 # again based on local states 

774 if vdi_uuid in self.vdis: 

775 self.vdis[vdi_uuid].cbt_enabled = True 

776 cbt_vdis.remove(cbt_logname) 

777 

778 # Now set the snapshot statuses correctly in XAPI 

779 for srcvdi in vdiToSnaps.keys(): 779 ↛ 780 (loop never started)

780 try: 

781 srcref = self.session.xenapi.VDI.get_by_uuid(srcvdi) 

782 except: 

783 # the source VDI no longer exists, continue 

784 continue 

785 

786 for snapvdi in vdiToSnaps[srcvdi]: 

787 try: 

788 # this might fail in cases where its already set 

789 snapref = \ 

790 self.session.xenapi.VDI.get_by_uuid(snapvdi) 

791 self.session.xenapi.VDI.set_snapshot_of(snapref, srcref) 

792 except Exception as e: 

793 util.SMlog("Setting snapshot failed. " \ 

794 "Error: %s" % str(e)) 

795 

796 if cbt_vdis: 796 ↛ 807 (condition never false)

797 # If we have items remaining in this list, 

798 # they are cbt_metadata VDI that XAPI doesn't know about 

799 # Add them to self.vdis and they'll get added to the DB 

800 for cbt_vdi in cbt_vdis: 800 ↛ 801 (loop never started)

801 cbt_uuid = cbt_vdi.split(".")[0] 

802 new_vdi = self.vdi(cbt_uuid) 

803 new_vdi.ty = "cbt_metadata" 

804 new_vdi.cbt_enabled = True 

805 self.vdis[cbt_uuid] = new_vdi 

806 

807 super(LVHDSR, self).scan(uuid) 

808 self._kickGC() 

809 

810 finally: 

811 if lvname != '' and activated: 811 ↛ 812 (condition never true)

812 self.lvmCache.deactivateNoRefcount(lvname) 

813 

814 def update(self, uuid): 

815 if not lvutil._checkVG(self.vgname): 815 ↛ 816 (condition never true)

816 return 

817 self._updateStats(uuid, 0) 

818 

819 if self.legacyMode: 819 ↛ 820 (condition never true)

820 return 

821 

822 # synch name_label in metadata with XAPI 

823 update_map = {} 

824 update_map = {METADATA_UPDATE_OBJECT_TYPE_TAG: \ 

825 METADATA_OBJECT_TYPE_SR, 

826 NAME_LABEL_TAG: util.to_plain_string( \ 

827 self.session.xenapi.SR.get_name_label(self.sr_ref)), 

828 NAME_DESCRIPTION_TAG: util.to_plain_string( \ 

829 self.session.xenapi.SR.get_name_description(self.sr_ref)) 

830 } 

831 LVMMetadataHandler(self.mdpath).updateMetadata(update_map) 

832 

833 def _updateStats(self, uuid, virtAllocDelta): 

834 valloc = int(self.session.xenapi.SR.get_virtual_allocation(self.sr_ref)) 

835 self.virtual_allocation = valloc + virtAllocDelta 

836 util.SMlog("Setting virtual_allocation of SR %s to %d" % 

837 (uuid, self.virtual_allocation)) 

838 stats = lvutil._getVGstats(self.vgname) 

839 self.physical_size = stats['physical_size'] 

840 self.physical_utilisation = stats['physical_utilisation'] 

841 self._db_update() 

842 

843 @deviceCheck 

844 def probe(self): 

845 return lvutil.srlist_toxml( 

846 lvutil.scan_srlist(lvhdutil.VG_PREFIX, self.dconf['device']), 

847 lvhdutil.VG_PREFIX, 

848 ('metadata' in self.srcmd.params['sr_sm_config'] and \ 

849 self.srcmd.params['sr_sm_config']['metadata'] == 'true')) 

850 

851 def vdi(self, uuid): 

852 return LVHDVDI(self, uuid) 

853 

854 def _loadvdis(self): 

855 self.virtual_allocation = 0 

856 self.vdiInfo = lvhdutil.getVDIInfo(self.lvmCache) 

857 self.allVDIs = {} 

858 

859 for uuid, info in self.vdiInfo.items(): 

860 if uuid.startswith(cleanup.SR.TMP_RENAME_PREFIX): 860 ↛ 861 (condition never true)

861 continue 

862 if info.scanError: 862 ↛ 863 (condition never true)

863 raise xs_errors.XenError('VDIUnavailable', \ 

864 opterr='Error scanning VDI %s' % uuid) 

865 self.vdis[uuid] = self.allVDIs[uuid] = self.vdi(uuid) 

866 if not self.vdis[uuid].hidden: 866 ↛ 859 (condition never false)

867 self.virtual_allocation += self.vdis[uuid].utilisation 

868 
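# Build the parent -> children map (geneology) and mark any parent VDIs as read-only.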

869 for uuid, vdi in self.vdis.items(): 

870 if vdi.parent: 870 ↛ 871 (condition never true)

871 if vdi.parent in self.vdis: 

872 self.vdis[vdi.parent].read_only = True 

873 if vdi.parent in geneology: 

874 geneology[vdi.parent].append(uuid) 

875 else: 

876 geneology[vdi.parent] = [uuid] 

877 

878 # Now remove all hidden leaf nodes to avoid introducing records that 

879 # will be GC'ed 

880 for uuid in list(self.vdis.keys()): 

881 if uuid not in geneology and self.vdis[uuid].hidden: 881 ↛ 882 (condition never true)

882 util.SMlog("Scan found hidden leaf (%s), ignoring" % uuid) 

883 del self.vdis[uuid] 

884 

885 def _ensureSpaceAvailable(self, amount_needed): 

886 space_available = lvutil._getVGstats(self.vgname)['freespace'] 

887 if (space_available < amount_needed): 

888 util.SMlog("Not enough space! free space: %d, need: %d" % \ 

889 (space_available, amount_needed)) 

890 raise xs_errors.XenError('SRNoSpace') 

891 
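# Replay the clone journal: undo or finalise each snapshot/clone operation that was interrupted part-way through.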

892 def _handleInterruptedCloneOps(self): 

893 entries = self.journaler.getAll(LVHDVDI.JRN_CLONE) 

894 for uuid, val in entries.items(): 894 ↛ 895 (loop never started)

895 util.fistpoint.activate("LVHDRT_clone_vdi_before_undo_clone", self.uuid) 

896 self._handleInterruptedCloneOp(uuid, val) 

897 util.fistpoint.activate("LVHDRT_clone_vdi_after_undo_clone", self.uuid) 

898 self.journaler.remove(LVHDVDI.JRN_CLONE, uuid) 

899 

900 def _handleInterruptedCoalesceLeaf(self): 

901 entries = self.journaler.getAll(cleanup.VDI.JRN_LEAF) 

902 if len(entries) > 0: 902 ↛ 903 (condition never true)

903 util.SMlog("*** INTERRUPTED COALESCE-LEAF OP DETECTED ***") 

904 cleanup.gc_force(self.session, self.uuid) 

905 self.lvmCache.refresh() 

906 

907 def _handleInterruptedCloneOp(self, origUuid, jval, forceUndo=False): 

908 """Either roll back or finalize the interrupted snapshot/clone 

909 operation. Rolling back is unsafe if the leaf VHDs have already been 

910 in use and written to. However, it is always safe to roll back while 

911 we're still in the context of the failed snapshot operation since the 

912 VBD is paused for the duration of the operation""" 

913 util.SMlog("*** INTERRUPTED CLONE OP: for %s (%s)" % (origUuid, jval)) 

914 lvs = lvhdutil.getLVInfo(self.lvmCache) 

915 baseUuid, clonUuid = jval.split("_") 

916 

917 # is there a "base copy" VDI? 

918 if not lvs.get(baseUuid): 

919 # no base copy: make sure the original is there 

920 if lvs.get(origUuid): 

921 util.SMlog("*** INTERRUPTED CLONE OP: nothing to do") 

922 return 

923 raise util.SMException("base copy %s not present, " \ 

924 "but no original %s found" % (baseUuid, origUuid)) 

925 

926 if forceUndo: 

927 util.SMlog("Explicit revert") 

928 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) 

929 return 

930 

931 if not lvs.get(origUuid) or (clonUuid and not lvs.get(clonUuid)): 

932 util.SMlog("One or both leaves missing => revert") 

933 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) 

934 return 

935 

936 vdis = lvhdutil.getVDIInfo(self.lvmCache) 

937 if vdis[origUuid].scanError or (clonUuid and vdis[clonUuid].scanError): 

938 util.SMlog("One or both leaves invalid => revert") 

939 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) 

940 return 

941 

942 orig = vdis[origUuid] 

943 base = vdis[baseUuid] 

944 self.lvActivator.activate(baseUuid, base.lvName, False) 

945 self.lvActivator.activate(origUuid, orig.lvName, False) 

946 if orig.parentUuid != baseUuid: 

947 parent = vdis[orig.parentUuid] 

948 self.lvActivator.activate(parent.uuid, parent.lvName, False) 

949 origPath = os.path.join(self.path, orig.lvName) 

950 if not vhdutil.check(origPath): 

951 util.SMlog("Orig VHD invalid => revert") 

952 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) 

953 return 

954 

955 if clonUuid: 

956 clon = vdis[clonUuid] 

957 clonPath = os.path.join(self.path, clon.lvName) 

958 self.lvActivator.activate(clonUuid, clon.lvName, False) 

959 if not vhdutil.check(clonPath): 

960 util.SMlog("Clon VHD invalid => revert") 

961 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) 

962 return 

963 

964 util.SMlog("Snapshot appears valid, will not roll back") 

965 self._completeCloneOp(vdis, origUuid, baseUuid, clonUuid) 

966 

967 def _undoCloneOp(self, lvs, origUuid, baseUuid, clonUuid): 

968 base = lvs[baseUuid] 

969 basePath = os.path.join(self.path, base.name) 

970 

971 # make the parent RW 

972 if base.readonly: 

973 self.lvmCache.setReadonly(base.name, False) 

974 

975 ns = lvhdutil.NS_PREFIX_LVM + self.uuid 

976 origRefcountBinary = RefCounter.check(origUuid, ns)[1] 

977 origRefcountNormal = 0 

978 

979 # un-hide the parent 

980 if base.vdiType == vhdutil.VDI_TYPE_VHD: 

981 self.lvActivator.activate(baseUuid, base.name, False) 

982 origRefcountNormal = 1 

983 vhdInfo = vhdutil.getVHDInfo(basePath, lvhdutil.extractUuid, False) 

984 if base.vdiType == vhdutil.VDI_TYPE_VHD and vhdInfo.hidden: 

985 vhdutil.setHidden(basePath, False) 

986 elif base.vdiType == vhdutil.VDI_TYPE_RAW and base.hidden: 

987 self.lvmCache.setHidden(base.name, False) 

988 

989 # remove the child nodes 

990 if clonUuid and lvs.get(clonUuid): 

991 if lvs[clonUuid].vdiType != vhdutil.VDI_TYPE_VHD: 

992 raise util.SMException("clone %s not VHD" % clonUuid) 

993 self.lvmCache.remove(lvs[clonUuid].name) 

994 if self.lvActivator.get(clonUuid, False): 

995 self.lvActivator.remove(clonUuid, False) 

996 if lvs.get(origUuid): 

997 self.lvmCache.remove(lvs[origUuid].name) 

998 

999 # inflate the parent to fully-allocated size 

1000 if base.vdiType == vhdutil.VDI_TYPE_VHD: 

1001 fullSize = lvhdutil.calcSizeVHDLV(vhdInfo.sizeVirt) 

1002 lvhdutil.inflate(self.journaler, self.uuid, baseUuid, fullSize) 

1003 

1004 # rename back 

1005 origLV = lvhdutil.LV_PREFIX[base.vdiType] + origUuid 

1006 self.lvmCache.rename(base.name, origLV) 

1007 RefCounter.reset(baseUuid, ns) 

1008 if self.lvActivator.get(baseUuid, False): 

1009 self.lvActivator.replace(baseUuid, origUuid, origLV, False) 

1010 RefCounter.set(origUuid, origRefcountNormal, origRefcountBinary, ns) 

1011 

1012 # At this stage, tapdisk and SM vdi will be in paused state. Remove 

1013 # flag to facilitate vm deactivate 

1014 origVdiRef = self.session.xenapi.VDI.get_by_uuid(origUuid) 

1015 self.session.xenapi.VDI.remove_from_sm_config(origVdiRef, 'paused') 

1016 

1017 # update LVM metadata on slaves 

1018 slaves = util.get_slaves_attached_on(self.session, [origUuid]) 

1019 lvhdutil.lvRefreshOnSlaves(self.session, self.uuid, self.vgname, 

1020 origLV, origUuid, slaves) 

1021 

1022 util.SMlog("*** INTERRUPTED CLONE OP: rollback success") 

1023 

1024 def _completeCloneOp(self, vdis, origUuid, baseUuid, clonUuid): 

1025 """Finalize the interrupted snapshot/clone operation. This must not be 

1026 called from the live snapshot op context because we attempt to pause/ 

1027 unpause the VBD here (the VBD is already paused during snapshot, so it 

1028 would cause a deadlock)""" 

1029 base = vdis[baseUuid] 

1030 clon = None 

1031 if clonUuid: 

1032 clon = vdis[clonUuid] 

1033 

1034 cleanup.abort(self.uuid) 

1035 

1036 # make sure the parent is hidden and read-only 

1037 if not base.hidden: 

1038 if base.vdiType == vhdutil.VDI_TYPE_RAW: 

1039 self.lvmCache.setHidden(base.lvName) 

1040 else: 

1041 basePath = os.path.join(self.path, base.lvName) 

1042 vhdutil.setHidden(basePath) 

1043 if not base.lvReadonly: 

1044 self.lvmCache.setReadonly(base.lvName, True) 

1045 

1046 # NB: since this snapshot-preserving call is only invoked outside the 

1047 # snapshot op context, we assume the LVM metadata on the involved slave 

1048 # has by now been refreshed and do not attempt to do it here 

1049 

1050 # Update the original record 

1051 try: 

1052 vdi_ref = self.session.xenapi.VDI.get_by_uuid(origUuid) 

1053 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

1054 type = self.session.xenapi.VDI.get_type(vdi_ref) 

1055 sm_config["vdi_type"] = vhdutil.VDI_TYPE_VHD 

1056 sm_config['vhd-parent'] = baseUuid 

1057 self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config) 

1058 except XenAPI.Failure: 

1059 util.SMlog("ERROR updating the orig record") 

1060 

1061 # introduce the new VDI records 

1062 if clonUuid: 

1063 try: 

1064 clon_vdi = VDI.VDI(self, clonUuid) 

1065 clon_vdi.read_only = False 

1066 clon_vdi.location = clonUuid 

1067 clon_vdi.utilisation = clon.sizeLV 

1068 clon_vdi.sm_config = { 

1069 "vdi_type": vhdutil.VDI_TYPE_VHD, 

1070 "vhd-parent": baseUuid} 

1071 

1072 if not self.legacyMode: 

1073 LVMMetadataHandler(self.mdpath). \ 

1074 ensureSpaceIsAvailableForVdis(1) 

1075 

1076 clon_vdi_ref = clon_vdi._db_introduce() 

1077 util.SMlog("introduced clon VDI: %s (%s)" % \ 

1078 (clon_vdi_ref, clonUuid)) 

1079 

1080 vdi_info = {UUID_TAG: clonUuid, 

1081 NAME_LABEL_TAG: clon_vdi.label, 

1082 NAME_DESCRIPTION_TAG: clon_vdi.description, 

1083 IS_A_SNAPSHOT_TAG: 0, 

1084 SNAPSHOT_OF_TAG: '', 

1085 SNAPSHOT_TIME_TAG: '', 

1086 TYPE_TAG: type, 

1087 VDI_TYPE_TAG: clon_vdi.sm_config['vdi_type'], 

1088 READ_ONLY_TAG: int(clon_vdi.read_only), 

1089 MANAGED_TAG: int(clon_vdi.managed), 

1090 METADATA_OF_POOL_TAG: '' 

1091 } 

1092 

1093 if not self.legacyMode: 

1094 LVMMetadataHandler(self.mdpath).addVdi(vdi_info) 

1095 

1096 except XenAPI.Failure: 

1097 util.SMlog("ERROR introducing the clon record") 

1098 

1099 try: 

1100 base_vdi = VDI.VDI(self, baseUuid) # readonly parent 

1101 base_vdi.label = "base copy" 

1102 base_vdi.read_only = True 

1103 base_vdi.location = baseUuid 

1104 base_vdi.size = base.sizeVirt 

1105 base_vdi.utilisation = base.sizeLV 

1106 base_vdi.managed = False 

1107 base_vdi.sm_config = { 

1108 "vdi_type": vhdutil.VDI_TYPE_VHD, 

1109 "vhd-parent": baseUuid} 

1110 

1111 if not self.legacyMode: 

1112 LVMMetadataHandler(self.mdpath).ensureSpaceIsAvailableForVdis(1) 

1113 

1114 base_vdi_ref = base_vdi._db_introduce() 

1115 util.SMlog("introduced base VDI: %s (%s)" % \ 

1116 (base_vdi_ref, baseUuid)) 

1117 

1118 vdi_info = {UUID_TAG: baseUuid, 

1119 NAME_LABEL_TAG: base_vdi.label, 

1120 NAME_DESCRIPTION_TAG: base_vdi.description, 

1121 IS_A_SNAPSHOT_TAG: 0, 

1122 SNAPSHOT_OF_TAG: '', 

1123 SNAPSHOT_TIME_TAG: '', 

1124 TYPE_TAG: type, 

1125 VDI_TYPE_TAG: base_vdi.sm_config['vdi_type'], 

1126 READ_ONLY_TAG: int(base_vdi.read_only), 

1127 MANAGED_TAG: int(base_vdi.managed), 

1128 METADATA_OF_POOL_TAG: '' 

1129 } 

1130 

1131 if not self.legacyMode: 

1132 LVMMetadataHandler(self.mdpath).addVdi(vdi_info) 

1133 except XenAPI.Failure: 

1134 util.SMlog("ERROR introducing the base record") 

1135 

1136 util.SMlog("*** INTERRUPTED CLONE OP: complete") 

1137 

1138 def _undoAllJournals(self): 

1139 """Undo all VHD & SM interrupted journaled operations. This call must 

1140 be serialized with respect to all operations that create journals""" 

1141 # undoing interrupted inflates must be done first, since undoing VHD 

1142 # ops might require inflations 

1143 self.lock.acquire() 

1144 try: 

1145 self._undoAllInflateJournals() 

1146 self._undoAllVHDJournals() 

1147 self._handleInterruptedCloneOps() 

1148 self._handleInterruptedCoalesceLeaf() 

1149 finally: 

1150 self.lock.release() 

1151 self.cleanup() 

1152 
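# Roll back interrupted inflate operations recorded in the journal: zero the stale footer area and deflate the LV back to its journalled size.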

1153 def _undoAllInflateJournals(self): 

1154 entries = self.journaler.getAll(lvhdutil.JRN_INFLATE) 

1155 if len(entries) == 0: 

1156 return 

1157 self._loadvdis() 

1158 for uuid, val in entries.items(): 

1159 vdi = self.vdis.get(uuid) 

1160 if vdi: 1160 ↛ 1175 (condition never false)

1161 util.SMlog("Found inflate journal %s, deflating %s to %s" % \ 

1162 (uuid, vdi.path, val)) 

1163 if vdi.readonly: 1163 ↛ 1164 (condition never true)

1164 self.lvmCache.setReadonly(vdi.lvname, False) 

1165 self.lvActivator.activate(uuid, vdi.lvname, False) 

1166 currSizeLV = self.lvmCache.getSize(vdi.lvname) 

1167 util.zeroOut(vdi.path, currSizeLV - vhdutil.VHD_FOOTER_SIZE, 

1168 vhdutil.VHD_FOOTER_SIZE) 

1169 lvhdutil.deflate(self.lvmCache, vdi.lvname, int(val)) 

1170 if vdi.readonly: 1170 ↛ 1171 (condition never true)

1171 self.lvmCache.setReadonly(vdi.lvname, True) 

1172 if "true" == self.session.xenapi.SR.get_shared(self.sr_ref): 1172 ↛ 1173line 1172 didn't jump to line 1173, because the condition on line 1172 was never true

1173 lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid, 

1174 self.vgname, vdi.lvname, uuid) 

1175 self.journaler.remove(lvhdutil.JRN_INFLATE, uuid) 

1176 delattr(self, "vdiInfo") 

1177 delattr(self, "allVDIs") 

1178 

1179 def _undoAllVHDJournals(self): 

1180 """check if there are VHD journals in existence and revert them""" 

1181 journals = lvhdutil.getAllVHDJournals(self.lvmCache) 

1182 if len(journals) == 0: 1182 ↛ 1184 (condition never false)

1183 return 

1184 self._loadvdis() 

1185 for uuid, jlvName in journals: 

1186 vdi = self.vdis[uuid] 

1187 util.SMlog("Found VHD journal %s, reverting %s" % (uuid, vdi.path)) 

1188 self.lvActivator.activate(uuid, vdi.lvname, False) 

1189 self.lvmCache.activateNoRefcount(jlvName) 

1190 fullSize = lvhdutil.calcSizeVHDLV(vdi.size) 

1191 lvhdutil.inflate(self.journaler, self.uuid, vdi.uuid, fullSize) 

1192 try: 

1193 jFile = os.path.join(self.path, jlvName) 

1194 vhdutil.revert(vdi.path, jFile) 

1195 except util.CommandException: 

1196 util.logException("VHD journal revert") 

1197 vhdutil.check(vdi.path) 

1198 util.SMlog("VHD revert failed but VHD ok: removing journal") 

1199 # Attempt to reclaim unused space 

1200 vhdInfo = vhdutil.getVHDInfo(vdi.path, lvhdutil.extractUuid, False) 

1201 NewSize = lvhdutil.calcSizeVHDLV(vhdInfo.sizeVirt) 

1202 if NewSize < fullSize: 

1203 lvhdutil.deflate(self.lvmCache, vdi.lvname, int(NewSize)) 

1204 lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid, 

1205 self.vgname, vdi.lvname, uuid) 

1206 self.lvmCache.remove(jlvName) 

1207 delattr(self, "vdiInfo") 

1208 delattr(self, "allVDIs") 

1209 
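# Ask each slave host (via the on-slave plugin) to deactivate the original LV ahead of the clone/snapshot rename.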

1210 def _updateSlavesPreClone(self, hostRefs, origOldLV): 

1211 masterRef = util.get_this_host_ref(self.session) 

1212 args = {"vgName": self.vgname, 

1213 "action1": "deactivateNoRefcount", 

1214 "lvName1": origOldLV} 

1215 for hostRef in hostRefs: 

1216 if hostRef == masterRef: 1216 ↛ 1217 (condition never true)

1217 continue 

1218 util.SMlog("Deactivate VDI on %s" % hostRef) 

1219 rv = self.session.xenapi.host.call_plugin(hostRef, self.PLUGIN_ON_SLAVE, "multi", args) 

1220 util.SMlog("call-plugin returned: %s" % rv) 

1221 if not rv: 1221 ↛ 1222 (condition never true)

1222 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE) 

1223 

1224 def _updateSlavesOnClone(self, hostRefs, origOldLV, origLV, 

1225 baseUuid, baseLV): 

1226 """We need to reactivate the original LV on each slave (note that the 

1227 name for the original LV might change), as well as init the refcount 

1228 for the base LV""" 

1229 args = {"vgName": self.vgname, 

1230 "action1": "refresh", 

1231 "lvName1": origLV, 

1232 "action2": "activate", 

1233 "ns2": lvhdutil.NS_PREFIX_LVM + self.uuid, 

1234 "lvName2": baseLV, 

1235 "uuid2": baseUuid} 

1236 

1237 masterRef = util.get_this_host_ref(self.session) 

1238 for hostRef in hostRefs: 

1239 if hostRef == masterRef: 1239 ↛ 1240 (condition never true)

1240 continue 

1241 util.SMlog("Updating %s, %s, %s on slave %s" % \ 

1242 (origOldLV, origLV, baseLV, hostRef)) 

1243 rv = self.session.xenapi.host.call_plugin( 

1244 hostRef, self.PLUGIN_ON_SLAVE, "multi", args) 

1245 util.SMlog("call-plugin returned: %s" % rv) 

1246 if not rv: 1246 ↛ 1247 (condition never true)

1247 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE) 

1248 

1249 def _updateSlavesOnCBTClone(self, hostRefs, cbtlog): 

1250 """Reactivate and refresh CBT log file on slaves""" 

1251 args = {"vgName": self.vgname, 

1252 "action1": "deactivateNoRefcount", 

1253 "lvName1": cbtlog, 

1254 "action2": "refresh", 

1255 "lvName2": cbtlog} 

1256 

1257 masterRef = util.get_this_host_ref(self.session) 

1258 for hostRef in hostRefs: 

1259 if hostRef == masterRef: 1259 ↛ 1260 (condition never true)

1260 continue 

1261 util.SMlog("Updating %s on slave %s" % (cbtlog, hostRef)) 

1262 rv = self.session.xenapi.host.call_plugin( 

1263 hostRef, self.PLUGIN_ON_SLAVE, "multi", args) 

1264 util.SMlog("call-plugin returned: %s" % rv) 

1265 if not rv: 1265 ↛ 1266 (condition never true)

1266 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE) 

1267 

1268 def _updateSlavesOnRemove(self, hostRefs, baseUuid, baseLV): 

1269 """Tell the slave we deleted the base image""" 

1270 args = {"vgName": self.vgname, 

1271 "action1": "cleanupLockAndRefcount", 

1272 "uuid1": baseUuid, 

1273 "ns1": lvhdutil.NS_PREFIX_LVM + self.uuid} 

1274 

1275 masterRef = util.get_this_host_ref(self.session) 

1276 for hostRef in hostRefs: 

1277 if hostRef == masterRef: 1277 ↛ 1278 (condition never true)

1278 continue 

1279 util.SMlog("Cleaning locks for %s on slave %s" % (baseLV, hostRef)) 

1280 rv = self.session.xenapi.host.call_plugin( 

1281 hostRef, self.PLUGIN_ON_SLAVE, "multi", args) 

1282 util.SMlog("call-plugin returned: %s" % rv) 

1283 if not rv: 1283 ↛ 1284 (condition never true)

1284 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE) 

1285 

1286 def _cleanup(self, skipLockCleanup=False): 

1287 """delete stale refcounter, flag, and lock files""" 

1288 RefCounter.resetAll(lvhdutil.NS_PREFIX_LVM + self.uuid) 

1289 IPCFlag(self.uuid).clearAll() 

1290 if not skipLockCleanup: 1290 ↛ 1291 (condition never true)

1291 Lock.cleanupAll(self.uuid) 

1292 Lock.cleanupAll(lvhdutil.NS_PREFIX_LVM + self.uuid) 

1293 

1294 def _prepareTestMode(self): 

1295 util.SMlog("Test mode: %s" % self.testMode) 

1296 if self.ENV_VAR_VHD_TEST.get(self.testMode): 1296 ↛ 1297 (condition never true)

1297 os.environ[self.ENV_VAR_VHD_TEST[self.testMode]] = "yes" 

1298 util.SMlog("Setting env %s" % self.ENV_VAR_VHD_TEST[self.testMode]) 

1299 

1300 def _kickGC(self): 

1301 util.SMlog("Kicking GC") 

1302 cleanup.start_gc_service(self.uuid) 

1303 

1304 def ensureCBTSpace(self): 

1305 # Ensure we have space for at least one LV 

1306 self._ensureSpaceAvailable(self.journaler.LV_SIZE) 

1307 

1308 
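# VDI implementation for LVHD SRs: each VDI is a VHD (or raw) image stored in its own logical volume.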

1309 class LVHDVDI(VDI.VDI):

1310 

1311 JRN_CLONE = "clone" # journal entry type for the clone operation 

1312 

1313 def load(self, vdi_uuid): 

1314 self.lock = self.sr.lock 

1315 self.lvActivator = self.sr.lvActivator 

1316 self.loaded = False 

1317 self.vdi_type = vhdutil.VDI_TYPE_VHD 

1318 if self.sr.legacyMode or util.fistpoint.is_active("xenrt_default_vdi_type_legacy"): 1318 ↛ 1320 (condition never false)

1319 self.vdi_type = vhdutil.VDI_TYPE_RAW 

1320 self.uuid = vdi_uuid 

1321 self.location = self.uuid 

1322 self.exists = True 

1323 

1324 if hasattr(self.sr, "vdiInfo") and self.sr.vdiInfo.get(self.uuid): 

1325 self._initFromVDIInfo(self.sr.vdiInfo[self.uuid]) 

1326 if self.parent: 1326 ↛ 1327 (condition never true)

1327 self.sm_config_override['vhd-parent'] = self.parent 

1328 else: 

1329 self.sm_config_override['vhd-parent'] = None 

1330 return 

1331 

1332 # scan() didn't run: determine the type of the VDI manually 

1333 if self._determineType(): 

1334 return 

1335 

1336 # the VDI must be in the process of being created 

1337 self.exists = False 

1338 if "vdi_sm_config" in self.sr.srcmd.params and \ 1338 ↛ 1340line 1338 didn't jump to line 1340, because the condition on line 1338 was never true

1339 "type" in self.sr.srcmd.params["vdi_sm_config"]: 

1340 type = self.sr.srcmd.params["vdi_sm_config"]["type"] 

1341 if type == PARAM_RAW: 

1342 self.vdi_type = vhdutil.VDI_TYPE_RAW 

1343 elif type == PARAM_VHD: 

1344 self.vdi_type = vhdutil.VDI_TYPE_VHD 

1345 if self.sr.cmd == 'vdi_create' and self.sr.legacyMode: 

1346 raise xs_errors.XenError('VDICreate', \ 

1347 opterr='Cannot create VHD type disk in legacy mode') 

1348 else: 

1349 raise xs_errors.XenError('VDICreate', opterr='bad type') 

1350 self.lvname = "%s%s" % (lvhdutil.LV_PREFIX[self.vdi_type], vdi_uuid) 

1351 self.path = os.path.join(self.sr.path, self.lvname) 

1352 

1353 def create(self, sr_uuid, vdi_uuid, size): 

1354 util.SMlog("LVHDVDI.create for %s" % self.uuid) 

1355 if not self.sr.isMaster: 

1356 raise xs_errors.XenError('LVMMaster') 

1357 if self.exists: 

1358 raise xs_errors.XenError('VDIExists') 

1359 

1360 size = vhdutil.validate_and_round_vhd_size(int(size)) 

1361 

1362 util.SMlog("LVHDVDI.create: type = %s, %s (size=%s)" % \ 

1363 (self.vdi_type, self.path, size)) 

1364 lvSize = 0 

1365 self.sm_config = self.sr.srcmd.params["vdi_sm_config"] 

1366 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1367 lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT, int(size)) 

1368 else: 

1369 if self.sr.provision == "thin": 

1370 lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT, 

1371 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE)) 

1372 elif self.sr.provision == "thick": 

1373 lvSize = lvhdutil.calcSizeVHDLV(int(size)) 

1374 

1375 self.sr._ensureSpaceAvailable(lvSize) 

1376 

1377 try: 

1378 self.sr.lvmCache.create(self.lvname, lvSize) 

1379 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1380 self.size = self.sr.lvmCache.getSize(self.lvname) 

1381 else: 

1382 vhdutil.create(self.path, int(size), False, lvhdutil.MSIZE_MB) 

1383 self.size = vhdutil.getSizeVirt(self.path) 

1384 self.sr.lvmCache.deactivateNoRefcount(self.lvname) 

1385 except util.CommandException as e: 

1386 util.SMlog("Unable to create VDI") 

1387 self.sr.lvmCache.remove(self.lvname) 

1388 raise xs_errors.XenError('VDICreate', opterr='error %d' % e.code) 

1389 

1390 self.utilisation = lvSize 

1391 self.sm_config["vdi_type"] = self.vdi_type 

1392 

1393 if not self.sr.legacyMode: 

1394 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1) 

1395 

1396 self.ref = self._db_introduce() 

1397 self.sr._updateStats(self.sr.uuid, self.size) 

1398 

1399 vdi_info = {UUID_TAG: self.uuid, 

1400 NAME_LABEL_TAG: util.to_plain_string(self.label), 

1401 NAME_DESCRIPTION_TAG: util.to_plain_string(self.description), 

1402 IS_A_SNAPSHOT_TAG: 0, 

1403 SNAPSHOT_OF_TAG: '', 

1404 SNAPSHOT_TIME_TAG: '', 

1405 TYPE_TAG: self.ty, 

1406 VDI_TYPE_TAG: self.vdi_type, 

1407 READ_ONLY_TAG: int(self.read_only), 

1408 MANAGED_TAG: int(self.managed), 

1409 METADATA_OF_POOL_TAG: '' 

1410 } 

1411 

1412 if not self.sr.legacyMode: 

1413 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info) 

1414 

1415 return VDI.VDI.get_params(self) 

1416 

1417 def delete(self, sr_uuid, vdi_uuid, data_only=False): 

1418 util.SMlog("LVHDVDI.delete for %s" % self.uuid) 

1419 try: 

1420 self._loadThis() 

1421 except xs_errors.SRException as e: 

1422 # Catch 'VDI doesn't exist' exception 

1423 if e.errno == 46: 

1424 return super(LVHDVDI, self).delete(sr_uuid, vdi_uuid, data_only) 

1425 raise 

1426 

1427 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1428 if not self.session.xenapi.VDI.get_managed(vdi_ref): 

1429 raise xs_errors.XenError("VDIDelete", \ 

1430 opterr="Deleting non-leaf node not permitted") 

1431 

1432 if not self.hidden: 

1433 self._markHidden() 

1434 

1435 if not data_only: 

1436 # Remove from XAPI and delete from MGT 

1437 self._db_forget() 

1438 else: 

1439 # If this is a data_destroy call, don't remove from XAPI db 

1440 # Only delete from MGT 

1441 if not self.sr.legacyMode: 

1442 LVMMetadataHandler(self.sr.mdpath).deleteVdiFromMetadata(self.uuid) 

1443 

1444 # deactivate here because it might be too late to do it in the "final" 

1445 # step: GC might have removed the LV by then 

1446 if self.sr.lvActivator.get(self.uuid, False): 

1447 self.sr.lvActivator.deactivate(self.uuid, False) 

1448 

1449 try: 

1450 self.sr.lvmCache.remove(self.lvname) 

1451 self.sr.lock.cleanup(vdi_uuid, lvhdutil.NS_PREFIX_LVM + sr_uuid) 

1452 self.sr.lock.cleanupAll(vdi_uuid) 

1453 except xs_errors.SRException as e: 

1454 util.SMlog( 

1455 "Failed to remove the volume (maybe is leaf coalescing) " 

1456 "for %s err:%d" % (self.uuid, e.errno)) 

1457 

1458 self.sr._updateStats(self.sr.uuid, -self.size) 

1459 self.sr._kickGC() 

1460 return super(LVHDVDI, self).delete(sr_uuid, vdi_uuid, data_only) 

1461 

1462 def attach(self, sr_uuid, vdi_uuid): 

1463 util.SMlog("LVHDVDI.attach for %s" % self.uuid) 

1464 if self.sr.journaler.hasJournals(self.uuid): 

1465 raise xs_errors.XenError('VDIUnavailable', 

1466 opterr='Interrupted operation detected on this VDI, ' 

1467 'scan SR first to trigger auto-repair') 

1468 

1469 writable = ('args' not in self.sr.srcmd.params) or \ 

1470 (self.sr.srcmd.params['args'][0] == "true") 
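# Only a writable VHD that is not already fully inflated needs inflating
# before use; raw volumes and read-only attaches are used as-is.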

1471 needInflate = True 

1472 if self.vdi_type == vhdutil.VDI_TYPE_RAW or not writable: 

1473 needInflate = False 

1474 else: 

1475 self._loadThis() 

1476 if self.utilisation >= lvhdutil.calcSizeVHDLV(self.size): 

1477 needInflate = False 

1478 

1479 if needInflate: 

1480 try: 

1481 self._prepareThin(True) 

1482 except: 

1483 util.logException("attach") 

1484 raise xs_errors.XenError('LVMProvisionAttach') 

1485 

1486 try: 

1487 return self._attach() 

1488 finally: 

1489 if not self.sr.lvActivator.deactivateAll(): 

1490 util.SMlog("Failed to deactivate LVs back (%s)" % self.uuid) 

1491 

1492 def detach(self, sr_uuid, vdi_uuid): 

1493 util.SMlog("LVHDVDI.detach for %s" % self.uuid) 

1494 self._loadThis() 

1495 already_deflated = (self.utilisation < \ 

1496 lvhdutil.calcSizeVHDLV(self.size)) 

1497 needDeflate = True 

1498 if self.vdi_type == vhdutil.VDI_TYPE_RAW or already_deflated: 

1499 needDeflate = False 

1500 elif self.sr.provision == "thick": 

1501 needDeflate = False 

1502 # except for snapshots, which are always deflated 

1503 if self.sr.srcmd.cmd != 'vdi_detach_from_config': 

1504 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1505 snap = self.session.xenapi.VDI.get_is_a_snapshot(vdi_ref) 

1506 if snap: 

1507 needDeflate = True 

1508 

1509 if needDeflate: 

1510 try: 

1511 self._prepareThin(False) 

1512 except: 

1513 util.logException("_prepareThin") 

1514 raise xs_errors.XenError('VDIUnavailable', opterr='deflate') 

1515 

1516 try: 

1517 self._detach() 

1518 finally: 

1519 if not self.sr.lvActivator.deactivateAll(): 

1520 raise xs_errors.XenError("SMGeneral", opterr="deactivation") 

1521 

1522 # We only support offline resize 

1523 def resize(self, sr_uuid, vdi_uuid, size): 

1524 util.SMlog("LVHDVDI.resize for %s" % self.uuid) 

1525 if not self.sr.isMaster: 

1526 raise xs_errors.XenError('LVMMaster') 

1527 

1528 self._loadThis() 

1529 if self.hidden: 

1530 raise xs_errors.XenError('VDIUnavailable', opterr='hidden VDI') 

1531 

1532 if size < self.size: 

1533 util.SMlog('vdi_resize: shrinking not supported: ' + \ 

1534 '(current size: %d, new size: %d)' % (self.size, size)) 

1535 raise xs_errors.XenError('VDISize', opterr='shrinking not allowed') 

1536 

1537 size = vhdutil.validate_and_round_vhd_size(int(size)) 

1538 

1539 if size == self.size: 

1540 return VDI.VDI.get_params(self) 

1541 

1542 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1543 lvSizeOld = self.size 

1544 lvSizeNew = util.roundup(lvutil.LVM_SIZE_INCREMENT, size) 

1545 else: 

1546 lvSizeOld = self.utilisation 

1547 lvSizeNew = lvhdutil.calcSizeVHDLV(size) 

1548 if self.sr.provision == "thin": 

1549 # VDI is currently deflated, so keep it deflated 

1550 lvSizeNew = lvSizeOld 

1551 assert(lvSizeNew >= lvSizeOld) 

1552 spaceNeeded = lvSizeNew - lvSizeOld 

1553 self.sr._ensureSpaceAvailable(spaceNeeded) 

1554 

1555 oldSize = self.size 
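# A raw VDI is resized by growing the LV directly; a VHD VDI first inflates
# the LV to the new provisioned size (a no-op for thin provisioning) and then
# grows the virtual size recorded in the VHD header.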

1556 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1557 self.sr.lvmCache.setSize(self.lvname, lvSizeNew) 

1558 self.size = self.sr.lvmCache.getSize(self.lvname) 

1559 self.utilisation = self.size 

1560 else: 

1561 if lvSizeNew != lvSizeOld: 

1562 lvhdutil.inflate(self.sr.journaler, self.sr.uuid, self.uuid, 

1563 lvSizeNew) 

1564 vhdutil.setSizeVirtFast(self.path, size) 

1565 self.size = vhdutil.getSizeVirt(self.path) 

1566 self.utilisation = self.sr.lvmCache.getSize(self.lvname) 

1567 

1568 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1569 self.session.xenapi.VDI.set_virtual_size(vdi_ref, str(self.size)) 

1570 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, 

1571 str(self.utilisation)) 

1572 self.sr._updateStats(self.sr.uuid, self.size - oldSize) 

1573 super(LVHDVDI, self).resize_cbt(self.sr.uuid, self.uuid, self.size) 

1574 return VDI.VDI.get_params(self) 

1575 

1576 def clone(self, sr_uuid, vdi_uuid): 

1577 return self._do_snapshot( 

1578 sr_uuid, vdi_uuid, VDI.SNAPSHOT_DOUBLE, cloneOp=True) 

1579 

1580 def compose(self, sr_uuid, vdi1, vdi2): 

1581 util.SMlog("LVHDSR.compose for %s -> %s" % (vdi2, vdi1)) 

1582 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

1583 raise xs_errors.XenError('Unimplemented') 

1584 

1585 parent_uuid = vdi1 

1586 parent_lvname = lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD] + parent_uuid 

1587 assert(self.sr.lvmCache.checkLV(parent_lvname)) 

1588 parent_path = os.path.join(self.sr.path, parent_lvname) 

1589 

1590 self.sr.lvActivator.activate(self.uuid, self.lvname, False) 

1591 self.sr.lvActivator.activate(parent_uuid, parent_lvname, False) 

1592 

1593 vhdutil.setParent(self.path, parent_path, False) 

1594 vhdutil.setHidden(parent_path) 

1595 self.sr.session.xenapi.VDI.set_managed(self.sr.srcmd.params['args'][0], False) 
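# Refresh the tapdisk so a running consumer re-opens the re-parented VHD chain.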

1596 

1597 if not blktap2.VDI.tap_refresh(self.session, self.sr.uuid, self.uuid, 

1598 True): 

1599 raise util.SMException("failed to refresh VDI %s" % self.uuid) 

1600 

1601 util.SMlog("Compose done") 

1602 

1603 def reset_leaf(self, sr_uuid, vdi_uuid): 

1604 util.SMlog("LVHDSR.reset_leaf for %s" % vdi_uuid) 

1605 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

1606 raise xs_errors.XenError('Unimplemented') 

1607 

1608 self.sr.lvActivator.activate(self.uuid, self.lvname, False) 

1609 

1610 # safety check 

1611 if not vhdutil.hasParent(self.path): 

1612 raise util.SMException(("ERROR: VDI %s has no parent, " + \

1613 "will not reset contents") % self.uuid) 

1614 

1615 vhdutil.killData(self.path) 

1616 

1617 def _attach(self): 

1618 self._chainSetActive(True, True, True) 

1619 if not util.pathexists(self.path): 

1620 raise xs_errors.XenError('VDIUnavailable', \ 

1621 opterr='Could not find: %s' % self.path) 

1622 

1623 if not hasattr(self, 'xenstore_data'): 

1624 self.xenstore_data = {} 

1625 

1626 self.xenstore_data.update(scsiutil.update_XS_SCSIdata(self.uuid, \ 

1627 scsiutil.gen_synthetic_page_data(self.uuid))) 

1628 

1629 self.xenstore_data['storage-type'] = 'lvm' 

1630 self.xenstore_data['vdi-type'] = self.vdi_type 

1631 

1632 self.attached = True 

1633 self.sr.lvActivator.persist() 

1634 return VDI.VDI.attach(self, self.sr.uuid, self.uuid) 

1635 

1636 def _detach(self): 

1637 self._chainSetActive(False, True) 

1638 self.attached = False 

1639 

1640 def _do_snapshot(self, sr_uuid, vdi_uuid, snapType, 

1641 cloneOp=False, secondary=None, cbtlog=None): 

1642 # If cbt enabled, save file consistency state 

1643 if cbtlog is not None: 

1644 if blktap2.VDI.tap_status(self.session, vdi_uuid):

1645 consistency_state = False 

1646 else: 

1647 consistency_state = True 

1648 util.SMlog("Saving log consistency state of %s for vdi: %s" % 

1649 (consistency_state, vdi_uuid)) 

1650 else: 

1651 consistency_state = None 

1652 

1653 pause_time = time.time() 
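# Pause the tapdisk to quiesce I/O on the VHD chain while the snapshot is
# taken; it is unpaused below on both the success and the failure path.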

1654 if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid):

1655 raise util.SMException("failed to pause VDI %s" % vdi_uuid) 

1656 

1657 snapResult = None 

1658 try: 

1659 snapResult = self._snapshot(snapType, cloneOp, cbtlog, consistency_state) 

1660 except Exception as e1: 

1661 try: 

1662 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, 

1663 secondary=None) 

1664 except Exception as e2: 

1665 util.SMlog('WARNING: failed to clean up failed snapshot: ' 

1666 '%s (error ignored)' % e2) 

1667 raise 

1668 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary) 

1669 unpause_time = time.time() 

1670 if (unpause_time - pause_time) > LONG_SNAPTIME:

1671 util.SMlog('WARNING: snapshot paused VM for %s seconds' % 

1672 (unpause_time - pause_time)) 

1673 return snapResult 

1674 

1675 def _snapshot(self, snapType, cloneOp=False, cbtlog=None, cbt_consistency=None): 

1676 util.SMlog("LVHDVDI._snapshot for %s (type %s)" % (self.uuid, snapType)) 

1677 

1678 if not self.sr.isMaster:

1679 raise xs_errors.XenError('LVMMaster') 

1680 if self.sr.legacyMode:

1681 raise xs_errors.XenError('Unimplemented', opterr='In legacy mode') 

1682 

1683 self._loadThis() 

1684 if self.hidden:

1685 raise xs_errors.XenError('VDISnapshot', opterr='hidden VDI') 

1686 

1687 self.sm_config = self.session.xenapi.VDI.get_sm_config( \ 

1688 self.sr.srcmd.params['vdi_ref']) 

1689 if "type" in self.sm_config and self.sm_config['type'] == 'raw': 1689 ↛ 1690line 1689 didn't jump to line 1690, because the condition on line 1689 was never true

1690 if not util.fistpoint.is_active("testsm_clone_allow_raw"): 

1691 raise xs_errors.XenError('Unimplemented', \ 

1692 opterr='Raw VDI, snapshot or clone not permitted') 

1693 

1694 # we must activate the entire VHD chain because the real parent could 

1695 # theoretically be anywhere in the chain if all VHDs under it are empty 

1696 self._chainSetActive(True, False) 

1697 if not util.pathexists(self.path):

1698 raise xs_errors.XenError('VDIUnavailable', \ 

1699 opterr='VDI unavailable: %s' % (self.path)) 

1700 

1701 if self.vdi_type == vhdutil.VDI_TYPE_VHD:

1702 depth = vhdutil.getDepth(self.path) 

1703 if depth == -1:

1704 raise xs_errors.XenError('VDIUnavailable', \ 

1705 opterr='failed to get VHD depth') 

1706 elif depth >= vhdutil.MAX_CHAIN_SIZE:

1707 raise xs_errors.XenError('SnapshotChainTooLong') 

1708 

1709 self.issnap = self.session.xenapi.VDI.get_is_a_snapshot( \ 

1710 self.sr.srcmd.params['vdi_ref']) 

1711 

1712 fullpr = lvhdutil.calcSizeVHDLV(self.size) 

1713 thinpr = util.roundup(lvutil.LVM_SIZE_INCREMENT, \ 

1714 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE)) 

1715 lvSizeOrig = thinpr 

1716 lvSizeClon = thinpr 
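# Both new leaves default to the thin (empty-VHD) size; the checks below bump
# them to the fully-provisioned size when the original is attached on a host
# or the SR uses thick provisioning.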

1717 

1718 hostRefs = [] 

1719 if self.sr.cmd == "vdi_snapshot": 

1720 hostRefs = util.get_hosts_attached_on(self.session, [self.uuid]) 

1721 if hostRefs:

1722 lvSizeOrig = fullpr 

1723 if self.sr.provision == "thick":

1724 if not self.issnap:

1725 lvSizeOrig = fullpr 

1726 if self.sr.cmd != "vdi_snapshot": 

1727 lvSizeClon = fullpr 

1728 

1729 if (snapType == VDI.SNAPSHOT_SINGLE or

1730 snapType == VDI.SNAPSHOT_INTERNAL): 

1731 lvSizeClon = 0 

1732 

1733 # the space required must include 2 journal LVs: a clone journal and an 

1734 # inflate journal (for failure handling) 

1735 size_req = lvSizeOrig + lvSizeClon + 2 * self.sr.journaler.LV_SIZE 

1736 lvSizeBase = self.size 

1737 if self.vdi_type == vhdutil.VDI_TYPE_VHD:

1738 lvSizeBase = util.roundup(lvutil.LVM_SIZE_INCREMENT, 

1739 vhdutil.getSizePhys(self.path)) 

1740 size_req -= (self.utilisation - lvSizeBase) 

1741 self.sr._ensureSpaceAvailable(size_req) 

1742 

1743 if hostRefs: 

1744 self.sr._updateSlavesPreClone(hostRefs, self.lvname) 

1745 

1746 baseUuid = util.gen_uuid() 

1747 origUuid = self.uuid 

1748 clonUuid = "" 

1749 if snapType == VDI.SNAPSHOT_DOUBLE:

1750 clonUuid = util.gen_uuid() 

1751 jval = "%s_%s" % (baseUuid, clonUuid) 
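# Record the clone journal entry before touching any LVs so a failure part-way
# through can be cleaned up (see _failClone / _handleInterruptedCloneOp).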

1752 self.sr.journaler.create(self.JRN_CLONE, origUuid, jval) 

1753 util.fistpoint.activate("LVHDRT_clone_vdi_after_create_journal", self.sr.uuid) 

1754 

1755 try: 

1756 # self becomes the "base vdi" 

1757 origOldLV = self.lvname 

1758 baseLV = lvhdutil.LV_PREFIX[self.vdi_type] + baseUuid 

1759 self.sr.lvmCache.rename(self.lvname, baseLV) 

1760 self.sr.lvActivator.replace(self.uuid, baseUuid, baseLV, False) 

1761 RefCounter.set(baseUuid, 1, 0, lvhdutil.NS_PREFIX_LVM + self.sr.uuid) 

1762 self.uuid = baseUuid 

1763 self.lvname = baseLV 

1764 self.path = os.path.join(self.sr.path, baseLV) 

1765 self.label = "base copy" 

1766 self.read_only = True 

1767 self.location = self.uuid 

1768 self.managed = False 

1769 

1770 # shrink the base copy to the minimum - we do it before creating 

1771 # the snapshot volumes to avoid requiring double the space 

1772 if self.vdi_type == vhdutil.VDI_TYPE_VHD:

1773 lvhdutil.deflate(self.sr.lvmCache, self.lvname, lvSizeBase) 

1774 self.utilisation = lvSizeBase 

1775 util.fistpoint.activate("LVHDRT_clone_vdi_after_shrink_parent", self.sr.uuid) 

1776 

1777 snapVDI = self._createSnap(origUuid, lvSizeOrig, False) 

1778 util.fistpoint.activate("LVHDRT_clone_vdi_after_first_snap", self.sr.uuid) 

1779 snapVDI2 = None 

1780 if snapType == VDI.SNAPSHOT_DOUBLE:

1781 snapVDI2 = self._createSnap(clonUuid, lvSizeClon, True) 

1782 # If we have CBT enabled on the VDI, 

1783 # set CBT status for the new snapshot disk 

1784 if cbtlog: 

1785 snapVDI2.cbt_enabled = True 

1786 util.fistpoint.activate("LVHDRT_clone_vdi_after_second_snap", self.sr.uuid) 

1787 

1788 # note: it is important to mark the parent hidden only AFTER the 

1789 # new VHD children have been created, which are referencing it; 

1790 # otherwise we would introduce a race with GC that could reclaim 

1791 # the parent before we snapshot it 

1792 if self.vdi_type == vhdutil.VDI_TYPE_RAW:

1793 self.sr.lvmCache.setHidden(self.lvname) 

1794 else: 

1795 vhdutil.setHidden(self.path) 

1796 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_hidden", self.sr.uuid) 

1797 

1798 # set the base copy to ReadOnly 

1799 self.sr.lvmCache.setReadonly(self.lvname, True) 

1800 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_ro", self.sr.uuid) 

1801 

1802 if hostRefs: 

1803 self.sr._updateSlavesOnClone(hostRefs, origOldLV, 

1804 snapVDI.lvname, self.uuid, self.lvname) 

1805 

1806 # Update cbt files if user created snapshot (SNAPSHOT_DOUBLE) 

1807 if snapType == VDI.SNAPSHOT_DOUBLE and cbtlog: 

1808 snapVDI._cbt_snapshot(clonUuid, cbt_consistency) 

1809 if hostRefs:

1810 cbtlog_file = self._get_cbt_logname(snapVDI.uuid) 

1811 try: 

1812 self.sr._updateSlavesOnCBTClone(hostRefs, cbtlog_file) 

1813 except: 

1814 alert_name = "VDI_CBT_SNAPSHOT_FAILED" 

1815 alert_str = ("Creating CBT snapshot for {} failed" 

1816 .format(snapVDI.uuid)) 

1817 snapVDI._disable_cbt_on_error(alert_name, alert_str) 

1818 pass 

1819 

1820 except (util.SMException, XenAPI.Failure) as e: 

1821 util.logException("LVHDVDI._snapshot") 

1822 self._failClone(origUuid, jval, str(e)) 

1823 util.fistpoint.activate("LVHDRT_clone_vdi_before_remove_journal", self.sr.uuid) 

1824 

1825 self.sr.journaler.remove(self.JRN_CLONE, origUuid) 

1826 

1827 return self._finishSnapshot(snapVDI, snapVDI2, hostRefs, cloneOp, snapType) 

1828 

1829 def _createSnap(self, snapUuid, snapSizeLV, isNew): 

1830 """Snapshot self and return the snapshot VDI object""" 

1831 snapLV = lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD] + snapUuid 

1832 snapPath = os.path.join(self.sr.path, snapLV) 

1833 self.sr.lvmCache.create(snapLV, int(snapSizeLV)) 

1834 util.fistpoint.activate("LVHDRT_clone_vdi_after_lvcreate", self.sr.uuid) 

1835 if isNew: 

1836 RefCounter.set(snapUuid, 1, 0, lvhdutil.NS_PREFIX_LVM + self.sr.uuid) 

1837 self.sr.lvActivator.add(snapUuid, snapLV, False) 

1838 parentRaw = (self.vdi_type == vhdutil.VDI_TYPE_RAW) 

1839 vhdutil.snapshot(snapPath, self.path, parentRaw, lvhdutil.MSIZE_MB) 

1840 snapParent = vhdutil.getParent(snapPath, lvhdutil.extractUuid) 

1841 

1842 snapVDI = LVHDVDI(self.sr, snapUuid) 

1843 snapVDI.read_only = False 

1844 snapVDI.location = snapUuid 

1845 snapVDI.size = self.size 

1846 snapVDI.utilisation = snapSizeLV 

1847 snapVDI.sm_config = dict() 

1848 for key, val in self.sm_config.items():

1849 if key not in [ 

1850 "type", "vdi_type", "vhd-parent", "paused", "relinking", "activating"] and \ 

1851 not key.startswith("host_"): 

1852 snapVDI.sm_config[key] = val 

1853 snapVDI.sm_config["vdi_type"] = vhdutil.VDI_TYPE_VHD 

1854 snapVDI.sm_config["vhd-parent"] = snapParent 

1855 snapVDI.lvname = snapLV 

1856 return snapVDI 

1857 

1858 def _finishSnapshot(self, snapVDI, snapVDI2, hostRefs, cloneOp=False, snapType=None): 

1859 if snapType is not VDI.SNAPSHOT_INTERNAL:

1860 self.sr._updateStats(self.sr.uuid, self.size) 

1861 basePresent = True 

1862 

1863 # Verify parent locator field of both children and delete basePath if 

1864 # unused 

1865 snapParent = snapVDI.sm_config["vhd-parent"] 

1866 snap2Parent = "" 

1867 if snapVDI2:

1868 snap2Parent = snapVDI2.sm_config["vhd-parent"] 

1869 if snapParent != self.uuid and \

1870 (not snapVDI2 or snap2Parent != self.uuid): 

1871 util.SMlog("%s != %s != %s => deleting unused base %s" % \ 

1872 (snapParent, self.uuid, snap2Parent, self.lvname)) 

1873 RefCounter.put(self.uuid, False, lvhdutil.NS_PREFIX_LVM + self.sr.uuid) 

1874 self.sr.lvmCache.remove(self.lvname) 

1875 self.sr.lvActivator.remove(self.uuid, False) 

1876 if hostRefs: 

1877 self.sr._updateSlavesOnRemove(hostRefs, self.uuid, self.lvname) 

1878 basePresent = False 

1879 else: 

1880 # assign the _binary_ refcount of the original VDI to the new base 

1881 # VDI (but as the normal refcount, since binary refcounts are only 

1882 # for leaf nodes). The normal refcount of the child is not 

1883 # transferred to the base VDI because normal refcounts are 

1884 # incremented and decremented individually, and not based on the 

1885 # VHD chain (i.e., the child's normal refcount will be decremented 

1886 # independently of its parent situation). Add 1 for this clone op. 

1887 # Note that we do not need to protect the refcount operations 

1888 # below with per-VDI locking like we do in lvutil because at this 

1889 # point we have exclusive access to the VDIs involved. Other SM 

1890 # operations are serialized by the Agent or with the SR lock, and 

1891 # any coalesce activations are serialized with the SR lock. (The 

1892 # coalesce activates the coalesced VDI pair in the beginning, which 

1893 # cannot affect the VDIs here because they cannot possibly be 

1894 # involved in coalescing at this point, and at the relinkSkip step 

1895 # that activates the children, which takes the SR lock.) 

1896 ns = lvhdutil.NS_PREFIX_LVM + self.sr.uuid 

1897 (cnt, bcnt) = RefCounter.check(snapVDI.uuid, ns) 

1898 RefCounter.set(self.uuid, bcnt + 1, 0, ns) 

1899 

1900 # the "paused" and "host_*" sm-config keys are special and must stay on 

1901 # the leaf without being inherited by anyone else 

1902 for key in [x for x in self.sm_config.keys() if x == "paused" or x.startswith("host_")]:

1903 snapVDI.sm_config[key] = self.sm_config[key] 

1904 del self.sm_config[key] 

1905 

1906 # Introduce any new VDI records & update the existing one 

1907 type = self.session.xenapi.VDI.get_type( \ 

1908 self.sr.srcmd.params['vdi_ref']) 

1909 if snapVDI2:

1910 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1) 

1911 vdiRef = snapVDI2._db_introduce() 

1912 if cloneOp: 

1913 vdi_info = {UUID_TAG: snapVDI2.uuid, 

1914 NAME_LABEL_TAG: util.to_plain_string( \ 

1915 self.session.xenapi.VDI.get_name_label( \ 

1916 self.sr.srcmd.params['vdi_ref'])), 

1917 NAME_DESCRIPTION_TAG: util.to_plain_string( \ 

1918 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])), 

1919 IS_A_SNAPSHOT_TAG: 0, 

1920 SNAPSHOT_OF_TAG: '', 

1921 SNAPSHOT_TIME_TAG: '', 

1922 TYPE_TAG: type, 

1923 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'], 

1924 READ_ONLY_TAG: 0, 

1925 MANAGED_TAG: int(snapVDI2.managed), 

1926 METADATA_OF_POOL_TAG: '' 

1927 } 

1928 else: 

1929 util.SMlog("snapshot VDI params: %s" % \ 

1930 self.session.xenapi.VDI.get_snapshot_time(vdiRef)) 

1931 vdi_info = {UUID_TAG: snapVDI2.uuid, 

1932 NAME_LABEL_TAG: util.to_plain_string( \ 

1933 self.session.xenapi.VDI.get_name_label( \ 

1934 self.sr.srcmd.params['vdi_ref'])), 

1935 NAME_DESCRIPTION_TAG: util.to_plain_string( \ 

1936 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])), 

1937 IS_A_SNAPSHOT_TAG: 1, 

1938 SNAPSHOT_OF_TAG: snapVDI.uuid, 

1939 SNAPSHOT_TIME_TAG: '', 

1940 TYPE_TAG: type, 

1941 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'], 

1942 READ_ONLY_TAG: 0, 

1943 MANAGED_TAG: int(snapVDI2.managed), 

1944 METADATA_OF_POOL_TAG: '' 

1945 } 

1946 

1947 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info) 

1948 util.SMlog("vdi_clone: introduced 2nd snap VDI: %s (%s)" % \ 

1949 (vdiRef, snapVDI2.uuid)) 

1950 

1951 if basePresent:

1952 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1) 

1953 vdiRef = self._db_introduce() 

1954 vdi_info = {UUID_TAG: self.uuid, 

1955 NAME_LABEL_TAG: self.label, 

1956 NAME_DESCRIPTION_TAG: self.description, 

1957 IS_A_SNAPSHOT_TAG: 0, 

1958 SNAPSHOT_OF_TAG: '', 

1959 SNAPSHOT_TIME_TAG: '', 

1960 TYPE_TAG: type, 

1961 VDI_TYPE_TAG: self.sm_config['vdi_type'], 

1962 READ_ONLY_TAG: 1, 

1963 MANAGED_TAG: 0, 

1964 METADATA_OF_POOL_TAG: '' 

1965 } 

1966 

1967 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info) 

1968 util.SMlog("vdi_clone: introduced base VDI: %s (%s)" % \ 

1969 (vdiRef, self.uuid)) 

1970 

1971 # Update the original record 

1972 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1973 self.session.xenapi.VDI.set_sm_config(vdi_ref, snapVDI.sm_config) 

1974 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, \ 

1975 str(snapVDI.utilisation)) 

1976 

1977 # Return the info on the new snap VDI 

1978 snap = snapVDI2 

1979 if not snap:

1980 snap = self 

1981 if not basePresent: 

1982 # a single-snapshot of an empty VDI will be a noop, resulting 

1983 # in no new VDIs, so return the existing one. The GC wouldn't 

1984 # normally try to single-snapshot an empty VHD of course, but 

1985 # if an external snapshot operation manages to sneak in right 

1986 # before a snapshot-coalesce phase, we would get here 

1987 snap = snapVDI 

1988 return snap.get_params() 

1989 

1990 def _initFromVDIInfo(self, vdiInfo): 

1991 self.vdi_type = vdiInfo.vdiType 

1992 self.lvname = vdiInfo.lvName 

1993 self.size = vdiInfo.sizeVirt 

1994 self.utilisation = vdiInfo.sizeLV 

1995 self.hidden = vdiInfo.hidden 

1996 if self.hidden:

1997 self.managed = False 

1998 self.active = vdiInfo.lvActive 

1999 self.readonly = vdiInfo.lvReadonly 

2000 self.parent = vdiInfo.parentUuid 

2001 self.path = os.path.join(self.sr.path, self.lvname) 

2002 if hasattr(self, "sm_config_override"):

2003 self.sm_config_override["vdi_type"] = self.vdi_type 

2004 else: 

2005 self.sm_config_override = {'vdi_type': self.vdi_type} 

2006 self.loaded = True 

2007 

2008 def _initFromLVInfo(self, lvInfo): 

2009 self.vdi_type = lvInfo.vdiType 

2010 self.lvname = lvInfo.name 

2011 self.size = lvInfo.size 

2012 self.utilisation = lvInfo.size 

2013 self.hidden = lvInfo.hidden 

2014 self.active = lvInfo.active 

2015 self.readonly = lvInfo.readonly 

2016 self.parent = '' 

2017 self.path = os.path.join(self.sr.path, self.lvname) 

2018 if hasattr(self, "sm_config_override"):

2019 self.sm_config_override["vdi_type"] = self.vdi_type 

2020 else: 

2021 self.sm_config_override = {'vdi_type': self.vdi_type} 

2022 if self.vdi_type == vhdutil.VDI_TYPE_RAW:

2023 self.loaded = True 

2024 

2025 def _initFromVHDInfo(self, vhdInfo): 

2026 self.size = vhdInfo.sizeVirt 

2027 self.parent = vhdInfo.parentUuid 

2028 self.hidden = vhdInfo.hidden 

2029 self.loaded = True 

2030 

2031 def _determineType(self): 

2032 """Determine whether this is a raw or a VHD VDI""" 

2033 if "vdi_ref" in self.sr.srcmd.params: 2033 ↛ 2046line 2033 didn't jump to line 2046, because the condition on line 2033 was never false

2034 vdi_ref = self.sr.srcmd.params["vdi_ref"] 

2035 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

2036 if sm_config.get("vdi_type"):

2037 self.vdi_type = sm_config["vdi_type"] 

2038 prefix = lvhdutil.LV_PREFIX[self.vdi_type] 

2039 self.lvname = "%s%s" % (prefix, self.uuid) 

2040 self.path = os.path.join(self.sr.path, self.lvname) 

2041 self.sm_config_override = sm_config 

2042 return True 

2043 

2044 # LVM commands can be costly, so check the file directly first in case 

2045 # the LV is active 

2046 found = False 

2047 for t in lvhdutil.VDI_TYPES:

2048 lvname = "%s%s" % (lvhdutil.LV_PREFIX[t], self.uuid) 

2049 path = os.path.join(self.sr.path, lvname) 

2050 if util.pathexists(path): 

2051 if found: 

2052 raise xs_errors.XenError('VDILoad', 

2053 opterr="multiple VDI's: uuid %s" % self.uuid) 

2054 found = True 

2055 self.vdi_type = t 

2056 self.lvname = lvname 

2057 self.path = path 

2058 if found:

2059 return True 

2060 

2061 # now list all LV's 

2062 if not lvutil._checkVG(self.sr.vgname):

2063 # when doing attach_from_config, the VG won't be there yet 

2064 return False 

2065 

2066 lvs = lvhdutil.getLVInfo(self.sr.lvmCache) 

2067 if lvs.get(self.uuid): 

2068 self._initFromLVInfo(lvs[self.uuid]) 

2069 return True 

2070 return False 

2071 

2072 def _loadThis(self): 

2073 """Load VDI info for this VDI and activate the LV if it's VHD. We 

2074 don't do it in VDI.load() because not all VDI operations need it.""" 

2075 if self.loaded:

2076 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 

2077 self.sr.lvActivator.activate(self.uuid, self.lvname, False) 

2078 return 

2079 try: 

2080 lvs = lvhdutil.getLVInfo(self.sr.lvmCache, self.lvname) 

2081 except util.CommandException as e: 

2082 raise xs_errors.XenError('VDIUnavailable', 

2083 opterr='%s (LV scan error)' % os.strerror(abs(e.code))) 

2084 if not lvs.get(self.uuid):

2085 raise xs_errors.XenError('VDIUnavailable', opterr='LV not found') 

2086 self._initFromLVInfo(lvs[self.uuid]) 

2087 if self.vdi_type == vhdutil.VDI_TYPE_VHD:

2088 self.sr.lvActivator.activate(self.uuid, self.lvname, False) 

2089 vhdInfo = vhdutil.getVHDInfo(self.path, lvhdutil.extractUuid, False) 

2090 if not vhdInfo:

2091 raise xs_errors.XenError('VDIUnavailable', \ 

2092 opterr='getVHDInfo failed') 

2093 self._initFromVHDInfo(vhdInfo) 

2094 self.loaded = True 

2095 

2096 def _chainSetActive(self, active, binary, persistent=False): 

2097 if binary:

2098 (count, bcount) = RefCounter.checkLocked(self.uuid, 

2099 lvhdutil.NS_PREFIX_LVM + self.sr.uuid) 

2100 if (active and bcount > 0) or (not active and bcount == 0): 

2101 return # this is a redundant activation/deactivation call 

2102 

2103 vdiList = {self.uuid: self.lvname} 

2104 if self.vdi_type == vhdutil.VDI_TYPE_VHD:

2105 vdiList = vhdutil.getParentChain(self.lvname, 

2106 lvhdutil.extractUuid, self.sr.vgname) 

2107 for uuid, lvName in vdiList.items():

2108 binaryParam = binary 

2109 if uuid != self.uuid: 

2110 binaryParam = False # binary param only applies to leaf nodes 

2111 if active: 

2112 self.sr.lvActivator.activate(uuid, lvName, binaryParam, 

2113 persistent) 

2114 else: 

2115 # just add the LVs for deactivation in the final (cleanup) 

2116 # step. The LVs must not have been activated during the current 

2117 # operation 

2118 self.sr.lvActivator.add(uuid, lvName, binaryParam) 

2119 

2120 def _failClone(self, uuid, jval, msg): 

2121 try: 

2122 self.sr._handleInterruptedCloneOp(uuid, jval, True) 

2123 self.sr.journaler.remove(self.JRN_CLONE, uuid) 

2124 except Exception as e: 

2125 util.SMlog('WARNING: failed to clean up failed snapshot: ' \ 

2126 ' %s (error ignored)' % e) 

2127 raise xs_errors.XenError('VDIClone', opterr=msg) 

2128 

2129 def _markHidden(self): 

2130 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

2131 self.sr.lvmCache.setHidden(self.lvname) 

2132 else: 

2133 vhdutil.setHidden(self.path) 

2134 self.hidden = 1 

2135 

2136 def _prepareThin(self, attach): 

2137 origUtilisation = self.sr.lvmCache.getSize(self.lvname) 

2138 if self.sr.isMaster: 

2139 # the master can prepare the VDI locally 

2140 if attach: 

2141 lvhdutil.attachThin(self.sr.journaler, self.sr.uuid, self.uuid) 

2142 else: 

2143 lvhdutil.detachThin(self.session, self.sr.lvmCache, 

2144 self.sr.uuid, self.uuid) 

2145 else: 

2146 fn = "attach" 

2147 if not attach: 

2148 fn = "detach" 

2149 pools = self.session.xenapi.pool.get_all() 

2150 master = self.session.xenapi.pool.get_master(pools[0]) 

2151 rv = self.session.xenapi.host.call_plugin( 

2152 master, self.sr.THIN_PLUGIN, fn, 

2153 {"srUuid": self.sr.uuid, "vdiUuid": self.uuid}) 

2154 util.SMlog("call-plugin returned: %s" % rv) 

2155 if not rv: 

2156 raise Exception('plugin %s failed' % self.sr.THIN_PLUGIN) 

2157 # refresh to pick up the size change on this slave 

2158 self.sr.lvmCache.activateNoRefcount(self.lvname, True) 

2159 

2160 self.utilisation = self.sr.lvmCache.getSize(self.lvname) 

2161 if origUtilisation != self.utilisation: 

2162 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

2163 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, 

2164 str(self.utilisation)) 

2165 stats = lvutil._getVGstats(self.sr.vgname) 

2166 sr_utilisation = stats['physical_utilisation'] 

2167 self.session.xenapi.SR.set_physical_utilisation(self.sr.sr_ref, 

2168 str(sr_utilisation)) 

2169 

2170 def update(self, sr_uuid, vdi_uuid): 

2171 if self.sr.legacyMode: 

2172 return 

2173 

2174 # Sync the name_label of this VDI on storage with the name_label in XAPI 

2175 vdi_ref = self.session.xenapi.VDI.get_by_uuid(self.uuid) 

2176 update_map = {} 

2177 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \ 

2178 METADATA_OBJECT_TYPE_VDI 

2179 update_map[UUID_TAG] = self.uuid 

2180 update_map[NAME_LABEL_TAG] = util.to_plain_string( \ 

2181 self.session.xenapi.VDI.get_name_label(vdi_ref)) 

2182 update_map[NAME_DESCRIPTION_TAG] = util.to_plain_string( \ 

2183 self.session.xenapi.VDI.get_name_description(vdi_ref)) 

2184 update_map[SNAPSHOT_TIME_TAG] = \ 

2185 self.session.xenapi.VDI.get_snapshot_time(vdi_ref) 

2186 update_map[METADATA_OF_POOL_TAG] = \ 

2187 self.session.xenapi.VDI.get_metadata_of_pool(vdi_ref) 

2188 LVMMetadataHandler(self.sr.mdpath).updateMetadata(update_map) 

2189 

2190 def _ensure_cbt_space(self): 

2191 self.sr.ensureCBTSpace() 

2192 

2193 def _create_cbt_log(self): 

2194 logname = self._get_cbt_logname(self.uuid) 

2195 self.sr.lvmCache.create(logname, self.sr.journaler.LV_SIZE, CBTLOG_TAG) 

2196 logpath = super(LVHDVDI, self)._create_cbt_log() 

2197 self.sr.lvmCache.deactivateNoRefcount(logname) 

2198 return logpath 

2199 

2200 def _delete_cbt_log(self): 

2201 logpath = self._get_cbt_logpath(self.uuid) 

2202 if self._cbt_log_exists(logpath): 

2203 logname = self._get_cbt_logname(self.uuid) 

2204 self.sr.lvmCache.remove(logname) 

2205 

2206 def _rename(self, oldpath, newpath): 

2207 oldname = os.path.basename(oldpath) 

2208 newname = os.path.basename(newpath) 

2209 self.sr.lvmCache.rename(oldname, newname) 

2210 

2211 def _activate_cbt_log(self, lv_name): 

2212 self.sr.lvmCache.refresh() 

2213 if not self.sr.lvmCache.is_active(lv_name):

2214 try: 

2215 self.sr.lvmCache.activateNoRefcount(lv_name) 

2216 return True 

2217 except Exception as e: 

2218 util.SMlog("Exception in _activate_cbt_log, " 

2219 "Error: %s." % str(e)) 

2220 raise 

2221 else: 

2222 return False 

2223 

2224 def _deactivate_cbt_log(self, lv_name): 

2225 try: 

2226 self.sr.lvmCache.deactivateNoRefcount(lv_name) 

2227 except Exception as e: 

2228 util.SMlog("Exception in _deactivate_cbt_log, Error: %s." % str(e)) 

2229 raise 

2230 

2231 def _cbt_log_exists(self, logpath): 

2232 return lvutil.exists(logpath) 

2233 

2234if __name__ == '__main__':

2235 SRCommand.run(LVHDSR, DRIVER_INFO) 

2236else: 

2237 SR.registerSR(LVHDSR)