1#!/usr/bin/python3 

2# 

3# Copyright (C) Citrix Systems Inc. 

4# 

5# This program is free software; you can redistribute it and/or modify 

6# it under the terms of the GNU Lesser General Public License as published 

7# by the Free Software Foundation; version 2.1 only. 

8# 

9# This program is distributed in the hope that it will be useful, 

10# but WITHOUT ANY WARRANTY; without even the implied warranty of 

11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

12# GNU Lesser General Public License for more details. 

13# 

14# You should have received a copy of the GNU Lesser General Public License 

15# along with this program; if not, write to the Free Software Foundation, Inc., 

16# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 

17# 

18# LVHDSR: VHD on LVM storage repository 

19# 

20 

21import SR 

22from SR import deviceCheck 

23import VDI 

24import SRCommand 

25import util 

26import lvutil 

27import lvmcache 

28import vhdutil 

29import lvhdutil 

30import scsiutil 

31import os 

32import sys 

33import time 

34import errno 

35import xs_errors 

36import cleanup 

37import blktap2 

38from journaler import Journaler 

39from lock import Lock 

40from refcounter import RefCounter 

41from ipc import IPCFlag 

42from lvmanager import LVActivator 

43import XenAPI 

44import re 

45from srmetadata import ALLOCATION_TAG, NAME_LABEL_TAG, NAME_DESCRIPTION_TAG, \ 

46 UUID_TAG, IS_A_SNAPSHOT_TAG, SNAPSHOT_OF_TAG, TYPE_TAG, VDI_TYPE_TAG, \ 

47 READ_ONLY_TAG, MANAGED_TAG, SNAPSHOT_TIME_TAG, METADATA_OF_POOL_TAG, \ 

48 LVMMetadataHandler, METADATA_OBJECT_TYPE_VDI, \ 

49 METADATA_OBJECT_TYPE_SR, METADATA_UPDATE_OBJECT_TYPE_TAG 

50from metadata import retrieveXMLfromFile, _parseXML 

51from xmlrpc.client import DateTime 

52import glob 

53from constants import CBTLOG_TAG 

54DEV_MAPPER_ROOT = os.path.join('/dev/mapper', lvhdutil.VG_PREFIX) 

55 

56geneology = {} 

57CAPABILITIES = ["SR_PROBE", "SR_UPDATE", "SR_TRIM", 

58 "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH", "VDI_MIRROR", 

59 "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "ATOMIC_PAUSE", 

60 "VDI_RESET_ON_BOOT/2", "VDI_UPDATE", "VDI_CONFIG_CBT", 

61 "VDI_ACTIVATE", "VDI_DEACTIVATE"] 

62 

63CONFIGURATION = [['device', 'local device path (required) (e.g. /dev/sda3)']] 

64 

65DRIVER_INFO = { 

66 'name': 'Local VHD on LVM', 

67 'description': 'SR plugin which represents disks as VHD disks on ' + \ 

68 'Logical Volumes within a locally-attached Volume Group', 

69 'vendor': 'XenSource Inc', 

70 'copyright': '(C) 2008 XenSource Inc', 

71 'driver_version': '1.0', 

72 'required_api_version': '1.0', 

73 'capabilities': CAPABILITIES, 

74 'configuration': CONFIGURATION 

75 } 

76 

77PARAM_VHD = "vhd" 

78PARAM_RAW = "raw" 

79 

80OPS_EXCLUSIVE = [ 

81 "sr_create", "sr_delete", "sr_attach", "sr_detach", "sr_scan", 

82 "sr_update", "vdi_create", "vdi_delete", "vdi_resize", "vdi_snapshot", 

83 "vdi_clone"] 

84 

85# Log if snapshot pauses VM for more than this many seconds 

86LONG_SNAPTIME = 60 

87 

88class LVHDSR(SR.SR): 

89 DRIVER_TYPE = 'lvhd' 

90 

91 PROVISIONING_TYPES = ["thin", "thick"] 

92 PROVISIONING_DEFAULT = "thick" 

93 THIN_PLUGIN = "lvhd-thin" 

94 

95 PLUGIN_ON_SLAVE = "on-slave" 

96 

97 FLAG_USE_VHD = "use_vhd" 

98 MDVOLUME_NAME = "MGT" 

99 

100 ALLOCATION_QUANTUM = "allocation_quantum" 

101 INITIAL_ALLOCATION = "initial_allocation" 

102 

103 LOCK_RETRY_INTERVAL = 3 

104 LOCK_RETRY_ATTEMPTS = 10 

105 

106 TEST_MODE_KEY = "testmode" 

107 TEST_MODE_VHD_FAIL_REPARENT_BEGIN = "vhd_fail_reparent_begin" 

108 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR = "vhd_fail_reparent_locator" 

109 TEST_MODE_VHD_FAIL_REPARENT_END = "vhd_fail_reparent_end" 

110 TEST_MODE_VHD_FAIL_RESIZE_BEGIN = "vhd_fail_resize_begin" 

111 TEST_MODE_VHD_FAIL_RESIZE_DATA = "vhd_fail_resize_data" 

112 TEST_MODE_VHD_FAIL_RESIZE_METADATA = "vhd_fail_resize_metadata" 

113 TEST_MODE_VHD_FAIL_RESIZE_END = "vhd_fail_resize_end" 

114 

115 ENV_VAR_VHD_TEST = { 

116 TEST_MODE_VHD_FAIL_REPARENT_BEGIN: 

117 "VHD_UTIL_TEST_FAIL_REPARENT_BEGIN", 

118 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR: 

119 "VHD_UTIL_TEST_FAIL_REPARENT_LOCATOR", 

120 TEST_MODE_VHD_FAIL_REPARENT_END: 

121 "VHD_UTIL_TEST_FAIL_REPARENT_END", 

122 TEST_MODE_VHD_FAIL_RESIZE_BEGIN: 

123 "VHD_UTIL_TEST_FAIL_RESIZE_BEGIN", 

124 TEST_MODE_VHD_FAIL_RESIZE_DATA: 

125 "VHD_UTIL_TEST_FAIL_RESIZE_DATA_MOVED", 

126 TEST_MODE_VHD_FAIL_RESIZE_METADATA: 

127 "VHD_UTIL_TEST_FAIL_RESIZE_METADATA_MOVED", 

128 TEST_MODE_VHD_FAIL_RESIZE_END: 

129 "VHD_UTIL_TEST_FAIL_RESIZE_END" 

130 } 

131 testMode = "" 

132 

133 legacyMode = True 

134 

135 def handles(type): 

136 """Returns True if this SR class understands the given dconf string""" 

137 # we can pose as LVMSR or EXTSR for compatibility purposes 

138 if __name__ == '__main__': 

139 name = sys.argv[0] 

140 else: 

141 name = __name__ 

142 if name.endswith("LVMSR"): 

143 return type == "lvm" 

144 elif name.endswith("EXTSR"): 

145 return type == "ext" 

146 return type == LVHDSR.DRIVER_TYPE 

147 handles = staticmethod(handles) 

148 

149 def load(self, sr_uuid): 
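    # Initialise SR state: locking, LVM cache, journaler and provisioning mode; on the
    # master, replay interrupted journals, verify the metadata (MGT) volume and build
    # the on-disk VDI-to-type map.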

150 self.ops_exclusive = OPS_EXCLUSIVE 

151 

152 self.isMaster = False 

153 if 'SRmaster' in self.dconf and self.dconf['SRmaster'] == 'true': 

154 self.isMaster = True 

155 

156 self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid) 

157 self.sr_vditype = SR.DEFAULT_TAP 

158 self.uuid = sr_uuid 

159 self.vgname = lvhdutil.VG_PREFIX + self.uuid 

160 self.path = os.path.join(lvhdutil.VG_LOCATION, self.vgname) 

161 self.mdpath = os.path.join(self.path, self.MDVOLUME_NAME) 

162 self.provision = self.PROVISIONING_DEFAULT 

163 try: 

164 self.lvmCache = lvmcache.LVMCache(self.vgname) 

165 except: 

166 raise xs_errors.XenError('SRUnavailable', \ 

167 opterr='Failed to initialise the LVMCache') 

168 self.lvActivator = LVActivator(self.uuid, self.lvmCache) 

169 self.journaler = Journaler(self.lvmCache) 

170 if not self.srcmd.params.get("sr_ref"): 

171 return # must be a probe call 

172 # Test for thick vs thin provisioning conf parameter 

173 if 'allocation' in self.dconf: 

174 if self.dconf['allocation'] in self.PROVISIONING_TYPES: 

175 self.provision = self.dconf['allocation'] 

176 else: 

177 raise xs_errors.XenError('InvalidArg', \ 

178 opterr='Allocation parameter must be one of %s' % self.PROVISIONING_TYPES) 

179 

180 self.other_conf = self.session.xenapi.SR.get_other_config(self.sr_ref) 

181 if self.other_conf.get(self.TEST_MODE_KEY): 

182 self.testMode = self.other_conf[self.TEST_MODE_KEY] 

183 self._prepareTestMode() 

184 

185 self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref) 

186 # sm_config flag overrides PBD, if any 

187 if self.sm_config.get('allocation') in self.PROVISIONING_TYPES: 

188 self.provision = self.sm_config.get('allocation') 

189 

190 if self.sm_config.get(self.FLAG_USE_VHD) == "true": 

191 self.legacyMode = False 

192 

193 if lvutil._checkVG(self.vgname): 

194 if self.isMaster and not self.cmd in ["vdi_attach", "vdi_detach", 

195 "vdi_activate", "vdi_deactivate"]: 

196 self._undoAllJournals() 

197 if not self.cmd in ["sr_attach", "sr_probe"]: 

198 self._checkMetadataVolume() 

199 

200 self.mdexists = False 

201 

202 # get a VDI -> TYPE map from the storage 

203 contains_uuid_regex = \ 

204 re.compile("^.*[0-9a-f]{8}-(([0-9a-f]{4})-){3}[0-9a-f]{12}.*") 

205 self.storageVDIs = {} 

206 

207 for key in self.lvmCache.lvs.keys(): 

208 # if the lvname has a uuid in it 

209 type = None 

210 if contains_uuid_regex.search(key) is not None: 

211 if key.startswith(lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD]): 

212 type = vhdutil.VDI_TYPE_VHD 

213 vdi = key[len(lvhdutil.LV_PREFIX[type]):] 

214 elif key.startswith(lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_RAW]): 

215 type = vhdutil.VDI_TYPE_RAW 

216 vdi = key[len(lvhdutil.LV_PREFIX[type]):] 

217 else: 

218 continue 

219 

220 if type is not None: 

221 self.storageVDIs[vdi] = type 

222 

223 # check if metadata volume exists 

224 try: 

225 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME) 

226 except: 

227 pass 

228 

229 def cleanup(self): 

230 # we don't need to hold the lock to dec refcounts of activated LVs 

231 if not self.lvActivator.deactivateAll(): 

232 raise util.SMException("failed to deactivate LVs") 

233 

234 def updateSRMetadata(self, allocation): 
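    # Rebuild the MGT volume contents from XAPI: write the SR record plus one
    # metadata entry per VDI currently in the SR.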

235 try: 

236 # Add SR specific SR metadata 

237 sr_info = \ 

238 {ALLOCATION_TAG: allocation, 

239 UUID_TAG: self.uuid, 

240 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_label(self.sr_ref)), 

241 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_description(self.sr_ref)) 

242 } 

243 

244 vdi_info = {} 

245 for vdi in self.session.xenapi.SR.get_VDIs(self.sr_ref): 

246 vdi_uuid = self.session.xenapi.VDI.get_uuid(vdi) 

247 

248 # Create the VDI entry in the SR metadata 

249 vdi_info[vdi_uuid] = \ 

250 { 

251 UUID_TAG: vdi_uuid, 

252 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi)), 

253 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi)), 

254 IS_A_SNAPSHOT_TAG: \ 

255 int(self.session.xenapi.VDI.get_is_a_snapshot(vdi)), 

256 SNAPSHOT_OF_TAG: \ 

257 self.session.xenapi.VDI.get_snapshot_of(vdi), 

258 SNAPSHOT_TIME_TAG: \ 

259 self.session.xenapi.VDI.get_snapshot_time(vdi), 

260 TYPE_TAG: \ 

261 self.session.xenapi.VDI.get_type(vdi), 

262 VDI_TYPE_TAG: \ 

263 self.session.xenapi.VDI.get_sm_config(vdi)['vdi_type'], 

264 READ_ONLY_TAG: \ 

265 int(self.session.xenapi.VDI.get_read_only(vdi)), 

266 METADATA_OF_POOL_TAG: \ 

267 self.session.xenapi.VDI.get_metadata_of_pool(vdi), 

268 MANAGED_TAG: \ 

269 int(self.session.xenapi.VDI.get_managed(vdi)) 

270 } 

271 LVMMetadataHandler(self.mdpath).writeMetadata(sr_info, vdi_info) 

272 

273 except Exception as e: 

274 raise xs_errors.XenError('MetadataError', \ 

275 opterr='Error upgrading SR Metadata: %s' % str(e)) 

276 

277 def syncMetadataAndStorage(self): 

278 try: 

279 # if a VDI is present in the metadata but not in the storage 

280 # then delete it from the metadata 

281 vdi_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1] 

282 for vdi in list(vdi_info.keys()): 

283 update_map = {} 

284 if not vdi_info[vdi][UUID_TAG] in set(self.storageVDIs.keys()): 

285 # delete this from metadata 

286 LVMMetadataHandler(self.mdpath). \ 

287 deleteVdiFromMetadata(vdi_info[vdi][UUID_TAG]) 

288 else: 

289 # search for this in the metadata, compare types 

290 # self.storageVDIs is a map of vdi_uuid to vdi_type 

291 if vdi_info[vdi][VDI_TYPE_TAG] != \ 

292 self.storageVDIs[vdi_info[vdi][UUID_TAG]]: 

293 # storage type takes authority 

294 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] \ 

295 = METADATA_OBJECT_TYPE_VDI 

296 update_map[UUID_TAG] = vdi_info[vdi][UUID_TAG] 

297 update_map[VDI_TYPE_TAG] = \ 

298 self.storageVDIs[vdi_info[vdi][UUID_TAG]] 

299 LVMMetadataHandler(self.mdpath) \ 

300 .updateMetadata(update_map) 

301 else: 

302 # This should never happen 

303 pass 

304 

305 except Exception as e: 

306 raise xs_errors.XenError('MetadataError', \ 

307 opterr='Error synching SR Metadata and storage: %s' % str(e)) 

308 

309 def syncMetadataAndXapi(self): 

310 try: 

311 # get metadata 

312 (sr_info, vdi_info) = \ 

313 LVMMetadataHandler(self.mdpath, False).getMetadata() 

314 

315 # First synch SR parameters 

316 self.update(self.uuid) 

317 

318 # Now update the VDI information in the metadata if required 

319 for vdi_offset in vdi_info.keys(): 

320 try: 

321 vdi_ref = \ 

322 self.session.xenapi.VDI.get_by_uuid( \ 

323 vdi_info[vdi_offset][UUID_TAG]) 

324 except: 

325 # maybe the VDI is not in XAPI yet, don't bother 

326 continue 

327 

328 new_name_label = util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi_ref)) 

329 new_name_description = util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi_ref)) 

330 

331 if vdi_info[vdi_offset][NAME_LABEL_TAG] != new_name_label or \ 

332 vdi_info[vdi_offset][NAME_DESCRIPTION_TAG] != \ 

333 new_name_description: 

334 update_map = {} 

335 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \ 

336 METADATA_OBJECT_TYPE_VDI 

337 update_map[UUID_TAG] = vdi_info[vdi_offset][UUID_TAG] 

338 update_map[NAME_LABEL_TAG] = new_name_label 

339 update_map[NAME_DESCRIPTION_TAG] = new_name_description 

340 LVMMetadataHandler(self.mdpath) \ 

341 .updateMetadata(update_map) 

342 except Exception as e: 

343 raise xs_errors.XenError('MetadataError', \ 

344 opterr='Error synching SR Metadata and XAPI: %s' % str(e)) 

345 

346 def _checkMetadataVolume(self): 

347 util.SMlog("Entering _checkMetadataVolume") 

348 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME) 

349 if self.isMaster: 

350 if self.mdexists and self.cmd == "sr_attach": 

351 try: 

352 # activate the management volume 

353 # will be deactivated at detach time 

354 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME) 

355 self._synchSmConfigWithMetaData() 

356 util.SMlog("Sync SR metadata and the state on the storage.") 

357 self.syncMetadataAndStorage() 

358 self.syncMetadataAndXapi() 

359 except Exception as e: 

360 util.SMlog("Exception in _checkMetadataVolume, " \ 

361 "Error: %s." % str(e)) 

362 elif not self.mdexists and not self.legacyMode: 

363 self._introduceMetaDataVolume() 

364 

365 if self.mdexists: 

366 self.legacyMode = False 

367 

368 def _synchSmConfigWithMetaData(self): 

369 util.SMlog("Synching sm-config with metadata volume") 

370 

371 try: 

372 # get SR info from metadata 

373 sr_info = {} 

374 map = {} 

375 sr_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[0] 

376 

377 if sr_info == {}: 

378 raise Exception("Failed to get SR information from metadata.") 

379 

380 if "allocation" in sr_info: 380 ↛ 384line 380 didn't jump to line 384, because the condition on line 380 was never false

381 self.provision = sr_info.get("allocation") 

382 map['allocation'] = sr_info.get("allocation") 

383 else: 

384 raise Exception("Allocation key not found in SR metadata. " 

385 "SR info found: %s" % sr_info) 

386 

387 except Exception as e: 

388 raise xs_errors.XenError( 

389 'MetadataError', 

390 opterr='Error reading SR params from ' 

391 'metadata Volume: %s' % str(e)) 

392 try: 

393 map[self.FLAG_USE_VHD] = 'true' 

394 self.session.xenapi.SR.set_sm_config(self.sr_ref, map) 

395 except: 

396 raise xs_errors.XenError( 

397 'MetadataError', 

398 opterr='Error updating sm_config key') 

399 

400 def _introduceMetaDataVolume(self): 

401 util.SMlog("Creating Metadata volume") 

402 try: 

403 config = {} 

404 self.lvmCache.create(self.MDVOLUME_NAME, 4 * 1024 * 1024) 

405 

406 # activate the management volume, will be deactivated at detach time 

407 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME) 

408 

409 name_label = util.to_plain_string( \ 

410 self.session.xenapi.SR.get_name_label(self.sr_ref)) 

411 name_description = util.to_plain_string( \ 

412 self.session.xenapi.SR.get_name_description(self.sr_ref)) 

413 config[self.FLAG_USE_VHD] = "true" 

414 config['allocation'] = self.provision 

415 self.session.xenapi.SR.set_sm_config(self.sr_ref, config) 

416 

417 # Add the SR metadata 

418 self.updateSRMetadata(self.provision) 

419 except Exception as e: 

420 raise xs_errors.XenError('MetadataError', \ 

421 opterr='Error introducing Metadata Volume: %s' % str(e)) 

422 

423 def _removeMetadataVolume(self): 

424 if self.mdexists: 

425 try: 

426 self.lvmCache.remove(self.MDVOLUME_NAME) 

427 except: 

428 raise xs_errors.XenError('MetadataError', \ 

429 opterr='Failed to delete MGT Volume') 

430 

431 def _refresh_size(self): 

432 """ 

433 Refreshes the size of the backing device. 

434 Return true if all paths/devices agree on the same size. 

435 """ 

436 if hasattr(self, 'SCSIid'): 

437 # LVHDoHBASR, LVHDoISCSISR 

438 return scsiutil.refresh_lun_size_by_SCSIid(getattr(self, 'SCSIid')) 

439 else: 

440 # LVHDSR 

441 devices = self.dconf['device'].split(',') 

442 scsiutil.refreshdev(devices) 

443 return True 

444 

445 def _expand_size(self): 

446 """ 

447 Expands the size of the SR by growing into additional available 

448 space, if extra space is available on the backing device. 

449 Needs to be called after a successful call to _refresh_size. 

450 """ 

451 currentvgsize = lvutil._getVGstats(self.vgname)['physical_size'] 

452 # We are comparing PV- with VG-sizes that are aligned. Need a threshold 

453 resizethreshold = 100 * 1024 * 1024 # 100MB 

454 devices = self.dconf['device'].split(',') 

455 totaldevicesize = 0 

456 for device in devices: 

457 totaldevicesize = totaldevicesize + scsiutil.getsize(device) 

458 if totaldevicesize >= (currentvgsize + resizethreshold): 

459 try: 

460 if hasattr(self, 'SCSIid'): 

461 # LVHDoHBASR, LVHDoISCSISR might have slaves 

462 scsiutil.refresh_lun_size_by_SCSIid_on_slaves(self.session, 

463 getattr(self, 'SCSIid')) 

464 util.SMlog("LVHDSR._expand_size for %s will resize the pv." % 

465 self.uuid) 

466 for pv in lvutil.get_pv_for_vg(self.vgname): 

467 lvutil.resizePV(pv) 

468 except: 

469 util.logException("LVHDSR._expand_size for %s failed to resize" 

470 " the PV" % self.uuid) 

471 

472 @deviceCheck 

473 def create(self, uuid, size): 

474 util.SMlog("LVHDSR.create for %s" % self.uuid) 

475 if not self.isMaster: 

476 util.SMlog('sr_create blocked for non-master') 

477 raise xs_errors.XenError('LVMMaster') 

478 

479 if lvutil._checkVG(self.vgname): 

480 raise xs_errors.XenError('SRExists') 

481 

482 # Check none of the devices already in use by other PBDs 

483 if util.test_hostPBD_devs(self.session, uuid, self.dconf['device']): 

484 raise xs_errors.XenError('SRInUse') 

485 

486 # Check serial number entry in SR records 

487 for dev in self.dconf['device'].split(','): 

488 if util.test_scsiserial(self.session, dev): 

489 raise xs_errors.XenError('SRInUse') 

490 

491 lvutil.createVG(self.dconf['device'], self.vgname) 

492 

493 #Update serial number string 

494 scsiutil.add_serial_record(self.session, self.sr_ref, \ 

495 scsiutil.devlist_to_serialstring(self.dconf['device'].split(','))) 

496 

497 # since this is an SR.create turn off legacy mode 

498 self.session.xenapi.SR.add_to_sm_config(self.sr_ref, \ 

499 self.FLAG_USE_VHD, 'true') 

500 

501 def delete(self, uuid): 
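    # Delete the SR: force a final GC, remove dev-mapper entries and symlinks for
    # this VG, drop the MGT volume and, once the SR is empty, remove the VG itself.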

502 util.SMlog("LVHDSR.delete for %s" % self.uuid) 

503 if not self.isMaster: 

504 raise xs_errors.XenError('LVMMaster') 

505 cleanup.gc_force(self.session, self.uuid) 

506 

507 success = True 

508 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'): 

509 if util.extractSRFromDevMapper(fileName) != self.uuid: 

510 continue 

511 

512 if util.doesFileHaveOpenHandles(fileName): 

513 util.SMlog("LVHDSR.delete: The dev mapper entry %s has open " \ 

514 "handles" % fileName) 

515 success = False 

516 continue 

517 

518 # Now attempt to remove the dev mapper entry 

519 if not lvutil.removeDevMapperEntry(fileName, False): 

520 success = False 

521 continue 

522 

523 try: 

524 lvname = os.path.basename(fileName.replace('-', '/'). \ 

525 replace('//', '-')) 

526 lpath = os.path.join(self.path, lvname) 

527 os.unlink(lpath) 

528 except OSError as e: 

529 if e.errno != errno.ENOENT: 

530 util.SMlog("LVHDSR.delete: failed to remove the symlink for " \ 

531 "file %s. Error: %s" % (fileName, str(e))) 

532 success = False 

533 

534 if success: 

535 try: 

536 if util.pathexists(self.path): 

537 os.rmdir(self.path) 

538 except Exception as e: 

539 util.SMlog("LVHDSR.delete: failed to remove the symlink " \ 

540 "directory %s. Error: %s" % (self.path, str(e))) 

541 success = False 

542 

543 self._removeMetadataVolume() 

544 self.lvmCache.refresh() 

545 if len(lvhdutil.getLVInfo(self.lvmCache)) > 0: 

546 raise xs_errors.XenError('SRNotEmpty') 

547 

548 if not success: 

549 raise Exception("LVHDSR delete failed, please refer to the log " \ 

550 "for details.") 

551 

552 lvutil.removeVG(self.dconf['device'], self.vgname) 

553 self._cleanup() 

554 

555 def attach(self, uuid): 
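    # Attach the SR: clean up stale state, verify the VG, refresh (and on the master
    # expand) the backing device, re-check the metadata volume and set the block
    # scheduler for each device.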

556 util.SMlog("LVHDSR.attach for %s" % self.uuid) 

557 

558 self._cleanup(True) # in case of host crashes, if detach wasn't called 

559 

560 if not util.match_uuid(self.uuid) or not lvutil._checkVG(self.vgname): 

561 raise xs_errors.XenError('SRUnavailable', \ 

562 opterr='no such volume group: %s' % self.vgname) 

563 

564 # Refresh the metadata status 

565 self._checkMetadataVolume() 

566 

567 refreshsizeok = self._refresh_size() 

568 

569 if self.isMaster: 

570 if refreshsizeok: 

571 self._expand_size() 

572 

573 # Update SCSIid string 

574 util.SMlog("Calling devlist_to_serial") 

575 scsiutil.add_serial_record( 

576 self.session, self.sr_ref, 

577 scsiutil.devlist_to_serialstring(self.dconf['device'].split(','))) 

578 

579 # Test Legacy Mode Flag and update if VHD volumes exist 

580 if self.isMaster and self.legacyMode: 

581 vdiInfo = lvhdutil.getVDIInfo(self.lvmCache) 

582 for uuid, info in vdiInfo.items(): 

583 if info.vdiType == vhdutil.VDI_TYPE_VHD: 

584 self.legacyMode = False 

585 map = self.session.xenapi.SR.get_sm_config(self.sr_ref) 

586 self._introduceMetaDataVolume() 

587 break 

588 

589 # Set the block scheduler 

590 for dev in self.dconf['device'].split(','): 

591 self.block_setscheduler(dev) 

592 

593 def detach(self, uuid): 

594 util.SMlog("LVHDSR.detach for %s" % self.uuid) 

595 cleanup.abort(self.uuid) 

596 

597 # Do a best effort cleanup of the dev mapper entries 

598 # go through all devmapper entries for this VG 

599 success = True 

600 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'): 

601 if util.extractSRFromDevMapper(fileName) != self.uuid: 

602 continue 

603 

604 # check if any file has open handles 

605 if util.doesFileHaveOpenHandles(fileName): 

606 # if yes, log this and signal failure 

607 util.SMlog("LVHDSR.detach: The dev mapper entry %s has open " \ 

608 "handles" % fileName) 

609 success = False 

610 continue 

611 

612 # Now attempt to remove the dev mapper entry 

613 if not lvutil.removeDevMapperEntry(fileName, False): 

614 success = False 

615 continue 

616 

617 # also remove the symlinks from /dev/VG-XenStorage-SRUUID/* 

618 try: 

619 lvname = os.path.basename(fileName.replace('-', '/'). \ 

620 replace('//', '-')) 

621 lvname = os.path.join(self.path, lvname) 

622 util.force_unlink(lvname) 

623 except Exception as e: 

624 util.SMlog("LVHDSR.detach: failed to remove the symlink for " \ 

625 "file %s. Error: %s" % (fileName, str(e))) 

626 success = False 

627 

628 # now remove the directory where the symlinks are 

629 # this should pass as the directory should be empty by now 

630 if success: 

631 try: 

632 if util.pathexists(self.path): 

633 os.rmdir(self.path) 

634 except Exception as e: 

635 util.SMlog("LVHDSR.detach: failed to remove the symlink " \ 

636 "directory %s. Error: %s" % (self.path, str(e))) 

637 success = False 

638 

639 if not success: 

640 raise Exception("SR detach failed, please refer to the log " \ 

641 "for details.") 

642 

643 # Don't delete lock files on the master as it will break the locking 

644 # between SM and any GC thread that survives through SR.detach. 

645 # However, we should still delete lock files on slaves as it is the 

646 # only place to do so. 

647 self._cleanup(self.isMaster) 

648 

649 def forget_vdi(self, uuid): 

650 if not self.legacyMode: 

651 LVMMetadataHandler(self.mdpath).deleteVdiFromMetadata(uuid) 

652 super(LVHDSR, self).forget_vdi(uuid) 

653 

654 def scan(self, uuid): 
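    # Rescan the VG and reconcile VDIs between the storage, the metadata (MGT)
    # volume and XAPI, including CBT log volumes, before delegating to the
    # base-class scan and kicking the GC.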

655 try: 

656 lvname = '' 

657 activated = True 

658 util.SMlog("LVHDSR.scan for %s" % self.uuid) 

659 if not self.isMaster: 

660 util.SMlog('sr_scan blocked for non-master') 

661 raise xs_errors.XenError('LVMMaster') 

662 

663 if self._refresh_size(): 

664 self._expand_size() 

665 self.lvmCache.refresh() 

666 cbt_vdis = self.lvmCache.getTagged(CBTLOG_TAG) 

667 self._loadvdis() 

668 stats = lvutil._getVGstats(self.vgname) 

669 self.physical_size = stats['physical_size'] 

670 self.physical_utilisation = stats['physical_utilisation'] 

671 

672 # Now check if there are any VDIs in the metadata, which are not in 

673 # XAPI 

674 if self.mdexists: 

675 vdiToSnaps = {} 

676 # get VDIs from XAPI 

677 vdis = self.session.xenapi.SR.get_VDIs(self.sr_ref) 

678 vdi_uuids = set([]) 

679 for vdi in vdis: 

680 vdi_uuids.add(self.session.xenapi.VDI.get_uuid(vdi)) 

681 

682 Dict = LVMMetadataHandler(self.mdpath, False).getMetadata()[1] 

683 

684 for vdi in list(Dict.keys()): 

685 vdi_uuid = Dict[vdi][UUID_TAG] 

686 if bool(int(Dict[vdi][IS_A_SNAPSHOT_TAG])): 

687 if Dict[vdi][SNAPSHOT_OF_TAG] in vdiToSnaps: 

688 vdiToSnaps[Dict[vdi][SNAPSHOT_OF_TAG]].append(vdi_uuid) 

689 else: 

690 vdiToSnaps[Dict[vdi][SNAPSHOT_OF_TAG]] = [vdi_uuid] 

691 

692 if vdi_uuid not in vdi_uuids: 

693 util.SMlog("Introduce VDI %s as it is present in " \ 

694 "metadata and not in XAPI." % vdi_uuid) 

695 sm_config = {} 

696 sm_config['vdi_type'] = Dict[vdi][VDI_TYPE_TAG] 

697 lvname = "%s%s" % \ 

698 (lvhdutil.LV_PREFIX[sm_config['vdi_type']], vdi_uuid) 

699 self.lvmCache.activateNoRefcount(lvname) 

700 activated = True 

701 lvPath = os.path.join(self.path, lvname) 

702 

703 if Dict[vdi][VDI_TYPE_TAG] == vhdutil.VDI_TYPE_RAW: 

704 size = self.lvmCache.getSize( \ 

705 lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_RAW] + \ 

706 vdi_uuid) 

707 utilisation = \ 

708 util.roundup(lvutil.LVM_SIZE_INCREMENT, 

709 int(size)) 

710 else: 

711 parent = \ 

712 vhdutil._getVHDParentNoCheck(lvPath) 

713 

714 if parent is not None: 

715 sm_config['vhd-parent'] = parent[len( \ 

716 lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD]):] 

717 size = vhdutil.getSizeVirt(lvPath) 

718 if self.provision == "thin": 

719 utilisation = \ 

720 util.roundup(lvutil.LVM_SIZE_INCREMENT, 

721 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE)) 

722 else: 

723 utilisation = lvhdutil.calcSizeVHDLV(int(size)) 

724 

725 vdi_ref = self.session.xenapi.VDI.db_introduce( 

726 vdi_uuid, 

727 Dict[vdi][NAME_LABEL_TAG], 

728 Dict[vdi][NAME_DESCRIPTION_TAG], 

729 self.sr_ref, 

730 Dict[vdi][TYPE_TAG], 

731 False, 

732 bool(int(Dict[vdi][READ_ONLY_TAG])), 

733 {}, 

734 vdi_uuid, 

735 {}, 

736 sm_config) 

737 

738 self.session.xenapi.VDI.set_managed(vdi_ref, 

739 bool(int(Dict[vdi][MANAGED_TAG]))) 

740 self.session.xenapi.VDI.set_virtual_size(vdi_ref, 

741 str(size)) 

742 self.session.xenapi.VDI.set_physical_utilisation( \ 

743 vdi_ref, str(utilisation)) 

744 self.session.xenapi.VDI.set_is_a_snapshot( \ 

745 vdi_ref, bool(int(Dict[vdi][IS_A_SNAPSHOT_TAG]))) 

746 if bool(int(Dict[vdi][IS_A_SNAPSHOT_TAG])): 

747 self.session.xenapi.VDI.set_snapshot_time( \ 

748 vdi_ref, DateTime(Dict[vdi][SNAPSHOT_TIME_TAG])) 

749 if Dict[vdi][TYPE_TAG] == 'metadata': 

750 self.session.xenapi.VDI.set_metadata_of_pool( \ 

751 vdi_ref, Dict[vdi][METADATA_OF_POOL_TAG]) 

752 

753 # Update CBT status of disks either just added 

754 # or already in XAPI 

755 cbt_logname = "%s.%s" % (vdi_uuid, CBTLOG_TAG) 

756 if cbt_logname in cbt_vdis: 

757 vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid) 

758 self.session.xenapi.VDI.set_cbt_enabled(vdi_ref, True) 

759 # For existing VDIs, update local state too, 

760 # since the scan in the base SR class updates existing VDIs 

761 # again based on their local state 

762 if vdi_uuid in self.vdis: 

763 self.vdis[vdi_uuid].cbt_enabled = True 

764 cbt_vdis.remove(cbt_logname) 

765 

766 # Now set the snapshot statuses correctly in XAPI 

767 for srcvdi in vdiToSnaps.keys(): 

768 try: 

769 srcref = self.session.xenapi.VDI.get_by_uuid(srcvdi) 

770 except: 

771 # the source VDI no longer exists, continue 

772 continue 

773 

774 for snapvdi in vdiToSnaps[srcvdi]: 

775 try: 

776 # this might fail in cases where it's already set 

777 snapref = \ 

778 self.session.xenapi.VDI.get_by_uuid(snapvdi) 

779 self.session.xenapi.VDI.set_snapshot_of(snapref, srcref) 

780 except Exception as e: 

781 util.SMlog("Setting snapshot failed. " \ 

782 "Error: %s" % str(e)) 

783 

784 if cbt_vdis: 

785 # If we have items remaining in this list, 

786 # they are cbt_metadata VDI that XAPI doesn't know about 

787 # Add them to self.vdis and they'll get added to the DB 

788 for cbt_vdi in cbt_vdis: 

789 cbt_uuid = cbt_vdi.split(".")[0] 

790 new_vdi = self.vdi(cbt_uuid) 

791 new_vdi.ty = "cbt_metadata" 

792 new_vdi.cbt_enabled = True 

793 self.vdis[cbt_uuid] = new_vdi 

794 

795 ret = super(LVHDSR, self).scan(uuid) 

796 self._kickGC() 

797 return ret 

798 

799 finally: 

800 if lvname != '' and activated: 

801 self.lvmCache.deactivateNoRefcount(lvname) 

802 

803 def update(self, uuid): 

804 if not lvutil._checkVG(self.vgname): 

805 return 

806 self._updateStats(uuid, 0) 

807 

808 if self.legacyMode: 

809 return 

810 

811 # synch name_label in metadata with XAPI 

812 update_map = {} 

813 update_map = {METADATA_UPDATE_OBJECT_TYPE_TAG: \ 

814 METADATA_OBJECT_TYPE_SR, 

815 NAME_LABEL_TAG: util.to_plain_string( \ 

816 self.session.xenapi.SR.get_name_label(self.sr_ref)), 

817 NAME_DESCRIPTION_TAG: util.to_plain_string( \ 

818 self.session.xenapi.SR.get_name_description(self.sr_ref)) 

819 } 

820 LVMMetadataHandler(self.mdpath).updateMetadata(update_map) 

821 

822 def _updateStats(self, uuid, virtAllocDelta): 

823 valloc = int(self.session.xenapi.SR.get_virtual_allocation(self.sr_ref)) 

824 self.virtual_allocation = valloc + virtAllocDelta 

825 util.SMlog("Setting virtual_allocation of SR %s to %d" % 

826 (uuid, self.virtual_allocation)) 

827 stats = lvutil._getVGstats(self.vgname) 

828 self.physical_size = stats['physical_size'] 

829 self.physical_utilisation = stats['physical_utilisation'] 

830 self._db_update() 

831 

832 @deviceCheck 

833 def probe(self): 

834 return lvutil.srlist_toxml( 

835 lvutil.scan_srlist(lvhdutil.VG_PREFIX, self.dconf['device']), 

836 lvhdutil.VG_PREFIX, 

837 ('metadata' in self.srcmd.params['sr_sm_config'] and \ 

838 self.srcmd.params['sr_sm_config']['metadata'] == 'true')) 

839 

840 def vdi(self, uuid): 

841 return LVHDVDI(self, uuid) 

842 

843 def _loadvdis(self): 
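    # Populate self.vdis from the LVM scan, accumulate the virtual allocation and
    # record parent/child genealogy; hidden leaf nodes awaiting GC are dropped.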

844 self.virtual_allocation = 0 

845 self.vdiInfo = lvhdutil.getVDIInfo(self.lvmCache) 

846 self.allVDIs = {} 

847 

848 for uuid, info in self.vdiInfo.items(): 

849 if uuid.startswith(cleanup.SR.TMP_RENAME_PREFIX): 

850 continue 

851 if info.scanError: 

852 raise xs_errors.XenError('VDIUnavailable', \ 

853 opterr='Error scanning VDI %s' % uuid) 

854 self.vdis[uuid] = self.allVDIs[uuid] = self.vdi(uuid) 

855 if not self.vdis[uuid].hidden: 

856 self.virtual_allocation += self.vdis[uuid].utilisation 

857 

858 for uuid, vdi in self.vdis.items(): 

859 if vdi.parent: 

860 if vdi.parent in self.vdis: 

861 self.vdis[vdi.parent].read_only = True 

862 if vdi.parent in geneology: 

863 geneology[vdi.parent].append(uuid) 

864 else: 

865 geneology[vdi.parent] = [uuid] 

866 

867 # Now remove all hidden leaf nodes to avoid introducing records that 

868 # will be GC'ed 

869 for uuid in list(self.vdis.keys()): 

870 if uuid not in geneology and self.vdis[uuid].hidden: 

871 util.SMlog("Scan found hidden leaf (%s), ignoring" % uuid) 

872 del self.vdis[uuid] 

873 

874 def _ensureSpaceAvailable(self, amount_needed): 

875 space_available = lvutil._getVGstats(self.vgname)['freespace'] 

876 if (space_available < amount_needed): 

877 util.SMlog("Not enough space! free space: %d, need: %d" % \ 

878 (space_available, amount_needed)) 

879 raise xs_errors.XenError('SRNoSpace') 

880 

881 def _handleInterruptedCloneOps(self): 

882 entries = self.journaler.getAll(LVHDVDI.JRN_CLONE) 

883 for uuid, val in entries.items(): 

884 util.fistpoint.activate("LVHDRT_clone_vdi_before_undo_clone", self.uuid) 

885 self._handleInterruptedCloneOp(uuid, val) 

886 util.fistpoint.activate("LVHDRT_clone_vdi_after_undo_clone", self.uuid) 

887 self.journaler.remove(LVHDVDI.JRN_CLONE, uuid) 

888 

889 def _handleInterruptedCoalesceLeaf(self): 

890 entries = self.journaler.getAll(cleanup.VDI.JRN_LEAF) 

891 if len(entries) > 0: 

892 util.SMlog("*** INTERRUPTED COALESCE-LEAF OP DETECTED ***") 

893 cleanup.gc_force(self.session, self.uuid) 

894 self.lvmCache.refresh() 

895 

896 def _handleInterruptedCloneOp(self, origUuid, jval, forceUndo=False): 

897 """Either roll back or finalize the interrupted snapshot/clone 

898 operation. Rolling back is unsafe if the leaf VHDs have already been 

899 in use and written to. However, it is always safe to roll back while 

900 we're still in the context of the failed snapshot operation since the 

901 VBD is paused for the duration of the operation""" 

902 util.SMlog("*** INTERRUPTED CLONE OP: for %s (%s)" % (origUuid, jval)) 

903 lvs = lvhdutil.getLVInfo(self.lvmCache) 

904 baseUuid, clonUuid = jval.split("_") 

905 

906 # is there a "base copy" VDI? 

907 if not lvs.get(baseUuid): 

908 # no base copy: make sure the original is there 

909 if lvs.get(origUuid): 

910 util.SMlog("*** INTERRUPTED CLONE OP: nothing to do") 

911 return 

912 raise util.SMException("base copy %s not present, " \ 

913 "but no original %s found" % (baseUuid, origUuid)) 

914 

915 if forceUndo: 

916 util.SMlog("Explicit revert") 

917 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) 

918 return 

919 

920 if not lvs.get(origUuid) or (clonUuid and not lvs.get(clonUuid)): 

921 util.SMlog("One or both leaves missing => revert") 

922 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) 

923 return 

924 

925 vdis = lvhdutil.getVDIInfo(self.lvmCache) 

926 if vdis[origUuid].scanError or (clonUuid and vdis[clonUuid].scanError): 

927 util.SMlog("One or both leaves invalid => revert") 

928 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) 

929 return 

930 

931 orig = vdis[origUuid] 

932 base = vdis[baseUuid] 

933 self.lvActivator.activate(baseUuid, base.lvName, False) 

934 self.lvActivator.activate(origUuid, orig.lvName, False) 

935 if orig.parentUuid != baseUuid: 

936 parent = vdis[orig.parentUuid] 

937 self.lvActivator.activate(parent.uuid, parent.lvName, False) 

938 origPath = os.path.join(self.path, orig.lvName) 

939 if not vhdutil.check(origPath): 

940 util.SMlog("Orig VHD invalid => revert") 

941 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) 

942 return 

943 

944 if clonUuid: 

945 clon = vdis[clonUuid] 

946 clonPath = os.path.join(self.path, clon.lvName) 

947 self.lvActivator.activate(clonUuid, clon.lvName, False) 

948 if not vhdutil.check(clonPath): 

949 util.SMlog("Clon VHD invalid => revert") 

950 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) 

951 return 

952 

953 util.SMlog("Snapshot appears valid, will not roll back") 

954 self._completeCloneOp(vdis, origUuid, baseUuid, clonUuid) 

955 

956 def _undoCloneOp(self, lvs, origUuid, baseUuid, clonUuid): 

957 base = lvs[baseUuid] 

958 basePath = os.path.join(self.path, base.name) 

959 

960 # make the parent RW 

961 if base.readonly: 

962 self.lvmCache.setReadonly(base.name, False) 

963 

964 ns = lvhdutil.NS_PREFIX_LVM + self.uuid 

965 origRefcountBinary = RefCounter.check(origUuid, ns)[1] 

966 origRefcountNormal = 0 

967 

968 # un-hide the parent 

969 if base.vdiType == vhdutil.VDI_TYPE_VHD: 

970 self.lvActivator.activate(baseUuid, base.name, False) 

971 origRefcountNormal = 1 

972 vhdInfo = vhdutil.getVHDInfo(basePath, lvhdutil.extractUuid, False) 

973 if base.vdiType == vhdutil.VDI_TYPE_VHD and vhdInfo.hidden: 

974 vhdutil.setHidden(basePath, False) 

975 elif base.vdiType == vhdutil.VDI_TYPE_RAW and base.hidden: 

976 self.lvmCache.setHidden(base.name, False) 

977 

978 # remove the child nodes 

979 if clonUuid and lvs.get(clonUuid): 

980 if lvs[clonUuid].vdiType != vhdutil.VDI_TYPE_VHD: 

981 raise util.SMException("clone %s not VHD" % clonUuid) 

982 self.lvmCache.remove(lvs[clonUuid].name) 

983 if self.lvActivator.get(clonUuid, False): 

984 self.lvActivator.remove(clonUuid, False) 

985 if lvs.get(origUuid): 

986 self.lvmCache.remove(lvs[origUuid].name) 

987 

988 # inflate the parent to fully-allocated size 

989 if base.vdiType == vhdutil.VDI_TYPE_VHD: 

990 fullSize = lvhdutil.calcSizeVHDLV(vhdInfo.sizeVirt) 

991 lvhdutil.inflate(self.journaler, self.uuid, baseUuid, fullSize) 

992 

993 # rename back 

994 origLV = lvhdutil.LV_PREFIX[base.vdiType] + origUuid 

995 self.lvmCache.rename(base.name, origLV) 

996 RefCounter.reset(baseUuid, ns) 

997 if self.lvActivator.get(baseUuid, False): 

998 self.lvActivator.replace(baseUuid, origUuid, origLV, False) 

999 RefCounter.set(origUuid, origRefcountNormal, origRefcountBinary, ns) 

1000 

1001 # At this stage, tapdisk and SM vdi will be in paused state. Remove 

1002 # flag to facilitate vm deactivate 

1003 origVdiRef = self.session.xenapi.VDI.get_by_uuid(origUuid) 

1004 self.session.xenapi.VDI.remove_from_sm_config(origVdiRef, 'paused') 

1005 

1006 # update LVM metadata on slaves 

1007 slaves = util.get_slaves_attached_on(self.session, [origUuid]) 

1008 lvhdutil.lvRefreshOnSlaves(self.session, self.uuid, self.vgname, 

1009 origLV, origUuid, slaves) 

1010 

1011 util.SMlog("*** INTERRUPTED CLONE OP: rollback success") 

1012 

1013 def _completeCloneOp(self, vdis, origUuid, baseUuid, clonUuid): 

1014 """Finalize the interrupted snapshot/clone operation. This must not be 

1015 called from the live snapshot op context because we attempt to pause/ 

1016 unpause the VBD here (the VBD is already paused during snapshot, so it 

1017 would cause a deadlock)""" 

1018 base = vdis[baseUuid] 

1019 clon = None 

1020 if clonUuid: 

1021 clon = vdis[clonUuid] 

1022 

1023 cleanup.abort(self.uuid) 

1024 

1025 # make sure the parent is hidden and read-only 

1026 if not base.hidden: 

1027 if base.vdiType == vhdutil.VDI_TYPE_RAW: 

1028 self.lvmCache.setHidden(base.lvName) 

1029 else: 

1030 basePath = os.path.join(self.path, base.lvName) 

1031 vhdutil.setHidden(basePath) 

1032 if not base.lvReadonly: 

1033 self.lvmCache.setReadonly(base.lvName, True) 

1034 

1035 # NB: since this snapshot-preserving call is only invoked outside the 

1036 # snapshot op context, we assume the LVM metadata on the involved slave 

1037 # has by now been refreshed and do not attempt to do it here 

1038 

1039 # Update the original record 

1040 try: 

1041 vdi_ref = self.session.xenapi.VDI.get_by_uuid(origUuid) 

1042 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

1043 type = self.session.xenapi.VDI.get_type(vdi_ref) 

1044 sm_config["vdi_type"] = vhdutil.VDI_TYPE_VHD 

1045 sm_config['vhd-parent'] = baseUuid 

1046 self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config) 

1047 except XenAPI.Failure: 

1048 util.SMlog("ERROR updating the orig record") 

1049 

1050 # introduce the new VDI records 

1051 if clonUuid: 

1052 try: 

1053 clon_vdi = VDI.VDI(self, clonUuid) 

1054 clon_vdi.read_only = False 

1055 clon_vdi.location = clonUuid 

1056 clon_vdi.utilisation = clon.sizeLV 

1057 clon_vdi.sm_config = { 

1058 "vdi_type": vhdutil.VDI_TYPE_VHD, 

1059 "vhd-parent": baseUuid} 

1060 

1061 if not self.legacyMode: 

1062 LVMMetadataHandler(self.mdpath). \ 

1063 ensureSpaceIsAvailableForVdis(1) 

1064 

1065 clon_vdi_ref = clon_vdi._db_introduce() 

1066 util.SMlog("introduced clon VDI: %s (%s)" % \ 

1067 (clon_vdi_ref, clonUuid)) 

1068 

1069 vdi_info = {UUID_TAG: clonUuid, 

1070 NAME_LABEL_TAG: clon_vdi.label, 

1071 NAME_DESCRIPTION_TAG: clon_vdi.description, 

1072 IS_A_SNAPSHOT_TAG: 0, 

1073 SNAPSHOT_OF_TAG: '', 

1074 SNAPSHOT_TIME_TAG: '', 

1075 TYPE_TAG: type, 

1076 VDI_TYPE_TAG: clon_vdi.sm_config['vdi_type'], 

1077 READ_ONLY_TAG: int(clon_vdi.read_only), 

1078 MANAGED_TAG: int(clon_vdi.managed), 

1079 METADATA_OF_POOL_TAG: '' 

1080 } 

1081 

1082 if not self.legacyMode: 

1083 LVMMetadataHandler(self.mdpath).addVdi(vdi_info) 

1084 

1085 except XenAPI.Failure: 

1086 util.SMlog("ERROR introducing the clon record") 

1087 

1088 try: 

1089 base_vdi = VDI.VDI(self, baseUuid) # readonly parent 

1090 base_vdi.label = "base copy" 

1091 base_vdi.read_only = True 

1092 base_vdi.location = baseUuid 

1093 base_vdi.size = base.sizeVirt 

1094 base_vdi.utilisation = base.sizeLV 

1095 base_vdi.managed = False 

1096 base_vdi.sm_config = { 

1097 "vdi_type": vhdutil.VDI_TYPE_VHD, 

1098 "vhd-parent": baseUuid} 

1099 

1100 if not self.legacyMode: 

1101 LVMMetadataHandler(self.mdpath).ensureSpaceIsAvailableForVdis(1) 

1102 

1103 base_vdi_ref = base_vdi._db_introduce() 

1104 util.SMlog("introduced base VDI: %s (%s)" % \ 

1105 (base_vdi_ref, baseUuid)) 

1106 

1107 vdi_info = {UUID_TAG: baseUuid, 

1108 NAME_LABEL_TAG: base_vdi.label, 

1109 NAME_DESCRIPTION_TAG: base_vdi.description, 

1110 IS_A_SNAPSHOT_TAG: 0, 

1111 SNAPSHOT_OF_TAG: '', 

1112 SNAPSHOT_TIME_TAG: '', 

1113 TYPE_TAG: type, 

1114 VDI_TYPE_TAG: base_vdi.sm_config['vdi_type'], 

1115 READ_ONLY_TAG: int(base_vdi.read_only), 

1116 MANAGED_TAG: int(base_vdi.managed), 

1117 METADATA_OF_POOL_TAG: '' 

1118 } 

1119 

1120 if not self.legacyMode: 

1121 LVMMetadataHandler(self.mdpath).addVdi(vdi_info) 

1122 except XenAPI.Failure: 

1123 util.SMlog("ERROR introducing the base record") 

1124 

1125 util.SMlog("*** INTERRUPTED CLONE OP: complete") 

1126 

1127 def _undoAllJournals(self): 

1128 """Undo all VHD & SM interrupted journaled operations. This call must 

1129 be serialized with respect to all operations that create journals""" 

1130 # undoing interrupted inflates must be done first, since undoing VHD 

1131 # ops might require inflations 

1132 self.lock.acquire() 

1133 try: 

1134 self._undoAllInflateJournals() 

1135 self._undoAllVHDJournals() 

1136 self._handleInterruptedCloneOps() 

1137 self._handleInterruptedCoalesceLeaf() 

1138 finally: 

1139 self.lock.release() 

1140 self.cleanup() 

1141 

1142 def _undoAllInflateJournals(self): 

1143 entries = self.journaler.getAll(lvhdutil.JRN_INFLATE) 

1144 if len(entries) == 0: 

1145 return 

1146 self._loadvdis() 

1147 for uuid, val in entries.items(): 

1148 vdi = self.vdis.get(uuid) 

1149 if vdi: 

1150 util.SMlog("Found inflate journal %s, deflating %s to %s" % \ 

1151 (uuid, vdi.path, val)) 

1152 if vdi.readonly: 

1153 self.lvmCache.setReadonly(vdi.lvname, False) 

1154 self.lvActivator.activate(uuid, vdi.lvname, False) 

1155 currSizeLV = self.lvmCache.getSize(vdi.lvname) 

1156 util.zeroOut(vdi.path, currSizeLV - vhdutil.VHD_FOOTER_SIZE, 

1157 vhdutil.VHD_FOOTER_SIZE) 

1158 lvhdutil.deflate(self.lvmCache, vdi.lvname, int(val)) 

1159 if vdi.readonly: 

1160 self.lvmCache.setReadonly(vdi.lvname, True) 

1161 if "true" == self.session.xenapi.SR.get_shared(self.sr_ref): 1161 ↛ 1162line 1161 didn't jump to line 1162, because the condition on line 1161 was never true

1162 lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid, 

1163 self.vgname, vdi.lvname, uuid) 

1164 self.journaler.remove(lvhdutil.JRN_INFLATE, uuid) 

1165 delattr(self, "vdiInfo") 

1166 delattr(self, "allVDIs") 

1167 

1168 def _undoAllVHDJournals(self): 

1169 """check if there are VHD journals in existence and revert them""" 

1170 journals = lvhdutil.getAllVHDJournals(self.lvmCache) 

1171 if len(journals) == 0: 

1172 return 

1173 self._loadvdis() 

1174 for uuid, jlvName in journals: 

1175 vdi = self.vdis[uuid] 

1176 util.SMlog("Found VHD journal %s, reverting %s" % (uuid, vdi.path)) 

1177 self.lvActivator.activate(uuid, vdi.lvname, False) 

1178 self.lvmCache.activateNoRefcount(jlvName) 

1179 fullSize = lvhdutil.calcSizeVHDLV(vdi.size) 

1180 lvhdutil.inflate(self.journaler, self.uuid, vdi.uuid, fullSize) 

1181 try: 

1182 jFile = os.path.join(self.path, jlvName) 

1183 vhdutil.revert(vdi.path, jFile) 

1184 except util.CommandException: 

1185 util.logException("VHD journal revert") 

1186 vhdutil.check(vdi.path) 

1187 util.SMlog("VHD revert failed but VHD ok: removing journal") 

1188 # Attempt to reclaim unused space 

1189 vhdInfo = vhdutil.getVHDInfo(vdi.path, lvhdutil.extractUuid, False) 

1190 NewSize = lvhdutil.calcSizeVHDLV(vhdInfo.sizeVirt) 

1191 if NewSize < fullSize: 

1192 lvhdutil.deflate(self.lvmCache, vdi.lvname, int(NewSize)) 

1193 lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid, 

1194 self.vgname, vdi.lvname, uuid) 

1195 self.lvmCache.remove(jlvName) 

1196 delattr(self, "vdiInfo") 

1197 delattr(self, "allVDIs") 

1198 

1199 def _updateSlavesPreClone(self, hostRefs, origOldLV): 
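    # Ask each slave (via the on-slave plugin) to deactivate the original LV before
    # the clone operation renames it on the master.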

1200 masterRef = util.get_this_host_ref(self.session) 

1201 args = {"vgName": self.vgname, 

1202 "action1": "deactivateNoRefcount", 

1203 "lvName1": origOldLV} 

1204 for hostRef in hostRefs: 

1205 if hostRef == masterRef: 

1206 continue 

1207 util.SMlog("Deactivate VDI on %s" % hostRef) 

1208 rv = self.session.xenapi.host.call_plugin(hostRef, self.PLUGIN_ON_SLAVE, "multi", args) 

1209 util.SMlog("call-plugin returned: %s" % rv) 

1210 if not rv: 

1211 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE) 

1212 

1213 def _updateSlavesOnClone(self, hostRefs, origOldLV, origLV, 

1214 baseUuid, baseLV): 

1215 """We need to reactivate the original LV on each slave (note that the 

1216 name for the original LV might change), as well as init the refcount 

1217 for the base LV""" 

1218 args = {"vgName": self.vgname, 

1219 "action1": "refresh", 

1220 "lvName1": origLV, 

1221 "action2": "activate", 

1222 "ns2": lvhdutil.NS_PREFIX_LVM + self.uuid, 

1223 "lvName2": baseLV, 

1224 "uuid2": baseUuid} 

1225 

1226 masterRef = util.get_this_host_ref(self.session) 

1227 for hostRef in hostRefs: 

1228 if hostRef == masterRef: 

1229 continue 

1230 util.SMlog("Updating %s, %s, %s on slave %s" % \ 

1231 (origOldLV, origLV, baseLV, hostRef)) 

1232 rv = self.session.xenapi.host.call_plugin( 

1233 hostRef, self.PLUGIN_ON_SLAVE, "multi", args) 

1234 util.SMlog("call-plugin returned: %s" % rv) 

1235 if not rv: 

1236 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE) 

1237 

1238 def _updateSlavesOnCBTClone(self, hostRefs, cbtlog): 

1239 """Reactivate and refresh CBT log file on slaves""" 

1240 args = {"vgName": self.vgname, 

1241 "action1": "deactivateNoRefcount", 

1242 "lvName1": cbtlog, 

1243 "action2": "refresh", 

1244 "lvName2": cbtlog} 

1245 

1246 masterRef = util.get_this_host_ref(self.session) 

1247 for hostRef in hostRefs: 

1248 if hostRef == masterRef: 

1249 continue 

1250 util.SMlog("Updating %s on slave %s" % (cbtlog, hostRef)) 

1251 rv = self.session.xenapi.host.call_plugin( 

1252 hostRef, self.PLUGIN_ON_SLAVE, "multi", args) 

1253 util.SMlog("call-plugin returned: %s" % rv) 

1254 if not rv: 

1255 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE) 

1256 

1257 def _updateSlavesOnRemove(self, hostRefs, baseUuid, baseLV): 

1258 """Tell the slave we deleted the base image""" 

1259 args = {"vgName": self.vgname, 

1260 "action1": "cleanupLockAndRefcount", 

1261 "uuid1": baseUuid, 

1262 "ns1": lvhdutil.NS_PREFIX_LVM + self.uuid} 

1263 

1264 masterRef = util.get_this_host_ref(self.session) 

1265 for hostRef in hostRefs: 

1266 if hostRef == masterRef: 

1267 continue 

1268 util.SMlog("Cleaning locks for %s on slave %s" % (baseLV, hostRef)) 

1269 rv = self.session.xenapi.host.call_plugin( 

1270 hostRef, self.PLUGIN_ON_SLAVE, "multi", args) 

1271 util.SMlog("call-plugin returned: %s" % rv) 

1272 if not rv: 

1273 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE) 

1274 

1275 def _cleanup(self, skipLockCleanup=False): 

1276 """delete stale refcounter, flag, and lock files""" 

1277 RefCounter.resetAll(lvhdutil.NS_PREFIX_LVM + self.uuid) 

1278 IPCFlag(self.uuid).clearAll() 

1279 if not skipLockCleanup: 

1280 Lock.cleanupAll(self.uuid) 

1281 Lock.cleanupAll(lvhdutil.NS_PREFIX_LVM + self.uuid) 

1282 

1283 def _prepareTestMode(self): 

1284 util.SMlog("Test mode: %s" % self.testMode) 

1285 if self.ENV_VAR_VHD_TEST.get(self.testMode): 

1286 os.environ[self.ENV_VAR_VHD_TEST[self.testMode]] = "yes" 

1287 util.SMlog("Setting env %s" % self.ENV_VAR_VHD_TEST[self.testMode]) 

1288 

1289 def _kickGC(self): 

1290 # don't bother if an instance already running (this is just an 

1291 # optimization to reduce the overhead of forking a new process if we 

1292 # don't have to, but the process will check the lock anyways) 

1293 lockRunning = Lock(cleanup.LOCK_TYPE_RUNNING, self.uuid) 

1294 if not lockRunning.acquireNoblock(): 

1295 if cleanup.should_preempt(self.session, self.uuid): 

1296 util.SMlog("Aborting currently-running coalesce of garbage VDI") 

1297 try: 

1298 if not cleanup.abort(self.uuid, soft=True): 

1299 util.SMlog("The GC has already been scheduled to " 

1300 "re-start") 

1301 except util.CommandException as e: 

1302 if e.code != errno.ETIMEDOUT: 

1303 raise 

1304 util.SMlog('failed to abort the GC') 

1305 else: 

1306 util.SMlog("A GC instance already running, not kicking") 

1307 return 

1308 else: 

1309 lockRunning.release() 

1310 

1311 util.SMlog("Kicking GC") 

1312 cleanup.gc(self.session, self.uuid, True) 

1313 

1314 def ensureCBTSpace(self): 

1315 # Ensure we have space for at least one LV 

1316 self._ensureSpaceAvailable(self.journaler.LV_SIZE) 

1317 

1318 

1319class LVHDVDI(VDI.VDI): 

1320 

1321 JRN_CLONE = "clone" # journal entry type for the clone operation 

1322 

1323 def load(self, vdi_uuid): 
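    # Initialise VDI state from the SR-wide scan results when available; otherwise
    # probe the LV directly, and if it does not exist assume the VDI is about to be
    # created.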

1324 self.lock = self.sr.lock 

1325 self.lvActivator = self.sr.lvActivator 

1326 self.loaded = False 

1327 self.vdi_type = vhdutil.VDI_TYPE_VHD 

1328 if self.sr.legacyMode or util.fistpoint.is_active("xenrt_default_vdi_type_legacy"): 

1329 self.vdi_type = vhdutil.VDI_TYPE_RAW 

1330 self.uuid = vdi_uuid 

1331 self.location = self.uuid 

1332 self.exists = True 

1333 

1334 if hasattr(self.sr, "vdiInfo") and self.sr.vdiInfo.get(self.uuid): 

1335 self._initFromVDIInfo(self.sr.vdiInfo[self.uuid]) 

1336 if self.parent: 

1337 self.sm_config_override['vhd-parent'] = self.parent 

1338 else: 

1339 self.sm_config_override['vhd-parent'] = None 

1340 return 

1341 

1342 # scan() didn't run: determine the type of the VDI manually 

1343 if self._determineType(): 

1344 return 

1345 

1346 # the VDI must be in the process of being created 

1347 self.exists = False 

1348 if "vdi_sm_config" in self.sr.srcmd.params and \ 1348 ↛ 1350line 1348 didn't jump to line 1350, because the condition on line 1348 was never true

1349 "type" in self.sr.srcmd.params["vdi_sm_config"]: 

1350 type = self.sr.srcmd.params["vdi_sm_config"]["type"] 

1351 if type == PARAM_RAW: 

1352 self.vdi_type = vhdutil.VDI_TYPE_RAW 

1353 elif type == PARAM_VHD: 

1354 self.vdi_type = vhdutil.VDI_TYPE_VHD 

1355 if self.sr.cmd == 'vdi_create' and self.sr.legacyMode: 

1356 raise xs_errors.XenError('VDICreate', \ 

1357 opterr='Cannot create VHD type disk in legacy mode') 

1358 else: 

1359 raise xs_errors.XenError('VDICreate', opterr='bad type') 

1360 self.lvname = "%s%s" % (lvhdutil.LV_PREFIX[self.vdi_type], vdi_uuid) 

1361 self.path = os.path.join(self.sr.path, self.lvname) 

1362 

1363 def create(self, sr_uuid, vdi_uuid, size): 

1364 util.SMlog("LVHDVDI.create for %s" % self.uuid) 

1365 if not self.sr.isMaster: 

1366 raise xs_errors.XenError('LVMMaster') 

1367 if self.exists: 

1368 raise xs_errors.XenError('VDIExists') 

1369 

1370 size = vhdutil.validate_and_round_vhd_size(int(size)) 

1371 

1372 util.SMlog("LVHDVDI.create: type = %s, %s (size=%s)" % \ 

1373 (self.vdi_type, self.path, size)) 

1374 lvSize = 0 

1375 self.sm_config = self.sr.srcmd.params["vdi_sm_config"] 

1376 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1377 lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT, int(size)) 

1378 else: 

1379 if self.sr.provision == "thin": 

1380 lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT, 

1381 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE)) 

1382 elif self.sr.provision == "thick": 

1383 lvSize = lvhdutil.calcSizeVHDLV(int(size)) 

1384 

1385 self.sr._ensureSpaceAvailable(lvSize) 

1386 

1387 try: 

1388 self.sr.lvmCache.create(self.lvname, lvSize) 

1389 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1390 self.size = self.sr.lvmCache.getSize(self.lvname) 

1391 else: 

1392 vhdutil.create(self.path, int(size), False, lvhdutil.MSIZE_MB) 

1393 self.size = vhdutil.getSizeVirt(self.path) 

1394 self.sr.lvmCache.deactivateNoRefcount(self.lvname) 

1395 except util.CommandException as e: 

1396 util.SMlog("Unable to create VDI") 

1397 self.sr.lvmCache.remove(self.lvname) 

1398 raise xs_errors.XenError('VDICreate', opterr='error %d' % e.code) 

1399 

1400 self.utilisation = lvSize 

1401 self.sm_config["vdi_type"] = self.vdi_type 

1402 

1403 if not self.sr.legacyMode: 

1404 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1) 

1405 

1406 self.ref = self._db_introduce() 

1407 self.sr._updateStats(self.sr.uuid, self.size) 

1408 

1409 vdi_info = {UUID_TAG: self.uuid, 

1410 NAME_LABEL_TAG: util.to_plain_string(self.label), 

1411 NAME_DESCRIPTION_TAG: util.to_plain_string(self.description), 

1412 IS_A_SNAPSHOT_TAG: 0, 

1413 SNAPSHOT_OF_TAG: '', 

1414 SNAPSHOT_TIME_TAG: '', 

1415 TYPE_TAG: self.ty, 

1416 VDI_TYPE_TAG: self.vdi_type, 

1417 READ_ONLY_TAG: int(self.read_only), 

1418 MANAGED_TAG: int(self.managed), 

1419 METADATA_OF_POOL_TAG: '' 

1420 } 

1421 

1422 if not self.sr.legacyMode: 

1423 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info) 

1424 

1425 return VDI.VDI.get_params(self) 
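# Sizing policy used above, summarised (illustrative sketch, not normative):
#     raw       : lvSize = roundup(LVM_SIZE_INCREMENT, size)        # full size up front
#     thin VHD  : lvSize = roundup(LVM_SIZE_INCREMENT,
#                                  calcOverheadEmpty(MSIZE))        # metadata overhead only
#     thick VHD : lvSize = calcSizeVHDLV(size)                      # size plus VHD overhead
# For the VHD cases the virtual size is always `size`; thin and thick differ
# only in how much of the LV is allocated at creation time.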

1426 

1427 def delete(self, sr_uuid, vdi_uuid, data_only=False): 

1428 util.SMlog("LVHDVDI.delete for %s" % self.uuid) 

1429 try: 

1430 self._loadThis() 

1431 except SR.SRException as e: 

1432 # Catch 'VDI doesn't exist' exception 

1433 if e.errno == 46: 

1434 return super(LVHDVDI, self).delete(sr_uuid, vdi_uuid, data_only) 

1435 raise 

1436 

1437 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1438 if not self.session.xenapi.VDI.get_managed(vdi_ref): 

1439 raise xs_errors.XenError("VDIDelete", \ 

1440 opterr="Deleting non-leaf node not permitted") 

1441 

1442 if not self.hidden: 

1443 self._markHidden() 

1444 

1445 if not data_only: 

1446 # Remove from XAPI and delete from MGT 

1447 self._db_forget() 

1448 else: 

1449 # If this is a data_destroy call, don't remove from XAPI db 

1450 # Only delete from MGT 

1451 if not self.sr.legacyMode: 

1452 LVMMetadataHandler(self.sr.mdpath).deleteVdiFromMetadata(self.uuid) 

1453 

1454 # deactivate here because it might be too late to do it in the "final" 

1455 # step: GC might have removed the LV by then 

1456 if self.sr.lvActivator.get(self.uuid, False): 

1457 self.sr.lvActivator.deactivate(self.uuid, False) 

1458 

1459 try: 

1460 self.sr.lvmCache.remove(self.lvname) 

1461 self.sr.lock.cleanup(vdi_uuid, lvhdutil.NS_PREFIX_LVM + sr_uuid) 

1462 self.sr.lock.cleanupAll(vdi_uuid) 

1463 except SR.SRException as e: 

1464 util.SMlog( 

1465 "Failed to remove the volume (it may be leaf coalescing) " 

1466 "for %s err:%d" % (self.uuid, e.errno)) 

1467 

1468 self.sr._updateStats(self.sr.uuid, -self.size) 

1469 self.sr._kickGC() 

1470 return super(LVHDVDI, self).delete(sr_uuid, vdi_uuid, data_only) 
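# Note: deletion is deliberately lazy. The VDI is first marked hidden so the
# garbage collector treats it as unused, the LV removal itself is allowed to
# fail (e.g. while leaf coalescing is in progress), and _kickGC() lets the
# background GC reclaim whatever is left of the chain.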

1471 

1472 def attach(self, sr_uuid, vdi_uuid): 

1473 util.SMlog("LVHDVDI.attach for %s" % self.uuid) 

1474 if self.sr.journaler.hasJournals(self.uuid): 

1475 raise xs_errors.XenError('VDIUnavailable', 

1476 opterr='Interrupted operation detected on this VDI, ' 

1477 'scan SR first to trigger auto-repair') 

1478 

1479 writable = ('args' not in self.sr.srcmd.params) or \ 

1480 (self.sr.srcmd.params['args'][0] == "true") 

1481 needInflate = True 

1482 if self.vdi_type == vhdutil.VDI_TYPE_RAW or not writable: 

1483 needInflate = False 

1484 else: 

1485 self._loadThis() 

1486 if self.utilisation >= lvhdutil.calcSizeVHDLV(self.size): 

1487 needInflate = False 

1488 

1489 if needInflate: 

1490 try: 

1491 self._prepareThin(True) 

1492 except: 

1493 util.logException("attach") 

1494 raise xs_errors.XenError('LVMProvisionAttach') 

1495 

1496 try: 

1497 return self._attach() 

1498 finally: 

1499 if not self.sr.lvActivator.deactivateAll(): 

1500 util.SMlog("Failed to deactivate LVs back (%s)" % self.uuid) 
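# Note: for a writable VHD attach on a thin-provisioned SR, _prepareThin(True)
# inflates the LV up to calcSizeVHDLV(size) before the tapdisk starts, so
# writes should not run out of physical space mid-flight; read-only and raw
# attaches skip the inflate entirely.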

1501 

1502 def detach(self, sr_uuid, vdi_uuid): 

1503 util.SMlog("LVHDVDI.detach for %s" % self.uuid) 

1504 self._loadThis() 

1505 already_deflated = (self.utilisation < \ 

1506 lvhdutil.calcSizeVHDLV(self.size)) 

1507 needDeflate = True 

1508 if self.vdi_type == vhdutil.VDI_TYPE_RAW or already_deflated: 

1509 needDeflate = False 

1510 elif self.sr.provision == "thick": 

1511 needDeflate = False 

1512 # except for snapshots, which are always deflated 

1513 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1514 snap = self.session.xenapi.VDI.get_is_a_snapshot(vdi_ref) 

1515 if snap: 

1516 needDeflate = True 

1517 

1518 if needDeflate: 

1519 try: 

1520 self._prepareThin(False) 

1521 except: 

1522 util.logException("_prepareThin") 

1523 raise xs_errors.XenError('VDIUnavailable', opterr='deflate') 

1524 

1525 try: 

1526 self._detach() 

1527 finally: 

1528 if not self.sr.lvActivator.deactivateAll(): 

1529 raise xs_errors.XenError("SMGeneral", opterr="deactivation") 
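# Note: detach is the mirror image of attach: the LV is deflated back to its
# minimal size unless the SR is thick-provisioned, with the exception that
# snapshot VDIs are always deflated even on thick SRs.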

1530 

1531 # We only support offline resize 

1532 def resize(self, sr_uuid, vdi_uuid, size): 

1533 util.SMlog("LVHDVDI.resize for %s" % self.uuid) 

1534 if not self.sr.isMaster: 

1535 raise xs_errors.XenError('LVMMaster') 

1536 

1537 self._loadThis() 

1538 if self.hidden: 

1539 raise xs_errors.XenError('VDIUnavailable', opterr='hidden VDI') 

1540 

1541 if size < self.size: 

1542 util.SMlog('vdi_resize: shrinking not supported: ' + \ 

1543 '(current size: %d, new size: %d)' % (self.size, size)) 

1544 raise xs_errors.XenError('VDISize', opterr='shrinking not allowed') 

1545 

1546 size = vhdutil.validate_and_round_vhd_size(int(size)) 

1547 

1548 if size == self.size: 

1549 return VDI.VDI.get_params(self) 

1550 

1551 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1552 lvSizeOld = self.size 

1553 lvSizeNew = util.roundup(lvutil.LVM_SIZE_INCREMENT, size) 

1554 else: 

1555 lvSizeOld = self.utilisation 

1556 lvSizeNew = lvhdutil.calcSizeVHDLV(size) 

1557 if self.sr.provision == "thin": 

1558 # VDI is currently deflated, so keep it deflated 

1559 lvSizeNew = lvSizeOld 

1560 assert(lvSizeNew >= lvSizeOld) 

1561 spaceNeeded = lvSizeNew - lvSizeOld 

1562 self.sr._ensureSpaceAvailable(spaceNeeded) 

1563 

1564 oldSize = self.size 

1565 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1566 self.sr.lvmCache.setSize(self.lvname, lvSizeNew) 

1567 self.size = self.sr.lvmCache.getSize(self.lvname) 

1568 self.utilisation = self.size 

1569 else: 

1570 if lvSizeNew != lvSizeOld: 

1571 lvhdutil.inflate(self.sr.journaler, self.sr.uuid, self.uuid, 

1572 lvSizeNew) 

1573 vhdutil.setSizeVirtFast(self.path, size) 

1574 self.size = vhdutil.getSizeVirt(self.path) 

1575 self.utilisation = self.sr.lvmCache.getSize(self.lvname) 

1576 

1577 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1578 self.session.xenapi.VDI.set_virtual_size(vdi_ref, str(self.size)) 

1579 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, 

1580 str(self.utilisation)) 

1581 self.sr._updateStats(self.sr.uuid, self.size - oldSize) 

1582 super(LVHDVDI, self).resize_cbt(self.sr.uuid, self.uuid, self.size) 

1583 return VDI.VDI.get_params(self) 
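# Space accounting for the offline grow above (VHD, thick provisioning),
# sketched with the helpers used in this method (illustrative only):
#     lvSizeOld   = self.utilisation
#     lvSizeNew   = lvhdutil.calcSizeVHDLV(size)
#     spaceNeeded = lvSizeNew - lvSizeOld        # only the delta is reserved
# Raw VDIs simply have their LV grown to the rounded-up new size, and thin
# VHDs stay deflated (lvSizeNew == lvSizeOld) until the next attach.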

1584 

1585 def clone(self, sr_uuid, vdi_uuid): 

1586 return self._do_snapshot( 

1587 sr_uuid, vdi_uuid, VDI.SNAPSHOT_DOUBLE, cloneOp=True) 

1588 

1589 def compose(self, sr_uuid, vdi1, vdi2): 

1590 util.SMlog("LVHDVDI.compose for %s -> %s" % (vdi2, vdi1)) 

1591 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

1592 raise xs_errors.XenError('Unimplemented') 

1593 

1594 parent_uuid = vdi1 

1595 parent_lvname = lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD] + parent_uuid 

1596 assert(self.sr.lvmCache.checkLV(parent_lvname)) 

1597 parent_path = os.path.join(self.sr.path, parent_lvname) 

1598 

1599 self.sr.lvActivator.activate(self.uuid, self.lvname, False) 

1600 self.sr.lvActivator.activate(parent_uuid, parent_lvname, False) 

1601 

1602 vhdutil.setParent(self.path, parent_path, False) 

1603 vhdutil.setHidden(parent_path) 

1604 self.sr.session.xenapi.VDI.set_managed(self.sr.srcmd.params['args'][0], False) 

1605 

1606 if not blktap2.VDI.tap_refresh(self.session, self.sr.uuid, self.uuid, 

1607 True): 

1608 raise util.SMException("failed to refresh VDI %s" % self.uuid) 

1609 

1610 util.SMlog("Compose done") 
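# Note: compose() re-parents this VHD onto vdi1 (vhdutil.setParent), hides the
# new parent and marks it unmanaged, then forces a tap_refresh so a running
# tapdisk re-opens the chain with the new parent in place.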

1611 

1612 def reset_leaf(self, sr_uuid, vdi_uuid): 

1613 util.SMlog("LVHDVDI.reset_leaf for %s" % vdi_uuid) 

1614 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

1615 raise xs_errors.XenError('Unimplemented') 

1616 

1617 self.sr.lvActivator.activate(self.uuid, self.lvname, False) 

1618 

1619 # safety check 

1620 if not vhdutil.hasParent(self.path): 

1621 raise util.SMException(("ERROR: VDI %s has no parent, " 

1622 "will not reset contents") % self.uuid) 

1623 

1624 vhdutil.killData(self.path) 

1625 

1626 def _attach(self): 

1627 self._chainSetActive(True, True, True) 

1628 if not util.pathexists(self.path): 

1629 raise xs_errors.XenError('VDIUnavailable', \ 

1630 opterr='Could not find: %s' % self.path) 

1631 

1632 if not hasattr(self, 'xenstore_data'): 

1633 self.xenstore_data = {} 

1634 

1635 self.xenstore_data.update(scsiutil.update_XS_SCSIdata(self.uuid, \ 

1636 scsiutil.gen_synthetic_page_data(self.uuid))) 

1637 

1638 self.xenstore_data['storage-type'] = 'lvm' 

1639 self.xenstore_data['vdi-type'] = self.vdi_type 

1640 

1641 self.attached = True 

1642 self.sr.lvActivator.persist() 

1643 return VDI.VDI.attach(self, self.sr.uuid, self.uuid) 

1644 

1645 def _detach(self): 

1646 self._chainSetActive(False, True) 

1647 self.attached = False 

1648 

1649 def _do_snapshot(self, sr_uuid, vdi_uuid, snapType, 

1650 cloneOp=False, secondary=None, cbtlog=None): 

1651 # If cbt enabled, save file consistency state 

1652 if cbtlog is not None: 

1653 if blktap2.VDI.tap_status(self.session, vdi_uuid): 

1654 consistency_state = False 

1655 else: 

1656 consistency_state = True 

1657 util.SMlog("Saving log consistency state of %s for vdi: %s" % 

1658 (consistency_state, vdi_uuid)) 

1659 else: 

1660 consistency_state = None 

1661 

1662 pause_time = time.time() 

1663 if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid): 

1664 raise util.SMException("failed to pause VDI %s" % vdi_uuid) 

1665 

1666 snapResult = None 

1667 try: 

1668 snapResult = self._snapshot(snapType, cloneOp, cbtlog, consistency_state) 

1669 except Exception as e1: 

1670 try: 

1671 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, 

1672 secondary=None) 

1673 except Exception as e2: 

1674 util.SMlog('WARNING: failed to clean up failed snapshot: ' 

1675 '%s (error ignored)' % e2) 

1676 raise 

1677 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary) 

1678 unpause_time = time.time() 

1679 if (unpause_time - pause_time) > LONG_SNAPTIME: 

1680 util.SMlog('WARNING: snapshot paused VM for %s seconds' % 

1681 (unpause_time - pause_time)) 

1682 return snapResult 
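# Note: the snapshot proper is bracketed by tap_pause/tap_unpause so the VHD
# chain is quiescent while it is manipulated; CBT consistency is sampled from
# tap_status before pausing, and a warning is logged if the guest stayed
# paused for more than LONG_SNAPTIME seconds.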

1683 

1684 def _snapshot(self, snapType, cloneOp=False, cbtlog=None, cbt_consistency=None): 

1685 util.SMlog("LVHDVDI._snapshot for %s (type %s)" % (self.uuid, snapType)) 

1686 

1687 if not self.sr.isMaster: 

1688 raise xs_errors.XenError('LVMMaster') 

1689 if self.sr.legacyMode: 

1690 raise xs_errors.XenError('Unimplemented', opterr='In legacy mode') 

1691 

1692 self._loadThis() 

1693 if self.hidden: 

1694 raise xs_errors.XenError('VDISnapshot', opterr='hidden VDI') 

1695 

1696 self.sm_config = self.session.xenapi.VDI.get_sm_config( \ 

1697 self.sr.srcmd.params['vdi_ref']) 

1698 if "type" in self.sm_config and self.sm_config['type'] == 'raw': 

1699 if not util.fistpoint.is_active("testsm_clone_allow_raw"): 

1700 raise xs_errors.XenError('Unimplemented', \ 

1701 opterr='Raw VDI, snapshot or clone not permitted') 

1702 

1703 # we must activate the entire VHD chain because the real parent could 

1704 # theoretically be anywhere in the chain if all VHDs under it are empty 

1705 self._chainSetActive(True, False) 

1706 if not util.pathexists(self.path): 

1707 raise xs_errors.XenError('VDIUnavailable', \ 

1708 opterr='VDI unavailable: %s' % (self.path)) 

1709 

1710 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 

1711 depth = vhdutil.getDepth(self.path) 

1712 if depth == -1: 

1713 raise xs_errors.XenError('VDIUnavailable', \ 

1714 opterr='failed to get VHD depth') 

1715 elif depth >= vhdutil.MAX_CHAIN_SIZE: 

1716 raise xs_errors.XenError('SnapshotChainTooLong') 

1717 

1718 self.issnap = self.session.xenapi.VDI.get_is_a_snapshot( \ 

1719 self.sr.srcmd.params['vdi_ref']) 

1720 

1721 fullpr = lvhdutil.calcSizeVHDLV(self.size) 

1722 thinpr = util.roundup(lvutil.LVM_SIZE_INCREMENT, \ 

1723 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE)) 

1724 lvSizeOrig = thinpr 

1725 lvSizeClon = thinpr 

1726 

1727 hostRefs = [] 

1728 if self.sr.cmd == "vdi_snapshot": 

1729 hostRefs = util.get_hosts_attached_on(self.session, [self.uuid]) 

1730 if hostRefs: 

1731 lvSizeOrig = fullpr 

1732 if self.sr.provision == "thick": 

1733 if not self.issnap: 

1734 lvSizeOrig = fullpr 

1735 if self.sr.cmd != "vdi_snapshot": 

1736 lvSizeClon = fullpr 

1737 

1738 if (snapType == VDI.SNAPSHOT_SINGLE or 

1739 snapType == VDI.SNAPSHOT_INTERNAL): 

1740 lvSizeClon = 0 

1741 

1742 # the space required must include 2 journal LVs: a clone journal and an 

1743 # inflate journal (for the failure handling) 

1744 size_req = lvSizeOrig + lvSizeClon + 2 * self.sr.journaler.LV_SIZE 

1745 lvSizeBase = self.size 

1746 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 

1747 lvSizeBase = util.roundup(lvutil.LVM_SIZE_INCREMENT, 

1748 vhdutil.getSizePhys(self.path)) 

1749 size_req -= (self.utilisation - lvSizeBase) 

1750 self.sr._ensureSpaceAvailable(size_req) 

1751 

1752 if hostRefs: 

1753 self.sr._updateSlavesPreClone(hostRefs, self.lvname) 

1754 

1755 baseUuid = util.gen_uuid() 

1756 origUuid = self.uuid 

1757 clonUuid = "" 

1758 if snapType == VDI.SNAPSHOT_DOUBLE: 

1759 clonUuid = util.gen_uuid() 

1760 jval = "%s_%s" % (baseUuid, clonUuid) 

1761 with lvutil.LvmLockContext(): 

1762 # This makes multiple LVM calls so take the lock early 

1763 self.sr.journaler.create(self.JRN_CLONE, origUuid, jval) 

1764 util.fistpoint.activate("LVHDRT_clone_vdi_after_create_journal", self.sr.uuid) 

1765 

1766 try: 

1767 with lvutil.LvmLockContext(): 

1768 # self becomes the "base vdi" 

1769 origOldLV = self.lvname 

1770 baseLV = lvhdutil.LV_PREFIX[self.vdi_type] + baseUuid 

1771 self.sr.lvmCache.rename(self.lvname, baseLV) 

1772 self.sr.lvActivator.replace(self.uuid, baseUuid, baseLV, False) 

1773 RefCounter.set(baseUuid, 1, 0, lvhdutil.NS_PREFIX_LVM + self.sr.uuid) 

1774 self.uuid = baseUuid 

1775 self.lvname = baseLV 

1776 self.path = os.path.join(self.sr.path, baseLV) 

1777 self.label = "base copy" 

1778 self.read_only = True 

1779 self.location = self.uuid 

1780 self.managed = False 

1781 

1782 # shrink the base copy to the minimum - we do it before creating 

1783 # the snapshot volumes to avoid requiring double the space 

1784 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 

1785 lvhdutil.deflate(self.sr.lvmCache, self.lvname, lvSizeBase) 

1786 self.utilisation = lvSizeBase 

1787 util.fistpoint.activate("LVHDRT_clone_vdi_after_shrink_parent", self.sr.uuid) 

1788 

1789 snapVDI = self._createSnap(origUuid, lvSizeOrig, False) 

1790 util.fistpoint.activate("LVHDRT_clone_vdi_after_first_snap", self.sr.uuid) 

1791 snapVDI2 = None 

1792 if snapType == VDI.SNAPSHOT_DOUBLE: 

1793 snapVDI2 = self._createSnap(clonUuid, lvSizeClon, True) 

1794 # If we have CBT enabled on the VDI, 

1795 # set CBT status for the new snapshot disk 

1796 if cbtlog: 

1797 snapVDI2.cbt_enabled = True 

1798 util.fistpoint.activate("LVHDRT_clone_vdi_after_second_snap", self.sr.uuid) 

1799 

1800 # note: it is important to mark the parent hidden only AFTER the 

1801 # new VHD children have been created, which are referencing it; 

1802 # otherwise we would introduce a race with GC that could reclaim 

1803 # the parent before we snapshot it 

1804 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1805 self.sr.lvmCache.setHidden(self.lvname) 

1806 else: 

1807 vhdutil.setHidden(self.path) 

1808 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_hidden", self.sr.uuid) 

1809 

1810 # set the base copy to ReadOnly 

1811 # Do this outside the LvmLockContext to avoid deadlock 

1812 self.sr.lvmCache.setReadonly(self.lvname, True) 

1813 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_ro", self.sr.uuid) 

1814 

1815 if hostRefs: 

1816 self.sr._updateSlavesOnClone(hostRefs, origOldLV, 

1817 snapVDI.lvname, self.uuid, self.lvname) 

1818 

1819 # Update CBT files if the user created a snapshot (SNAPSHOT_DOUBLE) 

1820 if snapType == VDI.SNAPSHOT_DOUBLE and cbtlog: 

1821 snapVDI._cbt_snapshot(clonUuid, cbt_consistency) 

1822 if hostRefs: 

1823 cbtlog_file = self._get_cbt_logname(snapVDI.uuid) 

1824 try: 

1825 self.sr._updateSlavesOnCBTClone(hostRefs, cbtlog_file) 

1826 except: 

1827 alert_name = "VDI_CBT_SNAPSHOT_FAILED" 

1828 alert_str = ("Creating CBT snapshot for {} failed" 

1829 .format(snapVDI.uuid)) 

1830 snapVDI._disable_cbt_on_error(alert_name, alert_str) 

1831 pass 

1832 

1833 except (util.SMException, XenAPI.Failure) as e: 

1834 util.logException("LVHDVDI._snapshot") 

1835 self._failClone(origUuid, jval, str(e)) 

1836 util.fistpoint.activate("LVHDRT_clone_vdi_before_remove_journal", self.sr.uuid) 

1837 

1838 with lvutil.LvmLockContext(): 

1839 # This makes multiple LVM calls so take the lock early 

1840 self.sr.journaler.remove(self.JRN_CLONE, origUuid) 

1841 

1842 return self._finishSnapshot(snapVDI, snapVDI2, hostRefs, cloneOp, snapType) 
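# Rough outline of the clone/snapshot sequence implemented above (each step
# is crash-protected by the JRN_CLONE journal entry created at the start):
#     1. rename the original LV to a new "base copy" uuid and deflate it
#     2. create one snapshot VHD LV (or two for SNAPSHOT_DOUBLE) on top of it
#     3. hide the base copy and mark it read-only
#     4. propagate the LV renames to any slaves with the VDI attached
#     5. remove the journal and introduce the new VDI records in
#        _finishSnapshot()
# A failure before the journal is removed is handed to _failClone(), which
# cleans up the partial clone using that journal entry.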

1843 

1844 def _createSnap(self, snapUuid, snapSizeLV, isNew): 

1845 """Snapshot self and return the snapshot VDI object""" 

1846 snapLV = lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD] + snapUuid 

1847 snapPath = os.path.join(self.sr.path, snapLV) 

1848 self.sr.lvmCache.create(snapLV, int(snapSizeLV)) 

1849 util.fistpoint.activate("LVHDRT_clone_vdi_after_lvcreate", self.sr.uuid) 

1850 if isNew: 

1851 RefCounter.set(snapUuid, 1, 0, lvhdutil.NS_PREFIX_LVM + self.sr.uuid) 

1852 self.sr.lvActivator.add(snapUuid, snapLV, False) 

1853 parentRaw = (self.vdi_type == vhdutil.VDI_TYPE_RAW) 

1854 vhdutil.snapshot(snapPath, self.path, parentRaw, lvhdutil.MSIZE_MB) 

1855 snapParent = vhdutil.getParent(snapPath, lvhdutil.extractUuid) 

1856 

1857 snapVDI = LVHDVDI(self.sr, snapUuid) 

1858 snapVDI.read_only = False 

1859 snapVDI.location = snapUuid 

1860 snapVDI.size = self.size 

1861 snapVDI.utilisation = snapSizeLV 

1862 snapVDI.sm_config = dict() 

1863 for key, val in self.sm_config.items(): 

1864 if key not in [ 

1865 "type", "vdi_type", "vhd-parent", "paused", "relinking", "activating"] and \ 

1866 not key.startswith("host_"): 

1867 snapVDI.sm_config[key] = val 

1868 snapVDI.sm_config["vdi_type"] = vhdutil.VDI_TYPE_VHD 

1869 snapVDI.sm_config["vhd-parent"] = snapParent 

1870 snapVDI.lvname = snapLV 

1871 return snapVDI 
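# Note: vhdutil.snapshot() creates the new VHD with self.path as its parent;
# parentRaw tells it whether that parent is a raw LV rather than a VHD. The
# returned LVHDVDI only carries the in-memory fields needed later; any new
# XAPI records are introduced in _finishSnapshot(), not here.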

1872 

1873 def _finishSnapshot(self, snapVDI, snapVDI2, hostRefs, cloneOp=False, snapType=None): 

1874 if snapType is not VDI.SNAPSHOT_INTERNAL: 

1875 self.sr._updateStats(self.sr.uuid, self.size) 

1876 basePresent = True 

1877 

1878 # Verify parent locator field of both children and delete basePath if 

1879 # unused 

1880 snapParent = snapVDI.sm_config["vhd-parent"] 

1881 snap2Parent = "" 

1882 if snapVDI2: 

1883 snap2Parent = snapVDI2.sm_config["vhd-parent"] 

1884 if snapParent != self.uuid and \ 

1885 (not snapVDI2 or snap2Parent != self.uuid): 

1886 util.SMlog("%s != %s != %s => deleting unused base %s" % \ 

1887 (snapParent, self.uuid, snap2Parent, self.lvname)) 

1888 RefCounter.put(self.uuid, False, lvhdutil.NS_PREFIX_LVM + self.sr.uuid) 

1889 self.sr.lvmCache.remove(self.lvname) 

1890 self.sr.lvActivator.remove(self.uuid, False) 

1891 if hostRefs: 

1892 self.sr._updateSlavesOnRemove(hostRefs, self.uuid, self.lvname) 

1893 basePresent = False 

1894 else: 

1895 # assign the _binary_ refcount of the original VDI to the new base 

1896 # VDI (but as the normal refcount, since binary refcounts are only 

1897 # for leaf nodes). The normal refcount of the child is not 

1898 # transferred to the base VDI because normal refcounts are 

1899 # incremented and decremented individually, and not based on the 

1900 # VHD chain (i.e., the child's normal refcount will be decremented 

1901 # independently of its parent situation). Add 1 for this clone op. 

1902 # Note that we do not need to protect the refcount operations 

1903 # below with per-VDI locking like we do in lvutil because at this 

1904 # point we have exclusive access to the VDIs involved. Other SM 

1905 # operations are serialized by the Agent or with the SR lock, and 

1906 # any coalesce activations are serialized with the SR lock. (The 

1907 # coalesce activates the coalesced VDI pair in the beginning, which 

1908 # cannot affect the VDIs here because they cannot possibly be 

1909 # involved in coalescing at this point, and at the relinkSkip step 

1910 # that activates the children, which takes the SR lock.) 

1911 ns = lvhdutil.NS_PREFIX_LVM + self.sr.uuid 

1912 (cnt, bcnt) = RefCounter.check(snapVDI.uuid, ns) 

1913 RefCounter.set(self.uuid, bcnt + 1, 0, ns) 

1914 

1915 # the "paused" and "host_*" sm-config keys are special and must stay on 

1916 # the leaf without being inherited by anyone else 

1917 for key in [x for x in self.sm_config.keys() if x == "paused" or x.startswith("host_")]: 

1918 snapVDI.sm_config[key] = self.sm_config[key] 

1919 del self.sm_config[key] 

1920 

1921 # Introduce any new VDI records & update the existing one 

1922 type = self.session.xenapi.VDI.get_type( \ 

1923 self.sr.srcmd.params['vdi_ref']) 

1924 if snapVDI2: 

1925 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1) 

1926 vdiRef = snapVDI2._db_introduce() 

1927 if cloneOp: 

1928 vdi_info = {UUID_TAG: snapVDI2.uuid, 

1929 NAME_LABEL_TAG: util.to_plain_string( \ 

1930 self.session.xenapi.VDI.get_name_label( \ 

1931 self.sr.srcmd.params['vdi_ref'])), 

1932 NAME_DESCRIPTION_TAG: util.to_plain_string( \ 

1933 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])), 

1934 IS_A_SNAPSHOT_TAG: 0, 

1935 SNAPSHOT_OF_TAG: '', 

1936 SNAPSHOT_TIME_TAG: '', 

1937 TYPE_TAG: type, 

1938 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'], 

1939 READ_ONLY_TAG: 0, 

1940 MANAGED_TAG: int(snapVDI2.managed), 

1941 METADATA_OF_POOL_TAG: '' 

1942 } 

1943 else: 

1944 util.SMlog("snapshot VDI params: %s" % \ 

1945 self.session.xenapi.VDI.get_snapshot_time(vdiRef)) 

1946 vdi_info = {UUID_TAG: snapVDI2.uuid, 

1947 NAME_LABEL_TAG: util.to_plain_string( \ 

1948 self.session.xenapi.VDI.get_name_label( \ 

1949 self.sr.srcmd.params['vdi_ref'])), 

1950 NAME_DESCRIPTION_TAG: util.to_plain_string( \ 

1951 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])), 

1952 IS_A_SNAPSHOT_TAG: 1, 

1953 SNAPSHOT_OF_TAG: snapVDI.uuid, 

1954 SNAPSHOT_TIME_TAG: '', 

1955 TYPE_TAG: type, 

1956 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'], 

1957 READ_ONLY_TAG: 0, 

1958 MANAGED_TAG: int(snapVDI2.managed), 

1959 METADATA_OF_POOL_TAG: '' 

1960 } 

1961 

1962 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info) 

1963 util.SMlog("vdi_clone: introduced 2nd snap VDI: %s (%s)" % \ 

1964 (vdiRef, snapVDI2.uuid)) 

1965 

1966 if basePresent: 

1967 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1) 

1968 vdiRef = self._db_introduce() 

1969 vdi_info = {UUID_TAG: self.uuid, 

1970 NAME_LABEL_TAG: self.label, 

1971 NAME_DESCRIPTION_TAG: self.description, 

1972 IS_A_SNAPSHOT_TAG: 0, 

1973 SNAPSHOT_OF_TAG: '', 

1974 SNAPSHOT_TIME_TAG: '', 

1975 TYPE_TAG: type, 

1976 VDI_TYPE_TAG: self.sm_config['vdi_type'], 

1977 READ_ONLY_TAG: 1, 

1978 MANAGED_TAG: 0, 

1979 METADATA_OF_POOL_TAG: '' 

1980 } 

1981 

1982 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info) 

1983 util.SMlog("vdi_clone: introduced base VDI: %s (%s)" % \ 

1984 (vdiRef, self.uuid)) 

1985 

1986 # Update the original record 

1987 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1988 self.session.xenapi.VDI.set_sm_config(vdi_ref, snapVDI.sm_config) 

1989 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, \ 

1990 str(snapVDI.utilisation)) 

1991 

1992 # Return the info on the new snap VDI 

1993 snap = snapVDI2 

1994 if not snap: 

1995 snap = self 

1996 if not basePresent: 

1997 # a single-snapshot of an empty VDI will be a noop, resulting 

1998 # in no new VDIs, so return the existing one. The GC wouldn't 

1999 # normally try to single-snapshot an empty VHD of course, but 

2000 # if an external snapshot operation manages to sneak in right 

2001 # before a snapshot-coalesce phase, we would get here 

2002 snap = snapVDI 

2003 return snap.get_params() 

2004 

2005 def _initFromVDIInfo(self, vdiInfo): 

2006 self.vdi_type = vdiInfo.vdiType 

2007 self.lvname = vdiInfo.lvName 

2008 self.size = vdiInfo.sizeVirt 

2009 self.utilisation = vdiInfo.sizeLV 

2010 self.hidden = vdiInfo.hidden 

2011 if self.hidden: 

2012 self.managed = False 

2013 self.active = vdiInfo.lvActive 

2014 self.readonly = vdiInfo.lvReadonly 

2015 self.parent = vdiInfo.parentUuid 

2016 self.path = os.path.join(self.sr.path, self.lvname) 

2017 if hasattr(self, "sm_config_override"): 

2018 self.sm_config_override["vdi_type"] = self.vdi_type 

2019 else: 

2020 self.sm_config_override = {'vdi_type': self.vdi_type} 

2021 self.loaded = True 

2022 

2023 def _initFromLVInfo(self, lvInfo): 

2024 self.vdi_type = lvInfo.vdiType 

2025 self.lvname = lvInfo.name 

2026 self.size = lvInfo.size 

2027 self.utilisation = lvInfo.size 

2028 self.hidden = lvInfo.hidden 

2029 self.active = lvInfo.active 

2030 self.readonly = lvInfo.readonly 

2031 self.parent = '' 

2032 self.path = os.path.join(self.sr.path, self.lvname) 

2033 if hasattr(self, "sm_config_override"): 

2034 self.sm_config_override["vdi_type"] = self.vdi_type 

2035 else: 

2036 self.sm_config_override = {'vdi_type': self.vdi_type} 

2037 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

2038 self.loaded = True 

2039 

2040 def _initFromVHDInfo(self, vhdInfo): 

2041 self.size = vhdInfo.sizeVirt 

2042 self.parent = vhdInfo.parentUuid 

2043 self.hidden = vhdInfo.hidden 

2044 self.loaded = True 

2045 

2046 def _determineType(self): 

2047 """Determine whether this is a raw or a VHD VDI""" 

2048 if "vdi_ref" in self.sr.srcmd.params: 

2049 vdi_ref = self.sr.srcmd.params["vdi_ref"] 

2050 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

2051 if sm_config.get("vdi_type"): 

2052 self.vdi_type = sm_config["vdi_type"] 

2053 prefix = lvhdutil.LV_PREFIX[self.vdi_type] 

2054 self.lvname = "%s%s" % (prefix, self.uuid) 

2055 self.path = os.path.join(self.sr.path, self.lvname) 

2056 self.sm_config_override = sm_config 

2057 return True 

2058 

2059 # LVM commands can be costly, so check the file directly first in case 

2060 # the LV is active 

2061 found = False 

2062 for t in lvhdutil.VDI_TYPES: 

2063 lvname = "%s%s" % (lvhdutil.LV_PREFIX[t], self.uuid) 

2064 path = os.path.join(self.sr.path, lvname) 

2065 if util.pathexists(path): 

2066 if found: 

2067 raise xs_errors.XenError('VDILoad', 

2068 opterr="multiple VDIs: uuid %s" % self.uuid) 

2069 found = True 

2070 self.vdi_type = t 

2071 self.lvname = lvname 

2072 self.path = path 

2073 if found: 

2074 return True 

2075 

2076 # now list all LVs 

2077 if not lvutil._checkVG(self.sr.vgname): 

2078 # when doing attach_from_config, the VG won't be there yet 

2079 return False 

2080 

2081 lvs = lvhdutil.getLVInfo(self.sr.lvmCache) 

2082 if lvs.get(self.uuid): 

2083 self._initFromLVInfo(lvs[self.uuid]) 

2084 return True 

2085 return False 
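# Note: type detection tries the cheapest sources first: the vdi_type hint in
# the VDI's sm-config, then a direct path check for each possible LV prefix
# (which only works if the LV happens to be active), and finally a full LVM
# scan of the volume group via lvhdutil.getLVInfo().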

2086 

2087 def _loadThis(self): 

2088 """Load VDI info for this VDI and activate the LV if it's VHD. We 

2089 don't do it in VDI.load() because not all VDI operations need it.""" 

2090 if self.loaded: 

2091 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 

2092 self.sr.lvActivator.activate(self.uuid, self.lvname, False) 

2093 return 

2094 try: 

2095 lvs = lvhdutil.getLVInfo(self.sr.lvmCache, self.lvname) 

2096 except util.CommandException as e: 

2097 raise xs_errors.XenError('VDIUnavailable', 

2098 opterr='%s (LV scan error)' % os.strerror(abs(e.code))) 

2099 if not lvs.get(self.uuid): 

2100 raise xs_errors.XenError('VDIUnavailable', opterr='LV not found') 

2101 self._initFromLVInfo(lvs[self.uuid]) 

2102 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 

2103 self.sr.lvActivator.activate(self.uuid, self.lvname, False) 

2104 vhdInfo = vhdutil.getVHDInfo(self.path, lvhdutil.extractUuid, False) 

2105 if not vhdInfo: 

2106 raise xs_errors.XenError('VDIUnavailable', \ 

2107 opterr='getVHDInfo failed') 

2108 self._initFromVHDInfo(vhdInfo) 

2109 self.loaded = True 

2110 

2111 def _chainSetActive(self, active, binary, persistent=False): 

2112 if binary: 

2113 (count, bcount) = RefCounter.checkLocked(self.uuid, 

2114 lvhdutil.NS_PREFIX_LVM + self.sr.uuid) 

2115 if (active and bcount > 0) or (not active and bcount == 0): 

2116 return # this is a redundant activation/deactivation call 

2117 

2118 vdiList = {self.uuid: self.lvname} 

2119 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 

2120 vdiList = vhdutil.getParentChain(self.lvname, 

2121 lvhdutil.extractUuid, self.sr.vgname) 

2122 for uuid, lvName in vdiList.items(): 

2123 binaryParam = binary 

2124 if uuid != self.uuid: 

2125 binaryParam = False # binary param only applies to leaf nodes 

2126 if active: 

2127 self.sr.lvActivator.activate(uuid, lvName, binaryParam, 

2128 persistent) 

2129 else: 

2130 # just add the LVs for deactivation in the final (cleanup) 

2131 # step. The LVs must not have been activated during the current 

2132 # operation 

2133 self.sr.lvActivator.add(uuid, lvName, binaryParam) 

2134 

2135 def _failClone(self, uuid, jval, msg): 

2136 try: 

2137 self.sr._handleInterruptedCloneOp(uuid, jval, True) 

2138 self.sr.journaler.remove(self.JRN_CLONE, uuid) 

2139 except Exception as e: 

2140 util.SMlog('WARNING: failed to clean up failed snapshot: ' \ 

2141 ' %s (error ignored)' % e) 

2142 raise xs_errors.XenError('VDIClone', opterr=msg) 

2143 

2144 def _markHidden(self): 

2145 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

2146 self.sr.lvmCache.setHidden(self.lvname) 

2147 else: 

2148 vhdutil.setHidden(self.path) 

2149 self.hidden = 1 

2150 

2151 def _prepareThin(self, attach): 

2152 origUtilisation = self.sr.lvmCache.getSize(self.lvname) 

2153 if self.sr.isMaster: 

2154 # the master can prepare the VDI locally 

2155 if attach: 

2156 lvhdutil.attachThin(self.sr.journaler, self.sr.uuid, self.uuid) 

2157 else: 

2158 lvhdutil.detachThin(self.session, self.sr.lvmCache, 

2159 self.sr.uuid, self.uuid) 

2160 else: 

2161 fn = "attach" 

2162 if not attach: 

2163 fn = "detach" 

2164 pools = self.session.xenapi.pool.get_all() 

2165 master = self.session.xenapi.pool.get_master(pools[0]) 

2166 rv = self.session.xenapi.host.call_plugin( 

2167 master, self.sr.THIN_PLUGIN, fn, 

2168 {"srUuid": self.sr.uuid, "vdiUuid": self.uuid}) 

2169 util.SMlog("call-plugin returned: %s" % rv) 

2170 if not rv: 

2171 raise Exception('plugin %s failed' % self.sr.THIN_PLUGIN) 

2172 # refresh to pick up the size change on this slave 

2173 self.sr.lvmCache.activateNoRefcount(self.lvname, True) 

2174 

2175 self.utilisation = self.sr.lvmCache.getSize(self.lvname) 

2176 if origUtilisation != self.utilisation: 

2177 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

2178 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, 

2179 str(self.utilisation)) 

2180 stats = lvutil._getVGstats(self.sr.vgname) 

2181 sr_utilisation = stats['physical_utilisation'] 

2182 self.session.xenapi.SR.set_physical_utilisation(self.sr.sr_ref, 

2183 str(sr_utilisation)) 
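# Note: inflate/deflate for thin provisioning is driven from the pool master:
# the master handles its own VDIs locally, while a slave asks the master via
# the self.sr.THIN_PLUGIN host plugin and then refreshes the LV locally to
# pick up the new size; the resulting utilisation figures for the VDI and the
# SR are pushed back into XAPI.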

2184 

2185 def update(self, sr_uuid, vdi_uuid): 

2186 if self.sr.legacyMode: 

2187 return 

2188 

2189 # Sync the name_label of this VDI on storage with the name_label in XAPI 

2190 vdi_ref = self.session.xenapi.VDI.get_by_uuid(self.uuid) 

2191 update_map = {} 

2192 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \ 

2193 METADATA_OBJECT_TYPE_VDI 

2194 update_map[UUID_TAG] = self.uuid 

2195 update_map[NAME_LABEL_TAG] = util.to_plain_string( \ 

2196 self.session.xenapi.VDI.get_name_label(vdi_ref)) 

2197 update_map[NAME_DESCRIPTION_TAG] = util.to_plain_string( \ 

2198 self.session.xenapi.VDI.get_name_description(vdi_ref)) 

2199 update_map[SNAPSHOT_TIME_TAG] = \ 

2200 self.session.xenapi.VDI.get_snapshot_time(vdi_ref) 

2201 update_map[METADATA_OF_POOL_TAG] = \ 

2202 self.session.xenapi.VDI.get_metadata_of_pool(vdi_ref) 

2203 LVMMetadataHandler(self.sr.mdpath).updateMetadata(update_map) 

2204 

2205 def _ensure_cbt_space(self): 

2206 self.sr.ensureCBTSpace() 

2207 

2208 def _create_cbt_log(self): 

2209 logname = self._get_cbt_logname(self.uuid) 

2210 self.sr.lvmCache.create(logname, self.sr.journaler.LV_SIZE, CBTLOG_TAG) 

2211 logpath = super(LVHDVDI, self)._create_cbt_log() 

2212 self.sr.lvmCache.deactivateNoRefcount(logname) 

2213 return logpath 

2214 

2215 def _delete_cbt_log(self): 

2216 logpath = self._get_cbt_logpath(self.uuid) 

2217 if self._cbt_log_exists(logpath): 

2218 logname = self._get_cbt_logname(self.uuid) 

2219 self.sr.lvmCache.remove(logname) 

2220 

2221 def _rename(self, oldpath, newpath): 

2222 oldname = os.path.basename(oldpath) 

2223 newname = os.path.basename(newpath) 

2224 self.sr.lvmCache.rename(oldname, newname) 

2225 

2226 def _activate_cbt_log(self, lv_name): 

2227 self.sr.lvmCache.refresh() 

2228 if not self.sr.lvmCache.is_active(lv_name): 

2229 try: 

2230 self.sr.lvmCache.activateNoRefcount(lv_name) 

2231 return True 

2232 except Exception as e: 

2233 util.SMlog("Exception in _activate_cbt_log, " 

2234 "Error: %s." % str(e)) 

2235 else: 

2236 return False 

2237 

2238 def _deactivate_cbt_log(self, lv_name): 

2239 try: 

2240 self.sr.lvmCache.deactivateNoRefcount(lv_name) 

2241 except Exception as e: 

2242 util.SMlog("Exception in _deactivate_cbt_log, Error: %s." % str(e)) 

2243 

2244 def _cbt_log_exists(self, logpath): 

2245 return lvutil.exists(logpath) 

2246 

2247if __name__ == '__main__': 

2248 SRCommand.run(LVHDSR, DRIVER_INFO) 

2249else: 

2250 SR.registerSR(LVHDSR)