Hide keyboard shortcuts

Hot-keys on this page

r m x p   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

1#!/usr/bin/python3 

2# 

3# Copyright (C) Citrix Systems Inc. 

4# 

5# This program is free software; you can redistribute it and/or modify 

6# it under the terms of the GNU Lesser General Public License as published 

7# by the Free Software Foundation; version 2.1 only. 

8# 

9# This program is distributed in the hope that it will be useful, 

10# but WITHOUT ANY WARRANTY; without even the implied warranty of 

11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

12# GNU Lesser General Public License for more details. 

13# 

14# You should have received a copy of the GNU Lesser General Public License 

15# along with this program; if not, write to the Free Software Foundation, Inc., 

16# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 

17# 

18# FileSR: local-file storage repository 

19 

20import SR 

21import VDI 

22import SRCommand 

23import util 

24import scsiutil 

25import vhdutil 

26import os 

27import errno 

28import xs_errors 

29import cleanup 

30import blktap2 

31import time 

32import glob 

33from uuid import uuid4 

34from lock import Lock 

35import xmlrpc.client 

36import XenAPI # pylint: disable=import-error 

37from constants import CBTLOG_TAG 

38 

# Map of parent-VDI uuid -> list of child uuids, built during SR scans.
# NOTE(review): module-level and never cleared in this file — entries
# presumably accumulate across scans within one process; confirm callers.
geneology = {}

# Operations this driver advertises to XAPI.
CAPABILITIES = ["SR_PROBE", "SR_UPDATE", \
                "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH", \
                "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "VDI_MIRROR",
                "VDI_GENERATE_CONFIG", "ATOMIC_PAUSE", "VDI_CONFIG_CBT",
                "VDI_ACTIVATE", "VDI_DEACTIVATE", "THIN_PROVISIONING"]

# Device-config keys understood by this driver.
CONFIGURATION = [['location', 'local directory path (required)']]

# Driver metadata reported to XAPI.
DRIVER_INFO = {
    'name': 'Local Path VHD',
    'description': 'SR plugin which represents disks as VHD files stored on a local path',
    'vendor': 'Citrix Systems Inc',
    'copyright': '(C) 2008 Citrix Systems Inc',
    'driver_version': '1.0',
    'required_api_version': '1.0',
    'capabilities': CAPABILITIES,
    'configuration': CONFIGURATION
    }

# Prefix for per-VDI resize journal files created in the SR directory.
JOURNAL_FILE_PREFIX = ".journal-"

# Operations that must hold the SR-wide lock.
OPS_EXCLUSIVE = [
        "sr_create", "sr_delete", "sr_probe", "sr_attach", "sr_detach",
        "sr_scan", "vdi_init", "vdi_create", "vdi_delete", "vdi_attach",
        "vdi_detach", "vdi_resize_online", "vdi_snapshot", "vdi_clone"]

DRIVER_CONFIG = {"ATTACH_FROM_CONFIG_WITH_TAPDISK": True}

67 

68 

class FileSR(SR.SR):
    """Local file storage repository"""

    SR_TYPE = "file"

    def handles(srtype):
        """Return True iff this driver handles SRs of the given type."""
        return srtype == 'file'
    handles = staticmethod(handles)

77 

78 def _check_o_direct(self): 

79 if self.sr_ref and self.session is not None: 

80 other_config = self.session.xenapi.SR.get_other_config(self.sr_ref) 

81 o_direct = other_config.get("o_direct") 

82 self.o_direct = o_direct is not None and o_direct == "true" 

83 else: 

84 self.o_direct = True 

85 

def __init__(self, srcmd, sr_uuid):
    """Initialise the SR and cache the O_DIRECT preference up front."""
    # We call SR.SR.__init__ explicitly because
    # "super" sometimes failed due to circular imports
    SR.SR.__init__(self, srcmd, sr_uuid)
    self._check_o_direct()

91 

def load(self, sr_uuid):
    """Populate SR state from device-config.

    Raises xs_errors.XenError('ConfigLocationMissing') when the
    'location' device-config key is absent or empty.
    """
    self.ops_exclusive = OPS_EXCLUSIVE
    # SR-wide lock serialising exclusive operations
    self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
    self.sr_vditype = vhdutil.VDI_TYPE_VHD
    if 'location' not in self.dconf or not self.dconf['location']:
        raise xs_errors.XenError('ConfigLocationMissing')
    self.remotepath = self.dconf['location']
    self.path = os.path.join(SR.MOUNT_BASE, sr_uuid)
    # For a plain file SR, link path and mountpoint are the SR path itself
    self.linkpath = self.path
    self.mountpoint = self.path
    self.attached = False
    self.driver_config = DRIVER_CONFIG

104 

def create(self, sr_uuid, size):
    """Create the SR.

    The path must not already exist, or if it does, it must be empty.
    (This accounts for the case where the user has mounted a device onto
    a directory manually and wants to use this as the root of a
    file-based SR.)

    Raises:
        xs_errors.XenError('SRExists'): directory exists and is non-empty.
        xs_errors.XenError('FileSRCreate'): directory creation failed.
    """
    try:
        if util.ioretry(lambda: util.pathexists(self.remotepath)):
            if len(util.ioretry(lambda: util.listdir(self.remotepath))) != 0:
                raise xs_errors.XenError('SRExists')
        else:
            try:
                util.ioretry(lambda: os.mkdir(self.remotepath))
            except util.CommandException as inst:
                if inst.code == errno.EEXIST:
                    raise xs_errors.XenError('SRExists')
                else:
                    raise xs_errors.XenError(
                        'FileSRCreate',
                        opterr='directory creation failure %d' % inst.code)
    except util.CommandException:
        # BUGFIX: this used to be a bare "except:" which also caught the
        # specific XenErrors raised above (e.g. SRExists) and re-raised
        # them as a generic FileSRCreate; now only I/O retry failures
        # are converted.
        raise xs_errors.XenError('FileSRCreate')

126 

def delete(self, sr_uuid):
    """Delete the SR: force a GC pass, verify no live VDIs remain,
    remove leftover non-VDI files, then detach.

    Raises xs_errors.XenError('SRNotEmpty') when any non-deleted VDI
    remains and 'FileSRDelete' on filesystem errors.
    """
    self.attach(sr_uuid)
    cleanup.gc_force(self.session, self.uuid)

    # check to make sure no VDIs are present; then remove old
    # files that are non VDI's
    try:
        if util.ioretry(lambda: util.pathexists(self.path)):
            # Load the VDI list
            self._loadvdis()
            for uuid in self.vdis:
                if not self.vdis[uuid].deleted:
                    raise xs_errors.XenError('SRNotEmpty', \
                            opterr='VDIs still exist in SR')

            # remove everything else, there are no vdi's
            for name in util.ioretry(lambda: util.listdir(self.path)):
                fullpath = os.path.join(self.path, name)
                try:
                    util.ioretry(lambda: os.unlink(fullpath))
                except util.CommandException as inst:
                    # Tolerate files already gone and subdirectories
                    if inst.code != errno.ENOENT and \
                            inst.code != errno.EISDIR:
                        raise xs_errors.XenError('FileSRDelete', \
                                opterr='failed to remove %s error %d' \
                                % (fullpath, inst.code))
        self.detach(sr_uuid)
    except util.CommandException as inst:
        # Best-effort detach before surfacing the failure
        self.detach(sr_uuid)
        raise xs_errors.XenError('FileSRDelete', \
                opterr='error %d' % inst.code)

158 

def attach(self, sr_uuid, bind=True):
    """Mount (bind-mount by default) self.remotepath onto the SR path.

    Raises xs_errors.XenError('FileSRCreate') when the mount point
    cannot be created or the mount itself fails.
    """
    if not self._checkmount():
        try:
            # 0o700: SR contents must not be readable by other users
            util.ioretry(lambda: util.makedirs(self.path, mode=0o700))
        except util.CommandException as inst:
            if inst.code != errno.EEXIST:
                raise xs_errors.XenError("FileSRCreate", \
                        opterr='fail to create mount point. Errno is %s' % inst.code)
        try:
            cmd = ["mount", self.remotepath, self.path]
            if bind:
                cmd.append("--bind")
            util.pread(cmd)
            os.chmod(self.path, mode=0o0700)
        except util.CommandException as inst:
            raise xs_errors.XenError('FileSRCreate', \
                    opterr='fail to mount FileSR. Errno is %s' % inst.code)
    self.attached = True

177 

def detach(self, sr_uuid):
    """Abort any GC/coalesce, unmount the SR and remove its mount point.

    Raises xs_errors.XenError('SRInUse') when teardown fails.
    """
    if self._checkmount():
        try:
            util.SMlog("Aborting GC/coalesce")
            cleanup.abort(self.uuid)
            # Step out of the mount point before unmounting it
            os.chdir(SR.MOUNT_BASE)
            util.pread(["umount", self.path])
            os.rmdir(self.path)
        except Exception as e:
            raise xs_errors.XenError('SRInUse', opterr=str(e))
    self.attached = False

189 

def scan(self, sr_uuid):
    """Scan the SR: load VDIs, refresh stats, prune deleted entries,
    kick the GC, then delegate to the generic SR.scan."""
    if not self._checkmount():
        raise xs_errors.XenError('SRUnavailable', \
                opterr='no such directory %s' % self.path)

    if not self.vdis:
        self._loadvdis()

    if not self.passthrough:
        self.physical_size = self._getsize()
        self.physical_utilisation = self._getutilisation()

    # list() because entries may be deleted while iterating
    for uuid in list(self.vdis.keys()):
        if self.vdis[uuid].deleted:
            del self.vdis[uuid]

    # CA-15607: make sure we are robust to the directory being unmounted beneath
    # us (eg by a confused user). Without this we might forget all our VDI references
    # which would be a shame.
    # For SMB SRs, this path is mountpoint
    mount_path = self.path
    if self.handles("smb"):
        mount_path = self.mountpoint

    if not self.handles("file") and not os.path.ismount(mount_path):
        util.SMlog("Error: FileSR.scan called but directory %s isn't a mountpoint" % mount_path)
        raise xs_errors.XenError('SRUnavailable', \
                opterr='not mounted %s' % mount_path)

    self._kickGC()

    # default behaviour from here on
    super(FileSR, self).scan(sr_uuid)

223 

def update(self, sr_uuid):
    """Refresh this SR's size/utilisation statistics in the database."""
    if self._checkmount():
        # Delegate with a zero virtual-allocation delta.
        self._update(sr_uuid, 0)
        return
    raise xs_errors.XenError('SRUnavailable', \
            opterr='no such directory %s' % self.path)

229 

def _update(self, sr_uuid, virt_alloc_delta):
    """Apply a virtual-allocation delta and push fresh stats to XAPI."""
    valloc = int(self.session.xenapi.SR.get_virtual_allocation(self.sr_ref))
    self.virtual_allocation = valloc + virt_alloc_delta
    self.physical_size = self._getsize()
    self.physical_utilisation = self._getutilisation()
    self._db_update()

236 

def content_type(self, sr_uuid):
    """Return the SR content type (delegates to the generic SR)."""
    return super(FileSR, self).content_type(sr_uuid)

def vdi(self, uuid):
    """Factory for this SR's VDI objects."""
    return FileVDI(self, uuid)

242 

def added_vdi(self, vdi):
    """Register a VDI object in this SR's in-memory table."""
    self.vdis[vdi.uuid] = vdi

def deleted_vdi(self, uuid):
    """Drop a VDI from the in-memory table; no-op when absent."""
    self.vdis.pop(uuid, None)

249 

def replay(self, uuid):
    """Replay incomplete operations recorded in the SR's file log.

    Raises xs_errors.XenError('SRLog') when the log cannot be read or
    replayed.
    """
    try:
        # "with" guarantees the handle is closed even when readlines()
        # fails (the old open()/close() pair leaked it on error). Also
        # avoids shadowing the builtin name "file".
        with open(self.path + "/filelog.txt", "r") as logfile:
            data = logfile.readlines()
        self._process_replay(data)
    except:
        raise xs_errors.XenError('SRLog')

258 

def _loadvdis(self):
    """Populate self.vdis from the VHD, raw and CBT-log files in the SR.

    Also marks parent VHDs read-only, records parent/child links in the
    module-level `geneology` map, accumulates virtual allocation, and
    drops hidden leaf VDIs (freshly deleted, awaiting GC) so they are
    not re-introduced into the XAPI database.
    """
    if self.vdis:
        return

    pattern = os.path.join(self.path, "*%s" % vhdutil.FILE_EXTN_VHD)
    try:
        self.vhds = vhdutil.getAllVHDs(pattern, FileVDI.extractUuid)
    except util.CommandException as inst:
        raise xs_errors.XenError('SRScan', opterr="error VHD-scanning " \
                "path %s (%s)" % (self.path, inst))
    try:
        # Best-effort cross-check of the VHD scan against a plain glob;
        # mismatches are only logged, never fatal.
        list_vhds = [FileVDI.extractUuid(v) for v in util.ioretry(lambda: glob.glob(pattern))]
        if len(self.vhds) != len(list_vhds):
            util.SMlog("VHD scan returns %d VHDs: %s" % (len(self.vhds), sorted(self.vhds)))
            util.SMlog("VHD list returns %d VHDs: %s" % (len(list_vhds), sorted(list_vhds)))
    except:
        pass
    for uuid in self.vhds.keys():
        if self.vhds[uuid].error:
            raise xs_errors.XenError('SRScan', opterr='uuid=%s' % uuid)
        self.vdis[uuid] = self.vdi(uuid)
        # Get the key hash of any encrypted VDIs:
        vhd_path = os.path.join(self.path, self.vhds[uuid].path)
        key_hash = vhdutil.getKeyHash(vhd_path)
        self.vdis[uuid].sm_config_override['key_hash'] = key_hash

    # raw VDIs and CBT log files
    files = util.ioretry(lambda: util.listdir(self.path))
    for fn in files:
        if fn.endswith(vhdutil.FILE_EXTN_RAW):
            uuid = fn[:-(len(vhdutil.FILE_EXTN_RAW))]
            self.vdis[uuid] = self.vdi(uuid)
        elif fn.endswith(CBTLOG_TAG):
            cbt_uuid = fn.split(".")[0]
            # If an associated disk exists, update CBT status
            # else create new VDI of type cbt_metadata
            if cbt_uuid in self.vdis:
                self.vdis[cbt_uuid].cbt_enabled = True
            else:
                new_vdi = self.vdi(cbt_uuid)
                new_vdi.ty = "cbt_metadata"
                new_vdi.cbt_enabled = True
                self.vdis[cbt_uuid] = new_vdi

    # Mark parent VDIs as Read-only and generate virtual allocation
    self.virtual_allocation = 0
    for uuid, vdi in self.vdis.items():
        if vdi.parent:
            if vdi.parent in self.vdis:
                self.vdis[vdi.parent].read_only = True
            if vdi.parent in geneology:
                geneology[vdi.parent].append(uuid)
            else:
                geneology[vdi.parent] = [uuid]
        if not vdi.hidden:
            self.virtual_allocation += (vdi.size)

    # now remove all hidden leaf nodes from self.vdis so that they are not
    # introduced into the Agent DB when SR is synchronized. With the
    # asynchronous GC, a deleted VDI might stay around until the next
    # SR.scan, so if we don't ignore hidden leaves we would pick up
    # freshly-deleted VDIs as newly-added VDIs
    for uuid in list(self.vdis.keys()):
        if uuid not in geneology and self.vdis[uuid].hidden:
            util.SMlog("Scan found hidden leaf (%s), ignoring" % uuid)
            del self.vdis[uuid]

325 

def _getsize(self):
    """Return the size in bytes of the filesystem backing this SR."""
    # SMB SRs measure the link path; plain file SRs measure the SR path.
    if self.handles("smb"):
        return util.get_fs_size(self.linkpath)
    return util.get_fs_size(self.path)

331 

def _getutilisation(self):
    """Return the utilisation in bytes of the filesystem at the SR path."""
    return util.get_fs_utilisation(self.path)

334 

def _replay(self, logentry):
    """Re-execute one journalled VDI operation from a parsed log entry.

    Entry layout (space-separated fields): index 5 is the command name
    ("vdi_<op>"), 6 is the SR uuid, 7 is the VDI uuid; fields 6.. are
    passed positionally to the matching method of the target VDI.
    """
    # all replay commands have the same 5,6,7th arguments
    # vdi_command, sr-uuid, vdi-uuid
    back_cmd = logentry[5].replace("vdi_", "")
    target = self.vdi(logentry[7])
    cmd = getattr(target, back_cmd)
    args = []
    for item in logentry[6:]:
        # Strip trailing newlines left over from readlines()
        item = item.replace("\n", "")
        args.append(item)
    ret = cmd( * args)
    if ret:
        print(ret)

348 

349 def _compare_args(self, a, b): 

350 try: 

351 if a[2] != "log:": 

352 return 1 

353 if b[2] != "end:" and b[2] != "error:": 

354 return 1 

355 if a[3] != b[3]: 

356 return 1 

357 if a[4] != b[4]: 

358 return 1 

359 return 0 

360 except: 

361 return 1 

362 

def _process_replay(self, data):
    """Scan raw log lines and replay entries with no matching end/error."""
    logentries = []
    for logentry in data:
        logentry = logentry.split(" ")
        logentries.append(logentry)
    # we are looking for a log entry that has a log but no end or error
    # wkcfix -- recreate (adjusted) logfile
    index = 0
    while index < len(logentries) - 1:
        if self._compare_args(logentries[index], logentries[index + 1]):
            # No terminator follows: the operation was interrupted; redo it.
            self._replay(logentries[index])
        else:
            # skip the paired one
            index += 1
        # next
        index += 1

379 

def _kickGC(self):
    """Start a garbage-collection pass unless one is already running."""
    # don't bother if an instance already running (this is just an
    # optimization to reduce the overhead of forking a new process if we
    # don't have to, but the process will check the lock anyways)
    lockRunning = Lock(cleanup.LOCK_TYPE_RUNNING, self.uuid)
    if not lockRunning.acquireNoblock():
        if cleanup.should_preempt(self.session, self.uuid):
            util.SMlog("Aborting currently-running coalesce of garbage VDI")
            try:
                if not cleanup.abort(self.uuid, soft=True):
                    util.SMlog("The GC has already been scheduled to "
                               "re-start")
            except util.CommandException as e:
                # BUGFIX: this handler used to end in "finally: return",
                # which silently swallowed the re-raise below. Only a
                # timed-out abort is a benign, logged failure.
                if e.code != errno.ETIMEDOUT:
                    raise
                util.SMlog('failed to abort the GC')
            return
        else:
            util.SMlog("A GC instance already running, not kicking")
            return
    else:
        lockRunning.release()

    util.SMlog("Kicking GC")
    cleanup.gc(self.session, self.uuid, True)

406 

407 def _isbind(self): 

408 # os.path.ismount can't deal with bind mount 

409 st1 = os.stat(self.path) 

410 st2 = os.stat(self.remotepath) 

411 return st1.st_dev == st2.st_dev and st1.st_ino == st2.st_ino 

412 

def _checkmount(self):
    """Return True when the SR path exists and is mounted or bind-mounted."""
    mount_path = self.path
    # For SMB SRs, the relevant path is the mountpoint
    if self.handles("smb"):
        mount_path = self.mountpoint

    return util.ioretry(lambda: util.pathexists(mount_path) and \
            (util.ismount(mount_path) or \
             util.pathexists(self.remotepath) and self._isbind()))

421 

422 

class FileVDI(VDI.VDI):
    """VDI backed by a file (VHD or raw) in a FileSR directory."""
    # File-extension tokens for the two on-disk image formats
    PARAM_VHD = "vhd"
    PARAM_RAW = "raw"
    # Maps the external "type" parameter value to vhdutil's internal type
    VDI_TYPE = {
        PARAM_VHD: vhdutil.VDI_TYPE_VHD,
        PARAM_RAW: vhdutil.VDI_TYPE_RAW
    }

430 

def _find_path_with_retries(self, vdi_uuid, maxretry=5, period=2.0):
    """Locate the VDI's backing file (.vhd, .raw or CBT log), retrying.

    Sets self.path and self.vdi_type on success. Returns True when a
    file is found within `maxretry` attempts (sleeping `period` seconds
    between tries), False otherwise.
    """
    vhd_path = os.path.join(self.sr.path, "%s.%s" % \
            (vdi_uuid, self.PARAM_VHD))
    raw_path = os.path.join(self.sr.path, "%s.%s" % \
            (vdi_uuid, self.PARAM_RAW))
    cbt_path = os.path.join(self.sr.path, "%s.%s" %
            (vdi_uuid, CBTLOG_TAG))
    found = False
    tries = 0
    while tries < maxretry and not found:
        tries += 1
        if util.ioretry(lambda: util.pathexists(vhd_path)):
            self.vdi_type = vhdutil.VDI_TYPE_VHD
            self.path = vhd_path
            found = True
        elif util.ioretry(lambda: util.pathexists(raw_path)):
            self.vdi_type = vhdutil.VDI_TYPE_RAW
            self.path = raw_path
            self.hidden = False
            found = True
        elif util.ioretry(lambda: util.pathexists(cbt_path)):
            self.vdi_type = CBTLOG_TAG
            self.path = cbt_path
            self.hidden = False
            found = True

        if not found:
            util.SMlog("VHD %s not found, retry %s of %s" % (vhd_path, tries, maxretry))
            time.sleep(period)

    return found

462 

def load(self, vdi_uuid):
    """Initialise this VDI's state from its on-disk backing file.

    For vdi_create the path is derived from the requested type;
    otherwise the backing file is searched for (with retries). When the
    SR scan preloaded VHD metadata, it is reused instead of re-querying
    the file.
    """
    self.lock = self.sr.lock

    self.sr.srcmd.params['o_direct'] = self.sr.o_direct

    if self.sr.srcmd.cmd == "vdi_create":
        self.vdi_type = vhdutil.VDI_TYPE_VHD
        self.key_hash = None
        if "vdi_sm_config" in self.sr.srcmd.params:
            if "key_hash" in self.sr.srcmd.params["vdi_sm_config"]:
                self.key_hash = self.sr.srcmd.params["vdi_sm_config"]["key_hash"]

            if "type" in self.sr.srcmd.params["vdi_sm_config"]:
                vdi_type = self.sr.srcmd.params["vdi_sm_config"]["type"]
                if not self.VDI_TYPE.get(vdi_type):
                    raise xs_errors.XenError('VDIType',
                            opterr='Invalid VDI type %s' % vdi_type)
                self.vdi_type = self.VDI_TYPE[vdi_type]
        self.path = os.path.join(self.sr.path, "%s%s" %
                (vdi_uuid, vhdutil.FILE_EXTN[self.vdi_type]))
    else:
        found = self._find_path_with_retries(vdi_uuid)
        if not found:
            if self.sr.srcmd.cmd == "vdi_delete":
                # Could be delete for CBT log file
                self.path = os.path.join(self.sr.path, "%s.%s" %
                        (vdi_uuid, self.PARAM_VHD))
                return
            if self.sr.srcmd.cmd == "vdi_attach_from_config":
                return
            raise xs_errors.XenError('VDIUnavailable',
                    opterr="VDI %s not found" % vdi_uuid)

    if self.vdi_type == vhdutil.VDI_TYPE_VHD and \
            self.sr.__dict__.get("vhds") and self.sr.vhds.get(vdi_uuid):
        # VHD info already preloaded: use it instead of querying directly
        vhdInfo = self.sr.vhds[vdi_uuid]
        self.utilisation = vhdInfo.sizePhys
        self.size = vhdInfo.sizeVirt
        self.hidden = vhdInfo.hidden
        if self.hidden:
            self.managed = False
        self.parent = vhdInfo.parentUuid
        if self.parent:
            self.sm_config_override = {'vhd-parent': self.parent}
        else:
            self.sm_config_override = {'vhd-parent': None}
        return

    try:
        # Change to the SR directory in case parent
        # locator field path has changed
        os.chdir(self.sr.path)
    except Exception as chdir_exception:
        util.SMlog("Unable to change to SR directory, SR unavailable, %s" %
                str(chdir_exception))
        raise xs_errors.XenError('SRUnavailable', opterr=str(chdir_exception))

    if util.ioretry(
            lambda: util.pathexists(self.path),
            errlist=[errno.EIO, errno.ENOENT]):
        try:
            st = util.ioretry(lambda: os.stat(self.path),
                    errlist=[errno.EIO, errno.ENOENT])
            self.utilisation = int(st.st_size)
        except util.CommandException as inst:
            if inst.code == errno.EIO:
                raise xs_errors.XenError('VDILoad', \
                        opterr='Failed load VDI information %s' % self.path)
            else:
                util.SMlog("Stat failed for %s, %s" % (
                    self.path, str(inst)))
                raise xs_errors.XenError('VDIType', \
                        opterr='Invalid VDI type %s' % self.vdi_type)

        if self.vdi_type == vhdutil.VDI_TYPE_RAW:
            # Raw files: virtual size == physical size, no VHD metadata
            self.exists = True
            self.size = self.utilisation
            self.sm_config_override = {'type': self.PARAM_RAW}
            return

        if self.vdi_type == CBTLOG_TAG:
            self.exists = True
            self.size = self.utilisation
            return

        try:
            # The VDI might be activated in R/W mode so the VHD footer
            # won't be valid, use the back-up one instead.
            diskinfo = util.ioretry(
                    lambda: self._query_info(self.path, True),
                    errlist=[errno.EIO, errno.ENOENT])

            if 'parent' in diskinfo:
                self.parent = diskinfo['parent']
                self.sm_config_override = {'vhd-parent': self.parent}
            else:
                self.sm_config_override = {'vhd-parent': None}
                self.parent = ''
            # vhd-util reports size in MiB; convert to bytes
            self.size = int(diskinfo['size']) * 1024 * 1024
            self.hidden = int(diskinfo['hidden'])
            if self.hidden:
                self.managed = False
            self.exists = True
        except util.CommandException as inst:
            raise xs_errors.XenError('VDILoad', \
                    opterr='Failed load VDI information %s' % self.path)

571 

def update(self, sr_uuid, vdi_location):
    """Refresh this VDI's database record from its on-disk state."""
    self.load(vdi_location)
    vdi_ref = self.sr.srcmd.params['vdi_ref']
    self.sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
    self._db_update()

577 

def create(self, sr_uuid, vdi_uuid, size):
    """Create the backing file for a new VDI and introduce it to XAPI.

    For VHD VDIs the requested size is validated/rounded and a VHD
    created via vhd-util; for raw VDIs a sparse file of `size` bytes is
    produced with truncate().

    Raises:
        xs_errors.XenError('VDIExists'): backing file already present.
        xs_errors.XenError('VDICreate'): VHD creation failed.
    """
    if util.ioretry(lambda: util.pathexists(self.path)):
        raise xs_errors.XenError('VDIExists')

    if self.vdi_type == vhdutil.VDI_TYPE_VHD:
        try:
            size = vhdutil.validate_and_round_vhd_size(int(size))
            mb = 1024 * 1024
            size_mb = size // mb
            util.ioretry(lambda: self._create(str(size_mb), self.path))
            self.size = util.ioretry(lambda: self._query_v(self.path))
        except util.CommandException as inst:
            raise xs_errors.XenError('VDICreate',
                                     opterr='error %d' % inst.code)
    else:
        # Raw VDI: truncate() on a fresh file yields a sparse file of
        # the requested virtual size. "with" closes the handle even if
        # truncate raises (the old open()/close() pair leaked it then).
        with open(self.path, 'w') as f:
            f.truncate(int(size))
        self.size = size

    self.sr.added_vdi(self)

    st = util.ioretry(lambda: os.stat(self.path))
    self.utilisation = int(st.st_size)
    if self.vdi_type == vhdutil.VDI_TYPE_RAW:
        self.sm_config = {"type": self.PARAM_RAW}

    self._db_introduce()
    self.sr._update(self.sr.uuid, self.size)
    return super(FileVDI, self).get_params()

608 

def delete(self, sr_uuid, vdi_uuid, data_only=False):
    """Delete this VDI's backing file and (unless data_only) its DB record.

    When data_only is True the XAPI record is kept (data_destroy path).
    Raises xs_errors.XenError('VDIInUse') when the VDI is attached, and
    'VDIDelete' when unlinking fails.
    """
    if not util.ioretry(lambda: util.pathexists(self.path)):
        # Nothing on disk: let the generic cleanup run anyway
        return super(FileVDI, self).delete(sr_uuid, vdi_uuid, data_only)

    if self.attached:
        raise xs_errors.XenError('VDIInUse')

    try:
        util.force_unlink(self.path)
    except Exception as e:
        raise xs_errors.XenError(
            'VDIDelete',
            opterr='Failed to unlink file during deleting VDI: %s' % str(e))

    self.sr.deleted_vdi(vdi_uuid)
    # If this is a data_destroy call, don't remove from XAPI db
    if not data_only:
        self._db_forget()
    self.sr._update(self.sr.uuid, -self.size)
    self.sr.lock.cleanupAll(vdi_uuid)
    # The deleted leaf may leave coalescable garbage; schedule a GC pass
    self.sr._kickGC()
    return super(FileVDI, self).delete(sr_uuid, vdi_uuid, data_only)

631 

def attach(self, sr_uuid, vdi_uuid):
    """Attach the VDI: verify the backing file and publish xenstore data."""
    if self.path is None:
        self._find_path_with_retries(vdi_uuid)
    if not self._checkpath(self.path):
        raise xs_errors.XenError('VDIUnavailable', \
                opterr='VDI %s unavailable %s' % (vdi_uuid, self.path))
    try:
        self.attached = True

        if not hasattr(self, 'xenstore_data'):
            self.xenstore_data = {}

        # Publish synthetic SCSI inquiry data for the guest
        self.xenstore_data.update(scsiutil.update_XS_SCSIdata(vdi_uuid, \
                scsiutil.gen_synthetic_page_data(vdi_uuid)))

        if self.sr.handles("file"):
            # XXX: PR-1255: if these are constants then they should
            # be returned by the attach API call, not persisted in the
            # pool database.
            self.xenstore_data['storage-type'] = 'ext'
        return super(FileVDI, self).attach(sr_uuid, vdi_uuid)
    except util.CommandException as inst:
        raise xs_errors.XenError('VDILoad', opterr='error %d' % inst.code)

655 

def detach(self, sr_uuid, vdi_uuid):
    """Mark the VDI detached; no device-level teardown is done here."""
    self.attached = False

658 

def resize(self, sr_uuid, vdi_uuid, size):
    """Grow a VHD VDI to `size` bytes; shrinking is rejected.

    A journal file protects the operation so a failed resize can be
    reverted. Returns the VDI parameter struct.
    """
    if not self.exists:
        raise xs_errors.XenError('VDIUnavailable', \
                opterr='VDI %s unavailable %s' % (vdi_uuid, self.path))

    if self.vdi_type != vhdutil.VDI_TYPE_VHD:
        raise xs_errors.XenError('Unimplemented')

    if self.hidden:
        raise xs_errors.XenError('VDIUnavailable', opterr='hidden VDI')

    if size < self.size:
        util.SMlog('vdi_resize: shrinking not supported: ' + \
                '(current size: %d, new size: %d)' % (self.size, size))
        raise xs_errors.XenError('VDISize', opterr='shrinking not allowed')

    if size == self.size:
        # No-op resize: report current parameters
        return VDI.VDI.get_params(self)

    # We already checked it is a VDI_TYPE_VHD
    size = vhdutil.validate_and_round_vhd_size(int(size))

    jFile = JOURNAL_FILE_PREFIX + self.uuid
    try:
        vhdutil.setSizeVirt(self.path, size, jFile)
    except:
        # Revert the operation
        vhdutil.revert(self.path, jFile)
        raise xs_errors.XenError('VDISize', opterr='resize operation failed')

    old_size = self.size
    self.size = vhdutil.getSizeVirt(self.path)
    st = util.ioretry(lambda: os.stat(self.path))
    self.utilisation = int(st.st_size)

    self._db_update()
    self.sr._update(self.sr.uuid, self.size - old_size)
    # Keep any CBT log in step with the new virtual size
    super(FileVDI, self).resize_cbt(self.sr.uuid, self.uuid, self.size)
    return VDI.VDI.get_params(self)

698 

def clone(self, sr_uuid, vdi_uuid):
    """Clone via a double snapshot (both resulting leaves writable)."""
    return self._do_snapshot(sr_uuid, vdi_uuid, VDI.SNAPSHOT_DOUBLE)

701 

def compose(self, sr_uuid, vdi1, vdi2):
    """Make vdi1 the hidden, unmanaged parent of this VHD chain.

    NOTE(review): the assert below is stripped under `python -O`;
    presumably callers guarantee the parent file exists — confirm.
    """
    if self.vdi_type != vhdutil.VDI_TYPE_VHD:
        raise xs_errors.XenError('Unimplemented')
    parent_fn = vdi1 + vhdutil.FILE_EXTN[vhdutil.VDI_TYPE_VHD]
    parent_path = os.path.join(self.sr.path, parent_fn)
    assert(util.pathexists(parent_path))
    vhdutil.setParent(self.path, parent_path, False)
    vhdutil.setHidden(parent_path)
    self.sr.session.xenapi.VDI.set_managed(self.sr.srcmd.params['args'][0], False)
    util.pread2([vhdutil.VHD_UTIL, "modify", "-p", parent_path,
            "-n", self.path])
    # Tell tapdisk the chain has changed
    if not blktap2.VDI.tap_refresh(self.session, sr_uuid, vdi2):
        raise util.SMException("failed to refresh VDI %s" % self.uuid)
    util.SMlog("VDI.compose: relinked %s->%s" % (vdi2, vdi1))

717 

def reset_leaf(self, sr_uuid, vdi_uuid):
    """Discard the leaf VHD's data, keeping its parent chain intact.

    Raises:
        xs_errors.XenError('Unimplemented'): non-VHD VDIs.
        util.SMException: the VDI has no parent (resetting would
            destroy the only copy of the data).
    """
    if self.vdi_type != vhdutil.VDI_TYPE_VHD:
        raise xs_errors.XenError('Unimplemented')

    # safety check
    if not vhdutil.hasParent(self.path):
        # BUGFIX: the message was previously built as
        # "... %s ..." + "..." % self.uuid — the % bound only to the
        # second, placeholder-less literal and raised TypeError instead
        # of the intended SMException.
        raise util.SMException("ERROR: VDI %s has no parent, "
                               "will not reset contents" % self.uuid)

    vhdutil.killData(self.path)

728 

def _do_snapshot(self, sr_uuid, vdi_uuid, snap_type,
                 secondary=None, cbtlog=None):
    """Pause the tapdisk (if any), take the snapshot, then unpause.

    With CBT enabled, records whether the VDI was inactive at snapshot
    time (consistency_state) alongside the snapshot.
    """
    # If cbt enabled, save file consistency state
    if cbtlog is not None:
        if blktap2.VDI.tap_status(self.session, vdi_uuid):
            # tapdisk active: on-disk state may lag the guest's view
            consistency_state = False
        else:
            consistency_state = True
        util.SMlog("Saving log consistency state of %s for vdi: %s" %
                (consistency_state, vdi_uuid))
    else:
        consistency_state = None

    if self.vdi_type != vhdutil.VDI_TYPE_VHD:
        raise xs_errors.XenError('Unimplemented')

    if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid):
        raise util.SMException("failed to pause VDI %s" % vdi_uuid)
    try:
        return self._snapshot(snap_type, cbtlog, consistency_state)
    finally:
        # Always unpause, even when the snapshot failed
        blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary)

751 

def _rename(self, src, dst):
    """Rename src to dst, retried on transient I/O errors."""
    util.SMlog("FileVDI._rename %s to %s" % (src, dst))
    util.ioretry(lambda: os.rename(src, dst))

def _link(self, src, dst):
    """Hard-link src to dst (no retry)."""
    util.SMlog("FileVDI._link %s to %s" % (src, dst))
    os.link(src, dst)

def _unlink(self, path):
    """Remove a file (no retry)."""
    util.SMlog("FileVDI._unlink %s" % (path))
    os.unlink(path)

763 

def _create_new_parent(self, src, newsrc):
    """Turn `src` into the new parent file `newsrc`.

    Uses a hardlink when the SR supports it, otherwise falls back to a
    rename. NOTE(review): SharedFileSR is defined elsewhere in this
    module (not visible in this chunk).
    """
    sr_sm_config = self.session.xenapi.SR.get_sm_config(self.sr.sr_ref)
    if SharedFileSR.NO_HARDLINK_SUPPORT in sr_sm_config:
        self._rename(src, newsrc)
    else:
        self._link(src, newsrc)

770 

def __fist_enospace(self):
    """Fault-injection helper: simulate ENOSPC (28) from vhd-util snapshot."""
    raise util.CommandException(28, "vhd-util snapshot", reason="No space")

773 

    def _snapshot(self, snap_type, cbtlog=None, cbt_consistency=None):
        """Snapshot/clone this VDI.

        The current VHD is moved (or hardlinked) aside to become a new
        read-only parent, then one or two child VHDs are created on top of
        it: the writable original is recreated in place and, for
        SNAPSHOT_DOUBLE, an additional user-visible snapshot leaf is made.
        Progress is journalled via util.start/end_log_entry so a crashed
        operation can be detected and rolled back.

        snap_type: one of VDI.SNAPSHOT_SINGLE/DOUBLE/INTERNAL.
        cbtlog: path of the CBT log if changed-block tracking is enabled.
        cbt_consistency: consistency flag forwarded to _cbt_snapshot.
        Returns the get_params() of the new user-visible VDI.
        Raises xs_errors.XenError on any unrecoverable failure.
        """
        util.SMlog("FileVDI._snapshot for %s (type %s)" % (self.uuid, snap_type))

        # Arguments recorded in the journal entry so a rollback knows what
        # operation was in flight.
        args = []
        args.append("vdi_clone")
        args.append(self.sr.uuid)
        args.append(self.uuid)

        dest = None
        dst = None
        if snap_type == VDI.SNAPSHOT_DOUBLE:
            # Only a double snapshot produces a separate user-visible leaf.
            dest = util.gen_uuid()
            dst = os.path.join(self.sr.path, "%s.%s" % (dest, self.vdi_type))
            args.append(dest)

        if self.hidden:
            raise xs_errors.XenError('VDIClone', opterr='hidden VDI')

        depth = vhdutil.getDepth(self.path)
        if depth == -1:
            raise xs_errors.XenError('VDIUnavailable', \
                  opterr='failed to get VHD depth')
        elif depth >= vhdutil.MAX_CHAIN_SIZE:
            raise xs_errors.XenError('SnapshotChainTooLong')

        # newuuid names the new read-only base copy (the old contents).
        newuuid = util.gen_uuid()
        src = self.path
        newsrc = os.path.join(self.sr.path, "%s.%s" % (newuuid, self.vdi_type))
        newsrcname = "%s.%s" % (newuuid, self.vdi_type)

        if not self._checkpath(src):
            raise xs_errors.XenError('VDIUnavailable', \
                  opterr='VDI %s unavailable %s' % (self.uuid, src))

        # wkcfix: multiphase
        util.start_log_entry(self.sr.path, self.path, args)

        # We assume the filehandle has been released
        try:
            self._create_new_parent(src, newsrc)

            # Create the snapshot under a temporary name, then rename
            # it afterwards. This avoids a small window where it exists
            # but is invalid. We do not need to do this for
            # snap_type == VDI.SNAPSHOT_DOUBLE because dst never existed
            # before so nobody will try to query it.
            tmpsrc = "%s.%s" % (src, "new")
            # Fault injection site to fail the snapshot with ENOSPACE
            util.fistpoint.activate_custom_fn(
                "FileSR_fail_snap1",
                self.__fist_enospace)
            util.ioretry(lambda: self._snap(tmpsrc, newsrcname))
            self._rename(tmpsrc, src)
            if snap_type == VDI.SNAPSHOT_DOUBLE:
                # Fault injection site to fail the snapshot with ENOSPACE
                util.fistpoint.activate_custom_fn(
                    "FileSR_fail_snap2",
                    self.__fist_enospace)
                util.ioretry(lambda: self._snap(dst, newsrcname))
            # mark the original file (in this case, its newsrc)
            # as hidden so that it does not show up in subsequent scans
            util.ioretry(lambda: self._mark_hidden(newsrc))

            #Verify parent locator field of both children and delete newsrc if unused
            introduce_parent = True
            try:
                srcparent = util.ioretry(lambda: self._query_p_uuid(src))
                dstparent = None
                if snap_type == VDI.SNAPSHOT_DOUBLE:
                    dstparent = util.ioretry(lambda: self._query_p_uuid(dst))
                # If no child actually points at the new base copy (e.g. the
                # child coalesced onto a grandparent), the base is unused and
                # can be removed instead of being introduced into the DB.
                if srcparent != newuuid and \
                        (snap_type == VDI.SNAPSHOT_SINGLE or \
                         snap_type == VDI.SNAPSHOT_INTERNAL or \
                         dstparent != newuuid):
                    util.ioretry(lambda: self._unlink(newsrc))
                    introduce_parent = False
            except:
                # Best-effort verification only; keep the parent on failure.
                pass

            # Introduce the new VDI records
            leaf_vdi = None
            if snap_type == VDI.SNAPSHOT_DOUBLE:
                leaf_vdi = VDI.VDI(self.sr, dest)  # user-visible leaf VDI
                leaf_vdi.read_only = False
                leaf_vdi.location = dest
                leaf_vdi.size = self.size
                leaf_vdi.utilisation = self.utilisation
                leaf_vdi.sm_config = {}
                leaf_vdi.sm_config['vhd-parent'] = dstparent
                # If the parent is encrypted set the key_hash
                # for the new snapshot disk
                vdi_ref = self.sr.srcmd.params['vdi_ref']
                sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
                if "key_hash" in sm_config:
                    leaf_vdi.sm_config['key_hash'] = sm_config['key_hash']
                # If we have CBT enabled on the VDI,
                # set CBT status for the new snapshot disk
                if cbtlog:
                    leaf_vdi.cbt_enabled = True

            base_vdi = None
            if introduce_parent:
                base_vdi = VDI.VDI(self.sr, newuuid)  # readonly parent
                base_vdi.label = "base copy"
                base_vdi.read_only = True
                base_vdi.location = newuuid
                base_vdi.size = self.size
                base_vdi.utilisation = self.utilisation
                base_vdi.sm_config = {}
                grandparent = util.ioretry(lambda: self._query_p_uuid(newsrc))
                if grandparent.find("no parent") == -1:
                    base_vdi.sm_config['vhd-parent'] = grandparent

            try:
                if snap_type == VDI.SNAPSHOT_DOUBLE:
                    leaf_vdi_ref = leaf_vdi._db_introduce()
                    util.SMlog("vdi_clone: introduced VDI: %s (%s)" % \
                            (leaf_vdi_ref, dest))

                if introduce_parent:
                    base_vdi_ref = base_vdi._db_introduce()
                    self.session.xenapi.VDI.set_managed(base_vdi_ref, False)
                    util.SMlog("vdi_clone: introduced VDI: %s (%s)" % (base_vdi_ref, newuuid))
                # Update this VDI's own record to point at its new parent.
                vdi_ref = self.sr.srcmd.params['vdi_ref']
                sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
                sm_config['vhd-parent'] = srcparent
                self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config)
            except Exception as e:
                util.SMlog("vdi_clone: caught error during VDI.db_introduce: %s" % (str(e)))
                # Note it's too late to actually clean stuff up here: the base disk has
                # been marked as deleted already.
                util.end_log_entry(self.sr.path, self.path, ["error"])
                raise
        except util.CommandException as inst:
            # XXX: it might be too late if the base disk has been marked as deleted!
            self._clonecleanup(src, dst, newsrc)
            util.end_log_entry(self.sr.path, self.path, ["error"])
            raise xs_errors.XenError('VDIClone',
                  opterr='VDI clone failed error %d' % inst.code)

        # Update cbt files if user created snapshot (SNAPSHOT_DOUBLE)
        if snap_type == VDI.SNAPSHOT_DOUBLE and cbtlog:
            try:
                self._cbt_snapshot(dest, cbt_consistency)
            except:
                # CBT operation failed.
                util.end_log_entry(self.sr.path, self.path, ["error"])
                raise

        util.end_log_entry(self.sr.path, self.path, ["done"])
        if snap_type != VDI.SNAPSHOT_INTERNAL:
            self.sr._update(self.sr.uuid, self.size)
        # Return info on the new user-visible leaf VDI
        ret_vdi = leaf_vdi
        if not ret_vdi:
            ret_vdi = base_vdi
        if not ret_vdi:
            ret_vdi = self
        return ret_vdi.get_params()
933 

934 def get_params(self): 

935 if not self._checkpath(self.path): 

936 raise xs_errors.XenError('VDIUnavailable', \ 

937 opterr='VDI %s unavailable %s' % (self.uuid, self.path)) 

938 return super(FileVDI, self).get_params() 

939 

940 def _snap(self, child, parent): 

941 cmd = [SR.TAPDISK_UTIL, "snapshot", vhdutil.VDI_TYPE_VHD, child, parent] 

942 text = util.pread(cmd) 

943 

944 def _clonecleanup(self, src, dst, newsrc): 

945 try: 

946 if dst: 946 ↛ 950line 946 didn't jump to line 950, because the condition on line 946 was never false

947 util.ioretry(lambda: self._unlink(dst)) 

948 except util.CommandException as inst: 

949 pass 

950 try: 

951 if util.ioretry(lambda: util.pathexists(newsrc)): 951 ↛ exitline 951 didn't return from function '_clonecleanup', because the condition on line 951 was never false

952 stats = os.stat(newsrc) 

953 # Check if we have more than one link to newsrc 

954 if (stats.st_nlink > 1): 

955 util.ioretry(lambda: self._unlink(newsrc)) 

956 elif not self._is_hidden(newsrc): 956 ↛ exitline 956 didn't return from function '_clonecleanup', because the condition on line 956 was never false

957 self._rename(newsrc, src) 

958 except util.CommandException as inst: 

959 pass 

960 

961 def _checkpath(self, path): 

962 try: 

963 if not util.ioretry(lambda: util.pathexists(path)): 963 ↛ 964line 963 didn't jump to line 964, because the condition on line 963 was never true

964 return False 

965 return True 

966 except util.CommandException as inst: 

967 raise xs_errors.XenError('EIO', \ 

968 opterr='IO error checking path %s' % path) 

969 

970 def _query_v(self, path): 

971 cmd = [SR.TAPDISK_UTIL, "query", vhdutil.VDI_TYPE_VHD, "-v", path] 

972 return int(util.pread(cmd)) * 1024 * 1024 

973 

974 def _query_p_uuid(self, path): 

975 cmd = [SR.TAPDISK_UTIL, "query", vhdutil.VDI_TYPE_VHD, "-p", path] 

976 parent = util.pread(cmd) 

977 parent = parent[:-1] 

978 ls = parent.split('/') 

979 return ls[len(ls) - 1].replace(vhdutil.FILE_EXTN_VHD, '') 

980 

981 def _query_info(self, path, use_bkp_footer=False): 

982 diskinfo = {} 

983 qopts = '-vpf' 

984 if use_bkp_footer: 984 ↛ 986line 984 didn't jump to line 986, because the condition on line 984 was never false

985 qopts += 'b' 

986 cmd = [SR.TAPDISK_UTIL, "query", vhdutil.VDI_TYPE_VHD, qopts, path] 

987 txt = util.pread(cmd).split('\n') 

988 diskinfo['size'] = txt[0] 

989 lst = [txt[1].split('/')[-1].replace(vhdutil.FILE_EXTN_VHD, "")] 

990 for val in filter(util.exactmatch_uuid, lst): 990 ↛ 991line 990 didn't jump to line 991, because the loop on line 990 never started

991 diskinfo['parent'] = val 

992 diskinfo['hidden'] = txt[2].split()[1] 

993 return diskinfo 

994 

995 def _create(self, size, path): 

996 cmd = [SR.TAPDISK_UTIL, "create", vhdutil.VDI_TYPE_VHD, size, path] 

997 text = util.pread(cmd) 

998 if self.key_hash: 998 ↛ 999line 998 didn't jump to line 999, because the condition on line 998 was never true

999 vhdutil.setKey(path, self.key_hash) 

1000 

    def _mark_hidden(self, path):
        """Set the hidden flag in the VHD at *path* (excluding it from
        subsequent SR scans) and mirror the state on this object."""
        vhdutil.setHidden(path, True)
        # Only update the in-memory flag after the on-disk flag succeeded.
        self.hidden = 1
1004 

1005 def _is_hidden(self, path): 

1006 return vhdutil.getHidden(path) == 1 

1007 

1008 def extractUuid(path): 

1009 fileName = os.path.basename(path) 

1010 uuid = fileName.replace(vhdutil.FILE_EXTN_VHD, "") 

1011 return uuid 

1012 extractUuid = staticmethod(extractUuid) 

1013 

1014 def generate_config(self, sr_uuid, vdi_uuid): 

1015 """ 

1016 Generate the XML config required to attach and activate 

1017 a VDI for use when XAPI is not running. Attach and 

1018 activation is handled by vdi_attach_from_config below. 

1019 """ 

1020 util.SMlog("FileVDI.generate_config") 

1021 if not util.pathexists(self.path): 1021 ↛ 1022line 1021 didn't jump to line 1022, because the condition on line 1021 was never true

1022 raise xs_errors.XenError('VDIUnavailable') 

1023 resp = {} 

1024 resp['device_config'] = self.sr.dconf 

1025 resp['sr_uuid'] = sr_uuid 

1026 resp['vdi_uuid'] = vdi_uuid 

1027 resp['command'] = 'vdi_attach_from_config' 

1028 # Return the 'config' encoded within a normal XMLRPC response so that 

1029 # we can use the regular response/error parsing code. 

1030 config = xmlrpc.client.dumps(tuple([resp]), "vdi_attach_from_config") 

1031 return xmlrpc.client.dumps((config, ), "", True) 

1032 

1033 def attach_from_config(self, sr_uuid, vdi_uuid): 

1034 """ 

1035 Attach and activate a VDI using config generated by 

1036 vdi_generate_config above. This is used for cases such as 

1037 the HA state-file and the redo-log. 

1038 """ 

1039 util.SMlog("FileVDI.attach_from_config") 

1040 try: 

1041 if not util.pathexists(self.sr.path): 

1042 self.sr.attach(sr_uuid) 

1043 except: 

1044 util.logException("FileVDI.attach_from_config") 

1045 raise xs_errors.XenError( 

1046 'SRUnavailable', 

1047 opterr='Unable to attach from config' 

1048 ) 

1049 

1050 def _create_cbt_log(self): 

1051 # Create CBT log file 

1052 # Name: <vdi_uuid>.cbtlog 

1053 #Handle if file already exists 

1054 log_path = self._get_cbt_logpath(self.uuid) 

1055 open_file = open(log_path, "w+") 

1056 open_file.close() 

1057 return super(FileVDI, self)._create_cbt_log() 

1058 

1059 def _delete_cbt_log(self): 

1060 logPath = self._get_cbt_logpath(self.uuid) 

1061 try: 

1062 os.remove(logPath) 

1063 except OSError as e: 

1064 if e.errno != errno.ENOENT: 

1065 raise 

1066 

    def _cbt_log_exists(self, logpath):
        """Return True iff the CBT log file at *logpath* exists."""
        return util.pathexists(logpath)
1069 

1070 

class SharedFileSR(FileSR):
    """
    FileSR subclass for SRs that use shared network storage
    """
    # sm_config key set on the SR when the filesystem cannot hardlink.
    NO_HARDLINK_SUPPORT = "no_hardlinks"

    def _raise_hardlink_error(self):
        # Fault-injection helper: mimic the errno some network filesystems
        # return when they cannot create hardlinks.
        raise OSError(524, "Unknown error 524")

    def _check_hardlinks(self):
        """Probe whether this SR's filesystem supports hardlinks and record
        the outcome in the SR's sm_config, warning the user if it doesn't."""
        probe_path = os.path.join(self.path, str(uuid4()))
        open(probe_path, 'ab').close()

        hardlink_path = '%s.new' % probe_path
        try:
            # XSI-1100: Let tests simulate failure of the link operation
            util.fistpoint.activate_custom_fn(
                "FileSR_fail_hardlink",
                self._raise_hardlink_error)

            os.link(probe_path, hardlink_path)
            # Link worked: clear any stale no-hardlinks marker.
            self.session.xenapi.SR.remove_from_sm_config(
                self.sr_ref, SharedFileSR.NO_HARDLINK_SUPPORT)
        except OSError:
            msg = "File system for SR %s does not support hardlinks, crash " \
                  "consistency of snapshots cannot be assured" % self.uuid
            util.SMlog(msg, priority=util.LOG_WARNING)
            try:
                self.session.xenapi.SR.add_to_sm_config(
                    self.sr_ref, SharedFileSR.NO_HARDLINK_SUPPORT, 'True')
                self.session.xenapi.message.create(
                    "sr_does_not_support_hardlinks", 2, "SR", self.uuid,
                    msg)
            except XenAPI.Failure:
                # Might already be set and checking has TOCTOU issues
                pass
        finally:
            # Always remove the probe files, whichever way the check went.
            util.force_unlink(hardlink_path)
            util.force_unlink(probe_path)
1110 

1111 

if __name__ == '__main__':
    # Executed directly as an SMAPI driver: dispatch the requested command.
    SRCommand.run(FileSR, DRIVER_INFO)
else:
    # Imported as a module: register this driver with the SR framework.
    SR.registerSR(FileSR)