Hide keyboard shortcuts

Hot-keys on this page

r m x p   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

1#!/usr/bin/python3 

2# 

3# Copyright (C) Citrix Systems Inc. 

4# 

5# This program is free software; you can redistribute it and/or modify 

6# it under the terms of the GNU Lesser General Public License as published 

7# by the Free Software Foundation; version 2.1 only. 

8# 

9# This program is distributed in the hope that it will be useful, 

10# but WITHOUT ANY WARRANTY; without even the implied warranty of 

11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

12# GNU Lesser General Public License for more details. 

13# 

14# You should have received a copy of the GNU Lesser General Public License 

15# along with this program; if not, write to the Free Software Foundation, Inc., 

16# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 

17# 

18# FileSR: local-file storage repository 

19 

20import SR 

21import VDI 

22import SRCommand 

23import util 

24import scsiutil 

25import vhdutil 

26import os 

27import errno 

28import xs_errors 

29import cleanup 

30import blktap2 

31import time 

32import glob 

33from uuid import uuid4 

34from lock import Lock 

35import xmlrpc.client 

36import XenAPI # pylint: disable=import-error 

37from constants import CBTLOG_TAG 

38 

# Map of parent VDI uuid -> list of child VDI uuids, built during _loadvdis.
geneology = {}

# Operations this SR driver advertises to XAPI.
CAPABILITIES = ["SR_PROBE", "SR_UPDATE", \
        "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH", \
        "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "VDI_MIRROR",
        "VDI_GENERATE_CONFIG", "ATOMIC_PAUSE", "VDI_CONFIG_CBT",
        "VDI_ACTIVATE", "VDI_DEACTIVATE", "THIN_PROVISIONING"]

# Device-config keys understood by this driver.
CONFIGURATION = [['location', 'local directory path (required)']]

# Driver registration record reported to XAPI.
DRIVER_INFO = {
    'name': 'Local Path VHD',
    'description': 'SR plugin which represents disks as VHD files stored on a local path',
    'vendor': 'Citrix Systems Inc',
    'copyright': '(C) 2008 Citrix Systems Inc',
    'driver_version': '1.0',
    'required_api_version': '1.0',
    'capabilities': CAPABILITIES,
    'configuration': CONFIGURATION
    }

# Prefix of journal files created in the SR directory during VHD resize.
JOURNAL_FILE_PREFIX = ".journal-"

# Operations that must hold the SR-level lock exclusively.
OPS_EXCLUSIVE = [
        "sr_create", "sr_delete", "sr_probe", "sr_attach", "sr_detach",
        "sr_scan", "vdi_init", "vdi_create", "vdi_delete", "vdi_attach",
        "vdi_detach", "vdi_resize_online", "vdi_snapshot", "vdi_clone"]

# Tells blktap2 to attach from config using tapdisk.
DRIVER_CONFIG = {"ATTACH_FROM_CONFIG_WITH_TAPDISK": True}

67 

68 

class FileSR(SR.SR):
    """Local file storage repository"""

    SR_TYPE = "file"

    # Modernised from the pre-2.4 'handles = staticmethod(handles)' form.
    @staticmethod
    def handles(srtype):
        """Return True if this driver handles SRs of type *srtype*."""
        return srtype == 'file'

77 

78 def _check_o_direct(self): 

79 if self.sr_ref and self.session is not None: 

80 other_config = self.session.xenapi.SR.get_other_config(self.sr_ref) 

81 o_direct = other_config.get("o_direct") 

82 self.o_direct = o_direct is not None and o_direct == "true" 

83 else: 

84 self.o_direct = True 

85 

    def __init__(self, srcmd, sr_uuid):
        """Initialise the SR object and cache the o_direct setting."""
        # We call SR.SR.__init__ explicitly because
        # "super" sometimes failed due to circular imports
        SR.SR.__init__(self, srcmd, sr_uuid)
        self._check_o_direct()

91 

    def load(self, sr_uuid):
        """Initialise driver state from device-config; performs no I/O.

        Raises ConfigLocationMissing if the required 'location' key is
        absent or empty.
        """
        self.ops_exclusive = OPS_EXCLUSIVE
        self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
        self.sr_vditype = vhdutil.VDI_TYPE_VHD
        if 'location' not in self.dconf or not self.dconf['location']:
            raise xs_errors.XenError('ConfigLocationMissing')
        self.remotepath = self.dconf['location']
        # The SR is exposed via a bind mount under MOUNT_BASE/<sr_uuid>.
        self.path = os.path.join(SR.MOUNT_BASE, sr_uuid)
        self.linkpath = self.path
        self.mountpoint = self.path
        self.attached = False
        self.driver_config = DRIVER_CONFIG

104 

105 def create(self, sr_uuid, size): 

106 """ Create the SR. The path must not already exist, or if it does,  

107 it must be empty. (This accounts for the case where the user has 

108 mounted a device onto a directory manually and want to use this as the 

109 root of a file-based SR.) """ 

110 try: 

111 if util.ioretry(lambda: util.pathexists(self.remotepath)): 111 ↛ 112line 111 didn't jump to line 112, because the condition on line 111 was never true

112 if len(util.ioretry(lambda: util.listdir(self.remotepath))) != 0: 

113 raise xs_errors.XenError('SRExists') 

114 else: 

115 try: 

116 util.ioretry(lambda: os.mkdir(self.remotepath)) 

117 except util.CommandException as inst: 

118 if inst.code == errno.EEXIST: 

119 raise xs_errors.XenError('SRExists') 

120 else: 

121 raise xs_errors.XenError('FileSRCreate', \ 

122 opterr='directory creation failure %d' \ 

123 % inst.code) 

124 except: 

125 raise xs_errors.XenError('FileSRCreate') 

126 

    def delete(self, sr_uuid):
        """Delete the SR: force a GC pass, require no live VDIs remain,
        then remove leftover non-VDI files and detach.

        Raises SRNotEmpty if undeleted VDIs remain, FileSRDelete on I/O
        failure.
        """
        self.attach(sr_uuid)
        cleanup.gc_force(self.session, self.uuid)

        # check to make sure no VDIs are present; then remove old
        # files that are non VDI's
        try:
            if util.ioretry(lambda: util.pathexists(self.path)):
                #Load the VDI list
                self._loadvdis()
                for uuid in self.vdis:
                    if not self.vdis[uuid].deleted:
                        raise xs_errors.XenError('SRNotEmpty', \
                                opterr='VDIs still exist in SR')

                # remove everything else, there are no vdi's
                for name in util.ioretry(lambda: util.listdir(self.path)):
                    fullpath = os.path.join(self.path, name)
                    try:
                        util.ioretry(lambda: os.unlink(fullpath))
                    except util.CommandException as inst:
                        # ENOENT (already gone) and EISDIR (subdirectory)
                        # are tolerated; anything else aborts the delete.
                        if inst.code != errno.ENOENT and \
                           inst.code != errno.EISDIR:
                            raise xs_errors.XenError('FileSRDelete', \
                                    opterr='failed to remove %s error %d' \
                                    % (fullpath, inst.code))
            self.detach(sr_uuid)
        except util.CommandException as inst:
            # Always detach before surfacing the failure.
            self.detach(sr_uuid)
            raise xs_errors.XenError('FileSRDelete', \
                    opterr='error %d' % inst.code)

158 

    def attach(self, sr_uuid, bind=True):
        """Mount the configured location onto the SR path.

        bind -- mount with --bind (default) rather than as a device.
        """
        if not self._checkmount():
            try:
                util.ioretry(lambda: util.makedirs(self.path, mode=0o700))
            except util.CommandException as inst:
                # EEXIST is fine: the mount point is already present.
                if inst.code != errno.EEXIST:
                    raise xs_errors.XenError("FileSRCreate", \
                            opterr='fail to create mount point. Errno is %s' % inst.code)
            try:
                cmd = ["mount", self.remotepath, self.path]
                if bind:
                    cmd.append("--bind")
                util.pread(cmd)
                # Restrict access to root only.
                os.chmod(self.path, mode=0o0700)
            except util.CommandException as inst:
                raise xs_errors.XenError('FileSRCreate', \
                        opterr='fail to mount FileSR. Errno is %s' % inst.code)
        self.attached = True

177 

    def detach(self, sr_uuid):
        """Abort GC/coalesce, unmount the SR and remove the mount point.

        Raises SRInUse if the unmount fails (e.g. files still open).
        """
        if self._checkmount():
            try:
                util.SMlog("Aborting GC/coalesce")
                cleanup.abort(self.uuid)
                # chdir away in case our cwd is inside the mount.
                os.chdir(SR.MOUNT_BASE)
                util.pread(["umount", self.path])
                os.rmdir(self.path)
            except Exception as e:
                raise xs_errors.XenError('SRInUse', opterr=str(e))
        self.attached = False

189 

    def scan(self, sr_uuid):
        """Rescan the SR directory and synchronise VDI state with XAPI."""
        if not self._checkmount():
            raise xs_errors.XenError('SRUnavailable', \
                    opterr='no such directory %s' % self.path)

        if not self.vdis:
            self._loadvdis()

        if not self.passthrough:
            self.physical_size = self._getsize()
            self.physical_utilisation = self._getutilisation()

        # Drop VDIs flagged deleted so they are not re-introduced to XAPI.
        for uuid in list(self.vdis.keys()):
            if self.vdis[uuid].deleted:
                del self.vdis[uuid]

        # CA-15607: make sure we are robust to the directory being unmounted beneath
        # us (eg by a confused user). Without this we might forget all our VDI references
        # which would be a shame.
        # For SMB SRs, this path is mountpoint
        mount_path = self.path
        if self.handles("smb"):
            mount_path = self.mountpoint

        if not self.handles("file") and not os.path.ismount(mount_path):
            util.SMlog("Error: FileSR.scan called but directory %s isn't a mountpoint" % mount_path)
            raise xs_errors.XenError('SRUnavailable', \
                    opterr='not mounted %s' % mount_path)

        self._kickGC()

        # default behaviour from here on
        super(FileSR, self).scan(sr_uuid)

223 

224 def update(self, sr_uuid): 

225 if not self._checkmount(): 

226 raise xs_errors.XenError('SRUnavailable', \ 

227 opterr='no such directory %s' % self.path) 

228 self._update(sr_uuid, 0) 

229 

230 def _update(self, sr_uuid, virt_alloc_delta): 

231 valloc = int(self.session.xenapi.SR.get_virtual_allocation(self.sr_ref)) 

232 self.virtual_allocation = valloc + virt_alloc_delta 

233 self.physical_size = self._getsize() 

234 self.physical_utilisation = self._getutilisation() 

235 self._db_update() 

236 

237 def content_type(self, sr_uuid): 

238 return super(FileSR, self).content_type(sr_uuid) 

239 

    def vdi(self, uuid):
        # Factory for this SR's VDI objects.
        return FileVDI(self, uuid)

242 

    def added_vdi(self, vdi):
        # Register a newly-created VDI in the in-memory map.
        self.vdis[vdi.uuid] = vdi

245 

246 def deleted_vdi(self, uuid): 

247 if uuid in self.vdis: 

248 del self.vdis[uuid] 

249 

250 def replay(self, uuid): 

251 try: 

252 file = open(self.path + "/filelog.txt", "r") 

253 data = file.readlines() 

254 file.close() 

255 self._process_replay(data) 

256 except: 

257 raise xs_errors.XenError('SRLog') 

258 

    def _loadvdis(self):
        """Populate self.vdis from the files in the SR directory.

        Scans VHDs (with encryption key hashes), raw images and CBT log
        files; marks parent VDIs read-only, records parent/child links in
        the module-level 'geneology' map, computes virtual allocation and
        finally drops hidden leaves (freshly-deleted VDIs awaiting GC).
        """
        if self.vdis:
            # Already loaded.
            return

        pattern = os.path.join(self.path, "*%s" % vhdutil.FILE_EXTN_VHD)
        try:
            self.vhds = vhdutil.getAllVHDs(pattern, FileVDI.extractUuid)
        except util.CommandException as inst:
            raise xs_errors.XenError('SRScan', opterr="error VHD-scanning " \
                    "path %s (%s)" % (self.path, inst))
        try:
            # Best-effort cross-check of the VHD scan against a plain
            # glob; mismatches are only logged, never fatal.
            list_vhds = [FileVDI.extractUuid(v) for v in util.ioretry(lambda: glob.glob(pattern))]
            if len(self.vhds) != len(list_vhds):
                util.SMlog("VHD scan returns %d VHDs: %s" % (len(self.vhds), sorted(self.vhds)))
                util.SMlog("VHD list returns %d VHDs: %s" % (len(list_vhds), sorted(list_vhds)))
        except:
            pass
        for uuid in self.vhds.keys():
            if self.vhds[uuid].error:
                raise xs_errors.XenError('SRScan', opterr='uuid=%s' % uuid)
            self.vdis[uuid] = self.vdi(uuid)
            # Get the key hash of any encrypted VDIs:
            vhd_path = os.path.join(self.path, self.vhds[uuid].path)
            key_hash = vhdutil.getKeyHash(vhd_path)
            self.vdis[uuid].sm_config_override['key_hash'] = key_hash

        # raw VDIs and CBT log files
        files = util.ioretry(lambda: util.listdir(self.path))
        for fn in files:
            if fn.endswith(vhdutil.FILE_EXTN_RAW):
                uuid = fn[:-(len(vhdutil.FILE_EXTN_RAW))]
                self.vdis[uuid] = self.vdi(uuid)
            elif fn.endswith(CBTLOG_TAG):
                cbt_uuid = fn.split(".")[0]
                # If an associated disk exists, update CBT status
                # else create new VDI of type cbt_metadata
                if cbt_uuid in self.vdis:
                    self.vdis[cbt_uuid].cbt_enabled = True
                else:
                    new_vdi = self.vdi(cbt_uuid)
                    new_vdi.ty = "cbt_metadata"
                    new_vdi.cbt_enabled = True
                    self.vdis[cbt_uuid] = new_vdi

        # Mark parent VDIs as Read-only and generate virtual allocation
        self.virtual_allocation = 0
        for uuid, vdi in self.vdis.items():
            if vdi.parent:
                if vdi.parent in self.vdis:
                    self.vdis[vdi.parent].read_only = True
                if vdi.parent in geneology:
                    geneology[vdi.parent].append(uuid)
                else:
                    geneology[vdi.parent] = [uuid]
            if not vdi.hidden:
                self.virtual_allocation += (vdi.size)

        # now remove all hidden leaf nodes from self.vdis so that they are not
        # introduced into the Agent DB when SR is synchronized. With the
        # asynchronous GC, a deleted VDI might stay around until the next
        # SR.scan, so if we don't ignore hidden leaves we would pick up
        # freshly-deleted VDIs as newly-added VDIs
        for uuid in list(self.vdis.keys()):
            if uuid not in geneology and self.vdis[uuid].hidden:
                util.SMlog("Scan found hidden leaf (%s), ignoring" % uuid)
                del self.vdis[uuid]

325 

326 def _getsize(self): 

327 path = self.path 

328 if self.handles("smb"): 328 ↛ 329line 328 didn't jump to line 329, because the condition on line 328 was never true

329 path = self.linkpath 

330 return util.get_fs_size(path) 

331 

    def _getutilisation(self):
        # Space actually consumed on the filesystem backing the SR path.
        return util.get_fs_utilisation(self.path)

334 

335 def _replay(self, logentry): 

336 # all replay commands have the same 5,6,7th arguments 

337 # vdi_command, sr-uuid, vdi-uuid 

338 back_cmd = logentry[5].replace("vdi_", "") 

339 target = self.vdi(logentry[7]) 

340 cmd = getattr(target, back_cmd) 

341 args = [] 

342 for item in logentry[6:]: 

343 item = item.replace("\n", "") 

344 args.append(item) 

345 ret = cmd( * args) 

346 if ret: 

347 print(ret) 

348 

349 def _compare_args(self, a, b): 

350 try: 

351 if a[2] != "log:": 

352 return 1 

353 if b[2] != "end:" and b[2] != "error:": 

354 return 1 

355 if a[3] != b[3]: 

356 return 1 

357 if a[4] != b[4]: 

358 return 1 

359 return 0 

360 except: 

361 return 1 

362 

    def _process_replay(self, data):
        """Replay every log record that has no matching end:/error: record."""
        logentries = []
        for logentry in data:
            logentry = logentry.split(" ")
            logentries.append(logentry)
        # we are looking for a log entry that has a log but no end or error
        # wkcfix -- recreate (adjusted) logfile
        index = 0
        while index < len(logentries) - 1:
            if self._compare_args(logentries[index], logentries[index + 1]):
                # Unmatched: the operation never completed -- replay it.
                self._replay(logentries[index])
            else:
                # skip the paired one
                index += 1
            # next
            index += 1

379 

380 def _kickGC(self): 

381 # don't bother if an instance already running (this is just an 

382 # optimization to reduce the overhead of forking a new process if we 

383 # don't have to, but the process will check the lock anyways) 

384 lockRunning = Lock(cleanup.LOCK_TYPE_RUNNING, self.uuid) 

385 if not lockRunning.acquireNoblock(): 385 ↛ 386line 385 didn't jump to line 386, because the condition on line 385 was never true

386 if cleanup.should_preempt(self.session, self.uuid): 

387 util.SMlog("Aborting currently-running coalesce of garbage VDI") 

388 try: 

389 if not cleanup.abort(self.uuid, soft=True): 

390 util.SMlog("The GC has already been scheduled to " 

391 "re-start") 

392 except util.CommandException as e: 

393 if e.code != errno.ETIMEDOUT: 

394 raise 

395 util.SMlog('failed to abort the GC') 

396 finally: 

397 return 

398 else: 

399 util.SMlog("A GC instance already running, not kicking") 

400 return 

401 else: 

402 lockRunning.release() 

403 

404 util.SMlog("Kicking GC") 

405 cleanup.gc(self.session, self.uuid, True) 

406 

407 def _isbind(self): 

408 # os.path.ismount can't deal with bind mount 

409 st1 = os.stat(self.path) 

410 st2 = os.stat(self.remotepath) 

411 return st1.st_dev == st2.st_dev and st1.st_ino == st2.st_ino 

412 

    def _checkmount(self):
        """Return True if the SR path is mounted (including bind mounts)."""
        mount_path = self.path
        # For SMB SRs the mountpoint differs from the SR path.
        if self.handles("smb"):
            mount_path = self.mountpoint

        return util.ioretry(lambda: util.pathexists(mount_path) and \
                (util.ismount(mount_path) or \
                 util.pathexists(self.remotepath) and self._isbind()))

421 

    # Override in SharedFileSR.
    def _check_hardlinks(self):
        """Local filesystems always support hardlinks."""
        return True

425 

class FileVDI(VDI.VDI):
    """VDI backed by a file (VHD or raw image) in a FileSR directory."""

    # File extensions / sm-config type names used in VDI paths.
    PARAM_VHD = "vhd"
    PARAM_RAW = "raw"
    VDI_TYPE = {
            PARAM_VHD: vhdutil.VDI_TYPE_VHD,
            PARAM_RAW: vhdutil.VDI_TYPE_RAW
    }

433 

    def _find_path_with_retries(self, vdi_uuid, maxretry=5, period=2.0):
        """Locate the backing file for *vdi_uuid*, retrying to ride out
        creation/rename races.

        Tries .vhd, then .raw, then the CBT log extension; on success sets
        self.vdi_type and self.path (and self.hidden for non-VHD types).
        Returns True if a backing file was found within *maxretry* tries.
        """
        vhd_path = os.path.join(self.sr.path, "%s.%s" % \
                (vdi_uuid, self.PARAM_VHD))
        raw_path = os.path.join(self.sr.path, "%s.%s" % \
                (vdi_uuid, self.PARAM_RAW))
        cbt_path = os.path.join(self.sr.path, "%s.%s" %
                (vdi_uuid, CBTLOG_TAG))
        found = False
        tries = 0
        while tries < maxretry and not found:
            tries += 1
            if util.ioretry(lambda: util.pathexists(vhd_path)):
                self.vdi_type = vhdutil.VDI_TYPE_VHD
                self.path = vhd_path
                found = True
            elif util.ioretry(lambda: util.pathexists(raw_path)):
                self.vdi_type = vhdutil.VDI_TYPE_RAW
                self.path = raw_path
                self.hidden = False
                found = True
            elif util.ioretry(lambda: util.pathexists(cbt_path)):
                self.vdi_type = CBTLOG_TAG
                self.path = cbt_path
                self.hidden = False
                found = True

            if not found:
                util.SMlog("VHD %s not found, retry %s of %s" % (vhd_path, tries, maxretry))
                time.sleep(period)

        return found

465 

    def load(self, vdi_uuid):
        """Load VDI state from its backing file (or prepare for creation).

        For vdi_create, only derives the type/path from sm-config; for all
        other commands locates the backing file and reads its metadata,
        preferring pre-scanned VHD info from the SR when available.
        """
        self.lock = self.sr.lock

        self.sr.srcmd.params['o_direct'] = self.sr.o_direct

        if self.sr.srcmd.cmd == "vdi_create":
            # Nothing exists on disk yet; derive type and path only.
            self.vdi_type = vhdutil.VDI_TYPE_VHD
            self.key_hash = None
            if "vdi_sm_config" in self.sr.srcmd.params:
                if "key_hash" in self.sr.srcmd.params["vdi_sm_config"]:
                    self.key_hash = self.sr.srcmd.params["vdi_sm_config"]["key_hash"]

                if "type" in self.sr.srcmd.params["vdi_sm_config"]:
                    vdi_type = self.sr.srcmd.params["vdi_sm_config"]["type"]
                    if not self.VDI_TYPE.get(vdi_type):
                        raise xs_errors.XenError('VDIType',
                                opterr='Invalid VDI type %s' % vdi_type)
                    self.vdi_type = self.VDI_TYPE[vdi_type]
            self.path = os.path.join(self.sr.path, "%s%s" %
                    (vdi_uuid, vhdutil.FILE_EXTN[self.vdi_type]))
        else:
            found = self._find_path_with_retries(vdi_uuid)
            if not found:
                if self.sr.srcmd.cmd == "vdi_delete":
                    # Could be delete for CBT log file
                    self.path = os.path.join(self.sr.path, "%s.%s" %
                            (vdi_uuid, self.PARAM_VHD))
                    return
                if self.sr.srcmd.cmd == "vdi_attach_from_config":
                    return
                raise xs_errors.XenError('VDIUnavailable',
                        opterr="VDI %s not found" % vdi_uuid)

        if self.vdi_type == vhdutil.VDI_TYPE_VHD and \
                self.sr.__dict__.get("vhds") and self.sr.vhds.get(vdi_uuid):
            # VHD info already preloaded: use it instead of querying directly
            vhdInfo = self.sr.vhds[vdi_uuid]
            self.utilisation = vhdInfo.sizePhys
            self.size = vhdInfo.sizeVirt
            self.hidden = vhdInfo.hidden
            if self.hidden:
                self.managed = False
            self.parent = vhdInfo.parentUuid
            if self.parent:
                self.sm_config_override = {'vhd-parent': self.parent}
            else:
                self.sm_config_override = {'vhd-parent': None}
            return

        try:
            # Change to the SR directory in case parent
            # locator field path has changed
            os.chdir(self.sr.path)
        except Exception as chdir_exception:
            util.SMlog("Unable to change to SR directory, SR unavailable, %s" %
                    str(chdir_exception))
            raise xs_errors.XenError('SRUnavailable', opterr=str(chdir_exception))

        if util.ioretry(
                lambda: util.pathexists(self.path),
                errlist=[errno.EIO, errno.ENOENT]):
            try:
                st = util.ioretry(lambda: os.stat(self.path),
                        errlist=[errno.EIO, errno.ENOENT])
                self.utilisation = int(st.st_size)
            except util.CommandException as inst:
                if inst.code == errno.EIO:
                    raise xs_errors.XenError('VDILoad', \
                            opterr='Failed load VDI information %s' % self.path)
                else:
                    util.SMlog("Stat failed for %s, %s" % (
                        self.path, str(inst)))
                    raise xs_errors.XenError('VDIType', \
                            opterr='Invalid VDI type %s' % self.vdi_type)

            if self.vdi_type == vhdutil.VDI_TYPE_RAW:
                # Raw images: virtual size equals physical size.
                self.exists = True
                self.size = self.utilisation
                self.sm_config_override = {'type': self.PARAM_RAW}
                return

            if self.vdi_type == CBTLOG_TAG:
                self.exists = True
                self.size = self.utilisation
                return

            try:
                # The VDI might be activated in R/W mode so the VHD footer
                # won't be valid, use the back-up one instead.
                diskinfo = util.ioretry(
                        lambda: self._query_info(self.path, True),
                        errlist=[errno.EIO, errno.ENOENT])

                if 'parent' in diskinfo:
                    self.parent = diskinfo['parent']
                    self.sm_config_override = {'vhd-parent': self.parent}
                else:
                    self.sm_config_override = {'vhd-parent': None}
                    self.parent = ''
                # vhd-util reports the virtual size in MiB.
                self.size = int(diskinfo['size']) * 1024 * 1024
                self.hidden = int(diskinfo['hidden'])
                if self.hidden:
                    self.managed = False
                self.exists = True
            except util.CommandException as inst:
                raise xs_errors.XenError('VDILoad', \
                        opterr='Failed load VDI information %s' % self.path)

574 

    def update(self, sr_uuid, vdi_location):
        """Reload this VDI from disk and refresh its XAPI database record."""
        self.load(vdi_location)
        vdi_ref = self.sr.srcmd.params['vdi_ref']
        self.sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
        self._db_update()

580 

581 def create(self, sr_uuid, vdi_uuid, size): 

582 if util.ioretry(lambda: util.pathexists(self.path)): 582 ↛ 583line 582 didn't jump to line 583, because the condition on line 582 was never true

583 raise xs_errors.XenError('VDIExists') 

584 

585 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 

586 try: 

587 size = vhdutil.validate_and_round_vhd_size(int(size)) 

588 mb = 1024 * 1024 

589 size_mb = size // mb 

590 util.ioretry(lambda: self._create(str(size_mb), self.path)) 

591 self.size = util.ioretry(lambda: self._query_v(self.path)) 

592 except util.CommandException as inst: 

593 raise xs_errors.XenError('VDICreate', 

594 opterr='error %d' % inst.code) 

595 else: 

596 f = open(self.path, 'w') 

597 f.truncate(int(size)) 

598 f.close() 

599 self.size = size 

600 

601 self.sr.added_vdi(self) 

602 

603 st = util.ioretry(lambda: os.stat(self.path)) 

604 self.utilisation = int(st.st_size) 

605 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

606 self.sm_config = {"type": self.PARAM_RAW} 

607 

608 self._db_introduce() 

609 self.sr._update(self.sr.uuid, self.size) 

610 return super(FileVDI, self).get_params() 

611 

    def delete(self, sr_uuid, vdi_uuid, data_only=False):
        """Delete the VDI's backing file and update SR/XAPI accounting.

        data_only -- True for VDI.data_destroy: remove the data but keep
                     the XAPI database record.
        """
        if not util.ioretry(lambda: util.pathexists(self.path)):
            # Nothing on disk; let the base class finish the bookkeeping.
            return super(FileVDI, self).delete(sr_uuid, vdi_uuid, data_only)

        if self.attached:
            raise xs_errors.XenError('VDIInUse')

        try:
            util.force_unlink(self.path)
        except Exception as e:
            raise xs_errors.XenError(
                    'VDIDelete',
                    opterr='Failed to unlink file during deleting VDI: %s' % str(e))

        self.sr.deleted_vdi(vdi_uuid)
        # If this is a data_destroy call, don't remove from XAPI db
        if not data_only:
            self._db_forget()
        # Shrink the SR's virtual allocation by the deleted VDI's size.
        self.sr._update(self.sr.uuid, -self.size)
        self.sr.lock.cleanupAll(vdi_uuid)
        self.sr._kickGC()
        return super(FileVDI, self).delete(sr_uuid, vdi_uuid, data_only)

634 

    def attach(self, sr_uuid, vdi_uuid):
        """Attach the VDI: verify the backing path and publish synthetic
        SCSI inquiry data to xenstore."""
        if self.path is None:
            self._find_path_with_retries(vdi_uuid)
        if not self._checkpath(self.path):
            raise xs_errors.XenError('VDIUnavailable', \
                    opterr='VDI %s unavailable %s' % (vdi_uuid, self.path))
        try:
            self.attached = True

            if not hasattr(self, 'xenstore_data'):
                self.xenstore_data = {}

            self.xenstore_data.update(scsiutil.update_XS_SCSIdata(vdi_uuid, \
                    scsiutil.gen_synthetic_page_data(vdi_uuid)))

            if self.sr.handles("file"):
                # XXX: PR-1255: if these are constants then they should
                # be returned by the attach API call, not persisted in the
                # pool database.
                self.xenstore_data['storage-type'] = 'ext'
            return super(FileVDI, self).attach(sr_uuid, vdi_uuid)
        except util.CommandException as inst:
            raise xs_errors.XenError('VDILoad', opterr='error %d' % inst.code)

658 

    def detach(self, sr_uuid, vdi_uuid):
        # Nothing to tear down for file-backed VDIs; just record the state.
        self.attached = False

661 

662 def resize(self, sr_uuid, vdi_uuid, size): 

663 if not self.exists: 

664 raise xs_errors.XenError('VDIUnavailable', \ 

665 opterr='VDI %s unavailable %s' % (vdi_uuid, self.path)) 

666 

667 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

668 raise xs_errors.XenError('Unimplemented') 

669 

670 if self.hidden: 

671 raise xs_errors.XenError('VDIUnavailable', opterr='hidden VDI') 

672 

673 if size < self.size: 

674 util.SMlog('vdi_resize: shrinking not supported: ' + \ 

675 '(current size: %d, new size: %d)' % (self.size, size)) 

676 raise xs_errors.XenError('VDISize', opterr='shrinking not allowed') 

677 

678 if size == self.size: 

679 return VDI.VDI.get_params(self) 

680 

681 # We already checked it is a VDI_TYPE_VHD 

682 size = vhdutil.validate_and_round_vhd_size(int(size)) 

683 

684 jFile = JOURNAL_FILE_PREFIX + self.uuid 

685 try: 

686 vhdutil.setSizeVirt(self.path, size, jFile) 

687 except: 

688 # Revert the operation 

689 vhdutil.revert(self.path, jFile) 

690 raise xs_errors.XenError('VDISize', opterr='resize operation failed') 

691 

692 old_size = self.size 

693 self.size = vhdutil.getSizeVirt(self.path) 

694 st = util.ioretry(lambda: os.stat(self.path)) 

695 self.utilisation = int(st.st_size) 

696 

697 self._db_update() 

698 self.sr._update(self.sr.uuid, self.size - old_size) 

699 super(FileVDI, self).resize_cbt(self.sr.uuid, self.uuid, self.size) 

700 return VDI.VDI.get_params(self) 

701 

    def clone(self, sr_uuid, vdi_uuid):
        # A clone is a double snapshot: both resulting leaves are children.
        return self._do_snapshot(sr_uuid, vdi_uuid, VDI.SNAPSHOT_DOUBLE)

704 

    def compose(self, sr_uuid, vdi1, vdi2):
        """Make *vdi1* the (hidden, unmanaged) VHD parent of this VDI
        (*vdi2*) and refresh the tapdisk chain."""
        if self.vdi_type != vhdutil.VDI_TYPE_VHD:
            raise xs_errors.XenError('Unimplemented')
        parent_fn = vdi1 + vhdutil.FILE_EXTN[vhdutil.VDI_TYPE_VHD]
        parent_path = os.path.join(self.sr.path, parent_fn)
        # NOTE(review): assert is stripped under 'python -O'; an explicit
        # check raising a XenError would be safer -- confirm before changing.
        assert(util.pathexists(parent_path))
        vhdutil.setParent(self.path, parent_path, False)
        vhdutil.setHidden(parent_path)
        self.sr.session.xenapi.VDI.set_managed(self.sr.srcmd.params['args'][0], False)
        util.pread2([vhdutil.VHD_UTIL, "modify", "-p", parent_path,
                "-n", self.path])
        # Tell tapdisk the chain has changed
        if not blktap2.VDI.tap_refresh(self.session, sr_uuid, vdi2):
            raise util.SMException("failed to refresh VDI %s" % self.uuid)
        util.SMlog("VDI.compose: relinked %s->%s" % (vdi2, vdi1))

720 

721 def reset_leaf(self, sr_uuid, vdi_uuid): 

722 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

723 raise xs_errors.XenError('Unimplemented') 

724 

725 # safety check 

726 if not vhdutil.hasParent(self.path): 

727 raise util.SMException("ERROR: VDI %s has no parent, " + \ 

728 "will not reset contents" % self.uuid) 

729 

730 vhdutil.killData(self.path) 

731 

    def _do_snapshot(self, sr_uuid, vdi_uuid, snap_type,
                     secondary=None, cbtlog=None):
        """Pause the VDI's tapdisk, take the snapshot, then unpause.

        cbtlog -- CBT log path; when set, record whether the disk was
                  inactive (consistent) at snapshot time.
        """
        # If cbt enabled, save file consistency state
        if cbtlog is not None:
            if blktap2.VDI.tap_status(self.session, vdi_uuid):
                # tapdisk active: the log may not be crash-consistent.
                consistency_state = False
            else:
                consistency_state = True
            util.SMlog("Saving log consistency state of %s for vdi: %s" %
                    (consistency_state, vdi_uuid))
        else:
            consistency_state = None

        if self.vdi_type != vhdutil.VDI_TYPE_VHD:
            raise xs_errors.XenError('Unimplemented')

        if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid):
            raise util.SMException("failed to pause VDI %s" % vdi_uuid)
        try:
            return self._snapshot(snap_type, cbtlog, consistency_state)
        finally:
            # Always unpause, even if the snapshot failed.
            blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary)

754 

    def _rename(self, src, dst):
        """Rename *src* to *dst* with I/O retries, logging the move."""
        util.SMlog("FileVDI._rename %s to %s" % (src, dst))
        util.ioretry(lambda: os.rename(src, dst))

758 

    def _link(self, src, dst):
        """Hardlink *src* to *dst*, logging the operation."""
        util.SMlog("FileVDI._link %s to %s" % (src, dst))
        os.link(src, dst)

762 

    def _unlink(self, path):
        """Remove *path*, logging the operation."""
        util.SMlog("FileVDI._unlink %s" % (path))
        os.unlink(path)

766 

767 def _create_new_parent(self, src, newsrc): 

768 if self.sr._check_hardlinks(): 

769 self._link(src, newsrc) 

770 else: 

771 self._rename(src, newsrc) 

772 

    def __fist_enospace(self):
        # Fault-injection helper: simulate ENOSPC (28) from vhd-util snapshot.
        raise util.CommandException(28, "vhd-util snapshot", reason="No space")

775 

    def _snapshot(self, snap_type, cbtlog=None, cbt_consistency=None):
        """Snapshot/clone this VDI by re-parenting it under a new base copy.

        The original file becomes the hidden base copy (``newsrc``); this
        VDI and, for SNAPSHOT_DOUBLE, a new user-visible leaf (``dst``) are
        created as VHD children of it.

        :param snap_type: VDI.SNAPSHOT_SINGLE, SNAPSHOT_DOUBLE or
            SNAPSHOT_INTERNAL; only SNAPSHOT_DOUBLE creates the extra
            user-visible leaf VDI.
        :param cbtlog: path to this VDI's CBT log if CBT is enabled,
            else None.
        :param cbt_consistency: forwarded to _cbt_snapshot.
        :return: get_params() of the new leaf VDI (falling back to the base
            VDI or self if no leaf was produced).
        :raise xs_errors.XenError: on hidden VDI, chain-depth, path or
            clone failures.
        """
        util.SMlog("FileVDI._snapshot for %s (type %s)" % (self.uuid, snap_type))

        # Journal arguments recorded via start_log_entry for crash recovery
        args = []
        args.append("vdi_clone")
        args.append(self.sr.uuid)
        args.append(self.uuid)

        dest = None
        dst = None
        if snap_type == VDI.SNAPSHOT_DOUBLE:
            dest = util.gen_uuid()
            dst = os.path.join(self.sr.path, "%s.%s" % (dest, self.vdi_type))
            args.append(dest)

        if self.hidden:
            raise xs_errors.XenError('VDIClone', opterr='hidden VDI')

        depth = vhdutil.getDepth(self.path)
        if depth == -1:
            raise xs_errors.XenError('VDIUnavailable', \
                  opterr='failed to get VHD depth')
        elif depth >= vhdutil.MAX_CHAIN_SIZE:
            raise xs_errors.XenError('SnapshotChainTooLong')

        # newsrc is the path the current file will live at once it becomes
        # the (hidden) base copy of the chain
        newuuid = util.gen_uuid()
        src = self.path
        newsrc = os.path.join(self.sr.path, "%s.%s" % (newuuid, self.vdi_type))
        newsrcname = "%s.%s" % (newuuid, self.vdi_type)

        if not self._checkpath(src):
            raise xs_errors.XenError('VDIUnavailable', \
                  opterr='VDI %s unavailable %s' % (self.uuid, src))

        # wkcfix: multiphase
        util.start_log_entry(self.sr.path, self.path, args)

        # We assume the filehandle has been released
        try:
            self._create_new_parent(src, newsrc)

            # Create the snapshot under a temporary name, then rename
            # it afterwards. This avoids a small window where it exists
            # but is invalid. We do not need to do this for
            # snap_type == VDI.SNAPSHOT_DOUBLE because dst never existed
            # before so nobody will try to query it.
            tmpsrc = "%s.%s" % (src, "new")
            # Fault injection site to fail the snapshot with ENOSPACE
            util.fistpoint.activate_custom_fn(
                "FileSR_fail_snap1",
                self.__fist_enospace)
            util.ioretry(lambda: self._snap(tmpsrc, newsrcname))
            self._rename(tmpsrc, src)
            if snap_type == VDI.SNAPSHOT_DOUBLE:
                # Fault injection site to fail the snapshot with ENOSPACE
                util.fistpoint.activate_custom_fn(
                    "FileSR_fail_snap2",
                    self.__fist_enospace)
                util.ioretry(lambda: self._snap(dst, newsrcname))
            # mark the original file (in this case, its newsrc)
            # as hidden so that it does not show up in subsequent scans
            util.ioretry(lambda: self._mark_hidden(newsrc))

            #Verify parent locator field of both children and delete newsrc if unused
            introduce_parent = True
            try:
                srcparent = util.ioretry(lambda: self._query_p_uuid(src))
                dstparent = None
                if snap_type == VDI.SNAPSHOT_DOUBLE:
                    dstparent = util.ioretry(lambda: self._query_p_uuid(dst))
                if srcparent != newuuid and \
                        (snap_type == VDI.SNAPSHOT_SINGLE or \
                        snap_type == VDI.SNAPSHOT_INTERNAL or \
                        dstparent != newuuid):
                    # Neither child actually points at newsrc, so the new
                    # parent is unused and must not be introduced
                    util.ioretry(lambda: self._unlink(newsrc))
                    introduce_parent = False
            except:
                # Best-effort check only: on query failure keep the parent
                pass

            # Introduce the new VDI records
            leaf_vdi = None
            if snap_type == VDI.SNAPSHOT_DOUBLE:
                leaf_vdi = VDI.VDI(self.sr, dest)  # user-visible leaf VDI
                leaf_vdi.read_only = False
                leaf_vdi.location = dest
                leaf_vdi.size = self.size
                leaf_vdi.utilisation = self.utilisation
                leaf_vdi.sm_config = {}
                leaf_vdi.sm_config['vhd-parent'] = dstparent
                # If the parent is encrypted set the key_hash
                # for the new snapshot disk
                vdi_ref = self.sr.srcmd.params['vdi_ref']
                sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
                if "key_hash" in sm_config:
                    leaf_vdi.sm_config['key_hash'] = sm_config['key_hash']
                # If we have CBT enabled on the VDI,
                # set CBT status for the new snapshot disk
                if cbtlog:
                    leaf_vdi.cbt_enabled = True

            base_vdi = None
            if introduce_parent:
                base_vdi = VDI.VDI(self.sr, newuuid)  # readonly parent
                base_vdi.label = "base copy"
                base_vdi.read_only = True
                base_vdi.location = newuuid
                base_vdi.size = self.size
                base_vdi.utilisation = self.utilisation
                base_vdi.sm_config = {}
                grandparent = util.ioretry(lambda: self._query_p_uuid(newsrc))
                if grandparent.find("no parent") == -1:
                    base_vdi.sm_config['vhd-parent'] = grandparent

            try:
                if snap_type == VDI.SNAPSHOT_DOUBLE:
                    leaf_vdi_ref = leaf_vdi._db_introduce()
                    util.SMlog("vdi_clone: introduced VDI: %s (%s)" % \
                            (leaf_vdi_ref, dest))

                if introduce_parent:
                    base_vdi_ref = base_vdi._db_introduce()
                    self.session.xenapi.VDI.set_managed(base_vdi_ref, False)
                    util.SMlog("vdi_clone: introduced VDI: %s (%s)" % (base_vdi_ref, newuuid))
                # Update this VDI's own record to point at its new parent
                vdi_ref = self.sr.srcmd.params['vdi_ref']
                sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
                sm_config['vhd-parent'] = srcparent
                self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config)
            except Exception as e:
                util.SMlog("vdi_clone: caught error during VDI.db_introduce: %s" % (str(e)))
                # Note it's too late to actually clean stuff up here: the base disk has
                # been marked as deleted already.
                util.end_log_entry(self.sr.path, self.path, ["error"])
                raise
        except util.CommandException as inst:
            # XXX: it might be too late if the base disk has been marked as deleted!
            self._clonecleanup(src, dst, newsrc)
            util.end_log_entry(self.sr.path, self.path, ["error"])
            raise xs_errors.XenError('VDIClone',
                  opterr='VDI clone failed error %d' % inst.code)

        # Update cbt files if user created snapshot (SNAPSHOT_DOUBLE)
        if snap_type == VDI.SNAPSHOT_DOUBLE and cbtlog:
            try:
                self._cbt_snapshot(dest, cbt_consistency)
            except:
                # CBT operation failed.
                util.end_log_entry(self.sr.path, self.path, ["error"])
                raise

        util.end_log_entry(self.sr.path, self.path, ["done"])
        if snap_type != VDI.SNAPSHOT_INTERNAL:
            self.sr._update(self.sr.uuid, self.size)
        # Return info on the new user-visible leaf VDI
        ret_vdi = leaf_vdi
        if not ret_vdi:
            ret_vdi = base_vdi
        if not ret_vdi:
            ret_vdi = self
        return ret_vdi.get_params()

935 

936 def get_params(self): 

937 if not self._checkpath(self.path): 

938 raise xs_errors.XenError('VDIUnavailable', \ 

939 opterr='VDI %s unavailable %s' % (self.uuid, self.path)) 

940 return super(FileVDI, self).get_params() 

941 

942 def _snap(self, child, parent): 

943 cmd = [SR.TAPDISK_UTIL, "snapshot", vhdutil.VDI_TYPE_VHD, child, parent] 

944 text = util.pread(cmd) 

945 

946 def _clonecleanup(self, src, dst, newsrc): 

947 try: 

948 if dst: 948 ↛ 952line 948 didn't jump to line 952, because the condition on line 948 was never false

949 util.ioretry(lambda: self._unlink(dst)) 

950 except util.CommandException as inst: 

951 pass 

952 try: 

953 if util.ioretry(lambda: util.pathexists(newsrc)): 953 ↛ exitline 953 didn't return from function '_clonecleanup', because the condition on line 953 was never false

954 stats = os.stat(newsrc) 

955 # Check if we have more than one link to newsrc 

956 if (stats.st_nlink > 1): 

957 util.ioretry(lambda: self._unlink(newsrc)) 

958 elif not self._is_hidden(newsrc): 958 ↛ exitline 958 didn't return from function '_clonecleanup', because the condition on line 958 was never false

959 self._rename(newsrc, src) 

960 except util.CommandException as inst: 

961 pass 

962 

963 def _checkpath(self, path): 

964 try: 

965 if not util.ioretry(lambda: util.pathexists(path)): 965 ↛ 966line 965 didn't jump to line 966, because the condition on line 965 was never true

966 return False 

967 return True 

968 except util.CommandException as inst: 

969 raise xs_errors.XenError('EIO', \ 

970 opterr='IO error checking path %s' % path) 

971 

972 def _query_v(self, path): 

973 cmd = [SR.TAPDISK_UTIL, "query", vhdutil.VDI_TYPE_VHD, "-v", path] 

974 return int(util.pread(cmd)) * 1024 * 1024 

975 

976 def _query_p_uuid(self, path): 

977 cmd = [SR.TAPDISK_UTIL, "query", vhdutil.VDI_TYPE_VHD, "-p", path] 

978 parent = util.pread(cmd) 

979 parent = parent[:-1] 

980 ls = parent.split('/') 

981 return ls[len(ls) - 1].replace(vhdutil.FILE_EXTN_VHD, '') 

982 

983 def _query_info(self, path, use_bkp_footer=False): 

984 diskinfo = {} 

985 qopts = '-vpf' 

986 if use_bkp_footer: 986 ↛ 988line 986 didn't jump to line 988, because the condition on line 986 was never false

987 qopts += 'b' 

988 cmd = [SR.TAPDISK_UTIL, "query", vhdutil.VDI_TYPE_VHD, qopts, path] 

989 txt = util.pread(cmd).split('\n') 

990 diskinfo['size'] = txt[0] 

991 lst = [txt[1].split('/')[-1].replace(vhdutil.FILE_EXTN_VHD, "")] 

992 for val in filter(util.exactmatch_uuid, lst): 992 ↛ 993line 992 didn't jump to line 993, because the loop on line 992 never started

993 diskinfo['parent'] = val 

994 diskinfo['hidden'] = txt[2].split()[1] 

995 return diskinfo 

996 

997 def _create(self, size, path): 

998 cmd = [SR.TAPDISK_UTIL, "create", vhdutil.VDI_TYPE_VHD, size, path] 

999 text = util.pread(cmd) 

1000 if self.key_hash: 1000 ↛ 1001line 1000 didn't jump to line 1001, because the condition on line 1000 was never true

1001 vhdutil.setKey(path, self.key_hash) 

1002 

1003 def _mark_hidden(self, path): 

1004 vhdutil.setHidden(path, True) 

1005 self.hidden = 1 

1006 

1007 def _is_hidden(self, path): 

1008 return vhdutil.getHidden(path) == 1 

1009 

1010 def extractUuid(path): 

1011 fileName = os.path.basename(path) 

1012 uuid = fileName.replace(vhdutil.FILE_EXTN_VHD, "") 

1013 return uuid 

1014 extractUuid = staticmethod(extractUuid) 

1015 

1016 def generate_config(self, sr_uuid, vdi_uuid): 

1017 """ 

1018 Generate the XML config required to attach and activate 

1019 a VDI for use when XAPI is not running. Attach and 

1020 activation is handled by vdi_attach_from_config below. 

1021 """ 

1022 util.SMlog("FileVDI.generate_config") 

1023 if not util.pathexists(self.path): 1023 ↛ 1024line 1023 didn't jump to line 1024, because the condition on line 1023 was never true

1024 raise xs_errors.XenError('VDIUnavailable') 

1025 resp = {} 

1026 resp['device_config'] = self.sr.dconf 

1027 resp['sr_uuid'] = sr_uuid 

1028 resp['vdi_uuid'] = vdi_uuid 

1029 resp['command'] = 'vdi_attach_from_config' 

1030 # Return the 'config' encoded within a normal XMLRPC response so that 

1031 # we can use the regular response/error parsing code. 

1032 config = xmlrpc.client.dumps(tuple([resp]), "vdi_attach_from_config") 

1033 return xmlrpc.client.dumps((config, ), "", True) 

1034 

1035 def attach_from_config(self, sr_uuid, vdi_uuid): 

1036 """ 

1037 Attach and activate a VDI using config generated by 

1038 vdi_generate_config above. This is used for cases such as 

1039 the HA state-file and the redo-log. 

1040 """ 

1041 util.SMlog("FileVDI.attach_from_config") 

1042 try: 

1043 if not util.pathexists(self.sr.path): 

1044 self.sr.attach(sr_uuid) 

1045 except: 

1046 util.logException("FileVDI.attach_from_config") 

1047 raise xs_errors.XenError( 

1048 'SRUnavailable', 

1049 opterr='Unable to attach from config' 

1050 ) 

1051 

1052 def _create_cbt_log(self): 

1053 # Create CBT log file 

1054 # Name: <vdi_uuid>.cbtlog 

1055 #Handle if file already exists 

1056 log_path = self._get_cbt_logpath(self.uuid) 

1057 open_file = open(log_path, "w+") 

1058 open_file.close() 

1059 return super(FileVDI, self)._create_cbt_log() 

1060 

1061 def _delete_cbt_log(self): 

1062 logPath = self._get_cbt_logpath(self.uuid) 

1063 try: 

1064 os.remove(logPath) 

1065 except OSError as e: 

1066 if e.errno != errno.ENOENT: 

1067 raise 

1068 

1069 def _cbt_log_exists(self, logpath): 

1070 return util.pathexists(logpath) 

1071 

1072 

class SharedFileSR(FileSR):
    """
    FileSR subclass for SRs that use shared network storage
    """

    def _check_writable(self):
        """
        Checks that the filesystem being used by the SR can be written to,
        raising an exception if it can't.

        Probes by creating a uniquely-named file in the SR path; the probe
        file is removed whether or not the write succeeded.
        """
        test_name = os.path.join(self.path, str(uuid4()))
        try:
            open(test_name, 'ab').close()
        except OSError as e:
            util.SMlog("Cannot write to SR file system: %s" % e)
            raise xs_errors.XenError('SharedFileSystemNoWrite')
        finally:
            util.force_unlink(test_name)

    def _raise_hardlink_error(self):
        # Fault-injection helper used by the FileSR_fail_hardlink fistpoint:
        # mimics the errno seen when the filesystem cannot create hardlinks.
        raise OSError(524, "Unknown error 524")

    def _check_hardlinks(self):
        """Return True if the SR's filesystem supports hardlinks.

        The answer is cached in an on-disk conf file (sm-hardlink.conf);
        when no cached value exists, probe by creating a temporary file
        and trying to hardlink it. On failure, warn and (when a session is
        available) raise a XenAPI message for the admin.
        """
        hardlink_conf = self._read_hardlink_conf()
        if hardlink_conf is not None:
            return hardlink_conf

        test_name = os.path.join(self.path, str(uuid4()))
        open(test_name, 'ab').close()

        link_name = '%s.new' % test_name
        try:
            # XSI-1100: Let tests simulate failure of the link operation
            util.fistpoint.activate_custom_fn(
                "FileSR_fail_hardlink",
                self._raise_hardlink_error)

            os.link(test_name, link_name)
            self._write_hardlink_conf(supported=True)
            return True
        except OSError:
            self._write_hardlink_conf(supported=False)

            msg = "File system for SR %s does not support hardlinks, crash " \
                "consistency of snapshots cannot be assured" % self.uuid
            util.SMlog(msg, priority=util.LOG_WARNING)
            # Note: session can be not set during attach/detach_from_config calls.
            if self.session:
                try:
                    self.session.xenapi.message.create(
                        "sr_does_not_support_hardlinks", 2, "SR", self.uuid,
                        msg)
                except XenAPI.Failure:
                    # Might already be set and checking has TOCTOU issues
                    pass
        finally:
            # Clean up both probe files regardless of outcome
            util.force_unlink(link_name)
            util.force_unlink(test_name)

        return False

    def _get_hardlink_conf_path(self):
        # Per-SR cache file recording whether hardlinks are supported
        return os.path.join(self.path, 'sm-hardlink.conf')

    def _read_hardlink_conf(self):
        """Read the cached hardlink-support flag.

        Returns True/False when a valid cached value exists, or None when
        the answer is unknown and a fresh probe should be made.
        """
        try:
            with open(self._get_hardlink_conf_path(), 'r') as f:
                try:
                    return bool(int(f.read()))
                except Exception as e:
                    # If we can't read, assume the file is empty and test for hardlink support.
                    return None
        except IOError as e:
            if e.errno == errno.ENOENT:
                # If the config file doesn't exist, assume we want to support hardlinks.
                return None
            util.SMlog('Failed to read hardlink conf: {}'.format(e))
            # Can be caused by a concurrent access, not a major issue.
            return None

    def _write_hardlink_conf(self, supported):
        """Persist the hardlink-support flag ('1' or '0') to the conf file."""
        try:
            with open(self._get_hardlink_conf_path(), 'w') as f:
                f.write('1' if supported else '0')
        except Exception as e:
            # Can be caused by a concurrent access, not a major issue.
            util.SMlog('Failed to write hardlink conf: {}'.format(e))

1160 

# When executed as a script (by the SM backend plumbing), hand control to
# SRCommand to drive the FileSR driver; when imported as a module, register
# FileSR with the SR driver registry instead.
if __name__ == '__main__':
    SRCommand.run(FileSR, DRIVER_INFO)
else:
    SR.registerSR(FileSR)