Hide keyboard shortcuts

Hot-keys on this page

r m x p   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

1#!/usr/bin/python3 

2# 

3# Copyright (C) Citrix Systems Inc. 

4# 

5# This program is free software; you can redistribute it and/or modify 

6# it under the terms of the GNU Lesser General Public License as published 

7# by the Free Software Foundation; version 2.1 only. 

8# 

9# This program is distributed in the hope that it will be useful, 

10# but WITHOUT ANY WARRANTY; without even the implied warranty of 

11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

12# GNU Lesser General Public License for more details. 

13# 

14# You should have received a copy of the GNU Lesser General Public License 

15# along with this program; if not, write to the Free Software Foundation, Inc., 

16# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 

17# 

18# FileSR: local-file storage repository 

19 

20import SR 

21import VDI 

22import SRCommand 

23import util 

24import scsiutil 

25import vhdutil 

26import os 

27import errno 

28import xs_errors 

29import cleanup 

30import blktap2 

31import time 

32import glob 

33from uuid import uuid4 

34from lock import Lock 

35import xmlrpc.client 

36import XenAPI # pylint: disable=import-error 

37from constants import CBTLOG_TAG 

38 

# Parent-uuid -> [child uuids] map populated during VDI scans.
# (Historical spelling "geneology" kept: it is module state other code uses.)
geneology = {}

# Operations this driver advertises to XAPI.
CAPABILITIES = [
    "SR_PROBE", "SR_UPDATE",
    "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH",
    "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "VDI_MIRROR",
    "VDI_GENERATE_CONFIG", "ATOMIC_PAUSE", "VDI_CONFIG_CBT",
    "VDI_ACTIVATE", "VDI_DEACTIVATE", "THIN_PROVISIONING",
]

CONFIGURATION = [['location', 'local directory path (required)']]

DRIVER_INFO = {
    'name': 'Local Path VHD',
    'description': 'SR plugin which represents disks as VHD files stored on a local path',
    'vendor': 'Citrix Systems Inc',
    'copyright': '(C) 2008 Citrix Systems Inc',
    'driver_version': '1.0',
    'required_api_version': '1.0',
    'capabilities': CAPABILITIES,
    'configuration': CONFIGURATION,
}

# Prefix of the per-VDI journal files used to make VHD resizes revertible.
JOURNAL_FILE_PREFIX = ".journal-"

# Operations which must hold the SR-wide lock exclusively.
OPS_EXCLUSIVE = [
    "sr_create", "sr_delete", "sr_probe", "sr_attach", "sr_detach",
    "sr_scan", "vdi_init", "vdi_create", "vdi_delete", "vdi_attach",
    "vdi_detach", "vdi_resize_online", "vdi_snapshot", "vdi_clone",
]

DRIVER_CONFIG = {"ATTACH_FROM_CONFIG_WITH_TAPDISK": True}

67 

68 

class FileSR(SR.SR):
    """Local file storage repository.

    Represents an SR as a directory of VHD/raw files.  While attached, the
    configured 'location' directory is bind-mounted at
    SR.MOUNT_BASE/<sr_uuid> (self.path).
    """

    SR_TYPE = "file"

    def handles(srtype):
        """Driver-registration hook: this driver claims SRs of type 'file'."""
        return srtype == 'file'
    handles = staticmethod(handles)

    def _check_o_direct(self):
        # Decide whether tapdisk should use O_DIRECT from the SR's
        # other-config; default to True when no session/SR ref is available.
        if self.sr_ref and self.session is not None:
            other_config = self.session.xenapi.SR.get_other_config(self.sr_ref)
            o_direct = other_config.get("o_direct")
            self.o_direct = o_direct is not None and o_direct == "true"
        else:
            self.o_direct = True

    def __init__(self, srcmd, sr_uuid):
        # We call SR.SR.__init__ explicitly because
        # "super" sometimes failed due to circular imports
        SR.SR.__init__(self, srcmd, sr_uuid)
        self._check_o_direct()

    def load(self, sr_uuid):
        """Initialise per-command state from the device config.

        Raises ConfigLocationMissing if the required 'location' key is
        absent or empty.
        """
        self.ops_exclusive = OPS_EXCLUSIVE
        self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
        self.sr_vditype = vhdutil.VDI_TYPE_VHD
        if 'location' not in self.dconf or not self.dconf['location']:
            raise xs_errors.XenError('ConfigLocationMissing')
        self.remotepath = self.dconf['location']
        self.path = os.path.join(SR.MOUNT_BASE, sr_uuid)
        # For plain file SRs, link path and mountpoint coincide with path;
        # SMB subclasses distinguish them.
        self.linkpath = self.path
        self.mountpoint = self.path
        self.attached = False
        self.driver_config = DRIVER_CONFIG

    def create(self, sr_uuid, size):
        """ Create the SR.  The path must not already exist, or if it does,
        it must be empty.  (This accounts for the case where the user has
        mounted a device onto a directory manually and want to use this as the
        root of a file-based SR.) """
        try:
            if util.ioretry(lambda: util.pathexists(self.remotepath)):
                if len(util.ioretry(lambda: util.listdir(self.remotepath))) != 0:
                    raise xs_errors.XenError('SRExists')
            else:
                try:
                    util.ioretry(lambda: os.mkdir(self.remotepath))
                except util.CommandException as inst:
                    if inst.code == errno.EEXIST:
                        raise xs_errors.XenError('SRExists')
                    else:
                        raise xs_errors.XenError('FileSRCreate', \
                              opterr='directory creation failure %d' \
                              % inst.code)
        except:
            # NOTE(review): this bare except also catches the SRExists
            # errors raised above and re-reports them as FileSRCreate —
            # confirm before tightening.
            raise xs_errors.XenError('FileSRCreate')

    def delete(self, sr_uuid):
        """Delete the SR: refuse if live VDIs remain, then empty the directory."""
        self.attach(sr_uuid)
        cleanup.gc_force(self.session, self.uuid)

        # check to make sure no VDIs are present; then remove old
        # files that are non VDI's
        try:
            if util.ioretry(lambda: util.pathexists(self.path)):
                #Load the VDI list
                self._loadvdis()
                for uuid in self.vdis:
                    if not self.vdis[uuid].deleted:
                        raise xs_errors.XenError('SRNotEmpty', \
                              opterr='VDIs still exist in SR')

                # remove everything else, there are no vdi's
                for name in util.ioretry(lambda: util.listdir(self.path)):
                    fullpath = os.path.join(self.path, name)
                    try:
                        util.ioretry(lambda: os.unlink(fullpath))
                    except util.CommandException as inst:
                        # Tolerate already-gone files and subdirectories.
                        if inst.code != errno.ENOENT and \
                           inst.code != errno.EISDIR:
                            raise xs_errors.XenError('FileSRDelete', \
                                  opterr='failed to remove %s error %d' \
                                  % (fullpath, inst.code))
            self.detach(sr_uuid)
        except util.CommandException as inst:
            # Always detach, even on failure, before reporting the error.
            self.detach(sr_uuid)
            raise xs_errors.XenError('FileSRDelete', \
                  opterr='error %d' % inst.code)

    def attach(self, sr_uuid, bind=True):
        """Mount the SR directory at self.path (bind-mount by default)."""
        if not self._checkmount():
            try:
                #  make a mountpoint:
                util.ioretry(lambda: util.makedirs(self.path, mode=0o700))
            except util.CommandException as inst:
                if inst.code != errno.EEXIST:
                    raise xs_errors.XenError("FileSRCreate", \
                          opterr='fail to create mount point. Errno is %s' % inst.code)
            try:
                cmd = ["mount", self.remotepath, self.path]
                if bind:
                    cmd.append("--bind")
                util.pread(cmd)
                os.chmod(self.path, mode=0o0700)
            except util.CommandException as inst:
                raise xs_errors.XenError('FileSRCreate', \
                      opterr='fail to mount FileSR. Errno is %s' % inst.code)
        self.attached = True

    def detach(self, sr_uuid):
        """Unmount the SR and remove the mountpoint; aborts GC/coalesce first."""
        if self._checkmount():
            try:
                util.SMlog("Aborting GC/coalesce")
                cleanup.abort(self.uuid)
                # Move out of the mountpoint so umount can succeed.
                os.chdir(SR.MOUNT_BASE)
                util.pread(["umount", self.path])
                os.rmdir(self.path)
            except Exception as e:
                raise xs_errors.XenError('SRInUse', opterr=str(e))
        self.attached = False

    def scan(self, sr_uuid):
        """Scan the SR directory and synchronise VDI records with XAPI."""
        if not self._checkmount():
            raise xs_errors.XenError('SRUnavailable', \
                  opterr='no such directory %s' % self.path)

        if not self.vdis:
            self._loadvdis()

        if not self.passthrough:
            self.physical_size = self._getsize()
            self.physical_utilisation = self._getutilisation()

        # Drop records for VDIs deleted since the last load.
        for uuid in list(self.vdis.keys()):
            if self.vdis[uuid].deleted:
                del self.vdis[uuid]

        # CA-15607: make sure we are robust to the directory being unmounted beneath
        # us (eg by a confused user). Without this we might forget all our VDI references
        # which would be a shame.
        # For SMB SRs, this path is mountpoint
        mount_path = self.path
        if self.handles("smb"):
            mount_path = self.mountpoint

        if not self.handles("file") and not os.path.ismount(mount_path):
            util.SMlog("Error: FileSR.scan called but directory %s isn't a mountpoint" % mount_path)
            raise xs_errors.XenError('SRUnavailable', \
                  opterr='not mounted %s' % mount_path)

        self._kickGC()

        # default behaviour from here on
        super(FileSR, self).scan(sr_uuid)

    def update(self, sr_uuid):
        """Refresh the SR's size/utilisation figures in the XAPI database."""
        if not self._checkmount():
            raise xs_errors.XenError('SRUnavailable', \
                  opterr='no such directory %s' % self.path)
        self._update(sr_uuid, 0)

    def _update(self, sr_uuid, virt_alloc_delta):
        # Apply a delta to the recorded virtual allocation and push the
        # new physical size/utilisation to the database.
        valloc = int(self.session.xenapi.SR.get_virtual_allocation(self.sr_ref))
        self.virtual_allocation = valloc + virt_alloc_delta
        self.physical_size = self._getsize()
        self.physical_utilisation = self._getutilisation()
        self._db_update()

    def content_type(self, sr_uuid):
        return super(FileSR, self).content_type(sr_uuid)

    def vdi(self, uuid):
        """Factory for this SR's VDI objects."""
        return FileVDI(self, uuid)

    def added_vdi(self, vdi):
        self.vdis[vdi.uuid] = vdi

    def deleted_vdi(self, uuid):
        if uuid in self.vdis:
            del self.vdis[uuid]

    def replay(self, uuid):
        """Replay the SR's operation log (filelog.txt) to recover state."""
        try:
            file = open(self.path + "/filelog.txt", "r")
            data = file.readlines()
            file.close()
            self._process_replay(data)
        except:
            raise xs_errors.XenError('SRLog')

    def _loadvdis(self):
        """Populate self.vdis from the VHD, raw and CBT-log files on disk."""
        if self.vdis:
            return

        pattern = os.path.join(self.path, "*%s" % vhdutil.FILE_EXTN_VHD)
        try:
            self.vhds = vhdutil.getAllVHDs(pattern, FileVDI.extractUuid)
        except util.CommandException as inst:
            raise xs_errors.XenError('SRScan', opterr="error VHD-scanning " \
                    "path %s (%s)" % (self.path, inst))
        try:
            # Best-effort cross-check of the VHD scan against a plain glob;
            # a mismatch is only logged.
            list_vhds = [FileVDI.extractUuid(v) for v in util.ioretry(lambda: glob.glob(pattern))]
            if len(self.vhds) != len(list_vhds):
                util.SMlog("VHD scan returns %d VHDs: %s" % (len(self.vhds), sorted(self.vhds)))
                util.SMlog("VHD list returns %d VHDs: %s" % (len(list_vhds), sorted(list_vhds)))
        except:
            pass
        for uuid in self.vhds.keys():
            if self.vhds[uuid].error:
                raise xs_errors.XenError('SRScan', opterr='uuid=%s' % uuid)
            self.vdis[uuid] = self.vdi(uuid)
            # Get the key hash of any encrypted VDIs:
            vhd_path = os.path.join(self.path, self.vhds[uuid].path)
            key_hash = vhdutil.getKeyHash(vhd_path)
            self.vdis[uuid].sm_config_override['key_hash'] = key_hash

        # raw VDIs and CBT log files
        files = util.ioretry(lambda: util.listdir(self.path))
        for fn in files:
            if fn.endswith(vhdutil.FILE_EXTN_RAW):
                uuid = fn[:-(len(vhdutil.FILE_EXTN_RAW))]
                self.vdis[uuid] = self.vdi(uuid)
            elif fn.endswith(CBTLOG_TAG):
                cbt_uuid = fn.split(".")[0]
                # If an associated disk exists, update CBT status
                # else create new VDI of type cbt_metadata
                if cbt_uuid in self.vdis:
                    self.vdis[cbt_uuid].cbt_enabled = True
                else:
                    new_vdi = self.vdi(cbt_uuid)
                    new_vdi.ty = "cbt_metadata"
                    new_vdi.cbt_enabled = True
                    self.vdis[cbt_uuid] = new_vdi

        # Mark parent VDIs as Read-only and generate virtual allocation
        self.virtual_allocation = 0
        for uuid, vdi in self.vdis.items():
            if vdi.parent:
                if vdi.parent in self.vdis:
                    self.vdis[vdi.parent].read_only = True
                if vdi.parent in geneology:
                    geneology[vdi.parent].append(uuid)
                else:
                    geneology[vdi.parent] = [uuid]
            if not vdi.hidden:
                self.virtual_allocation += (vdi.size)

        # now remove all hidden leaf nodes from self.vdis so that they are not
        # introduced into the Agent DB when SR is synchronized. With the
        # asynchronous GC, a deleted VDI might stay around until the next
        # SR.scan, so if we don't ignore hidden leaves we would pick up
        # freshly-deleted VDIs as newly-added VDIs
        for uuid in list(self.vdis.keys()):
            if uuid not in geneology and self.vdis[uuid].hidden:
                util.SMlog("Scan found hidden leaf (%s), ignoring" % uuid)
                del self.vdis[uuid]

    def _getsize(self):
        # SMB SRs report the size of the link path, not the mountpoint.
        path = self.path
        if self.handles("smb"):
            path = self.linkpath
        return util.get_fs_size(path)

    def _getutilisation(self):
        return util.get_fs_utilisation(self.path)

    def _replay(self, logentry):
        # all replay commands have the same 5,6,7th arguments
        # vdi_command, sr-uuid, vdi-uuid
        back_cmd = logentry[5].replace("vdi_", "")
        target = self.vdi(logentry[7])
        cmd = getattr(target, back_cmd)
        args = []
        for item in logentry[6:]:
            item = item.replace("\n", "")
            args.append(item)
        ret = cmd( * args)
        if ret:
            print(ret)

    def _compare_args(self, a, b):
        # Return 0 when b is the matching end/error record for log entry a,
        # 1 otherwise (including malformed entries).
        try:
            if a[2] != "log:":
                return 1
            if b[2] != "end:" and b[2] != "error:":
                return 1
            if a[3] != b[3]:
                return 1
            if a[4] != b[4]:
                return 1
            return 0
        except:
            return 1

    def _process_replay(self, data):
        """Replay every logged operation that has no matching end/error record."""
        logentries = []
        for logentry in data:
            logentry = logentry.split(" ")
            logentries.append(logentry)
        # we are looking for a log entry that has a log but no end or error
        # wkcfix -- recreate (adjusted) logfile
        index = 0
        while index < len(logentries) - 1:
            if self._compare_args(logentries[index], logentries[index + 1]):
                self._replay(logentries[index])
            else:
                # skip the paired one
                index += 1
            # next
            index += 1

    def _kickGC(self):
        # Fire-and-forget: ask the cleanup daemon to run GC for this SR.
        util.SMlog("Kicking GC")
        cleanup.start_gc_service(self.uuid)

    def _isbind(self):
        # os.path.ismount can't deal with bind mount
        st1 = os.stat(self.path)
        st2 = os.stat(self.remotepath)
        return st1.st_dev == st2.st_dev and st1.st_ino == st2.st_ino

    def _checkmount(self):
        """True if the SR directory exists and is a mount (or bind-mount)."""
        mount_path = self.path
        if self.handles("smb"):
            mount_path = self.mountpoint

        return util.ioretry(lambda: util.pathexists(mount_path) and \
                            (util.ismount(mount_path) or \
                             util.pathexists(self.remotepath) and self._isbind()))

    # Override in SharedFileSR.
    def _check_hardlinks(self):
        return True

402 

class FileVDI(VDI.VDI):
    """VDI backed by a file (VHD, raw, or CBT metadata) in a FileSR."""

    # File extensions / user-facing type names.
    PARAM_VHD = "vhd"
    PARAM_RAW = "raw"
    # Maps the user-facing type parameter to vhdutil's internal VDI type.
    VDI_TYPE = {
        PARAM_VHD: vhdutil.VDI_TYPE_VHD,
        PARAM_RAW: vhdutil.VDI_TYPE_RAW
    }

410 

411 def _find_path_with_retries(self, vdi_uuid, maxretry=5, period=2.0): 

412 vhd_path = os.path.join(self.sr.path, "%s.%s" % \ 

413 (vdi_uuid, self.PARAM_VHD)) 

414 raw_path = os.path.join(self.sr.path, "%s.%s" % \ 

415 (vdi_uuid, self.PARAM_RAW)) 

416 cbt_path = os.path.join(self.sr.path, "%s.%s" % 

417 (vdi_uuid, CBTLOG_TAG)) 

418 found = False 

419 tries = 0 

420 while tries < maxretry and not found: 

421 tries += 1 

422 if util.ioretry(lambda: util.pathexists(vhd_path)): 

423 self.vdi_type = vhdutil.VDI_TYPE_VHD 

424 self.path = vhd_path 

425 found = True 

426 elif util.ioretry(lambda: util.pathexists(raw_path)): 

427 self.vdi_type = vhdutil.VDI_TYPE_RAW 

428 self.path = raw_path 

429 self.hidden = False 

430 found = True 

431 elif util.ioretry(lambda: util.pathexists(cbt_path)): 431 ↛ 432line 431 didn't jump to line 432, because the condition on line 431 was never true

432 self.vdi_type = CBTLOG_TAG 

433 self.path = cbt_path 

434 self.hidden = False 

435 found = True 

436 

437 if not found: 

438 util.SMlog("VHD %s not found, retry %s of %s" % (vhd_path, tries, maxretry)) 

439 time.sleep(period) 

440 

441 return found 

442 

    def load(self, vdi_uuid):
        """Populate this VDI object's state for the current SM command.

        For vdi_create the path/type come from vdi_sm_config; otherwise the
        backing file is located on disk (with retries).  Size/utilisation
        are then read either from the SR's preloaded VHD scan results or
        by querying the file directly.

        Raises:
            xs_errors.XenError('VDIType'): invalid requested type, or stat
                failed for a reason other than EIO.
            xs_errors.XenError('VDIUnavailable'): backing file not found.
            xs_errors.XenError('SRUnavailable'): cannot chdir to the SR.
            xs_errors.XenError('VDILoad'): VHD query / stat EIO failure.
        """
        self.lock = self.sr.lock

        self.sr.srcmd.params['o_direct'] = self.sr.o_direct

        if self.sr.srcmd.cmd == "vdi_create":
            # Defaults for a new VDI; may be overridden by vdi_sm_config.
            self.vdi_type = vhdutil.VDI_TYPE_VHD
            self.key_hash = None
            if "vdi_sm_config" in self.sr.srcmd.params:
                if "key_hash" in self.sr.srcmd.params["vdi_sm_config"]:
                    self.key_hash = self.sr.srcmd.params["vdi_sm_config"]["key_hash"]

                if "type" in self.sr.srcmd.params["vdi_sm_config"]:
                    vdi_type = self.sr.srcmd.params["vdi_sm_config"]["type"]
                    if not self.VDI_TYPE.get(vdi_type):
                        raise xs_errors.XenError('VDIType',
                                opterr='Invalid VDI type %s' % vdi_type)
                    self.vdi_type = self.VDI_TYPE[vdi_type]
            self.path = os.path.join(self.sr.path, "%s%s" %
                    (vdi_uuid, vhdutil.FILE_EXTN[self.vdi_type]))
        else:
            found = self._find_path_with_retries(vdi_uuid)
            if not found:
                if self.sr.srcmd.cmd == "vdi_delete":
                    # Could be delete for CBT log file
                    self.path = os.path.join(self.sr.path, "%s.%s" %
                            (vdi_uuid, self.PARAM_VHD))
                    return
                if self.sr.srcmd.cmd == "vdi_attach_from_config":
                    return
                raise xs_errors.XenError('VDIUnavailable',
                        opterr="VDI %s not found" % vdi_uuid)


        if self.vdi_type == vhdutil.VDI_TYPE_VHD and \
                self.sr.__dict__.get("vhds") and self.sr.vhds.get(vdi_uuid):
            # VHD info already preloaded: use it instead of querying directly
            vhdInfo = self.sr.vhds[vdi_uuid]
            self.utilisation = vhdInfo.sizePhys
            self.size = vhdInfo.sizeVirt
            self.hidden = vhdInfo.hidden
            if self.hidden:
                self.managed = False
            self.parent = vhdInfo.parentUuid
            if self.parent:
                self.sm_config_override = {'vhd-parent': self.parent}
            else:
                self.sm_config_override = {'vhd-parent': None}
            return

        try:
            # Change to the SR directory in case parent
            # locator field path has changed
            os.chdir(self.sr.path)
        except Exception as chdir_exception:
            util.SMlog("Unable to change to SR directory, SR unavailable, %s" %
                    str(chdir_exception))
            raise xs_errors.XenError('SRUnavailable', opterr=str(chdir_exception))

        if util.ioretry(
                lambda: util.pathexists(self.path),
                errlist=[errno.EIO, errno.ENOENT]):
            try:
                st = util.ioretry(lambda: os.stat(self.path),
                        errlist=[errno.EIO, errno.ENOENT])
                self.utilisation = int(st.st_size)
            except util.CommandException as inst:
                if inst.code == errno.EIO:
                    raise xs_errors.XenError('VDILoad', \
                          opterr='Failed load VDI information %s' % self.path)
                else:
                    util.SMlog("Stat failed for %s, %s" % (
                        self.path, str(inst)))
                    raise xs_errors.XenError('VDIType', \
                          opterr='Invalid VDI type %s' % self.vdi_type)

            if self.vdi_type == vhdutil.VDI_TYPE_RAW:
                # Raw files have no metadata: virtual size == file size.
                self.exists = True
                self.size = self.utilisation
                self.sm_config_override = {'type': self.PARAM_RAW}
                return

            if self.vdi_type == CBTLOG_TAG:
                self.exists = True
                self.size = self.utilisation
                return

            try:
                # The VDI might be activated in R/W mode so the VHD footer
                # won't be valid, use the back-up one instead.
                diskinfo = util.ioretry(
                    lambda: self._query_info(self.path, True),
                    errlist=[errno.EIO, errno.ENOENT])

                if 'parent' in diskinfo:
                    self.parent = diskinfo['parent']
                    self.sm_config_override = {'vhd-parent': self.parent}
                else:
                    self.sm_config_override = {'vhd-parent': None}
                    self.parent = ''
                # 'size' is reported in MiB by the query.
                self.size = int(diskinfo['size']) * 1024 * 1024
                self.hidden = int(diskinfo['hidden'])
                if self.hidden:
                    self.managed = False
                self.exists = True
            except util.CommandException as inst:
                raise xs_errors.XenError('VDILoad', \
                      opterr='Failed load VDI information %s' % self.path)

551 

552 def update(self, sr_uuid, vdi_location): 

553 self.load(vdi_location) 

554 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

555 self.sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

556 self._db_update() 

557 

558 def create(self, sr_uuid, vdi_uuid, size): 

559 if util.ioretry(lambda: util.pathexists(self.path)): 559 ↛ 560line 559 didn't jump to line 560, because the condition on line 559 was never true

560 raise xs_errors.XenError('VDIExists') 

561 

562 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 

563 try: 

564 size = vhdutil.validate_and_round_vhd_size(int(size)) 

565 mb = 1024 * 1024 

566 size_mb = size // mb 

567 util.ioretry(lambda: self._create(str(size_mb), self.path)) 

568 self.size = util.ioretry(lambda: self._query_v(self.path)) 

569 except util.CommandException as inst: 

570 raise xs_errors.XenError('VDICreate', 

571 opterr='error %d' % inst.code) 

572 else: 

573 f = open(self.path, 'w') 

574 f.truncate(int(size)) 

575 f.close() 

576 self.size = size 

577 

578 self.sr.added_vdi(self) 

579 

580 st = util.ioretry(lambda: os.stat(self.path)) 

581 self.utilisation = int(st.st_size) 

582 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

583 self.sm_config = {"type": self.PARAM_RAW} 

584 

585 self._db_introduce() 

586 self.sr._update(self.sr.uuid, self.size) 

587 return super(FileVDI, self).get_params() 

588 

    def delete(self, sr_uuid, vdi_uuid, data_only=False):
        """Delete the VDI's backing file and (unless data_only) its DB record.

        data_only=True corresponds to VDI.data_destroy: the file goes but
        the XAPI record is kept (e.g. for cbt_metadata VDIs).
        """
        # If the file is already gone, just run the base-class cleanup.
        if not util.ioretry(lambda: util.pathexists(self.path)):
            return super(FileVDI, self).delete(sr_uuid, vdi_uuid, data_only)

        if self.attached:
            raise xs_errors.XenError('VDIInUse')

        try:
            util.force_unlink(self.path)
        except Exception as e:
            raise xs_errors.XenError(
                'VDIDelete',
                opterr='Failed to unlink file during deleting VDI: %s' % str(e))

        self.sr.deleted_vdi(vdi_uuid)
        # If this is a data_destroy call, don't remove from XAPI db
        if not data_only:
            self._db_forget()
        # Reclaim the virtual allocation and let GC tidy the chain.
        self.sr._update(self.sr.uuid, -self.size)
        self.sr.lock.cleanupAll(vdi_uuid)
        self.sr._kickGC()
        return super(FileVDI, self).delete(sr_uuid, vdi_uuid, data_only)

611 

    def attach(self, sr_uuid, vdi_uuid):
        """Attach the VDI: validate its path and fill in xenstore data.

        Raises VDIUnavailable when the backing file fails the path check,
        VDILoad when the base-class attach fails with a CommandException.
        """
        if self.path is None:
            # load() may have returned early (e.g. attach_from_config).
            self._find_path_with_retries(vdi_uuid)
        if not self._checkpath(self.path):
            raise xs_errors.XenError('VDIUnavailable', \
                  opterr='VDI %s unavailable %s' % (vdi_uuid, self.path))
        try:
            self.attached = True

            if not hasattr(self, 'xenstore_data'):
                self.xenstore_data = {}

            # Publish synthetic SCSI identification data for the guest.
            self.xenstore_data.update(scsiutil.update_XS_SCSIdata(vdi_uuid, \
                  scsiutil.gen_synthetic_page_data(vdi_uuid)))

            if self.sr.handles("file"):
                # XXX: PR-1255: if these are constants then they should
                # be returned by the attach API call, not persisted in the
                # pool database.
                self.xenstore_data['storage-type'] = 'ext'
            return super(FileVDI, self).attach(sr_uuid, vdi_uuid)
        except util.CommandException as inst:
            raise xs_errors.XenError('VDILoad', opterr='error %d' % inst.code)

635 

    def detach(self, sr_uuid, vdi_uuid):
        """Mark the VDI detached; no file-level work is needed here."""
        self.attached = False

638 

    def resize(self, sr_uuid, vdi_uuid, size):
        """Grow a VHD VDI to *size* bytes (shrinking is refused).

        The virtual-size change is journalled so it can be reverted if
        setSizeVirt fails part-way.  Updates DB records and the SR's
        virtual allocation, and resizes any CBT log via resize_cbt.

        Raises:
            xs_errors.XenError('VDIUnavailable'): VDI missing or hidden.
            xs_errors.XenError('Unimplemented'): not a VHD VDI.
            xs_errors.XenError('VDISize'): shrink requested, or the resize
                operation failed (after being reverted).
        """
        if not self.exists:
            raise xs_errors.XenError('VDIUnavailable', \
                  opterr='VDI %s unavailable %s' % (vdi_uuid, self.path))

        if self.vdi_type != vhdutil.VDI_TYPE_VHD:
            raise xs_errors.XenError('Unimplemented')

        if self.hidden:
            raise xs_errors.XenError('VDIUnavailable', opterr='hidden VDI')

        if size < self.size:
            util.SMlog('vdi_resize: shrinking not supported: ' + \
                    '(current size: %d, new size: %d)' % (self.size, size))
            raise xs_errors.XenError('VDISize', opterr='shrinking not allowed')

        if size == self.size:
            # No-op resize: just report current params.
            return VDI.VDI.get_params(self)

        # We already checked it is a VDI_TYPE_VHD
        size = vhdutil.validate_and_round_vhd_size(int(size))

        jFile = JOURNAL_FILE_PREFIX + self.uuid
        try:
            vhdutil.setSizeVirt(self.path, size, jFile)
        except:
            # Revert the operation
            vhdutil.revert(self.path, jFile)
            raise xs_errors.XenError('VDISize', opterr='resize operation failed')

        old_size = self.size
        self.size = vhdutil.getSizeVirt(self.path)
        st = util.ioretry(lambda: os.stat(self.path))
        self.utilisation = int(st.st_size)

        self._db_update()
        self.sr._update(self.sr.uuid, self.size - old_size)
        super(FileVDI, self).resize_cbt(self.sr.uuid, self.uuid, self.size)
        return VDI.VDI.get_params(self)

678 

    def clone(self, sr_uuid, vdi_uuid):
        """Clone the VDI: a double snapshot producing a new writable leaf."""
        return self._do_snapshot(sr_uuid, vdi_uuid, VDI.SNAPSHOT_DOUBLE)

681 

    def compose(self, sr_uuid, vdi1, vdi2):
        """Make VHD *vdi1* the (hidden, unmanaged) parent of this VDI (*vdi2*).

        Raises Unimplemented for non-VHD VDIs and SMException if tapdisk
        cannot be told about the changed chain.
        """
        if self.vdi_type != vhdutil.VDI_TYPE_VHD:
            raise xs_errors.XenError('Unimplemented')
        parent_fn = vdi1 + vhdutil.FILE_EXTN[vhdutil.VDI_TYPE_VHD]
        parent_path = os.path.join(self.sr.path, parent_fn)
        assert(util.pathexists(parent_path))
        vhdutil.setParent(self.path, parent_path, False)
        vhdutil.setHidden(parent_path)
        self.sr.session.xenapi.VDI.set_managed(self.sr.srcmd.params['args'][0], False)
        util.pread2([vhdutil.VHD_UTIL, "modify", "-p", parent_path,
            "-n", self.path])
        # Tell tapdisk the chain has changed
        if not blktap2.VDI.tap_refresh(self.session, sr_uuid, vdi2):
            raise util.SMException("failed to refresh VDI %s" % self.uuid)
        util.SMlog("VDI.compose: relinked %s->%s" % (vdi2, vdi1))

697 

698 def reset_leaf(self, sr_uuid, vdi_uuid): 

699 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

700 raise xs_errors.XenError('Unimplemented') 

701 

702 # safety check 

703 if not vhdutil.hasParent(self.path): 

704 raise util.SMException("ERROR: VDI %s has no parent, " + \ 

705 "will not reset contents" % self.uuid) 

706 

707 vhdutil.killData(self.path) 

708 

    def _do_snapshot(self, sr_uuid, vdi_uuid, snap_type,
                     secondary=None, cbtlog=None):
        """Pause tapdisk, take a snapshot, and always unpause afterwards.

        When CBT is in use (cbtlog is not None), record whether the VDI was
        inactive (consistent) at the time of the snapshot.
        """
        # If cbt enabled, save file consistency state
        if cbtlog is not None:
            # tap_status True means the disk is attached, so its log is
            # not guaranteed consistent.
            if blktap2.VDI.tap_status(self.session, vdi_uuid):
                consistency_state = False
            else:
                consistency_state = True
            util.SMlog("Saving log consistency state of %s for vdi: %s" %
                    (consistency_state, vdi_uuid))
        else:
            consistency_state = None

        if self.vdi_type != vhdutil.VDI_TYPE_VHD:
            raise xs_errors.XenError('Unimplemented')

        if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid):
            raise util.SMException("failed to pause VDI %s" % vdi_uuid)
        try:
            return self._snapshot(snap_type, cbtlog, consistency_state)
        finally:
            # Unpause even if the snapshot failed.
            blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary)

731 

    def _rename(self, src, dst):
        """Rename src to dst within the SR, retrying on transient I/O errors."""
        util.SMlog("FileVDI._rename %s to %s" % (src, dst))
        util.ioretry(lambda: os.rename(src, dst))

735 

    def _link(self, src, dst):
        """Hard-link src to dst (both names then refer to the same file)."""
        util.SMlog("FileVDI._link %s to %s" % (src, dst))
        os.link(src, dst)

739 

    def _unlink(self, path):
        """Remove a file from the SR."""
        util.SMlog("FileVDI._unlink %s" % (path))
        os.unlink(path)

743 

744 def _create_new_parent(self, src, newsrc): 

745 if self.sr._check_hardlinks(): 

746 self._link(src, newsrc) 

747 else: 

748 self._rename(src, newsrc) 

749 

    def __fist_enospace(self):
        # Fault-injection helper: simulate vhd-util snapshot failing with
        # ENOSPC (error code 28).
        raise util.CommandException(28, "vhd-util snapshot", reason="No space")

752 

753 def _snapshot(self, snap_type, cbtlog=None, cbt_consistency=None): 

754 util.SMlog("FileVDI._snapshot for %s (type %s)" % (self.uuid, snap_type)) 

755 

756 args = [] 

757 args.append("vdi_clone") 

758 args.append(self.sr.uuid) 

759 args.append(self.uuid) 

760 

761 dest = None 

762 dst = None 

763 if snap_type == VDI.SNAPSHOT_DOUBLE: 763 ↛ 768line 763 didn't jump to line 768, because the condition on line 763 was never false

764 dest = util.gen_uuid() 

765 dst = os.path.join(self.sr.path, "%s.%s" % (dest, self.vdi_type)) 

766 args.append(dest) 

767 

768 if self.hidden: 768 ↛ 769line 768 didn't jump to line 769, because the condition on line 768 was never true

769 raise xs_errors.XenError('VDIClone', opterr='hidden VDI') 

770 

771 depth = vhdutil.getDepth(self.path) 

772 if depth == -1: 772 ↛ 773line 772 didn't jump to line 773, because the condition on line 772 was never true

773 raise xs_errors.XenError('VDIUnavailable', \ 

774 opterr='failed to get VHD depth') 

775 elif depth >= vhdutil.MAX_CHAIN_SIZE: 775 ↛ 776line 775 didn't jump to line 776, because the condition on line 775 was never true

776 raise xs_errors.XenError('SnapshotChainTooLong') 

777 

778 newuuid = util.gen_uuid() 

779 src = self.path 

780 newsrc = os.path.join(self.sr.path, "%s.%s" % (newuuid, self.vdi_type)) 

781 newsrcname = "%s.%s" % (newuuid, self.vdi_type) 

782 

783 if not self._checkpath(src): 783 ↛ 784line 783 didn't jump to line 784, because the condition on line 783 was never true

784 raise xs_errors.XenError('VDIUnavailable', \ 

785 opterr='VDI %s unavailable %s' % (self.uuid, src)) 

786 

787 # wkcfix: multiphase 

788 util.start_log_entry(self.sr.path, self.path, args) 

789 

790 # We assume the filehandle has been released 

791 try: 

792 self._create_new_parent(src, newsrc) 

793 

794 # Create the snapshot under a temporary name, then rename 

795 # it afterwards. This avoids a small window where it exists 

796 # but is invalid. We do not need to do this for 

797 # snap_type == VDI.SNAPSHOT_DOUBLE because dst never existed 

798 # before so nobody will try to query it. 

799 tmpsrc = "%s.%s" % (src, "new") 

800 # Fault injection site to fail the snapshot with ENOSPACE 

801 util.fistpoint.activate_custom_fn( 

802 "FileSR_fail_snap1", 

803 self.__fist_enospace) 

804 util.ioretry(lambda: self._snap(tmpsrc, newsrcname)) 

805 self._rename(tmpsrc, src) 

806 if snap_type == VDI.SNAPSHOT_DOUBLE: 806 ↛ 814line 806 didn't jump to line 814, because the condition on line 806 was never false

807 # Fault injection site to fail the snapshot with ENOSPACE 

808 util.fistpoint.activate_custom_fn( 

809 "FileSR_fail_snap2", 

810 self.__fist_enospace) 

811 util.ioretry(lambda: self._snap(dst, newsrcname)) 

812 # mark the original file (in this case, its newsrc) 

813 # as hidden so that it does not show up in subsequent scans 

814 util.ioretry(lambda: self._mark_hidden(newsrc)) 

815 

816 #Verify parent locator field of both children and delete newsrc if unused 

817 introduce_parent = True 

818 try: 

819 srcparent = util.ioretry(lambda: self._query_p_uuid(src)) 

820 dstparent = None 

821 if snap_type == VDI.SNAPSHOT_DOUBLE: 821 ↛ 823line 821 didn't jump to line 823, because the condition on line 821 was never false

822 dstparent = util.ioretry(lambda: self._query_p_uuid(dst)) 

823 if srcparent != newuuid and \ 823 ↛ 827line 823 didn't jump to line 827, because the condition on line 823 was never true

824 (snap_type == VDI.SNAPSHOT_SINGLE or \ 

825 snap_type == VDI.SNAPSHOT_INTERNAL or \ 

826 dstparent != newuuid): 

827 util.ioretry(lambda: self._unlink(newsrc)) 

828 introduce_parent = False 

829 except: 

830 pass 

831 

832 # Introduce the new VDI records 

833 leaf_vdi = None 

834 if snap_type == VDI.SNAPSHOT_DOUBLE: 834 ↛ 853line 834 didn't jump to line 853, because the condition on line 834 was never false

835 leaf_vdi = VDI.VDI(self.sr, dest) # user-visible leaf VDI 

836 leaf_vdi.read_only = False 

837 leaf_vdi.location = dest 

838 leaf_vdi.size = self.size 

839 leaf_vdi.utilisation = self.utilisation 

840 leaf_vdi.sm_config = {} 

841 leaf_vdi.sm_config['vhd-parent'] = dstparent 

842 # If the parent is encrypted set the key_hash 

843 # for the new snapshot disk 

844 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

845 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

846 if "key_hash" in sm_config: 846 ↛ 847line 846 didn't jump to line 847, because the condition on line 846 was never true

847 leaf_vdi.sm_config['key_hash'] = sm_config['key_hash'] 

848 # If we have CBT enabled on the VDI, 

849 # set CBT status for the new snapshot disk 

850 if cbtlog: 850 ↛ 851line 850 didn't jump to line 851, because the condition on line 850 was never true

851 leaf_vdi.cbt_enabled = True 

852 

853 base_vdi = None 

854 if introduce_parent: 854 ↛ 866line 854 didn't jump to line 866, because the condition on line 854 was never false

855 base_vdi = VDI.VDI(self.sr, newuuid) # readonly parent 

856 base_vdi.label = "base copy" 

857 base_vdi.read_only = True 

858 base_vdi.location = newuuid 

859 base_vdi.size = self.size 

860 base_vdi.utilisation = self.utilisation 

861 base_vdi.sm_config = {} 

862 grandparent = util.ioretry(lambda: self._query_p_uuid(newsrc)) 

863 if grandparent.find("no parent") == -1: 863 ↛ 866line 863 didn't jump to line 866, because the condition on line 863 was never false

864 base_vdi.sm_config['vhd-parent'] = grandparent 

865 

866 try: 

867 if snap_type == VDI.SNAPSHOT_DOUBLE: 867 ↛ 872line 867 didn't jump to line 872, because the condition on line 867 was never false

868 leaf_vdi_ref = leaf_vdi._db_introduce() 

869 util.SMlog("vdi_clone: introduced VDI: %s (%s)" % \ 

870 (leaf_vdi_ref, dest)) 

871 

872 if introduce_parent: 872 ↛ 876line 872 didn't jump to line 876, because the condition on line 872 was never false

873 base_vdi_ref = base_vdi._db_introduce() 

874 self.session.xenapi.VDI.set_managed(base_vdi_ref, False) 

875 util.SMlog("vdi_clone: introduced VDI: %s (%s)" % (base_vdi_ref, newuuid)) 

876 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

877 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

878 sm_config['vhd-parent'] = srcparent 

879 self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config) 

880 except Exception as e: 

881 util.SMlog("vdi_clone: caught error during VDI.db_introduce: %s" % (str(e))) 

882 # Note it's too late to actually clean stuff up here: the base disk has 

883 # been marked as deleted already. 

884 util.end_log_entry(self.sr.path, self.path, ["error"]) 

885 raise 

886 except util.CommandException as inst: 

887 # XXX: it might be too late if the base disk has been marked as deleted! 

888 self._clonecleanup(src, dst, newsrc) 

889 util.end_log_entry(self.sr.path, self.path, ["error"]) 

890 raise xs_errors.XenError('VDIClone', 

891 opterr='VDI clone failed error %d' % inst.code) 

892 

893 # Update cbt files if user created snapshot (SNAPSHOT_DOUBLE) 

894 if snap_type == VDI.SNAPSHOT_DOUBLE and cbtlog: 894 ↛ 895line 894 didn't jump to line 895, because the condition on line 894 was never true

895 try: 

896 self._cbt_snapshot(dest, cbt_consistency) 

897 except: 

898 # CBT operation failed. 

899 util.end_log_entry(self.sr.path, self.path, ["error"]) 

900 raise 

901 

902 util.end_log_entry(self.sr.path, self.path, ["done"]) 

903 if snap_type != VDI.SNAPSHOT_INTERNAL: 903 ↛ 906line 903 didn't jump to line 906, because the condition on line 903 was never false

904 self.sr._update(self.sr.uuid, self.size) 

905 # Return info on the new user-visible leaf VDI 

906 ret_vdi = leaf_vdi 

907 if not ret_vdi: 907 ↛ 908line 907 didn't jump to line 908, because the condition on line 907 was never true

908 ret_vdi = base_vdi 

909 if not ret_vdi: 909 ↛ 910line 909 didn't jump to line 910, because the condition on line 909 was never true

910 ret_vdi = self 

911 return ret_vdi.get_params() 

912 

913 def get_params(self): 

914 if not self._checkpath(self.path): 

915 raise xs_errors.XenError('VDIUnavailable', \ 

916 opterr='VDI %s unavailable %s' % (self.uuid, self.path)) 

917 return super(FileVDI, self).get_params() 

918 

919 def _snap(self, child, parent): 

920 cmd = [SR.TAPDISK_UTIL, "snapshot", vhdutil.VDI_TYPE_VHD, child, parent] 

921 text = util.pread(cmd) 

922 

923 def _clonecleanup(self, src, dst, newsrc): 

924 try: 

925 if dst: 925 ↛ 929line 925 didn't jump to line 929, because the condition on line 925 was never false

926 util.ioretry(lambda: self._unlink(dst)) 

927 except util.CommandException as inst: 

928 pass 

929 try: 

930 if util.ioretry(lambda: util.pathexists(newsrc)): 930 ↛ exitline 930 didn't return from function '_clonecleanup', because the condition on line 930 was never false

931 stats = os.stat(newsrc) 

932 # Check if we have more than one link to newsrc 

933 if (stats.st_nlink > 1): 

934 util.ioretry(lambda: self._unlink(newsrc)) 

935 elif not self._is_hidden(newsrc): 935 ↛ exitline 935 didn't return from function '_clonecleanup', because the condition on line 935 was never false

936 self._rename(newsrc, src) 

937 except util.CommandException as inst: 

938 pass 

939 

940 def _checkpath(self, path): 

941 try: 

942 if not util.ioretry(lambda: util.pathexists(path)): 942 ↛ 943line 942 didn't jump to line 943, because the condition on line 942 was never true

943 return False 

944 return True 

945 except util.CommandException as inst: 

946 raise xs_errors.XenError('EIO', \ 

947 opterr='IO error checking path %s' % path) 

948 

949 def _query_v(self, path): 

950 cmd = [SR.TAPDISK_UTIL, "query", vhdutil.VDI_TYPE_VHD, "-v", path] 

951 return int(util.pread(cmd)) * 1024 * 1024 

952 

953 def _query_p_uuid(self, path): 

954 cmd = [SR.TAPDISK_UTIL, "query", vhdutil.VDI_TYPE_VHD, "-p", path] 

955 parent = util.pread(cmd) 

956 parent = parent[:-1] 

957 ls = parent.split('/') 

958 return ls[len(ls) - 1].replace(vhdutil.FILE_EXTN_VHD, '') 

959 

960 def _query_info(self, path, use_bkp_footer=False): 

961 diskinfo = {} 

962 qopts = '-vpf' 

963 if use_bkp_footer: 963 ↛ 965line 963 didn't jump to line 965, because the condition on line 963 was never false

964 qopts += 'b' 

965 cmd = [SR.TAPDISK_UTIL, "query", vhdutil.VDI_TYPE_VHD, qopts, path] 

966 txt = util.pread(cmd).split('\n') 

967 diskinfo['size'] = txt[0] 

968 lst = [txt[1].split('/')[-1].replace(vhdutil.FILE_EXTN_VHD, "")] 

969 for val in filter(util.exactmatch_uuid, lst): 969 ↛ 970line 969 didn't jump to line 970, because the loop on line 969 never started

970 diskinfo['parent'] = val 

971 diskinfo['hidden'] = txt[2].split()[1] 

972 return diskinfo 

973 

974 def _create(self, size, path): 

975 cmd = [SR.TAPDISK_UTIL, "create", vhdutil.VDI_TYPE_VHD, size, path] 

976 text = util.pread(cmd) 

977 if self.key_hash: 977 ↛ 978line 977 didn't jump to line 978, because the condition on line 977 was never true

978 vhdutil.setKey(path, self.key_hash) 

979 

980 def _mark_hidden(self, path): 

981 vhdutil.setHidden(path, True) 

982 self.hidden = 1 

983 

984 def _is_hidden(self, path): 

985 return vhdutil.getHidden(path) == 1 

986 

987 def extractUuid(path): 

988 fileName = os.path.basename(path) 

989 uuid = fileName.replace(vhdutil.FILE_EXTN_VHD, "") 

990 return uuid 

991 extractUuid = staticmethod(extractUuid) 

992 

993 def generate_config(self, sr_uuid, vdi_uuid): 

994 """ 

995 Generate the XML config required to attach and activate 

996 a VDI for use when XAPI is not running. Attach and 

997 activation is handled by vdi_attach_from_config below. 

998 """ 

999 util.SMlog("FileVDI.generate_config") 

1000 if not util.pathexists(self.path): 1000 ↛ 1001line 1000 didn't jump to line 1001, because the condition on line 1000 was never true

1001 raise xs_errors.XenError('VDIUnavailable') 

1002 resp = {} 

1003 resp['device_config'] = self.sr.dconf 

1004 resp['sr_uuid'] = sr_uuid 

1005 resp['vdi_uuid'] = vdi_uuid 

1006 resp['command'] = 'vdi_attach_from_config' 

1007 # Return the 'config' encoded within a normal XMLRPC response so that 

1008 # we can use the regular response/error parsing code. 

1009 config = xmlrpc.client.dumps(tuple([resp]), "vdi_attach_from_config") 

1010 return xmlrpc.client.dumps((config, ), "", True) 

1011 

1012 def attach_from_config(self, sr_uuid, vdi_uuid): 

1013 """ 

1014 Attach and activate a VDI using config generated by 

1015 vdi_generate_config above. This is used for cases such as 

1016 the HA state-file and the redo-log. 

1017 """ 

1018 util.SMlog("FileVDI.attach_from_config") 

1019 try: 

1020 if not util.pathexists(self.sr.path): 

1021 self.sr.attach(sr_uuid) 

1022 except: 

1023 util.logException("FileVDI.attach_from_config") 

1024 raise xs_errors.XenError( 

1025 'SRUnavailable', 

1026 opterr='Unable to attach from config' 

1027 ) 

1028 

1029 def _create_cbt_log(self): 

1030 # Create CBT log file 

1031 # Name: <vdi_uuid>.cbtlog 

1032 #Handle if file already exists 

1033 log_path = self._get_cbt_logpath(self.uuid) 

1034 open_file = open(log_path, "w+") 

1035 open_file.close() 

1036 return super(FileVDI, self)._create_cbt_log() 

1037 

1038 def _delete_cbt_log(self): 

1039 logPath = self._get_cbt_logpath(self.uuid) 

1040 try: 

1041 os.remove(logPath) 

1042 except OSError as e: 

1043 if e.errno != errno.ENOENT: 

1044 raise 

1045 

1046 def _cbt_log_exists(self, logpath): 

1047 return util.pathexists(logpath) 

1048 

1049 

class SharedFileSR(FileSR):
    """
    FileSR subclass for SRs that use shared network storage
    """

    def _check_writable(self):
        """
        Checks that the filesystem being used by the SR can be written to,
        raising an exception if it can't.
        """
        # Unique probe filename so concurrent checks on the shared
        # filesystem cannot collide.
        test_name = os.path.join(self.path, str(uuid4()))
        try:
            open(test_name, 'ab').close()
        except OSError as e:
            util.SMlog("Cannot write to SR file system: %s" % e)
            raise xs_errors.XenError('SharedFileSystemNoWrite')
        finally:
            # Best-effort: remove the probe file whether or not the write
            # succeeded.
            util.force_unlink(test_name)

    def _raise_hardlink_error(self):
        # Helper for the "FileSR_fail_hardlink" fistpoint below: simulates
        # a filesystem whose link operation fails with error 524.
        raise OSError(524, "Unknown error 524")

    def _check_hardlinks(self):
        """Probe whether the SR's filesystem supports hardlinks.

        The result is cached via sm-hardlink.conf; when unsupported, a
        warning is logged and a XAPI message is created for this SR.
        Returns True when hardlinks are supported, False otherwise.
        """
        # Use the cached answer if a previous probe already ran.
        hardlink_conf = self._read_hardlink_conf()
        if hardlink_conf is not None:
            return hardlink_conf

        # Create a probe file, then try to hardlink it.
        test_name = os.path.join(self.path, str(uuid4()))
        open(test_name, 'ab').close()

        link_name = '%s.new' % test_name
        try:
            # XSI-1100: Let tests simulate failure of the link operation
            util.fistpoint.activate_custom_fn(
                "FileSR_fail_hardlink",
                self._raise_hardlink_error)

            os.link(test_name, link_name)
            self._write_hardlink_conf(supported=True)
            return True
        except OSError:
            # Hardlink failed: cache the negative result and warn.
            self._write_hardlink_conf(supported=False)

            msg = "File system for SR %s does not support hardlinks, crash " \
                "consistency of snapshots cannot be assured" % self.uuid
            util.SMlog(msg, priority=util.LOG_WARNING)
            # Note: session can be not set during attach/detach_from_config calls.
            if self.session:
                try:
                    self.session.xenapi.message.create(
                        "sr_does_not_support_hardlinks", 2, "SR", self.uuid,
                        msg)
                except XenAPI.Failure:
                    # Might already be set and checking has TOCTOU issues
                    pass
        finally:
            # Clean up both the probe file and the (possibly absent) link.
            util.force_unlink(link_name)
            util.force_unlink(test_name)

        return False

    def _get_hardlink_conf_path(self):
        # Per-SR location of the cached hardlink-support flag.
        return os.path.join(self.path, 'sm-hardlink.conf')

    def _read_hardlink_conf(self):
        """Return the cached hardlink-support flag, or None when unknown.

        None means "no usable cached value": missing file, unreadable
        content, or a concurrent-access read error.
        """
        try:
            with open(self._get_hardlink_conf_path(), 'r') as f:
                try:
                    return bool(int(f.read()))
                except Exception as e:
                    # If we can't read, assume the file is empty and test for hardlink support.
                    return None
        except IOError as e:
            if e.errno == errno.ENOENT:
                # If the config file doesn't exist, assume we want to support hardlinks.
                return None
            util.SMlog('Failed to read hardlink conf: {}'.format(e))
            # Can be caused by a concurrent access, not a major issue.
            return None

    def _write_hardlink_conf(self, supported):
        """Persist the hardlink-support flag ('1' or '0') for this SR."""
        try:
            with open(self._get_hardlink_conf_path(), 'w') as f:
                f.write('1' if supported else '0')
        except Exception as e:
            # Can be caused by a concurrent access, not a major issue.
            util.SMlog('Failed to write hardlink conf: {}'.format(e))

1137 

if __name__ == '__main__':
    # Executed directly as an SM driver: dispatch the requested command.
    SRCommand.run(FileSR, DRIVER_INFO)
else:
    # Imported as a module: register this driver type with SR.
    SR.registerSR(FileSR)