
1#!/usr/bin/python3 

2# 

3# Copyright (C) Citrix Systems Inc. 

4# 

5# This program is free software; you can redistribute it and/or modify 

6# it under the terms of the GNU Lesser General Public License as published 

7# by the Free Software Foundation; version 2.1 only. 

8# 

9# This program is distributed in the hope that it will be useful, 

10# but WITHOUT ANY WARRANTY; without even the implied warranty of 

11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

12# GNU Lesser General Public License for more details. 

13# 

14# You should have received a copy of the GNU Lesser General Public License 

15# along with this program; if not, write to the Free Software Foundation, Inc., 

16# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 

17# 

18# FileSR: local-file storage repository 

19 

20from sm_typing import Dict, Optional, List, override 

21 

22import SR 

23import VDI 

24import SRCommand 

25import util 

26import scsiutil 

27import vhdutil 

28import os 

29import errno 

30import xs_errors 

31import cleanup 

32import blktap2 

33import time 

34import glob 

35from uuid import uuid4 

36from lock import Lock 

37import xmlrpc.client 

38import XenAPI # pylint: disable=import-error 

39from constants import CBTLOG_TAG 

40 
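# Maps a parent VDI uuid to the list of its children discovered during
# _loadvdis(), which uses it to skip hidden leaves (freshly-deleted VDIs the
# GC has not cleaned up yet) when populating self.vdis.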

41geneology: Dict[str, List[str]] = {} 

42CAPABILITIES = ["SR_PROBE", "SR_UPDATE", \ 

43 "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH", \ 

44 "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "VDI_MIRROR", 

45 "VDI_GENERATE_CONFIG", "ATOMIC_PAUSE", "VDI_CONFIG_CBT", 

46 "VDI_ACTIVATE", "VDI_DEACTIVATE", "THIN_PROVISIONING"] 

47 

48CONFIGURATION = [['location', 'local directory path (required)']] 

49 

50DRIVER_INFO = { 

51 'name': 'Local Path VHD', 

52 'description': 'SR plugin which represents disks as VHD files stored on a local path', 

53 'vendor': 'Citrix Systems Inc', 

54 'copyright': '(C) 2008 Citrix Systems Inc', 

55 'driver_version': '1.0', 

56 'required_api_version': '1.0', 

57 'capabilities': CAPABILITIES, 

58 'configuration': CONFIGURATION 

59 } 

60 

61JOURNAL_FILE_PREFIX = ".journal-" 

62 

63OPS_EXCLUSIVE = [ 

64 "sr_create", "sr_delete", "sr_probe", "sr_attach", "sr_detach", 

65 "sr_scan", "vdi_init", "vdi_create", "vdi_delete", "vdi_attach", 

66 "vdi_detach", "vdi_resize_online", "vdi_snapshot", "vdi_clone"] 

67 

68DRIVER_CONFIG = {"ATTACH_FROM_CONFIG_WITH_TAPDISK": True} 

69 

70 

71class FileSR(SR.SR): 

72 """Local file storage repository""" 

73 

74 SR_TYPE = "file" 

75 

76 @override 

77 @staticmethod 

78 def handles(srtype) -> bool: 

79 return srtype == 'file' 

80 
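# Sets self.o_direct from the SR's other-config key "o_direct" when a XAPI
# session is available; without one it defaults to True. The flag is later
# passed down via srcmd.params['o_direct'] in FileVDI.load(), presumably for
# the datapath layer to honour (that last part is an assumption, not
# something this file spells out).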

81 def _check_o_direct(self): 

82 if self.sr_ref and self.session is not None: 

83 other_config = self.session.xenapi.SR.get_other_config(self.sr_ref) 

84 o_direct = other_config.get("o_direct") 

85 self.o_direct = o_direct is not None and o_direct == "true" 

86 else: 

87 self.o_direct = True 

88 

89 def __init__(self, srcmd, sr_uuid): 

90 # We call SR.SR.__init__ explicitly because 

91 # "super" sometimes failed due to circular imports 

92 SR.SR.__init__(self, srcmd, sr_uuid) 

93 self._check_o_direct() 

94 

95 @override 

96 def load(self, sr_uuid) -> None: 

97 self.ops_exclusive = OPS_EXCLUSIVE 

98 self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid) 

99 self.sr_vditype = vhdutil.VDI_TYPE_VHD 

100 if 'location' not in self.dconf or not self.dconf['location']:  # 100 ↛ 101: condition never true

101 raise xs_errors.XenError('ConfigLocationMissing') 

102 self.remotepath = self.dconf['location'] 

103 self.path = os.path.join(SR.MOUNT_BASE, sr_uuid) 

104 self.linkpath = self.path 

105 self.mountpoint = self.path 

106 self.attached = False 

107 self.driver_config = DRIVER_CONFIG 

108 

109 @override 

110 def create(self, sr_uuid, size) -> None: 

111 """ Create the SR. The path must not already exist, or if it does,  

112 it must be empty. (This accounts for the case where the user has 

113 mounted a device onto a directory manually and wants to use this as the 

114 root of a file-based SR.) """ 

115 try: 

116 if util.ioretry(lambda: util.pathexists(self.remotepath)):  # 116 ↛ 117: condition never true

117 if len(util.ioretry(lambda: util.listdir(self.remotepath))) != 0: 

118 raise xs_errors.XenError('SRExists') 

119 else: 

120 try: 

121 util.ioretry(lambda: os.mkdir(self.remotepath)) 

122 except util.CommandException as inst: 

123 if inst.code == errno.EEXIST: 

124 raise xs_errors.XenError('SRExists') 

125 else: 

126 raise xs_errors.XenError('FileSRCreate', \ 

127 opterr='directory creation failure %d' \ 

128 % inst.code) 

129 except: 

130 raise xs_errors.XenError('FileSRCreate') 

131 

132 @override 

133 def delete(self, sr_uuid) -> None: 

134 self.attach(sr_uuid) 

135 cleanup.gc_force(self.session, self.uuid) 

136 

137 # check to make sure no VDIs are present; then remove old 

138 # files that are not VDIs 

139 try: 

140 if util.ioretry(lambda: util.pathexists(self.path)): 

141 #Load the VDI list 

142 self._loadvdis() 

143 for uuid in self.vdis: 

144 if not self.vdis[uuid].deleted: 

145 raise xs_errors.XenError('SRNotEmpty', \ 

146 opterr='VDIs still exist in SR') 

147 

148 # remove everything else; there are no VDIs 

149 for name in util.ioretry(lambda: util.listdir(self.path)): 

150 fullpath = os.path.join(self.path, name) 

151 try: 

152 util.ioretry(lambda: os.unlink(fullpath)) 

153 except util.CommandException as inst: 

154 if inst.code != errno.ENOENT and \ 

155 inst.code != errno.EISDIR: 

156 raise xs_errors.XenError('FileSRDelete', \ 

157 opterr='failed to remove %s error %d' \ 

158 % (fullpath, inst.code)) 

159 self.detach(sr_uuid) 

160 except util.CommandException as inst: 

161 self.detach(sr_uuid) 

162 raise xs_errors.XenError('FileSRDelete', \ 

163 opterr='error %d' % inst.code) 

164 

165 @override 

166 def attach(self, sr_uuid) -> None: 

167 self.attach_and_bind(sr_uuid) 

168 

169 def attach_and_bind(self, sr_uuid, bind=True) -> None: 

170 if not self._checkmount(): 

171 try: 

172 util.ioretry(lambda: util.makedirs(self.path, mode=0o700)) 

173 except util.CommandException as inst: 

174 if inst.code != errno.EEXIST: 

175 raise xs_errors.XenError("FileSRCreate", \ 

176 opterr='fail to create mount point. Errno is %s' % inst.code) 

177 try: 

178 cmd = ["mount", self.remotepath, self.path] 

179 if bind: 

180 cmd.append("--bind") 

181 util.pread(cmd) 

182 os.chmod(self.path, mode=0o0700) 

183 except util.CommandException as inst: 

184 raise xs_errors.XenError('FileSRCreate', \ 

185 opterr='fail to mount FileSR. Errno is %s' % inst.code) 

186 self.attached = True 

187 

188 @override 

189 def detach(self, sr_uuid) -> None: 

190 if self._checkmount(): 

191 try: 

192 util.SMlog("Aborting GC/coalesce") 

193 cleanup.abort(self.uuid) 

194 os.chdir(SR.MOUNT_BASE) 

195 util.pread(["umount", self.path]) 

196 os.rmdir(self.path) 

197 except Exception as e: 

198 raise xs_errors.XenError('SRInUse', opterr=str(e)) 

199 self.attached = False 

200 

201 @override 

202 def scan(self, sr_uuid) -> None: 

203 if not self._checkmount(): 

204 raise xs_errors.XenError('SRUnavailable', \ 

205 opterr='no such directory %s' % self.path) 

206 

207 if not self.vdis:  # 207 ↛ 210: condition never false

208 self._loadvdis() 

209 

210 if not self.passthrough: 

211 self.physical_size = self._getsize() 

212 self.physical_utilisation = self._getutilisation() 

213 

214 for uuid in list(self.vdis.keys()): 

215 if self.vdis[uuid].deleted:  # 215 ↛ 216: condition never true

216 del self.vdis[uuid] 

217 

218 # CA-15607: make sure we are robust to the directory being unmounted beneath 

219 # us (eg by a confused user). Without this we might forget all our VDI references 

220 # which would be a shame. 

221 # For SMB SRs, this path is mountpoint 

222 mount_path = self.path 

223 if self.handles("smb"):  # 223 ↛ 224: condition never true

224 mount_path = self.mountpoint 

225 

226 if not self.handles("file") and not os.path.ismount(mount_path):  # 226 ↛ 227: condition never true

227 util.SMlog("Error: FileSR.scan called but directory %s isn't a mountpoint" % mount_path) 

228 raise xs_errors.XenError('SRUnavailable', \ 

229 opterr='not mounted %s' % mount_path) 

230 

231 self._kickGC() 

232 

233 # default behaviour from here on 

234 super(FileSR, self).scan(sr_uuid) 

235 

236 @override 

237 def update(self, sr_uuid) -> None: 

238 if not self._checkmount(): 

239 raise xs_errors.XenError('SRUnavailable', \ 

240 opterr='no such directory %s' % self.path) 

241 self._update(sr_uuid, 0) 

242 

243 def _update(self, sr_uuid, virt_alloc_delta): 

244 valloc = int(self.session.xenapi.SR.get_virtual_allocation(self.sr_ref)) 

245 self.virtual_allocation = valloc + virt_alloc_delta 

246 self.physical_size = self._getsize() 

247 self.physical_utilisation = self._getutilisation() 

248 self._db_update() 

249 

250 @override 

251 def content_type(self, sr_uuid) -> str: 

252 return super(FileSR, self).content_type(sr_uuid) 

253 

254 @override 

255 def vdi(self, uuid) -> VDI.VDI: 

256 return FileVDI(self, uuid) 

257 

258 def added_vdi(self, vdi): 

259 self.vdis[vdi.uuid] = vdi 

260 

261 def deleted_vdi(self, uuid): 

262 if uuid in self.vdis: 

263 del self.vdis[uuid] 

264 

265 @override 

266 def replay(self, uuid) -> None: 

267 try: 

268 file = open(self.path + "/filelog.txt", "r") 

269 data = file.readlines() 

270 file.close() 

271 self._process_replay(data) 

272 except: 

273 raise xs_errors.XenError('SRLog') 

274 

275 def _loadvdis(self): 

276 if self.vdis:  # 276 ↛ 277: condition never true

277 return 

278 

279 pattern = os.path.join(self.path, "*%s" % vhdutil.FILE_EXTN_VHD) 

280 try: 

281 self.vhds = vhdutil.getAllVHDs(pattern, FileVDI.extractUuid) 

282 except util.CommandException as inst: 

283 raise xs_errors.XenError('SRScan', opterr="error VHD-scanning " \ 

284 "path %s (%s)" % (self.path, inst)) 

285 try: 

286 list_vhds = [FileVDI.extractUuid(v) for v in util.ioretry(lambda: glob.glob(pattern))] 

287 if len(self.vhds) != len(list_vhds):  # 287 ↛ 292: condition never false

288 util.SMlog("VHD scan returns %d VHDs: %s" % (len(self.vhds), sorted(self.vhds))) 

289 util.SMlog("VHD list returns %d VHDs: %s" % (len(list_vhds), sorted(list_vhds))) 

290 except: 

291 pass 

292 for uuid in self.vhds.keys(): 

293 if self.vhds[uuid].error:  # 293 ↛ 294: condition never true

294 raise xs_errors.XenError('SRScan', opterr='uuid=%s' % uuid) 

295 self.vdis[uuid] = self.vdi(uuid) 

296 # Get the key hash of any encrypted VDIs: 

297 vhd_path = os.path.join(self.path, self.vhds[uuid].path) 

298 key_hash = vhdutil.getKeyHash(vhd_path) 

299 self.vdis[uuid].sm_config_override['key_hash'] = key_hash 

300 

301 # raw VDIs and CBT log files 

302 files = util.ioretry(lambda: util.listdir(self.path))  # 302 ↛ exit: lambda never ran

303 for fn in files:  # 303 ↛ 304: loop never started

304 if fn.endswith(vhdutil.FILE_EXTN_RAW): 

305 uuid = fn[:-(len(vhdutil.FILE_EXTN_RAW))] 

306 self.vdis[uuid] = self.vdi(uuid) 

307 elif fn.endswith(CBTLOG_TAG): 

308 cbt_uuid = fn.split(".")[0] 

309 # If an associated disk exists, update CBT status 

310 # else create new VDI of type cbt_metadata 

311 if cbt_uuid in self.vdis: 

312 self.vdis[cbt_uuid].cbt_enabled = True 

313 else: 

314 new_vdi = self.vdi(cbt_uuid) 

315 new_vdi.ty = "cbt_metadata" 

316 new_vdi.cbt_enabled = True 

317 self.vdis[cbt_uuid] = new_vdi 

318 

319 # Mark parent VDIs as Read-only and generate virtual allocation 

320 self.virtual_allocation = 0 

321 for uuid, vdi in self.vdis.items(): 

322 if vdi.parent:  # 322 ↛ 323: condition never true

323 if vdi.parent in self.vdis: 

324 self.vdis[vdi.parent].read_only = True 

325 if vdi.parent in geneology: 

326 geneology[vdi.parent].append(uuid) 

327 else: 

328 geneology[vdi.parent] = [uuid] 

329 if not vdi.hidden:  # 329 ↛ 321: condition never false

330 self.virtual_allocation += (vdi.size) 

331 

332 # now remove all hidden leaf nodes from self.vdis so that they are not 

333 # introduced into the Agent DB when SR is synchronized. With the 

334 # asynchronous GC, a deleted VDI might stay around until the next 

335 # SR.scan, so if we don't ignore hidden leaves we would pick up 

336 # freshly-deleted VDIs as newly-added VDIs 

337 for uuid in list(self.vdis.keys()): 

338 if uuid not in geneology and self.vdis[uuid].hidden:  # 338 ↛ 339: condition never true

339 util.SMlog("Scan found hidden leaf (%s), ignoring" % uuid) 

340 del self.vdis[uuid] 

341 

342 def _getsize(self): 

343 path = self.path 

344 if self.handles("smb"):  # 344 ↛ 345: condition never true

345 path = self.linkpath 

346 return util.get_fs_size(path) 

347 

348 def _getutilisation(self): 

349 return util.get_fs_utilisation(self.path) 

350 

351 def _replay(self, logentry): 

352 # all replay commands have the same 5,6,7th arguments 

353 # vdi_command, sr-uuid, vdi-uuid 

354 back_cmd = logentry[5].replace("vdi_", "") 

355 target = self.vdi(logentry[7]) 

356 cmd = getattr(target, back_cmd) 

357 args = [] 

358 for item in logentry[6:]: 

359 item = item.replace("\n", "") 

360 args.append(item) 

361 ret = cmd(*args) 

362 if ret: 

363 print(ret) 

364 

365 def _compare_args(self, a, b): 

366 try: 

367 if a[2] != "log:": 

368 return 1 

369 if b[2] != "end:" and b[2] != "error:": 

370 return 1 

371 if a[3] != b[3]: 

372 return 1 

373 if a[4] != b[4]: 

374 return 1 

375 return 0 

376 except: 

377 return 1 

378 

379 def _process_replay(self, data): 

380 logentries = [] 

381 for logentry in data: 

382 logentry = logentry.split(" ") 

383 logentries.append(logentry) 

384 # we are looking for a log entry that has a log but no end or error 

385 # wkcfix -- recreate (adjusted) logfile 

386 index = 0 

387 while index < len(logentries) - 1: 

388 if self._compare_args(logentries[index], logentries[index + 1]): 

389 self._replay(logentries[index]) 

390 else: 

391 # skip the paired one 

392 index += 1 

393 # next 

394 index += 1 

395 

396 def _kickGC(self): 

397 util.SMlog("Kicking GC") 

398 cleanup.start_gc_service(self.uuid) 

399 

400 def _isbind(self): 

401 # os.path.ismount can't deal with bind mount 
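# A bind mount leaves the target referring to the same inode on the same
# device as the source directory, so comparing (st_dev, st_ino) of self.path
# and self.remotepath detects one.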

402 st1 = os.stat(self.path) 

403 st2 = os.stat(self.remotepath) 

404 return st1.st_dev == st2.st_dev and st1.st_ino == st2.st_ino 

405 

406 def _checkmount(self) -> bool: 

407 mount_path = self.path 

408 if self.handles("smb"):  # 408 ↛ 409: condition never true

409 mount_path = self.mountpoint 

410 

411 return util.ioretry(lambda: util.pathexists(mount_path) and \ 

412 (util.ismount(mount_path) or \ 

413 util.pathexists(self.remotepath) and self._isbind())) 

414 

415 # Override in SharedFileSR. 

416 def _check_hardlinks(self) -> bool: 

417 return True 

418 

419class FileVDI(VDI.VDI): 

420 PARAM_VHD = "vhd" 

421 PARAM_RAW = "raw" 

422 VDI_TYPE = { 

423 PARAM_VHD: vhdutil.VDI_TYPE_VHD, 

424 PARAM_RAW: vhdutil.VDI_TYPE_RAW 

425 } 

426 
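# Probes for the VDI's backing file in a fixed order -- <uuid>.vhd, then
# <uuid>.raw, then the <uuid>.cbtlog metadata file -- retrying up to
# `maxretry` times with a `period`-second sleep between attempts. (Assumed
# rationale: the file may not be visible yet, e.g. on shared storage; the
# code below only implements the retry loop itself.)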

427 def _find_path_with_retries(self, vdi_uuid, maxretry=5, period=2.0): 

428 vhd_path = os.path.join(self.sr.path, "%s.%s" % \ 

429 (vdi_uuid, self.PARAM_VHD)) 

430 raw_path = os.path.join(self.sr.path, "%s.%s" % \ 

431 (vdi_uuid, self.PARAM_RAW)) 

432 cbt_path = os.path.join(self.sr.path, "%s.%s" % 

433 (vdi_uuid, CBTLOG_TAG)) 

434 found = False 

435 tries = 0 

436 while tries < maxretry and not found: 

437 tries += 1 

438 if util.ioretry(lambda: util.pathexists(vhd_path)): 

439 self.vdi_type = vhdutil.VDI_TYPE_VHD 

440 self.path = vhd_path 

441 found = True 

442 elif util.ioretry(lambda: util.pathexists(raw_path)): 

443 self.vdi_type = vhdutil.VDI_TYPE_RAW 

444 self.path = raw_path 

445 self.hidden = False 

446 found = True 

447 elif util.ioretry(lambda: util.pathexists(cbt_path)):  # 447 ↛ 448: condition never true

448 self.vdi_type = CBTLOG_TAG 

449 self.path = cbt_path 

450 self.hidden = False 

451 found = True 

452 

453 if not found: 

454 util.SMlog("VHD %s not found, retry %s of %s" % (vhd_path, tries, maxretry)) 

455 time.sleep(period) 

456 

457 return found 

458 

459 @override 

460 def load(self, vdi_uuid) -> None: 

461 self.lock = self.sr.lock 

462 

463 self.sr.srcmd.params['o_direct'] = self.sr.o_direct 

464 

465 if self.sr.srcmd.cmd == "vdi_create": 

466 self.vdi_type = vhdutil.VDI_TYPE_VHD 

467 self.key_hash = None 

468 if "vdi_sm_config" in self.sr.srcmd.params: 468 ↛ 469line 468 didn't jump to line 469, because the condition on line 468 was never true

469 if "key_hash" in self.sr.srcmd.params["vdi_sm_config"]: 

470 self.key_hash = self.sr.srcmd.params["vdi_sm_config"]["key_hash"] 

471 

472 if "type" in self.sr.srcmd.params["vdi_sm_config"]: 

473 vdi_type = self.sr.srcmd.params["vdi_sm_config"]["type"] 

474 if not self.VDI_TYPE.get(vdi_type): 

475 raise xs_errors.XenError('VDIType', 

476 opterr='Invalid VDI type %s' % vdi_type) 

477 self.vdi_type = self.VDI_TYPE[vdi_type] 

478 self.path = os.path.join(self.sr.path, "%s%s" % 

479 (vdi_uuid, vhdutil.FILE_EXTN[self.vdi_type])) 

480 else: 

481 found = self._find_path_with_retries(vdi_uuid) 

482 if not found:  # 482 ↛ 483: condition never true

483 if self.sr.srcmd.cmd == "vdi_delete": 

484 # Could be delete for CBT log file 

485 self.path = os.path.join(self.sr.path, "%s.%s" % 

486 (vdi_uuid, self.PARAM_VHD)) 

487 return 

488 if self.sr.srcmd.cmd == "vdi_attach_from_config": 

489 return 

490 raise xs_errors.XenError('VDIUnavailable', 

491 opterr="VDI %s not found" % vdi_uuid) 

492 

493 

494 if self.vdi_type == vhdutil.VDI_TYPE_VHD and \ 

495 self.sr.__dict__.get("vhds") and self.sr.vhds.get(vdi_uuid): 

496 # VHD info already preloaded: use it instead of querying directly 

497 vhdInfo = self.sr.vhds[vdi_uuid] 

498 self.utilisation = vhdInfo.sizePhys 

499 self.size = vhdInfo.sizeVirt 

500 self.hidden = vhdInfo.hidden 

501 if self.hidden:  # 501 ↛ 502: condition never true

502 self.managed = False 

503 self.parent = vhdInfo.parentUuid 

504 if self.parent:  # 504 ↛ 505: condition never true

505 self.sm_config_override = {'vhd-parent': self.parent} 

506 else: 

507 self.sm_config_override = {'vhd-parent': None} 

508 return 

509 

510 try: 

511 # Change to the SR directory in case parent 

512 # locator field path has changed 

513 os.chdir(self.sr.path) 

514 except Exception as chdir_exception: 

515 util.SMlog("Unable to change to SR directory, SR unavailable, %s" % 

516 str(chdir_exception)) 

517 raise xs_errors.XenError('SRUnavailable', opterr=str(chdir_exception)) 

518 

519 if util.ioretry(  # 519 ↛ exit: condition never false

520 lambda: util.pathexists(self.path), 

521 errlist=[errno.EIO, errno.ENOENT]): 

522 try: 

523 st = util.ioretry(lambda: os.stat(self.path), 

524 errlist=[errno.EIO, errno.ENOENT]) 

525 self.utilisation = int(st.st_size) 

526 except util.CommandException as inst: 

527 if inst.code == errno.EIO: 

528 raise xs_errors.XenError('VDILoad', \ 

529 opterr='Failed load VDI information %s' % self.path) 

530 else: 

531 util.SMlog("Stat failed for %s, %s" % ( 

532 self.path, str(inst))) 

533 raise xs_errors.XenError('VDIType', \ 

534 opterr='Invalid VDI type %s' % self.vdi_type) 

535 

536 if self.vdi_type == vhdutil.VDI_TYPE_RAW:  # 536 ↛ 537: condition never true

537 self.exists = True 

538 self.size = self.utilisation 

539 self.sm_config_override = {'type': self.PARAM_RAW} 

540 return 

541 

542 if self.vdi_type == CBTLOG_TAG:  # 542 ↛ 543: condition never true

543 self.exists = True 

544 self.size = self.utilisation 

545 return 

546 

547 try: 

548 # The VDI might be activated in R/W mode so the VHD footer 

549 # won't be valid, use the back-up one instead. 

550 diskinfo = util.ioretry( 

551 lambda: self._query_info(self.path, True), 

552 errlist=[errno.EIO, errno.ENOENT]) 

553 

554 if 'parent' in diskinfo:  # 554 ↛ 555: condition never true

555 self.parent = diskinfo['parent'] 

556 self.sm_config_override = {'vhd-parent': self.parent} 

557 else: 

558 self.sm_config_override = {'vhd-parent': None} 

559 self.parent = '' 

560 self.size = int(diskinfo['size']) * 1024 * 1024 

561 self.hidden = int(diskinfo['hidden']) 

562 if self.hidden:  # 562 ↛ 563: condition never true

563 self.managed = False 

564 self.exists = True 

565 except util.CommandException as inst: 

566 raise xs_errors.XenError('VDILoad', \ 

567 opterr='Failed load VDI information %s' % self.path) 

568 

569 @override 

570 def update(self, sr_uuid, vdi_location) -> None: 

571 self.load(vdi_location) 

572 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

573 self.sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

574 self._db_update() 

575 

576 @override 

577 def create(self, sr_uuid, vdi_uuid, size) -> str: 

578 if util.ioretry(lambda: util.pathexists(self.path)):  # 578 ↛ 579: condition never true

579 raise xs_errors.XenError('VDIExists') 

580 

581 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 

582 try: 

583 size = vhdutil.validate_and_round_vhd_size(int(size)) 

584 mb = 1024 * 1024 

585 size_mb = size // mb 

586 util.ioretry(lambda: self._create(str(size_mb), self.path)) 

587 self.size = util.ioretry(lambda: self._query_v(self.path)) 

588 except util.CommandException as inst: 

589 raise xs_errors.XenError('VDICreate', 

590 opterr='error %d' % inst.code) 

591 else: 

592 f = open(self.path, 'w') 

593 f.truncate(int(size)) 

594 f.close() 

595 self.size = size 

596 

597 self.sr.added_vdi(self) 

598 

599 st = util.ioretry(lambda: os.stat(self.path)) 

600 self.utilisation = int(st.st_size) 

601 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

602 self.sm_config = {"type": self.PARAM_RAW} 

603 

604 self._db_introduce() 

605 self.sr._update(self.sr.uuid, self.size) 

606 return super(FileVDI, self).get_params() 

607 

608 @override 

609 def delete(self, sr_uuid, vdi_uuid, data_only=False) -> None: 

610 if not util.ioretry(lambda: util.pathexists(self.path)): 

611 return super(FileVDI, self).delete(sr_uuid, vdi_uuid, data_only) 

612 

613 if self.attached: 

614 raise xs_errors.XenError('VDIInUse') 

615 

616 try: 

617 util.force_unlink(self.path) 

618 except Exception as e: 

619 raise xs_errors.XenError( 

620 'VDIDelete', 

621 opterr='Failed to unlink file during deleting VDI: %s' % str(e)) 

622 

623 self.sr.deleted_vdi(vdi_uuid) 

624 # If this is a data_destroy call, don't remove from XAPI db 

625 if not data_only: 

626 self._db_forget() 

627 self.sr._update(self.sr.uuid, -self.size) 

628 self.sr.lock.cleanupAll(vdi_uuid) 

629 self.sr._kickGC() 

630 return super(FileVDI, self).delete(sr_uuid, vdi_uuid, data_only) 

631 

632 @override 

633 def attach(self, sr_uuid, vdi_uuid) -> str: 

634 if self.path is None: 

635 self._find_path_with_retries(vdi_uuid) 

636 if not self._checkpath(self.path): 

637 raise xs_errors.XenError('VDIUnavailable', \ 

638 opterr='VDI %s unavailable %s' % (vdi_uuid, self.path)) 

639 try: 

640 self.attached = True 

641 

642 if not hasattr(self, 'xenstore_data'): 

643 self.xenstore_data = {} 

644 

645 self.xenstore_data.update(scsiutil.update_XS_SCSIdata(vdi_uuid, \ 

646 scsiutil.gen_synthetic_page_data(vdi_uuid))) 

647 

648 if self.sr.handles("file"): 

649 # XXX: PR-1255: if these are constants then they should 

650 # be returned by the attach API call, not persisted in the 

651 # pool database. 

652 self.xenstore_data['storage-type'] = 'ext' 

653 return super(FileVDI, self).attach(sr_uuid, vdi_uuid) 

654 except util.CommandException as inst: 

655 raise xs_errors.XenError('VDILoad', opterr='error %d' % inst.code) 

656 

657 @override 

658 def detach(self, sr_uuid, vdi_uuid) -> None: 

659 self.attached = False 

660 

661 @override 

662 def resize(self, sr_uuid, vdi_uuid, size) -> str: 

663 if not self.exists: 

664 raise xs_errors.XenError('VDIUnavailable', \ 

665 opterr='VDI %s unavailable %s' % (vdi_uuid, self.path)) 

666 

667 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

668 raise xs_errors.XenError('Unimplemented') 

669 

670 if self.hidden: 

671 raise xs_errors.XenError('VDIUnavailable', opterr='hidden VDI') 

672 

673 if size < self.size: 

674 util.SMlog('vdi_resize: shrinking not supported: ' + \ 

675 '(current size: %d, new size: %d)' % (self.size, size)) 

676 raise xs_errors.XenError('VDISize', opterr='shrinking not allowed') 

677 

678 if size == self.size: 

679 return VDI.VDI.get_params(self) 

680 

681 # We already checked it is a VDI_TYPE_VHD 

682 size = vhdutil.validate_and_round_vhd_size(int(size)) 

683 
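# The resize is journalled: setSizeVirt() is handed a journal file so that,
# if the operation fails part-way, the except branch below can use
# vhdutil.revert() to roll the VHD back to its previous virtual size.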

684 jFile = JOURNAL_FILE_PREFIX + self.uuid 

685 try: 

686 vhdutil.setSizeVirt(self.path, size, jFile) 

687 except: 

688 # Revert the operation 

689 vhdutil.revert(self.path, jFile) 

690 raise xs_errors.XenError('VDISize', opterr='resize operation failed') 

691 

692 old_size = self.size 

693 self.size = vhdutil.getSizeVirt(self.path) 

694 st = util.ioretry(lambda: os.stat(self.path)) 

695 self.utilisation = int(st.st_size) 

696 

697 self._db_update() 

698 self.sr._update(self.sr.uuid, self.size - old_size) 

699 super(FileVDI, self).resize_cbt(self.sr.uuid, self.uuid, self.size) 

700 return VDI.VDI.get_params(self) 

701 

702 @override 

703 def clone(self, sr_uuid, vdi_uuid) -> str: 

704 return self._do_snapshot(sr_uuid, vdi_uuid, VDI.SNAPSHOT_DOUBLE) 

705 
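# compose() re-parents this VDI's VHD onto vdi1's VHD file, hides the new
# parent and marks it unmanaged in XAPI, then refreshes the tapdisk chain via
# blktap2.VDI.tap_refresh so the running datapath for vdi2 picks up the
# change.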

706 @override 

707 def compose(self, sr_uuid, vdi1, vdi2) -> None: 

708 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

709 raise xs_errors.XenError('Unimplemented') 

710 parent_fn = vdi1 + vhdutil.FILE_EXTN[vhdutil.VDI_TYPE_VHD] 

711 parent_path = os.path.join(self.sr.path, parent_fn) 

712 assert(util.pathexists(parent_path)) 

713 vhdutil.setParent(self.path, parent_path, False) 

714 vhdutil.setHidden(parent_path) 

715 self.sr.session.xenapi.VDI.set_managed(self.sr.srcmd.params['args'][0], False) 

716 util.pread2([vhdutil.VHD_UTIL, "modify", "-p", parent_path, 

717 "-n", self.path]) 

718 # Tell tapdisk the chain has changed 

719 if not blktap2.VDI.tap_refresh(self.session, sr_uuid, vdi2): 

720 raise util.SMException("failed to refresh VDI %s" % self.uuid) 

721 util.SMlog("VDI.compose: relinked %s->%s" % (vdi2, vdi1)) 

722 

723 def reset_leaf(self, sr_uuid, vdi_uuid): 

724 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

725 raise xs_errors.XenError('Unimplemented') 

726 

727 # safety check 

728 if not vhdutil.hasParent(self.path): 

729 raise util.SMException("ERROR: VDI %s has no parent, " 

730 "will not reset contents" % self.uuid) 

731 

732 vhdutil.killData(self.path) 

733 

734 @override 

735 def _do_snapshot(self, sr_uuid, vdi_uuid, snapType, 

736 cloneOp=False, secondary=None, cbtlog=None) -> str: 

737 # If cbt enabled, save file consistency state 

738 if cbtlog is not None:  # 738 ↛ 739: condition never true

739 if blktap2.VDI.tap_status(self.session, vdi_uuid): 

740 consistency_state = False 

741 else: 

742 consistency_state = True 

743 util.SMlog("Saving log consistency state of %s for vdi: %s" % 

744 (consistency_state, vdi_uuid)) 

745 else: 

746 consistency_state = None 

747 

748 if self.vdi_type != vhdutil.VDI_TYPE_VHD:  # 748 ↛ 749: condition never true

749 raise xs_errors.XenError('Unimplemented') 

750 

751 if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid):  # 751 ↛ 752: condition never true

752 raise util.SMException("failed to pause VDI %s" % vdi_uuid) 

753 try: 

754 return self._snapshot(snapType, cbtlog, consistency_state) 

755 finally: 

756 self.disable_leaf_on_secondary(vdi_uuid, secondary=secondary) 

757 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary) 

758 

759 @override 

760 def _rename(self, src, dst) -> None: 

761 util.SMlog("FileVDI._rename %s to %s" % (src, dst)) 

762 util.ioretry(lambda: os.rename(src, dst)) 

763 

764 def _link(self, src, dst): 

765 util.SMlog("FileVDI._link %s to %s" % (src, dst)) 

766 os.link(src, dst) 

767 

768 def _unlink(self, path): 

769 util.SMlog("FileVDI._unlink %s" % (path)) 

770 os.unlink(path) 

771 

772 def _create_new_parent(self, src, newsrc): 

773 if self.sr._check_hardlinks(): 

774 self._link(src, newsrc) 

775 else: 

776 self._rename(src, newsrc) 

777 

778 def __fist_enospace(self): 

779 raise util.CommandException(28, "vhd-util snapshot", reason="No space") 

780 
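# Rough outline of the snapshot scheme implemented below: the current file at
# self.path is hard-linked (or renamed, when hardlinks are unsupported) to a
# fresh uuid, which becomes the hidden, read-only "base copy" parent; a new
# empty child is snapshotted against that parent under a temporary name and
# renamed over self.path; and for SNAPSHOT_DOUBLE a second child (`dest`, the
# VDI whose params are returned) is created against the same parent.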

781 def _snapshot(self, snap_type, cbtlog=None, cbt_consistency=None): 

782 util.SMlog("FileVDI._snapshot for %s (type %s)" % (self.uuid, snap_type)) 

783 

784 args = [] 

785 args.append("vdi_clone") 

786 args.append(self.sr.uuid) 

787 args.append(self.uuid) 

788 

789 dest = None 

790 dst = None 

791 if snap_type == VDI.SNAPSHOT_DOUBLE:  # 791 ↛ 796: condition never false

792 dest = util.gen_uuid() 

793 dst = os.path.join(self.sr.path, "%s.%s" % (dest, self.vdi_type)) 

794 args.append(dest) 

795 

796 if self.hidden:  # 796 ↛ 797: condition never true

797 raise xs_errors.XenError('VDIClone', opterr='hidden VDI') 

798 

799 depth = vhdutil.getDepth(self.path) 

800 if depth == -1:  # 800 ↛ 801: condition never true

801 raise xs_errors.XenError('VDIUnavailable', \ 

802 opterr='failed to get VHD depth') 

803 elif depth >= vhdutil.MAX_CHAIN_SIZE:  # 803 ↛ 804: condition never true

804 raise xs_errors.XenError('SnapshotChainTooLong') 

805 

806 newuuid = util.gen_uuid() 

807 src = self.path 

808 newsrc = os.path.join(self.sr.path, "%s.%s" % (newuuid, self.vdi_type)) 

809 newsrcname = "%s.%s" % (newuuid, self.vdi_type) 

810 

811 if not self._checkpath(src):  # 811 ↛ 812: condition never true

812 raise xs_errors.XenError('VDIUnavailable', \ 

813 opterr='VDI %s unavailable %s' % (self.uuid, src)) 

814 

815 # wkcfix: multiphase 

816 util.start_log_entry(self.sr.path, self.path, args) 

817 

818 # We assume the filehandle has been released 

819 try: 

820 self._create_new_parent(src, newsrc) 

821 

822 # Create the snapshot under a temporary name, then rename 

823 # it afterwards. This avoids a small window where it exists 

824 # but is invalid. We do not need to do this for 

825 # snap_type == VDI.SNAPSHOT_DOUBLE because dst never existed 

826 # before so nobody will try to query it. 

827 tmpsrc = "%s.%s" % (src, "new") 

828 # Fault injection site to fail the snapshot with ENOSPACE 

829 util.fistpoint.activate_custom_fn( 

830 "FileSR_fail_snap1", 

831 self.__fist_enospace) 

832 util.ioretry(lambda: self._snap(tmpsrc, newsrcname)) 

833 # SMB3 can return EACCES if we attempt to rename over the 

834 # hardlink leaf too quickly after creating it. 

835 util.ioretry(lambda: self._rename(tmpsrc, src), 

836 errlist=[errno.EIO, errno.EACCES]) 

837 if snap_type == VDI.SNAPSHOT_DOUBLE:  # 837 ↛ 845: condition never false

838 # Fault injection site to fail the snapshot with ENOSPACE 

839 util.fistpoint.activate_custom_fn( 

840 "FileSR_fail_snap2", 

841 self.__fist_enospace) 

842 util.ioretry(lambda: self._snap(dst, newsrcname)) 

843 # mark the original file (in this case, its newsrc) 

844 # as hidden so that it does not show up in subsequent scans 

845 util.ioretry(lambda: self._mark_hidden(newsrc)) 

846 

847 #Verify parent locator field of both children and delete newsrc if unused 

848 introduce_parent = True 

849 try: 

850 srcparent = util.ioretry(lambda: self._query_p_uuid(src)) 

851 dstparent = None 

852 if snap_type == VDI.SNAPSHOT_DOUBLE:  # 852 ↛ 854: condition never false

853 dstparent = util.ioretry(lambda: self._query_p_uuid(dst)) 

854 if srcparent != newuuid and \ 

855 (snap_type == VDI.SNAPSHOT_SINGLE or \ 

856 snap_type == VDI.SNAPSHOT_INTERNAL or \ 

857 dstparent != newuuid): 

858 util.ioretry(lambda: self._unlink(newsrc)) 

859 introduce_parent = False 

860 except: 

861 pass 

862 

863 # Introduce the new VDI records 

864 leaf_vdi = None 

865 if snap_type == VDI.SNAPSHOT_DOUBLE:  # 865 ↛ 884: condition never false

866 leaf_vdi = VDI.VDI(self.sr, dest) # user-visible leaf VDI 

867 leaf_vdi.read_only = False 

868 leaf_vdi.location = dest 

869 leaf_vdi.size = self.size 

870 leaf_vdi.utilisation = self.utilisation 

871 leaf_vdi.sm_config = {} 

872 leaf_vdi.sm_config['vhd-parent'] = dstparent 

873 # If the parent is encrypted set the key_hash 

874 # for the new snapshot disk 

875 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

876 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

877 if "key_hash" in sm_config: 877 ↛ 878line 877 didn't jump to line 878, because the condition on line 877 was never true

878 leaf_vdi.sm_config['key_hash'] = sm_config['key_hash'] 

879 # If we have CBT enabled on the VDI, 

880 # set CBT status for the new snapshot disk 

881 if cbtlog:  # 881 ↛ 882: condition never true

882 leaf_vdi.cbt_enabled = True 

883 

884 base_vdi = None 

885 if introduce_parent:  # 885 ↛ 897: condition never false

886 base_vdi = VDI.VDI(self.sr, newuuid) # readonly parent 

887 base_vdi.label = "base copy" 

888 base_vdi.read_only = True 

889 base_vdi.location = newuuid 

890 base_vdi.size = self.size 

891 base_vdi.utilisation = self.utilisation 

892 base_vdi.sm_config = {} 

893 grandparent = util.ioretry(lambda: self._query_p_uuid(newsrc)) 

894 if grandparent.find("no parent") == -1:  # 894 ↛ 897: condition never false

895 base_vdi.sm_config['vhd-parent'] = grandparent 

896 

897 try: 

898 if snap_type == VDI.SNAPSHOT_DOUBLE:  # 898 ↛ 903: condition never false

899 leaf_vdi_ref = leaf_vdi._db_introduce() 

900 util.SMlog("vdi_clone: introduced VDI: %s (%s)" % \ 

901 (leaf_vdi_ref, dest)) 

902 

903 if introduce_parent:  # 903 ↛ 907: condition never false

904 base_vdi_ref = base_vdi._db_introduce() 

905 self.session.xenapi.VDI.set_managed(base_vdi_ref, False) 

906 util.SMlog("vdi_clone: introduced VDI: %s (%s)" % (base_vdi_ref, newuuid)) 

907 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

908 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

909 sm_config['vhd-parent'] = srcparent 

910 self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config) 

911 except Exception as e: 

912 util.SMlog("vdi_clone: caught error during VDI.db_introduce: %s" % (str(e))) 

913 # Note it's too late to actually clean stuff up here: the base disk has 

914 # been marked as deleted already. 

915 util.end_log_entry(self.sr.path, self.path, ["error"]) 

916 raise 

917 except util.CommandException as inst: 

918 # XXX: it might be too late if the base disk has been marked as deleted! 

919 self._clonecleanup(src, dst, newsrc) 

920 util.end_log_entry(self.sr.path, self.path, ["error"]) 

921 raise xs_errors.XenError('VDIClone', 

922 opterr='VDI clone failed error %d' % inst.code) 

923 

924 # Update cbt files if user created snapshot (SNAPSHOT_DOUBLE) 

925 if snap_type == VDI.SNAPSHOT_DOUBLE and cbtlog:  # 925 ↛ 926: condition never true

926 try: 

927 self._cbt_snapshot(dest, cbt_consistency) 

928 except: 

929 # CBT operation failed. 

930 util.end_log_entry(self.sr.path, self.path, ["error"]) 

931 raise 

932 

933 util.end_log_entry(self.sr.path, self.path, ["done"]) 

934 if snap_type != VDI.SNAPSHOT_INTERNAL:  # 934 ↛ 937: condition never false

935 self.sr._update(self.sr.uuid, self.size) 

936 # Return info on the new user-visible leaf VDI 

937 ret_vdi = leaf_vdi 

938 if not ret_vdi:  # 938 ↛ 939: condition never true

939 ret_vdi = base_vdi 

940 if not ret_vdi:  # 940 ↛ 941: condition never true

941 ret_vdi = self 

942 return ret_vdi.get_params() 

943 

944 @override 

945 def get_params(self) -> str: 

946 if not self._checkpath(self.path): 

947 raise xs_errors.XenError('VDIUnavailable', \ 

948 opterr='VDI %s unavailable %s' % (self.uuid, self.path)) 

949 return super(FileVDI, self).get_params() 

950 

951 def _snap(self, child, parent): 

952 cmd = [SR.TAPDISK_UTIL, "snapshot", vhdutil.VDI_TYPE_VHD, child, parent] 

953 text = util.pread(cmd) 

954 

955 def _clonecleanup(self, src, dst, newsrc): 

956 try: 

957 if dst:  # 957 ↛ 961: condition never false

958 util.ioretry(lambda: self._unlink(dst)) 

959 except util.CommandException as inst: 

960 pass 

961 try: 

962 if util.ioretry(lambda: util.pathexists(newsrc)):  # 962 ↛ exit: condition never false

963 stats = os.stat(newsrc) 

964 # Check if we have more than one link to newsrc 

965 if (stats.st_nlink > 1): 

966 util.ioretry(lambda: self._unlink(newsrc)) 

967 elif not self._is_hidden(newsrc):  # 967 ↛ exit: condition never false

968 self._rename(newsrc, src) 

969 except util.CommandException as inst: 

970 pass 

971 

972 def _checkpath(self, path): 

973 try: 

974 if not util.ioretry(lambda: util.pathexists(path)):  # 974 ↛ 975: condition never true

975 return False 

976 return True 

977 except util.CommandException as inst: 

978 raise xs_errors.XenError('EIO', \ 

979 opterr='IO error checking path %s' % path) 

980 

981 def _query_v(self, path): 

982 cmd = [SR.TAPDISK_UTIL, "query", vhdutil.VDI_TYPE_VHD, "-v", path] 

983 return int(util.pread(cmd)) * 1024 * 1024 

984 
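# Reduces the parent path reported by `query -p` to a bare uuid. When the VHD
# has no parent the tool's message is passed through unchanged -- callers
# such as _snapshot() look for the literal "no parent" text in the result.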

985 def _query_p_uuid(self, path): 

986 cmd = [SR.TAPDISK_UTIL, "query", vhdutil.VDI_TYPE_VHD, "-p", path] 

987 parent = util.pread(cmd) 

988 parent = parent[:-1] 

989 ls = parent.split('/') 

990 return ls[len(ls) - 1].replace(vhdutil.FILE_EXTN_VHD, '') 

991 

992 def _query_info(self, path, use_bkp_footer=False): 

993 diskinfo = {} 

994 qopts = '-vpf' 

995 if use_bkp_footer:  # 995 ↛ 997: condition never false

996 qopts += 'b' 

997 cmd = [SR.TAPDISK_UTIL, "query", vhdutil.VDI_TYPE_VHD, qopts, path] 

998 txt = util.pread(cmd).split('\n') 

999 diskinfo['size'] = txt[0] 

1000 lst = [txt[1].split('/')[-1].replace(vhdutil.FILE_EXTN_VHD, "")] 

1001 for val in filter(util.exactmatch_uuid, lst):  # 1001 ↛ 1002: loop never started

1002 diskinfo['parent'] = val 

1003 diskinfo['hidden'] = txt[2].split()[1] 

1004 return diskinfo 

1005 

1006 def _create(self, size, path): 

1007 cmd = [SR.TAPDISK_UTIL, "create", vhdutil.VDI_TYPE_VHD, size, path] 

1008 text = util.pread(cmd) 

1009 if self.key_hash:  # 1009 ↛ 1010: condition never true

1010 vhdutil.setKey(path, self.key_hash) 

1011 

1012 def _mark_hidden(self, path): 

1013 vhdutil.setHidden(path, True) 

1014 self.hidden = 1 

1015 

1016 def _is_hidden(self, path): 

1017 return vhdutil.getHidden(path) == 1 

1018 

1019 def extractUuid(path): 

1020 fileName = os.path.basename(path) 

1021 uuid = fileName.replace(vhdutil.FILE_EXTN_VHD, "") 

1022 return uuid 

1023 extractUuid = staticmethod(extractUuid) 

1024 

1025 @override 

1026 def generate_config(self, sr_uuid, vdi_uuid) -> str: 

1027 """ 

1028 Generate the XML config required to attach and activate 

1029 a VDI for use when XAPI is not running. Attach and 

1030 activation are handled by vdi_attach_from_config below. 

1031 """ 

1032 util.SMlog("FileVDI.generate_config") 

1033 if not util.pathexists(self.path):  # 1033 ↛ 1034: condition never true

1034 raise xs_errors.XenError('VDIUnavailable') 

1035 resp = {} 

1036 resp['device_config'] = self.sr.dconf 

1037 resp['sr_uuid'] = sr_uuid 

1038 resp['vdi_uuid'] = vdi_uuid 

1039 resp['command'] = 'vdi_attach_from_config' 

1040 # Return the 'config' encoded within a normal XMLRPC response so that 

1041 # we can use the regular response/error parsing code. 
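# For illustration only (standard xmlrpc.client round-tripping, nothing
# specific to this driver): the receiver can unpack the doubly-encoded
# value with
#   (config,), _ = xmlrpc.client.loads(returned_value)
#   (params,), method = xmlrpc.client.loads(config)
# where method == 'vdi_attach_from_config' and params is the resp dict
# built above.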

1042 config = xmlrpc.client.dumps(tuple([resp]), "vdi_attach_from_config") 

1043 return xmlrpc.client.dumps((config, ), "", True) 

1044 

1045 @override 

1046 def attach_from_config(self, sr_uuid, vdi_uuid) -> str: 

1047 """ 

1048 Attach and activate a VDI using config generated by 

1049 vdi_generate_config above. This is used for cases such as 

1050 the HA state-file and the redo-log. 

1051 """ 

1052 util.SMlog("FileVDI.attach_from_config") 

1053 try: 

1054 if not util.pathexists(self.sr.path): 

1055 return self.sr.attach(sr_uuid) 

1056 except: 

1057 util.logException("FileVDI.attach_from_config") 

1058 raise xs_errors.XenError( 

1059 'SRUnavailable', 

1060 opterr='Unable to attach from config' 

1061 ) 

1062 return '' 

1063 

1064 @override 

1065 def _create_cbt_log(self) -> str: 

1066 # Create CBT log file 

1067 # Name: <vdi_uuid>.cbtlog 

1068 # Handle the case where the file already exists 

1069 log_path = self._get_cbt_logpath(self.uuid) 

1070 open_file = open(log_path, "w+") 

1071 open_file.close() 

1072 return super(FileVDI, self)._create_cbt_log() 

1073 

1074 @override 

1075 def _delete_cbt_log(self) -> None: 

1076 logPath = self._get_cbt_logpath(self.uuid) 

1077 try: 

1078 os.remove(logPath) 

1079 except OSError as e: 

1080 if e.errno != errno.ENOENT: 

1081 raise 

1082 

1083 @override 

1084 def _cbt_log_exists(self, logpath) -> bool: 

1085 return util.pathexists(logpath) 

1086 

1087 

1088class SharedFileSR(FileSR): 

1089 """ 

1090 FileSR subclass for SRs that use shared network storage 

1091 """ 

1092 

1093 def _check_writable(self): 

1094 """ 

1095 Checks that the filesystem being used by the SR can be written to, 

1096 raising an exception if it can't. 

1097 """ 

1098 test_name = os.path.join(self.path, str(uuid4())) 

1099 try: 

1100 open(test_name, 'ab').close() 

1101 except OSError as e: 

1102 util.SMlog("Cannot write to SR file system: %s" % e) 

1103 raise xs_errors.XenError('SharedFileSystemNoWrite') 

1104 finally: 

1105 util.force_unlink(test_name) 

1106 

1107 def _raise_hardlink_error(self): 

1108 raise OSError(524, "Unknown error 524") 

1109 

1110 @override 

1111 def _check_hardlinks(self) -> bool: 

1112 hardlink_conf = self._read_hardlink_conf() 

1113 if hardlink_conf is not None:  # 1113 ↛ 1114: condition never true

1114 return hardlink_conf 

1115 

1116 test_name = os.path.join(self.path, str(uuid4())) 

1117 open(test_name, 'ab').close() 

1118 

1119 link_name = '%s.new' % test_name 

1120 try: 

1121 # XSI-1100: Let tests simulate failure of the link operation 

1122 util.fistpoint.activate_custom_fn( 

1123 "FileSR_fail_hardlink", 

1124 self._raise_hardlink_error) 

1125 

1126 os.link(test_name, link_name) 

1127 self._write_hardlink_conf(supported=True) 

1128 return True 

1129 except OSError: 

1130 self._write_hardlink_conf(supported=False) 

1131 

1132 msg = "File system for SR %s does not support hardlinks, crash " \ 

1133 "consistency of snapshots cannot be assured" % self.uuid 

1134 util.SMlog(msg, priority=util.LOG_WARNING) 

1135 # Note: session can be not set during attach/detach_from_config calls. 

1136 if self.session:  # 1136 ↛ 1145: condition never false

1137 try: 

1138 self.session.xenapi.message.create( 

1139 "sr_does_not_support_hardlinks", 2, "SR", self.uuid, 

1140 msg) 

1141 except XenAPI.Failure: 

1142 # Might already be set and checking has TOCTOU issues 

1143 pass 

1144 finally: 

1145 util.force_unlink(link_name) 

1146 util.force_unlink(test_name) 

1147 

1148 return False 

1149 
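# The result of the hardlink probe is cached in sm-hardlink.conf in the SR
# directory as '1' or '0' so that later _check_hardlinks() calls can skip the
# filesystem test; the helpers below read and write that cache best-effort,
# treating a missing or unreadable file as "not decided yet".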

1150 def _get_hardlink_conf_path(self): 

1151 return os.path.join(self.path, 'sm-hardlink.conf') 

1152 

1153 def _read_hardlink_conf(self) -> Optional[bool]: 

1154 try: 

1155 with open(self._get_hardlink_conf_path(), 'r') as f: 

1156 try: 

1157 return bool(int(f.read())) 

1158 except Exception as e: 

1159 # If we can't read, assume the file is empty and test for hardlink support. 

1160 return None 

1161 except IOError as e: 

1162 if e.errno == errno.ENOENT: 

1163 # If the config file doesn't exist, assume we want to support hardlinks. 

1164 return None 

1165 util.SMlog('Failed to read hardlink conf: {}'.format(e)) 

1166 # Can be caused by a concurrent access, not a major issue. 

1167 return None 

1168 

1169 def _write_hardlink_conf(self, supported): 

1170 try: 

1171 with open(self._get_hardlink_conf_path(), 'w') as f: 

1172 f.write('1' if supported else '0') 

1173 except Exception as e: 

1174 # Can be caused by a concurrent access, not a major issue. 

1175 util.SMlog('Failed to write hardlink conf: {}'.format(e)) 

1176 

1177 if __name__ == '__main__':  # 1177 ↛ 1178: condition never true

1178 SRCommand.run(FileSR, DRIVER_INFO) 

1179else: 

1180 SR.registerSR(FileSR)