1#!/usr/bin/python3 

2# 

3# Copyright (C) Citrix Systems Inc. 

4# 

5# This program is free software; you can redistribute it and/or modify 

6# it under the terms of the GNU Lesser General Public License as published 

7# by the Free Software Foundation; version 2.1 only. 

8# 

9# This program is distributed in the hope that it will be useful, 

10# but WITHOUT ANY WARRANTY; without even the implied warranty of 

11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

12# GNU Lesser General Public License for more details. 

13# 

14# You should have received a copy of the GNU Lesser General Public License 

15# along with this program; if not, write to the Free Software Foundation, Inc., 

16# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 

17# 

18# FileSR: local-file storage repository 

19 

20import SR 

21import VDI 

22import SRCommand 

23import util 

24import scsiutil 

25import vhdutil 

26import os 

27import errno 

28import xs_errors 

29import cleanup 

30import blktap2 

31import time 

32import glob 

33from uuid import uuid4 

34from lock import Lock 

35import xmlrpc.client 

36import XenAPI # pylint: disable=import-error 

37from constants import CBTLOG_TAG 

38 

39geneology = {} 

40CAPABILITIES = ["SR_PROBE", "SR_UPDATE", \ 

41 "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH", \ 

42 "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "VDI_MIRROR", 

43 "VDI_GENERATE_CONFIG", "ATOMIC_PAUSE", "VDI_CONFIG_CBT", 

44 "VDI_ACTIVATE", "VDI_DEACTIVATE", "THIN_PROVISIONING"] 

45 

46CONFIGURATION = [['location', 'local directory path (required)']] 
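# Illustrative usage (not part of this module, hypothetical paths and uuids):
# an SR of this type is normally created through XAPI / the xe CLI, supplying
# the required 'location' key, for example roughly:
#   xe sr-create host-uuid=<host-uuid> name-label=local-vhd type=file \
#       content-type=user device-config:location=/path/to/directory
# The directory passed as device-config:location is what load() below reads
# from dconf['location'].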

47 

48DRIVER_INFO = { 

49 'name': 'Local Path VHD', 

50 'description': 'SR plugin which represents disks as VHD files stored on a local path', 

51 'vendor': 'Citrix Systems Inc', 

52 'copyright': '(C) 2008 Citrix Systems Inc', 

53 'driver_version': '1.0', 

54 'required_api_version': '1.0', 

55 'capabilities': CAPABILITIES, 

56 'configuration': CONFIGURATION 

57 } 

58 

59JOURNAL_FILE_PREFIX = ".journal-" 

60 

61OPS_EXCLUSIVE = [ 

62 "sr_create", "sr_delete", "sr_probe", "sr_attach", "sr_detach", 

63 "sr_scan", "vdi_init", "vdi_create", "vdi_delete", "vdi_attach", 

64 "vdi_detach", "vdi_resize_online", "vdi_snapshot", "vdi_clone"] 

65 

66DRIVER_CONFIG = {"ATTACH_FROM_CONFIG_WITH_TAPDISK": True} 

67 

68 

69class FileSR(SR.SR): 

70 """Local file storage repository""" 

71 

72 SR_TYPE = "file" 

73 

74 def handles(srtype): 

75 return srtype == 'file' 

76 handles = staticmethod(handles) 

77 

78 def _check_o_direct(self): 
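        # Work out the o_direct flag for this SR's VDIs (later copied into
        # srcmd.params['o_direct'] by FileVDI.load): honour the SR's
        # other_config["o_direct"] setting when a session is available,
        # otherwise default to True.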

79 if self.sr_ref and self.session is not None: 

80 other_config = self.session.xenapi.SR.get_other_config(self.sr_ref) 

81 o_direct = other_config.get("o_direct") 

82 self.o_direct = o_direct is not None and o_direct == "true" 

83 else: 

84 self.o_direct = True 

85 

86 def __init__(self, srcmd, sr_uuid): 

87 # We call SR.SR.__init__ explicitly because 

88 # "super" sometimes failed due to circular imports 

89 SR.SR.__init__(self, srcmd, sr_uuid) 

90 self._check_o_direct() 

91 

92 def load(self, sr_uuid): 

93 self.ops_exclusive = OPS_EXCLUSIVE 

94 self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid) 

95 self.sr_vditype = vhdutil.VDI_TYPE_VHD 

96 if 'location' not in self.dconf or not self.dconf['location']: 

97 raise xs_errors.XenError('ConfigLocationMissing') 

98 self.remotepath = self.dconf['location'] 

99 self.path = os.path.join(SR.MOUNT_BASE, sr_uuid) 

100 self.linkpath = self.path 

101 self.mountpoint = self.path 

102 self.attached = False 

103 self.driver_config = DRIVER_CONFIG 

104 

105 def create(self, sr_uuid, size): 

106 """ Create the SR. The path must not already exist, or if it does,  

107 it must be empty. (This accounts for the case where the user has 

108 mounted a device onto a directory manually and wants to use this as the 

109 root of a file-based SR.) """ 

110 try: 

111 if util.ioretry(lambda: util.pathexists(self.remotepath)): 

112 if len(util.ioretry(lambda: util.listdir(self.remotepath))) != 0: 

113 raise xs_errors.XenError('SRExists') 

114 else: 

115 try: 

116 util.ioretry(lambda: os.mkdir(self.remotepath)) 

117 except util.CommandException as inst: 

118 if inst.code == errno.EEXIST: 

119 raise xs_errors.XenError('SRExists') 

120 else: 

121 raise xs_errors.XenError('FileSRCreate', \ 

122 opterr='directory creation failure %d' \ 

123 % inst.code) 

124 except: 

125 raise xs_errors.XenError('FileSRCreate') 

126 

127 def delete(self, sr_uuid): 

128 self.attach(sr_uuid) 

129 cleanup.gc_force(self.session, self.uuid) 

130 

131 # check to make sure no VDIs are present; then remove old 

132 # files that are not VDIs 

133 try: 

134 if util.ioretry(lambda: util.pathexists(self.path)): 

135 #Load the VDI list 

136 self._loadvdis() 

137 for uuid in self.vdis: 

138 if not self.vdis[uuid].deleted: 

139 raise xs_errors.XenError('SRNotEmpty', \ 

140 opterr='VDIs still exist in SR') 

141 

142 # remove everything else; there are no VDIs 

143 for name in util.ioretry(lambda: util.listdir(self.path)): 

144 fullpath = os.path.join(self.path, name) 

145 try: 

146 util.ioretry(lambda: os.unlink(fullpath)) 

147 except util.CommandException as inst: 

148 if inst.code != errno.ENOENT and \ 

149 inst.code != errno.EISDIR: 

150 raise xs_errors.XenError('FileSRDelete', \ 

151 opterr='failed to remove %s error %d' \ 

152 % (fullpath, inst.code)) 

153 self.detach(sr_uuid) 

154 except util.CommandException as inst: 

155 self.detach(sr_uuid) 

156 raise xs_errors.XenError('FileSRDelete', \ 

157 opterr='error %d' % inst.code) 

158 

159 def attach(self, sr_uuid): 

160 if not self._checkmount(): 

161 try: 

162 util.ioretry(lambda: util.makedirs(self.path)) 

163 except util.CommandException as inst: 

164 if inst.code != errno.EEXIST: 

165 raise xs_errors.XenError("FileSRCreate", \ 

166 opterr='fail to create mount point. Errno is %s' % inst.code) 

167 try: 

168 util.pread(["mount", "--bind", self.remotepath, self.path]) 

169 except util.CommandException as inst: 

170 raise xs_errors.XenError('FileSRCreate', \ 

171 opterr='fail to mount FileSR. Errno is %s' % inst.code) 

172 self.attached = True 

173 

174 def detach(self, sr_uuid): 

175 if self._checkmount(): 

176 try: 

177 util.SMlog("Aborting GC/coalesce") 

178 cleanup.abort(self.uuid) 

179 os.chdir(SR.MOUNT_BASE) 

180 util.pread(["umount", self.path]) 

181 os.rmdir(self.path) 

182 except Exception as e: 

183 raise xs_errors.XenError('SRInUse', opterr=str(e)) 

184 self.attached = False 

185 

186 def scan(self, sr_uuid): 

187 if not self._checkmount(): 

188 raise xs_errors.XenError('SRUnavailable', \ 

189 opterr='no such directory %s' % self.path) 

190 

191 if not self.vdis: 

192 self._loadvdis() 

193 

194 if not self.passthrough: 

195 self.physical_size = self._getsize() 

196 self.physical_utilisation = self._getutilisation() 

197 

198 for uuid in list(self.vdis.keys()): 

199 if self.vdis[uuid].deleted: 

200 del self.vdis[uuid] 

201 

202 # CA-15607: make sure we are robust to the directory being unmounted beneath 

203 # us (eg by a confused user). Without this we might forget all our VDI references 

204 # which would be a shame. 

205 # For SMB SRs, this path is mountpoint 

206 mount_path = self.path 

207 if self.handles("smb"): 

208 mount_path = self.mountpoint 

209 

210 if not self.handles("file") and not os.path.ismount(mount_path): 

211 util.SMlog("Error: FileSR.scan called but directory %s isn't a mountpoint" % mount_path) 

212 raise xs_errors.XenError('SRUnavailable', \ 

213 opterr='not mounted %s' % mount_path) 

214 

215 self._kickGC() 

216 

217 # default behaviour from here on 

218 super(FileSR, self).scan(sr_uuid) 

219 

220 def update(self, sr_uuid): 

221 if not self._checkmount(): 

222 raise xs_errors.XenError('SRUnavailable', \ 

223 opterr='no such directory %s' % self.path) 

224 self._update(sr_uuid, 0) 

225 

226 def _update(self, sr_uuid, virt_alloc_delta): 

227 valloc = int(self.session.xenapi.SR.get_virtual_allocation(self.sr_ref)) 

228 self.virtual_allocation = valloc + virt_alloc_delta 

229 self.physical_size = self._getsize() 

230 self.physical_utilisation = self._getutilisation() 

231 self._db_update() 

232 

233 def content_type(self, sr_uuid): 

234 return super(FileSR, self).content_type(sr_uuid) 

235 

236 def vdi(self, uuid): 

237 return FileVDI(self, uuid) 

238 

239 def added_vdi(self, vdi): 

240 self.vdis[vdi.uuid] = vdi 

241 

242 def deleted_vdi(self, uuid): 

243 if uuid in self.vdis: 

244 del self.vdis[uuid] 

245 

246 def replay(self, uuid): 
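        # Replay operations recorded in the SR's filelog.txt (presumably the
        # journal written via util.start_log_entry/end_log_entry during
        # snapshots) so that interrupted operations can be completed.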

247 try: 

248 file = open(self.path + "/filelog.txt", "r") 

249 data = file.readlines() 

250 file.close() 

251 self._process_replay(data) 

252 except: 

253 raise xs_errors.XenError('SRLog') 

254 

255 def _loadvdis(self): 
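        # Populate self.vdis from what is on disk:
        #   1. scan all *.vhd files in one go via vhdutil.getAllVHDs
        #   2. add *.raw images and *.cbtlog metadata files
        #   3. mark parent VHDs read-only, record parent->child links in
        #      'geneology' and sum the virtual allocation of non-hidden VDIs
        #   4. drop hidden leaves (VDIs deleted but not yet garbage collected)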

256 if self.vdis: 

257 return 

258 

259 pattern = os.path.join(self.path, "*%s" % vhdutil.FILE_EXTN_VHD) 

260 try: 

261 self.vhds = vhdutil.getAllVHDs(pattern, FileVDI.extractUuid) 

262 except util.CommandException as inst: 

263 raise xs_errors.XenError('SRScan', opterr="error VHD-scanning " \ 

264 "path %s (%s)" % (self.path, inst)) 

265 try: 

266 list_vhds = [FileVDI.extractUuid(v) for v in util.ioretry(lambda: glob.glob(pattern))] 

267 if len(self.vhds) != len(list_vhds): 

268 util.SMlog("VHD scan returns %d VHDs: %s" % (len(self.vhds), sorted(self.vhds))) 

269 util.SMlog("VHD list returns %d VHDs: %s" % (len(list_vhds), sorted(list_vhds))) 

270 except: 

271 pass 

272 for uuid in self.vhds.keys(): 

273 if self.vhds[uuid].error: 

274 raise xs_errors.XenError('SRScan', opterr='uuid=%s' % uuid) 

275 self.vdis[uuid] = self.vdi(uuid) 

276 # Get the key hash of any encrypted VDIs: 

277 vhd_path = os.path.join(self.path, self.vhds[uuid].path) 

278 key_hash = vhdutil.getKeyHash(vhd_path) 

279 self.vdis[uuid].sm_config_override['key_hash'] = key_hash 

280 

281 # raw VDIs and CBT log files 

282 files = util.ioretry(lambda: util.listdir(self.path)) 

283 for fn in files: 

284 if fn.endswith(vhdutil.FILE_EXTN_RAW): 

285 uuid = fn[:-(len(vhdutil.FILE_EXTN_RAW))] 

286 self.vdis[uuid] = self.vdi(uuid) 

287 elif fn.endswith(CBTLOG_TAG): 

288 cbt_uuid = fn.split(".")[0] 

289 # If an associated disk exists, update CBT status 

290 # else create new VDI of type cbt_metadata 

291 if cbt_uuid in self.vdis: 

292 self.vdis[cbt_uuid].cbt_enabled = True 

293 else: 

294 new_vdi = self.vdi(cbt_uuid) 

295 new_vdi.ty = "cbt_metadata" 

296 new_vdi.cbt_enabled = True 

297 self.vdis[cbt_uuid] = new_vdi 

298 

299 # Mark parent VDIs as Read-only and generate virtual allocation 

300 self.virtual_allocation = 0 

301 for uuid, vdi in self.vdis.items(): 

302 if vdi.parent: 

303 if vdi.parent in self.vdis: 

304 self.vdis[vdi.parent].read_only = True 

305 if vdi.parent in geneology: 

306 geneology[vdi.parent].append(uuid) 

307 else: 

308 geneology[vdi.parent] = [uuid] 

309 if not vdi.hidden: 

310 self.virtual_allocation += (vdi.size) 

311 

312 # now remove all hidden leaf nodes from self.vdis so that they are not 

313 # introduced into the Agent DB when SR is synchronized. With the 

314 # asynchronous GC, a deleted VDI might stay around until the next 

315 # SR.scan, so if we don't ignore hidden leaves we would pick up 

316 # freshly-deleted VDIs as newly-added VDIs 

317 for uuid in list(self.vdis.keys()): 

318 if uuid not in geneology and self.vdis[uuid].hidden: 

319 util.SMlog("Scan found hidden leaf (%s), ignoring" % uuid) 

320 del self.vdis[uuid] 

321 

322 def _getsize(self): 

323 path = self.path 

324 if self.handles("smb"): 

325 path = self.linkpath 

326 return util.get_fs_size(path) 

327 

328 def _getutilisation(self): 

329 return util.get_fs_utilisation(self.path) 

330 

331 def _replay(self, logentry): 

332 # all replay commands have the same 5th, 6th and 7th arguments: 

333 # vdi_command, sr-uuid, vdi-uuid 

334 back_cmd = logentry[5].replace("vdi_", "") 

335 target = self.vdi(logentry[7]) 

336 cmd = getattr(target, back_cmd) 

337 args = [] 

338 for item in logentry[6:]: 

339 item = item.replace("\n", "") 

340 args.append(item) 

341 ret = cmd(*args) 

342 if ret: 

343 print(ret) 

344 

345 def _compare_args(self, a, b): 

346 try: 

347 if a[2] != "log:": 

348 return 1 

349 if b[2] != "end:" and b[2] != "error:": 

350 return 1 

351 if a[3] != b[3]: 

352 return 1 

353 if a[4] != b[4]: 

354 return 1 

355 return 0 

356 except: 

357 return 1 

358 

359 def _process_replay(self, data): 

360 logentries = [] 

361 for logentry in data: 

362 logentry = logentry.split(" ") 

363 logentries.append(logentry) 

364 # we are looking for a log entry that has a log but no end or error 

365 # wkcfix -- recreate (adjusted) logfile 

366 index = 0 

367 while index < len(logentries) - 1: 

368 if self._compare_args(logentries[index], logentries[index + 1]): 

369 self._replay(logentries[index]) 

370 else: 

371 # skip the paired one 

372 index += 1 

373 # next 

374 index += 1 

375 

376 def _kickGC(self): 

377 # don't bother if an instance already running (this is just an 

378 # optimization to reduce the overhead of forking a new process if we 

379 # don't have to, but the process will check the lock anyways) 

380 lockRunning = Lock(cleanup.LOCK_TYPE_RUNNING, self.uuid) 

381 if not lockRunning.acquireNoblock(): 

382 if cleanup.should_preempt(self.session, self.uuid): 

383 util.SMlog("Aborting currently-running coalesce of garbage VDI") 

384 try: 

385 if not cleanup.abort(self.uuid, soft=True): 

386 util.SMlog("The GC has already been scheduled to " 

387 "re-start") 

388 except util.CommandException as e: 

389 if e.code != errno.ETIMEDOUT: 

390 raise 

391 util.SMlog('failed to abort the GC') 

392 finally: 

393 return 

394 else: 

395 util.SMlog("A GC instance already running, not kicking") 

396 return 

397 else: 

398 lockRunning.release() 

399 

400 util.SMlog("Kicking GC") 

401 cleanup.gc(self.session, self.uuid, True) 

402 

403 def _isbind(self): 

404 # os.path.ismount can't deal with bind mount 

405 st1 = os.stat(self.path) 

406 st2 = os.stat(self.remotepath) 

407 return st1.st_dev == st2.st_dev and st1.st_ino == st2.st_ino 

408 

409 def _checkmount(self): 
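        # The SR is considered mounted if its mount path exists and is either
        # a real mountpoint or a bind mount of self.remotepath (see _isbind).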

410 mount_path = self.path 

411 if self.handles("smb"): 

412 mount_path = self.mountpoint 

413 

414 return util.ioretry(lambda: util.pathexists(mount_path) and \ 

415 (util.ismount(mount_path) or \ 

416 util.pathexists(self.remotepath) and self._isbind())) 

417 

418 

419class FileVDI(VDI.VDI): 

420 PARAM_VHD = "vhd" 

421 PARAM_RAW = "raw" 

422 VDI_TYPE = { 

423 PARAM_VHD: vhdutil.VDI_TYPE_VHD, 

424 PARAM_RAW: vhdutil.VDI_TYPE_RAW 

425 } 

426 

427 def _find_path_with_retries(self, vdi_uuid, maxretry=5, period=2.0): 
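        # Poll for the VDI's backing file, which may be a VHD (<uuid>.vhd),
        # a raw image (<uuid>.raw) or a CBT log (<uuid>.cbtlog), retrying up
        # to 'maxretry' times with a 'period'-second sleep between attempts.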

428 vhd_path = os.path.join(self.sr.path, "%s.%s" % \ 

429 (vdi_uuid, self.PARAM_VHD)) 

430 raw_path = os.path.join(self.sr.path, "%s.%s" % \ 

431 (vdi_uuid, self.PARAM_RAW)) 

432 cbt_path = os.path.join(self.sr.path, "%s.%s" % 

433 (vdi_uuid, CBTLOG_TAG)) 

434 found = False 

435 tries = 0 

436 while tries < maxretry and not found: 

437 tries += 1 

438 if util.ioretry(lambda: util.pathexists(vhd_path)): 

439 self.vdi_type = vhdutil.VDI_TYPE_VHD 

440 self.path = vhd_path 

441 found = True 

442 elif util.ioretry(lambda: util.pathexists(raw_path)): 

443 self.vdi_type = vhdutil.VDI_TYPE_RAW 

444 self.path = raw_path 

445 self.hidden = False 

446 found = True 

447 elif util.ioretry(lambda: util.pathexists(cbt_path)): 

448 self.vdi_type = CBTLOG_TAG 

449 self.path = cbt_path 

450 self.hidden = False 

451 found = True 

452 

453 if not found: 

454 util.SMlog("VHD %s not found, retry %s of %s" % (vhd_path, tries, maxretry)) 

455 time.sleep(period) 

456 

457 return found 

458 

459 def load(self, vdi_uuid): 

460 self.lock = self.sr.lock 

461 

462 self.sr.srcmd.params['o_direct'] = self.sr.o_direct 

463 

464 if self.sr.srcmd.cmd == "vdi_create": 

465 self.vdi_type = vhdutil.VDI_TYPE_VHD 

466 self.key_hash = None 

467 if "vdi_sm_config" in self.sr.srcmd.params: 

468 if "key_hash" in self.sr.srcmd.params["vdi_sm_config"]: 

469 self.key_hash = self.sr.srcmd.params["vdi_sm_config"]["key_hash"] 

470 

471 if "type" in self.sr.srcmd.params["vdi_sm_config"]: 

472 vdi_type = self.sr.srcmd.params["vdi_sm_config"]["type"] 

473 if not self.VDI_TYPE.get(vdi_type): 

474 raise xs_errors.XenError('VDIType', 

475 opterr='Invalid VDI type %s' % vdi_type) 

476 self.vdi_type = self.VDI_TYPE[vdi_type] 

477 self.path = os.path.join(self.sr.path, "%s%s" % 

478 (vdi_uuid, vhdutil.FILE_EXTN[self.vdi_type])) 

479 else: 

480 found = self._find_path_with_retries(vdi_uuid) 

481 if not found: 

482 if self.sr.srcmd.cmd == "vdi_delete": 

483 # Could be delete for CBT log file 

484 self.path = os.path.join(self.sr.path, "%s.%s" % 

485 (vdi_uuid, self.PARAM_VHD)) 

486 return 

487 if self.sr.srcmd.cmd == "vdi_attach_from_config": 

488 return 

489 raise xs_errors.XenError('VDIUnavailable', 

490 opterr="VDI %s not found" % vdi_uuid) 

491 

492 

493 if self.vdi_type == vhdutil.VDI_TYPE_VHD and \ 

494 self.sr.__dict__.get("vhds") and self.sr.vhds.get(vdi_uuid): 

495 # VHD info already preloaded: use it instead of querying directly 

496 vhdInfo = self.sr.vhds[vdi_uuid] 

497 self.utilisation = vhdInfo.sizePhys 

498 self.size = vhdInfo.sizeVirt 

499 self.hidden = vhdInfo.hidden 

500 if self.hidden: 

501 self.managed = False 

502 self.parent = vhdInfo.parentUuid 

503 if self.parent: 

504 self.sm_config_override = {'vhd-parent': self.parent} 

505 else: 

506 self.sm_config_override = {'vhd-parent': None} 

507 return 

508 

509 try: 

510 # Change to the SR directory in case parent 

511 # locator field path has changed 

512 os.chdir(self.sr.path) 

513 except Exception as chdir_exception: 

514 util.SMlog("Unable to change to SR directory, SR unavailable, %s" % 

515 str(chdir_exception)) 

516 raise xs_errors.XenError('SRUnavailable', opterr=str(chdir_exception)) 

517 

518 if util.ioretry( 

519 lambda: util.pathexists(self.path), 

520 errlist=[errno.EIO, errno.ENOENT]): 

521 try: 

522 st = util.ioretry(lambda: os.stat(self.path), 

523 errlist=[errno.EIO, errno.ENOENT]) 

524 self.utilisation = int(st.st_size) 

525 except util.CommandException as inst: 

526 if inst.code == errno.EIO: 

527 raise xs_errors.XenError('VDILoad', \ 

528 opterr='Failed load VDI information %s' % self.path) 

529 else: 

530 util.SMlog("Stat failed for %s, %s" % ( 

531 self.path, str(inst))) 

532 raise xs_errors.XenError('VDIType', \ 

533 opterr='Invalid VDI type %s' % self.vdi_type) 

534 

535 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

536 self.exists = True 

537 self.size = self.utilisation 

538 self.sm_config_override = {'type': self.PARAM_RAW} 

539 return 

540 

541 if self.vdi_type == CBTLOG_TAG: 

542 self.exists = True 

543 self.size = self.utilisation 

544 return 

545 

546 try: 

547 # The VDI might be activated in R/W mode so the VHD footer 

548 # won't be valid, use the back-up one instead. 

549 diskinfo = util.ioretry( 

550 lambda: self._query_info(self.path, True), 

551 errlist=[errno.EIO, errno.ENOENT]) 

552 

553 if 'parent' in diskinfo: 

554 self.parent = diskinfo['parent'] 

555 self.sm_config_override = {'vhd-parent': self.parent} 

556 else: 

557 self.sm_config_override = {'vhd-parent': None} 

558 self.parent = '' 

559 self.size = int(diskinfo['size']) * 1024 * 1024 

560 self.hidden = int(diskinfo['hidden']) 

561 if self.hidden: 

562 self.managed = False 

563 self.exists = True 

564 except util.CommandException as inst: 

565 raise xs_errors.XenError('VDILoad', \ 

566 opterr='Failed load VDI information %s' % self.path) 

567 

568 def update(self, sr_uuid, vdi_location): 

569 self.load(vdi_location) 

570 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

571 self.sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

572 self._db_update() 

573 

574 def create(self, sr_uuid, vdi_uuid, size): 

575 if util.ioretry(lambda: util.pathexists(self.path)): 

576 raise xs_errors.XenError('VDIExists') 

577 

578 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 

579 try: 

580 size = vhdutil.validate_and_round_vhd_size(int(size)) 

581 mb = 1024 * 1024 

582 size_mb = size // mb 

583 util.ioretry(lambda: self._create(str(size_mb), self.path)) 

584 self.size = util.ioretry(lambda: self._query_v(self.path)) 

585 except util.CommandException as inst: 

586 raise xs_errors.XenError('VDICreate', 

587 opterr='error %d' % inst.code) 

588 else: 

589 f = open(self.path, 'w') 

590 f.truncate(int(size)) 

591 f.close() 

592 self.size = size 

593 

594 self.sr.added_vdi(self) 

595 

596 st = util.ioretry(lambda: os.stat(self.path)) 

597 self.utilisation = int(st.st_size) 

598 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

599 self.sm_config = {"type": self.PARAM_RAW} 

600 

601 self._db_introduce() 

602 self.sr._update(self.sr.uuid, self.size) 

603 return super(FileVDI, self).get_params() 

604 

605 def delete(self, sr_uuid, vdi_uuid, data_only=False): 

606 if not util.ioretry(lambda: util.pathexists(self.path)): 

607 return super(FileVDI, self).delete(sr_uuid, vdi_uuid, data_only) 

608 

609 if self.attached: 

610 raise xs_errors.XenError('VDIInUse') 

611 

612 try: 

613 util.force_unlink(self.path) 

614 except Exception as e: 

615 raise xs_errors.XenError( 

616 'VDIDelete', 

617 opterr='Failed to unlink file during deleting VDI: %s' % str(e)) 

618 

619 self.sr.deleted_vdi(vdi_uuid) 

620 # If this is a data_destroy call, don't remove from XAPI db 

621 if not data_only: 

622 self._db_forget() 

623 self.sr._update(self.sr.uuid, -self.size) 

624 self.sr.lock.cleanupAll(vdi_uuid) 

625 self.sr._kickGC() 

626 return super(FileVDI, self).delete(sr_uuid, vdi_uuid, data_only) 

627 

628 def attach(self, sr_uuid, vdi_uuid): 

629 if self.path is None: 

630 self._find_path_with_retries(vdi_uuid) 

631 if not self._checkpath(self.path): 

632 raise xs_errors.XenError('VDIUnavailable', \ 

633 opterr='VDI %s unavailable %s' % (vdi_uuid, self.path)) 

634 try: 

635 self.attached = True 

636 

637 if not hasattr(self, 'xenstore_data'): 

638 self.xenstore_data = {} 

639 

640 self.xenstore_data.update(scsiutil.update_XS_SCSIdata(vdi_uuid, \ 

641 scsiutil.gen_synthetic_page_data(vdi_uuid))) 

642 

643 if self.sr.handles("file"): 

644 # XXX: PR-1255: if these are constants then they should 

645 # be returned by the attach API call, not persisted in the 

646 # pool database. 

647 self.xenstore_data['storage-type'] = 'ext' 

648 return super(FileVDI, self).attach(sr_uuid, vdi_uuid) 

649 except util.CommandException as inst: 

650 raise xs_errors.XenError('VDILoad', opterr='error %d' % inst.code) 

651 

652 def detach(self, sr_uuid, vdi_uuid): 

653 self.attached = False 

654 

655 def resize(self, sr_uuid, vdi_uuid, size): 

656 if not self.exists: 

657 raise xs_errors.XenError('VDIUnavailable', \ 

658 opterr='VDI %s unavailable %s' % (vdi_uuid, self.path)) 

659 

660 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

661 raise xs_errors.XenError('Unimplemented') 

662 

663 if self.hidden: 

664 raise xs_errors.XenError('VDIUnavailable', opterr='hidden VDI') 

665 

666 if size < self.size: 

667 util.SMlog('vdi_resize: shrinking not supported: ' + \ 

668 '(current size: %d, new size: %d)' % (self.size, size)) 

669 raise xs_errors.XenError('VDISize', opterr='shrinking not allowed') 

670 

671 if size == self.size: 

672 return VDI.VDI.get_params(self) 

673 

674 # We already checked it is a VDI_TYPE_VHD 

675 size = vhdutil.validate_and_round_vhd_size(int(size)) 

676 

677 jFile = JOURNAL_FILE_PREFIX + self.uuid 

678 try: 

679 vhdutil.setSizeVirt(self.path, size, jFile) 

680 except: 

681 # Revert the operation 

682 vhdutil.revert(self.path, jFile) 

683 raise xs_errors.XenError('VDISize', opterr='resize operation failed') 

684 

685 old_size = self.size 

686 self.size = vhdutil.getSizeVirt(self.path) 

687 st = util.ioretry(lambda: os.stat(self.path)) 

688 self.utilisation = int(st.st_size) 

689 

690 self._db_update() 

691 self.sr._update(self.sr.uuid, self.size - old_size) 

692 super(FileVDI, self).resize_cbt(self.sr.uuid, self.uuid, self.size) 

693 return VDI.VDI.get_params(self) 

694 

695 def clone(self, sr_uuid, vdi_uuid): 

696 return self._do_snapshot(sr_uuid, vdi_uuid, VDI.SNAPSHOT_DOUBLE) 

697 

698 def compose(self, sr_uuid, vdi1, vdi2): 

699 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

700 raise xs_errors.XenError('Unimplemented') 

701 parent_fn = vdi1 + vhdutil.FILE_EXTN[vhdutil.VDI_TYPE_VHD] 

702 parent_path = os.path.join(self.sr.path, parent_fn) 

703 assert(util.pathexists(parent_path)) 

704 vhdutil.setParent(self.path, parent_path, False) 

705 vhdutil.setHidden(parent_path) 

706 self.sr.session.xenapi.VDI.set_managed(self.sr.srcmd.params['args'][0], False) 

707 util.pread2([vhdutil.VHD_UTIL, "modify", "-p", parent_path, 

708 "-n", self.path]) 

709 # Tell tapdisk the chain has changed 

710 if not blktap2.VDI.tap_refresh(self.session, sr_uuid, vdi2): 

711 raise util.SMException("failed to refresh VDI %s" % self.uuid) 

712 util.SMlog("VDI.compose: relinked %s->%s" % (vdi2, vdi1)) 

713 

714 def reset_leaf(self, sr_uuid, vdi_uuid): 

715 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

716 raise xs_errors.XenError('Unimplemented') 

717 

718 # safety check 

719 if not vhdutil.hasParent(self.path): 

720 raise util.SMException("ERROR: VDI %s has no parent, " 

721 "will not reset contents" % self.uuid) 

722 

723 vhdutil.killData(self.path) 

724 

725 def _do_snapshot(self, sr_uuid, vdi_uuid, snap_type, 

726 secondary=None, cbtlog=None): 

727 # If cbt enabled, save file consistency state 

728 if cbtlog is not None: 

729 if blktap2.VDI.tap_status(self.session, vdi_uuid): 

730 consistency_state = False 

731 else: 

732 consistency_state = True 

733 util.SMlog("Saving log consistency state of %s for vdi: %s" % 

734 (consistency_state, vdi_uuid)) 

735 else: 

736 consistency_state = None 

737 

738 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

739 raise xs_errors.XenError('Unimplemented') 

740 

741 if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid): 

742 raise util.SMException("failed to pause VDI %s" % vdi_uuid) 

743 try: 

744 return self._snapshot(snap_type, cbtlog, consistency_state) 

745 finally: 

746 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary) 

747 

748 def _rename(self, src, dst): 

749 util.SMlog("FileVDI._rename %s to %s" % (src, dst)) 

750 util.ioretry(lambda: os.rename(src, dst)) 

751 

752 def _link(self, src, dst): 

753 util.SMlog("FileVDI._link %s to %s" % (src, dst)) 

754 os.link(src, dst) 

755 

756 def _unlink(self, path): 

757 util.SMlog("FileVDI._unlink %s" % (path)) 

758 os.unlink(path) 

759 

760 def _create_new_parent(self, src, newsrc): 
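        # Turn the current image into the new read-only parent: hardlink it to
        # the new name when the filesystem supports hardlinks, otherwise fall
        # back to a rename (as recorded by NO_HARDLINK_SUPPORT in sm_config).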

761 sr_sm_config = self.session.xenapi.SR.get_sm_config(self.sr.sr_ref) 

762 if SharedFileSR.NO_HARDLINK_SUPPORT in sr_sm_config: 

763 self._rename(src, newsrc) 

764 else: 

765 self._link(src, newsrc) 

766 

767 def __fist_enospace(self): 

768 raise util.CommandException(28, "vhd-util snapshot", reason="No space") 

769 

770 def _snapshot(self, snap_type, cbtlog=None, cbt_consistency=None): 
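        # Rough sequence of the snapshot operation below:
        #   1. hardlink/rename the current VHD to a fresh uuid (the new parent)
        #   2. snapshot a new leaf against that parent (_snap) under a
        #      temporary name, then rename it over the original path
        #   3. for SNAPSHOT_DOUBLE, snapshot a second, user-visible leaf
        #   4. mark the parent hidden and introduce the new VDI records into
        #      the XAPI database (dropping the parent again if it is unused)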

771 util.SMlog("FileVDI._snapshot for %s (type %s)" % (self.uuid, snap_type)) 

772 

773 args = [] 

774 args.append("vdi_clone") 

775 args.append(self.sr.uuid) 

776 args.append(self.uuid) 

777 

778 dest = None 

779 dst = None 

780 if snap_type == VDI.SNAPSHOT_DOUBLE: 

781 dest = util.gen_uuid() 

782 dst = os.path.join(self.sr.path, "%s.%s" % (dest, self.vdi_type)) 

783 args.append(dest) 

784 

785 if self.hidden: 

786 raise xs_errors.XenError('VDIClone', opterr='hidden VDI') 

787 

788 depth = vhdutil.getDepth(self.path) 

789 if depth == -1: 

790 raise xs_errors.XenError('VDIUnavailable', \ 

791 opterr='failed to get VHD depth') 

792 elif depth >= vhdutil.MAX_CHAIN_SIZE: 

793 raise xs_errors.XenError('SnapshotChainTooLong') 

794 

795 newuuid = util.gen_uuid() 

796 src = self.path 

797 newsrc = os.path.join(self.sr.path, "%s.%s" % (newuuid, self.vdi_type)) 

798 newsrcname = "%s.%s" % (newuuid, self.vdi_type) 

799 

800 if not self._checkpath(src): 

801 raise xs_errors.XenError('VDIUnavailable', \ 

802 opterr='VDI %s unavailable %s' % (self.uuid, src)) 

803 

804 # wkcfix: multiphase 

805 util.start_log_entry(self.sr.path, self.path, args) 

806 

807 # We assume the filehandle has been released 

808 try: 

809 self._create_new_parent(src, newsrc) 

810 

811 # Create the snapshot under a temporary name, then rename 

812 # it afterwards. This avoids a small window where it exists 

813 # but is invalid. We do not need to do this for 

814 # snap_type == VDI.SNAPSHOT_DOUBLE because dst never existed 

815 # before so nobody will try to query it. 

816 tmpsrc = "%s.%s" % (src, "new") 

817 # Fault injection site to fail the snapshot with ENOSPACE 

818 util.fistpoint.activate_custom_fn( 

819 "FileSR_fail_snap1", 

820 self.__fist_enospace) 

821 util.ioretry(lambda: self._snap(tmpsrc, newsrcname)) 

822 self._rename(tmpsrc, src) 

823 if snap_type == VDI.SNAPSHOT_DOUBLE: 

824 # Fault injection site to fail the snapshot with ENOSPACE 

825 util.fistpoint.activate_custom_fn( 

826 "FileSR_fail_snap2", 

827 self.__fist_enospace) 

828 util.ioretry(lambda: self._snap(dst, newsrcname)) 

829 # mark the original file (in this case, its newsrc) 

830 # as hidden so that it does not show up in subsequent scans 

831 util.ioretry(lambda: self._mark_hidden(newsrc)) 

832 

833 # Verify the parent locator field of both children and delete newsrc if unused 

834 introduce_parent = True 

835 try: 

836 srcparent = util.ioretry(lambda: self._query_p_uuid(src)) 

837 dstparent = None 

838 if snap_type == VDI.SNAPSHOT_DOUBLE: 

839 dstparent = util.ioretry(lambda: self._query_p_uuid(dst)) 

840 if srcparent != newuuid and \ 

841 (snap_type == VDI.SNAPSHOT_SINGLE or \ 

842 snap_type == VDI.SNAPSHOT_INTERNAL or \ 

843 dstparent != newuuid): 

844 util.ioretry(lambda: self._unlink(newsrc)) 

845 introduce_parent = False 

846 except: 

847 pass 

848 

849 # Introduce the new VDI records 

850 leaf_vdi = None 

851 if snap_type == VDI.SNAPSHOT_DOUBLE: 

852 leaf_vdi = VDI.VDI(self.sr, dest) # user-visible leaf VDI 

853 leaf_vdi.read_only = False 

854 leaf_vdi.location = dest 

855 leaf_vdi.size = self.size 

856 leaf_vdi.utilisation = self.utilisation 

857 leaf_vdi.sm_config = {} 

858 leaf_vdi.sm_config['vhd-parent'] = dstparent 

859 # If the parent is encrypted set the key_hash 

860 # for the new snapshot disk 

861 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

862 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

863 if "key_hash" in sm_config: 

864 leaf_vdi.sm_config['key_hash'] = sm_config['key_hash'] 

865 # If we have CBT enabled on the VDI, 

866 # set CBT status for the new snapshot disk 

867 if cbtlog: 

868 leaf_vdi.cbt_enabled = True 

869 

870 base_vdi = None 

871 if introduce_parent: 

872 base_vdi = VDI.VDI(self.sr, newuuid) # readonly parent 

873 base_vdi.label = "base copy" 

874 base_vdi.read_only = True 

875 base_vdi.location = newuuid 

876 base_vdi.size = self.size 

877 base_vdi.utilisation = self.utilisation 

878 base_vdi.sm_config = {} 

879 grandparent = util.ioretry(lambda: self._query_p_uuid(newsrc)) 

880 if grandparent.find("no parent") == -1: 

881 base_vdi.sm_config['vhd-parent'] = grandparent 

882 

883 try: 

884 if snap_type == VDI.SNAPSHOT_DOUBLE: 

885 leaf_vdi_ref = leaf_vdi._db_introduce() 

886 util.SMlog("vdi_clone: introduced VDI: %s (%s)" % \ 

887 (leaf_vdi_ref, dest)) 

888 

889 if introduce_parent: 

890 base_vdi_ref = base_vdi._db_introduce() 

891 self.session.xenapi.VDI.set_managed(base_vdi_ref, False) 

892 util.SMlog("vdi_clone: introduced VDI: %s (%s)" % (base_vdi_ref, newuuid)) 

893 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

894 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

895 sm_config['vhd-parent'] = srcparent 

896 self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config) 

897 except Exception as e: 

898 util.SMlog("vdi_clone: caught error during VDI.db_introduce: %s" % (str(e))) 

899 # Note it's too late to actually clean stuff up here: the base disk has 

900 # been marked as deleted already. 

901 util.end_log_entry(self.sr.path, self.path, ["error"]) 

902 raise 

903 except util.CommandException as inst: 

904 # XXX: it might be too late if the base disk has been marked as deleted! 

905 self._clonecleanup(src, dst, newsrc) 

906 util.end_log_entry(self.sr.path, self.path, ["error"]) 

907 raise xs_errors.XenError('VDIClone', 

908 opterr='VDI clone failed error %d' % inst.code) 

909 

910 # Update cbt files if user created snapshot (SNAPSHOT_DOUBLE) 

911 if snap_type == VDI.SNAPSHOT_DOUBLE and cbtlog: 

912 try: 

913 self._cbt_snapshot(dest, cbt_consistency) 

914 except: 

915 # CBT operation failed. 

916 util.end_log_entry(self.sr.path, self.path, ["error"]) 

917 raise 

918 

919 util.end_log_entry(self.sr.path, self.path, ["done"]) 

920 if snap_type != VDI.SNAPSHOT_INTERNAL: 

921 self.sr._update(self.sr.uuid, self.size) 

922 # Return info on the new user-visible leaf VDI 

923 ret_vdi = leaf_vdi 

924 if not ret_vdi: 

925 ret_vdi = base_vdi 

926 if not ret_vdi: 

927 ret_vdi = self 

928 return ret_vdi.get_params() 

929 

930 def get_params(self): 

931 if not self._checkpath(self.path): 

932 raise xs_errors.XenError('VDIUnavailable', \ 

933 opterr='VDI %s unavailable %s' % (self.uuid, self.path)) 

934 return super(FileVDI, self).get_params() 

935 

936 def _snap(self, child, parent): 

937 cmd = [SR.TAPDISK_UTIL, "snapshot", vhdutil.VDI_TYPE_VHD, child, parent] 

938 text = util.pread(cmd) 

939 

940 def _clonecleanup(self, src, dst, newsrc): 

941 try: 

942 if dst: 

943 util.ioretry(lambda: self._unlink(dst)) 

944 except util.CommandException as inst: 

945 pass 

946 try: 

947 if util.ioretry(lambda: util.pathexists(newsrc)): 

948 stats = os.stat(newsrc) 

949 # Check if we have more than one link to newsrc 

950 if (stats.st_nlink > 1): 

951 util.ioretry(lambda: self._unlink(newsrc)) 

952 elif not self._is_hidden(newsrc): 

953 self._rename(newsrc, src) 

954 except util.CommandException as inst: 

955 pass 

956 

957 def _checkpath(self, path): 

958 try: 

959 if not util.ioretry(lambda: util.pathexists(path)): 

960 return False 

961 return True 

962 except util.CommandException as inst: 

963 raise xs_errors.XenError('EIO', \ 

964 opterr='IO error checking path %s' % path) 

965 

966 def _query_v(self, path): 

967 cmd = [SR.TAPDISK_UTIL, "query", vhdutil.VDI_TYPE_VHD, "-v", path] 

968 return int(util.pread(cmd)) * 1024 * 1024 

969 

970 def _query_p_uuid(self, path): 
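        # Query the parent of the VHD at 'path', strip the trailing newline
        # and reduce the result to the parent's uuid (basename without the
        # .vhd extension); a "no parent" answer is passed through and checked
        # by the caller in _snapshot.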

971 cmd = [SR.TAPDISK_UTIL, "query", vhdutil.VDI_TYPE_VHD, "-p", path] 

972 parent = util.pread(cmd) 

973 parent = parent[:-1] 

974 ls = parent.split('/') 

975 return ls[len(ls) - 1].replace(vhdutil.FILE_EXTN_VHD, '') 

976 

977 def _query_info(self, path, use_bkp_footer=False): 

978 diskinfo = {} 

979 qopts = '-vpf' 

980 if use_bkp_footer: 

981 qopts += 'b' 

982 cmd = [SR.TAPDISK_UTIL, "query", vhdutil.VDI_TYPE_VHD, qopts, path] 

983 txt = util.pread(cmd).split('\n') 

984 diskinfo['size'] = txt[0] 

985 lst = [txt[1].split('/')[-1].replace(vhdutil.FILE_EXTN_VHD, "")] 

986 for val in filter(util.exactmatch_uuid, lst): 

987 diskinfo['parent'] = val 

988 diskinfo['hidden'] = txt[2].split()[1] 

989 return diskinfo 

990 

991 def _create(self, size, path): 

992 cmd = [SR.TAPDISK_UTIL, "create", vhdutil.VDI_TYPE_VHD, size, path] 

993 text = util.pread(cmd) 

994 if self.key_hash: 

995 vhdutil.setKey(path, self.key_hash) 

996 

997 def _mark_hidden(self, path): 

998 vhdutil.setHidden(path, True) 

999 self.hidden = 1 

1000 

1001 def _is_hidden(self, path): 

1002 return vhdutil.getHidden(path) == 1 

1003 

1004 def extractUuid(path): 

1005 fileName = os.path.basename(path) 

1006 uuid = fileName.replace(vhdutil.FILE_EXTN_VHD, "") 

1007 return uuid 

1008 extractUuid = staticmethod(extractUuid) 

1009 

1010 def generate_config(self, sr_uuid, vdi_uuid): 

1011 """ 

1012 Generate the XML config required to attach and activate 

1013 a VDI for use when XAPI is not running. Attach and 

1014 activation are handled by vdi_attach_from_config below. 

1015 """ 

1016 util.SMlog("FileVDI.generate_config") 

1017 if not util.pathexists(self.path): 

1018 raise xs_errors.XenError('VDIUnavailable') 

1019 resp = {} 

1020 resp['device_config'] = self.sr.dconf 

1021 resp['sr_uuid'] = sr_uuid 

1022 resp['vdi_uuid'] = vdi_uuid 

1023 resp['command'] = 'vdi_attach_from_config' 

1024 # Return the 'config' encoded within a normal XMLRPC response so that 

1025 # we can use the regular response/error parsing code. 

1026 config = xmlrpc.client.dumps(tuple([resp]), "vdi_attach_from_config") 

1027 return xmlrpc.client.dumps((config, ), "", True) 

1028 

1029 def attach_from_config(self, sr_uuid, vdi_uuid): 

1030 """ 

1031 Attach and activate a VDI using config generated by 

1032 vdi_generate_config above. This is used for cases such as 

1033 the HA state-file and the redo-log. 

1034 """ 

1035 util.SMlog("FileVDI.attach_from_config") 

1036 try: 

1037 if not util.pathexists(self.sr.path): 

1038 self.sr.attach(sr_uuid) 

1039 except: 

1040 util.logException("FileVDI.attach_from_config") 

1041 raise xs_errors.XenError( 

1042 'SRUnavailable', 

1043 opterr='Unable to attach from config' 

1044 ) 

1045 

1046 def _create_cbt_log(self): 

1047 # Create CBT log file 

1048 # Name: <vdi_uuid>.cbtlog 

1049 # Handle the case where the file already exists 

1050 log_path = self._get_cbt_logpath(self.uuid) 

1051 open_file = open(log_path, "w+") 

1052 open_file.close() 

1053 return super(FileVDI, self)._create_cbt_log() 

1054 

1055 def _delete_cbt_log(self): 

1056 logPath = self._get_cbt_logpath(self.uuid) 

1057 try: 

1058 os.remove(logPath) 

1059 except OSError as e: 

1060 if e.errno != errno.ENOENT: 

1061 raise 

1062 

1063 def _cbt_log_exists(self, logpath): 

1064 return util.pathexists(logpath) 

1065 

1066 

1067class SharedFileSR(FileSR): 

1068 """ 

1069 FileSR subclass for SRs that use shared network storage 

1070 """ 

1071 NO_HARDLINK_SUPPORT = "no_hardlinks" 

1072 

1073 def _raise_hardlink_error(self): 

1074 raise OSError(524, "Unknown error 524") 

1075 

1076 def _check_hardlinks(self): 
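        # Probe hardlink support on the SR's filesystem by creating a scratch
        # file and hardlinking it: on success clear NO_HARDLINK_SUPPORT from
        # the SR's sm_config, on failure set it and raise a XAPI message,
        # since snapshot crash consistency cannot be assured without hardlinks.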

1077 test_name = os.path.join(self.path, str(uuid4())) 

1078 open(test_name, 'ab').close() 

1079 

1080 link_name = '%s.new' % test_name 

1081 try: 

1082 # XSI-1100: Let tests simulate failure of the link operation 

1083 util.fistpoint.activate_custom_fn( 

1084 "FileSR_fail_hardlink", 

1085 self._raise_hardlink_error) 

1086 

1087 os.link(test_name, link_name) 

1088 self.session.xenapi.SR.remove_from_sm_config( 

1089 self.sr_ref, SharedFileSR.NO_HARDLINK_SUPPORT) 

1090 except OSError: 

1091 msg = "File system for SR %s does not support hardlinks, crash " \ 

1092 "consistency of snapshots cannot be assured" % self.uuid 

1093 util.SMlog(msg, priority=util.LOG_WARNING) 

1094 try: 

1095 self.session.xenapi.SR.add_to_sm_config( 

1096 self.sr_ref, SharedFileSR.NO_HARDLINK_SUPPORT, 'True') 

1097 self.session.xenapi.message.create( 

1098 "sr_does_not_support_hardlinks", 2, "SR", self.uuid, 

1099 msg) 

1100 except XenAPI.Failure: 

1101 # Might already be set and checking has TOCTOU issues 

1102 pass 

1103 finally: 

1104 util.force_unlink(link_name) 

1105 util.force_unlink(test_name) 

1106 

1107 

1108if __name__ == '__main__': 

1109 SRCommand.run(FileSR, DRIVER_INFO) 

1110else: 

1111 SR.registerSR(FileSR)