
1#!/usr/bin/env python3 

2# 

3# Copyright (C) 2020 Vates SAS - ronan.abhamon@vates.fr 

4# 

5# This program is free software: you can redistribute it and/or modify 

6# it under the terms of the GNU General Public License as published by 

7# the Free Software Foundation, either version 3 of the License, or 

8# (at your option) any later version. 

9# This program is distributed in the hope that it will be useful, 

10# but WITHOUT ANY WARRANTY; without even the implied warranty of 

11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

12# GNU General Public License for more details. 

13# 

14# You should have received a copy of the GNU General Public License 

15# along with this program. If not, see <https://www.gnu.org/licenses/>. 

16 

17from constants import CBTLOG_TAG 

18 

19try: 

20 from linstorjournaler import LinstorJournaler 

21 from linstorvhdutil import LinstorVhdUtil 

22 from linstorvolumemanager \ 

23 import LinstorVolumeManager, LinstorVolumeManagerError 

24 LINSTOR_AVAILABLE = True 

25except ImportError: 

26 LINSTOR_AVAILABLE = False 

27 

28from lock import Lock 

29import blktap2 

30import cleanup 

31import errno 

32import functools 

33import scsiutil 

34import SR 

35import SRCommand 

36import time 

37import traceback 

38import util 

39import VDI 

40import vhdutil 

41import xmlrpc.client 

42import xs_errors 

43 

44from srmetadata import \ 

45 NAME_LABEL_TAG, NAME_DESCRIPTION_TAG, IS_A_SNAPSHOT_TAG, SNAPSHOT_OF_TAG, \ 

46 TYPE_TAG, VDI_TYPE_TAG, READ_ONLY_TAG, SNAPSHOT_TIME_TAG, \ 

47 METADATA_OF_POOL_TAG 

48 

49HIDDEN_TAG = 'hidden' 

50 

51# ============================================================================== 

52 

53# TODO: Support 'VDI_INTRODUCE', 'VDI_RESET_ON_BOOT/2', 'SR_TRIM', 

54# 'VDI_CONFIG_CBT', 'SR_PROBE' 

55 

56CAPABILITIES = [ 

57 'ATOMIC_PAUSE', 

58 'SR_UPDATE', 

59 'VDI_CREATE', 

60 'VDI_DELETE', 

61 'VDI_UPDATE', 

62 'VDI_ATTACH', 

63 'VDI_DETACH', 

64 'VDI_ACTIVATE', 

65 'VDI_DEACTIVATE', 

66 'VDI_CLONE', 

67 'VDI_MIRROR', 

68 'VDI_RESIZE', 

69 'VDI_SNAPSHOT', 

70 'VDI_GENERATE_CONFIG' 

71] 

72 

73CONFIGURATION = [ 

74 ['group-name', 'LVM group name'], 

75 ['hosts', 'host names to use'], 

76 ['redundancy', 'replication count'], 

77 ['provisioning', '"thin" or "thick" are accepted'] 

78] 

79 

80DRIVER_INFO = { 

81 'name': 'LINSTOR resources on XCP-ng', 

82 'description': 'SR plugin which uses Linstor to manage VDIs', 

83 'vendor': 'Vates', 

84 'copyright': '(C) 2020 Vates', 

85 'driver_version': '1.0', 

86 'required_api_version': '1.0', 

87 'capabilities': CAPABILITIES, 

88 'configuration': CONFIGURATION 

89} 

90 

91DRIVER_CONFIG = {'ATTACH_FROM_CONFIG_WITH_TAPDISK': False} 

92 

93OPS_EXCLUSIVE = [ 

94 'sr_create', 'sr_delete', 'sr_attach', 'sr_detach', 'sr_scan', 

95 'sr_update', 'vdi_create', 'vdi_delete', 'vdi_clone', 'vdi_snapshot' 

96] 

97 

98# ============================================================================== 

99# Misc helpers used by LinstorSR and linstor-thin plugin. 

100# ============================================================================== 

101 

102 
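# Helper summary: for a VHD VDI, the requested virtual size is padded with
# the VHD metadata overhead (preallocated for LinstorVDI.MAX_SIZE to allow
# fast online resize) and the block bitmap overhead, then rounded up by the
# volume manager; a raw VDI is only rounded up.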

103def compute_volume_size(virtual_size, image_type): 

104 if image_type == vhdutil.VDI_TYPE_VHD: 

105 # All LINSTOR VDIs have the metadata area preallocated for 

106 # the maximum possible virtual size (for fast online VDI.resize). 

107 meta_overhead = vhdutil.calcOverheadEmpty(LinstorVDI.MAX_SIZE) 

108 bitmap_overhead = vhdutil.calcOverheadBitmap(virtual_size) 

109 virtual_size += meta_overhead + bitmap_overhead 

110 elif image_type != vhdutil.VDI_TYPE_RAW: 

111 raise Exception('Invalid image type: {}'.format(image_type)) 

112 

113 return LinstorVolumeManager.round_up_volume_size(virtual_size) 

114 

115 
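# Poll for the lock for roughly 20 seconds (20 attempts, one second apart)
# and raise SRBusyException if it still cannot be acquired.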

116def try_lock(lock): 

117 for i in range(20): 

118 if lock.acquireNoblock(): 

119 return 

120 time.sleep(1) 

121 raise util.SRBusyException() 

122 

123 
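# Thin-provisioning helper used at attach time (also called by the
# linstor-thin plugin): under the SR lock, inflate the LINSTOR volume when
# its current size is smaller than the size required by the VHD image.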

124def attach_thin(session, journaler, linstor, sr_uuid, vdi_uuid): 

125 volume_metadata = linstor.get_volume_metadata(vdi_uuid) 

126 image_type = volume_metadata.get(VDI_TYPE_TAG) 

127 if image_type == vhdutil.VDI_TYPE_RAW: 

128 return 

129 

130 lock = Lock(vhdutil.LOCK_TYPE_SR, sr_uuid) 

131 try: 

132 try_lock(lock) 

133 

134 device_path = linstor.get_device_path(vdi_uuid) 

135 

136 # If the virtual VHD size is lower than the LINSTOR volume size, 

137 # there is nothing to do. 

138 vhd_size = compute_volume_size( 

139 LinstorVhdUtil(session, linstor).get_size_virt(vdi_uuid), 

140 image_type 

141 ) 

142 

143 volume_info = linstor.get_volume_info(vdi_uuid) 

144 volume_size = volume_info.virtual_size 

145 

146 if vhd_size > volume_size: 

147 inflate( 

148 journaler, linstor, vdi_uuid, device_path, 

149 vhd_size, volume_size 

150 ) 

151 finally: 

152 lock.release() 

153 

154 
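# Thin-provisioning helper used at detach time: under the SR lock, deflate
# the LINSTOR volume down to the physical size really used by the VHD,
# unless the VDI is still attached through more than one VBD.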

155def detach_thin(session, linstor, sr_uuid, vdi_uuid): 

156 volume_metadata = linstor.get_volume_metadata(vdi_uuid) 

157 image_type = volume_metadata.get(VDI_TYPE_TAG) 

158 if image_type == vhdutil.VDI_TYPE_RAW: 

159 return 

160 

161 lock = Lock(vhdutil.LOCK_TYPE_SR, sr_uuid) 

162 try: 

163 try_lock(lock) 

164 

165 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid) 

166 vbds = session.xenapi.VBD.get_all_records_where( 

167 'field "VDI" = "{}"'.format(vdi_ref) 

168 ) 

169 

170 num_plugged = 0 

171 for vbd_rec in vbds.values(): 

172 if vbd_rec['currently_attached']: 

173 num_plugged += 1 

174 if num_plugged > 1: 

175 raise xs_errors.XenError( 

176 'VDIUnavailable', 

177 opterr='Cannot deflate VDI {}, already used by ' 

178 'at least 2 VBDs'.format(vdi_uuid) 

179 ) 

180 

181 device_path = linstor.get_device_path(vdi_uuid) 

182 new_volume_size = LinstorVolumeManager.round_up_volume_size( 

183 LinstorVhdUtil(session, linstor).get_size_phys(device_path) 

184 ) 

185 

186 volume_info = linstor.get_volume_info(vdi_uuid) 

187 old_volume_size = volume_info.virtual_size 

188 deflate(vdi_uuid, device_path, new_volume_size, old_volume_size) 

189 finally: 

190 lock.release() 

191 

192 
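# Grow the LINSTOR volume to `new_size`. An INFLATE journal entry records
# the previous size so that an interrupted operation can be rolled back by
# LinstorSR._handle_interrupted_inflate on the next SR load.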

193def inflate(journaler, linstor, vdi_uuid, vdi_path, new_size, old_size): 

194 # Only inflate if the LINSTOR volume capacity is not enough. 

195 new_size = LinstorVolumeManager.round_up_volume_size(new_size) 

196 if new_size <= old_size: 

197 return 

198 

199 util.SMlog( 

200 'Inflate {} (new VHD size={}, previous={})' 

201 .format(vdi_uuid, new_size, old_size) 

202 ) 

203 

204 journaler.create( 

205 LinstorJournaler.INFLATE, vdi_uuid, old_size 

206 ) 

207 linstor.resize_volume(vdi_uuid, new_size) 

208 

209 if not util.zeroOut( 

210 vdi_path, new_size - vhdutil.VHD_FOOTER_SIZE, 

211 vhdutil.VHD_FOOTER_SIZE 

212 ): 

213 raise xs_errors.XenError( 

214 'EIO', 

215 opterr='Failed to zero out VHD footer {}'.format(vdi_path) 

216 ) 

217 

218 vhdutil.setSizePhys(vdi_path, new_size, False) 

219 journaler.remove(LinstorJournaler.INFLATE, vdi_uuid) 

220 

221 
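# Shrink the VHD physical size down to `new_size` when the volume is
# larger. Note: the LINSTOR volume itself is not resized yet (see the TODO
# below).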

222def deflate(vdi_uuid, vdi_path, new_size, old_size): 

223 new_size = LinstorVolumeManager.round_up_volume_size(new_size) 

224 if new_size >= old_size: 

225 return 

226 

227 util.SMlog( 

228 'Deflate {} (new size={}, previous={})' 

229 .format(vdi_uuid, new_size, old_size) 

230 ) 

231 

232 vhdutil.setSizePhys(vdi_path, new_size) 

233 # TODO: Change the LINSTOR volume size using linstor.resize_volume. 

234 

235 

236# ============================================================================== 

237 

238# Usage example: 

239# xe sr-create type=linstor name-label=linstor-sr 

240# host-uuid=d2deba7a-c5ad-4de1-9a20-5c8df3343e93 

241# device-config:hosts=node-linstor1,node-linstor2,node-linstor3 

242# device-config:group-name=vg_loop device-config:redundancy=2 
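# The optional device-config:provisioning=thin|thick parameter selects the
# allocation mode ('thin' is the default, see PROVISIONING_DEFAULT below).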

243 

244 

245class LinstorSR(SR.SR): 

246 DRIVER_TYPE = 'linstor' 

247 

248 PROVISIONING_TYPES = ['thin', 'thick'] 

249 PROVISIONING_DEFAULT = 'thin' 

250 

251 MANAGER_PLUGIN = 'linstor-manager' 

252 

253 # -------------------------------------------------------------------------- 

254 # SR methods. 

255 # -------------------------------------------------------------------------- 

256 

257 @staticmethod 

258 def handles(type): 

259 return type == LinstorSR.DRIVER_TYPE 

260 

261 def load(self, sr_uuid): 

262 if not LINSTOR_AVAILABLE: 

263 raise util.SMException( 

264 'Can\'t load LinstorSR: LINSTOR libraries are missing' 

265 ) 

266 

267 # Check parameters. 

268 if 'hosts' not in self.dconf or not self.dconf['hosts']: 

269 raise xs_errors.XenError('LinstorConfigHostsMissing') 

270 if 'group-name' not in self.dconf or not self.dconf['group-name']: 

271 raise xs_errors.XenError('LinstorConfigGroupNameMissing') 

272 if 'redundancy' not in self.dconf or not self.dconf['redundancy']: 

273 raise xs_errors.XenError('LinstorConfigRedundancyMissing') 

274 

275 self.driver_config = DRIVER_CONFIG 

276 

277 # Check provisioning config. 

278 provisioning = self.dconf.get('provisioning') 

279 if provisioning: 

280 if provisioning in self.PROVISIONING_TYPES: 

281 self._provisioning = provisioning 

282 else: 

283 raise xs_errors.XenError( 

284 'InvalidArg', 

285 opterr='Provisioning parameter must be one of {}'.format( 

286 self.PROVISIONING_TYPES 

287 ) 

288 ) 

289 else: 

290 self._provisioning = self.PROVISIONING_DEFAULT 

291 

292 # Note: We don't have access to the session field if the 

293 # 'vdi_attach_from_config' command is executed. 

294 self._has_session = self.sr_ref and self.session is not None 

295 if self._has_session: 

296 self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref) 

297 else: 

298 self.sm_config = self.srcmd.params.get('sr_sm_config') or {} 

299 

300 provisioning = self.sm_config.get('provisioning') 

301 if provisioning in self.PROVISIONING_TYPES: 

302 self._provisioning = provisioning 

303 

304 # Define properties for SR parent class. 

305 self.ops_exclusive = OPS_EXCLUSIVE 

306 self.path = LinstorVolumeManager.DEV_ROOT_PATH 

307 self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid) 

308 self.sr_vditype = SR.DEFAULT_TAP 

309 

310 self._hosts = self.dconf['hosts'].split(',') 

311 self._redundancy = int(self.dconf['redundancy'] or 1) 

312 self._linstor = None # Ensure that LINSTOR attribute exists. 

313 self._journaler = None 

314 

315 self._is_master = False 

316 if 'SRmaster' in self.dconf and self.dconf['SRmaster'] == 'true': 

317 self._is_master = True 

318 self._group_name = self.dconf['group-name'] 

319 

320 self._master_uri = None 

321 self._vdi_shared_locked = False 

322 

323 self._initialized = False 

324 
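# Decorator wrapping the public SR methods below: on first call it builds
# the LinstorJournaler/LinstorVolumeManager instances, blocks master-only
# commands on slaves and, on the master, reloads the VDIs and replays any
# pending journal transactions before running the wrapped method.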

325 def _locked_load(method): 

326 @functools.wraps(method) 

327 def wrap(self, *args, **kwargs): 

328 if self._initialized: 

329 return method(self, *args, **kwargs) 

330 self._initialized = True 

331 

332 if not self._has_session: 

333 if self.srcmd.cmd == 'vdi_attach_from_config': 

334 # We must have a valid LINSTOR instance here without using 

335 # the XAPI. 

336 self._master_uri = 'linstor://{}'.format( 

337 util.get_master_address() 

338 ) 

339 self._journaler = LinstorJournaler( 

340 self._master_uri, self._group_name, logger=util.SMlog 

341 ) 

342 

343 try: 

344 self._linstor = LinstorVolumeManager( 

345 self._master_uri, 

346 self._group_name, 

347 logger=util.SMlog 

348 ) 

349 return 

350 except Exception as e: 

351 util.SMlog( 

352 'Ignore exception. Failed to build LINSTOR ' 

353 'instance without session: {}'.format(e) 

354 ) 

355 return 

356 

357 self._master_uri = 'linstor://{}'.format( 

358 util.get_master_rec(self.session)['address'] 

359 ) 

360 

361 if not self._is_master: 

362 if self.cmd in [ 

363 'sr_create', 'sr_delete', 'sr_update', 'sr_probe', 

364 'sr_scan', 'vdi_create', 'vdi_delete', 'vdi_resize', 

365 'vdi_snapshot', 'vdi_clone' 

366 ]: 

367 util.SMlog('{} blocked for non-master'.format(self.cmd)) 

368 raise xs_errors.XenError('LinstorMaster') 

369 

370 # Because the LINSTOR KV objects cache all values, we must lock 

371 # the VDI before the LinstorJournaler/LinstorVolumeManager 

372 # instantiation and before any action on the master to avoid a 

373 # bad read. The lock is also necessary to avoid strange 

374 # behaviors if the GC is executed during an action on a slave. 

375 if self.cmd.startswith('vdi_'): 

376 self._shared_lock_vdi(self.srcmd.params['vdi_uuid']) 

377 self._vdi_shared_locked = True 

378 

379 self._journaler = LinstorJournaler( 

380 self._master_uri, self._group_name, logger=util.SMlog 

381 ) 

382 

383 # Ensure ports are opened and LINSTOR controller/satellite 

384 # are activated. 

385 if self.srcmd.cmd == 'sr_create': 

386 # TODO: Disable if necessary 

387 self._enable_linstor_on_all_hosts(status=True) 

388 

389 try: 

390 # Try to open SR if exists. 

391 self._linstor = LinstorVolumeManager( 

392 self._master_uri, 

393 self._group_name, 

394 repair=self._is_master, 

395 logger=util.SMlog 

396 ) 

397 self._vhdutil = LinstorVhdUtil(self.session, self._linstor) 

398 except Exception as e: 

399 if self.srcmd.cmd == 'sr_create' or \ 

400 self.srcmd.cmd == 'sr_detach': 

401 # Ignore exception in this specific case: sr_create. 

402 # At this moment the LinstorVolumeManager cannot be 

403 # instantiated. Concerning the sr_detach command, we must 

404 # ignore LINSTOR exceptions (if the volume group doesn't 

405 # exist for example after a bad user action). 

406 pass 

407 else: 

408 raise xs_errors.XenError('SRUnavailable', opterr=str(e)) 

409 

410 if self._linstor: 

411 try: 

412 hosts = self._linstor.disconnected_hosts 

413 except Exception as e: 

414 raise xs_errors.XenError('SRUnavailable', opterr=str(e)) 

415 

416 if hosts: 

417 util.SMlog('Failed to join node(s): {}'.format(hosts)) 

418 

419 try: 

420 # If the command is a SR command on the master, we must 

421 # load all VDIs and clean journal transactions. 

422 # We must load the VDIs in the snapshot case too. 

423 if self._is_master and self.cmd not in [ 

424 'vdi_attach', 'vdi_detach', 

425 'vdi_activate', 'vdi_deactivate', 

426 'vdi_epoch_begin', 'vdi_epoch_end', 

427 'vdi_update', 'vdi_destroy' 

428 ]: 

429 self._load_vdis() 

430 self._undo_all_journal_transactions() 

431 self._linstor.remove_resourceless_volumes() 

432 

433 self._synchronize_metadata() 

434 except Exception as e: 

435 util.SMlog( 

436 'Ignoring exception in LinstorSR.load: {}'.format(e) 

437 ) 

438 util.SMlog(traceback.format_exc()) 

439 

440 return method(self, *args, **kwargs) 

441 

442 return wrap 

443 

444 @_locked_load 

445 def cleanup(self): 

446 if self._vdi_shared_locked: 

447 self._shared_lock_vdi(self.srcmd.params['vdi_uuid'], locked=False) 

448 

449 @_locked_load 

450 def create(self, uuid, size): 

451 util.SMlog('LinstorSR.create for {}'.format(self.uuid)) 

452 

453 if self._redundancy > len(self._hosts): 

454 raise xs_errors.XenError( 

455 'LinstorSRCreate', 

456 opterr='Redundancy greater than host count' 

457 ) 

458 

459 xenapi = self.session.xenapi 

460 srs = xenapi.SR.get_all_records_where( 

461 'field "type" = "{}"'.format(self.DRIVER_TYPE) 

462 ) 

463 srs = dict([e for e in srs.items() if e[1]['uuid'] != self.uuid]) 

464 

465 for sr in srs.values(): 

466 for pbd in sr['PBDs']: 

467 device_config = xenapi.PBD.get_device_config(pbd) 

468 group_name = device_config.get('group-name') 

469 if group_name and group_name == self._group_name: 

470 raise xs_errors.XenError( 

471 'LinstorSRCreate', 

472 opterr='group name must be unique' 

473 ) 

474 

475 # Create SR. 

476 # Throw if the SR already exists. 

477 try: 

478 self._linstor = LinstorVolumeManager.create_sr( 

479 self._master_uri, 

480 self._group_name, 

481 self._hosts, 

482 self._redundancy, 

483 thin_provisioning=self._provisioning == 'thin', 

484 logger=util.SMlog 

485 ) 

486 self._vhdutil = LinstorVhdUtil(self.session, self._linstor) 

487 except Exception as e: 

488 util.SMlog('Failed to create LINSTOR SR: {}'.format(e)) 

489 raise xs_errors.XenError('LinstorSRCreate', opterr=str(e)) 

490 

491 @_locked_load 

492 def delete(self, uuid): 

493 util.SMlog('LinstorSR.delete for {}'.format(self.uuid)) 

494 cleanup.gc_force(self.session, self.uuid) 

495 

496 if self.vdis: 

497 raise xs_errors.XenError('SRNotEmpty') 

498 

499 try: 

500 # TODO: Use specific exceptions. If the LINSTOR group doesn't 

501 # exist, we can remove it without problem. 

502 

503 # TODO: Maybe remove all volumes unused by the SMAPI. 

504 # We must ensure it's a safe idea... 

505 

506 self._linstor.destroy() 

507 Lock.cleanupAll(self.uuid) 

508 except Exception as e: 

509 util.SMlog('Failed to delete LINSTOR SR: {}'.format(e)) 

510 raise xs_errors.XenError( 

511 'LinstorSRDelete', 

512 opterr=str(e) 

513 ) 

514 

515 @_locked_load 

516 def update(self, uuid): 

517 util.SMlog('LinstorSR.update for {}'.format(self.uuid)) 

518 

519 # Well, how can we update an SR if it doesn't exist? :thinking: 

520 if not self._linstor: 

521 raise xs_errors.XenError( 

522 'SRUnavailable', 

523 opterr='no such volume group: {}'.format(self._group_name) 

524 ) 

525 

526 self._update_stats(0) 

527 

528 # Update the SR name and description only in LINSTOR metadata. 

529 xenapi = self.session.xenapi 

530 self._linstor.metadata = { 

531 NAME_LABEL_TAG: util.to_plain_string( 

532 xenapi.SR.get_name_label(self.sr_ref) 

533 ), 

534 NAME_DESCRIPTION_TAG: util.to_plain_string( 

535 xenapi.SR.get_name_description(self.sr_ref) 

536 ) 

537 } 

538 

539 @_locked_load 

540 def attach(self, uuid): 

541 util.SMlog('LinstorSR.attach for {}'.format(self.uuid)) 

542 

543 if not self._linstor: 

544 raise xs_errors.XenError( 

545 'SRUnavailable', 

546 opterr='no such group: {}'.format(self._group_name) 

547 ) 

548 

549 @_locked_load 

550 def detach(self, uuid): 

551 util.SMlog('LinstorSR.detach for {}'.format(self.uuid)) 

552 cleanup.abort(self.uuid) 

553 

554 @_locked_load 

555 def probe(self): 

556 util.SMlog('LinstorSR.probe for {}'.format(self.uuid)) 

557 # TODO 

558 

559 @_locked_load 

560 def scan(self, uuid): 

561 util.SMlog('LinstorSR.scan for {}'.format(self.uuid)) 

562 if not self._linstor: 

563 raise xs_errors.XenError( 

564 'SRUnavailable', 

565 opterr='no such volume group: {}'.format(self._group_name) 

566 ) 

567 

568 self._update_physical_size() 

569 

570 for vdi_uuid in list(self.vdis.keys()): 

571 if self.vdis[vdi_uuid].deleted: 

572 del self.vdis[vdi_uuid] 

573 

574 # Update the database before the restart of the GC to avoid a 

575 # bad sync in the process if new VDIs have been introduced. 

576 ret = super(LinstorSR, self).scan(self.uuid) 

577 self._kick_gc() 

578 return ret 

579 

580 @_locked_load 

581 def vdi(self, uuid): 

582 return LinstorVDI(self, uuid) 

583 

584 _locked_load = staticmethod(_locked_load) 

585 

586 # -------------------------------------------------------------------------- 

587 # Lock. 

588 # -------------------------------------------------------------------------- 

589 
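# Ask the linstor-manager plugin on the pool master to lock (or unlock)
# the given VDI so that cached LINSTOR KV values cannot be read while
# another host modifies them (see the comment in _locked_load above).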

590 def _shared_lock_vdi(self, vdi_uuid, locked=True): 

591 pools = self.session.xenapi.pool.get_all() 

592 master = self.session.xenapi.pool.get_master(pools[0]) 

593 

594 method = 'lockVdi' 

595 args = { 

596 'groupName': self._group_name, 

597 'srUuid': self.uuid, 

598 'vdiUuid': vdi_uuid, 

599 'locked': str(locked) 

600 } 

601 

602 ret = self.session.xenapi.host.call_plugin( 

603 master, self.MANAGER_PLUGIN, method, args 

604 ) 

605 util.SMlog( 

606 'call-plugin ({} with {}) returned: {}' 

607 .format(method, args, ret) 

608 ) 

609 if ret == 'False': 

610 raise xs_errors.XenError( 

611 'VDIUnavailable', 

612 opterr='Plugin {} failed'.format(self.MANAGER_PLUGIN) 

613 ) 

614 

615 # -------------------------------------------------------------------------- 

616 # Network. 

617 # -------------------------------------------------------------------------- 

618 

619 def _enable_linstor(self, host, status): 

620 method = 'enable' 

621 args = {'enabled': str(bool(status))} 

622 

623 ret = self.session.xenapi.host.call_plugin( 

624 host, self.MANAGER_PLUGIN, method, args 

625 ) 

626 util.SMlog( 

627 'call-plugin ({} with {}) returned: {}'.format(method, args, ret) 

628 ) 

629 if ret == 'False': 

630 raise xs_errors.XenError( 

631 'SRUnavailable', 

632 opterr='Plugin {} failed'.format(self.MANAGER_PLUGIN) 

633 ) 

634 

635 def _enable_linstor_on_master(self, status): 

636 pools = self.session.xenapi.pool.get_all() 

637 master = self.session.xenapi.pool.get_master(pools[0]) 

638 self._enable_linstor(master, status) 

639 

640 def _enable_linstor_on_all_hosts(self, status): 

641 self._enable_linstor_on_master(status) 

642 for slave in util.get_all_slaves(self.session): 

643 self._enable_linstor(slave, status) 

644 

645 # -------------------------------------------------------------------------- 

646 # Metadata. 

647 # -------------------------------------------------------------------------- 

648 

649 def _synchronize_metadata_and_xapi(self): 

650 try: 

651 # First sync the SR parameters. 

652 self.update(self.uuid) 

653 

654 # Now update the VDI information in the metadata if required. 

655 xenapi = self.session.xenapi 

656 volumes_metadata = self._linstor.volumes_with_metadata 

657 for vdi_uuid, volume_metadata in volumes_metadata.items(): 

658 try: 

659 vdi_ref = xenapi.VDI.get_by_uuid(vdi_uuid) 

660 except Exception: 

661 # Maybe the VDI is not in XAPI yet; don't bother. 

662 continue 

663 

664 label = util.to_plain_string( 

665 xenapi.VDI.get_name_label(vdi_ref) 

666 ) 

667 description = util.to_plain_string( 

668 xenapi.VDI.get_name_description(vdi_ref) 

669 ) 

670 

671 if ( 

672 volume_metadata.get(NAME_LABEL_TAG) != label or 

673 volume_metadata.get(NAME_DESCRIPTION_TAG) != description 

674 ): 

675 self._linstor.update_volume_metadata(vdi_uuid, { 

676 NAME_LABEL_TAG: label, 

677 NAME_DESCRIPTION_TAG: description 

678 }) 

679 except Exception as e: 

680 raise xs_errors.XenError( 

681 'MetadataError', 

682 opterr='Error synching SR Metadata and XAPI: {}'.format(e) 

683 ) 

684 

685 def _synchronize_metadata(self): 

686 if not self._is_master: 

687 return 

688 

689 util.SMlog('Synchronize metadata...') 

690 if self.cmd == 'sr_attach': 

691 try: 

692 util.SMlog( 

693 'Synchronize SR metadata and the state on the storage.' 

694 ) 

695 self._synchronize_metadata_and_xapi() 

696 except Exception as e: 

697 util.SMlog('Failed to synchronize metadata: {}'.format(e)) 

698 

699 # -------------------------------------------------------------------------- 

700 # Stats. 

701 # -------------------------------------------------------------------------- 

702 

703 def _update_stats(self, virt_alloc_delta): 

704 valloc = int(self.session.xenapi.SR.get_virtual_allocation( 

705 self.sr_ref 

706 )) 

707 

708 # Update size attributes of the SR parent class. 

709 self.virtual_allocation = valloc + virt_alloc_delta 

710 

711 # Physical size contains the total physical size. 

712 # i.e. the sum of the sizes of all devices on all hosts, not the AVG. 

713 self._update_physical_size() 

714 

715 # Notify SR parent class. 

716 self._db_update() 

717 

718 def _update_physical_size(self): 

719 # Physical size contains the total physical size. 

720 # i.e. the sum of the sizes of all devices on all hosts, not the AVG. 

721 self.physical_size = self._linstor.physical_size 

722 

723 # `self._linstor.physical_free_size` contains the total physical free 

724 # space. If Thin provisioning is used we can't rely on it; we must use 

725 # the LINSTOR volume size to give users a good idea of the required 

726 # usable space. 

727 self.physical_utilisation = self._linstor.total_allocated_volume_size 

728 

729 # If Thick provisioning is used, we can use this line instead: 

730 # self.physical_utilisation = \ 

731 # self.physical_size - self._linstor.physical_free_size 

732 

733 # -------------------------------------------------------------------------- 

734 # VDIs. 

735 # -------------------------------------------------------------------------- 

736 
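# Rebuild self.vdis from the LINSTOR volume list: introduce into XAPI the
# volumes it does not know about (unless CLONE journal entries are pending
# during a scan), restore snapshot and CBT relationships, then drop hidden
# leaves that would only be garbage collected.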

737 def _load_vdis(self): 

738 if self.vdis: 

739 return 

740 

741 # 1. Get existing VDIs in XAPI. 

742 xenapi = self.session.xenapi 

743 xapi_vdi_uuids = set() 

744 for vdi in xenapi.SR.get_VDIs(self.sr_ref): 

745 xapi_vdi_uuids.add(xenapi.VDI.get_uuid(vdi)) 

746 

747 # 2. Get volumes info. 

748 all_volume_info = self._linstor.volumes_with_info 

749 volumes_metadata = self._linstor.volumes_with_metadata 

750 

751 # 3. Get CBT vdis. 

752 # See: https://support.citrix.com/article/CTX230619 

753 cbt_vdis = set() 

754 for volume_metadata in volumes_metadata.values(): 

755 cbt_uuid = volume_metadata.get(CBTLOG_TAG) 

756 if cbt_uuid: 

757 cbt_vdis.add(cbt_uuid) 

758 

759 introduce = False 

760 

761 if self.cmd == 'sr_scan': 

762 has_clone_entries = list(self._journaler.get_all( 

763 LinstorJournaler.CLONE 

764 ).items()) 

765 

766 if has_clone_entries: 

767 util.SMlog( 

768 'Cannot introduce VDIs during scan because CLONE ' 

769 'entries exist in the journaler on SR {}'.format(self.uuid) 

770 ) 

771 else: 

772 introduce = True 

773 

774 # 4. Now check all volume info. 

775 vdi_to_snaps = {} 

776 for vdi_uuid, volume_info in all_volume_info.items(): 

777 if vdi_uuid.startswith(cleanup.SR.TMP_RENAME_PREFIX): 

778 continue 

779 

780 # 4.a. Check if the VDI in LINSTOR is in XAPI VDIs. 

781 if vdi_uuid not in xapi_vdi_uuids: 

782 if not introduce: 

783 continue 

784 

785 volume_metadata = volumes_metadata.get(vdi_uuid) 

786 if not volume_metadata: 

787 util.SMlog( 

788 'Skipping volume {} because no metadata could be found' 

789 .format(vdi_uuid) 

790 ) 

791 continue 

792 

793 util.SMlog( 

794 'Trying to introduce VDI {} as it is present in ' 

795 'LINSTOR and not in XAPI...' 

796 .format(vdi_uuid) 

797 ) 

798 

799 try: 

800 self._linstor.get_device_path(vdi_uuid) 

801 except Exception as e: 

802 util.SMlog( 

803 'Cannot introduce {}, unable to get path: {}' 

804 .format(vdi_uuid, e) 

805 ) 

806 continue 

807 

808 name_label = volume_metadata.get(NAME_LABEL_TAG) or '' 

809 type = volume_metadata.get(TYPE_TAG) or 'user' 

810 vdi_type = volume_metadata.get(VDI_TYPE_TAG) 

811 

812 if not vdi_type: 

813 util.SMlog( 

814 'Cannot introduce {} '.format(vdi_uuid) + 

815 'without vdi_type' 

816 ) 

817 continue 

818 

819 sm_config = { 

820 'vdi_type': vdi_type 

821 } 

822 

823 if vdi_type == vhdutil.VDI_TYPE_RAW: 

824 managed = not volume_metadata.get(HIDDEN_TAG) 

825 elif vdi_type == vhdutil.VDI_TYPE_VHD: 

826 vhd_info = self._vhdutil.get_vhd_info(vdi_uuid) 

827 managed = not vhd_info.hidden 

828 if vhd_info.parentUuid: 

829 sm_config['vhd-parent'] = vhd_info.parentUuid 

830 else: 

831 util.SMlog( 

832 'Cannot introduce {} with invalid VDI type {}' 

833 .format(vdi_uuid, vdi_type) 

834 ) 

835 continue 

836 

837 util.SMlog( 

838 'Introducing VDI {} '.format(vdi_uuid) + 

839 ' (name={}, virtual_size={}, physical_size={})'.format( 

840 name_label, 

841 volume_info.virtual_size, 

842 volume_info.physical_size 

843 ) 

844 ) 

845 

846 vdi_ref = xenapi.VDI.db_introduce( 

847 vdi_uuid, 

848 name_label, 

849 volume_metadata.get(NAME_DESCRIPTION_TAG) or '', 

850 self.sr_ref, 

851 type, 

852 False, # sharable 

853 bool(volume_metadata.get(READ_ONLY_TAG)), 

854 {}, # other_config 

855 vdi_uuid, # location 

856 {}, # xenstore_data 

857 sm_config, 

858 managed, 

859 str(volume_info.virtual_size), 

860 str(volume_info.physical_size) 

861 ) 

862 

863 is_a_snapshot = volume_metadata.get(IS_A_SNAPSHOT_TAG) 

864 xenapi.VDI.set_is_a_snapshot(vdi_ref, bool(is_a_snapshot)) 

865 if is_a_snapshot: 

866 xenapi.VDI.set_snapshot_time( 

867 vdi_ref, 

868 xmlrpc.client.DateTime( 

869 volume_metadata[SNAPSHOT_TIME_TAG] or 

870 '19700101T00:00:00Z' 

871 ) 

872 ) 

873 

874 snap_uuid = volume_metadata[SNAPSHOT_OF_TAG] 

875 if snap_uuid in vdi_to_snaps: 

876 vdi_to_snaps[snap_uuid].append(vdi_uuid) 

877 else: 

878 vdi_to_snaps[snap_uuid] = [vdi_uuid] 

879 

880 # 4.b. Add the VDI in the list. 

881 vdi = self.vdi(vdi_uuid) 

882 self.vdis[vdi_uuid] = vdi 

883 

884 if vdi.vdi_type == vhdutil.VDI_TYPE_VHD: 

885 vdi.sm_config_override['key_hash'] = \ 

886 self._vhdutil.get_key_hash(vdi_uuid) 

887 

888 # 4.c. Update CBT status of disks either just added 

889 # or already in XAPI. 

890 cbt_uuid = volume_metadata.get(CBTLOG_TAG) 

891 if cbt_uuid in cbt_vdis: 

892 vdi_ref = xenapi.VDI.get_by_uuid(vdi_uuid) 

893 xenapi.VDI.set_cbt_enabled(vdi_ref, True) 

894 # For existing VDIs, update local state too. 

895 # Scan in base class SR updates existing VDIs 

896 # again based on local states. 

897 self.vdis[vdi_uuid].cbt_enabled = True 

898 cbt_vdis.remove(cbt_uuid) 

899 

900 # 5. Now set the snapshot statuses correctly in XAPI. 

901 for src_uuid in vdi_to_snaps: 

902 try: 

903 src_ref = xenapi.VDI.get_by_uuid(src_uuid) 

904 except Exception: 

905 # The source VDI no longer exists, continue. 

906 continue 

907 

908 for snap_uuid in vdi_to_snaps[src_uuid]: 

909 try: 

910 # This might fail in cases where it's already set. 

911 snap_ref = xenapi.VDI.get_by_uuid(snap_uuid) 

912 xenapi.VDI.set_snapshot_of(snap_ref, src_ref) 

913 except Exception as e: 

914 util.SMlog('Setting snapshot failed: {}'.format(e)) 

915 

916 # TODO: Check correctly how to use CBT. 

917 # Update cbt_enabled on the right VDI, check LVM/FileSR code. 

918 

919 # 6. If we have items remaining in this list, 

920 # they are cbt_metadata VDI that XAPI doesn't know about. 

921 # Add them to self.vdis and they'll get added to the DB. 

922 for cbt_uuid in cbt_vdis: 

923 new_vdi = self.vdi(cbt_uuid) 

924 new_vdi.ty = 'cbt_metadata' 

925 new_vdi.cbt_enabled = True 

926 self.vdis[cbt_uuid] = new_vdi 

927 

928 # 7. Update virtual allocation, build genealogy and remove useless VDIs. 

929 self.virtual_allocation = 0 

930 

931 # 8. Build genealogy. 

932 geneology = {} 

933 

934 for vdi_uuid, vdi in self.vdis.items(): 

935 if vdi.parent: 

936 if vdi.parent in self.vdis: 

937 self.vdis[vdi.parent].read_only = True 

938 if vdi.parent in geneology: 

939 geneology[vdi.parent].append(vdi_uuid) 

940 else: 

941 geneology[vdi.parent] = [vdi_uuid] 

942 if not vdi.hidden: 

943 self.virtual_allocation += vdi.utilisation 

944 

945 # 9. Remove all hidden leaf nodes to avoid introducing records that 

946 # will be GC'ed. 

947 for vdi_uuid in list(self.vdis.keys()): 

948 if vdi_uuid not in geneology and self.vdis[vdi_uuid].hidden: 

949 util.SMlog( 

950 'Scan found hidden leaf ({}), ignoring'.format(vdi_uuid) 

951 ) 

952 del self.vdis[vdi_uuid] 

953 

954 # -------------------------------------------------------------------------- 

955 # Journals. 

956 # -------------------------------------------------------------------------- 

957 
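# Return a (device_path, parent_uuid) tuple for the given volume, or
# (None, None) if the device path is missing or the VHD cannot be read.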

958 def _get_vdi_path_and_parent(self, vdi_uuid, volume_name): 

959 try: 

960 device_path = self._linstor.build_device_path(volume_name) 

961 if not util.pathexists(device_path): 

962 return (None, None) 

963 

964 # If it's a RAW VDI, there is no parent. 

965 volume_metadata = self._linstor.get_volume_metadata(vdi_uuid) 

966 vdi_type = volume_metadata[VDI_TYPE_TAG] 

967 if vdi_type == vhdutil.VDI_TYPE_RAW: 

968 return (device_path, None) 

969 

970 # Otherwise it's a VHD and a parent can exist. 

971 if not self._vhdutil.check(vdi_uuid): 

972 return (None, None) 

973 

974 vhd_info = self._vhdutil.get_vhd_info(vdi_uuid) 

975 if vhd_info: 

976 return (device_path, vhd_info.parentUuid) 

977 except Exception as e: 

978 util.SMlog( 

979 'Failed to get VDI path and parent, ignoring: {}' 

980 .format(e) 

981 ) 

982 return (None, None) 

983 

984 def _undo_all_journal_transactions(self): 

985 util.SMlog('Undoing all journal transactions...') 

986 self.lock.acquire() 

987 try: 

988 self._handle_interrupted_inflate_ops() 

989 self._handle_interrupted_clone_ops() 

990 pass 

991 finally: 

992 self.lock.release() 

993 

994 def _handle_interrupted_inflate_ops(self): 

995 transactions = self._journaler.get_all(LinstorJournaler.INFLATE) 

996 for vdi_uuid, old_size in transactions.items(): 

997 self._handle_interrupted_inflate(vdi_uuid, old_size) 

998 self._journaler.remove(LinstorJournaler.INFLATE, vdi_uuid) 

999 

1000 def _handle_interrupted_clone_ops(self): 

1001 transactions = self._journaler.get_all(LinstorJournaler.CLONE) 

1002 for vdi_uuid, old_size in transactions.items(): 

1003 self._handle_interrupted_clone(vdi_uuid, old_size) 

1004 self._journaler.remove(LinstorJournaler.CLONE, vdi_uuid) 

1005 

1006 def _handle_interrupted_inflate(self, vdi_uuid, old_size): 

1007 util.SMlog( 

1008 '*** INTERRUPTED INFLATE OP: for {} ({})' 

1009 .format(vdi_uuid, old_size) 

1010 ) 

1011 

1012 vdi = self.vdis.get(vdi_uuid) 

1013 if not vdi: 

1014 util.SMlog('Cannot deflate missing VDI {}'.format(vdi_uuid)) 

1015 return 

1016 

1017 current_size = self._linstor.get_volume_info(vdi_uuid).virtual_size 

1018 util.zeroOut( 

1019 vdi.path, 

1020 current_size - vhdutil.VHD_FOOTER_SIZE, 

1021 vhdutil.VHD_FOOTER_SIZE 

1022 ) 

1023 deflate(vdi_uuid, vdi.path, old_size, current_size) 

1024 
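# clone_info is the '<base_uuid>_<snap_uuid>' string stored in the CLONE
# journal entry; depending on which volumes survived the interruption the
# clone is either kept as-is or reverted through _undo_clone.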

1025 def _handle_interrupted_clone( 

1026 self, vdi_uuid, clone_info, force_undo=False 

1027 ): 

1028 util.SMlog( 

1029 '*** INTERRUPTED CLONE OP: for {} ({})' 

1030 .format(vdi_uuid, clone_info) 

1031 ) 

1032 

1033 base_uuid, snap_uuid = clone_info.split('_') 

1034 

1035 # Use LINSTOR data because new VDIs may not be in the XAPI. 

1036 volume_names = self._linstor.volumes_with_name 

1037 

1038 # Check if we don't have a base VDI. (If clone failed at startup.) 

1039 if base_uuid not in volume_names: 

1040 if vdi_uuid in volume_names: 

1041 util.SMlog('*** INTERRUPTED CLONE OP: nothing to do') 

1042 return 

1043 raise util.SMException( 

1044 'Base copy {} not present, but no original {} found' 

1045 .format(base_uuid, vdi_uuid) 

1046 ) 

1047 

1048 if force_undo: 

1049 util.SMlog('Explicit revert') 

1050 self._undo_clone( 

1051 volume_names, vdi_uuid, base_uuid, snap_uuid 

1052 ) 

1053 return 

1054 

1055 # If VDI or snap uuid is missing... 

1056 if vdi_uuid not in volume_names or \ 

1057 (snap_uuid and snap_uuid not in volume_names): 

1058 util.SMlog('One or both leaves missing => revert') 

1059 self._undo_clone(volume_names, vdi_uuid, base_uuid, snap_uuid) 

1060 return 

1061 

1062 vdi_path, vdi_parent_uuid = self._get_vdi_path_and_parent( 

1063 vdi_uuid, volume_names[vdi_uuid] 

1064 ) 

1065 snap_path, snap_parent_uuid = self._get_vdi_path_and_parent( 

1066 snap_uuid, volume_names[snap_uuid] 

1067 ) 

1068 

1069 if not vdi_path or (snap_uuid and not snap_path): 

1070 util.SMlog('One or both leaves invalid (and path(s)) => revert') 

1071 self._undo_clone(volume_names, vdi_uuid, base_uuid, snap_uuid) 

1072 return 

1073 

1074 util.SMlog('Leaves valid but => revert') 

1075 self._undo_clone(volume_names, vdi_uuid, base_uuid, snap_uuid) 

1076 

1077 def _undo_clone(self, volume_names, vdi_uuid, base_uuid, snap_uuid): 

1078 base_path = self._linstor.build_device_path(volume_names[base_uuid]) 

1079 base_metadata = self._linstor.get_volume_metadata(base_uuid) 

1080 base_type = base_metadata[VDI_TYPE_TAG] 

1081 

1082 if not util.pathexists(base_path): 

1083 util.SMlog('Base not found! Exit...') 

1084 util.SMlog('*** INTERRUPTED CLONE OP: rollback fail') 

1085 return 

1086 

1087 # Un-hide the parent. 

1088 self._linstor.update_volume_metadata(base_uuid, {READ_ONLY_TAG: False}) 

1089 if base_type == vhdutil.VDI_TYPE_VHD: 

1090 vhd_info = self._vhdutil.get_vhd_info(base_uuid, False) 

1091 if vhd_info.hidden: 

1092 vhdutil.setHidden(base_path, False) 

1093 elif base_type == vhdutil.VDI_TYPE_RAW and \ 

1094 base_metadata.get(HIDDEN_TAG): 

1095 self._linstor.update_volume_metadata( 

1096 base_uuid, {HIDDEN_TAG: False} 

1097 ) 

1098 

1099 # Remove the child nodes. 

1100 if snap_uuid and snap_uuid in volume_names: 

1101 util.SMlog('Destroying snap {}...'.format(snap_uuid)) 

1102 snap_metadata = self._linstor.get_volume_metadata(snap_uuid) 

1103 

1104 if snap_metadata.get(VDI_TYPE_TAG) != vhdutil.VDI_TYPE_VHD: 

1105 raise util.SMException('Clone {} not VHD'.format(snap_uuid)) 

1106 

1107 try: 

1108 self._linstor.destroy_volume(snap_uuid) 

1109 except Exception as e: 

1110 util.SMlog( 

1111 'Cannot destroy snap {} during undo clone: {}' 

1112 .format(snap_uuid, e) 

1113 ) 

1114 

1115 if vdi_uuid in volume_names: 

1116 try: 

1117 util.SMlog('Destroying {}...'.format(vdi_uuid)) 

1118 self._linstor.destroy_volume(vdi_uuid) 

1119 except Exception as e: 

1120 util.SMlog( 

1121 'Cannot destroy VDI {} during undo clone: {}' 

1122 .format(vdi_uuid, e) 

1123 ) 

1124 # We can get an exception like this: 

1125 # "Shutdown of the DRBD resource 'XXX' failed", so the 

1126 # volume info remains... The problem is that we can't properly 

1127 # rename the base VDI below this line, so we must change the 

1128 # UUID of this bad VDI first. 

1129 self._linstor.update_volume_uuid( 

1130 vdi_uuid, 'DELETED_' + vdi_uuid, force=True 

1131 ) 

1132 

1133 # Rename! 

1134 self._linstor.update_volume_uuid(base_uuid, vdi_uuid) 

1135 

1136 # Inflate to the right size. 

1137 if base_type == vhdutil.VDI_TYPE_VHD: 

1138 vdi = self.vdi(vdi_uuid) 

1139 volume_size = compute_volume_size(vdi.size, vdi.vdi_type) 

1140 inflate( 

1141 self._journaler, self._linstor, vdi_uuid, vdi.path, 

1142 volume_size, vdi.capacity 

1143 ) 

1144 self.vdis[vdi_uuid] = vdi 

1145 

1146 # At this stage, tapdisk and the SM VDI will be in a paused state. Remove 

1147 # the flag to facilitate VM deactivate. 

1148 vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid) 

1149 self.session.xenapi.VDI.remove_from_sm_config(vdi_ref, 'paused') 

1150 

1151 util.SMlog('*** INTERRUPTED CLONE OP: rollback success') 

1152 

1153 # -------------------------------------------------------------------------- 

1154 # Misc. 

1155 # -------------------------------------------------------------------------- 

1156 

1157 def _ensure_space_available(self, amount_needed): 

1158 space_available = self._linstor.max_volume_size_allowed 

1159 if (space_available < amount_needed): 

1160 util.SMlog( 

1161 'Not enough space! Free space: {}, need: {}'.format( 

1162 space_available, amount_needed 

1163 ) 

1164 ) 

1165 raise xs_errors.XenError('SRNoSpace') 

1166 

1167 def _kick_gc(self): 

1168 # Don't bother if an instance is already running. This is just an 

1169 # optimization to reduce the overhead of forking a new process if we 

1170 # don't have to, but the process will check the lock anyways. 

1171 lock = Lock(cleanup.LOCK_TYPE_RUNNING, self.uuid) 

1172 if not lock.acquireNoblock(): 

1173 if not cleanup.should_preempt(self.session, self.uuid): 

1174 util.SMlog('A GC instance already running, not kicking') 

1175 return 

1176 

1177 util.SMlog('Aborting currently-running coalesce of garbage VDI') 

1178 try: 

1179 if not cleanup.abort(self.uuid, soft=True): 

1180 util.SMlog('The GC has already been scheduled to re-start') 

1181 except util.CommandException as e: 

1182 if e.code != errno.ETIMEDOUT: 

1183 raise 

1184 util.SMlog('Failed to abort the GC') 

1185 else: 

1186 lock.release() 

1187 

1188 util.SMlog('Kicking GC') 

1189 cleanup.gc(self.session, self.uuid, True) 

1190 

1191# ============================================================================== 

1192# LinstorSr VDI 

1193# ============================================================================== 

1194 

1195 

1196class LinstorVDI(VDI.VDI): 

1197 # Warning: Not the same values as vhdutil.VDI_TYPE_*. 

1198 # These values represent the types given on the command line. 

1199 TYPE_RAW = 'raw' 

1200 TYPE_VHD = 'vhd' 

1201 

1202 MAX_SIZE = 2 * 1024 * 1024 * 1024 * 1024 # Max VHD size. 

1203 

1204 # Metadata size given to the "S" param of vhd-util create. 

1205 # "-S size (MB) for metadata preallocation". 

1206 # Increases performance when resize is called. 

1207 MAX_METADATA_VIRT_SIZE = 2 * 1024 * 1024 

1208 

1209 # -------------------------------------------------------------------------- 

1210 # VDI methods. 

1211 # -------------------------------------------------------------------------- 

1212 

1213 def load(self, vdi_uuid): 

1214 self._lock = self.sr.lock 

1215 self._exists = True 

1216 self._linstor = self.sr._linstor 

1217 

1218 # Update hidden parent property. 

1219 self.hidden = False 

1220 

1221 def raise_bad_load(e): 

1222 util.SMlog( 

1223 'Got exception in LinstorVDI.load: {}'.format(e) 

1224 ) 

1225 util.SMlog(traceback.format_exc()) 

1226 raise xs_errors.XenError( 

1227 'VDIUnavailable', 

1228 opterr='Could not load {} because: {}'.format(self.uuid, e) 

1229 ) 

1230 

1231 # Try to load VDI. 

1232 try: 

1233 if ( 

1234 self.sr.srcmd.cmd == 'vdi_attach_from_config' or 

1235 self.sr.srcmd.cmd == 'vdi_detach_from_config' 

1236 ) and self.sr.srcmd.params['vdi_uuid'] == self.uuid: 

1237 self.vdi_type = vhdutil.VDI_TYPE_RAW 

1238 self.path = self.sr.srcmd.params['vdi_path'] 

1239 else: 

1240 self._determine_type_and_path() 

1241 self._load_this() 

1242 

1243 util.SMlog('VDI {} loaded! (path={}, hidden={})'.format( 

1244 self.uuid, self.path, self.hidden 

1245 )) 

1246 except LinstorVolumeManagerError as e: 

1247 # 1. It may be a VDI deletion. 

1248 if e.code == LinstorVolumeManagerError.ERR_VOLUME_NOT_EXISTS: 

1249 if self.sr.srcmd.cmd == 'vdi_delete': 

1250 self.deleted = True 

1251 return 

1252 

1253 # 2. Or maybe a creation. 

1254 if self.sr.srcmd.cmd == 'vdi_create': 

1255 # Set type attribute of VDI parent class. 

1256 # We use VHD by default. 

1257 self.vdi_type = vhdutil.VDI_TYPE_VHD 

1258 self._key_hash = None # Only used in create. 

1259 

1260 self._exists = False 

1261 vdi_sm_config = self.sr.srcmd.params.get('vdi_sm_config') 

1262 if vdi_sm_config is not None: 

1263 type = vdi_sm_config.get('type') 

1264 if type is not None: 

1265 if type == self.TYPE_RAW: 

1266 self.vdi_type = vhdutil.VDI_TYPE_RAW 

1267 elif type == self.TYPE_VHD: 

1268 self.vdi_type = vhdutil.VDI_TYPE_VHD 

1269 else: 

1270 raise xs_errors.XenError( 

1271 'VDICreate', 

1272 opterr='Invalid VDI type {}'.format(type) 

1273 ) 

1274 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 

1275 self._key_hash = vdi_sm_config.get('key_hash') 

1276 

1277 # For the moment we don't have a path. 

1278 self._update_device_name(None) 

1279 return 

1280 raise_bad_load(e) 

1281 except Exception as e: 

1282 raise_bad_load(e) 

1283 

1284 def create(self, sr_uuid, vdi_uuid, size): 

1285 # Usage example: 

1286 # xe vdi-create sr-uuid=39a5826b-5a90-73eb-dd09-51e3a116f937 

1287 # name-label="linstor-vdi-1" virtual-size=4096MiB sm-config:type=vhd 

1288 

1289 # 1. Check if we are on the master and if the VDI doesn't exist. 

1290 util.SMlog('LinstorVDI.create for {}'.format(self.uuid)) 

1291 if self._exists: 

1292 raise xs_errors.XenError('VDIExists') 

1293 

1294 assert self.uuid 

1295 assert self.ty 

1296 assert self.vdi_type 

1297 

1298 # 2. Compute size and check space available. 

1299 size = vhdutil.validate_and_round_vhd_size(int(size)) 

1300 util.SMlog('LinstorVDI.create: type={}, size={}'.format( 

1301 self.vdi_type, size 

1302 )) 

1303 

1304 volume_size = compute_volume_size(size, self.vdi_type) 

1305 self.sr._ensure_space_available(volume_size) 

1306 

1307 # 3. Set sm_config attribute of VDI parent class. 

1308 self.sm_config = self.sr.srcmd.params['vdi_sm_config'] 

1309 

1310 # 4. Create! 

1311 failed = False 

1312 try: 

1313 self._linstor.create_volume( 

1314 self.uuid, volume_size, persistent=False 

1315 ) 

1316 volume_info = self._linstor.get_volume_info(self.uuid) 

1317 

1318 self._update_device_name(volume_info.name) 

1319 

1320 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1321 self.size = volume_info.virtual_size 

1322 else: 

1323 vhdutil.create( 

1324 self.path, size, False, self.MAX_METADATA_VIRT_SIZE 

1325 ) 

1326 self.size = self.sr._vhdutil.get_size_virt(self.uuid) 

1327 

1328 if self._key_hash: 

1329 vhdutil.setKey(self.path, self._key_hash) 

1330 

1331 # Because vhdutil commands modify the volume data, 

1332 # we must retrieve the utilisation size again. 

1333 volume_info = self._linstor.get_volume_info(self.uuid) 

1334 

1335 volume_metadata = { 

1336 NAME_LABEL_TAG: util.to_plain_string(self.label), 

1337 NAME_DESCRIPTION_TAG: util.to_plain_string(self.description), 

1338 IS_A_SNAPSHOT_TAG: False, 

1339 SNAPSHOT_OF_TAG: '', 

1340 SNAPSHOT_TIME_TAG: '', 

1341 TYPE_TAG: self.ty, 

1342 VDI_TYPE_TAG: self.vdi_type, 

1343 READ_ONLY_TAG: bool(self.read_only), 

1344 METADATA_OF_POOL_TAG: '' 

1345 } 

1346 self._linstor.set_volume_metadata(self.uuid, volume_metadata) 

1347 self._linstor.mark_volume_as_persistent(self.uuid) 

1348 except util.CommandException as e: 

1349 failed = True 

1350 raise xs_errors.XenError( 

1351 'VDICreate', opterr='error {}'.format(e.code) 

1352 ) 

1353 except Exception as e: 

1354 failed = True 

1355 raise xs_errors.XenError('VDICreate', opterr='error {}'.format(e)) 

1356 finally: 

1357 if failed: 

1358 util.SMlog('Unable to create VDI {}'.format(self.uuid)) 

1359 try: 

1360 self._linstor.destroy_volume(self.uuid) 

1361 except Exception as e: 

1362 util.SMlog( 

1363 'Ignoring exception after fail in LinstorVDI.create: ' 

1364 '{}'.format(e) 

1365 ) 

1366 

1367 self.utilisation = volume_info.physical_size 

1368 self.sm_config['vdi_type'] = self.vdi_type 

1369 

1370 self.ref = self._db_introduce() 

1371 self.sr._update_stats(volume_info.virtual_size) 

1372 

1373 return VDI.VDI.get_params(self) 

1374 

1375 def delete(self, sr_uuid, vdi_uuid, data_only=False): 

1376 util.SMlog('LinstorVDI.delete for {}'.format(self.uuid)) 

1377 if self.attached: 

1378 raise xs_errors.XenError('VDIInUse') 

1379 

1380 if self.deleted: 

1381 return super(LinstorVDI, self).delete( 

1382 sr_uuid, vdi_uuid, data_only 

1383 ) 

1384 

1385 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1386 if not self.session.xenapi.VDI.get_managed(vdi_ref): 

1387 raise xs_errors.XenError( 

1388 'VDIDelete', 

1389 opterr='Deleting non-leaf node not permitted' 

1390 ) 

1391 

1392 try: 

1393 # Remove from XAPI and delete from LINSTOR. 

1394 self._linstor.destroy_volume(self.uuid) 

1395 if not data_only: 

1396 self._db_forget() 

1397 

1398 self.sr.lock.cleanupAll(vdi_uuid) 

1399 except Exception as e: 

1400 util.SMlog( 

1401 'Failed to remove the volume (maybe it is leaf coalescing) ' 

1402 'for {} err: {}'.format(self.uuid, e) 

1403 ) 

1404 raise xs_errors.XenError('VDIDelete', opterr=str(e)) 

1405 

1406 if self.uuid in self.sr.vdis: 

1407 del self.sr.vdis[self.uuid] 

1408 

1409 # TODO: Check size after delete. 

1410 self.sr._update_stats(-self.capacity) 

1411 self.sr._kick_gc() 

1412 return super(LinstorVDI, self).delete(sr_uuid, vdi_uuid, data_only) 

1413 

1414 def attach(self, sr_uuid, vdi_uuid): 

1415 util.SMlog('LinstorVDI.attach for {}'.format(self.uuid)) 

1416 if ( 

1417 self.sr.srcmd.cmd != 'vdi_attach_from_config' or 

1418 self.sr.srcmd.params['vdi_uuid'] != self.uuid 

1419 ) and self.sr._journaler.has_entries(self.uuid): 

1420 raise xs_errors.XenError( 

1421 'VDIUnavailable', 

1422 opterr='Interrupted operation detected on this VDI, ' 

1423 'scan SR first to trigger auto-repair' 

1424 ) 

1425 

1426 writable = 'args' not in self.sr.srcmd.params or \ 

1427 self.sr.srcmd.params['args'][0] == 'true' 

1428 

1429 # We need to inflate the volume if we don't have enough space 

1430 # to mount the VHD image. I.e. the volume capacity must be greater 

1431 # than the VHD size + bitmap size. 

1432 need_inflate = True 

1433 if self.vdi_type == vhdutil.VDI_TYPE_RAW or not writable or \ 

1434 self.capacity >= compute_volume_size(self.size, self.vdi_type): 

1435 need_inflate = False 

1436 

1437 if need_inflate: 

1438 try: 

1439 self._prepare_thin(True) 

1440 except Exception as e: 

1441 raise xs_errors.XenError( 

1442 'VDIUnavailable', 

1443 opterr='Failed to attach VDI during "prepare thin": {}' 

1444 .format(e) 

1445 ) 

1446 

1447 if not util.pathexists(self.path): 

1448 raise xs_errors.XenError( 

1449 'VDIUnavailable', opterr='Could not find: {}'.format(self.path) 

1450 ) 

1451 

1452 if not hasattr(self, 'xenstore_data'): 

1453 self.xenstore_data = {} 

1454 

1455 # TODO: Is it useful? 

1456 self.xenstore_data.update(scsiutil.update_XS_SCSIdata( 

1457 self.uuid, scsiutil.gen_synthetic_page_data(self.uuid) 

1458 )) 

1459 

1460 self.xenstore_data['storage-type'] = LinstorSR.DRIVER_TYPE 

1461 

1462 self.attached = True 

1463 

1464 return VDI.VDI.attach(self, self.sr.uuid, self.uuid) 

1465 

1466 def detach(self, sr_uuid, vdi_uuid): 

1467 util.SMlog('LinstorVDI.detach for {}'.format(self.uuid)) 

1468 self.attached = False 

1469 

1470 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1471 return 

1472 

1473 # The VDI is already deflated if the VHD image size + metadata is 

1474 # equal to the LINSTOR volume size. 

1475 volume_size = compute_volume_size(self.size, self.vdi_type) 

1476 already_deflated = self.capacity <= volume_size 

1477 

1478 if already_deflated: 

1479 util.SMlog( 

1480 'VDI {} already deflated (old volume size={}, volume size={})' 

1481 .format(self.uuid, self.capacity, volume_size) 

1482 ) 

1483 

1484 need_deflate = True 

1485 if already_deflated: 

1486 need_deflate = False 

1487 elif self.sr._provisioning == 'thick': 

1488 need_deflate = False 

1489 

1490 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1491 if self.session.xenapi.VDI.get_is_a_snapshot(vdi_ref): 

1492 need_deflate = True 

1493 

1494 if need_deflate: 

1495 try: 

1496 self._prepare_thin(False) 

1497 except Exception as e: 

1498 raise xs_errors.XenError( 

1499 'VDIUnavailable', 

1500 opterr='Failed to detach VDI during "prepare thin": {}' 

1501 .format(e) 

1502 ) 

1503 

1504 def resize(self, sr_uuid, vdi_uuid, size): 

1505 util.SMlog('LinstorVDI.resize for {}'.format(self.uuid)) 

1506 if self.hidden: 

1507 raise xs_errors.XenError('VDIUnavailable', opterr='hidden VDI') 

1508 

1509 if size < self.size: 

1510 util.SMlog( 

1511 'vdi_resize: shrinking not supported: ' 

1512 '(current size: {}, new size: {})'.format(self.size, size) 

1513 ) 

1514 raise xs_errors.XenError('VDISize', opterr='shrinking not allowed') 

1515 

1516 # Compute the virtual VHD size. 

1517 size = vhdutil.validate_and_round_vhd_size(int(size)) 

1518 

1519 if size == self.size: 

1520 return VDI.VDI.get_params(self) 

1521 

1522 # Compute the LINSTOR volume size. 

1523 new_volume_size = compute_volume_size(size, self.vdi_type) 

1524 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1525 old_volume_size = self.size 

1526 else: 

1527 old_volume_size = self.capacity 

1528 if self.sr._provisioning == 'thin': 

1529 # VDI is currently deflated, so keep it deflated. 

1530 new_volume_size = old_volume_size 

1531 assert new_volume_size >= old_volume_size 

1532 

1533 space_needed = new_volume_size - old_volume_size 

1534 self.sr._ensure_space_available(space_needed) 

1535 

1536 old_capacity = self.capacity 

1537 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1538 self._linstor.resize(self.uuid, new_volume_size) 

1539 else: 

1540 if new_volume_size != old_volume_size: 

1541 inflate( 

1542 self.sr._journaler, self._linstor, self.uuid, self.path, 

1543 new_volume_size, old_volume_size 

1544 ) 

1545 vhdutil.setSizeVirtFast(self.path, size) 

1546 

1547 # Reload size attributes. 

1548 self._load_this() 

1549 

1550 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1551 self.session.xenapi.VDI.set_virtual_size(vdi_ref, str(self.size)) 

1552 self.session.xenapi.VDI.set_physical_utilisation( 

1553 vdi_ref, str(self.utilisation) 

1554 ) 

1555 self.sr._update_stats(self.capacity - old_capacity) 

1556 return VDI.VDI.get_params(self) 

1557 

1558 def clone(self, sr_uuid, vdi_uuid): 

1559 return self._do_snapshot(sr_uuid, vdi_uuid, VDI.SNAPSHOT_DOUBLE) 

1560 

1561 def compose(self, sr_uuid, vdi1, vdi2): 

1562 util.SMlog('VDI.compose for {} -> {}'.format(vdi2, vdi1)) 

1563 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

1564 raise xs_errors.XenError('Unimplemented') 

1565 

1566 parent_uuid = vdi1 

1567 parent_path = self._linstor.get_device_path(parent_uuid) 

1568 

1569 # We must pause tapdisk to correctly change the parent. Otherwise we 

1570 # have a readonly error. 

1571 # See: https://github.com/xapi-project/xen-api/blob/b3169a16d36dae0654881b336801910811a399d9/ocaml/xapi/storage_migrate.ml#L928-L929 

1572 # and: https://github.com/xapi-project/xen-api/blob/b3169a16d36dae0654881b336801910811a399d9/ocaml/xapi/storage_migrate.ml#L775 

1573 

1574 if not blktap2.VDI.tap_pause(self.session, self.sr.uuid, self.uuid): 

1575 raise util.SMException('Failed to pause VDI {}'.format(self.uuid)) 

1576 try: 

1577 vhdutil.setParent(self.path, parent_path, False) 

1578 vhdutil.setHidden(parent_path) 

1579 self.sr.session.xenapi.VDI.set_managed( 

1580 self.sr.srcmd.params['args'][0], False 

1581 ) 

1582 finally: 

1583 blktap2.VDI.tap_unpause(self.session, self.sr.uuid, self.uuid) 

1584 

1585 if not blktap2.VDI.tap_refresh(self.session, self.sr.uuid, self.uuid): 

1586 raise util.SMException( 

1587 'Failed to refresh VDI {}'.format(self.uuid) 

1588 ) 

1589 

1590 util.SMlog('Compose done') 

1591 

1592 def generate_config(self, sr_uuid, vdi_uuid): 

1593 """ 

1594 Generate the XML config required to attach and activate 

1595 a VDI for use when XAPI is not running. Attach and 

1596 activation are handled by vdi_attach_from_config below. 

1597 """ 

1598 

1599 util.SMlog('LinstorVDI.generate_config for {}'.format(self.uuid)) 

1600 

1601 if not self.path or not util.pathexists(self.path): 

1602 available = False 

1603 # Try to refresh symlink path... 

1604 try: 

1605 self.path = self._linstor.get_device_path(vdi_uuid) 

1606 available = util.pathexists(self.path) 

1607 except Exception: 

1608 pass 

1609 if not available: 

1610 raise xs_errors.XenError('VDIUnavailable') 

1611 

1612 resp = {} 

1613 resp['device_config'] = self.sr.dconf 

1614 resp['sr_uuid'] = sr_uuid 

1615 resp['vdi_uuid'] = self.uuid 

1616 resp['sr_sm_config'] = self.sr.sm_config 

1617 resp['vdi_path'] = self.path 

1618 resp['command'] = 'vdi_attach_from_config' 

1619 

1620 config = xmlrpc.client.dumps(tuple([resp]), 'vdi_attach_from_config') 

1621 return xmlrpc.client.dumps((config,), "", True) 

1622 

1623 def attach_from_config(self, sr_uuid, vdi_uuid): 

1624 """ 

1625 Attach and activate a VDI using config generated by 

1626 vdi_generate_config above. This is used for cases such as 

1627 the HA state-file and the redo-log. 

1628 """ 

1629 

1630 util.SMlog('LinstorVDI.attach_from_config for {}'.format(vdi_uuid)) 

1631 

1632 try: 

1633 if not util.pathexists(self.sr.path): 

1634 self.sr.attach(sr_uuid) 

1635 

1636 if not DRIVER_CONFIG['ATTACH_FROM_CONFIG_WITH_TAPDISK']: 

1637 return self.attach(sr_uuid, vdi_uuid) 

1638 except Exception: 

1639 util.logException('LinstorVDI.attach_from_config') 

1640 raise xs_errors.XenError( 

1641 'SRUnavailable', 

1642 opterr='Unable to attach from config' 

1643 ) 

1644 

1645 def reset_leaf(self, sr_uuid, vdi_uuid): 

1646 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

1647 raise xs_errors.XenError('Unimplemented') 

1648 

1649 if not self.sr._vhdutil.has_parent(self.uuid): 

1650 raise util.SMException( 

1651 'ERROR: VDI {} has no parent, will not reset contents' 

1652 .format(self.uuid) 

1653 ) 

1654 

1655 vhdutil.killData(self.path) 

1656 

1657 def _load_this(self): 

1658 volume_metadata = self._linstor.get_volume_metadata(self.uuid) 

1659 volume_info = self._linstor.get_volume_info(self.uuid) 

1660 

1661 # Contains the physical size used on all disks. 

1662 # When LINSTOR LVM driver is used, the size should be similar to 

1663 # virtual size (i.e. the LINSTOR max volume size). 

1664 # When LINSTOR Thin LVM driver is used, the used physical size should 

1665 # be lower than virtual size at creation. 

1666 # The physical size increases after each write to a new block. 

1667 self.utilisation = volume_info.physical_size 

1668 self.capacity = volume_info.virtual_size 

1669 

1670 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1671 self.hidden = int(volume_metadata.get(HIDDEN_TAG) or 0) 

1672 self.size = volume_info.virtual_size 

1673 self.parent = '' 

1674 else: 

1675 vhd_info = self.sr._vhdutil.get_vhd_info(self.uuid) 

1676 self.hidden = vhd_info.hidden 

1677 self.size = vhd_info.sizeVirt 

1678 self.parent = vhd_info.parentUuid 

1679 
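# A hidden volume is an internal node of the VHD chain (e.g. a base copy)
# and must not be exposed to XAPI as a managed VDI.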

1680 if self.hidden: 

1681 self.managed = False 

1682 

1683 self.label = volume_metadata.get(NAME_LABEL_TAG) or '' 

1684 self.description = volume_metadata.get(NAME_DESCRIPTION_TAG) or '' 

1685 

1686 # Update sm_config_override of VDI parent class. 

1687 self.sm_config_override = {'vhd-parent': self.parent or None} 

1688 

1689 def _mark_hidden(self, hidden=True): 

1690 if self.hidden == hidden: 

1691 return 

1692 
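# For VHD volumes the hidden flag lives in the VHD header itself; RAW
# volumes have no header, so the flag is kept in the LINSTOR volume
# metadata instead.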

1693 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 

1694 vhdutil.setHidden(self.path, hidden) 

1695 else: 

1696 self._linstor.update_volume_metadata(self.uuid, { 

1697 HIDDEN_TAG: hidden 

1698 }) 

1699 self.hidden = hidden 

1700 

1701 def update(self, sr_uuid, vdi_uuid): 

1702 xenapi = self.session.xenapi 

1703 vdi_ref = xenapi.VDI.get_by_uuid(self.uuid) 

1704 

1705 volume_metadata = { 

1706 NAME_LABEL_TAG: util.to_plain_string( 

1707 xenapi.VDI.get_name_label(vdi_ref) 

1708 ), 

1709 NAME_DESCRIPTION_TAG: util.to_plain_string( 

1710 xenapi.VDI.get_name_description(vdi_ref) 

1711 ) 

1712 } 

1713 

1714 try: 

1715 self._linstor.update_volume_metadata(self.uuid, volume_metadata) 

1716 except LinstorVolumeManagerError as e: 

1717 if e.code == LinstorVolumeManagerError.ERR_VOLUME_NOT_EXISTS: 

1718 raise xs_errors.XenError( 

1719 'VDIUnavailable', 

1720 opterr='LINSTOR volume {} not found'.format(self.uuid) 

1721 ) 

1722 raise xs_errors.XenError('VDIUnavailable', opterr=str(e)) 

1723 

1724 # -------------------------------------------------------------------------- 

1725 # Thin provisioning. 

1726 # -------------------------------------------------------------------------- 

1727 

1728 def _prepare_thin(self, attach): 

1729 if self.sr._is_master: 

1730 if attach: 

1731 attach_thin( 

1732 self.session, self.sr._journaler, self._linstor, 

1733 self.sr.uuid, self.uuid 

1734 ) 

1735 else: 

1736 detach_thin( 

1737 self.session, self._linstor, self.sr.uuid, self.uuid 

1738 ) 

1739 else: 
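# Thin attach/detach must be coordinated by the pool master, so the slave
# asks it to run the manager plugin on its behalf.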

1740 fn = 'attach' if attach else 'detach' 

1741 

1742 # We assume the first pool is always the one currently in use. 

1743 pools = self.session.xenapi.pool.get_all() 

1744 master = self.session.xenapi.pool.get_master(pools[0]) 

1745 args = { 

1746 'groupName': self.sr._group_name, 

1747 'srUuid': self.sr.uuid, 

1748 'vdiUuid': self.uuid 

1749 } 

1750 ret = self.session.xenapi.host.call_plugin( 

1751 master, self.sr.MANAGER_PLUGIN, fn, args 

1752 ) 

1753 util.SMlog( 

1754 'call-plugin ({} with {}) returned: {}'.format(fn, args, ret) 

1755 ) 
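# host.call_plugin returns the plugin result as a string, hence the
# comparison with 'False'.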

1756 if ret == 'False': 

1757 raise xs_errors.XenError( 

1758 'VDIUnavailable', 

1759 opterr='Plugin {} failed'.format(self.sr.MANAGER_PLUGIN) 

1760 ) 

1761 

1762 # Reload size attrs after inflate or deflate! 

1763 self._load_this() 

1764 self.sr._update_physical_size() 

1765 

1766 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1767 self.session.xenapi.VDI.set_physical_utilisation( 

1768 vdi_ref, str(self.utilisation) 

1769 ) 

1770 

1771 self.session.xenapi.SR.set_physical_utilisation( 

1772 self.sr.sr_ref, str(self.sr.physical_utilisation) 

1773 ) 

1774 

1775 # -------------------------------------------------------------------------- 

1776 # Generic helpers. 

1777 # -------------------------------------------------------------------------- 

1778 

1779 def _determine_type_and_path(self): 

1780 """ 

1781 Determine whether this is a RAW or a VHD VDI. 

1782 """ 

1783 

1784 # 1. Check vdi_ref and vdi_type in config. 

1785 try: 

1786 vdi_ref = self.session.xenapi.VDI.get_by_uuid(self.uuid) 

1787 if vdi_ref: 

1788 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

1789 vdi_type = sm_config.get('vdi_type') 

1790 if vdi_type: 

1791 # Update parent fields. 

1792 self.vdi_type = vdi_type 

1793 self.sm_config_override = sm_config 

1794 self._update_device_name( 

1795 self._linstor.get_volume_name(self.uuid) 

1796 ) 

1797 return 

1798 except Exception: 

1799 pass 

1800 

1801 # 2. Otherwise use the LINSTOR volume manager directly. 

1802 # It's probably a new VDI created via snapshot. 

1803 volume_metadata = self._linstor.get_volume_metadata(self.uuid) 

1804 self.vdi_type = volume_metadata.get(VDI_TYPE_TAG) 

1805 if not self.vdi_type: 

1806 raise xs_errors.XenError( 

1807 'VDIUnavailable', 

1808 opterr='failed to get vdi_type in metadata' 

1809 ) 

1810 self._update_device_name( 

1811 self._linstor.get_volume_name(self.uuid) 

1812 ) 

1813 

1814 def _update_device_name(self, device_name): 

1815 self._device_name = device_name 

1816 

1817 # Set the path attribute of the VDI parent class. 

1818 if device_name: 

1819 self.path = self._linstor.build_device_path(self._device_name) 

1820 else: 

1821 self.path = None 

1822 

1823 def _create_snapshot(self, snap_uuid, snap_of_uuid=None): 

1824 """ 

1825 Snapshot self and return the snapshot VDI object. 

1826 """ 

1827 

1828 # 1. Create a new LINSTOR volume with the same size as self. 

1829 snap_path = self._linstor.shallow_clone_volume( 

1830 self.uuid, snap_uuid, persistent=False 

1831 ) 

1832 

1833 # 2. Write the snapshot content. 
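# vhdutil.snapshot creates a new VHD at snap_path whose parent locator
# points to self.path (flagged as a raw parent when self is RAW).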

1834 is_raw = (self.vdi_type == vhdutil.VDI_TYPE_RAW) 

1835 vhdutil.snapshot( 

1836 snap_path, self.path, is_raw, self.MAX_METADATA_VIRT_SIZE 

1837 ) 

1838 

1839 # 3. Get snapshot parent. 

1840 snap_parent = self.sr._vhdutil.get_parent(snap_uuid) 

1841 

1842 # 4. Update metadata. 

1843 util.SMlog('Set VDI {} metadata of snapshot'.format(snap_uuid)) 

1844 volume_metadata = { 

1845 NAME_LABEL_TAG: util.to_plain_string(self.label), 

1846 NAME_DESCRIPTION_TAG: util.to_plain_string(self.description), 

1847 IS_A_SNAPSHOT_TAG: bool(snap_of_uuid), 

1848 SNAPSHOT_OF_TAG: snap_of_uuid, 

1849 SNAPSHOT_TIME_TAG: '', 

1850 TYPE_TAG: self.ty, 

1851 VDI_TYPE_TAG: vhdutil.VDI_TYPE_VHD, 

1852 READ_ONLY_TAG: False, 

1853 METADATA_OF_POOL_TAG: '' 

1854 } 

1855 self._linstor.set_volume_metadata(snap_uuid, volume_metadata) 

1856 

1857 # 5. Set size. 

1858 snap_vdi = LinstorVDI(self.sr, snap_uuid) 

1859 if not snap_vdi._exists: 

1860 raise xs_errors.XenError('VDISnapshot') 

1861 

1862 volume_info = self._linstor.get_volume_info(snap_uuid) 

1863 

1864 snap_vdi.size = self.sr._vhdutil.get_size_virt(snap_uuid) 

1865 snap_vdi.utilisation = volume_info.physical_size 

1866 

1867 # 6. Update sm config. 

1868 snap_vdi.sm_config = {} 

1869 snap_vdi.sm_config['vdi_type'] = snap_vdi.vdi_type 

1870 if snap_parent: 

1871 snap_vdi.sm_config['vhd-parent'] = snap_parent 

1872 snap_vdi.parent = snap_parent 

1873 

1874 snap_vdi.label = self.label 

1875 snap_vdi.description = self.description 

1876 

1877 self._linstor.mark_volume_as_persistent(snap_uuid) 

1878 

1879 return snap_vdi 

1880 

1881 # -------------------------------------------------------------------------- 

1882 # Implement specific SR methods. 

1883 # -------------------------------------------------------------------------- 

1884 

1885 def _rename(self, oldpath, newpath): 

1886 # TODO: I'm not sure... Used by CBT. 

1887 volume_uuid = self._linstor.get_volume_uuid_from_device_path(oldpath) 

1888 self._linstor.update_volume_name(volume_uuid, newpath) 

1889 

1890 def _do_snapshot( 

1891 self, sr_uuid, vdi_uuid, snap_type, secondary=None, cbtlog=None 

1892 ): 

1893 # If CBT is enabled, save the file consistency state. 

1894 if cbtlog is not None: 

1895 if blktap2.VDI.tap_status(self.session, vdi_uuid): 

1896 consistency_state = False 

1897 else: 

1898 consistency_state = True 

1899 util.SMlog( 

1900 'Saving log consistency state of {} for vdi: {}' 

1901 .format(consistency_state, vdi_uuid) 

1902 ) 

1903 else: 

1904 consistency_state = None 

1905 

1906 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

1907 raise xs_errors.XenError('Unimplemented') 

1908 
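# The VDI is paused around the snapshot so the tapdisk chain stays
# quiescent while the VHD tree is modified.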

1909 if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid): 

1910 raise util.SMException('Failed to pause VDI {}'.format(vdi_uuid)) 

1911 try: 

1912 return self._snapshot(snap_type, cbtlog, consistency_state) 

1913 finally: 

1914 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary) 

1915 

1916 def _snapshot(self, snap_type, cbtlog=None, cbt_consistency=None): 

1917 util.SMlog( 

1918 'LinstorVDI._snapshot for {} (type {})' 

1919 .format(self.uuid, snap_type) 

1920 ) 

1921 

1922 # 1. Checks... 

1923 if self.hidden: 

1924 raise xs_errors.XenError('VDIClone', opterr='hidden VDI') 

1925 

1926 depth = self.sr._vhdutil.get_depth(self.uuid) 

1927 if depth == -1: 

1928 raise xs_errors.XenError( 

1929 'VDIUnavailable', 

1930 opterr='failed to get VHD depth' 

1931 ) 

1932 elif depth >= vhdutil.MAX_CHAIN_SIZE: 

1933 raise xs_errors.XenError('SnapshotChainTooLong') 

1934 

1935 volume_path = self.path 

1936 if not util.pathexists(volume_path): 

1937 raise xs_errors.XenError( 

1938 'EIO', 

1939 opterr='IO error checking path {}'.format(volume_path) 

1940 ) 

1941 

1942 # 2. Create base and snap uuid (if required) and a journal entry. 
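# The CLONE journal entry records the base (and snapshot) UUIDs so an
# interrupted clone can later be undone (see _handle_interrupted_clone).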

1943 base_uuid = util.gen_uuid() 

1944 snap_uuid = None 

1945 

1946 if snap_type == VDI.SNAPSHOT_DOUBLE: 

1947 snap_uuid = util.gen_uuid() 

1948 

1949 clone_info = '{}_{}'.format(base_uuid, snap_uuid) 

1950 

1951 active_uuid = self.uuid 

1952 self.sr._journaler.create( 

1953 LinstorJournaler.CLONE, active_uuid, clone_info 

1954 ) 

1955 

1956 try: 

1957 # 3. Self becomes the new base. 

1958 # The device path remains the same. 
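# The existing volume keeps its device but is renamed to base_uuid; the
# original UUID is then reused below for the new active leaf.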

1959 self._linstor.update_volume_uuid(self.uuid, base_uuid) 

1960 self.uuid = base_uuid 

1961 self.location = self.uuid 

1962 self.read_only = True 

1963 self.managed = False 

1964 

1965 # 4. Create snapshots (new active and snap). 

1966 active_vdi = self._create_snapshot(active_uuid) 

1967 

1968 snap_vdi = None 

1969 if snap_type == VDI.SNAPSHOT_DOUBLE: 

1970 snap_vdi = self._create_snapshot(snap_uuid, active_uuid) 

1971 

1972 self.label = 'base copy' 

1973 self.description = '' 

1974 

1975 # 5. Mark the base VDI as hidden so that it does not show up 

1976 # in subsequent scans. 

1977 self._mark_hidden() 

1978 self._linstor.update_volume_metadata( 

1979 self.uuid, {READ_ONLY_TAG: True} 

1980 ) 

1981 

1982 # 6. We must update the new active VDI with the "paused" and 

1983 # "host_" properties. Why? Because the original VDI has been 

1984 # paused and we must unpause it after the snapshot. 

1985 # See: `tap_unpause` in `blktap2.py`. 

1986 vdi_ref = self.session.xenapi.VDI.get_by_uuid(active_uuid) 

1987 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

1988 for key in [x for x in sm_config.keys() if x == 'paused' or x.startswith('host_')]: 

1989 active_vdi.sm_config[key] = sm_config[key] 

1990 

1991 # 7. Verify parent locator field of both children and 

1992 # delete base if unused. 
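# If neither child actually references the base (single/internal snapshot,
# or a mismatched parent locator), the base volume can be destroyed now.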

1993 introduce_parent = True 

1994 try: 

1995 snap_parent = None 

1996 if snap_vdi: 

1997 snap_parent = snap_vdi.parent 

1998 

1999 if active_vdi.parent != self.uuid and ( 

2000 snap_type == VDI.SNAPSHOT_SINGLE or 

2001 snap_type == VDI.SNAPSHOT_INTERNAL or 

2002 snap_parent != self.uuid 

2003 ): 

2004 util.SMlog( 

2005 'Destroy unused base volume: {} (path={})' 

2006 .format(self.uuid, self.path) 

2007 ) 

2008 introduce_parent = False 

2009 self._linstor.destroy_volume(self.uuid) 

2010 except Exception as e: 

2011 util.SMlog('Ignoring exception: {}'.format(e)) 

2012 pass 

2013 

2014 # 8. Introduce the new VDI records. 

2015 if snap_vdi: 

2016 # If the parent is encrypted set the key_hash for the 

2017 # new snapshot disk. 

2018 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

2019 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

2020 # TODO: Maybe remove key_hash support. 

2021 if 'key_hash' in sm_config: 

2022 snap_vdi.sm_config['key_hash'] = sm_config['key_hash'] 

2023 # If we have CBT enabled on the VDI, 

2024 # set CBT status for the new snapshot disk. 

2025 if cbtlog: 

2026 snap_vdi.cbt_enabled = True 

2027 

2028 if snap_vdi: 

2029 snap_vdi_ref = snap_vdi._db_introduce() 

2030 util.SMlog( 

2031 'vdi_clone: introduced VDI: {} ({})' 

2032 .format(snap_vdi_ref, snap_vdi.uuid) 

2033 ) 

2034 if introduce_parent: 

2035 base_vdi_ref = self._db_introduce() 

2036 self.session.xenapi.VDI.set_managed(base_vdi_ref, False) 

2037 util.SMlog( 

2038 'vdi_clone: introduced VDI: {} ({})' 

2039 .format(base_vdi_ref, self.uuid) 

2040 ) 

2041 self._linstor.update_volume_metadata(self.uuid, { 

2042 NAME_LABEL_TAG: util.to_plain_string(self.label), 

2043 NAME_DESCRIPTION_TAG: util.to_plain_string( 

2044 self.description 

2045 ), 

2046 READ_ONLY_TAG: True, 

2047 METADATA_OF_POOL_TAG: '' 

2048 }) 

2049 

2050 # 9. Update CBT files if the user created a snapshot (SNAPSHOT_DOUBLE). 

2051 if snap_type == VDI.SNAPSHOT_DOUBLE and cbtlog: 

2052 try: 

2053 self._cbt_snapshot(snap_uuid, cbt_consistency) 

2054 except Exception: 

2055 # CBT operation failed. 

2056 # TODO: Implement me. 

2057 raise 

2058 

2059 if snap_type != VDI.SNAPSHOT_INTERNAL: 

2060 self.sr._update_stats(self.capacity) 

2061 

2062 # 10. Return info on the new user-visible leaf VDI. 

2063 ret_vdi = snap_vdi 

2064 if not ret_vdi: 

2065 ret_vdi = self 

2066 if not ret_vdi: 

2067 ret_vdi = active_vdi 

2068 

2069 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

2070 self.session.xenapi.VDI.set_sm_config( 

2071 vdi_ref, active_vdi.sm_config 

2072 ) 

2073 except Exception as e: 

2074 util.logException('Failed to snapshot!') 

2075 try: 

2076 self.sr._handle_interrupted_clone( 

2077 active_uuid, clone_info, force_undo=True 

2078 ) 

2079 self.sr._journaler.remove(LinstorJournaler.CLONE, active_uuid) 

2080 except Exception as e: 

2081 util.SMlog( 

2082 'WARNING: Failed to clean up failed snapshot: {}' 

2083 .format(e) 

2084 ) 

2085 raise xs_errors.XenError('VDIClone', opterr=str(e)) 

2086 

2087 self.sr._journaler.remove(LinstorJournaler.CLONE, active_uuid) 

2088 

2089 return ret_vdi.get_params() 

2090 

2091# ------------------------------------------------------------------------------ 

2092 

2093 
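# When executed directly by SMAPI, run a single SR command; when imported,
# simply register the driver with the SR framework.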

2094if __name__ == '__main__': 

2095 SRCommand.run(LinstorSR, DRIVER_INFO) 

2096else: 

2097 SR.registerSR(LinstorSR)