
1#!/usr/bin/env python3 

2# 

3# Copyright (C) 2020 Vates SAS - ronan.abhamon@vates.fr 

4# 

5# This program is free software: you can redistribute it and/or modify 

6# it under the terms of the GNU General Public License as published by 

7# the Free Software Foundation, either version 3 of the License, or 

8# (at your option) any later version. 

9# This program is distributed in the hope that it will be useful, 

10# but WITHOUT ANY WARRANTY; without even the implied warranty of 

11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

12# GNU General Public License for more details. 

13# 

14# You should have received a copy of the GNU General Public License 

15# along with this program. If not, see <https://www.gnu.org/licenses/>. 

16# 

17 

18 

19import json 

20import linstor 

21import os.path 

22import re 

23import socket 

24import time 

25import util 

26 

27 

28def round_up(value, divisor): 

29 assert divisor 

30 divisor = int(divisor) 

31 return ((int(value) + divisor - 1) // divisor) * divisor 

32 

33 

34def round_down(value, divisor): 

35 assert divisor 

36 value = int(value) 

37 return value - (value % int(divisor)) 

38 
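# Illustrative examples of the helpers above (not executed here), using the
# 4 MiB LVM extent size that the class below uses as BLOCK_SIZE:
#   round_up(1, 4 * 1024 * 1024)          == 4194304
#   round_up(4194304, 4 * 1024 * 1024)    == 4194304
#   round_down(4194305, 4 * 1024 * 1024)  == 4194304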

39 

40class LinstorVolumeManagerError(Exception): 

41 ERR_GENERIC = 0, 

42 ERR_VOLUME_EXISTS = 1, 

43 ERR_VOLUME_NOT_EXISTS = 2 

44 

45 def __init__(self, message, code=ERR_GENERIC): 

46 super(LinstorVolumeManagerError, self).__init__(message) 

47 self._code = code 

48 

49 @property 

50 def code(self): 

51 return self._code 

52 

53# ============================================================================== 

54 

55# Note: 

56# If a storage pool is not accessible after a network change: 

57# linstor node interface modify <NODE> default --ip <IP> 

58 

59 

60class LinstorVolumeManager(object): 

61 """ 

62 API to manage LINSTOR volumes in XCP-ng.

63 A volume in this context is a physical part of the storage layer. 

64 """ 

65 

66 DEV_ROOT_PATH = '/dev/drbd/by-res/' 

67 

68 # Default LVM extent size. 

69 BLOCK_SIZE = 4 * 1024 * 1024 

70 

71 # List of volume properties. 

72 PROP_METADATA = 'metadata' 

73 PROP_NOT_EXISTS = 'not-exists' 

74 PROP_VOLUME_NAME = 'volume-name' 

75 PROP_IS_READONLY_TIMESTAMP = 'readonly-timestamp' 

76 

77 # A volume can only be locked for a limited duration. 

78 # The goal is to give enough time to slaves to execute some actions on 

79 # a device before a UUID update or a coalesce, for example.

80 # Expiration is expressed in seconds. 

81 LOCKED_EXPIRATION_DELAY = 1 * 60 
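# Usage sketch of the locking flow described above (illustrative only;
# `manager` and `vdi_uuid` are hypothetical names):
#   manager.lock_volume(vdi_uuid)                  # slave: protect the path
#   manager.ensure_volume_is_not_locked(vdi_uuid)  # master: wait before GC
#   manager.lock_volume(vdi_uuid, locked=False)    # slave: release the lock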

82 

83 # Used when volume uuid is being updated. 

84 PROP_UPDATING_UUID_SRC = 'updating-uuid-src' 

85 

86 # States of property PROP_NOT_EXISTS. 

87 STATE_EXISTS = '0' 

88 STATE_NOT_EXISTS = '1' 

89 STATE_CREATING = '2' 

90 

91 # Property namespaces. 

92 NAMESPACE_SR = 'xcp/sr' 

93 NAMESPACE_VOLUME = 'volume' 

94 

95 # Regex to match properties. 

96 REG_PROP = '^([^/]+)/{}$' 

97 

98 REG_METADATA = re.compile(REG_PROP.format(PROP_METADATA)) 

99 REG_NOT_EXISTS = re.compile(REG_PROP.format(PROP_NOT_EXISTS)) 

100 REG_VOLUME_NAME = re.compile(REG_PROP.format(PROP_VOLUME_NAME)) 

101 REG_UPDATING_UUID_SRC = re.compile(REG_PROP.format(PROP_UPDATING_UUID_SRC)) 
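# Illustrative example: volume properties are stored in the LINSTOR KV
# store under keys of the form '<volume-uuid>/<property>', so:
#   REG_NOT_EXISTS.match('2d1a-example-uuid/not-exists').groups()[0]
# gives back the volume uuid part of the key ('2d1a-example-uuid' is a
# made-up, shortened uuid).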

102 

103 # Prefixes of SR/VOLUME in the LINSTOR DB. 

104 # A LINSTOR (resource, group, ...) name cannot start with a number. 

105 # So we add a prefix in front of our SR/VOLUME uuids.

106 PREFIX_SR = 'xcp-sr-' 

107 PREFIX_VOLUME = 'xcp-volume-' 
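# Illustrative example (made-up names): a VDI uuid '50e93e2b-...' becomes
# the LINSTOR resource name 'xcp-volume-50e93e2b-...' (build_volume_name),
# and an SR group name 'linstor_group/thin_device' becomes
# 'xcp-sr-linstor_group_thin_device' (_build_group_name).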

108 

109 @staticmethod 

110 def default_logger(*args): 

111 print(args) 

112 

113 # -------------------------------------------------------------------------- 

114 # API. 

115 # -------------------------------------------------------------------------- 

116 

117 class VolumeInfo(object): 

118 __slots__ = ( 

119 'name', 

120 'physical_size', # Total physical size used by this volume on 

121 # all disks. 

122 'virtual_size' # Total virtual available size of this volume 

123 # (i.e. the user size at creation). 

124 ) 

125 

126 def __init__(self, name): 

127 self.name = name 

128 self.physical_size = 0 

129 self.virtual_size = 0 

130 

131 def __repr__(self): 

132 return 'VolumeInfo("{}", {}, {})'.format( 

133 self.name, self.physical_size, self.virtual_size 

134 ) 

135 

136 # -------------------------------------------------------------------------- 

137 

138 def __init__( 

139 self, uri, group_name, repair=False, logger=default_logger.__func__ 

140 ): 

141 """ 

142 Create a new LinstorVolumeManager object.

143 :param str uri: URI to communicate with the LINSTOR controller. 

144 :param str group_name: The SR group name to use.

145 :param bool repair: If true we try to remove bad volumes due to a crash 

146 or unexpected behavior. 

147 :param function logger: Function to log messages. 

148 """ 

149 

150 self._uri = uri 

151 self._linstor = self._create_linstor_instance(uri) 

152 self._base_group_name = group_name 

153 

154 # Ensure group exists. 

155 group_name = self._build_group_name(group_name) 

156 groups = self._linstor.resource_group_list_raise([group_name]) 

157 groups = groups.resource_groups 

158 if not groups: 

159 raise LinstorVolumeManagerError( 

160 'Unable to find `{}` Linstor SR'.format(group_name) 

161 ) 

162 

163 # Ok. ;) 

164 self._logger = logger 

165 self._redundancy = groups[0].select_filter.place_count 

166 self._group_name = group_name 

167 self._build_volumes(repair=repair) 

168 
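# Usage sketch (illustrative; the URI, group name and uuid are assumptions,
# not values taken from a real pool):
#   manager = LinstorVolumeManager(
#       'linstor://192.168.0.1', 'linstor_group', repair=True
#   )
#   device_path = manager.create_volume(vdi_uuid, 10 * 1024 ** 3)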

169 @property 

170 def group_name(self): 

171 """ 

172 Give the used group name. 

173 :return: The group name. 

174 :rtype: str 

175 """ 

176 return self._base_group_name 

177 

178 @property 

179 def volumes(self): 

180 """ 

181 Give the volumes uuid set. 

182 :return: The volumes uuid set. 

183 :rtype: set(str) 

184 """ 

185 return self._volumes 

186 

187 @property 

188 def volumes_with_name(self): 

189 """ 

190 Give a volume dictionary that contains the names actually owned.

191 :return: A volume/name dict. 

192 :rtype: dict(str, str) 

193 """ 

194 return self._get_volumes_by_property(self.REG_VOLUME_NAME) 

195 

196 @property 

197 def volumes_with_info(self): 

198 """ 

199 Give a volume dictionary that contains VolumeInfos.

200 :return: A volume/VolumeInfo dict. 

201 :rtype: dict(str, VolumeInfo) 

202 """ 

203 

204 volumes = {} 

205 

206 all_volume_info = self._get_volumes_info() 

207 volume_names = self.volumes_with_name 

208 for volume_uuid, volume_name in volume_names.items(): 

209 if volume_name: 

210 volume_info = all_volume_info.get(volume_name) 

211 if volume_info: 

212 volumes[volume_uuid] = volume_info 

213 continue 

214 

215 # Well I suppose if this volume is not available, 

216 # LINSTOR has been used directly without using this API. 

217 volumes[volume_uuid] = self.VolumeInfo('') 

218 

219 return volumes 

220 

221 @property 

222 def volumes_with_metadata(self): 

223 """ 

224 Give a volume dictionary that contains metadata.

225 :return: A volume/metadata dict. 

226 :rtype: dict(str, dict) 

227 """ 

228 

229 volumes = {} 

230 

231 metadata = self._get_volumes_by_property(self.REG_METADATA) 

232 for volume_uuid, volume_metadata in metadata.items(): 

233 if volume_metadata: 

234 volume_metadata = json.loads(volume_metadata) 

235 if isinstance(volume_metadata, dict): 

236 volumes[volume_uuid] = volume_metadata 

237 continue 

238 raise LinstorVolumeManagerError( 

239 'Expected dictionary in volume metadata: {}' 

240 .format(volume_uuid) 

241 ) 

242 

243 volumes[volume_uuid] = {} 

244 

245 return volumes 

246 

247 @property 

248 def max_volume_size_allowed(self): 

249 """ 

250 Give the max volume size currently available in B. 

251 :return: The current size. 

252 :rtype: int 

253 """ 

254 

255 candidates = self._find_best_size_candidates() 

256 if not candidates: 

257 raise LinstorVolumeManagerError( 

258 'Failed to get max volume size allowed' 

259 ) 

260 

261 size = candidates[0].max_volume_size 

262 if size < 0: 

263 raise LinstorVolumeManagerError( 

264 'Invalid max volume size allowed given: {}'.format(size) 

265 ) 

266 return self.round_down_volume_size(size * 1024) 

267 

268 @property 

269 def physical_size(self): 

270 """ 

271 Give the total physical size of the SR. 

272 :return: The physical size. 

273 :rtype: int 

274 """ 

275 return self._compute_size('total_capacity') 

276 

277 @property 

278 def physical_free_size(self): 

279 """ 

280 Give the total free physical size of the SR. 

281 :return: The physical free size. 

282 :rtype: int 

283 """ 

284 return self._compute_size('free_capacity') 

285 

286 @property 

287 def total_allocated_volume_size(self): 

288 """ 

289 Give the sum of the sizes of all created volumes.

290 :return: The physical size required to use the volumes.

291 :rtype: int 

292 """ 

293 

294 size = 0 

295 for resource in self._linstor.resource_list_raise().resources: 

296 for volume in resource.volumes: 

297 # We ignore diskless pools of the form "DfltDisklessStorPool". 

298 if volume.storage_pool_name == self._group_name: 

299 current_size = volume.usable_size 

300 if current_size < 0: 

301 raise LinstorVolumeManagerError( 

302 'Failed to get usable size of `{}` on `{}`' 

303 .format(resource.name, volume.storage_pool_name) 

304 ) 

305 size += current_size 

306 return size * 1024 

307 

308 @property 

309 def metadata(self): 

310 """ 

311 Get the metadata of the SR. 

312 :return: Dictionary that contains metadata. 

313 :rtype: dict(str, dict) 

314 """ 

315 

316 sr_properties = self._get_sr_properties() 

317 metadata = sr_properties.get(self.PROP_METADATA) 

318 if metadata is not None: 

319 metadata = json.loads(metadata) 

320 if isinstance(metadata, dict): 

321 return metadata 

322 raise LinstorVolumeManagerError( 

323 'Expected dictionary in SR metadata: {}'.format( 

324 self._group_name 

325 ) 

326 ) 

327 

328 return {} 

329 

330 @metadata.setter 

331 def metadata(self, metadata): 

332 """ 

333 Set the metadata of the SR. 

334 :param dict metadata: Dictionary that contains metadata. 

335 """ 

336 

337 assert isinstance(metadata, dict) 

338 sr_properties = self._get_sr_properties() 

339 sr_properties[self.PROP_METADATA] = json.dumps(metadata) 

340 

341 @property 

342 def disconnected_hosts(self): 

343 """ 

344 Get the list of disconnected hosts. 

345 :return: Set that contains disconnected hosts. 

346 :rtype: set(str) 

347 """ 

348 

349 pools = self._linstor.storage_pool_list_raise( 

350 filter_by_stor_pools=[self._group_name] 

351 ).storage_pools 

352 

353 disconnected_hosts = set() 

354 for pool in pools: 

355 for report in pool.reports: 

356 if report.ret_code & linstor.consts.WARN_NOT_CONNECTED == \ 

357 linstor.consts.WARN_NOT_CONNECTED: 

358 disconnected_hosts.add(pool.node_name) 

359 break 

360 return disconnected_hosts 

361 

362 def check_volume_exists(self, volume_uuid): 

363 """ 

364 Check if a volume exists in the SR. 

365 :return: True if volume exists. 

366 :rtype: bool 

367 """ 

368 return volume_uuid in self._volumes 

369 

370 def create_volume(self, volume_uuid, size, persistent=True): 

371 """ 

372 Create a new volume on the SR. 

373 :param str volume_uuid: The volume uuid to use. 

374 :param int size: volume size in B. 

375 :param bool persistent: If false, the volume will be unavailable

376 on the next LinstorSR(...) constructor call.

377 :return: The current device path of the volume. 

378 :rtype: str 

379 """ 

380 

381 self._logger('Creating LINSTOR volume {}...'.format(volume_uuid)) 

382 volume_name = self.build_volume_name(util.gen_uuid()) 

383 volume_properties = self._create_volume_with_properties( 

384 volume_uuid, volume_name, size, place_resources=True 

385 ) 

386 

387 try: 

388 self._logger( 

389 'Find device path of LINSTOR volume {}...'.format(volume_uuid) 

390 ) 

391 device_path = self._find_device_path(volume_uuid, volume_name) 

392 if persistent: 

393 volume_properties[self.PROP_NOT_EXISTS] = self.STATE_EXISTS 

394 self._volumes.add(volume_uuid) 

395 self._logger( 

396 'LINSTOR volume {} created!'.format(volume_uuid) 

397 ) 

398 return device_path 

399 except Exception: 

400 self._force_destroy_volume(volume_uuid, volume_properties) 

401 raise 

402 

403 def mark_volume_as_persistent(self, volume_uuid): 

404 """ 

405 Mark volume as persistent if created with persistent=False. 

406 :param str volume_uuid: The volume uuid to mark. 

407 """ 

408 

409 self._ensure_volume_exists(volume_uuid) 

410 

411 # Mark volume as persistent. 

412 volume_properties = self._get_volume_properties(volume_uuid) 

413 volume_properties[self.PROP_NOT_EXISTS] = self.STATE_EXISTS 

414 

415 def destroy_volume(self, volume_uuid): 

416 """ 

417 Destroy a volume. 

418 :param str volume_uuid: The volume uuid to destroy. 

419 """ 

420 

421 self._ensure_volume_exists(volume_uuid) 

422 self.ensure_volume_is_not_locked(volume_uuid) 

423 

424 # Mark volume as destroyed. 

425 volume_properties = self._get_volume_properties(volume_uuid) 

426 volume_properties[self.PROP_NOT_EXISTS] = self.STATE_NOT_EXISTS 

427 

428 self._volumes.remove(volume_uuid) 

429 self._destroy_volume(volume_uuid, volume_properties) 

430 

431 def lock_volume(self, volume_uuid, locked=True): 

432 """ 

433 Prevent modifications of the volume properties during 

434 "self.LOCKED_EXPIRATION_DELAY" seconds. The SR must be locked 

435 when used. This method is useful to attach/detach a volume correctly on

436 a slave. Without it, the GC could rename a volume and the old

437 volume path could still be used by a slave...

438 :param str volume_uuid: The volume uuid to protect/unprotect. 

439 :param bool locked: Lock/unlock the volume. 

440 """ 

441 

442 self._ensure_volume_exists(volume_uuid) 

443 

444 self._logger( 

445 '{} volume {} as locked'.format( 

446 'Mark' if locked else 'Unmark', 

447 volume_uuid 

448 ) 

449 ) 

450 

451 volume_properties = self._get_volume_properties(volume_uuid) 

452 if locked: 

453 volume_properties[ 

454 self.PROP_IS_READONLY_TIMESTAMP 

455 ] = str(time.time()) 

456 elif self.PROP_IS_READONLY_TIMESTAMP in volume_properties: 

457 volume_properties.pop(self.PROP_IS_READONLY_TIMESTAMP) 

458 

459 def ensure_volume_is_not_locked(self, volume_uuid, timeout=None): 

460 """ 

461 Ensure a volume is not locked. Wait if necessary. 

462 :param str volume_uuid: The volume uuid to check. 

463 :param int timeout: If the volume is still locked after the timeout

464 expires, an exception is thrown.

465 """ 

466 return self.ensure_volume_list_is_not_locked([volume_uuid], timeout) 

467 

468 def ensure_volume_list_is_not_locked(self, volume_uuids, timeout=None): 

469 checked = set() 

470 for volume_uuid in volume_uuids: 

471 if volume_uuid in self._volumes: 

472 checked.add(volume_uuid) 

473 

474 if not checked: 

475 return 

476 

477 waiting = False 

478 

479 start = time.time() 

480 while True: 

481 # Can't delete in for loop, use a copy of the list. 

482 remaining = checked.copy() 

483 for volume_uuid in checked: 

484 volume_properties = self._get_volume_properties(volume_uuid) 

485 timestamp = volume_properties.get( 

486 self.PROP_IS_READONLY_TIMESTAMP 

487 ) 

488 if timestamp is None: 

489 remaining.remove(volume_uuid) 

490 continue 

491 

492 now = time.time() 

493 if now - float(timestamp) > self.LOCKED_EXPIRATION_DELAY: 

494 self._logger( 

495 'Remove readonly timestamp on {}'.format(volume_uuid) 

496 ) 

497 volume_properties.pop(self.PROP_IS_READONLY_TIMESTAMP) 

498 remaining.remove(volume_uuid) 

499 continue 

500 

501 if not waiting: 

502 self._logger( 

503 'Volume {} is locked, waiting...'.format(volume_uuid) 

504 ) 

505 waiting = True 

506 break 

507 

508 if not remaining: 

509 break 

510 checked = remaining 

511 

512 if timeout is not None and now - start > timeout: 

513 raise LinstorVolumeManagerError( 

514 'volume `{}` is locked and timeout has been reached' 

515 .format(volume_uuid), 

516 LinstorVolumeManagerError.ERR_VOLUME_NOT_EXISTS 

517 ) 

518 

519 # We must wait to use the volume. After that we can modify it 

520 # ONLY if the SR is locked to avoid bad reads on the slaves. 

521 time.sleep(1) 

522 

523 if waiting: 

524 self._logger('No volume locked now!') 

525 

526 def introduce_volume(self, volume_uuid): 

527 pass # TODO: Implement me. 

528 

529 def resize_volume(self, volume_uuid, new_size): 

530 """ 

531 Resize a volume. 

532 :param str volume_uuid: The volume uuid to resize. 

533 :param int new_size: New size in B. 

534 """ 

535 

536 volume_name = self.get_volume_name(volume_uuid) 

537 self.ensure_volume_is_not_locked(volume_uuid) 

538 new_size = self.round_up_volume_size(new_size) 

539 

540 result = self._linstor.volume_dfn_modify( 

541 rsc_name=volume_name, 

542 volume_nr=0, 

543 size=new_size // 1024 

544 ) 

545 error_str = self._get_error_str(result) 

546 if error_str: 

547 raise LinstorVolumeManagerError( 

548 'Could not resize volume `{}` from SR `{}`: {}' 

549 .format(volume_uuid, self._group_name, error_str) 

550 ) 

551 

552 def get_volume_name(self, volume_uuid): 

553 """ 

554 Get the name of a particular volume. 

555 :param str volume_uuid: The volume uuid of the name to get. 

556 :return: The volume name. 

557 :rtype: str 

558 """ 

559 

560 self._ensure_volume_exists(volume_uuid) 

561 volume_properties = self._get_volume_properties(volume_uuid) 

562 volume_name = volume_properties.get(self.PROP_VOLUME_NAME) 

563 if volume_name: 

564 return volume_name 

565 raise LinstorVolumeManagerError( 

566 'Failed to get volume name of {}'.format(volume_uuid) 

567 ) 

568 

569 def get_volume_size(self, volume_uuid): 

570 """ 

571 Get the size of a particular volume. 

572 :param str volume_uuid: The volume uuid of the size to get. 

573 :return: The volume size. 

574 :rtype: int 

575 """ 

576 

577 volume_name = self.get_volume_name(volume_uuid) 

578 dfns = self._linstor.resource_dfn_list_raise( 

579 query_volume_definitions=True, 

580 filter_by_resource_definitions=[volume_name] 

581 ).resource_definitions 

582 

583 size = dfns[0].volume_definitions[0].size 

584 if size < 0: 

585 raise LinstorVolumeManagerError( 

586 'Failed to get volume size of: {}'.format(volume_uuid) 

587 ) 

588 return size * 1024 

589 

590 def get_volume_info(self, volume_uuid): 

591 """ 

592 Get the volume info of a particular volume. 

593 :param str volume_uuid: The volume uuid of the volume info to get. 

594 :return: The volume info. 

595 :rtype: VolumeInfo 

596 """ 

597 

598 volume_name = self.get_volume_name(volume_uuid) 

599 return self._get_volumes_info(filter=[volume_name])[volume_name] 

600 

601 def get_device_path(self, volume_uuid): 

602 """ 

603 Get the dev path of a volume. 

604 :param str volume_uuid: The volume uuid to get the dev path. 

605 :return: The current device path of the volume. 

606 :rtype: str 

607 """ 

608 

609 volume_name = self.get_volume_name(volume_uuid) 

610 return self._find_device_path(volume_uuid, volume_name) 

611 

612 def get_volume_uuid_from_device_path(self, device_path): 

613 """ 

614 Get the volume uuid of a device_path. 

615 :param str device_path: The dev path to find the volume uuid. 

616 :return: The volume uuid of the local device path. 

617 :rtype: str 

618 """ 

619 

620 expected_volume_name = \ 

621 self.get_volume_name_from_device_path(device_path) 

622 

623 volume_names = self.volumes_with_name 

624 for volume_uuid, volume_name in volume_names.items(): 

625 if volume_name == expected_volume_name: 

626 return volume_uuid 

627 

628 raise LinstorVolumeManagerError( 

629 'Unable to find volume uuid from dev path `{}`'.format(device_path) 

630 ) 

631 

632 def get_volume_name_from_device_path(self, device_path): 

633 """ 

634 Get the volume name of a device_path on the current host. 

635 :param str device_path: The dev path to find the volume name. 

636 :return: The volume name of the local device path. 

637 :rtype: str 

638 """ 

639 

640 node_name = socket.gethostname() 

641 resources = self._linstor.resource_list_raise( 

642 filter_by_nodes=[node_name] 

643 ).resources 

644 

645 real_device_path = os.path.realpath(device_path) 

646 for resource in resources: 

647 if resource.volumes[0].device_path == real_device_path: 

648 return resource.name 

649 

650 raise LinstorVolumeManagerError( 

651 'Unable to find volume name from dev path `{}`' 

652 .format(device_path) 

653 ) 

654 

655 def update_volume_uuid(self, volume_uuid, new_volume_uuid, force=False): 

656 """ 

657 Change the uuid of a volume. 

658 :param str volume_uuid: The volume to modify. 

659 :param str new_volume_uuid: The new volume uuid to use. 

660 :param bool force: If true we don't check if volume_uuid is in the

661 volume list. I.e. the volume can be marked as deleted but the volume 

662 can still be in the LINSTOR KV store if the deletion has failed. 

663 In specific cases like "undo" after a failed clone we must rename a bad 

664 deleted VDI. 

665 """ 

666 

667 self._logger( 

668 'Trying to update volume UUID {} to {}...' 

669 .format(volume_uuid, new_volume_uuid) 

670 ) 

671 if not force: 

672 self._ensure_volume_exists(volume_uuid) 

673 self.ensure_volume_is_not_locked(volume_uuid) 

674 

675 if new_volume_uuid in self._volumes: 

676 raise LinstorVolumeManagerError( 

677 'Volume `{}` already exists'.format(new_volume_uuid), 

678 LinstorVolumeManagerError.ERR_VOLUME_EXISTS 

679 ) 

680 

681 volume_properties = self._get_volume_properties(volume_uuid) 

682 if volume_properties.get(self.PROP_UPDATING_UUID_SRC): 

683 raise LinstorVolumeManagerError( 

684 'Cannot update volume uuid {}: invalid state' 

685 .format(volume_uuid) 

686 ) 

687 

688 new_volume_properties = self._get_volume_properties( 

689 new_volume_uuid 

690 ) 

691 if list(new_volume_properties.items()): 

692 raise LinstorVolumeManagerError( 

693 'Cannot update volume uuid {} to {}: ' 

694 .format(volume_uuid, new_volume_uuid) + 

695 'the latter is not empty'

696 ) 

697 

698 assert volume_properties.namespace != \ 

699 new_volume_properties.namespace 

700 

701 try: 

702 # 1. Mark new volume properties with PROP_UPDATING_UUID_SRC. 

703 # If we crash after that, the new properties can be removed 

704 # properly. 

705 new_volume_properties[self.PROP_NOT_EXISTS] = self.STATE_NOT_EXISTS 

706 new_volume_properties[self.PROP_UPDATING_UUID_SRC] = volume_uuid 

707 

708 # 2. Copy the properties. 

709 for property in [self.PROP_METADATA, self.PROP_VOLUME_NAME]: 

710 new_volume_properties[property] = \ 

711 volume_properties.get(property) 

712 

713 # 3. Ok! 

714 new_volume_properties[self.PROP_NOT_EXISTS] = self.STATE_EXISTS 

715 except Exception as e: 

716 try: 

717 new_volume_properties.clear() 

718 except Exception as clear_err:  # Don't rebind `e`, it's used below.

719 self._logger( 

720 'Failed to clear new volume properties: {} (ignoring...)' 

721 .format(clear_err)

722 ) 

723 raise LinstorVolumeManagerError( 

724 'Failed to copy volume properties: {}'.format(e) 

725 ) 

726 

727 try: 

728 # 4. After this point, it's ok we can remove the 

729 # PROP_UPDATING_UUID_SRC property and clear the src properties 

730 # without problems. 

731 volume_properties.clear() 

732 new_volume_properties.pop(self.PROP_UPDATING_UUID_SRC) 

733 except Exception as e: 

734 raise LinstorVolumeManagerError( 

735 'Failed to clear volume properties ' 

736 'after volume uuid update: {}'.format(e) 

737 ) 

738 

739 self._volumes.remove(volume_uuid) 

740 self._volumes.add(new_volume_uuid) 

741 

742 self._logger( 

743 'UUID update succeeded of {} to {}! (properties={})' 

744 .format( 

745 volume_uuid, new_volume_uuid, 

746 self._get_filtered_properties(new_volume_properties) 

747 ) 

748 ) 

749 

750 def update_volume_name(self, volume_uuid, volume_name): 

751 """ 

752 Change the volume name of a volume. 

753 :param str volume_uuid: The volume to modify. 

754 :param str volume_name: The volume_name to use. 

755 """ 

756 

757 self._ensure_volume_exists(volume_uuid) 

758 self.ensure_volume_is_not_locked(volume_uuid) 

759 if not volume_name.startswith(self.PREFIX_VOLUME): 

760 raise LinstorVolumeManagerError( 

761 'Volume name `{}` must start with `{}`'

762 .format(volume_name, self.PREFIX_VOLUME) 

763 ) 

764 

765 if volume_name not in self._fetch_resource_names(): 

766 raise LinstorVolumeManagerError( 

767 'Volume `{}` doesn\'t exist'.format(volume_name) 

768 ) 

769 

770 volume_properties = self._get_volume_properties(volume_uuid) 

771 volume_properties[self.PROP_VOLUME_NAME] = volume_name 

772 

773 def get_usage_states(self, volume_uuid): 

774 """ 

775 Check if a volume is currently used. 

776 :param str volume_uuid: The volume uuid to check. 

777 :return: A dictionary that contains states.

778 :rtype: dict(str, bool or None) 

779 """ 

780 

781 states = {} 

782 

783 volume_name = self.get_volume_name(volume_uuid) 

784 for resource_state in self._linstor.resource_list_raise( 

785 filter_by_resources=[volume_name] 

786 ).resource_states: 

787 states[resource_state.node_name] = resource_state.in_use 

788 

789 return states 
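# Illustrative example of a returned dictionary (hostnames are made up):
#   {'host-1': True, 'host-2': False, 'host-3': None}
# True means the volume is currently opened (in use) on that node, None
# means the state is unknown.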

790 

791 def get_volume_metadata(self, volume_uuid): 

792 """ 

793 Get the metadata of a volume. 

794 :return: Dictionary that contains metadata. 

795 :rtype: dict 

796 """ 

797 

798 self._ensure_volume_exists(volume_uuid) 

799 volume_properties = self._get_volume_properties(volume_uuid) 

800 metadata = volume_properties.get(self.PROP_METADATA) 

801 if metadata: 

802 metadata = json.loads(metadata) 

803 if isinstance(metadata, dict): 

804 return metadata 

805 raise LinstorVolumeManagerError( 

806 'Expected dictionary in volume metadata: {}' 

807 .format(volume_uuid) 

808 ) 

809 return {} 

810 

811 def set_volume_metadata(self, volume_uuid, metadata): 

812 """ 

813 Set the metadata of a volume. 

814 :param dict metadata: Dictionary that contains metadata. 

815 """ 

816 

817 self._ensure_volume_exists(volume_uuid) 

818 self.ensure_volume_is_not_locked(volume_uuid) 

819 

820 assert isinstance(metadata, dict) 

821 volume_properties = self._get_volume_properties(volume_uuid) 

822 volume_properties[self.PROP_METADATA] = json.dumps(metadata) 

823 

824 def update_volume_metadata(self, volume_uuid, metadata): 

825 """ 

826 Update the metadata of a volume. It modifies only the given keys.

827 Unlike set_volume_metadata, it doesn't remove unreferenced keys.

828 :param dict metadata: Dictionary that contains metadata. 

829 """ 

830 

831 self._ensure_volume_exists(volume_uuid) 

832 self.ensure_volume_is_not_locked(volume_uuid) 

833 

834 assert isinstance(metadata, dict) 

835 volume_properties = self._get_volume_properties(volume_uuid) 

836 

837 current_metadata = json.loads( 

838 volume_properties.get(self.PROP_METADATA, '{}') 

839 ) 

840 if not isinstance(current_metadata, dict):

841 raise LinstorVolumeManagerError( 

842 'Expected dictionary in volume metadata: {}' 

843 .format(volume_uuid) 

844 ) 

845 

846 for key, value in metadata.items(): 

847 current_metadata[key] = value 

848 volume_properties[self.PROP_METADATA] = json.dumps(current_metadata) 
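# Illustrative example of the merge semantics above (keys are made up):
# with stored metadata {'vdi_type': 'vhd', 'read_only': False},
#   update_volume_metadata(uuid, {'read_only': True})
# leaves {'vdi_type': 'vhd', 'read_only': True}, whereas
#   set_volume_metadata(uuid, {'read_only': True})
# would replace the whole dictionary with {'read_only': True}.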

849 

850 def shallow_clone_volume(self, volume_uuid, clone_uuid, persistent=True): 

851 """ 

852 Clone a volume. The data is not copied, this method only creates a new

853 volume of the same size. It tries to create the volume on the same

854 hosts as the source volume.

855 :param str volume_uuid: The volume to clone. 

856 :param str clone_uuid: The cloned volume. 

857 :param bool persistent: If false, the volume will be unavailable

858 on the next LinstorSR(...) constructor call.

859 :return: The current device path of the cloned volume. 

860 :rtype: str 

861 """ 

862 

863 volume_name = self.get_volume_name(volume_uuid) 

864 self.ensure_volume_is_not_locked(volume_uuid) 

865 

866 # 1. Find ideal nodes + size to use. 

867 ideal_node_names, size = self._get_volume_node_names_and_size( 

868 volume_name 

869 ) 

870 if size <= 0: 

871 raise LinstorVolumeManagerError( 

872 'Invalid size of {} for volume `{}`'.format(size, volume_name) 

873 ) 

874 

875 # 2. Find the node(s) with the maximum space. 

876 candidates = self._find_best_size_candidates() 

877 if not candidates: 

878 raise LinstorVolumeManagerError( 

879 'Unable to shallow clone volume `{}`, no free space found.'.format(volume_uuid)

880 ) 

881 

882 # 3. Compute node names and search if we can try to clone 

883 # on the same nodes as the source volume.

884 def find_best_nodes(): 

885 for candidate in candidates: 

886 for node_name in candidate.node_names: 

887 if node_name in ideal_node_names: 

888 return candidate.node_names 

889 

890 node_names = find_best_nodes() 

891 if not node_names: 

892 node_names = candidates[0].node_names 

893 

894 if len(node_names) < self._redundancy: 

895 raise LinstorVolumeManagerError( 

896 'Unable to shallow clone volume `{}`, '.format(volume_uuid) + 

897 '{} node(s) are required to clone, found: {}'.format(

898 self._redundancy, len(node_names) 

899 ) 

900 ) 

901 

902 # 4. Compute resources to create. 

903 clone_volume_name = self.build_volume_name(util.gen_uuid()) 

904 diskless_node_names = self._get_node_names() 

905 resources = [] 

906 for node_name in node_names: 

907 diskless_node_names.remove(node_name) 

908 resources.append(linstor.ResourceData( 

909 node_name=node_name, 

910 rsc_name=clone_volume_name, 

911 storage_pool=self._group_name 

912 )) 

913 for node_name in diskless_node_names: 

914 resources.append(linstor.ResourceData( 

915 node_name=node_name, 

916 rsc_name=clone_volume_name, 

917 diskless=True 

918 )) 

919 

920 # 5. Create resources! 

921 def clean(properties): 

922 try: 

923 self._destroy_volume(clone_uuid, properties) 

924 except Exception as e: 

925 self._logger( 

926 'Unable to destroy volume {} after shallow clone fail: {}' 

927 .format(clone_uuid, e) 

928 ) 

929 

930 def create(): 

931 try: 

932 volume_properties = self._create_volume_with_properties( 

933 clone_uuid, clone_volume_name, size, 

934 place_resources=False 

935 ) 

936 

937 result = self._linstor.resource_create(resources) 

938 error_str = self._get_error_str(result) 

939 if error_str: 

940 raise LinstorVolumeManagerError( 

941 'Could not create cloned volume `{}` of `{}` from ' 

942 'SR `{}`: {}'.format( 

943 clone_uuid, volume_uuid, self._group_name, 

944 error_str 

945 ) 

946 ) 

947 return volume_properties 

948 except Exception: 

949 clean(volume_properties) 

950 raise 

951 

952 # Retry because we can get errors like this: 

953 # "Resource disappeared while waiting for it to be ready" or 

954 # "Resource did not became ready on node 'XXX' within reasonable time, check Satellite for errors." 

955 # in the LINSTOR server. 

956 volume_properties = util.retry(create, maxretry=5) 

957 

958 try: 

959 device_path = self._find_device_path(clone_uuid, clone_volume_name) 

960 if persistent: 

961 volume_properties[self.PROP_NOT_EXISTS] = self.STATE_EXISTS 

962 self._volumes.add(clone_uuid) 

963 return device_path 

964 except Exception as e: 

965 clean(volume_properties) 

966 raise 

967 

968 def remove_resourceless_volumes(self): 

969 """ 

970 Remove all volumes without a valid or non-empty name

971 (i.e. without a LINSTOR resource). It differs from the

972 LinstorVolumeManager constructor, whose `repair` param

973 removes volumes with `PROP_NOT_EXISTS` set to 1.

974 """ 

975 

976 resource_names = self._fetch_resource_names() 

977 for volume_uuid, volume_name in self.volumes_with_name.items(): 

978 if not volume_name or volume_name not in resource_names: 

979 self.destroy_volume(volume_uuid) 

980 

981 def destroy(self, force=False): 

982 """ 

983 Destroy this SR. Object should not be used after that. 

984 :param bool force: If true, try to destroy the volumes first.

985 """ 

986 

987 if force:

988 for volume_uuid in list(self._volumes):  # destroy_volume() mutates the set.

989 self.destroy_volume(volume_uuid) 

990 

991 # TODO: Throw exceptions in the helpers below if necessary. 

992 # TODO: What's the required action if there are remaining volumes?

993 

994 self._destroy_resource_group(self._linstor, self._group_name) 

995 

996 pools = self._linstor.storage_pool_list_raise( 

997 filter_by_stor_pools=[self._group_name] 

998 ).storage_pools 

999 for pool in pools: 

1000 self._destroy_storage_pool( 

1001 self._linstor, pool.name, pool.node_name 

1002 ) 

1003 

1004 def find_up_to_date_diskfull_nodes(self, volume_uuid): 

1005 """ 

1006 Find all nodes that contain a specific volume using diskful disks.

1007 The disk must be up to date to be used.

1008 :param str volume_uuid: The volume to use. 

1009 :return: The available node names and whether the volume is in use.

1010 :rtype: tuple(set(str), bool) 

1011 """ 

1012 

1013 volume_name = self.get_volume_name(volume_uuid) 

1014 

1015 in_use = False 

1016 node_names = set() 

1017 resource_list = self._linstor.resource_list_raise( 

1018 filter_by_resources=[volume_name] 

1019 ) 

1020 for resource_state in resource_list.resource_states: 

1021 volume_state = resource_state.volume_states[0] 

1022 if volume_state.disk_state == 'UpToDate': 

1023 node_names.add(resource_state.node_name) 

1024 if resource_state.in_use: 

1025 in_use = True 

1026 

1027 return (node_names, in_use) 

1028 

1029 @classmethod 

1030 def create_sr( 

1031 cls, uri, group_name, node_names, redundancy, 

1032 thin_provisioning=False, 

1033 logger=default_logger.__func__ 

1034 ): 

1035 """ 

1036 Create a new SR on the given nodes. 

1037 :param str uri: URI to communicate with the LINSTOR controller. 

1038 :param str group_name: The SR group_name to use. 

1039 :param list[str] node_names: String list of nodes. 

1040 :param int redundancy: How many copies of each volume should we store?

1041 :param function logger: Function to log messages. 

1042 :return: A new LinstorVolumeManager instance.

1043 :rtype: LinstorVolumeManager

1044 """ 

1045 

1046 # 1. Check if SR already exists. 

1047 lin = cls._create_linstor_instance(uri) 

1048 driver_pool_name = group_name 

1049 group_name = cls._build_group_name(group_name) 

1050 pools = lin.storage_pool_list_raise(filter_by_stor_pools=[group_name]) 

1051 

1052 # TODO: Maybe if the SR already exists and if the nodes are the same, 

1053 # we can try to use it directly. 

1054 pools = pools.storage_pools 

1055 if pools: 

1056 existing_node_names = [pool.node_name for pool in pools] 

1057 raise LinstorVolumeManagerError( 

1058 'Unable to create SR `{}`. It already exists on node(s): {}' 

1059 .format(group_name, existing_node_names) 

1060 ) 

1061 

1062 if lin.resource_group_list_raise( 

1063 [group_name] 

1064 ).resource_groups: 

1065 raise LinstorVolumeManagerError( 

1066 'Unable to create SR `{}`: The group name already exists' 

1067 .format(group_name) 

1068 ) 

1069 

1070 if thin_provisioning: 

1071 driver_pool_parts = driver_pool_name.split('/') 

1072 if not len(driver_pool_parts) == 2: 

1073 raise LinstorVolumeManagerError( 

1074 'Invalid group name using thin provisioning. ' 

1075 'Expected format: `VG/LV`'

1076 ) 

1077 

1078 # 2. Create storage pool on each node + resource group. 

1079 i = 0 

1080 try: 

1081 # 2.a. Create storage pools. 

1082 while i < len(node_names): 

1083 node_name = node_names[i] 

1084 

1085 result = lin.storage_pool_create( 

1086 node_name=node_name, 

1087 storage_pool_name=group_name, 

1088 storage_driver='LVM_THIN' if thin_provisioning else 'LVM', 

1089 driver_pool_name=driver_pool_name 

1090 ) 

1091 

1092 error_str = cls._get_error_str(result) 

1093 if error_str: 

1094 raise LinstorVolumeManagerError( 

1095 'Could not create SP `{}` on node `{}`: {}'.format( 

1096 group_name, 

1097 node_name, 

1098 error_str 

1099 ) 

1100 ) 

1101 i += 1 

1102 

1103 # 2.b. Create resource group. 

1104 result = lin.resource_group_create( 

1105 name=group_name, 

1106 place_count=redundancy, 

1107 storage_pool=group_name, 

1108 diskless_on_remaining=True 

1109 ) 

1110 error_str = cls._get_error_str(result) 

1111 if error_str: 

1112 raise LinstorVolumeManagerError( 

1113 'Could not create RG `{}`: {}'.format( 

1114 group_name, error_str 

1115 ) 

1116 ) 

1117 

1118 # 2.c. Create volume group. 

1119 result = lin.volume_group_create(group_name) 

1120 error_str = cls._get_error_str(result) 

1121 if error_str: 

1122 raise LinstorVolumeManagerError( 

1123 'Could not create VG `{}`: {}'.format( 

1124 group_name, error_str 

1125 ) 

1126 ) 

1127 

1128 # 3. Remove storage pools/resource/volume group in the case of errors. 

1129 except Exception as e: 

1130 try: 

1131 cls._destroy_resource_group(lin, group_name) 

1132 except Exception: 

1133 pass 

1134 j = 0 

1135 i = min(i, len(node_names) - 1) 

1136 while j <= i: 

1137 try: 

1138 cls._destroy_storage_pool(lin, group_name, node_names[j]) 

1139 except Exception: 

1140 pass 

1141 j += 1 

1142 raise e 

1143 

1144 # 4. Return new instance. 

1145 instance = cls.__new__(cls) 

1146 instance._uri = uri 

1147 instance._linstor = lin 

1148 instance._logger = logger 

1149 instance._redundancy = redundancy 

1150 instance._group_name = group_name 

1151 instance._volumes = set() 

1152 return instance 
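# Usage sketch (illustrative; the URI, group name and hostnames are
# assumptions, not values taken from a real pool):
#   manager = LinstorVolumeManager.create_sr(
#       'linstor://192.168.0.1', 'linstor_group',
#       node_names=['host-1', 'host-2', 'host-3'],
#       redundancy=2
#   )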

1153 

1154 @classmethod 

1155 def build_device_path(cls, volume_name): 

1156 """ 

1157 Build a device path given a volume name. 

1158 :param str volume_name: The volume name to use. 

1159 :return: A device path (which may or may not exist yet).

1160 :rtype: str 

1161 """ 

1162 

1163 return '{}{}/0'.format(cls.DEV_ROOT_PATH, volume_name) 
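# Illustrative example:
#   build_device_path('xcp-volume-50e93e2b-...') returns
#   '/dev/drbd/by-res/xcp-volume-50e93e2b-.../0'
# (the uuid is shortened/made up; volume number 0 is always used).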

1164 

1165 @classmethod 

1166 def build_volume_name(cls, base_name): 

1167 """ 

1168 Build a volume name given a base name (i.e. a UUID). 

1169 :param str base_name: The base name to use.

1170 :return: A volume name.

1171 :rtype: str 

1172 """ 

1173 return '{}{}'.format(cls.PREFIX_VOLUME, base_name) 

1174 

1175 @classmethod 

1176 def round_up_volume_size(cls, volume_size): 

1177 """ 

1178 Align volume size on higher multiple of BLOCK_SIZE. 

1179 :param int volume_size: The volume size to align. 

1180 :return: An aligned volume size. 

1181 :rtype: int 

1182 """ 

1183 return round_up(volume_size, cls.BLOCK_SIZE) 

1184 

1185 @classmethod 

1186 def round_down_volume_size(cls, volume_size): 

1187 """ 

1188 Align volume size on lower multiple of BLOCK_SIZE. 

1189 :param int volume_size: The volume size to align. 

1190 :return: An aligned volume size. 

1191 :rtype: int 

1192 """ 

1193 return round_down(volume_size, cls.BLOCK_SIZE) 

1194 

1195 # -------------------------------------------------------------------------- 

1196 # Private helpers. 

1197 # -------------------------------------------------------------------------- 

1198 

1199 def _ensure_volume_exists(self, volume_uuid): 

1200 if volume_uuid not in self._volumes: 

1201 raise LinstorVolumeManagerError( 

1202 'volume `{}` doesn\'t exist'.format(volume_uuid), 

1203 LinstorVolumeManagerError.ERR_VOLUME_NOT_EXISTS 

1204 ) 

1205 

1206 def _find_best_size_candidates(self): 

1207 result = self._linstor.resource_group_qmvs(self._group_name) 

1208 error_str = self._get_error_str(result) 

1209 if error_str: 

1210 raise LinstorVolumeManagerError( 

1211 'Failed to get max volume size allowed of SR `{}`: {}'.format( 

1212 self._group_name, 

1213 error_str 

1214 ) 

1215 ) 

1216 return result[0].candidates 

1217 

1218 def _fetch_resource_names(self): 

1219 resource_names = set() 

1220 dfns = self._linstor.resource_dfn_list_raise().resource_definitions 

1221 for dfn in dfns: 

1222 if dfn.resource_group_name == self._group_name and \ 

1223 linstor.consts.FLAG_DELETE not in dfn.flags: 

1224 resource_names.add(dfn.name) 

1225 return resource_names 

1226 

1227 def _get_volumes_info(self, filter=None): 

1228 all_volume_info = {} 

1229 resources = self._linstor.resource_list_raise( 

1230 filter_by_resources=filter 

1231 ) 

1232 for resource in resources.resources: 

1233 if resource.name not in all_volume_info: 

1234 current = all_volume_info[resource.name] = self.VolumeInfo( 

1235 resource.name 

1236 ) 

1237 else: 

1238 current = all_volume_info[resource.name] 

1239 

1240 for volume in resource.volumes: 

1241 # We ignore diskless pools of the form "DfltDisklessStorPool". 

1242 if volume.storage_pool_name == self._group_name: 

1243 if volume.allocated_size < 0: 

1244 raise LinstorVolumeManagerError( 

1245 'Failed to get allocated size of `{}` on `{}`' 

1246 .format(resource.name, volume.storage_pool_name) 

1247 ) 

1248 current.physical_size += volume.allocated_size 

1249 

1250 if volume.usable_size < 0: 

1251 raise LinstorVolumeManagerError( 

1252 'Failed to get usable size of `{}` on `{}`' 

1253 .format(resource.name, volume.storage_pool_name) 

1254 ) 

1255 virtual_size = volume.usable_size 

1256 

1257 current.virtual_size = current.virtual_size and \ 

1258 min(current.virtual_size, virtual_size) or virtual_size 

1259 

1260 for current in all_volume_info.values(): 

1261 current.physical_size *= 1024 

1262 current.virtual_size *= 1024 

1263 

1264 return all_volume_info 

1265 

1266 def _get_volume_node_names_and_size(self, volume_name): 

1267 node_names = set() 

1268 size = -1 

1269 for resource in self._linstor.resource_list_raise( 

1270 filter_by_resources=[volume_name] 

1271 ).resources: 

1272 for volume in resource.volumes: 

1273 # We ignore diskless pools of the form "DfltDisklessStorPool". 

1274 if volume.storage_pool_name == self._group_name: 

1275 node_names.add(resource.node_name) 

1276 

1277 current_size = volume.usable_size 

1278 if current_size < 0: 

1279 raise LinstorVolumeManagerError( 

1280 'Failed to get usable size of `{}` on `{}`' 

1281 .format(resource.name, volume.storage_pool_name) 

1282 ) 

1283 

1284 if size < 0: 

1285 size = current_size 

1286 else: 

1287 size = min(size, current_size) 

1288 

1289 return (node_names, size * 1024) 

1290 

1291 def _compute_size(self, attr): 

1292 pools = self._linstor.storage_pool_list_raise( 

1293 filter_by_stor_pools=[self._group_name] 

1294 ).storage_pools 

1295 

1296 capacity = 0 

1297 for pool in pools: 

1298 space = pool.free_space 

1299 if space: 

1300 size = getattr(space, attr) 

1301 if size < 0: 

1302 raise LinstorVolumeManagerError( 

1303 'Failed to get pool {} attr of `{}`' 

1304 .format(attr, pool.node_name) 

1305 ) 

1306 capacity += size 

1307 return capacity * 1024 

1308 

1309 def _get_node_names(self): 

1310 node_names = set() 

1311 pools = self._linstor.storage_pool_list_raise( 

1312 filter_by_stor_pools=[self._group_name] 

1313 ).storage_pools 

1314 for pool in pools: 

1315 node_names.add(pool.node_name) 

1316 return node_names 

1317 

1318 def _check_volume_creation_errors(self, result, volume_uuid): 

1319 errors = self._filter_errors(result) 

1320 if self._check_errors(errors, [ 

1321 linstor.consts.FAIL_EXISTS_RSC, linstor.consts.FAIL_EXISTS_RSC_DFN 

1322 ]): 

1323 raise LinstorVolumeManagerError( 

1324 'Failed to create volume `{}` from SR `{}`, it already exists' 

1325 .format(volume_uuid, self._group_name), 

1326 LinstorVolumeManagerError.ERR_VOLUME_EXISTS 

1327 ) 

1328 

1329 if errors: 

1330 raise LinstorVolumeManagerError( 

1331 'Failed to create volume `{}` from SR `{}`: {}'.format( 

1332 volume_uuid, 

1333 self._group_name, 

1334 self._get_error_str(errors) 

1335 ) 

1336 ) 

1337 

1338 def _create_volume(self, volume_uuid, volume_name, size, place_resources): 

1339 size = self.round_up_volume_size(size) 

1340 

1341 self._check_volume_creation_errors(self._linstor.resource_group_spawn( 

1342 rsc_grp_name=self._group_name, 

1343 rsc_dfn_name=volume_name, 

1344 vlm_sizes=['{}B'.format(size)], 

1345 definitions_only=not place_resources 

1346 ), volume_uuid) 

1347 

1348 def _create_volume_with_properties( 

1349 self, volume_uuid, volume_name, size, place_resources 

1350 ): 

1351 if self.check_volume_exists(volume_uuid): 

1352 raise LinstorVolumeManagerError( 

1353 'Could not create volume `{}` from SR `{}`, it already exists' 

1354 .format(volume_uuid, self._group_name) + ' in properties', 

1355 LinstorVolumeManagerError.ERR_VOLUME_EXISTS 

1356 ) 

1357 

1358 if volume_name in self._fetch_resource_names(): 

1359 raise LinstorVolumeManagerError( 

1360 'Could not create volume `{}` from SR `{}`, '.format( 

1361 volume_uuid, self._group_name 

1362 ) + 'resource of the same name already exists in LINSTOR' 

1363 ) 

1364 

1365 # I am paranoid. 

1366 volume_properties = self._get_volume_properties(volume_uuid) 

1367 if (volume_properties.get(self.PROP_NOT_EXISTS) is not None): 

1368 raise LinstorVolumeManagerError( 

1369 'Could not create volume `{}`, '.format(volume_uuid) + 

1370 'properties already exist' 

1371 ) 

1372 

1373 try: 

1374 volume_properties[self.PROP_NOT_EXISTS] = self.STATE_CREATING 

1375 volume_properties[self.PROP_VOLUME_NAME] = volume_name 

1376 

1377 self._create_volume( 

1378 volume_uuid, volume_name, size, place_resources 

1379 ) 

1380 

1381 return volume_properties 

1382 except LinstorVolumeManagerError as e: 

1383 # Do not destroy existing resource! 

1384 # In theory we can't get this error because we check this case

1385 # before the `self._create_volume` call.

1386 # It can only happen if the same volume uuid is used in the same 

1387 # call in another host. 

1388 if e.code == LinstorVolumeManagerError.ERR_VOLUME_EXISTS: 

1389 raise 

1390 self._force_destroy_volume(volume_uuid, volume_properties) 

1391 raise 

1392 except Exception: 

1393 self._force_destroy_volume(volume_uuid, volume_properties) 

1394 raise 

1395 

1396 def _find_device_path(self, volume_uuid, volume_name): 

1397 current_device_path = self._request_device_path( 

1398 volume_uuid, volume_name, activate=True 

1399 ) 

1400 

1401 # We use realpath here to get the /dev/drbd<id> path instead of 

1402 # /dev/drbd/by-res/<resource_name>. 

1403 expected_device_path = self.build_device_path(volume_name) 

1404 util.wait_for_path(expected_device_path, 5) 

1405 

1406 device_realpath = os.path.realpath(expected_device_path) 

1407 if current_device_path != device_realpath: 

1408 raise LinstorVolumeManagerError( 

1409 'Invalid path, current={}, expected={} (realpath={})' 

1410 .format( 

1411 current_device_path, 

1412 expected_device_path, 

1413 device_realpath 

1414 ) 

1415 ) 

1416 return expected_device_path 

1417 

1418 def _request_device_path(self, volume_uuid, volume_name, activate=False): 

1419 node_name = socket.gethostname() 

1420 resources = self._linstor.resource_list( 

1421 filter_by_nodes=[node_name], 

1422 filter_by_resources=[volume_name] 

1423 ) 

1424 

1425 if not resources or not resources[0]: 

1426 raise LinstorVolumeManagerError( 

1427 'No response list for dev path of `{}`'.format(volume_uuid) 

1428 ) 

1429 if isinstance(resources[0], linstor.responses.ResourceResponse): 

1430 if not resources[0].resources: 

1431 if activate: 

1432 self._activate_device_path(node_name, volume_name) 

1433 return self._request_device_path(volume_uuid, volume_name) 

1434 raise LinstorVolumeManagerError( 

1435 'Empty dev path for `{}`, but definition "seems" to exist' 

1436 .format(volume_uuid) 

1437 ) 

1438 # Contains a path of the /dev/drbd<id> form. 

1439 return resources[0].resources[0].volumes[0].device_path 

1440 

1441 raise LinstorVolumeManagerError( 

1442 'Unable to get volume dev path `{}`: {}'.format( 

1443 volume_uuid, str(resources[0]) 

1444 ) 

1445 ) 

1446 

1447 def _activate_device_path(self, node_name, volume_name): 

1448 result = self._linstor.resource_create([ 

1449 linstor.ResourceData(node_name, volume_name, diskless=True) 

1450 ]) 

1451 if linstor.Linstor.all_api_responses_no_error(result): 

1452 return 

1453 errors = linstor.Linstor.filter_api_call_response_errors(result) 

1454 if len(errors) == 1 and errors[0].is_error( 

1455 linstor.consts.FAIL_EXISTS_RSC 

1456 ): 

1457 return 

1458 

1459 raise LinstorVolumeManagerError( 

1460 'Unable to activate device path of `{}` on node `{}`: {}' 

1461 .format(volume_name, node_name, ', '.join( 

1462 [str(x) for x in result])) 

1463 ) 

1464 

1465 def _destroy_resource(self, resource_name): 

1466 result = self._linstor.resource_dfn_delete(resource_name) 

1467 error_str = self._get_error_str(result) 

1468 if error_str: 

1469 raise LinstorVolumeManagerError( 

1470 'Could not destroy resource `{}` from SR `{}`: {}' 

1471 .format(resource_name, self._group_name, error_str) 

1472 ) 

1473 

1474 def _destroy_volume(self, volume_uuid, volume_properties): 

1475 assert volume_properties.namespace == \ 

1476 self._build_volume_namespace(volume_uuid) 

1477 

1478 try: 

1479 volume_name = volume_properties.get(self.PROP_VOLUME_NAME) 

1480 if volume_name in self._fetch_resource_names(): 

1481 self._destroy_resource(volume_name) 

1482 

1483 # Assume this call is atomic. 

1484 volume_properties.clear() 

1485 except Exception as e: 

1486 raise LinstorVolumeManagerError( 

1487 'Cannot destroy volume `{}`: {}'.format(volume_uuid, e) 

1488 ) 

1489 

1490 def _force_destroy_volume(self, volume_uuid, volume_properties): 

1491 try: 

1492 self._destroy_volume(volume_uuid, volume_properties) 

1493 except Exception as e: 

1494 self._logger('Ignore fail: {}'.format(e)) 

1495 

1496 def _build_volumes(self, repair): 

1497 properties = linstor.KV( 

1498 self._get_store_name(), 

1499 uri=self._uri, 

1500 namespace=self._build_volume_namespace() 

1501 ) 

1502 

1503 resource_names = self._fetch_resource_names() 

1504 

1505 self._volumes = set() 

1506 

1507 updating_uuid_volumes = self._get_volumes_by_property( 

1508 self.REG_UPDATING_UUID_SRC, ignore_inexisting_volumes=False 

1509 ) 

1510 if updating_uuid_volumes and not repair: 

1511 raise LinstorVolumeManagerError( 

1512 'Cannot build LINSTOR volume list: ' 

1513 'there are invalid "updating uuid volumes", repair is required'

1514 ) 

1515 

1516 existing_volumes = self._get_volumes_by_property( 

1517 self.REG_NOT_EXISTS, ignore_inexisting_volumes=False 

1518 ) 

1519 for volume_uuid, not_exists in existing_volumes.items(): 

1520 properties.namespace = self._build_volume_namespace( 

1521 volume_uuid 

1522 ) 

1523 

1524 src_uuid = properties.get(self.PROP_UPDATING_UUID_SRC) 

1525 if src_uuid: 

1526 self._logger( 

1527 'Ignoring volume during manager initialization with prop ' 

1528 'PROP_UPDATING_UUID_SRC: {} (properties={})'

1529 .format( 

1530 volume_uuid, 

1531 self._get_filtered_properties(properties) 

1532 ) 

1533 ) 

1534 continue 

1535 

1536 # Insert volume in list if the volume exists. Or if the volume 

1537 # is being created and a slave wants to use it (repair = False). 

1538 # 

1539 # If we are on the master and if repair is True and state is 

1540 # Creating, it's probably a bug or crash: the creation process has 

1541 # been stopped. 

1542 if not_exists == self.STATE_EXISTS or ( 

1543 not repair and not_exists == self.STATE_CREATING 

1544 ): 

1545 self._volumes.add(volume_uuid) 

1546 continue 

1547 

1548 if not repair: 

1549 self._logger( 

1550 'Ignoring bad volume during manager initialization: {} ' 

1551 '(properties={})'.format( 

1552 volume_uuid, 

1553 self._get_filtered_properties(properties) 

1554 ) 

1555 ) 

1556 continue 

1557 

1558 # Remove bad volume. 

1559 try: 

1560 self._logger( 

1561 'Removing bad volume during manager initialization: {} ' 

1562 '(properties={})'.format( 

1563 volume_uuid, 

1564 self._get_filtered_properties(properties) 

1565 ) 

1566 ) 

1567 volume_name = properties.get(self.PROP_VOLUME_NAME) 

1568 

1569 # Little optimization, don't call `self._destroy_volume`, 

1570 # we already have resource name list. 

1571 if volume_name in resource_names: 

1572 self._destroy_resource(volume_name) 

1573 

1574 # Assume this call is atomic. 

1575 properties.clear() 

1576 except Exception as e: 

1577 # Do not raise, we don't want to block user action. 

1578 self._logger( 

1579 'Cannot clean volume {}: {}'.format(volume_uuid, e) 

1580 ) 

1581 

1582 for dest_uuid, src_uuid in updating_uuid_volumes.items(): 

1583 dest_properties = self._get_volume_properties(dest_uuid) 

1584 if int(dest_properties.get(self.PROP_NOT_EXISTS) or 

1585 self.STATE_EXISTS): 

1586 dest_properties.clear() 

1587 continue 

1588 

1589 src_properties = self._get_volume_properties(src_uuid) 

1590 src_properties.clear() 

1591 

1592 dest_properties.pop(self.PROP_UPDATING_UUID_SRC) 

1593 

1594 if src_uuid in self._volumes: 

1595 self._volumes.remove(src_uuid) 

1596 self._volumes.add(dest_uuid) 

1597 

1598 def _get_sr_properties(self): 

1599 return linstor.KV( 

1600 self._get_store_name(), 

1601 uri=self._uri, 

1602 namespace=self._build_sr_namespace() 

1603 ) 

1604 

1605 def _get_volumes_by_property( 

1606 self, reg_prop, ignore_inexisting_volumes=True 

1607 ): 

1608 base_properties = linstor.KV( 

1609 self._get_store_name(), 

1610 uri=self._uri, 

1611 namespace=self._build_volume_namespace() 

1612 ) 

1613 

1614 volume_properties = {} 

1615 for volume_uuid in self._volumes: 

1616 volume_properties[volume_uuid] = '' 

1617 

1618 for key, value in base_properties.items(): 

1619 res = reg_prop.match(key) 

1620 if res: 

1621 volume_uuid = res.groups()[0] 

1622 if not ignore_inexisting_volumes or \ 

1623 volume_uuid in self._volumes: 

1624 volume_properties[volume_uuid] = value 

1625 

1626 return volume_properties 

1627 

1628 def _get_volume_properties(self, volume_uuid): 

1629 return linstor.KV( 

1630 self._get_store_name(), 

1631 uri=self._uri, 

1632 namespace=self._build_volume_namespace(volume_uuid) 

1633 ) 

1634 

1635 def _get_store_name(self): 

1636 return 'xcp-sr-{}'.format(self._group_name) 

1637 

1638 @classmethod 

1639 def _build_sr_namespace(cls): 

1640 return '/{}/'.format(cls.NAMESPACE_SR) 

1641 

1642 @classmethod 

1643 def _build_volume_namespace(cls, volume_uuid=None): 

1644 # Return a path to all volumes if `volume_uuid` is not given. 

1645 if volume_uuid is None: 

1646 return '/{}/'.format(cls.NAMESPACE_VOLUME) 

1647 return '/{}/{}/'.format(cls.NAMESPACE_VOLUME, volume_uuid) 
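# Illustrative example: _build_volume_namespace() returns '/volume/'
# (the root of all volume properties), while
# _build_volume_namespace('50e93e2b-...') returns '/volume/50e93e2b-.../',
# the namespace of a single volume (uuid shortened/made up).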

1648 

1649 @classmethod 

1650 def _get_error_str(cls, result): 

1651 return ', '.join([ 

1652 err.message for err in cls._filter_errors(result) 

1653 ]) 

1654 

1655 @classmethod 

1656 def _create_linstor_instance(cls, uri): 

1657 def connect(): 

1658 instance = linstor.Linstor(uri, keep_alive=True) 

1659 instance.connect() 

1660 return instance 

1661 

1662 return util.retry( 

1663 connect, 

1664 maxretry=60, 

1665 exceptions=[linstor.errors.LinstorNetworkError] 

1666 ) 

1667 

1668 @classmethod 

1669 def _destroy_storage_pool(cls, lin, group_name, node_name): 

1670 result = lin.storage_pool_delete(node_name, group_name) 

1671 error_str = cls._get_error_str(result) 

1672 if error_str: 

1673 raise LinstorVolumeManagerError( 

1674 'Failed to destroy SP `{}` on node `{}`: {}'.format( 

1675 group_name, 

1676 node_name, 

1677 error_str 

1678 ) 

1679 ) 

1680 

1681 @classmethod 

1682 def _destroy_resource_group(cls, lin, group_name): 

1683 result = lin.resource_group_delete(group_name) 

1684 error_str = cls._get_error_str(result) 

1685 if error_str: 

1686 raise LinstorVolumeManagerError( 

1687 'Failed to destroy RG `{}`: {}'.format(group_name, error_str) 

1688 ) 

1689 

1690 @classmethod 

1691 def _build_group_name(cls, base_name): 

1692 # If thin provisioning is used we have a path like this: 

1693 # `VG/LV`. "/" is not accepted by LINSTOR. 

1694 return '{}{}'.format(cls.PREFIX_SR, base_name.replace('/', '_')) 

1695 

1696 @staticmethod 

1697 def _get_filtered_properties(properties): 

1698 return dict(properties.items()) 

1699 

1700 @staticmethod 

1701 def _filter_errors(result): 

1702 return [ 

1703 err for err in result 

1704 if hasattr(err, 'is_error') and err.is_error() 

1705 ] 

1706 

1707 @staticmethod 

1708 def _check_errors(result, codes): 

1709 for err in result: 

1710 for code in codes: 

1711 if err.is_error(code): 

1712 return True 

1713 return False