Coverage for drivers/LinstorSR.py: 9%

1#!/usr/bin/env python3
2#
3# Copyright (C) 2020 Vates SAS - ronan.abhamon@vates.fr
4#
5# This program is free software: you can redistribute it and/or modify
6# it under the terms of the GNU General Public License as published by
7# the Free Software Foundation, either version 3 of the License, or
8# (at your option) any later version.
9# This program is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU General Public License for more details.
13#
14# You should have received a copy of the GNU General Public License
15# along with this program. If not, see <https://www.gnu.org/licenses/>.
17from sm_typing import Optional, override
19from constants import CBTLOG_TAG
21try:
22 from linstorjournaler import LinstorJournaler
23 from linstorvhdutil import LinstorVhdUtil
24 from linstorvolumemanager import get_controller_uri
25 from linstorvolumemanager import get_controller_node_name
26 from linstorvolumemanager import LinstorVolumeManager
27 from linstorvolumemanager import LinstorVolumeManagerError
28 from linstorvolumemanager import PERSISTENT_PREFIX
30 LINSTOR_AVAILABLE = True
31except ImportError:
32 PERSISTENT_PREFIX = 'unknown'
34 LINSTOR_AVAILABLE = False
36from lock import Lock
37import blktap2
38import cleanup
39import errno
40import functools
41import lvutil
42import os
43import re
44import scsiutil
45import signal
46import socket
47import SR
48import SRCommand
49import subprocess
50import sys
51import time
52import traceback
53import util
54import VDI
55import vhdutil
56import xml.etree.ElementTree as xml_parser
57import xmlrpc.client
58import xs_errors
60from srmetadata import \
61 NAME_LABEL_TAG, NAME_DESCRIPTION_TAG, IS_A_SNAPSHOT_TAG, SNAPSHOT_OF_TAG, \
62 TYPE_TAG, VDI_TYPE_TAG, READ_ONLY_TAG, SNAPSHOT_TIME_TAG, \
63 METADATA_OF_POOL_TAG
65HIDDEN_TAG = 'hidden'
67XHA_CONFIG_PATH = '/etc/xensource/xhad.conf'
69FORK_LOG_DAEMON = '/opt/xensource/libexec/fork-log-daemon'
71# This flag can be disabled to debug the DRBD layer.
72# When this config var is False, the HA can only be used under
73# specific conditions:
74# - Only one heartbeat diskless VDI is present in the pool.
75# The other heartbeat volumes must be diskful and limited to a maximum of 3.
76USE_HTTP_NBD_SERVERS = True
78# Useful flag to trace calls using cProfile.
79TRACE_PERFS = False
81# Enable/Disable VHD key hash support.
82USE_KEY_HASH = False
84# Special volumes.
85HA_VOLUME_NAME = PERSISTENT_PREFIX + 'ha-statefile'
86REDO_LOG_VOLUME_NAME = PERSISTENT_PREFIX + 'redo-log'
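# Fixed LINSTOR resource names used for the HA statefile and redo-log VDIs
# (LinstorVDI.create passes them as volume_name for these VDI types).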
88# ==============================================================================
90# TODO: Support 'VDI_INTRODUCE', 'VDI_RESET_ON_BOOT/2', 'SR_TRIM',
91# 'VDI_CONFIG_CBT', 'SR_PROBE'
93CAPABILITIES = [
94 'ATOMIC_PAUSE',
95 'SR_UPDATE',
96 'VDI_CREATE',
97 'VDI_DELETE',
98 'VDI_UPDATE',
99 'VDI_ATTACH',
100 'VDI_DETACH',
101 'VDI_ACTIVATE',
102 'VDI_DEACTIVATE',
103 'VDI_CLONE',
104 'VDI_MIRROR',
105 'VDI_RESIZE',
106 'VDI_SNAPSHOT',
107 'VDI_GENERATE_CONFIG'
108]
110CONFIGURATION = [
111 ['group-name', 'LVM group name'],
112 ['redundancy', 'replication count'],
113 ['provisioning', '"thin" or "thick" are accepted (optional, defaults to thin)'],
114 ['monitor-db-quorum', 'disable controller when only one host is online (optional, defaults to true)']
115]
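# Illustrative device-config example (values are hypothetical; see also the
# usage example before the LinstorSR class below):
#  xe sr-create type=linstor name-label=linstor-sr \
#   device-config:group-name=linstor_group/thin_device \
#   device-config:redundancy=2 \
#   device-config:provisioning=thin \
#   device-config:monitor-db-quorum=true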
117DRIVER_INFO = {
118 'name': 'LINSTOR resources on XCP-ng',
119 'description': 'SR plugin which uses Linstor to manage VDIs',
120 'vendor': 'Vates',
121 'copyright': '(C) 2020 Vates',
122 'driver_version': '1.0',
123 'required_api_version': '1.0',
124 'capabilities': CAPABILITIES,
125 'configuration': CONFIGURATION
126}
128DRIVER_CONFIG = {'ATTACH_FROM_CONFIG_WITH_TAPDISK': False}
130OPS_EXCLUSIVE = [
131 'sr_create', 'sr_delete', 'sr_attach', 'sr_detach', 'sr_scan',
132 'sr_update', 'sr_probe', 'vdi_init', 'vdi_create', 'vdi_delete',
133 'vdi_attach', 'vdi_detach', 'vdi_clone', 'vdi_snapshot',
134]
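# Exclusive operations: assigned to self.ops_exclusive in load() and checked
# in _reconnect to decide whether the volume manager may repair the SR
# (an exclusive op guarantees the SR lock is held, see the comment there).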
136# ==============================================================================
137# Misc helpers used by LinstorSR and linstor-thin plugin.
138# ==============================================================================
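# attach_thin/detach_thin implement thin provisioning around tapdisk:
# on attach, the DRBD volume is inflated so it can hold the full VHD
# (virtual size + VHD metadata); on detach, it is deflated back to the
# physical size actually used by the VHD. RAW VDIs are left untouched.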
141def attach_thin(session, journaler, linstor, sr_uuid, vdi_uuid):
142 volume_metadata = linstor.get_volume_metadata(vdi_uuid)
143 image_type = volume_metadata.get(VDI_TYPE_TAG)
144 if image_type == vhdutil.VDI_TYPE_RAW:
145 return
147 device_path = linstor.get_device_path(vdi_uuid)
149 # If the virtual VHD size is lower than the LINSTOR volume size,
150 # there is nothing to do.
151 vhd_size = LinstorVhdUtil.compute_volume_size(
152 # TODO: Replace pylint comment with this feature when possible:
153 # https://github.com/PyCQA/pylint/pull/2926
154 LinstorVhdUtil(session, linstor).get_size_virt(vdi_uuid), # pylint: disable = E1120
155 image_type
156 )
158 volume_info = linstor.get_volume_info(vdi_uuid)
159 volume_size = volume_info.virtual_size
161 if vhd_size > volume_size:
162 LinstorVhdUtil(session, linstor).inflate(
163 journaler, vdi_uuid, device_path, vhd_size, volume_size
164 )
167def detach_thin_impl(session, linstor, sr_uuid, vdi_uuid):
168 volume_metadata = linstor.get_volume_metadata(vdi_uuid)
169 image_type = volume_metadata.get(VDI_TYPE_TAG)
170 if image_type == vhdutil.VDI_TYPE_RAW:
171 return
173 def check_vbd_count():
174 vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
175 vbds = session.xenapi.VBD.get_all_records_where(
176 'field "VDI" = "{}"'.format(vdi_ref)
177 )
179 num_plugged = 0
180 for vbd_rec in vbds.values():
181 if vbd_rec['currently_attached']:
182 num_plugged += 1
183 if num_plugged > 1:
184 raise xs_errors.XenError(
185 'VDIUnavailable',
186 opterr='Cannot deflate VDI {}, already used by '
187 'at least 2 VBDs'.format(vdi_uuid)
188 )
190 # We can have multiple VBDs attached to a VDI during a VM-template clone.
191 # So we use a timeout to ensure that we can detach the volume properly.
192 util.retry(check_vbd_count, maxretry=10, period=1)
194 device_path = linstor.get_device_path(vdi_uuid)
195 vhdutil_inst = LinstorVhdUtil(session, linstor)
196 new_volume_size = LinstorVolumeManager.round_up_volume_size(
197 # TODO: Replace pylint comment with this feature when possible:
198 # https://github.com/PyCQA/pylint/pull/2926
199 vhdutil_inst.get_size_phys(vdi_uuid) # pylint: disable = E1120
200 )
202 volume_info = linstor.get_volume_info(vdi_uuid)
203 old_volume_size = volume_info.virtual_size
204 vhdutil_inst.deflate(device_path, new_volume_size, old_volume_size)
207def detach_thin(session, linstor, sr_uuid, vdi_uuid):
208 # This function must always return without errors.
209 # Otherwise it could cause errors in the XAPI regarding the state of the VDI.
210 # That's why we use this `try` block.
211 try:
212 detach_thin_impl(session, linstor, sr_uuid, vdi_uuid)
213 except Exception as e:
214 util.SMlog('Failed to properly detach VDI {}: {}'.format(vdi_uuid, e))
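# Parse the xhad.conf file to find the IPs used by the HA daemon.
# Returns a tuple: (IP of the local host, or None if it cannot be resolved,
# dict mapping HostID -> IPaddress).
# Expected layout, sketched only from the tags read below (root element and
# any other fields are omitted/unknown):
#   <common-config>
#     <host><HostID>...</HostID><IPaddress>...</IPaddress></host>
#   </common-config>
#   <local-config>
#     <localhost><HostID>...</HostID></localhost>
#   </local-config>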
217def get_ips_from_xha_config_file():
218 ips = dict()
219 host_id = None
220 try:
221 # Ensure there is no dirty read problem.
222 # For example if the HA is reloaded.
223 tree = util.retry(
224 lambda: xml_parser.parse(XHA_CONFIG_PATH),
225 maxretry=10,
226 period=1
227 )
228 except:
229 return (None, ips)
231 def parse_host_nodes(ips, node):
232 current_id = None
233 current_ip = None
235 for sub_node in node:
236 if sub_node.tag == 'IPaddress':
237 current_ip = sub_node.text
238 elif sub_node.tag == 'HostID':
239 current_id = sub_node.text
240 else:
241 continue
243 if current_id and current_ip:
244 ips[current_id] = current_ip
245 return
246 util.SMlog('Ill-formed XHA file, missing IPaddress and/or HostID')
248 def parse_common_config(ips, node):
249 for sub_node in node:
250 if sub_node.tag == 'host':
251 parse_host_nodes(ips, sub_node)
253 def parse_local_config(ips, node):
254 for sub_node in node:
255 if sub_node.tag == 'localhost':
256 for host_node in sub_node:
257 if host_node.tag == 'HostID':
258 return host_node.text
260 for node in tree.getroot():
261 if node.tag == 'common-config':
262 parse_common_config(ips, node)
263 elif node.tag == 'local-config':
264 host_id = parse_local_config(ips, node)
265 else:
266 continue
268 if ips and host_id:
269 break
271 return (host_id and ips.get(host_id), ips)
274def activate_lvm_group(group_name):
275 path = group_name.split('/')
276 assert path and len(path) <= 2
277 try:
278 lvutil.setActiveVG(path[0], True)
279 except Exception as e:
280 util.SMlog('Cannot activate VG `{}`: {}'.format(path[0], e))
282# ==============================================================================
284# Usage example:
285# xe sr-create type=linstor name-label=linstor-sr
286# host-uuid=d2deba7a-c5ad-4de1-9a20-5c8df3343e93
287# device-config:group-name=vg_loop device-config:redundancy=2
290class LinstorSR(SR.SR):
291 DRIVER_TYPE = 'linstor'
293 PROVISIONING_TYPES = ['thin', 'thick']
294 PROVISIONING_DEFAULT = 'thin'
296 MANAGER_PLUGIN = 'linstor-manager'
298 INIT_STATUS_NOT_SET = 0
299 INIT_STATUS_IN_PROGRESS = 1
300 INIT_STATUS_OK = 2
301 INIT_STATUS_FAIL = 3
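    # _init_status lifecycle (managed by the _locked_load wrapper below):
    # NOT_SET -> IN_PROGRESS when the first wrapped call triggers load(),
    # then OK on success or FAIL on exception. Once FAIL, wrapped methods
    # are skipped with a log message instead of retrying initialization.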
303 # --------------------------------------------------------------------------
304 # SR methods.
305 # --------------------------------------------------------------------------
307 @override
308 @staticmethod
309 def handles(type) -> bool:
310 return type == LinstorSR.DRIVER_TYPE
312 @override
313 def load(self, sr_uuid) -> None:
314 if not LINSTOR_AVAILABLE:
315 raise util.SMException(
316 'Can\'t load LinstorSR: LINSTOR libraries are missing'
317 )
319 # Check parameters.
320 if 'group-name' not in self.dconf or not self.dconf['group-name']:
321 raise xs_errors.XenError('LinstorConfigGroupNameMissing')
322 if 'redundancy' not in self.dconf or not self.dconf['redundancy']:
323 raise xs_errors.XenError('LinstorConfigRedundancyMissing')
325 self.driver_config = DRIVER_CONFIG
327 # Check provisioning config.
328 provisioning = self.dconf.get('provisioning')
329 if provisioning:
330 if provisioning in self.PROVISIONING_TYPES:
331 self._provisioning = provisioning
332 else:
333 raise xs_errors.XenError(
334 'InvalidArg',
335 opterr='Provisioning parameter must be one of {}'.format(
336 self.PROVISIONING_TYPES
337 )
338 )
339 else:
340 self._provisioning = self.PROVISIONING_DEFAULT
342 monitor_db_quorum = self.dconf.get('monitor-db-quorum')
343 self._monitor_db_quorum = (monitor_db_quorum is None) or \
344 util.strtobool(monitor_db_quorum)
346 # Note: We don't have access to the session field if the
347 # 'vdi_attach_from_config' command is executed.
348 self._has_session = self.sr_ref and self.session is not None
349 if self._has_session:
350 self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref)
351 else:
352 self.sm_config = self.srcmd.params.get('sr_sm_config') or {}
354 provisioning = self.sm_config.get('provisioning')
355 if provisioning in self.PROVISIONING_TYPES:
356 self._provisioning = provisioning
358 # Define properties for SR parent class.
359 self.ops_exclusive = OPS_EXCLUSIVE
360 self.path = LinstorVolumeManager.DEV_ROOT_PATH
361 self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
362 self.sr_vditype = SR.DEFAULT_TAP
364 if self.cmd == 'sr_create':
365 self._redundancy = int(self.dconf['redundancy']) or 1
366 self._linstor = None # Ensure that LINSTOR attribute exists.
367 self._journaler = None
369 self._group_name = self.dconf['group-name']
371 self._vdi_shared_time = 0
373 self._init_status = self.INIT_STATUS_NOT_SET
375 self._vdis_loaded = False
376 self._all_volume_info_cache = None
377 self._all_volume_metadata_cache = None
379 # To remove in python 3.10.
380 # Use @staticmethod directly instead.
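    # Lazy-initialization wrapper for SR entry points: on first use it
    # connects to the LINSTOR controller, creates the journaler/volume
    # manager, takes the shared VDI lock on slaves for vdi_* commands and,
    # on the master, loads VDIs and replays pending journal entries before
    # invoking the wrapped method.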
381 @util.conditional_decorator(staticmethod, sys.version_info >= (3, 10, 0))
382 def _locked_load(method):
383 def wrapped_method(self, *args, **kwargs):
384 self._init_status = self.INIT_STATUS_OK
385 return method(self, *args, **kwargs)
387 def load(self, *args, **kwargs):
388 # Activate all LVMs to make drbd-reactor happy.
389 if self.srcmd.cmd in ('sr_attach', 'vdi_attach_from_config'):
390 activate_lvm_group(self._group_name)
392 if not self._has_session:
393 if self.srcmd.cmd in (
394 'vdi_attach_from_config',
395 'vdi_detach_from_config',
396 # When on-slave (is_open) is executed we have an
397 # empty command.
398 None
399 ):
400 def create_linstor(uri, attempt_count=30):
401 self._linstor = LinstorVolumeManager(
402 uri,
403 self._group_name,
404 logger=util.SMlog,
405 attempt_count=attempt_count
406 )
407 # Only required if we are attaching from config using a non-special VDI.
408 # I.e. not an HA volume.
409 self._vhdutil = LinstorVhdUtil(self.session, self._linstor)
411 controller_uri = get_controller_uri()
412 if controller_uri:
413 create_linstor(controller_uri)
414 else:
415 def connect():
416 # We must have a valid LINSTOR instance here without using
417 # the XAPI. Fall back to the HA config file.
418 for ip in get_ips_from_xha_config_file()[1].values():
419 controller_uri = 'linstor://' + ip
420 try:
421 util.SMlog('Connecting from config to LINSTOR controller using: {}'.format(ip))
422 create_linstor(controller_uri, attempt_count=0)
423 return controller_uri
424 except:
425 pass
427 controller_uri = util.retry(connect, maxretry=30, period=1)
428 if not controller_uri:
429 raise xs_errors.XenError(
430 'SRUnavailable',
431 opterr='No valid controller URI to attach/detach from config'
432 )
434 self._journaler = LinstorJournaler(
435 controller_uri, self._group_name, logger=util.SMlog
436 )
438 if self.srcmd.cmd is None:
439 # Only useful on on-slave plugin (is_open).
440 self._vhdutil = LinstorVhdUtil(self.session, self._linstor)
442 return wrapped_method(self, *args, **kwargs)
444 if not self.is_master():
445 if self.cmd in [
446 'sr_create', 'sr_delete', 'sr_update', 'sr_probe',
447 'sr_scan', 'vdi_create', 'vdi_delete', 'vdi_resize',
448 'vdi_snapshot', 'vdi_clone'
449 ]:
450 util.SMlog('{} blocked for non-master'.format(self.cmd))
451 raise xs_errors.XenError('LinstorMaster')
453 # Because the LINSTOR KV objects cache all values, we must lock
454 # the VDI before the LinstorJournaler/LinstorVolumeManager
455 # instantiation and before any action on the master to avoid a
456 # bad read. The lock is also necessary to avoid strange
457 # behaviors if the GC is executed during an action on a slave.
458 if self.cmd.startswith('vdi_'):
459 self._shared_lock_vdi(self.srcmd.params['vdi_uuid'])
460 self._vdi_shared_time = time.time()
462 if self.srcmd.cmd != 'sr_create' and self.srcmd.cmd != 'sr_detach':
463 try:
464 self._reconnect()
465 except Exception as e:
466 raise xs_errors.XenError('SRUnavailable', opterr=str(e))
468 if self._linstor:
469 try:
470 hosts = self._linstor.disconnected_hosts
471 except Exception as e:
472 raise xs_errors.XenError('SRUnavailable', opterr=str(e))
474 if hosts:
475 util.SMlog('Failed to join node(s): {}'.format(hosts))
477 # Ensure we use a non-locked volume when vhdutil is called.
478 if (
479 self.is_master() and self.cmd.startswith('vdi_') and
480 self.cmd != 'vdi_create'
481 ):
482 self._linstor.ensure_volume_is_not_locked(
483 self.srcmd.params['vdi_uuid']
484 )
486 try:
487 # If the command is an SR scan command on the master,
488 # we must load all VDIs and clean journal transactions.
489 # We must also load the VDIs in the snapshot case, but only if
490 # there is at least one entry in the journal.
491 #
492 # If the command is an SR command, we want at least to remove
493 # resourceless volumes.
494 if self.is_master() and self.cmd not in [
495 'vdi_attach', 'vdi_detach',
496 'vdi_activate', 'vdi_deactivate',
497 'vdi_epoch_begin', 'vdi_epoch_end',
498 'vdi_update', 'vdi_destroy'
499 ]:
500 load_vdis = (
501 self.cmd == 'sr_scan' or
502 self.cmd == 'sr_attach'
503 ) or len(
504 self._journaler.get_all(LinstorJournaler.INFLATE)
505 ) or len(
506 self._journaler.get_all(LinstorJournaler.CLONE)
507 )
509 if load_vdis:
510 self._load_vdis()
512 self._linstor.remove_resourceless_volumes()
514 self._synchronize_metadata()
515 except Exception as e:
516 if self.cmd == 'sr_scan' or self.cmd == 'sr_attach':
517 # Always raise, we don't want to remove VDIs
518 # from the XAPI database otherwise.
519 raise e
520 util.SMlog(
521 'Ignoring exception in LinstorSR.load: {}'.format(e)
522 )
523 util.SMlog(traceback.format_exc())
525 return wrapped_method(self, *args, **kwargs)
527 @functools.wraps(wrapped_method)
528 def wrap(self, *args, **kwargs):
529 if self._init_status in \
530 (self.INIT_STATUS_OK, self.INIT_STATUS_IN_PROGRESS):
531 return wrapped_method(self, *args, **kwargs)
532 if self._init_status == self.INIT_STATUS_FAIL:
533 util.SMlog(
534 'Can\'t call method {} because initialization failed'
535 .format(method)
536 )
537 else:
538 try:
539 self._init_status = self.INIT_STATUS_IN_PROGRESS
540 return load(self, *args, **kwargs)
541 except Exception:
542 if self._init_status != self.INIT_STATUS_OK:
543 self._init_status = self.INIT_STATUS_FAIL
544 raise
546 return wrap
548 @override
549 def cleanup(self) -> None:
550 if self._vdi_shared_time:
551 self._shared_lock_vdi(self.srcmd.params['vdi_uuid'], locked=False)
553 @override
554 @_locked_load
555 def create(self, uuid, size) -> None:
556 util.SMlog('LinstorSR.create for {}'.format(self.uuid))
558 host_adresses = util.get_host_addresses(self.session)
559 if self._redundancy > len(host_adresses):
560 raise xs_errors.XenError(
561 'LinstorSRCreate',
562 opterr='Redundancy greater than host count'
563 )
565 xenapi = self.session.xenapi
566 srs = xenapi.SR.get_all_records_where(
567 'field "type" = "{}"'.format(self.DRIVER_TYPE)
568 )
569 srs = dict([e for e in srs.items() if e[1]['uuid'] != self.uuid])
571 for sr in srs.values():
572 for pbd in sr['PBDs']:
573 device_config = xenapi.PBD.get_device_config(pbd)
574 group_name = device_config.get('group-name')
575 if group_name and group_name == self._group_name:
576 raise xs_errors.XenError(
577 'LinstorSRCreate',
578 opterr='group name must be unique, already used by PBD {}'.format(
579 xenapi.PBD.get_uuid(pbd)
580 )
581 )
583 if srs:
584 raise xs_errors.XenError(
585 'LinstorSRCreate',
586 opterr='LINSTOR SR must be unique in a pool'
587 )
589 online_hosts = util.get_online_hosts(self.session)
590 if len(online_hosts) < len(host_adresses):
591 raise xs_errors.XenError(
592 'LinstorSRCreate',
593 opterr='Not enough online hosts'
594 )
596 ips = {}
597 for host_ref in online_hosts:
598 record = self.session.xenapi.host.get_record(host_ref)
599 hostname = record['hostname']
600 ips[hostname] = record['address']
602 if len(ips) != len(online_hosts):
603 raise xs_errors.XenError(
604 'LinstorSRCreate',
605 opterr='Multiple hosts with same hostname'
606 )
608 # Ensure ports are opened and LINSTOR satellites
609 # are activated. At the same time the drbd-reactor instances
610 # must be stopped.
611 self._prepare_sr_on_all_hosts(self._group_name, enabled=True)
613 # Create SR.
614 # Throw if the SR already exists.
615 try:
616 self._linstor = LinstorVolumeManager.create_sr(
617 self._group_name,
618 ips,
619 self._redundancy,
620 thin_provisioning=self._provisioning == 'thin',
621 auto_quorum=self._monitor_db_quorum,
622 logger=util.SMlog
623 )
624 self._vhdutil = LinstorVhdUtil(self.session, self._linstor)
626 util.SMlog(
627 "Finishing SR creation, enable drbd-reactor on all hosts..."
628 )
629 self._update_drbd_reactor_on_all_hosts(enabled=True)
630 except Exception as e:
631 if not self._linstor:
632 util.SMlog('Failed to create LINSTOR SR: {}'.format(e))
633 raise xs_errors.XenError('LinstorSRCreate', opterr=str(e))
635 try:
636 self._linstor.destroy()
637 except Exception as e2:
638 util.SMlog(
639 'Failed to destroy LINSTOR SR after creation fail: {}'
640 .format(e2)
641 )
642 raise e
644 @override
645 @_locked_load
646 def delete(self, uuid) -> None:
647 util.SMlog('LinstorSR.delete for {}'.format(self.uuid))
648 cleanup.gc_force(self.session, self.uuid)
650 assert self._linstor
651 if self.vdis or self._linstor._volumes:
652 raise xs_errors.XenError('SRNotEmpty')
654 node_name = get_controller_node_name()
655 if not node_name:
656 raise xs_errors.XenError(
657 'LinstorSRDelete',
658 opterr='Cannot get controller node name'
659 )
661 host_ref = None
662 if node_name == 'localhost':
663 host_ref = util.get_this_host_ref(self.session)
664 else:
665 for slave in util.get_all_slaves(self.session):
666 r_name = self.session.xenapi.host.get_record(slave)['hostname']
667 if r_name == node_name:
668 host_ref = slave
669 break
671 if not host_ref:
672 raise xs_errors.XenError(
673 'LinstorSRDelete',
674 opterr='Failed to find host with hostname: {}'.format(
675 node_name
676 )
677 )
679 try:
680 self._update_drbd_reactor_on_all_hosts(
681 controller_node_name=node_name, enabled=False
682 )
684 args = {
685 'groupName': self._group_name,
686 }
687 self._exec_manager_command(
688 host_ref, 'destroy', args, 'LinstorSRDelete'
689 )
690 except Exception as e:
691 try:
692 self._update_drbd_reactor_on_all_hosts(
693 controller_node_name=node_name, enabled=True
694 )
695 except Exception as e2:
696 util.SMlog(
697 'Failed to restart drbd-reactor after destroy fail: {}'
698 .format(e2)
699 )
700 util.SMlog('Failed to delete LINSTOR SR: {}'.format(e))
701 raise xs_errors.XenError(
702 'LinstorSRDelete',
703 opterr=str(e)
704 )
706 Lock.cleanupAll(self.uuid)
708 @override
709 @_locked_load
710 def update(self, uuid) -> None:
711 util.SMlog('LinstorSR.update for {}'.format(self.uuid))
713 # Well, how can we update an SR if it doesn't exist? :thinking:
714 if not self._linstor:
715 raise xs_errors.XenError(
716 'SRUnavailable',
717 opterr='no such volume group: {}'.format(self._group_name)
718 )
720 self._update_stats(0)
722 # Update the SR name and description only in LINSTOR metadata.
723 xenapi = self.session.xenapi
724 self._linstor.metadata = {
725 NAME_LABEL_TAG: util.to_plain_string(
726 xenapi.SR.get_name_label(self.sr_ref)
727 ),
728 NAME_DESCRIPTION_TAG: util.to_plain_string(
729 xenapi.SR.get_name_description(self.sr_ref)
730 )
731 }
733 @override
734 @_locked_load
735 def attach(self, uuid) -> None:
736 util.SMlog('LinstorSR.attach for {}'.format(self.uuid))
738 if not self._linstor:
739 raise xs_errors.XenError(
740 'SRUnavailable',
741 opterr='no such group: {}'.format(self._group_name)
742 )
744 @override
745 @_locked_load
746 def detach(self, uuid) -> None:
747 util.SMlog('LinstorSR.detach for {}'.format(self.uuid))
748 cleanup.abort(self.uuid)
750 @override
751 @_locked_load
752 def probe(self) -> str:
753 util.SMlog('LinstorSR.probe for {}'.format(self.uuid))
754 # TODO
755 return ''
757 @override
758 @_locked_load
759 def scan(self, uuid) -> None:
760 if self._init_status == self.INIT_STATUS_FAIL:
761 return
763 util.SMlog('LinstorSR.scan for {}'.format(self.uuid))
764 if not self._linstor:
765 raise xs_errors.XenError(
766 'SRUnavailable',
767 opterr='no such volume group: {}'.format(self._group_name)
768 )
770 # Note: `scan` can be called outside this module, so ensure the VDIs
771 # are loaded.
772 self._load_vdis()
773 self._update_physical_size()
775 for vdi_uuid in list(self.vdis.keys()):
776 if self.vdis[vdi_uuid].deleted:
777 del self.vdis[vdi_uuid]
779 # Security to prevent VDIs from being forgotten if the controller
780 # is started without a shared and mounted /var/lib/linstor path.
781 try:
782 self._linstor.get_database_path()
783 except Exception as e:
784 # Failed to get database path, ensure we don't have
785 # VDIs in the XAPI database...
786 if self.session.xenapi.SR.get_VDIs(
787 self.session.xenapi.SR.get_by_uuid(self.uuid)
788 ):
789 raise xs_errors.XenError(
790 'SRUnavailable',
791 opterr='Database is not mounted or node name is invalid ({})'.format(e)
792 )
794 # Update the database before the restart of the GC to avoid
795 # bad sync in the process if new VDIs have been introduced.
796 super(LinstorSR, self).scan(self.uuid)
797 self._kick_gc()
799 def is_master(self):
800 if not hasattr(self, '_is_master'):
801 if 'SRmaster' not in self.dconf:
802 self._is_master = self.session is not None and util.is_master(self.session)
803 else:
804 self._is_master = self.dconf['SRmaster'] == 'true'
806 return self._is_master
808 @override
809 @_locked_load
810 def vdi(self, uuid) -> VDI.VDI:
811 return LinstorVDI(self, uuid)
813 # To remove in python 3.10
814 # See: https://stackoverflow.com/questions/12718187/python-version-3-9-calling-class-staticmethod-within-the-class-body
815 _locked_load = staticmethod(_locked_load)
817 # --------------------------------------------------------------------------
818 # Lock.
819 # --------------------------------------------------------------------------
821 def _shared_lock_vdi(self, vdi_uuid, locked=True):
822 master = util.get_master_ref(self.session)
824 command = 'lockVdi'
825 args = {
826 'groupName': self._group_name,
827 'srUuid': self.uuid,
828 'vdiUuid': vdi_uuid,
829 'locked': str(locked)
830 }
832 # Note: We must avoid unlocking the volume if the timeout is reached
833 # because, during volume unlock, the SR lock is not used. Otherwise
834 # we could destroy a valid lock acquired from another host...
835 #
836 # This code is not very clean; the ideal solution would be to acquire
837 # the SR lock during volume unlock (as for lock), but it's not easy
838 # to implement without impacting performance.
839 if not locked:
840 elapsed_time = time.time() - self._vdi_shared_time
841 timeout = LinstorVolumeManager.LOCKED_EXPIRATION_DELAY * 0.7
842 if elapsed_time >= timeout:
843 util.SMlog(
844 'Avoid unlock call of {} because timeout has been reached'
845 .format(vdi_uuid)
846 )
847 return
849 self._exec_manager_command(master, command, args, 'VDIUnavailable')
851 # --------------------------------------------------------------------------
852 # Network.
853 # --------------------------------------------------------------------------
855 def _exec_manager_command(self, host_ref, command, args, error):
856 host_rec = self.session.xenapi.host.get_record(host_ref)
857 host_uuid = host_rec['uuid']
859 try:
860 ret = self.session.xenapi.host.call_plugin(
861 host_ref, self.MANAGER_PLUGIN, command, args
862 )
863 except Exception as e:
864 util.SMlog(
865 'call-plugin on {} ({}:{} with {}) raised'.format(
866 host_uuid, self.MANAGER_PLUGIN, command, args
867 )
868 )
869 raise e
871 util.SMlog(
872 'call-plugin on {} ({}:{} with {}) returned: {}'.format(
873 host_uuid, self.MANAGER_PLUGIN, command, args, ret
874 )
875 )
876 if ret == 'False':
877 raise xs_errors.XenError(
878 error,
879 opterr='Plugin {} failed'.format(self.MANAGER_PLUGIN)
880 )
882 def _prepare_sr(self, host, group_name, enabled):
883 self._exec_manager_command(
884 host,
885 'prepareSr' if enabled else 'releaseSr',
886 {'groupName': group_name},
887 'SRUnavailable'
888 )
890 def _prepare_sr_on_all_hosts(self, group_name, enabled):
891 master = util.get_master_ref(self.session)
892 self._prepare_sr(master, group_name, enabled)
894 for slave in util.get_all_slaves(self.session):
895 self._prepare_sr(slave, group_name, enabled)
897 def _update_drbd_reactor(self, host, enabled):
898 self._exec_manager_command(
899 host,
900 'updateDrbdReactor',
901 {'enabled': str(enabled)},
902 'SRUnavailable'
903 )
905 def _update_drbd_reactor_on_all_hosts(
906 self, enabled, controller_node_name=None
907 ):
908 if controller_node_name == 'localhost':
909 controller_node_name = self.session.xenapi.host.get_record(
910 util.get_this_host_ref(self.session)
911 )['hostname']
912 assert controller_node_name
913 assert controller_node_name != 'localhost'
915 controller_host = None
916 secondary_hosts = []
918 hosts = self.session.xenapi.host.get_all_records()
919 for host_ref, host_rec in hosts.items():
920 hostname = host_rec['hostname']
921 if controller_node_name == hostname:
922 controller_host = host_ref
923 else:
924 secondary_hosts.append((host_ref, hostname))
926 action_name = 'Starting' if enabled else 'Stopping'
927 if controller_node_name and not controller_host:
928 util.SMlog('Failed to find controller host: `{}`'.format(
929 controller_node_name
930 ))
932 if enabled and controller_host:
933 util.SMlog('{} drbd-reactor on controller host `{}`...'.format(
934 action_name, controller_node_name
935 ))
936 # If enabled is true, we try to start the controller on the desired
937 # node name first.
938 self._update_drbd_reactor(controller_host, enabled)
940 for host_ref, hostname in secondary_hosts:
941 util.SMlog('{} drbd-reactor on host {}...'.format(
942 action_name, hostname
943 ))
944 self._update_drbd_reactor(host_ref, enabled)
946 if not enabled and controller_host:
947 util.SMlog('{} drbd-reactor on controller host `{}`...'.format(
948 action_name, controller_node_name
949 ))
950 # If enabled is false, we disable the drbd-reactor service of
951 # the controller host last. Why? Otherwise the linstor-controller
952 # of other nodes can be started, and we don't want that.
953 self._update_drbd_reactor(controller_host, enabled)
955 # --------------------------------------------------------------------------
956 # Metadata.
957 # --------------------------------------------------------------------------
959 def _synchronize_metadata_and_xapi(self):
960 try:
961 # First synch SR parameters.
962 self.update(self.uuid)
964 # Now update the VDI information in the metadata if required.
965 xenapi = self.session.xenapi
966 volumes_metadata = self._linstor.get_volumes_with_metadata()
967 for vdi_uuid, volume_metadata in volumes_metadata.items():
968 try:
969 vdi_ref = xenapi.VDI.get_by_uuid(vdi_uuid)
970 except Exception:
971 # Maybe the VDI is not in XAPI yet, don't bother.
972 continue
974 label = util.to_plain_string(
975 xenapi.VDI.get_name_label(vdi_ref)
976 )
977 description = util.to_plain_string(
978 xenapi.VDI.get_name_description(vdi_ref)
979 )
981 if (
982 volume_metadata.get(NAME_LABEL_TAG) != label or
983 volume_metadata.get(NAME_DESCRIPTION_TAG) != description
984 ):
985 self._linstor.update_volume_metadata(vdi_uuid, {
986 NAME_LABEL_TAG: label,
987 NAME_DESCRIPTION_TAG: description
988 })
989 except Exception as e:
990 raise xs_errors.XenError(
991 'MetadataError',
992 opterr='Error synching SR Metadata and XAPI: {}'.format(e)
993 )
995 def _synchronize_metadata(self):
996 if not self.is_master():
997 return
999 util.SMlog('Synchronize metadata...')
1000 if self.cmd == 'sr_attach':
1001 try:
1002 util.SMlog(
1003 'Synchronize SR metadata and the state on the storage.'
1004 )
1005 self._synchronize_metadata_and_xapi()
1006 except Exception as e:
1007 util.SMlog('Failed to synchronize metadata: {}'.format(e))
1009 # --------------------------------------------------------------------------
1010 # Stats.
1011 # --------------------------------------------------------------------------
1013 def _update_stats(self, virt_alloc_delta):
1014 valloc = int(self.session.xenapi.SR.get_virtual_allocation(
1015 self.sr_ref
1016 ))
1018 # Update size attributes of the SR parent class.
1019 self.virtual_allocation = valloc + virt_alloc_delta
1021 self._update_physical_size()
1023 # Notify SR parent class.
1024 self._db_update()
1026 def _update_physical_size(self):
1027 # We use the size of the smallest disk; this is an approximation that
1028 # ensures the displayed physical size is reachable by the user.
1029 (min_physical_size, pool_count) = self._linstor.get_min_physical_size()
1030 self.physical_size = min_physical_size * pool_count // \
1031 self._linstor.redundancy
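        # Illustrative example (hypothetical numbers): with 4 storage pools
        # whose smallest disk is 100 GiB and redundancy=2, the reported
        # physical size is 100 GiB * 4 // 2 = 200 GiB.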
1033 self.physical_utilisation = self._linstor.allocated_volume_size
1035 # --------------------------------------------------------------------------
1036 # VDIs.
1037 # --------------------------------------------------------------------------
1039 def _load_vdis(self):
1040 if self._vdis_loaded:
1041 return
1043 assert self.is_master()
1045 # We use a cache to avoid repeated JSON parsing.
1046 # The performance gain is not big, but we can still
1047 # benefit from it with a few lines.
1048 self._create_linstor_cache()
1049 self._load_vdis_ex()
1050 self._destroy_linstor_cache()
1052 # We must mark VDIs as loaded only if the load is a success.
1053 self._vdis_loaded = True
1055 self._undo_all_journal_transactions()
1057 def _load_vdis_ex(self):
1058 # 1. Get existing VDIs in XAPI.
1059 xenapi = self.session.xenapi
1060 xapi_vdi_uuids = set()
1061 for vdi in xenapi.SR.get_VDIs(self.sr_ref):
1062 xapi_vdi_uuids.add(xenapi.VDI.get_uuid(vdi))
1064 # 2. Get volumes info.
1065 all_volume_info = self._all_volume_info_cache
1066 volumes_metadata = self._all_volume_metadata_cache
1068 # 3. Get CBT vdis.
1069 # See: https://support.citrix.com/article/CTX230619
1070 cbt_vdis = set()
1071 for volume_metadata in volumes_metadata.values():
1072 cbt_uuid = volume_metadata.get(CBTLOG_TAG)
1073 if cbt_uuid:
1074 cbt_vdis.add(cbt_uuid)
1076 introduce = False
1078 # Try to introduce VDIs only during scan/attach.
1079 if self.cmd == 'sr_scan' or self.cmd == 'sr_attach':
1080 has_clone_entries = list(self._journaler.get_all(
1081 LinstorJournaler.CLONE
1082 ).items())
1084 if has_clone_entries:
1085 util.SMlog(
1086 'Cannot introduce VDIs during scan because there are '
1087 'CLONE entries in the journaler on SR {}'.format(self.uuid)
1088 )
1089 else:
1090 introduce = True
1092 # 4. Now check all volume info.
1093 vdi_to_snaps = {}
1094 for vdi_uuid, volume_info in all_volume_info.items():
1095 if vdi_uuid.startswith(cleanup.SR.TMP_RENAME_PREFIX):
1096 continue
1098 # 4.a. Check if the VDI in LINSTOR is in XAPI VDIs.
1099 if vdi_uuid not in xapi_vdi_uuids:
1100 if not introduce:
1101 continue
1103 if vdi_uuid.startswith('DELETED_'):
1104 continue
1106 volume_metadata = volumes_metadata.get(vdi_uuid)
1107 if not volume_metadata:
1108 util.SMlog(
1109 'Skipping volume {} because no metadata could be found'
1110 .format(vdi_uuid)
1111 )
1112 continue
1114 util.SMlog(
1115 'Trying to introduce VDI {} as it is present in '
1116 'LINSTOR and not in XAPI...'
1117 .format(vdi_uuid)
1118 )
1120 try:
1121 self._linstor.get_device_path(vdi_uuid)
1122 except Exception as e:
1123 util.SMlog(
1124 'Cannot introduce {}, unable to get path: {}'
1125 .format(vdi_uuid, e)
1126 )
1127 continue
1129 name_label = volume_metadata.get(NAME_LABEL_TAG) or ''
1130 type = volume_metadata.get(TYPE_TAG) or 'user'
1131 vdi_type = volume_metadata.get(VDI_TYPE_TAG)
1133 if not vdi_type:
1134 util.SMlog(
1135 'Cannot introduce {} '.format(vdi_uuid) +
1136 'without vdi_type'
1137 )
1138 continue
1140 sm_config = {
1141 'vdi_type': vdi_type
1142 }
1144 if vdi_type == vhdutil.VDI_TYPE_RAW:
1145 managed = not volume_metadata.get(HIDDEN_TAG)
1146 elif vdi_type == vhdutil.VDI_TYPE_VHD:
1147 vhd_info = self._vhdutil.get_vhd_info(vdi_uuid)
1148 managed = not vhd_info.hidden
1149 if vhd_info.parentUuid:
1150 sm_config['vhd-parent'] = vhd_info.parentUuid
1151 else:
1152 util.SMlog(
1153 'Cannot introduce {} with invalid VDI type {}'
1154 .format(vdi_uuid, vdi_type)
1155 )
1156 continue
1158 util.SMlog(
1159 'Introducing VDI {} '.format(vdi_uuid) +
1160 ' (name={}, virtual_size={}, allocated_size={})'.format(
1161 name_label,
1162 volume_info.virtual_size,
1163 volume_info.allocated_size
1164 )
1165 )
1167 vdi_ref = xenapi.VDI.db_introduce(
1168 vdi_uuid,
1169 name_label,
1170 volume_metadata.get(NAME_DESCRIPTION_TAG) or '',
1171 self.sr_ref,
1172 type,
1173 False, # sharable
1174 bool(volume_metadata.get(READ_ONLY_TAG)),
1175 {}, # other_config
1176 vdi_uuid, # location
1177 {}, # xenstore_data
1178 sm_config,
1179 managed,
1180 str(volume_info.virtual_size),
1181 str(volume_info.allocated_size)
1182 )
1184 is_a_snapshot = volume_metadata.get(IS_A_SNAPSHOT_TAG)
1185 xenapi.VDI.set_is_a_snapshot(vdi_ref, bool(is_a_snapshot))
1186 if is_a_snapshot:
1187 xenapi.VDI.set_snapshot_time(
1188 vdi_ref,
1189 xmlrpc.client.DateTime(
1190 volume_metadata[SNAPSHOT_TIME_TAG] or
1191 '19700101T00:00:00Z'
1192 )
1193 )
1195 snap_uuid = volume_metadata[SNAPSHOT_OF_TAG]
1196 if snap_uuid in vdi_to_snaps:
1197 vdi_to_snaps[snap_uuid].append(vdi_uuid)
1198 else:
1199 vdi_to_snaps[snap_uuid] = [vdi_uuid]
1201 # 4.b. Add the VDI in the list.
1202 vdi = self.vdi(vdi_uuid)
1203 self.vdis[vdi_uuid] = vdi
1205 if USE_KEY_HASH and vdi.vdi_type == vhdutil.VDI_TYPE_VHD:
1206 # TODO: Replace pylint comment with this feature when possible:
1207 # https://github.com/PyCQA/pylint/pull/2926
1208 vdi.sm_config_override['key_hash'] = \
1209 self._vhdutil.get_key_hash(vdi_uuid) # pylint: disable = E1120
1211 # 4.c. Update CBT status of disks either just added
1212 # or already in XAPI.
1213 cbt_uuid = volume_metadata.get(CBTLOG_TAG)
1214 if cbt_uuid in cbt_vdis:
1215 vdi_ref = xenapi.VDI.get_by_uuid(vdi_uuid)
1216 xenapi.VDI.set_cbt_enabled(vdi_ref, True)
1217 # For existing VDIs, update local state too.
1218 # Scan in base class SR updates existing VDIs
1219 # again based on local states.
1220 self.vdis[vdi_uuid].cbt_enabled = True
1221 cbt_vdis.remove(cbt_uuid)
1223 # 5. Now set the snapshot statuses correctly in XAPI.
1224 for src_uuid in vdi_to_snaps:
1225 try:
1226 src_ref = xenapi.VDI.get_by_uuid(src_uuid)
1227 except Exception:
1228 # The source VDI no longer exists, continue.
1229 continue
1231 for snap_uuid in vdi_to_snaps[src_uuid]:
1232 try:
1233 # This might fail in cases where it's already set.
1234 snap_ref = xenapi.VDI.get_by_uuid(snap_uuid)
1235 xenapi.VDI.set_snapshot_of(snap_ref, src_ref)
1236 except Exception as e:
1237 util.SMlog('Setting snapshot failed: {}'.format(e))
1239 # TODO: Check correctly how to use CBT.
1240 # Update cbt_enabled on the right VDI, check LVM/FileSR code.
1242 # 6. If we have items remaining in this list,
1243 # they are cbt_metadata VDI that XAPI doesn't know about.
1244 # Add them to self.vdis and they'll get added to the DB.
1245 for cbt_uuid in cbt_vdis:
1246 new_vdi = self.vdi(cbt_uuid)
1247 new_vdi.ty = 'cbt_metadata'
1248 new_vdi.cbt_enabled = True
1249 self.vdis[cbt_uuid] = new_vdi
1251 # 7. Update virtual allocation, build genealogy and remove useless VDIs.
1252 self.virtual_allocation = 0
1254 # 8. Build geneology.
1255 geneology = {}
1257 for vdi_uuid, vdi in self.vdis.items():
1258 if vdi.parent:
1259 if vdi.parent in self.vdis:
1260 self.vdis[vdi.parent].read_only = True
1261 if vdi.parent in geneology:
1262 geneology[vdi.parent].append(vdi_uuid)
1263 else:
1264 geneology[vdi.parent] = [vdi_uuid]
1265 if not vdi.hidden:
1266 self.virtual_allocation += vdi.size
1268 # 9. Remove all hidden leaf nodes to avoid introducing records that
1269 # will be GC'ed.
1270 for vdi_uuid in list(self.vdis.keys()):
1271 if vdi_uuid not in geneology and self.vdis[vdi_uuid].hidden:
1272 util.SMlog(
1273 'Scan found hidden leaf ({}), ignoring'.format(vdi_uuid)
1274 )
1275 del self.vdis[vdi_uuid]
1277 # --------------------------------------------------------------------------
1278 # Journals.
1279 # --------------------------------------------------------------------------
1281 def _get_vdi_path_and_parent(self, vdi_uuid, volume_name):
1282 try:
1283 device_path = self._linstor.build_device_path(volume_name)
1284 if not util.pathexists(device_path):
1285 return (None, None)
1287 # If it's a RAW VDI, there is no parent.
1288 volume_metadata = self._linstor.get_volume_metadata(vdi_uuid)
1289 vdi_type = volume_metadata[VDI_TYPE_TAG]
1290 if vdi_type == vhdutil.VDI_TYPE_RAW:
1291 return (device_path, None)
1293 # Otherwise it's a VHD and a parent can exist.
1294 if not self._vhdutil.check(vdi_uuid):
1295 return (None, None)
1297 vhd_info = self._vhdutil.get_vhd_info(vdi_uuid)
1298 if vhd_info:
1299 return (device_path, vhd_info.parentUuid)
1300 except Exception as e:
1301 util.SMlog(
1302 'Failed to get VDI path and parent, ignoring: {}'
1303 .format(e)
1304 )
1305 return (None, None)
1307 def _undo_all_journal_transactions(self):
1308 util.SMlog('Undoing all journal transactions...')
1309 self.lock.acquire()
1310 try:
1311 self._handle_interrupted_inflate_ops()
1312 self._handle_interrupted_clone_ops()
1313 pass
1314 finally:
1315 self.lock.release()
1317 def _handle_interrupted_inflate_ops(self):
1318 transactions = self._journaler.get_all(LinstorJournaler.INFLATE)
1319 for vdi_uuid, old_size in transactions.items():
1320 self._handle_interrupted_inflate(vdi_uuid, old_size)
1321 self._journaler.remove(LinstorJournaler.INFLATE, vdi_uuid)
1323 def _handle_interrupted_clone_ops(self):
1324 transactions = self._journaler.get_all(LinstorJournaler.CLONE)
1325 for vdi_uuid, old_size in transactions.items():
1326 self._handle_interrupted_clone(vdi_uuid, old_size)
1327 self._journaler.remove(LinstorJournaler.CLONE, vdi_uuid)
1329 def _handle_interrupted_inflate(self, vdi_uuid, old_size):
1330 util.SMlog(
1331 '*** INTERRUPTED INFLATE OP: for {} ({})'
1332 .format(vdi_uuid, old_size)
1333 )
1335 vdi = self.vdis.get(vdi_uuid)
1336 if not vdi:
1337 util.SMlog('Cannot deflate missing VDI {}'.format(vdi_uuid))
1338 return
1340 assert not self._all_volume_info_cache
1341 volume_info = self._linstor.get_volume_info(vdi_uuid)
1343 current_size = volume_info.virtual_size
1344 assert current_size > 0
1345 self._vhdutil.force_deflate(vdi.path, old_size, current_size, zeroize=True)
1347 def _handle_interrupted_clone(
1348 self, vdi_uuid, clone_info, force_undo=False
1349 ):
1350 util.SMlog(
1351 '*** INTERRUPTED CLONE OP: for {} ({})'
1352 .format(vdi_uuid, clone_info)
1353 )
1355 base_uuid, snap_uuid = clone_info.split('_')
1357 # Use LINSTOR data because new VDIs may not be in the XAPI.
1358 volume_names = self._linstor.get_volumes_with_name()
1360 # Check if we don't have a base VDI. (If clone failed at startup.)
1361 if base_uuid not in volume_names:
1362 if vdi_uuid in volume_names:
1363 util.SMlog('*** INTERRUPTED CLONE OP: nothing to do')
1364 return
1365 raise util.SMException(
1366 'Base copy {} not present, but no original {} found'
1367 .format(base_uuid, vdi_uuid)
1368 )
1370 if force_undo:
1371 util.SMlog('Explicit revert')
1372 self._undo_clone(
1373 volume_names, vdi_uuid, base_uuid, snap_uuid
1374 )
1375 return
1377 # If VDI or snap uuid is missing...
1378 if vdi_uuid not in volume_names or \
1379 (snap_uuid and snap_uuid not in volume_names):
1380 util.SMlog('One or both leaves missing => revert')
1381 self._undo_clone(volume_names, vdi_uuid, base_uuid, snap_uuid)
1382 return
1384 vdi_path, vdi_parent_uuid = self._get_vdi_path_and_parent(
1385 vdi_uuid, volume_names[vdi_uuid]
1386 )
1387 snap_path, snap_parent_uuid = self._get_vdi_path_and_parent(
1388 snap_uuid, volume_names[snap_uuid]
1389 )
1391 if not vdi_path or (snap_uuid and not snap_path):
1392 util.SMlog('One or both leaves invalid (and path(s)) => revert')
1393 self._undo_clone(volume_names, vdi_uuid, base_uuid, snap_uuid)
1394 return
1396 util.SMlog('Leaves valid but => revert')
1397 self._undo_clone(volume_names, vdi_uuid, base_uuid, snap_uuid)
1399 def _undo_clone(self, volume_names, vdi_uuid, base_uuid, snap_uuid):
1400 base_path = self._linstor.build_device_path(volume_names[base_uuid])
1401 base_metadata = self._linstor.get_volume_metadata(base_uuid)
1402 base_type = base_metadata[VDI_TYPE_TAG]
1404 if not util.pathexists(base_path):
1405 util.SMlog('Base not found! Exit...')
1406 util.SMlog('*** INTERRUPTED CLONE OP: rollback fail')
1407 return
1409 # Un-hide the parent.
1410 self._linstor.update_volume_metadata(base_uuid, {READ_ONLY_TAG: False})
1411 if base_type == vhdutil.VDI_TYPE_VHD:
1412 vhd_info = self._vhdutil.get_vhd_info(base_uuid, False)
1413 if vhd_info.hidden:
1414 self._vhdutil.set_hidden(base_path, False)
1415 elif base_type == vhdutil.VDI_TYPE_RAW and \
1416 base_metadata.get(HIDDEN_TAG):
1417 self._linstor.update_volume_metadata(
1418 base_uuid, {HIDDEN_TAG: False}
1419 )
1421 # Remove the child nodes.
1422 if snap_uuid and snap_uuid in volume_names:
1423 util.SMlog('Destroying snap {}...'.format(snap_uuid))
1425 try:
1426 self._linstor.destroy_volume(snap_uuid)
1427 except Exception as e:
1428 util.SMlog(
1429 'Cannot destroy snap {} during undo clone: {}'
1430 .format(snap_uuid, e)
1431 )
1433 if vdi_uuid in volume_names:
1434 try:
1435 util.SMlog('Destroying {}...'.format(vdi_uuid))
1436 self._linstor.destroy_volume(vdi_uuid)
1437 except Exception as e:
1438 util.SMlog(
1439 'Cannot destroy VDI {} during undo clone: {}'
1440 .format(vdi_uuid, e)
1441 )
1442 # We can get an exception like this:
1443 # "Shutdown of the DRBD resource 'XXX failed", so the
1444 # volume info remains... The problem is we can't properly rename
1445 # the base VDI below this line, so we must change the
1446 # UUID of this bad VDI first.
1447 self._linstor.update_volume_uuid(
1448 vdi_uuid, 'DELETED_' + vdi_uuid, force=True
1449 )
1451 # Rename!
1452 self._linstor.update_volume_uuid(base_uuid, vdi_uuid)
1454 # Inflate to the right size.
1455 if base_type == vhdutil.VDI_TYPE_VHD:
1456 vdi = self.vdi(vdi_uuid)
1457 volume_size = LinstorVhdUtil.compute_volume_size(vdi.size, vdi.vdi_type)
1458 self._vhdutil.inflate(
1459 self._journaler, vdi_uuid, vdi.path,
1460 volume_size, vdi.capacity
1461 )
1462 self.vdis[vdi_uuid] = vdi
1464 # At this stage, tapdisk and SM vdi will be in paused state. Remove
1465 # flag to facilitate vm deactivate.
1466 vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
1467 self.session.xenapi.VDI.remove_from_sm_config(vdi_ref, 'paused')
1469 util.SMlog('*** INTERRUPTED CLONE OP: rollback success')
1471 # --------------------------------------------------------------------------
1472 # Cache.
1473 # --------------------------------------------------------------------------
1475 def _create_linstor_cache(self):
1476 reconnect = False
1478 def create_cache():
1479 nonlocal reconnect
1480 try:
1481 if reconnect:
1482 self._reconnect()
1483 return self._linstor.get_volumes_with_info()
1484 except Exception as e:
1485 reconnect = True
1486 raise e
1488 self._all_volume_metadata_cache = \
1489 self._linstor.get_volumes_with_metadata()
1490 self._all_volume_info_cache = util.retry(
1491 create_cache,
1492 maxretry=10,
1493 period=3
1494 )
1496 def _destroy_linstor_cache(self):
1497 self._all_volume_info_cache = None
1498 self._all_volume_metadata_cache = None
1500 # --------------------------------------------------------------------------
1501 # Misc.
1502 # --------------------------------------------------------------------------
1504 def _reconnect(self):
1505 controller_uri = get_controller_uri()
1507 self._journaler = LinstorJournaler(
1508 controller_uri, self._group_name, logger=util.SMlog
1509 )
1511 # Try to open the SR if it exists.
1512 # We can repair only if we are on the master AND if
1513 # we are trying to execute an exclusive operation.
1514 # Otherwise we could try to delete a VDI being created or
1515 # during a snapshot. An exclusive op is the guarantee that
1516 # the SR is locked.
1517 self._linstor = LinstorVolumeManager(
1518 controller_uri,
1519 self._group_name,
1520 repair=(
1521 self.is_master() and
1522 self.srcmd.cmd in self.ops_exclusive
1523 ),
1524 logger=util.SMlog
1525 )
1526 self._vhdutil = LinstorVhdUtil(self.session, self._linstor)
1528 def _ensure_space_available(self, amount_needed):
1529 space_available = self._linstor.max_volume_size_allowed
1530 if (space_available < amount_needed):
1531 util.SMlog(
1532 'Not enough space! Free space: {}, need: {}'.format(
1533 space_available, amount_needed
1534 )
1535 )
1536 raise xs_errors.XenError('SRNoSpace')
1538 def _kick_gc(self):
1539 util.SMlog('Kicking GC')
1540 cleanup.start_gc_service(self.uuid)
1542# ==============================================================================
1543# LinstorSr VDI
1544# ==============================================================================
1547class LinstorVDI(VDI.VDI):
1548 # Warning: Not the same values as vhdutil.VDI_TYPE_*.
1549 # These values represent the types given on the command line.
1550 TYPE_RAW = 'raw'
1551 TYPE_VHD = 'vhd'
1553 # Metadata size given to the "S" param of vhd-util create.
1554 # "-S size (MB) for metadata preallocation".
1555 # Increases performance when resize is called.
1556 MAX_METADATA_VIRT_SIZE = 2 * 1024 * 1024
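    # Note: 2 * 1024 * 1024 is interpreted as MB by the "-S" param quoted
    # above, i.e. the VHD metadata is preallocated for up to ~2 TiB of
    # virtual size (assumption based on that vhd-util option description).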
1558 # --------------------------------------------------------------------------
1559 # VDI methods.
1560 # --------------------------------------------------------------------------
1562 @override
1563 def load(self, vdi_uuid) -> None:
1564 self._lock = self.sr.lock
1565 self._exists = True
1566 self._linstor = self.sr._linstor
1568 # Update hidden parent property.
1569 self.hidden = False
1571 def raise_bad_load(e):
1572 util.SMlog(
1573 'Got exception in LinstorVDI.load: {}'.format(e)
1574 )
1575 util.SMlog(traceback.format_exc())
1576 raise xs_errors.XenError(
1577 'VDIUnavailable',
1578 opterr='Could not load {} because: {}'.format(self.uuid, e)
1579 )
1581 # Try to load VDI.
1582 try:
1583 if (
1584 self.sr.srcmd.cmd == 'vdi_attach_from_config' or
1585 self.sr.srcmd.cmd == 'vdi_detach_from_config'
1586 ):
1587 self.vdi_type = vhdutil.VDI_TYPE_RAW
1588 self.path = self.sr.srcmd.params['vdi_path']
1589 else:
1590 self._determine_type_and_path()
1591 self._load_this()
1593 util.SMlog('VDI {} loaded! (path={}, hidden={})'.format(
1594 self.uuid, self.path, self.hidden
1595 ))
1596 except LinstorVolumeManagerError as e:
1597 # 1. It may be a VDI deletion.
1598 if e.code == LinstorVolumeManagerError.ERR_VOLUME_NOT_EXISTS:
1599 if self.sr.srcmd.cmd == 'vdi_delete':
1600 self.deleted = True
1601 return
1603 # 2. Or maybe a creation.
1604 if self.sr.srcmd.cmd == 'vdi_create':
1605 # Set type attribute of VDI parent class.
1606 # We use VHD by default.
1607 self.vdi_type = vhdutil.VDI_TYPE_VHD
1608 self._key_hash = None # Only used in create.
1610 self._exists = False
1611 vdi_sm_config = self.sr.srcmd.params.get('vdi_sm_config')
1612 if vdi_sm_config is not None:
1613 type = vdi_sm_config.get('type')
1614 if type is not None:
1615 if type == self.TYPE_RAW:
1616 self.vdi_type = vhdutil.VDI_TYPE_RAW
1617 elif type == self.TYPE_VHD:
1618 self.vdi_type = vhdutil.VDI_TYPE_VHD
1619 else:
1620 raise xs_errors.XenError(
1621 'VDICreate',
1622 opterr='Invalid VDI type {}'.format(type)
1623 )
1624 if self.vdi_type == vhdutil.VDI_TYPE_VHD:
1625 self._key_hash = vdi_sm_config.get('key_hash')
1627 # For the moment we don't have a path.
1628 self._update_device_name(None)
1629 return
1630 raise_bad_load(e)
1631 except Exception as e:
1632 raise_bad_load(e)
1634 @override
1635 def create(self, sr_uuid, vdi_uuid, size) -> str:
1636 # Usage example:
1637 # xe vdi-create sr-uuid=39a5826b-5a90-73eb-dd09-51e3a116f937
1638 # name-label="linstor-vdi-1" virtual-size=4096MiB sm-config:type=vhd
1640 # 1. Check if we are on the master and if the VDI doesn't exist.
1641 util.SMlog('LinstorVDI.create for {}'.format(self.uuid))
1642 if self._exists:
1643 raise xs_errors.XenError('VDIExists')
1645 assert self.uuid
1646 assert self.ty
1647 assert self.vdi_type
1649 # 2. Compute size and check space available.
1650 size = vhdutil.validate_and_round_vhd_size(int(size))
1651 volume_size = LinstorVhdUtil.compute_volume_size(size, self.vdi_type)
1652 util.SMlog(
1653 'LinstorVDI.create: type={}, vhd-size={}, volume-size={}'
1654 .format(self.vdi_type, size, volume_size)
1655 )
1656 self.sr._ensure_space_available(volume_size)
1658 # 3. Set sm_config attribute of VDI parent class.
1659 self.sm_config = self.sr.srcmd.params['vdi_sm_config']
1661 # 4. Create!
1662 failed = False
1663 try:
1664 volume_name = None
1665 if self.ty == 'ha_statefile':
1666 volume_name = HA_VOLUME_NAME
1667 elif self.ty == 'redo_log':
1668 volume_name = REDO_LOG_VOLUME_NAME
1670 self._linstor.create_volume(
1671 self.uuid,
1672 volume_size,
1673 persistent=False,
1674 volume_name=volume_name,
1675 high_availability=volume_name is not None
1676 )
1677 volume_info = self._linstor.get_volume_info(self.uuid)
1679 self._update_device_name(volume_info.name)
1681 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
1682 self.size = volume_info.virtual_size
1683 else:
1684 self.sr._vhdutil.create(
1685 self.path, size, False, self.MAX_METADATA_VIRT_SIZE
1686 )
1687 self.size = self.sr._vhdutil.get_size_virt(self.uuid)
1689 if self._key_hash:
1690 self.sr._vhdutil.set_key(self.path, self._key_hash)
1692 # Because vhdutil commands modify the volume data,
1693 # we must retrieve the utilization size again.
1694 volume_info = self._linstor.get_volume_info(self.uuid)
1696 volume_metadata = {
1697 NAME_LABEL_TAG: util.to_plain_string(self.label),
1698 NAME_DESCRIPTION_TAG: util.to_plain_string(self.description),
1699 IS_A_SNAPSHOT_TAG: False,
1700 SNAPSHOT_OF_TAG: '',
1701 SNAPSHOT_TIME_TAG: '',
1702 TYPE_TAG: self.ty,
1703 VDI_TYPE_TAG: self.vdi_type,
1704 READ_ONLY_TAG: bool(self.read_only),
1705 METADATA_OF_POOL_TAG: ''
1706 }
1707 self._linstor.set_volume_metadata(self.uuid, volume_metadata)
1709 # Set the open timeout to 1min to reduce CPU usage
1710 # in http-disk-server when a secondary server tries to open
1711 # an already opened volume.
1712 if self.ty == 'ha_statefile' or self.ty == 'redo_log':
1713 self._linstor.set_auto_promote_timeout(self.uuid, 600)
1715 self._linstor.mark_volume_as_persistent(self.uuid)
1716 except util.CommandException as e:
1717 failed = True
1718 raise xs_errors.XenError(
1719 'VDICreate', opterr='error {}'.format(e.code)
1720 )
1721 except Exception as e:
1722 failed = True
1723 raise xs_errors.XenError('VDICreate', opterr='error {}'.format(e))
1724 finally:
1725 if failed:
1726 util.SMlog('Unable to create VDI {}'.format(self.uuid))
1727 try:
1728 self._linstor.destroy_volume(self.uuid)
1729 except Exception as e:
1730 util.SMlog(
1731 'Ignoring exception after fail in LinstorVDI.create: '
1732 '{}'.format(e)
1733 )
1735 self.utilisation = volume_info.allocated_size
1736 self.sm_config['vdi_type'] = self.vdi_type
1738 self.ref = self._db_introduce()
1739 self.sr._update_stats(self.size)
1741 return VDI.VDI.get_params(self)
1743 @override
1744 def delete(self, sr_uuid, vdi_uuid, data_only=False) -> None:
1745 util.SMlog('LinstorVDI.delete for {}'.format(self.uuid))
1746 if self.attached:
1747 raise xs_errors.XenError('VDIInUse')
1749 if self.deleted:
1750 return super(LinstorVDI, self).delete(
1751 sr_uuid, vdi_uuid, data_only
1752 )
1754 vdi_ref = self.sr.srcmd.params['vdi_ref']
1755 if not self.session.xenapi.VDI.get_managed(vdi_ref):
1756 raise xs_errors.XenError(
1757 'VDIDelete',
1758 opterr='Deleting non-leaf node not permitted'
1759 )
1761 try:
1762 # Remove from XAPI and delete from LINSTOR.
1763 self._linstor.destroy_volume(self.uuid)
1764 if not data_only:
1765 self._db_forget()
1767 self.sr.lock.cleanupAll(vdi_uuid)
1768 except Exception as e:
1769 util.SMlog(
1770 'Failed to remove the volume (maybe it is leaf coalescing) '
1771 'for {} err: {}'.format(self.uuid, e)
1772 )
1774 try:
1775 raise xs_errors.XenError('VDIDelete', opterr=str(e))
1776 except LinstorVolumeManagerError as e:
1777 if e.code != LinstorVolumeManagerError.ERR_VOLUME_DESTROY:
1778 raise xs_errors.XenError('VDIDelete', opterr=str(e))
1780 return
1782 if self.uuid in self.sr.vdis:
1783 del self.sr.vdis[self.uuid]
1785 # TODO: Check size after delete.
1786 self.sr._update_stats(-self.size)
1787 self.sr._kick_gc()
1788 return super(LinstorVDI, self).delete(sr_uuid, vdi_uuid, data_only)
1790 @override
1791 def attach(self, sr_uuid, vdi_uuid) -> str:
1792 util.SMlog('LinstorVDI.attach for {}'.format(self.uuid))
1793 attach_from_config = self.sr.srcmd.cmd == 'vdi_attach_from_config'
1794 if (
1795 not attach_from_config or
1796 self.sr.srcmd.params['vdi_uuid'] != self.uuid
1797 ) and self.sr._journaler.has_entries(self.uuid):
1798 raise xs_errors.XenError(
1799 'VDIUnavailable',
1800 opterr='Interrupted operation detected on this VDI, '
1801 'scan SR first to trigger auto-repair'
1802 )
1804 writable = 'args' not in self.sr.srcmd.params or \
1805 self.sr.srcmd.params['args'][0] == 'true'
1807 if not attach_from_config or self.sr.is_master():
1808 # We need to inflate the volume if we don't have enough space
1809 # to mount the VHD image. I.e. the volume capacity must be greater
1810 # than the VHD size + bitmap size.
1811 need_inflate = True
1812 if (
1813 self.vdi_type == vhdutil.VDI_TYPE_RAW or
1814 not writable or
1815 self.capacity >= LinstorVhdUtil.compute_volume_size(self.size, self.vdi_type)
1816 ):
1817 need_inflate = False
1819 if need_inflate:
1820 try:
1821 self._prepare_thin(True)
1822 except Exception as e:
1823 raise xs_errors.XenError(
1824 'VDIUnavailable',
1825 opterr='Failed to attach VDI during "prepare thin": {}'
1826 .format(e)
1827 )
1829 if not hasattr(self, 'xenstore_data'):
1830 self.xenstore_data = {}
1831 self.xenstore_data['storage-type'] = LinstorSR.DRIVER_TYPE
1833 if (
1834 USE_HTTP_NBD_SERVERS and
1835 attach_from_config and
1836 self.path.startswith('/dev/http-nbd/')
1837 ):
1838 return self._attach_using_http_nbd()
1840 # Ensure we have a path...
1841 self.sr._vhdutil.create_chain_paths(self.uuid, readonly=not writable)
1843 self.attached = True
1844 return VDI.VDI.attach(self, self.sr.uuid, self.uuid)
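    # Illustrative sketch (not used by the driver, never called): a standalone
    # restatement of the inflate decision in attach() above, with hypothetical
    # parameter names. A writable VHD volume only needs inflating when its
    # current DRBD capacity cannot hold the full VHD image plus metadata;
    # RAW volumes and read-only attachments are never inflated.
    @staticmethod
    def _example_needs_inflate(is_raw, writable, capacity, required_volume_size):
        if is_raw or not writable:
            return False
        return capacity < required_volume_size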
1846 @override
1847 def detach(self, sr_uuid, vdi_uuid) -> None:
1848 util.SMlog('LinstorVDI.detach for {}'.format(self.uuid))
1849 detach_from_config = self.sr.srcmd.cmd == 'vdi_detach_from_config'
1850 self.attached = False
1852 if detach_from_config and self.path.startswith('/dev/http-nbd/'):
1853 return self._detach_using_http_nbd()
1855 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
1856 return
1858 # The VDI is already deflated if the VHD image size + metadata is
1859 # equal to the LINSTOR volume size.
1860 volume_size = LinstorVhdUtil.compute_volume_size(self.size, self.vdi_type)
1861 already_deflated = self.capacity <= volume_size
1863 if already_deflated:
1864 util.SMlog(
1865 'VDI {} already deflated (capacity={}, required volume size={})'
1866 .format(self.uuid, self.capacity, volume_size)
1867 )
1869 need_deflate = True
1870 if already_deflated:
1871 need_deflate = False
1872 elif self.sr._provisioning == 'thick':
1873 need_deflate = False
1875 vdi_ref = self.sr.srcmd.params['vdi_ref']
1876 if self.session.xenapi.VDI.get_is_a_snapshot(vdi_ref):
1877 need_deflate = True
1879 if need_deflate:
1880 try:
1881 self._prepare_thin(False)
1882 except Exception as e:
1883 raise xs_errors.XenError(
1884 'VDIUnavailable',
1885 opterr='Failed to detach VDI during "prepare thin": {}'
1886 .format(e)
1887 )
1889 # We only remove volumes on slaves: on the master they may still be used by the GC.
1890 if self.sr.is_master():
1891 return
1893 while vdi_uuid:
1894 try:
1895 path = self._linstor.build_device_path(self._linstor.get_volume_name(vdi_uuid))
1896 parent_vdi_uuid = self.sr._vhdutil.get_vhd_info(vdi_uuid).parentUuid
1897 except Exception:
1898 break
1900 if util.pathexists(path):
1901 try:
1902 self._linstor.remove_volume_if_diskless(vdi_uuid)
1903 except Exception as e:
1904 # Ensure we can always detach properly.
1905 # I don't want to corrupt the XAPI info.
1906 util.SMlog('Failed to clean VDI {} during detach: {}'.format(vdi_uuid, e))
1907 vdi_uuid = parent_vdi_uuid
1909 @override
1910 def resize(self, sr_uuid, vdi_uuid, size) -> str:
1911 util.SMlog('LinstorVDI.resize for {}'.format(self.uuid))
1912 if not self.sr.is_master():
1913 raise xs_errors.XenError(
1914 'VDISize',
1915 opterr='resize on slave not allowed'
1916 )
1918 if self.hidden:
1919 raise xs_errors.XenError('VDIUnavailable', opterr='hidden VDI')
1921 # Compute the virtual VHD and DRBD volume size.
1922 size = vhdutil.validate_and_round_vhd_size(int(size))
1923 volume_size = LinstorVhdUtil.compute_volume_size(size, self.vdi_type)
1924 util.SMlog(
1925 'LinstorVDI.resize: type={}, vhd-size={}, volume-size={}'
1926 .format(self.vdi_type, size, volume_size)
1927 )
1929 if size < self.size:
1930 util.SMlog(
1931 'vdi_resize: shrinking not supported: '
1932 '(current size: {}, new size: {})'.format(self.size, size)
1933 )
1934 raise xs_errors.XenError('VDISize', opterr='shrinking not allowed')
1936 if size == self.size:
1937 return VDI.VDI.get_params(self)
1939 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
1940 old_volume_size = self.size
1941 new_volume_size = LinstorVolumeManager.round_up_volume_size(size)
1942 else:
1943 old_volume_size = self.utilisation
1944 if self.sr._provisioning == 'thin':
1945 # VDI is currently deflated, so keep it deflated.
1946 new_volume_size = old_volume_size
1947 else:
1948 new_volume_size = LinstorVhdUtil.compute_volume_size(size, self.vdi_type)
1949 assert new_volume_size >= old_volume_size
1951 space_needed = new_volume_size - old_volume_size
1952 self.sr._ensure_space_available(space_needed)
1954 old_size = self.size
1955 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
1956 self._linstor.resize(self.uuid, new_volume_size)
1957 else:
1958 if new_volume_size != old_volume_size:
1959 self.sr._vhdutil.inflate(
1960 self.sr._journaler, self.uuid, self.path,
1961 new_volume_size, old_volume_size
1962 )
1963 self.sr._vhdutil.set_size_virt_fast(self.path, size)
1965 # Reload size attributes.
1966 self._load_this()
1968 vdi_ref = self.sr.srcmd.params['vdi_ref']
1969 self.session.xenapi.VDI.set_virtual_size(vdi_ref, str(self.size))
1970 self.session.xenapi.VDI.set_physical_utilisation(
1971 vdi_ref, str(self.utilisation)
1972 )
1973 self.sr._update_stats(self.size - old_size)
1974 return VDI.VDI.get_params(self)
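    # Illustrative sketch (not used by the driver, never called): the sizing
    # rule applied by resize() above, restated with hypothetical parameters.
    # The caller passes the current volume footprint and the fully inflated
    # footprint of the new virtual size; only thin-provisioned VHD volumes
    # stay at their current (deflated) footprint, everything else grows now.
    @staticmethod
    def _example_resize_space_needed(
        is_raw, thin_provisioning, old_volume_size, new_full_volume_size
    ):
        if not is_raw and thin_provisioning:
            new_volume_size = old_volume_size  # Keep deflated, inflate on attach.
        else:
            new_volume_size = new_full_volume_size
        assert new_volume_size >= old_volume_size
        return new_volume_size - old_volume_size  # Space to reserve in the SR.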
1976 @override
1977 def clone(self, sr_uuid, vdi_uuid) -> str:
1978 return self._do_snapshot(sr_uuid, vdi_uuid, VDI.SNAPSHOT_DOUBLE)
1980 @override
1981 def compose(self, sr_uuid, vdi1, vdi2) -> None:
1982 util.SMlog('VDI.compose for {} -> {}'.format(vdi2, vdi1))
1983 if self.vdi_type != vhdutil.VDI_TYPE_VHD:
1984 raise xs_errors.XenError('Unimplemented')
1986 parent_uuid = vdi1
1987 parent_path = self._linstor.get_device_path(parent_uuid)
1989 # We must pause tapdisk to correctly change the parent. Otherwise we
1990 # get a read-only error.
1991 # See: https://github.com/xapi-project/xen-api/blob/b3169a16d36dae0654881b336801910811a399d9/ocaml/xapi/storage_migrate.ml#L928-L929
1992 # and: https://github.com/xapi-project/xen-api/blob/b3169a16d36dae0654881b336801910811a399d9/ocaml/xapi/storage_migrate.ml#L775
1994 if not blktap2.VDI.tap_pause(self.session, self.sr.uuid, self.uuid):
1995 raise util.SMException('Failed to pause VDI {}'.format(self.uuid))
1996 try:
1997 self.sr._vhdutil.set_parent(self.path, parent_path, False)
1998 self.sr._vhdutil.set_hidden(parent_path)
1999 self.sr.session.xenapi.VDI.set_managed(
2000 self.sr.srcmd.params['args'][0], False
2001 )
2002 finally:
2003 blktap2.VDI.tap_unpause(self.session, self.sr.uuid, self.uuid)
2005 if not blktap2.VDI.tap_refresh(self.session, self.sr.uuid, self.uuid):
2006 raise util.SMException(
2007 'Failed to refresh VDI {}'.format(self.uuid)
2008 )
2010 util.SMlog('Compose done')
2012 @override
2013 def generate_config(self, sr_uuid, vdi_uuid) -> str:
2014 """
2015 Generate the XML config required to attach and activate
2016 a VDI for use when XAPI is not running. Attach and
2017 activation are handled by vdi_attach_from_config below.
2018 """
2020 util.SMlog('LinstorVDI.generate_config for {}'.format(self.uuid))
2022 resp = {}
2023 resp['device_config'] = self.sr.dconf
2024 resp['sr_uuid'] = sr_uuid
2025 resp['vdi_uuid'] = self.uuid
2026 resp['sr_sm_config'] = self.sr.sm_config
2027 resp['command'] = 'vdi_attach_from_config'
2029 # By default, we generate a normal config.
2030 # But if the disk is persistent (HA statefile or redo-log), we must
2031 # use an HTTP/NBD server to ensure we can always read and write data.
2032 # Why? DRBD is unsafe when used with more than 4 hosts:
2033 # we are limited to 1 diskless and 3 diskful volumes.
2034 # We can't raise this limit, so we use an NBD/HTTP device instead.
2036 volume_name = self._linstor.get_volume_name(self.uuid)
2037 if not USE_HTTP_NBD_SERVERS or volume_name not in [
2038 HA_VOLUME_NAME, REDO_LOG_VOLUME_NAME
2039 ]:
2040 if not self.path or not util.pathexists(self.path):
2041 available = False
2042 # Try to refresh symlink path...
2043 try:
2044 self.path = self._linstor.get_device_path(vdi_uuid)
2045 available = util.pathexists(self.path)
2046 except Exception:
2047 pass
2048 if not available:
2049 raise xs_errors.XenError('VDIUnavailable')
2051 resp['vdi_path'] = self.path
2052 else:
2053 # Axiom: DRBD device is present on at least one host.
2054 resp['vdi_path'] = '/dev/http-nbd/' + volume_name
2056 config = xmlrpc.client.dumps(tuple([resp]), 'vdi_attach_from_config')
2057 return xmlrpc.client.dumps((config,), "", True)
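    # Illustrative sketch (not used by the driver, never called): a minimal
    # round-trip of the XML-RPC encoding used just above. xmlrpc.client.loads
    # is the matching decoder and returns a (params, methodname) pair.
    @staticmethod
    def _example_config_roundtrip():
        resp = {'command': 'vdi_attach_from_config', 'vdi_uuid': 'example-uuid'}
        # Encode the dict as the single parameter of a method call.
        config = xmlrpc.client.dumps((resp,), 'vdi_attach_from_config')
        # Decode it back: params is a tuple, method is the call name.
        params, method = xmlrpc.client.loads(config)
        assert method == 'vdi_attach_from_config'
        assert params[0]['vdi_uuid'] == 'example-uuid'
        return config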
2059 @override
2060 def attach_from_config(self, sr_uuid, vdi_uuid) -> str:
2061 """
2062 Attach and activate a VDI using config generated by
2063 vdi_generate_config above. This is used for cases such as
2064 the HA state-file and the redo-log.
2065 """
2067 util.SMlog('LinstorVDI.attach_from_config for {}'.format(vdi_uuid))
2069 try:
2070 if not util.pathexists(self.sr.path):
2071 self.sr.attach(sr_uuid)
2073 if not DRIVER_CONFIG['ATTACH_FROM_CONFIG_WITH_TAPDISK']:
2074 return self.attach(sr_uuid, vdi_uuid)
2075 except Exception:
2076 util.logException('LinstorVDI.attach_from_config')
2077 raise xs_errors.XenError(
2078 'SRUnavailable',
2079 opterr='Unable to attach from config'
2080 )
2081 return ''
2083 def reset_leaf(self, sr_uuid, vdi_uuid):
2084 if self.vdi_type != vhdutil.VDI_TYPE_VHD:
2085 raise xs_errors.XenError('Unimplemented')
2087 if not self.sr._vhdutil.has_parent(self.uuid):
2088 raise util.SMException(
2089 'ERROR: VDI {} has no parent, will not reset contents'
2090 .format(self.uuid)
2091 )
2093 self.sr._vhdutil.kill_data(self.path)
2095 def _load_this(self):
2096 volume_metadata = None
2097 if self.sr._all_volume_metadata_cache:
2098 volume_metadata = self.sr._all_volume_metadata_cache.get(self.uuid)
2099 if volume_metadata is None:
2100 volume_metadata = self._linstor.get_volume_metadata(self.uuid)
2102 volume_info = None
2103 if self.sr._all_volume_info_cache:
2104 volume_info = self.sr._all_volume_info_cache.get(self.uuid)
2105 if volume_info is None:
2106 volume_info = self._linstor.get_volume_info(self.uuid)
2108 # Contains the max physical size used on a disk.
2109 # When the LINSTOR LVM driver is used, this size should be close to the
2110 # virtual size (i.e. the LINSTOR max volume size).
2111 # When the LINSTOR thin LVM driver is used, the physical size should be
2112 # lower than the virtual size at creation time.
2113 # The physical size then grows with each write to a new block.
2114 self.utilisation = volume_info.allocated_size
2115 self.capacity = volume_info.virtual_size
2117 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
2118 self.hidden = int(volume_metadata.get(HIDDEN_TAG) or 0)
2119 self.size = volume_info.virtual_size
2120 self.parent = ''
2121 else:
2122 vhd_info = self.sr._vhdutil.get_vhd_info(self.uuid)
2123 self.hidden = vhd_info.hidden
2124 self.size = vhd_info.sizeVirt
2125 self.parent = vhd_info.parentUuid
2127 if self.hidden:
2128 self.managed = False
2130 self.label = volume_metadata.get(NAME_LABEL_TAG) or ''
2131 self.description = volume_metadata.get(NAME_DESCRIPTION_TAG) or ''
2133 # Update sm_config_override of VDI parent class.
2134 self.sm_config_override = {'vhd-parent': self.parent or None}
2136 def _mark_hidden(self, hidden=True):
2137 if self.hidden == hidden:
2138 return
2140 if self.vdi_type == vhdutil.VDI_TYPE_VHD:
2141 self.sr._vhdutil.set_hidden(self.path, hidden)
2142 else:
2143 self._linstor.update_volume_metadata(self.uuid, {
2144 HIDDEN_TAG: hidden
2145 })
2146 self.hidden = hidden
2148 @override
2149 def update(self, sr_uuid, vdi_uuid) -> None:
2150 xenapi = self.session.xenapi
2151 vdi_ref = xenapi.VDI.get_by_uuid(self.uuid)
2153 volume_metadata = {
2154 NAME_LABEL_TAG: util.to_plain_string(
2155 xenapi.VDI.get_name_label(vdi_ref)
2156 ),
2157 NAME_DESCRIPTION_TAG: util.to_plain_string(
2158 xenapi.VDI.get_name_description(vdi_ref)
2159 )
2160 }
2162 try:
2163 self._linstor.update_volume_metadata(self.uuid, volume_metadata)
2164 except LinstorVolumeManagerError as e:
2165 if e.code == LinstorVolumeManagerError.ERR_VOLUME_NOT_EXISTS:
2166 raise xs_errors.XenError(
2167 'VDIUnavailable',
2168 opterr='LINSTOR volume {} not found'.format(self.uuid)
2169 )
2170 raise xs_errors.XenError('VDIUnavailable', opterr=str(e))
2172 # --------------------------------------------------------------------------
2173 # Thin provisioning.
2174 # --------------------------------------------------------------------------
2176 def _prepare_thin(self, attach):
2177 if self.sr.is_master():
2178 if attach:
2179 attach_thin(
2180 self.session, self.sr._journaler, self._linstor,
2181 self.sr.uuid, self.uuid
2182 )
2183 else:
2184 detach_thin(
2185 self.session, self._linstor, self.sr.uuid, self.uuid
2186 )
2187 else:
2188 fn = 'attach' if attach else 'detach'
2190 master = util.get_master_ref(self.session)
2192 args = {
2193 'groupName': self.sr._group_name,
2194 'srUuid': self.sr.uuid,
2195 'vdiUuid': self.uuid
2196 }
2198 try:
2199 self.sr._exec_manager_command(master, fn, args, 'VDIUnavailable')
2200 except Exception:
2201 if fn != 'detach':
2202 raise
2204 # Reload size attrs after inflate or deflate!
2205 self._load_this()
2206 self.sr._update_physical_size()
2208 vdi_ref = self.sr.srcmd.params['vdi_ref']
2209 self.session.xenapi.VDI.set_physical_utilisation(
2210 vdi_ref, str(self.utilisation)
2211 )
2213 self.session.xenapi.SR.set_physical_utilisation(
2214 self.sr.sr_ref, str(self.sr.physical_utilisation)
2215 )
2217 # --------------------------------------------------------------------------
2218 # Generic helpers.
2219 # --------------------------------------------------------------------------
2221 def _determine_type_and_path(self):
2222 """
2223 Determine whether this is a RAW or a VHD VDI.
2224 """
2226 # 1. Check vdi_ref and vdi_type in config.
2227 try:
2228 vdi_ref = self.session.xenapi.VDI.get_by_uuid(self.uuid)
2229 if vdi_ref:
2230 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
2231 vdi_type = sm_config.get('vdi_type')
2232 if vdi_type:
2233 # Update parent fields.
2234 self.vdi_type = vdi_type
2235 self.sm_config_override = sm_config
2236 self._update_device_name(
2237 self._linstor.get_volume_name(self.uuid)
2238 )
2239 return
2240 except Exception:
2241 pass
2243 # 2. Otherwise use the LINSTOR volume manager directly.
2244 # It's probably a new VDI created via snapshot.
2245 volume_metadata = self._linstor.get_volume_metadata(self.uuid)
2246 self.vdi_type = volume_metadata.get(VDI_TYPE_TAG)
2247 if not self.vdi_type:
2248 raise xs_errors.XenError(
2249 'VDIUnavailable',
2250 opterr='failed to get vdi_type in metadata'
2251 )
2252 self._update_device_name(self._linstor.get_volume_name(self.uuid))
2254 def _update_device_name(self, device_name):
2255 self._device_name = device_name
2257 # Mark path of VDI parent class.
2258 if device_name:
2259 self.path = self._linstor.build_device_path(self._device_name)
2260 else:
2261 self.path = None
2263 def _create_snapshot(self, snap_uuid, snap_of_uuid=None):
2264 """
2265 Snapshot self and return the snapshot VDI object.
2266 """
2268 # 1. Create a new LINSTOR volume with the same size as self.
2269 snap_path = self._linstor.shallow_clone_volume(
2270 self.uuid, snap_uuid, persistent=False
2271 )
2273 # 2. Write the snapshot content.
2274 is_raw = (self.vdi_type == vhdutil.VDI_TYPE_RAW)
2275 self.sr._vhdutil.snapshot(
2276 snap_path, self.path, is_raw, self.MAX_METADATA_VIRT_SIZE
2277 )
2279 # 3. Get snapshot parent.
2280 snap_parent = self.sr._vhdutil.get_parent(snap_uuid)
2282 # 4. Update metadata.
2283 util.SMlog('Setting snapshot metadata on VDI {}'.format(snap_uuid))
2284 volume_metadata = {
2285 NAME_LABEL_TAG: util.to_plain_string(self.label),
2286 NAME_DESCRIPTION_TAG: util.to_plain_string(self.description),
2287 IS_A_SNAPSHOT_TAG: bool(snap_of_uuid),
2288 SNAPSHOT_OF_TAG: snap_of_uuid,
2289 SNAPSHOT_TIME_TAG: '',
2290 TYPE_TAG: self.ty,
2291 VDI_TYPE_TAG: vhdutil.VDI_TYPE_VHD,
2292 READ_ONLY_TAG: False,
2293 METADATA_OF_POOL_TAG: ''
2294 }
2295 self._linstor.set_volume_metadata(snap_uuid, volume_metadata)
2297 # 5. Set size.
2298 snap_vdi = LinstorVDI(self.sr, snap_uuid)
2299 if not snap_vdi._exists:
2300 raise xs_errors.XenError('VDISnapshot')
2302 volume_info = self._linstor.get_volume_info(snap_uuid)
2304 snap_vdi.size = self.sr._vhdutil.get_size_virt(snap_uuid)
2305 snap_vdi.utilisation = volume_info.allocated_size
2307 # 6. Update sm config.
2308 snap_vdi.sm_config = {}
2309 snap_vdi.sm_config['vdi_type'] = snap_vdi.vdi_type
2310 if snap_parent:
2311 snap_vdi.sm_config['vhd-parent'] = snap_parent
2312 snap_vdi.parent = snap_parent
2314 snap_vdi.label = self.label
2315 snap_vdi.description = self.description
2317 self._linstor.mark_volume_as_persistent(snap_uuid)
2319 return snap_vdi
2321 # --------------------------------------------------------------------------
2322 # Implement specific SR methods.
2323 # --------------------------------------------------------------------------
2325 @override
2326 def _rename(self, oldpath, newpath) -> None:
2327 # TODO: I'm not sure... Used by CBT.
2328 volume_uuid = self._linstor.get_volume_uuid_from_device_path(oldpath)
2329 self._linstor.update_volume_name(volume_uuid, newpath)
2331 @override
2332 def _do_snapshot(self, sr_uuid, vdi_uuid, snapType,
2333 cloneOp=False, secondary=None, cbtlog=None) -> str:
2334 # If CBT is enabled, save the file consistency state.
2335 if cbtlog is not None:
2336 if blktap2.VDI.tap_status(self.session, vdi_uuid):
2337 consistency_state = False
2338 else:
2339 consistency_state = True
2340 util.SMlog(
2341 'Saving log consistency state of {} for vdi: {}'
2342 .format(consistency_state, vdi_uuid)
2343 )
2344 else:
2345 consistency_state = None
2347 if self.vdi_type != vhdutil.VDI_TYPE_VHD:
2348 raise xs_errors.XenError('Unimplemented')
2350 if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid):
2351 raise util.SMException('Failed to pause VDI {}'.format(vdi_uuid))
2352 try:
2353 return self._snapshot(snapType, cbtlog, consistency_state)
2354 finally:
2355 self.disable_leaf_on_secondary(vdi_uuid, secondary=secondary)
2356 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary)
2358 def _snapshot(self, snap_type, cbtlog=None, cbt_consistency=None):
2359 util.SMlog(
2360 'LinstorVDI._snapshot for {} (type {})'
2361 .format(self.uuid, snap_type)
2362 )
2364 # 1. Checks...
2365 if self.hidden:
2366 raise xs_errors.XenError('VDIClone', opterr='hidden VDI')
2368 depth = self.sr._vhdutil.get_depth(self.uuid)
2369 if depth == -1:
2370 raise xs_errors.XenError(
2371 'VDIUnavailable',
2372 opterr='failed to get VHD depth'
2373 )
2374 elif depth >= vhdutil.MAX_CHAIN_SIZE:
2375 raise xs_errors.XenError('SnapshotChainTooLong')
2377 # Ensure we have a valid path if we don't have a local diskful.
2378 self.sr._vhdutil.create_chain_paths(self.uuid, readonly=True)
2380 volume_path = self.path
2381 if not util.pathexists(volume_path):
2382 raise xs_errors.XenError(
2383 'EIO',
2384 opterr='IO error checking path {}'.format(volume_path)
2385 )
2387 # 2. Create base and snap uuid (if required) and a journal entry.
2388 base_uuid = util.gen_uuid()
2389 snap_uuid = None
2391 if snap_type == VDI.SNAPSHOT_DOUBLE:
2392 snap_uuid = util.gen_uuid()
2394 clone_info = '{}_{}'.format(base_uuid, snap_uuid)
2396 active_uuid = self.uuid
2397 self.sr._journaler.create(
2398 LinstorJournaler.CLONE, active_uuid, clone_info
2399 )
2401 try:
2402 # 3. Self becomes the new base.
2403 # The device path remains the same.
2404 self._linstor.update_volume_uuid(self.uuid, base_uuid)
2405 self.uuid = base_uuid
2406 self.location = self.uuid
2407 self.read_only = True
2408 self.managed = False
2410 # 4. Create snapshots (new active and snap).
2411 active_vdi = self._create_snapshot(active_uuid)
2413 snap_vdi = None
2414 if snap_type == VDI.SNAPSHOT_DOUBLE:
2415 snap_vdi = self._create_snapshot(snap_uuid, active_uuid)
2417 self.label = 'base copy'
2418 self.description = ''
2420 # 5. Mark the base VDI as hidden so that it does not show up
2421 # in subsequent scans.
2422 self._mark_hidden()
2423 self._linstor.update_volume_metadata(
2424 self.uuid, {READ_ONLY_TAG: True}
2425 )
2427 # 6. We must update the new active VDI with the "paused" and
2428 # "host_" properties. Why? Because the original VDI has been
2429 # paused and we must unpause it after the snapshot.
2430 # See: `tap_unpause` in `blktap2.py`.
2431 vdi_ref = self.session.xenapi.VDI.get_by_uuid(active_uuid)
2432 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
2433 for key in [x for x in sm_config.keys() if x == 'paused' or x.startswith('host_')]:
2434 active_vdi.sm_config[key] = sm_config[key]
2436 # 7. Verify parent locator field of both children and
2437 # delete base if unused.
2438 introduce_parent = True
2439 try:
2440 snap_parent = None
2441 if snap_vdi:
2442 snap_parent = snap_vdi.parent
2444 if active_vdi.parent != self.uuid and (
2445 snap_type == VDI.SNAPSHOT_SINGLE or
2446 snap_type == VDI.SNAPSHOT_INTERNAL or
2447 snap_parent != self.uuid
2448 ):
2449 util.SMlog(
2450 'Destroy unused base volume: {} (path={})'
2451 .format(self.uuid, self.path)
2452 )
2453 introduce_parent = False
2454 self._linstor.destroy_volume(self.uuid)
2455 except Exception as e:
2456 util.SMlog('Ignoring exception: {}'.format(e))
2459 # 8. Introduce the new VDI records.
2460 if snap_vdi:
2461 # If the parent is encrypted set the key_hash for the
2462 # new snapshot disk.
2463 vdi_ref = self.sr.srcmd.params['vdi_ref']
2464 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
2465 # TODO: Maybe remove key_hash support.
2466 if 'key_hash' in sm_config:
2467 snap_vdi.sm_config['key_hash'] = sm_config['key_hash']
2468 # If we have CBT enabled on the VDI,
2469 # set CBT status for the new snapshot disk.
2470 if cbtlog:
2471 snap_vdi.cbt_enabled = True
2473 if snap_vdi:
2474 snap_vdi_ref = snap_vdi._db_introduce()
2475 util.SMlog(
2476 'vdi_clone: introduced VDI: {} ({})'
2477 .format(snap_vdi_ref, snap_vdi.uuid)
2478 )
2479 if introduce_parent:
2480 base_vdi_ref = self._db_introduce()
2481 self.session.xenapi.VDI.set_managed(base_vdi_ref, False)
2482 util.SMlog(
2483 'vdi_clone: introduced VDI: {} ({})'
2484 .format(base_vdi_ref, self.uuid)
2485 )
2486 self._linstor.update_volume_metadata(self.uuid, {
2487 NAME_LABEL_TAG: util.to_plain_string(self.label),
2488 NAME_DESCRIPTION_TAG: util.to_plain_string(
2489 self.description
2490 ),
2491 READ_ONLY_TAG: True,
2492 METADATA_OF_POOL_TAG: ''
2493 })
2495 # 9. Update CBT files if the user created a snapshot (SNAPSHOT_DOUBLE).
2496 if snap_type == VDI.SNAPSHOT_DOUBLE and cbtlog:
2497 try:
2498 self._cbt_snapshot(snap_uuid, cbt_consistency)
2499 except Exception:
2500 # CBT operation failed.
2501 # TODO: Implement me.
2502 raise
2504 if snap_type != VDI.SNAPSHOT_INTERNAL:
2505 self.sr._update_stats(self.size)
2507 # 10. Return info on the new user-visible leaf VDI.
2508 ret_vdi = snap_vdi
2509 if not ret_vdi:
2510 ret_vdi = self
2511 if not ret_vdi:
2512 ret_vdi = active_vdi
2514 vdi_ref = self.sr.srcmd.params['vdi_ref']
2515 self.session.xenapi.VDI.set_sm_config(
2516 vdi_ref, active_vdi.sm_config
2517 )
2518 except Exception as e:
2519 util.logException('Failed to snapshot!')
2520 try:
2521 self.sr._handle_interrupted_clone(
2522 active_uuid, clone_info, force_undo=True
2523 )
2524 self.sr._journaler.remove(LinstorJournaler.CLONE, active_uuid)
2525 except Exception as clean_error:
2526 util.SMlog(
2527 'WARNING: Failed to clean up failed snapshot: {}'
2528 .format(clean_error)
2529 )
2530 raise xs_errors.XenError('VDIClone', opterr=str(e))
2532 self.sr._journaler.remove(LinstorJournaler.CLONE, active_uuid)
2534 return ret_vdi.get_params()
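    # Illustrative sketch (not used by the driver, never called): the
    # journal-protected pattern used by _snapshot above, with a plain dict
    # standing in for LinstorJournaler and hypothetical callables for the
    # clone/undo steps. The entry is written before the multi-step operation,
    # removed on success, and used to undo the work (now or at a later SR
    # scan) if anything fails in between.
    @staticmethod
    def _example_journaled_clone(journal, active_uuid, clone_info, do_clone, undo_clone):
        journal[('clone', active_uuid)] = clone_info
        try:
            result = do_clone()
        except Exception:
            try:
                undo_clone()
                journal.pop(('clone', active_uuid), None)
            except Exception:
                # Keep the entry so a later SR scan can retry the undo.
                pass
            raise
        journal.pop(('clone', active_uuid))
        return result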
2536 @staticmethod
2537 def _start_persistent_http_server(volume_name):
2538 pid_path = None
2539 http_server = None
2541 try:
2542 if volume_name == HA_VOLUME_NAME:
2543 port = '8076'
2544 else:
2545 port = '8077'
2547 try:
2548 # Use a timeout call because XAPI may be unusable on startup
2549 # or if the host has been ejected; in those cases the call could
2550 # block indefinitely.
2551 session = util.timeout_call(5, util.get_localAPI_session)
2552 host_ip = util.get_this_host_address(session)
2553 except Exception:
2554 # Fall back to the XHA config file if no session is available.
2555 host_ip, _ = get_ips_from_xha_config_file()
2556 if not host_ip:
2557 raise Exception(
2558 'Cannot start persistent HTTP server: no XAPI session, nor XHA config file'
2559 )
2561 arguments = [
2562 'http-disk-server',
2563 '--disk',
2564 '/dev/drbd/by-res/{}/0'.format(volume_name),
2565 '--ip',
2566 host_ip,
2567 '--port',
2568 port
2569 ]
2571 util.SMlog('Starting {} on port {}...'.format(arguments[0], port))
2572 http_server = subprocess.Popen(
2573 [FORK_LOG_DAEMON] + arguments,
2574 stdout=subprocess.PIPE,
2575 stderr=subprocess.STDOUT,
2576 universal_newlines=True,
2577 # Ensure we use another process group id so we can kill this
2578 # process without touching the current one.
2579 preexec_fn=os.setsid
2580 )
2582 pid_path = '/run/http-server-{}.pid'.format(volume_name)
2583 with open(pid_path, 'w') as pid_file:
2584 pid_file.write(str(http_server.pid))
2586 reg_server_ready = re.compile("Server ready!$")
2587 def is_ready():
2588 while http_server.poll() is None:
2589 line = http_server.stdout.readline()
2590 if reg_server_ready.search(line):
2591 return True
2592 return False
2593 try:
2594 if not util.timeout_call(10, is_ready):
2595 raise Exception('HTTP server did not report readiness (unexpected output)')
2596 except util.TimeoutException:
2597 raise Exception('HTTP server did not start within the given delay')
2598 except Exception as e:
2599 if pid_path:
2600 try:
2601 os.remove(pid_path)
2602 except Exception:
2603 pass
2605 if http_server:
2606 # Kill process and children in this case...
2607 try:
2608 os.killpg(os.getpgid(http_server.pid), signal.SIGTERM)
2609 except Exception:
2610 pass
2612 raise xs_errors.XenError(
2613 'VDIUnavailable',
2614 opterr='Failed to start http-server: {}'.format(e)
2615 )
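    # Illustrative sketch (not used by the driver, never called): the
    # process-group trick used by both server launchers. Starting the child
    # with os.setsid gives it its own session and process group, so the whole
    # group (server plus any children) can be terminated later without
    # touching our own group.
    @staticmethod
    def _example_process_group_kill():
        child = subprocess.Popen(['sleep', '60'], preexec_fn=os.setsid)
        try:
            # The child's process group differs from ours.
            assert os.getpgid(child.pid) != os.getpgid(os.getpid())
        finally:
            # Terminate the child's whole process group.
            os.killpg(os.getpgid(child.pid), signal.SIGTERM)
            child.wait()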
2617 def _start_persistent_nbd_server(self, volume_name):
2618 pid_path = None
2619 nbd_path = None
2620 nbd_server = None
2622 try:
2623 # We use a precomputed device size.
2624 # So if the XAPI is modified, we must update these values!
2625 if volume_name == HA_VOLUME_NAME:
2626 # See: https://github.com/xapi-project/xen-api/blob/703479fa448a8d7141954bb6e8964d8e25c4ac2e/ocaml/xapi/xha_statefile.ml#L32-L37
2627 port = '8076'
2628 device_size = 4 * 1024 * 1024
2629 else:
2630 # See: https://github.com/xapi-project/xen-api/blob/703479fa448a8d7141954bb6e8964d8e25c4ac2e/ocaml/database/redo_log.ml#L41-L44
2631 port = '8077'
2632 device_size = 256 * 1024 * 1024
2634 try:
2635 session = util.timeout_call(5, util.get_localAPI_session)
2636 ips = util.get_host_addresses(session)
2637 except Exception as e:
2638 _, ips = get_ips_from_xha_config_file()
2639 if not ips:
2640 raise Exception(
2641 'Cannot start persistent NBD server: no XAPI session, nor XHA config file ({})'.format(e)
2642 )
2643 ips = ips.values()
2645 arguments = [
2646 'nbd-http-server',
2647 '--socket-path',
2648 '/run/{}.socket'.format(volume_name),
2649 '--nbd-name',
2650 volume_name,
2651 '--urls',
2652 ','.join(['http://' + ip + ':' + port for ip in ips]),
2653 '--device-size',
2654 str(device_size)
2655 ]
2657 util.SMlog('Starting {} using port {}...'.format(arguments[0], port))
2658 nbd_server = subprocess.Popen(
2659 [FORK_LOG_DAEMON] + arguments,
2660 stdout=subprocess.PIPE,
2661 stderr=subprocess.STDOUT,
2662 universal_newlines=True,
2663 # Ensure we use another process group id so we can kill this
2664 # process without touching the current one.
2665 preexec_fn=os.setsid
2666 )
2668 pid_path = '/run/nbd-server-{}.pid'.format(volume_name)
2669 with open(pid_path, 'w') as pid_file:
2670 pid_file.write(str(nbd_server.pid))
2672 reg_nbd_path = re.compile("NBD `(/dev/nbd[0-9]+)` is now attached.$")
2673 def get_nbd_path():
2674 while nbd_server.poll() is None:
2675 line = nbd_server.stdout.readline()
2676 match = reg_nbd_path.search(line)
2677 if match:
2678 return match.group(1)
2679 # Use a timeout to never block the smapi if there is a problem.
2680 try:
2681 nbd_path = util.timeout_call(10, get_nbd_path)
2682 if nbd_path is None:
2683 raise Exception('Empty NBD path (NBD server is probably dead)')
2684 except util.TimeoutException:
2685 raise Exception('Unable to read NBD path')
2687 util.SMlog('Create symlink: {} -> {}'.format(self.path, nbd_path))
2688 os.symlink(nbd_path, self.path)
2689 except Exception as e:
2690 if pid_path:
2691 try:
2692 os.remove(pid_path)
2693 except Exception:
2694 pass
2696 if nbd_path:
2697 try:
2698 os.remove(nbd_path)
2699 except Exception:
2700 pass
2702 if nbd_server:
2703 # Kill process and children in this case...
2704 try:
2705 os.killpg(os.getpgid(nbd_server.pid), signal.SIGTERM)
2706 except Exception:
2707 pass
2709 raise xs_errors.XenError(
2710 'VDIUnavailable',
2711 opterr='Failed to start nbd-server: {}'.format(e)
2712 )
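    # Illustrative sketch (not used by the driver, never called): the
    # stdout-scanning pattern used above to discover the NBD device path,
    # demonstrated against a fake one-line "server". The real code wraps the
    # loop in util.timeout_call so a hung server cannot block the SMAPI.
    @staticmethod
    def _example_scan_nbd_path():
        reg_nbd_path = re.compile("NBD `(/dev/nbd[0-9]+)` is now attached.$")
        fake_server = subprocess.Popen(
            ['echo', 'NBD `/dev/nbd0` is now attached.'],
            stdout=subprocess.PIPE,
            universal_newlines=True
        )
        nbd_path = None
        for line in fake_server.stdout:
            match = reg_nbd_path.search(line)
            if match:
                nbd_path = match.group(1)
                break
        fake_server.wait()
        return nbd_path  # '/dev/nbd0' in this example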
2714 @classmethod
2715 def _kill_persistent_server(cls, server_type, volume_name, sig):
2716 try:
2717 path = '/run/{}-server-{}.pid'.format(server_type, volume_name)
2718 if not os.path.exists(path):
2719 return
2721 pid = None
2722 with open(path, 'r') as pid_file:
2723 try:
2724 pid = int(pid_file.read())
2725 except Exception:
2726 pass
2728 if pid is not None and util.check_pid_exists(pid):
2729 util.SMlog('Kill {} server {} (pid={})'.format(server_type, path, pid))
2730 try:
2731 os.killpg(os.getpgid(pid), sig)
2732 except Exception as e:
2733 util.SMlog('Failed to kill {} server: {}'.format(server_type, e))
2735 os.remove(path)
2736 except Exception:
2737 pass
2739 @classmethod
2740 def _kill_persistent_http_server(cls, volume_name, sig=signal.SIGTERM):
2741 return cls._kill_persistent_server('http', volume_name, sig)
2743 @classmethod
2744 def _kill_persistent_nbd_server(cls, volume_name, sig=signal.SIGTERM):
2745 return cls._kill_persistent_server('nbd', volume_name, sig)
2747 def _check_http_nbd_volume_name(self):
2748 volume_name = self.path[len('/dev/http-nbd/'):]
2749 if volume_name not in [
2750 HA_VOLUME_NAME, REDO_LOG_VOLUME_NAME
2751 ]:
2752 raise xs_errors.XenError(
2753 'VDIUnavailable',
2754 opterr='Unsupported path: {}'.format(self.path)
2755 )
2756 return volume_name
2758 def _attach_using_http_nbd(self):
2759 volume_name = self._check_http_nbd_volume_name()
2761 # Ensure there is no NBD and HTTP server running.
2762 self._kill_persistent_nbd_server(volume_name)
2763 self._kill_persistent_http_server(volume_name)
2765 # 0. Fetch drbd path.
2766 must_get_device_path = True
2767 if not self.sr.is_master():
2768 # We are on a slave: check whether a diskful replica exists locally.
2769 try:
2770 volume_info = self._linstor.get_volume_info(self.uuid)
2771 except Exception as e:
2772 raise xs_errors.XenError(
2773 'VDIUnavailable',
2774 opterr='Cannot get volume info of {}: {}'
2775 .format(self.uuid, e)
2776 )
2778 hostname = socket.gethostname()
2779 must_get_device_path = hostname in volume_info.diskful
2781 drbd_path = None
2782 if must_get_device_path or self.sr.is_master():
2783 # If we are master, we must ensure we have a diskless
2784 # or diskful volume available to init HA.
2785 # It also avoids this error in xensource.log
2786 # (/usr/libexec/xapi/cluster-stack/xhad/ha_set_pool_state):
2787 # init exited with code 8 [stdout = ''; stderr = 'SF: failed to write in State-File \x10 (fd 4208696). (sys 28)\x0A']
2788 # init returned MTC_EXIT_CAN_NOT_ACCESS_STATEFILE (State-File is inaccessible)
2789 available = False
2790 try:
2791 drbd_path = self._linstor.get_device_path(self.uuid)
2792 available = util.pathexists(drbd_path)
2793 except Exception:
2794 pass
2796 if not available:
2797 raise xs_errors.XenError(
2798 'VDIUnavailable',
2799 opterr='Cannot get device path of {}'.format(self.uuid)
2800 )
2802 # 1. Prepare http-nbd folder.
2803 try:
2804 if not os.path.exists('/dev/http-nbd/'):
2805 os.makedirs('/dev/http-nbd/')
2806 elif os.path.islink(self.path):
2807 os.remove(self.path)
2808 except OSError as e:
2809 if e.errno != errno.EEXIST:
2810 raise xs_errors.XenError(
2811 'VDIUnavailable',
2812 opterr='Cannot prepare http-nbd: {}'.format(e)
2813 )
2815 # 2. Start HTTP service if we have a diskful or if we are master.
2816 http_service = None
2817 if drbd_path:
2818 assert(drbd_path in (
2819 '/dev/drbd/by-res/{}/0'.format(HA_VOLUME_NAME),
2820 '/dev/drbd/by-res/{}/0'.format(REDO_LOG_VOLUME_NAME)
2821 ))
2822 self._start_persistent_http_server(volume_name)
2824 # 3. Start NBD server in all cases.
2825 try:
2826 self._start_persistent_nbd_server(volume_name)
2827 except Exception as e:
2828 if drbd_path:
2829 self._kill_persistent_http_server(volume_name)
2830 raise
2832 self.attached = True
2833 return VDI.VDI.attach(self, self.sr.uuid, self.uuid)
2835 def _detach_using_http_nbd(self):
2836 volume_name = self._check_http_nbd_volume_name()
2837 self._kill_persistent_nbd_server(volume_name)
2838 self._kill_persistent_http_server(volume_name)
2840# ------------------------------------------------------------------------------
2843if __name__ == '__main__':
2844 def run():
2845 SRCommand.run(LinstorSR, DRIVER_INFO)
2847 if not TRACE_PERFS:
2848 run()
2849 else:
2850 util.make_profile('LinstorSR', run)
2851else:
2852 SR.registerSR(LinstorSR)