Coverage for drivers/lvhdutil.py: 23%

#!/usr/bin/python3
#
# Copyright (C) Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; version 2.1 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
18"""Helper functions for LVHD SR. This module knows about RAW and VHD VDI's
19that live in LV's."""
import os
import sys
import time

import util
import vhdutil
from lock import Lock
from refcounter import RefCounter

MSIZE_MB = 2 * 1024 * 1024  # max virt size for fast resize
MSIZE = int(MSIZE_MB * 1024 * 1024)
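
# MSIZE_MB is expressed in MiB, so MSIZE works out to 2 TiB in bytes. This is
# the ceiling for which the VHD metadata area is preallocated (see
# calcSizeVHDLV below), which is what makes online VDI.resize fast.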

VG_LOCATION = "/dev"
VG_PREFIX = "VG_XenStorage-"
LVM_SIZE_INCREMENT = 4 * 1024 * 1024

LV_PREFIX = {
    vhdutil.VDI_TYPE_VHD: "VHD-",
    vhdutil.VDI_TYPE_RAW: "LV-",
}
VDI_TYPES = [vhdutil.VDI_TYPE_VHD, vhdutil.VDI_TYPE_RAW]
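
# Naming illustration: a VHD-based VDI with UUID <vdi_uuid> lives in an LV
# called "VHD-<vdi_uuid>", a raw VDI in "LV-<vdi_uuid>"; matchLV() below
# reverses this mapping.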

JRN_INFLATE = "inflate"

JVHD_TAG = "jvhd"

LOCK_RETRY_ATTEMPTS = 20

# ref counting for VDIs: we need a ref count for LV activation/deactivation
# on the master
NS_PREFIX_LVM = "lvm-"


class VDIInfo:
    uuid = ""
    scanError = False
    vdiType = None
    lvName = ""
    sizeLV = -1
    sizeVirt = -1
    lvActive = False
    lvOpen = False
    lvReadonly = False
    hidden = False
    parentUuid = ""

    def __init__(self, uuid):
        self.uuid = uuid
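
# Note: VDIInfo is a plain record type; the class attributes above are only
# defaults that the scanning code below overwrites per instance.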


def matchLV(lvName):
    """given LV name, return the VDI type and the UUID, or (None, None)
    if the name doesn't match any known type"""
    for vdiType in VDI_TYPES:
        prefix = LV_PREFIX[vdiType]
        if lvName.startswith(prefix):
            return (vdiType, lvName.replace(prefix, ""))
    return (None, None)
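
# For example, matchLV("VHD-<uuid>") returns (vhdutil.VDI_TYPE_VHD, "<uuid>"),
# while an LV name carrying neither prefix returns (None, None).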


def extractUuid(path):
    uuid = os.path.basename(path)
    if uuid.startswith(VG_PREFIX):
        # we are dealing with realpath
        uuid = uuid.replace("--", "-")
        uuid = uuid.replace(VG_PREFIX, "")
    for t in VDI_TYPES:
        if uuid.find(LV_PREFIX[t]) != -1:
            uuid = uuid.split(LV_PREFIX[t])[-1]
            uuid = uuid.strip()
            # TODO: validate UUID format
            return uuid
    return None
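
# Example: for "/dev/VG_XenStorage-<sr_uuid>/VHD-<vdi_uuid>" the basename is
# "VHD-<vdi_uuid>" and extractUuid() returns "<vdi_uuid>". The "--" -> "-"
# normalisation above handles realpaths under /dev/mapper, where device-mapper
# escapes dashes in the VG and LV names by doubling them.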


def calcSizeLV(sizeVHD):
    return util.roundup(LVM_SIZE_INCREMENT, sizeVHD)
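
# Example: with the 4 MiB LVM_SIZE_INCREMENT, a 10,000,000-byte VHD needs a
# 12 MiB (12,582,912-byte) LV, the smallest multiple of the increment that
# holds it.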


def calcSizeVHDLV(sizeVirt):
    # all LVHD VDIs have the metadata area preallocated for the maximum
    # possible virtual size (for fast online VDI.resize)
    metaOverhead = vhdutil.calcOverheadEmpty(MSIZE)
    bitmapOverhead = vhdutil.calcOverheadBitmap(sizeVirt)
    return calcSizeLV(sizeVirt + metaOverhead + bitmapOverhead)
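
# In other words, an LVHD VHD LV is sized as roundup(LVM_SIZE_INCREMENT,
# sizeVirt + empty-VHD overhead computed for the 2 TiB MSIZE + block-bitmap
# overhead for sizeVirt), so data, preallocated metadata and bitmaps all fit
# even when the VDI is fully written.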


def getLVInfo(lvmCache, lvName=None):
    """Load LV info for all LVs in the VG or an individual LV.
    This is a wrapper for lvutil.getLVInfo that filters out LVs that
    are not LVHD VDIs and adds the vdi_type information"""
    allLVs = lvmCache.getLVInfo(lvName)

    lvs = dict()
    for lvName, lv in allLVs.items():
        vdiType, uuid = matchLV(lvName)
        if not vdiType:
            continue
        lv.vdiType = vdiType
        lvs[uuid] = lv
    return lvs
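
# The result maps VDI UUID -> LV info (with a vdiType attribute patched onto
# each entry); LVs in the VG that are not LVHD VDIs are silently skipped.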


def getVDIInfo(lvmCache):
    """Load VDI info (both LV and if the VDI is not raw, VHD info)"""
    vdis = {}
    lvs = getLVInfo(lvmCache)

    haveVHDs = False
    for uuid, lvInfo in lvs.items():
        if lvInfo.vdiType == vhdutil.VDI_TYPE_VHD:
            haveVHDs = True
        vdiInfo = VDIInfo(uuid)
        vdiInfo.vdiType = lvInfo.vdiType
        vdiInfo.lvName = lvInfo.name
        vdiInfo.sizeLV = lvInfo.size
        vdiInfo.sizeVirt = lvInfo.size
        vdiInfo.lvActive = lvInfo.active
        vdiInfo.lvOpen = lvInfo.open
        vdiInfo.lvReadonly = lvInfo.readonly
        vdiInfo.hidden = lvInfo.hidden
        vdis[uuid] = vdiInfo

    if haveVHDs:
        pattern = "%s*" % LV_PREFIX[vhdutil.VDI_TYPE_VHD]
        vhds = vhdutil.getAllVHDs(pattern, extractUuid, lvmCache.vgName)
        uuids = list(vdis.keys())
        for uuid in uuids:
            vdi = vdis[uuid]
            if vdi.vdiType == vhdutil.VDI_TYPE_VHD:
                if not vhds.get(uuid):
                    lvmCache.refresh()
                    if lvmCache.checkLV(vdi.lvName):
                        util.SMlog("*** VHD info missing: %s" % uuid)
                        vdis[uuid].scanError = True
                    else:
                        util.SMlog("LV disappeared since last scan: %s" % uuid)
                        del vdis[uuid]
                elif vhds[uuid].error:
                    util.SMlog("*** vhd-scan error: %s" % uuid)
                    vdis[uuid].scanError = True
                else:
                    vdis[uuid].sizeVirt = vhds[uuid].sizeVirt
                    vdis[uuid].parentUuid = vhds[uuid].parentUuid
                    vdis[uuid].hidden = vhds[uuid].hidden
    return vdis
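
# Two-pass structure: the LVM scan above supplies size and activation state
# for every VDI, and a single vhd-scan over "VHD-*" then fills in virtual
# size, parent and hidden flags. A VHD whose LV vanished between the scans is
# dropped; one still present but unreadable is flagged with scanError.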


def inflate(journaler, srUuid, vdiUuid, size):
    """Expand a VDI LV (and its VHD) to 'size'. If the LV is already bigger
    than that, it's a no-op. Does not change the virtual size of the VDI"""
    lvName = LV_PREFIX[vhdutil.VDI_TYPE_VHD] + vdiUuid
    vgName = VG_PREFIX + srUuid
    path = os.path.join(VG_LOCATION, vgName, lvName)
    lvmCache = journaler.lvmCache

    currSizeLV = lvmCache.getSize(lvName)
    newSize = calcSizeLV(size)
    if newSize <= currSizeLV:
        return
    journaler.create(JRN_INFLATE, vdiUuid, str(currSizeLV))
    util.fistpoint.activate("LVHDRT_inflate_after_create_journal", srUuid)
    lvmCache.setSize(lvName, newSize)
    util.fistpoint.activate("LVHDRT_inflate_after_setSize", srUuid)
    if not util.zeroOut(path, newSize - vhdutil.VHD_FOOTER_SIZE,
                        vhdutil.VHD_FOOTER_SIZE):
        raise Exception('failed to zero out VHD footer')
    util.fistpoint.activate("LVHDRT_inflate_after_zeroOut", srUuid)
    vhdutil.setSizePhys(path, newSize, False)
    util.fistpoint.activate("LVHDRT_inflate_after_setSizePhys", srUuid)
    journaler.remove(JRN_INFLATE, vdiUuid)
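
# The inflate journal records the pre-inflate LV size, so an interrupted
# inflate can be undone by the SR's journal-replay code (not in this module).
# The footer-sized region at the end of the newly grown LV is zeroed before
# setSizePhys() so stale data there is not misread as a VHD footer; the
# LVHDRT fistpoints are fault-injection hooks used by the test harness to
# interrupt the operation between these steps.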


def deflate(lvmCache, lvName, size):
    """Shrink the LV and the VHD on it to 'size'. Does not change the
    virtual size of the VDI"""
    currSizeLV = lvmCache.getSize(lvName)
    newSize = calcSizeLV(size)
    if newSize >= currSizeLV:
        return
    path = os.path.join(VG_LOCATION, lvmCache.vgName, lvName)
    # no undo necessary if this fails at any point between now and the end
    vhdutil.setSizePhys(path, newSize)
    lvmCache.setSize(lvName, newSize)
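
# Ordering note: the VHD is shrunk before the LV, so a failure part-way leaves
# the LV no smaller than the VHD it contains, which is why no journal entry is
# needed here (see the "no undo necessary" comment above).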


def setSizeVirt(journaler, srUuid, vdiUuid, size, jFile):
    """When resizing the VHD virtual size, we might have to inflate the LV in
    case the metadata size increases"""
    lvName = LV_PREFIX[vhdutil.VDI_TYPE_VHD] + vdiUuid
    vgName = VG_PREFIX + srUuid
    path = os.path.join(VG_LOCATION, vgName, lvName)
    inflate(journaler, srUuid, vdiUuid, calcSizeVHDLV(size))
    vhdutil.setSizeVirt(path, size, jFile)
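
# Growing the virtual size can grow the VHD metadata (BAT and bitmaps), so the
# LV is inflated to the new fully-allocated size first and only then is the
# VHD virtual size changed; jFile is the resize journal vhdutil uses to make
# that final step crash-safe.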


def _tryAcquire(lock):
    """We must give up if the SR is locked because it could be locked by the
    coalesce thread trying to acquire the VDI lock we're holding, so as to
    avoid deadlock"""
    for i in range(LOCK_RETRY_ATTEMPTS):
        gotLock = lock.acquireNoblock()
        if gotLock:
            return
        time.sleep(1)
    raise util.SRBusyException()
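
# In effect this polls the lock about once a second and gives up after
# LOCK_RETRY_ATTEMPTS tries (roughly 20 seconds), raising SRBusyException
# rather than risking the deadlock described in the docstring.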


def attachThin(journaler, srUuid, vdiUuid):
    """Ensure that the VDI LV is expanded to the fully-allocated size"""
    lvName = LV_PREFIX[vhdutil.VDI_TYPE_VHD] + vdiUuid
    vgName = VG_PREFIX + srUuid
    lock = Lock(vhdutil.LOCK_TYPE_SR, srUuid)
    lvmCache = journaler.lvmCache
    _tryAcquire(lock)
    lvmCache.refresh()
    vhdInfo = vhdutil.getVHDInfoLVM(lvName, extractUuid, vgName)
    newSize = calcSizeVHDLV(vhdInfo.sizeVirt)
    currSizeLV = lvmCache.getSize(lvName)
    if newSize <= currSizeLV:
        lock.release()
        return
    lvmCache.activate(NS_PREFIX_LVM + srUuid, vdiUuid, lvName, False)
    try:
        inflate(journaler, srUuid, vdiUuid, newSize)
    finally:
        lvmCache.deactivate(NS_PREFIX_LVM + srUuid, vdiUuid, lvName, False)
    lock.release()


def detachThin(session, lvmCache, srUuid, vdiUuid):
    """Shrink the VDI to the minimal size if no one is using it"""
    lvName = LV_PREFIX[vhdutil.VDI_TYPE_VHD] + vdiUuid
    path = os.path.join(VG_LOCATION, VG_PREFIX + srUuid, lvName)
    lock = Lock(vhdutil.LOCK_TYPE_SR, srUuid)
    _tryAcquire(lock)

    vdiRef = session.xenapi.VDI.get_by_uuid(vdiUuid)
    vbds = session.xenapi.VBD.get_all_records_where( \
            "field \"VDI\" = \"%s\"" % vdiRef)
    numPlugged = 0
    for vbdRec in vbds.values():
        if vbdRec["currently_attached"]:
            numPlugged += 1

    if numPlugged > 1:
        raise util.SMException("%s still in use by %d others" % \
                (vdiUuid, numPlugged - 1))
    lvmCache.activate(NS_PREFIX_LVM + srUuid, vdiUuid, lvName, False)
    try:
        newSize = calcSizeLV(vhdutil.getSizePhys(path))
        deflate(lvmCache, lvName, newSize)
    finally:
        lvmCache.deactivate(NS_PREFIX_LVM + srUuid, vdiUuid, lvName, False)
    lock.release()
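
# numPlugged counts the attachment currently being torn down as well, so a
# value above 1 means some other VBD still has the VDI plugged; in that case
# the LV is left inflated and an SMException is raised instead of deflating.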


def createVHDJournalLV(lvmCache, jName, size):
    """Create an LV to hold a VHD journal"""
    lvName = "%s_%s" % (JVHD_TAG, jName)
    lvmCache.create(lvName, size, JVHD_TAG)
    return os.path.join(lvmCache.vgPath, lvName)


def deleteVHDJournalLV(lvmCache, jName):
    """Delete a VHD journal LV"""
    lvName = "%s_%s" % (JVHD_TAG, jName)
    lvmCache.remove(lvName)


def getAllVHDJournals(lvmCache):
    """Get a list of all VHD journals in the VG as (jName, lvName) pairs"""
    journals = []
    lvList = lvmCache.getTagged(JVHD_TAG)
    for lvName in lvList:
        jName = lvName[len(JVHD_TAG) + 1:]
        journals.append((jName, lvName))
    return journals
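
# Journal LVs are named "<JVHD_TAG>_<journal name>" and tagged with JVHD_TAG,
# so they can be found by tag and the journal name recovered by slicing off
# the "jvhd_" prefix, as above.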


def lvRefreshOnSlaves(session, srUuid, vgName, lvName, vdiUuid, slaves):
    args = {"vgName": vgName,
            "action1": "activate",
            "uuid1": vdiUuid,
            "ns1": NS_PREFIX_LVM + srUuid,
            "lvName1": lvName,
            "action2": "refresh",
            "lvName2": lvName,
            "action3": "deactivate",
            "uuid3": vdiUuid,
            "ns3": NS_PREFIX_LVM + srUuid,
            "lvName3": lvName}
    for slave in slaves:
        util.SMlog("Refreshing %s on slave %s" % (lvName, slave))
        text = session.xenapi.host.call_plugin(slave, "on-slave", "multi", args)
        util.SMlog("call-plugin returned: '%s'" % text)
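
# The args dict encodes an ordered activate / refresh / deactivate sequence
# that the "on-slave" plugin's "multi" call runs in a single round trip per
# slave, refreshing that host's view of the resized LV without leaving it
# activated there.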


def lvRefreshOnAllSlaves(session, srUuid, vgName, lvName, vdiUuid):
    slaves = util.get_all_slaves(session)
    lvRefreshOnSlaves(session, srUuid, vgName, lvName, vdiUuid, slaves)


def setInnerNodeRefcounts(lvmCache, srUuid):
    """[Re]calculate and set the refcounts for inner VHD nodes based on
    refcounts of the leaf nodes. We can infer inner node refcounts on slaves
    directly because they are in use only when VDIs are attached - as opposed
    to the Master case where the coalesce process can also operate on inner
    nodes.
    Return all LVs (paths) that are active but not in use (i.e. that should
    be deactivated)"""
    vdiInfo = getVDIInfo(lvmCache)
    for uuid, vdi in vdiInfo.items():
        vdi.refcount = 0

    ns = NS_PREFIX_LVM + srUuid
    for uuid, vdi in vdiInfo.items():
        if vdi.hidden:
            continue  # only read leaf refcounts
        refcount = RefCounter.check(uuid, ns)
        assert(refcount == (0, 0) or refcount == (0, 1))
        if refcount[1]:
            vdi.refcount = 1
            while vdi.parentUuid:
                vdi = vdiInfo[vdi.parentUuid]
                vdi.refcount += 1

    pathsNotInUse = []
    for uuid, vdi in vdiInfo.items():
        if vdi.hidden:
            util.SMlog("Setting refcount for %s to %d" % (uuid, vdi.refcount))
            RefCounter.set(uuid, vdi.refcount, 0, ns)
        if vdi.refcount == 0 and vdi.lvActive:
            path = os.path.join("/dev", lvmCache.vgName, vdi.lvName)
            pathsNotInUse.append(path)

    return pathsNotInUse
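
# Illustration: for a chain leaf -> parent -> grandparent where only the leaf
# is attached (leaf refcount (0, 1)), the walk above gives both inner nodes a
# refcount of 1. An inner node that no attached leaf depends on stays at 0
# and, if its LV happens to be active, its path is returned for deactivation.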

if __name__ == "__main__":
    # used by the master changeover script
    cmd = sys.argv[1]
    if cmd == "fixrefcounts":
        from lvmcache import LVMCache
        srUuid = sys.argv[2]
        try:
            vgName = VG_PREFIX + srUuid
            lvmCache = LVMCache(vgName)
            setInnerNodeRefcounts(lvmCache, srUuid)
        except:
            util.logException("setInnerNodeRefcounts")
    else:
        util.SMlog("Invalid usage")
        print("Usage: %s fixrefcounts <sr_uuid>" % sys.argv[0])