Remove dd from LVM element

This patch removes the unneeded dd calls in the lvm block device
plugin.

After removing the underlying block device, there is the need to call
'pvscan --cache'.  This is done by a dedicated LVM cleanup node which
is cleaned up after the underlying block device.

Change-Id: Id8eaede77fbdc107d2ba1035cd6b8eb5c10160c3
Signed-off-by: Andreas Florath <andreas@florath.net>
This commit is contained in:
Andreas Florath 2017-08-26 08:01:14 +00:00
parent fa6c731132
commit bb6cf52d85
2 changed files with 53 additions and 92 deletions

View File

@ -12,10 +12,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import collections
import logging import logging
import os import subprocess
import tempfile
from diskimage_builder.block_device.exception \ from diskimage_builder.block_device.exception \
import BlockDeviceSetupException import BlockDeviceSetupException
@ -106,10 +104,6 @@ class PvsNode(NodeBase):
'device': phys_dev 'device': phys_dev
} }
def _cleanup(self):
exec_sudo(['pvremove', '--force',
self.state['pvs'][self.name]['device']])
def get_edges(self): def get_edges(self):
# See LVMNode.get_edges() for how this gets connected # See LVMNode.get_edges() for how this gets connected
return ([], []) return ([], [])
@ -163,7 +157,6 @@ class VgsNode(NodeBase):
def _cleanup(self): def _cleanup(self):
exec_sudo(['vgchange', '-an', self.name]) exec_sudo(['vgchange', '-an', self.name])
exec_sudo(['vgremove', '--force', self.name])
def get_edges(self): def get_edges(self):
# self.base is already a list, per the config. There might be # self.base is already a list, per the config. There might be
@ -224,8 +217,6 @@ class LvsNode(NodeBase):
def _cleanup(self): def _cleanup(self):
exec_sudo(['lvchange', '-an', exec_sudo(['lvchange', '-an',
'/dev/%s/%s' % (self.base, self.name)]) '/dev/%s/%s' % (self.base, self.name)])
exec_sudo(['lvremove', '--force',
'/dev/%s/%s' % (self.base, self.name)])
def get_edges(self): def get_edges(self):
edge_from = [self.base] edge_from = [self.base]
@ -250,16 +241,6 @@ class LVMNode(NodeBase):
therefore just dependency place holders whose create() call therefore just dependency place holders whose create() call
does nothing. does nothing.
This is quite important in the cleanup phase. In theory, you
would remove the vg's, then the lv's and then free-up the
pv's. But the process of removing these also removes them
from the LVM meta-data in the image, undoing all the
configuration. Thus the unwind process is also "atomic" in
this node; we do a copy of the devices before removing the LVM
components, and then copy them back (better ideas welcome!)
As with creation, the cleanup() calls in the other nodes are
just placeholders.
Arguments: Arguments:
:param name: name of this node :param name: name of this node
:param state: global state pointer :param state: global state pointer
@ -300,37 +281,48 @@ class LVMNode(NodeBase):
lvs._create() lvs._create()
def cleanup(self): def cleanup(self):
# First do a copy of all physical devices to individual
# temporary files. This is because the physical device is
# full of LVM metadata describing the volumes and we don't
# have a better way to handle removing the devices/volumes
# from the host system while persisting this metadata in the
# underlying devices.
tempfiles = collections.OrderedDict() # to unwind in same order!
for pvs in self.pvs:
phys_dev = self.state['blockdev'][pvs.base]['device']
target_file = tempfile.NamedTemporaryFile(delete=False)
target_file.close()
exec_sudo(['dd', 'if=%s' % phys_dev,
'of=%s' % target_file.name])
tempfiles[target_file.name] = phys_dev
# once copied, start the removal in reverse order
for lvs in self.lvs: for lvs in self.lvs:
lvs._cleanup() lvs._cleanup()
for vgs in self.vgs: for vgs in self.vgs:
vgs._cleanup() vgs._cleanup()
for pvs in self.pvs:
pvs._cleanup()
exec_sudo(['udevadm', 'settle']) exec_sudo(['udevadm', 'settle'])
# after the cleanup copy devices back
for tmp_name, phys_dev in tempfiles.items(): class LVMCleanupNode(NodeBase):
exec_sudo(['dd', 'if=%s' % tmp_name, 'of=%s' % phys_dev]) def __init__(self, name, state, pvs):
os.unlink(tmp_name) """Cleanup Node for LVM
Information about the PV, VG and LV is typically
cached in lvmetad. Even after removing PV device and
partitions this data is not automatically updated
which leads to a couple of problems.
the 'pvscan --cache' scans the available disks
and updates the cache.
This must be called after the cleanup of the
containing block device is done.
"""
super(LVMCleanupNode, self).__init__(name, state)
self.pvs = pvs
def create(self):
# This class is used for cleanup only
pass
def cleanup(self):
try:
exec_sudo(['pvscan', '--cache'])
except subprocess.CalledProcessError as cpe:
logger.debug("pvscan call result [%s]", cpe)
def get_edges(self):
# This node depends on all physical device(s), which is
# recorded in the "base" argument of the PV nodes.
edge_to = set()
for pv in self.pvs:
edge_to.add(pv.base)
return ([], edge_to)
class LVMPlugin(PluginBase): class LVMPlugin(PluginBase):
@ -420,8 +412,12 @@ class LVMPlugin(PluginBase):
# create the "driver" node # create the "driver" node
self.lvm_node = LVMNode(config['name'], state, self.lvm_node = LVMNode(config['name'], state,
self.pvs, self.lvs, self.vgs) self.pvs, self.lvs, self.vgs)
self.lvm_cleanup_node = LVMCleanupNode(
config['name'] + "-CLEANUP", state, self.pvs)
def get_nodes(self): def get_nodes(self):
# the nodes for insertion into the graph are all of the pvs, # the nodes for insertion into the graph are all of the pvs,
# vgs and lvs nodes we have created above, and the root node. # vgs and lvs nodes we have created above, the root node and
return self.pvs + self.vgs + self.lvs + [self.lvm_node] # the cleanup node.
return self.pvs + self.vgs + self.lvs \
+ [self.lvm_node, self.lvm_cleanup_node]

View File

@ -21,6 +21,7 @@ from diskimage_builder.block_device.config import config_tree_to_graph
from diskimage_builder.block_device.config import create_graph from diskimage_builder.block_device.config import create_graph
from diskimage_builder.block_device.exception import \ from diskimage_builder.block_device.exception import \
BlockDeviceSetupException BlockDeviceSetupException
from diskimage_builder.block_device.level1.lvm import LVMCleanupNode
from diskimage_builder.block_device.level1.lvm import LVMNode from diskimage_builder.block_device.level1.lvm import LVMNode
from diskimage_builder.block_device.level1.lvm import LVMPlugin from diskimage_builder.block_device.level1.lvm import LVMPlugin
from diskimage_builder.block_device.level1.lvm import LvsNode from diskimage_builder.block_device.level1.lvm import LvsNode
@ -88,7 +89,8 @@ class TestLVM(tc.TestGraphGeneration):
# XXX: This has not mocked out the "lower" layers of # XXX: This has not mocked out the "lower" layers of
# creating the devices, which we're assuming works OK, nor # creating the devices, which we're assuming works OK, nor
# the upper layers. # the upper layers.
if isinstance(node, (LVMNode, PvsNode, VgsNode, LvsNode)): if isinstance(node, (LVMNode, LVMCleanupNode, PvsNode,
VgsNode, LvsNode)):
# only the LVMNode actually does anything here... # only the LVMNode actually does anything here...
node.create() node.create()
@ -196,7 +198,8 @@ class TestLVM(tc.TestGraphGeneration):
# XXX: This has not mocked out the "lower" layers of # XXX: This has not mocked out the "lower" layers of
# creating the devices, which we're assuming works OK, nor # creating the devices, which we're assuming works OK, nor
# the upper layers. # the upper layers.
if isinstance(node, (LVMNode, PvsNode, VgsNode, LvsNode)): if isinstance(node, (LVMNode, LVMCleanupNode, PvsNode,
VgsNode, LvsNode)):
# only the PvsNode actually does anything here... # only the PvsNode actually does anything here...
node.create() node.create()
@ -305,38 +308,23 @@ class TestLVM(tc.TestGraphGeneration):
reverse_order = reversed(call_order) reverse_order = reversed(call_order)
for node in reverse_order: for node in reverse_order:
if isinstance(node, (LVMNode, PvsNode, VgsNode, LvsNode)): if isinstance(node, (LVMNode, LVMCleanupNode, PvsNode,
VgsNode, LvsNode)):
node.cleanup() node.cleanup()
cmd_sequence = [ cmd_sequence = [
# copy the temporary drives
mock.call(['dd', 'if=/dev/fake/root', 'of=%s' % tempfiles[0]]),
mock.call(['dd', 'if=/dev/fake/data', 'of=%s' % tempfiles[1]]),
# delete the lv's # delete the lv's
mock.call(['lvchange', '-an', '/dev/vg1/lv_root']), mock.call(['lvchange', '-an', '/dev/vg1/lv_root']),
mock.call(['lvremove', '--force', '/dev/vg1/lv_root']),
mock.call(['lvchange', '-an', '/dev/vg1/lv_tmp']), mock.call(['lvchange', '-an', '/dev/vg1/lv_tmp']),
mock.call(['lvremove', '--force', '/dev/vg1/lv_tmp']),
mock.call(['lvchange', '-an', '/dev/vg2/lv_var']), mock.call(['lvchange', '-an', '/dev/vg2/lv_var']),
mock.call(['lvremove', '--force', '/dev/vg2/lv_var']),
mock.call(['lvchange', '-an', '/dev/vg2/lv_log']), mock.call(['lvchange', '-an', '/dev/vg2/lv_log']),
mock.call(['lvremove', '--force', '/dev/vg2/lv_log']),
mock.call(['lvchange', '-an', '/dev/vg2/lv_audit']), mock.call(['lvchange', '-an', '/dev/vg2/lv_audit']),
mock.call(['lvremove', '--force', '/dev/vg2/lv_audit']),
mock.call(['lvchange', '-an', '/dev/vg2/lv_home']), mock.call(['lvchange', '-an', '/dev/vg2/lv_home']),
mock.call(['lvremove', '--force', '/dev/vg2/lv_home']),
# delete the vg's # delete the vg's
mock.call(['vgchange', '-an', 'vg1']), mock.call(['vgchange', '-an', 'vg1']),
mock.call(['vgremove', '--force', 'vg1']),
mock.call(['vgchange', '-an', 'vg2']), mock.call(['vgchange', '-an', 'vg2']),
mock.call(['vgremove', '--force', 'vg2']),
# delete the pv's
mock.call(['pvremove', '--force', '/dev/fake/root']),
mock.call(['pvremove', '--force', '/dev/fake/data']),
# copy back again
mock.call(['udevadm', 'settle']), mock.call(['udevadm', 'settle']),
mock.call(['dd', 'if=%s' % tempfiles[0], 'of=/dev/fake/root']), mock.call(['pvscan', '--cache']),
mock.call(['dd', 'if=%s' % tempfiles[1], 'of=/dev/fake/data']),
] ]
self.assertListEqual(mock_exec_sudo.call_args_list, cmd_sequence) self.assertListEqual(mock_exec_sudo.call_args_list, cmd_sequence)
@ -377,7 +365,8 @@ class TestLVM(tc.TestGraphGeneration):
# XXX: This has not mocked out the "lower" layers of # XXX: This has not mocked out the "lower" layers of
# creating the devices, which we're assuming works OK, nor # creating the devices, which we're assuming works OK, nor
# the upper layers. # the upper layers.
if isinstance(node, (LVMNode, PvsNode, VgsNode, LvsNode)): if isinstance(node, (LVMNode, LVMCleanupNode,
PvsNode, VgsNode, LvsNode)):
# only the LVMNode actually does anything here... # only the LVMNode actually does anything here...
node.create() node.create()
@ -424,39 +413,15 @@ class TestLVM(tc.TestGraphGeneration):
node.cleanup() node.cleanup()
cmd_sequence = [ cmd_sequence = [
# copy the temporary drives # deactivate lv's
mock.call(['dd', 'if=/dev/fake/root',
'of=%s' % tempfiles[0]]),
mock.call(['dd', 'if=/dev/fake/data1',
'of=%s' % tempfiles[1]]),
mock.call(['dd', 'if=/dev/fake/data2',
'of=%s' % tempfiles[2]]),
# remove lv's
mock.call(['lvchange', '-an', '/dev/vg_root/lv_root']), mock.call(['lvchange', '-an', '/dev/vg_root/lv_root']),
mock.call(['lvremove', '--force', '/dev/vg_root/lv_root']),
mock.call(['lvchange', '-an', '/dev/vg_data/lv_data']), mock.call(['lvchange', '-an', '/dev/vg_data/lv_data']),
mock.call(['lvremove', '--force', '/dev/vg_data/lv_data']),
# remove vg's # deactivate vg's
mock.call(['vgchange', '-an', 'vg_root']), mock.call(['vgchange', '-an', 'vg_root']),
mock.call(['vgremove', '--force', 'vg_root']),
mock.call(['vgchange', '-an', 'vg_data']), mock.call(['vgchange', '-an', 'vg_data']),
mock.call(['vgremove', '--force', 'vg_data']),
# remove pv's
mock.call(['pvremove', '--force', '/dev/fake/root']),
mock.call(['pvremove', '--force', '/dev/fake/data1']),
mock.call(['pvremove', '--force', '/dev/fake/data2']),
# copy back again
mock.call(['udevadm', 'settle']), mock.call(['udevadm', 'settle']),
mock.call(['dd', 'if=%s' % tempfiles[0],
'of=/dev/fake/root']),
mock.call(['dd', 'if=%s' % tempfiles[1],
'of=/dev/fake/data1']),
mock.call(['dd', 'if=%s' % tempfiles[2],
'of=/dev/fake/data2']),
] ]
self.assertListEqual(mock_exec_sudo.call_args_list, cmd_sequence) self.assertListEqual(mock_exec_sudo.call_args_list, cmd_sequence)