Only detach device if all partitions have been cleaned
Currently there is a bug that detaches the device as soon as the first partition is cleaned up, without considering that there may be other partitions and volumes on it. Ensure that the detach only happens after all partitions have been cleaned, and add a test to verify this behaviour.

Change-Id: I35c5a473509f17a70270a2cbf5bf579faaeb123a
Fixes-Bug: #1777861
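The change replaces the two boolean gates (already_created / already_cleaned) with a single partition counter: create() increments it and skips re-creating the partition table after the first call, while umount() decrements it and only runs "kpartx -d" once the counter drops back to zero. A minimal standalone sketch of that gating pattern (hypothetical FakePartitioning class, not the real plugin code):

    # Sketch of the counting gate introduced by this change: create()
    # increments a counter for every partition sharing one base device,
    # umount() decrements it, and the device is only detached once the
    # counter returns to zero, i.e. after the very last partition.
    class FakePartitioning(object):
        def __init__(self):
            self.number_of_partitions = 0
            self.detach_calls = 0

        def create(self):
            self.number_of_partitions += 1

        def umount(self):
            self.number_of_partitions -= 1
            if self.number_of_partitions == 0:
                # the real plugin runs exec_sudo(["kpartx", "-d", device]) here
                self.detach_calls += 1


    p = FakePartitioning()
    for _ in range(3):          # three partitions on one base device
        p.create()
    for _ in range(3):
        p.umount()
    assert p.detach_calls == 1  # detach happened exactly once, on the last umount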
@@ -42,8 +42,7 @@ class Partitioning(PluginBase):
         # Because using multiple partitions of one base is done
         # within one object, there is the need to store a flag if the
         # creation of the partitions was already done.
-        self.already_created = False
-        self.already_cleaned = False
+        self.number_of_partitions = 0
 
         # Parameter check
         if 'base' not in config:
@@ -177,10 +176,10 @@ class Partitioning(PluginBase):
         # in the graph, so for every partition we get a create() call
         # as the walk happens. But we only need to create the
         # partition table once...
-        if self.already_created:
+        self.number_of_partitions += 1
+        if self.number_of_partitions > 1:
             logger.info("Not creating the partitions a second time.")
             return
-        self.already_created = True
 
         # the raw file on disk
         self.image_path = self.state['blockdev'][self.base]['image']
@@ -216,12 +215,13 @@ class Partitioning(PluginBase):
             return
 
     def umount(self):
-        # remove the partition mappings made for the parent
-        # block-device by create() above. this is called from the
-        # child PartitionNode umount. Thus every partition calls it,
-        # but we only want to do it once and our gate.
-        if not self.already_cleaned:
-            self.already_cleaned = True
+        # Remove the partition mappings made for the parent
+        # block-device by create() above. This is called from the
+        # child PartitionNode umount. Thus every
+        # partition calls it, but we only want to do it once when
+        # we know this is the very last partition
+        self.number_of_partitions -= 1
+        if self.number_of_partitions == 0:
             exec_sudo(["kpartx", "-d",
                        self.state['blockdev'][self.base]['device']])
 

@@ -0,0 +1,104 @@
+- local_loop:
+    name: image0
+
+- partitioning:
+    base: image0
+    label: mbr
+    partitions:
+      - name: root
+        flags: [ boot ]
+        size: 3G
+      - name: ESP
+        type: 'EF00'
+        size: 8MiB
+        mkfs:
+          type: vfat
+          mount:
+            mount_point: /boot/efi
+            fstab:
+              options: "defaults"
+              fsck-passno: 1
+      - name: BSP
+        type: 'EF02'
+        size: 8MiB
+
+- lvm:
+    name: lvm
+    base: [ root ]
+    pvs:
+      - name: pv
+        base: root
+        options: [ "--force" ]
+    vgs:
+      - name: vg
+        base: [ "pv" ]
+        options: [ "--force" ]
+    lvs:
+      - name: lv_root
+        base: vg
+        extents: 28%VG
+      - name: lv_tmp
+        base: vg
+        extents: 4%VG
+      - name: lv_var
+        base: vg
+        extents: 40%VG
+      - name: lv_log
+        base: vg
+        extents: 23%VG
+      - name: lv_audit
+        base: vg
+        extents: 4%VG
+      - name: lv_home
+        base: vg
+        extents: 1%VG
+- mkfs:
+    name: fs_root
+    base: lv_root
+    type: xfs
+    label: "img-rootfs"
+    mount:
+      mount_point: /
+      fstab:
+        options: "rw,relatime"
+        fsck-passno: 1
+- mkfs:
+    name: fs_tmp
+    base: lv_tmp
+    type: xfs
+    mount:
+      mount_point: /tmp
+      fstab:
+        options: "rw,nosuid,nodev,noexec,relatime"
+- mkfs:
+    name: fs_var
+    base: lv_var
+    type: xfs
+    mount:
+      mount_point: /var
+      fstab:
+        options: "rw,relatime"
+- mkfs:
+    name: fs_log
+    base: lv_log
+    type: xfs
+    mount:
+      mount_point: /var/log
+      fstab:
+        options: "rw,relatime"
+- mkfs:
+    name: fs_audit
+    base: lv_audit
+    type: xfs
+    mount:
+      mount_point: /var/log/audit
+      fstab:
+        options: "rw,relatime"
+- mkfs:
+    name: fs_home
+    base: lv_home
+    type: xfs
+    mount:
+      mount_point: /home
+      fstab:
+        options: "rw,nodev,relatime"
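A quick standalone sanity check (hypothetical snippet, assuming the YAML above is saved locally as lvm_tree_multiple_partitions.yaml) showing that the tree parses into a list of single-key plugin entries, which is what the new test feeds to config_tree_to_graph():

    import yaml

    with open('lvm_tree_multiple_partitions.yaml') as f:
        tree = yaml.safe_load(f)

    # print the plugin name of each top-level entry
    print([list(entry)[0] for entry in tree])
    # expected: ['local_loop', 'partitioning', 'lvm', 'mkfs', 'mkfs', ...]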
@@ -21,11 +21,13 @@ from diskimage_builder.block_device.config import config_tree_to_graph
 from diskimage_builder.block_device.config import create_graph
 from diskimage_builder.block_device.exception import \
     BlockDeviceSetupException
+from diskimage_builder.block_device.level0.localloop import LocalLoopNode
 from diskimage_builder.block_device.level1.lvm import LVMNode
 from diskimage_builder.block_device.level1.lvm import LVMPlugin
 from diskimage_builder.block_device.level1.lvm import LvsNode
 from diskimage_builder.block_device.level1.lvm import PvsNode
 from diskimage_builder.block_device.level1.lvm import VgsNode
+from diskimage_builder.block_device.level1.partitioning import PartitionNode
 
 logger = logging.getLogger(__name__)
 
@@ -435,3 +437,142 @@ class TestLVM(tc.TestGraphGeneration):
         ]
 
         self.assertListEqual(mock_exec_sudo.call_args_list, cmd_sequence)
+
+    def test_lvm_multiple_partitions(self):
+        # Test the command sequence for several partitions, one of
+        # which contains LVM volumes
+        tree = self.load_config_file('lvm_tree_multiple_partitions.yaml')
+        config = config_tree_to_graph(tree)
+
+        state = BlockDeviceState()
+
+        graph, call_order = create_graph(config, self.fake_default_config,
+                                         state)
+
+        # Fake state for the partitions on this config
+        state['blockdev'] = {}
+        state['blockdev']['image0'] = {}
+        state['blockdev']['image0']['device'] = '/dev/fake/image0'
+        state['blockdev']['image0']['image'] = 'image'
+        state['blockdev']['root'] = {}
+        state['blockdev']['root']['device'] = '/dev/fake/root'
+        state['blockdev']['ESP'] = {}
+        state['blockdev']['ESP']['device'] = '/dev/fake/ESP'
+        state['blockdev']['BSP'] = {}
+        state['blockdev']['BSP']['device'] = '/dev/fake/BSP'
+
+        #
+        # Creation test
+        #
+
+        # We mock out the following exec_sudo and other related
+        # calls for the layers we are testing.
+        exec_sudo_lvm = 'diskimage_builder.block_device.level1.lvm.exec_sudo'
+        exec_sudo_part = ('diskimage_builder.block_device.'
+                          'level1.partitioning.exec_sudo')
+        exec_sudo_loop = ('diskimage_builder.block_device.'
+                          'level0.localloop.exec_sudo')
+        image_create = ('diskimage_builder.block_device.level0.'
+                        'localloop.LocalLoopNode.create')
+        size_of_block = ('diskimage_builder.block_device.level1.'
+                         'partitioning.Partitioning._size_of_block_dev')
+        create_mbr = ('diskimage_builder.block_device.level1.'
+                      'partitioning.Partitioning._create_mbr')
+
+        manager = mock.MagicMock()
+        with mock.patch(exec_sudo_lvm) as mock_sudo_lvm, \
+                mock.patch(exec_sudo_part) as mock_sudo_part, \
+                mock.patch(exec_sudo_loop) as mock_sudo_loop, \
+                mock.patch(image_create) as mock_image_create, \
+                mock.patch(size_of_block) as mock_size_of_block, \
+                mock.patch(create_mbr) as mock_create_mbr:
+
+            manager.attach_mock(mock_sudo_lvm, 'sudo_lvm')
+            manager.attach_mock(mock_sudo_part, 'sudo_part')
+            manager.attach_mock(mock_sudo_loop, 'sudo_loop')
+            manager.attach_mock(mock_image_create, 'image_create')
+            manager.attach_mock(mock_size_of_block, 'size_of_block')
+            manager.attach_mock(mock_create_mbr, 'create_mbr')
+
+            for node in call_order:
+                # We're just keeping this to the partition setup and
+                # LVM creation; i.e. skipping mounting, mkfs, etc.
+                if isinstance(node, (LVMNode, PvsNode,
+                                     VgsNode, LvsNode,
+                                     LocalLoopNode, PartitionNode)):
+                    node.create()
+                else:
+                    logger.debug("Skipping node for test: %s", node)
+
+            cmd_sequence = [
+                # create the underlying block device
+                mock.call.image_create(),
+                mock.call.size_of_block('image'),
+                # write out partition table
+                mock.call.create_mbr(),
+                # now mount partitions
+                mock.call.sudo_part(['sync']),
+                mock.call.sudo_part(['kpartx', '-avs', '/dev/fake/image0']),
+                # now create lvm environment
+                mock.call.sudo_lvm(['pvcreate', '/dev/fake/root', '--force']),
+                mock.call.sudo_lvm(
+                    ['vgcreate', 'vg', '/dev/fake/root', '--force']),
+                mock.call.sudo_lvm(
+                    ['lvcreate', '--name', 'lv_root', '-l', '28%VG', 'vg']),
+                mock.call.sudo_lvm(
+                    ['lvcreate', '--name', 'lv_tmp', '-l', '4%VG', 'vg']),
+                mock.call.sudo_lvm(
+                    ['lvcreate', '--name', 'lv_var', '-l', '40%VG', 'vg']),
+                mock.call.sudo_lvm(
+                    ['lvcreate', '--name', 'lv_log', '-l', '23%VG', 'vg']),
+                mock.call.sudo_lvm(
+                    ['lvcreate', '--name', 'lv_audit', '-l', '4%VG', 'vg']),
+                mock.call.sudo_lvm(
+                    ['lvcreate', '--name', 'lv_home', '-l', '1%VG', 'vg']),
+            ]
+            manager.assert_has_calls(cmd_sequence)
+
+        #
+        # Umount/cleanup test
+        #
+        manager = mock.MagicMock()
+        with mock.patch(exec_sudo_lvm) as mock_sudo_lvm, \
+                mock.patch(exec_sudo_part) as mock_sudo_part, \
+                mock.patch(exec_sudo_loop) as mock_sudo_loop:
+
+            manager.attach_mock(mock_sudo_lvm, 'sudo_lvm')
+            manager.attach_mock(mock_sudo_part, 'sudo_part')
+            manager.attach_mock(mock_sudo_loop, 'sudo_loop')
+
+            def run_it(phase):
+                reverse_order = reversed(call_order)
+                for node in reverse_order:
+                    if isinstance(node, (LVMNode, PvsNode,
+                                         VgsNode, LvsNode,
+                                         LocalLoopNode, PartitionNode)):
+                        getattr(node, phase)()
+                    else:
+                        logger.debug("Skipping node for test: %s", node)
+
+            run_it('umount')
+            run_it('cleanup')
+
+            cmd_sequence = [
+                # deactivate LVM first
+                mock.call.sudo_lvm(['lvchange', '-an', '/dev/vg/lv_root']),
+                mock.call.sudo_lvm(['lvchange', '-an', '/dev/vg/lv_tmp']),
+                mock.call.sudo_lvm(['lvchange', '-an', '/dev/vg/lv_var']),
+                mock.call.sudo_lvm(['lvchange', '-an', '/dev/vg/lv_log']),
+                mock.call.sudo_lvm(['lvchange', '-an', '/dev/vg/lv_audit']),
+                mock.call.sudo_lvm(['lvchange', '-an', '/dev/vg/lv_home']),
+                mock.call.sudo_lvm(['vgchange', '-an', 'vg']),
+                mock.call.sudo_lvm(['udevadm', 'settle']),
+                # now remove partitions (note has to happen after lvm removal)
+                mock.call.sudo_part(['kpartx', '-d', '/dev/fake/image0']),
+                # now remove loopback device
+                mock.call.sudo_loop(['losetup', '-d', '/dev/fake/image0']),
+                # now final LVM cleanup call
+                mock.call.sudo_lvm(['pvscan', '--cache']),
+            ]
+
+            manager.assert_has_calls(cmd_sequence)
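The new test relies on the standard unittest.mock idiom of attaching several patched mocks to one parent MagicMock so that their relative ordering can be asserted across modules with assert_has_calls(). A minimal self-contained illustration of that idiom (toy calls, not the dib code):

    from unittest import mock

    # Parent "manager" records calls from all attached child mocks in order.
    manager = mock.MagicMock()
    sudo_part = mock.MagicMock()
    sudo_loop = mock.MagicMock()
    manager.attach_mock(sudo_part, 'sudo_part')
    manager.attach_mock(sudo_loop, 'sudo_loop')

    # Simulate the expected teardown order: partition mappings first, loop last.
    sudo_part(['kpartx', '-d', '/dev/fake/image0'])
    sudo_loop(['losetup', '-d', '/dev/fake/image0'])

    # Passes only if the calls occurred in this relative order.
    manager.assert_has_calls([
        mock.call.sudo_part(['kpartx', '-d', '/dev/fake/image0']),
        mock.call.sudo_loop(['losetup', '-d', '/dev/fake/image0']),
    ])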