2017-06-08 05:02:18 +00:00
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
|
|
# not use this file except in compliance with the License. You may obtain
|
|
|
|
# a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
|
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
|
|
# License for the specific language governing permissions and limitations
|
|
|
|
# under the License.
|
|
|
|
|
|
|
|
import copy
|
|
|
|
import logging
|
|
|
|
import mock
|
|
|
|
|
|
|
|
import diskimage_builder.block_device.tests.test_config as tc
|
|
|
|
|
|
|
|
from diskimage_builder.block_device.blockdevice import BlockDeviceState
|
|
|
|
from diskimage_builder.block_device.config import config_tree_to_graph
|
|
|
|
from diskimage_builder.block_device.config import create_graph
|
|
|
|
from diskimage_builder.block_device.exception import \
|
|
|
|
BlockDeviceSetupException
|
2018-06-20 14:25:59 +00:00
|
|
|
from diskimage_builder.block_device.level0.localloop import LocalLoopNode
|
2017-06-08 05:02:18 +00:00
|
|
|
from diskimage_builder.block_device.level1.lvm import LVMNode
|
|
|
|
from diskimage_builder.block_device.level1.lvm import LVMPlugin
|
|
|
|
from diskimage_builder.block_device.level1.lvm import LvsNode
|
|
|
|
from diskimage_builder.block_device.level1.lvm import PvsNode
|
|
|
|
from diskimage_builder.block_device.level1.lvm import VgsNode
|
2018-06-20 14:25:59 +00:00
|
|
|
from diskimage_builder.block_device.level1.partitioning import PartitionNode
|
2017-06-08 05:02:18 +00:00
|
|
|
|
|
|
|
# Module-level logger; used by the tests below to note skipped nodes.
logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
|
|
|
|
class TestLVM(tc.TestGraphGeneration):

    def test_lvm_tree_to_graph(self):
        """A tree-style LVM config must convert to the equivalent graph."""
        tree_config = self.load_config_file('lvm_tree.yaml')
        graph_config = self.load_config_file('lvm_graph.yaml')

        converted = config_tree_to_graph(tree_config)
        # order is irrelevant; only the set of nodes must match
        self.assertCountEqual(converted, graph_config)
|
2017-06-08 05:02:18 +00:00
|
|
|
|
|
|
|
def test_lvm_invalid_config(self):
|
|
|
|
# test some invalid config paths
|
|
|
|
config = self.load_config_file('lvm_graph.yaml')
|
|
|
|
lvm_config = config[2]['lvm']
|
|
|
|
|
|
|
|
bad_config = copy.deepcopy(lvm_config)
|
|
|
|
bad_config['vgs'][0]['base'] = ['invalid_pv']
|
|
|
|
self.assertRaisesRegex(BlockDeviceSetupException,
|
|
|
|
"base:invalid_pv in vgs does not match "
|
|
|
|
"a valid pvs",
|
|
|
|
LVMPlugin, bad_config, {}, {})
|
|
|
|
|
|
|
|
bad_config = copy.deepcopy(lvm_config)
|
|
|
|
bad_config['lvs'][0]['base'] = ['invalid_vg']
|
|
|
|
self.assertRaisesRegex(BlockDeviceSetupException,
|
2020-02-22 23:25:38 +00:00
|
|
|
r"base:\['invalid_vg'\] in lvs does not match "
|
|
|
|
r"a valid vg",
|
2017-06-08 05:02:18 +00:00
|
|
|
LVMPlugin, bad_config, {}, {})
|
|
|
|
|
|
|
|
bad_config = copy.deepcopy(lvm_config)
|
|
|
|
del(bad_config['lvs'][0]['size'])
|
|
|
|
self.assertRaisesRegex(BlockDeviceSetupException,
|
|
|
|
"Missing 'size' or 'extents' in lvs config",
|
|
|
|
LVMPlugin, bad_config, {}, {})
|
|
|
|
|
|
|
|
    @mock.patch('diskimage_builder.block_device.level1.lvm.exec_sudo')
    def test_lvm_multi_pv(self, mock_exec_sudo):
        """Verify the exec_sudo call sequence and saved state for an LVM
        setup with one vg built from two pv's.

        The loop/partition layers below LVM are not exercised; their
        resulting state is faked by hand before the create() calls.
        """
        # Test the command-sequence for a more complicated LVM setup
        tree = self.load_config_file('lvm_tree_multiple_pv.yaml')
        config = config_tree_to_graph(tree)

        state = BlockDeviceState()

        graph, call_order = create_graph(config, self.fake_default_config,
                                         state)

        # XXX: todo; test call_order.  Make sure PV's come before, VG;
        # VG before LV, and that mounts/etc happen afterwards.

        # Fake state for the two PV's specified by this config
        state['blockdev'] = {}
        state['blockdev']['root'] = {}
        state['blockdev']['root']['device'] = '/dev/fake/root'
        state['blockdev']['data'] = {}
        state['blockdev']['data']['device'] = '/dev/fake/data'

        for node in call_order:
            # XXX: This has not mocked out the "lower" layers of
            # creating the devices, which we're assuming works OK, nor
            # the upper layers.
            if isinstance(node, (LVMNode, PvsNode,
                                 VgsNode, LvsNode)):
                # only the LVMNode actually does anything here...
                node.create()

        # ensure the sequence of calls correctly setup the devices
        cmd_sequence = [
            # create the pv's on the faked out block devices
            mock.call(['pvcreate', '/dev/fake/root', '--force']),
            mock.call(['pvcreate', '/dev/fake/data', '--force']),
            # create a volume called "vg" out of these two pv's
            mock.call(['vgcreate', 'vg',
                       '/dev/fake/root', '/dev/fake/data', '--force']),
            # create a bunch of lv's on vg
            mock.call(['lvcreate', '--name', 'lv_root', '-L', '1800M', 'vg']),
            mock.call(['lvcreate', '--name', 'lv_tmp', '-L', '100M', 'vg']),
            mock.call(['lvcreate', '--name', 'lv_var', '-L', '500M', 'vg']),
            mock.call(['lvcreate', '--name', 'lv_log', '-L', '100M', 'vg']),
            mock.call(['lvcreate', '--name', 'lv_audit', '-L', '100M', 'vg']),
            mock.call(['lvcreate', '--name', 'lv_home', '-L', '200M', 'vg'])]

        self.assertEqual(mock_exec_sudo.call_count, len(cmd_sequence))
        mock_exec_sudo.assert_has_calls(cmd_sequence)

        # Ensure the correct LVM state was preserved
        blockdev_state = {
            'data': {'device': '/dev/fake/data'},
            'root': {'device': '/dev/fake/root'},
            'lv_audit': {
                'device': '/dev/mapper/vg-lv_audit',
                'extents': None,
                'opts': None,
                'size': '100M',
                'vgs': 'vg'
            },
            'lv_home': {
                'device': '/dev/mapper/vg-lv_home',
                'extents': None,
                'opts': None,
                'size': '200M',
                'vgs': 'vg'
            },
            'lv_log': {
                'device': '/dev/mapper/vg-lv_log',
                'extents': None,
                'opts': None,
                'size': '100M',
                'vgs': 'vg'
            },
            'lv_root': {
                'device': '/dev/mapper/vg-lv_root',
                'extents': None,
                'opts': None,
                'size': '1800M',
                'vgs': 'vg'
            },
            'lv_tmp': {
                'device': '/dev/mapper/vg-lv_tmp',
                'extents': None,
                'opts': None,
                'size': '100M',
                'vgs': 'vg'
            },
            'lv_var': {
                'device': '/dev/mapper/vg-lv_var',
                'extents': None,
                'opts': None,
                'size': '500M',
                'vgs': 'vg'
            },
        }

        # state.debug_dump()
        self.assertDictEqual(state['blockdev'], blockdev_state)

        # XXX: mount ordering?  fs creation?
|
|
|
|
|
|
|
|
    def test_lvm_multi_pv_vg(self):
        """Verify create and umount/cleanup call sequences for an LVM
        setup with two vg's, each on its own pv.

        Exercises both the creation path (pvcreate/vgcreate/lvcreate
        ordering and the state left behind) and the reverse-order
        teardown path (lvchange/vgchange deactivation).
        """
        # Test the command-sequence for a more complicated LVM setup
        tree = self.load_config_file('lvm_tree_multiple_pv_vg.yaml')
        config = config_tree_to_graph(tree)

        state = BlockDeviceState()

        graph, call_order = create_graph(config, self.fake_default_config,
                                         state)

        # XXX: todo; test call_order.  Make sure PV's come before, VG;
        # VG before LV, and that mounts/etc happen afterwards.

        # Fake state for the two PV's specified by this config
        state['blockdev'] = {}
        state['blockdev']['root'] = {}
        state['blockdev']['root']['device'] = '/dev/fake/root'
        state['blockdev']['data'] = {}
        state['blockdev']['data']['device'] = '/dev/fake/data'

        # We mock patch this ... it's just a little long!
        exec_sudo = 'diskimage_builder.block_device.level1.lvm.exec_sudo'

        #
        # Creation test
        #
        with mock.patch(exec_sudo) as mock_exec_sudo:

            for node in call_order:
                # XXX: This has not mocked out the "lower" layers of
                # creating the devices, which we're assuming works OK, nor
                # the upper layers.
                if isinstance(node, (LVMNode, PvsNode,
                                     VgsNode, LvsNode)):
                    # only the PvsNode actually does anything here...
                    node.create()

            # ensure the sequence of calls correctly setup the devices
            cmd_sequence = [
                # create the pv's on the faked out block devices
                mock.call(['pvcreate', '/dev/fake/root', '--force']),
                mock.call(['pvcreate', '/dev/fake/data', '--force']),
                # create a volume called "vg" out of these two pv's
                mock.call(['vgcreate', 'vg1',
                           '/dev/fake/root', '--force']),
                mock.call(['vgcreate', 'vg2',
                           '/dev/fake/data', '--force']),
                # create a bunch of lv's on vg
                mock.call(['lvcreate', '--name', 'lv_root',
                           '-L', '1800M', 'vg1']),
                mock.call(['lvcreate', '--name', 'lv_tmp',
                           '-L', '100M', 'vg1']),
                mock.call(['lvcreate', '--name', 'lv_var',
                           '-L', '500M', 'vg2']),
                mock.call(['lvcreate', '--name', 'lv_log',
                           '-L', '100M', 'vg2']),
                mock.call(['lvcreate', '--name', 'lv_audit',
                           '-L', '100M', 'vg2']),
                mock.call(['lvcreate', '--name', 'lv_home',
                           '-L', '200M', 'vg2'])]

            self.assertListEqual(mock_exec_sudo.call_args_list,
                                 cmd_sequence)

        # Ensure the correct LVM state was preserved
        blockdev_state = {
            'data': {'device': '/dev/fake/data'},
            'root': {'device': '/dev/fake/root'},
            'lv_audit': {
                'device': '/dev/mapper/vg2-lv_audit',
                'extents': None,
                'opts': None,
                'size': '100M',
                'vgs': 'vg2'
            },
            'lv_home': {
                'device': '/dev/mapper/vg2-lv_home',
                'extents': None,
                'opts': None,
                'size': '200M',
                'vgs': 'vg2'
            },
            'lv_log': {
                'device': '/dev/mapper/vg2-lv_log',
                'extents': None,
                'opts': None,
                'size': '100M',
                'vgs': 'vg2'
            },
            'lv_root': {
                'device': '/dev/mapper/vg1-lv_root',
                'extents': None,
                'opts': None,
                'size': '1800M',
                'vgs': 'vg1'
            },
            'lv_tmp': {
                'device': '/dev/mapper/vg1-lv_tmp',
                'extents': None,
                'opts': None,
                'size': '100M',
                'vgs': 'vg1'
            },
            'lv_var': {
                'device': '/dev/mapper/vg2-lv_var',
                'extents': None,
                'opts': None,
                'size': '500M',
                'vgs': 'vg2'
            },
        }

        # state.debug_dump()
        self.assertDictEqual(state['blockdev'], blockdev_state)

        #
        # Umount test
        #
        with mock.patch(exec_sudo) as mock_exec_sudo, \
                mock.patch('tempfile.NamedTemporaryFile') as mock_temp, \
                mock.patch('os.unlink'):

            # each call to tempfile.NamedTemporaryFile will return a
            # new mock with a unique filename, which we store in
            # tempfiles
            tempfiles = []

            def new_tempfile(*args, **kwargs):
                n = '/tmp/files%s' % len(tempfiles)
                # trap! note mock.Mock(name = n) doesn't work like you
                # think it would, since mock has a name attribute.
                # That's why we override it with the configure_mock
                # (this is mentioned in mock documentation if you read
                # it :)
                r = mock.Mock()
                r.configure_mock(name=n)
                tempfiles.append(n)
                return r
            mock_temp.side_effect = new_tempfile

            def run_it(phase):
                # tear down must happen in reverse creation order
                reverse_order = reversed(call_order)
                for node in reverse_order:
                    if isinstance(node, (LVMNode, PvsNode, VgsNode, LvsNode)):
                        getattr(node, phase)()
                    else:
                        logger.debug("Skipping node for test: %s", node)

            run_it('umount')
            run_it('cleanup')

            cmd_sequence = [
                # delete the lv's
                mock.call(['lvchange', '-an', '/dev/vg1/lv_root']),
                mock.call(['lvchange', '-an', '/dev/vg1/lv_tmp']),
                mock.call(['lvchange', '-an', '/dev/vg2/lv_var']),
                mock.call(['lvchange', '-an', '/dev/vg2/lv_log']),
                mock.call(['lvchange', '-an', '/dev/vg2/lv_audit']),
                mock.call(['lvchange', '-an', '/dev/vg2/lv_home']),
                # delete the vg's
                mock.call(['vgchange', '-an', 'vg1']),
                mock.call(['vgchange', '-an', 'vg2']),
                mock.call(['udevadm', 'settle']),
                mock.call(['pvscan', '--cache']),
            ]

            self.assertListEqual(mock_exec_sudo.call_args_list, cmd_sequence)
|
|
|
|
|
|
|
|
def test_lvm_spanned_vg(self):
|
|
|
|
|
|
|
|
# Test when a volume group spans some partitions
|
|
|
|
|
|
|
|
tree = self.load_config_file('lvm_tree_spanned_vg.yaml')
|
|
|
|
config = config_tree_to_graph(tree)
|
|
|
|
|
|
|
|
state = BlockDeviceState()
|
|
|
|
|
|
|
|
graph, call_order = create_graph(config, self.fake_default_config,
|
|
|
|
state)
|
|
|
|
|
|
|
|
# XXX: todo; test call_order. Make sure PV's come before, VG;
|
|
|
|
# VG before LV, and that mounts/etc happen afterwards.
|
|
|
|
|
|
|
|
# Fake state for the two PV's specified by this config
|
|
|
|
state['blockdev'] = {}
|
|
|
|
state['blockdev']['root'] = {}
|
|
|
|
state['blockdev']['root']['device'] = '/dev/fake/root'
|
|
|
|
state['blockdev']['data1'] = {}
|
|
|
|
state['blockdev']['data1']['device'] = '/dev/fake/data1'
|
|
|
|
state['blockdev']['data2'] = {}
|
|
|
|
state['blockdev']['data2']['device'] = '/dev/fake/data2'
|
|
|
|
|
|
|
|
# We mock patch this ... it's just a little long!
|
|
|
|
exec_sudo = 'diskimage_builder.block_device.level1.lvm.exec_sudo'
|
|
|
|
|
|
|
|
#
|
|
|
|
# Creation test
|
|
|
|
#
|
|
|
|
with mock.patch(exec_sudo) as mock_exec_sudo:
|
|
|
|
|
|
|
|
for node in call_order:
|
|
|
|
# XXX: This has not mocked out the "lower" layers of
|
|
|
|
# creating the devices, which we're assuming works OK, nor
|
|
|
|
# the upper layers.
|
2018-06-29 08:28:37 +00:00
|
|
|
if isinstance(node, (LVMNode, PvsNode, VgsNode, LvsNode)):
|
2017-06-08 05:02:18 +00:00
|
|
|
# only the LVMNode actually does anything here...
|
|
|
|
node.create()
|
|
|
|
|
|
|
|
# ensure the sequence of calls correctly setup the devices
|
|
|
|
cmd_sequence = [
|
|
|
|
# create the pv's on the faked out block devices
|
|
|
|
mock.call(['pvcreate', '/dev/fake/root', '--force']),
|
|
|
|
mock.call(['pvcreate', '/dev/fake/data1', '--force']),
|
|
|
|
mock.call(['pvcreate', '/dev/fake/data2', '--force']),
|
|
|
|
# create a root and a data volume, with the data volume
|
|
|
|
# spanning data1 & data2
|
|
|
|
mock.call(['vgcreate', 'vg_root',
|
|
|
|
'/dev/fake/root', '--force']),
|
|
|
|
mock.call(['vgcreate', 'vg_data',
|
|
|
|
'/dev/fake/data1', '/dev/fake/data2', '--force']),
|
|
|
|
# create root and data volume
|
|
|
|
mock.call(['lvcreate', '--name', 'lv_root',
|
|
|
|
'-L', '1800M', 'vg_root']),
|
|
|
|
mock.call(['lvcreate', '--name', 'lv_data',
|
|
|
|
'-L', '2G', 'vg_data'])
|
|
|
|
]
|
|
|
|
|
|
|
|
self.assertListEqual(mock_exec_sudo.call_args_list,
|
|
|
|
cmd_sequence)
|
|
|
|
|
|
|
|
with mock.patch(exec_sudo) as mock_exec_sudo, \
|
|
|
|
mock.patch('tempfile.NamedTemporaryFile') as mock_temp, \
|
|
|
|
mock.patch('os.unlink'):
|
|
|
|
|
|
|
|
# see above ...
|
|
|
|
tempfiles = []
|
|
|
|
|
|
|
|
def new_tempfile(*args, **kwargs):
|
|
|
|
n = '/tmp/files%s' % len(tempfiles)
|
|
|
|
r = mock.Mock()
|
|
|
|
r.configure_mock(name=n)
|
|
|
|
tempfiles.append(n)
|
|
|
|
return r
|
|
|
|
mock_temp.side_effect = new_tempfile
|
|
|
|
|
2018-06-29 08:28:37 +00:00
|
|
|
def run_it(phase):
|
|
|
|
reverse_order = reversed(call_order)
|
|
|
|
for node in reverse_order:
|
|
|
|
if isinstance(node, (LVMNode, PvsNode, VgsNode, LvsNode)):
|
|
|
|
getattr(node, phase)()
|
|
|
|
else:
|
|
|
|
logger.debug("Skipping node for test: %s", node)
|
|
|
|
|
|
|
|
run_it('umount')
|
|
|
|
run_it('cleanup')
|
2017-06-08 05:02:18 +00:00
|
|
|
|
|
|
|
cmd_sequence = [
|
2017-08-26 08:01:14 +00:00
|
|
|
# deactivate lv's
|
2017-06-08 05:02:18 +00:00
|
|
|
mock.call(['lvchange', '-an', '/dev/vg_root/lv_root']),
|
|
|
|
mock.call(['lvchange', '-an', '/dev/vg_data/lv_data']),
|
|
|
|
|
2017-08-26 08:01:14 +00:00
|
|
|
# deactivate vg's
|
2017-06-08 05:02:18 +00:00
|
|
|
mock.call(['vgchange', '-an', 'vg_root']),
|
|
|
|
mock.call(['vgchange', '-an', 'vg_data']),
|
|
|
|
|
|
|
|
mock.call(['udevadm', 'settle']),
|
2017-09-14 05:14:46 +00:00
|
|
|
mock.call(['pvscan', '--cache']),
|
2017-06-08 05:02:18 +00:00
|
|
|
]
|
|
|
|
|
|
|
|
self.assertListEqual(mock_exec_sudo.call_args_list, cmd_sequence)
|
2018-06-20 14:25:59 +00:00
|
|
|
|
|
|
|
    def test_lvm_multiple_partitions(self):
        """Verify create and umount/cleanup ordering for a multi-partition
        image where one partition hosts an LVM setup.

        The loop-device, partitioning and LVM layers are all patched and
        attached to one shared mock manager so the *relative* ordering of
        calls across layers can be asserted (e.g. partitions are removed
        only after LVM deactivation).
        """
        # Test the command-sequence for several partitions, one containing
        # volumes on it
        tree = self.load_config_file('lvm_tree_multiple_partitions.yaml')
        config = config_tree_to_graph(tree)

        state = BlockDeviceState()

        graph, call_order = create_graph(config, self.fake_default_config,
                                         state)

        # Fake state for the partitions on this config
        state['blockdev'] = {}
        state['blockdev']['image0'] = {}
        state['blockdev']['image0']['device'] = '/dev/fake/image0'
        state['blockdev']['image0']['image'] = 'image'
        state['blockdev']['root'] = {}
        state['blockdev']['root']['device'] = '/dev/fake/root'
        state['blockdev']['ESP'] = {}
        state['blockdev']['ESP']['device'] = '/dev/fake/ESP'
        state['blockdev']['BSP'] = {}
        state['blockdev']['BSP']['device'] = '/dev/fake/BSP'

        #
        # Creation test
        #

        # We mock out the following exec_sudo and other related calls
        # calls for the layers we are testing.
        exec_sudo_lvm = 'diskimage_builder.block_device.level1.lvm.exec_sudo'
        exec_sudo_part = ('diskimage_builder.block_device.'
                          'level1.partitioning.exec_sudo')
        exec_sudo_loop = ('diskimage_builder.block_device.'
                          'level0.localloop.exec_sudo')
        image_create = ('diskimage_builder.block_device.level0.'
                        'localloop.LocalLoopNode.create')
        size_of_block = ('diskimage_builder.block_device.level1.'
                         'partitioning.Partitioning._size_of_block_dev')
        create_mbr = ('diskimage_builder.block_device.level1.'
                      'partitioning.Partitioning._create_mbr')

        # attach all the patched mocks to one manager so call ordering
        # can be asserted across the different layers
        manager = mock.MagicMock()
        with mock.patch(exec_sudo_lvm) as mock_sudo_lvm, \
                mock.patch(exec_sudo_part) as mock_sudo_part, \
                mock.patch(exec_sudo_loop) as mock_sudo_loop, \
                mock.patch(image_create) as mock_image_create, \
                mock.patch(size_of_block) as mock_size_of_block, \
                mock.patch(create_mbr) as mock_create_mbr:

            manager.attach_mock(mock_sudo_lvm, 'sudo_lvm')
            manager.attach_mock(mock_sudo_part, 'sudo_part')
            manager.attach_mock(mock_sudo_loop, 'sudo_loop')
            manager.attach_mock(mock_image_create, 'image_create')
            manager.attach_mock(mock_size_of_block, 'size_of_block')
            manager.attach_mock(mock_create_mbr, 'create_mbr')

            for node in call_order:
                # We're just keeping this to the partition setup and
                # LVM creation; i.e. skipping mounting, mkfs, etc.
                if isinstance(node, (LVMNode, PvsNode,
                                     VgsNode, LvsNode,
                                     LocalLoopNode, PartitionNode)):
                    node.create()
                else:
                    logger.debug("Skipping node for test: %s", node)

            cmd_sequence = [
                # create the underlying block device
                mock.call.image_create(),
                mock.call.size_of_block('image'),
                # write out partition table
                mock.call.create_mbr(),
                # now mount partitions
                mock.call.sudo_part(['sync']),
                mock.call.sudo_part(['kpartx', '-uvs', '/dev/fake/image0']),
                # now create lvm environment
                mock.call.sudo_lvm(['pvcreate', '/dev/fake/root', '--force']),
                mock.call.sudo_lvm(
                    ['vgcreate', 'vg', '/dev/fake/root', '--force']),
                mock.call.sudo_lvm(
                    ['lvcreate', '--name', 'lv_root', '-l', '28%VG', 'vg']),
                mock.call.sudo_lvm(
                    ['lvcreate', '--name', 'lv_tmp', '-l', '4%VG', 'vg']),
                mock.call.sudo_lvm(
                    ['lvcreate', '--name', 'lv_var', '-l', '40%VG', 'vg']),
                mock.call.sudo_lvm(
                    ['lvcreate', '--name', 'lv_log', '-l', '23%VG', 'vg']),
                mock.call.sudo_lvm(
                    ['lvcreate', '--name', 'lv_audit', '-l', '4%VG', 'vg']),
                mock.call.sudo_lvm(
                    ['lvcreate', '--name', 'lv_home', '-l', '1%VG', 'vg']),
            ]
            manager.assert_has_calls(cmd_sequence)

        #
        # Umount/cleanup test
        #
        manager = mock.MagicMock()
        with mock.patch(exec_sudo_lvm) as mock_sudo_lvm, \
                mock.patch(exec_sudo_part) as mock_sudo_part, \
                mock.patch(exec_sudo_loop) as mock_sudo_loop:

            manager.attach_mock(mock_sudo_lvm, 'sudo_lvm')
            manager.attach_mock(mock_sudo_part, 'sudo_part')
            manager.attach_mock(mock_sudo_loop, 'sudo_loop')

            def run_it(phase):
                # tear down in reverse creation order
                reverse_order = reversed(call_order)
                for node in reverse_order:
                    if isinstance(node, (LVMNode, PvsNode,
                                         VgsNode, LvsNode,
                                         LocalLoopNode, PartitionNode)):
                        getattr(node, phase)()
                    else:
                        logger.debug("Skipping node for test: %s", node)

            run_it('umount')
            run_it('cleanup')

            cmd_sequence = [
                # deactivate LVM first
                mock.call.sudo_lvm(['lvchange', '-an', '/dev/vg/lv_root']),
                mock.call.sudo_lvm(['lvchange', '-an', '/dev/vg/lv_tmp']),
                mock.call.sudo_lvm(['lvchange', '-an', '/dev/vg/lv_var']),
                mock.call.sudo_lvm(['lvchange', '-an', '/dev/vg/lv_log']),
                mock.call.sudo_lvm(['lvchange', '-an', '/dev/vg/lv_audit']),
                mock.call.sudo_lvm(['lvchange', '-an', '/dev/vg/lv_home']),
                mock.call.sudo_lvm(['vgchange', '-an', 'vg']),
                mock.call.sudo_lvm(['udevadm', 'settle']),
                # now remove partitions (note has to happen after lvm removal)
                mock.call.sudo_part(['kpartx', '-d', '/dev/fake/image0']),
                # now remove loopback device
                mock.call.sudo_loop(['losetup', '-d', '/dev/fake/image0']),
                # now final LVM cleanup call
                mock.call.sudo_lvm(['pvscan', '--cache']),
            ]

            manager.assert_has_calls(cmd_sequence)
|