LVM support for dib-block-device

This provides basic LVM support for dib-block-device.

Co-Authored-By: Ian Wienand <iwienand@redhat.com>

Change-Id: Ibd624d9f95ee68b20a15891f639ddd5b3188cdf9
Yolanda Robla 2017-06-08 15:02:18 +10:00 committed by Ian Wienand
parent e04cf78fa5
commit c2dc3dc78e
10 changed files with 1555 additions and 3 deletions

@@ -0,0 +1,427 @@
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import os
import tempfile
from diskimage_builder.block_device.exception \
import BlockDeviceSetupException
from diskimage_builder.block_device.plugin import NodeBase
from diskimage_builder.block_device.plugin import PluginBase
from diskimage_builder.block_device.utils import exec_sudo
logger = logging.getLogger(__name__)
#
# LVM
# ---
#
# The LVM config requires a name and three sections: pvs, vgs and lvs
#
# lvm: -> LVMNode
# pvs: -> PvsNode
# lvs: -> LvsNode
# vgs: -> VgsNode
#
# The LVMPlugin will verify this and build nodes into the
# configuration graph.
#
# As described below, an LVMNode is created for synchronisation
# purposes. Thus if you had something like two partitions that became
# two physical-volumes (pv1 & pv2), that you then combine into a
# single volume group (vg) and then create several logical volumes
# (lv1, lv2, lv3) your graph would end up looking like:
#
#   partition1    partition2
#        |             |
#        +--> LVMNode <+
#               |
#        +------+------+
#        v             v
#       pv1           pv2
#        |             |
#        +---> vg <----+
#               |
#         +-----+-----+
#         v     v     v
#        lv1   lv2   lv3
#
# After the create() call on the LVMNode object, the entire LVM setup
# would actually be complete. The other nodes are all just
# place-holders, and are used for further ordering (for example, the
# fs creation & mounting should depend on the logical volume nodes).
# For this reason, their create() calls are blank. However, for code
# organisational purposes they have a private _create() and _cleanup()
# call that is driven by the LVMNode object.
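#
# As an illustrative sketch, a config corresponding to the graph above
# might look like the following (the partition and volume names are
# taken from the diagram; the sizes are hypothetical):
#
#  - lvm:
#      name: lvm
#      pvs:
#        - name: pv1
#          base: partition1
#        - name: pv2
#          base: partition2
#      vgs:
#        - name: vg
#          base: ["pv1", "pv2"]
#      lvs:
#        - name: lv1
#          base: vg
#          size: 1G
#        - name: lv2
#          base: vg
#          size: 1G
#        - name: lv3
#          base: vg
#          size: 1G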
class PvsNode(NodeBase):
def __init__(self, name, state, base, options):
"""Physical volume
This is a placeholder node for the LVM physical volumes.
Arguments:
:param name: Name of this node
:param state: global state pointer
:param base: Parent partition
:param options: config options
"""
super(PvsNode, self).__init__(name, state)
self.base = base
self.options = options
def _create(self):
# the underlying device path of our parent was previously
# recorded into the state during blockdev creation; look it
# up.
phys_dev = self.state['blockdev'][self.base]['device']
cmd = ["pvcreate"]
cmd.append(phys_dev)
if self.options:
cmd.extend(self.options)
logger.debug("Creating pv command [%s]", cmd)
exec_sudo(cmd)
# save state
if 'pvs' not in self.state:
self.state['pvs'] = {}
self.state['pvs'][self.name] = {
'opts': self.options,
'device': phys_dev
}
def _cleanup(self):
exec_sudo(['pvremove', '--force',
self.state['pvs'][self.name]['device']])
def get_edges(self):
# See LVMNode.get_edges() for how this gets connected
return ([], [])
def create(self):
# see notes in LVMNode object
pass
class VgsNode(NodeBase):
def __init__(self, name, state, base, options):
"""Volume Group
This is a placeholder node for a volume group
Arguments:
:param name: Name of this node
:param state: global state pointer
:param base: Parent :class:`PvsNode` list this volume group exists on
:param options: extra options passed to the `vgcreate` command
"""
super(VgsNode, self).__init__(name, state)
self.base = base
self.options = options
def _create(self):
# The PV's have saved their actual device name into the state
# during their _create(). Look at our base elements and thus
# find the underlying device paths in the state.
pvs_devs = []
for pv in self.base:
pvs_dev = self.state['pvs'][pv]['device']
pvs_devs.append(pvs_dev)
cmd = ["vgcreate", ]
cmd.append(self.name)
cmd.extend(pvs_devs)
if self.options:
cmd.extend(self.options)
logger.debug("Creating vg command [%s]", cmd)
exec_sudo(cmd)
# save state
if 'vgs' not in self.state:
self.state['vgs'] = {}
self.state['vgs'][self.name] = {
'opts': self.options,
'devices': self.base,
}
def _cleanup(self):
exec_sudo(['vgchange', '-an', self.name])
exec_sudo(['vgremove', '--force', self.name])
def get_edges(self):
# self.base is already a list, per the config. There might be
# multiple pv parents here.
edge_from = self.base
edge_to = []
return (edge_from, edge_to)
def create(self):
# see notes in LVMNode object
pass
class LvsNode(NodeBase):
def __init__(self, name, state, base, options, size, extents):
"""Logical Volume
This is a placeholder node for a logical volume
Arguments:
:param name: Name of this node
:param state: global state pointer
:param base: the parent volume group
:param options: options passed to lvcreate
:param size: size of the LV, in MB (this or extents must be provided)
:param extents: size of the LV in extents
"""
super(LvsNode, self).__init__(name, state)
self.base = base
self.options = options
self.size = size
self.extents = extents
def _create(self):
cmd = ["lvcreate", ]
cmd.extend(['--name', self.name])
if self.size:
cmd.extend(['-L', self.size])
elif self.extents:
cmd.extend(['-l', self.extents])
if self.options:
cmd.extend(self.options)
cmd.append(self.base)
logger.debug("Creating lv command [%s]", cmd)
exec_sudo(cmd)
# save state
self.state['blockdev'][self.name] = {
'vgs': self.base,
'size': self.size,
'extents': self.extents,
'opts': self.options,
'device': '/dev/mapper/%s-%s' % (self.base, self.name)
}
def _cleanup(self):
exec_sudo(['lvchange', '-an',
'/dev/%s/%s' % (self.base, self.name)])
exec_sudo(['lvremove', '--force',
'/dev/%s/%s' % (self.base, self.name)])
def get_edges(self):
edge_from = [self.base]
edge_to = []
return (edge_from, edge_to)
def create(self):
# see notes in LVMNode object
pass
class LVMNode(NodeBase):
def __init__(self, name, state, pvs, lvs, vgs):
"""LVM Driver Node
This is the "global" node where all LVM operations are driven
from. In the node graph, the LVM physical-volumes depend on
this node. This node then depends on the devices that the
PV's require. This node incorporates *all* LVM setup;
i.e. after the create() call here we have created all pv's,
lv's and vg's. The <Pvs|Lvs|Vgs>Node objects in the graph are
therefore just dependency place holders whose create() call
does nothing.
This is quite important in the cleanup phase. In theory, you
would remove the vg's, then the lv's and then free-up the
pv's. But the process of removing these also removes them
from the LVM meta-data in the image, undoing all the
configuration. Thus the unwind process is also "atomic" in
this node; we do a copy of the devices before removing the LVM
components, and then copy them back (better ideas welcome!).
As with creation, the cleanup() calls in the other nodes are
just placeholders.
Arguments:
:param name: name of this node
:param state: global state pointer
:param pvs: A list of :class:`PvsNode` objects
:param lvs: A list of :class:`LvsNode` objects
:param vgs: A list of :class:`VgsNode` objects
"""
super(LVMNode, self).__init__(name, state)
self.pvs = pvs
self.lvs = lvs
self.vgs = vgs
def get_edges(self):
# This node requires the physical device(s), which are
# recorded in the "base" argument of the PV nodes.
pvs = []
for pv in self.pvs:
pvs.append(pv.base)
edge_from = set(pvs)
# The PV nodes should then depend on us; i.e., we have just
# made this node a synchronisation point.
edge_to = [pv.name for pv in self.pvs]
return (edge_from, edge_to)
def create(self):
# Run through pvs->vgs->lvs and create them
# XXX: we could theoretically get this same info from walking
# the graph of our children nodes? Would that be helpful in
# any way?
for pvs in self.pvs:
pvs._create()
for vgs in self.vgs:
vgs._create()
for lvs in self.lvs:
lvs._create()
def cleanup(self):
# First do a copy of all physical devices to individual
# temporary files. This is because the physical device is
# full of LVM metadata describing the volumes and we don't
# have a better way to handle removing the devices/volumes
# from the host system while persisting this metadata in the
# underlying devices.
tempfiles = collections.OrderedDict() # to unwind in same order!
for pvs in self.pvs:
phys_dev = self.state['blockdev'][pvs.base]['device']
target_file = tempfile.NamedTemporaryFile(delete=False)
target_file.close()
exec_sudo(['dd', 'if=%s' % phys_dev,
'of=%s' % target_file.name])
tempfiles[target_file.name] = phys_dev
# once copied, start the removal in reverse order
for lvs in self.lvs:
lvs._cleanup()
for vgs in self.vgs:
vgs._cleanup()
for pvs in self.pvs:
pvs._cleanup()
exec_sudo(['udevadm', 'settle'])
# after the cleanup copy devices back
for tmp_name, phys_dev in tempfiles.items():
exec_sudo(['dd', 'if=%s' % tmp_name, 'of=%s' % phys_dev])
os.unlink(tmp_name)
class LVMPlugin(PluginBase):
def _config_error(self, msg):
raise BlockDeviceSetupException(msg)
def __init__(self, config, defaults, state):
"""Build LVM nodes
This reads the "lvm:" config stanza, validates it and produces
the PV, VG and LV nodes. These are all synchronised via a
LVMNode as described above.
Arguments:
:param config: "lvm" configuration dictionary
:param defaults: global defaults dictionary
:param state: global state reference
"""
super(LVMPlugin, self).__init__()
# note lvm: doesn't require a base ... the base is the
# physical devices the "pvs" nodes are made on.
if 'name' not in config:
self._config_error("Lvm config requires 'name'")
if 'pvs' not in config:
self._config_error("Lvm config requires a 'pvs'")
if 'vgs' not in config:
self._config_error("Lvm config needs 'vgs'")
if 'lvs' not in config:
self._config_error("Lvm config needs 'lvs'")
# create physical volume nodes
self.pvs = []
self.pvs_keys = []
for pvs_cfg in config['pvs']:
if 'name' not in pvs_cfg:
self._config_error("Missing 'name' in pvs config")
if 'base' not in pvs_cfg:
self._config_error("Missing 'base' in pvs_config")
pvs_item = PvsNode(pvs_cfg['name'], state,
pvs_cfg['base'],
pvs_cfg.get('options'))
self.pvs.append(pvs_item)
# create volume group nodes
self.vgs = []
self.vgs_keys = []
for vgs_cfg in config['vgs']:
if 'name' not in vgs_cfg:
self._config_error("Missing 'name' in vgs config")
if 'base' not in vgs_cfg:
self._config_error("Missing 'base' in vgs config")
# Ensure we have a valid PVs backing this VG
for pvs in vgs_cfg['base']:
if not any(pv.name == pvs for pv in self.pvs):
self._config_error("base:%s in vgs does not "
"match a valid pvs" % pvs)
vgs_item = VgsNode(vgs_cfg['name'], state, vgs_cfg['base'],
vgs_cfg.get('options', None))
self.vgs.append(vgs_item)
# create logical volume nodes
self.lvs = []
for lvs_cfg in config['lvs']:
if 'name' not in lvs_cfg:
self._config_error("Missing 'name' in lvs config")
if 'base' not in lvs_cfg:
self._config_error("Missing 'base' in lvs config")
if 'size' not in lvs_cfg and 'extents' not in lvs_cfg:
self._config_error("Missing 'size' or 'extents' in lvs config")
# ensure this logical volume has a valid volume group base
if not any(vg.name == lvs_cfg['base'] for vg in self.vgs):
self._config_error("base:%s in lvs does not match a valid vg" %
lvs_cfg['base'])
lvs_item = LvsNode(lvs_cfg['name'], state, lvs_cfg['base'],
lvs_cfg.get('options', None),
lvs_cfg.get('size', None),
lvs_cfg.get('extents', None))
self.lvs.append(lvs_item)
# create the "driver" node
self.lvm_node = LVMNode(config['name'], state,
self.pvs, self.lvs, self.vgs)
def get_nodes(self):
# the nodes for insertion into the graph are all of the pvs,
# vgs and lvs nodes we have created above, and the root node.
return self.pvs + self.vgs + self.lvs + [self.lvm_node]

@@ -0,0 +1,142 @@
- local_loop:
name: image0
- partitioning:
base: image0
name: mbr
label: mbr
partitions:
- name: root
base: image0
flags: [ boot,primary ]
size: 3G
- lvm:
base: mbr
name: lvm_mbr
pvs:
- name: pv
options: ["--force"]
base: root
vgs:
- name: vg
base: ["pv"]
options: ["--force"]
lvs:
- name: lv_root
base: vg
size: 1800M
- name: lv_tmp
base: vg
size: 100M
- name: lv_var
base: vg
size: 500M
- name: lv_log
base: vg
size: 100M
- name: lv_audit
base: vg
size: 100M
- name: lv_home
base: vg
size: 200M
- mkfs:
name: fs_root
base: lv_root
label: "img-rootfs"
type: "xfs"
- mount:
name: mount_fs_root
base: fs_root
mount_point: /
- fstab:
name: fstab_mount_fs_root
base: mount_fs_root
options: "rw,relatime"
fsck-passno: 1
- mkfs:
name: fs_var
base: lv_var
type: "xfs"
- mount:
name: mount_fs_var
base: fs_var
mount_point: /var
- fstab:
name: fstab_mount_fs_var
base: mount_fs_var
options: "rw,relatime"
- mkfs:
name: fs_log
base: lv_log
type: "xfs"
- mount:
name: mount_fs_log
base: fs_log
mount_point: /var/log
- fstab:
name: fstab_mount_fs_log
base: mount_fs_log
options: "rw,relatime"
- mkfs:
name: fs_audit
base: lv_audit
type: "xfs"
- mount:
name: mount_fs_audit
base: fs_audit
mount_point: /var/log/audit
- fstab:
name: fstab_mount_fs_audit
base: mount_fs_audit
options: "rw,relatime"
- mkfs:
name: fs_tmp
base: lv_tmp
type: "xfs"
- mount:
name: mount_fs_tmp
base: fs_tmp
mount_point: /tmp
- fstab:
name: fstab_mount_fs_tmp
base: mount_fs_tmp
options: "rw,nosuid,nodev,noexec,relatime"
- mkfs:
name: fs_home
base: lv_home
type: "xfs"
- mount:
name: mount_fs_home
base: fs_home
mount_point: /home
- fstab:
name: fstab_mount_fs_home
base: mount_fs_home
options: "rw,nodev,relatime"

@@ -0,0 +1,105 @@
- local_loop:
name: image0
- partitioning:
base: image0
name: mbr
label: mbr
partitions:
- name: root
base: image0
flags: [ boot,primary ]
size: 3G
- lvm:
base: mbr
pvs:
- name: pv
options: ["--force"]
base: root
vgs:
- name: vg
base: ["pv"]
options: ["--force"]
lvs:
- name: lv_root
base: vg
size: 1800M
- name: lv_tmp
base: vg
size: 100M
- name: lv_var
base: vg
size: 500M
- name: lv_log
base: vg
size: 100M
- name: lv_audit
base: vg
size: 100M
- name: lv_home
base: vg
size: 200M
- mkfs:
name: fs_root
base: lv_root
label: "img-rootfs"
type: "xfs"
mount:
mount_point: /
fstab:
options: "rw,relatime"
fsck-passno: 1
- mkfs:
name: fs_var
base: lv_var
type: "xfs"
mount:
mount_point: /var
fstab:
options: "rw,relatime"
- mkfs:
name: fs_log
base: lv_log
type: "xfs"
mount:
mount_point: /var/log
fstab:
options: "rw,relatime"
- mkfs:
name: fs_audit
base: lv_audit
type: "xfs"
mount:
mount_point: /var/log/audit
fstab:
options: "rw,relatime"
- mkfs:
name: fs_tmp
base: lv_tmp
type: "xfs"
mount:
mount_point: /tmp
fstab:
options: "rw,nosuid,nodev,noexec,relatime"
- mkfs:
name: fs_home
base: lv_home
type: "xfs"
mount:
mount_point: /home
fstab:
options: "rw,nodev,relatime"

@@ -0,0 +1,109 @@
- local_loop:
name: image0
- partitioning:
base: image0
label: mbr
partitions:
- name: root
flags: [ boot,primary ]
size: 3G
- name: data
flags: [ primary ]
size: 1G
- lvm:
name: lvm
pvs:
- name: pv
options: ["--force"]
base: root
- name: pv1
options: ["--force"]
base: data
vgs:
- name: vg
base: ["pv", "pv1"]
options: ["--force"]
lvs:
- name: lv_root
base: vg
size: 1800M
- name: lv_tmp
base: vg
size: 100M
- name: lv_var
base: vg
size: 500M
- name: lv_log
base: vg
size: 100M
- name: lv_audit
base: vg
size: 100M
- name: lv_home
base: vg
size: 200M
- mkfs:
name: fs_root
base: lv_root
label: "img-rootfs"
type: "xfs"
mount:
mount_point: /
fstab:
options: "rw,relatime"
fsck-passno: 1
- mkfs:
name: fs_var
base: lv_var
type: "xfs"
mount:
mount_point: /var
fstab:
options: "rw,relatime"
- mkfs:
name: fs_log
base: lv_log
type: "xfs"
mount:
mount_point: /var/log
fstab:
options: "rw,relatime"
- mkfs:
name: fs_audit
base: lv_audit
type: "xfs"
mount:
mount_point: /var/log/audit
fstab:
options: "rw,relatime"
- mkfs:
name: fs_tmp
base: lv_tmp
type: "xfs"
mount:
mount_point: /tmp
fstab:
options: "rw,nosuid,nodev,noexec,relatime"
- mkfs:
name: fs_home
base: lv_home
type: "xfs"
mount:
mount_point: /home
fstab:
options: "rw,nodev,relatime"

@@ -0,0 +1,113 @@
- local_loop:
name: image0
- partitioning:
base: image0
label: mbr
partitions:
- name: root
flags: [ boot,primary ]
size: 3G
- name: data
flags: [ primary ]
size: 1G
- lvm:
name: lvm
pvs:
- name: pv
options: ["--force"]
base: root
- name: pv1
options: ["--force"]
base: data
vgs:
- name: vg1
base: ["pv"]
options: ["--force"]
- name: vg2
base: ["pv1"]
options: ["--force"]
lvs:
- name: lv_root
base: vg1
size: 1800M
- name: lv_tmp
base: vg1
size: 100M
- name: lv_var
base: vg2
size: 500M
- name: lv_log
base: vg2
size: 100M
- name: lv_audit
base: vg2
size: 100M
- name: lv_home
base: vg2
size: 200M
- mkfs:
name: fs_root
base: lv_root
label: "img-rootfs"
type: "xfs"
mount:
mount_point: /
fstab:
options: "rw,relatime"
fsck-passno: 1
- mkfs:
name: fs_var
base: lv_var
type: "xfs"
mount:
mount_point: /var
fstab:
options: "rw,relatime"
- mkfs:
name: fs_log
base: lv_log
type: "xfs"
mount:
mount_point: /var/log
fstab:
options: "rw,relatime"
- mkfs:
name: fs_audit
base: lv_audit
type: "xfs"
mount:
mount_point: /var/log/audit
fstab:
options: "rw,relatime"
- mkfs:
name: fs_tmp
base: lv_tmp
type: "xfs"
mount:
mount_point: /tmp
fstab:
options: "rw,nosuid,nodev,noexec,relatime"
- mkfs:
name: fs_home
base: lv_home
type: "xfs"
mount:
mount_point: /home
fstab:
options: "rw,nodev,relatime"

@@ -0,0 +1,69 @@
# testing config for a LVM data volume group that spans two partitions
- local_loop:
name: image0
- partitioning:
base: image0
label: mbr
partitions:
- name: root
flags: [ boot,primary ]
size: 3G
- name: data1
flags: [ primary ]
size: 1G
- name: data2
flags: [ primary ]
size: 1G
- lvm:
name: lvm
pvs:
- name: pv_root
options: ["--force"]
base: root
- name: pv_data1
options: ["--force"]
base: data1
- name: pv_data2
options: ["--force"]
base: data2
vgs:
- name: vg_root
base: ["pv_root"]
options: ["--force"]
- name: vg_data
base: ["pv_data1", "pv_data2"]
options: ["--force"]
lvs:
- name: lv_root
base: vg_root
size: 1800M
- name: lv_data
base: vg_data
size: 2G
- mkfs:
name: fs_root
base: lv_root
label: "img-rootfs"
type: "xfs"
mount:
mount_point: /
fstab:
options: "rw,relatime"
fsck-passno: 1
- mkfs:
name: fs_data
base: lv_data
type: "xfs"
mount:
mount_point: /opt
fstab:
options: "rw,relatime"

@@ -0,0 +1,462 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import logging
import mock
import diskimage_builder.block_device.tests.test_config as tc
from diskimage_builder.block_device.blockdevice import BlockDeviceState
from diskimage_builder.block_device.config import config_tree_to_graph
from diskimage_builder.block_device.config import create_graph
from diskimage_builder.block_device.exception import \
BlockDeviceSetupException
from diskimage_builder.block_device.level1.lvm import LVMNode
from diskimage_builder.block_device.level1.lvm import LVMPlugin
from diskimage_builder.block_device.level1.lvm import LvsNode
from diskimage_builder.block_device.level1.lvm import PvsNode
from diskimage_builder.block_device.level1.lvm import VgsNode
logger = logging.getLogger(__name__)
class TestLVM(tc.TestGraphGeneration):
def test_lvm_tree_to_graph(self):
# equivalence of tree-based to graph-based config
tree = self.load_config_file('lvm_tree.yaml')
graph = self.load_config_file('lvm_graph.yaml')
parsed_graph = config_tree_to_graph(tree)
self.assertItemsEqual(parsed_graph, graph)
def test_lvm_invalid_config(self):
# test some invalid config paths
config = self.load_config_file('lvm_graph.yaml')
lvm_config = config[2]['lvm']
bad_config = copy.deepcopy(lvm_config)
bad_config['vgs'][0]['base'] = ['invalid_pv']
self.assertRaisesRegex(BlockDeviceSetupException,
"base:invalid_pv in vgs does not match "
"a valid pvs",
LVMPlugin, bad_config, {}, {})
bad_config = copy.deepcopy(lvm_config)
bad_config['lvs'][0]['base'] = ['invalid_vg']
self.assertRaisesRegex(BlockDeviceSetupException,
"base:\['invalid_vg'\] in lvs does not match "
"a valid vg",
LVMPlugin, bad_config, {}, {})
bad_config = copy.deepcopy(lvm_config)
del(bad_config['lvs'][0]['size'])
self.assertRaisesRegex(BlockDeviceSetupException,
"Missing 'size' or 'extents' in lvs config",
LVMPlugin, bad_config, {}, {})
@mock.patch('diskimage_builder.block_device.level1.lvm.exec_sudo')
def test_lvm_multi_pv(self, mock_exec_sudo):
# Test the command-sequence for a more complicated LVM setup
tree = self.load_config_file('lvm_tree_multiple_pv.yaml')
config = config_tree_to_graph(tree)
state = BlockDeviceState()
graph, call_order = create_graph(config, self.fake_default_config,
state)
# XXX: todo; test call_order. Make sure PVs come before VGs,
# VGs before LVs, and that mounts/etc happen afterwards.
# Fake state for the two PV's specified by this config
state['blockdev'] = {}
state['blockdev']['root'] = {}
state['blockdev']['root']['device'] = '/dev/fake/root'
state['blockdev']['data'] = {}
state['blockdev']['data']['device'] = '/dev/fake/data'
for node in call_order:
# XXX: This has not mocked out the "lower" layers of
# creating the devices, which we're assuming works OK, nor
# the upper layers.
if isinstance(node, (LVMNode, PvsNode, VgsNode, LvsNode)):
# only the LVMNode actually does anything here...
node.create()
# ensure the sequence of calls correctly setup the devices
cmd_sequence = [
# create the pv's on the faked out block devices
mock.call(['pvcreate', '/dev/fake/root', '--force']),
mock.call(['pvcreate', '/dev/fake/data', '--force']),
# create a volume group called "vg" out of these two pv's
mock.call(['vgcreate', 'vg',
'/dev/fake/root', '/dev/fake/data', '--force']),
# create a bunch of lv's on vg
mock.call(['lvcreate', '--name', 'lv_root', '-L', '1800M', 'vg']),
mock.call(['lvcreate', '--name', 'lv_tmp', '-L', '100M', 'vg']),
mock.call(['lvcreate', '--name', 'lv_var', '-L', '500M', 'vg']),
mock.call(['lvcreate', '--name', 'lv_log', '-L', '100M', 'vg']),
mock.call(['lvcreate', '--name', 'lv_audit', '-L', '100M', 'vg']),
mock.call(['lvcreate', '--name', 'lv_home', '-L', '200M', 'vg'])]
self.assertEqual(mock_exec_sudo.call_count, len(cmd_sequence))
mock_exec_sudo.assert_has_calls(cmd_sequence)
# Ensure the correct LVM state was preserved
blockdev_state = {
'data': {'device': '/dev/fake/data'},
'root': {'device': '/dev/fake/root'},
'lv_audit': {
'device': '/dev/mapper/vg-lv_audit',
'extents': None,
'opts': None,
'size': '100M',
'vgs': 'vg'
},
'lv_home': {
'device': '/dev/mapper/vg-lv_home',
'extents': None,
'opts': None,
'size': '200M',
'vgs': 'vg'
},
'lv_log': {
'device': '/dev/mapper/vg-lv_log',
'extents': None,
'opts': None,
'size': '100M',
'vgs': 'vg'
},
'lv_root': {
'device': '/dev/mapper/vg-lv_root',
'extents': None,
'opts': None,
'size': '1800M',
'vgs': 'vg'
},
'lv_tmp': {
'device': '/dev/mapper/vg-lv_tmp',
'extents': None,
'opts': None,
'size': '100M',
'vgs': 'vg'
},
'lv_var': {
'device': '/dev/mapper/vg-lv_var',
'extents': None,
'opts': None,
'size': '500M',
'vgs': 'vg'
},
}
# state.debug_dump()
self.assertDictEqual(state['blockdev'], blockdev_state)
# XXX: mount ordering? fs creation?
def test_lvm_multi_pv_vg(self):
# Test the command-sequence for a more complicated LVM setup
tree = self.load_config_file('lvm_tree_multiple_pv_vg.yaml')
config = config_tree_to_graph(tree)
state = BlockDeviceState()
graph, call_order = create_graph(config, self.fake_default_config,
state)
# XXX: todo; test call_order. Make sure PVs come before VGs,
# VGs before LVs, and that mounts/etc happen afterwards.
# Fake state for the two PV's specified by this config
state['blockdev'] = {}
state['blockdev']['root'] = {}
state['blockdev']['root']['device'] = '/dev/fake/root'
state['blockdev']['data'] = {}
state['blockdev']['data']['device'] = '/dev/fake/data'
# We mock patch this ... it's just a little long!
exec_sudo = 'diskimage_builder.block_device.level1.lvm.exec_sudo'
#
# Creation test
#
with mock.patch(exec_sudo) as mock_exec_sudo:
for node in call_order:
# XXX: This has not mocked out the "lower" layers of
# creating the devices, which we're assuming works OK, nor
# the upper layers.
if isinstance(node, (LVMNode, PvsNode, VgsNode, LvsNode)):
# only the LVMNode actually does anything here...
node.create()
# ensure the sequence of calls correctly setup the devices
cmd_sequence = [
# create the pv's on the faked out block devices
mock.call(['pvcreate', '/dev/fake/root', '--force']),
mock.call(['pvcreate', '/dev/fake/data', '--force']),
# create two volume groups, one on each pv
mock.call(['vgcreate', 'vg1',
'/dev/fake/root', '--force']),
mock.call(['vgcreate', 'vg2',
'/dev/fake/data', '--force']),
# create a bunch of lv's on vg1 and vg2
mock.call(['lvcreate', '--name', 'lv_root',
'-L', '1800M', 'vg1']),
mock.call(['lvcreate', '--name', 'lv_tmp',
'-L', '100M', 'vg1']),
mock.call(['lvcreate', '--name', 'lv_var',
'-L', '500M', 'vg2']),
mock.call(['lvcreate', '--name', 'lv_log',
'-L', '100M', 'vg2']),
mock.call(['lvcreate', '--name', 'lv_audit',
'-L', '100M', 'vg2']),
mock.call(['lvcreate', '--name', 'lv_home',
'-L', '200M', 'vg2'])]
self.assertListEqual(mock_exec_sudo.call_args_list,
cmd_sequence)
# Ensure the correct LVM state was preserved
blockdev_state = {
'data': {'device': '/dev/fake/data'},
'root': {'device': '/dev/fake/root'},
'lv_audit': {
'device': '/dev/mapper/vg2-lv_audit',
'extents': None,
'opts': None,
'size': '100M',
'vgs': 'vg2'
},
'lv_home': {
'device': '/dev/mapper/vg2-lv_home',
'extents': None,
'opts': None,
'size': '200M',
'vgs': 'vg2'
},
'lv_log': {
'device': '/dev/mapper/vg2-lv_log',
'extents': None,
'opts': None,
'size': '100M',
'vgs': 'vg2'
},
'lv_root': {
'device': '/dev/mapper/vg1-lv_root',
'extents': None,
'opts': None,
'size': '1800M',
'vgs': 'vg1'
},
'lv_tmp': {
'device': '/dev/mapper/vg1-lv_tmp',
'extents': None,
'opts': None,
'size': '100M',
'vgs': 'vg1'
},
'lv_var': {
'device': '/dev/mapper/vg2-lv_var',
'extents': None,
'opts': None,
'size': '500M',
'vgs': 'vg2'
},
}
# state.debug_dump()
self.assertDictEqual(state['blockdev'], blockdev_state)
#
# Cleanup test
#
with mock.patch(exec_sudo) as mock_exec_sudo, \
mock.patch('tempfile.NamedTemporaryFile') as mock_temp, \
mock.patch('os.unlink'):
# each call to tempfile.NamedTemporaryFile will return a
# new mock with a unique filename, which we store in
# tempfiles
tempfiles = []
def new_tempfile(*args, **kwargs):
n = '/tmp/files%s' % len(tempfiles)
# trap! note mock.Mock(name = n) doesn't work like you
# think it would, since mock has a name attribute.
# That's why we override it with the configure_mock
# (this is mentioned in mock documentation if you read
# it :)
r = mock.Mock()
r.configure_mock(name=n)
tempfiles.append(n)
return r
mock_temp.side_effect = new_tempfile
reverse_order = reversed(call_order)
for node in reverse_order:
if isinstance(node, (LVMNode, PvsNode, VgsNode, LvsNode)):
node.cleanup()
cmd_sequence = [
# copy the temporary drives
mock.call(['dd', 'if=/dev/fake/root', 'of=%s' % tempfiles[0]]),
mock.call(['dd', 'if=/dev/fake/data', 'of=%s' % tempfiles[1]]),
# delete the lv's
mock.call(['lvchange', '-an', '/dev/vg1/lv_root']),
mock.call(['lvremove', '--force', '/dev/vg1/lv_root']),
mock.call(['lvchange', '-an', '/dev/vg1/lv_tmp']),
mock.call(['lvremove', '--force', '/dev/vg1/lv_tmp']),
mock.call(['lvchange', '-an', '/dev/vg2/lv_var']),
mock.call(['lvremove', '--force', '/dev/vg2/lv_var']),
mock.call(['lvchange', '-an', '/dev/vg2/lv_log']),
mock.call(['lvremove', '--force', '/dev/vg2/lv_log']),
mock.call(['lvchange', '-an', '/dev/vg2/lv_audit']),
mock.call(['lvremove', '--force', '/dev/vg2/lv_audit']),
mock.call(['lvchange', '-an', '/dev/vg2/lv_home']),
mock.call(['lvremove', '--force', '/dev/vg2/lv_home']),
# delete the vg's
mock.call(['vgchange', '-an', 'vg1']),
mock.call(['vgremove', '--force', 'vg1']),
mock.call(['vgchange', '-an', 'vg2']),
mock.call(['vgremove', '--force', 'vg2']),
# delete the pv's
mock.call(['pvremove', '--force', '/dev/fake/root']),
mock.call(['pvremove', '--force', '/dev/fake/data']),
# copy back again
mock.call(['udevadm', 'settle']),
mock.call(['dd', 'if=%s' % tempfiles[0], 'of=/dev/fake/root']),
mock.call(['dd', 'if=%s' % tempfiles[1], 'of=/dev/fake/data']),
]
self.assertListEqual(mock_exec_sudo.call_args_list, cmd_sequence)
def test_lvm_spanned_vg(self):
# Test when a volume group spans some partitions
tree = self.load_config_file('lvm_tree_spanned_vg.yaml')
config = config_tree_to_graph(tree)
state = BlockDeviceState()
graph, call_order = create_graph(config, self.fake_default_config,
state)
# XXX: todo; test call_order. Make sure PVs come before VGs,
# VGs before LVs, and that mounts/etc happen afterwards.
# Fake state for the two PV's specified by this config
state['blockdev'] = {}
state['blockdev']['root'] = {}
state['blockdev']['root']['device'] = '/dev/fake/root'
state['blockdev']['data1'] = {}
state['blockdev']['data1']['device'] = '/dev/fake/data1'
state['blockdev']['data2'] = {}
state['blockdev']['data2']['device'] = '/dev/fake/data2'
# We mock patch this ... it's just a little long!
exec_sudo = 'diskimage_builder.block_device.level1.lvm.exec_sudo'
#
# Creation test
#
with mock.patch(exec_sudo) as mock_exec_sudo:
for node in call_order:
# XXX: This has not mocked out the "lower" layers of
# creating the devices, which we're assuming works OK, nor
# the upper layers.
if isinstance(node, (LVMNode, PvsNode, VgsNode, LvsNode)):
# only the LVMNode actually does anything here...
node.create()
# ensure the sequence of calls correctly setup the devices
cmd_sequence = [
# create the pv's on the faked out block devices
mock.call(['pvcreate', '/dev/fake/root', '--force']),
mock.call(['pvcreate', '/dev/fake/data1', '--force']),
mock.call(['pvcreate', '/dev/fake/data2', '--force']),
# create a root and a data volume group, with vg_data
# spanning data1 & data2
mock.call(['vgcreate', 'vg_root',
'/dev/fake/root', '--force']),
mock.call(['vgcreate', 'vg_data',
'/dev/fake/data1', '/dev/fake/data2', '--force']),
# create the root and data logical volumes
mock.call(['lvcreate', '--name', 'lv_root',
'-L', '1800M', 'vg_root']),
mock.call(['lvcreate', '--name', 'lv_data',
'-L', '2G', 'vg_data'])
]
self.assertListEqual(mock_exec_sudo.call_args_list,
cmd_sequence)
with mock.patch(exec_sudo) as mock_exec_sudo, \
mock.patch('tempfile.NamedTemporaryFile') as mock_temp, \
mock.patch('os.unlink'):
# see above ...
tempfiles = []
def new_tempfile(*args, **kwargs):
n = '/tmp/files%s' % len(tempfiles)
r = mock.Mock()
r.configure_mock(name=n)
tempfiles.append(n)
return r
mock_temp.side_effect = new_tempfile
reverse_order = reversed(call_order)
for node in reverse_order:
if isinstance(node, (LVMNode, PvsNode, VgsNode, LvsNode)):
node.cleanup()
cmd_sequence = [
# copy the temporary drives
mock.call(['dd', 'if=/dev/fake/root',
'of=%s' % tempfiles[0]]),
mock.call(['dd', 'if=/dev/fake/data1',
'of=%s' % tempfiles[1]]),
mock.call(['dd', 'if=/dev/fake/data2',
'of=%s' % tempfiles[2]]),
# remove lv's
mock.call(['lvchange', '-an', '/dev/vg_root/lv_root']),
mock.call(['lvremove', '--force', '/dev/vg_root/lv_root']),
mock.call(['lvchange', '-an', '/dev/vg_data/lv_data']),
mock.call(['lvremove', '--force', '/dev/vg_data/lv_data']),
# remove vg's
mock.call(['vgchange', '-an', 'vg_root']),
mock.call(['vgremove', '--force', 'vg_root']),
mock.call(['vgchange', '-an', 'vg_data']),
mock.call(['vgremove', '--force', 'vg_data']),
# remove pv's
mock.call(['pvremove', '--force', '/dev/fake/root']),
mock.call(['pvremove', '--force', '/dev/fake/data1']),
mock.call(['pvremove', '--force', '/dev/fake/data2']),
# copy back again
mock.call(['udevadm', 'settle']),
mock.call(['dd', 'if=%s' % tempfiles[0],
'of=/dev/fake/root']),
mock.call(['dd', 'if=%s' % tempfiles[1],
'of=/dev/fake/data1']),
mock.call(['dd', 'if=%s' % tempfiles[2],
'of=/dev/fake/data2']),
]
self.assertListEqual(mock_exec_sudo.call_args_list, cmd_sequence)

@@ -176,9 +176,6 @@ Tree and digraph notations can be mixed as needed in a configuration.
Limitations
+++++++++++
-There are a couple of new modules planned, but not yet implemented,
-like LVM, MD, encryption, ...
To provide an interface towards the existing elements, there are
currently three fixed keys used - which are not configurable:
@@ -357,6 +354,129 @@ On the `image0` two partitions are created. The size of the first is
1GiB, the second uses the remaining free space. On the `data_image`
three partitions are created: all are about 1/3 of the disk size.
Module: Lvm
...........
This module generates volumes on existing block devices. This means that it
is possible to take any previously created partition and create volumes on
it.
The symbolic name for this module is `lvm`.
There are the following key / value pairs to define one set of volumes:
pvs
(mandatory) A list of dictionaries. Each dictionary describes one
physical volume.
vgs
(mandatory) A list of dictionaries. Each dictionary describes one volume
group.
lvs
(mandatory) A list of dictionaries. Each dictionary describes one logical
volume.
The following key / value pairs can be given for each `pvs`:
name
(mandatory) The name of the physical volume. With the help of this
name, the physical volume can later be referenced, e.g. when creating
a volume group.
base
(mandatory) The name of the partition where the physical volume
needs to be created.
options
(optional) List of options for the physical volume. It can contain
any option supported by the `pvcreate` command.
The following key / value pairs can be given for each `vgs`:
name
(mandatory) The name of the volume group. With the help of this name,
the volume group can later be referenced, e.g. when creating a logical
volume.
base
(mandatory) The name(s) of the physical volumes the volume group needs
to be created on. As a volume group can be created on one or more
physical volumes, this needs to be a list.
options
(optional) List of options for the volume group. It can contain any
option supported by the `vgcreate` command.
The following key / value pairs can be given for each `lvs`:
name
(mandatory) The name of the logical volume. With the help of this name,
the logical volume can later be referenced, e.g. when creating a
filesystem.
base
(mandatory) The name of the volume group where the logical volume
needs to be created.
size
(optional) The exact size of the volume to be created. It accepts the same
syntax as the -L flag of the `lvcreate` command.
extents
(optional) The relative size in extents of the volume to be created. It
accepts the same syntax as the -l flag of the `lvcreate` command.
Either `size` or `extents` must be given for each logical volume.
options
(optional) List of options for the logical volume. It can contain any
option supported by the `lvcreate` command.
Example:
.. code-block:: yaml
- lvm:
name: lvm
pvs:
- name: pv
options: ["--force"]
base: root
vgs:
- name: vg
base: ["pv"]
options: ["--force"]
lvs:
- name: lv_root
base: vg
size: 1800M
- name: lv_tmp
base: vg
size: 100M
- name: lv_var
base: vg
size: 500M
- name: lv_log
base: vg
size: 100M
- name: lv_audit
base: vg
size: 100M
- name: lv_home
base: vg
size: 200M
On the `root` partition a physical volume is created. On that physical
volume, a volume group is created. On top of this volume group, six logical
volumes are created.
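
The `extents` key makes it possible to size a volume relative to its
volume group instead of giving an absolute size. A minimal sketch (the
volume names and percentages here are illustrative, not part of the
example above):

.. code-block:: yaml

    lvs:
      - name: lv_data
        base: vg
        extents: 80%VG
      - name: lv_spare
        base: vg
        extents: 100%FREE

The `80%VG` and `100%FREE` values use the percentage syntax of the `-l`
flag of the `lvcreate` command.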
Level 2
+++++++

@@ -0,0 +1,4 @@
---
features:
- Adds LVM support, allowing the creation of volumes.

@@ -68,6 +68,7 @@ console_scripts =
diskimage_builder.block_device.plugin =
    local_loop = diskimage_builder.block_device.level0.localloop:LocalLoop
    partitioning = diskimage_builder.block_device.level1.partitioning:Partitioning
    lvm = diskimage_builder.block_device.level1.lvm:LVMPlugin
    mkfs = diskimage_builder.block_device.level2.mkfs:Mkfs
    mount = diskimage_builder.block_device.level3.mount:Mount
    fstab = diskimage_builder.block_device.level4.fstab:Fstab