diskimage-builder/diskimage_builder/block_device/blockdevice.py
Ian Wienand deb832d685 Create and use plugin/node abstract classes
This completes the transitions started in
Ic5a61365ef0132476b11bdbf1dd96885e91c3cb6

The new file plugin.py is the place to start with this change.  The
abstract base classes PluginBase and NodeBase are heavily documented.
NodeBase essentially replaces Digraph.Node

The changes in level?/*.py make no functional changes, but are just
refactoring to implement the plugin and node classes consistently.
Additionally we have added asserts during parsing & generation to
ensure plugins are implemented PluginBase, and get_nodes() is always
returning NodeBase objects for the graph.

Change-Id: Ie648e9224749491260dea65d7e8b8151a6824b9c
2017-05-26 11:48:11 +10:00

455 lines
17 KiB
Python

# Copyright 2016-2017 Andreas Florath (andreas@florath.net)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import codecs
import json
import logging
import os
import shutil
import sys
import yaml
import networkx as nx
from stevedore import extension
from diskimage_builder.block_device.config import \
config_tree_to_graph
from diskimage_builder.block_device.exception import \
BlockDeviceSetupException
from diskimage_builder.block_device.plugin import NodeBase
from diskimage_builder.block_device.plugin import PluginBase
from diskimage_builder.block_device.utils import exec_sudo
# Module-level logger named after this module (standard logging convention).
logger = logging.getLogger(__name__)
class BlockDevice(object):
    """Handles block devices.

    This class handles the complete setup and deletion of all aspects
    of the block device level.

    A typical call sequence:

    cmd_init: initialize the block device level config.  After this
       call it is possible to e.g. query information from the
       (partially automatically generated) internal state, like the
       root-label.

    cmd_getval: retrieve information about the (internal) block device
       state, like the block image device (for the bootloader) or the
       root-label (for writing fstab).

    cmd_create: creates all the different aspects of the block
       device.  When this call is successful, the complete block level
       device is set up, filesystems are created and are mounted at
       the correct position.  After this call it is possible to copy /
       install all the needed files into the appropriate directories.

    cmd_writefstab: creates the (complete) fstab for the system.

    cmd_umount: unmounts and detaches all directories and releases the
       used resources.  After this call the used (e.g.) images are
       still available for further handling, e.g. converting from raw
       into some other format.

    cmd_cleanup: removes everything that was created with the
       'cmd_create' call, i.e. all image files themselves and the
       internal temporary configuration.

    cmd_delete: unmounts and removes everything that was created
       during the 'cmd_create' call.  This call should be used in
       error conditions when there is the need to remove all allocated
       resources immediately and as well as possible.  From the
       functional point of view this is mostly the same as a call to
       'cmd_umount' and 'cmd_cleanup' - but is typically more error
       tolerant.

    In a script this should be called in the following way:

    dib-block-device init ...
    # From that point the database can be queried, like
    ROOT_LABEL=$(dib-block-device getval root-label)

    Please note that currently the dib-block-device executable can
    only be used outside the chroot.

    dib-block-device create ...
    trap "dib-block-device delete ..." EXIT
    # copy / install files
    dib-block-device umount ...
    # convert image(s)
    dib-block-device cleanup ...
    trap - EXIT
    """

    def _merge_into_config(self):
        """Merge old (default) config values into the new config.

        For backwards compatibility some values may still arrive via
        old environment variables (passed in as params).  If the
        mkfs_root entry does not set these keys explicitly, the
        parameter values are inserted into the current configuration.
        """
        for entry in self.config:
            for k, v in entry.items():
                if k != 'mkfs':
                    continue
                # Only the root filesystem entry receives defaults.
                if v.get('name') != 'mkfs_root':
                    continue
                if 'type' not in v \
                   and 'root-fs-type' in self.params:
                    v['type'] = self.params['root-fs-type']
                if 'opts' not in v \
                   and 'root-fs-opts' in self.params:
                    v['opts'] = self.params['root-fs-opts']
                if 'label' not in v \
                   and 'root-label' in self.params:
                    if self.params['root-label'] is not None:
                        v['label'] = self.params['root-label']
                    else:
                        # Historic default label used by cloud images.
                        v['label'] = "cloudimg-rootfs"

    @staticmethod
    def _load_json(file_name):
        """Load a JSON file, returning None if it does not exist.

        :param file_name: path of the JSON file to read
        :return: the decoded content, or None when the file is absent
        """
        if os.path.exists(file_name):
            with codecs.open(file_name, encoding="utf-8", mode="r") as fd:
                return json.load(fd)
        return None

    def __init__(self, params):
        """Create BlockDevice object

        :param params: dict of parameters (from the YAML file given
            via --params)
        """
        logger.debug("Creating BlockDevice object")

        self.params = params
        logger.debug("Params [%s]", self.params)

        self.state_dir = os.path.join(
            self.params['build-dir'], "states/block-device")
        self.state_json_file_name \
            = os.path.join(self.state_dir, "state.json")

        self.plugin_manager = extension.ExtensionManager(
            namespace='diskimage_builder.block_device.plugin',
            invoke_on_load=False)

        self.config_json_file_name \
            = os.path.join(self.state_dir, "config.json")
        # Earlier phases may have persisted config/state; absent files
        # simply yield None here.
        self.config = self._load_json(self.config_json_file_name)
        self.state = self._load_json(self.state_json_file_name)
        logger.debug("Using state [%s]", self.state)

        # This directory needs to exist for the state and config
        # files; it may already be there from a previous phase.
        try:
            os.makedirs(self.state_dir)
        except OSError:
            pass

    def write_state(self, state):
        """Persist the given state dict to the state JSON file."""
        logger.debug("Write state [%s]", self.state_json_file_name)
        with open(self.state_json_file_name, "w") as fd:
            json.dump(state, fd)

    def create_graph(self, config, default_config):
        """Generate configuration digraph

        Generate the configuration digraph from the config

        :param config: graph configuration file
        :param default_config: default parameters (from --params)
        :return: tuple with the graph object and nodes in call order
        :raises BlockDeviceSetupException: on unknown plugins,
            duplicate node names or edges to undefined nodes
        """
        # This is the directed graph of nodes: each parse method must
        # add the appropriate nodes and edges.
        dg = nx.DiGraph()

        for config_entry in config:
            # this should have been checked by generate_config
            assert len(config_entry) == 1

            logger.debug("Config entry [%s]", config_entry)
            cfg_obj_name = list(config_entry.keys())[0]
            cfg_obj_val = config_entry[cfg_obj_name]

            # Instantiate a "plugin" object, passing it the
            # configuration entry
            # XXX would a "factory" pattern for plugins, where we make
            # a method call on an object stevedore has instantiated be
            # better here?
            if cfg_obj_name not in self.plugin_manager:
                raise BlockDeviceSetupException(
                    ("Config element [%s] is not implemented" %
                     cfg_obj_name))
            plugin = self.plugin_manager[cfg_obj_name].plugin
            assert issubclass(plugin, PluginBase)
            cfg_obj = plugin(cfg_obj_val, default_config)

            # Ask the plugin for the nodes it would like to insert
            # into the graph.  Some plugins, such as partitioning,
            # return multiple nodes from one config entry.
            nodes = cfg_obj.get_nodes()
            assert isinstance(nodes, list)
            for node in nodes:
                # plugins should return nodes...
                assert isinstance(node, NodeBase)
                # ensure node names are unique.  networkx by default
                # just appends the attribute to the node dict for
                # existing nodes, which is not what we want.
                # NOTE: dg.node is the networkx 1.x/2.x node-attribute
                # dict (removed in networkx 2.4).
                if node.name in dg.node:
                    raise BlockDeviceSetupException(
                        "Duplicate node name: %s" % (node.name))
                logger.debug("Adding %s : %s", node.name, node)
                dg.add_node(node.name, obj=node)

        # Now find edges
        for name, attr in dg.nodes(data=True):
            obj = attr['obj']
            # Unfortunately, we can not determine node edges just from
            # the configuration file.  It's not always simply the
            # "base:" pointer.  So ask nodes for a list of nodes they
            # want to point to.  *mostly* it's just base: ... but
            # mounting is different.
            #   edges_from are the nodes that point to us
            #   edges_to are the nodes we point to
            edges_from, edges_to = obj.get_edges()
            logger.debug("Edges for %s: f:%s t:%s", name,
                         edges_from, edges_to)
            for edge_from in edges_from:
                if edge_from not in dg.node:
                    raise BlockDeviceSetupException(
                        "Edge not defined: %s->%s" % (edge_from, name))
                dg.add_edge(edge_from, name)
            for edge_to in edges_to:
                if edge_to not in dg.node:
                    raise BlockDeviceSetupException(
                        "Edge not defined: %s->%s" % (name, edge_to))
                dg.add_edge(name, edge_to)

        # this can be quite helpful debugging but needs pydotplus.
        # run "dotty /tmp/out.dot"
        # XXX: maybe an env var that dumps to a tmpdir or something?
        # nx.nx_pydot.write_dot(dg, '/tmp/graph_dump.dot')

        # Topological sort (i.e. create a linear array that satisfies
        # dependencies) and return the object list
        call_order_nodes = nx.topological_sort(dg)
        logger.debug("Call order: %s", list(call_order_nodes))
        call_order = [dg.node[n]['obj'] for n in call_order_nodes]

        return dg, call_order

    def create(self, result, rollback):
        """Run the create() step of every node in dependency order.

        :param result: mutable state dict the nodes fill in
        :param rollback: list the nodes append cleanup callables to
        """
        dg, call_order = self.create_graph(self.config, self.params)
        for node in call_order:
            node.create(result, rollback)

    def cmd_init(self):
        """Initialize block device setup

        This initializes the block device setup layer.  One major task
        is to parse and check the configuration, write it down for
        later examination and execution.
        """
        with open(self.params['config'], "rt") as config_fd:
            self.config = yaml.safe_load(config_fd)
        logger.debug("Config before merge [%s]", self.config)
        self.config = config_tree_to_graph(self.config)
        # was logged as "before merge" twice; this is the converted form
        logger.debug("Config after conversion to graph [%s]", self.config)
        self._merge_into_config()
        logger.debug("Final config [%s]", self.config)

        # Write the final config
        with open(self.config_json_file_name, "wt") as fd:
            json.dump(self.config, fd)
        logger.info("Wrote final block device config to [%s]",
                    self.config_json_file_name)

    def _config_get_mount(self, path):
        """Return the 'mount' config entry for the given mount point.

        :raises BlockDeviceSetupException: if no such entry exists
        """
        for entry in self.config:
            for k, v in entry.items():
                if k == 'mount' and v['mount_point'] == path:
                    return v
        # raise (not assert) so the failure survives "python -O"
        raise BlockDeviceSetupException(
            "Mount point [%s] not found in config" % path)

    def _config_get_all_mount_points(self):
        """Return the list of all configured mount points."""
        rvec = []
        for entry in self.config:
            for k, v in entry.items():
                if k == 'mount':
                    rvec.append(v['mount_point'])
        return rvec

    def _config_get_mkfs(self, name):
        """Return the 'mkfs' config entry with the given name.

        :raises BlockDeviceSetupException: if no such entry exists
        """
        for entry in self.config:
            for k, v in entry.items():
                if k == 'mkfs' and v['name'] == name:
                    return v
        # raise (not assert) so the failure survives "python -O"
        raise BlockDeviceSetupException(
            "mkfs entry [%s] not found in config" % name)

    def cmd_getval(self, symbol):
        """Retrieve value from block device level

        The value of SYMBOL is printed to stdout.  This is intended to
        be captured into bash-variables for backward compatibility
        (non python) access to internal configuration.

        :param symbol: the symbol to get
        :return: 0 on success, 1 for an unknown symbol
        """
        logger.info("Getting value for [%s]", symbol)

        if symbol == "root-label":
            root_mount = self._config_get_mount("/")
            root_fs = self._config_get_mkfs(root_mount['base'])
            logger.debug("root-label [%s]", root_fs['label'])
            print("%s" % root_fs['label'])
            return 0

        if symbol == "root-fstype":
            root_mount = self._config_get_mount("/")
            root_fs = self._config_get_mkfs(root_mount['base'])
            logger.debug("root-fstype [%s]", root_fs['type'])
            print("%s" % root_fs['type'])
            return 0

        if symbol == 'mount-points':
            mount_points = self._config_get_all_mount_points()
            # we return the mountpoints joined by a pipe, because it is
            # not a valid char in directories, so it is a safe separator
            # for the mountpoints list
            print("%s" % "|".join(mount_points))
            return 0

        if symbol == 'image-block-partition':
            # If there is no partition needed, pass back directly the
            # image.
            if 'root' in self.state['blockdev']:
                print("%s" % self.state['blockdev']['root']['device'])
            else:
                print("%s" % self.state['blockdev']['image0']['device'])
            return 0

        if symbol == 'image-path':
            print("%s" % self.state['blockdev']['image0']['image'])
            return 0

        logger.error("Invalid symbol [%s] for getval", symbol)
        return 1

    def cmd_writefstab(self):
        """Creates the fstab"""
        logger.info("Creating fstab")

        tmp_fstab = os.path.join(self.state_dir, "fstab")
        with open(tmp_fstab, "wt") as fstab_fd:
            # This gives the order in which this must be mounted
            # hoisted: whether any fstab entries exist at all does not
            # depend on the mount point
            have_fstab = 'fstab' in self.state
            for mp in self.state['mount_order']:
                logger.debug("Writing fstab entry for [%s]", mp)
                fs_base = self.state['mount'][mp]['base']
                fs_name = self.state['mount'][mp]['name']
                fs_val = self.state['filesys'][fs_base]

                # Prefer the label for identification, fall back to UUID
                if 'label' in fs_val:
                    diskid = "LABEL=%s" % fs_val['label']
                else:
                    diskid = "UUID=%s" % fs_val['uuid']

                # If there is no fstab entry - do not write anything
                if not have_fstab:
                    continue
                if fs_name not in self.state['fstab']:
                    continue

                options = self.state['fstab'][fs_name]['options']
                dump_freq = self.state['fstab'][fs_name]['dump-freq']
                fsck_passno = self.state['fstab'][fs_name]['fsck-passno']

                fstab_fd.write("%s %s %s %s %s %s\n"
                               % (diskid, mp, fs_val['fstype'],
                                  options, dump_freq, fsck_passno))

        # Install the generated fstab into the image tree (needs sudo
        # because the target is root-owned).
        target_etc_dir = os.path.join(self.params['build-dir'], 'built', 'etc')
        exec_sudo(['mkdir', '-p', target_etc_dir])
        exec_sudo(['cp', tmp_fstab, os.path.join(target_etc_dir, "fstab")])
        return 0

    def cmd_create(self):
        """Creates the block device"""
        logger.info("create() called")
        logger.debug("Using config [%s]", self.config)

        self.state = {}
        rollback = []

        try:
            self.create(self.state, rollback)
        except Exception:
            logger.exception("Create failed; rollback initiated")
            # Undo partial work in reverse order of registration
            for rollback_cb in reversed(rollback):
                rollback_cb()
            sys.exit(1)

        self.write_state(self.state)

        logger.info("create() finished")
        return 0

    def cmd_umount(self):
        """Unmounts the blockdevice and cleanup resources"""
        if self.state is None:
            logger.info("State already cleaned - no way to do anything here")
            return 0

        # Unmounting must be done in reverse dependency order.
        # (create_graph always returns a graph, so no None check is
        # needed on its result.)
        dg, call_order = self.create_graph(self.config, self.params)
        for node in reversed(call_order):
            node.umount(self.state)

        return 0

    def cmd_cleanup(self):
        """Cleanup all remaining relicts - in good case"""
        # Deleting must be done in reverse order
        dg, call_order = self.create_graph(self.config, self.params)
        for node in reversed(call_order):
            node.cleanup(self.state)

        logger.info("Removing temporary dir [%s]", self.state_dir)
        shutil.rmtree(self.state_dir)

        return 0

    def cmd_delete(self):
        """Cleanup all remaining relicts - in case of an error"""
        # Deleting must be done in reverse order
        dg, call_order = self.create_graph(self.config, self.params)
        for node in reversed(call_order):
            node.delete(self.state)

        logger.info("Removing temporary dir [%s]", self.state_dir)
        shutil.rmtree(self.state_dir)

        return 0