Refactor: block-device handling (partitioning)
During the creation of a disk image (e.g. for a VM), there is the
need to create, set up, configure and afterwards detach some kind of
storage where the newly installed OS can be copied to or directly
installed in. This patch implements partitioning handling.

Change-Id: I0ca6a4ae3a2684d473b44e5f332ee4225ee30f8c
Signed-off-by: Andreas Florath <andreas@florath.net>
parent 6c0eb3d905
commit ec7f56c1b2
@@ -69,7 +69,7 @@ def main():
     method = getattr(bd, "cmd_" + args.phase, None)
     if callable(method):
         # If so: call it.
-        method()
+        return method()
     else:
         logger.error("phase [%s] does not exists" % args.phase)
         return 1
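Context for this hunk: the CLI resolves the requested phase name to a cmd_* method at runtime, and the change makes main() propagate that method's return value as the exit status instead of discarding it. A minimal standalone sketch of the dispatch pattern (names here are illustrative, not the patch's module layout):

    import logging

    logger = logging.getLogger(__name__)

    class FakeBlockDevice(object):
        # Stand-in for the real BlockDevice; only the dispatch matters here.
        def cmd_create(self):
            return 0  # phase succeeded

    def run_phase(bd, phase):
        # "create" -> bd.cmd_create; getattr returns None for unknown phases.
        method = getattr(bd, "cmd_" + phase, None)
        if callable(method):
            return method()  # exit status now reflects the phase result
        logger.error("phase [%s] does not exist" % phase)
        return 1

    assert run_phase(FakeBlockDevice(), "create") == 0
    assert run_phase(FakeBlockDevice(), "bogus") == 1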
@@ -12,12 +12,18 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from diskimage_builder.block_device.level0 import Level0
-from diskimage_builder.block_device.utils import convert_to_utf8
+import codecs
+from diskimage_builder.block_device.blockdevicesetupexception \
+    import BlockDeviceSetupException
+from diskimage_builder.block_device.level0 import LocalLoop
+from diskimage_builder.block_device.level1 import Partitioning
+from diskimage_builder.graph.digraph import Digraph
 import json
 import logging
 import os
 import shutil
+import sys
+import yaml
 
 
 logger = logging.getLogger(__name__)
@@ -25,33 +31,36 @@ logger = logging.getLogger(__name__)
 
 class BlockDevice(object):
 
-    # Currently there is only the need for a first element (which must
-    # be a list).
-    DefaultConfig = [
-        [["local_loop",
-          {"name": "rootdisk"}]]]
-    # The reason for the complex layout is, that for future layers
-    # there is a need to add additional lists, like:
-    # DefaultConfig = [
-    #     [["local_loop",
-    #       {"name": "rootdisk"}]],
-    #     [["partitioning",
-    #       {"rootdisk": {
-    #           "label": "mbr",
-    #           "partitions":
-    #               [{"name": "rd-partition1",
-    #                 "flags": ["boot"],
-    #                 "size": "100%"}]}}]],
-    #     [["fs",
-    #       {"rd-partition1": {}}]]
-    # ]
+    # Default configuration:
+    # one image, one partition, mounted under '/'
+    DefaultConfig = """
+local_loop:
+  name: image0
+"""
+
+    # This is an example of the next level config
+    # mkfs:
+    #   base: root_p1
+    #   type: ext4
+    #   mount_point: /
+
+    # A dictionary to map sensible names to internal implementation.
+    cfg_type_map = {
+        'local_loop': LocalLoop,
+        'partitioning': Partitioning,
+        'mkfs': 'not yet implemented',
+    }
 
     def __init__(self, block_device_config, build_dir,
                  default_image_size, default_image_dir):
+        logger.debug("Creating BlockDevice object")
+        logger.debug("Config given [%s]" % block_device_config)
+        logger.debug("Build dir [%s]" % build_dir)
         if block_device_config is None:
-            self.config = BlockDevice.DefaultConfig
-        else:
-            self.config = json.loads(block_device_config)
+            block_device_config = BlockDevice.DefaultConfig
+        self.config = yaml.safe_load(block_device_config)
+        logger.debug("Using config [%s]" % self.config)
 
         self.default_config = {
             'image_size': default_image_size,
             'image_dir': default_image_dir}
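The configuration format switches here from JSON to YAML; DefaultConfig is now a YAML string whose top-level keys are looked up in cfg_type_map. A quick sketch of the round trip through yaml.safe_load (this is the shipped default, parsed exactly as __init__ does it):

    import yaml

    DefaultConfig = """
    local_loop:
      name: image0
    """

    config = yaml.safe_load(DefaultConfig)
    # yaml.safe_load yields a plain dict:
    assert config == {'local_loop': {'name': 'image0'}}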
@@ -67,53 +76,125 @@ class BlockDevice(object):
             json.dump([self.config, self.default_config, result], fd)
 
     def load_state(self):
-        with open(self.state_json_file_name, "r") as fd:
-            return convert_to_utf8(json.load(fd))
+        with codecs.open(self.state_json_file_name,
+                         encoding="utf-8", mode="r") as fd:
+            return json.load(fd)
+
+    def create_graph(self, config, default_config):
+        # This is the directed graph of nodes: each parse method must
+        # add the appropriate nodes and edges.
+        dg = Digraph()
+
+        for cfg_obj_name, cfg_obj_val in config.items():
+            # As the first step the configured objects are created
+            # (if it exists)
+            if cfg_obj_name not in BlockDevice.cfg_type_map:
+                logger.error("Configured top level element [%s] "
+                             "does not exists." % cfg_obj_name)
+                return 1
+            cfg_obj = BlockDevice.cfg_type_map[cfg_obj_name](
+                cfg_obj_val, default_config)
+            # At this point it is only possible to add the nodes:
+            # adding the edges needs all nodes first.
+            cfg_obj.insert_nodes(dg)
+
+        # Now that all the nodes exists: add also the edges
+        for node in dg.get_iter_nodes_values():
+            node.insert_edges(dg)
+
+        call_order = dg.topological_sort()
+        logger.debug("Call order [%s]" % (list(call_order)))
+        return dg, call_order
+
+    def create(self, result, rollback):
+        dg, call_order = self.create_graph(self.config, self.default_config)
+        for node in call_order:
+            node.create(result, rollback)
 
     def cmd_create(self):
         """Creates the block device"""
 
         logger.info("create() called")
-        logger.debug("config [%s]" % self.config)
-        lvl0 = Level0(self.config[0], self.default_config, None)
-        result = lvl0.create()
-        logger.debug("Result level 0 [%s]" % result)
+        logger.debug("Using config [%s]" % self.config)
+
+        result = {}
+        rollback = []
 
+        try:
+            self.create(result, rollback)
+        except BlockDeviceSetupException as bdse:
+            logger.error("exception [%s]" % bdse)
+            for rollback_cb in reversed(rollback):
+                rollback_cb()
+            sys.exit(1)
+
         # To be compatible with the current implementation, echo the
         # result to stdout.
-        print("%s" % result['rootdisk']['device'])
+        # If there is no partition needed, pass back directly the
+        # image.
+        if 'root_p1' in result:
+            print("%s" % result['root_p1']['device'])
+        else:
+            print("%s" % result['image0']['device'])
 
         self.write_state(result)
 
         logger.info("create() finished")
         return 0
 
-    def cmd_umount(self):
-        """Unmounts the blockdevice and cleanup resources"""
-
-        logger.info("umount() called")
+    def _load_state(self):
+        logger.info("_load_state() called")
+
         try:
             os.stat(self.state_json_file_name)
         except OSError:
             logger.info("State already cleaned - no way to do anything here")
-            return 0
+            return None, None, None
 
         config, default_config, state = self.load_state()
         logger.debug("Using config [%s]" % config)
         logger.debug("Using default config [%s]" % default_config)
         logger.debug("Using state [%s]" % state)
 
-        level0 = Level0(config[0], default_config, state)
-        result = level0.delete()
+        # Deleting must be done in reverse order
+        dg, call_order = self.create_graph(config, default_config)
+        reverse_order = reversed(call_order)
+        return dg, reverse_order, state
+
+    def cmd_umount(self):
+        """Unmounts the blockdevice and cleanup resources"""
 
-        # If everything finished well, remove the results.
-        if result:
-            logger.info("Removing temporary dir [%s]" % self.state_dir)
-            shutil.rmtree(self.state_dir)
+        dg, reverse_order, state = self._load_state()
+        if dg is None:
+            return 0
+        for node in reverse_order:
+            node.umount(state)
 
         # To be compatible with the current implementation, echo the
         # result to stdout.
-        print("%s" % state['rootdisk']['image'])
+        print("%s" % state['image0']['image'])
 
-        logger.info("umount() finished result [%d]" % result)
-        return 0 if result else 1
+        return 0
+
+    def cmd_cleanup(self):
+        """Cleanup all remaining relicts - in good case"""
+
+        dg, reverse_order, state = self._load_state()
+        for node in reverse_order:
+            node.cleanup(state)
+
+        logger.info("Removing temporary dir [%s]" % self.state_dir)
+        shutil.rmtree(self.state_dir)
+
+        return 0
+
+    def cmd_delete(self):
+        """Cleanup all remaining relicts - in case of an error"""
+
+        dg, reverse_order, state = self._load_state()
+        for node in reverse_order:
+            node.delete(state)
+
+        logger.info("Removing temporary dir [%s]" % self.state_dir)
+        shutil.rmtree(self.state_dir)
+
+        return 0
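The new create path threads a rollback list through the node graph: every node registers an undo callback before each side effect, and cmd_create unwinds the callbacks in reverse on failure. A self-contained sketch of that pattern (the step tuples are illustrative, not the patch's API):

    class SetupError(Exception):
        pass

    def build(steps):
        # steps: iterable of (do, undo) callables.
        rollback = []
        try:
            for do, undo in steps:
                rollback.append(undo)  # register undo before the side effect
                do()
        except SetupError:
            # Unwind newest-first, mirroring cmd_create's error handler.
            for undo in reversed(rollback):
                undo()
            raise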
diskimage_builder/block_device/blockdevicesetupexception.py (new file, 17 lines)
@@ -0,0 +1,17 @@
+# Copyright 2016 Andreas Florath (andreas@florath.net)
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+class BlockDeviceSetupException(Exception):
+    pass
@@ -13,18 +13,5 @@
 # under the License.
 
 from diskimage_builder.block_device.level0.localloop import LocalLoop
-from diskimage_builder.block_device.levelbase import LevelBase
 
 
 __all__ = [LocalLoop]
-
-
-class Level0(LevelBase):
-    """Block Device Level0: preparation of images
-
-    This is the class that handles level 0 block device setup:
-    creating the block device image and providing OS access to it.
-    """
-
-    def __init__(self, config, default_config, result):
-        LevelBase.__init__(self, 0, config, default_config, result,
-                           {LocalLoop.type_string: LocalLoop})
@@ -12,18 +12,19 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+from diskimage_builder.block_device.blockdevicesetupexception \
+    import BlockDeviceSetupException
 from diskimage_builder.block_device.utils import parse_abs_size_spec
+from diskimage_builder.graph.digraph import Digraph
 import logging
 import os
 import subprocess
-import sys
-import time
 
 
 logger = logging.getLogger(__name__)
 
 
-class LocalLoop(object):
+class LocalLoop(Digraph.Node):
     """Level0: Local loop image device handling.
 
     This class handles local loop devices that can be used
@@ -32,7 +33,9 @@ class LocalLoop(object):
 
     type_string = "local_loop"
 
-    def __init__(self, config, default_config, result=None):
+    def __init__(self, config, default_config):
+        logger.debug("Creating LocalLoop object; config [%s] "
+                     "default_config [%s]" % (config, default_config))
         if 'size' in config:
             self.size = parse_abs_size_spec(config['size'])
             logger.debug("Image size [%s]" % self.size)
@@ -44,49 +47,85 @@ class LocalLoop(object):
         else:
             self.image_dir = default_config['image_dir']
         self.name = config['name']
+        Digraph.Node.__init__(self, self.name)
         self.filename = os.path.join(self.image_dir, self.name + ".raw")
-        self.result = result
-        if self.result is not None:
-            self.block_device = self.result[self.name]['device']
 
-    def create(self):
-        logger.debug("[%s] Creating loop on [%s] with size [%d]" %
-                     (self.name, self.filename, self.size))
+    def insert_nodes(self, dg):
+        dg.add_node(self)
 
-        with open(self.filename, "w") as fd:
-            fd.seek(self.size - 1)
-            fd.write("\0")
+    def insert_edges(self, dg):
+        """Because this is created without base, there are no edges."""
+        pass
 
-        logger.debug("Calling [sudo losetup --show -f %s]"
-                     % self.filename)
+    @staticmethod
+    def image_create(filename, size):
+        logger.info("Create image file [%s]" % filename)
+        with open(filename, "w") as fd:
+            fd.seek(size - 1)
+            fd.write("\0")
+
+    @staticmethod
+    def _image_delete(filename):
+        logger.info("Remove image file [%s]" % filename)
+        os.remove(filename)
+
+    @staticmethod
+    def _loopdev_attach(filename):
+        logger.info("loopdev attach")
+        logger.debug("Calling [sudo losetup --show -f %s]", filename)
         subp = subprocess.Popen(["sudo", "losetup", "--show", "-f",
-                                 self.filename], stdout=subprocess.PIPE)
+                                 filename], stdout=subprocess.PIPE)
         rval = subp.wait()
         if rval == 0:
             # [:-1]: Cut of the newline
-            self.block_device = subp.stdout.read()[:-1]
-            logger.debug("New block device [%s]" % self.block_device)
+            block_device = subp.stdout.read()[:-1].decode("utf-8")
+            logger.info("New block device [%s]" % block_device)
+            return block_device
         else:
             logger.error("losetup failed")
-            sys.exit(1)
+            raise BlockDeviceSetupException("losetup failed")
 
-        return {self.name: {"device": self.block_device,
-                            "image": self.filename}}
-
-    def delete(self):
+    @staticmethod
+    def _loopdev_detach(loopdev):
+        logger.info("loopdev detach")
         # loopback dev may be tied up a bit by udev events triggered
         # by partition events
         for try_cnt in range(10, 1, -1):
-            logger.debug("Delete loop [%s]" % self.block_device)
-            res = subprocess.call("sudo losetup -d %s" %
-                                  (self.block_device),
-                                  shell=True)
-            if res == 0:
-                return {self.name: True}
-            logger.debug("[%s] may be busy, sleeping [%d] more secs"
-                         % (self.block_device, try_cnt))
-            time.sleep(1)
+            logger.debug("Calling [sudo losetup -d %s]", loopdev)
+            subp = subprocess.Popen(["sudo", "losetup", "-d",
+                                     loopdev])
+            rval = subp.wait()
+            if rval == 0:
+                logger.info("Successfully detached [%s]" % loopdev)
+                return 0
+            else:
+                logger.error("loopdev detach failed")
+                # Do not raise an error - maybe other cleanup methods
+                # can at least do some more work.
+                logger.debug("Gave up trying to detach [%s]" % loopdev)
+                return rval
 
-        logger.debug("Gave up trying to detach [%s]" %
-                     self.block_device)
-        return {self.name: False}
+    def create(self, result, rollback):
+        logger.debug("[%s] Creating loop on [%s] with size [%d]" %
+                     (self.name, self.filename, self.size))
+
+        rollback.append(lambda: self._image_delete(self.filename))
+        self.image_create(self.filename, self.size)
+
+        block_device = self._loopdev_attach(self.filename)
+        rollback.append(lambda: self._loopdev_detach(block_device))
+
+        result[self.name] = {"device": block_device,
+                             "image": self.filename}
+        logger.debug("Created loop name [%s] device [%s] image [%s]"
+                     % (self.name, block_device, self.filename))
+        return
+
+    def umount(self, state):
+        self._loopdev_detach(state[self.name]['device'])
+
+    def cleanup(self, state):
+        pass
+
+    def delete(self, state):
+        self._image_delete(state[self.name]['image'])
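Taken together, a LocalLoop node now has a uniform lifecycle: create() performs the side effects and records undo callbacks, while umount()/cleanup()/delete() are driven off the persisted state dict. A hedged sketch of driving one node through it (paths and size are illustrative, and the attach/detach steps shell out to sudo losetup, so this only runs on a suitably configured host):

    loop = LocalLoop({'name': 'image0', 'size': '1GiB'},
                     {'image_size': '1GiB', 'image_dir': '/tmp'})
    result = {}
    rollback = []
    loop.create(result, rollback)      # writes /tmp/image0.raw, attaches a loop device
    print(result['image0']['device'])  # e.g. /dev/loop0
    loop.umount(result)                # losetup -d on the recorded device
    loop.delete(result)                # removes the backing image file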
diskimage_builder/block_device/level1/__init__.py (new file, 17 lines)
@@ -0,0 +1,17 @@
+# Copyright 2016 Andreas Florath (andreas@florath.net)
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from diskimage_builder.block_device.level1.partitioning import Partitioning
+
+__all__ = [Partitioning]
diskimage_builder/block_device/level1/mbr.py (new file, 360 lines)
@@ -0,0 +1,360 @@
+# Copyright 2016 Andreas Florath (andreas@florath.net)
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import random
+from struct import pack
+
+
+logger = logging.getLogger(__name__)
+
+
+# Details of the MBR object itself can be found in the inline
+# documentation.
+#
+# General design and implementation remarks:
+# o Because the whole GNU parted and co. (e.g. the python-parted that
+#   is based on GNU parted) cannot be used because of the license:
+#   everything falls under GPL2 (not LGPL2!) and therefore does not
+#   fit into the Apache License here.
+# o It looks that there is no real alternative available (2016-06).
+# o The interface of python-parted is not that simple to handle - and
+#   the initial try to use GNU (python-)parted was not that much
+#   easier and shorter than this approach.
+# o When using tools (like fdisk or parted) they try to optimize the
+#   alignment of partitions based on the data found on the host
+#   system. These might be misleading and might lead to (very) poor
+#   performance.
+# o These ready-to-use tools typically also change the CHS layout
+#   based on the disk size. In case that the disk is enlarged (which
+#   is a normal use case for golden images), the CHS layout of the
+#   disk changes for those tools (and is not longer correct).
+#   In the DIB implementation the CHS are chosen that way, that also
+#   for very small disks the maximum heads/cylinder and sectors/track
+#   is used: even if the disk size in increased, the CHS numbers will
+#   not change.
+# o In the easy and straight forward way when only using one
+#   partition, exactly 40 bytes (!) must be written - and the biggest
+#   part of this data is fixed (same in all cases).
+#
+# Limitations and Incompatibilities
+# o With the help of this class it is possible to create an
+#   arbitrarily number of extended partitions (tested with over 1000).
+# o There are limitations and shortcomings in the OS and in tools
+#   handling these partitions.
+# o Under Linux the loop device is able to handle a limited number of
+#   partitions. The module parameter max_loop can be set - the maximum
+#   number might vary depending on the distribution and kernel build.
+# o Under Linux fdisk is able to handle 'only' 60 partitions. Only
+#   those are listed, can be changed or written.
+# o Under Linux GNU parted can handle about 60 partitions.
+#
+# Be sure only to pass in the number of partitions that the host OS
+# and target OS are able to handle.
+
+class MBR(object):
+    """MBR Disk / Partition Table Layout
+
+    Primary partitions are created first - and must also be passed in
+    first.
+    The extended partition layout is done in the way, that there is
+    one entry in the MBR (the last) that uses the whole disk.
+    EBR (extended boot records) are used to describe the partitions
+    themselves. This has the advantage, that the same procedure can
+    be used for all partitions and arbitrarily many partitions can be
+    created in the same way (the EBR is placed as block 0 in each
+    partition itself).
+    In conjunction with a fixed and 'fits all' partition alignment the
+    major design focus is maximum performance for the installed image
+    (vs. minimal size).
+    Because of the chosen default alignment of 1MiB there will be
+    (1MiB - 512B) unused disk space for the MBR and also the same
+    size unused in every partition.
+    Assuming that 512 byte blocks are used, the resulting layout for
+    extended partitions looks like (blocks offset in extended
+    partition given):
+           0: MBR - 2047 blocks unused
+        2048: EBR for partition 1 - 2047 blocks unused
+        4096: Start of data for partition 1
+        ...
+           X: EBR for partition N - 2047 blocks unused
+      X+2048: Start of data for partition N
+
+    Direct (native) writing of MBR, EBR (partition table) is
+    implemented - no other parititoning library or tools is used -
+    to be sure to get the correct CHS and alignment for a wide range
+    of host systems.
+    """
+
+    # Design & Implementation details:
+    # o A 'block' is a storage unit on disk. It is similar (equal) to a
+    #   sector - but with LBA addressing.
+    # o It is assumed that a disk block has that number of bytes
+    bytes_per_sector = 512
+    # o CHS is the 'good and very old way' specifying blocks.
+    #   When passing around these numbers, they are also ordered like 'CHS':
+    #   (cylinder, head, sector).
+    # o The computation from LBA to CHS is not unique (it is based
+    #   on the 'real' (or assumed) number of heads/cylinder and
+    #   sectors/track), these are the assumed numbers. Please note
+    #   that these are also the maximum numbers:
+    heads_per_cylinder = 254
+    sectors_per_track = 63
+    max_cylinders = 1023
+    # o There is the need for some offsets that are defined in the
+    #   MBR/EBR domain.
+    MBR_offset_disk_id = 440
+    MBR_offset_signature = 510
+    MBR_offset_first_partition_table_entry = 446
+    MBR_partition_type_extended_chs = 0x5
+    MBR_partition_type_extended_lba = 0xF
+    MBR_signature = 0xAA55
+
+    def __init__(self, name, disk_size, alignment):
+        """Initialize a disk partitioning MBR object.
+
+        The name is the (existing) name of the disk.
+        The disk_size is the (used) size of the disk. It must be a
+        proper multiple of the disk bytes per sector (currently 512)
+        """
+        logger.info("Create MBR disk partitioning object")
+
+        assert disk_size % MBR.bytes_per_sector == 0
+
+        self.disk_size = disk_size
+        self.disk_size_in_blocks \
+            = self.disk_size // MBR.bytes_per_sector
+        self.alignment_blocks = alignment // MBR.bytes_per_sector
+        # Because the extended partitions are a chain of blocks, when
+        # creating a new partition, the reference in the already
+        # existing EBR must be updated. This holds a reference to the
+        # latest EBR. (A special case is the first: when it points to
+        # 0 (MBR) there is no need to update the reference.)
+        self.disk_block_last_ref = 0
+
+        self.name = name
+        self.partition_abs_start = None
+        self.partition_abs_next_free = None
+        # Start of partition number
+        self.partition_number = 0
+
+        self.primary_partitions_created = 0
+        self.extended_partitions_created = 0
+
+    def __enter__(self):
+        # Open existing file for writing (r+)
+        self.image_fd = open(self.name, "r+b")
+        self.write_mbr()
+        self.write_mbr_signature(0)
+        self.partition_abs_start = self.align(1)
+        self.partition_abs_next_free \
+            = self.partition_abs_start
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.image_fd.close()
+
+    def lba2chs(self, lba):
+        """Converts a LBA block number to CHS
+
+        If the LBA block number is bigger than the max (1023, 63, 254)
+        the maximum is returned.
+        """
+        if lba > MBR.heads_per_cylinder * MBR.sectors_per_track \
+           * MBR.max_cylinders:
+            return MBR.max_cylinders, MBR.heads_per_cylinder, \
+                MBR.sectors_per_track
+
+        cylinder = lba // (MBR.heads_per_cylinder * MBR.sectors_per_track)
+        head = (lba // MBR.sectors_per_track) % MBR.heads_per_cylinder
+        sector = (lba % MBR.sectors_per_track) + 1
+
+        logger.debug("Convert LBA to CHS [%d] -> [%d, %d, %d]"
+                     % (lba, cylinder, head, sector))
+        return cylinder, head, sector
+
+    def encode_chs(self, cylinders, heads, sectors):
+        """Encodes a CHS triple into disk format"""
+        # Head - nothing to convert
+        assert heads <= MBR.heads_per_cylinder
+        eh = heads
+
+        # Sector
+        assert sectors <= MBR.sectors_per_track
+        es = sectors
+        # top two bits are set in cylinder conversion
+
+        # Cylinder
+        assert cylinders <= MBR.max_cylinders
+        ec = cylinders % 256  # lower part
+        hc = cylinders // 4   # extract top two bits and
+        es = es | hc          # pass them into the top two bits of the sector
+
+        logger.debug("Encode CHS to disk format [%d %d %d] "
+                     "-> [%02x %02x %02x]" % (cylinders, heads, sectors,
+                                              eh, es, ec))
+        return eh, es, ec
+
+    def write_mbr(self):
+        """Write MBR
+
+        This method writes the MBR to disk. It creates a random disk
+        id as well that it creates the extended partition (as
+        first partition) which uses the whole disk.
+        """
+        disk_id = random.randint(0, 0xFFFFFFFF)
+        self.image_fd.seek(MBR.MBR_offset_disk_id)
+        self.image_fd.write(pack("<I", disk_id))
+
+    def write_mbr_signature(self, blockno):
+        """Writes the MBR/EBR signature to a block
+
+        The signature consists of a 0xAA55 in the last two bytes of the
+        block.
+        """
+        self.image_fd.seek(blockno *
+                           MBR.bytes_per_sector +
+                           MBR.MBR_offset_signature)
+        self.image_fd.write(pack("<H", MBR.MBR_signature))
+
+    def write_partition_entry(self, bootflag, blockno, entry, ptype,
+                              lba_start, lba_length):
+        """Writes a parititon entry
+
+        The entries are always the same and contain 16 bytes. The MBR
+        and also the EBR use the same format.
+        """
+        logger.info("Write partition entry blockno [%d] entry [%d] "
+                    "start [%d] length [%d]" % (blockno, entry,
+                                                lba_start, lba_length))
+
+        self.image_fd.seek(
+            blockno * MBR.bytes_per_sector +
+            MBR.MBR_offset_first_partition_table_entry +
+            16 * entry)
+        # Boot flag
+        self.image_fd.write(pack("<B", 0x80 if bootflag else 0x00))
+
+        # Encode lba start / length into CHS
+        chs_start = self.lba2chs(lba_start)
+        chs_end = self.lba2chs(lba_start + lba_length)
+        # Encode CHS into disk format
+        chs_start_bin = self.encode_chs(*chs_start)
+        chs_end_bin = self.encode_chs(*chs_end)
+
+        # Write CHS start
+        self.image_fd.write(pack("<BBB", *chs_start_bin))
+        # Write partition type
+        self.image_fd.write(pack("<B", ptype))
+        # Write CHS end
+        self.image_fd.write(pack("<BBB", *chs_end_bin))
+        # Write LBA start & length
+        self.image_fd.write(pack("<I", lba_start))
+        self.image_fd.write(pack("<I", lba_length))
+
+    def align(self, blockno):
+        """Align the blockno to next alignment count"""
+        if blockno % self.alignment_blocks == 0:
+            # Already aligned
+            return blockno
+
+        return (blockno // self.alignment_blocks + 1) \
+            * self.alignment_blocks
+
+    def compute_partition_lbas(self, abs_start, size):
+        lba_partition_abs_start = self.align(abs_start)
+        lba_partition_rel_start \
+            = lba_partition_abs_start - self.partition_abs_start
+        lba_partition_length = size // MBR.bytes_per_sector
+        lba_abs_partition_end \
+            = self.align(lba_partition_abs_start + lba_partition_length)
+        logger.info("Partition absolute [%d] relative [%d] "
+                    "length [%d] absolute end [%d]"
+                    % (lba_partition_abs_start, lba_partition_rel_start,
+                       lba_partition_length, lba_abs_partition_end))
+        return lba_partition_abs_start, lba_partition_length, \
+            lba_abs_partition_end
+
+    def add_primary_partition(self, bootflag, size, ptype):
+        lba_partition_abs_start, lba_partition_length, lba_abs_partition_end \
+            = self.compute_partition_lbas(self.partition_abs_next_free, size)
+
+        self.write_partition_entry(
+            bootflag, 0, self.partition_number, ptype,
+            self.align(lba_partition_abs_start), lba_partition_length)
+
+        self.partition_abs_next_free = lba_abs_partition_end
+        logger.debug("Next free [%d]" % self.partition_abs_next_free)
+        self.primary_partitions_created += 1
+        self.partition_number += 1
+        return self.partition_number
+
+    def add_extended_partition(self, bootflag, size, ptype):
+        lba_ebr_abs = self.partition_abs_next_free
+        logger.info("EBR block absolute [%d]" % lba_ebr_abs)
+
+        _, lba_partition_length, lba_abs_partition_end \
+            = self.compute_partition_lbas(lba_ebr_abs + 1, size)
+
+        # Write the reference to the new partition
+        if self.disk_block_last_ref != 0:
+            partition_complete_len = lba_abs_partition_end - lba_ebr_abs
+            self.write_partition_entry(
+                False, self.disk_block_last_ref, 1,
+                MBR.MBR_partition_type_extended_chs,
+                lba_ebr_abs - self.partition_abs_start,
+                partition_complete_len)
+
+        self.write_partition_entry(
+            bootflag, lba_ebr_abs, 0, ptype, self.align(1),
+            lba_partition_length)
+        self.write_mbr_signature(lba_ebr_abs)
+
+        self.partition_abs_next_free = lba_abs_partition_end
+        logger.debug("Next free [%d]" % self.partition_abs_next_free)
+        self.disk_block_last_ref = lba_ebr_abs
+        self.extended_partitions_created += 1
+        self.partition_number += 1
+        return self.partition_number
+
+    def add_partition(self, primaryflag, bootflag, size, ptype):
+        """Adds a partition with the given type and size"""
+        logger.debug("Add new partition primary [%s] boot [%s] "
+                     "size [%d] type [%x]" %
+                     (primaryflag, bootflag, size, ptype))
+
+        # primaries must be created before extended
+        if primaryflag and self.extended_partitions_created > 0:
+            raise RuntimeError("All primary partitions must be "
+                               "given first")
+
+        if primaryflag:
+            return self.add_primary_partition(bootflag, size, ptype)
+        if self.extended_partitions_created == 0:
+            # When this is the first extended partition, the extended
+            # partition entry has to be written.
+            self.partition_abs_start = self.partition_abs_next_free
+            self.write_partition_entry(
+                False, 0, self.partition_number,
+                MBR.MBR_partition_type_extended_lba,
+                self.partition_abs_next_free,
+                self.disk_size_in_blocks - self.partition_abs_next_free)
+            self.partition_number = 4
+
+        return self.add_extended_partition(bootflag, size, ptype)
+
+    def free(self):
+        """Returns the free (not yet partitioned) size"""
+        return self.disk_size \
+            - (self.partition_abs_next_free + self.align(1)) \
+            * MBR.bytes_per_sector
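To make the CHS arithmetic above concrete: with the fixed geometry of 254 heads/cylinder and 63 sectors/track, the 1MiB-aligned first partition start at LBA 2048 maps to cylinder 0, head 32, sector 33 (sectors are 1-based). A standalone check mirroring lba2chs without the overflow clamping:

    HEADS_PER_CYLINDER = 254
    SECTORS_PER_TRACK = 63

    def lba2chs(lba):
        # Same arithmetic as MBR.lba2chs above.
        cylinder = lba // (HEADS_PER_CYLINDER * SECTORS_PER_TRACK)
        head = (lba // SECTORS_PER_TRACK) % HEADS_PER_CYLINDER
        sector = (lba % SECTORS_PER_TRACK) + 1
        return cylinder, head, sector

    assert lba2chs(2048) == (0, 32, 33)
    assert lba2chs(0) == (0, 0, 1)      # LBA 0 is the MBR itself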
diskimage_builder/block_device/level1/partitioning.py (new file, 184 lines)
@@ -0,0 +1,184 @@
+# Copyright 2016 Andreas Florath (andreas@florath.net)
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from diskimage_builder.block_device.blockdevicesetupexception \
+    import BlockDeviceSetupException
+from diskimage_builder.block_device.level1.mbr import MBR
+from diskimage_builder.block_device.utils import parse_abs_size_spec
+from diskimage_builder.block_device.utils import parse_rel_size_spec
+from diskimage_builder.graph.digraph import Digraph
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class Partition(Digraph.Node):
+
+    def __init__(self, name, flags, size, ptype, base, partitioning):
+        Digraph.Node.__init__(self, name)
+        self.flags = flags
+        self.size = size
+        self.ptype = ptype
+        self.base = base
+        self.partitioning = partitioning
+
+    def get_flags(self):
+        return self.flags
+
+    def get_size(self):
+        return self.size
+
+    def get_type(self):
+        return self.ptype
+
+    def insert_edges(self, dg):
+        bnode = dg.find(self.base)
+        assert bnode is not None
+        dg.create_edge(bnode, self)
+
+    def create(self, result, rollback):
+        self.partitioning.create(result, rollback)
+
+    def umount(self, state):
+        """Partitioning does not need any umount task."""
+        pass
+
+    def cleanup(self, state):
+        """Partitioning does not need any cleanup."""
+        pass
+
+    def delete(self, state):
+        """Partitioning does not need any cleanup."""
+        pass
+
+
+class Partitioning(object):
+
+    type_string = "partitioning"
+
+    flag_boot = 1
+    flag_primary = 2
+
+    def __init__(self, config, default_config):
+        logger.debug("Creating Partitioning object; config [%s]" % config)
+        # Because using multiple partitions of one base is done
+        # within one object, there is the need to store a flag if the
+        # creation of the partitions was already done.
+        self.already_created = False
+
+        # Parameter check
+        if 'base' not in config:
+            self._config_error("Partitioning config needs 'base'")
+        self.base = config['base']
+
+        if 'label' not in config:
+            self._config_error("Partitioning config needs 'label'")
+        self.label = config['label']
+        if self.label not in ("mbr", ):
+            self._config_error("Label must be 'mbr'")
+
+        # It is VERY important to get the alignment correct. If this
+        # is not correct, the disk performance might be very poor.
+        # Example: In some tests a 'off by one' leads to a write
+        # performance of 30% compared to a correctly aligned
+        # partition.
+        # The problem for DIB is, that it cannot assume that the host
+        # system uses the same IO sizes as the target system,
+        # therefore here a fixed approach (as used in all modern
+        # systems with large disks) is used. The partitions are
+        # aligned to 1MiB (which are about 2048 times 512 bytes
+        # blocks)
+        self.align = 1024 * 1024  # 1MiB as default
+        if 'align' in config:
+            self.align = parse_abs_size_spec(config['align'])
+
+        if 'partitions' not in config:
+            self._config_error("Partitioning config needs 'partitions'")
+
+        self.partitions = {}
+        for part_cfg in config['partitions']:
+            if 'name' not in part_cfg:
+                self.config_error("Missing 'name' in partition config")
+            part_name = part_cfg['name']
+
+            flags = set()
+            if 'flags' in part_cfg:
+                for f in part_cfg['flags']:
+                    if f == 'boot':
+                        flags.add(Partitioning.flag_boot)
+                    elif f == 'primary':
+                        flags.add(Partitioning.flag_primary)
+                    else:
+                        self._config_error("Unknown flag [%s] in "
+                                           "partitioning for [%s]"
+                                           % (f, part_name))
+            if 'size' not in part_cfg:
+                self._config_error("No 'size' in partition [%s]"
+                                   % part_name)
+            size = part_cfg['size']
+
+            ptype = int(part_cfg['type'], 16) if 'type' in part_cfg else 0x83
+
+            self.partitions[part_name] \
+                = Partition(part_name, flags, size, ptype, self.base, self)
+            logger.debug(part_cfg)
+
+    def _config_error(self, msg):
+        logger.error(msg)
+        raise BlockDeviceSetupException(msg)
+
+    def _size_of_block_dev(self, dev):
+        with open(dev, "r") as fd:
+            fd.seek(0, 2)
+            return fd.tell()
+
+    def insert_nodes(self, dg):
+        for _, part in self.partitions.items():
+            dg.add_node(part)
+
+    def create(self, result, rollback):
+        image_path = result[self.base]['image']
+        device_path = result[self.base]['device']
+        logger.info("Creating partition on [%s] [%s]" %
+                    (self.base, image_path))
+
+        if self.already_created:
+            logger.info("Not creating the partitions a second time.")
+            return
+
+        assert self.label == 'mbr'
+
+        disk_size = self._size_of_block_dev(image_path)
+        with MBR(image_path, disk_size, self.align) as part_impl:
+            for part_name, part_cfg in self.partitions.items():
+                part_bootflag = Partitioning.flag_boot \
+                                in part_cfg.get_flags()
+                part_primary = Partitioning.flag_primary \
+                               in part_cfg.get_flags()
+                part_size = part_cfg.get_size()
+                part_free = part_impl.free()
+                part_type = part_cfg.get_type()
+                logger.debug("Not partitioned space [%d]" % part_free)
+                part_size = parse_rel_size_spec(part_size,
+                                                part_free)[1]
+                part_no \
+                    = part_impl.add_partition(part_primary, part_bootflag,
+                                              part_size, part_type)
+                logger.debug("Create partition [%s] [%d]" %
+                             (part_name, part_no))
+                result[part_name] = {'device': device_path + "p%d" % part_no}
+
+        self.already_created = True
+        return
@@ -1,66 +0,0 @@
-# Copyright 2016 Andreas Florath (andreas@florath.net)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import logging
-import sys
-
-
-logger = logging.getLogger(__name__)
-
-
-class LevelBase(object):
-
-    def __init__(self, lvl, config, default_config, result, sub_modules):
-        self.level = lvl
-        self.config = config
-        self.default_config = default_config
-        self.result = result
-        self.sub_modules = sub_modules
-
-    def call_sub_modules(self, callback):
-        """Generic way calling submodules"""
-        result = {}
-        if self.result is not None:
-            result = self.result.copy()
-        for name, cfg in self.config:
-            if name in self.sub_modules:
-                logger.info("Calling sub module [%s]" % name)
-                sm = self.sub_modules[name](cfg, self.default_config,
-                                            self.result)
-                lres = callback(sm)
-                result.update(lres)
-            else:
-                logger.error("Unknown sub module [%s]" % name)
-                sys.exit(1)
-        return result
-
-    def create_cb(self, obj):
-        return obj.create()
-
-    def create(self):
-        """Create the configured block devices"""
-        logger.info("Starting to create level [%d] block devices" % self.level)
-        result = self.call_sub_modules(self.create_cb)
-        logger.info("Finished creating level [%d] block devices" % self.level)
-        return result
-
-    def delete_cb(self, obj):
-        return obj.delete()
-
-    def delete(self):
-        """Delete the configured block devices"""
-        logger.info("Starting to delete level [%d] block devices" % self.level)
-        res = self.call_sub_modules(self.delete_cb)
-        logger.info("Finished deleting level [%d] block devices" % self.level)
-        return all(p for p in res.values())
@@ -12,7 +12,10 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-SIZE_SPECS = [
+import re
+
+
+SIZE_UNIT_SPECS = [
     ["TiB", 1024**4],
     ["GiB", 1024**3],
     ["MiB", 1024**2],
@@ -29,45 +32,52 @@ SIZE_SPECS = [
     ["", 1],  # No unit -> size is given in bytes
 ]
 
+# Basic RE to check and split floats (without exponent)
+# and a given unit specification (which must be non-numerical).
+size_unit_spec_re = re.compile("^([\d\.]*) ?([a-zA-Z0-9_]*)$")
 
-def _split_size_spec(size_spec):
-    for spec_key, spec_value in SIZE_SPECS:
-        if len(spec_key) == 0:
-            return size_spec, spec_key
-        if size_spec.endswith(spec_key):
-            return size_spec[:-len(spec_key)], spec_key
-    raise RuntimeError("size_spec [%s] not known" % size_spec)
+
+def _split_size_unit_spec(size_unit_spec):
+    """Helper function to split unit specification into parts.
+
+    The first part is the numeric part - the second one is the unit.
+    """
+    match = size_unit_spec_re.match(size_unit_spec)
+    if match is None:
+        raise RuntimeError("Invalid size unit spec [%s]" % size_unit_spec)
+
+    return match.group(1), match.group(2)
 
 
 def _get_unit_factor(unit_str):
-    for spec_key, spec_value in SIZE_SPECS:
+    """Helper function to get the unit factor.
+
+    The given unit_str needs to be a string of the
+    SIZE_UNIT_SPECS table.
+    If the unit is not found, a runtime error is raised.
+    """
+    for spec_key, spec_value in SIZE_UNIT_SPECS:
         if unit_str == spec_key:
             return spec_value
     raise RuntimeError("unit_str [%s] not known" % unit_str)
 
 
 def parse_abs_size_spec(size_spec):
-    size_cnt_str, size_unit_str = _split_size_spec(size_spec)
+    size_cnt_str, size_unit_str = _split_size_unit_spec(size_spec)
     unit_factor = _get_unit_factor(size_unit_str)
     return int(unit_factor * (
         float(size_cnt_str) if len(size_cnt_str) > 0 else 1))
 
 
-def convert_to_utf8(jdata):
-    """Convert to UTF8.
-
-    The json parser returns unicode strings. Because in
-    some python implementations unicode strings are not
-    compatible with utf8 strings - especially when using
-    as keys in dictionaries - this function recursively
-    converts the json data.
-    """
-    if isinstance(jdata, unicode):
-        return jdata.encode('utf-8')
-    elif isinstance(jdata, dict):
-        return {convert_to_utf8(key): convert_to_utf8(value)
-                for key, value in jdata.iteritems()}
-    elif isinstance(jdata, list):
-        return [convert_to_utf8(je) for je in jdata]
-    else:
-        return jdata
+def parse_rel_size_spec(size_spec, abs_size):
+    """Parses size specifications - can be relative like 50%
+
+    In addition to the absolute parsing also a relative
+    parsing is done. If the size specification ends in '%',
+    then the relative size of the given 'abs_size' is returned.
+    """
+    if size_spec[-1] == '%':
+        percent = float(size_spec[:-1])
+        return True, int(abs_size * percent / 100.0)
+
+    return False, parse_abs_size_spec(size_spec)
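A short sanity check of the two parsers above; parse_rel_size_spec returns an (is_relative, size_in_bytes) tuple so callers like Partitioning can resolve '100%' against the remaining free space:

    assert parse_abs_size_spec("1MiB") == 1024 ** 2
    assert parse_abs_size_spec("1.5GiB") == int(1.5 * 1024 ** 3)
    assert parse_rel_size_spec("50%", 4096) == (True, 2048)
    assert parse_rel_size_spec("1MiB", 4096) == (False, 1024 ** 2)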
@@ -9,14 +9,8 @@ fi
 set -eu
 set -o pipefail
 
-# FIXME:
-[ -n "$IMAGE_BLOCK_DEVICE" ]
 PART_DEV=$IMAGE_BLOCK_DEVICE
-if [[ "$ARCH" =~ "ppc" ]]; then
-    BOOT_DEV=$(echo $IMAGE_BLOCK_DEVICE | sed -e 's#p2##')'p1'
-else
-    BOOT_DEV=$(echo $IMAGE_BLOCK_DEVICE | sed -e 's#p1##' | sed -e 's#mapper/##')
-fi
+BOOT_DEV=$IMAGE_BLOCK_DEVICE_WITHOUT_PART
 
 function install_extlinux {
     install-packages -m bootloader extlinux
@@ -1,19 +0,0 @@
-===================
-partitioning-sfdisk
-===================
-Sets up a partitioned disk using sfdisk, according to user needs.
-
-Environment Variables
----------------------
-DIB_PARTITIONING_SFDISK_SCHEMA
-  :Required: Yes
-  :Default: 2048,,L *
-            0 0;
-            0 0;
-            0 0;
-  :Description: A multi-line string specifying a disk schema in sectors.
-  :Example: ``DIB_PARTITIONING_SFDISK_SCHEMA="
-            2048,10000,L *
-            10248,,L
-            0 0;
-            " will create two partitions on disk, first one will be bootable.
@@ -1,46 +0,0 @@
-#!/bin/bash
-
-# dib-lint: disable=safe_sudo
-
-if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then
-    set -x
-fi
-set -eu
-set -o pipefail
-
-# sanity checks
-[ -n "$IMAGE_BLOCK_DEVICE" ] || die "Image block device not set"
-
-# execute sfdisk with the given partitioning schema
-sudo sfdisk -uS --force $IMAGE_BLOCK_DEVICE <<EOF
-$DIB_PARTITIONING_SFDISK_SCHEMA
-EOF
-sudo partprobe $IMAGE_BLOCK_DEVICE
-
-# To ensure no race conditions exist from calling partprobe
-sudo udevadm settle
-
-# If the partition isn't under /dev/loop*p1, create it with kpartx
-DM=
-if [ ! -e "${IMAGE_BLOCK_DEVICE}p1" ]; then
-    DM=${IMAGE_BLOCK_DEVICE/#\/dev/\/dev\/mapper}
-    # If running inside Docker, make our nodes manually, because udev will not be working.
-    if [ -f /.dockerenv ]; then
-        # kpartx cannot run in sync mode in docker.
-        sudo kpartx -av $TMP_IMAGE_PATH
-        sudo dmsetup --noudevsync mknodes
-    else
-        sudo kpartx -asv $TMP_IMAGE_PATH
-    fi
-elif [[ "$ARCH" =~ "ppc" ]]; then
-    sudo kpartx -asv $TMP_IMAGE_PATH
-fi
-
-if [ -n "$DM" ]; then
-    echo "IMAGE_BLOCK_DEVICE=${DM}p1"
-elif [[ "$ARCH" =~ "ppc" ]]; then
-    DM=${IMAGE_BLOCK_DEVICE/#\/dev/\/dev\/mapper}
-    echo "IMAGE_BLOCK_DEVICE=${DM}p2"
-else
-    echo "IMAGE_BLOCK_DEVICE=${IMAGE_BLOCK_DEVICE}p1"
-fi
@@ -1,5 +0,0 @@
-DEFAULT_SCHEMA="2048 + L *
-0 0;
-0 0;
-0 0;"
-export DIB_PARTITIONING_SFDISK_SCHEMA=${DIB_PARTITIONING_SFDISK_SCHEMA:-$DEFAULT_SCHEMA}
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-if [ ${DIB_DEBUG_TRACE:-1} -gt 0 ]; then
-    set -x
-fi
-set -eu
-set -o pipefail
-
-if [ $DISTRO_NAME = 'opensuse' ] ; then
-    # workaround for https://bugzilla.novell.com/show_bug.cgi?id=859493
-    rm -f /dev/mapper/loop*_part1
-fi
diskimage_builder/elements/vm/environment.d/10-partitioning (new file, 15 lines)
@@ -0,0 +1,15 @@
+export DIB_BLOCK_DEVICE_DEFAULT_CONFIG="
+local_loop:
+  name: image0
+
+partitioning:
+  base: image0
+  label: mbr
+  partitions:
+    - name: root_p1
+      flags: [ boot, primary ]
+      size: 100%
+"
+
+DIB_BLOCK_DEVICE_CONFIG=${DIB_BLOCK_DEVICE_CONFIG:-${DIB_BLOCK_DEVICE_DEFAULT_CONFIG}}
+export DIB_BLOCK_DEVICE_CONFIG
diskimage_builder/graph/__init__.py (new file, empty)
diskimage_builder/graph/digraph.py (new file, 194 lines)
@@ -0,0 +1,194 @@
# Copyright 2016 Andreas Florath (andreas@florath.net)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#


class Digraph(object):
    """Implements a directed graph.

    Each node of the digraph must have a unique name.
    """

    class Node(object):
        """Directed graph node.

        This holds the incoming and outgoing edges as well as the
        node's name.
        """

        def __init__(self, name):
            """Initializes a node.

            Incoming and outgoing are sets of nodes.  Typically one
            direction is provided and the other can be automatically
            computed.
            """
            self.__name = name
            self.__incoming = set()
            self.__outgoing = set()

        def get_name(self):
            """Returns the name of the node."""
            return self.__name

        def add_incoming(self, node):
            """Add node to the incoming set."""
            self.__incoming.add(node)

        def add_outgoing(self, node):
            """Add node to the outgoing set."""
            self.__outgoing.add(node)

        def get_iter_outgoing(self):
            """Return an iterator over the outgoing nodes."""
            return iter(self.__outgoing)

        @staticmethod
        def __as_named_list(inlist):
            """Return the given list as a list of names."""
            return map(lambda x: x.get_name(), inlist)

        def get_outgoing_as_named_list(self):
            """Return the names of all outgoing nodes as a list."""
            return self.__as_named_list(self.__outgoing)

    def __init__(self):
        """Create an empty digraph."""
        self._named_nodes = {}

    def create_from_dict(self, init_dgraph, node_gen_func=Node):
        """Creates a new digraph based on the given information."""
        # First run: create all nodes
        for node_name in init_dgraph:
            # Create the node and put it into the object list of all
            # nodes and into the local dictionary of named nodes.
            named_node = node_gen_func(node_name)
            self.add_node(named_node)

        # Second run: run through all nodes and create the edges.
        for node_name, outs in init_dgraph.items():
            node_from = self.find(node_name)
            for onode in outs:
                node_to = self.find(onode)
                if node_to is None:
                    raise RuntimeError("Node '%s' is referenced "
                                       "but not specified" % onode)
                self.create_edge(node_from, node_to)

    def add_node(self, anode):
        """Adds a new node to the graph.

        Checks if a node with the same name already exists.
        """
        assert issubclass(anode.__class__, Digraph.Node)
        for node in self._named_nodes.values():
            if node.get_name() == anode.get_name():
                raise RuntimeError("Node with name [%s] already "
                                   "exists" % node.get_name())
        self._named_nodes[anode.get_name()] = anode

    def create_edge(self, anode, bnode):
        """Creates an edge from a to b - both must be nodes."""
        assert issubclass(anode.__class__, Digraph.Node)
        assert issubclass(bnode.__class__, Digraph.Node)
        assert anode.get_name() in self._named_nodes.keys()
        assert anode == self._named_nodes[anode.get_name()]
        assert bnode.get_name() in self._named_nodes.keys()
        assert bnode == self._named_nodes[bnode.get_name()]
        anode.add_outgoing(bnode)
        bnode.add_incoming(anode)

    def get_iter_nodes_values(self):
        """Returns an iterator over the values of the nodes dict.

        Note: it is not possible to change things with the help of the
        result of this function.
        """
        return iter(self._named_nodes.values())

    def find(self, name):
        """Get the node with the given name.

        Return None if not available.
        """
        if name not in self._named_nodes:
            return None
        return self._named_nodes[name]

    def as_dict(self):
        """Outputs this digraph as a dictionary."""
        # Start with an empty dictionary
        rval = {}
        for node in self._named_nodes.values():
            rval[node.get_name()] = node.get_outgoing_as_named_list()
        return rval

    def topological_sort(dg):
        """Digraph topological sort.

        This algorithm is based upon a depth first search with
        'marking' of some special nodes.
        The result is the topologically sorted list of nodes.
        """
        # List of topologically sorted nodes
        tsort = []
        # List of nodes already visited.
        # (This is held here - local to the algorithm - to not modify the
        # nodes themselves.)
        visited = []

        def visit(node):
            """Recursive depth first search function."""
            if node not in visited:
                visited.append(node)
                for onode in node.get_iter_outgoing():
                    visit(onode)
                tsort.insert(0, node)

        # The 'main' function of the topological sort
        for node in dg.get_iter_nodes_values():
            visit(node)

        return tsort


# Utility functions

def digraph_create_from_dict(init_dgraph, node_gen_func=Digraph.Node):
    """Creates a new digraph based on the given information."""
    digraph = Digraph()
    digraph.create_from_dict(init_dgraph, node_gen_func)
    return digraph


def node_list_to_node_name_list(node_list):
    """Converts a node list into a list of the corresponding node names."""
    node_name_list = []
    for n in node_list:
        node_name_list.append(n.get_name())
    return node_name_list
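A minimal usage sketch for the Digraph API introduced above; it mirrors what the functional tests later in this commit do, so only names defined in this file are used::

   from diskimage_builder.graph.digraph import digraph_create_from_dict
   from diskimage_builder.graph.digraph import node_list_to_node_name_list

   # Edges point from a node to the nodes it must precede.
   dg = digraph_create_from_dict({"A": ["B", "C"], "B": ["C"], "C": []})
   tsort = dg.topological_sort()
   print(node_list_to_node_name_list(tsort))  # ['A', 'B', 'C']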
@@ -41,7 +41,7 @@ function mk_build_dir () {
     fi
     trap trap_cleanup EXIT
     echo Building in $TMP_BUILD_DIR
-    export TMP_IMAGE_PATH=$TMP_IMAGE_DIR/image.raw
+    export TMP_IMAGE_DIR
     export OUT_IMAGE_PATH=$TMP_IMAGE_PATH
     export TMP_HOOKS_PATH=$TMP_BUILD_DIR/hooks
 }
@@ -360,8 +360,27 @@ export TMP_IMAGE_DIR
 # Try the 'old fashioned' way calling the block device
 # phase. If this gives no result, use the configuration based approach:
 eval_run_d block-device "IMAGE_BLOCK_DEVICE="

+# Because there is currently no generic way of passing in variables
+# from elements to the main, get here the well known config of a well
+# known element.
+# (This is only temporary and will go away when the complete block
+# device handling including file system handling and mounting is
+# implemented using python.)
+if [[ $IMAGE_ELEMENT =~ vm ]]; then
+    for EPATH in $(echo ${ELEMENTS_PATH} | tr ":" " "); do
+        PART_CFG_PATH=${EPATH}/vm/environment.d/10-partitioning
+        [ -e ${PART_CFG_PATH} ] && source ${PART_CFG_PATH}
+    done
+fi
+
+# There is the need to get the path of the dib-block-device entry
+# point, because when running in a venv, the standard path of
+# sudo does not include this.
+DIB_BLOCK_DEVICE_SCRIPT=$(which dib-block-device)
+
 if [ -z ${IMAGE_BLOCK_DEVICE} ] ; then
-    IMAGE_BLOCK_DEVICE=$(dib-block-device \
+    IMAGE_BLOCK_DEVICE=$(${DIB_BLOCK_DEVICE_SCRIPT} \
         --phase=create \
         --config="${DIB_BLOCK_DEVICE_CONFIG:-}" \
         --image-size="${DIB_IMAGE_SIZE}"KiB \
@@ -370,55 +389,36 @@ if [ -z ${IMAGE_BLOCK_DEVICE} ] ; then
 fi
 export IMAGE_BLOCK_DEVICE
 LOOPDEV=${IMAGE_BLOCK_DEVICE}
-export EXTRA_DETACH="detach_loopback $LOOPDEV"
-export EXTRA_UNMOUNT="dib-block-device --phase=umount \
+IMAGE_BLOCK_DEVICE_WITHOUT_PART=$(echo ${IMAGE_BLOCK_DEVICE} \
+    | sed -e "s|^\(.*loop[0-9]*\)p[0-9]*$|\1|g")
+export IMAGE_BLOCK_DEVICE_WITHOUT_PART
+
+export EXTRA_DETACH="detach_loopback ${IMAGE_BLOCK_DEVICE_WITHOUT_PART}"
+export EXTRA_UNMOUNT="dib-block-device --phase=cleanup \
     --build-dir=\"${TMP_BUILD_DIR}\""

 # Create the partitions and make them visible to the system
-
-# Create 2 partitions for PPC, one for PReP boot and other for root
-if [[ "$ARCH" =~ "ppc" ]] ; then
-    sudo parted -a optimal -s $IMAGE_BLOCK_DEVICE \
-        mklabel msdos \
-        mkpart primary 0 8cyl \
-        set 1 boot on \
-        set 1 prep on \
-        mkpart primary 9cyl 100%
-else
-    sudo parted -a optimal -s $IMAGE_BLOCK_DEVICE \
-        mklabel msdos \
-        mkpart primary 1MiB 100% \
-        set 1 boot on
-fi
-
-sudo partprobe $IMAGE_BLOCK_DEVICE
+sudo partprobe $IMAGE_BLOCK_DEVICE_WITHOUT_PART

 # To ensure no race conditions exist from calling partprobe
 sudo udevadm settle

 # If the partition isn't under /dev/loop*p1, create it with kpartx
 DM=
-if [ ! -e "${IMAGE_BLOCK_DEVICE}p1" ]; then
+if [ ! -e "${IMAGE_BLOCK_DEVICE}" ]; then
     DM=${IMAGE_BLOCK_DEVICE/#\/dev/\/dev\/mapper}
     # If running inside Docker, make our nodes manually, because udev will not be working.
     if [ -f /.dockerenv ]; then
         # kpartx cannot run in sync mode in docker.
-        sudo kpartx -av $TMP_IMAGE_PATH
+        sudo kpartx -av ${IMAGE_BLOCK_DEVICE_WITHOUT_PART}
         sudo dmsetup --noudevsync mknodes
     else
-        sudo kpartx -asv $TMP_IMAGE_PATH
+        sudo kpartx -asv ${IMAGE_BLOCK_DEVICE_WITHOUT_PART}
     fi
 elif [[ "$ARCH" =~ "ppc" ]]; then
-    sudo kpartx -asv $TMP_IMAGE_PATH
+    sudo kpartx -asv ${IMAGE_BLOCK_DEVICE_WITHOUT_PART}
 fi
-
-if [ -n "$DM" ]; then
-    export IMAGE_BLOCK_DEVICE=${DM}p1
-elif [[ "$ARCH" =~ "ppc" ]]; then
-    DM=${IMAGE_BLOCK_DEVICE/#\/dev/\/dev\/mapper}
-    export IMAGE_BLOCK_DEVICE=${DM}p2
-else
-    export IMAGE_BLOCK_DEVICE=${IMAGE_BLOCK_DEVICE}p1
-fi

 # End: Creation of the partitions
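The sed expression added above strips a trailing `pN` partition suffix to recover the whole-disk loop device; an equivalent Python sketch, shown here only to make the regular expression easier to read::

   import re

   def without_part(dev):
       # Same pattern as the sed call: drop a trailing pN from a loop device.
       return re.sub(r"^(.*loop[0-9]*)p[0-9]*$", r"\1", dev)

   assert without_part("/dev/loop0p1") == "/dev/loop0"
   assert without_part("/dev/mapper/loop0p1") == "/dev/mapper/loop0"
   assert without_part("/dev/loop3") == "/dev/loop3"  # no partition suffix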
@@ -471,9 +471,14 @@ fi
 # space before converting the image to some other format.
 export EXTRA_UNMOUNT=""
 unmount_image
-export TMP_IMAGE_PATH=$(dib-block-device \
+export TMP_IMAGE_PATH=$(${DIB_BLOCK_DEVICE_SCRIPT} \
     --phase=umount \
     --build-dir="${TMP_BUILD_DIR}" )

+${DIB_BLOCK_DEVICE_SCRIPT} \
+    --phase=cleanup \
+    --build-dir="${TMP_BUILD_DIR}"
+
 cleanup_build_dir

 if [[ (! $IMAGE_ELEMENT =~ no-final-image) && "$IS_RAMDISK" == "0" ]]; then
@@ -50,6 +50,10 @@ function trap_cleanup() {
 }

 function cleanup () {
+    DIB_BLOCK_DEVICE_SCRIPT=$(which dib-block-device)
+    sudo -E ${DIB_BLOCK_DEVICE_SCRIPT} \
+        --phase=umount \
+        --build-dir="${TMP_BUILD_DIR}"
     unmount_image
     cleanup_build_dir
     cleanup_image_dir
New file: diskimage_builder/tests/functional/__init__.py (empty)
New file: diskimage_builder/tests/functional/test_blockdevice.py (87 lines)
@@ -0,0 +1,87 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import fixtures
import logging
import testtools

from diskimage_builder import block_device
from diskimage_builder.block_device.level0 import localloop
from diskimage_builder.logging_config import setup
from diskimage_builder import utils as dib_utils


# Setup Logging
setup()


class StateSavingBlockDevice(block_device.BlockDevice):
    def cmd_create(self):
        logging.info("StateSavingBlockDevice cmd_create()")
        super(StateSavingBlockDevice, self).cmd_create()
        _, _, self.state = self.load_state()


class BlockDeviceFixture(fixtures.Fixture):
    def __init__(self, *args, **kwargs):
        logging.info("BlockDeviceFixture constructor")
        self.args = args
        self.kwargs = kwargs
        self.bd = None

    def _setUp(self):
        logging.info("BlockDeviceFixture _setUp()")
        self.bd = StateSavingBlockDevice(*self.args, **self.kwargs)
        self.addCleanup(self.cleanup_loopbacks)

    def _assert_loopback_detatched(self, loopback):
        if localloop.LocalLoop.loopdev_is_attached(loopback):
            localloop.LocalLoop.loopdev_detach(loopback)

    def cleanup_loopbacks(self):
        for lb_dev in self.bd.state.get('loopback_devices', []):
            self._assert_loopback_detatched(lb_dev)


class TestBlockDevice(testtools.TestCase):
    def _assert_loopbacks_cleaned(self, blockdevice):
        for lb_dev in blockdevice.state.get('loopback_devices', []):
            self.assertEqual(False,
                             localloop.LocalLoop.loopdev_is_attached(lb_dev))

    # ToDo: This calls sudo to setup the loop device - which is not allowed.
    # Currently no idea how to continue here...
    def _DONT_test_create_default_config(self):
        logging.info("test_create_default_config called")
        builddir = self.useFixture(fixtures.TempDir()).path
        imagedir = self.useFixture(fixtures.TempDir()).path
        logging.info("builddir [%s]" % builddir)
        logging.info("imagedir [%s]" % imagedir)

        logging.info("Calling BlockDevice constructor")
        bd = self.useFixture(BlockDeviceFixture(
            None, builddir, '%dKiB' % (1024 * 1024), imagedir
        )).bd
        logging.info("Calling BlockDevice cmd_create()")
        bd.cmd_create()

        logging.info("Check result")
        logging.info("State [%s]" % bd.state)
        self.assertTrue('device' in bd.state['image0'])
        lb_dev = bd.state['image0']['device']
        # partprobe loopback so we can get partition info
        args = ['sudo', 'partprobe', lb_dev]
        subp, rval = dib_utils.await_popen_cmd(logging, args)
        self.assertEqual(0, rval)

        bd.cmd_cleanup()
        self._assert_loopbacks_cleaned(bd)
New file: diskimage_builder/tests/functional/test_blockdevice_mbr.py (174 lines)
@@ -0,0 +1,174 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
from diskimage_builder.block_device.level0.localloop import LocalLoop
from diskimage_builder.block_device.level1.mbr import MBR
from diskimage_builder.logging_config import setup
import logging
import os
import shutil
import subprocess
import tempfile
import testtools


class TestMBR(testtools.TestCase):

    disk_size_10M = 10 * 1024 * 1024
    disk_size_1G = 1024 * 1024 * 1024

    pargs = ["--raw", "--output",
             "NR,START,END,TYPE,FLAGS,SCHEME", "-g", "-b", "-"]

    def _get_path_for_partx(self):
        """Searches for the partx binary.

        Because different distributions store the partx binary
        at different places, there is the need to look for it.
        """
        dirs = ["/bin", "/usr/bin", "/sbin", "/usr/sbin"]

        for d in dirs:
            if os.path.exists(os.path.join(d, "partx")):
                return os.path.join(d, "partx")
        # If not found, try without path.
        return "partx"

    def setUp(self):
        super(TestMBR, self).setUp()
        setup()

    def _create_image(self):
        tmp_dir = tempfile.mkdtemp(prefix="dib-bd-mbr-")
        image_path = os.path.join(tmp_dir, "image.raw")
        LocalLoop.image_create(image_path, TestMBR.disk_size_1G)
        return tmp_dir, image_path

    def _run_partx(self, image_path):
        largs = copy.copy(TestMBR.pargs)
        partx_path = self._get_path_for_partx()
        largs.insert(0, partx_path)
        largs.append(image_path)
        logging.info("Running command [%s]", largs)
        return subprocess.check_output(largs).decode("ascii")

    def test_one_ext_partition(self):
        """Creates one partition and checks correctness with partx."""
        tmp_dir, image_path = self._create_image()
        with MBR(image_path, TestMBR.disk_size_1G, 1024 * 1024) as mbr:
            mbr.add_partition(False, False, TestMBR.disk_size_10M, 0x83)

        output = self._run_partx(image_path)
        shutil.rmtree(tmp_dir)
        self.assertEqual(
            "1 2048 2097151 0xf 0x0 dos\n"
            "5 4096 24575 0x83 0x0 dos\n", output)

    def test_zero_partitions(self):
        """Creates no partition and checks correctness with partx."""
        tmp_dir, image_path = self._create_image()
        with MBR(image_path, TestMBR.disk_size_1G, 1024 * 1024):
            pass

        output = self._run_partx(image_path)
        shutil.rmtree(tmp_dir)
        self.assertEqual("", output)

    def test_many_ext_partitions(self):
        """Creates many partitions and checks correctness with partx."""
        tmp_dir, image_path = self._create_image()
        with MBR(image_path, TestMBR.disk_size_1G, 1024 * 1024) as mbr:
            for nr in range(0, 64):
                mbr.add_partition(False, False, TestMBR.disk_size_10M, 0x83)

        output = self._run_partx(image_path)

        shutil.rmtree(tmp_dir)

        lines = output.split("\n")
        self.assertEqual(66, len(lines))

        self.assertEqual(
            "1 2048 2097151 0xf 0x0 dos", lines[0])

        start_block = 4096
        end_block = start_block + TestMBR.disk_size_10M / 512 - 1
        for nr in range(1, 65):
            fields = lines[nr].split(" ")
            self.assertEqual(6, len(fields))
            self.assertEqual(nr + 4, int(fields[0]))
            self.assertEqual(start_block, int(fields[1]))
            self.assertEqual(end_block, int(fields[2]))
            self.assertEqual("0x83", fields[3])
            self.assertEqual("0x0", fields[4])
            self.assertEqual("dos", fields[5])
            start_block += 22528
            end_block = start_block + TestMBR.disk_size_10M / 512 - 1

    def test_one_pri_partition(self):
        """Creates one primary partition and checks correctness with partx."""
        tmp_dir, image_path = self._create_image()
        with MBR(image_path, TestMBR.disk_size_1G, 1024 * 1024) as mbr:
            mbr.add_partition(True, False, TestMBR.disk_size_10M, 0x83)

        output = self._run_partx(image_path)
        shutil.rmtree(tmp_dir)
        self.assertEqual(
            "1 2048 22527 0x83 0x0 dos\n", output)

    def test_three_pri_partition(self):
        """Creates three primary partitions and checks correctness with partx."""
        tmp_dir, image_path = self._create_image()
        with MBR(image_path, TestMBR.disk_size_1G, 1024 * 1024) as mbr:
            for _ in range(3):
                mbr.add_partition(True, False, TestMBR.disk_size_10M, 0x83)

        output = self._run_partx(image_path)
        shutil.rmtree(tmp_dir)
        self.assertEqual(
            "1 2048 22527 0x83 0x0 dos\n"
            "2 22528 43007 0x83 0x0 dos\n"
            "3 43008 63487 0x83 0x0 dos\n", output)

    def test_many_pri_and_ext_partition(self):
        """Creates many primary and extended partitions."""
        tmp_dir, image_path = self._create_image()
        with MBR(image_path, TestMBR.disk_size_1G, 1024 * 1024) as mbr:
            # Create three primary partitions
            for _ in range(3):
                mbr.add_partition(True, False, TestMBR.disk_size_10M, 0x83)
            for _ in range(7):
                mbr.add_partition(False, False, TestMBR.disk_size_10M, 0x83)

        output = self._run_partx(image_path)
        shutil.rmtree(tmp_dir)
        self.assertEqual(
            "1 2048 22527 0x83 0x0 dos\n"     # Primary 1
            "2 22528 43007 0x83 0x0 dos\n"    # Primary 2
            "3 43008 63487 0x83 0x0 dos\n"    # Primary 3
            "4 63488 2097151 0xf 0x0 dos\n"   # Extended
            "5 65536 86015 0x83 0x0 dos\n"    # Extended Partition 1
            "6 88064 108543 0x83 0x0 dos\n"   # Extended Partition 2
            "7 110592 131071 0x83 0x0 dos\n"  # ...
            "8 133120 153599 0x83 0x0 dos\n"
            "9 155648 176127 0x83 0x0 dos\n"
            "10 178176 198655 0x83 0x0 dos\n"
            "11 200704 221183 0x83 0x0 dos\n", output)
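The sector numbers asserted in these tests follow from the 1MiB alignment; a short arithmetic check (one sector is 512 bytes; the 2048-sector gap before each logical partition presumably holds the EBR)::

   SECTOR = 512
   ALIGN = 1024 * 1024 // SECTOR        # 2048 sectors = 1MiB alignment
   PART = 10 * 1024 * 1024 // SECTOR    # 20480 sectors = one 10MiB partition

   start = 2 * ALIGN                    # 4096: extended start plus one gap
   end = start + PART - 1               # 24575, as asserted above
   assert (start, end) == (4096, 24575)

   stride = PART + ALIGN                # 22528: the loop increment in the test
   assert stride == 22528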
New file: diskimage_builder/tests/functional/test_blockdevice_utils.py (49 lines)
@@ -0,0 +1,49 @@
# Copyright 2016 Andreas Florath (andreas@florath.net)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


from diskimage_builder.block_device.utils import parse_abs_size_spec
from diskimage_builder.block_device.utils import parse_rel_size_spec
import testtools


class TestBlockDeviceUtils(testtools.TestCase):
    """Tests for the utils.py in the block_device dir.

    This tests mostly the error and failure cases - because the good
    cases are tested implicitly with the higher level unit tests.
    """

    def test_parse_rel_size_with_abs(self):
        """Calls parse_rel_size_spec with an absolute number"""
        is_rel, size = parse_rel_size_spec("154MiB", 0)
        self.assertFalse(is_rel)
        self.assertEqual(154 * 1024 * 1024, size)

    def test_parse_abs_size_without_spec(self):
        """Calls parse_abs_size_spec without a unit spec"""
        size = parse_abs_size_spec("198")
        self.assertEqual(198, size)

    def test_invalid_unit_spec(self):
        """Calls parse_abs_size_spec with an invalid unit spec"""
        self.assertRaises(RuntimeError, parse_abs_size_spec, "747InVaLiDUnIt")

    def test_broken_unit_spec(self):
        """Calls parse_abs_size_spec with a completely broken unit spec"""
        self.assertRaises(RuntimeError, parse_abs_size_spec, "_+!HuHi+-=")
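For completeness, the good-path behaviour these utilities provide, taken only from the assertions above::

   from diskimage_builder.block_device.utils import parse_abs_size_spec
   from diskimage_builder.block_device.utils import parse_rel_size_spec

   assert parse_abs_size_spec("154MiB") == 154 * 1024 * 1024
   assert parse_abs_size_spec("198") == 198        # no unit: plain bytes

   # An absolute spec handed to the relative parser is flagged as absolute.
   is_rel, size = parse_rel_size_spec("154MiB", 0)
   assert (is_rel, size) == (False, 154 * 1024 * 1024)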
New file: diskimage_builder/tests/functional/test_graph.py (123 lines)
@@ -0,0 +1,123 @@
# Copyright 2016 Andreas Florath (andreas@florath.net)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from diskimage_builder.graph.digraph import Digraph
from diskimage_builder.graph.digraph import digraph_create_from_dict
import testtools


class TestDigraph(testtools.TestCase):

    def test_constructor_001(self):
        """Test conversion from dictionary to graph and back (two nodes)"""
        d = {"A": ["B"], "B": []}
        dg = digraph_create_from_dict(d)
        e = dg.as_dict()
        self.assertEqual(d["A"], list(e["A"]))

    def test_constructor_002(self):
        """Test conversion from dictionary to graph and back (zero nodes)"""
        d = {}
        dg = digraph_create_from_dict(d)
        e = dg.as_dict()
        self.assertEqual(d, e)

    def test_constructor_003(self):
        """Test conversion from dictionary to graph and back (one node)"""
        d = {"A": []}
        dg = digraph_create_from_dict(d)
        e = dg.as_dict()
        self.assertEqual(d["A"], list(e["A"]))

    def test_constructor_004(self):
        """Test conversion from dictionary to graph and back (one node, self loop)"""
        d = {"A": ["A"]}
        dg = digraph_create_from_dict(d)
        e = dg.as_dict()
        self.assertEqual(d["A"], list(e["A"]))

    def test_constructor_005(self):
        """Test conversion error: referenced node does not exist"""
        d = {"A": ["B"]}
        try:
            d = digraph_create_from_dict(d)
            self.assertTrue(False)
        except RuntimeError:
            pass

    def test_constructor_006(self):
        """Test conversion from dictionary: two node circle"""
        d = {"A": ["B"], "B": ["A"]}
        dg = digraph_create_from_dict(d)
        e = dg.as_dict()
        self.assertEqual(d["A"], list(e["A"]))
        self.assertEqual(d["B"], list(e["B"]))

    def test_constructor_007(self):
        """Test conversion from dictionary: more complex graph"""
        d = {"A": ["B"], "B": ["A", "D", "C"], "C": ["A", "D"],
             "D": ["D"]}
        dg = digraph_create_from_dict(d)
        e = dg.as_dict()
        self.assertEqual(d['A'], list(e['A']))
        self.assertEqual(set(d['B']), set(e['B']))
        self.assertEqual(set(d['C']), set(e['C']))
        self.assertEqual(d['D'], list(e['D']))

    def test_find_01(self):
        """Digraph find with element available"""
        d = {"A": ["B"], "B": ["A", "C", "D"], "C": ["A", "D"],
             "D": ["D"]}
        dg = digraph_create_from_dict(d)
        n = dg.find("A")
        self.assertEqual("A", n.get_name())

    def test_find_02(self):
        """Digraph find with element not available"""
        d = {"A": ["B"], "B": ["A", "C", "D"], "C": ["A", "D"],
             "D": ["D"]}
        dg = digraph_create_from_dict(d)
        n = dg.find("Z")
        self.assertIsNone(n)

    def test_get_named_node_01(self):
        """Digraph get named node with map available"""
        d = {"A": ["B"], "B": ["A", "C", "D"], "C": ["A", "D"],
             "D": ["D"]}
        dg = digraph_create_from_dict(d)
        n = dg.find("A")
        self.assertEqual("A", n.get_name())

    def test_add_node_01(self):
        """Digraph add node with two times same name"""
        dg = Digraph()
        n1 = Digraph.Node("myname")
        n2 = Digraph.Node("myname")
        dg.add_node(n1)
        try:
            dg.add_node(n2)
            self.assertTrue(False)
        except RuntimeError:
            pass
New file: diskimage_builder/tests/functional/test_graph_toposort.py (69 lines)
@@ -0,0 +1,69 @@
# Copyright 2016 Andreas Florath (andreas@florath.net)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from diskimage_builder.graph.digraph import digraph_create_from_dict
from diskimage_builder.graph.digraph import node_list_to_node_name_list
import testtools


class TestTopologicalSearch(testtools.TestCase):

    def test_tsort_001(self):
        """Simple three node digraph"""
        dg = digraph_create_from_dict(
            {"A": ["B", "C"], "B": ["C"], "C": []})
        tsort = dg.topological_sort()
        tnames = node_list_to_node_name_list(tsort)
        self.assertEqual(tnames, ['A', 'B', 'C'], "incorrect")

    def test_tsort_002(self):
        """Zero node digraph"""
        dg = digraph_create_from_dict({})
        tsort = dg.topological_sort()
        tnames = node_list_to_node_name_list(tsort)
        self.assertEqual(tnames, [], "incorrect")

    def test_tsort_003(self):
        """One node digraph"""
        dg = digraph_create_from_dict({"A": []})
        tsort = dg.topological_sort()
        tnames = node_list_to_node_name_list(tsort)
        self.assertEqual(tnames, ["A"], "incorrect")

    def test_tsort_004(self):
        """More complex digraph"""
        dg = digraph_create_from_dict(
            {"A": ["B", "C"], "B": ["C", "E"], "C": ["D", "E"],
             "D": ["E"], "E": []})
        tsort = dg.topological_sort()
        tnames = node_list_to_node_name_list(tsort)
        self.assertEqual(tnames, ['A', 'B', 'C', 'D', 'E'], "incorrect")

    def test_tsort_005(self):
        """Digraph with two components"""
        dg = digraph_create_from_dict({"A": ["B", "C"], "B": ["C"], "C": [],
                                       "D": ["E"], "E": []})
        tsort = dg.topological_sort()
        tnames = node_list_to_node_name_list(tsort)
        # Because of the two components there are a couple of different
        # possibilities - but these are all the requirements that have
        # to be fulfilled to be a correct topological sort:
        self.assertTrue(tnames.index('A') < tnames.index('B'))
        self.assertTrue(tnames.index('B') < tnames.index('C'))
        self.assertTrue(tnames.index('D') < tnames.index('E'))
New file: diskimage_builder/utils.py (20 lines)
@@ -0,0 +1,20 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import subprocess


def await_popen_cmd(logger, *args, **kwargs):
    if logger is not None:
        logger.debug("Running command: %s", args)
    subproc = subprocess.Popen(*args, **kwargs)
    return subproc, subproc.wait()
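A usage sketch for the helper above, matching how the functional tests call it (positional arguments are passed straight to `subprocess.Popen`)::

   import logging

   from diskimage_builder.utils import await_popen_cmd

   # Run a command and wait for its exit code.
   subproc, rval = await_popen_cmd(logging, ["true"])
   assert rval == 0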
@@ -61,31 +61,48 @@ Disk Image Layout
 When generating a block image (e.g. qcow2 or raw), by default one
 image with one partition holding all files is created.

-The appropriate functionality to use multiple partitions and even LVM
-is currently under development; therefore the possible configuration
-is currently limited, but will get more flexible as soon as all the
-functionality is implemented.
-
 The configuration is done by means of the environment variable
-`DIB_BLOCK_DEVICE_CONFIG`. This variable must hold JSON structured
+`DIB_BLOCK_DEVICE_CONFIG`. This variable must hold YAML structured
 configuration data.

-In future this will be a list of four elements, each describing one
-level of block device setup - but because currently only the lowest
-level is implemented, it contains only the configuration of the first
-level of block device setup.
-
 The default is:

 ::

-  DIB_BLOCK_DEVICE_CONFIG='[
-    [["local_loop",
-      {"name": "rootdisk"}]]]'
+  DIB_BLOCK_DEVICE_CONFIG='
+  local_loop:
+    name: image0

-In general each module is configured in the way, that the first
-element in the list is the name of the module (e.g. `local_loop`)
-followed by a dictionary of parameters (here `{"name": "rootdisk"}`).
+  partitioning:
+    base: image0
+    label: mbr
+    partitions:
+      - name: root_p1
+        flags: [ boot, primary ]
+        size: 100%'
+
+In general each module that depends on another module has a `base`
+element that points to the module it builds upon.
+
+Limitations
++++++++++++
+The appropriate functionality to use multiple partitions and even LVM
+is currently under development; therefore the possible configuration
+is currently limited, but will get more flexible as soon as all the
+functionality is implemented.
+
+In future this will be a list of some elements, each describing one
+part of the block device setup - but because currently only `local_loop`
+and `partitioning` are implemented, it contains only the configuration
+of these steps.
+
+Currently it is possible to create multiple local loop devices, but
+all but `image0` will not be usable (they are deleted during the
+build process).
+
+Currently only one partition is used for the image. The name of this
+partition must be `root_p1`. Other partitions are created but not
+used.

 Level 0
 +++++++
@@ -119,23 +136,132 @@ directory
 Example:

 ::

-  DIB_BLOCK_DEVICE_CONFIG='[
-    [["local_loop",
-      {"name": "rootdisk"}],
-     ["local_loop",
-      {"name": "datadisk",
-       "size": "7.5GiB",
-       "directory": "/var/tmp"}]]]'
+  local_loop:
+    name: image0
+
+  local_loop:
+    name: data_image
+    size: 7.5GiB
+    directory: /var/tmp

 This creates two image files and uses the loop device to use them as
-block devices. One image file called `rootdisk` is created with
+block devices. One image file called `image0` is created with
 default size in the default temp directory. The second image has the
 size of 7.5GiB and is created in the `/var/tmp` folder.

 Please note that due to current implementation restrictions it is only
 allowed to specify one local loop image.

+Level 1
++++++++
+
+Module: Partitioning
+....................
+
+This module generates partitions in existing block devices. This
+means that it is possible to take any kind of block device (e.g. LVM,
+encrypted, ...) and create partition information in it.
+
+The symbolic name for this module is `partitioning`.
+
+Currently the only supported partitioning layout is the Master Boot
+Record (`mbr`).
+
+It is possible to create primary or logical partitions or a mix of
+them. The numbering of the logical partitions will typically start
+with `5`, e.g. `/dev/vda5` for the first logical partition,
+`/dev/vda6` for the second and so on.
+
+The number of partitions created by this module is theoretically
+unlimited; it was tested with more than 1000 partitions inside one
+block device. Nevertheless the Linux kernel and different tools (like
+`parted`, `sfdisk`, `fdisk`) have some default maximum number of
+partitions that they can handle. Please consult the documentation of
+the appropriate software you plan to use and adapt the number of
+partitions accordingly.
+
+Partitions are created in the order they are configured. Primary
+partitions - if needed - must come first in the list.
+
+There are the following key / value pairs to define one disk:
+
+base
+   (mandatory) The base device where to create the partitions in.
+
+label
+   (mandatory) Possible values: 'mbr'
+   This uses the Master Boot Record (MBR) layout for the disk.
+   (There are currently plans to add GPT later on.)
+
+align
+   (optional - default value '1MiB')
+   Set the alignment of the partition. This must be a multiple of the
+   block size (i.e. 512 bytes). The default of 1MiB (~ 2048 * 512
+   byte blocks) is the default for modern systems and is known to
+   perform well on a wide range of targets [6]. For each partition
+   there might be some space that is not used - which is `align` - 512
+   bytes. For the default of 1MiB exactly 1048064 bytes (= 1 MiB -
+   512 byte) are not used in the partition itself. Please note that
+   if a boot loader should be written to the disk or partition,
+   there is a need for some space. E.g. grub needs 63 * 512 byte
+   blocks between the MBR and the start of the partition data; this
+   means when grub will be installed, the `align` must be set at least
+   to 64 * 512 byte = 32 KiB.
+
+partitions
+   (mandatory) A list of dictionaries. Each dictionary describes one
+   partition.
+
+The following key / value pairs can be given for each partition:
+
+name
+   (mandatory) The name of the partition. With the help of this name,
+   the partition can later be referenced, e.g. while creating a
+   file system.
+
+flags
+   (optional) List of flags for the partition. Default: empty.
+   Possible values:
+
+   boot
+      Sets the boot flag for the partition.
+   primary
+      Partition should be a primary partition. If not set, a logical
+      partition will be created.
+
+size
+   (mandatory) The size of the partition. The size can either be an
+   absolute number using units like `10GiB` or `1.75TB`, or a relative
+   (percentage) number: in the latter case the size is calculated
+   based on the remaining free space.
+
+Example:
+
+::
+
+   partitioning:
+     base: image0
+     label: mbr
+     partitions:
+       - name: part-01
+         flags: [ boot ]
+         size: 1GiB
+       - name: part-02
+         size: 100%
+
+   partitioning:
+     base: data_image
+     label: mbr
+     partitions:
+       - name: data0
+         size: 33%
+       - name: data1
+         size: 50%
+       - name: data2
+         size: 100%
+
+On `image0` two partitions are created. The size of the first is
+1GiB, the second uses the remaining free space. On `data_image`
+three partitions are created: all are about 1/3 of the disk size.
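A quick check of the alignment arithmetic quoted in the `align` description above::

   MiB = 1024 * 1024
   assert MiB - 512 == 1048064      # unused bytes per partition at 1MiB align
   assert 64 * 512 == 32 * 1024     # grub: at least 32KiB before partition data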
 Filesystem Caveat
 -----------------
@@ -166,4 +292,3 @@ creates ramdisk.
 If tmpfs is not used, you will need enough room in /tmp to store two
 uncompressed cloud images. If tmpfs is used, you would still need /tmp space
 for one uncompressed cloud image and about 20% of that image for working files.
-
@@ -0,0 +1,11 @@
---
features:
  - Create partitions with MBR layout optimized for performance and
    highly configurable.

deprecations:
  - The new partitions are created based on configuration rather than
    on a list of provided commands for a special partitioning tool.
    Therefore elements using tools (like partitioning-sfdisk) are
    deprecated and will be removed.
@@ -20,6 +20,7 @@ sudo apt-get install -y --force-yes \
     debootstrap \
     docker \
     kpartx \
+    util-linux \
     qemu-img || \
     sudo emerge \
     app-emulation/qemu \
@@ -4,6 +4,13 @@ set -eu
 set -o pipefail

 BASE_DIR=$(cd $(dirname "$0")/.. && pwd)
+
+# first we will run python functional tests with tox
+pushd $BASE_DIR
+tox -epython-functests
+popd
+
+# then execute tests for elements
 export DIB_CMD=disk-image-create
 export DIB_ELEMENTS=$(python -c '
 import diskimage_builder.paths
Changed: tox.ini (3 lines)
@@ -24,6 +24,9 @@ commands = {posargs}
 envdir = {toxworkdir}/venv
 commands = {toxinidir}/tests/run_functests.sh {posargs}

+[testenv:python-functests]
+setenv = OS_TEST_PATH=./diskimage_builder/tests/functional
+
 [testenv:cover]
 setenv = PYTHON=coverage run --source diskimage_builder
 commands = bash -c 'if [ ! -d ./.testrepository ] ; then testr init ; fi'