diff --git a/iso/empanadas/Containerfile b/iso/empanadas/Containerfile index 86a67c5..571d76c 100644 --- a/iso/empanadas/Containerfile +++ b/iso/empanadas/Containerfile @@ -1,4 +1,4 @@ -FROM quay.io/centos/centos:stream9 +FROM quay.io/rockylinux/rockylinux:9 ADD images/get_arch /get_arch diff --git a/iso/empanadas/Containerfile.imagefactory b/iso/empanadas/Containerfile.imagefactory index a4598aa..5d6b711 100644 --- a/iso/empanadas/Containerfile.imagefactory +++ b/iso/empanadas/Containerfile.imagefactory @@ -38,6 +38,8 @@ RUN dnf install -y \ sudo \ mock \ python-pip \ + mock \ + fuse-overlayfs \ imagefactory \ imagefactory-plugins* diff --git a/iso/empanadas/empanadas/backends/__init__.py b/iso/empanadas/empanadas/backends/__init__.py new file mode 100644 index 0000000..4c2ed94 --- /dev/null +++ b/iso/empanadas/empanadas/backends/__init__.py @@ -0,0 +1,5 @@ +"""Empanadas Backends (fillings)""" + +from .imagefactory import ImageFactoryBackend +from .kiwi import KiwiBackend +from .interface import BackendInterface diff --git a/iso/empanadas/empanadas/backends/imagefactory.py b/iso/empanadas/empanadas/backends/imagefactory.py new file mode 100644 index 0000000..70ece6e --- /dev/null +++ b/iso/empanadas/empanadas/backends/imagefactory.py @@ -0,0 +1,318 @@ +"""Backend for ImageFactory""" + +import json +import os +import pathlib +import tempfile + +from .interface import BackendInterface +from empanadas.builders import utils + +from attrs import define, field + +from typing import List, Optional, Callable, Union + +KICKSTART_PATH = pathlib.Path(os.environ.get("KICKSTART_PATH", "/kickstarts")) +STORAGE_DIR = pathlib.Path("/var/lib/imagefactory/storage") + + +@define(kw_only=True) +class ImageFactoryBackend(BackendInterface): + """Build an image using ImageFactory""" + kickstart_arg: List[str] = field(factory=list) + kickstart_path: pathlib.Path = field(init=False) + base_uuid: Optional[str] = field(default="") + target_uuid: Optional[str] = field(default="") + tdl_path: 
pathlib.Path = field(init=False) + out_type: str = field(init=False) + command_args: List[str] = field(factory=list) + common_args: List[str] = field(factory=list) + package_args: List[str] = field(factory=list) + metadata: pathlib.Path = field(init=False) + stage_commands: Optional[List[List[Union[str, Callable]]]] = field(init=False) + + # The url to use in the path when fetching artifacts for the build + kickstart_dir: str = field() # 'os' or 'kickstart' + + # The git repository to fetch kickstarts from + kickstart_repo: str = field() + + def prepare(self): + self.out_type = self.image_format() + + tdl_template = self.ctx.tmplenv.get_template('icicle/tdl.xml.tmpl') + + self.tdl_path = self.render_icicle_template(tdl_template) + if not self.tdl_path: + exit(2) + + self.metadata = pathlib.Path(self.ctx.outdir, ".imagefactory-metadata.json") + + self.kickstart_path = pathlib.Path(f"{KICKSTART_PATH}/Rocky-{self.ctx.architecture.major}-{self.ctx.type_variant}.ks") + + self.checkout_kickstarts() + self.kickstart_arg = self.kickstart_imagefactory_args() + + try: + os.mkdir(self.ctx.outdir) + except FileExistsError: + self.log.info("Directory already exists for this release. 
If possible, previously executed steps may be skipped") + except Exception as e: + self.log.exception("Some other exception occured while creating the output directory", e) + return 0 + + if os.path.exists(self.metadata): + self.ctx.log.info(f"Found metadata at {self.metadata}") + with open(self.metadata, "r") as f: + try: + o = json.load(f) + self.base_uuid = o['base_uuid'] + self.target_uuid = o['target_uuid'] + except json.decoder.JSONDecodeError as e: + self.ctx.log.exception("Couldn't decode metadata file", e) + finally: + f.flush() + + self.command_args = self._command_args() + self.package_args = self._package_args() + self.common_args = self._common_args() + + self.setup_staging() + + def build(self) -> int: + if self.base_uuid: + return 0 + + self.fix_ks() + + # TODO(neil): this should be a lambda which is called from the function + ret, out, err, uuid = self.ctx.prepare_and_run(self.build_command(), search=True) + if uuid: + self.base_uuid = uuid.rstrip() + self.save() + + if ret > 0: + return ret + + ret = self.package() + + if ret > 0: + return ret + + + def clean(self): + pass + + def save(self): + with open(self.metadata, "w") as f: + try: + o = { + name: getattr(self, name) for name in [ + "base_uuid", "target_uuid" + ] + } + self.ctx.log.debug(o) + json.dump(o, f) + except AttributeError as e: + self.ctx.log.error("Couldn't find attribute in object. 
Something is probably wrong", e) + except Exception as e: + self.ctx.log.exception(e) + finally: + f.flush() + + def package(self) -> int: + # Some build types don't need to be packaged by imagefactory + # @TODO remove business logic if possible + if self.ctx.image_type in ["GenericCloud", "EC2", "Azure", "Vagrant", "OCP", "RPI", "GenericArm"]: + self.target_uuid = self.base_uuid if hasattr(self, 'base_uuid') else "" + + if self.target_uuid: + return 0 + + ret, out, err, uuid = self.ctx.prepare_and_run(self.package_command(), search=True) + if uuid: + self.target_uuid = uuid.rstrip() + self.save() + return ret + + def stage(self) -> int: + """ Stage the artifacst from wherever they are (unpacking and converting if needed)""" + self.ctx.log.info("Executing staging commands") + if not hasattr(self, 'stage_commands'): + return 0 + + returns = [] + for command in self.stage_commands: # type: ignore + ret, out, err, _ = self.ctx.prepare_and_run(command, search=False) + returns.append(ret) + + if (res := all(ret > 0 for ret in returns) > 0): + raise Exception(res) + + return 0 + + def checkout_kickstarts(self) -> int: + cmd = ["git", "clone", "--branch", f"r{self.ctx.architecture.major}", + self.kickstart_repo, f"{KICKSTART_PATH}"] + ret, out, err, _ = self.ctx.prepare_and_run(cmd, search=False) + self.ctx.log.debug(out) + self.ctx.log.debug(err) + if ret > 0: + ret = self.pull_kickstarts() + return ret + + def pull_kickstarts(self) -> int: + cmd: utils.CMD_PARAM_T = ["git", "-C", f"{KICKSTART_PATH}", "reset", "--hard", "HEAD"] + ret, out, err, _ = self.ctx.prepare_and_run(cmd, search=False) + self.ctx.log.debug(out) + self.ctx.log.debug(err) + if ret == 0: + cmd = ["git", "-C", f"{KICKSTART_PATH}", "pull"] + ret, out, err, _ = self.ctx.prepare_and_run(cmd, search=False) + self.ctx.log.debug(out) + self.ctx.log.debug(err) + return ret + + def _command_args(self): + args_mapping = { + "debug": "--debug", + } + # NOTE(neil): i'm intentionally leaving this as is; deprecated 
+ return [param for name, param in args_mapping.items() if self.ctx.debug] + + def _package_args(self) -> List[str]: + if self.ctx.image_type in ["Container"]: + return ["--parameter", "compress", "xz"] + return [""] + + def _common_args(self) -> List[str]: + args = [] + if self.ctx.image_type in ["Container"]: + args = ["--parameter", "offline_icicle", "true"] + if self.ctx.image_type in ["GenericCloud", "EC2", "Vagrant", "Azure", "OCP", "RPI", "GenericArm"]: + args = ["--parameter", "generate_icicle", "false"] + return args + + def image_format(self) -> str: + mapping = { + "Container": "docker" + } + return mapping[self.ctx.image_type] if self.ctx.image_type in mapping.keys() else '' + + def kickstart_imagefactory_args(self) -> List[str]: + + if not self.kickstart_path.is_file(): + self.ctx.log.warning(f"Kickstart file is not available: {self.kickstart_path}") + if not self.ctx.debug: + self.ctx.log.warning("Exiting because debug mode is not enabled.") + exit(2) + + return ["--file-parameter", "install_script", str(self.kickstart_path)] + + def render_icicle_template(self, tdl_template) -> pathlib.Path: + output = tempfile.NamedTemporaryFile(delete=False).name + return utils.render_template(output, tdl_template, + architecture=self.ctx.architecture.name, + iso8601date=self.ctx.build_time.strftime("%Y%m%d"), + installdir=self.kickstart_dir, + major=self.ctx.architecture.major, + minor=self.ctx.architecture.minor, + release=self.ctx.release, + size="10G", + type=self.ctx.image_type, + utcnow=self.ctx.build_time, + version_variant=self.ctx.architecture.version if not self.ctx.variant else f"{self.ctx.architecture.version}-{self.ctx.variant}", + ) + + def build_command(self) -> List[str]: + build_command = ["imagefactory", "--timeout", self.ctx.timeout, + *self.command_args, "base_image", *self.common_args, + *self.kickstart_arg, self.tdl_path] + return build_command + + def package_command(self) -> List[str]: + package_command = ["imagefactory", *self.command_args, 
"target_image", + self.out_type, *self.common_args, + "--id", f"{self.base_uuid}", + *self.package_args, + "--parameter", "repository", self.ctx.outname] + return package_command + + def fix_ks(self): + cmd: utils.CMD_PARAM_T = ["sed", "-i", f"s,$basearch,{self.ctx.architecture.name},", str(self.kickstart_path)] + self.ctx.prepare_and_run(cmd, search=False) + + def setup_staging(self): + # Yes, this is gross. I'll fix it later. + if self.ctx.image_type in ["Container"]: + self.stage_commands = [ + ["tar", "-C", f"{self.ctx.outdir}", "--strip-components=1", "-x", "-f", lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", "*/layer.tar"], + ["xz", f"{self.ctx.outdir}/layer.tar"] + ] + if self.ctx.image_type in ["RPI"]: + self.stage_commands = [ + ["cp", lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", f"{self.ctx.outdir}/{self.ctx.outname}.raw"], + ["xz", f"{self.ctx.outdir}/{self.ctx.outname}.raw"] + ] + if self.ctx.image_type in ["GenericCloud", "OCP", "GenericArm"]: + self.stage_commands = [ + ["qemu-img", "convert", "-c", "-f", "raw", "-O", "qcow2", + lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", f"{self.ctx.outdir}/{self.ctx.outname}.qcow2"] + ] + if self.ctx.image_type in ["EC2"]: + self.stage_commands = [ + ["qemu-img", "convert", "-f", "raw", "-O", "qcow2", lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", f"{self.ctx.outdir}/{self.ctx.outname}.qcow2"] + ] + if self.ctx.image_type in ["Azure"]: + self.stage_commands = [ + ["/prep-azure.sh", lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", f"{STORAGE_DIR}"], + ["cp", lambda: f"{STORAGE_DIR}/{self.target_uuid}.vhd", f"{self.ctx.outdir}/{self.ctx.outname}.vhd"] + ] + if self.ctx.image_type in ["Vagrant"]: + _map = { + "Vbox": {"format": "vmdk", "provider": "virtualbox"}, + "Libvirt": {"format": "qcow2", "provider": "libvirt", "virtual_size": 10}, + "VMware": {"format": "vmdk", "provider": "vmware_desktop"} + } + output = f"{_map[self.ctx.variant]['format']}" # type: ignore + provider = 
f"{_map[self.ctx.variant]['provider']}" # type: ignore + + # pop from the options map that will be passed to the vagrant metadata.json + convert_options = _map[self.ctx.variant].pop('convertOptions') if 'convertOptions' in _map[self.ctx.variant].keys() else '' # type: ignore + + self.stage_commands = [ + ["qemu-img", "convert", "-c", "-f", "raw", "-O", output, *convert_options, + lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", f"{self.ctx.outdir}/{self.ctx.outname}.{output}"], + ["tar", "-C", self.ctx.outdir, "-czf", f"/tmp/{self.ctx.outname}.box", '.'], + ["mv", f"/tmp/{self.ctx.outname}.box", self.ctx.outdir] + ] + self.prepare_vagrant(_map[self.ctx.variant]) + + if self.stage_commands: + self.stage_commands.append(["cp", "-v", lambda: f"{STORAGE_DIR}/{self.target_uuid}.meta", f"{self.ctx.outdir}/build.meta"]) + + def prepare_vagrant(self, options): + """Setup the output directory for the Vagrant type variant, dropping templates as required""" + + templates = {} + templates['Vagrantfile'] = self.ctx.tmplenv.get_template(f"vagrant/Vagrantfile.{self.ctx.variant}") + templates['metadata.json'] = self.ctx.tmplenv.get_template('vagrant/metadata.tmpl.json') + templates['info.json'] = self.ctx.tmplenv.get_template('vagrant/info.tmpl.json') + + if self.ctx.variant == "VMware": + templates[f"{self.ctx.outname}.vmx"] = self.ctx.tmplenv.get_template('vagrant/vmx.tmpl') + + if self.ctx.variant == "Vbox": + templates['box.ovf'] = self.ctx.tmplenv.get_template('vagrant/box.tmpl.ovf') + + if self.ctx.variant == "Libvirt": + # Libvirt vagrant driver expects the qcow2 file to be called box.img. 
+ qemu_command_index = [i for i, d in enumerate(self.stage_commands) if d[0] == "qemu-img"][0] + self.stage_commands.insert(qemu_command_index+1, ["mv", f"{self.ctx.outdir}/{self.ctx.outname}.qcow2", f"{self.ctx.outdir}/box.img"]) + + for name, template in templates.items(): + utils.render_template(f"{self.ctx.outdir}/{name}", template, + name=self.ctx.outname, + arch=self.ctx.architecture.name, + options=options + ) diff --git a/iso/empanadas/empanadas/backends/interface.py b/iso/empanadas/empanadas/backends/interface.py new file mode 100644 index 0000000..e69f901 --- /dev/null +++ b/iso/empanadas/empanadas/backends/interface.py @@ -0,0 +1,40 @@ +""" +empanadas backend interface +""" +from abc import ABC, abstractmethod +from attrs import define, field + + +@define +class BackendInterface(ABC): + ctx = field(init=False) + """ + Interface to build images (or whatever) + """ + @abstractmethod + def prepare(self): + """ + Prepares the environment necessary for building the image. + This might include setting up directories, checking prerequisites, etc. + """ + + @abstractmethod + def build(self): + """ + Performs the image build operation. This is the core method + where the actual image building logic is implemented. + """ + + @abstractmethod + def stage(self): + """ + Transforms and copies artifacts from build directory to the + location expected by the builder (usually in /tmp/) + """ + + @abstractmethod + def clean(self): + """ + Cleans up any resources or temporary files created during + the image building process. 
+ """ diff --git a/iso/empanadas/empanadas/backends/kiwi.py b/iso/empanadas/empanadas/backends/kiwi.py new file mode 100644 index 0000000..e043e3a --- /dev/null +++ b/iso/empanadas/empanadas/backends/kiwi.py @@ -0,0 +1,241 @@ +"""Backend for Kiwi""" + +from .interface import BackendInterface +from .kiwi_imagedata import ImagesData + +from empanadas.builders import utils +from empanadas.common import AttributeDict + +from attrs import define, field +from functools import wraps +from typing import List + +import git +import os +import pathlib +import tempfile +import shutil +import sys + +# TODO(neil): this should be part of the config, somewhere +temp = AttributeDict( + { + "Azure": { + "kiwiType": "oem", + "kiwiProfile": "Cloud-Azure", + "fileType": "raw", # post-converted into vhd on MB boundary + "outputKey": "disk_format_image", + }, + "OCP": { + "kiwiType": "oem", + "kiwiProfile": "Cloud-OCP", + "fileType": "qcow2", + "outputKey": "disk_format_image", + }, + "GenericCloud": { + "kiwiType": "oem", + "kiwiProfile": "Cloud-GenericCloud", + "fileType": "qcow2", + "outputKey": "disk_format_image", + }, + "EC2": { + "kiwiType": "oem", + "kiwiProfile": "Cloud-EC2", + "fileType": "qcow2", + "outputKey": "disk_format_image", + }, + "Vagrant": { + "kiwiType": "oem", + "kiwiProfile": "Vagrant", + "fileType": "box", + "outputKey": "disk_format_image", + }, + "Container": { + "kiwiType": "oci", + "kiwiProfile": "Container", + "fileType": "tar.xz", + "outputKey": "container" + } + } +) + + +def ensure_kiwi_conf(func): + @wraps(func) + def wrapper(self, *args, **kwargs): + if not hasattr(self, 'kiwi_conf') or self.kiwi_conf is None: + self.kiwi_conf = temp[self.ctx.image_type] + return func(self, *args, **kwargs) + return wrapper + + +@define +class KiwiBackend(BackendInterface): + """Build an image using Kiwi""" + + build_args: List[str] = field(factory=list) + image_result: ImagesData = field(init=False) + kiwi_conf: AttributeDict = field(init=False) + + def prepare(self): 
+ """ + Checkout mock-rocky-configs and rocky-kiwi-descriptions, + init the mock env, and setup to run kiwi + """ + self.checkout_repos() + self.setup_mock() + self.setup_kiwi() + + @ensure_kiwi_conf + def build(self): + self.build_args += [f"--type={self.kiwi_conf.kiwiType}", f"--profile={self.kiwi_conf.kiwiProfile}-{self.ctx.variant}"] + + kiwi_command = [ + "kiwi-ng", "--color-output", + *self.build_args, + ] + if self.ctx.debug: + kiwi_command.append("--debug") + + kiwi_system_command = [ + "system", "build", + "--description='/builddir/rocky-kiwi-descriptions'", + "--target-dir", f"/builddir/{self.ctx.outdir}" + ] + + build_command = [ + "--shell", "--enable-network", "--", *kiwi_command, *kiwi_system_command + ] + ret, out, err = self.run_mock_command(build_command) + if ret > 0: + raise Exception(f"Kiwi build failed: code {ret}") + sys.exit(ret) + + @ensure_kiwi_conf + def stage(self): + ret, out, err = self.run_mock_command(["--copyout", f"/builddir/{self.ctx.outdir}", self.ctx.outdir]) + if ret > 0: + raise Exception("failed to copy build result out") + + kiwi_result_path = pathlib.Path(f"{self.ctx.outdir}/kiwi.result.json") + if not os.path.exists(kiwi_result_path): + raise Exception("Missing kiwi.result.json. Aborting") + + with open(kiwi_result_path, "r") as kiwi_result: + self.image_result = ImagesData.from_json(kiwi_result.read()).images + + source = self.image_result[self.kiwi_conf.outputKey].filename + filetype = self.kiwi_conf.fileType + + source = utils.remove_first_directory(source) + dest = f"{self.ctx.outdir}/{self.ctx.outname}.{filetype}" + + # NOTE(neil): only because we are preparing the 'final' image in clean step... 
+ if self.ctx.image_type == 'Container': + dest = f"{self.ctx.outdir}/{self.ctx.outname}.oci" + + try: + shutil.move(source, dest) + except Exception as e: + raise e + + # TODO(neil): refactor + if self.ctx.image_type == 'Azure': + try: + utils.resize_and_convert_raw_image_to_vhd(dest, self.ctx.outdir) + # Remove old raw image + pathlib.Path(f"{self.ctx.outdir}/{self.ctx.outname}.raw").unlink() + except Exception as e: + raise e + + def clean(self): + # TODO(neil): refactor + if self.ctx.image_type == 'Container': + # need to do this before we remove it, otherwise we have to extract from the OCI tarball + root = f"/builddir{self.ctx.outdir}" + builddir = f"{root}/build/image-root" + ret, out, err = self.run_mock_command(["--shell", "--", "tar", "-C", builddir, "-cJf", f"{root}/{self.ctx.outname}.tar.xz", "."]) + if ret > 0: + raise Exception(err) + + ret, out, err = self.run_mock_command(["--shell", "rm", "-fr", f"/builddir/{self.ctx.outdir}/build/"]) + return ret + + def run_mock_command(self, mock_command: List[str]): + mock_args = ["--configdir", "/tmp/mock-rocky-configs/etc/mock", "-r", f"rl-9-{self.ctx.architecture.name}-core-infra"] + if self.ctx.image_type != 'Container': + mock_args.append("--isolation=simple") + command = [ + "mock", + *mock_args, + *mock_command, + ] + ret, out, err, _ = self.ctx.prepare_and_run(command) + return ret, out, err + + def setup_mock(self): + # TODO(neil): add error checking + ret, out, err = self.run_mock_command(["--init"]) + + packages = [ + "kiwi-boxed-plugin", + "kiwi-cli", + "git", + "dracut-kiwi-live", + "fuse-overlayfs", + "kiwi-systemdeps-bootloaders", + "kiwi-systemdeps-containers", + "kiwi-systemdeps-core", + "kiwi-systemdeps-disk-images", + "kiwi-systemdeps-filesystems", + "kiwi-systemdeps-image-validation", + "kiwi-systemdeps-iso-media", + "epel-release", + "rocky-release-core" + ] + ret, out, err = self.run_mock_command(["--install", *packages]) + + ret, out, err = self.run_mock_command(["--copyin", 
"/tmp/rocky-kiwi-descriptions", "/builddir/"]) + return ret + + def checkout_repos(self): + """ + Checkout sig_core/mock-rocky-configs and sig_core/rocky-kiwi-descriptions to /tmp + """ + repos = { + "mock-rocky-configs": "main", + "rocky-kiwi-descriptions": "r9" + } + + for repo, branch in repos.items(): + repo_url = f"https://git.resf.org/sig_core/{repo}" + clone_dir = f"/tmp/{repo}" + + if os.path.isdir(os.path.join(clone_dir, ".git")): + try: + # The directory exists and is a git repository, so attempt to pull the latest changes + git.Repo(clone_dir).remotes.origin.pull(branch) + self.ctx.log.info(f"pulled the latest changes for {branch} branch in {clone_dir}") + except Exception as e: + raise Exception(f"Failed to pull the repository: {str(e)}") + finally: + continue + + try: + git.Repo.clone_from(repo_url, clone_dir, branch=branch) + print(f"Repository cloned into {clone_dir}") + except Exception as e: + print(f"Failed to clone repository: {str(e)}") + + def setup_kiwi(self): + self.ctx.log.info("Generating kiwi.yml from template") + template = self.ctx.tmplenv.get_template('kiwi/kiwi.yml.j2') + output = tempfile.NamedTemporaryFile(delete=False).name + res = utils.render_template(output, template) + + self.ctx.log.info("Copying generated kiwi.yml into build root") + ret, out, err = self.run_mock_command(["--copyin", res, "/etc/kiwi.yml"]) + if ret > 0: + raise Exception("Failed to configure kiwi") + + self.ctx.log.info("Finished setting up kiwi") diff --git a/iso/empanadas/empanadas/backends/kiwi_imagedata.py b/iso/empanadas/empanadas/backends/kiwi_imagedata.py new file mode 100644 index 0000000..f815cd1 --- /dev/null +++ b/iso/empanadas/empanadas/backends/kiwi_imagedata.py @@ -0,0 +1,24 @@ +from attrs import define, field +from typing import Dict + +import json + + +@define(auto_attribs=True, kw_only=True) +class ImageInfo: + compress: bool + filename: str + shasum: bool + use_for_bundle: bool + + +@define(auto_attribs=True, kw_only=True) +class ImagesData: 
+ images: Dict[str, ImageInfo] = field(factory=dict) + + @staticmethod + def from_json(data: str) -> 'ImagesData': + json_data = json.loads(data) + images = {key: ImageInfo(**value) for key, value in json_data.items()} + + return ImagesData(images=images) diff --git a/iso/empanadas/empanadas/builders/__init__.py b/iso/empanadas/empanadas/builders/__init__.py new file mode 100644 index 0000000..07dc0ee --- /dev/null +++ b/iso/empanadas/empanadas/builders/__init__.py @@ -0,0 +1 @@ +from .imagebuild import ImageBuild diff --git a/iso/empanadas/empanadas/builders/imagebuild.py b/iso/empanadas/empanadas/builders/imagebuild.py new file mode 100644 index 0000000..3e128f2 --- /dev/null +++ b/iso/empanadas/empanadas/builders/imagebuild.py @@ -0,0 +1,118 @@ +"""Build an image with a given backend""" + +import datetime +import logging +import os +import pathlib + +from attrs import define, field + +from empanadas.backends import BackendInterface, KiwiBackend +from empanadas.common import Architecture +from empanadas.common import _rootdir +from . 
import utils + +from jinja2 import Environment, FileSystemLoader, Template +from typing import List, Optional, Tuple, Callable + + +@define(kw_only=True) +class ImageBuild: # pylint: disable=too-few-public-methods + """Image builder using a given backend""" + tmplenv: Environment = field(init=False) + + # Only things we know we're keeping in this class here + architecture: Architecture = field() + backend: BackendInterface = field() + build_time: datetime.datetime = field() + debug: bool = field(default=False) + log: logging.Logger = field() + release: int = field(default=0) + timeout: str = field(default='3600') + + image_type: str = field() # the type of the image + type_variant: str = field(init=False) + variant: Optional[str] = field() + + # Kubernetes job template + job_template: Optional[Template] = field(init=False) # the kube Job tpl + + # Commands to stage artifacts + + # Where the artifacts should go to + outdir: pathlib.Path = field(init=False) + outname: str = field(init=False) + + def __attrs_post_init__(self): + self.backend.ctx = self + + file_loader = FileSystemLoader(f"{_rootdir}/templates") + self.tmplenv = Environment(loader=file_loader) + + self.job_template = self.tmplenv.get_template('kube/Job.tmpl') + + self.type_variant = self.type_variant_name() + self.outdir, self.outname = self.output_name() + + def output_name(self) -> Tuple[pathlib.Path, str]: + directory = f"Rocky-{self.architecture.major}-{self.type_variant}-{self.architecture.version}-{self.build_time.strftime('%Y%m%d')}.{self.release}" + name = f"{directory}.{self.architecture.name}" + outdir = pathlib.Path("/tmp/", directory) + return outdir, name + + def type_variant_name(self): + return self.image_type if not self.variant else f"{self.image_type}-{self.variant}" + + def prepare_and_run(self, command: utils.CMD_PARAM_T, search: Callable = None) -> utils.CMD_RESULT_T: + return utils.runCmd(self, self.prepare_command(command), search) + + def prepare_command(self, command_list: 
utils.CMD_PARAM_T) -> List[str]: + """ + Commands may be a callable, which should be a lambda to be evaluated at + preparation time with available locals. This can be used to, among + other things, perform lazy evaluations of f-strings which have values + not available at assignment time. e.g., filling in a second command + with a value extracted from the previous step or command. + """ + + r = [] + for c in command_list: + if callable(c) and c.__name__ == '': + r.append(c()) + else: + r.append(str(c)) + return r + + def render_kubernetes_job(self): + # TODO(neil): should this be put in the builder class itself to return the right thing for us? + if self.backend == KiwiBackend: + self.log.error("Kube not implemented for Kiwi") + + commands = [self.backend.build_command(), self.backend.package_command(), self.backend.copy_command()] + if not self.job_template: + return None + template = self.job_template.render( + architecture=self.architecture.name, + backoffLimit=4, + buildTime=self.build_time.strftime("%s"), + command=commands, + imageName="ghcr.io/rockylinux/sig-core-toolkit:latest", + jobname="buildimage", + namespace="empanadas", + major=self.architecture.major, + minor=self.architecture.minor, + restartPolicy="Never", + ) + return template + + def upload(self, skip=False) -> int: + if not skip: + self.log.info("Copying files to output directory") + copy_command = ["aws", "s3", "cp", "--recursive", f"{self.outdir}/", + f"s3://resf-empanadas/buildimage-{self.architecture.version}-{self.architecture.name}/{self.outname}/{self.build_time.strftime('%s')}/" + ] + ret, out, err, _ = self.prepare_and_run(copy_command, search=False) + return ret + + self.ctx.log.info(f"Build complete! 
Output available in {self.ctx.outdir}/") + return 0 diff --git a/iso/empanadas/empanadas/builders/utils.py b/iso/empanadas/empanadas/builders/utils.py new file mode 100644 index 0000000..ac35e11 --- /dev/null +++ b/iso/empanadas/empanadas/builders/utils.py @@ -0,0 +1,139 @@ +import json +import os +import logging +import pathlib +import subprocess +import sys + +from typing import Callable, List, Tuple, Union + +CMD_PARAM_T = List[Union[str, Callable[..., str]]] + +STR_NONE_T = Union[bytes, None] +BYTES_NONE_T = Union[bytes, None] +# Tuple of int, stdout, stderr, uuid +CMD_RESULT_T = Tuple[int, BYTES_NONE_T, BYTES_NONE_T, STR_NONE_T] + + +log = logging.getLogger(__name__) +log.setLevel(logging.INFO) +handler = logging.StreamHandler(sys.stdout) +handler.setLevel(logging.INFO) +formatter = logging.Formatter( + '%(asctime)s :: %(name)s :: %(message)s', + '%Y-%m-%d %H:%M:%S' +) +handler.setFormatter(formatter) +log.addHandler(handler) + + +def render_template(path, template, **kwargs) -> pathlib.Path: + with open(path, "wb") as f: + _template = template.render(**kwargs) + f.write(_template.encode()) + f.flush() + output = pathlib.Path(path) + if not output.exists(): + raise Exception("Failed to template") + return output + + +def runCmd(ctx, prepared_command: List[str], search: Callable = None) -> CMD_RESULT_T: + ctx.log.info(f"Running command: {' '.join(prepared_command)}") + + kwargs = { + "stderr": subprocess.PIPE, + "stdout": subprocess.PIPE + } + + if ctx.debug: + del kwargs["stderr"] + + with subprocess.Popen(prepared_command, **kwargs) as p: + uuid = None + # @TODO implement this as a callback? 
+ if search: + for _, line in enumerate(p.stdout): # type: ignore + ln = line.decode() + if ln.startswith("UUID: "): + uuid = ln.split(" ")[-1] + ctx.log.debug(f"found uuid: {uuid}") + + out, err = p.communicate() + res = p.wait(), out, err, uuid + + if res[0] > 0: + ctx.log.error(f"Problem while executing command: '{prepared_command}'") + if search and not res[3]: + ctx.log.error("UUID not found in stdout. Dumping stdout and stderr") + log_subprocess(ctx, res) + + return res + + +def log_subprocess(ctx, result: CMD_RESULT_T): + def log_lines(title, lines): + ctx.log.info(f"====={title}=====") + ctx.log.info(lines.decode()) + ctx.log.info(f"Command return code: {result[0]}") + stdout = result[1] + stderr = result[2] + if stdout: + log_lines("Command STDOUT", stdout) + if stderr: + log_lines("Command STDERR", stderr) + + +def remove_first_directory(path): + p = pathlib.Path(path) + # Check if the path is absolute + if p.is_absolute(): + # For an absolute path, start the new path with the root + new_path = pathlib.Path(p.root, *p.parts[2:]) + else: + # For a relative path, simply skip the first part + new_path = pathlib.Path(*p.parts[1:]) + return new_path + + +def resize_and_convert_raw_image_to_vhd(raw_image_path, outdir=None): + log.info(f"Will resize and convert {raw_image_path}") + MB = 1024 * 1024 # For calculations - 1048576 bytes + + if outdir is None: + outdir = os.getcwd() + + # Ensure the output directory exists + pathlib.Path(outdir).mkdir(parents=True, exist_ok=True) + + # Getting the size of the raw image + result = subprocess.run(['qemu-img', 'info', '-f', 'raw', '--output', 'json', raw_image_path], capture_output=True, text=True) + if result.returncode != 0: + log.error("Error getting image info") + raise Exception(result) + + image_info = json.loads(result.stdout) + size = int(image_info['virtual-size']) + + # Calculate the new size rounded to the nearest MB + rounded_size = ((size + MB - 1) // MB) * MB + + # Prepare output filename (.raw replaced by 
.vhd) + outfilename = pathlib.Path(raw_image_path).name.replace("raw", "vhd") + outfile = os.path.join(outdir, outfilename) + + # Resize the image + log.info(f"Resizing {raw_image_path} to nearest MB boundary") + result = subprocess.run(['qemu-img', 'resize', '-f', 'raw', raw_image_path, str(rounded_size)]) + if result.returncode != 0: + log.error("Error resizing image") + raise Exception(result) + + # Convert the image + log.info(f"Converting {raw_image_path} to vhd") + result = subprocess.run(['qemu-img', 'convert', '-f', 'raw', '-o', 'subformat=fixed,force_size', '-O', 'vpc', raw_image_path, outfile]) + if result.returncode != 0: + log.error("Error converting image to VHD format") + raise Exception(result) + + log.info(f"Image converted and saved to {outfile}") diff --git a/iso/empanadas/empanadas/common.py b/iso/empanadas/empanadas/common.py index bfb19b0..f2a43e5 100644 --- a/iso/empanadas/empanadas/common.py +++ b/iso/empanadas/empanadas/common.py @@ -1,21 +1,22 @@ # All imports are here import glob -import hashlib -import logging -import os import platform import time from collections import defaultdict -from typing import Tuple +from attrs import define, field + import rpm import yaml # An implementation from the Fabric python library -class AttributeDict(defaultdict): - def __init__(self): - super(AttributeDict, self).__init__(AttributeDict) +class AttributeDict(dict): + def __init__(self, *args, **kwargs): + super(AttributeDict, self).__init__(*args, **kwargs) + for key, value in self.items(): + if isinstance(value, dict): + self[key] = AttributeDict(value) def __getattr__(self, key): try: @@ -26,6 +27,11 @@ class AttributeDict(defaultdict): def __setattr__(self, key, value): self[key] = value + def __setitem__(self, key, value): + if isinstance(value, dict): + value = AttributeDict(value) + super(AttributeDict, self).__setitem__(key, value) + # These are a bunch of colors we may use in terminal output class Color: @@ -59,6 +65,7 @@ config = { 
"category_stub": "mirror/pub/rocky", "sig_category_stub": "mirror/pub/sig", "repo_base_url": "https://yumrepofs.build.resf.org/v1/projects", + "staging_base_url": "https://dl.rockylinux.org/stg/rocky", "mock_work_root": "/builddir", "container": "centos:stream9", "distname": "Rocky Linux", @@ -107,7 +114,7 @@ for conf in glob.iglob(f"{_rootdir}/sig/*.yaml"): ALLOWED_TYPE_VARIANTS = { "Azure": ["Base", "LVM"], - "Container": ["Base", "Minimal", "UBI", "WSL"], + "Container": ["Base", "Minimal", "UBI", "WSL", "Toolbox"], "EC2": ["Base", "LVM"], "GenericCloud": ["Base", "LVM"], "Vagrant": ["Libvirt", "Vbox", "VMware"], @@ -120,7 +127,7 @@ ALLOWED_TYPE_VARIANTS = { def valid_type_variant(_type: str, variant: str = "") -> bool: if _type not in ALLOWED_TYPE_VARIANTS: raise Exception(f"Type is invalid: ({_type}, {variant})") - if ALLOWED_TYPE_VARIANTS[_type] == None: + if ALLOWED_TYPE_VARIANTS[_type] is None: if variant is not None: raise Exception(f"{_type} Type expects no variant type.") return True @@ -135,9 +142,6 @@ def valid_type_variant(_type: str, variant: str = "") -> bool: return True -from attrs import define, field - - @define(kw_only=True) class Architecture: name: str = field() diff --git a/iso/empanadas/empanadas/configs/el8.yaml b/iso/empanadas/empanadas/configs/el8.yaml index 3b49d3e..fb91c2a 100644 --- a/iso/empanadas/empanadas/configs/el8.yaml +++ b/iso/empanadas/empanadas/configs/el8.yaml @@ -18,7 +18,7 @@ - x86_64 - aarch64 provide_multilib: False - project_id: 'e9cfc87c-d2d2-42d5-a121-852101f1a966' + project_id: 'df5bcbfc-ba83-4da8-84d6-ae0168921b4d' repo_symlinks: devel: 'Devel' NFV: 'nfv' diff --git a/iso/empanadas/empanadas/scripts/build_image.py b/iso/empanadas/empanadas/scripts/build_image.py index 79aca71..3da3518 100644 --- a/iso/empanadas/empanadas/scripts/build_image.py +++ b/iso/empanadas/empanadas/scripts/build_image.py @@ -1,44 +1,49 @@ -# Builds an image given a version, type, variant, and architecture +# Builds an image given a version, 
type, variant, and architecture # Defaults to the running host's architecture import argparse import datetime -import json import logging -import os -import pathlib import platform -import subprocess import sys -import tempfile -import time - -from attrs import define, Factory, field, asdict -from botocore import args -from jinja2 import Environment, FileSystemLoader, Template -from typing import Callable, List, NoReturn, Optional, Tuple, IO, Union from empanadas.common import Architecture, rldict, valid_type_variant -from empanadas.common import _rootdir +from empanadas.builders import ImageBuild +from empanadas.backends import ImageFactoryBackend, KiwiBackend parser = argparse.ArgumentParser(description="ISO Compose") -parser.add_argument('--version', type=str, help="Release Version (8.6, 9.1)", required=True) +parser.add_argument('--version', + type=str, help="Release Version (8.6, 9.1)", required=True) parser.add_argument('--rc', action='store_true', help="Release Candidate") -parser.add_argument('--kickstartdir', action='store_true', help="Use the kickstart dir instead of the os dir for repositories") +parser.add_argument('--kickstartdir', action='store_true', + help="Use the kickstart dir instead of the os dir") parser.add_argument('--debug', action='store_true', help="debug?") -parser.add_argument('--type', type=str, help="Image type (container, genclo, azure, aws, vagrant)", required=True) +parser.add_argument('--skip', type=str, + help="what stage(s) to skip", + required=False) +parser.add_argument('--type', type=str, + help="Image type (container, genclo, azure, aws, vagrant)", + required=True) parser.add_argument('--variant', type=str, help="", required=False) -parser.add_argument('--release', type=str, help="Image release for subsequent builds with the same date stamp (rarely needed)", required=False) -parser.add_argument('--kube', action='store_true', help="output as a K8s job(s)", required=False) -parser.add_argument('--timeout', type=str, 
help="change timeout for imagefactory build process (default 3600)", required=False, default='3600') +parser.add_argument('--release', type=str, + help="Image release for builds with the same date stamp", + required=False) +parser.add_argument('--kube', action='store_true', + help="output as a K8s job(s)", + required=False) +parser.add_argument('--timeout', type=str, + help="change timeout for imagefactory build process", + required=False, default='3600') +parser.add_argument('--backend', type=str, + help="which backend to use (kiwi|imagefactory)", + required=False, default='kiwi') results = parser.parse_args() rlvars = rldict[results.version] major = rlvars["major"] - debug = results.debug log = logging.getLogger(__name__) @@ -52,405 +57,6 @@ formatter = logging.Formatter( handler.setFormatter(formatter) log.addHandler(handler) -STORAGE_DIR = pathlib.Path("/var/lib/imagefactory/storage") -KICKSTART_PATH = pathlib.Path(os.environ.get("KICKSTART_PATH", "/kickstarts")) -BUILDTIME = datetime.datetime.utcnow() - - -CMD_PARAM_T = List[Union[str, Callable[..., str]]] - -@define(kw_only=True) -class ImageBuild: - architecture: Architecture = field() - base_uuid: Optional[str] = field(default="") - cli_args: argparse.Namespace = field() - command_args: List[str] = field(factory=list) - common_args: List[str] = field(factory=list) - debug: bool = field(default=False) - image_type: str = field() - job_template: Optional[Template] = field(init=False) - kickstart_arg: List[str] = field(factory=list) - kickstart_path: pathlib.Path = field(init=False) - metadata: pathlib.Path = field(init=False) - out_type: str = field(init=False) - outdir: pathlib.Path = field(init=False) - outname: str = field(init=False) - package_args: List[str] = field(factory=list) - release: int = field(default=0) - stage_commands: Optional[List[List[Union[str,Callable]]]] = field(init=False) - target_uuid: Optional[str] = field(default="") - tdl_path: pathlib.Path = field(init=False) - template: Template 
= field() - timeout: str = field(default='3600') - type_variant: str = field(init=False) - variant: Optional[str] = field() - - def __attrs_post_init__(self): - self.tdl_path = self.render_icicle_template() - if not self.tdl_path: - exit(2) - self.type_variant = self.type_variant_name() - self.outdir, self.outname = self.output_name() - self.out_type = self.image_format() - self.command_args = self._command_args() - self.package_args = self._package_args() - self.common_args = self._common_args() - - self.metadata = pathlib.Path(self.outdir, ".imagefactory-metadata.json") - - self.kickstart_path = pathlib.Path(f"{KICKSTART_PATH}/Rocky-{self.architecture.major}-{self.type_variant}.ks") - - self.checkout_kickstarts() - self.kickstart_arg = self.kickstart_imagefactory_args() - - try: - os.mkdir(self.outdir) - except FileExistsError as e: - log.info("Directory already exists for this release. If possible, previously executed steps may be skipped") - except Exception as e: - log.exception("Some other exception occured while creating the output directory", e) - return 0 - - if os.path.exists(self.metadata): - with open(self.metadata, "r") as f: - try: - o = json.load(f) - self.base_uuid = o['base_uuid'] - self.target_uuid = o['target_uuid'] - except json.decoder.JSONDecodeError as e: - log.exception("Couldn't decode metadata file", e) - finally: - f.flush() - - # Yes, this is gross. I'll fix it later. 
- if self.image_type in ["Container"]: - self.stage_commands = [ - ["tar", "-C", f"{self.outdir}", "--strip-components=1", "-x", "-f", lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", "*/layer.tar"], - ["xz", f"{self.outdir}/layer.tar"] - ] - if self.image_type in ["RPI"]: - self.stage_commands = [ - ["cp", lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", f"{self.outdir}/{self.outname}.raw"], - ["xz", f"{self.outdir}/{self.outname}.raw"] - ] - if self.image_type in ["GenericCloud", "OCP", "GenericArm"]: - self.stage_commands = [ - ["qemu-img", "convert", "-c", "-f", "raw", "-O", "qcow2", lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", f"{self.outdir}/{self.outname}.qcow2"] - ] - if self.image_type in ["EC2"]: - self.stage_commands = [ - ["qemu-img", "convert", "-f", "raw", "-O", "qcow2", lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", f"{self.outdir}/{self.outname}.qcow2"] - ] - if self.image_type in ["Azure"]: - self.stage_commands = [ - ["/prep-azure.sh", lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", f"{STORAGE_DIR}"], - ["cp", lambda: f"{STORAGE_DIR}/{self.target_uuid}.vhd", f"{self.outdir}/{self.outname}.vhd"] - ] - if self.image_type in ["Vagrant"]: - _map = { - "Vbox": {"format": "vmdk", "provider": "virtualbox"}, - "Libvirt": {"format": "qcow2", "provider": "libvirt", "virtual_size": 10}, - "VMware": {"format": "vmdk", "provider": "vmware_desktop"} - } - output = f"{_map[self.variant]['format']}" #type: ignore - provider = f"{_map[self.variant]['provider']}" # type: ignore - - # pop from the options map that will be passed to the vagrant metadata.json - convert_options = _map[self.variant].pop('convertOptions') if 'convertOptions' in _map[self.variant].keys() else '' #type: ignore - - - self.stage_commands = [ - ["qemu-img", "convert", "-c", "-f", "raw", "-O", output, *convert_options, lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", f"{self.outdir}/{self.outname}.{output}"], - ["tar", "-C", self.outdir, "-czf", f"/tmp/{self.outname}.box", '.'], - 
["mv", f"/tmp/{self.outname}.box", self.outdir] - ] - self.prepare_vagrant(_map[self.variant]) - - if self.stage_commands: - self.stage_commands.append(["cp", "-v", lambda: f"{STORAGE_DIR}/{self.target_uuid}.meta", f"{self.outdir}/build.meta"]) - - - def prepare_vagrant(self, options): - """Setup the output directory for the Vagrant type variant, dropping templates as required""" - file_loader = FileSystemLoader(f"{_rootdir}/templates") - tmplenv = Environment(loader=file_loader) - - templates = {} - templates['Vagrantfile'] = tmplenv.get_template(f"vagrant/Vagrantfile.{self.variant}") - templates['metadata.json'] = tmplenv.get_template('vagrant/metadata.tmpl.json') - templates['info.json'] = tmplenv.get_template('vagrant/info.tmpl.json') - - if self.variant == "VMware": - templates[f"{self.outname}.vmx"] = tmplenv.get_template('vagrant/vmx.tmpl') - - if self.variant == "Vbox": - templates['box.ovf'] = tmplenv.get_template('vagrant/box.tmpl.ovf') - - if self.variant == "Libvirt": - # Libvirt vagrant driver expects the qcow2 file to be called box.img. 
- qemu_command_index = [i for i, d in enumerate(self.stage_commands) if d[0] == "qemu-img"][0] - self.stage_commands.insert(qemu_command_index+1, ["mv", f"{self.outdir}/{self.outname}.qcow2", f"{self.outdir}/box.img"]) - - for name, template in templates.items(): - self.render_template(f"{self.outdir}/{name}", template, - name=self.outname, - arch=self.architecture.name, - options=options - ) - - def checkout_kickstarts(self) -> int: - cmd = ["git", "clone", "--branch", f"r{self.architecture.major}", rlvars['livemap']['git_repo'], f"{KICKSTART_PATH}"] - ret, out, err, _ = self.runCmd(cmd, search=False) - log.debug(out) - log.debug(err) - if ret > 0: - ret = self.pull_kickstarts() - return ret - - def pull_kickstarts(self) -> int: - cmd: CMD_PARAM_T = ["git", "-C", f"{KICKSTART_PATH}", "reset", "--hard", "HEAD"] - ret, out, err, _ = self.runCmd(cmd, search=False) - log.debug(out) - log.debug(err) - if ret == 0: - cmd = ["git", "-C", f"{KICKSTART_PATH}", "pull"] - ret, out, err, _ = self.runCmd(cmd, search=False) - log.debug(out) - log.debug(err) - return ret - - - def output_name(self) -> Tuple[pathlib.Path, str]: - directory = f"Rocky-{self.architecture.major}-{self.type_variant}-{self.architecture.version}-{BUILDTIME.strftime('%Y%m%d')}.{self.release}" - name = f"{directory}.{self.architecture.name}" - outdir = pathlib.Path(f"/tmp/", directory) - return outdir, name - - def type_variant_name(self): - return self.image_type if not self.variant else f"{self.image_type}-{self.variant}" - - def _command_args(self): - args_mapping = { - "debug": "--debug", - } - return [param for name, param in args_mapping.items() if getattr(self.cli_args, name)] - - def _package_args(self) -> List[str]: - if self.image_type in ["Container"]: - return ["--parameter", "compress", "xz"] - return [""] - - def _common_args(self) -> List[str]: - args = [] - if self.image_type in ["Container"]: - args = ["--parameter", "offline_icicle", "true"] - if self.image_type in ["GenericCloud", 
"EC2", "Vagrant", "Azure", "OCP", "RPI", "GenericArm"]: - args = ["--parameter", "generate_icicle", "false"] - return args - - def image_format(self) -> str: - mapping = { - "Container": "docker" - } - return mapping[self.image_type] if self.image_type in mapping.keys() else '' - - def kickstart_imagefactory_args(self) -> List[str]: - - if not self.kickstart_path.is_file(): - log.warning(f"Kickstart file is not available: {self.kickstart_path}") - if not debug: - log.warning("Exiting because debug mode is not enabled.") - exit(2) - - return ["--file-parameter", "install_script", str(self.kickstart_path)] - - def render_template(self, path, template, **kwargs) -> pathlib.Path: - with open(path, "wb") as f: - _template = template.render(**kwargs) - f.write(_template.encode()) - f.flush() - output = pathlib.Path(path) - if not output.exists(): - log.error("Failed to write template") - raise Exception("Failed to template") - return output - - def render_icicle_template(self) -> pathlib.Path: - output = tempfile.NamedTemporaryFile(delete=False).name - return self.render_template(output, self.template, - architecture=self.architecture.name, - iso8601date=BUILDTIME.strftime("%Y%m%d"), - installdir="kickstart" if self.cli_args.kickstartdir else "os", - major=self.architecture.major, - minor=self.architecture.minor, - release=self.release, - size="10G", - type=self.image_type, - utcnow=BUILDTIME, - version_variant=self.architecture.version if not self.variant else f"{self.architecture.version}-{self.variant}", - ) - - def build_command(self) -> List[str]: - build_command = ["imagefactory", "--timeout", self.timeout, *self.command_args, "base_image", *self.common_args, *self.kickstart_arg, self.tdl_path] - return build_command - def package_command(self) -> List[str]: - package_command = ["imagefactory", *self.command_args, "target_image", self.out_type, *self.common_args, - "--id", f"{self.base_uuid}", - *self.package_args, - "--parameter", "repository", self.outname, - ] - 
return package_command - - def copy_command(self) -> List[str]: - - copy_command = ["aws", "s3", "cp", "--recursive", f"{self.outdir}/", - f"s3://resf-empanadas/buildimage-{self.architecture.version}-{self.architecture.name}/{ self.outname }/{ BUILDTIME.strftime('%s') }/" - ] - - return copy_command - - def build(self) -> int: - if self.base_uuid: - return 0 - - self.fix_ks() - - ret, out, err, uuid = self.runCmd(self.build_command()) - if uuid: - self.base_uuid = uuid.rstrip() - self.save() - return ret - - def package(self) -> int: - # Some build types don't need to be packaged by imagefactory - # @TODO remove business logic if possible - if self.image_type in ["GenericCloud", "EC2", "Azure", "Vagrant", "OCP", "RPI", "GenericArm"]: - self.target_uuid = self.base_uuid if hasattr(self, 'base_uuid') else "" - - if self.target_uuid: - return 0 - - ret, out, err, uuid = self.runCmd(self.package_command()) - if uuid: - self.target_uuid = uuid.rstrip() - self.save() - return ret - - def stage(self) -> int: - """ Stage the artifacst from wherever they are (unpacking and converting if needed)""" - if not hasattr(self,'stage_commands'): - return 0 - - returns = [] - for command in self.stage_commands: #type: ignore - ret, out, err, _ = self.runCmd(command, search=False) - returns.append(ret) - - return all(ret > 0 for ret in returns) - - def copy(self, skip=False) -> int: - # move or unpack if necessary - log.info("Executing staging commands") - if (stage := self.stage() > 0): - raise Exception(stage) - - if not skip: - log.info("Copying files to output directory") - ret, out, err, _ = self.runCmd(self.copy_command(), search=False) - return ret - - log.info(f"Build complete! 
Output available in {self.outdir}/") - return 0 - - def runCmd(self, command: CMD_PARAM_T, search: bool = True) -> Tuple[int, Union[bytes,None], Union[bytes,None], Union[str,None]]: - prepared, _ = self.prepare_command(command) - log.info(f"Running command: {' '.join(prepared)}") - - kwargs = { - "stderr": subprocess.PIPE, - "stdout": subprocess.PIPE - } - if debug: del kwargs["stderr"] - - with subprocess.Popen(prepared, **kwargs) as p: - uuid = None - # @TODO implement this as a callback? - if search: - for _, line in enumerate(p.stdout): # type: ignore - ln = line.decode() - if ln.startswith("UUID: "): - uuid = ln.split(" ")[-1] - log.debug(f"found uuid: {uuid}") - - out, err = p.communicate() - res = p.wait(), out, err, uuid - - if res[0] > 0: - log.error(f"Problem while executing command: '{prepared}'") - if search and not res[3]: - log.error("UUID not found in stdout. Dumping stdout and stderr") - self.log_subprocess(res) - - return res - - def prepare_command(self, command_list: CMD_PARAM_T) -> Tuple[List[str],List[None]]: - """ - Commands may be a callable, which should be a lambda to be evaluated at - preparation time with available locals. This can be used to, among - other things, perform lazy evaluations of f-strings which have values - not available at assignment time. e.g., filling in a second command - with a value extracted from the previous step or command. 
- - """ - - r = [] - return r, [r.append(c()) if (callable(c) and c.__name__ == '<lambda>') else r.append(str(c)) for c in command_list] - - def log_subprocess(self, result: Tuple[int, Union[bytes, None], Union[bytes, None], Union[str, None]]): - def log_lines(title, lines): - log.info(f"====={title}=====") - log.info(lines.decode()) - log.info(f"Command return code: {result[0]}") - stdout = result[1] - stderr = result[2] - if stdout: - log_lines("Command STDOUT", stdout) - if stderr: - log_lines("Command STDERR", stderr) - - def fix_ks(self): - cmd: CMD_PARAM_T = ["sed", "-i", f"s,$basearch,{self.architecture.name},", str(self.kickstart_path)] - self.runCmd(cmd, search=False) - - def render_kubernetes_job(self): - commands = [self.build_command(), self.package_command(), self.copy_command()] - if not self.job_template: - return None - template = self.job_template.render( - architecture=self.architecture.name, - backoffLimit=4, - buildTime=BUILDTIME.strftime("%s"), - command=commands, - imageName="ghcr.io/rockylinux/sig-core-toolkit:latest", - jobname="buildimage", - namespace="empanadas", - major=major, - restartPolicy="Never", - ) - return template - - def save(self): - with open(self.metadata, "w") as f: - try: - o = { name: getattr(self, name) for name in ["base_uuid", "target_uuid"] } - log.debug(o) - json.dump(o, f) - except AttributeError as e: - log.error("Couldn't find attribute in object. 
Something is probably wrong", e) - except Exception as e: - log.exception(e) - finally: - f.flush() def run(): try: @@ -459,28 +65,51 @@ def run(): log.exception(e) exit(2) - file_loader = FileSystemLoader(f"{_rootdir}/templates") - tmplenv = Environment(loader=file_loader) - tdl_template = tmplenv.get_template('icicle/tdl.xml.tmpl') - arches = rlvars['allowed_arches'] if results.kube else [platform.uname().machine] for architecture in arches: + if results.backend == "kiwi": + backend = KiwiBackend() + else: + backend = ImageFactoryBackend( + kickstart_dir="kickstart" if results.kickstartdir else "os", + kickstart_repo=rlvars['livemap']['git_repo'] + ) IB = ImageBuild( architecture=Architecture.from_version(architecture, rlvars['revision']), - cli_args=results, debug=results.debug, - image_type=results.type, + image_type=results.type, release=results.release if results.release else 0, - template=tdl_template, variant=results.variant, + build_time=datetime.datetime.utcnow(), + backend=backend, + log=log, ) - if results.kube: - IB.job_template = tmplenv.get_template('kube/Job.tmpl') - #commands = IB.kube_commands() - print(IB.render_kubernetes_job()) - else: - ret = IB.build() - ret = IB.package() - ret = IB.copy() + if results.kube: + # commands = IB.kube_commands() + print(IB.render_kubernetes_job()) + sys.exit(0) + + skip_stages = results.skip.split(',') if results.skip else [] + stages = ["prepare", "build", "clean", "stage"] + for i, stage in enumerate(stages): + skip_stage = stage in skip_stages + + log.info(f"Stage {i} - {stage}{' SKIP' if skip_stage else ''}") + + if skip_stage: + continue + + method = getattr(IB.backend, stage) + if callable(method): + method() + else: + log.fatal(f"Unable to execute {stage}") + + if 'upload' in skip_stages: + return + + log.info("Final stage - Upload") + + IB.upload(skip='upload' in skip_stages) diff --git a/iso/empanadas/empanadas/scripts/build_iso.py b/iso/empanadas/empanadas/scripts/build_iso.py index 6da2e4d..6f6372a 
100755 --- a/iso/empanadas/empanadas/scripts/build_iso.py +++ b/iso/empanadas/empanadas/scripts/build_iso.py @@ -2,8 +2,7 @@ import argparse -from empanadas.common import * -from empanadas.util import Checks +from empanadas.common import config, rldict from empanadas.util import IsoBuild parser = argparse.ArgumentParser(description="ISO Compose") @@ -29,5 +28,6 @@ a = IsoBuild( logger=results.logger, ) + def run(): a.run() diff --git a/iso/empanadas/empanadas/scripts/sync_from_peridot.py b/iso/empanadas/empanadas/scripts/sync_from_peridot.py index 482a72a..0942b9b 100755 --- a/iso/empanadas/empanadas/scripts/sync_from_peridot.py +++ b/iso/empanadas/empanadas/scripts/sync_from_peridot.py @@ -33,6 +33,7 @@ parser.add_argument('--logger', type=str) parser.add_argument('--disable-gpg-check', action='store_false') parser.add_argument('--disable-repo-gpg-check', action='store_false') parser.add_argument('--clean-old-packages', action='store_true') +parser.add_argument('--use-staging', action='store_true') # Parse them results = parser.parse_args() @@ -64,6 +65,7 @@ a = RepoSync( gpg_check=results.disable_gpg_check, repo_gpg_check=results.disable_repo_gpg_check, reposync_clean_old=results.clean_old_packages, + use_staging=results.use_staging, ) def run(): diff --git a/iso/empanadas/empanadas/templates/buildImage.tmpl.sh b/iso/empanadas/empanadas/templates/buildImage.tmpl.sh index be5273d..785e763 100644 --- a/iso/empanadas/empanadas/templates/buildImage.tmpl.sh +++ b/iso/empanadas/empanadas/templates/buildImage.tmpl.sh @@ -22,7 +22,7 @@ lorax --product="${PRODUCT}" \ --isfinal \ {%- endif %} {%- for repo in repos %} - --source={{ repo.url }} \ + --source='{{ repo.url }}' \ {%- endfor %} {%- if squashfs_only %} --squashfs-only \ diff --git a/iso/empanadas/empanadas/templates/kiwi/kiwi.yml.j2 b/iso/empanadas/empanadas/templates/kiwi/kiwi.yml.j2 new file mode 100644 index 0000000..963b56c --- /dev/null +++ b/iso/empanadas/empanadas/templates/kiwi/kiwi.yml.j2 @@ -0,0 +1,162 
@@ +# KIWI - Build configuration file +# +# Below all configuration parameters available to control +# KIWI's build process are listed as comments. The values +# used here provides the default values applied by KIWI if +# no other information is specified. +# +# To make any of the below effective, please uncomment the +# respective section(s) and adapt the parameters according +# to your needs +# + +# Setup access to security keys +#credentials: +# # Specify private key(s) used for signing operations +# - verification_metadata_signing_key_file: /path/to/private.pem + +# Setup access options for the Open BuildService +#obs: +# # Specify the URL of the Open BuildService download server +# - download_url: http://download.opensuse.org/repositories +# # Specify if the BuildService download server is public or private. +# # This information is used to verify if the request to populate +# # the repositories via the imageinclude attribute is possible +# - public: true + + +# Setup behaviour of the kiwi result bundle command +#bundle: +# # Specify if the bundle tarball should contain compressed results. +# # Note: Already compressed result information will not be touched. +# # Build results that generate an encrypted filesystem, i.e. +# # luks setup, will not be compressed. The intention for result compression +# # is to produce a smaller representation of the original. Encrypted data +# # generally grows when an attempt is made to compress the data. This is +# # due to the nature of compression algorithms. Therefore this setting is +# # ignored when encryption is enabled. +# - compress: false +# # Specify if the image build result and bundle should contain +# # a .changes file. The .changes file contains the package changelog +# # information from all packages installed into the image. 
+# - has_package_changes: false + + +# Setup behaviour of XZ compressor +#xz: +# # Specify options used in any xz compression call +# - options: '--threads=0' + + +# Setup process parameters for container image creation +#container: +# # Specify compression for container images +# # Possible values are true, false, xz or none. +# - compress: true + + +# Setup process parameters for ISO image creation +#iso: +# # Specify tool category which should be used to build iso images +# # Possible values are: xorriso +# - tool_category: xorriso + + +# Setup process parameters for OCI toolchain +#oci: +# # Specify OCI archive tool which should be used on creation of +# # container archives for OCI compliant images, e.g docker +# # Possible values are umoci and buildah +# - archive_tool: buildah + + +# Specify build constraints that applies during the image build +# process. If one or more constraints are violated the build exits +# with an appropriate error message. +#build_constraints: +# # Maximum result image size. The value can be specified in +# # bytes or it can be specified with m=MB or g=GB. 
The constraint +# # is checked prior to the result bundle creation +# - max_size: 700m + +# Setup process parameters for partition mapping +mapper: +# # Specify tool to use for creating partition maps +# # Possible values are: kpartx and partx + - part_mapper: {{ "partx" if architecture in ["s390x"] else "kpartx" }} + +# Setup process parameters to handle runtime checks +#runtime_checks: +# # Specify list of runtime checks to disable +# - disable: +# # verify that the host has the required container tools installed +# - check_container_tool_chain_installed + +# # verify that there are repositories configured +# - check_repositories_configured + +# # verify that the URL for imageinclude repos is accessable +# - check_image_include_repos_publicly_resolvable + +# # verify secure boot setup disabled for overlay configured disk images +# - check_efi_mode_for_disk_overlay_correctly_setup + +# # verify for legacy kiwi boot images that they exist on the host +# - check_boot_description_exists + +# # verify if kiwi initrd_system was set if a boot attribute exists +# - check_initrd_selection_required + +# # verify for legacy kiwi boot images that the same kernel is used +# - check_consistent_kernel_in_boot_and_system_image + +# # check for reserved label names used in LVM setup +# - check_volume_setup_defines_reserved_labels + +# # verify only one full size volume is specified for LVM images +# - check_volume_setup_defines_multiple_fullsize_volumes + +# # verify no / volume setup is setup but the @root volume is used +# - check_volume_setup_has_no_root_definition + +# # verify if volume label is really used with a volume setup +# - check_volume_label_used_with_lvm + +# # verify that there is a xen domain setup for xen images +# - check_xen_uniquely_setup_as_server_or_guest + +# # verify mediacheck is installed for ISO images that requests it +# - check_mediacheck_installed + +# # verify dracut-kiwi-live is installed for ISO images +# - 
check_dracut_module_for_live_iso_in_package_list + +# # verify dracut-kiwi-overlay is installed for overlay disk images +# - check_dracut_module_for_disk_overlay_in_package_list + +# # verify dracut-kiwi-repart is installed for OEM disk images +# - check_dracut_module_for_disk_oem_in_package_list + +# # verify dracut-kiwi-oem-dump is installed for OEM install images +# - check_dracut_module_for_oem_install_in_package_list + +# # verify configured firmware is compatible with host architecture +# - check_architecture_supports_iso_firmware_setup + +# # verify WSL naming conventions +# - check_appx_naming_conventions_valid + +# # check kiwi dracut modules compatible with kiwi builder +# - check_dracut_module_versions_compatible_to_kiwi + +# # check for unresolved include statements in the XML description +# - check_include_references_unresolvable + +# # validate options passed to cryptsetup via luksformat element +# - check_luksformat_options_valid + +# # check devicepersistency compatible with partition table type +# - check_partuuid_persistency_type_used_with_mbr + +# # check efifatimagesize does not exceed the max El Torito load size +# - check_efi_fat_image_has_correct_size diff --git a/iso/empanadas/empanadas/util/dnf_utils.py b/iso/empanadas/empanadas/util/dnf_utils.py index 862af8e..0c7ab30 100644 --- a/iso/empanadas/empanadas/util/dnf_utils.py +++ b/iso/empanadas/empanadas/util/dnf_utils.py @@ -62,7 +62,9 @@ class RepoSync: fpsync: bool = False, logger=None, log_level='INFO', - ): + use_staging: bool = False, + ): + self.nofail = nofail self.dryrun = dryrun self.fullrun = fullrun @@ -80,11 +82,14 @@ class RepoSync: # This makes it so every repo is synced at the same time. # This is EXTREMELY dangerous. 
self.just_pull_everything = just_pull_everything + # Use staging url instead of pulling from peridot (or, for EL8) + self.use_staging = use_staging # Relevant config items self.major_version = major self.date_stamp = config['date_stamp'] self.timestamp = time.time() self.repo_base_url = config['repo_base_url'] + self.staging_base_url = config['staging_base_url'] self.compose_root = config['compose_root'] self.compose_base = config['compose_root'] + "/" + major self.profile = rlvars['profile'] @@ -102,6 +107,7 @@ class RepoSync: self.project_id = rlvars['project_id'] self.repo_renames = rlvars['renames'] self.repos = rlvars['all_repos'] + self.extra_repos = rlvars['extra_repos'] self.multilib = rlvars['provide_multilib'] self.repo = repo self.extra_files = rlvars['extra_files'] @@ -270,7 +276,9 @@ class RepoSync: self.gpg_check, self.repo_gpg_check, self.tmplenv, - self.log + self.log, + staging_base_url=self.staging_base_url, + use_staging=self.use_staging, ) if self.dryrun: @@ -1446,7 +1454,8 @@ class RepoSync: self.gpg_check, self.repo_gpg_check, self.tmplenv, - self.log + self.log, + staging_base_url=self.staging_base_url, ) diff --git a/iso/empanadas/empanadas/util/iso_utils.py b/iso/empanadas/empanadas/util/iso_utils.py index 3ff539f..d52ba44 100644 --- a/iso/empanadas/empanadas/util/iso_utils.py +++ b/iso/empanadas/empanadas/util/iso_utils.py @@ -110,6 +110,7 @@ class IsoBuild: self.revision = rlvars['revision'] self.rclvl = rlvars['rclvl'] self.repos = rlvars['iso_map']['lorax']['repos'] + self.extra_repos = rlvars['extra_repos'] self.repo_base_url = config['repo_base_url'] self.project_id = rlvars['project_id'] self.structure = rlvars['structure'] @@ -202,7 +203,8 @@ class IsoBuild: self.current_arch, self.compose_latest_sync, self.compose_dir_is_here, - self.hashed + self.hashed, + self.extra_repos ) self.log.info(self.revision_level) @@ -268,6 +270,8 @@ class IsoBuild: dist=self.disttag, repos=self.repolist, user_agent='{{ user_agent }}', + 
compose_dir_is_here=self.compose_dir_is_here, + compose_dir=self.compose_root, ) mock_sh_template_output = mock_sh_template.render( diff --git a/iso/empanadas/empanadas/util/shared.py b/iso/empanadas/empanadas/util/shared.py index fb053dc..7849d5c 100644 --- a/iso/empanadas/empanadas/util/shared.py +++ b/iso/empanadas/empanadas/util/shared.py @@ -446,8 +446,10 @@ class Shared: repo_gpg_check, templates, logger, - dest_path='/var/tmp' - ) -> str: + dest_path='/var/tmp', + staging_base_url='https://dl.rockylinux.org/stg', + use_staging=False, + ) -> str: """ Generates the necessary repo conf file for the operation. This repo file should be temporary in nature. This will generate a repo file @@ -475,22 +477,36 @@ class Shared: if not os.path.exists(dest_path): os.makedirs(dest_path, exist_ok=True) config_file = open(fname, "w+") + repolist = [] for repo in repos: - constructed_url = '{}/{}/repo/{}{}/$basearch'.format( - repo_base_url, - project_id, - prehashed, - repo, - ) + if use_staging: + constructed_url = '{}/{}/{}/$basearch/os'.format( + staging_base_url, + major_version, + repo, + ) - constructed_url_src = '{}/{}/repo/{}{}/src'.format( - repo_base_url, - project_id, - prehashed, - repo, - ) + constructed_url_src = '{}/{}/{}/source/tree'.format( + staging_base_url, + major_version, + repo, + ) + else: + constructed_url = '{}/{}/repo/{}{}/$basearch'.format( + repo_base_url, + project_id, + prehashed, + repo, + ) + + constructed_url_src = '{}/{}/repo/{}{}/src'.format( + repo_base_url, + project_id, + prehashed, + repo, + ) repodata = { 'name': repo, @@ -903,8 +919,10 @@ class Shared: compose_latest_sync, compose_dir_is_here: bool = False, hashed: bool = False, - extra_repos: list = None - ): + extra_repos: list = None, + staging_base_url: str = 'https://dl.rockylinux.org/stg', + use_staging: bool = False, + ): """ Builds the repo dictionary """ @@ -938,7 +956,9 @@ class Shared: repolist.append(repodata) if extra_repos: - repolist.append(repo for repo in 
Shared.parse_extra_repos(extra_repos)) + extras = Shared.parse_extra_repos(extra_repos) + for repo in extras: + repolist.append(repo) return repolist @@ -947,6 +967,8 @@ class Shared: # must be in format URL[,PRIORITY] result = [] for idx, candidate in enumerate(extra_repos): + if isinstance(candidate, dict): + url, priority = candidate['url'], candidate.get('priority', None) url, priority = candidate.split(',') if not priority: priority = 100 diff --git a/iso/empanadas/pyproject.toml b/iso/empanadas/pyproject.toml index 8c996c9..cb3732e 100644 --- a/iso/empanadas/pyproject.toml +++ b/iso/empanadas/pyproject.toml @@ -39,6 +39,16 @@ generate-compose = "empanadas.scripts.generate_compose:run" peridot-repoclosure = "empanadas.scripts.peridot_repoclosure:run" refresh-all-treeinfo = "empanadas.scripts.refresh_all_treeinfo:run" +[tool.pylint.main] +init-hook =""" +try: + import pylint_venv +except ImportError: + pass +else: + pylint_venv.inithook() +""" + [build-system] requires = ["poetry-core>=1.0.0"] build-backend = "poetry.core.masonry.api" diff --git a/iso/empanadas/tox.ini b/iso/empanadas/tox.ini new file mode 100644 index 0000000..e9587a9 --- /dev/null +++ b/iso/empanadas/tox.ini @@ -0,0 +1,2 @@ +[pycodestyle] +max-line-length = 160