Compare commits

...

26 Commits

Author SHA1 Message Date
7bca2041bd
fix: more enablement for using non-peridot repos / local composes 2024-11-12 18:54:09 -05:00
3967432d3d
fix: extra repos processing 2024-11-12 18:54:09 -05:00
79e52d1302
WIP: support running toolkit against staging in addition to Peridot 2024-11-12 18:54:09 -05:00
84e18e6d8d
fix: pass extra repos 2024-11-12 18:54:09 -05:00
f72b1e2107
fix: build el8 isos 2024-11-12 18:54:08 -05:00
1cd70a267f
use /tmp instead of /workdir 2024-11-12 18:54:08 -05:00
163cd9ef66
make the rootfs tarball the main artifact for Containers 2024-11-12 18:54:08 -05:00
1d75180a86
fix: azure needs special handling 2024-11-12 18:54:08 -05:00
f85030a7b1
add EC2, Vagrant, Azure, OCP; upload step 2024-11-12 18:54:08 -05:00
59e613fdcc
implement kiwi backend 2024-11-12 18:54:08 -05:00
eefe2821cc
refactor and support multiple image backends 2024-11-12 18:54:08 -05:00
0bb3867c6a
empanadas: change el9.yaml for 9.5
2024-11-12 10:27:27 -07:00
78301b7906
sync: change to 9.5
2024-11-12 10:23:15 -07:00
2cb5ae42b9
fix safednf
2024-10-29 23:35:13 -07:00
96f8877d1b
dnf4 is required for reposync
2024-10-29 13:10:26 -07:00
1470e590d3
add group auditor 1/?
2024-10-18 14:24:26 -07:00
546f8b4687
look at host category for ALL
2024-10-18 12:35:08 -07:00
7f3a4b4761
add shim unsigned to parser part 2
2024-10-17 15:37:44 -07:00
4906749ed0
add shim unsigned to parser 2024-10-17 15:37:15 -07:00
1a45143b00
fix rs for generators
2024-10-17 15:14:16 -07:00
fc0b738c75
add notice for 0 hosts
2024-10-17 12:25:31 -07:00
689e7aa793
mangle: separate hbac hosts by lists
2024-10-17 11:55:14 -07:00
9c1b828ab7
remove resilient storage from r10
2024-10-17 10:01:56 -07:00
448b8c035b
mangle/ipa: all hbac access supersedes everything else
2024-10-16 10:17:09 -07:00
a6f4632d66
prepare for 9.5 builds
2024-10-16 01:29:40 -07:00
08d8995344
Use label=disable to prevent context changes
2024-10-07 15:09:06 -07:00
44 changed files with 1376 additions and 526 deletions

View File

@@ -1,4 +1,4 @@
-FROM quay.io/centos/centos:stream9
+FROM quay.io/rockylinux/rockylinux:9
 ADD images/get_arch /get_arch

View File

@@ -38,6 +38,8 @@ RUN dnf install -y \
     sudo \
     mock \
     python-pip \
+    mock \
+    fuse-overlayfs \
     imagefactory \
     imagefactory-plugins*

View File

@@ -0,0 +1,5 @@
"""Empanadas Backends (fillings)"""
from .imagefactory import ImageFactoryBackend
from .kiwi import KiwiBackend
from .interface import BackendInterface

View File

@@ -0,0 +1,318 @@
"""Backend for ImageFactory"""
import json
import os
import pathlib
import tempfile
from .interface import BackendInterface
from empanadas.builders import utils
from attrs import define, field
from typing import List, Optional, Callable, Union
KICKSTART_PATH = pathlib.Path(os.environ.get("KICKSTART_PATH", "/kickstarts"))
STORAGE_DIR = pathlib.Path("/var/lib/imagefactory/storage")
@define(kw_only=True)
class ImageFactoryBackend(BackendInterface):
"""Build an image using ImageFactory"""
kickstart_arg: List[str] = field(factory=list)
kickstart_path: pathlib.Path = field(init=False)
base_uuid: Optional[str] = field(default="")
target_uuid: Optional[str] = field(default="")
tdl_path: pathlib.Path = field(init=False)
out_type: str = field(init=False)
command_args: List[str] = field(factory=list)
common_args: List[str] = field(factory=list)
package_args: List[str] = field(factory=list)
metadata: pathlib.Path = field(init=False)
stage_commands: Optional[List[List[Union[str, Callable]]]] = field(init=False)
# The url to use in the path when fetching artifacts for the build
kickstart_dir: str = field() # 'os' or 'kickstart'
# The git repository to fetch kickstarts from
kickstart_repo: str = field()
def prepare(self):
self.out_type = self.image_format()
tdl_template = self.ctx.tmplenv.get_template('icicle/tdl.xml.tmpl')
self.tdl_path = self.render_icicle_template(tdl_template)
if not self.tdl_path:
exit(2)
self.metadata = pathlib.Path(self.ctx.outdir, ".imagefactory-metadata.json")
self.kickstart_path = pathlib.Path(f"{KICKSTART_PATH}/Rocky-{self.ctx.architecture.major}-{self.ctx.type_variant}.ks")
self.checkout_kickstarts()
self.kickstart_arg = self.kickstart_imagefactory_args()
try:
os.mkdir(self.ctx.outdir)
except FileExistsError:
self.log.info("Directory already exists for this release. If possible, previously executed steps may be skipped")
except Exception as e:
self.log.exception("Some other exception occured while creating the output directory", e)
return 0
if os.path.exists(self.metadata):
self.ctx.log.info(f"Found metadata at {self.metadata}")
with open(self.metadata, "r") as f:
try:
o = json.load(f)
self.base_uuid = o['base_uuid']
self.target_uuid = o['target_uuid']
except json.decoder.JSONDecodeError as e:
self.ctx.log.exception("Couldn't decode metadata file", e)
finally:
f.flush()
self.command_args = self._command_args()
self.package_args = self._package_args()
self.common_args = self._common_args()
self.setup_staging()
def build(self) -> int:
if self.base_uuid:
return 0
self.fix_ks()
# TODO(neil): this should be a lambda which is called from the function
ret, out, err, uuid = self.ctx.prepare_and_run(self.build_command(), search=True)
if uuid:
self.base_uuid = uuid.rstrip()
self.save()
if ret > 0:
return ret
ret = self.package()
if ret > 0:
return ret
def clean(self):
pass
def save(self):
with open(self.metadata, "w") as f:
try:
o = {
name: getattr(self, name) for name in [
"base_uuid", "target_uuid"
]
}
self.ctx.log.debug(o)
json.dump(o, f)
except AttributeError as e:
self.ctx.log.error("Couldn't find attribute in object. Something is probably wrong", e)
except Exception as e:
self.ctx.log.exception(e)
finally:
f.flush()
def package(self) -> int:
# Some build types don't need to be packaged by imagefactory
# @TODO remove business logic if possible
if self.ctx.image_type in ["GenericCloud", "EC2", "Azure", "Vagrant", "OCP", "RPI", "GenericArm"]:
self.target_uuid = self.base_uuid if hasattr(self, 'base_uuid') else ""
if self.target_uuid:
return 0
ret, out, err, uuid = self.ctx.prepare_and_run(self.package_command(), search=True)
if uuid:
self.target_uuid = uuid.rstrip()
self.save()
return ret
def stage(self) -> int:
""" Stage the artifacst from wherever they are (unpacking and converting if needed)"""
self.ctx.log.info("Executing staging commands")
if not hasattr(self, 'stage_commands'):
return 0
returns = []
for command in self.stage_commands: # type: ignore
ret, out, err, _ = self.ctx.prepare_and_run(command, search=False)
returns.append(ret)
if any(ret > 0 for ret in returns):
raise Exception(returns)
return 0
def checkout_kickstarts(self) -> int:
cmd = ["git", "clone", "--branch", f"r{self.ctx.architecture.major}",
self.kickstart_repo, f"{KICKSTART_PATH}"]
ret, out, err, _ = self.ctx.prepare_and_run(cmd, search=False)
self.ctx.log.debug(out)
self.ctx.log.debug(err)
if ret > 0:
ret = self.pull_kickstarts()
return ret
def pull_kickstarts(self) -> int:
cmd: utils.CMD_PARAM_T = ["git", "-C", f"{KICKSTART_PATH}", "reset", "--hard", "HEAD"]
ret, out, err, _ = self.ctx.prepare_and_run(cmd, search=False)
self.ctx.log.debug(out)
self.ctx.log.debug(err)
if ret == 0:
cmd = ["git", "-C", f"{KICKSTART_PATH}", "pull"]
ret, out, err, _ = self.ctx.prepare_and_run(cmd, search=False)
self.ctx.log.debug(out)
self.ctx.log.debug(err)
return ret
def _command_args(self):
args_mapping = {
"debug": "--debug",
}
# NOTE(neil): i'm intentionally leaving this as is; deprecated
return [param for name, param in args_mapping.items() if self.ctx.debug]
def _package_args(self) -> List[str]:
if self.ctx.image_type in ["Container"]:
return ["--parameter", "compress", "xz"]
return [""]
def _common_args(self) -> List[str]:
args = []
if self.ctx.image_type in ["Container"]:
args = ["--parameter", "offline_icicle", "true"]
if self.ctx.image_type in ["GenericCloud", "EC2", "Vagrant", "Azure", "OCP", "RPI", "GenericArm"]:
args = ["--parameter", "generate_icicle", "false"]
return args
def image_format(self) -> str:
mapping = {
"Container": "docker"
}
return mapping[self.ctx.image_type] if self.ctx.image_type in mapping.keys() else ''
def kickstart_imagefactory_args(self) -> List[str]:
if not self.kickstart_path.is_file():
self.ctx.log.warning(f"Kickstart file is not available: {self.kickstart_path}")
if not self.ctx.debug:
self.ctx.log.warning("Exiting because debug mode is not enabled.")
exit(2)
return ["--file-parameter", "install_script", str(self.kickstart_path)]
def render_icicle_template(self, tdl_template) -> pathlib.Path:
output = tempfile.NamedTemporaryFile(delete=False).name
return utils.render_template(output, tdl_template,
architecture=self.ctx.architecture.name,
iso8601date=self.ctx.build_time.strftime("%Y%m%d"),
installdir=self.kickstart_dir,
major=self.ctx.architecture.major,
minor=self.ctx.architecture.minor,
release=self.ctx.release,
size="10G",
type=self.ctx.image_type,
utcnow=self.ctx.build_time,
version_variant=self.ctx.architecture.version if not self.ctx.variant else f"{self.ctx.architecture.version}-{self.ctx.variant}",
)
def build_command(self) -> List[str]:
build_command = ["imagefactory", "--timeout", self.ctx.timeout,
*self.command_args, "base_image", *self.common_args,
*self.kickstart_arg, self.tdl_path]
return build_command
def package_command(self) -> List[str]:
package_command = ["imagefactory", *self.command_args, "target_image",
self.out_type, *self.common_args,
"--id", f"{self.base_uuid}",
*self.package_args,
"--parameter", "repository", self.ctx.outname]
return package_command
def fix_ks(self):
cmd: utils.CMD_PARAM_T = ["sed", "-i", f"s,$basearch,{self.ctx.architecture.name},", str(self.kickstart_path)]
self.ctx.prepare_and_run(cmd, search=False)
def setup_staging(self):
# Yes, this is gross. I'll fix it later.
if self.ctx.image_type in ["Container"]:
self.stage_commands = [
["tar", "-C", f"{self.ctx.outdir}", "--strip-components=1", "-x", "-f", lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", "*/layer.tar"],
["xz", f"{self.ctx.outdir}/layer.tar"]
]
if self.ctx.image_type in ["RPI"]:
self.stage_commands = [
["cp", lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", f"{self.ctx.outdir}/{self.ctx.outname}.raw"],
["xz", f"{self.ctx.outdir}/{self.ctx.outname}.raw"]
]
if self.ctx.image_type in ["GenericCloud", "OCP", "GenericArm"]:
self.stage_commands = [
["qemu-img", "convert", "-c", "-f", "raw", "-O", "qcow2",
lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", f"{self.ctx.outdir}/{self.ctx.outname}.qcow2"]
]
if self.ctx.image_type in ["EC2"]:
self.stage_commands = [
["qemu-img", "convert", "-f", "raw", "-O", "qcow2", lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", f"{self.ctx.outdir}/{self.ctx.outname}.qcow2"]
]
if self.ctx.image_type in ["Azure"]:
self.stage_commands = [
["/prep-azure.sh", lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", f"{STORAGE_DIR}"],
["cp", lambda: f"{STORAGE_DIR}/{self.target_uuid}.vhd", f"{self.ctx.outdir}/{self.ctx.outname}.vhd"]
]
if self.ctx.image_type in ["Vagrant"]:
_map = {
"Vbox": {"format": "vmdk", "provider": "virtualbox"},
"Libvirt": {"format": "qcow2", "provider": "libvirt", "virtual_size": 10},
"VMware": {"format": "vmdk", "provider": "vmware_desktop"}
}
output = f"{_map[self.ctx.variant]['format']}" # type: ignore
provider = f"{_map[self.ctx.variant]['provider']}" # type: ignore
# pop from the options map that will be passed to the vagrant metadata.json
convert_options = _map[self.ctx.variant].pop('convertOptions') if 'convertOptions' in _map[self.ctx.variant].keys() else '' # type: ignore
self.stage_commands = [
["qemu-img", "convert", "-c", "-f", "raw", "-O", output, *convert_options,
lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", f"{self.ctx.outdir}/{self.ctx.outname}.{output}"],
["tar", "-C", self.ctx.outdir, "-czf", f"/tmp/{self.ctx.outname}.box", '.'],
["mv", f"/tmp/{self.ctx.outname}.box", self.ctx.outdir]
]
self.prepare_vagrant(_map[self.ctx.variant])
if self.stage_commands:
self.stage_commands.append(["cp", "-v", lambda: f"{STORAGE_DIR}/{self.target_uuid}.meta", f"{self.ctx.outdir}/build.meta"])
def prepare_vagrant(self, options):
"""Setup the output directory for the Vagrant type variant, dropping templates as required"""
templates = {}
templates['Vagrantfile'] = self.ctx.tmplenv.get_template(f"vagrant/Vagrantfile.{self.ctx.variant}")
templates['metadata.json'] = self.ctx.tmplenv.get_template('vagrant/metadata.tmpl.json')
templates['info.json'] = self.ctx.tmplenv.get_template('vagrant/info.tmpl.json')
if self.ctx.variant == "VMware":
templates[f"{self.ctx.outname}.vmx"] = self.ctx.tmplenv.get_template('vagrant/vmx.tmpl')
if self.ctx.variant == "Vbox":
templates['box.ovf'] = self.ctx.tmplenv.get_template('vagrant/box.tmpl.ovf')
if self.ctx.variant == "Libvirt":
# Libvirt vagrant driver expects the qcow2 file to be called box.img.
qemu_command_index = [i for i, d in enumerate(self.stage_commands) if d[0] == "qemu-img"][0]
self.stage_commands.insert(qemu_command_index+1, ["mv", f"{self.ctx.outdir}/{self.ctx.outname}.qcow2", f"{self.ctx.outdir}/box.img"])
for name, template in templates.items():
utils.render_template(f"{self.ctx.outdir}/{name}", template,
name=self.ctx.outname,
arch=self.ctx.architecture.name,
options=options
)
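
The stage_commands above wrap the {self.target_uuid} interpolation in lambdas because the UUID only exists after the packaging step; prepare_command evaluates them just before the command runs. A minimal, self-contained sketch of that deferred-evaluation pattern (the class and paths here are illustrative, not toolkit code, and the callable check is condensed):

from typing import Callable, List, Union

CMD_PARAM_T = List[Union[str, Callable[..., str]]]

class Build:
    def __init__(self):
        self.target_uuid = None  # unknown until packaging reports it
        # the lambda defers the f-string until the command actually runs
        self.stage_commands: List[CMD_PARAM_T] = [
            ["cp", lambda: f"/var/lib/imagefactory/storage/{self.target_uuid}.body",
             "/tmp/out.raw"],
        ]

    def prepare_command(self, command_list: CMD_PARAM_T) -> List[str]:
        # evaluate lambdas now; stringify everything else
        return [c() if callable(c) else str(c) for c in command_list]

b = Build()
b.target_uuid = "1234-abcd"  # set once packaging succeeds
print(b.prepare_command(b.stage_commands[0]))
# ['cp', '/var/lib/imagefactory/storage/1234-abcd.body', '/tmp/out.raw']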

View File

@@ -0,0 +1,40 @@
"""
empanadas backend interface
"""
from abc import ABC, abstractmethod
from attrs import define, field
@define
class BackendInterface(ABC):
ctx = field(init=False)
"""
Interface to build images (or whatever)
"""
@abstractmethod
def prepare(self):
"""
Prepares the environment necessary for building the image.
This might include setting up directories, checking prerequisites, etc.
"""
@abstractmethod
def build(self):
"""
Performs the image build operation. This is the core method
where the actual image building logic is implemented.
"""
@abstractmethod
def stage(self):
"""
Transforms and copies artifacts from build directory to the
location expected by the builder (usually in /tmp/)
"""
@abstractmethod
def clean(self):
"""
Cleans up any resources or temporary files created during
the image building process.
"""

View File

@@ -0,0 +1,241 @@
"""Backend for Kiwi"""
from .interface import BackendInterface
from .kiwi_imagedata import ImagesData
from empanadas.builders import utils
from empanadas.common import AttributeDict
from attrs import define, field
from functools import wraps
from typing import List
import git
import os
import pathlib
import tempfile
import shutil
import sys
# TODO(neil): this should be part of the config, somewhere
temp = AttributeDict(
{
"Azure": {
"kiwiType": "oem",
"kiwiProfile": "Cloud-Azure",
"fileType": "raw", # post-converted into vhd on MB boundary
"outputKey": "disk_format_image",
},
"OCP": {
"kiwiType": "oem",
"kiwiProfile": "Cloud-OCP",
"fileType": "qcow2",
"outputKey": "disk_format_image",
},
"GenericCloud": {
"kiwiType": "oem",
"kiwiProfile": "Cloud-GenericCloud",
"fileType": "qcow2",
"outputKey": "disk_format_image",
},
"EC2": {
"kiwiType": "oem",
"kiwiProfile": "Cloud-EC2",
"fileType": "qcow2",
"outputKey": "disk_format_image",
},
"Vagrant": {
"kiwiType": "oem",
"kiwiProfile": "Vagrant",
"fileType": "box",
"outputKey": "disk_format_image",
},
"Container": {
"kiwiType": "oci",
"kiwiProfile": "Container",
"fileType": "tar.xz",
"outputKey": "container"
}
}
)
def ensure_kiwi_conf(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
if not hasattr(self, 'kiwi_conf') or self.kiwi_conf is None:
self.kiwi_conf = temp[self.ctx.image_type]
return func(self, *args, **kwargs)
return wrapper
@define
class KiwiBackend(BackendInterface):
"""Build an image using Kiwi"""
build_args: List[str] = field(factory=list)
image_result: ImagesData = field(init=False)
kiwi_conf: AttributeDict = field(init=False)
def prepare(self):
"""
Checkout mock-rocky-configs and rocky-kiwi-descriptions,
init the mock env, and setup to run kiwi
"""
self.checkout_repos()
self.setup_mock()
self.setup_kiwi()
@ensure_kiwi_conf
def build(self):
self.build_args += [f"--type={self.kiwi_conf.kiwiType}", f"--profile={self.kiwi_conf.kiwiProfile}-{self.ctx.variant}"]
kiwi_command = [
"kiwi-ng", "--color-output",
*self.build_args,
]
if self.ctx.debug:
kiwi_command.append("--debug")
kiwi_system_command = [
"system", "build",
"--description='/builddir/rocky-kiwi-descriptions'",
"--target-dir", f"/builddir/{self.ctx.outdir}"
]
build_command = [
"--shell", "--enable-network", "--", *kiwi_command, *kiwi_system_command
]
ret, out, err = self.run_mock_command(build_command)
if ret > 0:
raise Exception(f"Kiwi build failed: code {ret}")
@ensure_kiwi_conf
def stage(self):
ret, out, err = self.run_mock_command(["--copyout", f"/builddir/{self.ctx.outdir}", self.ctx.outdir])
if ret > 0:
raise Exception("failed to copy build result out")
kiwi_result_path = pathlib.Path(f"{self.ctx.outdir}/kiwi.result.json")
if not os.path.exists(kiwi_result_path):
raise Exception("Missing kiwi.result.json. Aborting")
with open(kiwi_result_path, "r") as kiwi_result:
self.image_result = ImagesData.from_json(kiwi_result.read()).images
source = self.image_result[self.kiwi_conf.outputKey].filename
filetype = self.kiwi_conf.fileType
source = utils.remove_first_directory(source)
dest = f"{self.ctx.outdir}/{self.ctx.outname}.{filetype}"
# NOTE(neil): only because we are preparing the 'final' image in clean step...
if self.ctx.image_type == 'Container':
dest = f"{self.ctx.outdir}/{self.ctx.outname}.oci"
try:
shutil.move(source, dest)
except Exception as e:
raise e
# TODO(neil): refactor
if self.ctx.image_type == 'Azure':
try:
utils.resize_and_convert_raw_image_to_vhd(dest, self.ctx.outdir)
# Remove old raw image
pathlib.Path(f"{self.ctx.outdir}/{self.ctx.outname}.raw").unlink()
except Exception as e:
raise e
def clean(self):
# TODO(neil): refactor
if self.ctx.image_type == 'Container':
# need to do this before we remove it, otherwise we have to extract from the OCI tarball
root = f"/builddir{self.ctx.outdir}"
builddir = f"{root}/build/image-root"
ret, out, err = self.run_mock_command(["--shell", "--", "tar", "-C", builddir, "-cJf", f"{root}/{self.ctx.outname}.tar.xz", "."])
if ret > 0:
raise Exception(err)
ret, out, err = self.run_mock_command(["--shell", "rm", "-fr", f"/builddir/{self.ctx.outdir}/build/"])
return ret
def run_mock_command(self, mock_command: List[str]):
mock_args = ["--configdir", "/tmp/mock-rocky-configs/etc/mock", "-r", f"rl-9-{self.ctx.architecture.name}-core-infra"]
if self.ctx.image_type != 'Container':
mock_args.append("--isolation=simple")
command = [
"mock",
*mock_args,
*mock_command,
]
ret, out, err, _ = self.ctx.prepare_and_run(command)
return ret, out, err
def setup_mock(self):
# TODO(neil): add error checking
ret, out, err = self.run_mock_command(["--init"])
packages = [
"kiwi-boxed-plugin",
"kiwi-cli",
"git",
"dracut-kiwi-live",
"fuse-overlayfs",
"kiwi-systemdeps-bootloaders",
"kiwi-systemdeps-containers",
"kiwi-systemdeps-core",
"kiwi-systemdeps-disk-images",
"kiwi-systemdeps-filesystems",
"kiwi-systemdeps-image-validation",
"kiwi-systemdeps-iso-media",
"epel-release",
"rocky-release-core"
]
ret, out, err = self.run_mock_command(["--install", *packages])
ret, out, err = self.run_mock_command(["--copyin", "/tmp/rocky-kiwi-descriptions", "/builddir/"])
return ret
def checkout_repos(self):
"""
Checkout sig_core/mock-rocky-configs and sig_core/rocky-kiwi-descriptions to /tmp
"""
repos = {
"mock-rocky-configs": "main",
"rocky-kiwi-descriptions": "r9"
}
for repo, branch in repos.items():
repo_url = f"https://git.resf.org/sig_core/{repo}"
clone_dir = f"/tmp/{repo}"
if os.path.isdir(os.path.join(clone_dir, ".git")):
try:
# The directory exists and is a git repository, so attempt to pull the latest changes
git.Repo(clone_dir).remotes.origin.pull(branch)
self.ctx.log.info(f"pulled the latest changes for {branch} branch in {clone_dir}")
except Exception as e:
raise Exception(f"Failed to pull the repository: {str(e)}")
continue
try:
git.Repo.clone_from(repo_url, clone_dir, branch=branch)
print(f"Repository cloned into {clone_dir}")
except Exception as e:
print(f"Failed to clone repository: {str(e)}")
def setup_kiwi(self):
self.ctx.log.info("Generating kiwi.yml from template")
template = self.ctx.tmplenv.get_template('kiwi/kiwi.yml.j2')
output = tempfile.NamedTemporaryFile(delete=False).name
res = utils.render_template(output, template)
self.ctx.log.info("Copying generated kiwi.yml into build root")
ret, out, err = self.run_mock_command(["--copyin", res, "/etc/kiwi.yml"])
if ret > 0:
raise Exception("Failed to configure kiwi")
self.ctx.log.info("Finished setting up kiwi")

View File

@@ -0,0 +1,24 @@
from attrs import define, field
from typing import Dict
import json
@define(auto_attribs=True, kw_only=True)
class ImageInfo:
compress: bool
filename: str
shasum: bool
use_for_bundle: bool
@define(auto_attribs=True, kw_only=True)
class ImagesData:
images: Dict[str, ImageInfo] = field(factory=dict)
@staticmethod
def from_json(data: str) -> 'ImagesData':
json_data = json.loads(data)
images = {key: ImageInfo(**value) for key, value in json_data.items()}
return ImagesData(images=images)
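
A quick usage sketch for the loader above, run alongside the ImagesData/ImageInfo definitions; the JSON shape mirrors what KiwiBackend.stage() reads from kiwi.result.json, but the key and filename values here are invented:

sample = '''{
  "disk_format_image": {
    "compress": false,
    "filename": "build/Rocky-9-GenericCloud.qcow2",
    "shasum": true,
    "use_for_bundle": true
  }
}'''

images = ImagesData.from_json(sample).images
print(images["disk_format_image"].filename)  # build/Rocky-9-GenericCloud.qcow2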

View File

@@ -0,0 +1 @@
from .imagebuild import ImageBuild

View File

@@ -0,0 +1,118 @@
"""Build an image with a given backend"""
import datetime
import logging
import os
import pathlib
from attrs import define, field
from empanadas.backends import BackendInterface, KiwiBackend
from empanadas.common import Architecture
from empanadas.common import _rootdir
from . import utils
from jinja2 import Environment, FileSystemLoader, Template
from typing import List, Optional, Tuple, Callable
@define(kw_only=True)
class ImageBuild: # pylint: disable=too-few-public-methods
"""Image builder using a given backend"""
tmplenv: Environment = field(init=False)
# Only things we know we're keeping in this class here
architecture: Architecture = field()
backend: BackendInterface = field()
build_time: datetime.datetime = field()
debug: bool = field(default=False)
log: logging.Logger = field()
release: int = field(default=0)
timeout: str = field(default='3600')
image_type: str = field() # the type of the image
type_variant: str = field(init=False)
variant: Optional[str] = field()
# Kubernetes job template
job_template: Optional[Template] = field(init=False) # the kube Job tpl
# Commands to stage artifacts
# Where the artifacts should go to
outdir: pathlib.Path = field(init=False)
outname: str = field(init=False)
def __attrs_post_init__(self):
self.backend.ctx = self
file_loader = FileSystemLoader(f"{_rootdir}/templates")
self.tmplenv = Environment(loader=file_loader)
self.job_template = self.tmplenv.get_template('kube/Job.tmpl')
self.type_variant = self.type_variant_name()
self.outdir, self.outname = self.output_name()
def output_name(self) -> Tuple[pathlib.Path, str]:
directory = f"Rocky-{self.architecture.major}-{self.type_variant}-{self.architecture.version}-{self.build_time.strftime('%Y%m%d')}.{self.release}"
name = f"{directory}.{self.architecture.name}"
outdir = pathlib.Path("/tmp/", directory)
return outdir, name
def type_variant_name(self):
return self.image_type if not self.variant else f"{self.image_type}-{self.variant}"
def prepare_and_run(self, command: utils.CMD_PARAM_T, search: Callable = None) -> utils.CMD_RESULT_T:
return utils.runCmd(self, self.prepare_command(command), search)
def prepare_command(self, command_list: utils.CMD_PARAM_T) -> List[str]:
"""
Commands may be a callable, which should be a lambda to be evaluated at
preparation time with available locals. This can be used to, among
other things, perform lazy evaluations of f-strings which have values
not available at assignment time. e.g., filling in a second command
with a value extracted from the previous step or command.
"""
r = []
for c in command_list:
if callable(c) and c.__name__ == '<lambda>':
r.append(c())
else:
r.append(str(c))
return r
def render_kubernetes_job(self):
# TODO(neil): should this be put in the builder class itself to return the right thing for us?
if isinstance(self.backend, KiwiBackend):
self.log.error("Kube not implemented for Kiwi")
commands = [self.backend.build_command(), self.backend.package_command(), self.backend.copy_command()]
if not self.job_template:
return None
template = self.job_template.render(
architecture=self.architecture.name,
backoffLimit=4,
buildTime=self.build_time.strftime("%s"),
command=commands,
imageName="ghcr.io/rockylinux/sig-core-toolkit:latest",
jobname="buildimage",
namespace="empanadas",
major=self.architecture.major,
minor=self.architecture.minor,
restartPolicy="Never",
)
return template
def upload(self, skip=False) -> int:
if not skip:
self.log.info("Copying files to output directory")
copy_command = ["aws", "s3", "cp", "--recursive", f"{self.outdir}/",
f"s3://resf-empanadas/buildimage-{self.architecture.version}-{self.architecture.name}/{self.outname}/{self.build_time.strftime('%s')}/"
]
ret, out, err, _ = self.prepare_and_run(copy_command, search=False)
return ret
self.ctx.log.info(f"Build complete! Output available in {self.ctx.outdir}/")
return 0
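
output_name() above encodes the artifact naming scheme used across the backends; a worked sketch (all values invented) of what it yields:

import datetime
import pathlib

# stand-ins for the attrs fields read by output_name()/type_variant_name()
major, version, arch = 9, "9.5", "x86_64"
image_type, variant, release = "Vagrant", "Libvirt", 0
build_time = datetime.datetime(2024, 11, 12)

type_variant = image_type if not variant else f"{image_type}-{variant}"
directory = f"Rocky-{major}-{type_variant}-{version}-{build_time.strftime('%Y%m%d')}.{release}"
outname = f"{directory}.{arch}"
outdir = pathlib.Path("/tmp/", directory)

print(outdir)   # /tmp/Rocky-9-Vagrant-Libvirt-9.5-20241112.0
print(outname)  # Rocky-9-Vagrant-Libvirt-9.5-20241112.0.x86_64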

View File

@@ -0,0 +1,139 @@
import json
import os
import logging
import pathlib
import subprocess
import sys
from typing import Callable, List, Tuple, Union
CMD_PARAM_T = List[Union[str, Callable[..., str]]]
STR_NONE_T = Union[str, None]
BYTES_NONE_T = Union[bytes, None]
# Tuple of int, stdout, stderr, uuid
CMD_RESULT_T = Tuple[int, BYTES_NONE_T, BYTES_NONE_T, STR_NONE_T]
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s :: %(name)s :: %(message)s',
'%Y-%m-%d %H:%M:%S'
)
handler.setFormatter(formatter)
log.addHandler(handler)
def render_template(path, template, **kwargs) -> pathlib.Path:
with open(path, "wb") as f:
_template = template.render(**kwargs)
f.write(_template.encode())
f.flush()
output = pathlib.Path(path)
if not output.exists():
raise Exception("Failed to template")
return output
def runCmd(ctx, prepared_command: List[str], search: Callable = None) -> CMD_RESULT_T:
ctx.log.info(f"Running command: {' '.join(prepared_command)}")
kwargs = {
"stderr": subprocess.PIPE,
"stdout": subprocess.PIPE
}
if ctx.debug:
del kwargs["stderr"]
with subprocess.Popen(prepared_command, **kwargs) as p:
uuid = None
# @TODO implement this as a callback?
if search:
for _, line in enumerate(p.stdout): # type: ignore
ln = line.decode()
if ln.startswith("UUID: "):
uuid = ln.split(" ")[-1]
ctx.log.debug(f"found uuid: {uuid}")
out, err = p.communicate()
res = p.wait(), out, err, uuid
if res[0] > 0:
ctx.log.error(f"Problem while executing command: '{prepared_command}'")
if search and not res[3]:
ctx.log.error("UUID not found in stdout. Dumping stdout and stderr")
log_subprocess(ctx, res)
return res
def log_subprocess(ctx, result: CMD_RESULT_T):
def log_lines(title, lines):
ctx.log.info(f"====={title}=====")
ctx.log.info(lines.decode())
ctx.log.info(f"Command return code: {result[0]}")
stdout = result[1]
stderr = result[2]
if stdout:
log_lines("Command STDOUT", stdout)
if stderr:
log_lines("Command STDERR", stderr)
def remove_first_directory(path):
p = pathlib.Path(path)
# Check if the path is absolute
if p.is_absolute():
# For an absolute path, start the new path with the root
new_path = pathlib.Path(p.root, *p.parts[2:])
else:
# For a relative path, simply skip the first part
new_path = pathlib.Path(*p.parts[1:])
return new_path
def resize_and_convert_raw_image_to_vhd(raw_image_path, outdir=None):
log.info(f"Will resize and convert {raw_image_path}")
MB = 1024 * 1024 # For calculations - 1048576 bytes
if outdir is None:
outdir = os.getcwd()
# Ensure the output directory exists
pathlib.Path(outdir).mkdir(parents=True, exist_ok=True)
# Getting the size of the raw image
result = subprocess.run(['qemu-img', 'info', '-f', 'raw', '--output', 'json', raw_image_path], capture_output=True, text=True)
if result.returncode != 0:
log.error("Error getting image info")
raise Exception(result)
image_info = json.loads(result.stdout)
size = int(image_info['virtual-size'])
# Calculate the new size rounded to the nearest MB
rounded_size = ((size + MB - 1) // MB) * MB
# Prepare output filename (.raw replaced by .vhd)
outfilename = pathlib.Path(raw_image_path).name.replace("raw", "vhd")
outfile = os.path.join(outdir, outfilename)
# Resize the image
log.info(f"Resizing {raw_image_path} to nearest MB boundary")
result = subprocess.run(['qemu-img', 'resize', '-f', 'raw', raw_image_path, str(rounded_size)])
if result.returncode != 0:
log.error("Error resizing image")
raise Exception(result)
# Convert the image
log.info(f"Converting {raw_image_path} to vhd")
result = subprocess.run(['qemu-img', 'convert', '-f', 'raw', '-o', 'subformat=fixed,force_size', '-O', 'vpc', raw_image_path, outfile])
if result.returncode != 0:
log.error("Error converting image to VHD format")
raise Exception(result)
log.info(f"Image converted and saved to {outfile}")

View File

@@ -1,21 +1,22 @@
 # All imports are here
 import glob
+import hashlib
+import logging
+import os
 import platform
 import time
 from collections import defaultdict
-from typing import Tuple
+from attrs import define, field
 import rpm
 import yaml

 # An implementation from the Fabric python library
-class AttributeDict(defaultdict):
-    def __init__(self):
-        super(AttributeDict, self).__init__(AttributeDict)
+class AttributeDict(dict):
+    def __init__(self, *args, **kwargs):
+        super(AttributeDict, self).__init__(*args, **kwargs)
+        for key, value in self.items():
+            if isinstance(value, dict):
+                self[key] = AttributeDict(value)

     def __getattr__(self, key):
         try:
@@ -26,6 +27,11 @@ class AttributeDict(defaultdict):
     def __setattr__(self, key, value):
         self[key] = value

+    def __setitem__(self, key, value):
+        if isinstance(value, dict):
+            value = AttributeDict(value)
+        super(AttributeDict, self).__setitem__(key, value)
+
 # These are a bunch of colors we may use in terminal output
 class Color:
@@ -59,6 +65,7 @@ config = {
     "category_stub": "mirror/pub/rocky",
     "sig_category_stub": "mirror/pub/sig",
     "repo_base_url": "https://yumrepofs.build.resf.org/v1/projects",
+    "staging_base_url": "https://dl.rockylinux.org/stg/rocky",
     "mock_work_root": "/builddir",
     "container": "centos:stream9",
     "distname": "Rocky Linux",
@@ -107,7 +114,7 @@ for conf in glob.iglob(f"{_rootdir}/sig/*.yaml"):
 ALLOWED_TYPE_VARIANTS = {
     "Azure": ["Base", "LVM"],
-    "Container": ["Base", "Minimal", "UBI", "WSL"],
+    "Container": ["Base", "Minimal", "UBI", "WSL", "Toolbox"],
     "EC2": ["Base", "LVM"],
     "GenericCloud": ["Base", "LVM"],
     "Vagrant": ["Libvirt", "Vbox", "VMware"],
@@ -120,7 +127,7 @@ ALLOWED_TYPE_VARIANTS = {
 def valid_type_variant(_type: str, variant: str = "") -> bool:
     if _type not in ALLOWED_TYPE_VARIANTS:
         raise Exception(f"Type is invalid: ({_type}, {variant})")
-    if ALLOWED_TYPE_VARIANTS[_type] == None:
+    if ALLOWED_TYPE_VARIANTS[_type] is None:
         if variant is not None:
             raise Exception(f"{_type} Type expects no variant type.")
         return True
@@ -135,9 +142,6 @@ def valid_type_variant(_type: str, variant: str = "") -> bool:
     return True

-from attrs import define, field
-
 @define(kw_only=True)
 class Architecture:
     name: str = field()
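
The AttributeDict change above swaps the defaultdict base for a plain dict that recursively wraps nested mappings, so YAML-loaded config supports attribute access at every level. A sketch of the behavior the new __setitem__ buys; the __getattr__ body is filled in here along the lines of the usual Fabric implementation, since the hunk truncates it:

class AttributeDict(dict):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        for key, value in self.items():
            if isinstance(value, dict):
                self[key] = AttributeDict(value)

    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)

    def __setattr__(self, key, value):
        self[key] = value

    def __setitem__(self, key, value):
        # wrap nested dicts so attribute access works at every depth
        if isinstance(value, dict):
            value = AttributeDict(value)
        super().__setitem__(key, value)

cfg = AttributeDict({"Azure": {"kiwiProfile": "Cloud-Azure"}})
print(cfg.Azure.kiwiProfile)          # Cloud-Azure: nested dicts wrapped on init
cfg.OCP = {"kiwiProfile": "Cloud-OCP"}
print(cfg.OCP.kiwiProfile)            # Cloud-OCP: wrapped on assignment too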

View File

@@ -31,7 +31,6 @@
   - 'AppStream'
   - 'CRB'
   - 'HighAvailability'
-  - 'ResilientStorage'
   - 'RT'
   - 'NFV'
   - 'SAP'
@@ -190,9 +189,6 @@
   HighAvailability:
     - BaseOS
     - AppStream
-  ResilientStorage:
-    - BaseOS
-    - AppStream
   RT:
     - BaseOS
     - AppStream

View File

@@ -31,7 +31,6 @@
   - 'AppStream'
   - 'CRB'
   - 'HighAvailability'
-  - 'ResilientStorage'
   - 'RT'
   - 'NFV'
   - 'SAP'
@@ -190,9 +189,6 @@
   HighAvailability:
     - BaseOS
     - AppStream
-  ResilientStorage:
-    - BaseOS
-    - AppStream
   RT:
     - BaseOS
     - AppStream

View File

@@ -18,7 +18,7 @@
     - x86_64
     - aarch64
   provide_multilib: False
-  project_id: 'e9cfc87c-d2d2-42d5-a121-852101f1a966'
+  project_id: 'df5bcbfc-ba83-4da8-84d6-ae0168921b4d'
   repo_symlinks:
     devel: 'Devel'
     NFV: 'nfv'

View File

@@ -1,10 +1,10 @@
 ---
 '9-beta':
-  fullname: 'Rocky Linux 9.4'
-  revision: '9.4'
+  fullname: 'Rocky Linux 9.6'
+  revision: '9.6'
   rclvl: 'BETA1'
   major: '9'
-  minor: '4'
+  minor: '6'
   profile: '9-beta'
   disttag: 'el9'
   code: "Blue Onyx"
@@ -20,7 +20,7 @@
     - ppc64le
     - s390x
   provide_multilib: True
-  project_id: 'df5bcbfc-ba83-4da8-84d6-ae0168921b4d'
+  project_id: 'ae163d6a-f050-484f-bbaa-100ca673f146'
   repo_symlinks:
     NFV: 'nfv'
   renames:

View File

@@ -1,10 +1,10 @@
 ---
 '9':
-  fullname: 'Rocky Linux 9.4'
-  revision: '9.4'
+  fullname: 'Rocky Linux 9.5'
+  revision: '9.5'
   rclvl: 'RC1'
   major: '9'
-  minor: '4'
+  minor: '5'
   profile: '9'
   disttag: 'el9'
   code: "Blue Onyx"
@@ -20,7 +20,7 @@
     - ppc64le
     - s390x
   provide_multilib: True
-  project_id: 'df5bcbfc-ba83-4da8-84d6-ae0168921b4d'
+  project_id: 'ae163d6a-f050-484f-bbaa-100ca673f146'
   repo_symlinks:
     NFV: 'nfv'
   renames:

View File

@@ -1,10 +1,10 @@
 ---
 '9-lookahead':
-  fullname: 'Rocky Linux 9.5'
-  revision: '9.5'
+  fullname: 'Rocky Linux 9.6'
+  revision: '9.6'
   rclvl: 'LH1'
   major: '9'
-  minor: '5'
+  minor: '6'
   profile: '9-lookahead'
   disttag: 'el9'
   code: "Blue Onyx"
@@ -20,7 +20,7 @@
     - ppc64le
     - s390x
   provide_multilib: True
-  project_id: '6794b5a8-290b-4d0d-ad5a-47164329cbb0'
+  project_id: 'ae163d6a-f050-484f-bbaa-100ca673f146'
   repo_symlinks:
     NFV: 'nfv'
   renames:

View File

@@ -1,44 +1,49 @@
 # Builds an image given a version, type, variant, and architecture
 # Defaults to the running host's architecture

 import argparse
 import datetime
-import json
 import logging
-import os
-import pathlib
 import platform
-import subprocess
 import sys
-import tempfile
-import time
-
-from attrs import define, Factory, field, asdict
-from botocore import args
-from jinja2 import Environment, FileSystemLoader, Template
-from typing import Callable, List, NoReturn, Optional, Tuple, IO, Union

 from empanadas.common import Architecture, rldict, valid_type_variant
-from empanadas.common import _rootdir
+from empanadas.builders import ImageBuild
+from empanadas.backends import ImageFactoryBackend, KiwiBackend

 parser = argparse.ArgumentParser(description="ISO Compose")

-parser.add_argument('--version', type=str, help="Release Version (8.6, 9.1)", required=True)
+parser.add_argument('--version',
+                    type=str, help="Release Version (8.6, 9.1)", required=True)
 parser.add_argument('--rc', action='store_true', help="Release Candidate")
-parser.add_argument('--kickstartdir', action='store_true', help="Use the kickstart dir instead of the os dir for repositories")
+parser.add_argument('--kickstartdir', action='store_true',
+                    help="Use the kickstart dir instead of the os dir")
 parser.add_argument('--debug', action='store_true', help="debug?")
+parser.add_argument('--skip', type=str,
+                    help="what stage(s) to skip",
+                    required=False)
-parser.add_argument('--type', type=str, help="Image type (container, genclo, azure, aws, vagrant)", required=True)
+parser.add_argument('--type', type=str,
+                    help="Image type (container, genclo, azure, aws, vagrant)",
+                    required=True)
 parser.add_argument('--variant', type=str, help="", required=False)
-parser.add_argument('--release', type=str, help="Image release for subsequent builds with the same date stamp (rarely needed)", required=False)
+parser.add_argument('--release', type=str,
+                    help="Image release for builds with the same date stamp",
+                    required=False)
-parser.add_argument('--kube', action='store_true', help="output as a K8s job(s)", required=False)
+parser.add_argument('--kube', action='store_true',
+                    help="output as a K8s job(s)",
+                    required=False)
-parser.add_argument('--timeout', type=str, help="change timeout for imagefactory build process (default 3600)", required=False, default='3600')
+parser.add_argument('--timeout', type=str,
+                    help="change timeout for imagefactory build process",
+                    required=False, default='3600')
+parser.add_argument('--backend', type=str,
+                    help="which backend to use (kiwi|imagefactory)",
+                    required=False, default='kiwi')

 results = parser.parse_args()
 rlvars = rldict[results.version]
 major = rlvars["major"]

 debug = results.debug

 log = logging.getLogger(__name__)
@@ -52,405 +57,6 @@ formatter = logging.Formatter(
 handler.setFormatter(formatter)
 log.addHandler(handler)
STORAGE_DIR = pathlib.Path("/var/lib/imagefactory/storage")
KICKSTART_PATH = pathlib.Path(os.environ.get("KICKSTART_PATH", "/kickstarts"))
BUILDTIME = datetime.datetime.utcnow()
CMD_PARAM_T = List[Union[str, Callable[..., str]]]
@define(kw_only=True)
class ImageBuild:
architecture: Architecture = field()
base_uuid: Optional[str] = field(default="")
cli_args: argparse.Namespace = field()
command_args: List[str] = field(factory=list)
common_args: List[str] = field(factory=list)
debug: bool = field(default=False)
image_type: str = field()
job_template: Optional[Template] = field(init=False)
kickstart_arg: List[str] = field(factory=list)
kickstart_path: pathlib.Path = field(init=False)
metadata: pathlib.Path = field(init=False)
out_type: str = field(init=False)
outdir: pathlib.Path = field(init=False)
outname: str = field(init=False)
package_args: List[str] = field(factory=list)
release: int = field(default=0)
stage_commands: Optional[List[List[Union[str,Callable]]]] = field(init=False)
target_uuid: Optional[str] = field(default="")
tdl_path: pathlib.Path = field(init=False)
template: Template = field()
timeout: str = field(default='3600')
type_variant: str = field(init=False)
variant: Optional[str] = field()
def __attrs_post_init__(self):
self.tdl_path = self.render_icicle_template()
if not self.tdl_path:
exit(2)
self.type_variant = self.type_variant_name()
self.outdir, self.outname = self.output_name()
self.out_type = self.image_format()
self.command_args = self._command_args()
self.package_args = self._package_args()
self.common_args = self._common_args()
self.metadata = pathlib.Path(self.outdir, ".imagefactory-metadata.json")
self.kickstart_path = pathlib.Path(f"{KICKSTART_PATH}/Rocky-{self.architecture.major}-{self.type_variant}.ks")
self.checkout_kickstarts()
self.kickstart_arg = self.kickstart_imagefactory_args()
try:
os.mkdir(self.outdir)
except FileExistsError as e:
log.info("Directory already exists for this release. If possible, previously executed steps may be skipped")
except Exception as e:
log.exception("Some other exception occured while creating the output directory", e)
return 0
if os.path.exists(self.metadata):
with open(self.metadata, "r") as f:
try:
o = json.load(f)
self.base_uuid = o['base_uuid']
self.target_uuid = o['target_uuid']
except json.decoder.JSONDecodeError as e:
log.exception("Couldn't decode metadata file", e)
finally:
f.flush()
# Yes, this is gross. I'll fix it later.
if self.image_type in ["Container"]:
self.stage_commands = [
["tar", "-C", f"{self.outdir}", "--strip-components=1", "-x", "-f", lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", "*/layer.tar"],
["xz", f"{self.outdir}/layer.tar"]
]
if self.image_type in ["RPI"]:
self.stage_commands = [
["cp", lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", f"{self.outdir}/{self.outname}.raw"],
["xz", f"{self.outdir}/{self.outname}.raw"]
]
if self.image_type in ["GenericCloud", "OCP", "GenericArm"]:
self.stage_commands = [
["qemu-img", "convert", "-c", "-f", "raw", "-O", "qcow2", lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", f"{self.outdir}/{self.outname}.qcow2"]
]
if self.image_type in ["EC2"]:
self.stage_commands = [
["qemu-img", "convert", "-f", "raw", "-O", "qcow2", lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", f"{self.outdir}/{self.outname}.qcow2"]
]
if self.image_type in ["Azure"]:
self.stage_commands = [
["/prep-azure.sh", lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", f"{STORAGE_DIR}"],
["cp", lambda: f"{STORAGE_DIR}/{self.target_uuid}.vhd", f"{self.outdir}/{self.outname}.vhd"]
]
if self.image_type in ["Vagrant"]:
_map = {
"Vbox": {"format": "vmdk", "provider": "virtualbox"},
"Libvirt": {"format": "qcow2", "provider": "libvirt", "virtual_size": 10},
"VMware": {"format": "vmdk", "provider": "vmware_desktop"}
}
output = f"{_map[self.variant]['format']}" #type: ignore
provider = f"{_map[self.variant]['provider']}" # type: ignore
# pop from the options map that will be passed to the vagrant metadata.json
convert_options = _map[self.variant].pop('convertOptions') if 'convertOptions' in _map[self.variant].keys() else '' #type: ignore
self.stage_commands = [
["qemu-img", "convert", "-c", "-f", "raw", "-O", output, *convert_options, lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", f"{self.outdir}/{self.outname}.{output}"],
["tar", "-C", self.outdir, "-czf", f"/tmp/{self.outname}.box", '.'],
["mv", f"/tmp/{self.outname}.box", self.outdir]
]
self.prepare_vagrant(_map[self.variant])
if self.stage_commands:
self.stage_commands.append(["cp", "-v", lambda: f"{STORAGE_DIR}/{self.target_uuid}.meta", f"{self.outdir}/build.meta"])
def prepare_vagrant(self, options):
"""Setup the output directory for the Vagrant type variant, dropping templates as required"""
file_loader = FileSystemLoader(f"{_rootdir}/templates")
tmplenv = Environment(loader=file_loader)
templates = {}
templates['Vagrantfile'] = tmplenv.get_template(f"vagrant/Vagrantfile.{self.variant}")
templates['metadata.json'] = tmplenv.get_template('vagrant/metadata.tmpl.json')
templates['info.json'] = tmplenv.get_template('vagrant/info.tmpl.json')
if self.variant == "VMware":
templates[f"{self.outname}.vmx"] = tmplenv.get_template('vagrant/vmx.tmpl')
if self.variant == "Vbox":
templates['box.ovf'] = tmplenv.get_template('vagrant/box.tmpl.ovf')
if self.variant == "Libvirt":
# Libvirt vagrant driver expects the qcow2 file to be called box.img.
qemu_command_index = [i for i, d in enumerate(self.stage_commands) if d[0] == "qemu-img"][0]
self.stage_commands.insert(qemu_command_index+1, ["mv", f"{self.outdir}/{self.outname}.qcow2", f"{self.outdir}/box.img"])
for name, template in templates.items():
self.render_template(f"{self.outdir}/{name}", template,
name=self.outname,
arch=self.architecture.name,
options=options
)
def checkout_kickstarts(self) -> int:
cmd = ["git", "clone", "--branch", f"r{self.architecture.major}", rlvars['livemap']['git_repo'], f"{KICKSTART_PATH}"]
ret, out, err, _ = self.runCmd(cmd, search=False)
log.debug(out)
log.debug(err)
if ret > 0:
ret = self.pull_kickstarts()
return ret
def pull_kickstarts(self) -> int:
cmd: CMD_PARAM_T = ["git", "-C", f"{KICKSTART_PATH}", "reset", "--hard", "HEAD"]
ret, out, err, _ = self.runCmd(cmd, search=False)
log.debug(out)
log.debug(err)
if ret == 0:
cmd = ["git", "-C", f"{KICKSTART_PATH}", "pull"]
ret, out, err, _ = self.runCmd(cmd, search=False)
log.debug(out)
log.debug(err)
return ret
def output_name(self) -> Tuple[pathlib.Path, str]:
directory = f"Rocky-{self.architecture.major}-{self.type_variant}-{self.architecture.version}-{BUILDTIME.strftime('%Y%m%d')}.{self.release}"
name = f"{directory}.{self.architecture.name}"
outdir = pathlib.Path(f"/tmp/", directory)
return outdir, name
def type_variant_name(self):
return self.image_type if not self.variant else f"{self.image_type}-{self.variant}"
def _command_args(self):
args_mapping = {
"debug": "--debug",
}
return [param for name, param in args_mapping.items() if getattr(self.cli_args, name)]
def _package_args(self) -> List[str]:
if self.image_type in ["Container"]:
return ["--parameter", "compress", "xz"]
return [""]
def _common_args(self) -> List[str]:
args = []
if self.image_type in ["Container"]:
args = ["--parameter", "offline_icicle", "true"]
if self.image_type in ["GenericCloud", "EC2", "Vagrant", "Azure", "OCP", "RPI", "GenericArm"]:
args = ["--parameter", "generate_icicle", "false"]
return args
def image_format(self) -> str:
mapping = {
"Container": "docker"
}
return mapping[self.image_type] if self.image_type in mapping.keys() else ''
def kickstart_imagefactory_args(self) -> List[str]:
if not self.kickstart_path.is_file():
log.warning(f"Kickstart file is not available: {self.kickstart_path}")
if not debug:
log.warning("Exiting because debug mode is not enabled.")
exit(2)
return ["--file-parameter", "install_script", str(self.kickstart_path)]
def render_template(self, path, template, **kwargs) -> pathlib.Path:
with open(path, "wb") as f:
_template = template.render(**kwargs)
f.write(_template.encode())
f.flush()
output = pathlib.Path(path)
if not output.exists():
log.error("Failed to write template")
raise Exception("Failed to template")
return output
def render_icicle_template(self) -> pathlib.Path:
output = tempfile.NamedTemporaryFile(delete=False).name
return self.render_template(output, self.template,
architecture=self.architecture.name,
iso8601date=BUILDTIME.strftime("%Y%m%d"),
installdir="kickstart" if self.cli_args.kickstartdir else "os",
major=self.architecture.major,
minor=self.architecture.minor,
release=self.release,
size="10G",
type=self.image_type,
utcnow=BUILDTIME,
version_variant=self.architecture.version if not self.variant else f"{self.architecture.version}-{self.variant}",
)
def build_command(self) -> List[str]:
build_command = ["imagefactory", "--timeout", self.timeout, *self.command_args, "base_image", *self.common_args, *self.kickstart_arg, self.tdl_path]
return build_command
def package_command(self) -> List[str]:
package_command = ["imagefactory", *self.command_args, "target_image", self.out_type, *self.common_args,
"--id", f"{self.base_uuid}",
*self.package_args,
"--parameter", "repository", self.outname,
]
return package_command
def copy_command(self) -> List[str]:
copy_command = ["aws", "s3", "cp", "--recursive", f"{self.outdir}/",
f"s3://resf-empanadas/buildimage-{self.architecture.version}-{self.architecture.name}/{ self.outname }/{ BUILDTIME.strftime('%s') }/"
]
return copy_command
def build(self) -> int:
if self.base_uuid:
return 0
self.fix_ks()
ret, out, err, uuid = self.runCmd(self.build_command())
if uuid:
self.base_uuid = uuid.rstrip()
self.save()
return ret
def package(self) -> int:
# Some build types don't need to be packaged by imagefactory
# @TODO remove business logic if possible
if self.image_type in ["GenericCloud", "EC2", "Azure", "Vagrant", "OCP", "RPI", "GenericArm"]:
self.target_uuid = self.base_uuid if hasattr(self, 'base_uuid') else ""
if self.target_uuid:
return 0
ret, out, err, uuid = self.runCmd(self.package_command())
if uuid:
self.target_uuid = uuid.rstrip()
self.save()
return ret
def stage(self) -> int:
""" Stage the artifacst from wherever they are (unpacking and converting if needed)"""
if not hasattr(self,'stage_commands'):
return 0
returns = []
for command in self.stage_commands: #type: ignore
ret, out, err, _ = self.runCmd(command, search=False)
returns.append(ret)
return all(ret > 0 for ret in returns)
def copy(self, skip=False) -> int:
# move or unpack if necessary
log.info("Executing staging commands")
if (stage := self.stage() > 0):
raise Exception(stage)
if not skip:
log.info("Copying files to output directory")
ret, out, err, _ = self.runCmd(self.copy_command(), search=False)
return ret
log.info(f"Build complete! Output available in {self.outdir}/")
return 0
def runCmd(self, command: CMD_PARAM_T, search: bool = True) -> Tuple[int, Union[bytes,None], Union[bytes,None], Union[str,None]]:
prepared, _ = self.prepare_command(command)
log.info(f"Running command: {' '.join(prepared)}")
kwargs = {
"stderr": subprocess.PIPE,
"stdout": subprocess.PIPE
}
if debug: del kwargs["stderr"]
with subprocess.Popen(prepared, **kwargs) as p:
uuid = None
# @TODO implement this as a callback?
if search:
for _, line in enumerate(p.stdout): # type: ignore
ln = line.decode()
if ln.startswith("UUID: "):
uuid = ln.split(" ")[-1]
log.debug(f"found uuid: {uuid}")
out, err = p.communicate()
res = p.wait(), out, err, uuid
if res[0] > 0:
log.error(f"Problem while executing command: '{prepared}'")
if search and not res[3]:
log.error("UUID not found in stdout. Dumping stdout and stderr")
self.log_subprocess(res)
return res
def prepare_command(self, command_list: CMD_PARAM_T) -> Tuple[List[str],List[None]]:
"""
Commands may be a callable, which should be a lambda to be evaluated at
preparation time with available locals. This can be used to, among
other things, perform lazy evaluations of f-strings which have values
not available at assignment time. e.g., filling in a second command
with a value extracted from the previous step or command.
"""
r = []
return r, [r.append(c()) if (callable(c) and c.__name__ == '<lambda>') else r.append(str(c)) for c in command_list]
def log_subprocess(self, result: Tuple[int, Union[bytes, None], Union[bytes, None], Union[str, None]]):
def log_lines(title, lines):
log.info(f"====={title}=====")
log.info(lines.decode())
log.info(f"Command return code: {result[0]}")
stdout = result[1]
stderr = result[2]
if stdout:
log_lines("Command STDOUT", stdout)
if stderr:
log_lines("Command STDERR", stderr)
def fix_ks(self):
cmd: CMD_PARAM_T = ["sed", "-i", f"s,$basearch,{self.architecture.name},", str(self.kickstart_path)]
self.runCmd(cmd, search=False)
def render_kubernetes_job(self):
commands = [self.build_command(), self.package_command(), self.copy_command()]
if not self.job_template:
return None
template = self.job_template.render(
architecture=self.architecture.name,
backoffLimit=4,
buildTime=BUILDTIME.strftime("%s"),
command=commands,
imageName="ghcr.io/rockylinux/sig-core-toolkit:latest",
jobname="buildimage",
namespace="empanadas",
major=major,
restartPolicy="Never",
)
return template
def save(self):
with open(self.metadata, "w") as f:
try:
o = { name: getattr(self, name) for name in ["base_uuid", "target_uuid"] }
log.debug(o)
json.dump(o, f)
except AttributeError as e:
log.error("Couldn't find attribute in object. Something is probably wrong", e)
except Exception as e:
log.exception(e)
finally:
f.flush()
 def run():
     try:
@@ -459,28 +65,51 @@ def run():
         log.exception(e)
         exit(2)

-    file_loader = FileSystemLoader(f"{_rootdir}/templates")
-    tmplenv = Environment(loader=file_loader)
-    tdl_template = tmplenv.get_template('icicle/tdl.xml.tmpl')
-
     arches = rlvars['allowed_arches'] if results.kube else [platform.uname().machine]

     for architecture in arches:
+        if results.backend == "kiwi":
+            backend = KiwiBackend()
+        else:
+            backend = ImageFactoryBackend(
+                kickstart_dir="kickstart" if results.kickstartdir else "os",
+                kickstart_repo=rlvars['livemap']['git_repo']
+            )
         IB = ImageBuild(
             architecture=Architecture.from_version(architecture, rlvars['revision']),
-            cli_args=results,
             debug=results.debug,
             image_type=results.type,
             release=results.release if results.release else 0,
-            template=tdl_template,
             variant=results.variant,
+            build_time=datetime.datetime.utcnow(),
+            backend=backend,
+            log=log,
         )
-        if results.kube:
-            IB.job_template = tmplenv.get_template('kube/Job.tmpl')
-            #commands = IB.kube_commands()
-            print(IB.render_kubernetes_job())
-        else:
-            ret = IB.build()
-            ret = IB.package()
-            ret = IB.copy()
+        if results.kube:
+            # commands = IB.kube_commands()
+            print(IB.render_kubernetes_job())
+            sys.exit(0)
+
+        skip_stages = results.skip.split(',') if results.skip else []
+        stages = ["prepare", "build", "clean", "stage"]
+        for i, stage in enumerate(stages):
+            skip_stage = stage in skip_stages
+            log.info(f"Stage {i} - {stage}{' SKIP' if skip_stage else ''}")
+            if skip_stage:
+                continue
+            method = getattr(IB.backend, stage)
+            if callable(method):
+                method()
+            else:
+                log.fatal(f"Unable to execute {stage}")
+
+        if 'upload' in skip_stages:
+            return
+
+        log.info("Final stage - Upload")
+        IB.upload(skip='upload' in skip_stages)
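
The new run() drives the backend through named stages and honors --skip as a comma-separated list, dispatching by method name. A condensed, runnable sketch of that loop (FakeBackend and the skip string are invented; logging is trimmed):

class FakeBackend:
    """Hypothetical backend; each stage just reports itself."""
    def prepare(self): print("prepare")
    def build(self):   print("build")
    def clean(self):   print("clean")
    def stage(self):   print("stage")

skip = "clean,upload"                      # what --skip would carry
skip_stages = skip.split(',') if skip else []

backend = FakeBackend()
for i, stage in enumerate(["prepare", "build", "clean", "stage"]):
    if stage in skip_stages:
        print(f"Stage {i} - {stage} SKIP")
        continue
    getattr(backend, stage)()              # same getattr dispatch as run()

if 'upload' not in skip_stages:
    print("Final stage - Upload")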

View File

@@ -2,8 +2,7 @@
 import argparse

-from empanadas.common import *
-from empanadas.util import Checks
+from empanadas.common import config, rldict
 from empanadas.util import IsoBuild

 parser = argparse.ArgumentParser(description="ISO Compose")
@@ -29,5 +28,6 @@ a = IsoBuild(
     logger=results.logger,
 )

 def run():
     a.run()

View File

@@ -33,6 +33,7 @@ parser.add_argument('--logger', type=str)
 parser.add_argument('--disable-gpg-check', action='store_false')
 parser.add_argument('--disable-repo-gpg-check', action='store_false')
 parser.add_argument('--clean-old-packages', action='store_true')
+parser.add_argument('--use-staging', action='store_true')

 # Parse them
 results = parser.parse_args()
@@ -64,6 +65,7 @@ a = RepoSync(
     gpg_check=results.disable_gpg_check,
     repo_gpg_check=results.disable_repo_gpg_check,
     reposync_clean_old=results.clean_old_packages,
+    use_staging=results.use_staging,
 )

 def run():


@@ -22,7 +22,7 @@ lorax --product="${PRODUCT}" \
     --isfinal \
 {%- endif %}
 {%- for repo in repos %}
-    --source={{ repo.url }} \
+    --source='{{ repo.url }}' \
 {%- endfor %}
 {%- if squashfs_only %}
     --squashfs-only \
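The quoting change above matters because these repo URLs can carry a literal $basearch; unquoted, the shell would expand it to an empty string before lorax ever sees the URL. A small Python illustration using shlex.quote (the URL is a placeholder):

import shlex

url = "https://example.invalid/repo/BaseOS/$basearch/os"
print(f"--source={url}")               # the shell would expand $basearch here
print(f"--source={shlex.quote(url)}")  # --source='https://example.invalid/repo/BaseOS/$basearch/os'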


@@ -0,0 +1,162 @@
# KIWI - Build configuration file
#
# Below all configuration parameters available to control
# KIWI's build process are listed as comments. The values
# used here provide the default values applied by KIWI if
# no other information is specified.
#
# To make any of the below effective, please uncomment the
# respective section(s) and adapt the parameters according
# to your needs
#
# Setup access to security keys
#credentials:
# # Specify private key(s) used for signing operations
# - verification_metadata_signing_key_file: /path/to/private.pem
# Setup access options for the Open BuildService
#obs:
# # Specify the URL of the Open BuildService download server
# - download_url: http://download.opensuse.org/repositories
# # Specify if the BuildService download server is public or private.
# # This information is used to verify if the request to populate
# # the repositories via the imageinclude attribute is possible
# - public: true
# Setup behaviour of the kiwi result bundle command
#bundle:
# # Specify if the bundle tarball should contain compressed results.
# # Note: Already compressed result information will not be touched.
# # Build results that generate an encrypted filesystem, i.e.
# # luks setup, will not be compressed. The intention for result compression
# # is to produce a smaller representation of the original. Encrypted data
# # generally grows when an attempt is made to compress the data. This is
# # due to the nature of compression algorithms. Therefore this setting is
# # ignored when encryption is enabled.
# - compress: false
# # Specify if the image build result and bundle should contain
# # a .changes file. The .changes file contains the package changelog
# # information from all packages installed into the image.
# - has_package_changes: false
# Setup behaviour of XZ compressor
#xz:
# # Specify options used in any xz compression call
# - options: '--threads=0'
# Setup process parameters for container image creation
#container:
# # Specify compression for container images
# # Possible values are true, false, xz or none.
# - compress: true
# Setup process parameters for ISO image creation
#iso:
# # Specify tool category which should be used to build iso images
# # Possible values are: xorriso
# - tool_category: xorriso
# Setup process parameters for OCI toolchain
#oci:
# # Specify OCI archive tool which should be used on creation of
# # container archives for OCI compliant images, e.g. docker
# # Possible values are umoci and buildah
# - archive_tool: buildah
# Specify build constraints that apply during the image build
# process. If one or more constraints are violated the build exits
# with an appropriate error message.
#build_constraints:
# # Maximum result image size. The value can be specified in
# # bytes or it can be specified with m=MB or g=GB. The constraint
# # is checked prior to the result bundle creation
# - max_size: 700m
# Setup process parameters for partition mapping
mapper:
  # Specify tool to use for creating partition maps
  # Possible values are: kpartx and partx
  - part_mapper: {{ "partx" if architecture in ["s390x"] else "kpartx" }}
# Setup process parameters to handle runtime checks
#runtime_checks:
# # Specify list of runtime checks to disable
# - disable:
# # verify that the host has the required container tools installed
# - check_container_tool_chain_installed
# # verify that there are repositories configured
# - check_repositories_configured
# # verify that the URL for imageinclude repos is accessible
# - check_image_include_repos_publicly_resolvable
# # verify secure boot setup disabled for overlay configured disk images
# - check_efi_mode_for_disk_overlay_correctly_setup
# # verify for legacy kiwi boot images that they exist on the host
# - check_boot_description_exists
# # verify if kiwi initrd_system was set if a boot attribute exists
# - check_initrd_selection_required
# # verify for legacy kiwi boot images that the same kernel is used
# - check_consistent_kernel_in_boot_and_system_image
# # check for reserved label names used in LVM setup
# - check_volume_setup_defines_reserved_labels
# # verify only one full size volume is specified for LVM images
# - check_volume_setup_defines_multiple_fullsize_volumes
# # verify no / volume setup is setup but the @root volume is used
# - check_volume_setup_has_no_root_definition
# # verify if volume label is really used with a volume setup
# - check_volume_label_used_with_lvm
# # verify that there is a xen domain setup for xen images
# - check_xen_uniquely_setup_as_server_or_guest
# # verify mediacheck is installed for ISO images that requests it
# - check_mediacheck_installed
# # verify dracut-kiwi-live is installed for ISO images
# - check_dracut_module_for_live_iso_in_package_list
# # verify dracut-kiwi-overlay is installed for overlay disk images
# - check_dracut_module_for_disk_overlay_in_package_list
# # verify dracut-kiwi-repart is installed for OEM disk images
# - check_dracut_module_for_disk_oem_in_package_list
# # verify dracut-kiwi-oem-dump is installed for OEM install images
# - check_dracut_module_for_oem_install_in_package_list
# # verify configured firmware is compatible with host architecture
# - check_architecture_supports_iso_firmware_setup
# # verify WSL naming conventions
# - check_appx_naming_conventions_valid
# # check kiwi dracut modules compatible with kiwi builder
# - check_dracut_module_versions_compatible_to_kiwi
# # check for unresolved include statements in the XML description
# - check_include_references_unresolvable
# # validate options passed to cryptsetup via luksformat element
# - check_luksformat_options_valid
# # check devicepersistency compatible with partition table type
# - check_partuuid_persistency_type_used_with_mbr
# # check efifatimagesize does not exceed the max El Torito load size
# - check_efi_fat_image_has_correct_size
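The only section active in this file is mapper, and its part_mapper value is a Jinja expression, so s390x images use partx while every other architecture uses kpartx. A quick sketch of how that one line renders (this exercises just the expression, not the whole file):

from jinja2 import Template

tmpl = Template('- part_mapper: {{ "partx" if architecture in ["s390x"] else "kpartx" }}')
print(tmpl.render(architecture="s390x"))   # - part_mapper: partx
print(tmpl.render(architecture="x86_64"))  # - part_mapper: kpartx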


@@ -62,7 +62,9 @@ class RepoSync:
             fpsync: bool = False,
             logger=None,
             log_level='INFO',
+            use_staging: bool = False,
     ):
         self.nofail = nofail
         self.dryrun = dryrun
         self.fullrun = fullrun

@@ -80,11 +82,14 @@
         # This makes it so every repo is synced at the same time.
         # This is EXTREMELY dangerous.
         self.just_pull_everything = just_pull_everything
+        # Use staging url instead of pulling from peridot (or, for EL8)
+        self.use_staging = use_staging
         # Relevant config items
         self.major_version = major
         self.date_stamp = config['date_stamp']
         self.timestamp = time.time()
         self.repo_base_url = config['repo_base_url']
+        self.staging_base_url = config['staging_base_url']
         self.compose_root = config['compose_root']
         self.compose_base = config['compose_root'] + "/" + major
         self.profile = rlvars['profile']

@@ -102,6 +107,7 @@
         self.project_id = rlvars['project_id']
         self.repo_renames = rlvars['renames']
         self.repos = rlvars['all_repos']
+        self.extra_repos = rlvars['extra_repos']
         self.multilib = rlvars['provide_multilib']
         self.repo = repo
         self.extra_files = rlvars['extra_files']

@@ -270,7 +276,9 @@
             self.gpg_check,
             self.repo_gpg_check,
             self.tmplenv,
-            self.log
+            self.log,
+            staging_base_url=self.staging_base_url,
+            use_staging=self.use_staging,
         )

         if self.dryrun:

@@ -560,7 +568,7 @@
             #print(entry_name_list)
             for pod in entry_name_list:
-                podman_cmd_entry = '{} run -d -it -v "{}:{}" -v "{}:{}:z" -v "{}:{}" --name {} --entrypoint {}/{} {}'.format(
+                podman_cmd_entry = '{} run -d -it --security-opt label=disable -v "{}:{}" -v "{}:{}" -v "{}:{}" --name {} --entrypoint {}/{} {}'.format(
                     cmd,
                     self.compose_root,
                     self.compose_root,

@@ -714,7 +722,7 @@
         self.log.info('Spawning pods for %s' % repo)
         for pod in repoclosure_entry_name_list:
-            podman_cmd_entry = '{} run -d -it -v "{}:{}" -v "{}:{}:z" -v "{}:{}" --name {} --entrypoint {}/{} {}'.format(
+            podman_cmd_entry = '{} run -d -it --security-opt label=disable -v "{}:{}" -v "{}:{}" -v "{}:{}" --name {} --entrypoint {}/{} {}'.format(
                 cmd,
                 self.compose_root,
                 self.compose_root,

@@ -1446,7 +1454,8 @@
             self.gpg_check,
             self.repo_gpg_check,
             self.tmplenv,
-            self.log
+            self.log,
+            staging_base_url=self.staging_base_url,
         )

@@ -1509,7 +1518,7 @@
         self.log.info('Spawning pods for %s' % repo)
         for pod in repoclosure_entry_name_list:
-            podman_cmd_entry = '{} run -d -it -v "{}:{}" -v "{}:{}:z" -v "{}:{}" --name {} --entrypoint {}/{} {}'.format(
+            podman_cmd_entry = '{} run -d -it --security-opt label=disable -v "{}:{}" -v "{}:{}" -v "{}:{}" --name {} --entrypoint {}/{} {}'.format(
                 cmd,
                 self.compose_root,
                 self.compose_root,

@@ -2045,7 +2054,7 @@ class SigRepoSync:
             #print(entry_name_list)
             for pod in entry_name_list:
-                podman_cmd_entry = '{} run -d -it -v "{}:{}" -v "{}:{}:z" -v "{}:{}" --name {} --entrypoint {}/{} {}'.format(
+                podman_cmd_entry = '{} run -d -it --security-opt label=disable -v "{}:{}" -v "{}:{}" -v "{}:{}" --name {} --entrypoint {}/{} {}'.format(
                     cmd,
                     self.compose_root,
                     self.compose_root,


@@ -110,6 +110,7 @@ class IsoBuild:
         self.revision = rlvars['revision']
         self.rclvl = rlvars['rclvl']
         self.repos = rlvars['iso_map']['lorax']['repos']
+        self.extra_repos = rlvars['extra_repos']
         self.repo_base_url = config['repo_base_url']
         self.project_id = rlvars['project_id']
         self.structure = rlvars['structure']

@@ -202,7 +203,8 @@
             self.current_arch,
             self.compose_latest_sync,
             self.compose_dir_is_here,
-            self.hashed
+            self.hashed,
+            self.extra_repos
         )

         self.log.info(self.revision_level)

@@ -268,6 +270,8 @@
             dist=self.disttag,
             repos=self.repolist,
             user_agent='{{ user_agent }}',
+            compose_dir_is_here=self.compose_dir_is_here,
+            compose_dir=self.compose_root,
         )

         mock_sh_template_output = mock_sh_template.render(

@@ -1022,7 +1026,7 @@
             checksum_list.append(latestname)

         for pod in entry_name_list:
-            podman_cmd_entry = '{} run -d -it -v "{}:{}" -v "{}:{}" --name {} --entrypoint {}/{} {}'.format(
+            podman_cmd_entry = '{} run -d -it --security-opt label=disable -v "{}:{}" -v "{}:{}" --name {} --entrypoint {}/{} {}'.format(
                 cmd,
                 self.compose_root,
                 self.compose_root,


@@ -446,7 +446,9 @@ class Shared:
             repo_gpg_check,
             templates,
             logger,
-            dest_path='/var/tmp'
+            dest_path='/var/tmp',
+            staging_base_url='https://dl.rockylinux.org/stg',
+            use_staging=False,
     ) -> str:
         """
         Generates the necessary repo conf file for the operation. This repo

@@ -475,9 +477,23 @@
         if not os.path.exists(dest_path):
             os.makedirs(dest_path, exist_ok=True)
         config_file = open(fname, "w+")

         repolist = []
         for repo in repos:
+            if use_staging:
+                constructed_url = '{}/{}/{}/$basearch/os'.format(
+                    staging_base_url,
+                    major_version,
+                    repo,
+                )
+
+                constructed_url_src = '{}/{}/{}/source/tree'.format(
+                    staging_base_url,
+                    major_version,
+                    repo,
+                )
+            else:
-            constructed_url = '{}/{}/repo/{}{}/$basearch'.format(
+                constructed_url = '{}/{}/repo/{}{}/$basearch'.format(
                 repo_base_url,
                 project_id,
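For comparison, the two branches above produce differently shaped URLs. A worked example with placeholder values (the Peridot base URL and project id here are illustrative, and the empty string passed to the fourth placeholder stands in for whatever suffix the caller normally supplies):

staging_base_url = 'https://dl.rockylinux.org/stg'
repo_base_url = 'https://yumrepofs.build.resf.org'  # placeholder
project_id = 'abc123'                               # placeholder
major_version = '9'
repo = 'BaseOS'

print('{}/{}/{}/$basearch/os'.format(staging_base_url, major_version, repo))
# https://dl.rockylinux.org/stg/9/BaseOS/$basearch/os
print('{}/{}/repo/{}{}/$basearch'.format(repo_base_url, project_id, repo, ''))
# https://yumrepofs.build.resf.org/abc123/repo/BaseOS/$basearch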
@@ -903,7 +919,9 @@
             compose_latest_sync,
             compose_dir_is_here: bool = False,
             hashed: bool = False,
-            extra_repos: list = None
+            extra_repos: list = None,
+            staging_base_url: str = 'https://dl.rockylinux.org/stg',
+            use_staging: bool = False,
     ):
         """
         Builds the repo dictionary

@@ -938,7 +956,9 @@
             repolist.append(repodata)

         if extra_repos:
-            repolist.append(repo for repo in Shared.parse_extra_repos(extra_repos))
+            extras = Shared.parse_extra_repos(extra_repos)
+            for repo in extras:
+                repolist.append(repo)

         return repolist

@@ -947,6 +967,8 @@
         # must be in format URL[,PRIORITY]
         result = []
         for idx, candidate in enumerate(extra_repos):
+            if isinstance(candidate, dict):
+                url, priority = candidate['url'], candidate.get('priority', None)
             url, priority = candidate.split(',')
             if not priority:
                 priority = 100
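As rendered above, the new isinstance branch falls straight through to the string split, which would clobber a dict candidate's values; the complete function presumably carries an else, so treat this sketch as an assumption about the intended shape rather than the repository's exact code (the returned dict layout is also illustrative):

def parse_extra_repos(extra_repos: list) -> list:
    # each candidate is either {'url': ..., 'priority': ...} or 'URL[,PRIORITY]'
    result = []
    for candidate in extra_repos:
        if isinstance(candidate, dict):
            url, priority = candidate['url'], candidate.get('priority', None)
        else:
            url, priority = (candidate.split(',') + [None])[:2]
        if not priority:
            priority = 100
        result.append({'url': url, 'priority': int(priority)})
    return result

print(parse_extra_repos(['https://example.invalid/extras,90', {'url': 'https://example.invalid/plus'}]))
# [{'url': 'https://example.invalid/extras', 'priority': 90}, {'url': 'https://example.invalid/plus', 'priority': 100}]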


@@ -39,6 +39,16 @@ generate-compose = "empanadas.scripts.generate_compose:run"
 peridot-repoclosure = "empanadas.scripts.peridot_repoclosure:run"
 refresh-all-treeinfo = "empanadas.scripts.refresh_all_treeinfo:run"

+[tool.pylint.main]
+init-hook = """
+try:
+    import pylint_venv
+except ImportError:
+    pass
+else:
+    pylint_venv.inithook()
+"""

 [build-system]
 requires = ["poetry-core>=1.0.0"]
 build-backend = "poetry.core.masonry.api"

iso/empanadas/tox.ini Normal file

@@ -0,0 +1,2 @@
[pycodestyle]
max-line-length = 160


@@ -47,7 +47,6 @@ class common:
         'CRB': ['aarch64', 'ppc64le', 's390x', 'x86_64'],
         'HighAvailability': ['aarch64', 'ppc64le', 's390x', 'x86_64'],
         'NFV': ['x86_64'],
-        'ResilientStorage': ['ppc64le', 's390x', 'x86_64'],
         'RT': ['x86_64'],
         'SAP': ['ppc64le', 's390x', 'x86_64'],
         'SAPHANA': ['ppc64le', 'x86_64']


@@ -1,6 +1,6 @@
 # To be sourced by scripts to use

-REPO=("BaseOS" "AppStream" "CRB" "HighAvailability" "ResilientStorage" "NFV" "RT" "SAP" "SAPHANA")
+REPO=("BaseOS" "AppStream" "CRB" "HighAvailability" "NFV" "RT" "SAP" "SAPHANA")
 ARCH=("aarch64" "ppc64le" "s390x" "x86_64")

 MAJOR="10"


@@ -9,6 +9,12 @@ else
     exit 1
 fi

+if [ -f /usr/bin/dnf4 ]; then
+    SAFEDNF=/usr/bin/dnf4
+else
+    SAFEDNF=/usr/bin/dnf
+fi
+
 export RLVER=$MAJOR
 source common

@@ -20,7 +26,7 @@ eln_repo_url="${ELN_KOJI_REPO}/${tag_template}/latest"
 pushd "${tmpdir}" || { echo "Could not change directory"; exit 1; }
 for y in "${ARCH[@]}"; do
-    repodatas=( $(dnf reposync --repofrompath ${tag_template},${eln_repo_url}/${y} --download-metadata --repoid=${tag_template} -p ${tag_template}/${y} --forcearch ${y} --norepopath --remote-time --assumeyes -u | grep repodata) )
+    repodatas=( $($SAFEDNF reposync --repofrompath ${tag_template},${eln_repo_url}/${y} --download-metadata --repoid=${tag_template} -p ${tag_template}/${y} --forcearch ${y} --norepopath --remote-time --assumeyes -u | grep repodata) )
     mkdir -p "${tag_template}/${y}/repodata"
     pushd "${tag_template}/${y}/repodata" || { echo "Could not change directory"; exit 1; }
     for z in "${repodatas[@]}"; do


@@ -9,6 +9,12 @@ else
     exit 1
 fi

+if [ -f /usr/bin/dnf4 ]; then
+    SAFEDNF=/usr/bin/dnf4
+else
+    SAFEDNF=/usr/bin/dnf
+fi
+
 export RLVER=$MAJOR
 source common

@@ -20,7 +26,7 @@ stream_repo_url="${STREAM_KOJI_REPO}/${tag_template}/latest"
 pushd "${tmpdir}" || { echo "Could not change directory"; exit 1; }
 for y in "${ARCH[@]}"; do
-    repodatas=( $(dnf reposync --repofrompath ${tag_template},${stream_repo_url}/${y} --download-metadata --repoid=${tag_template} -p ${tag_template}/${y} --forcearch ${y} --norepopath --remote-time --assumeyes -u | grep repodata) )
+    repodatas=( $($SAFEDNF reposync --repofrompath ${tag_template},${stream_repo_url}/${y} --download-metadata --repoid=${tag_template} -p ${tag_template}/${y} --forcearch ${y} --norepopath --remote-time --assumeyes -u | grep repodata) )
     mkdir -p "${tag_template}/${y}/repodata"
     pushd "${tag_template}/${y}/repodata" || { echo "Could not change directory"; exit 1; }
     for z in "${repodatas[@]}"; do


@@ -10,6 +10,12 @@ else
     exit 1
 fi

+if [ -f /usr/bin/dnf4 ]; then
+    SAFEDNF=/usr/bin/dnf4
+else
+    SAFEDNF=/usr/bin/dnf
+fi
+
 # Verify the date format
 echo "${DATE}" | grep -Eq '[0-9]+\.[0-9]'
 grep_val=$?


@@ -9,6 +9,12 @@ else
     exit 1
 fi

+if [ -f /usr/bin/dnf4 ]; then
+    SAFEDNF=/usr/bin/dnf4
+else
+    SAFEDNF=/usr/bin/dnf
+fi
+
 export RLVER=$MAJOR
 source common


@@ -9,6 +9,12 @@ else
     exit 1
 fi

+if [ -f /usr/bin/dnf4 ]; then
+    SAFEDNF=/usr/bin/dnf4
+else
+    SAFEDNF=/usr/bin/dnf
+fi
+
 export RLVER=$MAJOR
 source common

@@ -21,7 +27,7 @@ stream_repo_url="https://kojidev.rockylinux.org/kojifiles/repos/${tag_template}/
 pushd "${tmpdir}" || { echo "Could not change directory"; exit 1; }
 for y in x86_64 aarch64 i386; do
-    repodatas=( $(dnf reposync --repofrompath ${str_template},${stream_repo_url}/${y} --download-metadata --repoid=${str_template} -p ${str_template}/${y} --forcearch ${y} --norepopath --remote-time --assumeyes -u | grep repodata) )
+    repodatas=( $($SAFEDNF reposync --repofrompath ${str_template},${stream_repo_url}/${y} --download-metadata --repoid=${str_template} -p ${str_template}/${y} --forcearch ${y} --norepopath --remote-time --assumeyes -u | grep repodata) )
     mkdir -p "${str_template}/${y}/repodata"
     pushd "${str_template}/${y}/repodata" || { echo "Could not change directory"; exit 1; }
     for z in "${repodatas[@]}"; do


@@ -10,6 +10,12 @@ else
     exit 1
 fi

+if [ -f /usr/bin/dnf4 ]; then
+    SAFEDNF=/usr/bin/dnf4
+else
+    SAFEDNF=/usr/bin/dnf
+fi
+
 # Verify the date format
 echo "${DATE}" | grep -Eq '[0-9]+\.[0-9]'
 grep_val=$?

@@ -31,7 +37,7 @@ pushd "${tmpdir}" || { echo "Could not change directory"; exit 1; }
 for x in "${REPO[@]}"; do
     echo "Working on ${x}"
     for y in "${ARCH[@]}"; do
-        repodatas=( $(dnf reposync --repofrompath ${x},${stream_compose_url}/${x}/${y}/os --download-metadata --repoid=${x} -p ${x}/${y} --forcearch ${y} --norepopath --remote-time --assumeyes -u | grep repodata) )
+        repodatas=( $($SAFEDNF reposync --repofrompath ${x},${stream_compose_url}/${x}/${y}/os --download-metadata --repoid=${x} -p ${x}/${y} --forcearch ${y} --norepopath --remote-time --assumeyes -u | grep repodata) )
         mkdir -p "${x}/${y}/repodata"
         pushd "${x}/${y}/repodata" || { echo "Could not change directory"; exit 1; }
         for z in "${repodatas[@]}"; do


@@ -10,6 +10,12 @@ else
     exit 1
 fi

+if [ -f /usr/bin/dnf4 ]; then
+    SAFEDNF=/usr/bin/dnf4
+else
+    SAFEDNF=/usr/bin/dnf
+fi
+
 # Verify the date format
 echo "${DATE}" | grep -Eq '[0-9]+\.[0-9]'
 grep_val=$?

@@ -27,11 +33,17 @@ current=$(pwd)
 tmpdir=$(mktemp -d)
 stream_compose_url="https://composes.stream.centos.org/stream-${MAJOR}/production/CentOS-Stream-${MAJOR}-${DATE}/compose"

+if [ -f /usr/bin/dnf4 ]; then
+    SAFEDNF=/usr/bin/dnf4
+else
+    SAFEDNF=/usr/bin/dnf
+fi
+
 pushd "${tmpdir}" || { echo "Could not change directory"; exit 1; }
 for x in "${REPO[@]}"; do
     echo "Working on ${x}"
     for y in "${ARCH[@]}"; do
-        repodatas=( $(dnf reposync --repofrompath ${x},${stream_compose_url}/${x}/${y}/os --download-metadata --repoid=${x} -p ${x}/${y} --forcearch ${y} --norepopath --remote-time --assumeyes -u | grep repodata) )
+        repodatas=( $($SAFEDNF reposync --repofrompath ${x},${stream_compose_url}/${x}/${y}/os --download-metadata --repoid=${x} -p ${x}/${y} --forcearch ${y} --norepopath --remote-time --assumeyes -u | grep repodata) )
         mkdir -p "${x}/${y}/repodata"
         pushd "${x}/${y}/repodata" || { echo "Could not change directory"; exit 1; }
         for z in "${repodatas[@]}"; do


@@ -9,6 +9,12 @@ else
     exit 1
 fi

+if [ -f /usr/bin/dnf4 ]; then
+    SAFEDNF=/usr/bin/dnf4
+else
+    SAFEDNF=/usr/bin/dnf
+fi
+
 export RLVER="${MAJOR}"
 source common

@@ -20,7 +26,7 @@ stream_repo_url="${STREAM_KOJI_REPO}/${tag_template}/latest"
 pushd "${tmpdir}" || { echo "Could not change directory"; exit 1; }
 for y in "${ARCH[@]}"; do
-    repodatas=( $(dnf reposync --repofrompath ${tag_template},${stream_repo_url}/${y} --download-metadata --repoid=${tag_template} -p ${tag_template}/${y} --forcearch ${y} --norepopath --remote-time --assumeyes -u | grep repodata) )
+    repodatas=( $($SAFEDNF reposync --repofrompath ${tag_template},${stream_repo_url}/${y} --download-metadata --repoid=${tag_template} -p ${tag_template}/${y} --forcearch ${y} --norepopath --remote-time --assumeyes -u | grep repodata) )
     mkdir -p "${tag_template}/${y}/repodata"
     pushd "${tag_template}/${y}/repodata" || { echo "Could not change directory"; exit 1; }
     for z in "${repodatas[@]}"; do


@@ -13,6 +13,12 @@ if [ -n "$2" ] && [[ "$2" == "lh" ]]; then
     export LH="lh"
 fi

+if [ -f /usr/bin/dnf4 ]; then
+    SAFEDNF=/usr/bin/dnf4
+else
+    SAFEDNF=/usr/bin/dnf
+fi
+
 export RLVER="${MAJOR}"
 source common

@@ -24,7 +30,7 @@ peridot_repo_url="${PERIDOT_REPO}/${PERIDOT_PROJECT_ID}/repo/${tag_template}"
 pushd "${tmpdir}" || { echo "Could not change directory"; exit 1; }
 for y in "${ARCH[@]}"; do
-    repodatas=( $(dnf reposync --repofrompath ${tag_template},${peridot_repo_url}/${y} --download-metadata --repoid=${tag_template} -p ${tag_template}/${y} --forcearch ${y} --norepopath --remote-time --assumeyes -u | grep repodata) )
+    repodatas=( $($SAFEDNF reposync --repofrompath ${tag_template},${peridot_repo_url}/${y} --download-metadata --repoid=${tag_template} -p ${tag_template}/${y} --forcearch ${y} --norepopath --remote-time --assumeyes -u | grep repodata) )
     mkdir -p "${tag_template}/${y}/repodata"
     pushd "${tag_template}/${y}/repodata" || { echo "Could not change directory"; exit 1; }
     for z in "${repodatas[@]}"; do


@@ -18,6 +18,12 @@ if [ "$grep_val" -ne 0 ]; then
     echo "Date format incorrect. You must use: YYYYMMDD.X"
 fi

+if [ -f /usr/bin/dnf4 ]; then
+    SAFEDNF=/usr/bin/dnf4
+else
+    SAFEDNF=/usr/bin/dnf
+fi
+
 export RLVER="${MAJOR}"
 source common

@@ -30,7 +36,7 @@ pushd "${tmpdir}" || { echo "Could not change directory"; exit 1; }
 for x in "${REPO[@]}"; do
     echo "Working on ${x}"
     for y in "${ARCH[@]}"; do
-        repodatas=( $(dnf reposync --repofrompath ${x},${stream_compose_url}/${x}/${y}/os --download-metadata --repoid=${x} -p ${x}/${y} --forcearch ${y} --norepopath --remote-time --assumeyes -u | grep repodata) )
+        repodatas=( $($SAFEDNF reposync --repofrompath ${x},${stream_compose_url}/${x}/${y}/os --download-metadata --repoid=${x} -p ${x}/${y} --forcearch ${y} --norepopath --remote-time --assumeyes -u | grep repodata) )
         mkdir -p "${x}/${y}/repodata"
         pushd "${x}/${y}/repodata" || { echo "Could not change directory"; exit 1; }
         for z in "${repodatas[@]}"; do


@@ -18,6 +18,12 @@ if [ "$grep_val" -ne 0 ]; then
     echo "Date format incorrect. You must use: YYYYMMDD.X"
 fi

+if [ -f /usr/bin/dnf4 ]; then
+    SAFEDNF=/usr/bin/dnf4
+else
+    SAFEDNF=/usr/bin/dnf
+fi
+
 export RLVER="${MAJOR}"
 source common

@@ -31,7 +37,7 @@ pushd "${tmpdir}" || { echo "Could not change directory"; exit 1; }
 for x in "${REPO[@]}"; do
     echo "Working on ${x}"
     for y in "${ARCH[@]}"; do
-        repodatas=( $(dnf reposync --repofrompath ${x},${stream_compose_url}/${x}/${y}/os --download-metadata --repoid=${x} -p ${x}/${y} --forcearch ${y} --norepopath --remote-time --assumeyes -u | grep repodata) )
+        repodatas=( $($SAFEDNF reposync --repofrompath ${x},${stream_compose_url}/${x}/${y}/os --download-metadata --repoid=${x} -p ${x}/${y} --forcearch ${y} --norepopath --remote-time --assumeyes -u | grep repodata) )
         mkdir -p "${x}/${y}/repodata"
         pushd "${x}/${y}/repodata" || { echo "Could not change directory"; exit 1; }
         for z in "${repodatas[@]}"; do


@@ -12,6 +12,8 @@ IGNORES = [
     'insights-client',
     'lorax-templates-rhel',
     'shim',
+    'shim-unsigned-x64',
+    'shim-unsigned-aarch64',
     'redhat-cloud-client-configuration',
     'rhc',
     'rhc-worker-playbook',


@@ -20,6 +20,9 @@ REPOS = switcher.rlver(results.version,
 # Source packages we do not ship or are rocky branded
 IGNORES = [
     'insights-client',
+    'shim',
+    'shim-unsigned-x64',
+    'shim-unsigned-aarch64',
     'redhat-cloud-client-configuration',
     'rhc',
     'rhc-worker-playbook',


@@ -304,7 +304,7 @@ class IPAAudit:
         }
         print('User Information')
-        print('----------------------------------------')
+        print('------------------------------------------')
         for key, value in starter_user.items():
             if len(value) > 0:
                 print(f'{key: <16}{value}')

@@ -312,14 +312,54 @@
         if deep:
             group_list = [] if not user_results.get('memberof_group', None) else user_results['memberof_group']
-            IPAAudit.user_deep_list(api, name, group_list)
+            hbac_list = [] if not user_results.get('memberof_hbacrule', None) else user_results['memberof_hbacrule']
+            IPAAudit.user_deep_list(api, name, group_list, hbac_list)

     @staticmethod
     def group_pull(api, name, deep):
         """
         Gets requested rbac info
         """
-        print()
+        try:
+            group_results = IPAQuery.group_data(api, name)
+        except:
+            print(f'Could not find {name}', sys.stderr)
+            sys.exit(1)
+
+        group_name = '' if not group_results.get('cn', None) else group_results['cn'][0]
+        group_gidnum = '' if not group_results.get('gidnumber', None) else group_results['gidnumber'][0]
+        group_members_direct = [] if not group_results.get('member_user', None) else group_results['member_user']
+        group_members_indirect = [] if not group_results.get('memberindirect_user', None) else group_results['memberindirect_user']
+        group_members = list(group_members_direct) + list(group_members_indirect)
+        num_of_group_members = str(len(group_members))
+
+        group_hbacs_direct = [] if not group_results.get('memberof_hbacrule', None) else group_results['memberof_hbacrule']
+        group_hbacs_indirect = [] if not group_results.get('memberofindirect_hbacrule', None) else group_results['memberofindirect_hbacrule']
+        group_hbacs = list(group_hbacs_direct) + list(group_hbacs_indirect)
+        num_of_hbacs = str(len(group_hbacs))
+
+        group_sudo_direct = [] if not group_results.get('memberof_sudorule', None) else group_results['memberof_sudorule']
+        group_sudo_indirect = [] if not group_results.get('memberofindirect_sudorule', None) else group_results['memberofindirect_sudorule']
+        group_sudos = list(group_sudo_direct) + list(group_sudo_indirect)
+        num_of_sudos = str(len(group_sudos))
+
+        starter_group = {
+            'Group name': group_name,
+            'GID': group_gidnum,
+            'Number of Users': num_of_group_members,
+            'Number of HBAC Rules': num_of_hbacs,
+            'Number of SUDO Rules': num_of_sudos,
+        }
+
+        print('Group Information')
+        print('------------------------------------------')
+        for key, value in starter_group.items():
+            if len(value) > 0:
+                print(f'{key: <24}{value}')
+        print('')
+
+        if deep:
+            IPAAudit.group_deep_list(api, name, group_members, group_hbacs, group_sudos)

     @staticmethod
     def hbac_pull(api, name, deep):

@@ -463,14 +503,13 @@
             print(f'{key: <24}{value}')

     @staticmethod
-    def user_deep_list(api, user, groups):
+    def user_deep_list(api, user, groups, hbacs):
         """
         Does a recursive dig on a user
         """
-        hbac_rule_list = []
+        hbac_rule_list = list(hbacs)
         hbac_rule_all_hosts = []
         host_list = []
-        hostgroup_list = []
         for group in groups:
             group_results = IPAQuery.group_data(api, group)
             hbac_list = [] if not group_results.get('memberof_hbacrule', None) else group_results['memberof_hbacrule']

@@ -481,12 +520,13 @@
         # TODO: Add HBAC list (including services)
         # TODO: Add RBAC list
-        hbac_hosts = []
+        hbac_host_dict = {}
         for hbac in hbac_rule_list:
+            hbac_hosts = []
             hbac_results = IPAQuery.hbac_data(api, hbac)
             hbac_host_list = [] if not hbac_results.get('memberhost_host', None) else hbac_results['memberhost_host']
             hbac_hostgroup_list = [] if not hbac_results.get('memberhost_hostgroup', None) else hbac_results['memberhost_hostgroup']
-            if hbac_results.get('servicecategory'):
+            if hbac_results.get('hostcategory'):
                 hbac_rule_all_hosts.append(hbac)

             for host in hbac_host_list:

@@ -497,19 +537,29 @@
                 host_list = [] if not hostgroup_data.get('member_host', None) else hostgroup_data['member_host']
                 hbac_hosts.extend(host_list)

-        new_hbac_hosts = sorted(set(hbac_hosts))
+            hbac_host_dict[hbac] = hbac_hosts
+        #new_hbac_hosts = sorted(set(hbac_hosts))

         print('User Has Access To These Hosts')
         print('------------------------------------------')
-        for hhost in new_hbac_hosts:
-            print(hhost)
         if len(hbac_rule_all_hosts) > 0:
             print('!! Notice: User has access to ALL hosts from the following rules:')
             hbac_rule_all_hosts = sorted(set(hbac_rule_all_hosts))
             for allrule in hbac_rule_all_hosts:
                 print(allrule)
+        else:
+            for hrule in hbac_host_dict:
+                print()
+                print(f'HBAC Rule: {hrule}')
+                print('==========================================')
+                for h in hbac_host_dict[hrule]:
+                    print(h)
+                if len(hbac_host_dict[hrule]) == 0:
+                    print('(No hosts set for this rule)')

     @staticmethod
-    def group_deep_list(api, group):
+    def group_deep_list(api, group, members, hbacs, sudos):
         """
         Does a recursive dig on a group
         """


@@ -3,19 +3,19 @@
 # Revision must always start with a major number
 case "${RLREL}" in
     stable)
-        REVISION=9.4
-        PREREV=9.3
+        REVISION=9.5
+        PREREV=9.4
         APPEND_TO_DIR="-RC1"
         ;;
     beta)
-        REVISION=9.5
-        PREREV=9.4
+        REVISION=9.6
+        PREREV=9.5
         APPEND_TO_DIR="-beta"
         COMPOSE_APPEND="-beta"
         ;;
     lh)
-        REVISION=9.5
-        PREREV=9.4
+        REVISION=9.6
+        PREREV=9.5
         APPEND_TO_DIR="-lookahead"
         COMPOSE_APPEND="-lookahead"
         ;;