diff --git a/.gitlab-ci.yml b/.disable.gitlab-ci.yml similarity index 100% rename from .gitlab-ci.yml rename to .disable.gitlab-ci.yml diff --git a/.github/workflows/imagefactory-image.yml b/.github/workflows/imagefactory-image.yml new file mode 100644 index 0000000..59ee1d8 --- /dev/null +++ b/.github/workflows/imagefactory-image.yml @@ -0,0 +1,47 @@ +--- +name: Build empanada images for imagefactory + +on: + push: + branches: [ $default-branch, "devel" ] + pull_request: + branches: [ $default-branch ] + workflow_dispatch: + +jobs: + buildx: + runs-on: + - ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v1 + # https://github.com/docker/setup-buildx-action + - name: Set up Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v1 + with: + install: true + + - name: Login to ghcr + if: github.event_name != 'pull_request' + uses: docker/login-action@v1 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and push + id: docker_build + uses: docker/build-push-action@v2 + with: + builder: ${{ steps.buildx.outputs.name }} + platforms: linux/amd64,linux/arm64,linux/s390x,linux/ppc64le + context: ./iso/empanadas + file: ./iso/empanadas/Containerfile.imagefactory + push: ${{ github.event_name != 'pull_request' }} + tags: ghcr.io/rocky-linux/empanadas-imagefactory:latest + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/.github/workflows/mix-empanadas.yml b/.github/workflows/mix-empanadas.yml index 2f408ca..9352219 100644 --- a/.github/workflows/mix-empanadas.yml +++ b/.github/workflows/mix-empanadas.yml @@ -1,9 +1,9 @@ --- -name: Build empanada container images +name: Build empanada container images for lorax on: push: - branches: [ $default-branch ] + branches: [ $default-branch, "devel" ] pull_request: branches: [ $default-branch ] workflow_dispatch: @@ -42,6 +42,6 @@ jobs: context: ./iso/empanadas file: ./iso/empanadas/Containerfile push: ${{ github.event_name != 'pull_request' }} - tags: ghcr.io/neilhanlon/sig-core-toolkit:latest + tags: ghcr.io/rocky-linux/sig-core-toolkit:latest cache-from: type=gha cache-to: type=gha,mode=max diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..6ff6e1d --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +.swp diff --git a/README.md b/README.md index 9495108..2b4f6cd 100644 --- a/README.md +++ b/README.md @@ -3,10 +3,9 @@ sig-core-toolkit Release Engineering toolkit for repeatable operations or functionality testing. -Currently mirrored at our [github](https://github.com/rocky-linux), -[Rocky Linux Git Service](https://git.rockylinux.org), and the -[RESF Git Service](https://git.resf.org). Changes either occur at the Rocky -Linux Git Service or RESF Git Service. +Currently mirrored at our [github](https://github.com/rocky-linux), and the +[RESF Git Service](https://git.resf.org). Changes will typically occur at the +RESF Git Service. What does this have? -------------------- @@ -14,10 +13,10 @@ What does this have? 
* analyze -> Analysis utilities (such as download stats) * chat -> mattermost related utilities * func -> (mostly defunct) testing scripts and tools to test base functionality -* iso -> ISO related utilities +* iso -> ISO, Compose, and Sync related utilities, primarily for Rocky Linux 9+ * live -> Live image related utilities * mangle -> Manglers and other misc stuff -* sync -> Sync tools, primarily for Rocky Linux 8 +* sync -> Sync tools, primarily for Rocky Linux 8 and will eventually be deprecated How can I help? --------------- @@ -28,7 +27,7 @@ when you make changes: * Have pre-commit installed * Have shellcheck installed * Shell Scripts: These must pass a shellcheck test! -* Python scripts: Try your best to follow PEP8 guidelines +* Python scripts: Try your best to follow PEP8 guidelines (even the best linters get things wrong) Your PR should be against the devel branch at all times. PR's against the main branch will be closed. diff --git a/func/ipa.sh b/func/ipa.sh new file mode 100644 index 0000000..19086c1 --- /dev/null +++ b/func/ipa.sh @@ -0,0 +1,59 @@ +#!/bin/bash +# Release Engineering Core Functionality Testing +# Louis Abel @nazunalika + +################################################################################ +# Settings and variables + +# Exits on any non-zero exit status - Disabled for now. +#set -e +# Undefined variables will cause an exit +set -u + +COMMON_EXPORTS='./common/exports.sh' +COMMON_IMPORTS='./common/imports.sh' +SELINUX=$(getenforce) + +# End +################################################################################ + +# shellcheck source=/dev/null disable=SC2015 +[ -f $COMMON_EXPORTS ] && source $COMMON_EXPORTS || { echo -e "\n[-] $(date): Variables cannot be sourced."; exit 1; } +# shellcheck source=/dev/null disable=SC2015 +[ -f $COMMON_IMPORTS ] && source $COMMON_IMPORTS || { echo -e "\n[-] $(date): Functions cannot be sourced."; exit 1; } +# Init log +# shellcheck disable=SC2015 +[ -e "$LOGFILE" ] && m_recycleLog || touch "$LOGFILE" +# SELinux check +if [ "$SELINUX" != "Enforcing" ]; then + echo -e "\n[-] $(date): SELinux is not enforcing." + exit 1 +fi + +r_log "internal" "Starting Release Engineering Core Tests" + +################################################################################ +# Script Work + +# Skip tests in a list - some tests are already -x, so it won't be an issue +if [ -e skip.list ]; then + r_log "internal" "Disabling tests" + # shellcheck disable=SC2162 + grep -E "^${RL_VER}" skip.list | while read line; do + # shellcheck disable=SC2086 + testFile="$(echo $line | cut -d '|' -f 2)" + r_log "internal" "SKIP ${testFile}" + chmod -x "${testFile}" + done + r_log "internal" "WARNING: Tests above were disabled." +fi + +# TODO: should we let $1 judge what directory is ran? 
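The skip list parsed just above expects one entry per line, keyed by the major release number and separated from the test path by a pipe. A minimal sketch of what such a file might contain (both paths here are purely illustrative):

```
# skip.list -- format: <RL_VER>|<path to test script>
8|./core/pkg_example/30-some-test.sh
9|./stacks/ipa/12-verify-ipa.sh
```

Entries whose first field matches $RL_VER are logged as SKIP and the referenced script has its executable bit removed, so it is effectively left out of the run.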
+# TODO: get some stacks and lib in there + +#r_processor <(/usr/bin/find ./core -type f | sort -t'/') +#r_processor <(/usr/bin/find ./lib -type f | sort -t'/') +r_processor <(/usr/bin/find ./stacks/ipa -type f | sort -t'/') + +r_log "internal" "Core Tests completed" +exit 0 diff --git a/func/stacks/ipa/00-ipa-pregame.sh b/func/stacks/ipa/00-ipa-pregame.sh old mode 100644 new mode 100755 diff --git a/func/stacks/ipa/10-install-ipa.sh b/func/stacks/ipa/10-install-ipa.sh old mode 100644 new mode 100755 index 0e5b029..901055e --- a/func/stacks/ipa/10-install-ipa.sh +++ b/func/stacks/ipa/10-install-ipa.sh @@ -11,4 +11,4 @@ if [ "$RL_VER" -eq 8 ]; then p_enableModule idm:DL1/{client,common,dns,server} fi -p_installPackageNormal ipa-server ipa-server-dns +p_installPackageNormal ipa-server ipa-server-dns expect diff --git a/func/stacks/ipa/11-configure-ipa.sh b/func/stacks/ipa/11-configure-ipa.sh old mode 100644 new mode 100755 diff --git a/func/stacks/ipa/12-verify-ipa.sh b/func/stacks/ipa/12-verify-ipa.sh old mode 100644 new mode 100755 diff --git a/func/stacks/ipa/20-ipa-user.sh b/func/stacks/ipa/20-ipa-user.sh old mode 100644 new mode 100755 index 9965a56..c0f5313 --- a/func/stacks/ipa/20-ipa-user.sh +++ b/func/stacks/ipa/20-ipa-user.sh @@ -13,42 +13,43 @@ kdestroy &> /dev/null klist 2>&1 | grep -E "(No credentials|Credentials cache .* not found)" &> /dev/null r_checkExitStatus $? -expect -f - < /dev/null r_checkExitStatus $? r_log "ipa" "Test adding a user" -userDetails="$(ipa user-add --first=test --last=user --random ipatestuser)" -echo "$userDetails" | grep -q 'Added user "ipatestuser"' -r_checkExitStatus $? +ipa user-add --first=test --last=user --random ipatestuser > /tmp/ipatestuser +grep -q 'Added user "ipatestuser"' /tmp/ipatestuser -echo "$userDetails" | grep -q 'First name: test' +ret_val=$? +if [ "$ret_val" -ne 0 ]; then + r_log "ipa" "User was not created, this is considered fatal" + r_checkExitStatus 1 + exit 1 +fi + +sed -i 's|^ ||g' /tmp/ipatestuser +grep -q 'First name: test' /tmp/ipatestuser r_checkExitStatus $? -echo "$userDetails" | grep -q 'Last name: user' +grep -q 'Last name: user' /tmp/ipatestuser r_checkExitStatus $? -echo "$userDetails" | grep -q 'Full name: test user' +grep -q 'Full name: test user' /tmp/ipatestuser r_checkExitStatus $? -echo "$userDetails" | grep -q 'Home directory: /home/ipatestuser' +grep -q 'Home directory: /home/ipatestuser' /tmp/ipatestuser r_checkExitStatus $? r_log "ipa" "Changing password of the user" kdestroy &> /dev/null +userPassword="$(awk '/Random password/ { print $3 }' /tmp/ipatestuser)" +/bin/rm /tmp/ipatestuser expect -f - < /dev/null klist 2>&1 | grep -E "(No credentials|Credentials cache .* not found)" &> /dev/null r_checkExitStatus $? -expect -f - < /dev/null r_checkExitStatus $? r_log "ipa" "Adding test service" -ipa service-add testservice/rltest.rlipa.local &> /dev/null +ipa service-add testservice/onyxtest.rlipa.local &> /dev/null r_checkExitStatus $? r_log "ipa" "Getting keytab for service" -ipa-getkeytab -s rltest.rlipa.local -p testservice/rltest.rlipa.local -k /tmp/testservice.keytab &> /dev/null +ipa-getkeytab -s onyxtest.rlipa.local -p testservice/onyxtest.rlipa.local -k /tmp/testservice.keytab &> /dev/null r_checkExitStatus $? 
r_log "ipa" "Getting a certificate for service" -ipa-getcert request -K testservice/rltest.rlipa.local -D rltest.rlipa.local -f /etc/pki/tls/certs/testservice.crt -k /etc/pki/tls/private/testservice.key &> /dev/null +ipa-getcert request -K testservice/onyxtest.rlipa.local -D onyxtest.rlipa.local -f /etc/pki/tls/certs/testservice.crt -k /etc/pki/tls/private/testservice.key &> /dev/null r_checkExitStatus $? while true; do @@ -57,7 +49,7 @@ while ! stat /etc/pki/tls/certs/testservice.crt &> /dev/null; do done r_log "ipa" "Verifying keytab" -klist -k /tmp/testservice.keytab | grep "testservice/rltest.rlipa.local" &> /dev/null +klist -k /tmp/testservice.keytab | grep "testservice/onyxtest.rlipa.local" &> /dev/null r_checkExitStatus $? r_log "ipa" "Verifying key matches the certificate" diff --git a/func/stacks/ipa/22-ipa-dns.sh b/func/stacks/ipa/22-ipa-dns.sh old mode 100644 new mode 100755 index 4d74174..e0b507f --- a/func/stacks/ipa/22-ipa-dns.sh +++ b/func/stacks/ipa/22-ipa-dns.sh @@ -13,21 +13,13 @@ kdestroy &> /dev/null klist 2>&1 | grep -qE "(No credentials|Credentials cache .* not found)" &> /dev/null r_checkExitStatus $? -expect -f - < /dev/null r_checkExitStatus $? r_log "ipa" "Adding testzone subdomain" -ipa dnszone-add --name-server=rltest.rlipa.local. --admin-email=hostmaster.testzone.rlipa.local. testzone.rlipa.local &> /dev/null +ipa dnszone-add --name-server=onyxtest.rlipa.local. --admin-email=hostmaster.testzone.rlipa.local. testzone.rlipa.local &> /dev/null r_checkExitStatus $? sleep 5 @@ -36,7 +28,7 @@ dig @localhost SOA testzone.rlipa.local | grep -q "status: NOERROR" &> /dev/null r_checkExitStatus $? r_log "ipa" "Adding a CNAME record to the primary domain" -ipa dnsrecord-add rlipa.local testrecord --cname-hostname=rltest &> /dev/null +ipa dnsrecord-add rlipa.local testrecord --cname-hostname=onyxtest &> /dev/null r_checkExitStatus $? sleep 5 @@ -45,7 +37,7 @@ dig @localhost CNAME testrecord.rlipa.local | grep -q "status: NOERROR" &> /dev/ r_checkExitStatus $? r_log "ipa" "Adding a CNAME to subdomain" -ipa dnsrecord-add testzone.rlipa.local testrecord --cname-hostname=rltest.rlipa.local. &> /dev/null +ipa dnsrecord-add testzone.rlipa.local testrecord --cname-hostname=onyxtest.rlipa.local. &> /dev/null r_checkExitStatus $? sleep 5 diff --git a/func/stacks/ipa/23-ipa-sudo.sh b/func/stacks/ipa/23-ipa-sudo.sh old mode 100644 new mode 100755 index 68e50ac..a52c4ef --- a/func/stacks/ipa/23-ipa-sudo.sh +++ b/func/stacks/ipa/23-ipa-sudo.sh @@ -9,19 +9,51 @@ if [ "$IPAINSTALLED" -eq 1 ]; then r_checkExitStatus 1 fi -kdestroy &> /dev/null -klist 2>&1 | grep -E "(No credentials|Credentials cache .* not found)" &> /dev/null +kdestroy -A +klist 2>&1 | grep -E "(No credentials|Credentials cache .* not found)" r_checkExitStatus $? -expect -f - < /dev/null +r_checkExitStatus $? + +r_log "ipa" "Adding user to test sudo rule" +ipa sudorule-add-user testrule --users="ipatestuser" &> /dev/null +r_checkExitStatus $? + +r_log "ipa" "Verifying rule..." +ipa sudorule-show testrule > /tmp/testrule +grep -q 'Rule name: testrule' /tmp/testrule +r_checkExitStatus $? +grep -q 'Description: Test rule in IPA' /tmp/testrule +r_checkExitStatus $? +grep -q 'Enabled: TRUE' /tmp/testrule +r_checkExitStatus $? +grep -q 'Host category: all' /tmp/testrule +r_checkExitStatus $? +grep -q 'Command category: all' /tmp/testrule +r_checkExitStatus $? +grep -q 'RunAs User category: all' /tmp/testrule +r_checkExitStatus $? +grep -q 'RunAs Group category: all' /tmp/testrule +r_checkExitStatus $? 
+grep -q 'Users: ipatestuser' /tmp/testrule +r_checkExitStatus $? + +m_serviceCycler sssd stop +rm -rf /var/lib/sss/db/* +m_serviceCycler sssd start + sleep 5 -close -EOF -klist | grep "admin@RLIPA.LOCAL" &> /dev/null +r_log "ipa" "Verifying sudo abilities" +sudo -l -U ipatestuser > /tmp/sudooutput +grep -q 'ipatestuser may run the following commands' /tmp/sudooutput +r_checkExitStatus $? +grep -q 'ALL) ALL' /tmp/sudooutput r_checkExitStatus $? diff --git a/func/stacks/ipa/50-cleanup-ipa.sh b/func/stacks/ipa/50-cleanup-ipa.sh old mode 100644 new mode 100755 diff --git a/func/stacks/lamp/00-install-lamp.sh b/func/stacks/lamp/00-install-lamp.sh old mode 100755 new mode 100644 diff --git a/func/stacks/lamp/01-verification.sh b/func/stacks/lamp/01-verification.sh old mode 100755 new mode 100644 diff --git a/func/stacks/lamp/10-test-lamp.sh b/func/stacks/lamp/10-test-lamp.sh old mode 100755 new mode 100644 diff --git a/iso/empanadas/.gitignore b/iso/empanadas/.gitignore index 961321b..6059633 100644 --- a/iso/empanadas/.gitignore +++ b/iso/empanadas/.gitignore @@ -2,3 +2,4 @@ __pycache__/ *.py[cod] *$py.class *.so +Containerfile*.devel diff --git a/iso/empanadas/Containerfile b/iso/empanadas/Containerfile index c3c4ed9..86a67c5 100644 --- a/iso/empanadas/Containerfile +++ b/iso/empanadas/Containerfile @@ -56,7 +56,7 @@ RUN rm -rf /etc/yum.repos.d/*.repo RUN useradd -o -d /var/peridot -u 1002 peridotbuilder && usermod -a -G mock peridotbuilder RUN chown peridotbuilder:mock /etc/yum.conf && chown -R peridotbuilder:mock /etc/dnf && chown -R peridotbuilder:mock /etc/rpm && chown -R peridotbuilder:mock /etc/yum.repos.d -RUN pip install 'git+https://git.rockylinux.org/release-engineering/public/toolkit.git@feature/iso-kube#egg=empanadas&subdirectory=iso/empanadas' +RUN pip install 'git+https://git.resf.org/sig_core/toolkit.git@devel#egg=empanadas&subdirectory=iso/empanadas' RUN pip install awscli diff --git a/iso/empanadas/Containerfile.imagefactory b/iso/empanadas/Containerfile.imagefactory new file mode 100644 index 0000000..d6b4aca --- /dev/null +++ b/iso/empanadas/Containerfile.imagefactory @@ -0,0 +1,70 @@ +FROM docker.io/fedora:36 + +ADD images/get_arch /get_arch + +ENV TINI_VERSION v0.19.0 +RUN curl -o /tini -L "https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-$(/get_arch)" +RUN chmod +x /tini + +RUN dnf install -y \ + bash \ + bzip2 \ + cpio \ + diffutils \ + findutils \ + gawk \ + gcc \ + gcc-c++ \ + git \ + grep \ + gzip \ + info \ + make \ + patch \ + python3 \ + redhat-rpm-config \ + rpm-build \ + scl-utils-build \ + sed \ + shadow-utils \ + tar \ + unzip \ + util-linux \ + which \ + xz \ + dnf-plugins-core \ + createrepo_c \ + rpm-sign \ + sudo \ + mock \ + python-pip \ + imagefactory \ + imagefactory-plugins* + +RUN sed -i -e 's/# memory = 1024/memory = 2048/' /etc/oz/oz.cfg + +COPY imagefactory.patch / +COPY oz.rpm / + +RUN dnf -y install /oz.rpm +RUN (cd /usr/lib/python3.10/site-packages/; patch -p1 Syncs repositories from Peridot +* sync_sig -> Syncs SIG repositories from Peridot +* build-iso -> Builds initial ISO's using Lorax +* build-iso-extra -> Builds DVD's and other images based on Lorax data +* build-iso-live -> Builds live images +* pull-unpack-tree -> Pulls the latest lorax data from an S3 bucket and configures treeinfo +* pull-cloud-image -> Pulls the latest cloud images from an S3 bucket +* finalize_compose -> Finalizes a compose with metadata and checksums, as well as copies images +* launch-builds -> Creates a kube config to run build-iso +* 
build-image -> Runs build-iso +* generate_compose -> Creates a compose directory right away and optionally links it as latest + (You should only use this if you are running into errors with images) +``` ## wrappers -* lorax-generators -* sync-generators +``` +* common -> The starting point +* iso_utils -> Does work for ISO building and generation +* dnf_utils -> Does work for repo building and generation +* check -> Checks if the architecture/release combination are valid +* shared -> Shared utilities between all wrappers +``` ## rules @@ -43,8 +63,9 @@ When making a script, you *must* import common. This is insanely bad practice, but we would prefer if we started out this way: ``` -from common import * import argparse +from empanadas.common import * +from empanadas.util import Checks ``` Whatever is imported in common will effectively be imported in your scripts as diff --git a/iso/empanadas/empanadas/__init__.py b/iso/empanadas/empanadas/__init__.py index b794fd4..7fd229a 100644 --- a/iso/empanadas/empanadas/__init__.py +++ b/iso/empanadas/empanadas/__init__.py @@ -1 +1 @@ -__version__ = '0.1.0' +__version__ = '0.2.0' diff --git a/iso/empanadas/empanadas/common.py b/iso/empanadas/empanadas/common.py index c3619ce..ee32228 100644 --- a/iso/empanadas/empanadas/common.py +++ b/iso/empanadas/empanadas/common.py @@ -8,6 +8,24 @@ import yaml import logging import hashlib + +from collections import defaultdict +from typing import Tuple + +# An implementation from the Fabric python library +class AttributeDict(defaultdict): + def __init__(self): + super(AttributeDict, self).__init__(AttributeDict) + + def __getattr__(self, key): + try: + return self[key] + except KeyError: + raise AttributeError(key) + + def __setattr__(self, key, value): + self[key] = value + # These are a bunch of colors we may use in terminal output class Color: RED = '\033[91m' @@ -20,10 +38,14 @@ class Color: UNDERLINE = '\033[4m' BOLD = '\033[1m' END = '\033[0m' + INFO = '[' + BOLD + GREEN + 'INFO' + END + '] ' + WARN = '[' + BOLD + YELLOW + 'WARN' + END + '] ' + FAIL = '[' + BOLD + RED + 'FAIL' + END + '] ' + STAT = '[' + BOLD + CYAN + 'STAT' + END + '] ' # vars and additional checks -rldict = {} -sigdict = {} +rldict = AttributeDict() +sigdict = AttributeDict() config = { "rlmacro": rpm.expandMacro('%rhel'), "dist": 'el' + rpm.expandMacro('%rhel'), @@ -77,3 +99,40 @@ for conf in glob.iglob(f"{_rootdir}/sig/*.yaml"): #rlvars = rldict[rlver] #rlvars = rldict[rlmacro] #COMPOSE_ISO_WORKDIR = COMPOSE_ROOT + "work/" + arch + "/" + date_stamp + + +ALLOWED_TYPE_VARIANTS = { + "Azure": None, + "Container": ["Base", "Minimal", "UBI"], + "EC2": None, + "GenericCloud": None, + "Vagrant": ["Libvirt", "Vbox"] +} +def valid_type_variant(_type: str, variant: str="") -> bool: + if _type not in ALLOWED_TYPE_VARIANTS: + raise Exception(f"Type is invalid: ({_type}, {variant})") + if ALLOWED_TYPE_VARIANTS[_type] == None: + if variant is not None: + raise Exception(f"{_type} Type expects no variant type.") + return True + if variant not in ALLOWED_TYPE_VARIANTS[_type]: + if variant.capitalize() in ALLOWED_TYPE_VARIANTS[_type]: + raise Exception(f"Capitalization mismatch. Found: ({_type}, {variant}). 
Expected: ({_type}, {variant.capitalize()})") + raise Exception(f"Type/Variant Combination is not allowed: ({_type}, {variant})") + return True + +from attrs import define, field +@define(kw_only=True) +class Architecture: + name: str = field() + version: str = field() + major: int = field(converter=int) + minor: int = field(converter=int) + + @classmethod + def from_version(cls, architecture: str, version: str): + major, minor = str.split(version, ".") + if architecture not in rldict[major]["allowed_arches"]: + print("Invalid architecture/version combo, skipping") + exit() + return cls(name=architecture, version=version, major=major, minor=minor) diff --git a/iso/empanadas/empanadas/configs/el8.yaml b/iso/empanadas/empanadas/configs/el8.yaml index eb80aff..1ece4b1 100644 --- a/iso/empanadas/empanadas/configs/el8.yaml +++ b/iso/empanadas/empanadas/configs/el8.yaml @@ -44,64 +44,78 @@ has_modules: - 'AppStream' - 'PowerTools' - iso_map: - hosts: - x86_64: '' - aarch64: '' - ppc64le: '' - s390x: '' - images: - - dvd1 - - minimal - - boot - repos: - - 'BaseOS' - - 'AppStream' - variant: 'BaseOS' - lorax_removes: - - 'libreport-rhel-anaconda-bugzilla' - required_packages: - - 'lorax' - - 'genisoimage' - - 'isomd5sum' - - 'lorax-templates-rhel' - - 'lorax-templates-generic' structure: packages: 'os/Packages' repodata: 'os/repodata' iso_map: xorrisofs: False iso_level: False - hosts: - x86_64: '' - aarch64: '' images: dvd: + disc: True + variant: 'AppStream' repos: - 'BaseOS' - 'AppStream' - lorax_variants: - - dvd - - minimal - - BaseOS - repos: - - 'BaseOS' - - 'AppStream' - variant: 'BaseOS' - lorax_removes: - - 'libreport-rhel-anaconda-bugzilla' + minimal: + disc: True + isoskip: True + repos: + - 'minimal' + - 'BaseOS' + variant: 'minimal' + BaseOS: + disc: False + isoskip: True + variant: 'BaseOS' + repos: + - 'BaseOS' + - 'AppStream' + lorax: + repos: + - 'BaseOS' + - 'AppStream' + variant: 'BaseOS' + lorax_removes: + - 'libreport-rhel-anaconda-bugzilla' + required_pkgs: + - 'lorax' + - 'genisoimage' + - 'isomd5sum' + - 'lorax-templates-rhel' + - 'lorax-templates-generic' + - 'xorriso' + cloudimages: + images: + EC2: + format: raw + GenericCloud: + format: qcow2 + livemap: + git_repo: 'https://git.resf.org/sig_core/kickstarts.git' + branch: 'r9' + ksentry: + Workstation: rocky-live-workstation.ks + Workstation-Lite: rocky-live-workstation-lite.ks + XFCE: rocky-live-xfce.ks + KDE: rocky-live-kde.ks + allowed_arches: + - x86_64 required_pkgs: - - 'lorax' - - 'genisoimage' - - 'isomd5sum' - - 'lorax-templates-rhel' - - 'lorax-templates-generic' + - 'lorax-lmc-novirt' + - 'vim-minimal' + - 'pykickstart' + - 'git' + variantmap: + git_repo: 'https://git.rockylinux.org/rocky/pungi-rocky.git' + branch: 'r8' + git_raw_path: 'https://git.rockylinux.org/rocky/pungi-rocky/-/raw/r8/' repoclosure_map: arches: - x86_64: '--arch=x86_64 --arch=athlon --arch=i686 --arch=i586 --arch=i486 --arch=i386 --arch=noarch' - aarch64: '--arch=aarch64 --arch=noarch' - ppc64le: '--arch=ppc64le --arch=noarch' - s390x: '--arch=s390x --arch=noarch' + x86_64: '--forcearch=x86_64 --arch=x86_64 --arch=athlon --arch=i686 --arch=i586 --arch=i486 --arch=i386 --arch=noarch' + aarch64: '--forcearch=aarch64 --arch=aarch64 --arch=noarch' + ppc64le: '--forcearch=ppc64le --arch=ppc64le --arch=noarch' + s390x: '--forcearch=s390x --arch=s390x --arch=noarch' repos: BaseOS: [] AppStream: diff --git a/iso/empanadas/empanadas/configs/el9-beta.yaml b/iso/empanadas/empanadas/configs/el9-beta.yaml index 19d6cd5..39719e1 100644 --- 
a/iso/empanadas/empanadas/configs/el9-beta.yaml +++ b/iso/empanadas/empanadas/configs/el9-beta.yaml @@ -50,6 +50,7 @@ isoskip: True repos: - 'minimal' + - 'BaseOS' variant: 'minimal' BaseOS: disc: False @@ -72,6 +73,31 @@ - 'lorax-templates-rhel' - 'lorax-templates-generic' - 'xorriso' + cloudimages: + images: + EC2: + format: raw + GenericCloud: + format: qcow2 + livemap: + git_repo: 'https://git.resf.org/sig_core/kickstarts.git' + branch: 'r9-beta' + ksentry: + Workstation: rocky-live-workstation.ks + Workstation-Lite: rocky-live-workstation-lite.ks + XFCE: rocky-live-xfce.ks + KDE: rocky-live-kde.ks + allowed_arches: + - x86_64 + required_pkgs: + - 'lorax-lmc-novirt' + - 'vim-minimal' + - 'pykickstart' + - 'git' + variantmap: + git_repo: 'https://git.rockylinux.org/rocky/pungi-rocky.git' + branch: 'r9-beta' + git_raw_path: 'https://git.rockylinux.org/rocky/pungi-rocky/-/raw/r9-beta/' repoclosure_map: arches: x86_64: '--forcearch=x86_64 --arch=x86_64 --arch=athlon --arch=i686 --arch=i586 --arch=i486 --arch=i386 --arch=noarch' diff --git a/iso/empanadas/empanadas/configs/el9.yaml b/iso/empanadas/empanadas/configs/el9.yaml index 88a978b..f67f28c 100644 --- a/iso/empanadas/empanadas/configs/el9.yaml +++ b/iso/empanadas/empanadas/configs/el9.yaml @@ -2,7 +2,7 @@ '9': fullname: 'Rocky Linux 9.0' revision: '9.0' - rclvl: 'RC1' + rclvl: 'RC2' major: '9' minor: '0' profile: '9' @@ -50,7 +50,9 @@ isoskip: True repos: - 'minimal' + - 'BaseOS' variant: 'minimal' + volname: 'dvd' BaseOS: disc: False isoskip: True @@ -72,6 +74,31 @@ - 'lorax-templates-rhel' - 'lorax-templates-generic' - 'xorriso' + cloudimages: + images: + EC2: + format: raw + GenericCloud: + format: qcow2 + livemap: + git_repo: 'https://git.resf.org/sig_core/kickstarts.git' + branch: 'r9' + ksentry: + Workstation: rocky-live-workstation.ks + Workstation-Lite: rocky-live-workstation-lite.ks + XFCE: rocky-live-xfce.ks + KDE: rocky-live-kde.ks + allowed_arches: + - x86_64 + required_pkgs: + - 'lorax-lmc-novirt' + - 'vim-minimal' + - 'pykickstart' + - 'git' + variantmap: + git_repo: 'https://git.rockylinux.org/rocky/pungi-rocky.git' + branch: 'r9' + git_raw_path: 'https://git.rockylinux.org/rocky/pungi-rocky/-/raw/r9/' repoclosure_map: arches: x86_64: '--forcearch=x86_64 --arch=x86_64 --arch=athlon --arch=i686 --arch=i586 --arch=i486 --arch=i386 --arch=noarch' diff --git a/iso/empanadas/empanadas/configs/el9lh.yaml b/iso/empanadas/empanadas/configs/el9lh.yaml index 4176f66..e817298 100644 --- a/iso/empanadas/empanadas/configs/el9lh.yaml +++ b/iso/empanadas/empanadas/configs/el9lh.yaml @@ -50,6 +50,7 @@ isoskip: True repos: - 'minimal' + - 'BaseOS' variant: 'minimal' BaseOS: disc: False @@ -72,6 +73,31 @@ - 'lorax-templates-rhel' - 'lorax-templates-generic' - 'xorriso' + cloudimages: + images: + EC2: + format: raw + GenericCloud: + format: qcow2 + livemap: + git_repo: 'https://git.resf.org/sig_core/kickstarts.git' + branch: 'r9lh' + ksentry: + Workstation: rocky-live-workstation.ks + Workstation-Lite: rocky-live-workstation-lite.ks + XFCE: rocky-live-xfce.ks + KDE: rocky-live-kde.ks + allowed_arches: + - x86_64 + required_pkgs: + - 'lorax-lmc-novirt' + - 'vim-minimal' + - 'pykickstart' + - 'git' + variantmap: + git_repo: 'https://git.rockylinux.org/rocky/pungi-rocky.git' + branch: 'r9lh' + git_raw_path: 'https://git.rockylinux.org/rocky/pungi-rocky/-/raw/r9lh/' repoclosure_map: arches: x86_64: '--forcearch=x86_64 --arch=x86_64 --arch=athlon --arch=i686 --arch=i586 --arch=i486 --arch=i386 --arch=noarch' diff --git 
a/iso/empanadas/empanadas/configs/rln.yaml b/iso/empanadas/empanadas/configs/rln.yaml index 7544ce4..3df3104 100644 --- a/iso/empanadas/empanadas/configs/rln.yaml +++ b/iso/empanadas/empanadas/configs/rln.yaml @@ -40,18 +40,19 @@ iso_level: False images: dvd: - discnum: '1' + disc: True variant: 'AppStream' repos: - 'BaseOS' - 'AppStream' minimal: - discnum: '1' + disc: True isoskip: True repos: - 'minimal' variant: 'minimal' BaseOS: + disc: False isoskip: True variant: 'BaseOS' repos: @@ -66,10 +67,32 @@ - 'libreport-rhel-anaconda-bugzilla' required_pkgs: - 'lorax' + - 'genisoimage' - 'isomd5sum' - 'lorax-templates-rhel' - 'lorax-templates-generic' - 'xorriso' + cloudimages: + images: + EC2: + format: raw + GenericCloud: + format: qcow2 + livemap: + git_repo: 'https://git.resf.org/sig_core/kickstarts.git' + branch: 'rln' + ksentry: + Workstation: rocky-live-workstation.ks + Workstation-Lite: rocky-live-workstation-lite.ks + XFCE: rocky-live-xfce.ks + KDE: rocky-live-kde.ks + allowed_arches: + - x86_64 + required_pkgs: + - 'lorax-lmc-novirt' + - 'vim-minimal' + - 'pykickstart' + - 'git' repoclosure_map: arches: x86_64: '--forcearch=x86_64 --arch=x86_64 --arch=athlon --arch=i686 --arch=i586 --arch=i486 --arch=i386 --arch=noarch' diff --git a/iso/empanadas/empanadas/scripts/build_image.py b/iso/empanadas/empanadas/scripts/build_image.py new file mode 100644 index 0000000..5085253 --- /dev/null +++ b/iso/empanadas/empanadas/scripts/build_image.py @@ -0,0 +1,424 @@ +# Builds an image given a version, type, variant, and architecture +# Defaults to the running host's architecture + +import argparse +import datetime +import json +import logging +import os +import pathlib +import platform +import subprocess +import sys +import tempfile +import time + +from attrs import define, Factory, field, asdict +from botocore import args +from jinja2 import Environment, FileSystemLoader, Template +from typing import Callable, List, NoReturn, Optional, Tuple, IO, Union + +from empanadas.common import Architecture, rldict, valid_type_variant +from empanadas.common import _rootdir + +parser = argparse.ArgumentParser(description="ISO Compose") + +parser.add_argument('--version', type=str, help="Release Version (8.6, 9.1)", required=True) +parser.add_argument('--rc', action='store_true', help="Release Candidate") +parser.add_argument('--kickstartdir', action='store_true', help="Use the kickstart dir instead of the os dir for repositories") +parser.add_argument('--debug', action='store_true', help="debug?") +parser.add_argument('--type', type=str, help="Image type (container, genclo, azure, aws, vagrant)", required=True) +parser.add_argument('--variant', type=str, help="", required=False) +parser.add_argument('--release', type=str, help="Image release for subsequent builds with the same date stamp (rarely needed)", required=False) +parser.add_argument('--kube', action='store_true', help="output as a K8s job(s)", required=False) + + +results = parser.parse_args() +rlvars = rldict[results.version] +major = rlvars["major"] + + +debug = results.debug + +log = logging.getLogger(__name__) +log.setLevel(logging.INFO if not debug else logging.DEBUG) +handler = logging.StreamHandler(sys.stdout) +handler.setLevel(logging.INFO if not debug else logging.DEBUG) +formatter = logging.Formatter( + '%(asctime)s :: %(name)s :: %(message)s', + '%Y-%m-%d %H:%M:%S' +) +handler.setFormatter(formatter) +log.addHandler(handler) + +STORAGE_DIR = pathlib.Path("/var/lib/imagefactory/storage") +KICKSTART_PATH = 
pathlib.Path(os.environ.get("KICKSTART_PATH", "/kickstarts")) +BUILDTIME = datetime.datetime.utcnow() + +@define(kw_only=True) +class ImageBuild: + architecture: Architecture = field() + base_uuid: Optional[str] = field(default="") + cli_args: argparse.Namespace = field() + command_args: List[str] = field(factory=list) + common_args: List[str] = field(factory=list) + debug: bool = field(default=False) + image_type: str = field() + job_template: Optional[Template] = field(init=False) + kickstart_arg: List[str] = field(factory=list) + metadata: pathlib.Path = field(init=False) + out_type: str = field(init=False) + outdir: pathlib.Path = field(init=False) + outname: str = field(init=False) + package_args: List[str] = field(factory=list) + release: int = field(default=0) + stage_commands: Optional[List[List[Union[str,Callable]]]] = field(init=False) + target_uuid: Optional[str] = field(default="") + tdl_path: pathlib.Path = field(init=False) + template: Template = field() + type_variant: str = field(init=False) + variant: Optional[str] = field() + + def __attrs_post_init__(self): + self.tdl_path = self.render_icicle_template() + if not self.tdl_path: + exit(2) + self.type_variant = self.type_variant_name() + self.outdir, self.outname = self.output_name() + self.out_type = self.image_format() + self.command_args = self._command_args() + self.package_args = self._package_args() + self.common_args = self._common_args() + self.kickstart_arg = self.kickstart_imagefactory_args() + + self.metadata = pathlib.Path(self.outdir, "metadata.json") + + # Yes, this is gross. I'll fix it later. + if self.image_type in ["Container"]: + self.stage_commands = [ + ["tar", "-C", f"{self.outdir}", "--strip-components=1", "-x", "-f", lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", "*/layer.tar"], + ["xz", f"{self.outdir}/layer.tar"] + ] + if self.image_type in ["GenericCloud"]: + self.stage_commands = [ + ["qemu-img", "convert", "-c", "-f", "raw", "-O", "qcow2", lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", f"{self.outdir}/{self.outname}.qcow2"] + ] + if self.image_type in ["EC2"]: + self.stage_commands = [ + ["qemu-img", "convert", "-f", "raw", "-O", "qcow2", lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", f"{self.outdir}/{self.outname}.qcow2"] + ] + if self.image_type in ["Azure"]: + self.stage_commands = [ + ["/prep-azure.sh", lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", f"{STORAGE_DIR}"], + ["cp", lambda: f"{STORAGE_DIR}/{self.target_uuid}.vhd", f"{self.outdir}/{self.outname}.vhd"] + ] + # ["qemu-img", "resize", "-f", "raw", lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", lambda: f"{self.rounded_size()}"], + # ["qemu-img", "convert", "-f", "raw", "-o", "subformat=fixed,force_size" ,"-O", "vpc", lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", f"{self.outdir}/{self.outname}.vhd"] + if self.image_type in ["Vagrant"]: + _map = { + "Vbox": "vmdk", + "Libvirt": "qcow2" + } + output = f"{_map[self.variant]}" #type: ignore + self.stage_commands = [ + ["qemu-img", "convert", "-c", "-f", "raw", "-O", output, lambda: f"{STORAGE_DIR}/{self.target_uuid}.body", f"{self.outdir}/{self.outname}.{output}"] + ] + + + if self.stage_commands: + self.stage_commands.append(["cp", "-v", lambda: f"{STORAGE_DIR}/{self.target_uuid}.meta", f"{self.outdir}/build.meta"]) + + try: + os.mkdir(self.outdir) + except FileExistsError as e: + log.info("Directory already exists for this release. 
If possible, previously executed steps may be skipped") + except Exception as e: + log.exception("Some other exception occured while creating the output directory", e) + return 0 + + if os.path.exists(self.metadata): + with open(self.metadata, "r") as f: + try: + o = json.load(f) + self.base_uuid = o['base_uuid'] + self.target_uuid = o['target_uuid'] + except json.decoder.JSONDecodeError as e: + log.exception("Couldn't decode metadata file", e) + finally: + f.flush() + + # def rounded_size(self) -> int: + # # Azure images need to be rounded to the nearest 1MB boundary. + # MB=1024*1024 + # + # raw_size = pathlib.Path(STORAGE_DIR},f"{self.target_uuid}.body").stat().st_size + # rounded_size = raw + + def output_name(self) -> Tuple[pathlib.Path, str]: + directory = f"Rocky-{self.architecture.major}-{self.type_variant}-{self.architecture.version}-{BUILDTIME.strftime('%Y%m%d')}.{self.release}" + name = f"{directory}.{self.architecture.name}" + outdir = pathlib.Path(f"/tmp/", directory) + return outdir, name + + def type_variant_name(self): + return self.image_type if not self.variant else f"{self.image_type}-{self.variant}" + + def _command_args(self): + args_mapping = { + "debug": "--debug" + } + return [param for name, param in args_mapping.items() if getattr(self.cli_args, name)] + + def _package_args(self) -> List[str]: + if self.image_type == "Container": + return ["--parameter", "compress", "xz"] + return [""] + + def _common_args(self) -> List[str]: + args = [] + if self.image_type == "Container": + args = ["--parameter", "offline_icicle", "true"] + if self.image_type in ["GenericCloud", "EC2", "Vagrant", "Azure"]: + args = ["--parameter", "generate_icicle", "false"] + return args + + def image_format(self) -> str: + mapping = { + "Container": "docker" + } + return mapping[self.image_type] if self.image_type in mapping.keys() else '' + + def kickstart_imagefactory_args(self) -> List[str]: + kickstart_path = pathlib.Path(f"{KICKSTART_PATH}/Rocky-{self.architecture.major}-{self.type_variant}.ks") + + if not kickstart_path.is_file(): + log.warn(f"Kickstart file is not available: {kickstart_path}") + if not debug: + log.warn("Exiting because debug mode is not enabled.") + exit(2) + + return ["--file-parameter", "install_script", str(kickstart_path)] + + def render_icicle_template(self) -> pathlib.Path: + handle, output = tempfile.mkstemp() + if not handle: + exit(3) + with os.fdopen(handle, "wb") as tmp: + _template = self.template.render( + architecture=self.architecture.name, + iso8601date=BUILDTIME.strftime("%Y%m%d"), + installdir="kickstart" if self.cli_args.kickstartdir else "os", + major=self.architecture.major, + minor=self.architecture.minor, + release=self.release, + size="10G", + type=self.image_type, + utcnow=BUILDTIME, + version_variant=self.architecture.version if not self.variant else f"{self.architecture.version}-{self.variant}", + ) + tmp.write(_template.encode()) + tmp.flush() + output = pathlib.Path(output) + if not output.exists(): + log.error("Failed to write TDL template") + raise Exception("Failed to write TDL template") + return output + + def build_command(self) -> List[str]: + build_command = ["imagefactory", *self.command_args, "base_image", *self.common_args, *self.kickstart_arg, self.tdl_path + # "|", "tee", "-a", f"{outdir}/logs/base_image-{outname}.out", + # "|", "tail", "-n4", ">", f"{outdir}/base.meta", "||", "exit", "2" + ] + return build_command + def package_command(self) -> List[str]: + package_command = ["imagefactory", *self.command_args, "target_image", 
self.out_type, *self.common_args, + "--id", f"{self.base_uuid}", + *self.package_args, + "--parameter", "repository", self.outname, + # "|", "tee", "-a", f"{outdir}/base_image-{outname}.out", + # "|", "tail", "-n4", ">", f"{outdir}/target.meta", "||", "exit", "3" + ] + return package_command + + def copy_command(self) -> List[str]: + + copy_command = ["aws", "s3", "cp", "--recursive", f"{self.outdir}/", + f"s3://resf-empanadas/buildimage-{self.architecture.version}-{self.architecture.name}/{ self.outname }/{ BUILDTIME.strftime('%s') }/" + ] + + return copy_command + + def build(self) -> int: + if self.base_uuid: + return 0 + + self.fix_ks() + + ret, out, err, uuid = self.runCmd(self.build_command()) + if uuid: + self.base_uuid = uuid.rstrip() + self.save() + return ret + + def package(self) -> int: + # Some build types don't need to be packaged by imagefactory + # @TODO remove business logic if possible + if self.image_type in ["GenericCloud", "EC2", "Azure", "Vagrant"]: + self.target_uuid = self.base_uuid if hasattr(self, 'base_uuid') else "" + + if self.target_uuid: + return 0 + + ret, out, err, uuid = self.runCmd(self.package_command()) + if uuid: + self.target_uuid = uuid.rstrip() + self.save() + return ret + + def stage(self) -> int: + """ Stage the artifacst from wherever they are (unpacking and converting if needed)""" + if not hasattr(self,'stage_commands'): + return 0 + + returns = [] + for command in self.stage_commands: #type: ignore + ret, out, err, _ = self.runCmd(command, search=False) + returns.append(ret) + + return all(ret > 0 for ret in returns) + + def copy(self, skip=False) -> int: + # move or unpack if necessary + log.info("Executing staging commands") + if (stage := self.stage() > 0): + raise Exception(stage) + + if not skip: + log.info("Copying files to output directory") + ret, out, err, _ = self.runCmd(self.copy_command(), search=False) + return ret + + log.info(f"Build complete! Output available in {self.outdir}/") + return 0 + + def runCmd(self, command: List[Union[str, Callable]], search: bool = True) -> Tuple[int, Union[bytes,None], Union[bytes,None], Union[str,None]]: + prepared, _ = self.prepare_command(command) + log.info(f"Running command: {' '.join(prepared)}") + + kwargs = { + "stderr": subprocess.PIPE, + "stdout": subprocess.PIPE + } + if debug: del kwargs["stderr"] + + with subprocess.Popen(prepared, **kwargs) as p: + uuid = None + # @TODO implement this as a callback? + if search: + for _, line in enumerate(p.stdout): # type: ignore + ln = line.decode() + if ln.startswith("UUID: "): + uuid = ln.split(" ")[-1] + log.debug(f"found uuid: {uuid}") + + out, err = p.communicate() + res = p.wait(), out, err, uuid + + if res[0] > 0: + log.error(f"Problem while executing command: '{prepared}'") + if search and not res[3]: + log.error("UUID not found in stdout. Dumping stdout and stderr") + self.log_subprocess(res) + + return res + + def prepare_command(self, command_list: List[Union[str, Callable]]) -> Tuple[List[str],List[None]]: + """ + Commands may be a callable, which should be a lambda to be evaluated at + preparation time with available locals. This can be used to, among + other things, perform lazy evaluations of f-strings which have values + not available at assignment time. e.g., filling in a second command + with a value extracted from the previous step or command. 
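The lazy-callable pattern this docstring describes can be shown with a small self-contained sketch; the variable names and values below are illustrative only and are not part of ImageBuild:

```python
storage_dir = "/var/lib/imagefactory/storage"
target_uuid = None  # not known yet when the command list is declared

# the lambda defers building the path until the command is actually prepared
command = ["qemu-img", "convert",
           lambda: f"{storage_dir}/{target_uuid}.body",
           "disk.qcow2"]

target_uuid = "5f9a2c1e"  # filled in later, e.g. after the base_image step

prepared = [c() if callable(c) else str(c) for c in command]
# ['qemu-img', 'convert', '/var/lib/imagefactory/storage/5f9a2c1e.body', 'disk.qcow2']
```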
+ + """ + + r = [] + return r, [r.append(c()) if (callable(c) and c.__name__ == '') else r.append(str(c)) for c in command_list] + + def log_subprocess(self, result: Tuple[int, Union[bytes, None], Union[bytes, None], Union[str, None]]): + def log_lines(title, lines): + log.info(f"====={title}=====") + log.info(lines.decode()) + log.info(f"Command return code: {result[0]}") + stdout = result[1] + stderr = result[2] + if stdout: + log_lines("Command STDOUT", stdout) + if stderr: + log_lines("Command STDERR", stderr) + + def fix_ks(self): + self.runCmd(["sed", "-i", f"s,$basearch,{self.architecture.name},", self.kickstart_arg[-1]], search=False) + + def render_kubernetes_job(self): + commands = [self.build_command(), self.package_command(), self.copy_command()] + if not self.job_template: + return None + template = self.job_template.render( + architecture=self.architecture.name, + backoffLimit=4, + buildTime=BUILDTIME.strftime("%s"), + command=commands, + imageName="ghcr.io/rockylinux/sig-core-toolkit:latest", + jobname="buildimage", + namespace="empanadas", + major=major, + restartPolicy="Never", + ) + return template + + def save(self): + with open(pathlib.Path(self.outdir, "metadata.json"), "w") as f: + try: + o = { name: getattr(self, name) for name in ["base_uuid", "target_uuid"] } + log.debug(o) + json.dump(o, f) + except AttributeError as e: + log.error("Couldn't find attribute in object. Something is probably wrong", e) + except Exception as e: + log.exception(e) + finally: + f.flush() + +def run(): + try: + valid_type_variant(results.type, results.variant) + except Exception as e: + log.exception(e) + exit(2) + + file_loader = FileSystemLoader(f"{_rootdir}/templates") + tmplenv = Environment(loader=file_loader) + tdl_template = tmplenv.get_template('icicle/tdl.xml.tmpl') + + arches = rlvars['allowed_arches'] if results.kube else [platform.uname().machine] + + for architecture in arches: + IB = ImageBuild( + architecture=Architecture.from_version(architecture, rlvars['revision']), + cli_args=results, + debug=results.debug, + image_type=results.type, + release=results.release if results.release else 0, + template=tdl_template, + variant=results.variant, + ) + if results.kube: + IB.job_template = tmplenv.get_template('kube/Job.tmpl') + #commands = IB.kube_commands() + print(IB.render_kubernetes_job()) + else: + ret = IB.build() + ret = IB.package() + ret = IB.copy() + diff --git a/iso/empanadas/empanadas/scripts/build_iso.py b/iso/empanadas/empanadas/scripts/build_iso.py index fbf1d06..6da2e4d 100755 --- a/iso/empanadas/empanadas/scripts/build_iso.py +++ b/iso/empanadas/empanadas/scripts/build_iso.py @@ -13,6 +13,7 @@ parser.add_argument('--isolation', type=str, help="mock isolation mode") parser.add_argument('--rc', action='store_true', help="Release Candidate, Beta, RLN") parser.add_argument('--local-compose', action='store_true', help="Compose Directory is Here") parser.add_argument('--logger', type=str) +parser.add_argument('--hashed', action='store_true') results = parser.parse_args() rlvars = rldict[results.release] major = rlvars['major'] @@ -24,6 +25,7 @@ a = IsoBuild( rc=results.rc, isolation=results.isolation, compose_dir_is_here=results.local_compose, + hashed=results.hashed, logger=results.logger, ) diff --git a/iso/empanadas/empanadas/scripts/build_iso_extra.py b/iso/empanadas/empanadas/scripts/build_iso_extra.py index 9fe9c05..b645747 100755 --- a/iso/empanadas/empanadas/scripts/build_iso_extra.py +++ b/iso/empanadas/empanadas/scripts/build_iso_extra.py @@ -16,6 +16,7 @@ 
parser.add_argument('--local-compose', action='store_true', help="Compose Direct parser.add_argument('--logger', type=str) parser.add_argument('--extra-iso', type=str, help="Granular choice in which iso is built") parser.add_argument('--extra-iso-mode', type=str, default='local') +parser.add_argument('--hashed', action='store_true') results = parser.parse_args() rlvars = rldict[results.release] major = rlvars['major'] @@ -30,6 +31,7 @@ a = IsoBuild( extra_iso=results.extra_iso, extra_iso_mode=results.extra_iso_mode, compose_dir_is_here=results.local_compose, + hashed=results.hashed, logger=results.logger ) diff --git a/iso/empanadas/empanadas/scripts/build_iso_live.py b/iso/empanadas/empanadas/scripts/build_iso_live.py new file mode 100755 index 0000000..6b2e5fa --- /dev/null +++ b/iso/empanadas/empanadas/scripts/build_iso_live.py @@ -0,0 +1,39 @@ +# builds ISO's + +import argparse + +from empanadas.common import * +from empanadas.util import Checks +from empanadas.util import LiveBuild + +parser = argparse.ArgumentParser(description="Live ISO Compose") + +parser.add_argument('--release', type=str, help="Major Release Version or major-type (eg 9-beta)", required=True) +parser.add_argument('--isolation', type=str, help="Mock Isolation") +parser.add_argument('--local-compose', action='store_true', help="Compose Directory is Here") +parser.add_argument('--image', type=str, help="Granular choice in which live image is built") +parser.add_argument('--logger', type=str) +parser.add_argument('--live-iso-mode', type=str, default='local') +parser.add_argument('--hashed', action='store_true') +parser.add_argument('--just-copy-it', action='store_true', help="Just copy the images to the compose dir") +parser.add_argument('--force-build', action='store_true', help="Just build and overwrite the images") +results = parser.parse_args() +rlvars = rldict[results.release] +major = rlvars['major'] + +a = LiveBuild( + rlvars, + config, + major=major, + isolation=results.isolation, + live_iso_mode=results.live_iso_mode, + image=results.image, + compose_dir_is_here=results.local_compose, + hashed=results.hashed, + justcopyit=results.just_copy_it, + force_build=results.force_build, + logger=results.logger +) + +def run(): + a.run_build_live_iso() diff --git a/iso/empanadas/empanadas/scripts/build_iso_live_test.py b/iso/empanadas/empanadas/scripts/build_iso_live_test.py new file mode 100755 index 0000000..7db3b53 --- /dev/null +++ b/iso/empanadas/empanadas/scripts/build_iso_live_test.py @@ -0,0 +1,34 @@ +# builds ISO's + +import argparse + +from empanadas.common import * +from empanadas.util import Checks +from empanadas.util import IsoBuild + +parser = argparse.ArgumentParser(description="Live ISO Compose") + +parser.add_argument('--release', type=str, help="Major Release Version or major-type (eg 9-beta)", required=True) +parser.add_argument('--isolation', type=str, help="Mock Isolation") +parser.add_argument('--local-compose', action='store_true', help="Compose Directory is Here") +parser.add_argument('--image', action='store_true', help="Live image name") +parser.add_argument('--logger', type=str) +parser.add_argument('--live-iso-mode', type=str, default='local') +results = parser.parse_args() +rlvars = rldict[results.release] +major = rlvars['major'] + +a = LiveBuild( + rlvars, + config, + major=major, + isolation=results.isolation, + extra_iso_mode=results.live_iso_mode, + image=results.image, + compose_dir_is_here=results.local_compose, + logger=results.logger +) + +def run(): + print(a.livemap['ksentry']) 
+ print(a.livemap['ksentry'].keys()) diff --git a/iso/empanadas/empanadas/scripts/finalize_compose.py b/iso/empanadas/empanadas/scripts/finalize_compose.py new file mode 100755 index 0000000..9cc8139 --- /dev/null +++ b/iso/empanadas/empanadas/scripts/finalize_compose.py @@ -0,0 +1,35 @@ +# This script can be called to do single syncs or full on syncs. + +import argparse + +from empanadas.common import * +from empanadas.util import Checks +from empanadas.util import RepoSync + +# Start up the parser baby +parser = argparse.ArgumentParser(description="Peridot Sync and Compose") + +# All of our options +parser.add_argument('--release', type=str, help="Major Release Version or major-type (eg 9-beta)", required=True) +parser.add_argument('--arch', type=str, help="Architecture") +parser.add_argument('--logger', type=str) + +# Parse them +results = parser.parse_args() +rlvars = rldict[results.release] +major = rlvars['major'] + +r = Checks(rlvars, config['arch']) +r.check_valid_arch() + +# Send them and do whatever I guess +a = RepoSync( + rlvars, + config, + major=major, + arch=results.arch, + logger=results.logger, +) + +def run(): + a.run_compose_closeout() diff --git a/iso/empanadas/empanadas/scripts/generate_compose.py b/iso/empanadas/empanadas/scripts/generate_compose.py new file mode 100755 index 0000000..19b85a1 --- /dev/null +++ b/iso/empanadas/empanadas/scripts/generate_compose.py @@ -0,0 +1,73 @@ +# This script can be called to do single syncs or full on syncs. + +import argparse +import logging +import sys + +from empanadas.common import * +from empanadas.util import Checks +from empanadas.util import RepoSync +from empanadas.util import Shared + +# Start up the parser baby +parser = argparse.ArgumentParser(description="Peridot Sync and Compose") + +# All of our options +parser.add_argument('--release', type=str, help="Major Release Version or major-type (eg 9-beta)", required=True) +parser.add_argument('--symlink', action='store_true', help="symlink") +parser.add_argument('--logger', type=str) + +# Parse them +results = parser.parse_args() +rlvars = rldict[results.release] +major = rlvars['major'] + +r = Checks(rlvars, config['arch']) +r.check_valid_arch() + +# Send them and do whatever I guess +def run(): + if results.logger is None: + log = logging.getLogger("generate") + log.setLevel(logging.INFO) + handler = logging.StreamHandler(sys.stdout) + handler.setLevel(logging.INFO) + formatter = logging.Formatter( + '%(asctime)s :: %(name)s :: %(message)s', + '%Y-%m-%d %H:%M:%S' + ) + handler.setFormatter(formatter) + log.addHandler(handler) + else: + log = results.logger + + compose_base = config['compose_root'] + "/" + major + shortname = config['shortname'] + version = rlvars['revision'] + date_stamp = config['date_stamp'] + profile = rlvars['profile'] + logger = log + + generated_dir = Shared.generate_compose_dirs( + compose_base, + shortname, + version, + date_stamp, + logger + ) + + if results.symlink: + compose_latest_dir = os.path.join( + config['compose_root'], + major, + "latest-{}-{}".format( + shortname, + profile, + ) + ) + if os.path.exists(compose_latest_dir): + os.remove(compose_latest_dir) + + os.symlink(generated_dir, compose_latest_dir) + + log.info('Generated compose dirs.') diff --git a/iso/empanadas/empanadas/scripts/launch_builds.py b/iso/empanadas/empanadas/scripts/launch_builds.py index f0f82f7..81dd457 100755 --- a/iso/empanadas/empanadas/scripts/launch_builds.py +++ b/iso/empanadas/empanadas/scripts/launch_builds.py @@ -8,10 +8,11 @@ from empanadas.common 
import _rootdir from jinja2 import Environment, FileSystemLoader -parser = argparse.ArgumentParser(description="ISO Compose") +parser = argparse.ArgumentParser(description="Generate Kubernetes Jobs to run lorax in mock and upload the result. Pipe into kubectl for the appropriate cluster") -parser.add_argument('--release', type=str, help="Major Release Version", required=True) -parser.add_argument('--env', type=str, help="environment", required=True) +parser.add_argument('--release', type=str, help="Major Release Version: (8|9)", required=True) +parser.add_argument('--env', type=str, help="environment: one of (eks|ext|all). presently jobs are scheduled on different kubernetes clusters", required=True) +parser.add_argument('--rc', action='store_true', help="Release Candidate, Beta, RLN") results = parser.parse_args() rlvars = rldict[results.release] major = rlvars['major'] @@ -30,16 +31,25 @@ def run(): elif results.env == "all": arches = EKSARCH+EXTARCH - command = ["build-iso", "--release", f"{results.release}", "--rc", "--isolation", "simple"] + command = ["build-iso", "--release", f"{results.release}", "--isolation", "simple", "--hashed"] + if results.rc: + command += ["--rc"] + + buildstamp = datetime.datetime.utcnow() out = "" - for arch in arches: + for architecture in arches: + copy_command = (f"aws s3 cp --recursive --exclude=* --include=lorax* " + f"/var/lib/mock/rocky-{ major }-$(uname -m)/root/builddir/ " + f"s3://resf-empanadas/buildiso-{ major }-{ architecture }/{ buildstamp.strftime('%s') }/" + ) out += job_template.render( - architecture=arch, + architecture=architecture, backoffLimit=4, - buildTime=datetime.datetime.utcnow().strftime("%s"), - command=command, - imageName="ghcr.io/neilhanlon/sig-core-toolkit:latest", + buildTime=buildstamp.strftime("%s"), + command=[command, copy_command], + imageName="ghcr.io/rocky-linux/sig-core-toolkit:latest", + jobname="buildiso", namespace="empanadas", major=major, restartPolicy="Never", diff --git a/iso/empanadas/empanadas/scripts/pull_cloud_image.py b/iso/empanadas/empanadas/scripts/pull_cloud_image.py new file mode 100755 index 0000000..4e175de --- /dev/null +++ b/iso/empanadas/empanadas/scripts/pull_cloud_image.py @@ -0,0 +1,33 @@ +# builds ISO's + +import argparse + +from empanadas.common import * +from empanadas.util import Checks +from empanadas.util import IsoBuild + +parser = argparse.ArgumentParser(description="ISO Artifact Builder") + +parser.add_argument('--release', type=str, help="Major Release Version", required=True) +parser.add_argument('--s3', action='store_true', help="Release Candidate") +parser.add_argument('--arch', type=str, help="Architecture") +parser.add_argument('--local-compose', action='store_true', help="Compose Directory is Here") +parser.add_argument('--force-download', action='store_true', help="Force a download") +parser.add_argument('--logger', type=str) +results = parser.parse_args() +rlvars = rldict[results.release] +major = rlvars['major'] + +a = IsoBuild( + rlvars, + config, + major=major, + s3=results.s3, + arch=results.arch, + force_download=results.force_download, + compose_dir_is_here=results.local_compose, + logger=results.logger, +) + +def run(): + a.run_pull_generic_images() diff --git a/iso/empanadas/empanadas/scripts/sync_from_peridot.py b/iso/empanadas/empanadas/scripts/sync_from_peridot.py index 1e40ed8..d025f65 100755 --- a/iso/empanadas/empanadas/scripts/sync_from_peridot.py +++ b/iso/empanadas/empanadas/scripts/sync_from_peridot.py @@ -22,6 +22,7 @@ parser.add_argument('--dry-run', 
action='store_true') parser.add_argument('--full-run', action='store_true') parser.add_argument('--no-fail', action='store_true') parser.add_argument('--refresh-extra-files', action='store_true') +parser.add_argument('--refresh-treeinfo', action='store_true') # I am aware this is confusing, I want podman to be the default option parser.add_argument('--simple', action='store_false') parser.add_argument('--logger', type=str) @@ -52,6 +53,7 @@ a = RepoSync( nofail=results.no_fail, logger=results.logger, refresh_extra_files=results.refresh_extra_files, + refresh_treeinfo=results.refresh_treeinfo, ) def run(): diff --git a/iso/empanadas/empanadas/scripts/sync_from_peridot_test.py b/iso/empanadas/empanadas/scripts/sync_from_peridot_test.py index 5057753..5a3dbb1 100755 --- a/iso/empanadas/empanadas/scripts/sync_from_peridot_test.py +++ b/iso/empanadas/empanadas/scripts/sync_from_peridot_test.py @@ -2,6 +2,7 @@ import argparse +import empanadas from empanadas.common import * from empanadas.util import Checks from empanadas.util import RepoSync @@ -16,3 +17,5 @@ a = RepoSync(rlvars, config, major="9", repo="BaseOS", parallel=True, ignore_deb def run(): print(rlvars.keys()) print(rlvars) + print(empanadas.__version__) + print(a.hashed) diff --git a/iso/empanadas/empanadas/sig/cloud.yaml b/iso/empanadas/empanadas/sig/cloud.yaml index f30f94a..e0ad17a 100644 --- a/iso/empanadas/empanadas/sig/cloud.yaml +++ b/iso/empanadas/empanadas/sig/cloud.yaml @@ -1,6 +1,7 @@ --- cloud: '8': + profile: 'cloud' cloud-kernel: project_id: 'f91da90d-5bdb-4cf2-80ea-e07f8dae5a5c' allowed_arches: @@ -10,8 +11,23 @@ cloud: allowed_arches: - aarch64 - x86_64 - project_id: '' + project_id: 'f91da90d-5bdb-4cf2-80ea-e07f8dae5a5c' + extra_files: + git_repo: 'https://git.rockylinux.org/staging/src/rocky-release-cloud.git' + git_raw_path: 'https://git.rockylinux.org/staging/src/rocky-release-cloud/-/raw/r8/' + branch: 'r8' + gpg: + stable: 'SOURCES/RPM-GPG-KEY-Rocky-SIG-Cloud' + list: + - 'SOURCES/RPM-GPG-KEY-Rocky-SIG-Cloud' '9': + cloud-kernel: + project_id: '' + allowed_arches: + - aarch64 + - x86_64 + - ppc64le + - s390x cloud-common: project_id: '' allowed_arches: diff --git a/iso/empanadas/empanadas/templates/README.tmpl b/iso/empanadas/empanadas/templates/README.tmpl new file mode 100644 index 0000000..ea4ec00 --- /dev/null +++ b/iso/empanadas/empanadas/templates/README.tmpl @@ -0,0 +1,35 @@ +These set of repositories (or "compose") is for {{ fullname }} and was generated +using Empanadas {{ version }} from the SIG/Core Toolkit. + +As this is not a traditional compose (via pungi), there will be things that you +might be expecting and do not see, or not expecting and do see. While we +attempted to recreate a lot of those elements, it's not perfect and we don't +expect that it ever will be. With that being said, in the future, we do plan on +having more metadata and providing client libraries that can ingest this type of +metadata that we produce for easy consumption, on top of extending what our +metadata provides. + +# Notes # + +## Checksums ## + +CHECKSUM Validation: https://github.com/rocky-linux/checksums + https://git.resf.org/rocky-linux/checksums (mirror) + +Traditionally, we would "sign" the checksum files with the current GPG key of a +major release. However, due to how the new build system operates and for +ensuring strong security within the new build ecosystem as it pertains the +signing keys, this is no longer a viable approach. 
It was determined by SIG/Core +(or Release Engineering) to instead provide verified signed commits using our +keys with RESF/Rocky Linux email domain names to a proper git repository. Our +signing keys are attached to our GitHub and RESF Git Service profiles. + +If you are looking for "verification" of the ISO checksums and were expecting a +`CHECKSUM.sig`, it is highly recommended to visit the link above instead. + +To verify our signature, click on "commits" and click the green "Verified" +button where you will see a GPG key ID. You can then search for this ID at the +any of the following: + +https://keys.openpgp.org/ +https://keyserver.ubuntu.com diff --git a/iso/empanadas/empanadas/templates/buildExtraImage.tmpl.sh b/iso/empanadas/empanadas/templates/buildExtraImage.tmpl.sh index ae9b5cf..fbac286 100644 --- a/iso/empanadas/empanadas/templates/buildExtraImage.tmpl.sh +++ b/iso/empanadas/empanadas/templates/buildExtraImage.tmpl.sh @@ -5,7 +5,7 @@ set -ex {{ lorax_pkg_cmd }} mkdir -p {{ compose_work_iso_dir }}/{{ arch }} cd {{ compose_work_iso_dir }}/{{ arch }} -test -f {{ isoname }} || { echo "!! ISO ALREDY EXISTS !!"; exit 1; } +test -f {{ isoname }} && { echo "ERROR: ISO ALREDY EXISTS!"; exit 1; } {% else %} cd /builddir diff --git a/iso/empanadas/empanadas/templates/buildLiveImage.tmpl.sh b/iso/empanadas/empanadas/templates/buildLiveImage.tmpl.sh new file mode 100644 index 0000000..841987a --- /dev/null +++ b/iso/empanadas/empanadas/templates/buildLiveImage.tmpl.sh @@ -0,0 +1,36 @@ +#!/bin/bash +set -ex + +{% if live_iso_mode == "podman" %} +{{ live_pkg_cmd }} +mkdir -p {{ compose_live_work_dir }}/{{ arch }} +cd {{ compose_live_work_dir }}/{{ arch }} +test -f {{ isoname }} && { echo "ERROR: ISO ALREDY EXISTS!"; exit 1; } + +major=$(grep loop /proc/devices | cut -c3) +for index in 0 1 2 3 4 5; do + mknod /dev/loop$index $major $index +done +{% else %} +cd /builddir + +{% endif %} + +{{ git_clone }} +if [ -d "/builddir/ks/live/{{ major }}/peridot" ]; then + pushd /builddir/ks/live/{{ major }}/peridot || { echo "Could not change directory"; exit 1; } +else + pushd /builddir/ks/live/{{ major }}/staging || { echo "Could not change directory"; exit 1; } +fi +ksflatten -c {{ ks_file }} -o /builddir/ks.cfg +if [ $? -ne 0 ]; then + echo "Error flattening kickstart" + exit 1 +fi +popd || { echo "Could not leave directory"; exit 1; } + +{{ make_image }} + +{% if live_iso_mode == "podman" %} +cp /builddir/lmc/{{ isoname }} {{ compose_live_work_dir }}/{{ arch }}/{{ isoname }} +{% endif %} diff --git a/iso/empanadas/empanadas/templates/extraisobuild.tmpl.sh b/iso/empanadas/empanadas/templates/extraisobuild.tmpl.sh index 4d42901..df51333 100644 --- a/iso/empanadas/empanadas/templates/extraisobuild.tmpl.sh +++ b/iso/empanadas/empanadas/templates/extraisobuild.tmpl.sh @@ -3,6 +3,8 @@ # under extreme circumstances should you be filling this out and running # manually. 
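The new buildLiveImage.tmpl.sh above is driven entirely by Jinja2 variables (live_iso_mode, live_pkg_cmd, compose_live_work_dir, arch, isoname, git_clone, major, ks_file, make_image). As a rough sketch of how empanadas could render it into an entry script: the concrete values, output path, package command, and kickstart name below are hypothetical and only meant to show the shape of the call, not what the toolkit actually passes.

from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader("empanadas/templates"))
template = env.get_template("buildLiveImage.tmpl.sh")

# Every value below is illustrative only.
entry = template.render(
    live_iso_mode="podman",
    live_pkg_cmd="dnf install -y lorax-lmc-novirt",    # hypothetical package install command
    compose_live_work_dir="/mnt/compose/9/latest-Rocky-9/work/live",
    arch="x86_64",
    isoname="Rocky-9-Live-x86_64.iso",                  # hypothetical ISO name
    git_clone="git clone https://example.invalid/kickstarts.git /builddir/ks",
    major="9",
    ks_file="rocky-live.ks",                            # hypothetical kickstart
    make_image="livemedia-creator --make-iso ...",      # real invocation comes from configuration
)

with open("buildLiveImage-x86_64-live.sh", "w+", encoding="utf-8") as f:
    f.write(entry)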
+set -o pipefail + # Vars MOCK_CFG="/var/tmp/lorax-{{ major }}.cfg" MOCK_ROOT="/var/lib/mock/{{ shortname|lower }}-{{ major }}-{{ arch }}" diff --git a/iso/empanadas/empanadas/templates/icicle/tdl.xml.tmpl b/iso/empanadas/empanadas/templates/icicle/tdl.xml.tmpl new file mode 100644 index 0000000..5ba9c6d --- /dev/null +++ b/iso/empanadas/empanadas/templates/icicle/tdl.xml.tmpl @@ -0,0 +1,21 @@ + + + diff --git a/iso/empanadas/empanadas/templates/isobuild.tmpl.sh b/iso/empanadas/empanadas/templates/isobuild.tmpl.sh index 95184b6..28398e3 100644 --- a/iso/empanadas/empanadas/templates/isobuild.tmpl.sh +++ b/iso/empanadas/empanadas/templates/isobuild.tmpl.sh @@ -2,6 +2,8 @@ # This is a template that is used to build ISO's for Rocky Linux. Only under # extreme circumstances should you be filling this out and running manually. +set -o pipefail + # Vars MOCK_CFG="/var/tmp/lorax-{{ major }}.cfg" MOCK_ROOT="/var/lib/mock/{{ shortname|lower }}-{{ major }}-{{ arch }}" diff --git a/iso/empanadas/empanadas/templates/kube/Job.tmpl b/iso/empanadas/empanadas/templates/kube/Job.tmpl index bfcc20a..1ddf1f2 100644 --- a/iso/empanadas/empanadas/templates/kube/Job.tmpl +++ b/iso/empanadas/empanadas/templates/kube/Job.tmpl @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: build-iso-{{ major }}-{{ architecture }} + name: {{ jobname }}-{{ major }}-{{ architecture }} namespace: {{ namespace }} spec: template: @@ -11,15 +11,18 @@ spec: peridot.rockylinux.org/workflow-tolerates-arch: {{ architecture }} spec: containers: - - name: buildiso-{{ major }}-{{ architecture }} + - name: {{ jobname }}-{{ major }}-{{ architecture }} image: {{ imageName }} command: ["/bin/bash", "-c"] args: - | - {{ command | join(' ') }} - aws s3 cp --recursive --exclude=* --include=lorax* \ - /var/lib/mock/rocky-{{ major }}-$(uname -m)/root/builddir/ \ - "s3://resf-empanadas/buildiso-{{ major }}-{{ architecture }}/{{ buildTime }}/" +{%- for c in command -%} +{%- if c is string %} + {{ c }} +{%- else %} + {{ ' '.join(c) }} +{%- endif %} +{%- endfor %} securityContext: runAsUser: 0 runAsGroup: 0 diff --git a/iso/empanadas/empanadas/templates/liveisobuild.tmpl.sh b/iso/empanadas/empanadas/templates/liveisobuild.tmpl.sh new file mode 100644 index 0000000..f5e48ec --- /dev/null +++ b/iso/empanadas/empanadas/templates/liveisobuild.tmpl.sh @@ -0,0 +1,58 @@ +#!/bin/bash +# This is a template that is used to build extra ISO's for Rocky Linux. Only +# under extreme circumstances should you be filling this out and running +# manually. + +set -o pipefail + +# Vars +MOCK_CFG="/var/tmp/live-{{ major }}.cfg" +MOCK_ROOT="/var/lib/mock/{{ shortname|lower }}-{{ major }}-{{ arch }}" +MOCK_RESL="${MOCK_ROOT}/result" +MOCK_CHRO="${MOCK_ROOT}/root" +MOCK_LOG="${MOCK_RESL}/mock-output.log" +IMAGE_SCR="{{ entries_dir }}/buildLiveImage-{{ arch }}-{{ image }}.sh" +IMAGE_ISO="{{ isoname }}" +ISOLATION="{{ isolation }}" +BUILDDIR="{{ builddir }}" + +#if [ -f "/usr/sbin/setenforce" ]; then +# sudo setenforce 0 +#fi + +# Init the container +mock \ + -r "${MOCK_CFG}" \ + --isolation="${ISOLATION}" \ + --enable-network \ + --init + +init_ret_val=$? +if [ $init_ret_val -ne 0 ]; then + echo "!! MOCK INIT FAILED !!" + exit 1 +fi + +mkdir -p "${MOCK_RESL}" +cp "${IMAGE_SCR}" "${MOCK_CHRO}${IMAGE_SCR}" + +mock \ + -r "${MOCK_CFG}" \ + --shell \ + --isolation="${ISOLATION}" \ + --enable-network -- /bin/bash "${IMAGE_SCR}" | tee -a "${MOCK_LOG}" + +mock_ret_val=$? 
+if [ $mock_ret_val -eq 0 ]; then + # Copy resulting data to /var/lib/mock/{{ shortname|lower }}-{{ major }}-{{ arch }}/result + mkdir -p "${MOCK_RESL}" + cp "${MOCK_CHRO}${BUILDDIR}/lmc/${IMAGE_ISO}" "${MOCK_RESL}" +else + echo "!! EXTRA ISO RUN FAILED !!" + exit 1 +fi + +# Clean up? +#if [ -f "/usr/sbin/setenforce" ]; then +# sudo setenforce 1 +#fi diff --git a/iso/empanadas/empanadas/templates/minimal/9/aarch64 b/iso/empanadas/empanadas/templates/minimal/9/aarch64 new file mode 100644 index 0000000..b5f2eae --- /dev/null +++ b/iso/empanadas/empanadas/templates/minimal/9/aarch64 @@ -0,0 +1,38 @@ +@minimal-environment +@core +@standard +@base +@guest-agents +kernel +lvm2 +bubblewrap +efibootmgr +efi-filesystem +efivar-libs +flashrom +fwupd +fwupd-plugin-flashrom +gdisk +glibc-langpack-en +grub2 +grub2-efi-aa64 +langpacks-core-en +langpacks-en +libatasmart +libblockdev +libblockdev-crypto +libblockdev-fs +libblockdev-loop +libblockdev-mdraid +libblockdev-part +libblockdev-swap +libblockdev-utils +libbytesize +libgcab1 +libjcat +libudisks2 +libxmlb +mokutil +shim-aa64 +udisks2 +volume_key-libs diff --git a/iso/empanadas/empanadas/templates/minimal/9/ppc64le b/iso/empanadas/empanadas/templates/minimal/9/ppc64le new file mode 100644 index 0000000..62fc6c9 --- /dev/null +++ b/iso/empanadas/empanadas/templates/minimal/9/ppc64le @@ -0,0 +1,33 @@ +@minimal-environment +@core +@standard +@base +@guest-agents +kernel +lvm2 +bubblewrap +efi-filesystem +flashrom +fwupd +fwupd-plugin-flashrom +gdisk +glibc-langpack-en +grub2 +langpacks-core-en +langpacks-en +libatasmart +libblockdev +libblockdev-crypto +libblockdev-fs +libblockdev-loop +libblockdev-mdraid +libblockdev-part +libblockdev-swap +libblockdev-utils +libbytesize +libgcab1 +libjcat +libudisks2 +libxmlb +udisks2 +volume_key-libs diff --git a/iso/empanadas/empanadas/templates/minimal/9/s390x b/iso/empanadas/empanadas/templates/minimal/9/s390x new file mode 100644 index 0000000..8317cf1 --- /dev/null +++ b/iso/empanadas/empanadas/templates/minimal/9/s390x @@ -0,0 +1,29 @@ +@minimal-environment +@core +@standard +@base +kernel +lvm2 +bubblewrap +efi-filesystem +fwupd +gdisk +glibc-langpack-en +langpacks-core-en +langpacks-en +libatasmart +libblockdev +libblockdev-crypto +libblockdev-fs +libblockdev-loop +libblockdev-mdraid +libblockdev-part +libblockdev-swap +libblockdev-utils +libbytesize +libgcab1 +libjcat +libudisks2 +libxmlb +udisks2 +volume_key-libs diff --git a/iso/empanadas/empanadas/templates/minimal/9/x86_64 b/iso/empanadas/empanadas/templates/minimal/9/x86_64 new file mode 100644 index 0000000..1d550a2 --- /dev/null +++ b/iso/empanadas/empanadas/templates/minimal/9/x86_64 @@ -0,0 +1,39 @@ +@minimal-environment +@core +@standard +@base +@guest-agents +kernel +lvm2 +bubblewrap +efibootmgr +efi-filesystem +efivar-libs +flashrom +fwupd +fwupd-plugin-flashrom +gdisk +glibc-langpack-en +grub2 +grub2-efi-x64 +langpacks-core-en +langpacks-en +libatasmart +libblockdev +libblockdev-crypto +libblockdev-fs +libblockdev-loop +libblockdev-mdraid +libblockdev-part +libblockdev-swap +libblockdev-utils +libbytesize +libgcab1 +libjcat +libsmbios +libudisks2 +libxmlb +mokutil +shim-x64 +udisks2 +volume_key-libs diff --git a/iso/empanadas/empanadas/templates/reposync-src.tmpl b/iso/empanadas/empanadas/templates/reposync-src.tmpl index 88836a3..3758643 100644 --- a/iso/empanadas/empanadas/templates/reposync-src.tmpl +++ b/iso/empanadas/empanadas/templates/reposync-src.tmpl @@ -2,6 +2,24 @@ set -o pipefail {{ import_gpg_cmd }} | tee -a {{ sync_log }} {{ 
dnf_plugin_cmd }} | tee -a {{ sync_log }} +sed -i 's/enabled=1/enabled=0/g' /etc/yum.repos.d/*.repo {{ sync_cmd }} | tee -a {{ sync_log }} +# Yes this is a bit hacky. Can't think of a better way to do this. +ret_val=$? +if [ "$ret_val" -ne 0 ]; then + echo "SYNCING FAILED" | tee -a {{ sync_log }} + exit 1 +fi + +if [ "$ret_val" -eq 0 ]; then + recs=$(grep '\[FAILED\]' {{ sync_log }}) + if [[ -n "${recs}" ]]; then + echo "SOME PACKAGES DID NOT DOWNLOAD" | tee -a {{ sync_log }} + exit 1 + else + exit 0 + fi +fi + # {{ check_cmd }} | tee -a {{ sync_log }} diff --git a/iso/empanadas/empanadas/templates/reposync.tmpl b/iso/empanadas/empanadas/templates/reposync.tmpl index 22f71b4..0709b6f 100644 --- a/iso/empanadas/empanadas/templates/reposync.tmpl +++ b/iso/empanadas/empanadas/templates/reposync.tmpl @@ -3,6 +3,31 @@ set -o pipefail {{ import_gpg_cmd }} | tee -a {{ sync_log }} {{ arch_force_cp }} | tee -a {{ sync_log }} {{ dnf_plugin_cmd }} | tee -a {{ sync_log }} +sed -i 's/enabled=1/enabled=0/g' /etc/yum.repos.d/*.repo {{ sync_cmd }} | tee -a {{ sync_log }} +# Yes this is a bit hacky. Can't think of a better way to do this. +ret_val=$? +if [ "$ret_val" -ne 0 ]; then + echo "SYNCING FAILED" | tee -a {{ sync_log }} + exit 1 +fi + +if [ "$ret_val" -eq 0 ]; then + recs=$(grep '\[FAILED\]' {{ sync_log }}) + if [[ -n "${recs}" ]]; then + echo "SOME PACKAGES DID NOT DOWNLOAD" | tee -a {{ sync_log }} + exit 1 + else + # This is kind of a hack too. + #FOUND=$(grep -A20 'git\.rockylinux\.org' {{ sync_log }} | egrep -c '^\([0-9]+\/[0-9]+\)|\[SKIPPED\]|\.rpm') + #if [ "$FOUND" -eq "0" ]; then + # echo "Repository is empty." | tee -a {{ sync_log }} + # rm -rf {{ download_path }} + #fi + exit 0 + fi +fi + + # {{ check_cmd }} | tee -a {{ sync_log }} diff --git a/iso/empanadas/empanadas/util/__init__.py b/iso/empanadas/empanadas/util/__init__.py index f107a54..828e595 100644 --- a/iso/empanadas/empanadas/util/__init__.py +++ b/iso/empanadas/empanadas/util/__init__.py @@ -8,6 +8,7 @@ from empanadas.util.check import ( from empanadas.util.shared import ( Shared, + ArchCheck, ) from empanadas.util.dnf_utils import ( diff --git a/iso/empanadas/empanadas/util/dnf_utils.py b/iso/empanadas/empanadas/util/dnf_utils.py index a54b4ee..820f6db 100644 --- a/iso/empanadas/empanadas/util/dnf_utils.py +++ b/iso/empanadas/empanadas/util/dnf_utils.py @@ -14,10 +14,12 @@ import shutil import time import re import json +import glob #import pipes from jinja2 import Environment, FileSystemLoader +import empanadas from empanadas.common import Color, _rootdir from empanadas.util import Shared @@ -74,10 +76,15 @@ class RepoSync: # Relevant config items self.major_version = major self.date_stamp = config['date_stamp'] + self.timestamp = time.time() self.repo_base_url = config['repo_base_url'] self.compose_root = config['compose_root'] self.compose_base = config['compose_root'] + "/" + major self.profile = rlvars['profile'] + self.iso_map = rlvars['iso_map'] + self.distname = config['distname'] + self.fullname = rlvars['fullname'] + self.shortname = config['shortname'] # Relevant major version items self.shortname = config['shortname'] @@ -91,6 +98,13 @@ class RepoSync: self.repo = repo self.extra_files = rlvars['extra_files'] self.gpgkey = gpgkey + self.checksum = rlvars['checksum'] + + self.compose_id = '{}-{}-{}'.format( + config['shortname'], + rlvars['revision'], + config['date_stamp'] + ) # Templates file_loader = FileSystemLoader(f"{_rootdir}/templates") @@ -114,7 +128,10 @@ class RepoSync: self.compose_latest_dir = 
os.path.join( config['compose_root'], major, - "latest-Rocky-{}".format(self.profile) + "latest-{}-{}".format( + self.shortname, + self.profile + ) ) self.compose_latest_sync = os.path.join( @@ -147,8 +164,12 @@ class RepoSync: self.log.info('reposync init') self.log.info(self.revision) - self.dnf_config = self.generate_conf() + # The repo name should be valid + if self.repo is not None: + if self.repo not in self.repos: + self.log.error(Color.FAIL + 'Invalid repository: ' + self.repo) + raise SystemExit() def run(self): """ @@ -159,7 +180,8 @@ class RepoSync: * Dry runs only create initial directories and structure * Full runs sync everything from the top and setup structure, - including creating a symlink to latest-Rocky-X + including creating a symlink to latest-Rocky-X and creating the + kickstart directories * self.repo is ignored during full runs (noted in stdout) * self.arch being set will force only that arch to sync """ @@ -172,7 +194,13 @@ class RepoSync: # This should create the initial compose dir and set the path. # Otherwise, just use the latest link. if self.fullrun: - generated_dir = self.generate_compose_dirs() + generated_dir = Shared.generate_compose_dirs( + self.compose_base, + self.shortname, + self.fullversion, + self.date_stamp, + self.log + ) work_root = os.path.join( generated_dir, 'work' @@ -206,31 +234,38 @@ class RepoSync: "global", ) + #self.dnf_config = self.generate_conf(dest_path=global_work_root) + self.dnf_config = self.generate_conf() + if self.dryrun: self.log.error('Dry Runs are not supported just yet. Sorry!') raise SystemExit() if self.fullrun and self.refresh_extra_files: - self.log.warn( - '[' + Color.BOLD + Color.YELLOW + 'WARN' + Color.END + '] ' + - 'A full run implies extra files are also deployed.' - ) + self.log.warn(Color.WARN + 'A full run implies extra files are also deployed.') self.sync(self.repo, sync_root, work_root, log_root, global_work_root, self.arch) if self.fullrun: - self.deploy_extra_files(global_work_root) + self.deploy_extra_files(sync_root, global_work_root) self.deploy_treeinfo(self.repo, sync_root, self.arch) + self.tweak_treeinfo(self.repo, sync_root, self.arch) self.symlink_to_latest(generated_dir) if self.repoclosure: self.repoclosure_work(sync_root, work_root, log_root) if self.refresh_extra_files and not self.fullrun: - self.deploy_extra_files(global_work_root) + self.deploy_extra_files(sync_root, global_work_root) + # deploy_treeinfo does NOT overwrite any treeinfo files. However, + # tweak_treeinfo calls out to a method that does. This should not + # cause issues as the method is fairly static in nature. 
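For reference, Shared.generate_compose_dirs (called above during full runs) is not part of this diff. A minimal sketch of what it is assumed to do, mirroring the RepoSync.generate_compose_dirs method removed further down, with the shortname parameter substituted for the hardcoded "Rocky":

import os

def generate_compose_dirs(compose_base, shortname, version, date_stamp, logger) -> str:
    """Create the dated compose directory for a full run and return its path."""
    compose_base_dir = os.path.join(
        compose_base,
        "{}-{}-{}".format(shortname, version, date_stamp)
    )
    logger.info('Creating compose directory %s' % compose_base_dir)
    if not os.path.exists(compose_base_dir):
        os.makedirs(compose_base_dir)
    return compose_base_dir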
if self.refresh_treeinfo and not self.fullrun: self.deploy_treeinfo(self.repo, sync_root, self.arch) + self.tweak_treeinfo(self.repo, sync_root, self.arch) + + self.deploy_metadata(sync_root) self.log.info('Compose repo directory: %s' % sync_root) self.log.info('Compose logs: %s' % log_root) @@ -276,7 +311,7 @@ class RepoSync: Each container runs their own script wait till all is finished """ - cmd = self.podman_cmd() + cmd = Shared.podman_cmd(self.log) contrunlist = [] bad_exit_list = [] self.log.info('Generating container entries') @@ -324,7 +359,7 @@ class RepoSync: entry_name_list.append(entry_name) - if not self.ignore_debug: + if not self.ignore_debug and not a == 'source': entry_name_list.append(debug_entry_name) entry_point_sh = os.path.join( @@ -403,7 +438,8 @@ class RepoSync: arch_force_cp=arch_force_cp, dnf_plugin_cmd=dnf_plugin_cmd, sync_cmd=sync_cmd, - sync_log=sync_log + sync_log=sync_log, + download_path=os_sync_path ) debug_sync_template = self.tmplenv.get_template('reposync.tmpl') @@ -412,7 +448,8 @@ class RepoSync: arch_force_cp=arch_force_cp, dnf_plugin_cmd=dnf_plugin_cmd, sync_cmd=debug_sync_cmd, - sync_log=debug_sync_log + sync_log=debug_sync_log, + download_path=debug_sync_path ) entry_point_open = open(entry_point_sh, "w+") @@ -427,8 +464,56 @@ class RepoSync: os.chmod(entry_point_sh, 0o755) os.chmod(debug_entry_point_sh, 0o755) + # During fullruns, a kickstart directory is made. Kickstart + # should not be updated nor touched during regular runs under + # any circumstances. + if self.fullrun: + ks_entry_name = '{}-ks-{}'.format(r, a) + entry_name_list.append(ks_entry_name) + ks_point_sh = os.path.join( + entries_dir, + ks_entry_name + ) + + ks_sync_path = os.path.join( + sync_root, + repo_name, + a, + 'kickstart' + ) + + ks_sync_cmd = ("/usr/bin/dnf reposync -c {}.{} --download-metadata " + "--repoid={} -p {} --forcearch {} --norepopath " + "--gpgcheck --assumeyes 2>&1").format( + self.dnf_config, + a, + r, + ks_sync_path, + a + ) + + ks_sync_log = ("{}/{}-{}-ks.log").format( + log_root, + repo_name, + a + ) + + ks_sync_template = self.tmplenv.get_template('reposync.tmpl') + ks_sync_output = ks_sync_template.render( + import_gpg_cmd=import_gpg_cmd, + arch_force_cp=arch_force_cp, + dnf_plugin_cmd=dnf_plugin_cmd, + sync_cmd=ks_sync_cmd, + sync_log=ks_sync_log + ) + ks_entry_point_open = open(ks_point_sh, "w+") + ks_entry_point_open.write(ks_sync_output) + ks_entry_point_open.close() + os.chmod(ks_point_sh, 0o755) + # We ignoring sources? - if not self.ignore_source: + if (not self.ignore_source and not arch) or ( + not self.ignore_source and arch == 'source'): source_entry_name = '{}-source'.format(r) entry_name_list.append(source_entry_name) @@ -474,7 +559,7 @@ class RepoSync: #print(entry_name_list) for pod in entry_name_list: - podman_cmd_entry = '{} run -d -it -v "{}:{}" -v "{}:{}" -v "{}:{}" --name {} --entrypoint {}/{} {}'.format( + podman_cmd_entry = '{} run -d -it -v "{}:{}" -v "{}:{}:z" -v "{}:{}" --name {} --entrypoint {}/{} {}'.format( cmd, self.compose_root, self.compose_root, @@ -496,10 +581,7 @@ class RepoSync: join_all_pods = ' '.join(entry_name_list) time.sleep(3) - self.log.info( - '[' + Color.BOLD + Color.GREEN + 'INFO' + Color.END + '] ' + - 'Syncing ' + r + ' ...' 
- ) + self.log.info(Color.INFO + 'Syncing ' + r + ' ...') pod_watcher = '{} wait {}'.format( cmd, join_all_pods @@ -529,9 +611,7 @@ class RepoSync: output, errors = podcheck.communicate() if 'Exited (0)' not in output.decode(): - self.log.error( - '[' + Color.BOLD + Color.RED + 'FAIL' + Color.END + '] ' + pod - ) + self.log.error(Color.FAIL + pod) bad_exit_list.append(pod) rmcmd = '{} rm {}'.format( @@ -547,10 +627,7 @@ class RepoSync: ) entry_name_list.clear() - self.log.info( - '[' + Color.BOLD + Color.GREEN + 'INFO' + Color.END + '] ' + - 'Syncing ' + r + ' completed' - ) + self.log.info(Color.INFO + 'Syncing ' + r + ' completed') if len(bad_exit_list) > 0: self.log.error( @@ -565,20 +642,6 @@ class RepoSync: 'No issues detected.' ) - def generate_compose_dirs(self) -> str: - """ - Generate compose dirs for full runs - """ - compose_base_dir = os.path.join( - self.compose_base, - "Rocky-{}-{}".format(self.fullversion, self.date_stamp) - ) - self.log.info('Creating compose directory %s' % compose_base_dir) - if not os.path.exists(compose_base_dir): - os.makedirs(compose_base_dir) - - return compose_base_dir - def symlink_to_latest(self, generated_dir): """ Emulates pungi and symlinks latest-Rocky-X @@ -607,7 +670,11 @@ class RepoSync: """ fname = os.path.join( dest_path, - "{}-config.repo".format(self.major_version) + "{}-{}-config.repo".format(self.shortname, self.major_version) + ) + pname = os.path.join( + '/var/tmp', + "{}-{}-config.repo".format(self.shortname, self.major_version) ) self.log.info('Generating the repo configuration: %s' % fname) @@ -625,7 +692,6 @@ class RepoSync: config_file = open(fname, "w+") repolist = [] for repo in self.repos: - constructed_url = '{}/{}/repo/{}{}/$basearch'.format( self.repo_base_url, self.project_id, @@ -653,62 +719,9 @@ class RepoSync: config_file.write(output) config_file.close() + #return (fname, pname) return fname - def reposync_cmd(self) -> str: - """ - This generates the reposync command. We don't support reposync by - itself and will raise an error. - - :return: The path to the reposync command. If dnf exists, we'll use - that. Otherwise, fail immediately. - """ - cmd = None - if os.path.exists("/usr/bin/dnf"): - cmd = "/usr/bin/dnf reposync" - else: - self.log.error('/usr/bin/dnf was not found. Good bye.') - raise SystemExit("/usr/bin/dnf was not found. \n\n/usr/bin/reposync " - "is not sufficient and you are likely running on an el7 " - "system or a grossly modified EL8+ system, " + Color.BOLD + - "which tells us that you probably made changes to these tools " - "expecting them to work and got to this point." + Color.END) - return cmd - - def podman_cmd(self) -> str: - """ - This generates the podman run command. This is in the case that we want - to do reposyncs in parallel as we cannot reasonably run multiple - instances of dnf reposync on a single system. - """ - cmd = None - if os.path.exists("/usr/bin/podman"): - cmd = "/usr/bin/podman" - else: - self.log.error('/usr/bin/podman was not found. Good bye.') - raise SystemExit("\n\n/usr/bin/podman was not found.\n\nPlease " - " ensure that you have installed the necessary packages on " - " this system. " + Color.BOLD + "Note that docker is not " - "supported." + Color.END - ) - return cmd - - def git_cmd(self) -> str: - """ - This generates the git command. This is when we need to pull down extra - files or do work from a git repository. - """ - cmd = None - if os.path.exists("/usr/bin/git"): - cmd = "/usr/bin/git" - else: - self.log.error('/usr/bin/git was not found. 
Good bye.') - raise SystemExit("\n\n/usr/bin/git was not found.\n\nPlease " - " ensure that you have installed the necessary packages on " - " this system. " - ) - return cmd - def repoclosure_work(self, sync_root, work_root, log_root): """ This is where we run repoclosures, based on the configuration of each @@ -719,7 +732,7 @@ class RepoSync: against itself. (This means BaseOS should be able to survive by itself.) """ - cmd = self.podman_cmd() + cmd = Shared.podman_cmd(self.log) entries_dir = os.path.join(work_root, "entries") bad_exit_list = [] @@ -791,7 +804,7 @@ class RepoSync: self.log.info('Spawning pods for %s' % repo) for pod in repoclosure_entry_name_list: - podman_cmd_entry = '{} run -d -it -v "{}:{}" -v "{}:{}" -v "{}:{}" --name {} --entrypoint {}/{} {}'.format( + podman_cmd_entry = '{} run -d -it -v "{}:{}" -v "{}:{}:z" -v "{}:{}" --name {} --entrypoint {}/{} {}'.format( cmd, self.compose_root, self.compose_root, @@ -839,9 +852,7 @@ class RepoSync: output, errors = podcheck.communicate() if 'Exited (0)' not in output.decode(): - self.log.error( - '[' + Color.BOLD + Color.RED + 'FAIL' + Color.END + '] ' + pod - ) + self.log.error(Color.FAIL + pod) bad_exit_list.append(pod) rmcmd = '{} rm {}'.format( @@ -867,7 +878,7 @@ class RepoSync: for issue in bad_exit_list: self.log.error(issue) - def deploy_extra_files(self, global_work_root): + def deploy_extra_files(self, sync_root, global_work_root): """ deploys extra files based on info of rlvars including a extra_files.json @@ -875,20 +886,24 @@ class RepoSync: might also deploy COMPOSE_ID and maybe in the future a metadata dir with a bunch of compose-esque stuff. """ - cmd = self.git_cmd() + self.log.info(Color.INFO + 'Deploying treeinfo, discinfo, and media.repo') + + cmd = Shared.git_cmd(self.log) tmpclone = '/tmp/clone' extra_files_dir = os.path.join( global_work_root, 'extra-files' ) - self.log.info( - '[' + Color.BOLD + Color.GREEN + 'INFO' + Color.END + '] ' + - 'Deploying extra files to work directory ...' + metadata_dir = os.path.join( + sync_root, + "metadata" ) - if not os.path.exists(extra_files_dir): os.makedirs(extra_files_dir, exist_ok=True) + if not os.path.exists(metadata_dir): + os.makedirs(metadata_dir, exist_ok=True) + clonecmd = '{} clone {} -b {} -q {}'.format( cmd, self.extra_files['git_repo'], @@ -902,6 +917,8 @@ class RepoSync: stderr=subprocess.DEVNULL ) + self.log.info(Color.INFO + 'Deploying extra files to work and metadata directories ...') + # Copy files to work root for extra in self.extra_files['list']: src = '/tmp/clone/' + extra @@ -910,33 +927,83 @@ class RepoSync: # exist on our mirrors. try: shutil.copy2(src, extra_files_dir) + shutil.copy2(src, metadata_dir) except: - self.log.warn( - '[' + Color.BOLD + Color.YELLOW + 'WARN' + Color.END + '] ' + - 'Extra file not copied: ' + src - ) + self.log.warn(Color.WARN + 'Extra file not copied: ' + src) try: shutil.rmtree(tmpclone) except OSError as e: - self.log.error( - '[' + Color.BOLD + Color.RED + 'FAIL' + Color.END + '] ' + - 'Directory ' + tmpclone + ' could not be removed: ' + - e.strerror + self.log.error(Color.FAIL + 'Directory ' + tmpclone + + ' could not be removed: ' + e.strerror ) - # Create metadata here? - - self.log.info( - '[' + Color.BOLD + Color.GREEN + 'INFO' + Color.END + '] ' + - 'Extra files phase completed.' + def deploy_metadata(self, sync_root): + """ + Deploys metadata that defines information about the compose. Some data + will be close to how pungi produces it, but it won't be exact nor a + perfect replica. 
+ """ + self.log.info(Color.INFO + 'Deploying metadata for this compose') + # Create metadata here + # Create COMPOSE_ID here (this doesn't necessarily match anything, it's + # just an indicator) + metadata_dir = os.path.join( + sync_root, + "metadata" ) + # It should already exist from a full run or refresh. This is just in + # case and it doesn't hurt. + if not os.path.exists(metadata_dir): + os.makedirs(metadata_dir, exist_ok=True) + + with open(metadata_dir + '/COMPOSE_ID', "w+") as f: + f.write(self.compose_id) + f.close() + + Shared.write_metadata( + self.timestamp, + self.date_stamp, + self.distname, + self.fullversion, + self.compose_id, + metadata_dir + '/metadata' + ) + + # TODO: Add in each repo and their corresponding arch. + productmd_date = self.date_stamp.split('.')[0] + Shared.composeinfo_write( + metadata_dir + '/composeinfo', + self.distname, + self.shortname, + self.fullversion, + 'updates', + productmd_date + ) + + self.log.info(Color.INFO + 'Metadata files phase completed.') + + # Deploy README to metadata directory + readme_template = self.tmplenv.get_template('README.tmpl') + readme_output = readme_template.render( + fullname=self.fullname, + version=empanadas.__version__ + ) + + with open(metadata_dir + '/README', 'w+', encoding='utf-8') as readme_file: + readme_file.write(readme_output) + readme_file.close() + + def deploy_treeinfo(self, repo, sync_root, arch): """ Deploys initial treeinfo files. These have the potential of being - overwritten by our ISO process, which is fine. + overwritten by our ISO process, which is fine. If there is a treeinfo + found, it will be skipped. """ + self.log.info(Color.INFO + 'Deploying treeinfo, discinfo, and media.repo') + arches_to_tree = self.arches if arch: arches_to_tree = [arch] @@ -945,6 +1012,547 @@ class RepoSync: if repo and not self.fullrun: repos_to_tree = [repo] + # If a treeinfo or discinfo file exists, it should be skipped. + for r in repos_to_tree: + entry_name_list = [] + repo_name = r + arch_tree = arches_to_tree.copy() + + if r in self.repo_renames: + repo_name = self.repo_renames[r] + + # I feel it's necessary to make sure even i686 has .treeinfo and + # .discinfo, just for consistency. 
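Shared.discinfo_write, used repeatedly below, is likewise not shown in this diff. The .discinfo format itself is small (timestamp, release string, architecture, disc number), so a minimal sketch of such a writer, assuming empanadas follows the traditional anaconda/pungi layout, might be:

def discinfo_write(timestamp, fullname, arch, file_path):
    """Write a traditional four-line .discinfo file."""
    data = [
        "%s" % timestamp,   # e.g. 1656633600.0
        fullname,           # e.g. "Rocky Linux 9.0"
        arch,               # e.g. "x86_64"
        "ALL",              # disc number; "ALL" for a full repo tree
    ]
    with open(file_path, "w+", encoding="utf-8") as f:
        f.write("\n".join(data) + "\n")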
+ if 'all' in r and 'x86_64' in arches_to_tree and self.multilib: + arch_tree.append('i686') + + for a in arch_tree: + if a == 'source': + continue + + os_tree_path = os.path.join( + sync_root, + repo_name, + a, + 'os/.treeinfo' + ) + + os_disc_path = os.path.join( + sync_root, + repo_name, + a, + 'os/.discinfo' + ) + + os_media_path = os.path.join( + sync_root, + repo_name, + a, + 'os/media.repo' + ) + + ks_tree_path = os.path.join( + sync_root, + repo_name, + a, + 'kickstart/.treeinfo' + ) + + ks_disc_path = os.path.join( + sync_root, + repo_name, + a, + 'kickstart/.discinfo' + ) + + ks_media_path = os.path.join( + sync_root, + repo_name, + a, + 'kickstart/media.repo' + ) + + + if not os.path.exists(os_tree_path): + try: + Shared.treeinfo_new_write( + os_tree_path, + self.distname, + self.shortname, + self.fullversion, + a, + int(self.timestamp), + repo_name + ) + except Exception as e: + self.log.error(Color.FAIL + repo_name + ' ' + + a + ' os .treeinfo could not be written' + ) + self.log.error(e) + else: + self.log.warn(Color.WARN + repo_name + ' ' + a + ' os .treeinfo already exists') + + if not os.path.exists(os_disc_path): + try: + Shared.discinfo_write( + self.timestamp, + self.fullname, + a, + os_disc_path + ) + except Exception as e: + self.log.error(Color.FAIL + repo_name + ' ' + + a + ' os .discinfo could not be written' + ) + self.log.error(e) + else: + self.log.warn(Color.WARN + repo_name + ' ' + a + + ' os .discinfo already exists' + ) + + if not os.path.exists(os_media_path): + try: + Shared.media_repo_write( + self.timestamp, + self.fullname, + os_media_path + ) + except Exception as e: + self.log.error(Color.FAIL + repo_name + ' ' + a + + ' os media.repo could not be written' + ) + self.log.error(e) + else: + self.log.warn(Color.WARN + repo_name + ' ' + a + + ' os media.repo already exists' + ) + + # Kickstart part of the repos + if not os.path.exists(ks_tree_path): + try: + Shared.treeinfo_new_write( + ks_tree_path, + self.distname, + self.shortname, + self.fullversion, + a, + int(self.timestamp), + repo_name + ) + except Exception as e: + self.log.error(Color.FAIL + repo_name + ' ' + a + + ' kickstart .treeinfo could not be written' + ) + self.log.error(e) + else: + self.log.warn(Color.WARN + repo_name + ' ' + a + + ' kickstart .treeinfo already exists' + ) + + if not os.path.exists(ks_disc_path): + try: + Shared.discinfo_write( + self.timestamp, + self.fullname, + a, + ks_disc_path + ) + except Exception as e: + self.log.error(Color.FAIL + repo_name + ' ' + a + + ' kickstart .discinfo could not be written' + ) + self.log.error(e) + else: + self.log.warn(Color.FAIL + repo_name + ' ' + a + + ' kickstart .discinfo already exists' + ) + + if not os.path.exists(ks_media_path): + try: + Shared.media_repo_write( + self.timestamp, + self.fullname, + ks_media_path + ) + except Exception as e: + self.log.error(Color.FAIL + repo_name + ' ' + a + + ' kickstart media.repo could not be written' + ) + self.log.error(e) + else: + self.log.warn(Color.WARN + repo_name + ' ' + a + + ' kickstart media.repo already exists' + ) + + if not self.ignore_debug and not a == 'source': + debug_tree_path = os.path.join( + sync_root, + repo_name, + a, + 'debug/tree/.treeinfo' + ) + + debug_disc_path = os.path.join( + sync_root, + repo_name, + a, + 'debug/tree/.discinfo' + ) + + debug_media_path = os.path.join( + sync_root, + repo_name, + a, + 'debug/tree/media.repo' + ) + + if not os.path.exists(debug_tree_path): + try: + Shared.treeinfo_new_write( + debug_tree_path, + self.distname, + 
self.shortname, + self.fullversion, + a, + self.timestamp, + repo_name + ) + except Exception as e: + self.log.error(Color.FAIL + repo_name + ' ' + a + + ' debug .treeinfo could not be written' + ) + self.log.error(e) + else: + self.log.warn(Color.WARN + r + ' ' + a + + ' debug .treeinfo already exists' + ) + + if not os.path.exists(debug_disc_path): + try: + Shared.discinfo_write( + self.timestamp, + self.fullname, + a, + debug_disc_path + ) + except Exception as e: + self.log.error(Color.FAIL + repo_name + ' ' + a + + ' debug .discinfo could not be written' + ) + self.log.error(e) + else: + self.log.warn(Color.WARN + r + ' ' + a + + ' debug .discinfo already exists' + ) + + if not os.path.exists(debug_media_path): + try: + Shared.media_repo_write( + self.timestamp, + self.fullname, + debug_media_path + ) + except Exception as e: + self.log.error(Color.FAIL + repo_name + ' ' + a + + ' debug media.repo could not be written' + ) + self.log.error(e) + else: + self.log.warn(Color.WARN + repo_name + ' ' + a + + ' debug media.repo already exists' + ) + + + if not self.ignore_source and not arch: + source_tree_path = os.path.join( + sync_root, + repo_name, + 'source/tree/.treeinfo' + ) + + source_disc_path = os.path.join( + sync_root, + repo_name, + 'source/tree/.discinfo' + ) + + source_media_path = os.path.join( + sync_root, + repo_name, + 'source/tree/media.repo' + ) + + if not os.path.exists(source_tree_path): + try: + Shared.treeinfo_new_write( + source_tree_path, + self.distname, + self.shortname, + self.fullversion, + 'src', + self.timestamp, + repo_name + ) + except Exception as e: + self.log.error(Color.FAIL + repo_name + ' source os .treeinfo could not be written') + self.log.error(e) + else: + self.log.warn(Color.WARN + repo_name + ' source os .treeinfo already exists') + + if not os.path.exists(source_disc_path): + try: + Shared.discinfo_write( + self.timestamp, + self.fullname, + 'src', + source_disc_path + ) + except Exception as e: + self.log.error(Color.FAIL + repo_name + ' source os .discinfo could not be written') + self.log.error(e) + else: + self.log.warn(Color.WARN + repo_name + ' source .discinfo already exists') + + if not os.path.exists(source_media_path): + try: + Shared.media_repo_write( + self.timestamp, + self.fullname, + source_media_path + ) + except Exception as e: + self.log.error(Color.FAIL + repo_name + ' source os media.repo could not be written') + self.log.error(e) + else: + self.log.warn(Color.WARN + repo_name + ' source media.repo already exists') + + def tweak_treeinfo(self, repo, sync_root, arch): + """ + This modifies treeinfo for the primary repository. If the repository is + listed in the iso_map as a non-disc, it will be considered for modification. 
+ """ + variants_to_tweak = [] + + arches_to_tree = self.arches + if arch: + arches_to_tree = [arch] + + repos_to_tree = self.repos + if repo and not self.fullrun: + repos_to_tree = [repo] + + for r in repos_to_tree: + entry_name_list = [] + repo_name = r + arch_tree = arches_to_tree.copy() + + if r in self.iso_map['images']: + variants_to_tweak.append(r) + + if not len(variants_to_tweak) > 0: + self.log.info(Color.INFO + 'No treeinfo to tweak.') + return + + for a in arches_to_tree: + for v in variants_to_tweak: + self.log.info(Color.INFO + 'Tweaking treeinfo for ' + a + ' ' + v) + image = os.path.join(sync_root, v, a, 'os') + imagemap = self.iso_map['images'][v] + data = { + 'arch': a, + 'variant': v, + 'variant_path': image, + 'checksum': self.checksum, + 'distname': self.distname, + 'fullname': self.fullname, + 'shortname': self.shortname, + 'release': self.fullversion, + 'timestamp': self.timestamp, + } + + try: + Shared.treeinfo_modify_write(data, imagemap, self.log) + except Exception as e: + self.log.error(Color.FAIL + 'There was an error writing os treeinfo.') + self.log.error(e) + + if self.fullrun: + ksimage = os.path.join(sync_root, v, a, 'kickstart') + ksdata = { + 'arch': a, + 'variant': v, + 'variant_path': ksimage, + 'checksum': self.checksum, + 'distname': self.distname, + 'fullname': self.fullname, + 'shortname': self.shortname, + 'release': self.fullversion, + 'timestamp': self.timestamp, + } + + try: + Shared.treeinfo_modify_write(ksdata, imagemap, self.log) + except Exception as e: + self.log.error(Color.FAIL + 'There was an error writing kickstart treeinfo.') + self.log.error(e) + + def run_compose_closeout(self): + """ + Closes out a compose. This ensures the ISO's are synced from work/isos + to compose/isos, checks for live media and syncs as well from work/live + to compose/live, deploys final metadata. + """ + # latest-X-Y should exist at all times for this to work. + work_root = os.path.join( + self.compose_latest_dir, + 'work' + ) + sync_root = self.compose_latest_sync + + sync_iso_root = os.path.join( + sync_root, + 'isos' + ) + + tmp_dir = os.path.join( + self.compose_root, + 'partitions' + ) + + # Verify if the link even exists + if not os.path.exists(self.compose_latest_dir): + self.log.error( + '!! Latest compose link is broken does not exist: %s' % self.compose_latest_dir + ) + self.log.error( + '!! Please perform a full run if you have not done so.' + ) + raise SystemExit() + + log_root = os.path.join( + work_root, + "logs", + self.date_stamp + ) + + iso_root = os.path.join( + work_root, + "isos" + ) + + live_root = os.path.join( + work_root, + "live" + ) + + sync_live_root = os.path.join( + sync_root, + 'live' + ) + + images_root = os.path.join( + work_root, + 'images' + ) + + sync_images_root = os.path.join( + sync_root, + 'images' + ) + + global_work_root = os.path.join( + work_root, + "global", + ) + + # Standard ISOs + self.log.info(Color.INFO + 'Starting to sync ISOs to compose') + + if os.path.exists('/usr/bin/fpsync'): + self.log.info(Color.INFO + 'Starting up fpsync') + message, ret = Shared.fpsync_method(iso_root, sync_iso_root, tmp_dir) + elif os.path.exists('/usr/bin/parallel') and os.path.exists('/usr/bin/rsync'): + self.log.info(Color.INFO + 'Starting up parallel | rsync') + message, ret = Shared.rsync_method(iso_root, sync_iso_root) + else: + self.log.error( + Color.FAIL + + 'fpsync nor parallel + rsync were found on this system. ' + + 'There is also no built-in parallel rsync method at this ' + + 'time.' 
+ ) + raise SystemExit() + + if ret != 0: + self.log.error(Color.FAIL + message) + else: + self.log.info(Color.INFO + message) + + # Live images + if os.path.exists(live_root): + self.log.info(Color.INFO + 'Starting to sync live images to compose') + + if os.path.exists('/usr/bin/fpsync'): + message, ret = Shared.fpsync_method(live_root, sync_live_root, tmp_dir) + elif os.path.exists('/usr/bin/parallel') and os.path.exists('/usr/bin/rsync'): + message, ret = Shared.rsync_method(live_root, sync_live_root) + + if ret != 0: + self.log.error(Color.FAIL + message) + else: + self.log.info(Color.INFO + message) + + # Cloud images + if os.path.exists(images_root): + self.log.info(Color.INFO + 'Starting to sync cloud images to compose') + + if os.path.exists('/usr/bin/fpsync'): + message, ret = Shared.fpsync_method(images_root, sync_images_root, tmp_dir) + elif os.path.exists('/usr/bin/parallel') and os.path.exists('/usr/bin/rsync'): + message, ret = Shared.rsync_method(images_root, sync_images_root) + + if ret != 0: + self.log.error(Color.FAIL + message) + else: + self.log.info(Color.INFO + message) + + # Combine all checksums here + for arch in self.arches: + iso_arch_root = os.path.join(sync_iso_root, arch) + iso_arch_checksum = os.path.join(iso_arch_root, 'CHECKSUM') + if os.path.exists(iso_arch_root): + with open(iso_arch_checksum, 'w+', encoding='utf-8') as fp: + for check in glob.iglob(iso_arch_root + '/*.CHECKSUM'): + with open(check, 'r', encoding='utf-8') as sum: + for line in sum: + fp.write(line) + sum.close() + fp.close() + + live_arch_root = os.path.join(sync_live_root, arch) + live_arch_checksum = os.path.join(live_arch_root, 'CHECKSUM') + if os.path.exists(live_arch_root): + with open(live_arch_checksum, 'w+', encoding='utf-8') as lp: + for lcheck in glob.iglob(live_arch_root + '/*.CHECKSUM'): + with open(lcheck, 'r', encoding='utf-8') as sum: + for line in sum: + lp.write(line) + sum.close() + lp.close() + + images_arch_root = os.path.join(sync_images_root, arch) + images_arch_checksum = os.path.join(sync_images_root, 'CHECKSUM') + if os.path.exists(images_arch_root): + with open(images_arch_checksum, 'w+', encoding='utf-8') as ip: + for icheck in glob.iglob(images_arch_root + '/*.CHECKSUM'): + with open(icheck, 'r', encoding='utf-8') as sum: + for line in sum: + ip.write(line) + sum.close() + ip.close() + + # Deploy final metadata for a close out + self.deploy_metadata(sync_root) class SigRepoSync: """ @@ -959,6 +1567,7 @@ class SigRepoSync: major, repo=None, arch=None, + ignore_debug: bool = False, ignore_source: bool = False, repoclosure: bool = False, refresh_extra_files: bool = False, @@ -974,6 +1583,7 @@ class SigRepoSync: self.dryrun = dryrun self.fullrun = fullrun self.arch = arch + self.ignore_debug = ignore_debug self.ignore_source = ignore_source self.skip_all = skip_all self.hashed = hashed @@ -984,9 +1594,16 @@ class SigRepoSync: # Relevant config items self.major_version = major self.date_stamp = config['date_stamp'] + self.timestamp = time.time() self.repo_base_url = config['repo_base_url'] self.compose_root = config['compose_root'] self.compose_base = config['compose_root'] + "/" + major + self.profile = rlvars['profile'] + self.sigprofile = sigvars['profile'] + self.iso_map = rlvars['iso_map'] + self.distname = config['distname'] + self.fullname = rlvars['fullname'] + self.shortname = config['shortname'] # Relevant major version items self.sigvars = sigvars @@ -995,6 +1612,10 @@ class SigRepoSync: #self.project_id = sigvars['project_id'] self.sigrepo = repo + # 
Templates + file_loader = FileSystemLoader(f"{_rootdir}/templates") + self.tmplenv = Environment(loader=file_loader) + # each el can have its own designated container to run stuff in, # otherwise we'll just default to the default config. self.container = config['container'] @@ -1013,7 +1634,11 @@ class SigRepoSync: self.compose_latest_dir = os.path.join( config['compose_root'], major, - "latest-Rocky-{}-SIG".format(major) + "latest-{}-{}-SIG-{}".format( + self.shortname, + major, + self.sigprofile + ) ) self.compose_latest_sync = os.path.join( @@ -1046,7 +1671,7 @@ class SigRepoSync: self.log.info('sig reposync init') self.log.info(major) - #self.dnf_config = self.generate_conf() + #self.dnf_config = Shared.generate_conf() def run(self): """ diff --git a/iso/empanadas/empanadas/util/imagebuild.py b/iso/empanadas/empanadas/util/imagebuild.py new file mode 100644 index 0000000..e69de29 diff --git a/iso/empanadas/empanadas/util/iso_utils.py b/iso/empanadas/empanadas/util/iso_utils.py index 5921df9..a098791 100644 --- a/iso/empanadas/empanadas/util/iso_utils.py +++ b/iso/empanadas/empanadas/util/iso_utils.py @@ -15,11 +15,11 @@ import tarfile import shutil # lazy person's s3 parser -import requests -import json -import xmltodict +#import requests +#import json +#import xmltodict # if we can access s3 -import boto3 +#import boto3 # relative_path, compute_file_checksums import kobo.shortcuts from fnmatch import fnmatch @@ -35,16 +35,15 @@ import productmd.treeinfo from jinja2 import Environment, FileSystemLoader from empanadas.common import Color, _rootdir -from empanadas.util import Shared +from empanadas.util import Shared, ArchCheck class IsoBuild: """ This helps us build the generic ISO's for a Rocky Linux release. In particular, this is for the boot images. - While there is a function for building the DVD and live images, this not - the main design of this class. The other functions can be called on their - own to facilitate those particular builds. + There are functions to build the DVD (and potentially other) images. Each + particular build or process starts with "run" in their name. 
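A hedged sketch of driving this class, modeled on the pull_cloud_image.py entry point earlier in this diff; the release and architecture values here are illustrative only.

from empanadas.common import config, rldict
from empanadas.util import IsoBuild

rlvars = rldict['9']

builder = IsoBuild(
    rlvars,
    config,
    major=rlvars['major'],
    arch='x86_64',
    compose_dir_is_here=True,
)

builder.run()  # lorax/boot ISO phase
# builder.run_boot_sync() and builder.run_pull_generic_images() follow the same pattern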
""" def __init__( self, @@ -61,6 +60,7 @@ class IsoBuild: extra_iso=None, extra_iso_mode: str = 'local', compose_dir_is_here: bool = False, + hashed: bool = False, image=None, logger=None ): @@ -76,13 +76,14 @@ class IsoBuild: self.timestamp = time.time() self.compose_root = config['compose_root'] self.compose_base = config['compose_root'] + "/" + major - self.iso_drop = config['compose_root'] + "/" + major + "/isos" self.current_arch = config['arch'] self.required_pkgs = rlvars['iso_map']['lorax']['required_pkgs'] self.mock_work_root = config['mock_work_root'] self.lorax_result_root = config['mock_work_root'] + "/" + "lorax" self.mock_isolation = isolation self.iso_map = rlvars['iso_map'] + #self.livemap = rlvars['livemap'] + self.cloudimages = rlvars['cloudimages'] self.release_candidate = rc self.s3 = s3 self.force_unpack = force_unpack @@ -91,6 +92,7 @@ class IsoBuild: self.extra_iso_mode = extra_iso_mode self.checksum = rlvars['checksum'] self.profile = rlvars['profile'] + self.hashed = hashed # Relevant major version items self.arch = arch @@ -122,8 +124,8 @@ class IsoBuild: self.s3_bucket = config['bucket'] self.s3_bucket_url = config['bucket_url'] - if s3: - self.s3 = boto3.client('s3') + #if s3: + # self.s3 = boto3.client('s3') # arch specific self.hfs_compat = hfs_compat @@ -135,7 +137,10 @@ class IsoBuild: self.compose_latest_dir = os.path.join( config['compose_root'], major, - "latest-Rocky-{}".format(self.profile) + "latest-{}-{}".format( + self.shortname, + self.profile + ) ) self.compose_latest_sync = os.path.join( @@ -153,6 +158,16 @@ class IsoBuild: "work/isos" ) + self.live_work_dir = os.path.join( + self.compose_latest_dir, + "work/live" + ) + + self.image_work_dir = os.path.join( + self.compose_latest_dir, + "work/images" + ) + self.lorax_work_dir = os.path.join( self.compose_latest_dir, "work/lorax" @@ -172,7 +187,15 @@ class IsoBuild: self.log.addHandler(handler) self.log.info('iso build init') - self.repolist = self.build_repo_list() + self.repolist = Shared.build_repo_list( + self.repo_base_url, + self.repos, + self.project_id, + self.current_arch, + self.compose_latest_sync, + self.compose_dir_is_here, + self.hashed + ) self.log.info(self.revision) def run(self): @@ -195,36 +218,6 @@ class IsoBuild: ) self.log.info('ISO Build completed.') - def build_repo_list(self): - """ - Builds the repo dictionary - """ - repolist = [] - for name in self.repos: - if not self.compose_dir_is_here: - constructed_url = '{}/{}/repo/hashed-{}/{}'.format( - self.repo_base_url, - self.project_id, - name, - self.current_arch - ) - else: - constructed_url = 'file://{}/{}/{}/os'.format( - self.compose_latest_sync, - name, - self.current_arch - ) - - - repodata = { - 'name': name, - 'url': constructed_url - } - - repolist.append(repodata) - - return repolist - def iso_build(self): """ This does the general ISO building for the current running @@ -344,19 +337,27 @@ class IsoBuild: unpack_single_arch = True arches_to_unpack = [self.arch] - self.log.info( - '[' + Color.BOLD + Color.GREEN + 'INFO' + Color.END + '] ' + - 'Determining the latest pulls...' 
- ) + self.log.info(Color.INFO + 'Determining the latest pulls...') if self.s3: - latest_artifacts = self._s3_determine_latest() + latest_artifacts = Shared.s3_determine_latest( + self.s3_bucket, + self.release, + self.arches, + 'tar.gz', + 'lorax', + self.log + ) else: - latest_artifacts = self._reqs_determine_latest() + latest_artifacts = Shared.reqs_determine_latest( + self.s3_bucket_url, + self.release, + self.arches, + 'tar.gz', + 'lorax', + self.log + ) - self.log.info( - '[' + Color.BOLD + Color.GREEN + 'INFO' + Color.END + '] ' + - 'Downloading requested artifact(s)' - ) + self.log.info(Color.INFO + 'Downloading requested artifact(s)') for arch in arches_to_unpack: lorax_arch_dir = os.path.join( self.lorax_work_dir, @@ -378,25 +379,23 @@ class IsoBuild: 'Downloading artifact for ' + Color.BOLD + arch + Color.END ) if self.s3: - self._s3_download_artifacts( + Shared.s3_download_artifacts( self.force_download, + self.s3_bucket, source_path, - full_drop + full_drop, + self.log ) else: - self._reqs_download_artifacts( + Shared.reqs_download_artifacts( self.force_download, + self.s3_bucket_url, source_path, - full_drop + full_drop, + self.log ) - self.log.info( - '[' + Color.BOLD + Color.GREEN + 'INFO' + Color.END + '] ' + - 'Download phase completed' - ) - self.log.info( - '[' + Color.BOLD + Color.GREEN + 'INFO' + Color.END + '] ' + - 'Beginning unpack phase...' - ) + self.log.info(Color.INFO + 'Download phase completed') + self.log.info(Color.INFO + 'Beginning unpack phase...') for arch in arches_to_unpack: tarname = 'lorax-{}-{}.tar.gz'.format( @@ -411,22 +410,13 @@ class IsoBuild: ) if not os.path.exists(tarball): - self.log.error( - '[' + Color.BOLD + Color.RED + 'FAIL' + Color.END + '] ' + - 'Artifact does not exist: ' + tarball - ) + self.log.error(Color.FAIL + 'Artifact does not exist: ' + tarball) continue self._unpack_artifacts(self.force_unpack, arch, tarball) - self.log.info( - '[' + Color.BOLD + Color.GREEN + 'INFO' + Color.END + '] ' + - 'Unpack phase completed' - ) - self.log.info( - '[' + Color.BOLD + Color.GREEN + 'INFO' + Color.END + '] ' + - 'Beginning image variant phase' - ) + self.log.info(Color.INFO + 'Unpack phase completed') + self.log.info(Color.INFO + 'Beginning image variant phase') for arch in arches_to_unpack: self.log.info( @@ -437,20 +427,14 @@ class IsoBuild: self._copy_boot_to_work(self.force_unpack, arch) - self.log.info( - '[' + Color.BOLD + Color.GREEN + 'INFO' + Color.END + '] ' + - 'Image variant phase completed' - ) + self.log.info(Color.INFO + 'Image variant phase completed') - self.log.info( - '[' + Color.BOLD + Color.GREEN + 'INFO' + Color.END + '] ' + - 'Beginning treeinfo phase' - ) + self.log.info(Color.INFO + 'Beginning treeinfo phase') for arch in arches_to_unpack: for variant in self.iso_map['images']: self.log.info( - 'Configuring treeinfo for %s%s %s%s' % (Color.BOLD, arch, variant, Color.END) + 'Configuring treeinfo and discinfo for %s%s %s%s' % (Color.BOLD, arch, variant, Color.END) ) self._treeinfo_wrapper(arch, variant) @@ -461,111 +445,6 @@ class IsoBuild: ) self._copy_nondisc_to_repo(self.force_unpack, arch, variant) - - def _s3_determine_latest(self): - """ - Using native s3, determine the latest artifacts and return a dict - """ - temp = [] - data = {} - try: - self.s3.list_objects(Bucket=self.s3_bucket)['Contents'] - except: - self.log.error( - '[' + Color.BOLD + Color.RED + 'FAIL' + Color.END + '] ' + - 'Cannot access s3 bucket.' 
- ) - raise SystemExit() - - for y in self.s3.list_objects(Bucket=self.s3_bucket)['Contents']: - if 'tar.gz' in y['Key'] and self.release in y['Key']: - temp.append(y['Key']) - - for arch in self.arches: - temps = [] - for y in temp: - if arch in y: - temps.append(y) - temps.sort(reverse=True) - data[arch] = temps[0] - - return data - - def _s3_download_artifacts(self, force_download, source, dest): - """ - Download the requested artifact(s) via s3 - """ - if os.path.exists(dest): - if not force_download: - self.log.warn( - '[' + Color.BOLD + Color.YELLOW + 'WARN' + Color.END + '] ' + - 'Artifact at ' + dest + ' already exists' - ) - return - - self.log.info('Downloading ({}) to: {}'.format(source, dest)) - try: - self.s3.download_file( - Bucket=self.s3_bucket, - Key=source, - Filename=dest - ) - except: - self.log.error('There was an issue downloading from %s' % self.s3_bucket) - - def _reqs_determine_latest(self): - """ - Using requests, determine the latest artifacts and return a list - """ - temp = [] - data = {} - - try: - bucket_data = requests.get(self.s3_bucket_url) - except requests.exceptions.RequestException as e: - self.log.error('The s3 bucket http endpoint is inaccessible') - raise SystemExit(e) - - resp = xmltodict.parse(bucket_data.content) - - for y in resp['ListBucketResult']['Contents']: - if 'tar.gz' in y['Key'] and self.release in y['Key']: - temp.append(y['Key']) - - for arch in self.arches: - temps = [] - for y in temp: - if arch in y: - temps.append(y) - temps.sort(reverse=True) - data[arch] = temps[0] - - return data - - def _reqs_download_artifacts(self, force_download, source, dest): - """ - Download the requested artifact(s) via requests only - """ - if os.path.exists(dest): - if not force_download: - self.log.warn( - '[' + Color.BOLD + Color.YELLOW + 'WARN' + Color.END + '] ' + - 'Artifact at ' + dest + ' already exists' - ) - return - unurl = self.s3_bucket_url + '/' + source - - self.log.info('Downloading ({}) to: {}'.format(source, dest)) - try: - with requests.get(unurl, allow_redirects=True) as r: - with open(dest, 'wb') as f: - f.write(r.content) - f.close() - r.close() - except requests.exceptions.RequestException as e: - self.log.error('There was a problem downloading the artifact') - raise SystemExit(e) - def _unpack_artifacts(self, force_unpack, arch, tarball): """ Unpack the requested artifacts(s) @@ -574,10 +453,7 @@ class IsoBuild: if not force_unpack: file_check = os.path.join(unpack_dir, 'lorax/.treeinfo') if os.path.exists(file_check): - self.log.warn( - '[' + Color.BOLD + Color.YELLOW + 'WARN' + Color.END + '] ' + - 'Artifact (' + arch + ') already unpacked' - ) + self.log.warn(Color.WARN + 'Artifact (' + arch + ') already unpacked') return self.log.info('Unpacking %s' % tarball) @@ -601,10 +477,7 @@ class IsoBuild: ) if not os.path.exists(os.path.join(src_to_image, '.treeinfo')): - self.log.error( - '[' + Color.BOLD + Color.RED + 'FAIL' + Color.END + '] ' + - 'Lorax base image does not exist' - ) + self.log.error(Color.FAIL + 'Lorax base image does not exist') return path_to_image = os.path.join( @@ -616,10 +489,7 @@ class IsoBuild: if not force_unpack: file_check = os.path.join(path_to_image, '.treeinfo') if os.path.exists(file_check): - self.log.warn( - '[' + Color.BOLD + Color.YELLOW + 'WARN' + Color.END + '] ' + - 'Lorax image for ' + image + ' already exists' - ) + self.log.warn(Color.WARN + 'Lorax image for ' + image + ' already exists') return self.log.info('Copying base lorax to %s directory...' 
% image) @@ -681,10 +551,7 @@ class IsoBuild: if not force_unpack: file_check = isobootpath if os.path.exists(file_check): - self.log.warn( - '[' + Color.BOLD + Color.YELLOW + 'WARN' + Color.END + '] ' + - 'Boot image (' + discname + ') already exists' - ) + self.log.warn(Color.WARN + 'Boot image (' + discname + ') already exists') return self.log.info('Copying %s boot iso to work directory...' % arch) @@ -696,10 +563,7 @@ class IsoBuild: self.log.info('Creating checksum for %s boot iso...' % arch) checksum = Shared.get_checksum(isobootpath, self.checksum, self.log) if not checksum: - self.log.error( - '[' + Color.BOLD + Color.RED + 'FAIL' + Color.END + '] ' + - isobootpath + ' not found! Are you sure we copied it?' - ) + self.log.error(Color.FAIL + isobootpath + ' not found! Are you sure we copied it?') return with open(isobootpath + '.CHECKSUM', "w+") as c: c.write(checksum) @@ -710,6 +574,49 @@ class IsoBuild: Syncs data from a non-disc set of images to the appropriate repo. Repo and image MUST match names for this to work. """ + pathway = os.path.join( + self.compose_latest_sync, + repo, + arch, + 'os' + ) + + src_to_image = os.path.join( + self.lorax_work_dir, + arch, + repo + ) + + if not os.path.exists(pathway): + self.log.error(Color.FAIL + + 'Repo and Image variant either does NOT match or does ' + + 'NOT exist. Are you sure you have synced the repository?' + ) + + if not force_unpack: + found_files = [] + for y in ArchCheck.archfile[arch]: + imgpath = os.path.join( + pathway, + y + ) + if os.path.exists(imgpath): + found_files.append(y) + + if os.path.exists(pathway + '/images/boot.iso'): + found_files.append('/images/boot.iso') + + if len(found_files) > 0: + self.log.warn(Color.WARN + 'Images and data for ' + repo + ' and ' + arch + ' already exists.') + return + + self.log.info(Color.INFO + 'Copying images and data for ' + repo + ' ' + arch) + + try: + shutil.copytree(src_to_image, pathway, copy_function=shutil.copy2, dirs_exist_ok=True) + except: + self.log.error('%s already exists??' % repo) + def run_boot_sync(self): """ @@ -741,92 +648,30 @@ class IsoBuild: def _treeinfo_wrapper(self, arch, variant): """ - Ensure treeinfo is written correctly based on the variant passed. Each - .treeinfo file should be configured similarly but also differently from - the next. + Ensure treeinfo and discinfo is written correctly based on the variant + passed. Each file should be configured similarly but also differently + from the next. The Shared module does have a .treeinfo writer, but it + is for basic use. Eventually it'll be expanded to handle this scenario. """ image = os.path.join(self.lorax_work_dir, arch, variant) - treeinfo = os.path.join(image, '.treeinfo') imagemap = self.iso_map['images'][variant] - primary = imagemap['variant'] - repos = imagemap['repos'] - is_disc = False + data = { + 'arch': arch, + 'variant': variant, + 'variant_path': image, + 'checksum': self.checksum, + 'distname': self.distname, + 'fullname': self.fullname, + 'shortname': self.shortname, + 'release': self.release, + 'timestamp': self.timestamp, + } - if imagemap['disc']: - is_disc = True - discnum = 1 - - # load up productmd - ti = productmd.treeinfo.TreeInfo() - ti.load(treeinfo) - - # Set the name - ti.release.name = self.distname - ti.release.short = self.shortname - # Set the version (the initial lorax run does this, but we are setting - # it just in case) - ti.release.version = self.release - # Assign the present images into a var as a copy. For each platform, - # clear out the present dictionary. 
For each item and path in the - # assigned var, assign it back to the platform dictionary. If the path - # is empty, continue. Do checksums afterwards. - plats = ti.images.images.copy() - for platform in ti.images.images: - ti.images.images[platform] = {} - for i, p in plats[platform].items(): - if not p: - continue - if 'boot.iso' in i and is_disc: - continue - ti.images.images[platform][i] = p - ti.checksums.add(p, self.checksum, root_dir=image) - - # stage2 checksums - if ti.stage2.mainimage: - ti.checksums.add(ti.stage2.mainimage, self.checksum, root_dir=image) - - if ti.stage2.instimage: - ti.checksums.add(ti.stage2.instimage, self.checksum, root_dir=image) - - # If we are a disc, set the media section appropriately. - if is_disc: - ti.media.discnum = discnum - ti.media.totaldiscs = discnum - - # Create variants - # Note to self: There's a lot of legacy stuff running around for - # Fedora, ELN, and RHEL in general. This is the general structure, - # apparently. But there could be a chance it'll change. We may need to - # put in a configuration to deal with it at some point. - #ti.variants.variants.clear() - for y in repos: - if y in ti.variants.variants.keys(): - vari = ti.variants.variants[y] - else: - vari = productmd.treeinfo.Variant(ti) - - vari.id = y - vari.uid = y - vari.name = y - vari.type = "variant" - if is_disc: - vari.paths.repository = y - vari.paths.packages = y + "/Packages" - else: - if y == primary: - vari.paths.repository = "." - vari.paths.packages = "Packages" - else: - vari.paths.repository = "../../../" + y + "/" + arch + "/os" - vari.paths.packages = "../../../" + y + "/" + arch + "/os/Packages" - - if y not in ti.variants.variants.keys(): - ti.variants.add(vari) - - del vari - - # Set default variant - ti.dump(treeinfo, main_variant=primary) + try: + Shared.treeinfo_modify_write(data, imagemap, self.log) + except Exception as e: + self.log.error(Color.FAIL + 'There was an error writing treeinfo.') + self.log.error(e) # Next set of functions are loosely borrowed (in concept) from pungi. Some # stuff may be combined/mixed together, other things may be simplified or @@ -838,26 +683,17 @@ class IsoBuild: """ sync_root = self.compose_latest_sync - self.log.info( - '[' + Color.BOLD + Color.GREEN + 'INFO' + Color.END + '] ' + - 'Starting Extra ISOs phase' - ) + self.log.info(Color.INFO + 'Starting Extra ISOs phase') if not os.path.exists(self.compose_base): - self.log.info( - '[' + Color.BOLD + Color.RED + 'FAIL' + Color.END + '] ' + - 'The compose directory MUST be here. Cannot continue.' - ) + self.log.info(Color.FAIL + 'The compose directory MUST be here. Cannot continue.') raise SystemExit() self._extra_iso_build_wrap() self.log.info('Compose repo directory: %s' % sync_root) self.log.info('ISO result directory: %s/$arch' % self.lorax_work_dir) - self.log.info( - '[' + Color.BOLD + Color.GREEN + 'INFO' + Color.END + '] ' + - 'Extra ISO phase completed.' - ) + self.log.info(Color.INFO + 'Extra ISO phase completed.') def _extra_iso_build_wrap(self): """ @@ -879,26 +715,26 @@ class IsoBuild: for y in images_to_build: if 'isoskip' in self.iso_map['images'][y] and self.iso_map['images'][y]['isoskip']: - self.log.info( - '[' + Color.BOLD + Color.YELLOW + 'WARN' + Color.END + '] ' + - 'Skipping ' + y + ' image' - ) + self.log.info(Color.WARN + 'Skipping ' + y + ' image') continue + # Kind of hacky, but if we decide to have more than boot/dvd iso's, + # we need to make sure volname matches the initial lorax image, + # which the volid contains "dvd". 
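The productmd handling that used to live here now sits behind Shared.treeinfo_modify_write (added further down in shared.py). Independent of this toolkit, the same library can be used to inspect what lorax produced; a minimal read-only sketch with a hypothetical path:

    import productmd.treeinfo

    ti = productmd.treeinfo.TreeInfo()
    ti.load('/mnt/compose/9/work/lorax/x86_64/dvd/.treeinfo')   # hypothetical path

    print(ti.release.name, ti.release.short, ti.release.version)
    for name, variant in ti.variants.variants.items():
        # e.g. BaseOS -> repository '.' and packages 'Packages' on a disc image
        print(name, variant.paths.repository, variant.paths.packages)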
AKA, file name doesn't always + # equate to volume ID + if 'volname' in self.iso_map['images'][y]: + volname = self.iso_map['images'][y]['volname'] + else: + volname = y + for a in arches_to_build: lorax_path = os.path.join(self.lorax_work_dir, a, 'lorax', '.treeinfo') image_path = os.path.join(self.lorax_work_dir, a, y, '.treeinfo') if not os.path.exists(image_path): - self.log.error( - '[' + Color.BOLD + Color.RED + 'FAIL' + Color.END + '] ' + - 'Lorax data not found for ' + y + '. Skipping.' - ) + self.log.error(Color.FAIL + 'Lorax data not found for ' + y + '. Skipping.') if not os.path.exists(lorax_path): - self.log.error( - '[' + Color.BOLD + Color.RED + 'FAIL' + Color.END + '] ' + - 'Lorax not found at all. This is considered fatal.' - ) + self.log.error(Color.FAIL + 'Lorax not found at all. This is considered fatal.') raise SystemExit() @@ -907,23 +743,20 @@ class IsoBuild: y, self.iso_map['images'][y]['repos'], ) - self._extra_iso_local_config(a, y, grafts, work_root) + self._extra_iso_local_config(a, y, grafts, work_root, volname) if self.extra_iso_mode == 'local': self._extra_iso_local_run(a, y, work_root) elif self.extra_iso_mode == 'podman': continue else: - self.log.info( - '[' + Color.BOLD + Color.RED + 'FAIL' + Color.END + '] ' + - 'Mode specified is not valid.' - ) + self.log.error(Color.FAIL + 'Mode specified is not valid.') raise SystemExit() if self.extra_iso_mode == 'podman': self._extra_iso_podman_run(arches_to_build, images_to_build, work_root) - def _extra_iso_local_config(self, arch, image, grafts, work_root): + def _extra_iso_local_config(self, arch, image, grafts, work_root, volname): """ Local ISO build configuration - This generates the configuration for both mock and podman entries @@ -974,7 +807,7 @@ class IsoBuild: self.minor_version, rclevel, arch, - image + volname ) isoname = '{}-{}.{}{}-{}-{}.iso'.format( @@ -1040,10 +873,16 @@ class IsoBuild: xorriso_template_entry.close() opts['graft_points'] = xorriso_template_path - make_image = '{} {}'.format(self._get_make_image_cmd(opts), log_path_command) - isohybrid = self._get_isohybrid_cmd(opts) - implantmd5 = self._get_implantisomd5_cmd(opts) - make_manifest = self._get_manifest_cmd(opts) + make_image = '{} {}'.format( + Shared.get_make_image_cmd( + opts, + self.hfs_compat + ), + log_path_command + ) + isohybrid = Shared.get_isohybrid_cmd(opts) + implantmd5 = Shared.get_implantisomd5_cmd(opts) + make_manifest = Shared.get_manifest_cmd(opts) iso_template_output = iso_template.render( extra_iso_mode=self.extra_iso_mode, @@ -1074,7 +913,9 @@ class IsoBuild: def _extra_iso_local_run(self, arch, image, work_root): """ - Runs the actual local process using mock + Runs the actual local process using mock. This is for running in + peridot or running on a machine that does not have podman, but does + have mock available. """ entries_dir = os.path.join(work_root, "entries") extra_iso_cmd = '/bin/bash {}/extraisobuild-{}-{}.sh'.format(entries_dir, arch, image) @@ -1097,7 +938,7 @@ class IsoBuild: to the compose directories. It's the same as if you were doing a reposync of the repositories. """ - cmd = self.podman_cmd() + cmd = Shared.podman_cmd(self.log) entries_dir = os.path.join(work_root, "entries") isos_dir = os.path.join(work_root, "isos") bad_exit_list = [] @@ -1148,10 +989,7 @@ class IsoBuild: join_all_pods = ' '.join(entry_name_list) time.sleep(3) - self.log.info( - '[' + Color.BOLD + Color.GREEN + 'INFO' + Color.END + '] ' + - 'Building ' + i + ' ...' 
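The Shared command builders invoked here take a plain opts dictionary instead of the IsoBuild instance. Judging from the helpers themselves (added later in shared.py), the keys they read are roughly the following; every value below is hypothetical:

    from empanadas.util.shared import Shared

    opts = {
        'arch': 'x86_64',
        'iso_name': '/mnt/compose/9/work/isos/x86_64/Rocky-example-dvd.iso',
        'volid': 'Rocky-9-1-x86_64-dvd',
        'graft_points': '/mnt/compose/9/work/entries/xorriso-x86_64-dvd.txt',
        'use_xorrisofs': False,
        'iso_level': None,
    }

    # Each helper returns a ready-to-run shell string. genisoimage or xorriso
    # must be installed, otherwise the mkisofs builder raises SystemExit.
    make_image = Shared.get_make_image_cmd(opts, False)   # False == hfs_compat
    isohybrid = Shared.get_isohybrid_cmd(opts)
    implantmd5 = Shared.get_implantisomd5_cmd(opts)
    manifest = Shared.get_manifest_cmd(opts)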
- ) + self.log.info(Color.INFO + 'Building ' + i + ' ...') pod_watcher = '{} wait {}'.format( cmd, join_all_pods @@ -1180,9 +1018,7 @@ class IsoBuild: output, errors = podcheck.communicate() if 'Exited (0)' not in output.decode(): - self.log.error( - '[' + Color.BOLD + Color.RED + 'FAIL' + Color.END + '] ' + pod - ) + self.log.error(Color.FAIL + pod) bad_exit_list.append(pod) rmcmd = '{} rm {}'.format( @@ -1201,36 +1037,23 @@ class IsoBuild: for p in checksum_list: path = os.path.join(isos_dir, p) if os.path.exists(path): - self.log.info( - '[' + Color.BOLD + Color.GREEN + 'INFO' + Color.END + '] ' + - 'Performing checksum for ' + p - ) + self.log.info(Color.INFO + 'Performing checksum for ' + p) checksum = Shared.get_checksum(path, self.checksum, self.log) if not checksum: - self.log.error( - '[' + Color.BOLD + Color.RED + 'FAIL' + Color.END + '] ' + - path + ' not found! Are you sure it was built?' - ) + self.log.error(Color.FAIL + path + ' not found! Are you sure it was built?') with open(path + '.CHECKSUM', "w+") as c: c.write(checksum) c.close() - self.log.info( - '[' + Color.BOLD + Color.GREEN + 'INFO' + Color.END + '] ' + - 'Building ' + i + ' completed' - ) + self.log.info(Color.INFO + 'Building ' + i + ' completed') if len(bad_exit_list) == 0: - self.log.info( - '[' + Color.BOLD + Color.GREEN + 'INFO' + Color.END + '] ' + - 'Copying ISOs over to compose directory...' - ) - print() + self.log.info(Color.INFO + 'Images built successfully.') else: self.log.error( - '[' + Color.BOLD + Color.RED + 'FAIL' + Color.END + '] ' + + Color.FAIL + 'There were issues with the work done. As a result, ' + - 'the ISOs will not be copied.' + 'some/all ISOs may not exist.' ) @@ -1247,17 +1070,14 @@ class IsoBuild: lorax_base_dir = os.path.join(self.lorax_work_dir, arch) global_work_dir = os.path.join(self.compose_latest_dir, "work/global") - self.log.info( - '[' + Color.BOLD + Color.GREEN + 'INFO' + Color.END + '] ' + - 'Generating graft points for extra iso: (' + arch + ') ' + iso - ) + self.log.info(Color.INFO + 'Generating graft points for extra iso: (' + arch + ') ' + iso) files = {} # This is the data we need to actually boot lorax_for_var = os.path.join(lorax_base_dir, iso) if not os.path.exists(lorax_for_var + '/.treeinfo'): self.log.info( - '[' + Color.BOLD + Color.RED + 'FAIL' + Color.END + '] ' + + Color.FAIL + '!! .treeinfo is missing, does this variant actually exist? !!' ) return @@ -1484,248 +1304,616 @@ class IsoBuild: Gets a volume ID """ - def _get_boot_options(self, arch, createfrom, efi=True, hfs_compat=False): + def run_pull_generic_images(self): """ - Gets boot options based on architecture, the iso commands are not - universal. + Pulls generic images built in peridot and places them where they need + to be. This relies on a list called "cloudimages" in the version + configuration. 
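Stripped of the volume mounts and logging, the container fan-out in this phase follows a simple pattern: start every entry script detached, block on podman wait, then treat anything that is not "Exited (0)" as a failure. A reduced sketch, with hypothetical container names and image:

    import shlex
    import subprocess

    podman = '/usr/bin/podman'
    pods = ['extraisobuild-x86_64-dvd.sh', 'extraisobuild-aarch64-dvd.sh']

    for pod in pods:
        # The real invocations also bind-mount the compose and entries directories.
        run_cmd = '{} run -d -it --name {} ghcr.io/example/builder'.format(podman, pod)
        subprocess.call(shlex.split(run_cmd), stdout=subprocess.DEVNULL,
                        stderr=subprocess.DEVNULL)

    subprocess.call(shlex.split('{} wait {}'.format(podman, ' '.join(pods))))

    bad_exit = []
    for pod in pods:
        check = subprocess.run(
            shlex.split('{} ps -a -f name={}'.format(podman, pod)),
            capture_output=True, text=True
        )
        if 'Exited (0)' not in check.stdout:
            bad_exit.append(pod)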
""" - if arch in ("armhfp",): - result = [] - return result + unpack_single_arch = False + arches_to_unpack = self.arches + if self.arch: + unpack_single_arch = True + arches_to_unpack = [self.arch] - if arch in ("aarch64",): - result = [ - "-eltorito-alt-boot", - "-e", - "images/efiboot.img", - "-no-emul-boot", - ] - return result + for imagename in self.cloudimages['images']: + self.log.info(Color.INFO + 'Determining the latest images for ' + imagename + ' ...') + formattype = self.cloudimages['images'][imagename]['format'] - if arch in ("i386", "i686", "x86_64"): - result = [ - "-b", - "isolinux/isolinux.bin", - "-c", - "isolinux/boot.cat", - "-no-emul-boot", - "-boot-load-size", - "4", - "-boot-info-table", - ] - - # EFI args - if arch == "x86_64": - result.extend( - [ - "-eltorito-alt-boot", - "-e", - "images/efiboot.img", - "-no-emul-boot" - ] + if self.s3: + latest_artifacts = Shared.s3_determine_latest( + self.s3_bucket, + self.release, + arches_to_unpack, + formattype, + imagename, + self.log ) - return result - # need to go double check if this is needed with stream 9 - if arch == "ppc64le" and hfs_compat: - result = [ - "-part", - "-hfs", - "-r", - "-l", - "-sysid", - "PPC", - "-no-desktop", - "-allow-multidot", - "-chrp-boot", - "-map", - os.path.join(createfrom, "mapping"), - "-hfs-bless", - "/ppc/mac" - ] - return result + else: + latest_artifacts = Shared.reqs_determine_latest( + self.s3_bucket_url, + self.release, + arches_to_unpack, + formattype, + imagename, + self.log + ) - if arch == "ppc64le" and not hfs_compat: - result = [ - "-r", - "-l", - "-sysid", - "PPC", - "-chrp-boot", - ] - return result + if not len(latest_artifacts) > 0: + self.log.warn(Color.WARN + 'No images found.') + continue - if arch in ("s390x",): - result = [ - "-eltorito-boot", - "images/cdboot.img", - "-no-emul-boot", - ] - return result + self.log.info(Color.INFO + 'Attempting to download requested artifacts') + for arch in arches_to_unpack: + image_arch_dir = os.path.join( + self.image_work_dir, + arch + ) - raise ValueError("Architecture %s%s%s is NOT known" % (Color.BOLD, arch, Color.END)) + if arch not in latest_artifacts.keys(): + self.log.warn(Color.WARN + 'Artifact for ' + imagename + + ' ' + arch + ' (' + formattype + ') does not exist.') + continue - # ALL COMMANDS # - def _get_mkisofs_cmd( - self, - iso, - appid=None, - volid=None, - volset=None, - exclude=None, - boot_args=None, - input_charset="utf-8", - grafts=None, - use_xorrisofs=False, - iso_level=None - ): - # I should hardcode this I think - #untranslated_filenames = True - translation_table = True - #joliet = True - #joliet_long = True - #rock = True - cmd = ["/usr/bin/xorrisofs" if use_xorrisofs else "/usr/bin/genisoimage"] - if not os.path.exists(cmd[0]): - self.log.error('%s was not found. Good bye.' % cmd[0]) - raise SystemExit("\n\n" + cmd[0] + " was not found.\n\nPlease " - " ensure that you have installed the necessary packages on " - " this system. 
" - ) + source_path = latest_artifacts[arch] + drop_name = source_path.split('/')[-1] + full_drop = '{}/{}'.format( + image_arch_dir, + drop_name + ) - if iso_level: - cmd.extend(["-iso-level", str(iso_level)]) + checksum_drop = '{}/{}.CHECKSUM'.format( + image_arch_dir, + drop_name + ) - if appid: - cmd.extend(["-appid", appid]) + if not os.path.exists(image_arch_dir): + os.makedirs(image_arch_dir, exist_ok=True) - #if untranslated_filenames: - cmd.append("-untranslated-filenames") + self.log.info('Downloading artifact for ' + Color.BOLD + arch + Color.END) + if self.s3: + Shared.s3_download_artifacts( + self.force_download, + self.s3_bucket, + source_path, + full_drop, + self.log + ) + else: + Shared.reqs_download_artifacts( + self.force_download, + self.s3_bucket_url, + source_path, + full_drop, + self.log + ) - if volid: - cmd.extend(["-volid", volid]) + self.log.info('Creating checksum ...') + checksum = Shared.get_checksum(full_drop, self.checksum, self.log) + if not checksum: + self.log.error(Color.FAIL + full_drop + ' not found! Are you sure we copied it?') + continue + with open(checksum_drop, 'w+') as c: + c.write(checksum) + c.close() - #if joliet: - cmd.append("-J") + self.log.info('Creating a symlink to latest image...') + latest_name = '{}/{}-{}-{}.latest.{}.{}'.format( + image_arch_dir, + self.shortname, + self.major_version, + imagename, + arch, + formattype + ) + # For some reason python doesn't have a "yeah just change this + # link" part of the function + if os.path.exists(latest_name): + os.remove(latest_name) - #if joliet_long: - cmd.append("-joliet-long") + os.symlink(drop_name, latest_name) - if volset: - cmd.extend(["-volset", volset]) + self.log.info(Color.INFO + 'Image download phase completed') - #if rock: - cmd.append("-rational-rock") - - if not use_xorrisofs and translation_table: - cmd.append("-translation-table") - - if input_charset: - cmd.extend(["-input-charset", input_charset]) - - if exclude: - for i in kobo.shortcuts.force_list(exclude): - cmd.extend(["-x", i]) - - if boot_args: - cmd.extend(boot_args) - - cmd.extend(["-o", iso]) - - if grafts: - cmd.append("-graft-points") - cmd.extend(["-path-list", grafts]) - - return cmd - - def _get_implantisomd5_cmd(self, opts): - """ - Implants md5 into iso - """ - cmd = ["/usr/bin/implantisomd5", "--supported-iso", opts['iso_name']] - returned_cmd = ' '.join(cmd) - return returned_cmd - - def _get_manifest_cmd(self, opts): - """ - Gets an ISO manifest - """ - if opts['use_xorrisofs']: - return """/usr/bin/xorriso -dev %s --find | - tail -n+2 | - tr -d "'" | - cut -c2- | sort >> %s.manifest""" % ( - shlex.quote(opts['iso_name']), - shlex.quote(opts['iso_name']), - ) - else: - return "/usr/bin/isoinfo -R -f -i %s | grep -v '/TRANS.TBL$' | sort >> %s.manifest" % ( - shlex.quote(opts['iso_name']), - shlex.quote(opts['iso_name']), - ) - - def _get_isohybrid_cmd(self, opts): - cmd = [] - if not opts['use_xorrisofs']: - if opts['arch'] == "x86_64": - cmd = ["/usr/bin/isohybrid"] - cmd.append("--uefi") - cmd.append(opts['iso_name']) - returned_cmd = ' '.join(cmd) - else: - returned_cmd = '' - - return returned_cmd - - def _get_make_image_cmd(self, opts): - """ - Generates the command to actually make the image in the first place - """ - isokwargs = {} - isokwargs["boot_args"] = self._get_boot_options( - opts['arch'], - os.path.join("$TEMPLATE", "config_files/ppc"), - hfs_compat=self.hfs_compat, - ) - - if opts['arch'] in ("ppc64", "ppc64le"): - isokwargs["input_charset"] = None - - if opts['use_xorrisofs']: - cmd = 
['/usr/bin/xorriso', '-dialog', 'on', '<', opts['graft_points']] - else: - cmd = self._get_mkisofs_cmd( - opts['iso_name'], - volid=opts['volid'], - exclude=["./lost+found"], - grafts=opts['graft_points'], - use_xorrisofs=False, - iso_level=opts['iso_level'], - **isokwargs - ) - - returned_cmd = ' '.join(cmd) - return returned_cmd - - def podman_cmd(self) -> str: - """ - This generates the podman run command. This is in the case that we want - to do reposyncs in parallel as we cannot reasonably run multiple - instances of dnf reposync on a single system. - """ - cmd = None - if os.path.exists("/usr/bin/podman"): - cmd = "/usr/bin/podman" - else: - self.log.error('/usr/bin/podman was not found. Good bye.') - raise SystemExit("\n\n/usr/bin/podman was not found.\n\nPlease " - " ensure that you have installed the necessary packages on " - " this system. " + Color.BOLD + "Note that docker is not " - "supported." + Color.END - ) - return cmd class LiveBuild: """ - This helps us build the live images for Rocky Linux. + This helps us build the live images for Rocky Linux. The mode is "simple" + by default when using mock. """ + def __init__( + self, + rlvars, + config, + major, + hfs_compat: bool = False, + force_download: bool = False, + isolation: str = 'simple', + live_iso_mode: str = 'local', + compose_dir_is_here: bool = False, + hashed: bool = False, + image=None, + justcopyit: bool = False, + force_build: bool = False, + logger=None + ): + + self.image = image + self.justcopyit = justcopyit + self.fullname = rlvars['fullname'] + self.distname = config['distname'] + self.shortname = config['shortname'] + self.current_arch = config['arch'] + # Relevant config items + self.major_version = major + self.compose_dir_is_here = compose_dir_is_here + self.date_stamp = config['date_stamp'] + self.date = time.strftime("%Y%m%d", time.localtime()) + self.compose_root = config['compose_root'] + self.compose_base = config['compose_root'] + "/" + major + self.current_arch = config['arch'] + self.livemap = rlvars['livemap'] + self.required_pkgs = rlvars['livemap']['required_pkgs'] + self.mock_work_root = config['mock_work_root'] + self.live_result_root = config['mock_work_root'] + "/lmc" + self.mock_isolation = isolation + self.force_download = force_download + self.force_build = force_build + self.live_iso_mode = live_iso_mode + self.checksum = rlvars['checksum'] + self.profile = rlvars['profile'] + self.hashed = hashed + + # Relevant major version items + self.arch = config['arch'] + self.arches = rlvars['allowed_arches'] + self.release = rlvars['revision'] + self.minor_version = rlvars['minor'] + self.revision = rlvars['revision'] + "-" + rlvars['rclvl'] + self.rclvl = rlvars['rclvl'] + self.disttag = config['dist'] + self.repos = rlvars['iso_map']['lorax']['repos'] + self.repo_base_url = config['repo_base_url'] + self.project_id = rlvars['project_id'] + self.structure = rlvars['structure'] + self.bugurl = rlvars['bugurl'] + + self.container = config['container'] + if 'container' in rlvars and len(rlvars['container']) > 0: + self.container = rlvars['container'] + + # Templates + file_loader = FileSystemLoader(f"{_rootdir}/templates") + self.tmplenv = Environment(loader=file_loader) + + self.compose_latest_dir = os.path.join( + config['compose_root'], + major, + "latest-{}-{}".format( + self.shortname, + self.profile + ) + ) + + self.compose_latest_sync = os.path.join( + self.compose_latest_dir, + "compose" + ) + + self.compose_log_dir = os.path.join( + self.compose_latest_dir, + "work/logs" + ) + + 
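For orientation, the latest-compose path assembled a few lines up resolves to something like the following; compose_root, shortname and profile values are hypothetical:

    import os

    compose_root, major = '/mnt/compose', '9'
    shortname, profile = 'Rocky', '9'

    compose_latest_dir = os.path.join(
        compose_root, major, 'latest-{}-{}'.format(shortname, profile)
    )
    # -> /mnt/compose/9/latest-Rocky-9, with compose/, work/logs and work/live
    #    nested underneath it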
self.live_work_dir = os.path.join( + self.compose_latest_dir, + "work/live" + ) + + # This is temporary for now. + if logger is None: + self.log = logging.getLogger("iso") + self.log.setLevel(logging.INFO) + handler = logging.StreamHandler(sys.stdout) + handler.setLevel(logging.INFO) + formatter = logging.Formatter( + '%(asctime)s :: %(name)s :: %(message)s', + '%Y-%m-%d %H:%M:%S' + ) + handler.setFormatter(formatter) + self.log.addHandler(handler) + + self.log.info('live build init') + self.repolist = Shared.build_repo_list( + self.repo_base_url, + self.repos, + self.project_id, + self.current_arch, + self.compose_latest_sync, + self.compose_dir_is_here, + self.hashed + ) + self.log.info(self.revision) + + if not os.path.exists(self.compose_latest_dir): + self.log.warn(Color.WARN + 'A compose directory was not found ' + + 'here. If there is a failure, it may be due to it ' + + 'missing. You may want to generate a fake compose if ' + + 'you are simply making your own live images and you run ' + + 'into any errors beyond this point.' + ) + + def run_build_live_iso(self): + """ + Builds DVD images based on the data created from the initial lorax on + each arch. This should NOT be called during the usual run() section. + """ + sync_root = self.compose_latest_sync + + self.log.info(Color.INFO + 'Starting Live ISOs phase') + + # Check that the arch we're assigned is valid... + if self.current_arch not in self.livemap['allowed_arches']: + self.log.error(Color.FAIL + 'Running an unsupported architecture.') + raise SystemExit() + + self._live_iso_build_wrap() + + self.log.info('Compose repo directory: %s' % sync_root) + self.log.info('Live ISO result directory: %s/$arch' % self.live_work_dir) + self.log.info(Color.INFO + 'Live ISO phase completed.') + + def _live_iso_build_wrap(self): + """ + Prepare and actually build the live images. Based on arguments in self, + we'll either do it on mock in a loop or in podman, just like with the + extra iso phase. + """ + work_root = os.path.join( + self.compose_latest_dir, + 'work' + ) + + images_to_build = list(self.livemap['ksentry'].keys()) + if self.image: + images_to_build = [self.image] + + self.log.info( + Color.INFO + 'We are planning to build: ' + + ', '.join(images_to_build) + ) + + for i in images_to_build: + self._live_iso_local_config(i, work_root) + + if self.live_iso_mode == 'local': + self._live_iso_local_run(self.current_arch, i, work_root) + elif self.live_iso_mode == 'podman': + continue + else: + self.log.error(Color.FAIL + 'Mode specified is not valid.') + raise SystemExit() + + if self.live_iso_mode == 'podman': + #self._live_iso_podman_run(self.current_arch, images_to_build, work_root) + self.log.error(Color.FAIL + 'At this time, live images cannot be ' + + 'built in podman.') + raise SystemExit() + + def _live_iso_local_config(self, image, work_root): + """ + Live ISO build configuration - This generates both mock and podman + entries, regardless of which one is being used. 
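Shared.build_repo_list (added near the end of shared.py) returns simple name/url pairs and switches between the remote peridot repo URL and a local file:// path depending on compose_dir_is_here; a sketch with hypothetical values:

    from empanadas.util.shared import Shared

    repolist = Shared.build_repo_list(
        repo_base_url='https://yumrepofs.example.org',   # hypothetical
        repos=['BaseOS', 'AppStream'],
        project_id='abcd1234',                           # hypothetical
        current_arch='x86_64',
        compose_latest_sync='/mnt/compose/9/latest-Rocky-9/compose',
        compose_dir_is_here=True,
        hashed=False,
    )
    # With compose_dir_is_here=True each entry looks like:
    #   {'name': 'BaseOS',
    #    'url': 'file:///mnt/compose/9/latest-Rocky-9/compose/BaseOS/x86_64/os'}
    # With it False, the URL is built from repo_base_url and project_id instead.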
+ """ + self.log.info('Generating Live ISO configuration and script') + + entries_dir = os.path.join(work_root, "entries") + mock_iso_template = self.tmplenv.get_template('isomock.tmpl.cfg') + mock_sh_template = self.tmplenv.get_template('liveisobuild.tmpl.sh') + iso_template = self.tmplenv.get_template('buildLiveImage.tmpl.sh') + + mock_iso_path = '/var/tmp/live-{}.cfg'.format(self.major_version) + mock_sh_path = '{}/liveisobuild-{}-{}.sh'.format( + entries_dir, + self.current_arch, + image + ) + iso_template_path = '{}/buildLiveImage-{}-{}.sh'.format( + entries_dir, + self.current_arch, + image + ) + + log_root = os.path.join( + work_root, + "logs", + self.date_stamp + ) + + ks_start = self.livemap['ksentry'][image] + + if not os.path.exists(log_root): + os.makedirs(log_root, exist_ok=True) + + log_path_command = '| tee -a {}/{}-{}.log'.format( + log_root, + self.current_arch, + image + ) + required_pkgs = self.livemap['required_pkgs'] + + volid = '{}-{}-{}'.format( + self.shortname, + image, + self.release + ) + + isoname = '{}-{}-{}-{}-{}.iso'.format( + self.shortname, + image, + self.release, + self.current_arch, + self.date + ) + + live_pkg_cmd = '/usr/bin/dnf install {} -y {}'.format( + ' '.join(required_pkgs), + log_path_command + ) + + git_clone_cmd = '/usr/bin/git clone {} -b {} /builddir/ks {}'.format( + self.livemap['git_repo'], + self.livemap['branch'], + log_path_command + ) + + make_image_cmd = ('/usr/sbin/livemedia-creator --ks {} --no-virt ' + '--resultdir /builddir/lmc --project="{} {}" --make-iso --volid {} ' + '--iso-only --iso-name {} --releasever={} --nomacboot {}').format( + '/builddir/ks.cfg', + self.distname, + image, + volid, + isoname, + self.release, + log_path_command + ) + + mock_iso_template_output = mock_iso_template.render( + arch=self.current_arch, + major=self.major_version, + fullname=self.fullname, + shortname=self.shortname, + required_pkgs=required_pkgs, + dist=self.disttag, + repos=self.repolist, + compose_dir_is_here=True, + user_agent='{{ user_agent }}', + compose_dir=self.compose_root, + ) + + mock_sh_template_output = mock_sh_template.render( + arch=self.current_arch, + major=self.major_version, + isolation=self.mock_isolation, + builddir=self.mock_work_root, + shortname=self.shortname, + isoname=isoname, + entries_dir=entries_dir, + image=image, + ) + + iso_template_output = iso_template.render( + live_iso_mode=self.live_iso_mode, + arch=self.current_arch, + compose_live_work_dir=self.live_work_dir, + make_image=make_image_cmd, + live_pkg_cmd=live_pkg_cmd, + isoname=isoname, + major=self.major_version, + git_clone=git_clone_cmd, + ks_file=ks_start, + ) + + with open(mock_iso_path, "w+") as mip: + mip.write(mock_iso_template_output) + mip.close() + + with open(mock_sh_path, "w+") as msp: + msp.write(mock_sh_template_output) + msp.close() + + with open(iso_template_path, "w+") as itp: + itp.write(iso_template_output) + itp.close() + + os.chmod(mock_sh_path, 0o755) + os.chmod(iso_template_path, 0o755) + + def _live_iso_podman_run(self, arch, images, work_root): + """ + Does all the image building in podman containers to parallelize the + process. This is a case where you can call this instead of looping mock + or not run in peridot. This gives the Release Engineer a little more + flexibility if they care enough. + + This honestly assumes you are running this on a machine that has access + to the compose directories. It's the same as if you were doing a + reposync of the repositories. 
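Rendered out, the generated build script boils down to one livemedia-creator call; reconstructing it with hypothetical values for the kickstart entry and date gives roughly:

    image = 'Workstation-Lite'                 # hypothetical ksentry key
    distname, shortname = 'Rocky Linux', 'Rocky'
    release, arch, date = '9.1', 'x86_64', '20220707'

    volid = '{}-{}-{}'.format(shortname, image, release)
    isoname = '{}-{}-{}-{}-{}.iso'.format(shortname, image, release, arch, date)

    make_image_cmd = (
        '/usr/sbin/livemedia-creator --ks /builddir/ks.cfg --no-virt '
        '--resultdir /builddir/lmc --project="{} {}" --make-iso --volid {} '
        '--iso-only --iso-name {} --releasever={} --nomacboot'
    ).format(distname, image, volid, isoname, release)

    # -> Rocky-Workstation-Lite-9.1-x86_64-20220707.iso with volume id
    #    Rocky-Workstation-Lite-9.1, built inside the mock chroot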
+ """ + cmd = Shared.podman_cmd(self.log) + entries_dir = os.path.join(work_root, "entries") + isos_dir = self.live_work_dir + bad_exit_list = [] + checksum_list = [] + entry_name_list = [] + self.log.warn(Color.WARN + 'This mode does not work properly. It will fail.') + for i in images: + image_name = i + entry_name = 'buildLiveImage-{}-{}.sh'.format(arch, i) + entry_name_list.append(entry_name) + + isoname = '{}/{}-{}-{}-{}-{}.iso'.format( + arch, + self.shortname, + i, + self.major_version, + arch, + self.date + ) + + checksum_list.append(isoname) + + print(entry_name_list, cmd, entries_dir) + for pod in entry_name_list: + podman_cmd_entry = '{} run -d -it -v "{}:{}" -v "{}:{}" --name {} --entrypoint {}/{} {}'.format( + cmd, + self.compose_root, + self.compose_root, + entries_dir, + entries_dir, + pod, + entries_dir, + pod, + self.container + ) + + process = subprocess.call( + shlex.split(podman_cmd_entry), + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL + ) + + join_all_pods = ' '.join(entry_name_list) + time.sleep(3) + self.log.info(Color.INFO + 'Building requested live images ...') + + pod_watcher = '{} wait {}'.format( + cmd, + join_all_pods + ) + + watch_man = subprocess.call( + shlex.split(pod_watcher), + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL + ) + + # After the above is done, we'll check each pod process for an exit + # code. + pattern = "Exited (0)" + for pod in entry_name_list: + checkcmd = '{} ps -f status=exited -f name={}'.format( + cmd, + pod + ) + podcheck = subprocess.Popen( + checkcmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True + ) + + output, errors = podcheck.communicate() + if 'Exited (0)' not in output.decode(): + self.log.error(Color.FAIL + pod) + bad_exit_list.append(pod) + + rmcmd = '{} rm {}'.format( + cmd, + join_all_pods + ) + + rmpod = subprocess.Popen( + rmcmd, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + shell=True + ) + + entry_name_list.clear() + for p in checksum_list: + path = os.path.join(isos_dir, p) + if os.path.exists(path): + self.log.info(Color.INFO + 'Performing checksum for ' + p) + checksum = Shared.get_checksum(path, self.checksum, self.log) + if not checksum: + self.log.error(Color.FAIL + path + ' not found! Are you sure it was built?') + with open(path + '.CHECKSUM', "w+") as c: + c.write(checksum) + c.close() + + self.log.info(Color.INFO + 'Building live images completed') + + if len(bad_exit_list) == 0: + self.log.info(Color.INFO + 'Live images completed successfully.') + else: + self.log.error( + Color.FAIL + + 'There were issues with the work done. As a result, ' + + 'some or all ISOs may not be copied later.' + ) + + def _live_iso_local_run(self, arch, image, work_root): + """ + Runs the actual local process using mock. This is for running in + peridot or running on a machine that does not have podman, but does + have mock available. 
+ """ + entries_dir = os.path.join(work_root, "entries") + live_dir_arch = os.path.join(self.live_work_dir, arch) + isoname = '{}-{}-{}-{}-{}.iso'.format( + self.shortname, + image, + self.release, + arch, + self.date + ) + live_res_dir = '/var/lib/mock/{}-{}-{}/result'.format( + self.shortname.lower(), + self.major_version, + arch + ) + + if self.justcopyit: + if os.path.exists(os.path.join(live_dir_arch, isoname)): + self.log.warn(Color.WARN + 'Image already exists.') + if self.force_build: + self.log.warn(Color.WARN + 'Building anyway.') + else: + self.log.warn(Color.WARN + 'Skipping.') + return + + live_iso_cmd = '/bin/bash {}/liveisobuild-{}-{}.sh'.format(entries_dir, arch, image) + self.log.info('Starting mock build...') + p = subprocess.call(shlex.split(live_iso_cmd)) + if p != 0: + self.log.error('An error occured during execution.') + self.log.error('See the logs for more information.') + raise SystemExit() + + self.log.warn( + Color.WARN + 'This is meant for builds done in peridot or ' + + 'locally for an end user.' + ) + self.log.warn( + Color.WARN + + 'If you are looping images, your built image may get ' + + 'overwritten. Ensure you have justcopyit enabled to avoid this.' + ) + + if self.justcopyit: + self.log.info(Color.INFO + 'Copying image to work directory') + source_path = os.path.join(live_res_dir, isoname) + dest_path = os.path.join(live_dir_arch, isoname) + os.makedirs(live_dir_arch, exist_ok=True) + shutil.copy2(source_path, dest_path) + self.log.info(Color.INFO + 'Generating checksum') + checksum = Shared.get_checksum(dest_path, self.checksum, self.log) + if not checksum: + self.log.error(Color.FAIL + dest_path + ' not found. Did we copy it?') + return + with open(dest_path + '.CHECKSUM', "w+") as c: + c.write(checksum) + c.close() diff --git a/iso/empanadas/empanadas/util/shared.py b/iso/empanadas/empanadas/util/shared.py index d9bb357..8155090 100644 --- a/iso/empanadas/empanadas/util/shared.py +++ b/iso/empanadas/empanadas/util/shared.py @@ -1,7 +1,42 @@ # These are shared utilities used import os +import json import hashlib +import shlex +import subprocess +import yaml +import requests +import boto3 +import xmltodict +import productmd.treeinfo +import productmd.composeinfo +import empanadas +import kobo.shortcuts +from empanadas.common import Color + +class ArchCheck: + """ + Arches and their files + """ + archfile = { + 'x86_64': [ + 'isolinux/vmlinuz', + 'images/grub.conf', + 'EFI/BOOT/BOOTX64.EFI' + ], + 'aarch64': [ + 'EFI/BOOT/BOOTAA64.EFI' + ], + 'ppc64le': [ + 'ppc/bootinfo.txt', + 'ppc/ppc64/vmlinuz' + ], + 's390x': [ + 'generic.ins', + 'images/generic.prm' + ] + } class Shared: """ @@ -44,6 +79,189 @@ class Shared: checksum.hexdigest() ) + @staticmethod + def treeinfo_new_write( + file_path, + distname, + shortname, + release, + arch, + time, + repo + ): + """ + Writes really basic treeinfo, this is for single repository treeinfo + data. This is usually called in the case of a fresh run and each repo + needs one. This basic info may be overwritten later either by lorax + data or a full run. + """ + ti = productmd.treeinfo.TreeInfo() + ti.release.name = distname + ti.release.short = shortname + ti.release.version = release + ti.tree.arch = arch + ti.tree.build_timestamp = time + # Variants (aka repos) + variant = productmd.treeinfo.Variant(ti) + variant.id = repo + variant.uid = repo + variant.name = repo + variant.type = "variant" + variant.paths.repository = "." 
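treeinfo_new_write only needs scalar arguments, so seeding a freshly synced repo with a minimal .treeinfo is a single call; the path and values below are hypothetical:

    import time
    from empanadas.util.shared import Shared

    Shared.treeinfo_new_write(
        file_path='/mnt/compose/9/latest-Rocky-9/compose/BaseOS/x86_64/os/.treeinfo',
        distname='Rocky Linux',
        shortname='Rocky',
        release='9.1',
        arch='x86_64',
        time=time.time(),
        repo='BaseOS',
    )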
+ variant.paths.packages = "Packages" + ti.variants.add(variant) + ti.dump(file_path) + + @staticmethod + def treeinfo_modify_write(data, imagemap, logger): + """ + Modifies a specific treeinfo with already available data. This is in + the case of modifying treeinfo for primary repos or images. + """ + arch = data['arch'] + variant = data['variant'] + variant_path = data['variant_path'] + checksum = data['checksum'] + distname = data['distname'] + fullname = data['fullname'] + shortname = data['shortname'] + release = data['release'] + timestamp = data['timestamp'] + + os_or_ks = '' + if '/os' in variant_path or not imagemap['disc']: + os_or_ks = 'os' + if '/kickstart' in variant_path: + os_or_ks = 'kickstart' + + image = os.path.join(variant_path) + treeinfo = os.path.join(image, '.treeinfo') + discinfo = os.path.join(image, '.discinfo') + mediarepo = os.path.join(image, 'media.repo') + #imagemap = self.iso_map['images'][variant] + primary = imagemap['variant'] + repos = imagemap['repos'] + is_disc = False + + if imagemap['disc']: + is_disc = True + discnum = 1 + + # load up productmd + ti = productmd.treeinfo.TreeInfo() + ti.load(treeinfo) + + # Set the name + ti.release.name = distname + ti.release.short = shortname + # Set the version (the initial lorax run does this, but we are setting + # it just in case) + ti.release.version = release + # Assign the present images into a var as a copy. For each platform, + # clear out the present dictionary. For each item and path in the + # assigned var, assign it back to the platform dictionary. If the path + # is empty, continue. Do checksums afterwards. + plats = ti.images.images.copy() + for platform in ti.images.images: + ti.images.images[platform] = {} + for i, p in plats[platform].items(): + if not p: + continue + if 'boot.iso' in i and is_disc: + continue + ti.images.images[platform][i] = p + ti.checksums.add(p, checksum, root_dir=image) + + # stage2 checksums + if ti.stage2.mainimage: + ti.checksums.add(ti.stage2.mainimage, checksum, root_dir=image) + + if ti.stage2.instimage: + ti.checksums.add(ti.stage2.instimage, checksum, root_dir=image) + + # If we are a disc, set the media section appropriately. + if is_disc: + ti.media.discnum = discnum + ti.media.totaldiscs = discnum + + # Create variants + # Note to self: There's a lot of legacy stuff running around for + # Fedora, ELN, and RHEL in general. This is the general structure, + # apparently. But there could be a chance it'll change. We may need to + # put in a configuration to deal with it at some point. + #ti.variants.variants.clear() + for y in repos: + if y in ti.variants.variants.keys(): + vari = ti.variants.variants[y] + else: + vari = productmd.treeinfo.Variant(ti) + + vari.id = y + vari.uid = y + vari.name = y + vari.type = "variant" + if is_disc: + vari.paths.repository = y + vari.paths.packages = y + "/Packages" + else: + if y == primary: + vari.paths.repository = "." 
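The imagemap argument consumed throughout treeinfo_modify_write is the per-image dict from the version configuration (iso_map['images'][variant] on the IsoBuild side); the keys it reads here are variant, repos and disc. A hypothetical minimal pair of arguments:

    imagemap = {
        'variant': 'BaseOS',             # primary variant of the image
        'repos': ['BaseOS', 'AppStream'],
        'disc': True,                    # True for DVD-style media
    }

    data = {
        'arch': 'x86_64',
        'variant': 'dvd',
        'variant_path': '/mnt/compose/9/work/lorax/x86_64/dvd',   # hypothetical
        'checksum': 'sha256',
        'distname': 'Rocky Linux',
        'fullname': 'Rocky Linux 9.1',
        'shortname': 'Rocky',
        'release': '9.1',
        'timestamp': 1656633600,
    }
    # Shared.treeinfo_modify_write(data, imagemap, logger) then rewrites
    # .treeinfo, .discinfo and media.repo under variant_path.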
+ vari.paths.packages = "Packages" + else: + vari.paths.repository = "../../../" + y + "/" + arch + "/" + os_or_ks + vari.paths.packages = "../../../" + y + "/" + arch + "/" + os_or_ks + "/Packages" + + if y not in ti.variants.variants.keys(): + ti.variants.add(vari) + + del vari + + # Set default variant + logger.info('Writing treeinfo') + ti.dump(treeinfo, main_variant=primary) + # Set discinfo + logger.info('Writing discinfo') + Shared.discinfo_write(timestamp, fullname, arch, discinfo) + # Set media.repo + logger.info('Writing media.repo') + Shared.media_repo_write(timestamp, fullname, mediarepo) + + @staticmethod + def write_metadata( + timestamp, + datestamp, + fullname, + release, + compose_id, + file_path + ): + + metadata = { + "header": { + "name": "empanadas", + "version": empanadas.__version__, + "type": "toolkit", + "maintainer": "SIG/Core" + }, + "payload": { + "compose": { + "date": datestamp, + "id": compose_id, + "fullname": fullname, + "release": release, + "timestamp": timestamp + } + } + } + + with open(file_path + ".json", "w+") as fp: + json.dump(metadata, fp, indent=4) + fp.close() + + with open(file_path + ".yaml", "w+") as yp: + yaml.dump(metadata, yp) + yp.close() + @staticmethod def discinfo_write(timestamp, fullname, arch, file_path): """ @@ -53,7 +271,8 @@ class Shared: "%s" % timestamp, "%s" % fullname, "%s" % arch, - "ALL" + "ALL", + "" ] with open(file_path, "w+") as f: @@ -77,3 +296,668 @@ class Shared: with open(file_path, "w") as f: f.write("\n".join(data)) + + @staticmethod + def generate_compose_dirs( + compose_base, + shortname, + version, + date_stamp, + logger + ) -> str: + """ + Generate compose dirs for full runs + """ + compose_base_dir = os.path.join( + compose_base, + "{}-{}-{}".format( + shortname, + version, + date_stamp + ) + ) + logger.info('Creating compose directory %s' % compose_base_dir) + if not os.path.exists(compose_base_dir): + os.makedirs(compose_base_dir) + os.makedirs(compose_base_dir + '/work') + os.makedirs(compose_base_dir + '/work/entries') + os.makedirs(compose_base_dir + '/work/logs') + os.makedirs(compose_base_dir + '/compose') + + return compose_base_dir + + @staticmethod + def podman_cmd(logger) -> str: + """ + This generates the podman run command. This is in the case that we want + to do reposyncs in parallel as we cannot reasonably run multiple + instances of dnf reposync on a single system. + """ + cmd = None + if os.path.exists("/usr/bin/podman"): + cmd = "/usr/bin/podman" + else: + logger.error(Color.FAIL + '/usr/bin/podman was not found. Good bye.') + raise SystemExit("\n\n/usr/bin/podman was not found.\n\nPlease " + " ensure that you have installed the necessary packages on " + " this system. " + Color.BOLD + "Note that docker is not " + "supported." + Color.END + ) + return cmd + + @staticmethod + def reposync_cmd(logger) -> str: + """ + This generates the reposync command. We don't support reposync by + itself and will raise an error. + + :return: The path to the reposync command. If dnf exists, we'll use + that. Otherwise, fail immediately. + """ + cmd = None + if os.path.exists("/usr/bin/dnf"): + cmd = "/usr/bin/dnf reposync" + else: + logger(Color.FAIL + '/usr/bin/dnf was not found. Good bye.') + raise SystemExit("/usr/bin/dnf was not found. \n\n/usr/bin/reposync " + "is not sufficient and you are likely running on an el7 " + "system or a grossly modified EL8+ system, " + Color.BOLD + + "which tells us that you probably made changes to these tools " + "expecting them to work and got to this point." 
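write_metadata dumps one header/payload structure to both JSON and YAML next to the compose; for a hypothetical compose the call and the resulting JSON look roughly like:

    from empanadas.util.shared import Shared

    Shared.write_metadata(
        timestamp=1656633600,
        datestamp='20220701',
        fullname='Rocky Linux',
        release='9.1',
        compose_id='Rocky-9.1-20220701.0',                       # hypothetical id
        file_path='/mnt/compose/9/Rocky-9.1-20220701/metadata',  # writes .json and .yaml
    )
    # metadata.json ends up as:
    # {"header": {"name": "empanadas", "version": "<package version>",
    #             "type": "toolkit", "maintainer": "SIG/Core"},
    #  "payload": {"compose": {"date": "20220701", "id": "Rocky-9.1-20220701.0",
    #                          "fullname": "Rocky Linux", "release": "9.1",
    #                          "timestamp": 1656633600}}}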
+ Color.END) + return cmd + + @staticmethod + def git_cmd(logger) -> str: + """ + This generates the git command. This is when we need to pull down extra + files or do work from a git repository. + """ + cmd = None + if os.path.exists("/usr/bin/git"): + cmd = "/usr/bin/git" + else: + logger.error(Color.FAIL + '/usr/bin/git was not found. Good bye.') + raise SystemExit("\n\n/usr/bin/git was not found.\n\nPlease " + " ensure that you have installed the necessary packages on " + " this system. " + ) + return cmd + + @staticmethod + def mock_cmd(logger) -> str: + """ + This generates the mock command. This is when we are building or + performing any kind of operation in mock. + """ + cmd = None + if os.path.exists("/usr/bin/mock"): + cmd = "/usr/bin/mock" + else: + logger.error(Color.FAIL + '/usr/bin/mock was not found. Good bye.') + raise SystemExit("\n\n/usr/bin/mock was not found.\n\nPlease " + " ensure that you have installed the necessary packages on " + " this system. " + ) + return cmd + + @staticmethod + def generate_conf(data, logger, dest_path='/var/tmp') -> str: + """ + Generates the necessary repo conf file for the operation. This repo + file should be temporary in nature. This will generate a repo file + with all repos by default. If a repo is chosen for sync, that will be + the only one synced. + + :param dest_path: The destination where the temporary conf goes + :param repo: The repo object to create a file for + """ + fname = os.path.join( + dest_path, + "{}-{}-config.repo".format(data.shortname, data.major_version) + ) + data.log.info('Generating the repo configuration: %s' % fname) + + if data.repo_base_url.startswith("/"): + logger.error("Local file syncs are not supported.") + raise SystemExit(Color.BOLD + "Local file syncs are not " + "supported." + Color.END) + + prehashed = '' + if data.hashed: + prehashed = "hashed-" + # create dest_path + if not os.path.exists(dest_path): + os.makedirs(dest_path, exist_ok=True) + config_file = open(fname, "w+") + repolist = [] + for repo in data.repos: + + constructed_url = '{}/{}/repo/{}{}/$basearch'.format( + data.repo_base_url, + data.project_id, + prehashed, + repo, + ) + + constructed_url_src = '{}/{}/repo/{}{}/src'.format( + data.repo_base_url, + data.project_id, + prehashed, + repo, + ) + + repodata = { + 'name': repo, + 'baseurl': constructed_url, + 'srcbaseurl': constructed_url_src, + 'gpgkey': data.extra_files['git_raw_path'] + data.extra_files['gpg'][data.gpgkey] + } + repolist.append(repodata) + + template = data.tmplenv.get_template('repoconfig.tmpl') + output = template.render(repos=repolist) + config_file.write(output) + + config_file.close() + return fname + + @staticmethod + def quick_sync(src, dest, logger, tmp_dir): + """ + Does a quick sync from one place to another. This determines the method + in which will be used. We will look for fpsync and fall back to + parallel | rsync if that is also available. It will fail if parallel is + not available. + + Return true or false on completion? + """ + + @staticmethod + def simple_sync(src, dest): + """ + This is for simple syncs only, using rsync or copytree. 
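The fpsync helper added just after this shells out rather than wrapping a library; with hypothetical paths, the command it assembles is:

    src = '/mnt/compose/9/Rocky-9.1-20220701/compose/'     # hypothetical
    dest = '/mnt/repos-staging/mirror/pub/rocky/9.1/'      # hypothetical
    tmp_dir = '/var/tmp/fpsync-work'                       # hypothetical

    fpsync_cmd = '/usr/bin/fpsync -o "{}" -n 18 -t {} {} {}'.format(
        '-av --numeric-ids --no-compress --chown=10004:10005',
        tmp_dir, src, dest
    )
    # 18 parallel rsync workers, each using the switches above; the helper then
    # checks that dest exists before reporting success.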
+ """ + + @staticmethod + def fpsync_method(src, dest, tmp_dir): + """ + Returns a list for the fpsync command + """ + cmd = '/usr/bin/fpsync' + rsync_switches = '-av --numeric-ids --no-compress --chown=10004:10005' + if not os.path.exists(cmd): + message = 'fpsync not found' + retval = 1 + return message, retval + + os.makedirs(tmp_dir, exist_ok=True) + + fpsync_cmd = '{} -o "{}" -n 18 -t {} {} {}'.format( + cmd, + rsync_switches, + tmp_dir, + src, + dest + ) + + process = subprocess.call( + shlex.split(fpsync_cmd), + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) + + if process != 0: + message = 'Syncing (fpsync) failed' + retval = process + return message, retval + + if os.path.exists(dest): + message = 'Syncing (fpsync) succeeded' + retval = process + else: + message = 'Path synced does not seem to exist for some reason.' + retval = 1 + + #shutil.rmtree(tmp_dir) + + return message, retval + + @staticmethod + def rsync_method(src, dest): + """ + Returns a string for the rsync command plus parallel. Yes, this is a + hack. + """ + find_cmd = '/usr/bin/find' + parallel_cmd = '/usr/bin/parallel' + rsync_cmd = '/usr/bin/rsync' + switches = '-av --chown=10004:10005 --progress --relative --human-readable' + + os.makedirs(dest, exist_ok=True) + + return 'Not available', 1 + + @staticmethod + def s3_determine_latest(s3_bucket, release, arches, filetype, name, logger): + """ + Using native s3, determine the latest artifacts and return a dict + """ + temp = [] + data = {} + s3 = boto3.client('s3') + + try: + s3.list_objects(Bucket=s3_bucket)['Contents'] + except: + logger.error( + '[' + Color.BOLD + Color.RED + 'FAIL' + Color.END + '] ' + + 'Cannot access s3 bucket.' + ) + raise SystemExit() + + for y in s3.list_objects(Bucket=s3_bucket)['Contents']: + if filetype in y['Key'] and release in y['Key'] and name in y['Key']: + temp.append(y['Key']) + + for arch in arches: + temps = [] + for y in temp: + if arch in y: + temps.append(y) + temps.sort(reverse=True) + if len(temps) > 0: + data[arch] = temps[0] + + return data + + @staticmethod + def s3_download_artifacts(force_download, s3_bucket, source, dest, logger): + """ + Download the requested artifact(s) via s3 + """ + s3 = boto3.client('s3') + if os.path.exists(dest): + if not force_download: + logger.warn( + '[' + Color.BOLD + Color.YELLOW + 'WARN' + Color.END + '] ' + + 'Artifact at ' + dest + ' already exists' + ) + return + + logger.info('Downloading ({}) to: {}'.format(source, dest)) + try: + s3.download_file( + Bucket=s3_bucket, + Key=source, + Filename=dest + ) + except: + logger.error('There was an issue downloading from %s' % s3_bucket) + + @staticmethod + def reqs_determine_latest(s3_bucket_url, release, arches, filetype, name, logger): + """ + Using requests, determine the latest artifacts and return a list + """ + temp = [] + data = {} + + try: + bucket_data = requests.get(s3_bucket_url) + except requests.exceptions.RequestException as e: + logger.error('The s3 bucket http endpoint is inaccessible') + raise SystemExit(e) + + resp = xmltodict.parse(bucket_data.content) + + for y in resp['ListBucketResult']['Contents']: + if filetype in y['Key'] and release in y['Key'] and name in y['Key']: + temp.append(y['Key']) + + for arch in arches: + temps = [] + for y in temp: + if arch in y: + temps.append(y) + temps.sort(reverse=True) + if len(temps) > 0: + data[arch] = temps[0] + + return data + + @staticmethod + def reqs_download_artifacts(force_download, s3_bucket_url, source, dest, logger): + """ + Download the requested 
artifact(s) via requests only + """ + if os.path.exists(dest): + if not force_download: + logger.warn( + '[' + Color.BOLD + Color.YELLOW + 'WARN' + Color.END + '] ' + + 'Artifact at ' + dest + ' already exists' + ) + return + unurl = s3_bucket_url + '/' + source + + logger.info('Downloading ({}) to: {}'.format(source, dest)) + try: + with requests.get(unurl, allow_redirects=True) as r: + with open(dest, 'wb') as f: + f.write(r.content) + f.close() + r.close() + except requests.exceptions.RequestException as e: + logger.error('There was a problem downloading the artifact') + raise SystemExit(e) + + # ISO related + @staticmethod + def get_boot_options(arch, createfrom, efi=True, hfs_compat=False): + """ + Gets boot options based on architecture, the iso commands are not + universal. + """ + if arch in ("armhfp",): + result = [] + return result + + if arch in ("aarch64",): + result = [ + "-eltorito-alt-boot", + "-e", + "images/efiboot.img", + "-no-emul-boot", + ] + return result + + if arch in ("i386", "i686", "x86_64"): + result = [ + "-b", + "isolinux/isolinux.bin", + "-c", + "isolinux/boot.cat", + "-no-emul-boot", + "-boot-load-size", + "4", + "-boot-info-table", + ] + + # EFI args + if arch == "x86_64": + result.extend( + [ + "-eltorito-alt-boot", + "-e", + "images/efiboot.img", + "-no-emul-boot" + ] + ) + return result + + # need to go double check if this is needed with stream 9 + if arch == "ppc64le" and hfs_compat: + result = [ + "-part", + "-hfs", + "-r", + "-l", + "-sysid", + "PPC", + "-no-desktop", + "-allow-multidot", + "-chrp-boot", + "-map", + os.path.join(createfrom, "mapping"), + "-hfs-bless", + "/ppc/mac" + ] + return result + + if arch == "ppc64le" and not hfs_compat: + result = [ + "-r", + "-l", + "-sysid", + "PPC", + "-chrp-boot", + ] + return result + + if arch in ("s390x",): + result = [ + "-eltorito-boot", + "images/cdboot.img", + "-no-emul-boot", + ] + return result + + raise ValueError("Architecture %s%s%s is NOT known" % (Color.BOLD, arch, Color.END)) + + @staticmethod + def get_mkisofs_cmd( + iso, + appid=None, + volid=None, + volset=None, + exclude=None, + boot_args=None, + input_charset="utf-8", + grafts=None, + use_xorrisofs=False, + iso_level=None, + ): + # I should hardcode this I think + #untranslated_filenames = True + translation_table = True + #joliet = True + #joliet_long = True + #rock = True + cmd = ["/usr/bin/xorrisofs" if use_xorrisofs else "/usr/bin/genisoimage"] + if not os.path.exists(cmd[0]): + #logger.error('%s was not found. Good bye.' % cmd[0]) + raise SystemExit("\n\n" + cmd[0] + " was not found.\n\nPlease " + " ensure that you have installed the necessary packages on " + " this system. 
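Because the boot options and the mkisofs assembly are both static methods now, a command can be previewed outside of a compose run; a sketch with hypothetical output path and volume id (the helper raises SystemExit if /usr/bin/genisoimage is not installed):

    from empanadas.util.shared import Shared

    # createfrom only matters for the ppc64le hfs-compat path
    boot_args = Shared.get_boot_options('x86_64', '/unused/for/x86_64')
    cmd = Shared.get_mkisofs_cmd(
        '/tmp/Rocky-example-dvd.iso',
        volid='Rocky-9-1-x86_64-dvd',
        exclude=['./lost+found'],
        boot_args=boot_args,
        grafts='/tmp/graft-points.txt',
    )
    print(' '.join(cmd))
    # /usr/bin/genisoimage -untranslated-filenames -volid Rocky-9-1-x86_64-dvd
    #   -J -joliet-long -rational-rock -translation-table -input-charset utf-8
    #   -x ./lost+found -b isolinux/isolinux.bin ... -o /tmp/Rocky-example-dvd.iso
    #   -graft-points -path-list /tmp/graft-points.txt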
" + ) + + if iso_level: + cmd.extend(["-iso-level", str(iso_level)]) + + if appid: + cmd.extend(["-appid", appid]) + + #if untranslated_filenames: + cmd.append("-untranslated-filenames") + + if volid: + cmd.extend(["-volid", volid]) + + #if joliet: + cmd.append("-J") + + #if joliet_long: + cmd.append("-joliet-long") + + if volset: + cmd.extend(["-volset", volset]) + + #if rock: + cmd.append("-rational-rock") + + if not use_xorrisofs and translation_table: + cmd.append("-translation-table") + + if input_charset: + cmd.extend(["-input-charset", input_charset]) + + if exclude: + for i in kobo.shortcuts.force_list(exclude): + cmd.extend(["-x", i]) + + if boot_args: + cmd.extend(boot_args) + + cmd.extend(["-o", iso]) + + if grafts: + cmd.append("-graft-points") + cmd.extend(["-path-list", grafts]) + + return cmd + + @staticmethod + def get_make_image_cmd(opts, hfs_compat): + """ + Generates the command to actually make the image in the first place + """ + isokwargs = {} + isokwargs["boot_args"] = Shared.get_boot_options( + opts['arch'], + os.path.join("$TEMPLATE", "config_files/ppc"), + hfs_compat=hfs_compat, + ) + + if opts['arch'] in ("ppc64", "ppc64le"): + isokwargs["input_charset"] = None + + if opts['use_xorrisofs']: + cmd = ['/usr/bin/xorriso', '-dialog', 'on', '<', opts['graft_points']] + else: + cmd = Shared.get_mkisofs_cmd( + opts['iso_name'], + volid=opts['volid'], + exclude=["./lost+found"], + grafts=opts['graft_points'], + use_xorrisofs=False, + iso_level=opts['iso_level'], + **isokwargs + ) + + returned_cmd = ' '.join(cmd) + return returned_cmd + + @staticmethod + def get_isohybrid_cmd(opts): + cmd = [] + if not opts['use_xorrisofs']: + if opts['arch'] == "x86_64": + cmd = ["/usr/bin/isohybrid"] + cmd.append("--uefi") + cmd.append(opts['iso_name']) + returned_cmd = ' '.join(cmd) + else: + returned_cmd = '' + + return returned_cmd + + @staticmethod + def get_implantisomd5_cmd(opts): + """ + Implants md5 into iso + """ + cmd = ["/usr/bin/implantisomd5", "--supported-iso", opts['iso_name']] + returned_cmd = ' '.join(cmd) + return returned_cmd + + @staticmethod + def get_manifest_cmd(opts): + """ + Gets an ISO manifest + """ + if opts['use_xorrisofs']: + return """/usr/bin/xorriso -dev %s --find | + tail -n+2 | + tr -d "'" | + cut -c2- | sort >> %s.manifest""" % ( + shlex.quote(opts['iso_name']), + shlex.quote(opts['iso_name']), + ) + else: + return "/usr/bin/isoinfo -R -f -i %s | grep -v '/TRANS.TBL$' | sort >> %s.manifest" % ( + shlex.quote(opts['iso_name']), + shlex.quote(opts['iso_name']), + ) + + @staticmethod + def build_repo_list( + repo_base_url, + repos, + project_id, + current_arch, + compose_latest_sync, + compose_dir_is_here: bool = False, + hashed: bool = False, + ): + """ + Builds the repo dictionary + """ + repolist = [] + prehashed = '' + if hashed: + prehashed = 'hashed-' + + for name in repos: + if not compose_dir_is_here: + constructed_url = '{}/{}/repo/{}{}/{}'.format( + repo_base_url, + project_id, + prehashed, + name, + current_arch + ) + else: + constructed_url = 'file://{}/{}/{}/os'.format( + compose_latest_sync, + name, + current_arch + ) + + + repodata = { + 'name': name, + 'url': constructed_url + } + + repolist.append(repodata) + + return repolist + + @staticmethod + def composeinfo_write( + file_path, + distname, + shortname, + release, + release_type, + datestamp, + arches: list = [], + repos: list = [] + ): + """ + Write compose info similar to pungi. + + arches and repos may be better suited for a dictionary. 
that is a + future thing we will work on for 0.3.0. + """ + cijson = file_path + '.json' + ciyaml = file_path + '.yaml' + ci = productmd.composeinfo.ComposeInfo() + ci.release.name = distname + ci.release.short = shortname + ci.release.version = release + ci.release.type = release_type + + ci.compose.id = '{}-{}-{}'.format(shortname, release, datestamp) + ci.compose.type = "production" + ci.compose.date = datestamp + ci.compose.respin = 0 + + ci.dump(cijson) + + with open(cijson, 'r') as cidump: + jsonData = json.load(cidump) + cidump.close() + + with open(ciyaml, 'w+') as ymdump: + yaml.dump(jsonData, ymdump) + ymdump.close() diff --git a/iso/empanadas/imagefactory.patch b/iso/empanadas/imagefactory.patch new file mode 100644 index 0000000..43254b8 --- /dev/null +++ b/iso/empanadas/imagefactory.patch @@ -0,0 +1,16 @@ +diff --git a/imagefactory_plugins/TinMan/TinMan.info b/imagefactory_plugins/TinMan/TinMan.info +index bd61a02..00a8112 100644 +--- a/imagefactory_plugins/TinMan/TinMan.info ++++ b/imagefactory_plugins/TinMan/TinMan.info +@@ -3,7 +3,10 @@ + "targets": [ ["Fedora", null, null], ["RHEL-6", null, null], ["RHEL-5", null, null], + ["Ubuntu", null, null], ["CentOS-6", null, null], ["CentOS-5", null, null], + ["ScientificLinux-6", null, null], ["ScientificLinux-5", null, null], ["OpenSUSE", null, null], +- [ "RHEL-7", null, null ], [ "CentOS-7", null, null ], [ "ScientificLinux-7", null, null ] ], ++ [ "RHEL-7", null, null ], [ "CentOS-7", null, null ], [ "ScientificLinux-7", null, null ], ++ [ "RHEL-8", null, null ], [ "CentOS-8", null, null ], [ "Rocky-8", null, null ], ++ [ "RHEL-9", null, null ], [ "CentOS-9", null, null ], [ "Rocky-9", null, null ] ++ ], + "description": "Plugin to support most Oz customize capable guest types", + "maintainer": { + "name": "Red Hat, Inc.", diff --git a/iso/empanadas/oz.rpm b/iso/empanadas/oz.rpm new file mode 100644 index 0000000..83e5a53 Binary files /dev/null and b/iso/empanadas/oz.rpm differ diff --git a/iso/empanadas/poetry.lock b/iso/empanadas/poetry.lock index 35ab49d..7a23d84 100644 --- a/iso/empanadas/poetry.lock +++ b/iso/empanadas/poetry.lock @@ -10,7 +10,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" name = "attrs" version = "21.4.0" description = "Classes Without Boilerplate" -category = "dev" +category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" @@ -22,14 +22,14 @@ tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (> [[package]] name = "boto3" -version = "1.24.14" +version = "1.24.22" description = "The AWS SDK for Python" category = "main" optional = false python-versions = ">= 3.7" [package.dependencies] -botocore = ">=1.27.14,<1.28.0" +botocore = ">=1.27.22,<1.28.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.6.0,<0.7.0" @@ -38,7 +38,7 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.27.14" +version = "1.27.22" description = "Low-level, data-driven core of boto 3." category = "main" optional = false @@ -62,11 +62,11 @@ python-versions = ">=3.6" [[package]] name = "charset-normalizer" -version = "2.0.12" +version = "2.1.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
category = "main" optional = false -python-versions = ">=3.5.0" +python-versions = ">=3.6.0" [package.extras] unicode_backport = ["unicodedata2"] @@ -89,7 +89,7 @@ python-versions = ">=3.5" [[package]] name = "importlib-metadata" -version = "4.11.4" +version = "4.12.0" description = "Read metadata from Python packages" category = "dev" optional = false @@ -102,7 +102,7 @@ zipp = ">=0.5" [package.extras] docs = ["sphinx", "jaraco.packaging (>=9)", "rst.linker (>=1.9)"] perf = ["ipython"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)", "importlib-resources (>=1.3)"] +testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.3)", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)", "importlib-resources (>=1.3)"] [[package]] name = "importlib-resources" @@ -143,7 +143,7 @@ python-versions = ">=3.7" [[package]] name = "kobo" -version = "0.24.1" +version = "0.24.2" description = "A pile of python modules used by Red Hat release engineering to build their tools" category = "main" optional = false @@ -267,7 +267,7 @@ python-versions = ">=3.6" [[package]] name = "requests" -version = "2.28.0" +version = "2.28.1" description = "Python HTTP for Humans." category = "main" optional = false @@ -275,13 +275,13 @@ python-versions = ">=3.7, <4" [package.dependencies] certifi = ">=2017.4.17" -charset-normalizer = ">=2.0.0,<2.1.0" +charset-normalizer = ">=2,<3" idna = ">=2.5,<4" urllib3 = ">=1.21.1,<1.27" [package.extras] socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use_chardet_on_py3 = ["chardet (>=3.0.2,<5)"] +use_chardet_on_py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "rpm-py-installer" @@ -315,7 +315,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" [[package]] name = "typing-extensions" -version = "4.2.0" +version = "4.3.0" description = "Backported and Experimental Type Hints for Python 3.7+" category = "dev" optional = false @@ -365,7 +365,7 @@ testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest- [metadata] lock-version = "1.1" python-versions = ">=3.7,<4" -content-hash = "ccd47ad1b0819968dbad34b68c3f9afd98bd657ee639f9037731fd2a0746bd16" +content-hash = "42676fd0ceb350c8cd90246dc688cfcd404e14d22229052d0527fe342c135b95" [metadata.files] atomicwrites = [ @@ -377,20 +377,20 @@ attrs = [ {file = "attrs-21.4.0.tar.gz", hash = "sha256:626ba8234211db98e869df76230a137c4c40a12d72445c45d5f5b716f076e2fd"}, ] boto3 = [ - {file = "boto3-1.24.14-py3-none-any.whl", hash = "sha256:490f5e88f5551b33ae3019a37412158b76426d63d1fb910968ade9b6a024e5fe"}, - {file = "boto3-1.24.14.tar.gz", hash = "sha256:e284705da36faa668c715ae1f74ebbff4320dbfbe3a733df3a8ab076d1ed1226"}, + {file = "boto3-1.24.22-py3-none-any.whl", hash = "sha256:c9a9f893561f64f5b81de197714ac4951251a328672a8dba28ad4c4a589c3adf"}, + {file = "boto3-1.24.22.tar.gz", hash = "sha256:67d404c643091d4aa37fc485193289ad859f1f65f94d0fa544e13bdd1d4187c1"}, ] botocore = [ - {file = "botocore-1.27.14-py3-none-any.whl", hash = "sha256:df1e9b208ff93daac7c645b0b04fb6dccd7f20262eae24d87941727025cbeece"}, - {file = "botocore-1.27.14.tar.gz", hash = "sha256:bb56fa77b8fa1ec367c2e16dee62d60000451aac5140dcce3ebddc167fd5c593"}, + {file = "botocore-1.27.22-py3-none-any.whl", hash = "sha256:7145d9b7cae87999a9f074de700d02a1b3222ee7d1863aa631ff56c5fc868035"}, + 
{file = "botocore-1.27.22.tar.gz", hash = "sha256:f57cb33446deef92e552b0be0e430d475c73cf64bc9e46cdb4783cdfe39cb6bb"}, ] certifi = [ {file = "certifi-2022.6.15-py3-none-any.whl", hash = "sha256:fe86415d55e84719d75f8b69414f6438ac3547d2078ab91b67e779ef69378412"}, {file = "certifi-2022.6.15.tar.gz", hash = "sha256:84c85a9078b11105f04f3036a9482ae10e4621616db313fe045dd24743a0820d"}, ] charset-normalizer = [ - {file = "charset-normalizer-2.0.12.tar.gz", hash = "sha256:2857e29ff0d34db842cd7ca3230549d1a697f96ee6d3fb071cfa6c7393832597"}, - {file = "charset_normalizer-2.0.12-py3-none-any.whl", hash = "sha256:6881edbebdb17b39b4eaaa821b438bf6eddffb4468cf344f09f89def34a8b1df"}, + {file = "charset-normalizer-2.1.0.tar.gz", hash = "sha256:575e708016ff3a5e3681541cb9d79312c416835686d054a23accb873b254f413"}, + {file = "charset_normalizer-2.1.0-py3-none-any.whl", hash = "sha256:5189b6f22b01957427f35b6a08d9a0bc45b46d3788ef5a92e978433c7a35f8a5"}, ] colorama = [ {file = "colorama-0.4.5-py2.py3-none-any.whl", hash = "sha256:854bf444933e37f5824ae7bfc1e98d5bce2ebe4160d46b5edf346a89358e99da"}, @@ -401,8 +401,8 @@ idna = [ {file = "idna-3.3.tar.gz", hash = "sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d"}, ] importlib-metadata = [ - {file = "importlib_metadata-4.11.4-py3-none-any.whl", hash = "sha256:c58c8eb8a762858f49e18436ff552e83914778e50e9d2f1660535ffb364552ec"}, - {file = "importlib_metadata-4.11.4.tar.gz", hash = "sha256:5d26852efe48c0a32b0509ffbc583fda1a2266545a78d104a6f4aff3db17d700"}, + {file = "importlib_metadata-4.12.0-py3-none-any.whl", hash = "sha256:7401a975809ea1fdc658c3aa4f78cc2195a0e019c5cbc4c06122884e9ae80c23"}, + {file = "importlib_metadata-4.12.0.tar.gz", hash = "sha256:637245b8bab2b6502fcbc752cc4b7a6f6243bb02b31c5c26156ad103d3d45670"}, ] importlib-resources = [ {file = "importlib_resources-5.8.0-py3-none-any.whl", hash = "sha256:7952325ffd516c05a8ad0858c74dff2c3343f136fe66a6002b2623dd1d43f223"}, @@ -417,7 +417,7 @@ jmespath = [ {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"}, ] kobo = [ - {file = "kobo-0.24.1.tar.gz", hash = "sha256:d5a30cc20c323f3e9d9b4b2e511650c4b98929b88859bd8cf57463876686e407"}, + {file = "kobo-0.24.2.tar.gz", hash = "sha256:1b3c17260a93d933d2238884373fbf3485ecd417d930acf984285dc012410e2b"}, ] markupsafe = [ {file = "MarkupSafe-2.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d8446c54dc28c01e5a2dbac5a25f071f6653e6e40f3a8818e8b45d790fe6ef53"}, @@ -558,8 +558,8 @@ pyyaml = [ {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"}, ] requests = [ - {file = "requests-2.28.0-py3-none-any.whl", hash = "sha256:bc7861137fbce630f17b03d3ad02ad0bf978c844f3536d0edda6499dafce2b6f"}, - {file = "requests-2.28.0.tar.gz", hash = "sha256:d568723a7ebd25875d8d1eaf5dfa068cd2fc8194b2e483d7b1f7c81918dbec6b"}, + {file = "requests-2.28.1-py3-none-any.whl", hash = "sha256:8fefa2a1a1365bf5520aac41836fbee479da67864514bdb821f31ce07ce65349"}, + {file = "requests-2.28.1.tar.gz", hash = "sha256:7c5599b102feddaa661c826c56ab4fee28bfd17f5abca1ebbe3e7f19d7c97983"}, ] rpm-py-installer = [ {file = "rpm-py-installer-1.1.0.tar.gz", hash = "sha256:66e5f4f9247752ed386345642683103afaee50fb16928878a204bc12504b9bbe"}, @@ -573,8 +573,8 @@ six = [ {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, ] typing-extensions = [ - {file = "typing_extensions-4.2.0-py3-none-any.whl", hash = 
"sha256:6657594ee297170d19f67d55c05852a874e7eb634f4f753dbd667855e07c1708"}, - {file = "typing_extensions-4.2.0.tar.gz", hash = "sha256:f1c24655a0da0d1b67f07e17a5e6b2a105894e6824b92096378bb3668ef02376"}, + {file = "typing_extensions-4.3.0-py3-none-any.whl", hash = "sha256:25642c956049920a5aa49edcdd6ab1e06d7e5d467fc00e0506c44ac86fbfca02"}, + {file = "typing_extensions-4.3.0.tar.gz", hash = "sha256:e6d2677a32f47fc7eb2795db1dd15c1f34eff616bcaf2cfb5e997f854fa1c4a6"}, ] urllib3 = [ {file = "urllib3-1.26.9-py2.py3-none-any.whl", hash = "sha256:44ece4d53fb1706f667c9bd1c648f5469a2ec925fcf3a776667042d645472c14"}, diff --git a/iso/empanadas/prep-azure.sh b/iso/empanadas/prep-azure.sh new file mode 100755 index 0000000..4f45658 --- /dev/null +++ b/iso/empanadas/prep-azure.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +usage() { + cat << EOF +$0: prep raw image for azure + +usage: $0 raw_image + +Description: Takes a raw image and calculates the closest whole-MegaByte, +resizing a copy of the raw image, and returning the path to the resize 'vpc' +image (a .vhd file to upload) + +Dumps VHD in \$PWD by default. Override with ``OUTDIR=/path/to/outdir`` + +Don't try to compress it. +EOF +} + +log() { + local level="$1"; shift + local msg="$@" + local out=$([ "$level" == "error" ] && echo 2 || echo 1) + printf "[%s] %s: %s\n" "$(date '+%Y-%m-%d %H:%M:%S')" "${level}" "${msg}" >&${out} + if [[ "${level}" == "error" ]]; then + exit + fi +} + +MB=$((1024*1024)) # for calculations - 1048576 bytes + +if ! command -v qemu-img 2>&1 >/dev/null; then + log error "Need qemu-img."; + usage + exit +fi + +rawdisk="$1" + +if [[ -z "$rawdisk" ]]; then + usage + log error "need path to a raw image to prep" +fi + +outdir="${2:-${PWD}}" + +size=$(qemu-img info -f raw --output json "${rawdisk}" | gawk 'match($0, /"virtual-size": ([0-9]+),/, val) {print val[1]}') + +rounded_size=$(((($size+$MB-1)/$MB)*$MB)) # size (in bytes) + 1MB, less one, and rounded. 
+ +outfilename=$(basename ${rawdisk//body/vhd}) +outfile="${outdir}/${outfilename}" +qemu-img resize -f raw "${rawdisk}" "${rounded_size}" || log error "failed to resize" +qemu-img convert -f raw -o subformat=fixed,force_size -O vpc "${rawdisk}" "${outfile}" || log error "failed to convert to VHD format" + +echo "${outfile}" diff --git a/iso/empanadas/pyproject.toml b/iso/empanadas/pyproject.toml index a43a91d..20164a4 100644 --- a/iso/empanadas/pyproject.toml +++ b/iso/empanadas/pyproject.toml @@ -1,8 +1,8 @@ [tool.poetry] name = "empanadas" -version = "0.1.0" +version = "0.3.0" description = "hand crafted ISOs with love and spice" -authors = ["Louis Abel ", "Neil Hanlon "] +authors = ["Louis Abel ", "Neil Hanlon "] [tool.poetry.dependencies] python = ">=3.7,<4" @@ -16,6 +16,7 @@ boto3 = "^1.24.12" xmltodict = "^0.13.0" requests = "^2.28.0" kobo = "^0.24.1" +attrs = "^21.4.0" [tool.poetry.dev-dependencies] pytest = "~5" @@ -26,8 +27,13 @@ sync_from_peridot_test = "empanadas.scripts.sync_from_peridot_test:run" sync_sig = "empanadas.scripts.sync_sig:run" build-iso = "empanadas.scripts.build_iso:run" build-iso-extra = "empanadas.scripts.build_iso_extra:run" +build-iso-live = "empanadas.scripts.build_iso_live:run" pull-unpack-tree = "empanadas.scripts.pull_unpack_tree:run" launch-builds = "empanadas.scripts.launch_builds:run" +build-image = "empanadas.scripts.build_image:run" +finalize_compose = "empanadas.scripts.finalize_compose:run" +pull-cloud-image = "empanadas.scripts.pull_cloud_image:run" +generate_compose = "empanadas.scripts.generate_compose:run" [build-system] requires = ["poetry-core>=1.0.0"] diff --git a/iso/empanadas/tests/test_empanadas.py b/iso/empanadas/tests/test_empanadas.py index 4561768..195fc4b 100644 --- a/iso/empanadas/tests/test_empanadas.py +++ b/iso/empanadas/tests/test_empanadas.py @@ -2,4 +2,4 @@ from empanadas import __version__ def test_version(): - assert __version__ == '0.1.0' + assert __version__ == '0.2.0' diff --git a/mangle/common b/mangle/common index c6c28e8..7b1d1a8 100644 --- a/mangle/common +++ b/mangle/common @@ -3,6 +3,7 @@ # To be sourced by scripts as needed # The mirrorlist url -MIRRORLIST_BASE="http://mirrors.rockylinux.org/mirrorlist" +LIST=${LIST:-mirrorlist} +MIRRORLIST_BASE="http://mirrors.rockylinux.org/${LIST}" MIRROR_DISPLAY_COUNT=1 diff --git a/mangle/validate_repos b/mangle/validate_repos index a845cff..d897cf5 100755 --- a/mangle/validate_repos +++ b/mangle/validate_repos @@ -1,11 +1,14 @@ #!/usr/bin/env bash +### +# Use RLVER=9 for rocky 9 + # Source mangle vars # shellcheck source=./common disable=SC1091,1090 -source "$(dirname "$0")/common" +source "$(dirname "${BASH_SOURCE[0]}")/common" # Source sync / migrate vars for repository information # shellcheck source=../sync/common disable=SC1091,1090 -source "$(dirname "$0")/../sync/common" +source "$(dirname "${BASH_SOURCE[0]}")/../sync/common" # How many ARG1=${1} @@ -30,12 +33,16 @@ cleanup_repo () { repo="${repo^^}" fi - # Everything has an 8 appended to it - repo="${repo}-8" + # Append the major version from sync/common to support 8 and 9 + repo="${repo}-${MAJOR}" return 0 } -for repo in "${ALL_REPOS[@]}"; do +# Sort the array +IFS=$'\n' sorted=($(sort <<<"${ALL_REPOS[*]}")) +unset IFS + +for repo in "${sorted[@]}"; do # Business logic must be done, sometimes... 
cleanup_repo "${repo}" @@ -52,8 +59,8 @@ for repo in "${ALL_REPOS[@]}"; do result=$(curl -s "${MIRRORLIST_BASE}?repo=${repo}&arch=${arch}&time&country=global") print_result - # x86 and a64 have 'debug' types, as well - if [[ "${arch}" =~ ^(x86_|aarch)64$ ]]; then + # x86 and a64 have 'debug' types, as well ("arch" != "source") + if [[ "${arch}" =~ ^(x86_|aarch)64|(s390x|ppc64le)$ ]]; then result=$(curl -s "${MIRRORLIST_BASE}?repo=${repo}-debug&arch=${arch}&time&country=global") print_result fi diff --git a/sync/common b/sync/common index 0141d01..be3c832 100644 --- a/sync/common +++ b/sync/common @@ -32,7 +32,7 @@ ARCHES=(x86_64 aarch64) # Source Major common # Override: Not Allowed -test -f "$(dirname "$0")/common_${RLVER}" && source "$(dirname "$0")/common_${RLVER}" +test -f "$(dirname "${BASH_SOURCE[0]}")/common_${RLVER}" && source "$(dirname "${BASH_SOURCE[0]}")/common_${RLVER}" if [ "$?" -ne 0 ]; then echo "Could not source common_${RLVER}" exit 1 diff --git a/sync/common_9 b/sync/common_9 index 8b1cbb8..777ae46 100644 --- a/sync/common_9 +++ b/sync/common_9 @@ -9,7 +9,7 @@ MAJOR="${REVISION:0:1}" MINOR="${REVISION:2:1}" # comment or blank if needed -APPEND_TO_DIR="-RC1" +APPEND_TO_DIR="-RC2" STAGING_ROOT="/mnt/repos-staging" PRODUCTION_ROOT="/mnt/repos-production" diff --git a/sync/gen-torrents.sh b/sync/gen-torrents.sh index c80c90d..dc1486b 100755 --- a/sync/gen-torrents.sh +++ b/sync/gen-torrents.sh @@ -3,7 +3,7 @@ # Source common variables # shellcheck disable=SC2046,1091,1090 -source "$(dirname "$0")/common" +source "$(dirname "${BASH_SOURCE[0]}")/common" NAME=gen-torrents diff --git a/sync/sync-to-prod-9.sh b/sync/sync-to-prod-9.sh new file mode 100644 index 0000000..1c90de1 --- /dev/null +++ b/sync/sync-to-prod-9.sh @@ -0,0 +1,46 @@ +#!/bin/bash +# Syncs everything from staging to production + +# Source common variables +# shellcheck disable=SC2046,1091,1090 +source "$(dirname "$0")/common" + +REV=${REVISION}${APPEND_TO_DIR} + +cd "${STAGING_ROOT}/${CATEGORY_STUB}/${REV}" || { echo "Failed to change directory"; ret_val=1; exit 1; } +ret_val=$? + +if [ $ret_val -eq "0" ]; then + TARGET="${PRODUCTION_ROOT}/${CATEGORY_STUB}/${REV:0:3}" + mkdir -p "${TARGET}" + echo "Syncing ${REVISION}" + sudo -l && time fpsync -o '-av --numeric-ids --no-compress --chown=10004:10005' -n 24 -t /mnt/compose/partitions "${STAGING_ROOT}/${CATEGORY_STUB}/${REV}/" "${TARGET}/" + + # Full file list update for production root + cd "${PRODUCTION_ROOT}/" || { echo "Failed to change directory"; exit 1; } + echo "Getting a full file list for the root dir" + find . > fullfilelist + if [[ -f /usr/local/bin/create-filelist ]]; then + # We're already here, but Justin Case wanted this + cd "${PRODUCTION_ROOT}/" || { echo "Failed to change directory"; exit 1; } + /bin/cp fullfiletimelist-rocky fullfiletimelist-rocky-old + /usr/local/bin/create-filelist > fullfiletimelist-rocky + cp fullfiletimelist-rocky fullfiletimelist + fi + # Full file list update for rocky linux itself + cd "${PRODUCTION_ROOT}/${CATEGORY_STUB}/" || { echo "Failed to change directory"; exit 1; } + # Hardlink everything except xml files + echo "Hard linking" + hardlink -x '.*\.xml.*' "${REVISION}" + echo "Getting a full file list for the rocky dir" + find . 
> fullfilelist + if [[ -f /usr/local/bin/create-filelist ]]; then + # We're already here, but Justin Case wanted this + cd "${PRODUCTION_ROOT}/${CATEGORY_STUB}/" || { echo "Failed to change directory"; exit 1; } + /bin/cp fullfiletimelist-rocky fullfiletimelist-rocky-old + /usr/local/bin/create-filelist > fullfiletimelist-rocky + cp fullfiletimelist-rocky fullfiletimelist + fi + chown 10004:10005 fullfilelist fullfiletimelist-rocky fullfiletimelist +fi + diff --git a/sync/sync-to-prod.sh b/sync/sync-to-prod.sh index 76045c7..63b0048 100644 --- a/sync/sync-to-prod.sh +++ b/sync/sync-to-prod.sh @@ -22,7 +22,27 @@ if [ $ret_val -eq "0" ]; then sudo -l && find ** -maxdepth 0 -type l | parallel --will-cite -j 18 sudo rsync -av --chown=10004:10005 --progress --relative --human-readable \ {} "${TARGET}" - # Full file list update + # Temporary until empanadas has this support + if [ -f "COMPOSE_ID" ]; then + cp COMPOSE_ID "${TARGET}" + chown 10004:10005 "${TARGET}/COMPOSE_ID" + fi + + if [ -d "metadata" ]; then + rsync -av --chown=10004:10005 --progress --relative --human-readable metadata "${TARGET}" + fi + + # Full file list update for production root + cd "${PRODUCTION_ROOT}/" || { echo "Failed to change directory"; exit 1; } + find . > fullfilelist + if [[ -f /usr/local/bin/create-filelist ]]; then + # We're already here, but Justin Case wanted this + cd "${PRODUCTION_ROOT}/" || { echo "Failed to change directory"; exit 1; } + /bin/cp fullfiletimelist-rocky fullfiletimelist-rocky-old + /usr/local/bin/create-filelist > fullfiletimelist-rocky + cp fullfiletimelist-rocky fullfiletimelist + fi + # Full file list update for rocky linux itself cd "${PRODUCTION_ROOT}/${CATEGORY_STUB}/" || { echo "Failed to change directory"; exit 1; } # Hardlink everything except xml files hardlink -x '.*\.xml.*' "${REVISION}" diff --git a/sync/sync-to-staging-9.sh b/sync/sync-to-staging-9.sh new file mode 100644 index 0000000..a5d59df --- /dev/null +++ b/sync/sync-to-staging-9.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +# Source common variables +# shellcheck disable=SC2046,1091,1090 +source "$(dirname "$0")/common" + +if [[ $# -eq 0 ]]; then + echo "You must specify a short name." + exit 1 +fi + +# Major Version (eg, 9) +MAJ=${RLVER} +# Short name (eg, NFV, extras, Rocky, gluster9) +SHORT=${1} +PROFILE=${2} + +cd "/mnt/compose/${MAJ}/latest-${SHORT}-${MAJ}${PROFILE}/compose" || { echo "Failed to change directory"; ret_val=1; exit 1; } +ret_val=$? + +if [ $ret_val -eq "0" ]; then + TARGET="${STAGING_ROOT}/${CATEGORY_STUB}/${REV}" + mkdir -p "${TARGET}" + # disabling because none of our files should be starting with dashes. If they + are, something is *seriously* wrong here.
+ # shellcheck disable=SC2035 + #sudo -l && find **/* -maxdepth 0 -type d | parallel --will-cite -j 18 sudo rsync -av --chown=10004:10005 --progress --relative --human-readable \ + # {} "${TARGET}" + sudo -l && time fpsync -o '-av --numeric-ids --no-compress --chown=10004:10005' -n 24 -t /mnt/compose/partitions "/mnt/compose/${MAJ}/latest-${SHORT}-${MAJ}${PROFILE}/compose/" "${TARGET}/" + + # This is temporary until we implement rsync into empanadas + #if [ -f "COMPOSE_ID" ]; then + # cp COMPOSE_ID "${TARGET}" + # chown 10004:10005 "${TARGET}/COMPOSE_ID" + #fi + + #if [ -d "metadata" ]; then + # rsync -av --chown=10004:10005 --progress --relative --human-readable metadata "${TARGET}" + #fi +fi diff --git a/sync/sync-to-staging.sh b/sync/sync-to-staging.sh index 1e764c8..d256fd5 100644 --- a/sync/sync-to-staging.sh +++ b/sync/sync-to-staging.sh @@ -25,4 +25,14 @@ if [ $ret_val -eq "0" ]; then # shellcheck disable=SC2035 sudo -l && find **/* -maxdepth 0 -type d | parallel --will-cite -j 18 sudo rsync -av --chown=10004:10005 --progress --relative --human-readable \ {} "${TARGET}" + + # This is temporary until we implement rsync into empanadas + if [ -f "COMPOSE_ID" ]; then + cp COMPOSE_ID "${TARGET}" + chown 10004:10005 "${TARGET}/COMPOSE_ID" + fi + + if [ -d "metadata" ]; then + rsync -av --chown=10004:10005 --progress --relative --human-readable metadata "${TARGET}" + fi fi
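+
+# Illustrative usage of the staging/production pair (hypothetical short name and
+# paths, shown only as a sketch; the real values come from sync/common and the
+# compose layout on the host):
+#   RLVER=8 ./sync-to-staging.sh Rocky   # stage the latest compose for the short name
+#   RLVER=8 ./sync-to-prod.sh            # then promote the staged content to production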