Merge branch 'devel' into 'main'

chore: merge devel->main

See merge request release-engineering/public/toolkit!43
Louis Abel 2022-06-09 23:14:43 +00:00
commit 58248b1e70
41 changed files with 2168 additions and 43 deletions

@@ -3,7 +3,39 @@ sig-core-toolkit
Release Engineering toolkit for repeatable operations or functionality testing.
Currently mirrored at our [github](https://github.com/rocky-linux),
[Rocky Linux Git Service](https://git.rockylinux.org), and the
[RESF Git Service](https://git.resf.org). Changes either occur at the Rocky
Linux Git Service or RESF Git Service.
What does this have?
--------------------
* analyze -> Analysis utilities (such as download stats)
* chat -> mattermost related utilities
* func -> (mostly defunct) testing scripts and tools to test base functionality
* iso -> ISO related utilities
* live -> Live image related utilities
* mangle -> Manglers and other misc stuff
* sync -> Sync tools, primarily for Rocky Linux 8
How can I help?
---------------
Fork this repository and open a PR with your changes. Keep these things in mind
when you make changes:
* Have pre-commit installed
* Have shellcheck installed
* Shell Scripts: These must pass a shellcheck test!
* Python scripts: Try your best to follow PEP8 guidelines
Your PR should be against the devel branch at all times. PRs against the main
branch will be closed. An example of running the checks locally follows.
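A quick sketch, assuming `pre-commit` and `shellcheck` are already installed
on your machine:

```
# set up the git hooks once per clone
pre-commit install
# run all configured hooks against the tree before pushing
pre-commit run --all-files
# lint any shell script you touched; it must come back clean
shellcheck path/to/your-script.sh
```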
Will some of this be moved into separate repositories?
------------------------------------------------------
 There may be some things that will be moved to its own repository in the near
-future. This repository may be mirrored to github. Currently changes in the
-[Rocky Linux](https://git.rockylinux.org) are allowed - Changes in the github
-repository will not be synced.
+future. From a SIG/Core standpoint, we believe a good chunk of this should stay
+here as it makes it easier for us to maintain and manage.

0 analyze/common Normal file

0 analyze/common_8 Normal file

@@ -0,0 +1,81 @@
#!/usr/bin/env bash
log () {
printf "[LOG] %s\n" "$1"
}
log "Begin generation"
log "Generating intermediary ISO logfile"
# Generate intermediary iso log
if [[ ! -f intermediary_isos.log ]]; then
# awk regexes have no lazy quantifier, so the old .*? behaved as .*
awk '/Rocky-8.4-(aarch64|x86_64)-.*\.iso/ {if ($12==200 && $4!="3.134.114.30") print $0}' *.log **/*.log > intermediary_isos.log
else
log "Skipped ISO intermediary"
fi
log "Done"
log "Generating intermediary mirrorlist stats"
# Generate intermediary mirrorlist stats
if [[ ! -f intermediary_mirrorlist.log || ! -f mirrorlist_parsed ]]; then
awk '/GET \/mirrorlist/ { if ($12==200 && $4!="3.134.114.30") print $0}' *.log **/*.log > intermediary_mirrorlist.log
awk '{ date=substr($7,2,11); ip=$4; path=$10; match(path, /arch=(x86_64|aarch64|source)/, arch_matches); match(path,/repo=([a-zA-Z0-9-]+)/, repo_matches); arch=arch_matches[1]; repository=repo_matches[1] } { print date, arch, repository, ip }' intermediary_mirrorlist.log > mirrorlist_parsed
else
log "Skipped mirrorlist intermediary"
fi
log "Done"
log "Count unique and nonunique requests"
# "Unique" count by IP addresses
totaldls_u=$(awk '{print $4}' intermediary_isos.log | sort | uniq | wc -l)
# Total count
totaldls=$(wc -l intermediary_isos.log | awk '{print $1}')
log "Generate download stats for every date"
# Get all the dates
declare -a dates
dates=( $(awk '{print substr($7,2,11)}' intermediary_isos.log | sort | uniq) )
download_res=""
for date in "${dates[@]}"; do
total_count=$(grep "${date}" intermediary_isos.log | wc -l)
download_res="${download_res}${date} ${total_count}\n"
done
log "Done"
log "Generate mirrorlist stats for every date"
dates=( $(awk '{print $1}' mirrorlist_parsed | sort | uniq) )
#repositories=( $(awk '{print $3}' mirrorlist_parsed | sort | uniq ) )
repositories=({AppStream,BaseOS,PowerTools,ResilientStorage,Minimal,Devel,HighAvailability,extras,rockyplus,NFV}-{8,8-source})
mirror_res="Date Total x86_64 aarch64 source ${repositories[@]}\n"
for date in "${dates[@]}"; do
today=$(grep "${date}" mirrorlist_parsed)
total_count=$(echo "${today}" | wc -l)
arches=($(echo "${today}" | awk 'BEGIN {source=0; x86=0; a64=0; }{ if ($2=="x86_64") { x86+=1 } else if ($2=="aarch64") { a64+=1 } else if ($2=="source") { source+=1 } } END { print x86, a64, source }'))
declare -A repos
for repo in "${repositories[@]}"; do
  repos["${repo}"]=$(echo "${today}" | grep -c "${repo}")
done
# Emit the per-repo counts in header order; "${repos[@]}" would expand in
# bash's arbitrary hash order and misalign the columns.
repo_counts=""
for repo in "${repositories[@]}"; do
  repo_counts="${repo_counts} ${repos[$repo]}"
done
mirror_res="${mirror_res}${date} ${total_count} ${arches[@]}${repo_counts}\n"
done
log "Done"
log "End processing. Begin output"
# Output shit
echo -e "Download Information\n------------------"
echo -e "Total: ${totaldls}\nUnique: ${totaldls_u}\n\n\n"
echo -e "Downloads by date\n------------------"
echo -e "${download_res}" | column -t
echo -e "Mirror requests by date\n------------------"
# Sort by date
echo -e "${mirror_res}" | column -t | sort -t'/' -Mk2

2 chat/.gitignore vendored Normal file

@@ -0,0 +1,2 @@
.envrc

1 chat/common Normal file

@@ -0,0 +1 @@
SERVICE_ID=7mRT77Q5CL2BNpM5zxse2v

@@ -0,0 +1,25 @@
#!/bin/bash
version=$1
if [[ -z "$1" ]]; then
printf "usage: $0 mmversion\n"; exit 2
fi
#tmpdir=$(mktemp -d)
tmpdir=/tmp   # no trailing slash; the paths below add their own
outfile="${tmpdir}/mattermost-${version}.tar.gz"
if [[ ! -f "${outfile}" ]]; then
curl -Lo "$outfile" "https://releases.mattermost.com/${version}/mattermost-${version}-linux-amd64.tar.gz" || exit 1
fi
outdir="${tmpdir}/mattermost-${version}/"
if [[ ! -d "${outdir}" ]]; then
mkdir "${outdir}"
fi
tar --strip-components 2 -C "${outdir}" -xvf "$outfile" mattermost/client
echo "Wrote to ${outdir}"

@@ -4,7 +4,7 @@ r_log "kernel" "Testing the kernel keyring (GPG)"
 ARCH=$(uname -m)
 #KERNEL=$(uname -r | cut -d'-' -f1)
-if [ "${ARCH}" == "aarch64" ]; then
+if [ "${ARCH}" == "aarch64" ] || [ "${ARCH}" == "ppc64le" ] || [ "${ARCH}" == "s390x" ]; then
   r_log "kernel" "Architecture not tested: $ARCH"
   exit 0
 fi

@@ -4,7 +4,7 @@ r_log "release" "Ensure the release is actually where it should be"
 case $RELEASE_NAME in
   rocky)
     r_log "rocky release" "Base Repo Check"
-    grep -q 'name=Rocky' /etc/yum.repos.d/Rocky*-Base*.repo
+    grep -q 'name=Rocky' /etc/yum.repos.d/*ocky*.repo
     r_checkExitStatus $?
     r_log "rocky release" "Check /etc/rocky-release"
     grep -q "Rocky" /etc/rocky-release

@@ -1,6 +1,11 @@
 #!/bin/bash
 r_log "rocky" "Check the GPG keys"
-file /etc/pki/rpm-gpg/RPM-GPG-KEY-rockyofficial > /dev/null 2>&1 && \
-file /etc/pki/rpm-gpg/RPM-GPG-KEY-rockytesting > /dev/null 2>&1
+if [ "$RL_VER" -eq 8 ]; then
+  file /etc/pki/rpm-gpg/RPM-GPG-KEY-rockyofficial > /dev/null 2>&1 && \
+  file /etc/pki/rpm-gpg/RPM-GPG-KEY-rockytesting > /dev/null 2>&1
+else
+  file "/etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-${RL_VER}" > /dev/null 2>&1 && \
+  file "/etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-${RL_VER}-Testing" > /dev/null 2>&1
+fi
 r_checkExitStatus $?

@@ -1,25 +1 @@
-# Place packages that were modified for debranding, regardless if their
-# release tag was modified.
-#
-# The format is this:
-# -> Rocky Version ($RL_VER, so major version)
-# -> Package Name
-# X|name
-ALL|abrt
-ALL|anaconda
-8|cloud-init
-8|cockpit
-ALL|dhcp
-ALL|firefox
-ALL|fwupdate
-ALL|httpd
-ALL|initial-setup
-ALL|kernel
-ALL|libreport
-ALL|nginx
-ALL|PackageKit
-ALL|redhat-rpm-config
-ALL|shim
-ALL|sos
-ALL|subscription-manager
-ALL|thunderbird
+# Packages that have been debranded will be in the rocky/metadata git repository

4 iso/py/.gitignore vendored Normal file

@@ -0,0 +1,4 @@
__pycache__/
*.py[cod]
*$py.class
*.so

56 iso/py/README.md Normal file

@@ -0,0 +1,56 @@
# iso
## TODO
Verbose mode should exist to output everything that's being called or run.
There should be additional logging regardless, not just to stdout, but also to a file.
## scripts
* sync-variant-pungi
* sync-variant-peridot
* sync-from-pungi
* sync-from-peridot
* sync-sig
* build-all-iso
* sign-repos-only
## wrappers
* lorax-generators
* sync-generators
## rules
### imports
When making a script, you *must* import common. This is insanely bad practice,
but we would prefer if we started out this way:
```
from common import *
import argparse
```
Whatever is imported in common will effectively be imported in your scripts as
well, but there is nothing stopping you from defining them again, even out of
habit. `argparse` is there because you better have a very, *very* good reason
to not be writing scripts that are major version specific.
If you are writing something that could be arch specific based on the major
version (which is likely), make sure to import the util module and use its
arch checker appropriately. A small (but weak) example:
```
from util import Checks
rlvars = rldict['9']
r = Checks(rlvars, config['arch'])
r.check_valid_arch()
```
### script names and permissions
* Callable scripts should *not* end in `.py`
* They should have at least `775` or `+x` permissions (example below)
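For example, using one of the executable scripts added in this commit:

```
chmod 775 sync-from-peridot   # or: chmod +x sync-from-peridot
```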

7 iso/py/build-iso Normal file

@@ -0,0 +1,7 @@
#!/usr/bin/env python3
# builds ISO's
import argparse
from common import *
from util import Checks
from util import IsoBuild

56 iso/py/common.py Normal file

@@ -0,0 +1,56 @@
# All imports are here
import os
import platform
import time
import glob
import rpm
import yaml
import logging
# These are a bunch of colors we may use in terminal output
class Color:
RED = '\033[91m'
GREEN = '\033[92m'
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
YELLOW = '\033[93m'
UNDERLINE = '\033[4m'
BOLD = '\033[1m'
END = '\033[0m'
# vars and additional checks
rldict = {}
config = {
"rlmacro": rpm.expandMacro('%rhel'),
"arch": platform.machine(),
"date_stamp": time.strftime("%Y%m%d.%H%M%S", time.localtime()),
"compose_root": "/mnt/compose",
"staging_root": "/mnt/repos-staging",
"production_root": "/mnt/repos-production",
"category_stub": "mirror/pub/rocky",
"sig_category_stub": "mirror/pub/sig",
"repo_base_url": "https://yumrepofs.build.resf.org/v1/projects",
"container": "centos:stream9"
}
# Importing the config from yaml
for conf in glob.iglob('configs/*.yaml'):
with open(conf, 'r', encoding="utf-8") as file:
rldict.update(yaml.safe_load(file))
# The system needs to be a RHEL-like system. It cannot be Fedora or SuSE.
#if "%rhel" in config['rlmacro']:
# raise SystemExit(Color.BOLD + 'This is not a RHEL-like system.' + Color.END
# + '\n\nPlease verify you are running on a RHEL-like system that is '
# 'not Fedora nor SuSE. This means that the %rhel macro will be '
# 'defined with a value equal to the version you are targeting. RHEL'
# ' and its derivatives have this set.')
# These will be set in their respective var files
#REVISION = rlvars['revision'] + '-' + rlvars['rclvl']
#rlvars = rldict[rlver]
#rlvars = rldict[rlmacro]
#COMPOSE_ISO_WORKDIR = COMPOSE_ROOT + "work/" + arch + "/" + date_stamp

Binary file not shown.

89 iso/py/configs/el8.yaml Normal file

@@ -0,0 +1,89 @@
---
'8':
revision: '8.6'
rclvl: 'RC2'
allowed_arches:
- x86_64
- aarch64
provide_multilib: False
project_id: ''
required_packages:
- 'lorax'
- 'genisoimage'
- 'isomd5sum'
repo_symlinks:
devel: 'Devel'
NFV: 'nfv'
renames: {}
all_repos:
- 'BaseOS'
- 'AppStream'
- 'PowerTools'
- 'HighAvailability'
- 'ResilientStorage'
- 'RT'
- 'NFV'
- 'extras'
- 'devel'
- 'plus'
- 'rockyrpi'
no_comps_or_groups:
- 'extras'
- 'devel'
- 'plus'
- 'rockyrpi'
comps_or_groups:
- 'BaseOS'
- 'AppStream'
- 'PowerTools'
- 'HighAvailability'
- 'ResilientStorage'
- 'RT'
- 'NFV'
has_modules:
- 'AppStream'
- 'PowerTools'
iso_map:
hosts:
x86_64: ''
aarch64: ''
ppc64le: ''
s390x: ''
images:
- dvd1
- minimal
- boot
repoclosure_map:
arches:
x86_64: '--arch=x86_64 --arch=athlon --arch=i686 --arch=i586 --arch=i486 --arch=i386 --arch=noarch'
aarch64: '--arch=aarch64 --arch=noarch'
ppc64le: '--arch=ppc64le --arch=noarch'
s390x: '--arch=s390x --arch=noarch'
repos:
BaseOS: []
AppStream:
- BaseOS
PowerTools:
- BaseOS
- AppStream
HighAvailability:
- BaseOS
- AppStream
ResilientStorage:
- BaseOS
- AppStream
RT:
- BaseOS
- AppStream
NFV:
- BaseOS
- AppStream
extra_files:
git_repo: 'https://git.rockylinux.org/staging/src/rocky-release.git'
branch: 'r8'
list:
- 'SOURCES/COMMUNITY-CHARTER'
- 'SOURCES/EULA'
- 'SOURCES/LICENSE'
- 'SOURCES/RPM-GPG-KEY-rockyofficial'
...

109 iso/py/configs/el9.yaml Normal file

@@ -0,0 +1,109 @@
---
'9':
revision: '9.0'
rclvl: 'RC1'
allowed_arches:
- x86_64
- aarch64
- ppc64le
- s390x
provide_multilib: True
project_id: '55b17281-bc54-4929-8aca-a8a11d628738'
required_packages:
- 'lorax'
- 'genisoimage'
- 'isomd5sum'
repo_symlinks:
devel: 'Devel'
NFV: 'nfv'
renames:
all: 'nplb'
all_repos:
- 'all'
- 'BaseOS'
- 'AppStream'
- 'CRB'
- 'HighAvailability'
- 'ResilientStorage'
- 'RT'
- 'NFV'
- 'SAP'
- 'SAPHANA'
- 'extras'
- 'devel'
- 'plus'
no_comps_or_groups:
- 'all'
- 'extras'
- 'devel'
- 'plus'
comps_or_groups:
- 'BaseOS'
- 'AppStream'
- 'CRB'
- 'HighAvailability'
- 'ResilientStorage'
- 'RT'
- 'NFV'
- 'SAP'
- 'SAPHANA'
has_modules:
- 'AppStream'
- 'CRB'
iso_map:
hosts:
x86_64: ''
aarch64: ''
ppc64le: ''
s390x: ''
images:
- dvd1
- minimal
- boot
repos:
- 'BaseOS'
- 'AppStream'
repoclosure_map:
arches:
x86_64: '--forcearch=x86_64 --arch=x86_64 --arch=athlon --arch=i686 --arch=i586 --arch=i486 --arch=i386 --arch=noarch'
aarch64: '--forcearch=aarch64 --arch=aarch64 --arch=noarch'
ppc64le: '--forcearch=ppc64le --arch=ppc64le --arch=noarch'
s390x: '--forcearch=s390x --arch=s390x --arch=noarch'
repos:
nplb: []
BaseOS: []
AppStream:
- BaseOS
CRB:
- BaseOS
- AppStream
HighAvailability:
- BaseOS
- AppStream
ResilientStorage:
- BaseOS
- AppStream
RT:
- BaseOS
- AppStream
NFV:
- BaseOS
- AppStream
SAP:
- BaseOS
- AppStream
- HighAvailability
SAPHANA:
- BaseOS
- AppStream
- HighAvailability
extra_files:
git_repo: 'https://git.rockylinux.org/staging/src/rocky-release.git'
branch: 'r9'
list:
- 'SOURCES/COMMUNITY-CHARTER'
- 'SOURCES/EULA'
- 'SOURCES/LICENSE'
- 'SOURCES/RPM-GPG-KEY-Rocky-9'
- 'SOURCES/RPM-GPG-KEY-Rocky-9-Testing'
...

12 iso/py/sig/altarch.yaml Normal file

@@ -0,0 +1,12 @@
---
'8':
rockyrpi:
project_id: ''
additional_dirs:
- 'images'
'9':
rockyrpi:
project_id: ''
additional_dirs:
- 'images'
...

10 iso/py/sig/cloud.yaml Normal file

@@ -0,0 +1,10 @@
---
'8':
cloud-kernel:
project_id: 'f91da90d-5bdb-4cf2-80ea-e07f8dae5a5c'
cloud-common:
project_id: ''
'9':
cloud-common:
project_id: ''
...

12 iso/py/sig/core.yaml Normal file

@@ -0,0 +1,12 @@
---
'8':
core-common:
project_id: ''
core-infra:
project_id: ''
'9':
core-common:
project_id: ''
core-infra:
project_id: ''
...

57 iso/py/sync-from-peridot Executable file

@@ -0,0 +1,57 @@
#!/usr/bin/env python3
# This script can be called to do single syncs or full on syncs.
import argparse
from common import *
from util import Checks
from util import RepoSync
#rlvars = rldict['9']
#r = Checks(rlvars, config['arch'])
#r.check_valid_arch()
# Start up the parser baby
parser = argparse.ArgumentParser(description="Peridot Sync and Compose")
# All of our options
parser.add_argument('--release', type=str, help="Major Release Version", required=True)
parser.add_argument('--repo', type=str, help="Repository name")
parser.add_argument('--arch', type=str, help="Architecture")
parser.add_argument('--ignore-debug', action='store_true')
parser.add_argument('--ignore-source', action='store_true')
parser.add_argument('--repoclosure', action='store_true')
parser.add_argument('--skip-all', action='store_true')
parser.add_argument('--dry-run', action='store_true')
parser.add_argument('--full-run', action='store_true')
parser.add_argument('--no-fail', action='store_true')
# I am aware this is confusing, I want podman to be the default option
parser.add_argument('--simple', action='store_false')
parser.add_argument('--logger', type=str)
# Parse them
results = parser.parse_args()
rlvars = rldict[results.release]
r = Checks(rlvars, config['arch'])
r.check_valid_arch()
# Send them and do whatever I guess
a = RepoSync(
rlvars,
config,
major=results.release,
repo=results.repo,
arch=results.arch,
ignore_debug=results.ignore_debug,
ignore_source=results.ignore_source,
repoclosure=results.repoclosure,
skip_all=results.skip_all,
parallel=results.simple,
dryrun=results.dry_run,
fullrun=results.full_run,
nofail=results.no_fail,
logger=results.logger
)
a.run()
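As a sketch of how this script gets driven (paths and repo choice below are
illustrative, but every flag is defined in the parser above):

```
# sync a single repo/arch pair into the existing latest-Rocky-9 compose,
# skipping debug packages; podman parallel mode is the default
./sync-from-peridot --release 9 --repo BaseOS --arch x86_64 --ignore-debug

# a full run builds the whole tree and ignores --repo (a warning is logged)
./sync-from-peridot --release 9 --full-run
```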

16 iso/py/sync-from-peridot-test Executable file

@@ -0,0 +1,16 @@
#!/usr/bin/env python3
# This is a testing script to ensure the RepoSync class is working as intended.
from common import *
import argparse
from util import Checks
from util import RepoSync
rlvars = rldict['9']
r = Checks(rlvars, config['arch'])
r.check_valid_arch()
#a = RepoSync(rlvars, config, major="9", repo="ResilientStorage", parallel=True, ignore_debug=False, ignore_source=False)
a = RepoSync(rlvars, config, major="9", repo="ResilientStorage", parallel=True, ignore_debug=False, ignore_source=False)
a.run()

9 iso/py/test.py Normal file

@@ -0,0 +1,9 @@
#!/usr/bin/env python3
from common import *
import argparse
from util import Checks
rlvars = rldict['9']
r = Checks(rlvars, config['arch'])
r.check_valid_arch()

38 iso/py/test2.py Normal file

@@ -0,0 +1,38 @@
#!/usr/bin/env python3
import desert
from attrs import define, field
import typing as t
CONFIG = {
"8": {
"allowed_arches": ["x86_64", "aarch64"],
"repo_url_list": ["some", "shit", "here"]
},
"9": {
"allowed_arches": ["x86_64", "aarch64", "ppc64le", "s390x"],
"repo_url_list": ["some", "other", "shit", "here"]
}
}
@define
class VersionConfig:
allowed_arches: t.List[str] = field()
repo_url_list: t.List[str] = field()
@allowed_arches.validator
def check(self, attribute, value):
if not all(v in ["x86_64", "aarch64", "ppc64le", "s390x"] for v in value):
raise ValueError("Architecture list does not match")
def new(version):
schema = desert.schema(VersionConfig)
config = CONFIG[str(version)]
return schema.load(config)
eight = new(8)
nine = new(9)
print(eight)
print(eight.allowed_arches)
print(nine)

21 iso/py/util/__init__.py Normal file

@@ -0,0 +1,21 @@
"""
Imports all of our classes for this local module
"""
from .check import (
Checks,
)
from .dnf_utils import (
RepoSync,
)
from .iso_utils import (
IsoBuild,
LiveBuild
)
__all__ = [
'Checks',
'RepoSync'
]

14 iso/py/util/check.py Normal file

@@ -0,0 +1,14 @@
# Is our arch allowed for this particular release? Some previous releases do
# not support ppc or s390x
from common import Color
class Checks:
"""This class helps check some things"""
def __init__(self, rlvars, arch):
self.arches = rlvars['allowed_arches']
self.arch = arch
def check_valid_arch(self):
if self.arch not in self.arches:
raise SystemExit(Color.BOLD + 'This architecture is not supported.'
+ Color.END + '\n\nEnsure that the architecture you are '
'building for is supported for this compose process.')

912 iso/py/util/dnf_utils.py Normal file

@@ -0,0 +1,912 @@
"""
Syncs yum repos for mirroring and composing.
Louis Abel <label AT rockylinux.org>
"""
#import shutil
import logging
import sys
import os
import os.path
import subprocess
import shlex
import time
import re
#import pipes
from common import Color
#HAS_LIBREPO = True
#try:
# import librepo
#except:
# HAS_LIBREPO = False
class RepoSync:
"""
This helps us do reposync operations for the base system. SIG syncs are a
different class entirely. This is on purpose. Please use the SigRepoSync
class for SIG syncs.
"""
def __init__(
self,
rlvars,
config,
major,
repo=None,
arch=None,
ignore_debug: bool = False,
ignore_source: bool = False,
repoclosure: bool = False,
skip_all: bool = False,
parallel: bool = False,
dryrun: bool = False,
fullrun: bool = False,
nofail: bool = False,
logger=None
):
self.nofail = nofail
self.dryrun = dryrun
self.fullrun = fullrun
self.arch = arch
self.ignore_debug = ignore_debug
self.ignore_source = ignore_source
self.skip_all = skip_all
self.repoclosure = repoclosure
# Enables podman syncing, which should effectively speed up operations
self.parallel = parallel
# Relevant config items
self.major_version = major
self.date_stamp = config['date_stamp']
self.repo_base_url = config['repo_base_url']
self.compose_root = config['compose_root']
self.compose_base = config['compose_root'] + "/" + major
# Relevant major version items
self.revision = rlvars['revision'] + "-" + rlvars['rclvl']
self.arches = rlvars['allowed_arches']
self.project_id = rlvars['project_id']
self.repo_renames = rlvars['renames']
self.repos = rlvars['all_repos']
self.multilib = rlvars['provide_multilib']
self.repo = repo
self.extra_files = rlvars['extra_files']
# each el can have its own designated container to run stuff in,
# otherwise we'll just default to the default config.
self.container = config['container']
if 'container' in rlvars and len(rlvars['container']) > 0:
self.container = rlvars['container']
if 'repoclosure_map' in rlvars and len(rlvars['repoclosure_map']) > 0:
self.repoclosure_map = rlvars['repoclosure_map']
self.staging_dir = os.path.join(
config['staging_root'],
config['category_stub'],
self.revision
)
self.compose_latest_dir = os.path.join(
config['compose_root'],
major,
"latest-Rocky-{}".format(major)
)
self.compose_latest_sync = os.path.join(
self.compose_latest_dir,
"compose"
)
self.compose_log_dir = os.path.join(
self.compose_latest_dir,
"work/logs"
)
# This is temporary for now.
if logger is None:
self.log = logging.getLogger("reposync")
self.log.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s :: %(name)s :: %(message)s',
'%Y-%m-%d %H:%M:%S'
)
handler.setFormatter(formatter)
self.log.addHandler(handler)
self.log.info('reposync init')
self.log.info(self.revision)
self.dnf_config = self.generate_conf()
def run(self):
"""
This must be called to perform the sync. This will run through, create
the configuration file as required, and try to do a sync of every repo
applicable or the repo actually specified. If self.repo is None, it
will be assumed all repos are synced as dictated by rlvars.
* Dry runs only create initial directories and structure
* Full runs sync everything from the top and setup structure,
including creating a symlink to latest-Rocky-X
* self.repo is ignored during full runs (noted in stdout)
* self.arch being set will force only that arch to sync
"""
if self.fullrun and self.repo:
self.log.error('WARNING: repo ignored when doing a full sync')
if self.fullrun and self.dryrun:
self.log.error('A full and dry run is currently not supported.')
raise SystemExit('\nA full and dry run is currently not supported.')
# This should create the initial compose dir and set the path.
# Otherwise, just use the latest link.
if self.fullrun:
generated_dir = self.generate_compose_dirs()
work_root = os.path.join(
generated_dir,
'work'
)
sync_root = os.path.join(
generated_dir,
'compose'
)
else:
# Put in a verification here.
work_root = os.path.join(
self.compose_latest_dir,
'work'
)
sync_root = self.compose_latest_sync
# Verify if the link even exists
if not os.path.exists(self.compose_latest_dir):
self.log.error('!! Latest compose link is broken or does not exist: %s' % self.compose_latest_dir)
self.log.error('!! Please perform a full run if you have not done so.')
raise SystemExit()
log_root = os.path.join(
work_root,
"logs"
)
if self.dryrun:
self.log.error('Dry Runs are not supported just yet. Sorry!')
raise SystemExit()
self.sync(self.repo, sync_root, work_root, log_root, self.arch)
if self.fullrun:
self.deploy_extra_files()
self.symlink_to_latest()
if self.repoclosure:
self.repoclosure_work(sync_root, work_root, log_root)
self.log.info('Compose repo directory: %s' % sync_root)
self.log.info('Compose logs: %s' % log_root)
self.log.info('Compose completed.')
def sync(self, repo, sync_root, work_root, log_root, arch=None):
"""
Calls out syncing of the repos. We generally sync each component of a
repo:
* each architecture
* each architecture debug
* each source
If parallel is true, we will run in podman.
"""
if self.parallel:
self.podman_sync(repo, sync_root, work_root, log_root, arch)
else:
self.dnf_sync(repo, sync_root, work_root, arch)
def dnf_sync(self, repo, sync_root, work_root, arch):
"""
This is for normal dnf syncs. This is very slow.
"""
cmd = self.reposync_cmd()
sync_single_arch = False
arches_to_sync = self.arches
if arch:
sync_single_arch = True
arches_to_sync = [arch]
sync_single_repo = False
repos_to_sync = self.repos
if repo and not self.fullrun:
sync_single_repo = True
repos_to_sync = [repo]
# dnf reposync --download-metadata \
# --repoid fedora -p /tmp/test \
# --forcearch aarch64 --norepopath
self.log.info(
Color.BOLD + '!! WARNING !! ' + Color.END + 'You are performing a '
'local reposync, which may incur delays in your compose.'
)
self.log.info(
Color.BOLD + '!! WARNING !! ' + Color.END + 'Standard dnf reposync '
'is not really a supported method. Only use this for general testing.'
)
if self.fullrun:
self.log.info(
Color.BOLD + '!! WARNING !! ' + Color.END + 'This is a full '
'run! This will take a LONG TIME.'
)
for r in repos_to_sync:
for a in arches_to_sync:
repo_name = r
if r in self.repo_renames:
repo_name = self.repo_renames[r]
os_sync_path = os.path.join(
sync_root,
repo_name,
a,
'os'
)
debug_sync_path = os.path.join(
sync_root,
repo_name,
a,
'debug/tree'
)
sync_cmd = "{} -c {} --download-metadata --repoid={} -p {} --forcearch {} --norepopath".format(
cmd,
self.dnf_config,
r,
os_sync_path,
a
)
debug_sync_cmd = "{} -c {} --download-metadata --repoid={}-debug -p {} --forcearch {} --norepopath".format(
cmd,
self.dnf_config,
r,
debug_sync_path,
a
)
self.log.info('Syncing {} {}'.format(r, a))
#self.log.info(sync_cmd)
# Try to figure out where to send the actual output of this...
# Also consider on running a try/except here? Basically if
# something happens (like a repo doesn't exist for some arch,
# eg RT for aarch64), make a note of it somehow (but don't
# break the entire sync). As it stands with this
# implementation, if something fails, it just continues on.
process = subprocess.call(
shlex.split(sync_cmd),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
if not self.ignore_debug:
self.log.info('Syncing {} {} (debug)'.format(r, a))
process_debug = subprocess.call(
shlex.split(debug_sync_cmd),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
# This is an ugly hack. We don't want to list i686 as an
# available arch for an el because that would imply each repo
# gets an i686 repo. However, being able to set "arch" to i686
# should be possible, thus avoiding this block altogether.
# "available_arches" in the configuration isn't meant to be a
# restriction here, but mainly a restriction in the lorax
# process (which isn't done here)
if 'x86_64' in a and 'all' in r and self.multilib:
i686_os_sync_path = os.path.join(
sync_root,
repo_name,
a,
'os'
)
i686_sync_cmd = "{} -c {} --download-metadata --repoid={} -p {} --forcearch {} --norepopath".format(
cmd,
self.dnf_config,
r,
i686_os_sync_path,
'i686'
)
self.log.info('Syncing {} {}'.format(r, 'i686'))
process_i686 = subprocess.call(
shlex.split(i686_sync_cmd),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
if not self.ignore_source:
source_sync_path = os.path.join(
sync_root,
repo_name,
'source/tree'
)
source_sync_cmd = "{} -c {} --download-metadata --repoid={}-source -p {} --norepopath".format(
cmd,
self.dnf_config,
r,
source_sync_path
)
self.log.info('Syncing {} source'.format(r))
process_source = subprocess.call(
shlex.split(source_sync_cmd),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
self.log.info('Syncing complete')
def podman_sync(self, repo, sync_root, work_root, log_root, arch):
"""
This is for podman syncs
Create sync_root/work/entries
Generate scripts as needed into dir
Each container runs their own script
wait till all is finished
"""
cmd = self.podman_cmd()
contrunlist = []
bad_exit_list = []
self.log.info('Generating container entries')
entries_dir = os.path.join(work_root, "entries")
if not os.path.exists(entries_dir):
os.makedirs(entries_dir, exist_ok=True)
# yeah, I know.
if not os.path.exists(log_root):
os.makedirs(log_root, exist_ok=True)
sync_single_arch = False
arches_to_sync = self.arches
if arch:
sync_single_arch = True
arches_to_sync = [arch]
sync_single_repo = False
repos_to_sync = self.repos
if repo and not self.fullrun:
sync_single_repo = True
repos_to_sync = [repo]
for r in repos_to_sync:
entry_name_list = []
repo_name = r
arch_sync = arches_to_sync.copy()
if r in self.repo_renames:
repo_name = self.repo_renames[r]
if 'all' in r and 'x86_64' in arches_to_sync and self.multilib:
arch_sync.append('i686')
# There should be a check here that if it's "all" and multilib
# is on, i686 should get synced too.
for a in arch_sync:
entry_name = '{}-{}'.format(r, a)
debug_entry_name = '{}-debug-{}'.format(r, a)
entry_name_list.append(entry_name)
if not self.ignore_debug:
entry_name_list.append(debug_entry_name)
entry_point_sh = os.path.join(
entries_dir,
entry_name
)
debug_entry_point_sh = os.path.join(
entries_dir,
debug_entry_name
)
os_sync_path = os.path.join(
sync_root,
repo_name,
a,
'os'
)
debug_sync_path = os.path.join(
sync_root,
repo_name,
a,
'debug/tree'
)
arch_force_cp = ("/usr/bin/sed 's|$basearch|{}|g' {} > {}.{}".format(
a,
self.dnf_config,
self.dnf_config,
a
))
sync_cmd = ("/usr/bin/dnf reposync -c {}.{} --download-metadata "
"--repoid={} -p {} --forcearch {} --norepopath 2>&1 "
"| tee -a {}/{}-{}-{}.log").format(
self.dnf_config,
a,
r,
os_sync_path,
a,
log_root,
repo_name,
a,
self.date_stamp
)
debug_sync_cmd = ("/usr/bin/dnf reposync -c {}.{} "
"--download-metadata --repoid={}-debug -p {} --forcearch {} "
"--norepopath 2>&1 | tee -a {}/{}-{}-debug-{}.log").format(
self.dnf_config,
a,
r,
debug_sync_path,
a,
log_root,
repo_name,
a,
self.date_stamp
)
entry_point_open = open(entry_point_sh, "w+")
debug_entry_point_open = open(debug_entry_point_sh, "w+")
entry_point_open.write('#!/bin/bash\n')
entry_point_open.write('set -o pipefail\n')
entry_point_open.write(arch_force_cp + '\n')
entry_point_open.write('/usr/bin/dnf install dnf-plugins-core -y\n')
entry_point_open.write(sync_cmd + '\n')
debug_entry_point_open.write('#!/bin/bash\n')
debug_entry_point_open.write('set -o pipefail\n')
debug_entry_point_open.write(arch_force_cp + '\n')
debug_entry_point_open.write('/usr/bin/dnf install dnf-plugins-core -y\n')
debug_entry_point_open.write(debug_sync_cmd + '\n')
entry_point_open.close()
debug_entry_point_open.close()
os.chmod(entry_point_sh, 0o755)
os.chmod(debug_entry_point_sh, 0o755)
# Are we ignoring sources?
if not self.ignore_source:
source_entry_name = '{}-source'.format(r)
entry_name_list.append(source_entry_name)
source_entry_point_sh = os.path.join(
entries_dir,
source_entry_name
)
source_sync_path = os.path.join(
sync_root,
repo_name,
'source/tree'
)
source_sync_cmd = ("/usr/bin/dnf reposync -c {} "
"--download-metadata --repoid={}-source -p {} "
"--norepopath | tee -a {}/{}-source-{}.log").format(
self.dnf_config,
r,
source_sync_path,
log_root,
repo_name,
self.date_stamp
)
source_entry_point_open = open(source_entry_point_sh, "w+")
source_entry_point_open.write('#!/bin/bash\n')
source_entry_point_open.write('set -o pipefail\n')
source_entry_point_open.write('/usr/bin/dnf install dnf-plugins-core -y\n')
source_entry_point_open.write(source_sync_cmd + '\n')
source_entry_point_open.close()
os.chmod(source_entry_point_sh, 0o755)
# Spawn up all podman processes for repo
self.log.info('Starting podman processes for %s ...' % r)
#print(entry_name_list)
for pod in entry_name_list:
podman_cmd_entry = '{} run -d -it -v "{}:{}" -v "{}:{}" -v "{}:{}" --name {} --entrypoint {}/{} {}'.format(
cmd,
self.compose_root,
self.compose_root,
self.dnf_config,
self.dnf_config,
entries_dir,
entries_dir,
pod,
entries_dir,
pod,
self.container
)
#print(podman_cmd_entry)
process = subprocess.call(
shlex.split(podman_cmd_entry),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
join_all_pods = ' '.join(entry_name_list)
time.sleep(3)
self.log.info('Syncing %s ...' % r)
pod_watcher = '{} wait {}'.format(
cmd,
join_all_pods
)
#print(pod_watcher)
watch_man = subprocess.call(
shlex.split(pod_watcher),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
# After the above is done, we'll check each pod process for an exit
# code.
pattern = "Exited (0)"
for pod in entry_name_list:
checkcmd = '{} ps -f status=exited -f name={}'.format(
cmd,
pod
)
podcheck = subprocess.Popen(
checkcmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True
)
output, errors = podcheck.communicate()
if 'Exited (0)' in output.decode():
self.log.info('%s seems ok' % pod)
else:
self.log.error('%s had issues syncing' % pod)
bad_exit_list.append(pod)
rmcmd = '{} rm {}'.format(
cmd,
join_all_pods
)
rmpod = subprocess.Popen(
rmcmd,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
shell=True
)
entry_name_list.clear()
self.log.info('Syncing %s completed' % r)
if len(bad_exit_list) > 0:
self.log.error(
Color.BOLD + Color.RED + 'There were issues syncing these '
'repositories:' + Color.END
)
for issue in bad_exit_list:
self.log.error(issue)
def generate_compose_dirs(self) -> str:
"""
Generate compose dirs for full runs
"""
compose_base_dir = os.path.join(
self.compose_base,
"Rocky-{}-{}".format(self.major_version, self.date_stamp)
)
self.log.info('Creating compose directory %s' % compose_base_dir)
if not os.path.exists(compose_base_dir):
os.makedirs(compose_base_dir)
return compose_base_dir
def symlink_to_latest(self):
"""
Emulates pungi and symlinks latest-Rocky-X
This link will be what is updated in full runs. Whatever is in this
'latest' directory is what is rsynced on to staging after completion.
This link should not change often.
"""
pass
def generate_conf(self, dest_path='/var/tmp') -> str:
"""
Generates the necessary repo conf file for the operation. This repo
file should be temporary in nature. This will generate a repo file
with all repos by default. If a repo is chosen for sync, that will be
the only one synced.
:param dest_path: The destination where the temporary conf goes
:param repo: The repo object to create a file for
"""
fname = os.path.join(
dest_path,
"{}-config.repo".format(self.major_version)
)
self.log.info('Generating the repo configuration: %s' % fname)
if self.repo_base_url.startswith("/"):
self.log.error("Local file syncs are not supported.")
raise SystemExit(Color.BOLD + "Local file syncs are not "
"supported." + Color.END)
# create dest_path
if not os.path.exists(dest_path):
os.makedirs(dest_path, exist_ok=True)
config_file = open(fname, "w+")
for repo in self.repos:
constructed_url = '{}/{}/repo/{}/$basearch'.format(
self.repo_base_url,
self.project_id,
repo,
)
constructed_url_debug = '{}/{}/repo/{}/$basearch-debug'.format(
self.repo_base_url,
self.project_id,
repo,
)
constructed_url_src = '{}/{}/repo/{}/src'.format(
self.repo_base_url,
self.project_id,
repo,
)
# normal
config_file.write('[%s]\n' % repo)
config_file.write('name=%s\n' % repo)
config_file.write('baseurl=%s\n' % constructed_url)
config_file.write("enabled=1\n")
config_file.write("gpgcheck=0\n\n")
# debug
config_file.write('[%s-debug]\n' % repo)
config_file.write('name=%s debug\n' % repo)
config_file.write('baseurl=%s\n' % constructed_url_debug)
config_file.write("enabled=1\n")
config_file.write("gpgcheck=0\n\n")
# src
config_file.write('[%s-source]\n' % repo)
config_file.write('name=%s source\n' % repo)
config_file.write('baseurl=%s\n' % constructed_url_src)
config_file.write("enabled=1\n")
config_file.write("gpgcheck=0\n\n")
config_file.close()
return fname
def reposync_cmd(self) -> str:
"""
This generates the reposync command. We don't support reposync by
itself and will raise an error.
:return: The path to the reposync command. If dnf exists, we'll use
that. Otherwise, fail immediately.
"""
cmd = None
if os.path.exists("/usr/bin/dnf"):
cmd = "/usr/bin/dnf reposync"
else:
self.log.error('/usr/bin/dnf was not found. Good bye.')
raise SystemExit("/usr/bin/dnf was not found. \n\n/usr/bin/reposync "
"is not sufficient and you are likely running on an el7 "
"system or a grossly modified EL8+ system, " + Color.BOLD +
"which tells us that you probably made changes to these tools "
"expecting them to work and got to this point." + Color.END)
return cmd
def podman_cmd(self) -> str:
"""
This generates the podman run command. This is in the case that we want
to do reposyncs in parallel as we cannot reasonably run multiple
instances of dnf reposync on a single system.
"""
cmd = None
if os.path.exists("/usr/bin/podman"):
cmd = "/usr/bin/podman"
else:
self.log.error('/usr/bin/podman was not found. Good bye.')
raise SystemExit("\n\n/usr/bin/podman was not found.\n\nPlease "
" ensure that you have installed the necessary packages on "
" this system. " + Color.BOLD + "Note that docker is not "
"supported." + Color.END
)
return cmd
def repoclosure_work(self, sync_root, work_root, log_root):
"""
This is where we run repoclosures, based on the configuration of each
EL version. Each major version should have a dictionary of lists that
point to which repos they'll be targeting. An empty list means the
repoclosure is run against itself, and itself only. In the case of 8,
9, and perhaps 10, BaseOS is the only repo that should be checking
against itself. (This means BaseOS should be able to survive by
itself.)
"""
cmd = self.podman_cmd()
entries_dir = os.path.join(work_root, "entries")
bad_exit_list = []
if not self.parallel:
self.log.error('repoclosure is too slow to run one by one. enable parallel mode.')
raise SystemExit()
self.log.info('Beginning repoclosure phase')
for repo in self.repoclosure_map['repos']:
if self.repo and repo not in self.repo:
continue
repoclosure_entry_name_list = []
self.log.info('Setting up repoclosure for {}'.format(repo))
for arch in self.repoclosure_map['arches']:
repo_combination = []
repoclosure_entry_name = 'repoclosure-{}-{}'.format(repo, arch)
repoclosure_entry_name_list.append(repoclosure_entry_name)
repoclosure_arch_list = self.repoclosure_map['arches'][arch]
# Some repos will have additional repos to close against - this
# helps append
if len(self.repoclosure_map['repos'][repo]) > 0:
for l in self.repoclosure_map['repos'][repo]:
stretch = '--repofrompath={},file://{}/{}/{}/os --repo={}'.format(
l,
sync_root,
l,
arch,
l
)
repo_combination.append(stretch)
join_repo_comb = ' '.join(repo_combination)
repoclosure_entry_point_sh = os.path.join(
entries_dir,
repoclosure_entry_name
)
repoclosure_entry_point_sh = os.path.join(
entries_dir,
repoclosure_entry_name
)
repoclosure_cmd = ('/usr/bin/dnf repoclosure {} '
'--repofrompath={},file://{}/{}/{}/os --repo={} --check={} {} '
'| tee -a {}/{}-repoclosure-{}-{}.log').format(
repoclosure_arch_list,
repo,
sync_root,
repo,
arch,
repo,
repo,
join_repo_comb,
log_root,
repo,
arch,
self.date_stamp
)
repoclosure_entry_point_open = open(repoclosure_entry_point_sh, "w+")
repoclosure_entry_point_open.write('#!/bin/bash\n')
repoclosure_entry_point_open.write('set -o pipefail\n')
repoclosure_entry_point_open.write('/usr/bin/dnf install dnf-plugins-core -y\n')
repoclosure_entry_point_open.write('/usr/bin/dnf clean all\n')
repoclosure_entry_point_open.write(repoclosure_cmd + '\n')
repoclosure_entry_point_open.close()
os.chmod(repoclosure_entry_point_sh, 0o755)
repo_combination.clear()
self.log.info('Spawning pods for %s' % repo)
for pod in repoclosure_entry_name_list:
podman_cmd_entry = '{} run -d -it -v "{}:{}" -v "{}:{}" -v "{}:{}" --name {} --entrypoint {}/{} {}'.format(
cmd,
self.compose_root,
self.compose_root,
self.dnf_config,
self.dnf_config,
entries_dir,
entries_dir,
pod,
entries_dir,
pod,
self.container
)
#print(podman_cmd_entry)
process = subprocess.call(
shlex.split(podman_cmd_entry),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
join_all_pods = ' '.join(repoclosure_entry_name_list)
time.sleep(3)
self.log.info('Performing repoclosure on %s ... ' % repo)
pod_watcher = '{} wait {}'.format(
cmd,
join_all_pods
)
watch_man = subprocess.call(
shlex.split(pod_watcher),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
for pod in repoclosure_entry_name_list:
checkcmd = '{} ps -f status=exited -f name={}'.format(
cmd,
pod
)
podcheck = subprocess.Popen(
checkcmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True
)
output, errors = podcheck.communicate()
if 'Exited (0)' in output.decode():
self.log.info('%s seems ok' % pod)
else:
self.log.error('%s had issues closing' % pod)
bad_exit_list.append(pod)
rmcmd = '{} rm {}'.format(
cmd,
join_all_pods
)
rmpod = subprocess.Popen(
rmcmd,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
shell=True
)
repoclosure_entry_name_list.clear()
self.log.info('Repoclosure for %s completed' % repo)
if len(bad_exit_list) > 0:
self.log.error(
Color.BOLD + Color.RED + 'There were issues closing these '
'repositories:' + Color.END
)
for issue in bad_exit_list:
self.log.error(issue)
def deploy_extra_files(self):
"""
deploys extra files based on info of rlvars
"""
pass
class SigRepoSync:
"""
This helps us do reposync operations for SIG's. Do not use this for the
base system. Use RepoSync for that.
"""

152 iso/py/util/iso_utils.py Normal file

@@ -0,0 +1,152 @@
"""
Builds ISO's for Rocky Linux.
Louis Abel <label AT rockylinux.org>
"""
import logging
import sys
import os
import os.path
import subprocess
import shlex
import time
import re
from common import Color
class IsoBuild:
"""
This helps us build the generic ISO's for a Rocky Linux release. In
particular, this is for the boot and dvd images.
Live images are built in another class.
"""
def __init__(
self,
rlvars,
config,
major,
host=None,
image=None,
arch=None,
logger=None
):
self.arch = arch
self.image = image
self.host = host
# Relevant config items
self.major_version = major
self.date_stamp = config['date_stamp']
self.compose_root = config['compose_root']
self.compose_base = config['compose_root'] + "/" + major
self.iso_base = config['compose_root'] + "/" + major + "/isos"
self.current_arch = config['arch']
self.extra_files = rlvars['extra_files']
# Relevant major version items
self.revision = rlvars['revision'] + "-" + rlvars['rclvl']
self.arches = rlvars['allowed_arches']
self.staging_dir = os.path.join(
config['staging_root'],
config['category_stub'],
self.revision
)
self.compose_latest_dir = os.path.join(
config['compose_root'],
major,
"latest-Rocky-{}".format(major)
)
self.compose_latest_sync = os.path.join(
self.compose_latest_dir,
"compose"
)
self.compose_log_dir = os.path.join(
self.compose_latest_dir,
"work/logs"
)
# This is temporary for now.
if logger is None:
self.log = logging.getLogger("iso")
self.log.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s :: %(name)s :: %(message)s',
'%Y-%m-%d %H:%M:%S'
)
handler.setFormatter(formatter)
self.log.addHandler(handler)
self.log.info('iso build init')
self.log.info(self.revision)
def run(self):
work_root = os.path.join(
self.compose_latest_dir,
'work'
)
sync_root = self.compose_latest_sync
log_root = os.path.join(
work_root,
"logs"
)
self.iso_build(
sync_root,
work_root,
log_root,
self.arch,
self.host
)
self.log.info('Compose repo directory: %s' % sync_root)
self.log.info('ISO Build Logs: %s' % log_root)
self.log.info('ISO Build completed.')
def iso_build(self, sync_root, work_root, log_root, arch, host):
"""
Calls out the ISO builds to the individual hosts listed in the map.
Each architecture is expected to build their own ISOs, similar to
runroot operations of koji and pungi.
It IS possible to run locally, but that would mean this only builds
ISOs for the architecture of the running machine. Please keep this in
mind when stating host=local.
"""
# Check for local build, build accordingly
# Check for arch specific build, build accordingly
# local AND arch cannot be used together, local supersedes. print
# warning.
local_only = False
if 'local' in self.host:
local_only = True
arch = self.arch.copy()
if local_only and self.arch:
self.log.warn('You cannot set local build AND an architecture.')
self.log.warn('The architecture %s will be set' % self.current_arch)
arch = self.current_arch
def iso_build_local(self, sync_root, work_root, log_root):
"""
Local iso builds only. Architecture is locked.
"""
print()
def iso_build_remote(self, sync_root, work_root, log_root, arch):
"""
Remote ISO builds. Architecture is all or single.
"""
print()
class LiveBuild:
"""
This helps us build the live images for Rocky Linux.
"""

50 iso/sh/common Normal file

@@ -0,0 +1,50 @@
# To be sourced by scripts to use
# Variables that can be overridden should be noted with optional context. It is
# expected that these values are here in this file (per variable or per set):
#
# * Allowed
# * Allowed with caveats
# * Not Allowed
# * Required
# Set the Rocky Linux version.
# Override: Required
if [ -z "$RLVER" ]; then
echo "RLVER is not defined."
exit 2
fi
# Architecture of the system - Overriding this would be a mistake. Lorax and
# other runroot-like operations should occur on their respective architectures.
# Override: Not Allowed
ARCH="$(uname -p)"
# Source Major common
# Override: Not Allowed
test -f "$(dirname "$0")/common_${RLVER}" && source "$(dirname "$0")/common_${RLVER}"
if [ "$?" -ne 0 ]; then
echo "Could not source common_${RLVER}"
exit 1
fi
DATE_STAMP="$(date +%Y%m%d)"
COMPOSE_ROOT="/mnt/compose/${RLVER}"
COMPOSE_ISO_WORKDIR="${COMPOSE_ROOT}/work/${ARCH}/${DATE_STAMP}"
# ISO Functions
function build_graft_points() {
echo ""
}
function build_lorax_source_list() {
echo ""
}
function build_lorax() {
echo ""
}
function build_extra_iso() {
echo ""
}
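Since RLVER is required, callers are expected to set it per invocation, e.g.
(the calling script name here is hypothetical):

```
RLVER=9 ./build-some-iso   # sources iso/sh/common, which then pulls in common_9
```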

4 iso/sh/common_8 Normal file

@@ -0,0 +1,4 @@
# To be sourced by scripts to use
# These are the architectures supported for 8
ARCHES=(x86_64 aarch64)

4 iso/sh/common_9 Normal file

@@ -0,0 +1,4 @@
# To be sourced by scripts to use
# These are the architectures supported for 9
ARCHES=(x86_64 aarch64 ppc64le s390x)

34 live/common Normal file

@@ -0,0 +1,34 @@
# To be sourced by scripts that build live images
# Variables that can be overridden should be noted with optional context. It is
# expected that these values are here in this file (per variable or per set):
#
# * Allowed
# * Allowed with caveats
# * Not Allowed
# * Required
# Temporary probably. This makes it so if RLVER=... is called before the script
# it will set the version for the variables to call up. This was easier than
# creating duplicates of a bunch of stuff. Default version is 8.
# Override: Required
if [ -z "$RLVER" ]; then
echo "RLVER is not defined."
exit 2
fi
# Set git branch name scheme
# Override: Allowed with caveats
GIT_BRANCH="r${RLVER}"
# Source Major common
# Override: Not Allowed
test -f "$(dirname "$0")/common_${RLVER}" && source "$(dirname "$0")/common_${RLVER}"
if [ "$?" -ne 0 ]; then
echo "Could not source common_${RLVER}"
exit 1
fi
# Used to iterate over types of live images
VARIANTS=(XFCE KDE Workstation Workstation-Lite)

@@ -7,7 +7,7 @@ MAJOR="${REVISION:0:1}"
 MINOR="${REVISION:2:1}"
 # comment or blank if needed
-APPEND_TO_DIR="-RC1"
+APPEND_TO_DIR="-RC2"
 STAGING_ROOT="/mnt/repos-staging"
 PRODUCTION_ROOT="/mnt/repos-production"
@@ -20,13 +20,19 @@ RELEASE_DIR="${CATEGORY_STUB}/${REVISION}${APPEND_TO_DIR}"
 # cases where repos will not be available by normal means. It's just for
 # consistency.
 NONMODS_REPOS=(
-  Extras
+  extras
   Devel
+  nfv
   plus
   rockyrpi
 )
+# These repos were originally separate from the main compose and need symlinks
+declare -A LINK_REPOS
+LINK_REPOS=(
+  [NFV]="nfv"
+  [Devel]="devel"
+)
 # These repos have comps/groups, except for debuginfo and sources
 MODS_REPOS=(
   BaseOS
@@ -35,6 +41,7 @@ MODS_REPOS=(
   ResilientStorage
   PowerTools
   RT
+  NFV
 )
 ALL_REPOS=(
@@ -46,7 +53,6 @@ NONSIG_COMPOSE=(
   Rocky
   Extras
   Rocky-devel
-  NFV
   Plus
   rockyrpi
 )

174 sync/common_9 Normal file

@@ -0,0 +1,174 @@
# To be sourced by scripts to use
# Revision must always start with a major number
REVISION=9.0
ARCHES=(x86_64 aarch64 ppc64le s390x)
MAJOR="${REVISION:0:1}"
MINOR="${REVISION:2:1}"
# comment or blank if needed
APPEND_TO_DIR="-RC1"
STAGING_ROOT="/mnt/repos-staging"
PRODUCTION_ROOT="/mnt/repos-production"
# relative to ${ENV}_ROOT
CATEGORY_STUB="mirror/pub/rocky"
RELEASE_DIR="${CATEGORY_STUB}/${REVISION}${APPEND_TO_DIR}"
# Set all repos that have no comps/groups associated with them. This is even in
# cases where repos will not be available by normal means. It's just for
# consistency.
NONMODS_REPOS=(
extras
Devel
plus
rockyrpi
)
# These repos were originally separate from the main compose and need symlinks
declare -A LINK_REPOS
LINK_REPOS=(
[NFV]="nfv"
)
# These repos have comps/groups, except for debuginfo and sources
MODS_REPOS=(
BaseOS
AppStream
HighAvailability
ResilientStorage
CRB
RT
NFV
SAP
SAPHANA
)
ALL_REPOS=(
"${NONMODS_REPOS[@]}"
"${MODS_REPOS[@]}"
)
NONSIG_COMPOSE=(
Rocky
Extras
Rocky-devel
Plus
rockyrpi
)
declare -A SIG_COMPOSE
SIG_COMPOSE=(
[kernel]="kernel/kernel"
[kmod]="kernel/kmod"
)
# These repos have modules
MODS=(
AppStream
PowerTools
)
# functions
# Note, EL8 and EL9 may not be the same, do not put in 'common'
function treeinfoSaver() {
BaseOSArch="${1}"
TREEINFO_VAR="${STAGING_ROOT}/${RELEASE_DIR}/BaseOS/${BaseOSArch}/os/.treeinfo"
PRISTINE_TREE="${STAGING_ROOT}/${RELEASE_DIR}/BaseOS/${BaseOSArch}/os/.treeinfo-pristine"
/bin/cp "${TREEINFO_VAR}" "${PRISTINE_TREE}"
}
function treeinfoFixer() {
BaseOSArch="${1}"
TREEINFO_VAR="${STAGING_ROOT}/${RELEASE_DIR}/BaseOS/${BaseOSArch}/os/.treeinfo"
PRISTINE_TREE="${STAGING_ROOT}/${RELEASE_DIR}/BaseOS/${BaseOSArch}/os/.treeinfo-pristine"
test -f "${PRISTINE_TREE}"
pris_retval=$?
if [ "$pris_retval" -eq 0 ]; then
/bin/cp "${PRISTINE_TREE}" "${TREEINFO_VAR}"
else
echo "WARNING: We do not have a pristine treeinfo to copy from"
fi
}
# Note, EL8 and EL9 may not be the same, do not put in 'common'
function treeinfoModder() {
BaseOSArch="${1}"
TREEINFO_VAR="${STAGING_ROOT}/${RELEASE_DIR}/BaseOS/${BaseOSArch}/os/.treeinfo"
PRISTINE_TREE="${STAGING_ROOT}/${RELEASE_DIR}/BaseOS/${BaseOSArch}/os/.treeinfo-pristine"
test -f "${TREEINFO_VAR}"
treeinfo_retval=$?
test -x /usr/bin/python3
python_retval=$?
# There is an awk way to do this, but it was easier to implement python and
# cat heredoc together. It felt cleaner. This was a trick I had used in a
# previous life when I had to admin Solaris systems, and I needed a way to
# add a solaris 10 system into FreeIPA (it was not fun, let me tell you). But
# the take away is I learned something kind of on the fly and well, it worked.
# Emails should have stamps.
if [ "$treeinfo_retval" -eq 0 ] && [ "$python_retval" -eq 0 ]; then
cat <<EOF | /usr/bin/python3
from configparser import ConfigParser
config = ConfigParser()
config.read('${TREEINFO_VAR}')
config.set('tree', 'variants', 'BaseOS,AppStream')
config.add_section('variant-AppStream')
config.set('variant-AppStream', 'id', 'AppStream')
config.set('variant-AppStream', 'name', 'AppStream')
config.set('variant-AppStream', 'type', 'variant')
config.set('variant-AppStream', 'uid', 'AppStream')
config.set('variant-AppStream', 'packages', '../../../AppStream/${BaseOSArch}/os/Packages')
config.set('variant-AppStream', 'repository', '../../../AppStream/${BaseOSArch}/os/')
with open('${TREEINFO_VAR}', 'w') as configfile:
config.write(configfile)
EOF
else
echo "${TREEINFO_VAR}, or python3 does not exist on this system."
fi
/bin/cp "${TREEINFO_VAR}" "${PRISTINE_TREE}"
}
function treeinfoModderKickstart() {
BaseOSArch="${1}"
TREEINFO_VAR="${STAGING_ROOT}/${RELEASE_DIR}/BaseOS/${BaseOSArch}/kickstart/.treeinfo"
PRISTINE_TREE="${STAGING_ROOT}/${RELEASE_DIR}/BaseOS/${BaseOSArch}/kickstart/.treeinfo-pristine"
test -f "${TREEINFO_VAR}"
treeinfo_retval=$?
test -x /usr/bin/python3
python_retval=$?
# There is an awk way to do this, but it was easier to implement python and
# cat heredoc together. It felt cleaner. This was a trick I had used in a
# previous life when I had to admin Solaris systems, and I needed a way to
# add a solaris 10 system into FreeIPA (it was not fun, let me tell you). But
# the take away is I learned something kind of on the fly and well, it worked.
# Emails should have stamps.
if [ "$treeinfo_retval" -eq 0 ] && [ "$python_retval" -eq 0 ]; then
cat <<EOF | /usr/bin/python3
from configparser import ConfigParser
config = ConfigParser()
config.read('${TREEINFO_VAR}')
config.set('tree', 'variants', 'BaseOS,AppStream')
config.add_section('variant-AppStream')
config.set('variant-AppStream', 'id', 'AppStream')
config.set('variant-AppStream', 'name', 'AppStream')
config.set('variant-AppStream', 'type', 'variant')
config.set('variant-AppStream', 'uid', 'AppStream')
config.set('variant-AppStream', 'packages', '../../../AppStream/${BaseOSArch}/kickstart/Packages')
config.set('variant-AppStream', 'repository', '../../../AppStream/${BaseOSArch}/kickstart/')
with open('${TREEINFO_VAR}', 'w') as configfile:
config.write(configfile)
EOF
else
echo "${TREEINFO_VAR}, or python3 does not exist on this system."
fi
/bin/cp "${TREEINFO_VAR}" "${PRISTINE_TREE}"
}
export -f treeinfoFixer
export -f treeinfoModder
export -f treeinfoModderKickstart
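A sketch of how these functions are meant to be called during a sync (the
caller is not part of this diff; the order is inferred from what each function
does):

```
for arch in "${ARCHES[@]}"; do
  treeinfoModder "$arch"            # inject the AppStream variant, save a pristine copy
  treeinfoModderKickstart "$arch"   # same treatment for the kickstart tree
  treeinfoFixer "$arch"             # later passes restore .treeinfo from the pristine copy
done
```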

@@ -38,7 +38,7 @@ TORRENT_TRACKERS=(
 )
 # Regex of paths to exclude
 TORRENT_EXCLUDES='.*\/CHECKSUM.asc'
-TORRENT_COMMENT="https://docs.rockylinux.org/release_notes/${REVISION}/"
+TORRENT_COMMENT="https://docs.rockylinux.org/release_notes/${REVISION//\./_}/" # dots are bad, mkay?
 THREADS=10
 printf "* Step 1: Create scaffolding and link\n"

@@ -41,6 +41,12 @@ for SIG in "${!SIG_COMPOSE[@]}"; do
   {} "${TARGET}"
 done
+# Create symlinks for repos that were once separate from the main compose
+for LINK in "${!LINK_REPOS[@]}"; do
+  ln -sr "${STAGING_ROOT}/${CATEGORY_STUB}/${REV}/${LINK}" \
+    "${STAGING_ROOT}/${CATEGORY_STUB}/${REV}/${LINK_REPOS[$LINK]}"
+done
 # copy around the ISOs a bit, make things comfortable
 for ARCH in "${ARCHES[@]}"; do
   TARGET="${STAGING_ROOT}/${CATEGORY_STUB}/${REV}/isos/${ARCH}"
@@ -50,11 +56,12 @@ for ARCH in "${ARCHES[@]}"; do
   # Hardcoding this for now
   SOURCE="/mnt/compose/${MAJ}/latest-Rocky-${MAJ}/compose/${x}/${ARCH}/iso"
   TARGET_ARCH="${STAGING_ROOT}/${CATEGORY_STUB}/${REV}/${x}/${ARCH}/iso"
-  mkdir -p "${SOURCE}" "${TARGET}" "${TARGET_ARCH}"
+  mkdir -p "${TARGET}"
+  #mkdir -p "${SOURCE}" "${TARGET}" "${TARGET_ARCH}"
   # Copy the ISO and manifests into their target architecture
-  cp -n "${SOURCE}"/*.iso "${TARGET_ARCH}/"
-  cp -n "${SOURCE}"/*.iso.manifest "${TARGET_ARCH}/"
-  cp -n "${SOURCE}/CHECKSUM" "${TARGET_ARCH}/"
+  #cp -n "${SOURCE}"/*.iso "${TARGET_ARCH}/"
+  #cp -n "${SOURCE}"/*.iso.manifest "${TARGET_ARCH}/"
+  #cp -n "${SOURCE}/CHECKSUM" "${TARGET_ARCH}/"
   # Copy the ISO and manifests into the main isos target
   cp "${SOURCE}"/*.iso "${TARGET}/"
   cp "${SOURCE}"/*.iso.manifest "${TARGET}/"

@@ -44,7 +44,7 @@ for x in "${ARCHES[@]}"; do
   test -d "${STAGING_ROOT}/${RELEASE_DIR}/${y}/${x}/${z}"
   ret_val=$?
   if [ "$ret_val" -eq 0 ]; then
-    createrepo --update "${STAGING_ROOT}/${RELEASE_DIR}/${y}/${x}/${z}" \
+    createrepo "${STAGING_ROOT}/${RELEASE_DIR}/${y}/${x}/${z}" \
       "--distro=cpe:/o:rocky:rocky:${REVISION:0:1},Rocky Linux ${REVISION:0:1}" \
       --workers 8
     sed -i '/<open-size><\/open-size>/d' \

122 sync/propagate-image.sh Normal file

@@ -0,0 +1,122 @@
#!/bin/bash
source_ami="$1"
source_region="${2:-us-east-1}"
if [[ -z $source_ami || -z $source_region ]]; then
echo "usage: $0 source_ami source_region"
exit 2
fi
RESF_AMI_ACCOUNT_ID=792107900819
REGIONS=$(aws --profile resf-ami ec2 describe-regions \
--all-regions \
--query "Regions[].{Name:RegionName}" \
--output text | grep -vE "$source_region")
SOURCE_AMI_NAME=$(aws --profile resf-ami ec2 describe-images \
--region "$source_region" --image-ids "$source_ami" --query 'Images[0].Name'\
--output text )
# Enforce a name structure
# Rocky-8-ec2-8.6-20220515.0.x86_64
if [[ ! "${SOURCE_AMI_NAME}" =~ Rocky-[89]-ec2-[89]\.[0-9]-[0-9]+\.[0-9]+\.((aarch|x86_)64|ppc64le|s390x) ]]; then
echo "Bad source ami (${SOURCE_AMI_NAME}). Exiting."
exit 1
fi
function copy(){
for region in $REGIONS; do
if find_image_by_name $region; then
echo "Found copy of $source_ami in $region - $found_image_id - Skipping"
continue
fi
echo -n "Creating copy job for $region..."
ami_id=$(aws --profile resf-ami ec2 copy-image \
--region $region \
--name "${SOURCE_AMI_NAME}" \
--source-image-id "${source_ami}" \
--source-region "${source_region}" \
--output text 2>&1)
if [[ $? -eq 0 ]]; then
unset ami_ids[$region]
echo ". $ami_id"
if [[ ! -z "$ami_id" ]]; then
ami_ids[$region]="$ami_id"
fi
continue
fi
echo ".an error occurred (likely region is not signed up). Skipping."
done
}
function change_privacy(){
local status="$1"
local launch_permission
case $status in
Private)
launch_permission="Remove=[{Group=all}]"
;;
Public)
launch_permission="Add=[{Group=all}]"
;;
esac
local finished=false
while ! $finished; do
for region in "${!ami_ids[@]}"; do
echo -n "Making ${ami_ids[$region]} in $region $status..."
aws --profile resf-ami ec2 modify-image-attribute \
--region $region \
--image-id "${ami_ids[$region]}" \
--launch-permission "${launch_permission}" 2>/dev/null
if [[ $? -eq 0 ]]; then
unset ami_ids[$region]
echo ". Done"
continue
fi
echo ". Still pending"
done
if [[ ${#ami_ids[@]} -gt 0 ]]; then
echo -n "Sleeping for one minute... "
for (( i=0; i<60; i++ )); do
if [[ $((i%10)) -eq 0 ]]; then
echo -n "$i"
else
echo -n "."
fi
sleep 1
done
echo ""
else
finished=true
break
fi
done
echo "Completed!"
}
function find_image_by_name(){
# found_ami_ids[region]=ami_id
# ami-id "name"
local query="$(printf 'Images[?Name==`%s`]|[?Public==`true`].[ImageId,Name][]' "${SOURCE_AMI_NAME}")"
mapfile -t res < <(
aws --profile resf-ami ec2 describe-images --region $region --owners $RESF_AMI_ACCOUNT_ID \
--query "${query}" 2>/dev/null \
| jq -r '.|@sh'
)
res=($res)
if [[ ${#res[@]} -eq 0 ]]; then
# Skip empty results
return 1 #not found
fi
id=${res[0]//\"}
name=${res[@]/$id}
# printf "Found public image: %s in %s with name '%s'\n" "$id" "$region" "${name//\"}"
found_image_id=$id
return 0 # found
}
declare -A ami_ids
copy
change_privacy Public # uses ami_ids
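A hypothetical invocation (the AMI ID below is a placeholder; the region
argument defaults to us-east-1 when omitted):

```
./propagate-image.sh ami-0123456789abcdef0 us-east-1
```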

@@ -0,0 +1,25 @@
#!/bin/bash
# shellcheck disable=SC2046,1091,1090
source $(dirname "$0")/common
for ARCH in "${ARCHES[@]}"; do
pushd "${STAGING_ROOT}/${CATEGORY_STUB}/${REV}/isos/${ARCH}" || { echo "Could not change directory"; break; }
if [ -f "CHECKSUM" ]; then
rm CHECKSUM
fi
for ISO in *.iso; do
ln -s "${ISO}" "${ISO//.[0-9]/-latest}"
done
# shellcheck disable=SC2086
for file in *.iso; do
printf "# %s: %s bytes\n%s\n" \
"${file}" \
"$(stat -c %s ${file})" \
"$(sha256sum --tag ${file})" \
| sudo tee -a CHECKSUM;
done
popd || { echo "Could not change directory"; break; }
done
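Given the printf/sha256sum pairing above, each image ends up with a stanza
like this in CHECKSUM (name, size, and digest are placeholders):

```
# Rocky-9.0-x86_64-minimal.iso: 1610612736 bytes
SHA256 (Rocky-9.0-x86_64-minimal.iso) = 0000000000000000000000000000000000000000000000000000000000000000
```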

@@ -16,7 +16,10 @@ if [ $ret_val -eq "0" ]; then
 # disabling because none of our files should be starting with dashes. If they
 # are, something is *seriously* wrong here.
 # shellcheck disable=SC2035
-sudo -l && find **/* -maxdepth 0 -type d | parallel --will-cite -j 18 sudo rsync -av --chown=10004:10005 --progress --relative --human-readable \
+sudo -l && find ./ -mindepth 1 -maxdepth 1 -type d -exec find {}/ -mindepth 1 -maxdepth 1 -type d \;|sed 's/^..//g' | parallel --will-cite -j 18 sudo rsync -av --chown=10004:10005 --progress --relative --human-readable \
+  {} "${TARGET}"
+# shellcheck disable=SC2035
+sudo -l && find ** -maxdepth 0 -type l | parallel --will-cite -j 18 sudo rsync -av --chown=10004:10005 --progress --relative --human-readable \
   {} "${TARGET}"
 # Full file list update
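The intent of the new pipeline: instead of one rsync job per top-level
directory, enumerate directories two levels deep (the sed strips the leading
"./"), so that rsync --relative recreates paths like BaseOS/x86_64 under
${TARGET}; a second pass then carries over the top-level symlinks. A
standalone sketch of the enumeration:

```
find ./ -mindepth 1 -maxdepth 1 -type d \
  -exec find {}/ -mindepth 1 -maxdepth 1 -type d \; | sed 's/^..//g'
# -> BaseOS/x86_64
# -> AppStream/x86_64
# ...
```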