From 7917876676dd415e7a498ec2b4b28a2aed3fea09 Mon Sep 17 00:00:00 2001
From: nazunalika
Date: Fri, 10 Jun 2022 16:05:44 -0700
Subject: [PATCH] add hash flag

---
 iso/py/common.py         |   6 +
 iso/py/sig/altarch.yaml  |  25 ++--
 iso/py/sig/cloud.yaml    |  28 +++--
 iso/py/sig/core.yaml     |  21 ++--
 iso/py/sync-from-peridot |   2 +
 iso/py/sync-sig          |  61 +++++++++
 iso/py/util/__init__.py  |   1 +
 iso/py/util/dnf_utils.py | 258 +++++++++++++++++----------------------
 8 files changed, 225 insertions(+), 177 deletions(-)
 create mode 100755 iso/py/sync-sig

diff --git a/iso/py/common.py b/iso/py/common.py
index 1d07574..509e89e 100644
--- a/iso/py/common.py
+++ b/iso/py/common.py
@@ -22,6 +22,7 @@ class Color:
 
 # vars and additional checks
 rldict = {}
+sigdict = {}
 config = {
     "rlmacro": rpm.expandMacro('%rhel'),
     "arch": platform.machine(),
@@ -40,6 +41,11 @@ for conf in glob.iglob('configs/*.yaml'):
     with open(conf, 'r', encoding="utf-8") as file:
         rldict.update(yaml.safe_load(file))
 
+# Import all SIG configs from yaml
+for conf in glob.iglob('sig/*.yaml'):
+    with open(conf, 'r', encoding="utf-8") as file:
+        sigdict.update(yaml.safe_load(file))
+
 # The system needs to be a RHEL-like system. It cannot be Fedora or SuSE.
 #if "%rhel" in config['rlmacro']:
 #    raise SystemExit(Color.BOLD + 'This is not a RHEL-like system.' + Color.END
diff --git a/iso/py/sig/altarch.yaml b/iso/py/sig/altarch.yaml
index 0eb9d4e..50c425a 100644
--- a/iso/py/sig/altarch.yaml
+++ b/iso/py/sig/altarch.yaml
@@ -1,12 +1,17 @@
 ---
-'8':
-  rockyrpi:
-    project_id: ''
-    additional_dirs:
-      - 'images'
-'9':
-  rockyrpi:
-    project_id: ''
-    additional_dirs:
-      - 'images'
+altarch:
+  '8':
+    rockyrpi:
+      allowed_arches:
+        - aarch64
+      project_id: ''
+      additional_dirs:
+        - 'images'
+  '9':
+    rockyrpi:
+      allowed_arches:
+        - aarch64
+      project_id: ''
+      additional_dirs:
+        - 'images'
 ...
diff --git a/iso/py/sig/cloud.yaml b/iso/py/sig/cloud.yaml
index 985a405..f30f94a 100644
--- a/iso/py/sig/cloud.yaml
+++ b/iso/py/sig/cloud.yaml
@@ -1,10 +1,22 @@
 ---
-'8':
-  cloud-kernel:
-    project_id: 'f91da90d-5bdb-4cf2-80ea-e07f8dae5a5c'
-  cloud-common:
-    project_id: ''
-'9':
-  cloud-common:
-    project_id: ''
+cloud:
+  '8':
+    cloud-kernel:
+      project_id: 'f91da90d-5bdb-4cf2-80ea-e07f8dae5a5c'
+      allowed_arches:
+        - aarch64
+        - x86_64
+    cloud-common:
+      allowed_arches:
+        - aarch64
+        - x86_64
+      project_id: ''
+  '9':
+    cloud-common:
+      project_id: ''
+      allowed_arches:
+        - aarch64
+        - x86_64
+        - ppc64le
+        - s390x
 ...
diff --git a/iso/py/sig/core.yaml b/iso/py/sig/core.yaml
index 0047ac0..b8a97fd 100644
--- a/iso/py/sig/core.yaml
+++ b/iso/py/sig/core.yaml
@@ -1,12 +1,13 @@
 ---
-'8':
-  core-common:
-    project_id: ''
-  core-infra:
-    project_id: ''
-'9':
-  core-common:
-    project_id: ''
-  core-infra:
-    project_id: ''
+core:
+  '8':
+    core-common:
+      project_id: ''
+    core-infra:
+      project_id: ''
+  '9':
+    core-common:
+      project_id: ''
+    core-infra:
+      project_id: ''
 ...
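
Note on the restructured SIG configs above: each YAML file now nests its repositories under a single top-level SIG key (altarch, cloud, core), so common.py can merge every sig/*.yaml into one sigdict and a SIG's repositories can be resolved with a single sigdict[sig][major] lookup. Below is a minimal sketch (not part of the patch) of that consumption path, assuming PyYAML is installed and the sig/ directory is laid out as in the hunks above; 'altarch' and '8' are simply the example SIG and release taken from altarch.yaml.

    # Minimal sketch of consuming the restructured SIG YAML (illustrative only).
    import glob
    import yaml

    sigdict = {}
    for conf in glob.iglob('sig/*.yaml'):
        with open(conf, 'r', encoding="utf-8") as file:
            # Each file contributes one top-level SIG key, e.g. 'altarch'.
            sigdict.update(yaml.safe_load(file))

    # One lookup now yields every repo defined for a SIG and major release.
    sigvars = sigdict['altarch']['8']
    for repo_name, repo_conf in sigvars.items():
        # allowed_arches is optional (core.yaml does not set it yet).
        print(repo_name, repo_conf.get('allowed_arches', []))
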
diff --git a/iso/py/sync-from-peridot b/iso/py/sync-from-peridot
index 56ba74d..d114a59 100755
--- a/iso/py/sync-from-peridot
+++ b/iso/py/sync-from-peridot
@@ -22,6 +22,7 @@ parser.add_argument('--ignore-debug', action='store_true')
 parser.add_argument('--ignore-source', action='store_true')
 parser.add_argument('--repoclosure', action='store_true')
 parser.add_argument('--skip-all', action='store_true')
+parser.add_argument('--hashed', action='store_true')
 parser.add_argument('--dry-run', action='store_true')
 parser.add_argument('--full-run', action='store_true')
 parser.add_argument('--no-fail', action='store_true')
@@ -47,6 +48,7 @@ a = RepoSync(
     ignore_source=results.ignore_source,
     repoclosure=results.repoclosure,
     skip_all=results.skip_all,
+    hashed=results.hashed,
     parallel=results.simple,
     dryrun=results.dry_run,
     fullrun=results.full_run,
diff --git a/iso/py/sync-sig b/iso/py/sync-sig
new file mode 100755
index 0000000..ff9c8e1
--- /dev/null
+++ b/iso/py/sync-sig
@@ -0,0 +1,61 @@
+#!/usr/bin/env python3
+
+# This script can be called to do single syncs or full on syncs.
+
+import argparse
+from common import *
+from util import Checks
+from util import SigRepoSync
+
+#rlvars = rldict['9']
+#r = Checks(rlvars, config['arch'])
+#r.check_valid_arch()
+
+# Start up the parser baby
+parser = argparse.ArgumentParser(description="Peridot Sync and Compose")
+
+# All of our options
+parser.add_argument('--release', type=str, help="Major Release Version", required=True)
+parser.add_argument('--repo', type=str, help="Repository name")
+parser.add_argument('--sig', type=str, help="SIG name")
+parser.add_argument('--arch', type=str, help="Architecture")
+parser.add_argument('--ignore-debug', action='store_true')
+parser.add_argument('--ignore-source', action='store_true')
+parser.add_argument('--repoclosure', action='store_true')
+parser.add_argument('--skip-all', action='store_true')
+parser.add_argument('--hashed', action='store_true')
+parser.add_argument('--dry-run', action='store_true')
+parser.add_argument('--full-run', action='store_true')
+parser.add_argument('--no-fail', action='store_true')
+# I am aware this is confusing, I want podman to be the default option
+parser.add_argument('--simple', action='store_false')
+parser.add_argument('--logger', type=str)
+
+# Parse them
+results = parser.parse_args()
+
+rlvars = rldict[results.release]
+sigvars = sigdict[results.sig][results.release]
+r = Checks(rlvars, config['arch'])
+r.check_valid_arch()
+
+# Send them and do whatever I guess
+a = SigRepoSync(
+    rlvars,
+    config,
+    sigvars,
+    major=results.release,
+    repo=results.repo,
+    arch=results.arch,
+    ignore_source=results.ignore_source,
+    repoclosure=results.repoclosure,
+    skip_all=results.skip_all,
+    hashed=results.hashed,
+    parallel=results.simple,
+    dryrun=results.dry_run,
+    fullrun=results.full_run,
+    nofail=results.no_fail,
+    logger=results.logger
+)
+
+a.run()
diff --git a/iso/py/util/__init__.py b/iso/py/util/__init__.py
index 0cbbf89..1c96258 100644
--- a/iso/py/util/__init__.py
+++ b/iso/py/util/__init__.py
@@ -8,6 +8,7 @@ from .check import (
 
 from .dnf_utils import (
     RepoSync,
+    SigRepoSync
 )
 
 from .iso_utils import (
diff --git a/iso/py/util/dnf_utils.py b/iso/py/util/dnf_utils.py
index 2882d95..4354e45 100644
--- a/iso/py/util/dnf_utils.py
+++ b/iso/py/util/dnf_utils.py
@@ -38,6 +38,7 @@ class RepoSync:
             ignore_source: bool = False,
             repoclosure: bool = False,
             skip_all: bool = False,
+            hashed: bool = False,
             parallel: bool = False,
             dryrun: bool = False,
             fullrun: bool = False,
@@ -51,6 +52,7 @@ class RepoSync:
         self.ignore_debug = ignore_debug
         self.ignore_source = ignore_source
         self.skip_all = skip_all
+        self.hashed = hashed
         self.repoclosure = repoclosure
         # Enables podman syncing, which should effectively speed up operations
         self.parallel = parallel
@@ -206,152 +208,9 @@ class RepoSync:
         """
         This is for normal dnf syncs. This is very slow.
         """
-        cmd = self.reposync_cmd()
-
-        sync_single_arch = False
-        arches_to_sync = self.arches
-        if arch:
-            sync_single_arch = True
-            arches_to_sync = [arch]
-
-        sync_single_repo = False
-        repos_to_sync = self.repos
-        if repo and not self.fullrun:
-            sync_single_repo = True
-            repos_to_sync = [repo]
-
-
-        # dnf reposync --download-metadata \
-        #       --repoid fedora -p /tmp/test \
-        #       --forcearch aarch64 --norepopath
-
-        self.log.info(
-                Color.BOLD + '!! WARNING !! ' + Color.END + 'You are performing a '
-                'local reposync, which may incur delays in your compose.'
-        )
-
-        self.log.info(
-                Color.BOLD + '!! WARNING !! ' + Color.END + 'Standard dnf reposync '
-                'is not really a supported method. Only use this for general testing.'
-        )
-
-        if self.fullrun:
-            self.log.info(
-                    Color.BOLD + '!! WARNING !! ' + Color.END + 'This is a full '
-                    'run! This will take a LONG TIME.'
-            )
-
-        for r in repos_to_sync:
-            for a in arches_to_sync:
-                repo_name = r
-                if r in self.repo_renames:
-                    repo_name = self.repo_renames[r]
-
-                os_sync_path = os.path.join(
-                        sync_root,
-                        repo_name,
-                        a,
-                        'os'
-                )
-
-                debug_sync_path = os.path.join(
-                        sync_root,
-                        repo_name,
-                        a,
-                        'debug/tree'
-                )
-
-                sync_cmd = "{} -c {} --download-metadata --repoid={} -p {} --forcearch {} --norepopath".format(
-                        cmd,
-                        self.dnf_config,
-                        r,
-                        os_sync_path,
-                        a
-                )
-
-                debug_sync_cmd = "{} -c {} --download-metadata --repoid={}-debug -p {} --forcearch {} --norepopath".format(
-                        cmd,
-                        self.dnf_config,
-                        r,
-                        debug_sync_path,
-                        a
-                )
-
-                self.log.info('Syncing {} {}'.format(r, a))
-                #self.log.info(sync_cmd)
-                # Try to figure out where to send the actual output of this...
-                # Also consider on running a try/except here? Basically if
-                # something happens (like a repo doesn't exist for some arch,
-                # eg RT for aarch64), make a note of it somehow (but don't
-                # break the entire sync). As it stands with this
-                # implementation, if something fails, it just continues on.
-                process = subprocess.call(
-                        shlex.split(sync_cmd),
-                        stdout=subprocess.DEVNULL,
-                        stderr=subprocess.DEVNULL
-                )
-
-                if not self.ignore_debug:
-                    self.log.info('Syncing {} {} (debug)'.format(r, a))
-                    process_debug = subprocess.call(
-                            shlex.split(debug_sync_cmd),
-                            stdout=subprocess.DEVNULL,
-                            stderr=subprocess.DEVNULL
-                    )
-
-                # This is an ugly hack. We don't want to list i686 as an
-                # available arch for an el because that would imply each repo
-                # gets an i686 repo. However, being able to set "arch" to i686
-                # should be possible, thus avoiding this block altogether.
- # "available_arches" in the configuration isn't meant to be a - # restriction here, but mainly a restriction in the lorax - # process (which isn't done here) - if 'x86_64' in a and 'all' in r and self.multilib: - i686_os_sync_path = os.path.join( - sync_root, - repo_name, - a, - 'os' - ) - - i686_sync_cmd = "{} -c {} --download-metadata --repoid={} -p {} --forcearch {} --norepopath".format( - cmd, - self.dnf_config, - r, - i686_os_sync_path, - 'i686' - ) - - self.log.info('Syncing {} {}'.format(r, 'i686')) - process_i686 = subprocess.call( - shlex.split(i686_sync_cmd), - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL - ) - - if not self.ignore_source: - source_sync_path = os.path.join( - sync_root, - repo_name, - 'source/tree' - ) - - source_sync_cmd = "{} -c {} --download-metadata --repoid={}-source -p {} --norepopath".format( - cmd, - self.dnf_config, - r, - source_sync_path - ) - - - self.log.info('Syncing {} source'.format(r)) - process_source = subprocess.call( - shlex.split(source_sync_cmd), - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL - ) - - self.log.info('Syncing complete') + self.log.error('DNF syncing has been removed.') + self.log.error('Please install podman and enable parallel') + raise SystemExit() def podman_sync(self, repo, sync_root, work_root, log_root, arch): """ @@ -654,26 +513,32 @@ class RepoSync: raise SystemExit(Color.BOLD + "Local file syncs are not " "supported." + Color.END) + prehashed = '' + if self.hashed: + prehashed = "hashed-" # create dest_path if not os.path.exists(dest_path): os.makedirs(dest_path, exist_ok=True) config_file = open(fname, "w+") for repo in self.repos: - constructed_url = '{}/{}/repo/{}/$basearch'.format( + constructed_url = '{}/{}/repo/{}{}/$basearch'.format( self.repo_base_url, self.project_id, + prehashed, repo, ) - constructed_url_debug = '{}/{}/repo/{}/$basearch-debug'.format( + constructed_url_debug = '{}/{}/repo/{}{}/$basearch-debug'.format( self.repo_base_url, self.project_id, + prehashed, repo, ) - constructed_url_src = '{}/{}/repo/{}/src'.format( + constructed_url_src = '{}/{}/repo/{}{}/src'.format( self.repo_base_url, self.project_id, + prehashed, repo, ) @@ -910,3 +775,98 @@ class SigRepoSync: This helps us do reposync operations for SIG's. Do not use this for the base system. Use RepoSync for that. """ + def __init__( + self, + rlvars, + config, + sigvars, + major, + repo=None, + arch=None, + ignore_source: bool = False, + repoclosure: bool = False, + skip_all: bool = False, + hashed: bool = False, + parallel: bool = False, + dryrun: bool = False, + fullrun: bool = False, + nofail: bool = False, + logger=None + ): + self.nofail = nofail + self.dryrun = dryrun + self.fullrun = fullrun + self.arch = arch + self.ignore_source = ignore_source + self.skip_all = skip_all + self.hashed = hashed + self.repoclosure = repoclosure + # Enables podman syncing, which should effectively speed up operations + self.parallel = parallel + # Relevant config items + self.major_version = major + self.date_stamp = config['date_stamp'] + self.repo_base_url = config['repo_base_url'] + self.compose_root = config['compose_root'] + self.compose_base = config['compose_root'] + "/" + major + + # Relevant major version items + self.sigvars = sigvars + self.sigrepos = sigvars.keys() + #self.arches = sigvars['allowed_arches'] + #self.project_id = sigvars['project_id'] + self.sigrepo = repo + + # each el can have its own designated container to run stuff in, + # otherwise we'll just default to the default config. 
+        self.container = config['container']
+        if 'container' in rlvars and len(rlvars['container']) > 0:
+            self.container = rlvars['container']
+
+        if 'repoclosure_map' in rlvars and len(rlvars['repoclosure_map']) > 0:
+            self.repoclosure_map = rlvars['repoclosure_map']
+
+        self.staging_dir = os.path.join(
+                config['staging_root'],
+                config['sig_category_stub'],
+                major
+        )
+
+        self.compose_latest_dir = os.path.join(
+                config['compose_root'],
+                major,
+                "latest-Rocky-{}-SIG".format(major)
+        )
+
+        self.compose_latest_sync = os.path.join(
+                self.compose_latest_dir,
+                "compose"
+        )
+
+        self.compose_log_dir = os.path.join(
+                self.compose_latest_dir,
+                "work/logs"
+        )
+
+        # This is temporary for now.
+        if logger is None:
+            self.log = logging.getLogger("sigreposync")
+            self.log.setLevel(logging.INFO)
+            handler = logging.StreamHandler(sys.stdout)
+            handler.setLevel(logging.INFO)
+            formatter = logging.Formatter(
+                    '%(asctime)s :: %(name)s :: %(message)s',
+                    '%Y-%m-%d %H:%M:%S'
+            )
+            handler.setFormatter(formatter)
+            self.log.addHandler(handler)
+
+        self.log.info('sig reposync init')
+        self.log.info(major)
+        #self.dnf_config = self.generate_conf()
+
+    def run(self):
+        """
+        This runs the sig sync.
+        """
+        pass
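
Note on the new flag: --hashed is accepted by both sync-from-peridot and the new sync-sig script, stored as self.hashed, and is used where the dnf repo configuration is written (the hunk around old line 654 above): when set, a "hashed-" prefix is inserted ahead of the repository name in each constructed Peridot repo URL. A rough sketch of that URL construction follows; it is illustrative only, not part of the patched module, and the base URL, project id, and repository name are placeholder values.

    # Sketch of the URL shape toggled by --hashed (placeholder inputs).
    def construct_repo_urls(repo_base_url, project_id, repo, hashed=False):
        prehashed = "hashed-" if hashed else ''
        base = '{}/{}/repo/{}{}'.format(repo_base_url, project_id, prehashed, repo)
        return {
            'os': base + '/$basearch',
            'debug': base + '/$basearch-debug',
            'source': base + '/src',
        }

    urls = construct_repo_urls(
        'https://peridot.example.org',   # placeholder for config['repo_base_url']
        'PROJECT-ID',                    # placeholder Peridot project id
        'BaseOS',                        # placeholder repository name
        hashed=True,
    )
    # urls['os'] -> 'https://peridot.example.org/PROJECT-ID/repo/hashed-BaseOS/$basearch'

On the command line the flag rides along with the existing options, for example something like "./sync-sig --release 9 --sig cloud --hashed" for a SIG sync, or the same flag on sync-from-peridot for the base repositories.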