add repoclosure support

Louis Abel 2022-05-28 19:28:29 -07:00
parent e49a69ceea
commit baf4893850
Signed by untrusted user: label
GPG Key ID: B37E62D143879B36
5 changed files with 221 additions and 32 deletions


@@ -43,4 +43,29 @@
   has_modules:
     - 'AppStream'
     - 'PowerTools'
+  repoclosure_map:
+    arches:
+      x86_64: '--arch=x86_64 --arch=athlon --arch=i686 --arch=i586 --arch=i486 --arch=i386 --arch=noarch'
+      aarch64: '--arch=aarch64 --arch=noarch'
+      ppc64le: '--arch=ppc64le --arch=noarch'
+      s390x: '--arch=s390x --arch=noarch'
+    repos:
+      BaseOS: []
+      AppStream:
+        - BaseOS
+      PowerTools:
+        - BaseOS
+        - AppStream
+      HighAvailability:
+        - BaseOS
+        - AppStream
+      ResilientStorage:
+        - BaseOS
+        - AppStream
+      RT:
+        - BaseOS
+        - AppStream
+      NFV:
+        - BaseOS
+        - AppStream
 ...
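
The new `repoclosure_map` key drives the repoclosure phase added in this commit: `arches` supplies the `--arch` flags passed to `dnf repoclosure` for each target architecture, and `repos` lists, for each repo, the repos it must close against (an empty list means the repo is checked only against itself). Below is a minimal sketch of how such a map expands into a `dnf repoclosure` invocation, assuming the on-disk layout `<sync_root>/<repo>/<arch>/os` used later in this commit; `build_repoclosure_cmd` is a hypothetical helper, not part of the commit:

```python
# Hypothetical sketch: expand one (repo, arch) pair from repoclosure_map
# into the dnf repoclosure command the entry scripts run.
def build_repoclosure_cmd(repoclosure_map, repo, arch, sync_root, log_root):
    # Per-arch flags, e.g. '--arch=aarch64 --arch=noarch'
    arch_flags = repoclosure_map['arches'][arch]
    # Dependent repos are made available, but only the target repo
    # itself is closure-checked via --check.
    extra = ' '.join(
        '--repofrompath={0},file://{1}/{0}/{2}/os --repo={0}'.format(
            dep, sync_root, arch
        )
        for dep in repoclosure_map['repos'][repo]
    )
    return (
        '/usr/bin/dnf repoclosure {flags} '
        '--repofrompath={repo},file://{root}/{repo}/{arch}/os '
        '--check={repo} {extra} '
        '| tee -a {logs}/{repo}-repoclosure-{arch}.log'
    ).format(flags=arch_flags, repo=repo, root=sync_root,
             arch=arch, extra=extra, logs=log_root)
```

For example, `PowerTools` on `x86_64` would pull in `BaseOS` and `AppStream` as `--repofrompath`/`--repo` arguments while only `--check`-ing `PowerTools` itself.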


@@ -51,30 +51,36 @@
     - 'AppStream'
     - 'CRB'
   repoclosure_map:
-    BaseOS: []
-    AppStream:
-      - BaseOS
-    CRB:
-      - BaseOS
-      - AppStream
-    HighAvailability:
-      - BaseOS
-      - AppStream
-    ResilientStorage:
-      - BaseOS
-      - AppStream
-    RT:
-      - BaseOS
-      - AppStream
-    NFV:
-      - BaseOS
-      - AppStream
-    SAP:
-      - BaseOS
-      - AppStream
-      - HighAvailability
-    SAPHANA:
-      - BaseOS
-      - AppStream
-      - HighAvailability
+    arches:
+      x86_64: '--arch=x86_64 --arch=athlon --arch=i686 --arch=i586 --arch=i486 --arch=i386 --arch=noarch'
+      aarch64: '--arch=aarch64 --arch=noarch'
+      ppc64le: '--arch=ppc64le --arch=noarch'
+      s390x: '--arch=s390x --arch=noarch'
+    repos:
+      BaseOS: []
+      AppStream:
+        - BaseOS
+      CRB:
+        - BaseOS
+        - AppStream
+      HighAvailability:
+        - BaseOS
+        - AppStream
+      ResilientStorage:
+        - BaseOS
+        - AppStream
+      RT:
+        - BaseOS
+        - AppStream
+      NFV:
+        - BaseOS
+        - AppStream
+      SAP:
+        - BaseOS
+        - AppStream
+        - HighAvailability
+      SAPHANA:
+        - BaseOS
+        - AppStream
+        - HighAvailability
 ...

iso/py/sig/altarch.yaml (new file)

@@ -0,0 +1,8 @@
+---
+'8':
+  rockyrpi:
+    project_id: ''
+'9':
+  rockyrpi:
+    project_id: ''
+...
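
This new `altarch.yaml` stub keys SIG/alternative-architecture projects (here `rockyrpi`) by major version; both `project_id` values are left empty in this commit. A hedged sketch of how such a file could be consumed, assuming it is loaded like the repo's other per-version YAML configs (the loader below is an assumption, not code from this commit):

```python
# Hypothetical illustration of loading altarch.yaml into a per-version dict.
import yaml

with open('iso/py/sig/altarch.yaml', 'r') as f:
    sigvars = yaml.safe_load(f)

# e.g. sigvars['8']['rockyrpi']['project_id'] -> '' (unset in this commit)
```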


@@ -11,6 +11,6 @@ rlvars = rldict['9']

 r = Checks(rlvars, config['arch'])
 r.check_valid_arch()
-#a = RepoSync(rlvars, config, major="9", repo="ResilientStorage", parallel=True, ignore_debug=False, ignore_source=False)
 a = RepoSync(rlvars, config, major="9", repo="ResilientStorage", parallel=True, ignore_debug=False, ignore_source=False)
+#a = RepoSync(rlvars, config, major="9", repo="ResilientStorage", parallel=True, ignore_debug=False, ignore_source=False, fullrun=True)
 a.run()


@@ -181,6 +181,10 @@ class RepoSync:
         if self.repoclosure:
             self.repoclosure_work(sync_root, work_root, log_root)

+        self.log.info('Compose repo directory: %s' % sync_root)
+        self.log.info('Compose logs: %s' % log_root)
+        self.log.info('Compose completed.')
+
     def sync(self, repo, sync_root, work_root, log_root, arch=None):
         """
         Calls out syncing of the repos. We generally sync each component of a
@@ -603,10 +607,6 @@
             for issue in bad_exit_list:
                 self.log.error(issue)

-        self.log.info('Compose repo directory: %s' % sync_root)
-        self.log.info('Compose logs: %s' % log_root)
-        self.log.info('Compose completed.')
-
     def generate_compose_dirs(self) -> str:
         """
         Generate compose dirs for full runs
@@ -739,7 +739,157 @@
         return cmd

     def repoclosure_work(self, sync_root, work_root, log_root):
-        pass
+        """
+        This is where we run repoclosures, based on the configuration of each
+        EL version. Each major version has a dictionary of lists that points
+        each repo at the repos it is checked against. An empty list means the
+        repoclosure is run against that repo, and that repo only. For 8, 9,
+        and perhaps 10, BaseOS is the only repo that checks against itself.
+        (This means BaseOS should be able to survive by itself.)
+        """
+        cmd = self.podman_cmd()
+        entries_dir = os.path.join(work_root, "entries")
+        bad_exit_list = []
+
+        if not self.parallel:
+            self.log.error('repoclosure is too slow to run one by one. enable parallel mode.')
+            raise SystemExit()
+
+        self.log.info('Beginning repoclosure phase')
+        for repo in self.repoclosure_map['repos']:
+            repoclosure_entry_name_list = []
+            self.log.info('Setting up repoclosure for {}'.format(repo))
+
+            for arch in self.repoclosure_map['arches']:
+                repo_combination = []
+                repoclosure_entry_name = 'repoclosure-{}-{}'.format(repo, arch)
+                repoclosure_entry_name_list.append(repoclosure_entry_name)
+                repoclosure_arch_list = self.repoclosure_map['arches'][arch]
+
+                # Some repos have additional repos to close against - this
+                # builds the extra --repofrompath/--repo arguments.
+                if len(self.repoclosure_map['repos'][repo]) > 0:
+                    for l in self.repoclosure_map['repos'][repo]:
+                        stretch = '--repofrompath={},file://{}/{}/{}/os --repo={}'.format(
+                                l,
+                                sync_root,
+                                l,
+                                arch,
+                                l
+                        )
+                        repo_combination.append(stretch)
+
+                join_repo_comb = ' '.join(repo_combination)
+
+                repoclosure_entry_point_sh = os.path.join(
+                        entries_dir,
+                        repoclosure_entry_name
+                )
+
+                repoclosure_cmd = ('/usr/bin/dnf repoclosure {} '
+                        '--repofrompath={},file://{}/{}/{}/os --check={} {} '
+                        '| tee -a {}/{}-repoclosure-{}.log').format(
+                        repoclosure_arch_list,
+                        repo,
+                        sync_root,
+                        repo,
+                        arch,
+                        repo,
+                        join_repo_comb,
+                        log_root,
+                        repo,
+                        arch
+                )
+
+                # Write a self-contained entry point script for this pod.
+                repoclosure_entry_point_open = open(repoclosure_entry_point_sh, "w+")
+                repoclosure_entry_point_open.write('#!/bin/bash\n')
+                repoclosure_entry_point_open.write('set -o pipefail\n')
+                repoclosure_entry_point_open.write('/usr/bin/dnf install dnf-plugins-core -y\n')
+                repoclosure_entry_point_open.write(repoclosure_cmd + '\n')
+                repoclosure_entry_point_open.close()
+                os.chmod(repoclosure_entry_point_sh, 0o755)
+                repo_combination.clear()
+
+            self.log.info('Spawning pods for %s' % repo)
+            for pod in repoclosure_entry_name_list:
+                podman_cmd_entry = '{} run -d -it -v "{}:{}" -v "{}:{}" -v "{}:{}" --name {} --entrypoint {}/{} {}'.format(
+                        cmd,
+                        self.compose_root,
+                        self.compose_root,
+                        self.dnf_config,
+                        self.dnf_config,
+                        entries_dir,
+                        entries_dir,
+                        pod,
+                        entries_dir,
+                        pod,
+                        self.container
+                )
+                #print(podman_cmd_entry)
+                process = subprocess.call(
+                        shlex.split(podman_cmd_entry),
+                        stdout=subprocess.DEVNULL,
+                        stderr=subprocess.DEVNULL
+                )
+
+            join_all_pods = ' '.join(repoclosure_entry_name_list)
+            time.sleep(3)
+            self.log.info('Performing repoclosure on %s ... ' % repo)
+
+            # Block until every pod for this repo has exited.
+            pod_watcher = '{} wait {}'.format(
+                    cmd,
+                    join_all_pods
+            )
+            watch_man = subprocess.call(
+                    shlex.split(pod_watcher),
+                    stdout=subprocess.DEVNULL,
+                    stderr=subprocess.DEVNULL
+            )
+
+            # A pod that exited non-zero means the repo did not close.
+            for pod in repoclosure_entry_name_list:
+                checkcmd = '{} ps -f status=exited -f name={}'.format(
+                        cmd,
+                        pod
+                )
+                podcheck = subprocess.Popen(
+                        checkcmd,
+                        stdout=subprocess.PIPE,
+                        stderr=subprocess.PIPE,
+                        shell=True
+                )
+                output, errors = podcheck.communicate()
+                if 'Exited (0)' in output.decode():
+                    self.log.info('%s seems ok' % pod)
+                else:
+                    self.log.error('%s had issues closing' % pod)
+                    bad_exit_list.append(pod)
+
+            rmcmd = '{} rm {}'.format(
+                    cmd,
+                    join_all_pods
+            )
+            rmpod = subprocess.Popen(
+                    rmcmd,
+                    stdout=subprocess.DEVNULL,
+                    stderr=subprocess.DEVNULL,
+                    shell=True
+            )
+            repoclosure_entry_name_list.clear()
+            self.log.info('Repoclosure for %s completed' % repo)
+
+        if len(bad_exit_list) > 0:
+            self.log.error(
+                    Color.BOLD + Color.RED + 'There were issues closing these '
+                    'repositories:' + Color.END
+            )
+            for issue in bad_exit_list:
+                self.log.error(issue)

 class SigRepoSync:
     """