
"""
Builds ISOs for Rocky Linux.
Louis Abel <label AT rockylinux.org>
"""
import logging
import sys
import os
import os.path
import subprocess
import shlex
import time
import tarfile
import shutil
# lazy person's s3 parser
#import requests
#import json
#import xmltodict
# if we can access s3
#import boto3
# relative_path, compute_file_checksums
import kobo.shortcuts
from fnmatch import fnmatch
# This is for treeinfo
from configparser import ConfigParser
from productmd.common import SortedConfigParser
from productmd.images import Image
from productmd.extra_files import ExtraFiles
import productmd.treeinfo
# End treeinfo
from jinja2 import Environment, FileSystemLoader
from empanadas.common import Color, _rootdir
from empanadas.util import Shared, ArchCheck
class IsoBuild:
    """
    This helps us build the generic ISOs for a Rocky Linux release. In
    particular, this is for the boot images.

    There are functions to build the DVD (and potentially other) images. Each
    build or process method starts with "run" in its name.
    """
def __init__(
self,
rlvars,
config,
major,
arch=None,
hfs_compat: bool = False,
rc: bool = False,
s3: bool = False,
force_download: bool = False,
force_unpack: bool = False,
isolation: str = 'auto',
extra_iso=None,
extra_iso_mode: str = 'local',
compose_dir_is_here: bool = False,
hashed: bool = False,
updated_image: bool = False,
image_increment: str = '0',
image=None,
logger=None
):
self.image = image
self.fullname = rlvars['fullname']
self.distname = config['distname']
self.shortname = config['shortname']
# Relevant config items
self.major_version = major
self.compose_dir_is_here = compose_dir_is_here
self.disttag = config['dist']
self.date_stamp = config['date_stamp']
self.timestamp = time.time()
self.compose_root = config['compose_root']
self.compose_base = config['compose_root'] + "/" + major
self.current_arch = config['arch']
self.required_pkgs = rlvars['iso_map']['lorax']['required_pkgs']
self.mock_work_root = config['mock_work_root']
self.lorax_result_root = config['mock_work_root'] + "/" + "lorax"
self.mock_isolation = isolation
self.iso_map = rlvars['iso_map']
#self.livemap = rlvars['livemap']
self.cloudimages = rlvars['cloudimages']
self.release_candidate = rc
self.s3 = s3
self.force_unpack = force_unpack
self.force_download = force_download
self.extra_iso = extra_iso
self.extra_iso_mode = extra_iso_mode
self.checksum = rlvars['checksum']
self.profile = rlvars['profile']
self.hashed = hashed
self.updated_image = updated_image
self.updated_image_increment = "." + image_increment
self.updated_image_date = (time.strftime("%Y%m%d", time.localtime())
+ self.updated_image_increment)
# Relevant major version items
self.arch = arch
self.arches = rlvars['allowed_arches']
self.release = rlvars['revision']
self.minor_version = rlvars['minor']
self.revision = rlvars['revision'] + "-" + rlvars['rclvl']
self.rclvl = rlvars['rclvl']
self.repos = rlvars['iso_map']['lorax']['repos']
self.repo_base_url = config['repo_base_url']
self.project_id = rlvars['project_id']
self.structure = rlvars['structure']
self.bugurl = rlvars['bugurl']
self.extra_files = rlvars['extra_files']
self.container = config['container']
if 'container' in rlvars and len(rlvars['container']) > 0:
self.container = rlvars['container']
self.staging_dir = os.path.join(
config['staging_root'],
config['category_stub'],
self.revision
)
# all bucket related info
self.s3_region = config['aws_region']
self.s3_bucket = config['bucket']
self.s3_bucket_url = config['bucket_url']
#if s3:
# self.s3 = boto3.client('s3')
# arch specific
self.hfs_compat = hfs_compat
# Templates
file_loader = FileSystemLoader(f"{_rootdir}/templates")
self.tmplenv = Environment(loader=file_loader)
self.compose_latest_dir = os.path.join(
config['compose_root'],
major,
"latest-{}-{}".format(
self.shortname,
self.profile
)
)
self.compose_latest_sync = os.path.join(
self.compose_latest_dir,
"compose"
)
self.compose_log_dir = os.path.join(
self.compose_latest_dir,
"work/logs"
)
self.iso_work_dir = os.path.join(
self.compose_latest_dir,
"work/isos"
)
self.live_work_dir = os.path.join(
self.compose_latest_dir,
"work/live"
)
self.image_work_dir = os.path.join(
self.compose_latest_dir,
"work/images"
)
self.lorax_work_dir = os.path.join(
self.compose_latest_dir,
"work/lorax"
)
# This is temporary for now.
if logger is None:
self.log = logging.getLogger("iso")
self.log.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s :: %(name)s :: %(message)s',
'%Y-%m-%d %H:%M:%S'
)
handler.setFormatter(formatter)
self.log.addHandler(handler)
self.log.info('iso build init')
self.repolist = Shared.build_repo_list(
self.repo_base_url,
self.repos,
self.project_id,
self.current_arch,
self.compose_latest_sync,
self.compose_dir_is_here,
self.hashed
)
self.log.info(self.revision)
def run(self):
work_root = os.path.join(
self.compose_latest_dir,
'work'
)
sync_root = self.compose_latest_sync
log_root = os.path.join(
work_root,
"logs"
)
self.iso_build()
self.log.info('Compose repo directory: %s' % sync_root)
self.log.info('ISO Build Logs: /var/lib/mock/{}-{}-{}/result'.format(
self.shortname.lower(), self.major_version, self.current_arch)
)
self.log.info('ISO Build completed.')
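    # Rough usage sketch (illustrative only; the real entry points are the
    # empanadas CLI wrappers, and the rlvars/config dicts come from its
    # configuration files):
    #
    #   builder = IsoBuild(rlvars, config, major='9', arch='x86_64')
    #   builder.run()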
def iso_build(self):
"""
        This does the general ISO building for the currently running
        architecture. It generates the mock config and the general script
needed to get this part running.
"""
# Check for local build, build accordingly
# Check for arch specific build, build accordingly
# local AND arch cannot be used together, local supersedes. print
# warning.
self.generate_iso_scripts()
self.run_lorax()
def generate_iso_scripts(self):
"""
        Generates the scripts needed to run lorax in mock and to package up
        the results.
"""
self.log.info('Generating ISO configuration and scripts')
mock_iso_template = self.tmplenv.get_template('isomock.tmpl.cfg')
mock_sh_template = self.tmplenv.get_template('isobuild.tmpl.sh')
iso_template = self.tmplenv.get_template('buildImage.tmpl.sh')
mock_iso_path = '/var/tmp/lorax-' + self.major_version + '.cfg'
mock_sh_path = '/var/tmp/isobuild.sh'
iso_template_path = '/var/tmp/buildImage.sh'
rclevel = ''
if self.release_candidate:
rclevel = '-' + self.rclvl
        # This is kind of a hack. Installing xorrisofs sets the alternatives
        # to it, so backwards compatibility is more or less guaranteed. But we
        # want to emulate what pungi does as closely as possible, so unless we
        # explicitly ask for xorriso (in el8 and el9), we should NOT be using
        # it. For RLN and el10, we'll use xorriso all the way through. When 8
        # is no longer getting ISOs, we'll remove this section.
required_pkgs = self.required_pkgs.copy()
if self.iso_map['xorrisofs']:
if 'genisoimage' in required_pkgs and 'xorriso' not in required_pkgs:
required_pkgs.append('xorriso')
mock_iso_template_output = mock_iso_template.render(
arch=self.current_arch,
major=self.major_version,
fullname=self.fullname,
shortname=self.shortname,
required_pkgs=required_pkgs,
dist=self.disttag,
repos=self.repolist,
user_agent='{{ user_agent }}',
)
mock_sh_template_output = mock_sh_template.render(
arch=self.current_arch,
major=self.major_version,
isolation=self.mock_isolation,
builddir=self.mock_work_root,
shortname=self.shortname,
revision=self.release,
)
iso_template_output = iso_template.render(
arch=self.current_arch,
major=self.major_version,
minor=self.minor_version,
shortname=self.shortname,
repos=self.repolist,
variant=self.iso_map['lorax']['variant'],
lorax=self.iso_map['lorax']['lorax_removes'],
distname=self.distname,
revision=self.release,
rc=rclevel,
builddir=self.mock_work_root,
lorax_work_root=self.lorax_result_root,
bugurl=self.bugurl,
)
mock_iso_entry = open(mock_iso_path, "w+")
mock_iso_entry.write(mock_iso_template_output)
mock_iso_entry.close()
mock_sh_entry = open(mock_sh_path, "w+")
mock_sh_entry.write(mock_sh_template_output)
mock_sh_entry.close()
iso_template_entry = open(iso_template_path, "w+")
iso_template_entry.write(iso_template_output)
iso_template_entry.close()
os.chmod(mock_sh_path, 0o755)
os.chmod(iso_template_path, 0o755)
def run_lorax(self):
"""
This actually runs lorax on this system. It will call the right scripts
to do so.
"""
lorax_cmd = '/bin/bash /var/tmp/isobuild.sh'
self.log.info('Starting lorax...')
p = subprocess.call(shlex.split(lorax_cmd))
if p != 0:
            self.log.error('An error occurred during execution.')
self.log.error('See the logs for more information.')
raise SystemExit()
def run_pull_lorax_artifacts(self):
"""
        Pulls the required artifacts and unpacks them to work/lorax/$arch
"""
# Determine if we're only managing one architecture out of all of them.
# It does not hurt to do everything at once. But the option is there.
unpack_single_arch = False
arches_to_unpack = self.arches
if self.arch:
unpack_single_arch = True
arches_to_unpack = [self.arch]
self.log.info(Color.INFO + 'Determining the latest pulls...')
if self.s3:
latest_artifacts = Shared.s3_determine_latest(
self.s3_bucket,
self.release,
self.arches,
'tar.gz',
'lorax',
self.log
)
else:
latest_artifacts = Shared.reqs_determine_latest(
self.s3_bucket_url,
self.release,
self.arches,
'tar.gz',
'lorax',
self.log
)
self.log.info(Color.INFO + 'Downloading requested artifact(s)')
for arch in arches_to_unpack:
lorax_arch_dir = os.path.join(
self.lorax_work_dir,
arch
)
source_path = latest_artifacts[arch]
full_drop = '{}/lorax-{}-{}.tar.gz'.format(
lorax_arch_dir,
self.release,
arch
)
if not os.path.exists(lorax_arch_dir):
os.makedirs(lorax_arch_dir, exist_ok=True)
self.log.info(
'Downloading artifact for ' + Color.BOLD + arch + Color.END
)
if self.s3:
Shared.s3_download_artifacts(
self.force_download,
self.s3_bucket,
source_path,
full_drop,
self.log
)
else:
Shared.reqs_download_artifacts(
self.force_download,
self.s3_bucket_url,
source_path,
full_drop,
self.log
)
self.log.info(Color.INFO + 'Download phase completed')
self.log.info(Color.INFO + 'Beginning unpack phase...')
for arch in arches_to_unpack:
tarname = 'lorax-{}-{}.tar.gz'.format(
self.release,
arch
)
tarball = os.path.join(
self.lorax_work_dir,
arch,
tarname
)
if not os.path.exists(tarball):
self.log.error(Color.FAIL + 'Artifact does not exist: ' + tarball)
continue
self._unpack_artifacts(self.force_unpack, arch, tarball)
self.log.info(Color.INFO + 'Unpack phase completed')
self.log.info(Color.INFO + 'Beginning image variant phase')
for arch in arches_to_unpack:
self.log.info(
'Copying base lorax for ' + Color.BOLD + arch + Color.END
)
for variant in self.iso_map['images']:
self._copy_lorax_to_variant(self.force_unpack, arch, variant)
self._copy_boot_to_work(self.force_unpack, arch)
self.log.info(Color.INFO + 'Image variant phase completed')
self.log.info(Color.INFO + 'Beginning treeinfo phase')
for arch in arches_to_unpack:
for variant in self.iso_map['images']:
self.log.info(
'Configuring treeinfo and discinfo for %s%s %s%s' % (Color.BOLD, arch, variant, Color.END)
)
self._treeinfo_wrapper(arch, variant)
# Do a dirsync for non-disc data
if not self.iso_map['images'][variant]['disc']:
self.log.info(
'Syncing repo data and images for %s%s%s' % (Color.BOLD, variant, Color.END)
)
self._copy_nondisc_to_repo(self.force_unpack, arch, variant)
def _unpack_artifacts(self, force_unpack, arch, tarball):
"""
        Unpack the requested artifact(s)
"""
unpack_dir = os.path.join(self.lorax_work_dir, arch)
if not force_unpack:
file_check = os.path.join(unpack_dir, 'lorax/.treeinfo')
if os.path.exists(file_check):
self.log.warn(Color.WARN + 'Artifact (' + arch + ') already unpacked')
return
self.log.info('Unpacking %s' % tarball)
with tarfile.open(tarball) as t:
t.extractall(unpack_dir)
t.close()
def _copy_lorax_to_variant(self, force_unpack, arch, image):
"""
Copy to variants for easy access of mkiso and copying to compose dirs
"""
src_to_image = os.path.join(
self.lorax_work_dir,
arch,
'lorax'
)
iso_to_go = os.path.join(
self.iso_work_dir,
arch
)
if not os.path.exists(os.path.join(src_to_image, '.treeinfo')):
self.log.error(Color.FAIL + 'Lorax base image does not exist')
return
path_to_image = os.path.join(
self.lorax_work_dir,
arch,
image
)
if not force_unpack:
file_check = os.path.join(path_to_image, '.treeinfo')
if os.path.exists(file_check):
self.log.warn(Color.WARN + 'Lorax image for ' + image + ' already exists')
return
self.log.info('Copying base lorax to %s directory...' % image)
try:
shutil.copytree(src_to_image, path_to_image, copy_function=shutil.copy2, dirs_exist_ok=True)
        except Exception:
self.log.error('%s already exists??' % image)
if self.iso_map['images'][image]['disc']:
self.log.info('Removing boot.iso from %s' % image)
try:
os.remove(path_to_image + '/images/boot.iso')
os.remove(path_to_image + '/images/boot.iso.manifest')
            except OSError:
self.log.error(
'[' + Color.BOLD + Color.YELLOW + 'FAIL' + Color.END + '] ' +
'Cannot remove boot.iso'
)
def _copy_boot_to_work(self, force_unpack, arch):
src_to_image = os.path.join(
self.lorax_work_dir,
arch,
'lorax'
)
iso_to_go = os.path.join(
self.iso_work_dir,
arch
)
path_to_src_image = '{}/{}'.format(
src_to_image,
'/images/boot.iso'
)
rclevel = ''
if self.release_candidate:
rclevel = '-' + self.rclvl
discname = '{}-{}.{}{}-{}-{}.iso'.format(
self.shortname,
self.major_version,
self.minor_version,
rclevel,
arch,
'boot'
)
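        # With illustrative values this yields something like
        # "Rocky-9.1-x86_64-boot.iso", or "Rocky-9.1-RC1-x86_64-boot.iso"
        # when a release candidate level is set.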
isobootpath = '{}/{}'.format(
iso_to_go,
discname
)
manifest = '{}.{}'.format(
isobootpath,
'manifest'
)
if not force_unpack:
file_check = isobootpath
if os.path.exists(file_check):
self.log.warn(Color.WARN + 'Boot image (' + discname + ') already exists')
return
self.log.info('Copying %s boot iso to work directory...' % arch)
os.makedirs(iso_to_go, exist_ok=True)
shutil.copy2(path_to_src_image, isobootpath)
if os.path.exists(path_to_src_image + '.manifest'):
shutil.copy2(path_to_src_image + '.manifest', manifest)
self.log.info('Creating checksum for %s boot iso...' % arch)
checksum = Shared.get_checksum(isobootpath, self.checksum, self.log)
if not checksum:
self.log.error(Color.FAIL + isobootpath + ' not found! Are you sure we copied it?')
return
with open(isobootpath + '.CHECKSUM', "w+") as c:
c.write(checksum)
c.close()
def _copy_nondisc_to_repo(self, force_unpack, arch, repo):
"""
Syncs data from a non-disc set of images to the appropriate repo. Repo
and image MUST match names for this to work.
"""
pathway = os.path.join(
self.compose_latest_sync,
repo,
arch,
'os'
)
kspathway = os.path.join(
self.compose_latest_sync,
repo,
arch,
'kickstart'
)
src_to_image = os.path.join(
self.lorax_work_dir,
arch,
repo
)
if not os.path.exists(pathway):
self.log.error(Color.FAIL +
                    'Repo and image variant either do NOT match or do ' +
                    'NOT exist. Are you sure you have synced the repository?'
)
if not force_unpack:
found_files = []
for y in ArchCheck.archfile[arch]:
imgpath = os.path.join(
pathway,
y
)
if os.path.exists(imgpath):
found_files.append(y)
if os.path.exists(pathway + '/images/boot.iso'):
found_files.append('/images/boot.iso')
if len(found_files) > 0:
                self.log.warn(Color.WARN + 'Images and data for ' + repo + ' and ' + arch + ' already exist.')
return
self.log.info(Color.INFO + 'Copying images and data for ' + repo + ' ' + arch)
try:
shutil.copytree(src_to_image, pathway, copy_function=shutil.copy2, dirs_exist_ok=True)
shutil.copytree(src_to_image, kspathway, copy_function=shutil.copy2, dirs_exist_ok=True)
        except Exception:
self.log.error('%s already exists??' % repo)
def run_boot_sync(self):
"""
This unpacks into BaseOS/$arch/os, assuming there's no data actually
there. There should be checks.
1. Sync from work/lorax/$arch to work/lorax/$arch/dvd
2. Sync from work/lorax/$arch to work/lorax/$arch/minimal
3. Sync from work/lorax/$arch to BaseOS/$arch/os
4. Modify (3) .treeinfo
5. Modify (1) .treeinfo, keep out boot.iso checksum
6. Create a .treeinfo for AppStream
"""
unpack_single_arch = False
arches_to_unpack = self.arches
if self.arch:
unpack_single_arch = True
arches_to_unpack = [self.arch]
self._sync_boot(force_unpack=self.force_unpack, arch=self.arch, image=None)
#self._treeinfo_write(arch=self.arch)
def _sync_boot(self, force_unpack, arch, image):
"""
        Syncs the unpacked lorax output into the requested image directory.
"""
self.log.info('Copying lorax to %s directory...' % image)
# checks here, report that it already exists
def _treeinfo_wrapper(self, arch, variant):
"""
        Ensure treeinfo and discinfo are written correctly based on the variant
passed. Each file should be configured similarly but also differently
from the next. The Shared module does have a .treeinfo writer, but it
is for basic use. Eventually it'll be expanded to handle this scenario.
"""
image = os.path.join(self.lorax_work_dir, arch, variant)
imagemap = self.iso_map['images'][variant]
data = {
'arch': arch,
'variant': variant,
'variant_path': image,
'checksum': self.checksum,
'distname': self.distname,
'fullname': self.fullname,
'shortname': self.shortname,
'release': self.release,
'timestamp': self.timestamp,
}
try:
Shared.treeinfo_modify_write(data, imagemap, self.log)
except Exception as e:
self.log.error(Color.FAIL + 'There was an error writing treeinfo.')
self.log.error(e)
    # The next set of functions is loosely borrowed (in concept) from pungi.
    # Some stuff may be combined/mixed together, other things may be
    # simplified or reduced in nature.
def run_build_extra_iso(self):
"""
Builds DVD images based on the data created from the initial lorax on
each arch. This should NOT be called during the usual run() section.
"""
sync_root = self.compose_latest_sync
self.log.info(Color.INFO + 'Starting Extra ISOs phase')
if not os.path.exists(self.compose_base):
self.log.info(Color.FAIL + 'The compose directory MUST be here. Cannot continue.')
raise SystemExit()
self._extra_iso_build_wrap()
self.log.info('Compose repo directory: %s' % sync_root)
self.log.info('ISO result directory: %s/$arch' % self.lorax_work_dir)
self.log.info(Color.INFO + 'Extra ISO phase completed.')
def _extra_iso_build_wrap(self):
"""
Try to figure out where the build is going, we only support mock for
now.
"""
work_root = os.path.join(
self.compose_latest_dir,
'work'
)
arches_to_build = self.arches
if self.arch:
arches_to_build = [self.arch]
images_to_build = self.iso_map['images']
if self.extra_iso:
images_to_build = [self.extra_iso]
for y in images_to_build:
if 'isoskip' in self.iso_map['images'][y] and self.iso_map['images'][y]['isoskip']:
self.log.info(Color.WARN + 'Skipping ' + y + ' image')
continue
            # Kind of hacky, but if we decide to have more than boot/dvd ISOs,
            # we need to make sure volname matches the initial lorax image,
            # whose volid contains "dvd". In other words, the file name
            # doesn't always equate to the volume ID.
if 'volname' in self.iso_map['images'][y]:
volname = self.iso_map['images'][y]['volname']
else:
volname = y
for a in arches_to_build:
lorax_path = os.path.join(self.lorax_work_dir, a, 'lorax', '.treeinfo')
image_path = os.path.join(self.lorax_work_dir, a, y, '.treeinfo')
if not os.path.exists(image_path):
self.log.error(Color.FAIL + 'Lorax data not found for ' + y + '. Skipping.')
if not os.path.exists(lorax_path):
self.log.error(Color.FAIL + 'Lorax not found at all. This is considered fatal.')
raise SystemExit()
grafts = self._generate_graft_points(
a,
y,
self.iso_map['images'][y]['repos'],
)
self._extra_iso_local_config(a, y, grafts, work_root, volname)
if self.extra_iso_mode == 'local':
self._extra_iso_local_run(a, y, work_root)
elif self.extra_iso_mode == 'podman':
continue
else:
self.log.error(Color.FAIL + 'Mode specified is not valid.')
raise SystemExit()
if self.extra_iso_mode == 'podman':
self._extra_iso_podman_run(arches_to_build, images_to_build, work_root)
def _extra_iso_local_config(self, arch, image, grafts, work_root, volname):
"""
Local ISO build configuration - This generates the configuration for
both mock and podman entries
"""
self.log.info('Generating Extra ISO configuration and script')
entries_dir = os.path.join(work_root, "entries")
boot_iso = os.path.join(work_root, "lorax", arch, "lorax/images/boot.iso")
mock_iso_template = self.tmplenv.get_template('isomock.tmpl.cfg')
mock_sh_template = self.tmplenv.get_template('extraisobuild.tmpl.sh')
iso_template = self.tmplenv.get_template('buildExtraImage.tmpl.sh')
xorriso_template = self.tmplenv.get_template('xorriso.tmpl.txt')
mock_iso_path = '/var/tmp/lorax-{}.cfg'.format(self.major_version)
mock_sh_path = '{}/extraisobuild-{}-{}.sh'.format(entries_dir, arch, image)
iso_template_path = '{}/buildExtraImage-{}-{}.sh'.format(entries_dir, arch, image)
xorriso_template_path = '{}/xorriso-{}-{}.txt'.format(entries_dir, arch, image)
log_root = os.path.join(
work_root,
"logs",
self.date_stamp
)
if not os.path.exists(log_root):
os.makedirs(log_root, exist_ok=True)
log_path_command = '| tee -a {}/{}-{}.log'.format(log_root, arch, image)
        # This is kind of a hack. Installing xorrisofs sets the alternatives
        # to it, so backwards compatibility is more or less guaranteed. But we
        # want to emulate what pungi does as closely as possible, so unless we
        # explicitly ask for xorriso (in el8 and el9), we should NOT be using
        # it. For RLN and el10, we'll use xorriso all the way through. When 8
        # is no longer getting ISOs, we'll remove this section.
required_pkgs = self.required_pkgs.copy()
if self.iso_map['xorrisofs']:
if 'genisoimage' in required_pkgs and 'xorriso' not in required_pkgs:
required_pkgs.append('xorriso')
rclevel = ''
if self.release_candidate:
rclevel = '-' + self.rclvl
datestamp = ''
if self.updated_image:
            datestamp = '-' + self.updated_image_date
volid = '{}-{}-{}{}-{}-{}'.format(
self.shortname,
self.major_version,
self.minor_version,
rclevel,
arch,
volname
)
isoname = '{}-{}.{}{}-{}-{}.iso'.format(
self.shortname,
self.major_version,
self.minor_version,
rclevel,
arch,
image
)
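        # With illustrative values these render as volid "Rocky-9-1-x86_64-dvd"
        # and isoname "Rocky-9.1-x86_64-dvd.iso".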
lorax_pkg_cmd = '/usr/bin/dnf install {} -y {}'.format(
' '.join(required_pkgs),
log_path_command
)
mock_iso_template_output = mock_iso_template.render(
arch=self.current_arch,
major=self.major_version,
fullname=self.fullname,
shortname=self.shortname,
required_pkgs=required_pkgs,
dist=self.disttag,
repos=self.repolist,
user_agent='{{ user_agent }}',
compose_dir_is_here=True,
compose_dir=self.compose_root,
)
mock_sh_template_output = mock_sh_template.render(
arch=self.current_arch,
major=self.major_version,
isolation=self.mock_isolation,
builddir=self.mock_work_root,
shortname=self.shortname,
isoname=isoname,
entries_dir=entries_dir,
image=image,
)
opts = {
'arch': arch,
'iso_name': isoname,
'volid': volid,
'graft_points': grafts,
'use_xorrisofs': self.iso_map['xorrisofs'],
'iso_level': self.iso_map['iso_level'],
}
if opts['use_xorrisofs']:
# Generate a xorriso compatible dialog
xp = open(grafts)
xorpoint = xp.read()
xp.close()
xorriso_template_output = xorriso_template.render(
boot_iso=boot_iso,
isoname=isoname,
volid=volid,
graft=xorpoint,
arch=arch,
)
xorriso_template_entry = open(xorriso_template_path, "w+")
xorriso_template_entry.write(xorriso_template_output)
xorriso_template_entry.close()
opts['graft_points'] = xorriso_template_path
make_image = '{} {}'.format(
Shared.get_make_image_cmd(
opts,
self.hfs_compat
),
log_path_command
)
isohybrid = Shared.get_isohybrid_cmd(opts)
implantmd5 = Shared.get_implantisomd5_cmd(opts)
make_manifest = Shared.get_manifest_cmd(opts)
iso_template_output = iso_template.render(
extra_iso_mode=self.extra_iso_mode,
arch=arch,
compose_work_iso_dir=self.iso_work_dir,
make_image=make_image,
isohybrid=isohybrid,
implantmd5=implantmd5,
make_manifest=make_manifest,
lorax_pkg_cmd=lorax_pkg_cmd,
isoname=isoname,
)
mock_iso_entry = open(mock_iso_path, "w+")
mock_iso_entry.write(mock_iso_template_output)
mock_iso_entry.close()
mock_sh_entry = open(mock_sh_path, "w+")
mock_sh_entry.write(mock_sh_template_output)
mock_sh_entry.close()
iso_template_entry = open(iso_template_path, "w+")
iso_template_entry.write(iso_template_output)
iso_template_entry.close()
os.chmod(mock_sh_path, 0o755)
os.chmod(iso_template_path, 0o755)
def _extra_iso_local_run(self, arch, image, work_root):
"""
Runs the actual local process using mock. This is for running in
peridot or running on a machine that does not have podman, but does
have mock available.
"""
entries_dir = os.path.join(work_root, "entries")
extra_iso_cmd = '/bin/bash {}/extraisobuild-{}-{}.sh'.format(entries_dir, arch, image)
self.log.info('Starting mock build...')
p = subprocess.call(shlex.split(extra_iso_cmd))
if p != 0:
            self.log.error('An error occurred during execution.')
self.log.error('See the logs for more information.')
raise SystemExit()
# Copy it if the compose dir is here?
def _extra_iso_podman_run(self, arches, images, work_root):
"""
Does all the image building in podman containers to parallelize the
        builds. This can be called instead of looping mock, or when you are
        not running in peridot. This gives the Release Engineer a little more
flexibility if they care enough.
This honestly assumes you are running this on a machine that has access
to the compose directories. It's the same as if you were doing a
reposync of the repositories.
"""
cmd = Shared.podman_cmd(self.log)
entries_dir = os.path.join(work_root, "entries")
isos_dir = os.path.join(work_root, "isos")
bad_exit_list = []
checksum_list = []
for i in images:
entry_name_list = []
image_name = i
arch_sync = arches.copy()
for a in arch_sync:
entry_name = 'buildExtraImage-{}-{}.sh'.format(a, i)
entry_name_list.append(entry_name)
rclevel = ''
if self.release_candidate:
rclevel = '-' + self.rclvl
isoname = '{}/{}-{}.{}{}-{}-{}.iso'.format(
a,
self.shortname,
self.major_version,
self.minor_version,
rclevel,
a,
i
)
checksum_list.append(isoname)
for pod in entry_name_list:
podman_cmd_entry = '{} run -d -it -v "{}:{}" -v "{}:{}" --name {} --entrypoint {}/{} {}'.format(
cmd,
self.compose_root,
self.compose_root,
entries_dir,
entries_dir,
pod,
entries_dir,
pod,
self.container
)
process = subprocess.call(
shlex.split(podman_cmd_entry),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
join_all_pods = ' '.join(entry_name_list)
time.sleep(3)
self.log.info(Color.INFO + 'Building ' + i + ' ...')
pod_watcher = '{} wait {}'.format(
cmd,
join_all_pods
)
watch_man = subprocess.call(
shlex.split(pod_watcher),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
# After the above is done, we'll check each pod process for an exit
# code.
pattern = "Exited (0)"
for pod in entry_name_list:
checkcmd = '{} ps -f status=exited -f name={}'.format(
cmd,
pod
)
podcheck = subprocess.Popen(
checkcmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True
)
output, errors = podcheck.communicate()
if 'Exited (0)' not in output.decode():
self.log.error(Color.FAIL + pod)
bad_exit_list.append(pod)
rmcmd = '{} rm {}'.format(
cmd,
join_all_pods
)
rmpod = subprocess.Popen(
rmcmd,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
shell=True
)
entry_name_list.clear()
for p in checksum_list:
path = os.path.join(isos_dir, p)
if os.path.exists(path):
self.log.info(Color.INFO + 'Performing checksum for ' + p)
checksum = Shared.get_checksum(path, self.checksum, self.log)
if not checksum:
self.log.error(Color.FAIL + path + ' not found! Are you sure it was built?')
with open(path + '.CHECKSUM', "w+") as c:
c.write(checksum)
c.close()
self.log.info(Color.INFO + 'Building ' + i + ' completed')
if len(bad_exit_list) == 0:
self.log.info(Color.INFO + 'Images built successfully.')
else:
self.log.error(
Color.FAIL +
'There were issues with the work done. As a result, ' +
'some/all ISOs may not exist.'
)
def _generate_graft_points(
self,
arch,
iso,
variants,
):
"""
Get a list of packages for an extras ISO. This should NOT be called
during the usual run() section.
"""
lorax_base_dir = os.path.join(self.lorax_work_dir, arch)
global_work_dir = os.path.join(self.compose_latest_dir, "work/global")
self.log.info(Color.INFO + 'Generating graft points for extra iso: (' + arch + ') ' + iso)
files = {}
# This is the data we need to actually boot
lorax_for_var = os.path.join(lorax_base_dir, iso)
if not os.path.exists(lorax_for_var + '/.treeinfo'):
self.log.info(
Color.FAIL +
'!! .treeinfo is missing, does this variant actually exist? !!'
)
return
# extra files
extra_files_for_var = os.path.join(
global_work_dir,
"extra-files"
)
# actually get the boot data
files = self._get_grafts([lorax_for_var, extra_files_for_var])
# This is to get all the packages for each repo
for repo in variants:
pkg_for_var = os.path.join(
self.compose_latest_sync,
repo,
arch,
self.structure['packages']
)
rd_for_var = os.path.join(
self.compose_latest_sync,
repo,
arch,
self.structure['repodata']
)
for k, v in self._get_grafts([pkg_for_var]).items():
files[os.path.join(repo, "Packages", k)] = v
for k, v in self._get_grafts([rd_for_var]).items():
files[os.path.join(repo, "repodata", k)] = v
grafts = '{}/{}-{}-grafts'.format(
lorax_base_dir,
iso,
arch
)
xorrs = '{}/xorriso-{}.txt'.format(
lorax_base_dir,
arch
)
self._write_grafts(
grafts,
xorrs,
files,
exclude=["*/lost+found", "*/boot.iso"]
)
if self.iso_map['xorrisofs']:
grafters = xorrs
else:
grafters = grafts
return grafters
def _get_grafts(self, paths, exclusive_paths=None, exclude=None):
"""
Actually get some grafts (get_iso_contents), called by generate grafts
"""
result = {}
exclude = exclude or []
exclusive_paths = exclusive_paths or []
for p in paths:
if isinstance(p, dict):
tree = p
else:
tree = self._scanning(p)
result = self._merging(result, tree)
for p in exclusive_paths:
tree = self._scanning(p)
result = self._merging(result, tree, exclusive=True)
# Resolves possible symlinks
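        # If a link target resolves to somewhere outside the compose base,
        # substitute the absolute target path so the graft entry references
        # the real file rather than a relative link.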
for key in result.keys():
path = result[key]
if os.path.islink(path):
real_path = os.readlink(path)
abspath = os.path.normpath(os.path.join(os.path.dirname(path), real_path))
if not abspath.startswith(self.compose_base):
result[key] = abspath
return result
def _write_grafts(self, filepath, xorrspath, u, exclude=None):
"""
Write out the graft points
2022-06-22 00:21:37 +00:00
"""
seen = set()
exclude = exclude or []
result = {}
for zl in sorted(u, reverse=True):
dirn = os.path.dirname(zl)
if not zl.endswith("/"):
result[zl] = u[zl]
seen.add(dirn)
continue
found = False
for j in seen:
if j.startswith(dirn):
found = True
break
if not found:
result[zl] = u[zl]
seen.add(dirn)
if self.iso_map['xorrisofs']:
fx = open(xorrspath, "w")
for zm in sorted(result, key=self._sorting):
found = False
for excl in exclude:
if fnmatch(zm, excl):
found = True
break
if found:
continue
fx.write("-map %s %s\n" % (u[zm], zm))
fx.close()
else:
fh = open(filepath, "w")
for zl in sorted(result, key=self._sorting):
found = False
for excl in exclude:
if fnmatch(zl, excl):
found = True
break
if found:
continue
fh.write("%s=%s\n" % (zl, u[zl]))
fh.close()
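        # Example lines written above (illustrative paths only):
        #   graft list:     AppStream/Packages/foo.rpm=/compose/AppStream/x86_64/os/Packages/foo.rpm
        #   xorriso dialog: -map /compose/AppStream/x86_64/os/Packages/foo.rpm AppStream/Packages/foo.rpm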
def _scanning(self, p):
"""
Scan tree
"""
path = os.path.abspath(p)
result = {}
for root, dirs, files in os.walk(path):
for file in files:
abspath = os.path.join(root, file)
relpath = kobo.shortcuts.relative_path(abspath, path.rstrip("/") + "/")
result[relpath] = abspath
# Include empty directories too
if root != path:
abspath = os.path.join(root, "")
relpath = kobo.shortcuts.relative_path(abspath, path.rstrip("/") + "/")
result[relpath] = abspath
return result
def _merging(self, tree_a, tree_b, exclusive=False):
"""
Merge tree
"""
result = tree_b.copy()
all_dirs = set(
[os.path.dirname(dirn).rstrip("/") for dirn in result if os.path.dirname(dirn) != ""]
)
for dirn in tree_a:
dn = os.path.dirname(dirn)
if exclusive:
match = False
for x in all_dirs:
if dn == x or dn.startswith("%s/" % x):
match = True
break
if match:
continue
if dirn in result:
continue
result[dirn] = tree_a[dirn]
return result
def _sorting(self, k):
"""
Sorting using the is_rpm and is_image funcs. Images are first, extras
        next, RPMs last.
2022-06-22 00:21:37 +00:00
"""
rolling = (0 if self._is_image(k) else 2 if self._is_rpm(k) else 1, k)
return rolling
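    # e.g. _sorting("images/install.img") -> (0, ...), _sorting("EULA") -> (1, ...),
    # and _sorting("AppStream/Packages/foo.rpm") -> (2, ...), so images sort
    # first, extra files next, and RPMs last.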
def _is_rpm(self, k):
"""
Is this an RPM? :o
"""
result = k.endswith(".rpm")
return result
def _is_image(self, k):
"""
Is this an image? :o
"""
if (
k.startswith("images/") or
k.startswith("isolinux/") or
k.startswith("EFI/") or
k.startswith("etc/") or
k.startswith("ppc/")
):
return True
if (
k.endswith(".img") or
k.endswith(".ins")
):
return True
return False
def _get_vol_id(self):
"""
Gets a volume ID
"""
def run_pull_generic_images(self):
"""
Pulls generic images built in peridot and places them where they need
to be. This relies on a list called "cloudimages" in the version
configuration.
"""
unpack_single_arch = False
arches_to_unpack = self.arches
if self.arch:
unpack_single_arch = True
arches_to_unpack = [self.arch]
for imagename in self.cloudimages['images']:
self.log.info(Color.INFO + 'Determining the latest images for ' + imagename + ' ...')
formattype = self.cloudimages['images'][imagename]['format']
if self.s3:
latest_artifacts = Shared.s3_determine_latest(
self.s3_bucket,
self.release,
arches_to_unpack,
formattype,
imagename,
self.log
)
else:
latest_artifacts = Shared.reqs_determine_latest(
self.s3_bucket_url,
self.release,
arches_to_unpack,
formattype,
imagename,
self.log
)
if not len(latest_artifacts) > 0:
self.log.warn(Color.WARN + 'No images found.')
continue
self.log.info(Color.INFO + 'Attempting to download requested artifacts')
for arch in arches_to_unpack:
image_arch_dir = os.path.join(
self.image_work_dir,
arch
)
if arch not in latest_artifacts.keys():
self.log.warn(Color.WARN + 'Artifact for ' + imagename +
' ' + arch + ' (' + formattype + ') does not exist.')
continue
source_path = latest_artifacts[arch]
drop_name = source_path.split('/')[-1]
checksum_name = drop_name + '.CHECKSUM'
full_drop = '{}/{}'.format(
image_arch_dir,
drop_name
)
checksum_drop = '{}/{}.CHECKSUM'.format(
image_arch_dir,
drop_name
)
if not os.path.exists(image_arch_dir):
os.makedirs(image_arch_dir, exist_ok=True)
self.log.info('Downloading artifact for ' + Color.BOLD + arch + Color.END)
if self.s3:
Shared.s3_download_artifacts(
self.force_download,
self.s3_bucket,
source_path,
full_drop,
self.log
)
else:
Shared.reqs_download_artifacts(
self.force_download,
self.s3_bucket_url,
source_path,
full_drop,
self.log
)
self.log.info('Creating checksum ...')
checksum = Shared.get_checksum(full_drop, self.checksum, self.log)
if not checksum:
self.log.error(Color.FAIL + full_drop + ' not found! Are you sure we copied it?')
continue
with open(checksum_drop, 'w+') as c:
c.write(checksum)
c.close()
self.log.info('Creating a symlink to latest image...')
latest_name = '{}/{}-{}-{}.latest.{}.{}'.format(
image_arch_dir,
self.shortname,
self.major_version,
imagename,
arch,
formattype
)
latest_path = latest_name.split('/')[-1]
latest_checksum = '{}/{}-{}-{}.latest.{}.{}.CHECKSUM'.format(
image_arch_dir,
self.shortname,
self.major_version,
imagename,
arch,
formattype
)
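                # e.g. "Rocky-9-GenericCloud.latest.x86_64.qcow2" (the image
                # name and format here are illustrative; they come from the
                # cloudimages configuration).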
# For some reason python doesn't have a "yeah just change this
# link" part of the function
if os.path.exists(latest_name):
os.remove(latest_name)
os.symlink(drop_name, latest_name)
self.log.info('Creating checksum for latest symlinked image...')
shutil.copy2(checksum_drop, latest_checksum)
with open(latest_checksum, 'r') as link:
checkdata = link.read()
checkdata = checkdata.replace(drop_name, latest_path)
with open(latest_checksum, 'w+') as link:
link.write(checkdata)
link.close()
self.log.info(Color.INFO + 'Image download phase completed')
class LiveBuild:
"""
This helps us build the live images for Rocky Linux. The mode is "simple"
by default when using mock.
"""
def __init__(
self,
rlvars,
config,
major,
hfs_compat: bool = False,
force_download: bool = False,
isolation: str = 'simple',
live_iso_mode: str = 'local',
compose_dir_is_here: bool = False,
hashed: bool = False,
image=None,
justcopyit: bool = False,
force_build: bool = False,
logger=None
):
self.image = image
self.justcopyit = justcopyit
self.fullname = rlvars['fullname']
self.distname = config['distname']
self.shortname = config['shortname']
self.current_arch = config['arch']
# Relevant config items
self.major_version = major
self.compose_dir_is_here = compose_dir_is_here
self.date_stamp = config['date_stamp']
self.date = time.strftime("%Y%m%d", time.localtime())
self.compose_root = config['compose_root']
self.compose_base = config['compose_root'] + "/" + major
self.current_arch = config['arch']
self.livemap = rlvars['livemap']
self.required_pkgs = rlvars['livemap']['required_pkgs']
self.mock_work_root = config['mock_work_root']
self.live_result_root = config['mock_work_root'] + "/lmc"
self.mock_isolation = isolation
self.force_download = force_download
self.force_build = force_build
self.live_iso_mode = live_iso_mode
self.checksum = rlvars['checksum']
self.profile = rlvars['profile']
self.hashed = hashed
# Relevant major version items
self.arch = config['arch']
self.arches = rlvars['allowed_arches']
self.release = rlvars['revision']
self.minor_version = rlvars['minor']
self.revision = rlvars['revision'] + "-" + rlvars['rclvl']
self.rclvl = rlvars['rclvl']
self.disttag = config['dist']
self.repos = rlvars['iso_map']['lorax']['repos']
self.repo_base_url = config['repo_base_url']
self.project_id = rlvars['project_id']
self.structure = rlvars['structure']
self.bugurl = rlvars['bugurl']
self.container = config['container']
if 'container' in rlvars and len(rlvars['container']) > 0:
self.container = rlvars['container']
# Templates
file_loader = FileSystemLoader(f"{_rootdir}/templates")
self.tmplenv = Environment(loader=file_loader)
self.compose_latest_dir = os.path.join(
config['compose_root'],
major,
"latest-{}-{}".format(
self.shortname,
self.profile
)
)
self.compose_latest_sync = os.path.join(
self.compose_latest_dir,
"compose"
)
self.compose_log_dir = os.path.join(
self.compose_latest_dir,
"work/logs"
)
self.live_work_dir = os.path.join(
self.compose_latest_dir,
"work/live"
)
# This is temporary for now.
if logger is None:
self.log = logging.getLogger("iso")
self.log.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s :: %(name)s :: %(message)s',
'%Y-%m-%d %H:%M:%S'
)
handler.setFormatter(formatter)
self.log.addHandler(handler)
self.log.info('live build init')
self.repolist = Shared.build_repo_list(
self.repo_base_url,
self.repos,
self.project_id,
self.current_arch,
self.compose_latest_sync,
self.compose_dir_is_here,
self.hashed
)
self.log.info(self.revision)
if not os.path.exists(self.compose_latest_dir):
self.log.warn(Color.WARN + 'A compose directory was not found ' +
'here. If there is a failure, it may be due to it ' +
'missing. You may want to generate a fake compose if ' +
'you are simply making your own live images and you run ' +
'into any errors beyond this point.'
)
def run_build_live_iso(self):
"""
Builds DVD images based on the data created from the initial lorax on
each arch. This should NOT be called during the usual run() section.
"""
sync_root = self.compose_latest_sync
self.log.info(Color.INFO + 'Starting Live ISOs phase')
# Check that the arch we're assigned is valid...
if self.current_arch not in self.livemap['allowed_arches']:
self.log.error(Color.FAIL + 'Running an unsupported architecture.')
raise SystemExit()
self._live_iso_build_wrap()
self.log.info('Compose repo directory: %s' % sync_root)
self.log.info('Live ISO result directory: %s/$arch' % self.live_work_dir)
self.log.info(Color.INFO + 'Live ISO phase completed.')
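    # Rough usage sketch (illustrative; valid image names are the keys of
    # livemap['ksentry'] in the version configuration):
    #
    #   live = LiveBuild(rlvars, config, major='9', image='Workstation')
    #   live.run_build_live_iso()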
def _live_iso_build_wrap(self):
"""
Prepare and actually build the live images. Based on arguments in self,
we'll either do it on mock in a loop or in podman, just like with the
extra iso phase.
"""
work_root = os.path.join(
self.compose_latest_dir,
'work'
)
images_to_build = list(self.livemap['ksentry'].keys())
if self.image:
images_to_build = [self.image]
self.log.info(
Color.INFO + 'We are planning to build: ' +
', '.join(images_to_build)
)
for i in images_to_build:
self._live_iso_local_config(i, work_root)
if self.live_iso_mode == 'local':
self._live_iso_local_run(self.current_arch, i, work_root)
elif self.live_iso_mode == 'podman':
continue
else:
self.log.error(Color.FAIL + 'Mode specified is not valid.')
raise SystemExit()
if self.live_iso_mode == 'podman':
#self._live_iso_podman_run(self.current_arch, images_to_build, work_root)
self.log.error(Color.FAIL + 'At this time, live images cannot be ' +
'built in podman.')
raise SystemExit()
def _live_iso_local_config(self, image, work_root):
"""
Live ISO build configuration - This generates both mock and podman
entries, regardless of which one is being used.
"""
self.log.info('Generating Live ISO configuration and script')
entries_dir = os.path.join(work_root, "entries")
mock_iso_template = self.tmplenv.get_template('isomock.tmpl.cfg')
mock_sh_template = self.tmplenv.get_template('liveisobuild.tmpl.sh')
iso_template = self.tmplenv.get_template('buildLiveImage.tmpl.sh')
mock_iso_path = '/var/tmp/live-{}.cfg'.format(self.major_version)
mock_sh_path = '{}/liveisobuild-{}-{}.sh'.format(
entries_dir,
self.current_arch,
image
)
iso_template_path = '{}/buildLiveImage-{}-{}.sh'.format(
entries_dir,
self.current_arch,
image
)
log_root = os.path.join(
work_root,
"logs",
self.date_stamp
)
ks_start = self.livemap['ksentry'][image]
if not os.path.exists(log_root):
os.makedirs(log_root, exist_ok=True)
log_path_command = '| tee -a {}/{}-{}.log'.format(
log_root,
self.current_arch,
image
)
required_pkgs = self.livemap['required_pkgs']
volid = '{}-{}-{}-{}'.format(
self.shortname,
self.major_version,
self.minor_version,
image
)
isoname = '{}-{}-{}-{}-{}.iso'.format(
self.shortname,
self.release,
image,
self.current_arch,
self.date
)
live_pkg_cmd = '/usr/bin/dnf install {} -y {}'.format(
' '.join(required_pkgs),
log_path_command
)
git_clone_cmd = '/usr/bin/git clone {} -b {} /builddir/ks {}'.format(
self.livemap['git_repo'],
self.livemap['branch'],
log_path_command
)
make_image_cmd = ('/usr/sbin/livemedia-creator --ks {} --no-virt '
'--resultdir /builddir/lmc --project="{} {}" --make-iso --volid {} '
'--iso-only --iso-name {} --releasever={} --nomacboot {}').format(
'/builddir/ks.cfg',
self.distname,
image,
volid,
isoname,
self.release,
log_path_command
)
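        # With illustrative values the rendered command looks roughly like:
        #   /usr/sbin/livemedia-creator --ks /builddir/ks.cfg --no-virt \
        #     --resultdir /builddir/lmc --project="Rocky Linux Workstation" \
        #     --make-iso --volid Rocky-9-1-Workstation --iso-only \
        #     --iso-name Rocky-9.1-Workstation-x86_64-20220801.iso \
        #     --releasever=9.1 --nomacboot | tee -a <log file>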
mock_iso_template_output = mock_iso_template.render(
arch=self.current_arch,
major=self.major_version,
fullname=self.fullname,
shortname=self.shortname,
required_pkgs=required_pkgs,
dist=self.disttag,
repos=self.repolist,
compose_dir_is_here=True,
user_agent='{{ user_agent }}',
compose_dir=self.compose_root,
)
mock_sh_template_output = mock_sh_template.render(
arch=self.current_arch,
major=self.major_version,
isolation=self.mock_isolation,
builddir=self.mock_work_root,
shortname=self.shortname,
isoname=isoname,
entries_dir=entries_dir,
image=image,
)
iso_template_output = iso_template.render(
live_iso_mode=self.live_iso_mode,
arch=self.current_arch,
compose_live_work_dir=self.live_work_dir,
make_image=make_image_cmd,
live_pkg_cmd=live_pkg_cmd,
isoname=isoname,
major=self.major_version,
git_clone=git_clone_cmd,
ks_file=ks_start,
)
with open(mock_iso_path, "w+") as mip:
mip.write(mock_iso_template_output)
mip.close()
with open(mock_sh_path, "w+") as msp:
msp.write(mock_sh_template_output)
msp.close()
with open(iso_template_path, "w+") as itp:
itp.write(iso_template_output)
itp.close()
os.chmod(mock_sh_path, 0o755)
os.chmod(iso_template_path, 0o755)
def _live_iso_podman_run(self, arch, images, work_root):
"""
Does all the image building in podman containers to parallelize the
        process. This can be called instead of looping mock, or when you are
        not running in peridot. This gives the Release Engineer a little more
flexibility if they care enough.
This honestly assumes you are running this on a machine that has access
to the compose directories. It's the same as if you were doing a
reposync of the repositories.
"""
cmd = Shared.podman_cmd(self.log)
entries_dir = os.path.join(work_root, "entries")
isos_dir = self.live_work_dir
bad_exit_list = []
checksum_list = []
entry_name_list = []
self.log.warn(Color.WARN + 'This mode does not work properly. It will fail.')
for i in images:
image_name = i
entry_name = 'buildLiveImage-{}-{}.sh'.format(arch, i)
entry_name_list.append(entry_name)
isoname = '{}/{}-{}-{}-{}-{}.iso'.format(
arch,
self.shortname,
i,
self.major_version,
arch,
self.date
)
checksum_list.append(isoname)
print(entry_name_list, cmd, entries_dir)
for pod in entry_name_list:
podman_cmd_entry = '{} run -d -it -v "{}:{}" -v "{}:{}" --name {} --entrypoint {}/{} {}'.format(
cmd,
self.compose_root,
self.compose_root,
entries_dir,
entries_dir,
pod,
entries_dir,
pod,
self.container
)
process = subprocess.call(
shlex.split(podman_cmd_entry),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
join_all_pods = ' '.join(entry_name_list)
time.sleep(3)
self.log.info(Color.INFO + 'Building requested live images ...')
pod_watcher = '{} wait {}'.format(
cmd,
join_all_pods
)
watch_man = subprocess.call(
shlex.split(pod_watcher),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
# After the above is done, we'll check each pod process for an exit
# code.
pattern = "Exited (0)"
for pod in entry_name_list:
checkcmd = '{} ps -f status=exited -f name={}'.format(
cmd,
pod
)
podcheck = subprocess.Popen(
checkcmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True
)
output, errors = podcheck.communicate()
if 'Exited (0)' not in output.decode():
self.log.error(Color.FAIL + pod)
bad_exit_list.append(pod)
rmcmd = '{} rm {}'.format(
cmd,
join_all_pods
)
rmpod = subprocess.Popen(
rmcmd,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
shell=True
)
entry_name_list.clear()
for p in checksum_list:
path = os.path.join(isos_dir, p)
if os.path.exists(path):
self.log.info(Color.INFO + 'Performing checksum for ' + p)
checksum = Shared.get_checksum(path, self.checksum, self.log)
if not checksum:
self.log.error(Color.FAIL + path + ' not found! Are you sure it was built?')
with open(path + '.CHECKSUM', "w+") as c:
c.write(checksum)
c.close()
self.log.info(Color.INFO + 'Building live images completed')
if len(bad_exit_list) == 0:
self.log.info(Color.INFO + 'Live images completed successfully.')
else:
self.log.error(
Color.FAIL +
'There were issues with the work done. As a result, ' +
'some or all ISOs may not be copied later.'
)
def _live_iso_local_run(self, arch, image, work_root):
"""
Runs the actual local process using mock. This is for running in
peridot or running on a machine that does not have podman, but does
have mock available.
"""
entries_dir = os.path.join(work_root, "entries")
live_dir_arch = os.path.join(self.live_work_dir, arch)
isoname = '{}-{}-{}-{}-{}.iso'.format(
self.shortname,
self.release,
image,
arch,
self.date
)
live_res_dir = '/var/lib/mock/{}-{}-{}/result'.format(
self.shortname.lower(),
self.major_version,
arch
)
if self.justcopyit:
if os.path.exists(os.path.join(live_dir_arch, isoname)):
self.log.warn(Color.WARN + 'Image already exists.')
if self.force_build:
self.log.warn(Color.WARN + 'Building anyway.')
else:
self.log.warn(Color.WARN + 'Skipping.')
return
live_iso_cmd = '/bin/bash {}/liveisobuild-{}-{}.sh'.format(entries_dir, arch, image)
self.log.info('Starting mock build...')
p = subprocess.call(shlex.split(live_iso_cmd))
if p != 0:
            self.log.error('An error occurred during execution.')
self.log.error('See the logs for more information.')
raise SystemExit()
self.log.warn(
Color.WARN + 'This is meant for builds done in peridot or ' +
'locally for an end user.'
)
self.log.warn(
Color.WARN +
'If you are looping images, your built image may get ' +
'overwritten. Ensure you have justcopyit enabled to avoid this.'
)
if self.justcopyit:
self.log.info(Color.INFO + 'Copying image to work directory')
source_path = os.path.join(live_res_dir, isoname)
dest_path = os.path.join(live_dir_arch, isoname)
os.makedirs(live_dir_arch, exist_ok=True)
shutil.copy2(source_path, dest_path)
self.log.info(Color.INFO + 'Generating checksum')
checksum = Shared.get_checksum(dest_path, self.checksum, self.log)
if not checksum:
self.log.error(Color.FAIL + dest_path + ' not found. Did we copy it?')
return
with open(dest_path + '.CHECKSUM', "w+") as c:
c.write(checksum)
c.close()