# toolkit/iso/empanadas/empanadas/util/iso_utils.py

"""
Builds ISOs for Rocky Linux.
Louis Abel <label AT rockylinux.org>
"""
import logging
import sys
import os
import os.path
import subprocess
import shlex
import time
import tarfile
import shutil
# lazy person's s3 parser
#import requests
#import json
#import xmltodict
# if we can access s3
#import boto3
# relative_path, compute_file_checksums
import kobo.shortcuts
from fnmatch import fnmatch
# This is for treeinfo
from configparser import ConfigParser
from productmd.common import SortedConfigParser
from productmd.images import Image
from productmd.extra_files import ExtraFiles
import productmd.treeinfo
# End treeinfo
from jinja2 import Environment, FileSystemLoader
from empanadas.common import Color, _rootdir
from empanadas.util import Shared, ArchCheck
class IsoBuild:
"""
    This helps us build the generic ISOs for a Rocky Linux release. In
    particular, this is for the boot images.
    There are functions to build the DVD (and potentially other) images. The
    name of each particular build or process function starts with "run".
"""
def __init__(
self,
rlvars,
config,
major,
arch=None,
hfs_compat: bool = False,
rc: bool = False,
s3: bool = False,
force_download: bool = False,
force_unpack: bool = False,
isolation: str = 'auto',
extra_iso=None,
extra_iso_mode: str = 'local',
compose_dir_is_here: bool = False,
hashed: bool = False,
updated_image: bool = False,
image_increment: str = '0',
image=None,
logger=None
):
self.image = image
self.fullname = rlvars['fullname']
self.distname = config['distname']
self.shortname = config['shortname']
# Relevant config items
self.major_version = major
self.compose_dir_is_here = compose_dir_is_here
self.disttag = rlvars['disttag']
self.date_stamp = config['date_stamp']
self.timestamp = time.time()
self.compose_root = config['compose_root']
self.compose_base = config['compose_root'] + "/" + major
self.current_arch = config['arch']
self.required_pkgs = rlvars['iso_map']['lorax']['required_pkgs']
self.mock_work_root = config['mock_work_root']
self.lorax_result_root = config['mock_work_root'] + "/" + "lorax"
self.mock_isolation = isolation
self.iso_map = rlvars['iso_map']
#self.livemap = rlvars['livemap']
self.cloudimages = rlvars['cloudimages']
self.release_candidate = rc
self.s3 = s3
self.force_unpack = force_unpack
self.force_download = force_download
self.extra_iso = extra_iso
self.extra_iso_mode = extra_iso_mode
self.checksum = rlvars['checksum']
self.profile = rlvars['profile']
self.hashed = hashed
self.updated_image = updated_image
self.updated_image_increment = "." + image_increment
self.updated_image_date = (time.strftime("%Y%m%d", time.localtime())
+ self.updated_image_increment)
# Relevant major version items
self.arch = arch
self.arches = rlvars['allowed_arches']
self.release = rlvars['revision']
self.minor_version = rlvars['minor']
self.revision_level = rlvars['revision'] + "-" + rlvars['rclvl']
self.revision = rlvars['revision']
self.rclvl = rlvars['rclvl']
self.repos = rlvars['iso_map']['lorax']['repos']
self.repo_base_url = config['repo_base_url']
self.project_id = rlvars['project_id']
self.structure = rlvars['structure']
self.bugurl = rlvars['bugurl']
self.extra_files = rlvars['extra_files']
self.container = config['container']
if 'container' in rlvars and len(rlvars['container']) > 0:
self.container = rlvars['container']
# all bucket related info
self.s3_region = config['aws_region']
self.s3_bucket = config['bucket']
self.s3_bucket_url = config['bucket_url']
#if s3:
# self.s3 = boto3.client('s3')
# arch specific
self.hfs_compat = hfs_compat
# Templates
file_loader = FileSystemLoader(f"{_rootdir}/templates")
self.tmplenv = Environment(loader=file_loader)
self.compose_latest_dir = os.path.join(
config['compose_root'],
major,
"latest-{}-{}".format(
self.shortname,
self.profile
)
)
self.compose_latest_sync = os.path.join(
self.compose_latest_dir,
"compose"
)
self.compose_log_dir = os.path.join(
self.compose_latest_dir,
"work/logs"
)
self.iso_work_dir = os.path.join(
self.compose_latest_dir,
"work/isos"
)
self.live_work_dir = os.path.join(
self.compose_latest_dir,
"work/live"
)
self.image_work_dir = os.path.join(
self.compose_latest_dir,
"work/images"
)
self.lorax_work_dir = os.path.join(
self.compose_latest_dir,
"work/lorax"
)
# This is temporary for now.
if logger is None:
self.log = logging.getLogger("iso")
self.log.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s :: %(name)s :: %(message)s',
'%Y-%m-%d %H:%M:%S'
)
handler.setFormatter(formatter)
self.log.addHandler(handler)
self.log.info('iso build init')
self.repolist = Shared.build_repo_list(
self.repo_base_url,
self.repos,
self.project_id,
self.current_arch,
self.compose_latest_sync,
self.compose_dir_is_here,
self.hashed
)
self.log.info(self.revision_level)
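    # A minimal usage sketch, assuming empanadas.common exposes the usual
    # config dict and per-release rldict as elsewhere in the toolkit
    # (hypothetical values; the real dicts carry every key referenced in
    # __init__ above):
    #
    #   from empanadas.common import config, rldict
    #   builder = IsoBuild(rldict['9'], config, major='9')
    #   builder.run()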
def run(self):
work_root = os.path.join(
self.compose_latest_dir,
'work'
)
sync_root = self.compose_latest_sync
log_root = os.path.join(
work_root,
"logs"
)
self.iso_build()
self.log.info('Compose repo directory: %s' % sync_root)
self.log.info('ISO Build Logs: /var/lib/mock/{}-{}-{}/result'.format(
self.shortname.lower(), self.major_version, self.current_arch)
)
self.log.info('ISO Build completed.')
def iso_build(self):
"""
This does the general ISO building for the current running
architecture. This generates the mock config and the general script
needed to get this part running.
"""
# Check for local build, build accordingly
# Check for arch specific build, build accordingly
# local AND arch cannot be used together, local supersedes. print
# warning.
self.generate_iso_scripts()
self.run_lorax()
def generate_iso_scripts(self):
"""
        Generates the scripts needed to run lorax in mock, as well as to
package up the results.
"""
self.log.info('Generating ISO configuration and scripts')
mock_iso_template = self.tmplenv.get_template('isomock.tmpl.cfg')
mock_sh_template = self.tmplenv.get_template('isobuild.tmpl.sh')
iso_template = self.tmplenv.get_template('buildImage.tmpl.sh')
mock_iso_path = '/var/tmp/lorax-' + self.major_version + '.cfg'
mock_sh_path = '/var/tmp/isobuild.sh'
iso_template_path = '/var/tmp/buildImage.sh'
rclevel = ''
if self.release_candidate:
rclevel = '-' + self.rclvl
        # This is kind of a hack. Installing xorrisofs sets the alternatives
        # to it, so backwards compatibility is sort of guaranteed. But we want
        # to emulate what pungi does as closely as possible, so unless we
        # explicitly ask for xorr (in el8 and 9), we should NOT be using it.
        # For RLN and el10, we'll use xorr all the way through. When 8 is no
        # longer getting ISOs, we'll remove this section.
required_pkgs = self.required_pkgs.copy()
if self.iso_map['xorrisofs']:
if 'genisoimage' in required_pkgs and 'xorriso' not in required_pkgs:
required_pkgs.append('xorriso')
mock_iso_template_output = mock_iso_template.render(
arch=self.current_arch,
major=self.major_version,
fullname=self.fullname,
shortname=self.shortname,
required_pkgs=required_pkgs,
dist=self.disttag,
repos=self.repolist,
user_agent='{{ user_agent }}',
)
mock_sh_template_output = mock_sh_template.render(
arch=self.current_arch,
major=self.major_version,
isolation=self.mock_isolation,
builddir=self.mock_work_root,
shortname=self.shortname,
revision=self.release,
)
iso_template_output = iso_template.render(
arch=self.current_arch,
major=self.major_version,
minor=self.minor_version,
shortname=self.shortname,
repos=self.repolist,
variant=self.iso_map['lorax']['variant'],
lorax=self.iso_map['lorax']['lorax_removes'],
distname=self.distname,
revision=self.release,
rc=rclevel,
builddir=self.mock_work_root,
lorax_work_root=self.lorax_result_root,
bugurl=self.bugurl,
)
        with open(mock_iso_path, "w+") as mock_iso_entry:
            mock_iso_entry.write(mock_iso_template_output)
        with open(mock_sh_path, "w+") as mock_sh_entry:
            mock_sh_entry.write(mock_sh_template_output)
        with open(iso_template_path, "w+") as iso_template_entry:
            iso_template_entry.write(iso_template_output)
os.chmod(mock_sh_path, 0o755)
os.chmod(iso_template_path, 0o755)
def run_lorax(self):
"""
This actually runs lorax on this system. It will call the right scripts
to do so.
"""
lorax_cmd = '/bin/bash /var/tmp/isobuild.sh'
self.log.info('Starting lorax...')
p = subprocess.call(shlex.split(lorax_cmd))
if p != 0:
            self.log.error('An error occurred during execution.')
self.log.error('See the logs for more information.')
raise SystemExit()
def run_pull_lorax_artifacts(self):
"""
        Pulls the required artifacts and unpacks them to work/lorax/$arch
"""
# Determine if we're only managing one architecture out of all of them.
# It does not hurt to do everything at once. But the option is there.
unpack_single_arch = False
arches_to_unpack = self.arches
if self.arch:
unpack_single_arch = True
arches_to_unpack = [self.arch]
self.log.info(Color.INFO + 'Determining the latest pulls...')
if self.s3:
latest_artifacts = Shared.s3_determine_latest(
self.s3_bucket,
self.release,
self.arches,
'tar.gz',
'lorax',
self.log
)
else:
latest_artifacts = Shared.reqs_determine_latest(
self.s3_bucket_url,
self.release,
self.arches,
'tar.gz',
'lorax',
self.log
)
self.log.info(Color.INFO + 'Downloading requested artifact(s)')
for arch in arches_to_unpack:
lorax_arch_dir = os.path.join(
self.lorax_work_dir,
arch
)
if arch not in latest_artifacts:
self.log.error(Color.FAIL + 'No lorax artifacts for ' + arch)
continue
source_path = latest_artifacts[arch]
full_drop = '{}/lorax-{}-{}.tar.gz'.format(
lorax_arch_dir,
self.release,
arch
)
if not os.path.exists(lorax_arch_dir):
os.makedirs(lorax_arch_dir, exist_ok=True)
self.log.info(
'Downloading artifact for ' + Color.BOLD + arch + Color.END
)
if self.s3:
Shared.s3_download_artifacts(
self.force_download,
self.s3_bucket,
source_path,
full_drop,
self.log
)
else:
Shared.reqs_download_artifacts(
self.force_download,
self.s3_bucket_url,
source_path,
full_drop,
self.log
)
self.log.info(Color.INFO + 'Download phase completed')
self.log.info(Color.INFO + 'Beginning unpack phase...')
for arch in arches_to_unpack:
tarname = 'lorax-{}-{}.tar.gz'.format(
self.release,
arch
)
tarball = os.path.join(
self.lorax_work_dir,
arch,
tarname
)
if not os.path.exists(tarball):
self.log.error(Color.FAIL + 'Artifact does not exist: ' + tarball)
continue
self._unpack_artifacts(self.force_unpack, arch, tarball)
self.log.info(Color.INFO + 'Unpack phase completed')
self.log.info(Color.INFO + 'Beginning image variant phase')
for arch in arches_to_unpack:
self.log.info(
'Copying base lorax for ' + Color.BOLD + arch + Color.END
)
for variant in self.iso_map['images']:
self._copy_lorax_to_variant(self.force_unpack, arch, variant)
self._copy_boot_to_work(self.force_unpack, arch)
self.log.info(Color.INFO + 'Image variant phase completed')
self.log.info(Color.INFO + 'Beginning treeinfo phase')
for arch in arches_to_unpack:
for variant in self.iso_map['images']:
self.log.info(
'Configuring treeinfo and discinfo for %s%s %s%s' % (Color.BOLD, arch, variant, Color.END)
)
self._treeinfo_wrapper(arch, variant)
# Do a dirsync for non-disc data
if not self.iso_map['images'][variant]['disc']:
self.log.info(
'Syncing repo data and images for %s%s%s' % (Color.BOLD, variant, Color.END)
)
self._copy_nondisc_to_repo(self.force_unpack, arch, variant)
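    # For reference, the artifacts handled above follow the naming scheme
    # 'lorax-{release}-{arch}.tar.gz'. A sketch with assumed example values
    # (release '9.4', arch 'x86_64'):
    #
    #   'lorax-9.4-x86_64.tar.gz'  ->  unpacked under work/lorax/x86_64/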
def _unpack_artifacts(self, force_unpack, arch, tarball):
"""
        Unpack the requested artifact(s)
"""
unpack_dir = os.path.join(self.lorax_work_dir, arch)
if not force_unpack:
file_check = os.path.join(unpack_dir, 'lorax/.treeinfo')
if os.path.exists(file_check):
self.log.warn(Color.WARN + 'Artifact (' + arch + ') already unpacked')
return
self.log.info('Unpacking %s' % tarball)
        with tarfile.open(tarball) as t:
            t.extractall(unpack_dir)
def _copy_lorax_to_variant(self, force_unpack, arch, image):
"""
Copy to variants for easy access of mkiso and copying to compose dirs
"""
src_to_image = os.path.join(
self.lorax_work_dir,
arch,
'lorax'
)
iso_to_go = os.path.join(
self.iso_work_dir,
arch
)
if not os.path.exists(os.path.join(src_to_image, '.treeinfo')):
self.log.error(Color.FAIL + 'Lorax base image does not exist')
return
path_to_image = os.path.join(
self.lorax_work_dir,
arch,
image
)
if not force_unpack:
file_check = os.path.join(path_to_image, '.treeinfo')
if os.path.exists(file_check):
self.log.warn(Color.WARN + 'Lorax image for ' + image + ' already exists')
return
self.log.info('Copying base lorax to %s directory...' % image)
try:
shutil.copytree(src_to_image, path_to_image, copy_function=shutil.copy2, dirs_exist_ok=True)
        except Exception:
self.log.error('%s already exists??' % image)
if self.iso_map['images'][image]['disc']:
self.log.info('Removing boot.iso from %s' % image)
try:
os.remove(path_to_image + '/images/boot.iso')
os.remove(path_to_image + '/images/boot.iso.manifest')
            except Exception:
self.log.error(
'[' + Color.BOLD + Color.YELLOW + 'FAIL' + Color.END + '] ' +
'Cannot remove boot.iso'
)
def _copy_boot_to_work(self, force_unpack, arch):
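        """
        Copies the boot.iso from the unpacked lorax tree into the ISO work
        directory, creates a generic symlink and manifest alongside it, and
        writes checksums for both.
        """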
src_to_image = os.path.join(self.lorax_work_dir, arch, 'lorax')
iso_to_go = os.path.join(self.iso_work_dir, arch)
path_to_src_image = os.path.join(src_to_image, 'images/boot.iso')
rclevel = ''
if self.release_candidate:
rclevel = '-' + self.rclvl
discname = '{}-{}.{}{}-{}-{}.iso'.format(
self.shortname,
self.major_version,
self.minor_version,
rclevel,
arch,
'boot'
)
        isobootpath = os.path.join(iso_to_go, discname)
        manifest = '{}.manifest'.format(isobootpath)
        link_name = '{}-{}-boot.iso'.format(self.shortname, arch)
        link_manifest = link_name + '.manifest'
        linkbootpath = os.path.join(iso_to_go, link_name)
        manifestlink = os.path.join(iso_to_go, link_manifest)
if not force_unpack:
file_check = isobootpath
if os.path.exists(file_check):
self.log.warn(Color.WARN + 'Boot image (' + discname + ') already exists')
return
self.log.info('Copying %s boot iso to work directory...' % arch)
os.makedirs(iso_to_go, exist_ok=True)
try:
shutil.copy2(path_to_src_image, isobootpath)
if os.path.exists(linkbootpath):
os.remove(linkbootpath)
os.symlink(discname, linkbootpath)
except Exception as e:
self.log.error(Color.FAIL + 'We could not copy the image or create a symlink.')
raise SystemExit(e)
if os.path.exists(path_to_src_image + '.manifest'):
shutil.copy2(path_to_src_image + '.manifest', manifest)
os.symlink(manifest.split('/')[-1], manifestlink)
self.log.info('Creating checksum for %s boot iso...' % arch)
checksum = Shared.get_checksum(isobootpath, self.checksum, self.log)
if not checksum:
self.log.error(Color.FAIL + isobootpath + ' not found! Are you sure we copied it?')
return
        with open(isobootpath + '.CHECKSUM', "w+") as c:
            c.write(checksum)
linksum = Shared.get_checksum(linkbootpath, self.checksum, self.log)
if not linksum:
self.log.error(Color.FAIL + linkbootpath + ' not found! Did we actually make the symlink?')
return
        with open(linkbootpath + '.CHECKSUM', "w+") as c:
            c.write(linksum)
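    # With assumed example values (shortname 'Rocky', major 9, minor 4, not
    # an RC), the naming above works out to:
    #
    #   discname  = 'Rocky-9.4-x86_64-boot.iso'   # the real file
    #   link_name = 'Rocky-x86_64-boot.iso'       # stable symlink to it
    #
    # plus a .manifest and a .CHECKSUM alongside each.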
def _copy_nondisc_to_repo(self, force_unpack, arch, repo):
"""
Syncs data from a non-disc set of images to the appropriate repo. Repo
and image MUST match names for this to work.
"""
pathway = os.path.join(
self.compose_latest_sync,
repo,
arch,
'os'
)
kspathway = os.path.join(
self.compose_latest_sync,
repo,
arch,
'kickstart'
)
src_to_image = os.path.join(
self.lorax_work_dir,
arch,
repo
)
        if not os.path.exists(pathway):
            self.log.error(Color.FAIL +
                    'Repo and image variant either do NOT match or do ' +
                    'NOT exist. Are you sure you have synced the repository?'
            )
            return
if not force_unpack:
found_files = []
for y in ArchCheck.archfile[arch]:
imgpath = os.path.join(
pathway,
y
)
if os.path.exists(imgpath):
found_files.append(y)
if os.path.exists(pathway + '/images/boot.iso'):
found_files.append('/images/boot.iso')
if len(found_files) > 0:
                self.log.warn(Color.WARN + 'Images and data for ' + repo + ' and ' + arch + ' already exist.')
return
self.log.info(Color.INFO + 'Copying images and data for ' + repo + ' ' + arch)
try:
shutil.copytree(src_to_image, pathway, copy_function=shutil.copy2, dirs_exist_ok=True)
shutil.copytree(src_to_image, kspathway, copy_function=shutil.copy2, dirs_exist_ok=True)
        except Exception:
            self.log.error('%s already exists??' % repo)
def run_boot_sync(self):
"""
This unpacks into BaseOS/$arch/os, assuming there's no data actually
there. There should be checks.
1. Sync from work/lorax/$arch to work/lorax/$arch/dvd
2. Sync from work/lorax/$arch to work/lorax/$arch/minimal
3. Sync from work/lorax/$arch to BaseOS/$arch/os
4. Modify (3) .treeinfo
5. Modify (1) .treeinfo, keep out boot.iso checksum
6. Create a .treeinfo for AppStream
"""
unpack_single_arch = False
arches_to_unpack = self.arches
if self.arch:
unpack_single_arch = True
arches_to_unpack = [self.arch]
self._sync_boot(force_unpack=self.force_unpack, arch=self.arch, image=None)
#self._treeinfo_write(arch=self.arch)
    def _sync_boot(self, force_unpack, arch, image):
        """
        Syncs the lorax output for the given arch into the requested image
        directory. Currently a stub.
        """
        self.log.info('Copying lorax to %s directory...' % image)
        # checks should go here to report when the data already exists
def _treeinfo_wrapper(self, arch, variant):
"""
        Ensure treeinfo and discinfo are written correctly based on the variant
passed. Each file should be configured similarly but also differently
from the next. The Shared module does have a .treeinfo writer, but it
is for basic use. Eventually it'll be expanded to handle this scenario.
"""
image = os.path.join(self.lorax_work_dir, arch, variant)
imagemap = self.iso_map['images'][variant]
data = {
'arch': arch,
'variant': variant,
'variant_path': image,
'checksum': self.checksum,
'distname': self.distname,
'fullname': self.fullname,
'shortname': self.shortname,
'release': self.release,
'timestamp': self.timestamp,
}
try:
Shared.treeinfo_modify_write(data, imagemap, self.log)
except Exception as e:
self.log.error(Color.FAIL + 'There was an error writing treeinfo.')
self.log.error(e)
# Next set of functions are loosely borrowed (in concept) from pungi. Some
# stuff may be combined/mixed together, other things may be simplified or
# reduced in nature.
def run_build_extra_iso(self):
"""
Builds DVD images based on the data created from the initial lorax on
each arch. This should NOT be called during the usual run() section.
"""
sync_root = self.compose_latest_sync
self.log.info(Color.INFO + 'Starting Extra ISOs phase')
if not os.path.exists(self.compose_base):
            self.log.error(Color.FAIL + 'The compose directory MUST be here. Cannot continue.')
raise SystemExit()
self._extra_iso_build_wrap()
self.log.info('Compose repo directory: %s' % sync_root)
self.log.info('ISO result directory: %s/$arch' % self.lorax_work_dir)
self.log.info(Color.INFO + 'Extra ISO phase completed.')
def _extra_iso_build_wrap(self):
"""
Try to figure out where the build is going, we only support mock for
now.
"""
work_root = os.path.join(
self.compose_latest_dir,
'work'
)
arches_to_build = self.arches
if self.arch:
arches_to_build = [self.arch]
images_to_build = self.iso_map['images']
if self.extra_iso:
images_to_build = [self.extra_iso]
for y in images_to_build:
if 'isoskip' in self.iso_map['images'][y] and self.iso_map['images'][y]['isoskip']:
self.log.info(Color.WARN + 'Skipping ' + y + ' image')
continue
            # Kind of hacky, but if we decide to have more than boot/dvd
            # ISOs, we need to make sure volname matches the initial lorax
            # image, whose volid contains "dvd". In other words, the file
            # name doesn't always equate to the volume ID.
if 'volname' in self.iso_map['images'][y]:
volname = self.iso_map['images'][y]['volname']
else:
volname = y
for a in arches_to_build:
                lorax_path = os.path.join(self.lorax_work_dir, a, 'lorax', '.treeinfo')
                image_path = os.path.join(self.lorax_work_dir, a, y, '.treeinfo')
                if not os.path.exists(lorax_path):
                    self.log.error(Color.FAIL + 'Lorax not found at all. This is considered fatal.')
                    raise SystemExit()
                if not os.path.exists(image_path):
                    self.log.error(Color.FAIL + 'Lorax data not found for ' + y + '. Skipping.')
                    continue
grafts = self._generate_graft_points(
a,
y,
self.iso_map['images'][y]['repos'],
)
self._extra_iso_local_config(a, y, grafts, work_root, volname)
if self.extra_iso_mode == 'local':
self._extra_iso_local_run(a, y, work_root)
elif self.extra_iso_mode == 'podman':
continue
else:
self.log.error(Color.FAIL + 'Mode specified is not valid.')
raise SystemExit()
if self.extra_iso_mode == 'podman':
self._extra_iso_podman_run(arches_to_build, images_to_build, work_root)
def _extra_iso_local_config(self, arch, image, grafts, work_root, volname):
"""
Local ISO build configuration - This generates the configuration for
both mock and podman entries
"""
self.log.info('Generating Extra ISO configuration and script')
entries_dir = os.path.join(work_root, "entries")
boot_iso = os.path.join(work_root, "lorax", arch, "lorax/images/boot.iso")
mock_iso_template = self.tmplenv.get_template('isomock.tmpl.cfg')
mock_sh_template = self.tmplenv.get_template('extraisobuild.tmpl.sh')
iso_template = self.tmplenv.get_template('buildExtraImage.tmpl.sh')
xorriso_template = self.tmplenv.get_template('xorriso.tmpl.txt')
iso_readme_template = self.tmplenv.get_template('ISOREADME.tmpl')
mock_iso_path = '/var/tmp/lorax-{}.cfg'.format(self.major_version)
mock_sh_path = '{}/extraisobuild-{}-{}.sh'.format(entries_dir, arch, image)
iso_template_path = '{}/buildExtraImage-{}-{}.sh'.format(entries_dir, arch, image)
xorriso_template_path = '{}/xorriso-{}-{}.txt'.format(entries_dir, arch, image)
iso_readme_path = '{}/{}/README'.format(self.iso_work_dir, arch)
log_root = os.path.join(
work_root,
"logs",
self.date_stamp
)
if not os.path.exists(log_root):
os.makedirs(log_root, exist_ok=True)
log_path_command = '| tee -a {}/{}-{}.log'.format(log_root, arch, image)
        # This is kind of a hack. Installing xorrisofs sets the alternatives
        # to it, so backwards compatibility is sort of guaranteed. But we want
        # to emulate what pungi does as closely as possible, so unless we
        # explicitly ask for xorr (in el8 and 9), we should NOT be using it.
        # For RLN and el10, we'll use xorr all the way through. When 8 is no
        # longer getting ISOs, we'll remove this section.
required_pkgs = self.required_pkgs.copy()
if self.iso_map['xorrisofs']:
if 'genisoimage' in required_pkgs and 'xorriso' not in required_pkgs:
required_pkgs.append('xorriso')
rclevel = ''
if self.release_candidate:
rclevel = '-' + self.rclvl
datestamp = ''
if self.updated_image:
datestamp = '-' + self.updated_image_date
volid = '{}-{}-{}{}-{}-{}'.format(
self.shortname,
self.major_version,
self.minor_version,
rclevel,
arch,
volname
)
isoname = '{}-{}{}{}-{}-{}.iso'.format(
self.shortname,
self.revision,
rclevel,
datestamp,
arch,
image
)
generic_isoname = '{}-{}-{}.iso'.format(self.shortname, arch, image)
lorax_pkg_cmd = '/usr/bin/dnf install {} -y {}'.format(
' '.join(required_pkgs),
log_path_command
)
mock_iso_template_output = mock_iso_template.render(
arch=self.current_arch,
major=self.major_version,
fullname=self.fullname,
shortname=self.shortname,
required_pkgs=required_pkgs,
dist=self.disttag,
repos=self.repolist,
user_agent='{{ user_agent }}',
compose_dir_is_here=True,
compose_dir=self.compose_root,
)
mock_sh_template_output = mock_sh_template.render(
arch=self.current_arch,
major=self.major_version,
isolation=self.mock_isolation,
builddir=self.mock_work_root,
shortname=self.shortname,
isoname=isoname,
entries_dir=entries_dir,
image=image,
)
opts = {
'arch': arch,
'iso_name': isoname,
'volid': volid,
'graft_points': grafts,
'use_xorrisofs': self.iso_map['xorrisofs'],
'iso_level': self.iso_map['iso_level'],
}
if opts['use_xorrisofs']:
# Generate a xorriso compatible dialog
            with open(grafts) as xp:
                xorpoint = xp.read()
xorriso_template_output = xorriso_template.render(
boot_iso=boot_iso,
isoname=isoname,
volid=volid,
graft=xorpoint,
arch=arch,
)
            with open(xorriso_template_path, "w+") as xorriso_template_entry:
                xorriso_template_entry.write(xorriso_template_output)
opts['graft_points'] = xorriso_template_path
make_image = '{} {}'.format(
Shared.get_make_image_cmd(
opts,
self.hfs_compat
),
log_path_command
)
isohybrid = Shared.get_isohybrid_cmd(opts)
implantmd5 = Shared.get_implantisomd5_cmd(opts)
make_manifest = Shared.get_manifest_cmd(opts)
iso_template_output = iso_template.render(
extra_iso_mode=self.extra_iso_mode,
arch=arch,
compose_work_iso_dir=self.iso_work_dir,
make_image=make_image,
isohybrid=isohybrid,
implantmd5=implantmd5,
make_manifest=make_manifest,
lorax_pkg_cmd=lorax_pkg_cmd,
isoname=isoname,
generic_isoname=generic_isoname,
)
iso_readme_template_output = iso_readme_template.render(
arch=arch
)
        with open(mock_iso_path, "w+") as mock_iso_entry:
            mock_iso_entry.write(mock_iso_template_output)
        with open(mock_sh_path, "w+") as mock_sh_entry:
            mock_sh_entry.write(mock_sh_template_output)
        with open(iso_template_path, "w+") as iso_template_entry:
            iso_template_entry.write(iso_template_output)
        with open(iso_readme_path, "w+") as iso_readme_entry:
            iso_readme_entry.write(iso_readme_template_output)
os.chmod(mock_sh_path, 0o755)
os.chmod(iso_template_path, 0o755)
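    # With assumed example values (shortname 'Rocky', revision '9.4', arch
    # 'x86_64', image and volname 'dvd', no RC level or datestamp), the
    # formats above render as:
    #
    #   volid           = 'Rocky-9-4-x86_64-dvd'
    #   isoname         = 'Rocky-9.4-x86_64-dvd.iso'
    #   generic_isoname = 'Rocky-x86_64-dvd.iso'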
def _extra_iso_local_run(self, arch, image, work_root):
"""
Runs the actual local process using mock. This is for running in
peridot or running on a machine that does not have podman, but does
have mock available.
"""
entries_dir = os.path.join(work_root, "entries")
extra_iso_cmd = '/bin/bash {}/extraisobuild-{}-{}.sh'.format(entries_dir, arch, image)
self.log.info('Starting mock build...')
p = subprocess.call(shlex.split(extra_iso_cmd))
if p != 0:
            self.log.error('An error occurred during execution.')
self.log.error('See the logs for more information.')
raise SystemExit()
# Copy it if the compose dir is here?
def _extra_iso_podman_run(self, arches, images, work_root):
"""
Does all the image building in podman containers to parallelize the
        builds. You can call this instead of looping mock, for instance when
        not running in peridot. This gives the Release Engineer a little more
flexibility if they care enough.
This honestly assumes you are running this on a machine that has access
to the compose directories. It's the same as if you were doing a
reposync of the repositories.
"""
cmd = Shared.podman_cmd(self.log)
entries_dir = os.path.join(work_root, "entries")
isos_dir = os.path.join(work_root, "isos")
bad_exit_list = []
checksum_list = []
datestamp = ''
if self.updated_image:
datestamp = '-' + self.updated_image_date
for i in images:
entry_name_list = []
image_name = i
arch_sync = arches.copy()
for a in arch_sync:
entry_name = 'buildExtraImage-{}-{}.sh'.format(a, i)
entry_name_list.append(entry_name)
rclevel = ''
if self.release_candidate:
rclevel = '-' + self.rclvl
isoname = '{}/{}-{}{}{}-{}-{}.iso'.format(
a,
self.shortname,
self.revision,
rclevel,
datestamp,
a,
i
)
genericname = '{}/{}-{}-{}.iso'.format(
a,
self.shortname,
a,
i
)
checksum_list.append(isoname)
checksum_list.append(genericname)
for pod in entry_name_list:
podman_cmd_entry = '{} run -d -it -v "{}:{}" -v "{}:{}" --name {} --entrypoint {}/{} {}'.format(
cmd,
self.compose_root,
self.compose_root,
entries_dir,
entries_dir,
pod,
entries_dir,
pod,
self.container
)
process = subprocess.call(
shlex.split(podman_cmd_entry),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
join_all_pods = ' '.join(entry_name_list)
time.sleep(3)
self.log.info(Color.INFO + 'Building ' + i + ' ...')
pod_watcher = '{} wait {}'.format(
cmd,
join_all_pods
)
watch_man = subprocess.call(
shlex.split(pod_watcher),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
# After the above is done, we'll check each pod process for an exit
# code.
pattern = "Exited (0)"
for pod in entry_name_list:
checkcmd = '{} ps -f status=exited -f name={}'.format(
cmd,
pod
)
podcheck = subprocess.Popen(
checkcmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True
)
output, errors = podcheck.communicate()
                if pattern not in output.decode():
self.log.error(Color.FAIL + pod)
bad_exit_list.append(pod)
rmcmd = '{} rm {}'.format(
cmd,
join_all_pods
)
rmpod = subprocess.Popen(
rmcmd,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
shell=True
)
entry_name_list.clear()
for p in checksum_list:
path = os.path.join(isos_dir, p)
if os.path.exists(path):
self.log.info(Color.INFO + 'Performing checksum for ' + p)
checksum = Shared.get_checksum(path, self.checksum, self.log)
                    if not checksum:
                        self.log.error(Color.FAIL + path + ' not found! Are you sure it was built?')
                        continue
                    with open(path + '.CHECKSUM', "w+") as c:
                        c.write(checksum)
self.log.info(Color.INFO + 'Building ' + i + ' completed')
if len(bad_exit_list) == 0:
self.log.info(Color.INFO + 'Images built successfully.')
else:
self.log.error(
Color.FAIL +
'There were issues with the work done. As a result, ' +
'some/all ISOs may not exist.'
)
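    # The per-pod command assembled above expands to something like this
    # sketch (paths and container reference are placeholders):
    #
    #   podman run -d -it -v "/mnt/compose:/mnt/compose" \
    #       -v "<work>/entries:<work>/entries" \
    #       --name buildExtraImage-x86_64-dvd.sh \
    #       --entrypoint <work>/entries/buildExtraImage-x86_64-dvd.sh \
    #       <container>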
def _generate_graft_points(
self,
arch,
iso,
variants,
):
"""
        Generates the graft points (boot data, extra files, and packages) for
        an extra ISO. This should NOT be called during the usual run() section.
"""
lorax_base_dir = os.path.join(self.lorax_work_dir, arch)
global_work_dir = os.path.join(self.compose_latest_dir, "work/global")
self.log.info(Color.INFO + 'Generating graft points for extra iso: (' + arch + ') ' + iso)
files = {}
# This is the data we need to actually boot
lorax_for_var = os.path.join(lorax_base_dir, iso)
if not os.path.exists(lorax_for_var + '/.treeinfo'):
            self.log.error(
Color.FAIL +
'!! .treeinfo is missing, does this variant actually exist? !!'
)
return
# extra files
extra_files_for_var = os.path.join(
global_work_dir,
"extra-files"
)
# actually get the boot data
files = self._get_grafts([lorax_for_var, extra_files_for_var])
# This is to get all the packages for each repo
for repo in variants:
pkg_for_var = os.path.join(
self.compose_latest_sync,
repo,
arch,
self.structure['packages']
)
rd_for_var = os.path.join(
self.compose_latest_sync,
repo,
arch,
self.structure['repodata']
)
for k, v in self._get_grafts([pkg_for_var]).items():
files[os.path.join(repo, "Packages", k)] = v
for k, v in self._get_grafts([rd_for_var]).items():
files[os.path.join(repo, "repodata", k)] = v
grafts = '{}/{}-{}-grafts'.format(
lorax_base_dir,
iso,
arch
)
xorrs = '{}/xorriso-{}.txt'.format(
lorax_base_dir,
arch
)
self._write_grafts(
grafts,
xorrs,
files,
exclude=["*/lost+found", "*/boot.iso"]
)
if self.iso_map['xorrisofs']:
grafters = xorrs
else:
grafters = grafts
return grafters
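    # The graft files returned above use one of two line formats, written by
    # _write_grafts below:
    #
    #   genisoimage grafts file:  <path-on-iso>=<path-on-disk>
    #   xorriso dialog file:      -map <path-on-disk> <path-on-iso>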
def _get_grafts(self, paths, exclusive_paths=None, exclude=None):
"""
        Actually get some grafts (get_iso_contents), called by _generate_graft_points
"""
result = {}
exclude = exclude or []
exclusive_paths = exclusive_paths or []
for p in paths:
if isinstance(p, dict):
tree = p
else:
tree = self._scanning(p)
result = self._merging(result, tree)
for p in exclusive_paths:
tree = self._scanning(p)
result = self._merging(result, tree, exclusive=True)
# Resolves possible symlinks
for key in result.keys():
path = result[key]
if os.path.islink(path):
real_path = os.readlink(path)
abspath = os.path.normpath(os.path.join(os.path.dirname(path), real_path))
if not abspath.startswith(self.compose_base):
result[key] = abspath
return result
def _write_grafts(self, filepath, xorrspath, u, exclude=None):
"""
Write out the graft points
"""
seen = set()
exclude = exclude or []
result = {}
for zl in sorted(u, reverse=True):
dirn = os.path.dirname(zl)
if not zl.endswith("/"):
result[zl] = u[zl]
seen.add(dirn)
continue
found = False
for j in seen:
if j.startswith(dirn):
found = True
break
if not found:
result[zl] = u[zl]
seen.add(dirn)
        if self.iso_map['xorrisofs']:
            with open(xorrspath, "w") as fx:
                for zm in sorted(result, key=self._sorting):
                    found = False
                    for excl in exclude:
                        if fnmatch(zm, excl):
                            found = True
                            break
                    if found:
                        continue
                    fx.write("-map %s %s\n" % (u[zm], zm))
        else:
            with open(filepath, "w") as fh:
                for zl in sorted(result, key=self._sorting):
                    found = False
                    for excl in exclude:
                        if fnmatch(zl, excl):
                            found = True
                            break
                    if found:
                        continue
                    fh.write("%s=%s\n" % (zl, u[zl]))
def _scanning(self, p):
"""
Scan tree
"""
path = os.path.abspath(p)
result = {}
for root, dirs, files in os.walk(path):
for file in files:
abspath = os.path.join(root, file)
relpath = kobo.shortcuts.relative_path(abspath, path.rstrip("/") + "/")
result[relpath] = abspath
# Include empty directories too
if root != path:
abspath = os.path.join(root, "")
relpath = kobo.shortcuts.relative_path(abspath, path.rstrip("/") + "/")
result[relpath] = abspath
return result
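    # A sketch of what _scanning returns for a hypothetical tree rooted at
    # /work/x86_64/dvd containing images/boot.iso and an empty EFI/ dir:
    #
    #   {
    #       'images/boot.iso': '/work/x86_64/dvd/images/boot.iso',
    #       'images/': '/work/x86_64/dvd/images/',
    #       'EFI/': '/work/x86_64/dvd/EFI/',
    #   }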
def _merging(self, tree_a, tree_b, exclusive=False):
"""
Merge tree
"""
result = tree_b.copy()
all_dirs = set(
[os.path.dirname(dirn).rstrip("/") for dirn in result if os.path.dirname(dirn) != ""]
)
for dirn in tree_a:
dn = os.path.dirname(dirn)
if exclusive:
match = False
for x in all_dirs:
if dn == x or dn.startswith("%s/" % x):
match = True
break
if match:
continue
if dirn in result:
continue
result[dirn] = tree_a[dirn]
return result
def _sorting(self, k):
"""
        Sorting using the is_rpm and is_image funcs. Images are first, extras
        next, RPMs last.
"""
rolling = (0 if self._is_image(k) else 2 if self._is_rpm(k) else 1, k)
return rolling
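    # Example of the resulting order (images first, extras next, RPMs last):
    #
    #   sorted(['b.rpm', 'README', 'images/efiboot.img'], key=self._sorting)
    #   # -> ['images/efiboot.img', 'README', 'b.rpm']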
def _is_rpm(self, k):
"""
Is this an RPM? :o
"""
result = k.endswith(".rpm")
return result
def _is_image(self, k):
"""
Is this an image? :o
"""
if (
k.startswith("images/") or
k.startswith("isolinux/") or
k.startswith("EFI/") or
k.startswith("etc/") or
k.startswith("ppc/")
):
return True
if (
k.endswith(".img") or
k.endswith(".ins")
):
return True
return False
def _get_vol_id(self):
"""
Gets a volume ID
"""
def run_pull_generic_images(self):
"""
Pulls generic images built in peridot and places them where they need
to be. This relies on a list called "cloudimages" in the version
configuration.
"""
unpack_single_arch = False
arches_to_unpack = self.arches
latest_artifacts = {}
if self.arch:
unpack_single_arch = True
arches_to_unpack = [self.arch]
for name, extra in self.cloudimages['images'].items():
self.log.info(Color.INFO + 'Determining the latest images for ' + name + ' ...')
formattype = extra['format']
latest_artifacts[name] = {}
            primary_variant = extra.get('primary_variant')
latest_artifacts[name]['primary'] = primary_variant
latest_artifacts[name]['formattype'] = formattype
            variants = extra.get('variants', [None])  # need to loop at least once
imagename = name
variantname = name
for variant in variants:
if variant:
variantname = f"{name}-{variant}"
self.log.info(Color.INFO + 'Getting latest for variant ' + variant + ' ...')
if self.s3:
latest_artifacts[name][variantname] = Shared.s3_determine_latest(
self.s3_bucket,
self.release,
arches_to_unpack,
formattype,
variantname,
self.log
)
else:
latest_artifacts[name][variantname] = Shared.reqs_determine_latest(
self.s3_bucket_url,
self.release,
arches_to_unpack,
formattype,
variantname,
self.log
)
# latest_artifacts should have at least 1 result if has_variants, else == 1
                if not latest_artifacts[name][variantname]:
self.log.warn(Color.WARN + 'No images found for ' + variantname +
'. This means it will be skipped.')
del imagename
del variantname
del variants
#print(latest_artifacts)
for keyname in latest_artifacts.keys():
primary = latest_artifacts[keyname]['primary']
filetype = latest_artifacts[keyname]['formattype']
for imgname in latest_artifacts[keyname]:
keysect = latest_artifacts[keyname][imgname]
if imgname == 'primary':
continue
if not keysect:
continue
self.log.info(Color.INFO + 'Attempting to download requested ' +
'artifacts (' + keyname + ')')
for arch in arches_to_unpack:
image_arch_dir = os.path.join(
self.image_work_dir,
arch
)
if arch not in keysect:
self.log.warn(Color.WARN + 'This architecture (' + arch + ') does not exist for this image.')
continue
source_path = keysect[arch]
drop_name = source_path.split('/')[-1]
# Docker containers get a "layer" name, this hack gets
# around it. I didn't feel like adding another config opt.
if 'layer' in drop_name:
fsuffix = drop_name.replace('layer', '')
drop_name = source_path.split('/')[-3] + fsuffix
checksum_name = drop_name + '.CHECKSUM'
full_drop = '{}/{}'.format(
image_arch_dir,
drop_name
)
checksum_drop = '{}/{}.CHECKSUM'.format(
image_arch_dir,
drop_name
)
if not os.path.exists(image_arch_dir):
os.makedirs(image_arch_dir, exist_ok=True)
self.log.info('Downloading artifact for ' + Color.BOLD + arch + Color.END)
if self.s3:
Shared.s3_download_artifacts(
self.force_download,
self.s3_bucket,
source_path,
full_drop,
self.log
)
else:
Shared.reqs_download_artifacts(
self.force_download,
self.s3_bucket_url,
source_path,
full_drop,
self.log
)
self.log.info('Creating checksum ...')
checksum = Shared.get_checksum(full_drop, self.checksum, self.log)
if not checksum:
self.log.error(Color.FAIL + full_drop + ' not found! Are you sure we copied it?')
continue
                    with open(checksum_drop, 'w+') as c:
                        c.write(checksum)
self.log.info('Creating a symlink to latest image...')
latest_name = '{}/{}-{}-{}.latest.{}.{}'.format(
image_arch_dir,
self.shortname,
self.major_version,
imgname,
arch,
filetype
)
latest_path = latest_name.split('/')[-1]
latest_checksum = '{}/{}-{}-{}.latest.{}.{}.CHECKSUM'.format(
image_arch_dir,
self.shortname,
self.major_version,
imgname,
arch,
filetype
)
                    # os.symlink cannot replace an existing link in place, so
                    # remove any previous "latest" link first
if os.path.exists(latest_name):
os.remove(latest_name)
os.symlink(drop_name, latest_name)
self.log.info('Creating checksum for latest symlinked image...')
shutil.copy2(checksum_drop, latest_checksum)
                    with open(latest_checksum, 'r') as link:
                        checkdata = link.read()
                    checkdata = checkdata.replace(drop_name, latest_path)
                    with open(latest_checksum, 'w+') as link:
                        link.write(checkdata)
# If this is the primary image, set the appropriate symlink
# and checksum
if primary and primary in drop_name:
# If an image is the primary, we set this.
latest_primary_name = '{}/{}-{}-{}.latest.{}.{}'.format(
image_arch_dir,
self.shortname,
self.major_version,
keyname,
arch,
filetype
)
latest_primary_checksum = '{}/{}-{}-{}.latest.{}.{}.CHECKSUM'.format(
image_arch_dir,
self.shortname,
self.major_version,
keyname,
arch,
filetype
)
latest_primary_path = latest_primary_name.split('/')[-1]
self.log.info('This is the primary image, setting link and checksum')
if os.path.exists(latest_primary_name):
os.remove(latest_primary_name)
os.symlink(drop_name, latest_primary_name)
shutil.copy2(checksum_drop, latest_primary_checksum)
                        with open(latest_primary_checksum) as link:
                            checkpdata = link.read()
                        checkpdata = checkpdata.replace(drop_name, latest_primary_path)
                        with open(latest_primary_checksum, 'w+') as link:
                            link.write(checkpdata)
self.log.info(Color.INFO + 'Image download phase completed')
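    # With assumed example values (shortname 'Rocky', major 9, image name
    # 'GenericCloud', arch 'x86_64', format 'qcow2'), the "latest" links
    # created above look like:
    #
    #   Rocky-9-GenericCloud.latest.x86_64.qcow2
    #   Rocky-9-GenericCloud.latest.x86_64.qcow2.CHECKSUM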
class LiveBuild:
"""
This helps us build the live images for Rocky Linux. The mode is "simple"
by default when using mock.
"""
def __init__(
self,
rlvars,
config,
major,
hfs_compat: bool = False,
force_download: bool = False,
isolation: str = 'simple',
live_iso_mode: str = 'local',
compose_dir_is_here: bool = False,
hashed: bool = False,
image=None,
justcopyit: bool = False,
force_build: bool = False,
updated_image: bool = False,
image_increment: str = '0',
logger=None
):
self.image = image
self.justcopyit = justcopyit
self.fullname = rlvars['fullname']
self.distname = config['distname']
self.shortname = config['shortname']
# Relevant config items
self.major_version = major
self.compose_dir_is_here = compose_dir_is_here
self.date_stamp = config['date_stamp']
self.compose_root = config['compose_root']
self.compose_base = config['compose_root'] + "/" + major
self.current_arch = config['arch']
self.livemap = rlvars['livemap']
self.required_pkgs = rlvars['livemap']['required_pkgs']
self.mock_work_root = config['mock_work_root']
self.live_result_root = config['mock_work_root'] + "/lmc"
self.mock_isolation = isolation
self.force_download = force_download
self.force_build = force_build
self.live_iso_mode = live_iso_mode
self.checksum = rlvars['checksum']
self.profile = rlvars['profile']
self.hashed = hashed
# Relevant major version items
self.arch = config['arch']
self.arches = rlvars['allowed_arches']
self.release = rlvars['revision']
self.minor_version = rlvars['minor']
self.revision = rlvars['revision'] + "-" + rlvars['rclvl']
self.rclvl = rlvars['rclvl']
self.disttag = rlvars['disttag']
self.repos = rlvars['iso_map']['lorax']['repos']
self.repo_base_url = config['repo_base_url']
self.project_id = rlvars['project_id']
self.structure = rlvars['structure']
self.bugurl = rlvars['bugurl']
self.container = config['container']
if 'container' in rlvars and len(rlvars['container']) > 0:
self.container = rlvars['container']
self.updated_image = updated_image
self.updated_image_increment = "." + image_increment
self.date = (time.strftime("%Y%m%d", time.localtime())
+ self.updated_image_increment)
# Templates
file_loader = FileSystemLoader(f"{_rootdir}/templates")
self.tmplenv = Environment(loader=file_loader)
self.compose_latest_dir = os.path.join(
config['compose_root'],
major,
"latest-{}-{}".format(
self.shortname,
self.profile
)
)
self.compose_latest_sync = os.path.join(
self.compose_latest_dir,
"compose"
)
self.compose_log_dir = os.path.join(
self.compose_latest_dir,
"work/logs"
)
self.live_work_dir = os.path.join(
self.compose_latest_dir,
"work/live"
)
# This is temporary for now.
if logger is None:
self.log = logging.getLogger("iso")
self.log.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s :: %(name)s :: %(message)s',
'%Y-%m-%d %H:%M:%S'
)
handler.setFormatter(formatter)
self.log.addHandler(handler)
self.log.info('live build init')
self.repolist = Shared.build_repo_list(
self.repo_base_url,
self.repos,
self.project_id,
self.current_arch,
self.compose_latest_sync,
self.compose_dir_is_here,
self.hashed
)
self.log.info(self.revision)
if not os.path.exists(self.compose_latest_dir):
self.log.warn(Color.WARN + 'A compose directory was not found ' +
'here. If there is a failure, it may be due to it ' +
'missing. You may want to generate a fake compose if ' +
'you are simply making your own live images and you run ' +
'into any errors beyond this point.'
)
def run_build_live_iso(self):
"""
        Builds live images based on the kickstart entries defined in the
        livemap. This should NOT be called during the usual run() section.
"""
sync_root = self.compose_latest_sync
self.log.info(Color.INFO + 'Starting Live ISOs phase')
# Check that the arch we're assigned is valid...
if self.current_arch not in self.livemap['allowed_arches']:
self.log.error(Color.FAIL + 'Running an unsupported architecture.')
raise SystemExit()
self._live_iso_build_wrap()
self.log.info('Compose repo directory: %s' % sync_root)
self.log.info('Live ISO result directory: %s/$arch' % self.live_work_dir)
self.log.info(Color.INFO + 'Live ISO phase completed.')
def _live_iso_build_wrap(self):
"""
Prepare and actually build the live images. Based on arguments in self,
we'll either do it on mock in a loop or in podman, just like with the
extra iso phase.
"""
work_root = os.path.join(
self.compose_latest_dir,
'work'
)
images_to_build = list(self.livemap['ksentry'].keys())
if self.image:
images_to_build = [self.image]
self.log.info(
Color.INFO + 'We are planning to build: ' +
', '.join(images_to_build)
)
for i in images_to_build:
self._live_iso_local_config(i, work_root)
if self.live_iso_mode == 'local':
self._live_iso_local_run(self.current_arch, i, work_root)
elif self.live_iso_mode == 'podman':
continue
else:
self.log.error(Color.FAIL + 'Mode specified is not valid.')
raise SystemExit()
if self.live_iso_mode == 'podman':
#self._live_iso_podman_run(self.current_arch, images_to_build, work_root)
self.log.error(Color.FAIL + 'At this time, live images cannot be ' +
'built in podman.')
raise SystemExit()
def _live_iso_local_config(self, image, work_root):
"""
Live ISO build configuration - This generates both mock and podman
entries, regardless of which one is being used.
"""
self.log.info('Generating Live ISO configuration and script')
entries_dir = os.path.join(work_root, "entries")
mock_iso_template = self.tmplenv.get_template('isomock.tmpl.cfg')
mock_sh_template = self.tmplenv.get_template('liveisobuild.tmpl.sh')
iso_template = self.tmplenv.get_template('buildLiveImage.tmpl.sh')
mock_iso_path = '/var/tmp/live-{}.cfg'.format(self.major_version)
mock_sh_path = '{}/liveisobuild-{}-{}.sh'.format(
entries_dir,
self.current_arch,
image
)
iso_template_path = '{}/buildLiveImage-{}-{}.sh'.format(
entries_dir,
self.current_arch,
image
)
log_root = os.path.join(
work_root,
"logs",
self.date_stamp
)
ks_start = self.livemap['ksentry'][image]
if not os.path.exists(log_root):
os.makedirs(log_root, exist_ok=True)
log_path_command = '| tee -a {}/{}-{}.log'.format(
log_root,
self.current_arch,
image
)
required_pkgs = self.livemap['required_pkgs']
volid = '{}-{}-{}-{}'.format(
self.shortname,
self.major_version,
self.minor_version,
image
)
isoname = '{}-{}-{}-{}-{}.iso'.format(
self.shortname,
self.release,
image,
self.current_arch,
self.date
)
live_pkg_cmd = '/usr/bin/dnf install {} -y {}'.format(
' '.join(required_pkgs),
log_path_command
)
git_clone_cmd = '/usr/bin/git clone {} -b {} /builddir/ks {}'.format(
self.livemap['git_repo'],
self.livemap['branch'],
log_path_command
)
make_image_cmd = ('/usr/sbin/livemedia-creator --ks {} --no-virt '
'--resultdir /builddir/lmc --project="{} {}" --make-iso --volid {} '
'--iso-only --iso-name {} --releasever={} --nomacboot {}').format(
'/builddir/ks.cfg',
self.distname,
image,
volid,
isoname,
self.release,
log_path_command
)
mock_iso_template_output = mock_iso_template.render(
arch=self.current_arch,
major=self.major_version,
fullname=self.fullname,
shortname=self.shortname,
required_pkgs=required_pkgs,
dist=self.disttag,
repos=self.repolist,
compose_dir_is_here=True,
user_agent='{{ user_agent }}',
compose_dir=self.compose_root,
)
mock_sh_template_output = mock_sh_template.render(
arch=self.current_arch,
major=self.major_version,
isolation=self.mock_isolation,
builddir=self.mock_work_root,
shortname=self.shortname,
isoname=isoname,
entries_dir=entries_dir,
image=image,
)
iso_template_output = iso_template.render(
live_iso_mode=self.live_iso_mode,
arch=self.current_arch,
compose_live_work_dir=self.live_work_dir,
make_image=make_image_cmd,
live_pkg_cmd=live_pkg_cmd,
isoname=isoname,
major=self.major_version,
git_clone=git_clone_cmd,
ks_file=ks_start,
)
        with open(mock_iso_path, "w+") as mip:
            mip.write(mock_iso_template_output)
        with open(mock_sh_path, "w+") as msp:
            msp.write(mock_sh_template_output)
        with open(iso_template_path, "w+") as itp:
            itp.write(iso_template_output)
os.chmod(mock_sh_path, 0o755)
os.chmod(iso_template_path, 0o755)
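    # With assumed example values (shortname 'Rocky', release '9.4', a
    # hypothetical ksentry key 'Workstation', arch 'x86_64', date
    # '20240101.0'), the names above render as:
    #
    #   volid   = 'Rocky-9-4-Workstation'
    #   isoname = 'Rocky-9.4-Workstation-x86_64-20240101.0.iso'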
def _live_iso_podman_run(self, arch, images, work_root):
"""
Does all the image building in podman containers to parallelize the
        process. You can call this instead of looping mock, for instance when
        not running in peridot. This gives the Release Engineer a little more
flexibility if they care enough.
This honestly assumes you are running this on a machine that has access
to the compose directories. It's the same as if you were doing a
reposync of the repositories.
"""
cmd = Shared.podman_cmd(self.log)
entries_dir = os.path.join(work_root, "entries")
isos_dir = self.live_work_dir
bad_exit_list = []
checksum_list = []
entry_name_list = []
self.log.warn(Color.WARN + 'This mode does not work properly. It will fail.')
for i in images:
image_name = i
entry_name = 'buildLiveImage-{}-{}.sh'.format(arch, i)
entry_name_list.append(entry_name)
isoname = '{}/{}-{}-{}-{}-{}.iso'.format(
arch,
self.shortname,
i,
self.major_version,
arch,
self.date
)
checksum_list.append(isoname)
for pod in entry_name_list:
podman_cmd_entry = '{} run -d -it -v "{}:{}" -v "{}:{}" --name {} --entrypoint {}/{} {}'.format(
cmd,
self.compose_root,
self.compose_root,
entries_dir,
entries_dir,
pod,
entries_dir,
pod,
self.container
)
process = subprocess.call(
shlex.split(podman_cmd_entry),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
join_all_pods = ' '.join(entry_name_list)
time.sleep(3)
self.log.info(Color.INFO + 'Building requested live images ...')
pod_watcher = '{} wait {}'.format(
cmd,
join_all_pods
)
watch_man = subprocess.call(
shlex.split(pod_watcher),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
# After the above is done, we'll check each pod process for an exit
# code.
pattern = "Exited (0)"
for pod in entry_name_list:
checkcmd = '{} ps -f status=exited -f name={}'.format(
cmd,
pod
)
podcheck = subprocess.Popen(
checkcmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True
)
output, errors = podcheck.communicate()
            if pattern not in output.decode():
self.log.error(Color.FAIL + pod)
bad_exit_list.append(pod)
rmcmd = '{} rm {}'.format(
cmd,
join_all_pods
)
rmpod = subprocess.Popen(
rmcmd,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
shell=True
)
entry_name_list.clear()
for p in checksum_list:
path = os.path.join(isos_dir, p)
if os.path.exists(path):
self.log.info(Color.INFO + 'Performing checksum for ' + p)
checksum = Shared.get_checksum(path, self.checksum, self.log)
                if not checksum:
                    self.log.error(Color.FAIL + path + ' not found! Are you sure it was built?')
                    continue
                with open(path + '.CHECKSUM', "w+") as c:
                    c.write(checksum)
self.log.info(Color.INFO + 'Building live images completed')
if len(bad_exit_list) == 0:
self.log.info(Color.INFO + 'Live images completed successfully.')
else:
self.log.error(
Color.FAIL +
'There were issues with the work done. As a result, ' +
'some or all ISOs may not be copied later.'
)
def _live_iso_local_run(self, arch, image, work_root):
"""
Runs the actual local process using mock. This is for running in
peridot or running on a machine that does not have podman, but does
have mock available.
"""
entries_dir = os.path.join(work_root, "entries")
live_dir_arch = os.path.join(self.live_work_dir, arch)
isoname = '{}-{}-{}-{}-{}.iso'.format(
self.shortname,
self.release,
image,
arch,
self.date
)
isolink = '{}-{}-{}-{}-{}.iso'.format(
self.shortname,
self.major_version,
image,
arch,
'latest'
)
live_res_dir = '/var/lib/mock/{}-{}-{}/result'.format(
self.shortname.lower(),
self.major_version,
arch
)
if self.justcopyit:
if os.path.exists(os.path.join(live_dir_arch, isoname)):
self.log.warn(Color.WARN + 'Image already exists.')
if self.force_build:
self.log.warn(Color.WARN + 'Building anyway.')
else:
self.log.warn(Color.WARN + 'Skipping.')
return
live_iso_cmd = '/bin/bash {}/liveisobuild-{}-{}.sh'.format(entries_dir, arch, image)
self.log.info('Starting mock build...')
p = subprocess.call(shlex.split(live_iso_cmd))
if p != 0:
            self.log.error('An error occurred during execution.')
self.log.error('See the logs for more information.')
raise SystemExit()
self.log.warn(
Color.WARN + 'This is meant for builds done in peridot or ' +
'locally for an end user.'
)
self.log.warn(
Color.WARN +
'If you are looping images, your built image may get ' +
'overwritten. Ensure you have justcopyit enabled to avoid this.'
)
if self.justcopyit:
self.log.info(Color.INFO + 'Copying image to work directory')
source_path = os.path.join(live_res_dir, isoname)
dest_path = os.path.join(live_dir_arch, isoname)
link_path = os.path.join(live_dir_arch, isolink)
os.makedirs(live_dir_arch, exist_ok=True)
try:
shutil.copy2(source_path, dest_path)
if os.path.exists(link_path):
os.remove(link_path)
os.symlink(isoname, link_path)
            except Exception:
self.log.error(Color.FAIL + 'We could not copy the image or create a symlink.')
return
self.log.info(Color.INFO + 'Generating checksum')
checksum = Shared.get_checksum(dest_path, self.checksum, self.log)
if not checksum:
self.log.error(Color.FAIL + dest_path + ' not found. Did we copy it?')
return
            with open(dest_path + '.CHECKSUM', "w+") as c:
                c.write(checksum)
linksum = Shared.get_checksum(link_path, self.checksum, self.log)
if not linksum:
self.log.error(Color.FAIL + link_path + ' not found. Did we copy it?')
return
            with open(link_path + '.CHECKSUM', "w+") as c:
                c.write(linksum)