toolkit/iso/empanadas/empanadas/util/iso_utils.py


"""
Builds ISOs for Rocky Linux.
Louis Abel <label AT rockylinux.org>
"""
import logging
import sys
import os
import os.path
import subprocess
import shlex
import time
import tarfile
import shutil
# lazy person's s3 parser
#import requests
#import json
#import xmltodict
# if we can access s3
#import boto3
# relative_path, compute_file_checksums
import kobo.shortcuts
from fnmatch import fnmatch
# This is for treeinfo
from configparser import ConfigParser
from productmd.common import SortedConfigParser
from productmd.images import Image
from productmd.extra_files import ExtraFiles
import productmd.treeinfo
# End treeinfo
from jinja2 import Environment, FileSystemLoader
from empanadas.common import Color, _rootdir
from empanadas.util import Shared, ArchCheck, Idents
class IsoBuild:
"""
This helps us build the generic ISOs for a Rocky Linux release. In
particular, this is for the boot images.
There are functions to build the DVD (and potentially other) images. Each
particular build or process starts with "run" in their name.
"""
def __init__(
self,
rlvars,
config,
major,
arch=None,
hfs_compat: bool = False,
rc: bool = False,
s3: bool = False,
force_download: bool = False,
force_unpack: bool = False,
isolation: str = 'auto',
extra_iso=None,
extra_iso_mode: str = 'local',
compose_dir_is_here: bool = False,
hashed: bool = False,
updated_image: bool = False,
image_increment: str = '0',
image=None,
s3_region=None,
s3_bucket=None,
s3_bucket_url=None,
logger=None
):
self.image = image
self.fullname = rlvars['fullname']
self.distname = config['distname']
self.shortname = config['shortname']
# Relevant config items
self.major_version = major
self.compose_dir_is_here = compose_dir_is_here
self.disttag = rlvars['disttag']
self.date_stamp = config['date_stamp']
self.timestamp = time.time()
self.compose_root = config['compose_root']
self.compose_base = config['compose_root'] + "/" + major
self.current_arch = config['arch']
self.required_pkgs = rlvars['iso_map']['lorax']['required_pkgs']
self.mock_work_root = config['mock_work_root']
self.lorax_result_root = config['mock_work_root'] + "/" + "lorax"
self.mock_isolation = isolation
self.iso_map = rlvars['iso_map']
#self.livemap = rlvars['livemap']
self.cloudimages = rlvars['cloudimages']
self.release_candidate = rc
self.s3 = s3
self.force_unpack = force_unpack
self.force_download = force_download
self.extra_iso = extra_iso
self.extra_iso_mode = extra_iso_mode
self.checksum = rlvars['checksum']
self.profile = rlvars['profile']
self.hashed = hashed
self.updated_image = updated_image
self.updated_image_increment = "." + image_increment
self.updated_image_date = (time.strftime("%Y%m%d", time.localtime())
+ self.updated_image_increment)
# Relevant major version items
self.arch = arch
self.arches = rlvars['allowed_arches']
self.release = rlvars['revision']
self.minor_version = rlvars['minor']
self.revision_level = rlvars['revision'] + "-" + rlvars['rclvl']
self.revision = rlvars['revision']
self.rclvl = rlvars['rclvl']
self.repos = rlvars['iso_map']['lorax']['repos']
self.repo_base_url = config['repo_base_url']
self.project_id = rlvars['project_id']
self.structure = rlvars['structure']
self.bugurl = rlvars['bugurl']
self.extra_files = rlvars['extra_files']
self.container = config['container']
if 'container' in rlvars and len(rlvars['container']) > 0:
self.container = rlvars['container']
# all bucket related info
if s3_region:
self.s3_region = s3_region
else:
self.s3_region = config['aws_region']
if s3_bucket:
self.s3_bucket = s3_bucket
else:
self.s3_bucket = config['bucket']
if s3_bucket_url:
self.s3_bucket_url = s3_bucket_url
else:
self.s3_bucket_url = config['bucket_url']
#if s3:
# self.s3 = boto3.client('s3')
# arch specific
self.hfs_compat = hfs_compat
# Templates
file_loader = FileSystemLoader(f"{_rootdir}/templates")
self.tmplenv = Environment(loader=file_loader)
self.compose_latest_dir = os.path.join(
config['compose_root'],
major,
f"latest-{self.shortname}-{self.profile}"
)
self.compose_latest_sync = os.path.join(
self.compose_latest_dir,
"compose"
)
self.compose_log_dir = os.path.join(
self.compose_latest_dir,
"work/logs"
)
self.iso_work_dir = os.path.join(
self.compose_latest_dir,
"work/isos"
)
self.live_work_dir = os.path.join(
self.compose_latest_dir,
"work/live"
)
self.image_work_dir = os.path.join(
self.compose_latest_dir,
"work/images"
)
self.lorax_work_dir = os.path.join(
self.compose_latest_dir,
"work/lorax"
)
# This is temporary for now.
if logger is None:
self.log = logging.getLogger("iso")
self.log.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s :: %(name)s :: %(message)s',
'%Y-%m-%d %H:%M:%S'
)
handler.setFormatter(formatter)
self.log.addHandler(handler)
self.log.info('iso build init')
self.repolist = Shared.build_repo_list(
self.repo_base_url,
self.repos,
self.project_id,
self.current_arch,
self.compose_latest_sync,
self.compose_dir_is_here,
self.hashed
)
self.log.info(self.revision_level)
def run(self):
work_root = os.path.join(
self.compose_latest_dir,
'work'
)
sync_root = self.compose_latest_sync
log_root = os.path.join(
work_root,
"logs"
)
self.iso_build()
self.log.info('Compose repo directory: %s' % sync_root)
self.log.info('ISO Build Logs: /var/lib/mock/{}-{}-{}/result'.format(
self.shortname.lower(), self.major_version, self.current_arch)
)
self.log.info('ISO Build completed.')
def iso_build(self):
"""
This does the general ISO building for the current running
architecture. This generates the mock config and the general script
needed to get this part running.
"""
# Check for local build, build accordingly
# Check for arch specific build, build accordingly
# local AND arch cannot be used together, local supersedes. print
# warning.
self.generate_iso_scripts()
self.run_lorax()
def generate_iso_scripts(self):
"""
Generates the scripts needed to run lorax in mock, as well as to
package up the results.
"""
self.log.info('Generating ISO configuration and scripts')
mock_iso_template = self.tmplenv.get_template('isomock.tmpl.cfg')
mock_sh_template = self.tmplenv.get_template('isobuild.tmpl.sh')
iso_template = self.tmplenv.get_template('buildImage.tmpl.sh')
mock_iso_path = '/var/tmp/lorax-' + self.release + '.cfg'
mock_sh_path = '/var/tmp/isobuild.sh'
iso_template_path = '/var/tmp/buildImage.sh'
rclevel = ''
if self.release_candidate:
rclevel = '-' + self.rclvl
# This is kind of a hack. Installing xorrisofs sets the alternatives to
# it, so backwards compatibility is sort of guaranteed. But we want to
# emulate what pungi does as closely as possible, so unless we
# explicitly ask for xorr (in el8 and 9), we should NOT be using it.
# For RLN and el10, we'll use xorr all the way through. When 8 is no
# longer getting ISOs, we'll remove this section.
required_pkgs = self.required_pkgs.copy()
if self.iso_map['xorrisofs']:
if 'genisoimage' in required_pkgs and 'xorriso' not in required_pkgs:
required_pkgs.append('xorriso')
mock_iso_template_output = mock_iso_template.render(
arch=self.current_arch,
major=self.major_version,
releasever=self.release,
fullname=self.fullname,
shortname=self.shortname,
required_pkgs=required_pkgs,
dist=self.disttag,
repos=self.repolist,
user_agent='{{ user_agent }}',
)
mock_sh_template_output = mock_sh_template.render(
arch=self.current_arch,
major=self.major_version,
releasever=self.release,
isolation=self.mock_isolation,
builddir=self.mock_work_root,
shortname=self.shortname,
revision=self.release,
)
iso_template_output = iso_template.render(
arch=self.current_arch,
major=self.major_version,
minor=self.minor_version,
shortname=self.shortname,
repos=self.repolist,
variant=self.iso_map['lorax']['variant'],
lorax=self.iso_map['lorax']['lorax_removes'],
distname=self.distname,
revision=self.release,
rc=rclevel,
builddir=self.mock_work_root,
lorax_work_root=self.lorax_result_root,
bugurl=self.bugurl,
)
with open(mock_iso_path, "w+") as mock_iso_entry:
mock_iso_entry.write(mock_iso_template_output)
mock_iso_entry.close()
with open(mock_sh_path, "w+") as mock_sh_entry:
mock_sh_entry.write(mock_sh_template_output)
mock_sh_entry.close()
with open(iso_template_path, "w+") as iso_template_entry:
iso_template_entry.write(iso_template_output)
iso_template_entry.close()
os.chmod(mock_sh_path, 0o755)
os.chmod(iso_template_path, 0o755)
def run_lorax(self):
"""
This actually runs lorax on this system. It will call the right scripts
to do so.
"""
lorax_cmd = '/bin/bash /var/tmp/isobuild.sh'
self.log.info('Starting lorax...')
p = subprocess.call(shlex.split(lorax_cmd))
if p != 0:
self.log.error('An error occurred during execution.')
self.log.error('See the logs for more information.')
raise SystemExit()
def run_pull_lorax_artifacts(self):
"""
Pulls the required artifacts and unpacks them to work/lorax/$arch
"""
# Determine if we're only managing one architecture out of all of them.
# It does not hurt to do everything at once. But the option is there.
unpack_single_arch = False
arches_to_unpack = self.arches
if self.arch:
unpack_single_arch = True
arches_to_unpack = [self.arch]
self.log.info(Color.INFO + 'Determining the latest pulls...')
if self.s3:
latest_artifacts = Shared.s3_determine_latest(
self.s3_bucket,
self.release,
self.arches,
'tar.gz',
'lorax',
self.log
)
else:
latest_artifacts = Shared.reqs_determine_latest(
self.s3_bucket_url,
self.release,
self.arches,
'tar.gz',
'lorax',
self.log
)
self.log.info(Color.INFO + 'Downloading requested artifact(s)')
for arch in arches_to_unpack:
lorax_arch_dir = os.path.join(
self.lorax_work_dir,
arch
)
if arch not in latest_artifacts:
self.log.error(Color.FAIL + 'No lorax artifacts for ' + arch)
continue
source_path = latest_artifacts[arch]
full_drop = f'{lorax_arch_dir}/lorax-{self.release}-{arch}.tar.gz'
if not os.path.exists(lorax_arch_dir):
os.makedirs(lorax_arch_dir, exist_ok=True)
self.log.info(
'Downloading artifact for ' + Color.BOLD + arch + Color.END
)
if self.s3:
Shared.s3_download_artifacts(
self.force_download,
self.s3_bucket,
source_path,
full_drop,
self.log
)
else:
Shared.reqs_download_artifacts(
self.force_download,
self.s3_bucket_url,
source_path,
full_drop,
self.log
)
self.log.info(Color.INFO + 'Download phase completed')
self.log.info(Color.INFO + 'Beginning unpack phase...')
for arch in arches_to_unpack:
tarname = f'lorax-{self.release}-{arch}.tar.gz'
tarball = os.path.join(
self.lorax_work_dir,
arch,
tarname
)
if not os.path.exists(tarball):
self.log.error(Color.FAIL + 'Artifact does not exist: ' + tarball)
continue
self._unpack_artifacts(self.force_unpack, arch, tarball)
self.log.info(Color.INFO + 'Unpack phase completed')
self.log.info(Color.INFO + 'Beginning image variant phase')
for arch in arches_to_unpack:
self.log.info(
'Copying base lorax for ' + Color.BOLD + arch + Color.END
)
for variant in self.iso_map['images']:
self._copy_lorax_to_variant(self.force_unpack, arch, variant)
self._copy_boot_to_work(self.force_unpack, arch)
self.log.info(Color.INFO + 'Image variant phase completed')
self.log.info(Color.INFO + 'Beginning treeinfo phase')
for arch in arches_to_unpack:
for variant in self.iso_map['images']:
self.log.info(
'Configuring treeinfo and discinfo for %s%s %s%s' % (Color.BOLD, arch, variant, Color.END)
)
self._treeinfo_wrapper(arch, variant)
# Do a dirsync for non-disc data
if not self.iso_map['images'][variant]['disc']:
self.log.info(
'Syncing repo data and images for %s%s%s' % (Color.BOLD, variant, Color.END)
)
self._copy_nondisc_to_repo(self.force_unpack, arch, variant)
def _unpack_artifacts(self, force_unpack, arch, tarball):
"""
Unpack the requested artifact(s)
"""
unpack_dir = os.path.join(self.lorax_work_dir, arch)
if not force_unpack:
file_check = os.path.join(unpack_dir, 'lorax/.treeinfo')
if os.path.exists(file_check):
self.log.warning(Color.WARN + 'Artifact (' + arch + ') already unpacked')
return
self.log.info('Unpacking %s' % tarball)
with tarfile.open(tarball) as t:
Shared.tar_safe_extractall(t, unpack_dir)
t.close()
def _copy_lorax_to_variant(self, force_unpack, arch, image):
"""
Copy to variants for easy access by mkiso and for copying to compose dirs
"""
src_to_image = os.path.join(
self.lorax_work_dir,
arch,
'lorax'
)
iso_to_go = os.path.join(
self.iso_work_dir,
arch
)
if not os.path.exists(os.path.join(src_to_image, '.treeinfo')):
self.log.error(Color.FAIL + 'Lorax base image does not exist')
return
path_to_image = os.path.join(
self.lorax_work_dir,
arch,
image
)
if not force_unpack:
file_check = os.path.join(path_to_image, '.treeinfo')
if os.path.exists(file_check):
self.log.warning(Color.WARN + 'Lorax image for ' + image + ' already exists')
return
self.log.info('Copying base lorax to %s directory...' % image)
try:
shutil.copytree(src_to_image, path_to_image, copy_function=shutil.copy2, dirs_exist_ok=True)
except:
self.log.error('%s already exists??' % image)
if self.iso_map['images'][image]['disc']:
self.log.info('Removing boot.iso from %s' % image)
try:
os.remove(path_to_image + '/images/boot.iso')
os.remove(path_to_image + '/images/boot.iso.manifest')
except:
self.log.error(
'[' + Color.BOLD + Color.YELLOW + 'FAIL' + Color.END + '] ' +
'Cannot remove boot.iso'
)
def _copy_boot_to_work(self, force_unpack, arch):
src_to_image = os.path.join(self.lorax_work_dir, arch, 'lorax')
iso_to_go = os.path.join(self.iso_work_dir, arch)
path_to_src_image = os.path.join(src_to_image, 'images/boot.iso')
rclevel = ''
if self.release_candidate:
rclevel = '-' + self.rclvl
discname = f'{self.shortname}-{self.release}{rclevel}-{arch}-boot.iso'
isobootpath = os.path.join(iso_to_go, discname)
manifest = f'{isobootpath}.manifest'
link_name = f'{self.shortname}-{arch}-boot.iso'
link_manifest = link_name + '.manifest'
latest_link_name = f'{self.shortname}-{self.major_version}-latest-{arch}-boot.iso'
latest_link_manifest = latest_link_name + '.manifest'
isobootpath = os.path.join(iso_to_go, discname)
linkbootpath = os.path.join(iso_to_go, link_name)
manifestlink = os.path.join(iso_to_go, link_manifest)
latestlinkbootpath = os.path.join(iso_to_go, latest_link_name)
latestmanifestlink = os.path.join(iso_to_go, latest_link_manifest)
if not force_unpack:
file_check = isobootpath
if os.path.exists(file_check):
self.log.warning(Color.WARN + 'Boot image (' + discname + ') already exists')
return
self.log.info('Copying %s boot iso to work directory...' % arch)
os.makedirs(iso_to_go, exist_ok=True)
try:
shutil.copy2(path_to_src_image, isobootpath)
# For Rocky-ARCH-boot.iso
if os.path.exists(linkbootpath):
os.remove(linkbootpath)
os.symlink(discname, linkbootpath)
# For Rocky-X-latest-ARCH-boot.iso
if os.path.exists(latestlinkbootpath):
os.remove(latestlinkbootpath)
os.symlink(discname, latestlinkbootpath)
except Exception as e:
self.log.error(Color.FAIL + 'We could not copy the image or create a symlink.')
raise SystemExit(e)
# For Rocky-ARCH-boot.iso
if os.path.exists(path_to_src_image + '.manifest'):
shutil.copy2(path_to_src_image + '.manifest', manifest)
os.symlink(manifest.split('/')[-1], manifestlink)
os.symlink(manifest.split('/')[-1], latestmanifestlink)
self.log.info('Creating checksum for %s boot iso...' % arch)
checksum = Shared.get_checksum(isobootpath, self.checksum, self.log)
if not checksum:
self.log.error(Color.FAIL + isobootpath + ' not found! Are you sure we copied it?')
return
with open(isobootpath + '.CHECKSUM', "w+") as c:
c.write(checksum)
c.close()
# For Rocky-ARCH-boot.iso
linksum = Shared.get_checksum(linkbootpath, self.checksum, self.log)
if not linksum:
self.log.error(Color.FAIL + linkbootpath + ' not found! Did we actually make the symlink?')
return
with open(linkbootpath + '.CHECKSUM', "w+") as l:
l.write(linksum)
l.close()
# For Rocky-X-latest-ARCH-boot.iso
latestlinksum = Shared.get_checksum(latestlinkbootpath, self.checksum, self.log)
if not latestlinksum:
self.log.error(Color.FAIL + latestlinkbootpath + ' not found! Did we actually make the symlink?')
return
with open(latestlinkbootpath + '.CHECKSUM', "w+") as l:
l.write(latestlinksum)
l.close()
def _copy_nondisc_to_repo(self, force_unpack, arch, repo):
"""
Syncs data from a non-disc set of images to the appropriate repo. Repo
and image MUST match names for this to work.
"""
pathway = os.path.join(
self.compose_latest_sync,
repo,
arch,
'os'
)
kspathway = os.path.join(
self.compose_latest_sync,
repo,
arch,
'kickstart'
)
src_to_image = os.path.join(
self.lorax_work_dir,
arch,
repo
)
if not os.path.exists(pathway):
self.log.error(Color.FAIL +
'Repo and Image variant either does NOT match or does ' +
'NOT exist. Are you sure you have synced the repository?'
)
if not force_unpack:
found_files = []
for y in ArchCheck.archfile[arch]:
imgpath = os.path.join(
pathway,
y
)
if os.path.exists(imgpath):
found_files.append(y)
if os.path.exists(pathway + '/images/boot.iso'):
found_files.append('/images/boot.iso')
if len(found_files) > 0:
self.log.warning(Color.WARN + 'Images and data for ' + repo + ' and ' + arch + ' already exist.')
return
self.log.info(Color.INFO + 'Copying images and data for ' + repo + ' ' + arch)
try:
shutil.copytree(src_to_image, pathway, copy_function=shutil.copy2, dirs_exist_ok=True)
shutil.copytree(src_to_image, kspathway, copy_function=shutil.copy2, dirs_exist_ok=True)
except:
self.log.error('%s already exists??' % repo)
def run_boot_sync(self):
"""
This unpacks into BaseOS/$arch/os, assuming there's no data actually
there. There should be checks.
1. Sync from work/lorax/$arch to work/lorax/$arch/dvd
2. Sync from work/lorax/$arch to work/lorax/$arch/minimal
3. Sync from work/lorax/$arch to BaseOS/$arch/os
4. Modify (3) .treeinfo
5. Modify (1) .treeinfo, keep out boot.iso checksum
6. Create a .treeinfo for AppStream
"""
unpack_single_arch = False
arches_to_unpack = self.arches
if self.arch:
unpack_single_arch = True
arches_to_unpack = [self.arch]
self._sync_boot(force_unpack=self.force_unpack, arch=self.arch, image=None)
#self._treeinfo_write(arch=self.arch)
def _sync_boot(self, force_unpack, arch, image):
"""
Syncs whatever
"""
self.log.info('Copying lorax to %s directory...' % image)
# checks here, report that it already exists
def _treeinfo_wrapper(self, arch, variant):
"""
Ensure treeinfo and discinfo are written correctly based on the variant
passed. Each file should be configured similarly but also differently
from the next. The Shared module does have a .treeinfo writer, but it
is for basic use. Eventually it'll be expanded to handle this scenario.
"""
image = os.path.join(self.lorax_work_dir, arch, variant)
imagemap = self.iso_map['images'][variant]
data = {
'arch': arch,
'variant': variant,
'variant_path': image,
'checksum': self.checksum,
'distname': self.distname,
'fullname': self.fullname,
'shortname': self.shortname,
'release': self.release,
'timestamp': self.timestamp,
}
try:
Shared.treeinfo_modify_write(data, imagemap, self.log)
except Exception as e:
self.log.error(Color.FAIL + 'There was an error writing treeinfo.')
self.log.error(e)
# Next set of functions are loosely borrowed (in concept) from pungi. Some
# stuff may be combined/mixed together, other things may be simplified or
# reduced in nature.
def run_build_extra_iso(self):
"""
Builds DVD images based on the data created from the initial lorax on
each arch. This should NOT be called during the usual run() section.
"""
sync_root = self.compose_latest_sync
self.log.info(Color.INFO + 'Starting Extra ISOs phase')
if not os.path.exists(self.compose_base):
self.log.error(Color.FAIL + 'The compose directory MUST be here. Cannot continue.')
raise SystemExit()
self._extra_iso_build_wrap()
self.log.info('Compose repo directory: %s' % sync_root)
self.log.info('ISO result directory: %s/$arch' % self.iso_work_dir)
self.log.info(Color.INFO + 'Extra ISO phase completed.')
def _extra_iso_build_wrap(self):
"""
Try to figure out where the build is going; we only support mock for
now.
"""
work_root = os.path.join(
self.compose_latest_dir,
'work'
)
arches_to_build = self.arches
if self.arch:
arches_to_build = [self.arch]
images_to_build = self.iso_map['images']
if self.extra_iso:
images_to_build = [self.extra_iso]
for y in images_to_build:
if 'isoskip' in self.iso_map['images'][y] and self.iso_map['images'][y]['isoskip']:
self.log.info(Color.WARN + 'Skipping ' + y + ' image')
continue
# Kind of hacky, but if we decide to have more than boot/dvd ISOs,
# we need to make sure volname matches the initial lorax image,
# whose volid contains "dvd". In other words, the file name doesn't
# always equate to the volume ID
if 'volname' in self.iso_map['images'][y]:
volname = self.iso_map['images'][y]['volname']
else:
volname = y
for a in arches_to_build:
lorax_path = os.path.join(self.lorax_work_dir, a, 'lorax', '.treeinfo')
image_path = os.path.join(self.lorax_work_dir, a, y, '.treeinfo')
if not os.path.exists(image_path):
self.log.error(Color.FAIL + 'Lorax data not found for ' + y + '. Skipping.')
if not os.path.exists(lorax_path):
self.log.error(Color.FAIL + 'Lorax not found at all. This is considered fatal.')
raise SystemExit()
grafts = self._generate_graft_points(
a,
y,
self.iso_map['images'][y]['repos'],
)
self._extra_iso_local_config(a, y, grafts, work_root, volname)
if self.extra_iso_mode == 'local':
self._extra_iso_local_run(a, y, work_root)
elif self.extra_iso_mode == 'podman':
continue
else:
self.log.error(Color.FAIL + 'Mode specified is not valid.')
raise SystemExit()
if self.extra_iso_mode == 'podman':
self._extra_iso_podman_run(arches_to_build, images_to_build, work_root)
def _extra_iso_local_config(self, arch, image, grafts, work_root, volname):
"""
Local ISO build configuration - This generates the configuration for
both mock and podman entries
"""
self.log.info('Generating Extra ISO configuration and script')
entries_dir = os.path.join(work_root, "entries")
boot_iso = os.path.join(work_root, "lorax", arch, "lorax/images/boot.iso")
mock_iso_template = self.tmplenv.get_template('isomock.tmpl.cfg')
mock_sh_template = self.tmplenv.get_template('extraisobuild.tmpl.sh')
iso_template = self.tmplenv.get_template('buildExtraImage.tmpl.sh')
xorriso_template = self.tmplenv.get_template('xorriso.tmpl.txt')
iso_readme_template = self.tmplenv.get_template('ISOREADME.tmpl')
mock_iso_path = f'/var/tmp/lorax-{self.major_version}.cfg'
mock_sh_path = f'{entries_dir}/extraisobuild-{arch}-{image}.sh'
iso_template_path = f'{entries_dir}/buildExtraImage-{arch}-{image}.sh'
xorriso_template_path = f'{entries_dir}/xorriso-{arch}-{image}.txt'
iso_readme_path = f'{self.iso_work_dir}/{arch}/README'
#print(iso_readme_path)
log_root = os.path.join(
work_root,
"logs",
self.date_stamp
)
if not os.path.exists(log_root):
os.makedirs(log_root, exist_ok=True)
log_path_command = f'| tee -a {log_root}/{arch}-{image}.log'
# This is kind of a hack. Installing xorrisofs sets the alternatives to
# it, so backwards compatibility is sort of guaranteed. But we want to
# emulate what pungi does as closely as possible, so unless we
# explicitly ask for xorr (in el8 and 9), we should NOT be using it.
# For RLN and el10, we'll use xorr all the way through. When 8 is no
# longer getting ISOs, we'll remove this section.
required_pkgs = self.required_pkgs.copy()
if self.iso_map['xorrisofs']:
if 'genisoimage' in required_pkgs and 'xorriso' not in required_pkgs:
required_pkgs.append('xorriso')
rclevel = ''
if self.release_candidate:
rclevel = '-' + self.rclvl
datestamp = ''
if self.updated_image:
datestamp = '-' + self.updated_image_date
volid = f'{self.shortname}-{self.major_version}-{self.minor_version}{rclevel}-{arch}-{volname}'
isoname = f'{self.shortname}-{self.release}{rclevel}{datestamp}-{arch}-{image}.iso'
generic_isoname = f'{self.shortname}-{arch}-{image}.iso'
latest_isoname = f'{self.shortname}-{self.major_version}-latest-{arch}-{image}.iso'
lorax_pkg_cmd = '/usr/bin/dnf install {} -y {}'.format(
' '.join(required_pkgs),
log_path_command
)
mock_iso_template_output = mock_iso_template.render(
arch=self.current_arch,
major=self.major_version,
fullname=self.fullname,
shortname=self.shortname,
required_pkgs=required_pkgs,
dist=self.disttag,
repos=self.repolist,
user_agent='{{ user_agent }}',
compose_dir_is_here=True,
compose_dir=self.compose_root,
)
mock_sh_template_output = mock_sh_template.render(
arch=self.current_arch,
major=self.major_version,
isolation=self.mock_isolation,
builddir=self.mock_work_root,
shortname=self.shortname,
isoname=isoname,
entries_dir=entries_dir,
image=image,
)
opts = {
'arch': arch,
'iso_name': isoname,
'volid': volid,
'graft_points': grafts,
'use_xorrisofs': self.iso_map['xorrisofs'],
'iso_level': self.iso_map['iso_level'],
}
if opts['use_xorrisofs']:
# Generate a xorriso compatible dialog
with open(grafts) as xp:
xorpoint = xp.read()
xp.close()
xorriso_template_output = xorriso_template.render(
boot_iso=boot_iso,
isoname=isoname,
volid=volid,
graft=xorpoint,
arch=arch,
)
with open(xorriso_template_path, "w+") as xorriso_template_entry:
xorriso_template_entry.write(xorriso_template_output)
xorriso_template_entry.close()
opts['graft_points'] = xorriso_template_path
make_image = '{} {}'.format(
Shared.get_make_image_cmd(
opts,
self.hfs_compat
),
log_path_command
)
isohybrid = Shared.get_isohybrid_cmd(opts)
implantmd5 = Shared.get_implantisomd5_cmd(opts)
make_manifest = Shared.get_manifest_cmd(opts)
iso_template_output = iso_template.render(
extra_iso_mode=self.extra_iso_mode,
arch=arch,
compose_work_iso_dir=self.iso_work_dir,
make_image=make_image,
isohybrid=isohybrid,
implantmd5=implantmd5,
make_manifest=make_manifest,
lorax_pkg_cmd=lorax_pkg_cmd,
isoname=isoname,
generic_isoname=generic_isoname,
latest_isoname=latest_isoname,
)
iso_readme_template_output = iso_readme_template.render(
arch=arch
)
with open(mock_iso_path, "w+") as mock_iso_entry:
mock_iso_entry.write(mock_iso_template_output)
mock_iso_entry.close()
with open(mock_sh_path, "w+") as mock_sh_entry:
mock_sh_entry.write(mock_sh_template_output)
mock_sh_entry.close()
with open(iso_template_path, "w+") as iso_template_entry:
iso_template_entry.write(iso_template_output)
iso_template_entry.close()
with open(iso_readme_path, "w+") as iso_readme_entry:
iso_readme_entry.write(iso_readme_template_output)
iso_readme_entry.close()
os.chmod(mock_sh_path, 0o755)
os.chmod(iso_template_path, 0o755)
def _extra_iso_local_run(self, arch, image, work_root):
"""
Runs the actual local process using mock. This is for running in
peridot or running on a machine that does not have podman, but does
have mock available.
"""
entries_dir = os.path.join(work_root, "entries")
extra_iso_cmd = f'/bin/bash {entries_dir}/extraisobuild-{arch}-{image}.sh'
self.log.info('Starting mock build...')
p = subprocess.call(shlex.split(extra_iso_cmd))
if p != 0:
self.log.error('An error occurred during execution.')
self.log.error('See the logs for more information.')
raise SystemExit()
# Copy it if the compose dir is here?
def _extra_iso_podman_run(self, arches, images, work_root):
"""
Does all the image building in podman containers to parallelize the
builds. You can call this instead of looping mock, or when not running
it in peridot. This gives the Release Engineer a little more
flexibility if they care enough.
This honestly assumes you are running this on a machine that has access
to the compose directories. It's the same as if you were doing a
reposync of the repositories.
"""
cmd = Shared.podman_cmd(self.log)
entries_dir = os.path.join(work_root, "entries")
isos_dir = os.path.join(work_root, "isos")
bad_exit_list = []
checksum_list = []
datestamp = ''
if self.updated_image:
datestamp = '-' + self.updated_image_date
for i in images:
entry_name_list = []
image_name = i
arch_sync = arches.copy()
for a in arch_sync:
entry_name = f'buildExtraImage-{a}-{i}.sh'
entry_name_list.append(entry_name)
rclevel = ''
if self.release_candidate:
rclevel = '-' + self.rclvl
isoname = '{}/{}-{}{}{}-{}-{}.iso'.format(
a,
self.shortname,
self.revision,
rclevel,
datestamp,
a,
i
)
genericname = '{}/{}-{}-{}.iso'.format(
a,
self.shortname,
a,
i
)
latestname = '{}/{}-{}-latest-{}-{}.iso'.format(
a,
self.shortname,
self.major_version,
a,
i
)
checksum_list.append(isoname)
checksum_list.append(genericname)
checksum_list.append(latestname)
for pod in entry_name_list:
podman_cmd_entry = '{} run -d -it -v "{}:{}" -v "{}:{}" --name {} --entrypoint {}/{} {}'.format(
cmd,
self.compose_root,
self.compose_root,
entries_dir,
entries_dir,
pod,
entries_dir,
pod,
self.container
)
process = subprocess.call(
shlex.split(podman_cmd_entry),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
join_all_pods = ' '.join(entry_name_list)
time.sleep(3)
self.log.info(Color.INFO + 'Building ' + i + ' ...')
pod_watcher = f'{cmd} wait {join_all_pods}'
watch_man = subprocess.call(
shlex.split(pod_watcher),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
# After the above is done, we'll check each pod process for an exit
# code.
pattern = "Exited (0)"
for pod in entry_name_list:
checkcmd = f'{cmd} ps -f status=exited -f name={pod}'
podcheck = subprocess.Popen(
checkcmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True
)
output, errors = podcheck.communicate()
if 'Exited (0)' not in output.decode():
self.log.error(Color.FAIL + pod)
bad_exit_list.append(pod)
rmcmd = f'{cmd} rm {join_all_pods}'
rmpod = subprocess.Popen(
rmcmd,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
shell=True
)
entry_name_list.clear()
for p in checksum_list:
path = os.path.join(isos_dir, p)
if os.path.exists(path):
self.log.info(Color.INFO + 'Performing checksum for ' + p)
checksum = Shared.get_checksum(path, self.checksum, self.log)
if not checksum:
self.log.error(Color.FAIL + path + ' not found! Are you sure it was built?')
with open(path + '.CHECKSUM', "w+") as c:
c.write(checksum)
c.close()
self.log.info(Color.INFO + 'Building ' + i + ' completed')
if len(bad_exit_list) == 0:
self.log.info(Color.INFO + 'Images built successfully.')
else:
self.log.error(
Color.FAIL +
'There were issues with the work done. As a result, ' +
'some/all ISOs may not exist.'
)
def _generate_graft_points(
self,
arch,
iso,
variants,
):
"""
Build the graft point list (boot data, extra files, and packages) for an
extra ISO. This should NOT be called during the usual run() section.
"""
lorax_base_dir = os.path.join(self.lorax_work_dir, arch)
global_work_dir = os.path.join(self.compose_latest_dir, "work/global")
self.log.info(Color.INFO + 'Generating graft points for extra iso: (' + arch + ') ' + iso)
files = {}
# This is the data we need to actually boot
lorax_for_var = os.path.join(lorax_base_dir, iso)
if not os.path.exists(lorax_for_var + '/.treeinfo'):
self.log.info(
Color.FAIL +
'!! .treeinfo is missing, does this variant actually exist? !!'
)
return
# extra files
extra_files_for_var = os.path.join(
global_work_dir,
"extra-files"
)
# actually get the boot data
files = self._get_grafts([lorax_for_var, extra_files_for_var])
# This is to get all the packages for each repo
for repo in variants:
pkg_for_var = os.path.join(
self.compose_latest_sync,
repo,
arch,
self.structure['packages']
)
rd_for_var = os.path.join(
self.compose_latest_sync,
repo,
arch,
self.structure['repodata']
)
for k, v in self._get_grafts([pkg_for_var]).items():
files[os.path.join(repo, "Packages", k)] = v
for k, v in self._get_grafts([rd_for_var]).items():
files[os.path.join(repo, "repodata", k)] = v
grafts = f'{lorax_base_dir}/{iso}-{arch}-grafts'
xorrs = f'{lorax_base_dir}/xorriso-{iso}-{arch}.txt'
# Generate exclusion list/dict from boot.iso manifest
boot_manifest = f'{lorax_base_dir}/lorax/images/boot.iso.manifest'
# Boot configs and images that may change
# It's unlikely these will be changed in empanadas; they're used as is
# and it works fine. This is a carry-over from a recent pungi commit,
# based on an issue I had filed. The above was the original part, the
# below is a pungi "buildinstall" thing that we don't do, but may
# include as a feature if it ever happens.
updatable_files = set(ArchCheck.boot_configs + ArchCheck.boot_images + ['.discinfo'])
ignores = set()
updatables = set()
try:
with open(boot_manifest) as i:
# ignores = set(line.lstrip("/").rstrip("\n") for line in i)
for line in i:
path = line.lstrip("/").rstrip("\n")
if path in updatable_files:
updatables.add(path)
else:
ignores.add(path)
except Exception as e:
self.log.error(Color.FAIL + 'File was likely not found.')
raise SystemExit(e)
self._write_grafts(
grafts,
xorrs,
files,
exclude=ignores,
update=updatables
)
if self.iso_map['xorrisofs']:
grafters = xorrs
else:
grafters = grafts
return grafters
def _get_grafts(self, paths, exclusive_paths=None, exclude=None):
"""
Actually get some grafts (akin to pungi's get_iso_contents); called by _generate_graft_points
"""
result = {}
exclude = exclude or []
exclusive_paths = exclusive_paths or []
for p in paths:
if isinstance(p, dict):
tree = p
else:
tree = Idents.scanning(p)
result = Idents.merging(result, tree)
for p in exclusive_paths:
tree = Idents.scanning(p)
result = Idents.merging(result, tree, exclusive=True)
# Resolves possible symlinks
for key in result.keys():
path = result[key]
if os.path.islink(path):
real_path = os.readlink(path)
abspath = os.path.normpath(os.path.join(os.path.dirname(path), real_path))
if not abspath.startswith(self.compose_base):
result[key] = abspath
return result
def _write_grafts(self, filepath, xorrspath, u, exclude=None, update=None):
"""
Write out the graft points
"""
seen = set()
# There are files that are on the exclude list typically.
exclude = exclude or []
# There is a chance files may get updated before being placed in a
# variant ISO - it's rare though. Usually the only thing that differs is
# .discinfo
update = update or []
result = {}
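# Walk the graft map in reverse-sorted order: file entries are always
# kept (and their parent directory remembered), while directory entries
# (trailing "/") are only kept if nothing beneath them has been seen yet.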
for zl in sorted(u, reverse=True):
dirn = os.path.dirname(zl)
if not zl.endswith("/"):
result[zl] = u[zl]
seen.add(dirn)
continue
found = False
for j in seen:
if j.startswith(dirn):
found = True
break
if not found:
result[zl] = u[zl]
seen.add(dirn)
# Check whether a file needs to be updated before relying on the
# boot.iso manifest to exclude it
if self.iso_map['xorrisofs']:
with open(xorrspath, "w") as fx:
for zm in sorted(result, key=Idents.sorting):
found = False
replace = False
for upda in update:
if fnmatch(zm, upda):
#print(f'updating: {zm} {upda}')
replace = True
break
for excl in exclude:
if fnmatch(zm, excl):
#print(f'ignoring: {zm} {excl}')
found = True
break
if found:
continue
mcmd = "-update" if replace else "-map"
fx.write("%s %s %s\n" % (mcmd, u[zm], zm))
fx.close()
else:
with open(filepath, "w") as fh:
self.log.info('%sNothing should be excluded in legacy ' \
'genisoimage. Ignoring exclude list.', Color.WARN)
for zl in sorted(result, key=Idents.sorting):
#found = False
#for excl in exclude:
# if fnmatch(zl, excl):
# found = True
# break
#if found:
# continue
fh.write(f"{zl}={u[zl]}\n")
fh.close()
def run_pull_iso_images(self):
"""
Pulls ISOs made in v2
"""
arches_to_unpack = self.arches
latest_artifacts = {}
if self.arch:
unpack_single_arch = True
arches_to_unpack = [self.arch]
print("not supported")
sys.exit(1)
def run_pull_generic_images(self):
"""
Pulls generic images built in peridot and places them where they need
to be. This relies on a list called "cloudimages" in the version
configuration.
"""
unpack_single_arch = False
arches_to_unpack = self.arches
latest_artifacts = {}
if self.arch:
unpack_single_arch = True
arches_to_unpack = [self.arch]
for name, extra in self.cloudimages['images'].items():
self.log.info(Color.INFO + 'Determining the latest images for ' + name + ' ...')
formattype = extra['format']
latest_artifacts[name] = {}
primary_variant = extra['primary_variant'] if 'primary_variant' in extra else None
latest_artifacts[name]['primary'] = primary_variant
latest_artifacts[name]['formattype'] = formattype
variants = extra['variants'] if 'variants' in extra.keys() else [None] # need to loop once
imagename = name
variantname = name
for variant in variants:
if variant:
variantname = f"{name}-{variant}"
self.log.info(Color.INFO + 'Getting latest for variant ' + variant + ' ...')
if self.s3:
latest_artifacts[name][variantname] = Shared.s3_determine_latest(
self.s3_bucket,
self.release,
arches_to_unpack,
formattype,
variantname,
self.log
)
else:
latest_artifacts[name][variantname] = Shared.reqs_determine_latest(
self.s3_bucket_url,
self.release,
arches_to_unpack,
formattype,
variantname,
self.log
)
# latest_artifacts should have at least 1 result if has_variants, else == 1
if not len(latest_artifacts[name][variantname]) > 0:
self.log.warning(Color.WARN + 'No images found for ' + variantname +
'. This means it will be skipped.')
del imagename
del variantname
del variants
#print(latest_artifacts)
for keyname in latest_artifacts.keys():
primary = latest_artifacts[keyname]['primary']
filetype = latest_artifacts[keyname]['formattype']
for imgname in latest_artifacts[keyname]:
keysect = latest_artifacts[keyname][imgname]
if imgname == 'primary':
continue
if not keysect:
continue
if type(keysect) == str:
continue
self.log.info(Color.INFO + 'Attempting to download requested ' +
'artifacts (' + keyname + ')')
for arch in arches_to_unpack:
image_arch_dir = os.path.join(
self.image_work_dir,
arch
)
if arch not in keysect:
self.log.warning(Color.WARN + 'This architecture (' + arch + ') does not exist for this image or var is a string.')
continue
source_path = keysect[arch]
drop_name = source_path.split('/')[-1]
# Docker containers get a "layer" name, this hack gets
# around it. I didn't feel like adding another config opt.
if 'layer' in drop_name:
fsuffix = drop_name.replace('layer', '')
drop_name = source_path.split('/')[-3] + fsuffix
checksum_name = drop_name + '.CHECKSUM'
full_drop = f'{image_arch_dir}/{drop_name}'
checksum_drop = f'{image_arch_dir}/{drop_name}.CHECKSUM'
if not os.path.exists(image_arch_dir):
os.makedirs(image_arch_dir, exist_ok=True)
self.log.info('Downloading artifact for ' + Color.BOLD + arch + Color.END)
if self.s3:
Shared.s3_download_artifacts(
self.force_download,
self.s3_bucket,
source_path,
full_drop,
self.log
)
else:
Shared.reqs_download_artifacts(
self.force_download,
self.s3_bucket_url,
source_path,
full_drop,
self.log
)
self.log.info('Creating checksum ...')
checksum = Shared.get_checksum(full_drop, self.checksum, self.log)
if not checksum:
self.log.error(Color.FAIL + full_drop + ' not found! Are you sure we copied it?')
continue
with open(checksum_drop, 'w+') as c:
c.write(checksum)
c.close()
self.log.info('Creating a symlink to latest image...')
latest_name = '{}/{}-{}-{}.latest.{}.{}'.format(
image_arch_dir,
self.shortname,
self.major_version,
imgname,
arch,
filetype
)
latest_path = latest_name.split('/')[-1]
latest_checksum = '{}/{}-{}-{}.latest.{}.{}.CHECKSUM'.format(
image_arch_dir,
self.shortname,
self.major_version,
imgname,
arch,
filetype
)
# For some reason python doesn't have a "yeah just change this
# link" part of the function
if os.path.exists(latest_name):
os.remove(latest_name)
os.symlink(drop_name, latest_name)
self.log.info('Creating checksum for latest symlinked image...')
shutil.copy2(checksum_drop, latest_checksum)
with open(latest_checksum, 'r') as link:
checkdata = link.read()
checkdata = checkdata.replace(drop_name, latest_path)
with open(latest_checksum, 'w+') as link:
link.write(checkdata)
link.close()
# If this is the primary image, set the appropriate symlink
# and checksum
if primary and primary in drop_name:
# If an image is the primary, we set this.
latest_primary_name = '{}/{}-{}-{}.latest.{}.{}'.format(
image_arch_dir,
self.shortname,
self.major_version,
keyname,
arch,
filetype
)
latest_primary_checksum = '{}/{}-{}-{}.latest.{}.{}.CHECKSUM'.format(
image_arch_dir,
self.shortname,
self.major_version,
keyname,
arch,
filetype
)
latest_primary_path = latest_primary_name.split('/')[-1]
self.log.info('This is the primary image, setting link and checksum')
if os.path.exists(latest_primary_name):
os.remove(latest_primary_name)
os.symlink(drop_name, latest_primary_name)
shutil.copy2(checksum_drop, latest_primary_checksum)
with open(latest_primary_checksum) as link:
checkpdata = link.read()
checkpdata = checkpdata.replace(drop_name, latest_primary_path)
with open(latest_primary_checksum, 'w+') as link:
link.write(checkpdata)
link.close()
self.log.info(Color.INFO + 'Image download phase completed')
class LiveBuild:
"""
This helps us build the live images for Rocky Linux. The mode is "simple"
by default when using mock.
"""
def __init__(
self,
rlvars,
config,
major,
hfs_compat: bool = False,
force_download: bool = False,
isolation: str = 'simple',
live_iso_mode: str = 'local',
compose_dir_is_here: bool = False,
hashed: bool = False,
image=None,
justcopyit: bool = False,
force_build: bool = False,
updated_image: bool = False,
image_increment: str = '0',
peridot: bool = False,
builder: str = 'default',
logger=None
):
self.image = image
self.justcopyit = justcopyit
self.fullname = rlvars['fullname']
self.distname = config['distname']
self.shortname = config['shortname']
self.current_arch = config['arch']
# Relevant config items
self.major_version = major
self.compose_dir_is_here = compose_dir_is_here
self.date_stamp = config['date_stamp']
self.compose_root = config['compose_root']
self.compose_base = config['compose_root'] + "/" + major
self.current_arch = config['arch']
self.livemap = rlvars['livemap']
self.required_pkgs = rlvars['livemap']['required_pkgs']
self.mock_work_root = config['mock_work_root']
self.live_result_root = config['mock_work_root'] + "/lmc"
self.mock_isolation = isolation
self.force_download = force_download
self.force_build = force_build
self.live_iso_mode = live_iso_mode
self.checksum = rlvars['checksum']
self.profile = rlvars['profile']
self.hashed = hashed
self.peridot = peridot
# determine builder to use. if a config doesn't have it set, assume
# lorax, the default option.
if rlvars['livemap']['builder']:
self.livebuilder = rlvars['livemap']['builder']
else:
self.livebuilder = "lorax"
# An explicitly requested builder (anything other than the 'default' sentinel) overrides the config.
if builder != "default":
self.livebuilder = builder
# Relevant major version items
self.arch = config['arch']
self.arches = rlvars['allowed_arches']
self.release = rlvars['revision']
self.minor_version = rlvars['minor']
self.revision = rlvars['revision'] + "-" + rlvars['rclvl']
self.rclvl = rlvars['rclvl']
self.disttag = rlvars['disttag']
self.repos = rlvars['iso_map']['lorax']['repos']
self.repo_base_url = config['repo_base_url']
self.project_id = rlvars['project_id']
self.structure = rlvars['structure']
self.bugurl = rlvars['bugurl']
self.container = config['container']
if 'container' in rlvars and len(rlvars['container']) > 0:
self.container = rlvars['container']
self.updated_image = updated_image
self.updated_image_increment = "." + image_increment
self.date = (time.strftime("%Y%m%d", time.localtime())
+ self.updated_image_increment)
# Templates
file_loader = FileSystemLoader(f"{_rootdir}/templates")
self.tmplenv = Environment(loader=file_loader)
self.compose_latest_dir = os.path.join(
config['compose_root'],
major,
f"latest-{self.shortname}-{self.profile}"
)
self.compose_latest_sync = os.path.join(
self.compose_latest_dir,
"compose"
)
self.compose_log_dir = os.path.join(
self.compose_latest_dir,
"work/logs"
)
self.live_work_dir = os.path.join(
self.compose_latest_dir,
"work/live"
)
# This is temporary for now.
if logger is None:
self.log = logging.getLogger("iso")
self.log.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s :: %(name)s :: %(message)s',
'%Y-%m-%d %H:%M:%S'
)
handler.setFormatter(formatter)
self.log.addHandler(handler)
self.log.info('live build init')
self.repolist = Shared.build_repo_list(
self.repo_base_url,
self.repos,
self.project_id,
self.current_arch,
self.compose_latest_sync,
self.compose_dir_is_here,
self.hashed
)
self.log.info(self.revision)
if not os.path.exists(self.compose_latest_dir):
self.log.warning(Color.WARN + 'A compose directory was not found ' +
'here. If there is a failure, it may be due to it ' +
'missing. You may want to generate a fake compose if ' +
'you are simply making your own live images and you run ' +
'into any errors beyond this point.'
)
def run_build_live_iso(self):
"""
Builds live images based on the kickstarts defined in the livemap.
This should NOT be called during the usual run() section.
"""
sync_root = self.compose_latest_sync
self.log.info(Color.INFO + 'Starting Live ISOs phase')
# Check that the arch we're assigned is valid...
if self.current_arch not in self.livemap['allowed_arches']:
self.log.error(Color.FAIL + 'Running an unsupported architecture.')
raise SystemExit()
# Check that the builder is lorax, we don't support anything else yet
if self.livebuilder != "lorax":
self.log.error(Color.FAIL + 'Attempting to use an unsupported builder.')
raise SystemExit()
self._live_iso_build_wrap()
self.log.info('Compose repo directory: %s' % sync_root)
self.log.info('Live ISO result directory: %s/$arch' % self.live_work_dir)
self.log.info(Color.INFO + 'Live ISO phase completed.')
def _live_iso_build_wrap(self):
"""
Prepare and actually build the live images. Based on arguments in self,
we'll either do it on mock in a loop or in podman, just like with the
extra iso phase.
"""
work_root = os.path.join(
self.compose_latest_dir,
'work'
)
images_to_build = list(self.livemap['ksentry'].keys())
if self.image:
images_to_build = [self.image]
self.log.info(
Color.INFO + 'We are planning to build: ' +
', '.join(images_to_build)
)
for i in images_to_build:
self._live_iso_local_config(i, work_root)
if self.live_iso_mode == 'local':
self._live_iso_local_run(self.current_arch, i, work_root)
elif self.live_iso_mode == 'podman':
continue
else:
self.log.error(Color.FAIL + 'Mode specified is not valid.')
raise SystemExit()
if self.live_iso_mode == 'podman':
#self._live_iso_podman_run(self.current_arch, images_to_build, work_root)
self.log.error(Color.FAIL + 'At this time, live images cannot be ' +
'built in podman.')
raise SystemExit()
def _live_iso_local_config(self, image, work_root):
"""
Live ISO build configuration - This generates both mock and podman
entries, regardless of which one is being used.
"""
self.log.info('Generating Live ISO configuration and script')
entries_dir = os.path.join(work_root, "entries")
mock_iso_template = self.tmplenv.get_template('isomock.tmpl.cfg')
mock_sh_template = self.tmplenv.get_template('liveisobuild.tmpl.sh')
iso_template = self.tmplenv.get_template('buildLiveImage.tmpl.sh')
kloc = 'stage'
if self.peridot:
kloc = 'peridot'
mock_iso_path = f'/var/tmp/live-{self.release}.cfg'
mock_sh_path = f'{entries_dir}/liveisobuild-{self.current_arch}-{image}.sh'
iso_template_path = f'{entries_dir}/buildLiveImage-{self.current_arch}-{image}.sh'
log_root = os.path.join(
work_root,
"logs",
self.date_stamp
)
ks_start = self.livemap['ksentry'][image]
if not os.path.exists(log_root):
os.makedirs(log_root, exist_ok=True)
log_path_command = f'| tee -a {log_root}/{self.current_arch}-{image}.log'
required_pkgs = self.livemap['required_pkgs']
volid = f'{self.shortname}-{self.release}-{image}'
isoname = f'{self.shortname}-{self.release}-{image}-{self.current_arch}-{self.date}.iso'
live_pkg_cmd = '/usr/bin/dnf install {} -y {}'.format(
' '.join(required_pkgs),
log_path_command
)
git_clone_cmd = '/usr/bin/git clone {} -b {} /builddir/ks {}'.format(
self.livemap['git_repo'],
self.livemap['branch'],
log_path_command
)
make_image_cmd = ('/usr/sbin/livemedia-creator --ks {} --no-virt '
'--resultdir /builddir/lmc --project="{} {}" --make-iso --volid {} '
'--iso-only --iso-name {} --releasever={} --nomacboot {}').format(
'/builddir/ks.cfg',
self.distname,
image,
volid,
isoname,
self.release,
log_path_command
)
mock_iso_template_output = mock_iso_template.render(
arch=self.current_arch,
major=self.major_version,
releasever=self.release,
fullname=self.fullname,
shortname=self.shortname,
required_pkgs=required_pkgs,
dist=self.disttag,
repos=self.repolist,
compose_dir_is_here=True,
user_agent='{{ user_agent }}',
compose_dir=self.compose_root,
)
mock_sh_template_output = mock_sh_template.render(
arch=self.current_arch,
major=self.major_version,
releasever=self.release,
isolation=self.mock_isolation,
builddir=self.mock_work_root,
shortname=self.shortname,
isoname=isoname,
entries_dir=entries_dir,
image=image,
)
iso_template_output = iso_template.render(
live_iso_mode=self.live_iso_mode,
arch=self.current_arch,
compose_live_work_dir=self.live_work_dir,
make_image=make_image_cmd,
live_pkg_cmd=live_pkg_cmd,
isoname=isoname,
major=self.major_version,
git_clone=git_clone_cmd,
ks_file=ks_start,
kloc=kloc,
)
with open(mock_iso_path, "w+") as mip:
mip.write(mock_iso_template_output)
mip.close()
with open(mock_sh_path, "w+") as msp:
msp.write(mock_sh_template_output)
msp.close()
with open(iso_template_path, "w+") as itp:
itp.write(iso_template_output)
itp.close()
os.chmod(mock_sh_path, 0o755)
os.chmod(iso_template_path, 0o755)
def _live_iso_podman_run(self, arch, images, work_root):
"""
Does all the image building in podman containers to parallelize the
process. You can call this instead of looping mock, or when not running
in peridot. This gives the Release Engineer a little more
flexibility if they care enough.
This honestly assumes you are running this on a machine that has access
to the compose directories. It's the same as if you were doing a
reposync of the repositories.
"""
cmd = Shared.podman_cmd(self.log)
entries_dir = os.path.join(work_root, "entries")
isos_dir = self.live_work_dir
bad_exit_list = []
checksum_list = []
entry_name_list = []
self.log.warning(Color.WARN + 'Building live images in podman is not supported in empanadas.')
self.log.warning(Color.WARN + "If this fails, it's on you to determine the fix.")
for i in images:
image_name = i
entry_name = f'buildLiveImage-{arch}-{i}.sh'
entry_name_list.append(entry_name)
isoname = f'{arch}/{self.shortname}-{i}-{self.major_version}-{arch}-{self.date}.iso'
checksum_list.append(isoname)
print(entry_name_list, cmd, entries_dir)
for pod in entry_name_list:
podman_cmd_entry = '{} run -d -it -v "{}:{}" -v "{}:{}" --name {} --entrypoint {}/{} {}'.format(
cmd,
self.compose_root,
self.compose_root,
entries_dir,
entries_dir,
pod,
entries_dir,
pod,
self.container
)
process = subprocess.call(
shlex.split(podman_cmd_entry),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
join_all_pods = ' '.join(entry_name_list)
time.sleep(3)
self.log.info(Color.INFO + 'Building requested live images ...')
pod_watcher = f'{cmd} wait {join_all_pods}'
watch_man = subprocess.call(
shlex.split(pod_watcher),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
# After the above is done, we'll check each pod process for an exit
# code.
pattern = "Exited (0)"
for pod in entry_name_list:
checkcmd = f'{cmd} ps -f status=exited -f name={pod}'
podcheck = subprocess.Popen(
checkcmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True
)
output, errors = podcheck.communicate()
if 'Exited (0)' not in output.decode():
self.log.error(Color.FAIL + pod)
bad_exit_list.append(pod)
rmcmd = f'{cmd} rm {join_all_pods}'
rmpod = subprocess.Popen(
rmcmd,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
shell=True
)
entry_name_list.clear()
for p in checksum_list:
path = os.path.join(isos_dir, p)
if os.path.exists(path):
self.log.info(Color.INFO + 'Performing checksum for ' + p)
checksum = Shared.get_checksum(path, self.checksum, self.log)
if not checksum:
self.log.error(Color.FAIL + path + ' not found! Are you sure it was built?')
with open(path + '.CHECKSUM', "w+") as c:
c.write(checksum)
c.close()
self.log.info(Color.INFO + 'Building live images completed')
if len(bad_exit_list) == 0:
self.log.info(Color.INFO + 'Live images completed successfully.')
else:
self.log.error(
Color.FAIL +
'There were issues with the work done. As a result, ' +
'some or all ISOs may not be copied later.'
)
def _live_iso_local_run(self, arch, image, work_root):
"""
Runs the actual local process using mock. This is for running in
peridot or running on a machine that does not have podman, but does
have mock available.
"""
entries_dir = os.path.join(work_root, "entries")
live_dir_arch = os.path.join(self.live_work_dir, arch)
isoname = f'{self.shortname}-{self.release}-{image}-{arch}-{self.date}.iso'
isolink = f'{self.shortname}-{self.major_version}-{image}-{arch}-latest.iso'
live_res_dir = f'/var/lib/mock/{self.shortname.lower()}-{self.release}-{arch}/result'
if self.justcopyit:
if os.path.exists(os.path.join(live_dir_arch, isoname)):
self.log.warning(Color.WARN + 'Image already exists.')
if self.force_build:
self.log.warning(Color.WARN + 'Building anyway.')
else:
self.log.warning(Color.WARN + 'Skipping.')
return
live_iso_cmd = f'/bin/bash {entries_dir}/liveisobuild-{arch}-{image}.sh'
self.log.info('Starting mock build...')
p = subprocess.call(shlex.split(live_iso_cmd))
if p != 0:
self.log.error('An error occurred during execution.')
self.log.error('See the logs for more information.')
raise SystemExit()
self.log.warning(
Color.WARN + 'This is meant for builds done in peridot or ' +
'locally for an end user.'
)
self.log.warning(
Color.WARN +
'If you are looping images, your built image may get ' +
'overwritten. Ensure you have justcopyit enabled to avoid this.'
)
if self.justcopyit:
self.log.info(Color.INFO + 'Copying image to work directory')
source_path = os.path.join(live_res_dir, isoname)
dest_path = os.path.join(live_dir_arch, isoname)
link_path = os.path.join(live_dir_arch, isolink)
os.makedirs(live_dir_arch, exist_ok=True)
try:
shutil.copy2(source_path, dest_path)
if os.path.exists(link_path):
os.remove(link_path)
os.symlink(isoname, link_path)
except:
self.log.error(Color.FAIL + 'We could not copy the image or create a symlink.')
return
self.log.info(Color.INFO + 'Generating checksum')
checksum = Shared.get_checksum(dest_path, self.checksum, self.log)
if not checksum:
self.log.error(Color.FAIL + dest_path + ' not found. Did we copy it?')
return
with open(dest_path + '.CHECKSUM', "w+") as c:
c.write(checksum)
c.close()
linksum = Shared.get_checksum(link_path, self.checksum, self.log)
if not linksum:
self.log.error(Color.FAIL + link_path + ' not found. Did we copy it?')
return
with open(link_path + '.CHECKSUM', "w+") as c:
c.write(linksum)
c.close()