attempt to build ISO in both mock and podman

This commit is contained in:
Louis Abel 2022-06-26 21:29:55 -07:00
parent 43470e336b
commit 37c0be3fd0
Signed by untrusted user: label
GPG Key ID: B37E62D143879B36
8 changed files with 262 additions and 23 deletions

View File

@ -6,6 +6,7 @@ import glob
import rpm
import yaml
import logging
import hashlib
# These are a bunch of colors we may use in terminal output
class Color:
@ -20,6 +21,47 @@ class Color:
BOLD = '\033[1m'
END = '\033[0m'
class Utils:
    """
    Quick utilities that may be commonly used
    """
    @staticmethod
    def get_checksum(path, hashtype, logger):
        """
        Generates a checksum from the provided path by doing things in chunks.
        This way we don't do it in memory.

        path:     file to checksum
        hashtype: any algorithm name accepted by hashlib.new (e.g. 'sha256')
        logger:   logger used to report failures

        Returns the formatted checksum string, or False when the hash type
        is invalid or the file cannot be read.
        """
        try:
            checksum = hashlib.new(hashtype)
        except ValueError:
            logger.error("Invalid hash type: %s" % hashtype)
            return False

        try:
            # The with-block guarantees the file is closed even if a read
            # fails part way through (the old code leaked the handle then).
            with open(path, "rb") as input_file:
                while True:
                    chunk = input_file.read(8192)
                    if not chunk:
                        break
                    checksum.update(chunk)
        except IOError as e:
            logger.error("Could not open file %s: %s" % (path, e))
            return False

        stat = os.stat(path)
        base = os.path.basename(path)
        # This emulates our current syncing scripts that runs stat and
        # sha256sum and what not with a very specific output.
        return "%s: %s bytes\n%s (%s) = %s" % (
                base,
                stat.st_size,
                hashtype.upper(),
                base,
                checksum.hexdigest()
        )
# vars and additional checks
# Module-level registries populated elsewhere at import time.
rldict = {}   # presumably per-release configuration entries -- confirm against the loader below
sigdict = {}  # presumably SIG (special interest group) configuration -- confirm against the loader below

View File

@ -70,6 +70,7 @@
- 'isomd5sum'
- 'lorax-templates-rhel'
- 'lorax-templates-generic'
- 'xorriso'
repoclosure_map:
arches:
x86_64: '--forcearch=x86_64 --arch=x86_64 --arch=athlon --arch=i686 --arch=i586 --arch=i486 --arch=i386 --arch=noarch'

View File

@ -70,6 +70,7 @@
- 'isomd5sum'
- 'lorax-templates-rhel'
- 'lorax-templates-generic'
- 'xorriso'
repoclosure_map:
arches:
x86_64: '--forcearch=x86_64 --arch=x86_64 --arch=athlon --arch=i686 --arch=i586 --arch=i486 --arch=i386 --arch=noarch'

View File

@ -70,6 +70,7 @@
- 'isomd5sum'
- 'lorax-templates-rhel'
- 'lorax-templates-generic'
- 'xorriso'
repoclosure_map:
arches:
x86_64: '--forcearch=x86_64 --arch=x86_64 --arch=athlon --arch=i686 --arch=i586 --arch=i486 --arch=i386 --arch=noarch'

View File

@ -65,7 +65,6 @@
- 'libreport-rhel-anaconda-bugzilla'
required_pkgs:
- 'lorax'
- 'genisoimage'
- 'isomd5sum'
- 'lorax-templates-rhel'
- 'lorax-templates-generic'

View File

@ -39,6 +39,18 @@ fi
# If we didn't fail, let's pack up everything!
cd "${MOCKBLD}"
# Get ISO manifest
# Generate the boot.iso file manifest. Prefer xorriso; fall back to isoinfo.
if [ -f "/usr/bin/xorriso" ]; then
    # xorriso --find output: drop the header line, strip quotes, drop the
    # leading "." from each path, then sort into the manifest.
    # Fix: the original "cut -c2- sort" was missing the pipe, so cut tried
    # to read a file literally named "sort" and the manifest stayed empty.
    /usr/bin/xorriso -dev lorax/images/boot.iso --find |
        tail -n+2 |
        tr -d "'" |
        cut -c2- |
        sort >> lorax/images/boot.iso.manifest
elif [ -f "/usr/bin/isoinfo" ]; then
    /usr/bin/isoinfo -R -f -i lorax/images/boot.iso |
        grep -v '/TRANS.TBL$' | sort >> lorax/images/boot.iso.manifest
fi
tar czf "${LORAX_TAR}" lorax "${LOGFILE}"
tar_ret_val=$?

View File

@ -40,7 +40,7 @@ mock_ret_val=$?
if [ $mock_ret_val -eq 0 ]; then
# Copy resulting data to /var/lib/mock/{{ shortname|lower }}-{{ major }}-{{ arch }}/result
mkdir -p "${MOCK_RESL}"
cp "${MOCK_CHRO}${BUILDDIR}/${IMAGE_ISO}" "${MOCK_RESL}"
cp "${MOCK_CHRO}${BUILDDIR}/${IMAGE_ISO}*" "${MOCK_RESL}"
else
echo "!! EXTRA ISO RUN FAILED !!"
exit 1

View File

@ -35,7 +35,7 @@ import productmd.treeinfo
from jinja2 import Environment, FileSystemLoader
from empanadas.common import Color, _rootdir
from empanadas.common import Color, _rootdir, Utils
class IsoBuild:
"""
@ -105,6 +105,10 @@ class IsoBuild:
self.extra_files = rlvars['extra_files']
self.container = config['container']
if 'container' in rlvars and len(rlvars['container']) > 0:
self.container = rlvars['container']
self.staging_dir = os.path.join(
config['staging_root'],
config['category_stub'],
@ -416,6 +420,8 @@ class IsoBuild:
for variant in self.iso_map['images']:
self._copy_lorax_to_variant(self.force_unpack, arch, variant)
self._copy_boot_to_work(self.force_unpack, arch)
self.log.info(
'[' + Color.BOLD + Color.GREEN + 'INFO' + Color.END + '] ' +
'Image variant phase completed'
@ -595,25 +601,6 @@ class IsoBuild:
)
return
self.log.info('Copying %s boot iso to work directory...' % arch)
os.makedirs(iso_to_go, exist_ok=True)
rclevel = ''
if self.release_candidate:
rclevel = '-' + self.rclvl
isobootpath = '{}/{}-{}.{}{}-{}-{}.iso'.format(
iso_to_go,
self.shortname,
self.major_version,
self.minor_version,
rclevel,
arch,
image
)
shutil.copy2(src_to_image + '/images/boot.iso', isobootpath)
self.log.info('Copying base lorax to %s directory...' % image)
try:
shutil.copytree(src_to_image, path_to_image, copy_function=shutil.copy2, dirs_exist_ok=True)
@ -630,6 +617,73 @@ class IsoBuild:
'Cannot remove boot.iso'
)
def _copy_boot_to_work(self, force_unpack, arch):
    """
    Copy the lorax-built boot.iso (and its .manifest, if one was produced)
    for the given arch into the ISO work directory, then write a .CHECKSUM
    file next to the copied image.

    force_unpack: when False, skip everything if the target iso already
                  exists in the work directory.
    """
    src_to_image = os.path.join(
            self.lorax_work_dir,
            arch,
            'lorax'
    )

    iso_to_go = os.path.join(
            self.iso_work_dir,
            arch
    )

    # os.path.join avoids the doubled slash the old
    # '{}/{}'.format(x, '/images/boot.iso') produced.
    path_to_src_image = os.path.join(
            src_to_image,
            'images',
            'boot.iso'
    )

    rclevel = ''
    if self.release_candidate:
        rclevel = '-' + self.rclvl

    discname = '{}-{}.{}{}-{}-{}.iso'.format(
            self.shortname,
            self.major_version,
            self.minor_version,
            rclevel,
            arch,
            'boot'
    )

    isobootpath = os.path.join(iso_to_go, discname)
    manifest = '{}.{}'.format(isobootpath, 'manifest')

    if not force_unpack:
        if os.path.exists(isobootpath):
            self.log.warn(
                    '[' + Color.BOLD + Color.YELLOW + 'WARN' + Color.END + '] ' +
                    'Boot image (' + discname + ') already exists'
            )
            return

    self.log.info('Copying %s boot iso to work directory...' % arch)
    os.makedirs(iso_to_go, exist_ok=True)
    shutil.copy2(path_to_src_image, isobootpath)

    # The manifest is optional; only copy it when lorax produced one.
    if os.path.exists(path_to_src_image + '.manifest'):
        shutil.copy2(path_to_src_image + '.manifest', manifest)

    self.log.info('Creating checksum for %s boot iso...' % arch)
    checksum = Utils.get_checksum(isobootpath, self.checksum, self.log)
    if not checksum:
        self.log.error(
                '[' + Color.BOLD + Color.RED + 'FAIL' + Color.END + '] ' +
                isobootpath + ' not found! Are you sure we copied it?'
        )
        return
    # The context manager closes the file; the old explicit close() inside
    # the with-block was redundant.
    with open(isobootpath + '.CHECKSUM', "w+") as c:
        c.write(checksum)
def run_boot_sync(self):
"""
This unpacks into BaseOS/$arch/os, assuming there's no data actually
@ -866,9 +920,13 @@ class IsoBuild:
)
raise SystemExit()
if self.extra_iso_mode == 'podman':
self._extra_iso_podman_run(arches_to_build, images_to_build, work_root)
def _extra_iso_local_config(self, arch, image, grafts, work_root):
"""
Local ISO build mode - this should build in mock
Local ISO build configuration - This generates the configuration for
both mock and podman entries
"""
self.log.info('Generating Extra ISO configuration and script')
@ -997,6 +1055,114 @@ class IsoBuild:
"""
Runs the actual local process using mock
"""
entries_dir = os.path.join(work_root, "entries")
extra_iso_cmd = '/bin/bash {}/extraisobuild-{}-{}.sh'.format(entries_dir, arch, image)
self.log.info('Starting mock build...')
p = subprocess.call(shlex.split(extra_iso_cmd))
if p != 0:
self.log.error('An error occured during execution.')
self.log.error('See the logs for more information.')
raise SystemExit()
# Copy it if the compose dir is here?
def _extra_iso_podman_run(self, arches, images, work_root):
    """
    Does all the image building in podman containers to parallelize the
    builds. This is a case where you can call this instead of looping mock,
    or not run it in peridot. This gives the Release Engineer a little more
    flexibility if they care enough.

    This honestly assumes you are running this on a machine that has access
    to the compose directories. It's the same as if you were doing a
    reposync of the repositories.
    """
    cmd = self.podman_cmd()
    entries_dir = os.path.join(work_root, "entries")
    # Fix: this list was appended to below but never initialized, which
    # raised a NameError the first time a pod exited non-zero.
    bad_exit_list = []
    for i in images:
        entry_name_list = []
        arch_sync = arches.copy()

        for a in arch_sync:
            entry_name = 'buildExtraImage-{}-{}.sh'.format(a, i)
            entry_name_list.append(entry_name)

        # Start one detached container per arch entry script, mounting the
        # compose root and the entries dir at the same paths inside.
        for pod in entry_name_list:
            podman_cmd_entry = '{} run -d -it -v "{}:{}" -v "{}:{}" --name {} --entrypoint {}/{} {}'.format(
                    cmd,
                    self.compose_root,
                    self.compose_root,
                    entries_dir,
                    entries_dir,
                    pod,
                    entries_dir,
                    pod,
                    self.container
            )
            subprocess.call(
                    shlex.split(podman_cmd_entry),
                    stdout=subprocess.DEVNULL,
                    stderr=subprocess.DEVNULL
            )

        join_all_pods = ' '.join(entry_name_list)
        time.sleep(3)
        self.log.info(
                '[' + Color.BOLD + Color.GREEN + 'INFO' + Color.END + '] ' +
                'Building ' + i + ' ...'
        )
        # Block until every container for this image has exited.
        pod_watcher = '{} wait {}'.format(
                cmd,
                join_all_pods
        )
        subprocess.call(
                shlex.split(pod_watcher),
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL
        )

        # After the above is done, we'll check each pod process for an exit
        # code.
        pattern = "Exited (0)"
        for pod in entry_name_list:
            checkcmd = '{} ps -f status=exited -f name={}'.format(
                    cmd,
                    pod
            )
            podcheck = subprocess.Popen(
                    checkcmd,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    shell=True
            )
            output, errors = podcheck.communicate()
            # Use the single pattern definition instead of repeating the
            # literal inline.
            if pattern not in output.decode():
                self.log.error(
                        '[' + Color.BOLD + Color.RED + 'FAIL' + Color.END + '] ' + pod
                )
                bad_exit_list.append(pod)

        rmcmd = '{} rm {}'.format(
                cmd,
                join_all_pods
        )
        subprocess.Popen(
                rmcmd,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
                shell=True
        )
        entry_name_list.clear()
        self.log.info(
                '[' + Color.BOLD + Color.GREEN + 'INFO' + Color.END + '] ' +
                'Building ' + i + ' completed'
        )
    # NOTE(review): the failure list was collected but never acted upon.
    # Returning it is backward-compatible (callers ignored the old implicit
    # None) and lets future callers surface failed pods.
    return bad_exit_list
def _generate_graft_points(
self,
@ -1471,6 +1637,23 @@ class IsoBuild:
returned_cmd = ' '.join(cmd)
return returned_cmd
def podman_cmd(self) -> str:
    """
    This generates the podman run command. This is in the case that we want
    to do reposyncs in parallel as we cannot reasonably run multiple
    instances of dnf reposync on a single system.

    Returns the path to the podman binary, or aborts the run entirely if
    podman is not installed.
    """
    podman_path = "/usr/bin/podman"
    if os.path.exists(podman_path):
        return podman_path

    # No podman on this host: log it and bail out hard. Docker is not an
    # acceptable substitute here.
    self.log.error('/usr/bin/podman was not found. Good bye.')
    raise SystemExit("\n\n/usr/bin/podman was not found.\n\nPlease "
            " ensure that you have installed the necessary packages on "
            " this system. " + Color.BOLD + "Note that docker is not "
            "supported." + Color.END
    )
class LiveBuild:
"""