handling scheduling of jobs for multiple images

This handles scheduling of jobs for more than one type of
image; currently we'll run tests for Workstation live as well.
It requires some cleverness: we want to run some tests for
*all* images (currently just default_boot_and_install), but
run the tests that can be run with any non-live installer
image only once, against the best image available for the
compose. We introduce a special (openQA,
not fedfind) 'flavor' called 'universal'; we run a couple of
checks to find the best image in the compose for running the
universal tests, and schedule tests for the 'universal' flavor
with that image. The 'best' image is a server or 'generic' DVD
if possible, and if not, a server or 'generic' boot.iso.
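
As a sketch of how this plays out for one arch (the compose
contents below are hypothetical, not taken from a real run):

    Rawhide nightly, x86_64: fedfind finds a generic boot.iso and a
    Workstation live image. The scheduler then posts three sets of jobs:

        FLAVOR=generic_boot      on the boot.iso    (per-image tests)
        FLAVOR=workstation_live  on the live image  (per-image tests)
        FLAVOR=universal         on the boot.iso    (it scores best,
                                 as a nightly has no Server/generic DVD)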

ISO files have the compose's version identifier prepended to
their names. Otherwise they retain their original names, which
should usually be unique within a given compose, except for
boot.iso files, which have their payload and arch added into
their names to ensure they don't overwrite each other.
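
A minimal sketch of the naming rule, mirroring download_image()
in the diff below (the version string and filenames are made up):

    ver = '23 Alpha TC1'.replace(' ', '_')            # '23_Alpha_TC1'
    # boot.iso: fold payload and arch into the name
    "{0}_{1}_{2}_boot.iso".format(ver, 'server', 'x86_64')
    #   -> '23_Alpha_TC1_server_x86_64_boot.iso'
    # any other image just gets the version prefix
    "{0}_{1}".format(ver, 'Fedora-Live-Workstation-x86_64-23_TC1.iso')
    #   -> '23_Alpha_TC1_Fedora-Live-Workstation-x86_64-23_TC1.iso'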

This also adds a mechanism for TESTCASES (in conf_test_suites)
to define a callback which will be called with the flavor of
the image being tested; the result of the callback will be used
as the 'test name' for relval result reporting purposes. This
allows us to report results against the correct 'test instance'
for the image being tested, for tests like Boot_default_install
which have 'test instances' for each image. We can extend this
general approach in future for other cases where we have
multiple 'test instances' for a single test case.
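
A hedged usage sketch of that callback (the function body is the
default_install_cb from the diff below; the exact payload/imagetype
casing coming out of fedfind is an assumption here):

    def default_install_cb(flavor):
        """Figure out the correct test case name for a
        default_boot_and_install pass for a given flavor.
        """
        (payload, imagetype) = flavor.split('_')
        imagetype = imagetype.replace('boot', 'netinst')
        imagetype = imagetype.replace('dvd', 'offline')
        return "{0} {1}".format(payload, imagetype)

    default_install_cb('server_boot')        # -> 'server netinst'
    default_install_cb('generic_dvd')        # -> 'generic offline'
    default_install_cb('workstation_live')   # -> 'workstation live'
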
Adam Williamson 2015-03-18 14:51:01 -07:00
parent 9a2e608395
commit cbe7769748
3 changed files with 88 additions and 41 deletions


@@ -1,10 +1,15 @@
def default_install_cb(flavor):
"""Figure out the correct test case name for a default_boot_and_
install pass for a given flavor.
"""
(payload, imagetype) = flavor.split('_')
imagetype = imagetype.replace('boot', 'netinst')
imagetype = imagetype.replace('dvd', 'offline')
return "{0} {1}".format(payload, imagetype)
TESTCASES = {
"Server offline": {
"section": 'Default boot and install',
"env": "$RUNARCH$",
"type": "Installation",
},
"Server netinst": {
"QA:Testcase_Boot_default_install": {
"name_cb": default_install_cb,
"section": 'Default boot and install',
"env": "$RUNARCH$",
"type": "Installation",
@@ -120,6 +125,7 @@ TESTCASES = {
"type": "Installation",
},
# "": {
# "name_cb": callbackfunc # optional, called with 'flavor'
# "section": "",
# "env": "x86",
# "type": "Installation",
@@ -128,16 +134,21 @@ TESTCASES = {
TESTSUITES = {
"server_simple":[
"Server netinst",
"default_install":[
"QA:Testcase_Boot_default_install",
"QA:Testcase_install_to_VirtIO",
"QA:Testcase_partitioning_guided_empty",
"QA:Testcase_Anaconda_User_Interface_Graphical",
"QA:Testcase_Anaconda_user_creation",
],
"package_set_minimal":[
"QA:Testcase_partitioning_guided_empty",
"QA:Testcase_install_to_VirtIO",
"QA:Testcase_Anaconda_User_Interface_Graphical",
"QA:Testcase_Anaconda_user_creation",
"QA:Testcase_Package_Sets_Minimal_Package_Install",
],
"server_delete_pata":[
"Server netinst",
"QA:Testcase_install_to_PATA",
"QA:Testcase_partitioning_guided_delete_all",
"QA:Testcase_Anaconda_User_Interface_Graphical",
@@ -145,7 +156,6 @@ TESTSUITES = {
"QA:Testcase_Package_Sets_Minimal_Package_Install",
],
"server_sata_multi":[
"Server netinst",
"QA:Testcase_install_to_SATA",
"QA:Testcase_partitioning_guided_multi_select",
"QA:Testcase_Anaconda_User_Interface_Graphical",
@@ -153,7 +163,6 @@ TESTSUITES = {
"QA:Testcase_Package_Sets_Minimal_Package_Install",
],
"server_scsi_updates_img":[
"Server netinst",
"QA:Testcase_install_to_SCSI",
"QA:Testcase_partitioning_guided_empty",
"QA:Testcase_Anaconda_updates.img_via_URL",
@@ -168,7 +177,6 @@ TESTSUITES = {
"QA:Testcase_Kickstart_Http_Server_Ks_Cfg",
],
"server_mirrorlist_graphical":[
"Server netinst",
"QA:Testcase_install_to_VirtIO",
"QA:Testcase_partitioning_guided_empty",
"QA:Testcase_Anaconda_User_Interface_Graphical",
@@ -177,7 +185,6 @@ TESTSUITES = {
"QA:Testcase_Package_Sets_Minimal_Package_Install",
],
"server_repository_http_graphical":[
"Server netinst",
"QA:Testcase_install_to_VirtIO",
"QA:Testcase_partitioning_guided_empty",
"QA:Testcase_Anaconda_User_Interface_Graphical",
@@ -186,7 +193,6 @@ TESTSUITES = {
"QA:Testcase_Package_Sets_Minimal_Package_Install",
],
"server_repository_http_variation":[
"Server netinst",
"QA:Testcase_install_to_VirtIO",
"QA:Testcase_partitioning_guided_empty",
"QA:Testcase_Anaconda_User_Interface_Graphical",
@@ -195,7 +201,6 @@ TESTSUITES = {
"QA:Testcase_Package_Sets_Minimal_Package_Install",
],
"server_mirrorlist_http_variation":[
"Server netinst",
"QA:Testcase_install_to_VirtIO",
"QA:Testcase_partitioning_guided_empty",
"QA:Testcase_Anaconda_User_Interface_Graphical",
@@ -204,7 +209,6 @@ TESTSUITES = {
"QA:Testcase_Package_Sets_Minimal_Package_Install",
],
"server_simple_encrypted": [
"Server netinst",
"QA:Testcase_install_to_VirtIO",
"QA:Testcase_partitioning_guided_empty",
"QA:Testcase_Anaconda_User_Interface_Graphical",
@@ -213,7 +217,6 @@ TESTSUITES = {
"QA:Testcase_partitioning_guided_encrypted",
],
"server_delete_partial": [
"Server netinst",
"QA:Testcase_install_to_VirtIO",
"QA:Testcase_partitioning_guided_delete_partial",
"QA:Testcase_Anaconda_User_Interface_Graphical",
@@ -221,7 +224,6 @@ TESTSUITES = {
"QA:Testcase_Package_Sets_Minimal_Package_Install",
],
"server_simple_free_space": [
"Server netinst",
"QA:Testcase_install_to_VirtIO",
"QA:Testcase_partitioning_guided_free_space",
"QA:Testcase_Anaconda_User_Interface_Graphical",
@@ -229,7 +231,6 @@ TESTSUITES = {
"QA:Testcase_Package_Sets_Minimal_Package_Install",
],
"server_multi_empty": [
"Server netinst",
"QA:Testcase_install_to_VirtIO",
"QA:Testcase_partitioning_guided_multi_empty_all",
"QA:Testcase_Anaconda_User_Interface_Graphical",
@@ -237,7 +238,6 @@ TESTSUITES = {
"QA:Testcase_Package_Sets_Minimal_Package_Install",
],
"server_software_raid": [
"Server netinst",
"QA:Testcase_install_to_VirtIO",
"QA:Testcase_Partitioning_On_Software_RAID",
"QA:Testcase_Anaconda_User_Interface_Graphical",


@@ -20,7 +20,7 @@ from report_job_results import report_results
PERSISTENT = "/var/tmp/openqa_watcher.json"
ISO_PATH = "/var/lib/openqa/factory/iso/"
RUN_COMMAND = "/var/lib/openqa/script/client isos post ISO=%s DISTRI=fedora VERSION=rawhide FLAVOR=server ARCH=%s BUILD=%s"
RUN_COMMAND = "/var/lib/openqa/script/client isos post ISO=%s DISTRI=fedora VERSION=rawhide FLAVOR=%s ARCH=%s BUILD=%s"
VERSIONS = ['i386', 'x86_64']
# read last tested version from file
@@ -39,20 +39,24 @@ def read_last():
return result, json_parsed
def download_image(image):
"""Download a given image with a name that should be unique for
this event and arch (until we start testing different images
for the same event and arch). Returns the filename of the image
(not the path).
"""Download a given image with a name that should be unique.
Returns the filename of the image (not the path).
"""
isoname = "{0}_{1}.iso".format(image.version.replace(' ', '_'), image.arch)
ver = image.version.replace(' ', '_')
if image.imagetype == 'boot':
isoname = "{0}_{1}_{2}_boot.iso".format(ver, image.payload, image.arch)
else:
isoname = "{0}_{1}".format(ver, image.filename)
filename = os.path.join(ISO_PATH, isoname)
if not os.path.isfile(filename):
print("Downloading {0} ({1}) to {2}...".format(
image.url, image.desc, filename))
# Icky hack around a urlgrabber bug:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=715416
urlgrabber.urlgrab(image.url.replace('https', 'http'), filename)
return isoname
def run_openqa_jobs(isoname, arch, image_version):
def run_openqa_jobs(isoname, flavor, arch, build):
"""# run OpenQA 'isos' job on selected isoname, with given arch
and a version string. **NOTE**: the version passed to OpenQA as
BUILD and is parsed back into the 'relval report-auto' arguments
@@ -61,7 +65,7 @@ def run_openqa_jobs(isoname, arch, image_version):
will be passed as --release, --compose and --milestone. Returns
list of job IDs.
"""
command = RUN_COMMAND % (isoname, arch, image_version)
command = RUN_COMMAND % (isoname, flavor, arch, build)
# starts OpenQA jobs
output = subprocess.check_output(command.split())
@@ -113,23 +117,64 @@ def jobs_from_fedfind(ff_release, arches=VERSIONS):
jobs on them. arches is an iterable of arches to run on, if not
specified, we'll use our constant.
"""
# Find boot.iso images for our arches; third query is a bit of a
# bodge till I know what 22 TCs/RCs will actually look like,
# ideally we want a query that will reliably return one image per
# arch without us having to filter further, but we can always just
# take the first image for each arch if necessary
# Find currently-testable images for our arches.
jobs = []
queries = (
fedfind.release.Query('imagetype', ('boot',)),
fedfind.release.Query('imagetype', ('boot', 'live')),
fedfind.release.Query('arch', arches),
fedfind.release.Query('payload', ('server', 'generic')))
fedfind.release.Query('payload', ('server', 'generic', 'workstation')))
images = ff_release.find_images(queries)
for image in ff_release.find_images(queries):
print("{0} {1}".format(image.url, image.desc))
isoname = download_image(image)
version = '_'.join(
# Now schedule jobs. First, let's get the BUILD value for openQA.
build = '_'.join(
(ff_release.release, ff_release.milestone, ff_release.compose))
job_ids = run_openqa_jobs(isoname, image.arch, version)
# Next let's schedule the 'universal' tests.
# We have different images in different composes: nightlies only
# have a generic boot.iso, TC/RC builds have Server netinst/boot
# and DVD. We always want to run *some* tests -
# default_boot_and_install at least - for all images we find, then
# we want to run all the tests that are not image-dependent on
# just one image. So we have a special 'universal' flavor and
# product in openQA; all the image-independent test suites run for
# that product. Here, we find the 'best' image we can for the
# compose we're running on (a DVD if possible, a boot.iso or
# netinst if not), and schedule the 'universal' jobs on that
# image.
for arch in arches:
okimgs = (img for img in images if img.arch == arch and
any(img.imagetype == okt
for okt in ('dvd', 'boot', 'netinst')))
bestscore = 0
bestimg = None
for img in okimgs:
if img.imagetype == 'dvd':
score = 10
else:
score = 1
if img.payload == 'generic':
score += 5
elif img.payload == 'server':
score += 3
elif img.payload == 'workstation':
score += 1
if score > bestscore:
bestimg = img
bestscore = score
if not bestimg:
print("No universal tests image found for {0)!".format(arch))
continue
print("Running universal tests for {0} with {1}!".format(
arch, bestimg.desc))
isoname = download_image(bestimg)
job_ids = run_openqa_jobs(isoname, 'universal', arch, build)
jobs.extend(job_ids)
# Now schedule per-image jobs.
for image in images:
isoname = download_image(image)
flavor = '_'.join((image.payload, image.imagetype))
job_ids = run_openqa_jobs(isoname, flavor, image.arch, build)
jobs.extend(job_ids)
return jobs
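
For context, a hedged sketch of how jobs_from_fedfind() would be
driven (fedfind's get_release() helper and the compose values are
assumptions, not part of this diff):

    import fedfind.release

    # hypothetical: locate a Rawhide nightly compose and schedule jobs for it
    rel = fedfind.release.get_release(release='Rawhide', compose='20150318')
    job_ids = jobs_from_fedfind(rel)
    # job_ids is the list of openQA job IDs, later polled so passed test
    # cases can be reported via relval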


@@ -45,7 +45,7 @@ def get_relval_commands(passed_testcases):
commands = []
for key in passed_testcases:
cmd_ = relval_template
version, _, build, arch = key
version, flavor, build, arch = key
cmd_ += ' --release "%s" --milestone "%s" --compose "%s"' % tuple(build.split('_'))
for tc_name in passed_testcases[key]:
@@ -53,6 +53,8 @@ def get_relval_commands(passed_testcases):
tc_env = arch if testcase['env'] == '$RUNARCH$' else testcase['env']
tc_type = testcase['type']
tc_section = testcase['section']
if 'name_cb' in testcase:
tc_name = testcase['name_cb'](flavor)
commands.append('%s --environment "%s" --testtype "%s" --section "%s" --testcase "%s" pass' % (cmd_, tc_env, tc_type, tc_section, tc_name))
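
A hedged illustration of one command this builds (the relval_template
prefix, build string, flavor and casing are assumptions, not taken
from this diff):

    # key = ('rawhide', 'server_boot', '23_Branched_20150318', 'x86_64'),
    # passed testcase QA:Testcase_Boot_default_install, whose name_cb
    # turns the flavor into the 'test instance' name 'server netinst':
    #
    #   relval report-auto --release "23" --milestone "Branched" \
    #       --compose "20150318" --environment "x86_64" \
    #       --testtype "Installation" --section "Default boot and install" \
    #       --testcase "server netinst" pass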