createhdds/openqa_trigger/report_job_results.py
Adam Williamson cbe7769748 handling scheduling of jobs for multiple images
This handles scheduling of jobs for more than one type of
image; currently we'll run tests for Workstation live as well.
It requires some cleverness: a few tests (currently just
default_boot_and_install) must be run for *every* image, while
the tests that can be run with any non-live installer image
should be run just once, with the best such image available in
the compose. We introduce a special (openQA, not fedfind)
'flavor' called 'universal'; we run a couple of checks to find
the best image in the compose for the universal tests, and
schedule the 'universal' flavor tests with that image. The
'best' image is a server or 'generic' DVD if possible, and if
not, a server or 'generic' boot.iso.
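Roughly, the selection works like this (a sketch only; the
image attribute names here are illustrative, not the real
fedfind API):

    # Sketch: pick the 'best' image for the 'universal' flavor.
    # 'payload' and 'imagetype' are assumed attribute names.
    def find_universal_image(images):
        candidates = [img for img in images
                      if img.payload in ('server', 'generic')]
        for img in candidates:
            if img.imagetype == 'dvd':
                return img
        for img in candidates:
            if img.imagetype == 'boot':
                return img
        return None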

ISO files have the compose's version identifier prepended to
their names. Otherwise they retain their original names, which
should usually be unique within a given compose, except for
boot.iso files, which have their payload and arch added into
their names to ensure they don't overwrite each other.
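A minimal sketch of that naming rule (the exact name format
here is illustrative):

    # Sketch: rename an ISO for download, per the rule above.
    def download_name(version, filename, payload, arch):
        if filename == 'boot.iso':
            # add payload and arch so boot.iso files don't collide
            filename = '%s_%s_%s' % (payload, arch, filename)
        # prepend the compose's version identifier
        return '%s_%s' % (version, filename)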

This also adds a mechanism for TESTCASES (in conf_test_suites)
to define a callback which will be called with the flavor of
the image being tested; the result of the callback will be used
as the 'test name' for relval result reporting purposes. This
allows us to report results against the correct 'test instance'
for the image being tested, for tests like Boot_default_install
which have 'test instances' for each image. We can extend this
general approach in the future to other cases where we have
multiple 'test instances' for a single test case.
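For illustration, a TESTCASES entry using the callback might
look like this (the dict keys match what report_job_results.py
reads; the entry name and values are hypothetical):

    TESTCASES = {
        'QA:Testcase_Boot_default_install': {
            'section': 'Default boot and install',
            'env': '$RUNARCH$',
            'type': 'Installation',
            # called with the image's flavor; the result is used
            # as the relval 'test name' (naming scheme assumed)
            'name_cb': lambda flavor: 'Boot_default_install_%s' % flavor,
        },
    }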
2015-03-18 14:51:01 -07:00


import argparse
import os
import pprint
import time

import requests

import conf_test_suites

API_ROOT = "http://localhost/api/v1"
SLEEPTIME = 60


def get_passed_testcases(job_ids):
    """
    job_ids ~ list of int (job ids)

    Returns ~ dict keyed by (VERSION, FLAVOR, BUILD, ARCH) tuples,
    each value a sorted list of str - names of passed testcases
    """
    running_jobs = dict([(job_id, "%s/jobs/%s" % (API_ROOT, job_id)) for job_id in job_ids])
    finished_jobs = {}

    while running_jobs:
        # poll openQA for each job until all jobs are 'done'
        for job_id, url in running_jobs.items():
            job_state = requests.get(url).json()['job']
            if job_state['state'] == 'done':
                print "Job %s is done" % job_id
                finished_jobs[job_id] = job_state
                del running_jobs[job_id]
        if running_jobs:
            time.sleep(SLEEPTIME)

    passed_testcases = {}  # key = (VERSION, FLAVOR, BUILD, ARCH)
    for job_id in job_ids:
        job = finished_jobs[job_id]
        if job['result'] == 'passed':
            key = (job['settings']['VERSION'], job['settings']['FLAVOR'],
                   job['settings'].get('BUILD', None), job['settings']['ARCH'])
            passed_testcases.setdefault(key, [])
            passed_testcases[key].extend(conf_test_suites.TESTSUITES[job['settings']['TEST']])

    # de-duplicate and sort the testcase names for each key
    for key, value in passed_testcases.iteritems():
        passed_testcases[key] = sorted(list(set(value)))

    return passed_testcases
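
# Illustrative return value (all values assumed; BUILD is
# RELEASE_MILESTONE_COMPOSE):
# {('23', 'universal', '23_Beta_TC1', 'x86_64'):
#  ['QA:Testcase_Boot_default_install']}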


def get_relval_commands(passed_testcases):
    relval_template = "relval report-auto"
    commands = []
    for key in passed_testcases:
        cmd_ = relval_template
        version, flavor, build, arch = key
        # BUILD is RELEASE_MILESTONE_COMPOSE; split it into the three relval args
        cmd_ += ' --release "%s" --milestone "%s" --compose "%s"' % tuple(build.split('_'))
        for tc_name in passed_testcases[key]:
            testcase = conf_test_suites.TESTCASES[tc_name]
            tc_env = arch if testcase['env'] == '$RUNARCH$' else testcase['env']
            tc_type = testcase['type']
            tc_section = testcase['section']
            if 'name_cb' in testcase:
                # the callback picks the correct 'test instance' name for this flavor
                tc_name = testcase['name_cb'](flavor)
            commands.append('%s --environment "%s" --testtype "%s" --section "%s" --testcase "%s" pass'
                            % (cmd_, tc_env, tc_type, tc_section, tc_name))
    return commands
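
# Illustrative command this would emit (values assumed):
# relval report-auto --release "23" --milestone "Beta" --compose "TC1"
#   --environment "x86_64" --testtype "Installation"
#   --section "Default boot and install" --testcase "Boot_default_install_workstation" pass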


def report_results(job_ids):
    commands = get_relval_commands(get_passed_testcases(job_ids))
    print "Running relval commands:"
    for command in commands:
        print command
        os.system(command)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Evaluate per-testcase results from OpenQA job runs")
    parser.add_argument('jobs', type=int, nargs='+')
    parser.add_argument('--report', default=False, action='store_true')
    args = parser.parse_args()

    passed_testcases = get_passed_testcases(args.jobs)
    commands = get_relval_commands(passed_testcases)
    pprint.pprint(passed_testcases)
    if not args.report:
        print "\n\n### No reporting is done! ###\n\n"
        pprint.pprint(commands)
    else:
        for command in commands:
            print command
            os.system(command)
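
# Example invocation (job IDs are placeholders):
#     python report_job_results.py 1001 1002 --report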