Update testing: log packages in update and installed packages

Summary:
This adds some logging related to the update testing workflow,
so we have some idea what we actually tested. We log precisely
which packages were actually downloaded from the update — this
is important because updates can be edited, and when examining
results we'll want to know which packages were actually used. We also
add a new module which runs at the end of postinstall and tries
to figure out which packages from the update were installed in
the course of the test. This still isn't a guarantee the test
actually *tested them* in any way, but it at least means they
got installed successfully and didn't interfere with the test.

Test Plan:
Run the update test workflow and check that the logs get
uploaded and seem accurate (sometimes some RPM garbage messages
wind up in the package log; I'm not too worried about that at
present). Run the compose test workflow and check that it didn't
break.

Reviewers: jsedlak

Reviewed By: jsedlak

Subscribers: tflink

Differential Revision: https://phab.qa.fedoraproject.org/D1149
This commit is contained in:
Adam Williamson 2017-02-22 12:59:39 -08:00
parent 92d588f245
commit 461f3a6132
4 changed files with 45 additions and 1 deletions

View File

@ -325,10 +325,16 @@ sub _repo_setup_updates {
}
else {
# bodhi client 0.9
# latest git python-fedora fixes bug which makes bodhi -D UPDATE_ID fail
# use git python-fedora for
# https://github.com/fedora-infra/python-fedora/pull/192
# until packages with that fix are pushed stable
assert_script_run "git clone https://github.com/fedora-infra/python-fedora.git";
assert_script_run "PYTHONPATH=python-fedora/ bodhi -D " . get_var("ADVISORY"), 300;
}
# log the exact packages in the update at test time, with their
# source packages and epochs. log is uploaded by _advisory_update
# and used for later comparison by _advisory_post
assert_script_run 'rpm -qp *.rpm --qf "%{SOURCERPM} %{EPOCH} %{NAME}-%{VERSION}-%{RELEASE}\n" | sort -u > /var/log/updatepkgs.txt';
# create the repo metadata
assert_script_run "createrepo .";
# write a repo config file

View File

@ -257,6 +257,12 @@ sub load_postinstall_tests() {
}
}
# load the ADVISORY post-install test - this records which update
# packages were actually installed during the test
if (get_var("ADVISORY")) {
autotest::loadtest "tests/_advisory_post.pm";
}
# we should shut down before uploading disk images
if (get_var("STORE_HDD_1") || get_var("PUBLISH_HDD_1")) {
autotest::loadtest "tests/_console_shutdown.pm";

28
tests/_advisory_post.pm Normal file
View File

@ -0,0 +1,28 @@
use base "installedtest";
use strict;
use testapi;
use utils;
sub run {
    my $self = shift;
    # Work out which packages from the update under test (if any) were
    # actually installed over the course of this job, and upload the
    # resulting list as a log.
    $self->root_console(tty=>3);
    # Dump every installed package as "SOURCERPM EPOCH NAME-VERSION-RELEASE",
    # the same format _repo_setup_updates used for /var/log/updatepkgs.txt;
    # sort -u so the output is suitable input for comm(1).
    my $dump_installed = 'rpm -qa --qf "%{SOURCERPM} %{EPOCH} %{NAME}-%{VERSION}-%{RELEASE}\n" | sort -u > /tmp/allpkgs.txt';
    assert_script_run $dump_installed;
    # comm -12 prints only the lines common to both (sorted) files, i.e.
    # the update's packages that wound up installed
    # http://www.unix.com/unix-for-dummies-questions-and-answers/34549-find-matching-lines-between-2-files.html
    my $intersect = 'comm -12 /tmp/allpkgs.txt /var/log/updatepkgs.txt > /var/log/testedpkgs.txt';
    assert_script_run $intersect;
    upload_logs "/var/log/testedpkgs.txt";
}
sub test_flags {
    # Recognized flags:
    #   (none)      - roll back to the 'lastgood' snapshot if the test fails
    #   fatal       - the whole test suite is in danger if this fails
    #   milestone   - after this test succeeds, update 'lastgood'
    #   important   - if this fails, set the overall state to 'fail'
    my %flags = (fatal => 1);
    return \%flags;
}

1;
# vim: set sw=4 et:

View File

@ -9,6 +9,10 @@ sub run {
# update packages and run 'dnf update'
$self->root_console(tty=>3);
repo_setup;
# upload the log of installed packages which repo_setup created
# we do this here and not in repo_setup because this is the best
# place to make sure it happens once and only once per job
upload_logs "/var/log/updatepkgs.txt";
# reboot, in case any of the updates need a reboot to apply
script_run "reboot", 0;
}