#!/usr/share/openqa/script/load_templates
#
# Fedora Machines, Products, TestSuites and JobTemplates
#
# Use load_templates to load this file into the database
#
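# A minimal usage sketch (an assumption based on the shebang above, not a
# verbatim project instruction; the exact options load_templates accepts
# vary by openQA version):
#
#   /usr/share/openqa/script/load_templates ./templates
#
# Each JobTemplates entry below attaches one test_suite to a machine and a
# product (distri/flavor/arch/version, where "*" matches any version).
# prio is the openQA job priority; lower numbers are generally scheduled
# first.
#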
{
JobTemplates => [
{
machine => { name => "64bit" },
prio => 10,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "install_default_upload" },
},
{
machine => { name => "64bit" },
prio => 50,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "release_identification" },
},
{
machine => { name => "uefi" },
prio => 11,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "install_default" },
},
{
machine => { name => "ARM" },
prio => 12,
product => {
arch => "arm",
distri => "fedora",
flavor => "Minimal-raw_xz-raw.xz",
version => "*",
},
test_suite => { name => "install_arm_image_deployment_upload" },
},
{
machine => { name => "64bit" },
prio => 10,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-boot-iso",
version => "*",
},
test_suite => { name => "install_default" },
},
{
machine => { name => "uefi" },
prio => 11,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-boot-iso",
version => "*",
},
test_suite => { name => "install_default" },
},
{
machine => { name => "64bit" },
prio => 10,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_default_upload" },
},
{
machine => { name => "64bit" },
prio => 50,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "release_identification" },
},
{
machine => { name => "uefi" },
prio => 11,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_default" },
},
{
machine => { name => "64bit" },
prio => 10,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Everything-boot-iso",
version => "*",
},
test_suite => { name => "install_default" },
},
{
machine => { name => "uefi" },
prio => 11,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Everything-boot-iso",
version => "*",
},
test_suite => { name => "install_default" },
},
{
machine => { name => "64bit" },
prio => 15,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Workstation-boot-iso",
version => "*",
},
test_suite => { name => "install_default" },
},
{
machine => { name => "64bit" },
prio => 100,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Workstation-boot-iso",
version => "*",
},
test_suite => { name => "memory_check" },
},
{
machine => { name => "uefi" },
prio => 100,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Workstation-boot-iso",
version => "*",
},
test_suite => { name => "memory_check" },
},
{
machine => { name => "uefi" },
prio => 16,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Workstation-boot-iso",
version => "*",
},
test_suite => { name => "install_default" },
},
{
machine => { name => "64bit" },
prio => 15,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "KDE-live-iso",
version => "*",
},
test_suite => { name => "install_default_upload" },
},
{
machine => { name => "64bit" },
prio => 50,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "KDE-live-iso",
version => "*",
},
test_suite => { name => "release_identification" },
},
{
machine => { name => "uefi" },
prio => 16,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "KDE-live-iso",
version => "*",
},
test_suite => { name => "install_default" },
},
{
machine => { name => "64bit" },
prio => 50,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "AtomicHost-dvd_ostree-iso",
version => "*",
},
test_suite => { name => "install_default" },
},
{
machine => { name => "uefi" },
prio => 51,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "AtomicHost-dvd_ostree-iso",
version => "*",
},
test_suite => { name => "install_default" },
},
{
machine => { name => "64bit" },
prio => 50,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Silverblue-dvd_ostree-iso",
version => "*",
},
test_suite => { name => "install_default_upload" },
},
{
machine => { name => "64bit" },
prio => 50,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Silverblue-dvd_ostree-iso",
version => "*",
},
test_suite => { name => "release_identification" },
},
{
machine => { name => "uefi" },
prio => 51,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Silverblue-dvd_ostree-iso",
version => "*",
},
test_suite => { name => "install_default" },
},
{
machine => { name => "64bit" },
prio => 21,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "KDE-live-iso",
version => "*",
},
test_suite => { name => "install_no_user" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "base_selinux" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "base_selinux" },
},
{
machine => { name => "64bit" },
prio => 42,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "KDE-live-iso",
version => "*",
},
test_suite => { name => "base_selinux" },
},
{
machine => { name => "64bit" },
prio => 50,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Silverblue-dvd_ostree-iso",
version => "*",
},
test_suite => { name => "base_selinux" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "base_services_start" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "base_services_start" },
},
{
machine => { name => "64bit" },
prio => 42,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "KDE-live-iso",
version => "*",
},
test_suite => { name => "base_services_start" },
},
{
machine => { name => "64bit" },
prio => 50,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Silverblue-dvd_ostree-iso",
version => "*",
},
test_suite => { name => "base_services_start" },
},
{
machine => { name => "ARM" },
prio => 42,
product => {
arch => "arm",
distri => "fedora",
flavor => "Minimal-raw_xz-raw.xz",
version => "*",
},
test_suite => { name => "base_services_start_arm" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "base_service_manipulation" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "base_service_manipulation" },
},
{
machine => { name => "64bit" },
prio => 42,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "KDE-live-iso",
version => "*",
},
test_suite => { name => "base_service_manipulation" },
},
{
machine => { name => "64bit" },
prio => 50,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Silverblue-dvd_ostree-iso",
version => "*",
},
test_suite => { name => "base_service_manipulation" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "base_update_cli" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "base_update_cli" },
},
{
machine => { name => "64bit" },
prio => 22,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "KDE-live-iso",
version => "*",
},
test_suite => { name => "base_update_cli" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "base_system_logging" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "base_system_logging" },
},
{
machine => { name => "64bit" },
prio => 22,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "KDE-live-iso",
version => "*",
},
test_suite => { name => "base_system_logging" },
},
{
machine => { name => "64bit" },
prio => 50,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Silverblue-dvd_ostree-iso",
version => "*",
},
test_suite => { name => "base_system_logging" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "desktop_update_graphical" },
},
{
machine => { name => "64bit" },
prio => 32,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "KDE-live-iso",
version => "*",
},
test_suite => { name => "desktop_update_graphical" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "desktop_terminal" },
},
{
machine => { name => "64bit" },
prio => 22,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "KDE-live-iso",
version => "*",
},
test_suite => { name => "desktop_terminal" },
},
{
machine => { name => "64bit" },
prio => 50,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Silverblue-dvd_ostree-iso",
version => "*",
},
test_suite => { name => "desktop_terminal" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "desktop_browser" },
},
{
machine => { name => "64bit" },
prio => 22,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "KDE-live-iso",
version => "*",
},
test_suite => { name => "desktop_browser" },
},
{
machine => { name => "64bit" },
prio => 50,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Silverblue-dvd_ostree-iso",
version => "*",
},
test_suite => { name => "desktop_browser" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "desktop_background" },
},
{
machine => { name => "64bit" },
prio => 22,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "KDE-live-iso",
version => "*",
},
test_suite => { name => "desktop_background" },
},
{
machine => { name => "64bit" },
prio => 50,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Silverblue-dvd_ostree-iso",
version => "*",
},
test_suite => { name => "desktop_background" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "desktop_notifications_live" },
},
{
machine => { name => "64bit" },
prio => 32,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "KDE-live-iso",
version => "*",
},
test_suite => { name => "desktop_notifications_live" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "apps_startstop" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "KDE-live-iso",
version => "*",
},
test_suite => { name => "apps_startstop" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "desktop_notifications_postinstall" },
},
{
machine => { name => "64bit" },
prio => 32,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "KDE-live-iso",
version => "*",
},
test_suite => { name => "desktop_notifications_postinstall" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_package_set_minimal" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_anaconda_text" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_serial_console" },
},
{
machine => { name => "64bit" },
prio => 31,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_rescue_encrypted" },
},
{
machine => { name => "uefi" },
prio => 32,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_rescue_encrypted" },
},
{
machine => { name => "64bit" },
prio => 10,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "support_server" },
},
{
machine => { name => "64bit" },
prio => 10,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "mediakit_fileconflicts" },
},
{
machine => { name => "64bit" },
prio => 10,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "mediakit_repoclosure" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_repository_nfs_variation" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_repository_nfs_graphical" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_repository_nfsiso_variation" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_repository_hd_variation" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_vnc_server" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_vnc_client" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_vncconnect_server" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_vncconnect_client" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_role_deploy_domain_controller" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_realmd_join_kickstart" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_cockpit_default" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_cockpit_basic" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_cockpit_updates" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "realmd_join_cockpit" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "realmd_join_sssd" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_freeipa_replication_master" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_freeipa_replication_replica" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_freeipa_replication_client" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_role_deploy_database_server" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_database_client" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_remote_logging_server" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_remote_logging_client" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_updates_nfs" },
},
{
machine => { name => "64bit" },
prio => 10,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "support_server" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_repository_http_variation" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_repository_http_graphical" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_mirrorlist_graphical" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_delete_pata" },
},
{
machine => { name => "uefi" },
prio => 21,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_delete_pata" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_sata" },
},
{
machine => { name => "uefi" },
prio => 21,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_sata" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_kickstart_user_creation" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_scsi_updates_img" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_multi" },
},
{
machine => { name => "uefi" },
prio => 21,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_multi" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_simple_encrypted" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_simple_free_space" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_multi_empty" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_software_raid" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_delete_partial" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_btrfs" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_ext3" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_xfs" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_lvmthin" },
},
{
machine => { name => "64bit" },
prio => 50,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_no_swap" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_iscsi" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_ext3" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_btrfs" },
},
{
machine => { name => "64bit" },
prio => 50,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_no_swap" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_xfs" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_software_raid" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_lvmthin" },
},
{
machine => { name => "uefi" },
prio => 41,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_ext3" },
},
{
machine => { name => "uefi" },
prio => 41,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_btrfs" },
},
{
machine => { name => "uefi" },
prio => 51,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_no_swap" },
},
{
machine => { name => "uefi" },
prio => 41,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_xfs" },
},
{
machine => { name => "uefi" },
prio => 41,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_software_raid" },
},
{
machine => { name => "uefi" },
prio => 41,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_lvmthin" },
},
{
machine => { name => "64bit" },
prio => 50,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_package_set_kde" },
},
{
machine => { name => "uefi" },
prio => 31,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_simple_encrypted" },
},
{
machine => { name => "uefi" },
prio => 31,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_simple_free_space" },
},
{
machine => { name => "uefi" },
prio => 31,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_multi_empty" },
},
{
machine => { name => "uefi" },
prio => 31,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_software_raid" },
},
{
machine => { name => "uefi" },
prio => 31,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_delete_partial" },
},
{
machine => { name => "uefi" },
prio => 41,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_btrfs" },
},
{
machine => { name => "uefi" },
prio => 41,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_ext3" },
},
{
machine => { name => "uefi" },
prio => 41,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_xfs" },
},
{
machine => { name => "uefi" },
prio => 41,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_lvmthin" },
},
{
machine => { name => "uefi" },
prio => 51,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_no_swap" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_kickstart_hdd" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "upgrade_minimal_64bit" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "upgrade_desktop_64bit" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "upgrade_server_64bit" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "upgrade_server_domain_controller" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "upgrade_realmd_client" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "upgrade_kde_64bit" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "upgrade_desktop_encrypted_64bit" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "upgrade_2_minimal_64bit" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "upgrade_2_desktop_64bit" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "upgrade_2_server_64bit" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "upgrade_2_kde_64bit" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "upgrade_2_desktop_encrypted_64bit" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_updates_img_local" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_shrink_ext4" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_shrink_ntfs" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_european_language" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_cyrillic_language" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_arabic_language" },
},
{
machine => { name => "64bit" },
prio => 40,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_asian_language" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_kickstart_firewall_disabled" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_kickstart_firewall_configured" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_kickstart_nfs" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_pxeboot" },
},
{
machine => { name => "uefi" },
prio => 31,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_pxeboot" },
},
{
machine => { name => "64bit" },
prio => 30,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "modularity_tests" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_filesystem_default" },
},
{
machine => { name => "64bit" },
prio => 20,
product => {
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_firewall_default" },
},
{
machine => { name => "64bit" },
prio => 61,
product => {
arch => "i386",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "install_default" },
},
{
machine => { name => "64bit" },
prio => 62,
product => {
arch => "i386",
distri => "fedora",
flavor => "Server-boot-iso",
version => "*",
},
test_suite => { name => "install_default" },
},
{
machine => { name => "64bit" },
prio => 62,
product => {
arch => "i386",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_default" },
},
{
machine => { name => "64bit" },
prio => 62,
product => {
arch => "i386",
distri => "fedora",
flavor => "Everything-boot-iso",
version => "*",
},
test_suite => { name => "install_default" },
},
{
machine => { name => "64bit" },
prio => 100,
product => {
arch => "i386",
distri => "fedora",
flavor => "Workstation-boot-iso",
version => "*",
},
test_suite => { name => "memory_check" },
},
{
machine => { name => "64bit" },
prio => 67,
product => {
arch => "i386",
distri => "fedora",
flavor => "Workstation-boot-iso",
version => "*",
},
test_suite => { name => "install_default" },
},
{
machine => { name => "64bit" },
prio => 67,
product => {
arch => "i386",
distri => "fedora",
flavor => "KDE-live-iso",
version => "*",
},
test_suite => { name => "install_default" },
},
{
machine => { name => "64bit" },
prio => 82,
product => {
arch => "i386",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_package_set_minimal" },
},
{
machine => { name => "64bit" },
prio => 72,
product => {
arch => "i386",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_repository_http_graphical" },
},
{
machine => { name => "64bit" },
prio => 72,
product => {
arch => "i386",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_scsi_updates_img" },
},
{
machine => { name => "64bit" },
prio => 82,
product => {
arch => "i386",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_simple_encrypted" },
},
{
machine => { name => "64bit" },
prio => 82,
product => {
arch => "i386",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_software_raid" },
},
{
machine => { name => "64bit" },
prio => 92,
product => {
arch => "i386",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_btrfs" },
},
{
machine => { name => "64bit" },
prio => 92,
product => {
arch => "i386",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_ext3" },
},
{
machine => { name => "64bit" },
prio => 92,
product => {
arch => "i386",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_lvmthin" },
},
{
machine => { name => "64bit" },
prio => 93,
product => {
arch => "i386",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_ext3" },
},
{
machine => { name => "64bit" },
prio => 93,
product => {
arch => "i386",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_btrfs" },
},
{
machine => { name => "64bit" },
prio => 93,
product => {
arch => "i386",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_no_swap" },
},
{
machine => { name => "64bit" },
prio => 93,
product => {
arch => "i386",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_xfs" },
},
{
machine => { name => "64bit" },
prio => 93,
product => {
arch => "i386",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_software_raid" },
},
{
machine => { name => "64bit" },
prio => 93,
product => {
arch => "i386",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_lvmthin" },
},
{
machine => { name => "64bit" },
prio => 82,
product => {
arch => "i386",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "upgrade_desktop_32bit" },
},
{
machine => { name => "64bit" },
prio => 92,
product => {
arch => "i386",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "upgrade_2_desktop_32bit" },
},
{
machine => { name => "64bit" },
prio => 92,
product => {
arch => "i386",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_package_set_kde" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 10,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "install_default_upload" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 50,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "release_identification" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "base_selinux" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "base_services_start" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "base_service_manipulation" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "base_update_cli" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "base_system_logging" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "desktop_update_graphical" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "desktop_terminal" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "desktop_browser" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "desktop_background" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "desktop_notifications_live" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "apps_startstop" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Workstation-live-iso",
version => "*",
},
test_suite => { name => "desktop_notifications_postinstall" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 10,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "AtomicHost-dvd_ostree-iso",
version => "*",
},
test_suite => { name => "install_default_upload" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 10,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-boot-iso",
version => "*",
},
test_suite => { name => "install_default" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 62,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Everything-boot-iso",
version => "*",
},
test_suite => { name => "install_default" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 10,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_default_upload" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "base_selinux" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "base_services_start" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "base_service_manipulation" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "base_update_cli" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "base_system_logging" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_package_set_minimal" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_anaconda_text" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_serial_console" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 31,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_rescue_encrypted" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 10,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "support_server" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 10,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "mediakit_fileconflicts" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 10,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "mediakit_repoclosure" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_repository_nfs_variation" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_repository_nfs_graphical" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_repository_nfsiso_variation" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_repository_hd_variation" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_vnc_server" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_vnc_client" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_vncconnect_server" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_vncconnect_client" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_role_deploy_domain_controller" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_realmd_join_kickstart" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_cockpit_default" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_cockpit_basic" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "realmd_join_cockpit" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "realmd_join_sssd" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_role_deploy_database_server" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_database_client" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_remote_logging_server" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_remote_logging_client" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_updates_nfs" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 10,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "support_server" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_repository_http_variation" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_repository_http_graphical" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_mirrorlist_graphical" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_kickstart_user_creation" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "modularity_tests" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_scsi_updates_img" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_multi" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_simple_encrypted" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_simple_free_space" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_multi_empty" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_software_raid" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_delete_partial" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_btrfs" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_ext3" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_xfs" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_lvmthin" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 50,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_no_swap" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_iscsi" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_ext3" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_btrfs" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 50,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_no_swap" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "upgrade_server_domain_controller" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "upgrade_realmd_client" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_xfs" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_software_raid" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_lvmthin" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_kickstart_hdd" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "upgrade_minimal_64bit" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "upgrade_server_64bit" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "upgrade_2_minimal_64bit" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "upgrade_2_server_64bit" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_shrink_ext4" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_shrink_ntfs" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_european_language" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_cyrillic_language" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_arabic_language" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 40,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_asian_language" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_kickstart_firewall_disabled" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_kickstart_firewall_configured" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_kickstart_nfs" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_pxeboot" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_package_set_kde" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 30,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_updates_img_local" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_filesystem_default" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_firewall_default" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_freeipa_replication_master" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_freeipa_replication_replica" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 20,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_freeipa_replication_client" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 50,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "release_identification" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 50,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Silverblue-dvd_ostree-iso",
version => "*",
},
test_suite => { name => "release_identification" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 50,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Silverblue-dvd_ostree-iso",
version => "*",
},
test_suite => { name => "install_default_upload" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 50,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Silverblue-dvd_ostree-iso",
version => "*",
},
test_suite => { name => "base_selinux" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 50,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Silverblue-dvd_ostree-iso",
version => "*",
},
test_suite => { name => "base_services_start" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 50,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Silverblue-dvd_ostree-iso",
version => "*",
},
test_suite => { name => "base_service_manipulation" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 50,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Silverblue-dvd_ostree-iso",
version => "*",
},
test_suite => { name => "base_system_logging" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 50,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Silverblue-dvd_ostree-iso",
version => "*",
},
test_suite => { name => "desktop_terminal" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 50,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Silverblue-dvd_ostree-iso",
version => "*",
},
test_suite => { name => "desktop_browser" },
},
{
group_name => "Fedora PowerPC",
machine => { name => "ppc64le" },
prio => 50,
product => {
arch => "ppc64le",
distri => "fedora",
flavor => "Silverblue-dvd_ostree-iso",
version => "*",
},
test_suite => { name => "desktop_background" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 10,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "AtomicHost-dvd_ostree-iso",
version => "*",
},
test_suite => { name => "install_default_upload" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 10,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-boot-iso",
version => "*",
},
test_suite => { name => "install_default" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 10,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_default_upload" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "base_selinux" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "base_services_start" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "base_service_manipulation" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 20,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "base_update_cli" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 30,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_package_set_minimal" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 20,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_anaconda_text" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 30,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_serial_console" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 31,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_rescue_encrypted" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 10,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "support_server" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 10,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "mediakit_fileconflicts" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 10,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "mediakit_repoclosure" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 30,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_repository_nfs_variation" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 30,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_repository_nfs_graphical" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 30,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_repository_nfsiso_variation" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 30,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_repository_hd_variation" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_vnc_server" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_vnc_client" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_vncconnect_server" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_vncconnect_client" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 20,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_role_deploy_domain_controller" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 20,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_realmd_join_kickstart" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 20,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_cockpit_default" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 30,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_cockpit_basic" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 30,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "realmd_join_cockpit" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 20,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "realmd_join_sssd" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 20,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_role_deploy_database_server" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 30,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_database_client" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "install_updates_nfs" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 10,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "support_server" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 20,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_repository_http_variation" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 20,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_repository_http_graphical" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 20,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_mirrorlist_graphical" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 20,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_kickstart_user_creation" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 30,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "modularity_tests" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 20,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_scsi_updates_img" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 20,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_multi" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 30,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_simple_encrypted" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 30,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_simple_free_space" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 30,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_multi_empty" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 30,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_software_raid" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 30,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_delete_partial" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_btrfs" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_ext3" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_xfs" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_lvmthin" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 50,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_no_swap" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_iscsi" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_ext3" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_btrfs" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 50,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_no_swap" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_xfs" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_software_raid" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_blivet_lvmthin" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 30,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_kickstart_hdd" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 30,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "upgrade_minimal_64bit" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 30,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "upgrade_server_64bit" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "upgrade_2_minimal_64bit" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "upgrade_2_server_64bit" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_shrink_ext4" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_shrink_ntfs" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_european_language" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_cyrillic_language" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_arabic_language" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 40,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_asian_language" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 20,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_kickstart_firewall_disabled" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 20,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_kickstart_firewall_configured" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 30,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_kickstart_nfs" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 30,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "universal",
version => "*",
},
test_suite => { name => "install_pxeboot" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 20,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_filesystem_default" },
},
{
group_name => "Fedora AArch64",
machine => { name => "aarch64" },
prio => 20,
product => {
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
version => "*",
},
test_suite => { name => "server_firewall_default" },
},
],
Machines => [
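    # Each machine entry describes a qemu worker profile; its settings are merged
    # into every job scheduled on that machine.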
{
backend => "qemu",
name => "64bit",
settings => [
{ key => "QEMUCPU", value => "Nehalem" },
{ key => "QEMUCPUS", value => "2"},
{ key => "QEMUVGA", value => "virtio"},
{ key => "QEMURAM", value => "2048"},
{ key => "ARCH_BASE_MACHINE", value => "64bit" },
{ key => "PART_TABLE_TYPE", value => "mbr"},
{ key => "WORKER_CLASS", value => "qemu_x86_64" },
{ key => "QEMU_VIRTIO_RNG", value => "1"}
],
},
{
backend => "qemu",
name => "uefi",
settings => [
{ key => "QEMUCPU", value => "Nehalem" },
{ key => "QEMUCPUS", value => "2"},
{ key => "QEMUVGA", value => "virtio"},
{ key => "QEMURAM", value => "2048"},
{ key => "ARCH_BASE_MACHINE", value => "64bit" },
{ key => "UEFI", value => "1"},
{ key => "UEFI_PFLASH_CODE", value => "/usr/share/edk2/ovmf/OVMF_CODE.fd"},
{ key => "UEFI_PFLASH_VARS", value => "/usr/share/edk2/ovmf/OVMF_VARS.fd"},
{ key => "PART_TABLE_TYPE", value => "gpt"},
{ key => "WORKER_CLASS", value => "qemu_x86_64" },
{ key => "QEMU_VIRTIO_RNG", value => "1"}
],
},
{
backend => "qemu",
name => "ARM",
settings => [
{ key => "QEMU", value => "arm" },
{ key => "QEMUCPUS", value => "2"},
{ key => "QEMUMACHINE", value => "virt"},
{ key => "QEMURAM", value => "1024"},
{ key => "ARCH_BASE_MACHINE", value => "ARM" },
{ key => "QEMU_NO_KVM", value => "1"},
{ key => "TIMEOUT_SCALE", value => "5" },
{ key => "SERIALDEV", value => "ttyAMA0" },
# we're running ARM tests on x86_64 for now as we have
# no ARM workers
{ key => "WORKER_CLASS", value => "qemu_x86_64" },
{ key => "QEMU_VIRTIO_RNG", value => "1"}
],
},
{
backend => "qemu",
name => "ppc64le",
settings => [
{ key => "QEMU", value => "ppc64" },
{ key => "OFW", value => 1 },
{ key => "QEMUVGA", value => "virtio" },
{ key => "QEMURAM", value => 4096 },
{ key => "QEMUCPU", value => "host" },
# pseries-4.0 is a workaround for
# https://bugzilla.redhat.com/show_bug.cgi?id=1769600
# usb=off is an os-autoinst default when QEMUMACHINE is not set
{ key => "QEMUMACHINE", value => "pseries-4.0,usb=off" },
{ key => "ARCH_BASE_MACHINE", value => "ppc64le" },
{ key => "WORKER_CLASS", value => "qemu_ppc64le" },
{ key => "PART_TABLE_TYPE", value => "mbr"},
{ key => "QEMU_VIRTIO_RNG", value => "1"}
],
},
{
backend => "qemu",
name => "aarch64",
settings => [
{ key => "QEMU", value => "aarch64" },
{ key => "QEMUCPUS", value => "2"},
{ key => "QEMUMACHINE", value => "virt"},
{ key => "QEMURAM", value => 3072 },
{ key => "QEMUCPU", value => "host" },
{ key => "ARCH_BASE_MACHINE", value => "aarch64" },
{ key => "TIMEOUT_SCALE", value => "1.5" },
{ key => "SERIALDEV", value => "ttyAMA0" },
{ key => "UEFI", value => "1"},
{ key => "UEFI_PFLASH_CODE", value => "/usr/share/edk2/aarch64/QEMU_EFI-pflash.raw"},
{ key => "UEFI_PFLASH_VARS", value => "/usr/share/edk2/aarch64/vars-template-pflash.raw"},
{ key => "PART_TABLE_TYPE", value => "gpt"},
{ key => "WORKER_CLASS", value => "qemu_aarch64" },
{ key => "QEMU_VIRTIO_RNG", value => "1"}
],
},
],
Products => [
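    # One product entry per (arch, flavor) combination the scheduler may post;
    # TEST_TARGET records which asset the product's jobs exercise (the ISO itself,
    # a posted disk image such as HDD_2, or NONE).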
{
arch => "x86_64",
distri => "fedora",
flavor => "universal",
name => "",
settings => [
{ key => "TEST_TARGET", value => "ISO" }
],
version => "*",
},
{
arch => "x86_64",
distri => "fedora",
flavor => "Everything-boot-iso",
name => "",
settings => [
{ key => "TEST_TARGET", value => "ISO" }
],
version => "*",
},
{
arch => "x86_64",
distri => "fedora",
flavor => "Server-boot-iso",
name => "",
settings => [
{ key => "TEST_TARGET", value => "ISO" }
],
version => "*",
},
{
arch => "x86_64",
distri => "fedora",
flavor => "Server-dvd-iso",
name => "",
settings => [
{ key => "TEST_TARGET", value => "ISO" }
],
version => "*",
},
{
arch => "x86_64",
distri => "fedora",
flavor => "AtomicHost-dvd_ostree-iso",
name => "",
settings => [
{ key => "CANNED", value => "1" },
{ key => "PACKAGE_SET", value => "default" },
{ key => "TEST_TARGET", value => "ISO" }
],
version => "*",
},
{
arch => "x86_64",
distri => "fedora",
flavor => "Workstation-boot-iso",
name => "",
settings => [
{ key => "DESKTOP", value => "gnome" },
{ key => "TEST_TARGET", value => "ISO" },
{ key => "HDDSIZEGB", value => "13" }
],
version => "*",
},
{
arch => "x86_64",
distri => "fedora",
flavor => "Workstation-live-iso",
name => "",
settings => [
{ key => "LIVE", value => "1" },
{ key => "PACKAGE_SET", value => "default" },
{ key => "DESKTOP", value => "gnome" },
{ key => "TEST_TARGET", value => "ISO" }
],
version => "*",
},
{
arch => "x86_64",
distri => "fedora",
flavor => "KDE-live-iso",
name => "",
settings => [
{ key => "LIVE", value => "1" },
{ key => "PACKAGE_SET", value => "default" },
{ key => "DESKTOP", value => "kde" },
{ key => "TEST_TARGET", value => "ISO" }
],
version => "*",
},
{
arch => "i386",
distri => "fedora",
flavor => "universal",
name => "",
settings => [
{ key => "TEST_TARGET", value => "ISO" }
],
version => "*",
},
{
arch => "i386",
distri => "fedora",
flavor => "Everything-boot-iso",
name => "",
settings => [
{ key => "TEST_TARGET", value => "ISO" }
],
version => "*",
},
{
arch => "i386",
distri => "fedora",
flavor => "Server-boot-iso",
name => "",
settings => [
{ key => "TEST_TARGET", value => "ISO" }
],
version => "*",
},
{
arch => "i386",
distri => "fedora",
flavor => "Server-dvd-iso",
name => "",
settings => [
{ key => "TEST_TARGET", value => "ISO" }
],
version => "*",
},
{
arch => "i386",
distri => "fedora",
flavor => "Workstation-boot-iso",
name => "",
settings => [
{ key => "DESKTOP", value => "gnome" },
{ key => "TEST_TARGET", value => "ISO" },
{ key => "HDDSIZEGB", value => "12" }
],
version => "*",
},
{
arch => "i386",
distri => "fedora",
flavor => "Workstation-live-iso",
name => "",
settings => [
{ key => "LIVE", value => "1" },
{ key => "PACKAGE_SET", value => "default" },
{ key => "DESKTOP", value => "gnome" },
{ key => "TEST_TARGET", value => "ISO" }
],
version => "*",
},
{
arch => "i386",
distri => "fedora",
flavor => "KDE-live-iso",
name => "",
settings => [
{ key => "LIVE", value => "1" },
{ key => "PACKAGE_SET", value => "default" },
{ key => "DESKTOP", value => "kde" },
{ key => "TEST_TARGET", value => "ISO" }
],
version => "*",
},
{
arch => "arm",
distri => "fedora",
flavor => "Minimal-raw_xz-raw.xz",
name => "",
settings => [
        # HDD_2 gets posted by the trigger and is always set to the target disk image
{ key => "TEST_TARGET", value => "HDD_2" }
],
version => "*",
},
{
arch => "x86_64",
distri => "fedora",
flavor => "Silverblue-dvd_ostree-iso",
name => "",
settings => [
{ key => "CANNED", value => "1" },
{ key => "PACKAGE_SET", value => "default" },
{ key => "DESKTOP", value => "gnome" },
{ key => "TEST_TARGET", value => "ISO" }
],
version => "*",
},
{
arch => "ppc64le",
distri => "fedora",
flavor => "Everything-boot-iso",
name => "",
settings => [
{ key => "TEST_TARGET", value => "ISO" }
],
version => "*",
},
{
arch => "ppc64le",
distri => "fedora",
flavor => "Silverblue-dvd_ostree-iso",
name => "",
settings => [
{ key => "CANNED", value => "1" },
{ key => "PACKAGE_SET", value => "default" },
{ key => "DESKTOP", value => "gnome" },
{ key => "TEST_TARGET", value => "ISO" }
],
version => "*",
},
{
arch => "ppc64le",
distri => "fedora",
flavor => "Workstation-live-iso",
name => "",
settings => [
{ key => "LIVE", value => "1" },
{ key => "PACKAGE_SET", value => "default" },
{ key => "DESKTOP", value => "gnome" },
{ key => "HDDSIZEGB", value => "13" },
{ key => "TEST_TARGET", value => "ISO" }
],
version => "*",
},
{
arch => "ppc64le",
distri => "fedora",
flavor => "universal",
name => "",
settings => [
{ key => "TEST_TARGET", value => "ISO" }
],
version => "*",
},
{
arch => "ppc64le",
distri => "fedora",
flavor => "Server-dvd-iso",
name => "",
settings => [
{ key => "TEST_TARGET", value => "ISO" }
],
version => "*",
},
{
arch => "ppc64le",
distri => "fedora",
flavor => "Server-boot-iso",
name => "",
settings => [
{ key => "TEST_TARGET", value => "ISO" }
],
version => "*",
},
{
arch => "ppc64le",
distri => "fedora",
flavor => "AtomicHost-dvd_ostree-iso",
name => "",
settings => [
{ key => "CANNED", value => "1" },
{ key => "PACKAGE_SET", value => "default" },
{ key => "TEST_TARGET", value => "ISO" }
],
version => "*",
},
{
arch => "aarch64",
distri => "fedora",
flavor => "universal",
name => "",
settings => [
{ key => "TEST_TARGET", value => "ISO" }
],
version => "*",
},
{
arch => "aarch64",
distri => "fedora",
flavor => "Server-dvd-iso",
name => "",
settings => [
{ key => "TEST_TARGET", value => "ISO" }
],
version => "*",
},
{
arch => "aarch64",
distri => "fedora",
flavor => "Server-boot-iso",
name => "",
settings => [
{ key => "TEST_TARGET", value => "ISO" }
],
version => "*",
},
{
arch => "aarch64",
distri => "fedora",
flavor => "AtomicHost-dvd_ostree-iso",
name => "",
settings => [
{ key => "CANNED", value => "1" },
{ key => "PACKAGE_SET", value => "default" },
{ key => "TEST_TARGET", value => "ISO" }
],
version => "*",
},
],
TestSuites => [
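    # Test suite definitions: the per-suite variables referenced by name from the
    # JobTemplates section above.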
{
name => "support_server",
settings => [
{ key => "PARALLEL_CANCEL_WHOLE_CLUSTER", value => "0" },
{ key => "NUMDISKS", value => "2" },
{ key => "HDD_1", value => "disk_f%CURRREL%_support_5_%ARCH%.img" },
{ key => "POSTINSTALL", value => "_support_server" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "BOOTFROM", value => "c" },
{ key => "NICTYPE", value => "tap" },
{ key => "WORKER_CLASS", value => "tap" },
{ key => "GRUB_POSTINSTALL", value => "net.ifnames=0 biosdevname=0" },
{ key => "POST_STATIC", value => "10.0.2.110 support.domain.local" },
{ key => "TEST_TARGET", value => "NONE" },
],
},
{
name => "install_default",
settings => [
{ key => "PACKAGE_SET", value => "default" },
{ key => "POSTINSTALL", value => "_collect_data" },
],
},
{
name => "install_default_upload",
settings => [
{ key => "PACKAGE_SET", value => "default" },
{ key => "POSTINSTALL", value => "_collect_data" },
{ key => "STORE_HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
],
},
{
name => "install_arm_image_deployment_upload",
settings => [
# run ARM entrypoint and also make sure that VM shuts down correctly
{ key => "ENTRYPOINT", value => "install_arm_image_deployment _console_shutdown" },
                # we don't want HDD_2 to be really connected, but we need it so
                # the HDD image gets downloaded, see https://github.com/os-autoinst/openQA/issues/684
{ key => "NUMDISKS", value => "1" },
{ key => "HDD_1", value => "%HDD_2%" },
{ key => "STORE_HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
                # set kernel arguments for booting the ARM disk image
{ key => "APPEND", value => "rw root=LABEL=_/ rootwait console=ttyAMA0 console=tty0 consoleblank=0" },
],
},
{
name => "install_anaconda_text",
settings => [
{ key => "ANACONDA_TEXT", value => "1" },
],
},
{
name => "install_serial_console",
settings => [
{ key => "ANACONDA_TEXT", value => "1" },
{ key => "SERIAL_CONSOLE", value => "1" },
# we want one console for anaconda and one for a root
# terminal
{ key => "VIRTIO_CONSOLE_NUM", value => "2" },
# we don't need to check this here and it doesn't work
# with serial console
{ key => "NO_UEFI_POST", value => "1" },
],
},
{
name => "install_rescue_encrypted",
settings => [
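                # boot from the install medium ('d') with the encrypted disk
                # uploaded by install_simple_encrypted attached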
{ key => "BOOTFROM", value => "d" },
{ key => "ENTRYPOINT", value => "rescue_mode_encrypted" },
{ key => "HDD_1", value => "disk_%MACHINE%_encrypted.qcow2" },
{ key => "START_AFTER_TEST", value => "install_simple_encrypted" },
],
},
{
name => "install_package_set_minimal",
settings => [
{ key => "PACKAGE_SET", value => "minimal" },
{ key => "POSTINSTALL", value => "_collect_data" },
],
},
{
name => "install_multi",
settings => [
{ key => "PARTITIONING", value => "guided_multi" },
{ key => "NUMDISKS", value => "2" },
{ key => "HDD_2", value => "disk_full_mbr.img" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
],
},
{
name => "install_scsi_updates_img",
settings => [
{ key => "TEST_UPDATES", value => "1" },
{ key => "GRUB", value => "inst.updates=https://fedorapeople.org/groups/qa/updates/updates-openqa.img" },
{ key => "HDDMODEL", value => "scsi-hd" },
{ key => "CDMODEL", value => "scsi-cd" },
{ key => "SCSICONTROLLER", value => "virtio-scsi-pci" },
],
},
{
name => "install_kickstart_user_creation",
settings => [
{ key => "KICKSTART", value => "1" },
{ key => "GRUB", value => "inst.ks=http://jskladan.fedorapeople.org/kickstarts/root-user-crypted-net.ks" },
{ key => "USER_LOGIN", value => "test" },
{ key => "USER_PASSWORD", value => "test" },
{ key => "ROOT_PASSWORD", value => "111111" },
],
},
{
name => "install_delete_pata",
settings => [
{ key => "PARTITIONING", value => "guided_delete_all" },
{ key => "HDDMODEL", value => "ide-hd" },
{ key => "HDD_1", value => "disk_full_mbr.img" },
],
},
{
name => "install_sata",
settings => [
{ key => "HDDMODEL", value => "ide-drive,bus=ahci0.0" },
{ key => "ATACONTROLLER", value => "ich9-ahci" },
],
},
{
name => "install_mirrorlist_graphical",
settings => [
{ key => "MIRRORLIST_GRAPHICAL", value => "1" },
],
},
{
name => "install_repository_http_graphical",
settings => [
{ key => "REPOSITORY_GRAPHICAL", value => "%LOCATION%" },
],
},
{
name => "install_repository_nfs_graphical",
settings => [
{ key => "REPOSITORY_GRAPHICAL", value => "nfs:nfsvers=4:10.0.2.110:/repo" },
{ key => "PARALLEL_WITH", value => "support_server" },
{ key => "INSTALL_UNLOCK", value => "support_ready" },
{ key => "NICTYPE", value => "tap" },
{ key => "WORKER_CLASS", value => "tap" },
],
},
{
name => "install_repository_http_variation",
settings => [
{ key => "REPOSITORY_VARIATION", value => "%LOCATION%" },
],
},
{
name => "install_repository_nfs_variation",
settings => [
{ key => "REPOSITORY_VARIATION", value => "nfs:nfsvers=4:10.0.2.110:/repo" },
{ key => "PARALLEL_WITH", value => "support_server" },
{ key => "INSTALL_UNLOCK", value => "support_ready" },
{ key => "NICTYPE", value => "tap" },
{ key => "WORKER_CLASS", value => "tap" },
],
},
{
name => "install_repository_nfsiso_variation",
settings => [
{ key => "REPOSITORY_VARIATION", value => "nfs:nfsvers=4:10.0.2.110:/iso/image.iso" },
{ key => "PARALLEL_WITH", value => "support_server" },
{ key => "INSTALL_UNLOCK", value => "support_ready" },
{ key => "NICTYPE", value => "tap" },
{ key => "WORKER_CLASS", value => "tap" },
],
},
{
name => "install_pxeboot",
settings => [
# this delays VM boot so we can wait till the PXE server
# is ready
{ key => "DELAYED_START", value => "1" },
# this is to ensure the test never 'accidentally' passes
# by falling back to boot from ISO
{ key => "+ISO", value => "" },
{ key => "TEST_TARGET", value => "COMPOSE" },
{ key => "PXEBOOT", value => "once" },
{ key => "KICKSTART", value => "1" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "111111" },
{ key => "PARALLEL_WITH", value => "support_server:%ARCH_BASE_MACHINE%" },
{ key => "NICTYPE", value => "tap" },
{ key => "WORKER_CLASS", value => "tap" },
],
},
{
name => "install_repository_hd_variation",
settings => [
{ key => "PREINSTALL", value => "preinstall_iso_in_hd" },
{ key => "REPOSITORY_VARIATION", value => "hd:vdb1:/fedora_image.iso" },
{ key => "NUMDISKS", value => "2" },
{ key => "HDD_2", value => "disk_full_mbr.img" },
],
},
{
name => "install_delete_partial",
settings => [
{ key => "PARTITIONING", value => "guided_delete_partial" },
{ key => "HDD_1", value => "disk_full_%PART_TABLE_TYPE%.img" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
],
},
{
name => "install_simple_encrypted",
settings => [
{ key => "ENCRYPT_PASSWORD", value => "weakpassword" },
{ key => "STORE_HDD_1", value => "disk_%MACHINE%_encrypted.qcow2" },
],
},
{
name => "install_simple_free_space",
settings => [
{ key => "PARTITIONING", value => "guided_free_space" },
{ key => "HDD_1", value => "disk_freespace_%PART_TABLE_TYPE%.img" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
],
},
{
name => "install_multi_empty",
settings => [
{ key => "PARTITIONING", value => "guided_multi_empty_all" },
{ key => "NUMDISKS", value => "2" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
],
},
{
name => "install_software_raid",
settings => [
{ key => "PARTITIONING", value => "custom_software_raid" },
{ key => "NUMDISKS", value => "2" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
],
},
{
name => "install_btrfs",
settings => [
{ key => "PARTITIONING", value => "custom_btrfs" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
],
},
{
name => "install_ext3",
settings => [
{ key => "PARTITIONING", value => "custom_ext3" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
],
},
{
name => "install_lvmthin",
settings => [
{ key => "PARTITIONING", value => "custom_lvmthin" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
],
},
{
name => "install_no_swap",
settings => [
{ key => "PARTITIONING", value => "custom_no_swap" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
],
},
{
name => "install_blivet_ext3",
settings => [
{ key => "PARTITIONING", value => "custom_blivet_ext3" },
{ key => "POSTINSTALL", value => "disk_custom_ext3_postinstall" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
],
},
{
name => "install_blivet_btrfs",
settings => [
{ key => "PARTITIONING", value => "custom_blivet_btrfs" },
{ key => "POSTINSTALL", value => "disk_custom_btrfs_postinstall" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
],
},
{
name => "install_blivet_no_swap",
settings => [
{ key => "PARTITIONING", value => "custom_blivet_no_swap" },
{ key => "POSTINSTALL", value => "disk_custom_no_swap_postinstall" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
],
},
{
name => "install_blivet_xfs",
settings => [
{ key => "PARTITIONING", value => "custom_blivet_xfs" },
{ key => "POSTINSTALL", value => "disk_custom_xfs_postinstall" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
],
},
{
name => "install_blivet_software_raid",
settings => [
{ key => "PARTITIONING", value => "custom_blivet_software_raid" },
{ key => "POSTINSTALL", value => "disk_custom_software_raid_postinstall" },
{ key => "NUMDISKS", value => "2" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
],
},
{
name => "install_blivet_lvmthin",
settings => [
{ key => "PARTITIONING", value => "custom_blivet_lvmthin" },
{ key => "POSTINSTALL", value => "disk_custom_lvmthin_postinstall" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
],
},
{
name => "install_kickstart_hdd",
settings => [
{ key => "KICKSTART", value => "1" },
{ key => "GRUB", value => "inst.ks=hd:vdb1:/root-user-crypted-net.ks" },
{ key => "NUMDISKS", value => "2" },
{ key => "HDD_2", value => "disk_ks_3.img" },
{ key => "ROOT_PASSWORD", value => "111111" },
{ key => "USER_LOGIN", value => "test" },
{ key => "USER_PASSWORD", value => "test" },
],
},
{
name => "upgrade_minimal_64bit",
settings => [
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "USER_LOGIN", value => "false" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_f%CURRREL%_minimal_3_%ARCH%.img" },
{ key => "UPGRADE", value => "1" },
{ key => "TEST_TARGET", value => "COMPOSE" },
],
},
{
name => "upgrade_desktop_64bit",
settings => [
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "USER_LOGIN", value => "test" },
{ key => "USER_PASSWORD", value => "weakpassword" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_f%CURRREL%_desktop_4_x86_64.img" },
{ key => "UPGRADE", value => "1" },
{ key => "DESKTOP", value => "gnome" },
{ key => "TEST_TARGET", value => "COMPOSE" },
],
},
{
name => "upgrade_server_64bit",
settings => [
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "USER_LOGIN", value => "test" },
{ key => "USER_PASSWORD", value => "weakpassword" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_f%CURRREL%_server_3_%ARCH%.img" },
{ key => "UPGRADE", value => "1" },
{ key => "TEST_TARGET", value => "COMPOSE" },
],
},
{
name => "upgrade_server_domain_controller",
settings => [
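                # don't cancel the rest of the parallel cluster if this job fails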
{ key => "PARALLEL_CANCEL_WHOLE_CLUSTER", value => "0" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "+HDD_1", value => "disk_f%CURRREL%_server_3_%ARCH%.img" },
{ key => "UPGRADE", value => "1" },
{ key => "TEST_TARGET", value => "COMPOSE" },
{ key => "PREUPGRADE", value => "role_deploy_domain_controller" },
{ key => "POSTINSTALL", value => "role_deploy_domain_controller_check" },
{ key => "USER_LOGIN", value => "false" },
{ key => "GRUB_POSTINSTALL", value => "net.ifnames=0 biosdevname=0" },
{ key => "BOOTFROM", value => "c" },
{ key => "GRUB", value => "net.ifnames=0 biosdevname=0" },
{ key => "POST_STATIC", value => "10.0.2.100 ipa001.domain.local" },
{ key => "NICTYPE", value => "tap" },
{ key => "WORKER_CLASS", value => "tap" },
],
},
{
name => "upgrade_realmd_client",
settings => [
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "+HDD_1", value => "disk_f%CURRREL%_server_3_%ARCH%.img" },
{ key => "UPGRADE", value => "1" },
{ key => "TEST_TARGET", value => "COMPOSE" },
{ key => "PREUPGRADE", value => "realmd_join_sssd" },
{ key => "POSTINSTALL", value => "_setup_browser freeipa_webui freeipa_password_change freeipa_client" },
{ key => "PARALLEL_WITH", value => "upgrade_server_domain_controller" },
{ key => "USER_LOGIN", value => "false" },
{ key => "GRUB_POSTINSTALL", value => "net.ifnames=0 biosdevname=0" },
{ key => "BOOTFROM", value => "c" },
{ key => "GRUB", value => "net.ifnames=0 biosdevname=0" },
{ key => "POST_STATIC", value => "10.0.2.103 client003.domain.local" },
{ key => "NICTYPE", value => "tap" },
{ key => "WORKER_CLASS", value => "tap" },
],
},
{
name => "upgrade_kde_64bit",
settings => [
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "USER_LOGIN", value => "test" },
{ key => "USER_PASSWORD", value => "weakpassword" },
{ key => "BOOTFROM", value => "c" },
{ key => "+HDD_1", value => "disk_f%CURRREL%_kde_3_x86_64.img" },
{ key => "UPGRADE", value => "1" },
{ key => "DESKTOP", value => "kde" },
{ key => "TEST_TARGET", value => "COMPOSE" },
],
},
{
name => "upgrade_desktop_encrypted_64bit",
settings => [
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "USER_LOGIN", value => "test" },
{ key => "USER_PASSWORD", value => "weakpassword" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_f%CURRREL%_desktopencrypt_x86_64.img" },
{ key => "UPGRADE", value => "1" },
{ key => "DESKTOP", value => "gnome" },
{ key => "ENCRYPT_PASSWORD", value => "weakpassword" },
{ key => "TEST_TARGET", value => "COMPOSE" },
],
},
{
name => "upgrade_2_minimal_64bit",
settings => [
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "USER_LOGIN", value => "false" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_f%PREVREL%_minimal_3_%ARCH%.img" },
{ key => "UPGRADE", value => "1" },
{ key => "TEST_TARGET", value => "COMPOSE" },
],
},
{
name => "upgrade_2_desktop_64bit",
settings => [
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "USER_LOGIN", value => "test" },
{ key => "USER_PASSWORD", value => "weakpassword" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_f%PREVREL%_desktop_4_x86_64.img" },
{ key => "UPGRADE", value => "1" },
{ key => "DESKTOP", value => "gnome" },
{ key => "TEST_TARGET", value => "COMPOSE" },
],
},
{
name => "upgrade_2_server_64bit",
settings => [
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "USER_LOGIN", value => "test" },
{ key => "USER_PASSWORD", value => "weakpassword" },
{ key => "BOOTFROM", value => "c" },
Add PowerPC support in templates * use only a subset of tests for ppc64 and ppc64le with a new "Fedora PowerPC group" and only three flavors "Server-boot-iso", "Server-dvd-iso", "universal", * TEST_TARGET for all PowerPC Products set as ISO * increase disk size for asian cyrillic and european tests add HDDSIZEGB = 12 for related tests install_asian_language install_cyrillic_language install_european_language This is required to avoid anaconda failure like: (my own translation) "... Fedora requests 10.03GB of free space, with 5.95GB for software and 4.08GB for swap. Your selected disks have the following free space: 10GB free space for use..." * Remove hardcoded arch in some HDD_1 key replaced by ARCH variable That concerns the images generated by createhdds tool (only for supported PowerPC tests not all of them) eg change from: "disk_f%CURRREL%_support_3_x86_64.img" to: "disk_f%CURRREL%_support_3_%ARCH%.img" Warning: use ARCH and not MACHINE variable * Try to keep same order for PowerPC as for x86_64 tests and same priorities as documented in cid a5861ebc5d56a8b8d7fca40e00f21b21a203c371: 0-20: critical smoke tests (higher than Alpha priority) 20-29: Alpha priority 30-39: Beta priority 40-49: Final priority 50+: Optional priority * force nfsvers=4 as bypass bugs: https://bugzilla.redhat.com/show_bug.cgi?id=1386059 https://bugzilla.redhat.com/show_bug.cgi?id=1368932 * role_deploy_domain_controller failed for ppc64 (BE) https://bugzilla.redhat.com/show_bug.cgi?id=1437793 * Warning: tests failure for PowerPC, not added: install_delete_pata install_sata install_package_set_kde install_updates_img_local * tests not tried: upgrade_server_domain_controller upgrade_realmd_client upgrade_desktop_encrypted_64bit * Note: TIMEOUT_SCALE initially set for PowerPC machines has been removed from this commit as seems not required anymore after upstream merge. Will need to track if two following timer values may create problem on remote openQA instances: tests/install_source_graphical.pm (300 to 600) tests/_boot_to_anaconda.pm (300 to 1200) Signed-off-by: Guy Menanteau <menantea@linux.vnet.ibm.com> Signed-off-by: Michel Normand <normand@linux.vnet.ibm.com>
2017-06-27 13:16:46 +00:00
{ key => "HDD_1", value => "disk_f%PREVREL%_server_3_%ARCH%.img" },
{ key => "UPGRADE", value => "1" },
{ key => "TEST_TARGET", value => "COMPOSE" },
],
},
{
name => "upgrade_2_kde_64bit",
settings => [
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "USER_LOGIN", value => "test" },
{ key => "USER_PASSWORD", value => "weakpassword" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_f%PREVREL%_kde_3_x86_64.img" },
{ key => "UPGRADE", value => "1" },
{ key => "DESKTOP", value => "kde" },
{ key => "TEST_TARGET", value => "COMPOSE" },
],
},
{
name => "upgrade_2_desktop_encrypted_64bit",
settings => [
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "USER_LOGIN", value => "test" },
{ key => "USER_PASSWORD", value => "weakpassword" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_f%PREVREL%_desktopencrypt_x86_64.img" },
{ key => "UPGRADE", value => "1" },
{ key => "DESKTOP", value => "gnome" },
{ key => "ENCRYPT_PASSWORD", value => "weakpassword" },
{ key => "TEST_TARGET", value => "COMPOSE" },
],
},
{
name => "upgrade_desktop_32bit",
settings => [
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "USER_LOGIN", value => "test" },
{ key => "USER_PASSWORD", value => "weakpassword" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_f%CURRREL%_desktop_4_i686.img" },
{ key => "UPGRADE", value => "1" },
{ key => "DESKTOP", value => "gnome" },
{ key => "TEST_TARGET", value => "COMPOSE" },
],
},
{
name => "upgrade_2_desktop_32bit",
settings => [
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "USER_LOGIN", value => "test" },
{ key => "USER_PASSWORD", value => "weakpassword" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_f%PREVREL%_desktop_4_i686.img" },
{ key => "UPGRADE", value => "1" },
{ key => "DESKTOP", value => "gnome" },
{ key => "TEST_TARGET", value => "COMPOSE" },
],
},
{
name => "install_updates_img_local",
settings => [
{ key => "NUMDISKS", value => "2" },
{ key => "HDD_2", value => "disk_updates_img_2.img" },
{ key => "TEST_UPDATES", value => "1" },
{ key => "GRUB", value => "inst.updates=hd:LABEL=UPDATES_IMG:/updates.img" },
],
},
{
name => "install_shrink_ext4",
settings => [
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "HDD_1", value => "disk_shrink_ext4.img" },
{ key => "PARTITIONING", value => "guided_shrink" },
],
},
{
name => "install_shrink_ntfs",
settings => [
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "HDD_1", value => "disk_shrink_ntfs.img" },
{ key => "PARTITIONING", value => "guided_shrink" },
],
},
{
name => "install_european_language",
settings => [
{ key => "LANGUAGE", value => "french" },
{ key => "DESKTOP", value => "gnome" },
{ key => "PACKAGE_SET", value => "workstation" },
{ key => "USER_LOGIN", value => "qwerty" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "ENCRYPT_PASSWORD", value => "weakpassword" },
{ key => "POSTINSTALL", value => "_console_login" },
{ key => "REPOSITORY_VARIATION", value => "%LOCATION%" },
{ key => "HDDSIZEGB", value => "13" },
{ key => "QEMU_DISABLE_SNAPSHOTS", value => "1" },
{ key => "NO_UEFI_POST", value => "1" },
],
},
{
name => "install_cyrillic_language",
settings => [
{ key => "LANGUAGE", value => "russian" },
{ key => "DESKTOP", value => "gnome" },
{ key => "PACKAGE_SET", value => "workstation" },
{ key => "SWITCHED_LAYOUT", value => "1" },
{ key => "USER_LOGIN", value => "qwerty" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "ENCRYPT_PASSWORD", value => "weakpassword" },
{ key => "POSTINSTALL", value => "_console_login" },
{ key => "REPOSITORY_VARIATION", value => "%LOCATION%" },
{ key => "HDDSIZEGB", value => "13" },
{ key => "QEMU_DISABLE_SNAPSHOTS", value => "1" },
],
},
{
name => "install_arabic_language",
settings => [
{ key => "LANGUAGE", value => "arabic" },
{ key => "DESKTOP", value => "gnome" },
{ key => "PACKAGE_SET", value => "workstation" },
{ key => "SWITCHED_LAYOUT", value => "1" },
{ key => "USER_LOGIN", value => "qwerty" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "ENCRYPT_PASSWORD", value => "weakpassword" },
{ key => "POSTINSTALL", value => "_console_login" },
{ key => "REPOSITORY_VARIATION", value => "%LOCATION%" },
{ key => "HDDSIZEGB", value => "13" },
{ key => "QEMU_DISABLE_SNAPSHOTS", value => "1" },
],
},
{
name => "install_asian_language",
settings => [
{ key => "LANGUAGE", value => "japanese" },
{ key => "DESKTOP", value => "gnome" },
{ key => "PACKAGE_SET", value => "workstation" },
{ key => "INPUT_METHOD", value => "1" },
{ key => "USER_LOGIN", value => "qwerty" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "ENCRYPT_PASSWORD", value => "weakpassword" },
{ key => "POSTINSTALL", value => "_console_login" },
{ key => "REPOSITORY_VARIATION", value => "%LOCATION%" },
{ key => "HDDSIZEGB", value => "13" },
{ key => "QEMU_DISABLE_SNAPSHOTS", value => "1" },
],
},
{
name => "install_xfs",
settings => [
{ key => "PARTITIONING", value => "custom_xfs" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
],
},
{
name => "install_iscsi",
settings => [
{ key => "PARTITIONING", value => "custom_iscsi" },
{ key => "ANACONDA_STATIC", value => "10.0.2.111" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "PARALLEL_WITH", value => "support_server" },
{ key => "INSTALL_UNLOCK", value => "support_ready" },
{ key => "NICTYPE", value => "tap" },
{ key => "WORKER_CLASS", value => "tap" },
],
},
{
name => "install_package_set_kde",
settings => [
{ key => "DESKTOP", value => "kde" },
{ key => "HDDSIZEGB", value => "12" },
{ key => "PACKAGE_SET", value => "kde" },
{ key => "POSTINSTALL", value => "_collect_data" },
{ key => "REPOSITORY_VARIATION", value => "%LOCATION%" },
],
},
{
name => "install_vnc_server",
settings => [
{ key => "VNC_SERVER", value => "1" },
{ key => "GRUB", value => "inst.vnc net.ifnames=0 biosdevname=0 ip=10.0.2.114::10.0.2.2:255.255.255.0:vnc001.domain.local:eth0:off" },
{ key => "NICTYPE", value => "tap" },
{ key => "WORKER_CLASS", value => "tap" },
],
},
{
name => "install_vnc_client",
settings => [
{ key => "VNC_CLIENT", value => "1" },
{ key => "BOOTFROM", value => "c" },
{ key => "INSTALL", value => "1" },
{ key => "DESKTOP", value => "gnome" },
{ key => "HDD_1", value => "disk_f%CURRREL%_desktop_4_%ARCH%.img" },
{ key => "PARALLEL_WITH", value => "install_vnc_server" },
{ key => "PREINSTALL", value => "_graphical_wait_login _vnc_client_connect" },
{ key => "GRUB_POSTINSTALL", value => "net.ifnames=0 biosdevname=0" },
{ key => "NICTYPE", value => "tap" },
{ key => "WORKER_CLASS", value => "tap" },
],
},
{
name => "install_vncconnect_server",
settings => [
{ key => "VNC_SERVER", value => "1" },
{ key => "GRUB", value => "inst.vnc inst.vncconnect=10.0.2.117:5500 net.ifnames=0 biosdevname=0 ip=10.0.2.116::10.0.2.2:255.255.255.0:vnc003.domain.local:eth0:off" },
# it's important that we set PARALLEL_WITH *here* and
# not for the client test due to mutex locking - we
# want the client test to be the 'parent' as it makes
# the mutex stuff simpler
{ key => "PARALLEL_WITH", value => "install_vncconnect_client" },
{ key => "INSTALL_UNLOCK", value => "vncconnect_client_ready" },
{ key => "NICTYPE", value => "tap" },
{ key => "WORKER_CLASS", value => "tap" },
],
},
{
name => "install_vncconnect_client",
settings => [
{ key => "VNC_CLIENT", value => "1" },
{ key => "BOOTFROM", value => "c" },
{ key => "INSTALL", value => "1" },
{ key => "DESKTOP", value => "gnome" },
{ key => "HDD_1", value => "disk_f%CURRREL%_desktop_4_%ARCH%.img" },
{ key => "PREINSTALL", value => "_graphical_wait_login _vncconnect_client_setup" },
{ key => "GRUB_POSTINSTALL", value => "net.ifnames=0 biosdevname=0" },
{ key => "NICTYPE", value => "tap" },
{ key => "WORKER_CLASS", value => "tap" },
],
},
{
name => "base_selinux",
settings => [
{ key => "POSTINSTALL", value => "base_selinux" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "START_AFTER_TEST", value => "install_default_upload" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
],
},
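    # POSTINSTALL takes a space-separated list, so a single suite can run
    # several post-install test modules in sequence on the same booted image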
{
name => "modularity_tests",
settings => [
{ key => "POSTINSTALL", value => "modularity_module_list modularity_enable_disable_module modularity_install_module modularity_checkdefaults" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "START_AFTER_TEST", value => "install_default_upload" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
],
},
{
name => "base_services_start",
settings => [
{ key => "POSTINSTALL", value => "base_services_start" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "START_AFTER_TEST", value => "install_default_upload" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
],
},
{
name => "base_services_start_arm",
settings => [
{ key => "POSTINSTALL", value => "base_services_start" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "START_AFTER_TEST", value => "install_arm_image_deployment_upload" },
{ key => "NUMDISKS", value => "1" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
        # set kernel arguments for direct kernel boot (DKB)
{ key => "APPEND", value => "rw root=LABEL=_/ rootwait console=ttyAMA0 console=tty0 consoleblank=0" },
],
},
{
name => "base_service_manipulation",
settings => [
{ key => "POSTINSTALL", value => "base_service_manipulation" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "START_AFTER_TEST", value => "install_default_upload" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
],
},
{
name => "base_update_cli",
settings => [
{ key => "POSTINSTALL", value => "base_update_cli" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "START_AFTER_TEST", value => "install_default_upload" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
],
},
{
name => "base_system_logging",
settings => [
{ key => "POSTINSTALL", value => "base_system_logging" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "START_AFTER_TEST", value => "install_default_upload" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
],
},
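    # the install_kickstart_firewall_* suites do unattended installs: KICKSTART
    # "1" tells the tests to expect a kickstart-driven install, GRUB supplies
    # the extra inst.ks= argument on the installer command line, and the
    # POSTINSTALL module then checks the resulting firewall configuration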
{
name => "install_kickstart_firewall_disabled",
settings => [
{ key => "KICKSTART", value => "1" },
{ key => "GRUB", value => "inst.ks=http://fedorapeople.org/groups/qa/kickstarts/firewall-disabled-net.ks" },
{ key => "POSTINSTALL", value => "firewall_disabled" },
{ key => "ROOT_PASSWORD", value => "anaconda" },
{ key => "USER_LOGIN", value => "false" },
],
},
{
name => "install_kickstart_firewall_configured",
settings => [
{ key => "KICKSTART", value => "1" },
{ key => "GRUB", value => "inst.ks=http://fedorapeople.org/groups/qa/kickstarts/firewall-configured-net.ks" },
{ key => "POSTINSTALL", value => "firewall_configured" },
{ key => "ROOT_PASSWORD", value => "anaconda" },
{ key => "USER_LOGIN", value => "false" },
],
},
{
name => "server_filesystem_default",
settings => [
{ key => "POSTINSTALL", value => "server_filesystem_default" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "START_AFTER_TEST", value => "install_default_upload" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
],
},
{
name => "server_firewall_default",
settings => [
{ key => "POSTINSTALL", value => "server_firewall_default" },
{ key => "USER_LOGIN", value => "false" },
{ key => "START_AFTER_TEST", value => "install_default_upload" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
],
},
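    # the FreeIPA suites below form a parallel cluster: tap networking
    # (NICTYPE/WORKER_CLASS "tap") lets the VMs reach each other, POST_STATIC
    # gives the server a fixed address and hostname for the clients to use, and
    # PARALLEL_CANCEL_WHOLE_CLUSTER "0" should keep the server job running even
    # if one of the client jobs fails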
{
name => "server_role_deploy_domain_controller",
settings => [
{ key => "PARALLEL_CANCEL_WHOLE_CLUSTER", value => "0" },
{ key => "POSTINSTALL", value => "role_deploy_domain_controller role_deploy_domain_controller_check" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "GRUB_POSTINSTALL", value => "net.ifnames=0 biosdevname=0" },
{ key => "START_AFTER_TEST", value => "install_default_upload" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
{ key => "GRUB", value => "net.ifnames=0 biosdevname=0" },
{ key => "POST_STATIC", value => "10.0.2.100 ipa001.domain.local" },
{ key => "NICTYPE", value => "tap" },
{ key => "WORKER_CLASS", value => "tap" },
],
},
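    # kickstart enrolment client: the kickstart is read from a second attached
    # disk (HDD_2 / inst.ks=hd:vdb1:...), PARALLEL_WITH ties the job to the
    # domain controller deploy, and INSTALL_UNLOCK makes it wait for the
    # "freeipa_ready" signal before installing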
{
name => "server_realmd_join_kickstart",
settings => [
{ key => "KICKSTART", value => "1" },
{ key => "GRUB", value => "inst.ks=hd:vdb1:/freeipaclient.ks" },
{ key => "NUMDISKS", value => "2" },
{ key => "HDD_2", value => "disk_ks_3.img" },
{ key => "POSTINSTALL", value => "freeipa_client" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "anaconda" },
{ key => "PARALLEL_WITH", value => "server_role_deploy_domain_controller" },
{ key => "INSTALL_UNLOCK", value => "freeipa_ready" },
{ key => "NICTYPE", value => "tap" },
{ key => "WORKER_CLASS", value => "tap" },
],
},
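    # server_cockpit_default installs a browser (_setup_browser) and publishes
    # its post-test disk via STORE_HDD_1; the cockpit suites that follow chain
    # onto it with +START_AFTER_TEST / +HDD_1 so they reuse that prepared image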
{
name => "server_cockpit_default",
settings => [
{ key => "POSTINSTALL", value => "_setup_browser server_cockpit_default" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "START_AFTER_TEST", value => "install_default_upload" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
{ key => "STORE_HDD_1", value => "disk_%MACHINE%_cockpit.qcow2" },
],
},
{
name => "server_cockpit_basic",
settings => [
{ key => "POSTINSTALL", value => "server_cockpit_basic" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
# to understand the '+' values in this test and the next,
# see the `jobs_from_update` docstring in fedora_openqa
# schedule.py
{ key => "+START_AFTER_TEST", value => "server_cockpit_default" },
{ key => "BOOTFROM", value => "c" },
{ key => "+HDD_1", value => "disk_%MACHINE%_cockpit.qcow2" },
],
},
{
name => "server_cockpit_updates",
settings => [
{ key => "POSTINSTALL", value => "server_cockpit_updates server_cockpit_autoupdate" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
# to understand the '+' values in this test and the next,
# see the `jobs_from_update` docstring in fedora_openqa
# schedule.py
{ key => "+START_AFTER_TEST", value => "server_cockpit_default" },
{ key => "BOOTFROM", value => "c" },
{ key => "+HDD_1", value => "disk_%MACHINE%_cockpit.qcow2" },
],
},
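    # realmd_join_cockpit joins the FreeIPA domain through Cockpit's realmd UI,
    # then runs the web UI, password change and client checks; it reuses the
    # cockpit disk image while running in parallel with the domain controller
    # deploy, with its own static address (10.0.2.102 client002.domain.local)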
{
name => "realmd_join_cockpit",
settings => [
{ key => "POSTINSTALL", value => "realmd_join_cockpit freeipa_webui freeipa_password_change freeipa_client" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "+START_AFTER_TEST", value => "server_cockpit_default" },
{ key => "PARALLEL_WITH", value => "server_role_deploy_domain_controller" },
{ key => "BOOTFROM", value => "c" },
{ key => "+HDD_1", value => "disk_%MACHINE%_cockpit.qcow2" },
{ key => "GRUB_POSTINSTALL", value => "net.ifnames=0 biosdevname=0" },
{ key => "POST_STATIC", value => "10.0.2.102 client002.domain.local" },
{ key => "NICTYPE", value => "tap" },
{ key => "WORKER_CLASS", value => "tap" },
],
},
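        # realmd_join_sssd runs in parallel with the domain controller suite
        # (PARALLEL_WITH) and joins the FreeIPA domain it deploys. The tap
        # settings (NICTYPE/WORKER_CLASS "tap") place the jobs on a shared
        # virtual network, and POST_STATIC gives this guest a fixed address
        # and hostname so the machines can reach each other.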
{
name => "realmd_join_sssd",
settings => [
{ key => "POSTINSTALL", value => "realmd_join_sssd freeipa_client" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "START_AFTER_TEST", value => "install_default_upload" },
{ key => "PARALLEL_WITH", value => "server_role_deploy_domain_controller" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
{ key => "GRUB_POSTINSTALL", value => "net.ifnames=0 biosdevname=0" },
{ key => "POST_STATIC", value => "10.0.2.103 client003.domain.local" },
{ key => "NICTYPE", value => "tap" },
{ key => "WORKER_CLASS", value => "tap" },
],
},
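        # The next three suites form a FreeIPA replication cluster: the master
        # deploys a domain controller, the replica enrols against it, and the
        # client enrols against the replica (linked via PARALLEL_WITH). The
        # FREEIPA_REPLICA_MASTER / FREEIPA_REPLICA / FREEIPA_REPLICA_CLIENT
        # flags presumably tell the test modules which role each job plays,
        # and PARALLEL_CANCEL_WHOLE_CLUSTER=0 should keep the rest of the
        # cluster running if one of the jobs is cancelled.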
{
name => "server_freeipa_replication_master",
settings => [
{ key => "PARALLEL_CANCEL_WHOLE_CLUSTER", value => "0" },
{ key => "POSTINSTALL", value => "role_deploy_domain_controller role_deploy_domain_controller_check" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "GRUB_POSTINSTALL", value => "net.ifnames=0 biosdevname=0" },
{ key => "FREEIPA_REPLICA_MASTER", value => "1" },
{ key => "START_AFTER_TEST", value => "install_default_upload" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
{ key => "GRUB", value => "net.ifnames=0 biosdevname=0" },
{ key => "POST_STATIC", value => "10.0.2.106 ipa002.domain.local" },
{ key => "NICTYPE", value => "tap" },
{ key => "WORKER_CLASS", value => "tap" },
],
},
{
name => "server_freeipa_replication_replica",
settings => [
{ key => "PARALLEL_CANCEL_WHOLE_CLUSTER", value => "0" },
{ key => "POSTINSTALL", value => "realmd_join_sssd" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "GRUB_POSTINSTALL", value => "net.ifnames=0 biosdevname=0" },
{ key => "FREEIPA_REPLICA", value => "1" },
{ key => "START_AFTER_TEST", value => "install_default_upload" },
{ key => "PARALLEL_WITH", value => "server_freeipa_replication_master" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
{ key => "GRUB", value => "net.ifnames=0 biosdevname=0" },
{ key => "POST_STATIC", value => "10.0.2.107 ipa003.domain.local" },
{ key => "NICTYPE", value => "tap" },
{ key => "WORKER_CLASS", value => "tap" },
],
},
{
name => "server_freeipa_replication_client",
settings => [
{ key => "POSTINSTALL", value => "realmd_join_sssd freeipa_client" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "GRUB_POSTINSTALL", value => "net.ifnames=0 biosdevname=0" },
{ key => "FREEIPA_REPLICA_CLIENT", value => "1" },
{ key => "START_AFTER_TEST", value => "install_default_upload" },
{ key => "PARALLEL_WITH", value => "server_freeipa_replication_replica" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
{ key => "GRUB", value => "net.ifnames=0 biosdevname=0" },
{ key => "POST_STATIC", value => "10.0.2.108 client005.domain.local" },
{ key => "NICTYPE", value => "tap" },
{ key => "WORKER_CLASS", value => "tap" },
],
},
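        # The four suites below are server/client pairs for the database
        # server and remote logging roles: the server suite deploys the role,
        # and the matching client suite (PARALLEL_WITH) exercises it over the
        # tap network.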
{
name => "server_role_deploy_database_server",
settings => [
{ key => "PARALLEL_CANCEL_WHOLE_CLUSTER", value => "0" },
{ key => "POSTINSTALL", value => "role_deploy_database_server" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "START_AFTER_TEST", value => "install_default_upload" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
{ key => "GRUB_POSTINSTALL", value => "net.ifnames=0 biosdevname=0" },
{ key => "POST_STATIC", value => "10.0.2.104 db.domain.local" },
{ key => "NICTYPE", value => "tap" },
{ key => "WORKER_CLASS", value => "tap" },
],
},
{
name => "server_database_client",
settings => [
{ key => "POSTINSTALL", value => "database_client" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "START_AFTER_TEST", value => "install_default_upload" },
{ key => "PARALLEL_WITH", value => "server_role_deploy_database_server" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
{ key => "GRUB_POSTINSTALL", value => "net.ifnames=0 biosdevname=0" },
{ key => "POST_STATIC", value => "10.0.2.105 dbclient.domain.local" },
{ key => "NICTYPE", value => "tap" },
{ key => "WORKER_CLASS", value => "tap" },
],
},
{
name => "server_remote_logging_server",
settings => [
{ key => "PARALLEL_CANCEL_WHOLE_CLUSTER", value => "0" },
{ key => "POSTINSTALL", value => "server_remote_logging_server" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "START_AFTER_TEST", value => "install_default_upload" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
{ key => "GRUB_POSTINSTALL", value => "net.ifnames=0 biosdevname=0" },
{ key => "POST_STATIC", value => "10.0.2.112 rsyslogserver.domain.local" },
{ key => "NICTYPE", value => "tap" },
{ key => "WORKER_CLASS", value => "tap" },
],
},
{
name => "server_remote_logging_client",
settings => [
{ key => "POSTINSTALL", value => "server_remote_logging_client" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "START_AFTER_TEST", value => "install_default_upload" },
{ key => "PARALLEL_WITH", value => "server_remote_logging_server" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
{ key => "GRUB_POSTINSTALL", value => "net.ifnames=0 biosdevname=0" },
{ key => "POST_STATIC", value => "10.0.2.113 rsyslogclient.domain.local" },
{ key => "NICTYPE", value => "tap" },
{ key => "WORKER_CLASS", value => "tap" },
],
},
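        # The desktop suites below all follow the same pattern: they are
        # chained onto install_default_upload (START_AFTER_TEST) and boot the
        # disk image it uploaded (BOOTFROM=c). The %FLAVOR% and %MACHINE%
        # placeholders are expanded from the job's variables, so for example
        # FLAVOR=Workstation-live-iso on the 64bit machine would give
        # disk_Workstation-live-iso_64bit.qcow2.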
{
name => "desktop_update_graphical",
settings => [
{ key => "POSTINSTALL", value => "desktop_update_graphical" },
{ key => "START_AFTER_TEST", value => "install_default_upload" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
],
},
{
name => "desktop_terminal",
settings => [
{ key => "POSTINSTALL", value => "desktop_terminal" },
{ key => "START_AFTER_TEST", value => "install_default_upload" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
],
},
{
name => "desktop_browser",
settings => [
{ key => "POSTINSTALL", value => "desktop_browser" },
{ key => "START_AFTER_TEST", value => "install_default_upload" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
],
},
{
name => "desktop_notifications_postinstall",
settings => [
{ key => "ENTRYPOINT", value => "desktop_notifications" },
{ key => "START_AFTER_TEST", value => "install_default_upload" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
],
},
{
name => "desktop_background",
settings => [
{ key => "POSTINSTALL", value => "desktop_background" },
{ key => "START_AFTER_TEST", value => "install_default_upload" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
],
},
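        # apps_startstop sets no POSTINSTALL or ENTRYPOINT of its own; the
        # STARTSTOP flag presumably tells main.pm to load the application
        # start/stop test modules instead.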
{
name => "apps_startstop",
settings => [
{ key => "START_AFTER_TEST", value => "install_default_upload" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
{ key => "STARTSTOP", value => "true" },
],
},
{
name => "release_identification",
settings => [
{ key => "START_AFTER_TEST", value => "install_default_upload" },
{ key => "BOOTFROM", value => "c" },
{ key => "HDD_1", value => "disk_%FLAVOR%_%MACHINE%.qcow2" },
{ key => "ENTRYPOINT", value => "text_login_gui fedora_release os_release" },
{ key => "USER_LOGIN", value => "false" },
],
},
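        # desktop_notifications_live runs from the live image: ENTRYPOINT
        # loads only the listed module(s) in place of the usual install flow
        # (unlike POSTINSTALL, which appends modules to it), and no disk image
        # or START_AFTER_TEST dependency is set.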
{
name => "desktop_notifications_live",
settings => [
{ key => "ENTRYPOINT", value => "desktop_notifications" },
],
},
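        # The two NFS suites below depend on a support_server job that exports
        # the kickstart and the package repository over NFS on the tap
        # network; INSTALL_UNLOCK=support_ready makes them wait until the
        # support server signals (via that lock) that its exports are ready
        # before starting the install.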
{
name => "install_kickstart_nfs",
settings => [
{ key => "KICKSTART", value => "1" },
{ key => "GRUB", value => "inst.ks=nfs:10.0.2.110:/export/root-user-crypted-net.ks" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "111111" },
{ key => "PARALLEL_WITH", value => "support_server" },
{ key => "INSTALL_UNLOCK", value => "support_ready" },
{ key => "NICTYPE", value => "tap" },
{ key => "WORKER_CLASS", value => "tap" },
],
},
{
name => "install_updates_nfs",
settings => [
{ key => "GRUB", value => "inst.stage2=nfs:nfsvers=4:10.0.2.110:/repo" },
{ key => "TEST_UPDATES", value => "1" },
{ key => "PARALLEL_WITH", value => "support_server" },
{ key => "INSTALL_UNLOCK", value => "support_ready" },
{ key => "NICTYPE", value => "tap" },
{ key => "WORKER_CLASS", value => "tap" },
],
},
{
name => "memory_check",
settings => [
{ key => "PACKAGE_SET", value => "default" },
{ key => "MEMCHECK", value => "1" },
{ key => "REPOSITORY_VARIATION", value => "%LOCATION%" },
{ key => "TEST_TARGET", value => "NONE" },
],
},
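        # The two mediakit suites below check the compose for file conflicts
        # and repoclosure problems. %CURRREL% (the triple R is intentional)
        # expands to the current release number and %ARCH% to the job's
        # architecture, so e.g. CURRREL=25 on x86_64 gives
        # disk_f25_support_5_x86_64.img.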
{
name => "mediakit_fileconflicts",
settings => [
            # we use the support server image here as we know it's available
# and there's no need to wait on an install to run this
{ key => "HDD_1", value => "disk_f%CURRREL%_support_5_%ARCH%.img" },
{ key => "POSTINSTALL", value => "mediakit_fileconflicts" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "BOOTFROM", value => "c" },
],
},
{
name => "mediakit_repoclosure",
settings => [
            # we use the support server image here as we know it's available
# and there's no need to wait on an install to run this
{ key => "HDD_1", value => "disk_f%CURRREL%_support_5_%ARCH%.img" },
{ key => "POSTINSTALL", value => "mediakit_repoclosure" },
{ key => "USER_LOGIN", value => "false" },
{ key => "ROOT_PASSWORD", value => "weakpassword" },
{ key => "BOOTFROM", value => "c" },
],
},
{
name => "install_no_user",
settings => [
{ key => "INSTALL_NO_USER", value => "1" },
],
},
],
}