Compare commits


No commits in common. "multivm_networking" and "main" have entirely different histories.

24 changed files with 104 additions and 632 deletions


@@ -1,4 +1,3 @@
---
# .ansible-lint
warn_list:
- '204' # Lines should be less than 160 characters


@@ -17,30 +17,26 @@ This repository is for openQA operations and management.
├── handlers
│   └── main.yml
├── init-rocky-openqa-developer-host.yml
├── init-rocky-openqa-worker-host.yml
├── localhost.yml
├── README.md
├── roles
│   └── README.md
├── tasks
│   ├── main.yml
│   ├── openqa-worker.yml
│   └── openqa.yml
├── templates
│   └── etc
│       ├── firewalld
│       │   └── services
│       │       ├── openqa-socket.xml.j2
│       │       └── openqa-vnc.xml.j2
│       └── openqa
│           ├── client.conf.j2
│           ├── openqa.ini.j2
│           └── workers.conf.j2
│           └── openqa.ini.j2
├── tests
│   ├── README.md
│   └── test.yml
└── vars
    ├── main.yml
    ├── openqa-worker.yml
    └── openqa.yml
```


@@ -1,3 +0,0 @@
---
collections:
- ansible.posix


@@ -1,37 +1,2 @@
---
- name: Reload firewalld
  ansible.builtin.systemd:
    name: firewalld
    state: reloaded
  ignore_errors: "{{ ansible_check_mode }}"
- name: Restart openQA workers
  ansible.builtin.systemd:
    name: "openqa-worker@{{ item }}"
    state: restarted
    enabled: true
  # range "end" parameter is exclusive, so add 1
  loop: "{{ range(1, (openqa_worker_count | int + 1)) | list }}"
  ignore_errors: "{{ ansible_check_mode }}"
- name: Restart openqa services
  ansible.builtin.systemd:
    name: "{{ item }}"
    state: restarted
  loop: "{{ openqa_services }}"
  ignore_errors: "{{ ansible_check_mode }}"
- name: Restart os-autoinst-openvswitch
  ansible.builtin.systemd:
    name: os-autoinst-openvswitch
    state: restarted
    enabled: true
  ignore_errors: "{{ ansible_check_mode }}"
- name: Restart httpd
  ansible.builtin.service:
    name: httpd
    state: restarted
    enabled: true
  ignore_errors: "{{ ansible_check_mode }}"
...
# Handlers


@@ -24,17 +24,16 @@
  # This is to try to avoid the handler issue in pre/post tasks
  handlers:
    - name: Import handlers
      ansible.builtin.import_tasks: handlers/main.yml
    - import_tasks: handlers/main.yml
  pre_tasks:
    - name: Check if ansible cannot be run here
      ansible.builtin.stat:
      stat:
        path: /etc/no-ansible
      register: no_ansible
    - name: Verify if we can run ansible
      ansible.builtin.assert:
      assert:
        that:
          - "not no_ansible.stat.exists"
        success_msg: "We are able to run on this node"
@@ -42,11 +41,13 @@
  tasks:
    - name: Install and configure OpenQA
      ansible.builtin.import_tasks: tasks/openqa.yml
      import_tasks: tasks/openqa.yml
    - name: Apply Rocky Linux OpenQA Branding
      import_tasks: tasks/openqa_branding.yml
  post_tasks:
    - name: Touching run file that ansible has ran here
      ansible.builtin.file:
      file:
        path: /var/log/ansible.run
        state: touch
        mode: '0644'


@@ -1,49 +0,0 @@
# Configure an openQA worker host
#
# Usages:
# # Install and configure an openQA worker-only host
# ansible-playbook init-rocky-openqa-worker-host.yml
#
# # Install and configure an openQA worker-only host with a parameters file
# ansible-playbook init-rocky-openqa-worker-host.yml -e @my-worker-host.yml
#
# Created: @akatch
---
- name: Rocky openQA Worker Runbook
  hosts: openqa_workers
  become: true
  gather_facts: false
  vars_files:
    - vars/openqa-worker.yml
  # This is to try to avoid the handler issue in pre/post tasks
  handlers:
    - name: Import handlers
      ansible.builtin.import_tasks: handlers/main.yml
  pre_tasks:
    - name: Check if ansible cannot be run here
      ansible.builtin.stat:
        path: /etc/no-ansible
      register: no_ansible
    - name: Verify if we can run ansible
      ansible.builtin.assert:
        that:
          - "not no_ansible.stat.exists"
        success_msg: "We are able to run on this node"
        fail_msg: "/etc/no-ansible exists - skipping run on this node"
  tasks:
    - name: Install and configure OpenQA workers
      ansible.builtin.import_tasks: tasks/openqa-worker.yml
  post_tasks:
    - name: Touching run file that ansible has ran here
      ansible.builtin.file:
        path: /var/log/ansible.run
        state: touch
        mode: '0644'
        owner: root
        group: root
...


@@ -1,41 +0,0 @@
# Delete local OpenQA testing environment
# This playbook is *NOT* intended for WAN-facing systems!
# Created: @akatch
---
- name: Rocky OpenQA Runbook
  hosts: localhost
  connection: local
  become: true
  vars_files:
    - vars/openqa.yml
  # This is to try to avoid the handler issue in pre/post tasks
  handlers:
    - name: Import handlers
      ansible.builtin.import_tasks: handlers/main.yml
  pre_tasks:
    - name: Check if ansible cannot be run here
      ansible.builtin.stat:
        path: /etc/no-ansible
      register: no_ansible
    - name: Verify if we can run ansible
      ansible.builtin.assert:
        that:
          - "not no_ansible.stat.exists"
        success_msg: "We are able to run on this node"
        fail_msg: "/etc/no-ansible exists - skipping run on this node"
  tasks:
    - name: Remove OpenQA installation from this system
      ansible.builtin.import_tasks: tasks/remove_openqa.yml
  post_tasks:
    - name: Touching run file that ansible has ran here
      ansible.builtin.file:
        path: /var/log/ansible.run
        state: touch
        mode: '0644'
        owner: root
        group: root


@@ -1,54 +0,0 @@
# Sets up local OpenQA testing environment
# This playbook is *NOT* intended for WAN-facing systems!
#
# Usages:
# # Install and configure an openQA developer host, download all current Rocky ISOs,
# # and POST a test job
# ansible-playbook playbooks/init-rocky-openqa-developer-host.yml
#
# # Only perform ISO download tasks
# ansible-playbook playbooks/init-rocky-openqa-developer-host.yml --tags=download_isos
#
# # Only perform configuration, do not download ISOs or POST a job
# ansible-playbook playbooks/init-rocky-openqa-developer-host.yml --tags=configure
#
# Created: @akatch
---
- name: Rocky OpenQA Runbook
  hosts: localhost
  connection: local
  become: true
  vars_files:
    - vars/openqa.yml
  # This is to try to avoid the handler issue in pre/post tasks
  handlers:
    - name: Import handlers
      ansible.builtin.import_tasks: handlers/main.yml
  pre_tasks:
    - name: Check if ansible cannot be run here
      ansible.builtin.stat:
        path: /etc/no-ansible
      register: no_ansible
    - name: Verify if we can run ansible
      ansible.builtin.assert:
        that:
          - "not no_ansible.stat.exists"
        success_msg: "We are able to run on this node"
        fail_msg: "/etc/no-ansible exists - skipping run on this node"
  tasks:
    - name: Remove openqa multivm networking configs
      ansible.builtin.import_tasks: tasks/remove_openqa-multivm-networking.yml
  post_tasks:
    - name: Touching run file that ansible has ran here
      ansible.builtin.file:
        path: /var/log/ansible.run
        state: touch
        mode: '0644'
        owner: root
        group: root
...

tasks/main.yml Normal file

@@ -0,0 +1,4 @@
---
# No tasks
- debug: msg="No tasks are provided here. Please import the task as needed in your playbook."
...


@@ -1,111 +0,0 @@
---
# {{ openqa_multivm_bridge_interface }} should not exist or we should use a different name
- name: Assert bridge interface does not exist
  ansible.builtin.assert:
    that:
      - 'openqa_multivm_bridge_interface not in ansible_interfaces'
    success_msg: 'interface does not exist, can proceed'
    fail_msg: '{{ openqa_multivm_bridge_interface }} already exists, please supply an alternative'
- name: Install multivm networking packages
  ansible.builtin.dnf:
    pkg:
      - os-autoinst-openvswitch
      - tunctl
- name: Create /etc/sysconfig/os-autoinst-openvswitch
  ansible.builtin.copy:
    src: etc/sysconfig/os-autoinst-openvswitch.j2
    dest: /etc/sysconfig/os-autoinst-openvswitch
    mode: '0644'
  notify: Restart os-autoinst-openvswitch
- name: Create bridge interface configuration
  ansible.builtin.copy:
    src: etc/sysconfig/network-scripts/ifcfg-br.j2
    dest: /etc/sysconfig/network-scripts/ifcfg-{{ openqa_multivm_bridge_interface }}
    mode: '0644'
- name: Create worker tap interface configs
  ansible.builtin.copy:
    src: etc/sysconfig/network-scripts/ifcfg-tap.j2
    dest: /etc/sysconfig/network-scripts/ifcfg-tap{{ item }}
    mode: '0644'
  loop: "{{ range(openqa_worker_count) | list }}"
- name: Update /sbin/ifup-pre-local
  ansible.builtin.template:
    src: sbin/ifup-pre-local.j2
    dest: /sbin/ifup-pre-local
    mode: 'ug+x'
- name: Enable bridge interface for internal zone
  ansible.posix.firewalld:
    permanent: true
    interface: '{{ openqa_multivm_bridge_interface }}'
    state: enabled
    zone: internal
  notify: Reload firewalld
- name: Enable masquerade for public and internal zones
  ansible.posix.firewalld:
    masquerade: true
    permanent: true
    state: enabled
    zone: '{{ item }}'
  loop:
    - public
    - internal
  notify: Reload firewalld
- name: Enable ipv4 IP forwarding
  ansible.posix.sysctl:
    name: net.ipv4.ip_forward
    value: '1'
    state: present
    sysctl_file: /etc/sysctl.d/ip-forward.conf
    sysctl_set: true
- name: Set-target ACCEPT on public zone
  ansible.posix.firewalld:
    permanent: true
    state: present
    zone: public
    target: ACCEPT
  notify: Reload firewalld
# Only needed for multi-host setups
- name: Add port for GRE tunnel
  ansible.posix.firewalld:
    permanent: true
    port: 1723/tcp
    state: enabled
- name: Enable openvswitch services
  ansible.builtin.systemd_service:
    name: "{{ item }}"
    state: started
    enabled: true
  loop:
    - openvswitch
    - os-autoinst-openvswitch
  ignore_errors: "{{ ansible_check_mode }}"
- name: Set WORKER_CLASS for tap interfaces
  community.general.ini_file:
    path: /etc/openqa/workers.ini
    section: global
    option: WORKER_CLASS
    value: qemu_x86_64,tap
    state: present
    mode: '0644'
  notify: Restart openqa services
- name: Enable bridge interface for openvswitch
  ansible.builtin.command: ovs-vsctl add-br {{ openqa_multivm_bridge_interface }}
  changed_when: true
- name: Enable capability
  ansible.builtin.command: setcap CAP_NET_ADMIN=ep /usr/bin/qemu-system-x86_64
  changed_when: true
...


@@ -1,57 +0,0 @@
---
- name: Install OpenQA worker packages
  ansible.builtin.dnf:
    name: "{{ openqa_worker_packages }}"
    state: present
  tags:
    - packages
- name: Create openQA group
  ansible.builtin.group:
    name: "{{ openqa_group }}"
    system: true
- name: Create openQA user
  ansible.builtin.user:
    name: "{{ openqa_user }}"
    groups: "{{ openqa_group }}"
    append: true
    system: true
- name: Configure firewalld for openQA worker connections
  ansible.builtin.template:
    src: etc/firewalld/services/{{ item }}.xml.j2
    dest: /etc/firewalld/services/{{ item }}.xml
    owner: root
    group: root
    mode: "0644"
  loop:
    - openqa-socket
    - openqa-vnc
  tags:
    - configure
  notify: Reload firewalld
- name: Write openQA configuration file
  ansible.builtin.template:
    src: etc/openqa/{{ item }}.j2
    dest: /etc/openqa/{{ item }}
    owner: "{{ openqa_user }}"
    group: "{{ openqa_group }}"
    mode: "0444"
  loop:
    - client.conf
    - workers.ini
  tags:
    - configure
  notify: Restart openQA workers
- name: Start openQA cache services
  ansible.builtin.systemd:
    name: "{{ item }}"
    state: started
    enabled: true
  loop:
    - openqa-worker-cacheservice
    - openqa-worker-cacheservice-minion
...


@@ -1,29 +1,28 @@
---
- name: Install OpenQA packages
  ansible.builtin.yum:
  yum:
    name: "{{ openqa_packages }}"
    state: present
  tags:
    - packages
- name: Copy httpd configuration files
  ansible.builtin.copy:
  copy:
    remote_src: true
    src: /etc/httpd/conf.d/{{ item }}.template
    dest: /etc/httpd/conf.d/{{ item }}
    mode: "0644"
    mode: '0644'
    owner: root
    group: root
  loop:
    - openqa.conf
    - openqa-ssl.conf
  notify: Restart httpd
  notify: restart_httpd
  tags:
    - configure
  ignore_errors: "{{ ansible_check_mode }}"
- name: Template OpenQA configuration files
  ansible.builtin.template:
  template:
    src: etc/openqa/{{ item }}.j2
    dest: /etc/openqa/{{ item }}
    owner: "{{ openqa_user }}"
@@ -34,35 +33,30 @@
    - client.conf
  tags:
    - configure
  notify: Restart openQA workers
- name: Get service facts
  ansible.builtin.service_facts:
  check_mode: false
  service_facts:
- name: Check for non-empty postgres data directory
  ansible.builtin.stat:
  stat:
    path: /var/lib/pgsql/data/base
  register: postgres_data_dir
- name: If postgresql is not already running, initialize database
  ansible.builtin.command: postgresql-setup --initdb
  command: postgresql-setup --initdb
  when: not ( ansible_facts.services["postgresql.service"]["state"] == "running" )
    and not postgres_data_dir.stat.exists
  changed_when: true
  ignore_errors: "{{ ansible_check_mode }}"
- name: Enable and start postgresql service
  ansible.builtin.systemd:
  systemd:
    name: postgresql
    state: started
    enabled: true
  when: not ( ansible_facts.services["postgresql.service"]["state"] == "running" )
    and not postgres_data_dir.stat.exists
  ignore_errors: "{{ ansible_check_mode }}"
- name: Configure SELinux to allow httpd connection to network
  ansible.posix.seboolean:
  seboolean:
    name: httpd_can_network_connect
    state: true
    persistent: true
@@ -70,17 +64,16 @@
    - configure
- name: Enable and start OpenQA services
  ansible.builtin.systemd:
  systemd:
    name: "{{ item }}"
    state: started
    enabled: true
  loop: "{{ openqa_services }}"
  tags:
    - configure
  ignore_errors: "{{ ansible_check_mode }}"
- name: Create openqa-vnc firewalld service
  ansible.builtin.template:
  template:
    src: etc/firewalld/services/openqa-vnc.xml.j2
    dest: /etc/firewalld/services/openqa-vnc.xml
    owner: root
@@ -88,13 +81,15 @@
    mode: "0644"
  tags:
    - configure
  notify: Reload firewalld
- name: Systemctl daemon-reload
  ansible.builtin.systemd:
    daemon_reload: true
- name: Load openqa-vnc firewalld service
  systemd:
    name: firewalld
    state: reloaded
  tags:
    - configure
- name: Permit traffic for http and openqa-vnc services
- name: Permit traffic for {{ item }} service
  ansible.posix.firewalld:
    service: "{{ item }}"
    permanent: true
@@ -104,17 +99,23 @@
    - openqa-vnc
  tags:
    - configure
  notify: Reload firewalld
- name: Reload FirewallD
  systemd:
    name: firewalld
    state: reloaded
  tags:
    - configure
- name: Check for existing repository
  ansible.builtin.stat:
  stat:
    path: "{{ openqa_homedir }}/share/tests/rocky"
  register: rocky_testing_repo
  tags:
    - configure
- name: Clone repository if it does not already exist
  ansible.builtin.git:
  git:
    accept_hostkey: true
    dest: "{{ openqa_homedir }}/share/tests/rocky"
    repo: "{{ openqa_rocky_testing_repo }}"
@@ -124,23 +125,68 @@
    - configure
- name: Set owner/group/permissions on repo contents
  ansible.builtin.file:
  file:
    path: "{{ openqa_homedir }}/share/tests/rocky"
    recurse: true
    owner: "{{ openqa_user }}"
    group: "{{ openqa_group }}"
    mode: "0775"
    mode: "u+rwX,g+rwX,o+rX,o-w"
  tags:
    - configure
- name: Create asset directories
  ansible.builtin.file:
    path: "{{ openqa_homedir }}/share/factory/{{ item }}/fixed"
# fifloader.py will fail if the Demo user is not logged in
- name: Authenticate to web UI the first time
  uri:
    url: "http://{{ openqa_host }}/login"
- name: Run fifloader.py
  command: ./fifloader.py -l -c templates.fif.json templates-updates.fif.json
  changed_when: "1 != 1"
  args:
    chdir: "{{ openqa_homedir }}/share/tests/rocky"
- name: Create ISO directory
  file:
    path: "{{ openqa_homedir }}/share/factory/iso/fixed"
    state: directory
    owner: "{{ openqa_user }}"
    group: "{{ openqa_group }}"
    mode: "0775"
  loop:
    - iso
    - hdd
  tags:
    - download_isos
- name: Download ISOs
  get_url:
    dest: "{{ openqa_homedir }}/share/factory/iso/fixed/{{ item.name }}"
    url: "{{ rocky_iso_download_url }}/{{ item.name }}"
    checksum: "{{ item.checksum }}"
    owner: "{{ openqa_user }}"
    group: "{{ openqa_group }}"
    tmp_dest: "/var/tmp"
    mode: "0644"
  loop: "{{ openqa_isos }}"
  tags:
    - download_isos
- name: Start {{ openqa_worker_count }} OpenQA workers
  ansible.builtin.systemd:
    name: "openqa-worker@{{ item }}"
    state: started
    enabled: true
  # range 'end' parameter is exclusive, so add 1
  loop: "{{ range(1, (openqa_worker_count|int + 1)) | list }}"
  tags:
    - start_workers
    - configure
- name: POST a job
  command: |
    openqa-cli api -X POST isos \
      ISO=Rocky-{{ rocky_version }}-{{ rocky_arch }}-minimal.iso \
      ARCH={{ rocky_arch }} \
      DISTRI=rocky \
      FLAVOR=minimal-iso \
      VERSION={{ rocky_version }} \
      BUILD="{{ '%Y%m%d.%H%M%S' | strftime }}.0"
  changed_when: "1 != 1"
...


@@ -1,92 +0,0 @@
---
- name: Remove files
  ansible.builtin.file:
    path: '{{ item }}'
    state: absent
  loop:
    - /etc/sysconfig/os-autoinst-openvswitch
    - /etc/sysconfig/network-scripts/ifcfg-{{ openqa_multivm_bridge_interface }}
- name: Remove tap interface configurations
  ansible.builtin.file:
    path: /etc/sysconfig/network-scripts/ifcfg-tap{{ item }}
    state: absent
  loop: "{{ range(openqa_worker_count | int) | list }}"
- name: Delete bridge interface
  ansible.builtin.command: ovs-vsctl del-br {{ openqa_multivm_bridge_interface }}
  changed_when: true
- name: Disable openvswitch services
  ansible.builtin.systemd:
    name: "{{ item }}"
    state: stopped
    enabled: false
  loop:
    - os-autoinst-openvswitch
    - openvswitch
- name: Remove packages
  ansible.builtin.dnf:
    pkg:
      - os-autoinst-openvswitch
      - tunctl
      - network-scripts
    state: absent
- name: Remove /sbin/ifup-pre-local
  ansible.builtin.file:
    path: /sbin/ifup-pre-local
    state: absent
- name: Disable bridge interface for internal zone
  ansible.posix.firewalld:
    permanent: true
    interface: br0
    state: disabled
    zone: internal
  notify: reload_firewalld
- name: Disable masquerade for public and internal zones
  ansible.posix.firewalld:
    masquerade: true
    permanent: true
    state: disabled
    zone: '{{ item }}'
  loop:
    - public
    - internal
  notify: reload_firewalld
- name: Disable ipv4 IP forwarding
  ansible.posix.sysctl:
    name: net.ipv4.ip_forward
    value: '1'
    state: absent
    sysctl_file: /etc/sysctl.d/ip-forward.conf
    sysctl_set: true
- name: Set-target ACCEPT on public zone
  ansible.posix.firewalld:
    permanent: true
    state: absent
    zone: public
    target: ACCEPT
  notify: reload_firewalld
- name: Remove port for GRE tunnel
  ansible.posix.firewalld:
    permanent: true
    port: 1723/tcp
    state: disabled
  notify: reload_firewalld
- name: Set WORKER_CLASS for tap interfaces
  community.general.ini_file:
    path: /etc/openqa/workers.ini
    section: global
    option: WORKER_CLASS
    value: qemu_x86_64,tap
    state: absent
    mode: '0644'
...


@@ -1,42 +0,0 @@
---
- name: Uninstall OpenQA packages
  ansible.builtin.yum:
    name: "{{ openqa_packages }}"
    state: absent
- name: Delete OpenQA files and directories
  ansible.builtin.file:
    path: "{{ item }}"
    state: absent
  loop:
    - "{{ openqa_homedir }}"
    - /var/lib/pgsql
    - /etc/openqa
    - /etc/httpd/conf.d/openqa.conf
    - /etc/httpd/conf.d/openqa-ssl.conf
- name: Disable httpd_can_network_connect
  ansible.posix.seboolean:
    name: httpd_can_network_connect
    state: false
    persistent: true
- name: Deny traffic for services
  ansible.posix.firewalld:
    service: "{{ item }}"
    permanent: true
    state: disabled
  loop:
    - http
    - openqa-vnc
- name: Deny VNC traffic for local workers
  ansible.posix.firewalld:
    port: "{{ openqa_min_vnc_port }}-{{ openqa_max_vnc_port }}/tcp"
    permanent: true
    state: disabled
- name: Reload FirewallD
  ansible.builtin.systemd:
    name: firewalld
    state: reloaded


@@ -1,4 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<service>
  <port port="{{ openqa_min_socket_port }}-{{ openqa_max_socket_port }}" protocol="tcp"/>
</service>


@@ -1,7 +0,0 @@
[global]
HOST = https://{{ openqa_host }}
CACHEDIRECTORY = /var/lib/openqa/cache
CACHE_MIN_FREE_PERCENTAGE = 10
[https://{{ openqa_host }}]
TESTPOOLSERVER = rsync://{{ openqa_host }}/tests


@@ -1,10 +0,0 @@
DEVICETYPE='ovs'
TYPE='OVSBridge'
BOOTPROTO='static'
IPADDR='172.16.2.2'
NETMASK='255.254.0.0'
DEVICE={{ openqa_multivm_bridge_interface }}
STP=off
ONBOOT='yes'
NAME='{{ openqa_multivm_bridge_interface }}'
HOTPLUG='no'


@@ -1,7 +0,0 @@
DEVICETYPE='ovs'
TYPE='OVSPort'
OVS_BRIDGE='{{ openqa_multivm_bridge_interface }}'
DEVICE='tap{{ item }}'
ONBOOT='yes'
BOOTPROTO='none'
HOTPLUG='no'


@@ -1,3 +0,0 @@
OS_AUTOINST_BRIDGE_LOCAL_IP=172.16.2.2
OS_AUTOINST_BRIDGE_REWRITE_TARGET=172.17.0.0
OS_AUTOINST_USE_BRIDGE={{ openqa_multivm_bridge_interface }}


@@ -1,20 +0,0 @@
#!/bin/sh
if=$(echo "$1" | sed -e 's,ifcfg-,,')
iftype=$(echo "$if" | sed -e 's,[0-9]\+$,,')
# if the interface being brought up is tap[n], create
# the tap device first
if [ "$iftype" == "tap" ]; then
    tunctl -u _openqa-worker -p -t "$if"
fi
# if the interface being brought up is {{ openqa_multivm_bridge_interface }}, create
# the gre tunnels
if [ "$if" == "{{ openqa_multivm_bridge_interface }}" ]; then
    ovs-vsctl set bridge {{ openqa_multivm_bridge_interface }} stp_enable=true
    # This is only needed for multi-host setups
{% for w in range(1, openqa_worker_count+1) %}
    #ovs-vsctl --may-exist add-port {{ openqa_multivm_bridge_interface }} gre{{ w }} -- set interface gre{{ w }} type=gre options:remote_ip=172.16.2.{{ 2 + w|int }}
{% endfor %}
fi


@@ -1,9 +1,5 @@
---
- name: Run tests
  hosts: localhost
- hosts: localhost
  remote_user: root
  tasks:
    - name: Ensure required variables are defined
      ansible.builtin.assert:
        that:
          - openqa_host is defined
    - import_tasks: example.yml


@@ -1,35 +0,0 @@
---
# The primary openQA host
openqa_host: localhost
openqa_client_key: 1234567890ABCDEF
openqa_client_secret: 1234567890ABCDEF
# Default OpenQA user and group
openqa_user: geekotest
openqa_group: geekotest
# The number of workers to enable on this system
openqa_worker_count: 1
# Port range to open for VNC access to local workers.
# The max port should be 5990 + n where n is the total
# number of workers you want to enable on your system.
openqa_min_vnc_port: 5991
openqa_max_vnc_port: "{{ 5990 + openqa_worker_count | int }}"
# Port range to open for socket connections from the primary host.
openqa_min_socket_port: 20000
openqa_max_socket_port: 20089
# Packages to install
openqa_worker_packages:
- firewalld
- guestfs-tools
- libguestfs-xfs
- libvirt-daemon-config-network
- openqa-worker
- perl-REST-Client
- python3-libguestfs
- virt-install
- withlock
...


@@ -45,7 +45,7 @@ openqa_worker_count: 1
# The max port should be 5990 + n where n is the total
# number of workers you want to enable on your system.
openqa_min_vnc_port: 5991
openqa_max_vnc_port: "{{ 5990 + openqa_worker_count | int }}"
openqa_max_vnc_port: "{{ 5990 + openqa_worker_count|int }}"
# Packages to install
openqa_packages:


@@ -9,6 +9,6 @@ templates_src_dir: "{{ playbook_dir }}/files/usr/share/openqa/templates"
templates_dest_dir: "/usr/share/openqa/templates"
branding_patches:
- {path: /usr/share/openqa/assets/assetpack.def, patch: /usr/share/openqa/assets/assetpack.def.patch}
- {path: /usr/share/openqa/templates/webapi/main/index.html.ep, patch: /usr/share/openqa/templates/webapi/main/index.html.ep.patch}
- { path: /usr/share/openqa/assets/assetpack.def, patch: /usr/share/openqa/assets/assetpack.def.patch }
- { path: /usr/share/openqa/templates/webapi/main/index.html.ep, patch: /usr/share/openqa/templates/webapi/main/index.html.ep.patch }
...