Compare commits

...

16 Commits

Author SHA1 Message Date
Al Bowles 203286d4ac Automation to uninstall openQA on developer hosts 2024-03-22 22:59:42 +00:00
Al Bowles c4d9d3ea24 Refinements to the devbox setup 2024-03-22 22:59:42 +00:00
Al Bowles acbbcace69 Add templates for multivm networking 2024-03-22 22:59:42 +00:00
Al Bowles 95cf46f6d1 Appease the linter 2024-03-22 22:59:42 +00:00
Al Bowles 22cdf9dec5 WIP: Automation for configuring multivm networking 2024-03-22 22:59:42 +00:00
Al Bowles 69923813dd Correctly name workers.ini, move worker restart to handler 2024-03-22 22:59:42 +00:00
Al Bowles 2c6f93ff8e Perform firewalld reload as a handler 2024-03-22 22:59:42 +00:00
Al Bowles 05fb2aa93b Linter fixes 2024-03-22 22:59:40 +00:00
Al Bowles 1c26ea706f
Use some more defaulty defaults 2023-11-17 22:56:23 -06:00
Al Bowles ca08f4bb0e
Add some usages 2023-11-17 22:56:23 -06:00
Al Bowles beb14b7a96
It turns out openqa does not like inline comments in its ini files 2023-11-17 22:56:23 -06:00
Al Bowles d7ab984b4f
Start cache services 2023-11-17 22:56:18 -06:00
Al Bowles 96f68226dc
Move requirements file to meet convention 2023-11-17 22:56:18 -06:00
Al Bowles b337544f5f
Add requirements file 2023-11-17 22:56:18 -06:00
Al Bowles 7d8bd73307
Update filelist 2023-11-17 22:56:09 -06:00
Al Bowles 2a237385ef
Automation for configuring a worker-only host 2023-11-17 22:56:02 -06:00
23 changed files with 632 additions and 101 deletions

View File

@ -1,6 +1,7 @@
---
# .ansible-lint
warn_list:
- '204' # Lines should be less than 160 characters
- '701' # meta/main.yml should contain relevant info
- '204' # Lines should be less than 160 characters
- '701' # meta/main.yml should contain relevant info
skip_list:
- '106' # Role name must match ^[a-z][a-z0-9_]+$ pattern
- '106' # Role name must match ^[a-z][a-z0-9_]+$ pattern

View File

@ -17,27 +17,31 @@ This repository is for openQA operations and management.
├── handlers
│   └── main.yml
├── init-rocky-openqa-developer-host.yml
├── localhost.yml
├── init-rocky-openqa-worker-host.yml
├── README.md
├── roles
│   └── README.md
├── tasks
│   ├── main.yml
│   ├── openqa-worker.yml
│   └── openqa.yml
├── templates
│   └── etc
│   ├── firewalld
│   │   └── services
│   │   ├── openqa-socket.xml.j2
│   │   └── openqa-vnc.xml.j2
│   └── openqa
│   ├── client.conf.j2
│   └── openqa.ini.j2
│   ├── openqa.ini.j2
│   └── workers.conf.j2
├── tests
│   ├── README.md
│   └── test.yml
└── vars
├── main.yml
└── openqa.yml
├── openqa-worker.yml
└── openqa.yml
```
## Guidelines

View File

@ -0,0 +1,3 @@
---
collections:
- ansible.posix

View File

@ -1,2 +1,37 @@
---
# Handlers
# Task "notify:" values must match these handler names exactly (case-sensitive).
- name: Reload firewalld
  ansible.builtin.systemd:
    name: firewalld
    state: reloaded
  # systemd cannot be driven in check mode; do not fail the dry run
  ignore_errors: "{{ ansible_check_mode }}"
- name: Restart openQA workers
  ansible.builtin.systemd:
    name: "openqa-worker@{{ item }}"
    state: restarted
    enabled: true
  # range "end" parameter is exclusive, so add 1
  loop: "{{ range(1, (openqa_worker_count | int + 1)) | list }}"
  ignore_errors: "{{ ansible_check_mode }}"
- name: Restart openqa services
  ansible.builtin.systemd:
    name: "{{ item }}"
    state: restarted
  # openqa_services is expected from the including play's vars_files
  loop: "{{ openqa_services }}"
  ignore_errors: "{{ ansible_check_mode }}"
- name: Restart os-autoinst-openvswitch
  ansible.builtin.systemd:
    name: os-autoinst-openvswitch
    state: restarted
    enabled: true
  ignore_errors: "{{ ansible_check_mode }}"
- name: Restart httpd
  ansible.builtin.service:
    name: httpd
    state: restarted
    enabled: true
  ignore_errors: "{{ ansible_check_mode }}"
...

View File

@ -24,16 +24,17 @@
# This is to try to avoid the handler issue in pre/post tasks
handlers:
- import_tasks: handlers/main.yml
- name: Import handlers
ansible.builtin.import_tasks: handlers/main.yml
pre_tasks:
- name: Check if ansible cannot be run here
stat:
ansible.builtin.stat:
path: /etc/no-ansible
register: no_ansible
- name: Verify if we can run ansible
assert:
ansible.builtin.assert:
that:
- "not no_ansible.stat.exists"
success_msg: "We are able to run on this node"
@ -41,13 +42,14 @@
tasks:
- name: Install and configure OpenQA
import_tasks: tasks/openqa.yml
ansible.builtin.import_tasks: tasks/openqa.yml
- name: Apply Rocky Linux OpenQA Branding
import_tasks: tasks/openqa_branding.yml
ansible.builtin.import_tasks: tasks/openqa_branding.yml
post_tasks:
- name: Touching run file that ansible has ran here
file:
ansible.builtin.file:
path: /var/log/ansible.run
state: touch
mode: '0644'

View File

@ -0,0 +1,49 @@
# Configure an openQA worker host
#
# Usages:
# # Install and configure an openQA worker-only host
# ansible-playbook init-rocky-openqa-worker-host.yml
#
# # Install and configure an openQA worker-only host with a parameters file
# ansible-playbook init-rocky-openqa-worker-host.yml -e @my-worker-host.yml
#
# Created: @akatch
---
- name: Rocky openQA Worker Runbook
  hosts: openqa_workers
  become: true
  # NOTE(review): tasks/openqa-worker.yml does not appear to reference facts,
  # so fact gathering is skipped — confirm before adding fact-dependent tasks
  gather_facts: false
  vars_files:
    - vars/openqa-worker.yml
  # This is to try to avoid the handler issue in pre/post tasks
  handlers:
    - name: Import handlers
      ansible.builtin.import_tasks: handlers/main.yml
  pre_tasks:
    # /etc/no-ansible acts as an opt-out marker for hosts that must not be managed
    - name: Check if ansible cannot be run here
      ansible.builtin.stat:
        path: /etc/no-ansible
      register: no_ansible
    - name: Verify if we can run ansible
      ansible.builtin.assert:
        that:
          - "not no_ansible.stat.exists"
        success_msg: "We are able to run on this node"
        fail_msg: "/etc/no-ansible exists - skipping run on this node"
  tasks:
    - name: Install and configure OpenQA workers
      ansible.builtin.import_tasks: tasks/openqa-worker.yml
  post_tasks:
    # marker file records that this host has been touched by automation
    - name: Touching run file that ansible has ran here
      ansible.builtin.file:
        path: /var/log/ansible.run
        state: touch
        mode: '0644'
        owner: root
        group: root
...

View File

@ -0,0 +1,41 @@
# Delete local OpenQA testing environment
# This playbook is *NOT* intended for WAN-facing systems!
# Created: @akatch
---
- name: Rocky OpenQA Runbook
  hosts: localhost
  connection: local
  become: true
  vars_files:
    - vars/openqa.yml
  # This is to try to avoid the handler issue in pre/post tasks
  handlers:
    - name: Import handlers
      ansible.builtin.import_tasks: handlers/main.yml
  pre_tasks:
    # /etc/no-ansible acts as an opt-out marker for hosts that must not be managed
    - name: Check if ansible cannot be run here
      ansible.builtin.stat:
        path: /etc/no-ansible
      register: no_ansible
    - name: Verify if we can run ansible
      ansible.builtin.assert:
        that:
          - "not no_ansible.stat.exists"
        success_msg: "We are able to run on this node"
        fail_msg: "/etc/no-ansible exists - skipping run on this node"
  tasks:
    - name: Remove OpenQA installation from this system
      ansible.builtin.import_tasks: tasks/remove_openqa.yml
  post_tasks:
    # marker file records that this host has been touched by automation
    - name: Touching run file that ansible has ran here
      ansible.builtin.file:
        path: /var/log/ansible.run
        state: touch
        mode: '0644'
        owner: root
        group: root

View File

@ -0,0 +1,54 @@
# Removes the openQA multi-VM networking configuration from this system.
# (Header previously described the developer-host *setup* playbook; the tasks
# imported below remove the multivm networking configs.)
# This playbook is *NOT* intended for WAN-facing systems!
#
# Usages:
# # Remove the multi-VM networking configuration
# # NOTE(review): confirm this playbook's actual filename before documenting it;
# # it imports tasks/remove_openqa-multivm-networking.yml
# ansible-playbook <this-playbook>.yml
#
# Created: @akatch
---
- name: Rocky OpenQA Runbook
  hosts: localhost
  connection: local
  become: true
  vars_files:
    - vars/openqa.yml
  # This is to try to avoid the handler issue in pre/post tasks
  handlers:
    - name: Import handlers
      ansible.builtin.import_tasks: handlers/main.yml
  pre_tasks:
    # /etc/no-ansible acts as an opt-out marker for hosts that must not be managed
    - name: Check if ansible cannot be run here
      ansible.builtin.stat:
        path: /etc/no-ansible
      register: no_ansible
    - name: Verify if we can run ansible
      ansible.builtin.assert:
        that:
          - "not no_ansible.stat.exists"
        success_msg: "We are able to run on this node"
        fail_msg: "/etc/no-ansible exists - skipping run on this node"
  tasks:
    - name: Remove openqa multivm networking configs
      ansible.builtin.import_tasks: tasks/remove_openqa-multivm-networking.yml
  post_tasks:
    # marker file records that this host has been touched by automation
    - name: Touching run file that ansible has ran here
      ansible.builtin.file:
        path: /var/log/ansible.run
        state: touch
        mode: '0644'
        owner: root
        group: root
...

View File

@ -1,4 +0,0 @@
---
# No tasks
- debug: msg="No tasks are provided here. Please import the task as needed in your playbook."
...

View File

@ -0,0 +1,111 @@
---
# {{ openqa_multivm_bridge_interface }} should not exist or we should use a different name
- name: Assert bridge interface does not exist
ansible.builtin.assert:
that:
- 'openqa_multivm_bridge_interface not in ansible_interfaces'
success_msg: 'interface does not exist, can proceed'
fail_msg: '{{ openqa_multivm_bridge_interface }} already exists, please supply an alternative'
- name: Install multivm networking packages
ansible.builtin.dnf:
pkg:
- os-autoinst-openvswitch
- tunctl
- name: Create /etc/sysconfig/os-autoinst-openvswitch
ansible.builtin.copy:
src: etc/sysconfig/os-autoinst-openvswitch.j2
dest: /etc/sysconfig/os-autoinst-openvswitch
mode: '0644'
notify: Restart os-autoinst-openvswitch
- name: Create bridge interface configuration
ansible.builtin.copy:
src: etc/sysconfig/network-scripts/ifcfg-br.j2
dest: /etc/sysconfig/network-scripts/ifcfg-{{ openqa_multivm_bridge_interface }}
mode: '0644'
- name: Create worker tap interface configs
ansible.builtin.copy:
src: etc/sysconfig/network-scripts/ifcfg-tap.j2
dest: /etc/sysconfig/network-scripts/ifcfg-tap{{ item }}
mode: '0644'
loop: "{{ range(openqa_worker_count) | list }}"
- name: Update /sbin/ifup-pre-local
ansible.builtin.template:
src: sbin/ifup-pre-local.j2
dest: /sbin/ifup-pre-local
mode: 'ug+x'
- name: Enable bridge interface for internal zone
ansible.posix.firewalld:
permanent: true
interface: '{{ openqa_multivm_bridge_interface }}'
state: enabled
zone: internal
notify: Reload firewalld
- name: Enable masquerade for public and internal zones
ansible.posix.firewalld:
masquerade: true
permanent: true
state: enabled
zone: '{{ item }}'
loop:
- public
- internal
notify: Reload firewalld
- name: Enable ipv4 IP forwarding
ansible.posix.sysctl:
name: net.ipv4.ip_forward
value: '1'
state: present
sysctl_file: /etc/sysctl.d/ip-forward.conf
sysctl_set: true
- name: Set-target ACCEPT on public zone
ansible.posix.firewalld:
permanent: true
state: present
zone: public
target: ACCEPT
notify: Reload firewalld
# Only needed for multi-host setups
# NOTE(review): 1723/tcp is the PPTP control port; GRE itself is IP protocol 47
# — confirm which one the inter-host tunnel actually requires.
- name: Add port for GRE tunnel
  ansible.posix.firewalld:
    permanent: true
    port: 1723/tcp
    state: enabled
  # permanent-only changes take effect after a reload; every other firewalld
  # task in this file notifies the handler, this one was missing it
  notify: Reload firewalld
- name: Enable openvswitch services
ansible.builtin.systemd_service:
name: "{{ item }}"
state: started
enabled: true
loop:
- openvswitch
- os-autoinst-openvswitch
ignore_errors: "{{ ansible_check_mode }}"
- name: Set WORKER_CLASS for tap interfaces
community.general.ini_file:
path: /etc/openqa/workers.ini
section: global
option: WORKER_CLASS
value: qemu_x86_64,tap
state: present
mode: '0644'
notify: Restart openqa services
- name: Enable bridge interface for openvswitch
  # --may-exist makes the command idempotent: without it, ovs-vsctl add-br
  # exits non-zero when the bridge already exists and the play fails on reruns
  ansible.builtin.command: ovs-vsctl --may-exist add-br {{ openqa_multivm_bridge_interface }}
  changed_when: true
- name: Enable capability
ansible.builtin.command: setcap CAP_NET_ADMIN=ep /usr/bin/qemu-system-x86_64
changed_when: true
...

57
tasks/openqa-worker.yml Normal file
View File

@ -0,0 +1,57 @@
---
- name: Install OpenQA worker packages
ansible.builtin.dnf:
name: "{{ openqa_worker_packages }}"
state: present
tags:
- packages
- name: Create openQA group
ansible.builtin.group:
name: "{{ openqa_group }}"
system: true
- name: Create openQA user
ansible.builtin.user:
name: "{{ openqa_user }}"
groups: "{{ openqa_group }}"
append: true
system: true
- name: Configure firewalld for openQA worker connections
ansible.builtin.template:
src: etc/firewalld/services/{{ item }}.xml.j2
dest: /etc/firewalld/services/{{ item }}.xml
owner: root
group: root
mode: "0644"
loop:
- openqa-socket
- openqa-vnc
tags:
- configure
notify: Reload firewalld
- name: Write openQA configuration file
ansible.builtin.template:
src: etc/openqa/{{ item }}.j2
dest: /etc/openqa/{{ item }}
owner: "{{ openqa_user }}"
group: "{{ openqa_group }}"
mode: "0444"
loop:
- client.conf
- workers.ini
tags:
- configure
notify: Restart openQA workers
- name: Start openQA cache services
ansible.builtin.systemd:
name: "{{ item }}"
state: started
enabled: true
loop:
- openqa-worker-cacheservice
- openqa-worker-cacheservice-minion
...

View File

@ -1,28 +1,29 @@
---
- name: Install OpenQA packages
yum:
ansible.builtin.yum:
name: "{{ openqa_packages }}"
state: present
tags:
- packages
- name: Copy httpd configuration files
copy:
ansible.builtin.copy:
remote_src: true
src: /etc/httpd/conf.d/{{ item }}.template
dest: /etc/httpd/conf.d/{{ item }}
mode: '0644'
mode: "0644"
owner: root
group: root
loop:
- openqa.conf
- openqa-ssl.conf
notify: restart_httpd
notify: Restart httpd
tags:
- configure
ignore_errors: "{{ ansible_check_mode }}"
- name: Template OpenQA configuration files
template:
ansible.builtin.template:
src: etc/openqa/{{ item }}.j2
dest: /etc/openqa/{{ item }}
owner: "{{ openqa_user }}"
@ -33,30 +34,35 @@
- client.conf
tags:
- configure
notify: Restart openQA workers
- name: Get service facts
service_facts:
ansible.builtin.service_facts:
check_mode: false
- name: Check for non-empty postgres data directory
stat:
ansible.builtin.stat:
path: /var/lib/pgsql/data/base
register: postgres_data_dir
- name: If postgresql is not already running, initialize database
command: postgresql-setup --initdb
ansible.builtin.command: postgresql-setup --initdb
when: not ( ansible_facts.services["postgresql.service"]["state"] == "running" )
and not postgres_data_dir.stat.exists
changed_when: true
ignore_errors: "{{ ansible_check_mode }}"
- name: Enable and start postgresql service
systemd:
ansible.builtin.systemd:
name: postgresql
state: started
enabled: true
when: not ( ansible_facts.services["postgresql.service"]["state"] == "running" )
and not postgres_data_dir.stat.exists
ignore_errors: "{{ ansible_check_mode }}"
- name: Configure SELinux to allow httpd connection to network
seboolean:
ansible.posix.seboolean:
name: httpd_can_network_connect
state: true
persistent: true
@ -64,16 +70,17 @@
- configure
- name: Enable and start OpenQA services
systemd:
ansible.builtin.systemd:
name: "{{ item }}"
state: started
enabled: true
loop: "{{ openqa_services }}"
tags:
- configure
ignore_errors: "{{ ansible_check_mode }}"
- name: Create openqa-vnc firewalld service
template:
ansible.builtin.template:
src: etc/firewalld/services/openqa-vnc.xml.j2
dest: /etc/firewalld/services/openqa-vnc.xml
owner: root
@ -81,15 +88,13 @@
mode: "0644"
tags:
- configure
notify: Reload firewalld
- name: Load openqa-vnc firewalld service
systemd:
name: firewalld
state: reloaded
tags:
- configure
- name: Systemctl daemon-reload
ansible.builtin.systemd:
daemon_reload: true
- name: Permit traffic for {{ item }} service
- name: Permit traffic for http and openqa-vnc services
ansible.posix.firewalld:
service: "{{ item }}"
permanent: true
@ -99,23 +104,17 @@
- openqa-vnc
tags:
- configure
- name: Reload FirewallD
systemd:
name: firewalld
state: reloaded
tags:
- configure
notify: Reload firewalld
- name: Check for existing repository
stat:
ansible.builtin.stat:
path: "{{ openqa_homedir }}/share/tests/rocky"
register: rocky_testing_repo
tags:
- configure
- name: Clone repository if it does not already exist
git:
ansible.builtin.git:
accept_hostkey: true
dest: "{{ openqa_homedir }}/share/tests/rocky"
repo: "{{ openqa_rocky_testing_repo }}"
@ -125,68 +124,23 @@
- configure
- name: Set owner/group/permissions on repo contents
file:
ansible.builtin.file:
path: "{{ openqa_homedir }}/share/tests/rocky"
recurse: true
owner: "{{ openqa_user }}"
group: "{{ openqa_group }}"
mode: "u+rwX,g+rwX,o+rX,o-w"
mode: "0775"
tags:
- configure
# fifloader.py will fail if the Demo user is not logged in
- name: Authenticate to web UI the first time
uri:
url: "http://{{ openqa_host }}/login"
- name: Run fifloader.py
command: ./fifloader.py -l -c templates.fif.json templates-updates.fif.json
changed_when: "1 != 1"
args:
chdir: "{{ openqa_homedir }}/share/tests/rocky"
- name: Create ISO directory
file:
path: "{{ openqa_homedir }}/share/factory/iso/fixed"
- name: Create asset directories
ansible.builtin.file:
path: "{{ openqa_homedir }}/share/factory/{{ item }}/fixed"
state: directory
owner: "{{ openqa_user }}"
group: "{{ openqa_group }}"
mode: "0775"
tags:
- download_isos
- name: Download ISOs
get_url:
dest: "{{ openqa_homedir }}/share/factory/iso/fixed/{{ item.name }}"
url: "{{ rocky_iso_download_url }}/{{ item.name }}"
checksum: "{{ item.checksum }}"
owner: "{{ openqa_user }}"
group: "{{ openqa_group }}"
tmp_dest: "/var/tmp"
mode: "0644"
loop: "{{ openqa_isos }}"
tags:
- download_isos
- name: Start {{ openqa_worker_count }} OpenQA workers
ansible.builtin.systemd:
name: "openqa-worker@{{ item }}"
state: started
enabled: true
# range 'end' parameter is exclusive, so add 1
loop: "{{ range(1, (openqa_worker_count|int + 1)) | list }}"
tags:
- start_workers
- configure
- name: POST a job
command: |
openqa-cli api -X POST isos \
ISO=Rocky-{{ rocky_version }}-{{ rocky_arch }}-minimal.iso \
ARCH={{ rocky_arch }} \
DISTRI=rocky \
FLAVOR=minimal-iso \
VERSION={{ rocky_version }} \
BUILD="{{ '%Y%m%d.%H%M%S' | strftime }}.0"
changed_when: "1 != 1"
loop:
- iso
- hdd
...

View File

@ -0,0 +1,92 @@
---
- name: Remove files
ansible.builtin.file:
path: '{{ item }}'
state: absent
loop:
- /etc/sysconfig/os-autoinst-openvswitch
- /etc/sysconfig/network-scripts/ifcfg-{{ openqa_multivm_bridge_interface }}
- name: Remove tap interface configurations
ansible.builtin.file:
path: /etc/sysconfig/network-scripts/ifcfg-tap{{ item }}
state: absent
loop: "{{ range(openqa_worker_count | int) | list }}"
- name: Delete bridge interface
ansible.builtin.command: ovs-vsctl del-br {{ openqa_multivm_bridge_interface }}
changed_when: true
- name: Disable openvswitch services
ansible.builtin.systemd:
name: "{{ item }}"
state: stopped
enabled: false
loop:
- os-autoinst-openvswitch
- openvswitch
- name: Remove packages
ansible.builtin.dnf:
pkg:
- os-autoinst-openvswitch
- tunctl
- network-scripts
state: absent
- name: Remove /sbin/ifup-pre-local
ansible.builtin.file:
path: /sbin/ifup-pre-local
state: absent
- name: Disable bridge interface for internal zone
  ansible.posix.firewalld:
    permanent: true
    # was hard-coded "br0"; the rest of this file (file removal, del-br) uses
    # the configured interface name, so the wrong bridge could be left enabled
    interface: '{{ openqa_multivm_bridge_interface }}'
    state: disabled
    zone: internal
  # must match the handler name exactly — "reload_firewalld" does not exist;
  # the handler is named "Reload firewalld"
  notify: Reload firewalld
- name: Disable masquerade for public and internal zones
  ansible.posix.firewalld:
    masquerade: true
    permanent: true
    state: disabled
    zone: '{{ item }}'
  loop:
    - public
    - internal
  # fixed: "reload_firewalld" matches no handler; handlers/main.yml defines
  # "Reload firewalld"
  notify: Reload firewalld
- name: Disable ipv4 IP forwarding
ansible.posix.sysctl:
name: net.ipv4.ip_forward
value: '1'
state: absent
sysctl_file: /etc/sysctl.d/ip-forward.conf
sysctl_set: true
- name: Set-target ACCEPT on public zone
  ansible.posix.firewalld:
    permanent: true
    state: absent
    zone: public
    target: ACCEPT
  # fixed: "reload_firewalld" matches no handler; handlers/main.yml defines
  # "Reload firewalld"
  notify: Reload firewalld
- name: Remove port for GRE tunnel
  ansible.posix.firewalld:
    permanent: true
    port: 1723/tcp
    state: disabled
  # fixed: "reload_firewalld" matches no handler; handlers/main.yml defines
  # "Reload firewalld"
  notify: Reload firewalld
- name: Set WORKER_CLASS for tap interfaces
community.general.ini_file:
path: /etc/openqa/workers.ini
section: global
option: WORKER_CLASS
value: qemu_x86_64,tap
state: absent
mode: '0644'
...

42
tasks/remove_openqa.yml Normal file
View File

@ -0,0 +1,42 @@
---
- name: Uninstall OpenQA packages
ansible.builtin.yum:
name: "{{ openqa_packages }}"
state: absent
- name: Delete OpenQA files and directories
ansible.builtin.file:
path: "{{ item }}"
state: absent
loop:
- "{{ openqa_homedir }}"
- /var/lib/pgsql
- /etc/openqa
- /etc/httpd/conf.d/openqa.conf
- /etc/httpd/conf.d/openqa-ssl.conf
- name: Disable httpd_can_network_connect
ansible.posix.seboolean:
name: httpd_can_network_connect
state: false
persistent: true
- name: Deny traffic for services
ansible.posix.firewalld:
service: "{{ item }}"
permanent: true
state: disabled
loop:
- http
- openqa-vnc
- name: Deny VNC traffic for local workers
ansible.posix.firewalld:
port: "{{ openqa_min_vnc_port }}-{{ openqa_max_vnc_port }}/tcp"
permanent: true
state: disabled
- name: Reload FirewallD
ansible.builtin.systemd:
name: firewalld
state: reloaded

View File

@ -0,0 +1,4 @@
<?xml version="1.0" encoding="utf-8"?>
<service>
<port port="{{ openqa_min_socket_port }}-{{ openqa_max_socket_port }}" protocol="tcp"/>
</service>

View File

@ -0,0 +1,7 @@
[global]
HOST = https://{{ openqa_host }}
CACHEDIRECTORY = /var/lib/openqa/cache
CACHE_MIN_FREE_PERCENTAGE = 10
[https://{{ openqa_host }}]
TESTPOOLSERVER = rsync://{{ openqa_host }}/tests

View File

@ -0,0 +1,10 @@
DEVICETYPE='ovs'
TYPE='OVSBridge'
BOOTPROTO='static'
IPADDR='172.16.2.2'
NETMASK='255.254.0.0'
DEVICE={{ openqa_multivm_bridge_interface }}
STP=off
ONBOOT='yes'
NAME='{{ openqa_multivm_bridge_interface }}'
HOTPLUG='no'

View File

@ -0,0 +1,7 @@
DEVICETYPE='ovs'
TYPE='OVSPort'
OVS_BRIDGE='{{ openqa_multivm_bridge_interface }}'
DEVICE='tap{{ item }}'
ONBOOT='yes'
BOOTPROTO='none'
HOTPLUG='no'

View File

@ -0,0 +1,3 @@
OS_AUTOINST_BRIDGE_LOCAL_IP=172.16.2.2
OS_AUTOINST_BRIDGE_REWRITE_TARGET=172.17.0.0
OS_AUTOINST_USE_BRIDGE={{ openqa_multivm_bridge_interface }}

View File

@ -0,0 +1,20 @@
#!/bin/sh
# Derive the interface name by stripping the "ifcfg-" prefix from $1
if=$(echo "$1" | sed -e 's,ifcfg-,,')
# Interface "type" is the name with any trailing digits removed (tap3 -> tap)
iftype=$(echo "$if" | sed -e 's,[0-9]\+$,,')
# if the interface being brought up is tap[n], create
# the tap device first
# fixed: "==" is a bashism; /bin/sh (dash) requires the POSIX "=" operator
if [ "$iftype" = "tap" ]; then
    tunctl -u _openqa-worker -p -t "$if"
fi
# if the interface being brought up is {{ openqa_multivm_bridge_interface }}, create
# the gre tunnels
if [ "$if" = "{{ openqa_multivm_bridge_interface }}" ]; then
    ovs-vsctl set bridge {{ openqa_multivm_bridge_interface }} stp_enable=true
    # This is only needed for multi-host setups
{% for w in range(1, openqa_worker_count + 1) %}
    #ovs-vsctl --may-exist add-port {{ openqa_multivm_bridge_interface }} gre{{ w }} -- set interface gre{{ w }} type=gre options:remote_ip=172.16.2.{{ 2 + w|int }}
{% endfor %}
fi

View File

@ -1,5 +1,9 @@
---
- hosts: localhost
- name: Run tests
hosts: localhost
remote_user: root
tasks:
- import_tasks: example.yml
- name: Ensure required variables are defined
ansible.builtin.assert:
that:
- openqa_host is defined

35
vars/openqa-worker.yml Normal file
View File

@ -0,0 +1,35 @@
---
# Variables for worker-only openQA hosts (consumed by tasks/openqa-worker.yml).
# The primary openQA host
openqa_host: localhost
# API credentials for talking to the primary host.
# NOTE(review): these look like placeholder values — override with real keys
# at run time (e.g. `-e @my-worker-host.yml`); confirm before deploying.
openqa_client_key: 1234567890ABCDEF
openqa_client_secret: 1234567890ABCDEF
# Default OpenQA user and group
openqa_user: geekotest
openqa_group: geekotest
# The number of workers to enable on this system
openqa_worker_count: 1
# Port range to open for VNC access to local workers.
# The max port should be 5990 + n where n is the total
# number of workers you want to enable on your system.
openqa_min_vnc_port: 5991
openqa_max_vnc_port: "{{ 5990 + openqa_worker_count | int }}"
# Port range to open for socket connections from the primary host.
openqa_min_socket_port: 20000
openqa_max_socket_port: 20089
# Packages to install
openqa_worker_packages:
  - firewalld
  - guestfs-tools
  - libguestfs-xfs
  - libvirt-daemon-config-network
  - openqa-worker
  - perl-REST-Client
  - python3-libguestfs
  - virt-install
  - withlock
...

View File

@ -45,7 +45,7 @@ openqa_worker_count: 1
# The max port should be 5990 + n where n is the total
# number of workers you want to enable on your system.
openqa_min_vnc_port: 5991
openqa_max_vnc_port: "{{ 5990 + openqa_worker_count|int }}"
openqa_max_vnc_port: "{{ 5990 + openqa_worker_count | int }}"
# Packages to install
openqa_packages: