commit 699ec2e2f0: init
6
.ansible-lint
Normal file
@@ -0,0 +1,6 @@
# .ansible-lint
warn_list:
  - '204'  # Lines should be less than 160 characters
  - '701'  # meta/main.yml should contain relevant info
skip_list:
  - '106'  # Role name must match ^[a-z][a-z0-9_]+$ pattern
33
.pre-commit-config.yaml
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
---
|
||||||
|
repos:
|
||||||
|
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||||
|
rev: v3.4.0
|
||||||
|
hooks:
|
||||||
|
- id: trailing-whitespace
|
||||||
|
- id: end-of-file-fixer
|
||||||
|
- id: check-added-large-files
|
||||||
|
- id: check-case-conflict
|
||||||
|
- id: check-executables-have-shebangs
|
||||||
|
- id: check-json
|
||||||
|
- id: pretty-format-json
|
||||||
|
- id: detect-private-key
|
||||||
|
|
||||||
|
- repo: local
|
||||||
|
hooks:
|
||||||
|
- id: ansible-lint
|
||||||
|
name: Ansible-lint
|
||||||
|
description: This hook runs ansible-lint.
|
||||||
|
entry: ansible-lint --force-color
|
||||||
|
language: python
|
||||||
|
# do not pass files to ansible-lint, see:
|
||||||
|
# https://github.com/ansible/ansible-lint/issues/611
|
||||||
|
pass_filenames: false
|
||||||
|
always_run: true
|
||||||
|
|
||||||
|
- repo: https://github.com/adrienverge/yamllint.git
|
||||||
|
rev: v1.26.0
|
||||||
|
hooks:
|
||||||
|
- id: yamllint
|
||||||
|
files: \.(yaml|yml)$
|
||||||
|
types: [file, yaml]
|
||||||
|
entry: yamllint
|
7
.yamllint
Normal file
@@ -0,0 +1,7 @@
---
extends: default

rules:
  line-length:
    max: 140
    level: warning
37
README.md
Normal file
@ -0,0 +1,37 @@
|
|||||||
|
# Ansible AWX Template: Ops Management
|
||||||
|
|
||||||
|
Ansible AWX is the method used to manage the Rocky Linux infrastructure, replacing direct use of the Ansible CLI. This template is used specifically for the management of systems and infrastructure, and it takes bits and pieces from the original infrastructure git repository on GitHub.
|
||||||
|
|
||||||
|
This repository may include duplicate playbooks from other ansible management repositories. Some pieces may also be removed and put into their own repository.
|
||||||
|
|
||||||
|
## Notes on local runs and playbooks for local development systems
|
||||||
|
|
||||||
|
Some playbooks are meant to be run locally, and there are cases where AWX is not feasible. To run those playbooks, keep the following in mind:
|
||||||
|
|
||||||
|
* `local-ansible.cfg` will need to be used
|
||||||
|
* `init-rocky-ansible-host.yml` will need to be run using that configuration file (if roles/collections are needed); see the example below
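A minimal local run, assuming the repository root as the working directory, could look like the following; the inventory path in the last command is illustrative and not something this repository ships:

```
# Use the repo-local configuration instead of /etc/ansible/ansible.cfg
export ANSIBLE_CONFIG=./local-ansible.cfg

# Pull down the public roles and collections defined in this repository
ansible-playbook init-rocky-ansible-host.yml

# Then run a playbook with that same configuration (inventory path is an example)
ansible-playbook -i inventories/production init-rocky-system-config.yml --limit localhost
```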
|
||||||
|
|
||||||
|
## Provides / Information
|
||||||
|
|
||||||
|
This repository is for Infrastructure operations.
|
||||||
|
|
||||||
|
```
|
||||||
|
.
|
||||||
|
├── README.md
|
||||||
|
├── defaults
|
||||||
|
│ └── main.yml
|
||||||
|
├── files
|
||||||
|
│ └── README.md
|
||||||
|
├── handlers
|
||||||
|
│ └── main.yml
|
||||||
|
├── tasks
|
||||||
|
│ └── main.yml
|
||||||
|
├── templates
|
||||||
|
│ └── README.md
|
||||||
|
├── tests
|
||||||
|
│ ├── README.md
|
||||||
|
│ ├── inventory
|
||||||
|
│ └── test.yml
|
||||||
|
└── vars
|
||||||
|
└── main.yml
|
||||||
|
```
|
8
adhoc-facts-refresh.yml
Normal file
@@ -0,0 +1,8 @@
---
- hosts: all
  become: true
  tasks:
    - name: Force a fact refresh to have those available in local cache
      setup:
        gather_timeout: 30
...
87
adhoc-rabbitmqqueue.yml
Normal file
@ -0,0 +1,87 @@
|
|||||||
|
---
|
||||||
|
# This playbook is meant to be used with callable variables, like adhoc or AWX.
|
||||||
|
# What: Creates RabbitMQ Users
|
||||||
|
# Required parameters:
|
||||||
|
# -> username: The username to create in RabbitMQ, which should match an LDAP
|
||||||
|
# name or the CN of a certificate. Note that if it's a hostname
|
||||||
|
# it must be the FQDN.
|
||||||
|
# -> queue_name: Name of the queue to create. This should be set up with a
|
||||||
|
# prefix_suffix name, where prefix is the username, and
|
||||||
|
# the suffix is a service name.
|
||||||
|
# -> routing_keys: A list to be used as routing keys.
|
||||||
|
# Optional:
|
||||||
|
# -> write_queues: A list of queue name prefixes to which the user will
|
||||||
|
# be allowed to publish.
|
||||||
|
# -> thresholds: A dictionary with two keys "warning" and "critical" - The
|
||||||
|
# values are numbers. In the event we have a monitoring system
|
||||||
|
# this can be a number of messages that could cause an alert.
|
||||||
|
# -> vhost: The vhost this queue will be part of. The default is /pubsub.
|
||||||
|
|
||||||
|
- name: Create a User
|
||||||
|
hosts: all
|
||||||
|
become: false
|
||||||
|
gather_facts: false
|
||||||
|
vars_files:
|
||||||
|
- vars/rabbitmq.yml
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
- name: "Checking for user variables"
|
||||||
|
assert:
|
||||||
|
that:
|
||||||
|
- username != "admin"
|
||||||
|
- username != "guest"
|
||||||
|
- username != "mq-monitoring"
|
||||||
|
success_msg: "Required variables provided"
|
||||||
|
fail_msg: "Username is reserved"
|
||||||
|
tags:
|
||||||
|
- rabbitmq
|
||||||
|
|
||||||
|
- name: "Validate username queue name"
|
||||||
|
assert:
|
||||||
|
that:
|
||||||
|
- "queue_name.startswith(username)"
|
||||||
|
tags:
|
||||||
|
- rabbitmq
|
||||||
|
|
||||||
|
- name: "Creating User Account"
|
||||||
|
community.rabbitmq.rabbitmq_user:
|
||||||
|
user: "{{ username }}"
|
||||||
|
vhost: "{{ vhost|default('/pubsub') }}"
|
||||||
|
read_priv: "^(zmq\\.topic)|^(amq\\.topic)|({{ username }}.*)$"
|
||||||
|
write_priv: "^(amq\\.topic)|({{ username }}.*){% for queue in write_queues|default([]) %}|({{ queue }}.*){% endfor %}$"
|
||||||
|
configure_priv: "^$"
|
||||||
|
state: present
|
||||||
|
tags:
|
||||||
|
- rabbitmq
|
||||||
|
|
||||||
|
- name: "Create {{ queue_name }}"
|
||||||
|
delegate_to: "{{ rabbitmq_cluster_list[0] }}"
|
||||||
|
community.rabbitmq.rabbitmq_queue:
|
||||||
|
name: "{{ queue_name }}"
|
||||||
|
vhost: "{{ vhost|default('/pubsub') }}"
|
||||||
|
auto_delete: false
|
||||||
|
durable: true
|
||||||
|
message_ttl: "{{ message_ttl|default('null') }}"
|
||||||
|
state: present
|
||||||
|
login_user: admin
|
||||||
|
login_password: "{{ rabbitmq_admin_password }}"
|
||||||
|
tags:
|
||||||
|
- rabbitmq
|
||||||
|
|
||||||
|
- name: "Bind {{ queue_name }} to amq.topic exchange"
|
||||||
|
delegate_to: "{{ rabbitmq_cluster_list[0] }}"
|
||||||
|
community.rabbitmq.rabbitmq_binding:
|
||||||
|
name: "amq.topic"
|
||||||
|
destination: "{{ queue_name }}"
|
||||||
|
destination_type: queue
|
||||||
|
routing_key: "{{ routing_item }}"
|
||||||
|
vhost: "{{ vhost|default('/pubsub') }}"
|
||||||
|
state: present
|
||||||
|
login_user: admin
|
||||||
|
login_password: "{{ rabbitmq_admin_password }}"
|
||||||
|
loop: "{{ routing_keys }}"
|
||||||
|
loop_control:
|
||||||
|
loop_var: routing_item
|
||||||
|
tags:
|
||||||
|
- rabbitmq
|
||||||
|
...
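A rough sketch of how the parameters documented in the playbook header might be supplied for an ad-hoc run; the hostname, queue name, and routing keys are placeholders, and under AWX these would normally arrive as survey or extra variables instead:

```
ansible-playbook adhoc-rabbitmqqueue.yml \
  -e username=build01.example.rockylinux.org \
  -e queue_name=build01.example.rockylinux.org_koji \
  -e '{"routing_keys": ["build.#", "buildsys.#"]}' \
  -e vhost=/pubsub
```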
|
35
adhoc-rabbitmquser.yml
Normal file
@ -0,0 +1,35 @@
|
|||||||
|
---
|
||||||
|
# This playbook is meant to be used with callable variables, like adhoc or AWX.
|
||||||
|
# What: Creates RabbitMQ Users
|
||||||
|
# The username is the required parameter
|
||||||
|
|
||||||
|
- name: Create a User
|
||||||
|
hosts: all
|
||||||
|
become: false
|
||||||
|
gather_facts: false
|
||||||
|
vars_files:
|
||||||
|
- vars/rabbitmq.yml
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
- name: "Checking for user variables"
|
||||||
|
assert:
|
||||||
|
that:
|
||||||
|
- username != "admin"
|
||||||
|
- username != "guest"
|
||||||
|
- username != "mq-monitoring"
|
||||||
|
success_msg: "Required variables provided"
|
||||||
|
fail_msg: "Username is reserved"
|
||||||
|
tags:
|
||||||
|
- rabbitmq
|
||||||
|
|
||||||
|
- name: "Creating User Account"
|
||||||
|
community.rabbitmq.rabbitmq_user:
|
||||||
|
user: "{{ username }}"
|
||||||
|
vhost: "{{ vhost }}"
|
||||||
|
read_priv: "^$"
|
||||||
|
write_priv: "amq\\.topic"
|
||||||
|
configure_priv: "^$"
|
||||||
|
state: present
|
||||||
|
tags:
|
||||||
|
- rabbitmq
|
||||||
|
...
|
14
collections/README.md
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
# Collections
|
||||||
|
|
||||||
|
If you want to use a collection specifically for this repository, you will need to define it in a `requirements.yml`; otherwise AWX will not install what you need to run your tasks.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```
|
||||||
|
---
|
||||||
|
# Collections
|
||||||
|
collections:
|
||||||
|
- netbox.netbox
|
||||||
|
- community.aws
|
||||||
|
- containers.podman
|
||||||
|
```
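For local runs outside AWX, the same requirements file can be consumed directly with `ansible-galaxy`; this mirrors what `init-rocky-ansible-host.yml` does, installing into the repository's own `collections` directory:

```
ansible-galaxy collection install -r collections/requirements.yml -p collections
```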
|
18
collections/requirements.yml
Normal file
@@ -0,0 +1,18 @@
---
collections:
  # freeipa
  - name: freeipa.ansible_freeipa
    version: 1.6.3
  - name: community.general
  - name: community.mysql
  - name: community.rabbitmq
  - name: ansible.posix
  - name: ansible.utils
  - name: ktdreyer.koji_ansible
  - name: netbox.netbox
  - name: community.aws
  - name: community.libvirt
  - name: containers.podman
  - name: nginxinc.nginx_core
    version: 0.3.0
...
2
defaults/main.yml
Normal file
@@ -0,0 +1,2 @@
---
# Defaults
1
files/README.md
Normal file
@@ -0,0 +1 @@
Files come here
1
files/etc/authselect/custom/sssd-rocky/CentOS-8-system-auth
Symbolic link
@@ -0,0 +1 @@
RedHat-8-system-auth
40
files/etc/authselect/custom/sssd-rocky/RedHat-8-system-auth
Normal file
@ -0,0 +1,40 @@
|
|||||||
|
{imply "with-smartcard" if "with-smartcard-required"}
|
||||||
|
auth required pam_env.so
|
||||||
|
auth required pam_faildelay.so delay=2000000
|
||||||
|
auth required pam_faillock.so preauth audit silent deny=5 unlock_time=900 {include if "with-faillock"}
|
||||||
|
auth [success=1 default=ignore] pam_succeed_if.so service notin login:gdm:xdm:kdm:xscreensaver:gnome-screensaver:kscreensaver quiet use_uid {include if "with-smartcard-required"}
|
||||||
|
auth [success=done ignore=ignore default=die] pam_sss.so require_cert_auth ignore_authinfo_unavail {include if "with-smartcard-required"}
|
||||||
|
auth sufficient pam_fprintd.so {include if "with-fingerprint"}
|
||||||
|
auth sufficient pam_u2f.so cue {include if "with-pam-u2f"}
|
||||||
|
auth required pam_u2f.so cue nouserok {include if "with-pam-u2f-2fa"}
|
||||||
|
auth [default=1 ignore=ignore success=ok] pam_usertype.so isregular
|
||||||
|
auth [default=1 ignore=ignore success=ok] pam_localuser.so {exclude if "with-smartcard"}
|
||||||
|
auth [default=2 ignore=ignore success=ok] pam_localuser.so {include if "with-smartcard"}
|
||||||
|
auth [success=done authinfo_unavail=ignore ignore=ignore default=die] pam_sss.so try_cert_auth {include if "with-smartcard"}
|
||||||
|
auth sufficient pam_unix.so {if not "without-nullok":nullok} try_first_pass
|
||||||
|
auth [default=1 ignore=ignore success=ok] pam_usertype.so isregular
|
||||||
|
auth sufficient pam_sss.so forward_pass
|
||||||
|
auth required pam_faillock.so authfail audit deny=5 unlock_time=900 fail_interval=900 {include if "with-faillock"}
|
||||||
|
auth required pam_deny.so
|
||||||
|
|
||||||
|
account required pam_access.so {include if "with-pamaccess"}
|
||||||
|
account required pam_faillock.so {include if "with-faillock"}
|
||||||
|
account required pam_unix.so
|
||||||
|
account sufficient pam_localuser.so {exclude if "with-files-access-provider"}
|
||||||
|
account sufficient pam_usertype.so issystem
|
||||||
|
account [default=bad success=ok user_unknown=ignore] pam_sss.so
|
||||||
|
account required pam_permit.so
|
||||||
|
|
||||||
|
password requisite pam_pwquality.so try_first_pass local_users_only minlen=14 dcredit=-1 lcredit=-1 ucredit=-1 ocredit=-1 retry=3
|
||||||
|
password requisite pam_pwhistory.so use_authtok remember=5
|
||||||
|
password sufficient pam_unix.so sha512 shadow {if not "without-nullok":nullok} try_first_pass use_authtok
|
||||||
|
password sufficient pam_sss.so use_authtok
|
||||||
|
password required pam_deny.so
|
||||||
|
|
||||||
|
session optional pam_keyinit.so revoke
|
||||||
|
session required pam_limits.so
|
||||||
|
-session optional pam_systemd.so
|
||||||
|
session optional pam_oddjob_mkhomedir.so umask=0077 {include if "with-mkhomedir"}
|
||||||
|
session [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid
|
||||||
|
session required pam_unix.so
|
||||||
|
session optional pam_sss.so
|
1
files/etc/authselect/custom/sssd-rocky/Rocky-8-system-auth
Symbolic link
@@ -0,0 +1 @@
RedHat-8-system-auth
1
files/etc/pam.d/CentOS-7-system-auth-ac
Symbolic link
@@ -0,0 +1 @@
RedHat-7-system-auth-ac
34
files/etc/pam.d/RedHat-7-system-auth-ac
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
#%PAM-1.0
|
||||||
|
# This file is auto-generated.
|
||||||
|
# User changes will be destroyed the next time authconfig is run.
|
||||||
|
auth required pam_env.so
|
||||||
|
auth required pam_faildelay.so delay=2000000
|
||||||
|
auth required pam_faillock.so preauth audit silent deny=5 unlock_time=900
|
||||||
|
auth [default=1 success=ok] pam_localuser.so
|
||||||
|
auth [success=done ignore=ignore default=bad] pam_unix.so nullok try_first_pass
|
||||||
|
auth requisite pam_succeed_if.so uid >= 1000 quiet_success
|
||||||
|
auth sufficient pam_sss.so forward_pass
|
||||||
|
auth [default=die] pam_faillock.so authfail audit deny=5 unlock_time=900
|
||||||
|
auth required pam_deny.so
|
||||||
|
|
||||||
|
account required pam_faillock.so
|
||||||
|
account required pam_unix.so
|
||||||
|
account sufficient pam_localuser.so
|
||||||
|
account sufficient pam_succeed_if.so uid < 1000 quiet
|
||||||
|
account [default=bad success=ok user_unknown=ignore] pam_sss.so
|
||||||
|
account required pam_permit.so
|
||||||
|
|
||||||
|
password requisite pam_pwquality.so try_first_pass minlen=14 dcredit=-1 lcredit=-1 ucredit=-1 ocredit=-1 local_users_only retry=3
|
||||||
|
password requisite pam_pwhistory.so use_authtok remember=5
|
||||||
|
password sufficient pam_unix.so sha512 shadow try_first_pass use_authtok
|
||||||
|
password sufficient pam_sss.so use_authtok
|
||||||
|
password required pam_deny.so
|
||||||
|
|
||||||
|
session optional pam_keyinit.so revoke
|
||||||
|
session required pam_limits.so
|
||||||
|
-session optional pam_systemd.so
|
||||||
|
session optional pam_oddjob_mkhomedir.so umask=0077
|
||||||
|
session [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid
|
||||||
|
session required pam_unix.so
|
||||||
|
session optional pam_sss.so
|
||||||
|
|
3
files/etc/rockybanner
Normal file
@@ -0,0 +1,3 @@
This is a Rocky Linux system

All access is logged and monitored. Unauthorized access is prohibited.
2
files/etc/sudoers.d/cis
Normal file
@@ -0,0 +1,2 @@
Defaults use_pty
Defaults logfile="/var/log/sudo.log"
16
files/etc/systemd/system/noggin.service
Normal file
@@ -0,0 +1,16 @@
[Unit]
Description=noggin
After=network-online.target
Wants=network-online.target

[Service]
Environment=FLASK_APP=/opt/noggin/noggin/noggin/app.py
Environment=NOGGIN_CONFIG_PATH=/opt/noggin/noggin.cfg
Environment=FLASK_DEBUG=1
User=noggin
WorkingDirectory=/opt/noggin/noggin
ExecStart=/bin/bash /opt/noggin/start_noggin.sh
PrivateTmp=true

[Install]
WantedBy=multi-user.target
185
files/usr/local/bin/dmidecode-pretty
Normal file
@ -0,0 +1,185 @@
|
|||||||
|
#!/usr/bin/perl -w
|
||||||
|
# Louis Abel <tucklesepk@gmail.com>
|
||||||
|
use strict;
|
||||||
|
|
||||||
|
# Check for version of dmidecode
|
||||||
|
my $dmi_test = `dmidecode -q 2>/dev/null; echo \$?`;
|
||||||
|
chomp($dmi_test);
|
||||||
|
our $dmi_ver = "rhel8";
|
||||||
|
our $dmidecode_cmd = "dmidecode -q";
|
||||||
|
if( $dmi_test eq "1" ) { $dmi_ver = "rhel4"; $dmidecode_cmd = "dmidecode"; }
|
||||||
|
|
||||||
|
# Figure out number of cores per cpu
|
||||||
|
my $c_cpuinfo = `grep -c processor /proc/cpuinfo`;
|
||||||
|
chomp($c_cpuinfo);
|
||||||
|
my $c_dmidecode = `$dmidecode_cmd | grep -c 'Processor Information'`;
|
||||||
|
chomp($c_dmidecode);
|
||||||
|
|
||||||
|
# Figure out hyperthreaded cores
|
||||||
|
my $htt;
|
||||||
|
my $lscpu_test = `lscpu 2>/dev/null; echo \$?`;
|
||||||
|
chomp($lscpu_test);
|
||||||
|
if( $lscpu_test eq "127" ) {
|
||||||
|
$htt = "Cannot Detect Threads";
|
||||||
|
} else {
|
||||||
|
$htt = `lscpu | awk -F':' '/Thread/ {print \$2}'`;
|
||||||
|
chomp($htt);
|
||||||
|
}
|
||||||
|
$htt =~ s/^\s+|\s+$//g;
|
||||||
|
|
||||||
|
my $cores;
|
||||||
|
if( $c_cpuinfo eq $c_dmidecode ) {
|
||||||
|
$cores = "single core";
|
||||||
|
} elsif ( $c_cpuinfo > $c_dmidecode ) {
|
||||||
|
my $num_cores = $c_cpuinfo / $c_dmidecode / $htt;
|
||||||
|
$cores = "$num_cores cores";
|
||||||
|
} else {
|
||||||
|
$cores = "failed to determine number of cores";
|
||||||
|
}
|
||||||
|
|
||||||
|
# Parse dmidecode output
|
||||||
|
our %manufacturer;
|
||||||
|
our %cpu;
|
||||||
|
our %memory;
|
||||||
|
our %network;
|
||||||
|
|
||||||
|
open( FH, "$dmidecode_cmd |") or die "Couldn't run $dmidecode_cmd: $!\n\n";
|
||||||
|
my ($section, $dim, $dim_size);
|
||||||
|
my $dims_used = 0;
|
||||||
|
my $dims_total = 0;
|
||||||
|
my $eths_section = 0;
|
||||||
|
my $eths_total = 0;
|
||||||
|
while( my $line = <FH> ) {
|
||||||
|
chomp($line);
|
||||||
|
|
||||||
|
# Store section information
|
||||||
|
if( $line =~ /^\S+/ ) { $section = $line; }
|
||||||
|
|
||||||
|
# Print Bios Information
|
||||||
|
if( $section eq "BIOS Information" || $section =~ /Handle 0x0000/ ) {
|
||||||
|
if( $line =~ /^\s+Version:\s+(.+)\s*$/ ) { $manufacturer{bios} = $1; }
|
||||||
|
}
|
||||||
|
|
||||||
|
# Print System Information
|
||||||
|
if( $section eq "System Information" || $section =~ /Handle 0x0100/ ) {
|
||||||
|
if( $line =~ /^\s+Manufacturer:\s+(.+)\s*$/ ) { if( $1 =~ /Dell Computer Corporation/ ) { $manufacturer{make} = "Dell Inc."; } else { $manufacturer{make} = $1; } }
|
||||||
|
if( $line =~ /^\s+Product Name:\s+(.+)\s*$/ ) { my $tmp = $1; $tmp =~ s/\s+$//g; $manufacturer{model} = $tmp; }
|
||||||
|
if( $line =~ /^\s+Serial Number:\s+(.+)\s*$/ ) { $manufacturer{serial} = $1; }
|
||||||
|
}
|
||||||
|
|
||||||
|
# Print Chassis Information
|
||||||
|
if( $section eq "Chassis Information" || $section =~ /Handle 0x0300/ ) {
|
||||||
|
if( $line =~ /^\s+Type:\s+(.+)\s*$/ ) { $manufacturer{chassis_type} = $1; }
|
||||||
|
if( $line =~ /^\s+Height:\s+(.+)\s*$/ ) { $manufacturer{chassis_height} = $1; }
|
||||||
|
}
|
||||||
|
|
||||||
|
# Print Processor Information
|
||||||
|
if( $section eq "Processor Information" || $section =~ /Handle 0x040/ ) {
|
||||||
|
if( $line =~ /^\s+Version:\s+(.+)\s*$/ ) {
|
||||||
|
my $cpu_model = $1;
|
||||||
|
|
||||||
|
if( $cpu_model =~ /Not Specified/ ) {
|
||||||
|
$cpu_model = `cat /proc/cpuinfo | grep 'model name' | awk -F: {'print \$2'} | head -n 1`;
|
||||||
|
chomp( $cpu_model );
|
||||||
|
$cpu_model =~ s/^\s*//g;
|
||||||
|
}
|
||||||
|
|
||||||
|
$cpu_model =~ s/\s+/ /g;
|
||||||
|
|
||||||
|
$cpu{physical} = $c_dmidecode;
|
||||||
|
$cpu{virtual} = $c_cpuinfo;
|
||||||
|
$cpu{model} = "$cpu_model ($cores) (Threads: $htt)";
|
||||||
|
}
|
||||||
|
|
||||||
|
if( $line =~ /^\s+Speed:\s+(.+)\s*$/ ) { $cpu{speed} = $1; }
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
# Print Physical Memory Array
|
||||||
|
if( $section eq "Physical Memory Array" || $section =~ /Handle 0x1000/ ) {
|
||||||
|
if( $line =~ /^\s+Error Correction Type:\s+(.+)\s*$/ ) { $memory{error} = $1; }
|
||||||
|
if( $line =~ /^\s+Maximum Capacity:\s+(.+)\s*$/ ) { $memory{max} = $1; }
|
||||||
|
if( $line =~ /^\s+Number Of Devices:\s+(.+)\s*$/ ) { $memory{count} = $1; }
|
||||||
|
}
|
||||||
|
|
||||||
|
# Print Memory Device
|
||||||
|
if( $section eq "Memory Device" || $section =~ /Handle 0x110/ ) {
|
||||||
|
if( $line =~ /^\s+Locator:\s+(.+)\s*$/ ) { $dim = $1; $dim =~ s/\s+//g; $dims_total++}
|
||||||
|
if( $line =~ /^\s+Size:\s+(.+)\s*$/ ) { $dim_size = $1; }
|
||||||
|
if( $line =~ /^\s+Speed:\s+(.+)\s*$/ ) { next if( $dim_size =~ /No Module Installed/ ); $memory{$dims_total}{location} = $dim; $memory{$dims_total}{size} = $dim_size; $memory{$dims_total}{speed} = $1; $dims_used++; }
|
||||||
|
if( $line =~ /^\s+Type:\s+(.+)\s*$/ ) { $memory{type} = $1; }
|
||||||
|
}
|
||||||
|
|
||||||
|
# Print Ethernet Devices
|
||||||
|
$network{total} = 0;
|
||||||
|
if( $section =~ /^On Board Device/ || $section =~ /Handle 0x0A00/ || $section =~ /^Onboard Device/ ) {
|
||||||
|
if( $line =~ /^\s+Type:\s+Ethernet\s*$/ ) { $eths_section = 1; $eths_total++; $network{total} = $eths_total; }
|
||||||
|
next if( $eths_section == 0 );
|
||||||
|
|
||||||
|
if( $line =~ /^\s+Status:\s+(.+)\s*$/ ) { $network{$eths_total}{status} = $1; }
|
||||||
|
if( $line =~ /^\s+Description:\s+(.+)\s*$/ ) { $network{$eths_total}{desc} = $1; }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
close(FH);
|
||||||
|
|
||||||
|
|
||||||
|
# Clean up missing data
|
||||||
|
$manufacturer{chassis_height} = "<UNKNOWN>" unless( defined($manufacturer{chassis_height}) );
|
||||||
|
$memory{used} = $dims_total;
|
||||||
|
|
||||||
|
#Print Data
|
||||||
|
print "Make: $manufacturer{make}\n";
|
||||||
|
print "Model: $manufacturer{model}\n";
|
||||||
|
print "Serial: $manufacturer{serial}\n";
|
||||||
|
print "Bios Rev: $manufacturer{bios}\n";
|
||||||
|
print "Chassis Type: $manufacturer{chassis_type}\n";
|
||||||
|
print "Chassis Height: $manufacturer{chassis_height}\n";
|
||||||
|
print "$cpu{physical} x $cpu{model}\n";
|
||||||
|
print_memory_info();
|
||||||
|
print_network_info();
|
||||||
|
|
||||||
|
|
||||||
|
#### Functions ####
|
||||||
|
|
||||||
|
sub print_memory_info {
|
||||||
|
my ($maxsize, $max_unit) = $memory{max} =~ /^\s*(\d+)\s*(\w+)\s*$/;
|
||||||
|
my $dim_count = $memory{count};
|
||||||
|
my $max_per_dim = $maxsize / $dim_count;
|
||||||
|
|
||||||
|
my $size_error = "";
|
||||||
|
my $speed_error = "";
|
||||||
|
my $common_size;
|
||||||
|
my $common_speed;
|
||||||
|
for( my $i = 1; $i < $dims_used + 1; $i++ ) {
|
||||||
|
my $size = $memory{$i}{size} || 0;
|
||||||
|
my $speed = $memory{$i}{speed} || 0;
|
||||||
|
|
||||||
|
if( defined($common_size) && $common_size ne $size ) { $size_error = 1; }
|
||||||
|
else { $common_size = $size; }
|
||||||
|
if( defined($common_speed) && $common_speed ne $speed ) { $speed_error = 2; }
|
||||||
|
else { $common_speed = $speed; }
|
||||||
|
}
|
||||||
|
|
||||||
|
my ($mem_size, $mem_unit) = $common_size =~ /^\s*(\d+)\s*(\w+)\s*$/;
|
||||||
|
my $total_mem_unit = "MB";
|
||||||
|
if( $mem_unit eq "MB" ) { $total_mem_unit = "GB"; }
|
||||||
|
my $mem_total = ($mem_size * $dims_used) * 1024 ;
|
||||||
|
|
||||||
|
if( $common_size =~ /(\d+\.\d{2})\d+/ ) { $common_size = $1; }
|
||||||
|
if( $mem_size >= 1024 ) { my $gb_size = $mem_size / 1024; $common_size = "$gb_size GB"; }
|
||||||
|
|
||||||
|
print "$common_size @ $common_speed x $dims_used = $mem_total $total_mem_unit";
|
||||||
|
if( $size_error || $speed_error ) { print " $size_error$speed_error"; }
|
||||||
|
print "\n";
|
||||||
|
|
||||||
|
if( $max_per_dim =~ /(\d+\.\d{2})\d+/ ) { $max_per_dim = $1; }
|
||||||
|
print "$max_per_dim $max_unit x $dim_count dims = $maxsize $max_unit maximum capacity\n";
|
||||||
|
print "$memory{type}\n$memory{error}\n";
|
||||||
|
}
|
||||||
|
|
||||||
|
sub print_network_info {
|
||||||
|
my $num_devices = $network{total};
|
||||||
|
for( my $i=1; $i < $num_devices + 1; $i++ ) {
|
||||||
|
print "$network{$i}{desc} [$network{$i}{status}]\n";
|
||||||
|
}
|
||||||
|
}
|
51
files/usr/local/bin/lock-wrapper
Normal file
@ -0,0 +1,51 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
# Borrowed from Fedora Infra for Rocky Linux
|
||||||
|
|
||||||
|
if [ $# -lt 2 ]; then
|
||||||
|
echo "Usage: $0 [name] [script]"
|
||||||
|
exit 1;
|
||||||
|
fi
|
||||||
|
|
||||||
|
NAME=$1
|
||||||
|
SCRIPT=$2
|
||||||
|
|
||||||
|
SILENT="no"
|
||||||
|
if [ $# -ge 3 -a "$3" == "--silent" ]; then
|
||||||
|
SILENT="yes"
|
||||||
|
shift
|
||||||
|
fi
|
||||||
|
|
||||||
|
shift 2
|
||||||
|
|
||||||
|
LOCKDIR="/var/tmp/$NAME"
|
||||||
|
PIDFILE="$LOCKDIR/pid"
|
||||||
|
|
||||||
|
function cleanup {
|
||||||
|
rm -rf "$LOCKDIR"
|
||||||
|
}
|
||||||
|
|
||||||
|
RESTORE_UMASK=$(umask -p)
|
||||||
|
umask 0077
|
||||||
|
mkdir "$LOCKDIR" >& /dev/null
|
||||||
|
if [ $? != 0 ]; then
|
||||||
|
PID=$(cat "$PIDFILE")
|
||||||
|
if [ -n "$PID" ] && /bin/ps $PID > /dev/null
|
||||||
|
then
|
||||||
|
if [ "$SILENT" != "yes" ]; then
|
||||||
|
echo "$PID is still running"
|
||||||
|
/bin/ps -o user,pid,start,time,comm $PID
|
||||||
|
fi
|
||||||
|
exit 1;
|
||||||
|
else
|
||||||
|
echo "$LOCKDIR exists but $PID is dead"
|
||||||
|
echo "Removing lockdir and re-running"
|
||||||
|
/bin/rm -rf $LOCKDIR
|
||||||
|
mkdir $LOCKDIR || exit
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
trap cleanup EXIT SIGQUIT SIGHUP SIGTERM
|
||||||
|
echo $$ > "$PIDFILE"
|
||||||
|
|
||||||
|
$RESTORE_UMASK
|
||||||
|
eval "$SCRIPT $*"
|
49
handlers/main.yml
Normal file
@ -0,0 +1,49 @@
|
|||||||
|
---
|
||||||
|
# Handlers
|
||||||
|
- name: restart_sshd
|
||||||
|
service:
|
||||||
|
name: sshd
|
||||||
|
state: restarted
|
||||||
|
|
||||||
|
- name: restart_httpd
|
||||||
|
service:
|
||||||
|
name: httpd
|
||||||
|
state: restarted
|
||||||
|
|
||||||
|
- name: restart_nginx
|
||||||
|
service:
|
||||||
|
name: nginx
|
||||||
|
state: restarted
|
||||||
|
|
||||||
|
- name: reload_networkmanager
|
||||||
|
service:
|
||||||
|
name: NetworkManager
|
||||||
|
state: reloaded
|
||||||
|
|
||||||
|
- name: regenerate_auditd_rules
|
||||||
|
command: /sbin/augenrules
|
||||||
|
|
||||||
|
- name: reload_chrony
|
||||||
|
systemd:
|
||||||
|
name: "{{ chrony_service_name }}"
|
||||||
|
state: restarted
|
||||||
|
listen: "chrony service restart"
|
||||||
|
|
||||||
|
- name: restart_gitlab
|
||||||
|
command: gitlab-ctl reconfigure
|
||||||
|
register: gitlab_restart
|
||||||
|
failed_when: gitlab_restart_handler_failed_when | bool
|
||||||
|
|
||||||
|
- name: restart_noggin
|
||||||
|
service:
|
||||||
|
name: noggin
|
||||||
|
state: restarted
|
||||||
|
|
||||||
|
- name: rehash_postfix_sasl
|
||||||
|
command: "postmap /etc/postfix/sasl_passwd"
|
||||||
|
|
||||||
|
- name: restart_postfix
|
||||||
|
service:
|
||||||
|
name: postfix
|
||||||
|
state: restarted
|
||||||
|
...
|
35
init-rocky-account-services.yml
Normal file
@ -0,0 +1,35 @@
|
|||||||
|
---
|
||||||
|
# Preps a system to be part of Account Services
|
||||||
|
- name: Configure Account Services
|
||||||
|
hosts: all
|
||||||
|
become: true
|
||||||
|
|
||||||
|
handlers:
|
||||||
|
- import_tasks: handlers/main.yml
|
||||||
|
|
||||||
|
pre_tasks:
|
||||||
|
- name: Check if ansible cannot be run here
|
||||||
|
stat:
|
||||||
|
path: /etc/no-ansible
|
||||||
|
register: no_ansible
|
||||||
|
|
||||||
|
- name: Verify if we can run ansible
|
||||||
|
assert:
|
||||||
|
that:
|
||||||
|
- "not no_ansible.stat.exists"
|
||||||
|
success_msg: "We are not able to run on this node"
|
||||||
|
fail_msg: "/etc/no-ansible exists - skipping run on this node"
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
- name: Deploy Account Services
|
||||||
|
import_tasks: tasks/account_services.yml
|
||||||
|
|
||||||
|
post_tasks:
|
||||||
|
- name: Touching run file that ansible has run here
|
||||||
|
file:
|
||||||
|
path: /var/log/ansible.run
|
||||||
|
state: touch
|
||||||
|
mode: '0644'
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
...
|
57
init-rocky-ansible-host.yml
Normal file
@ -0,0 +1,57 @@
|
|||||||
|
---
|
||||||
|
|
||||||
|
- hosts: localhost
|
||||||
|
connection: local
|
||||||
|
vars:
|
||||||
|
force_purge: true
|
||||||
|
roles_installation_dir: roles/public
|
||||||
|
collection_installation_dir: collections
|
||||||
|
installation_prefix: ./
|
||||||
|
pre_tasks:
|
||||||
|
# example prepare ansible box for execution
|
||||||
|
# - name: install required pip modules on the host running ansible
|
||||||
|
# pip:
|
||||||
|
# name:
|
||||||
|
# - jmespath
|
||||||
|
# - netaddr
|
||||||
|
# - python-consul
|
||||||
|
# - pyvmomi
|
||||||
|
# - python-ldap
|
||||||
|
# - twine
|
||||||
|
|
||||||
|
- name: Remove existing public roles
|
||||||
|
file:
|
||||||
|
path: "{{ installation_prefix }}{{ roles_installation_dir }}"
|
||||||
|
state: absent
|
||||||
|
when: force_purge | bool
|
||||||
|
|
||||||
|
- name: Install all public roles
|
||||||
|
command: >
|
||||||
|
ansible-galaxy role install
|
||||||
|
{{ ( force_purge | bool ) | ternary('--force','') }}
|
||||||
|
--role-file {{ installation_prefix }}roles/requirements.yml
|
||||||
|
--roles-path {{ installation_prefix }}{{ roles_installation_dir }}
|
||||||
|
register: galaxy_install_role
|
||||||
|
changed_when: '"Installing " in galaxy_install_role.stdout'
|
||||||
|
|
||||||
|
- name: Install needed collections
|
||||||
|
command: >
|
||||||
|
ansible-galaxy collection install
|
||||||
|
{{ ( force_purge | bool ) | ternary('--force-with-deps','') }}
|
||||||
|
-r {{ installation_prefix }}collections/requirements.yml
|
||||||
|
-p {{ installation_prefix }}{{ collection_installation_dir }}
|
||||||
|
register: galaxy_install_collection
|
||||||
|
changed_when: '"Installing " in galaxy_install_collection.stdout'
|
||||||
|
|
||||||
|
- name: cleanup old ssh known_hosts - remove
|
||||||
|
file:
|
||||||
|
path: "../tmp/known_hosts"
|
||||||
|
state: absent
|
||||||
|
mode: "0644"
|
||||||
|
|
||||||
|
- name: cleanup old ssh known_hosts - blank
|
||||||
|
file:
|
||||||
|
path: "../tmp/known_hosts"
|
||||||
|
state: touch
|
||||||
|
mode: "0644"
|
||||||
|
...
|
58
init-rocky-bugzilla.yml
Normal file
@ -0,0 +1,58 @@
|
|||||||
|
---
|
||||||
|
# Installs Bugzilla
|
||||||
|
- name: Configure Bugzilla
|
||||||
|
hosts: all
|
||||||
|
become: true
|
||||||
|
vars_files:
|
||||||
|
- vars/common.yml
|
||||||
|
- vars/bugzilla.yml
|
||||||
|
|
||||||
|
handlers:
|
||||||
|
- import_tasks: handlers/main.yml
|
||||||
|
|
||||||
|
pre_tasks:
|
||||||
|
- name: Check if ansible cannot be run here
|
||||||
|
stat:
|
||||||
|
path: /etc/no-ansible
|
||||||
|
register: no_ansible
|
||||||
|
|
||||||
|
- name: Verify if we can run ansible
|
||||||
|
assert:
|
||||||
|
that:
|
||||||
|
- "not no_ansible.stat.exists"
|
||||||
|
success_msg: "We are not able to run on this node"
|
||||||
|
fail_msg: "/etc/no-ansible exists - skipping run on this node"
|
||||||
|
|
||||||
|
roles:
|
||||||
|
- role: rockylinux.ipagetcert
|
||||||
|
state: present
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
- name: Deploy Bugzilla
|
||||||
|
import_tasks: tasks/bugzilla.yml
|
||||||
|
|
||||||
|
post_tasks:
|
||||||
|
- name: Open firewalld ports
|
||||||
|
ansible.posix.firewalld:
|
||||||
|
service: "{{ item }}"
|
||||||
|
permanent: true
|
||||||
|
immediate: true
|
||||||
|
state: enabled
|
||||||
|
with_items:
|
||||||
|
- http
|
||||||
|
- https
|
||||||
|
|
||||||
|
- name: Ensure httpd is enabled and running
|
||||||
|
service:
|
||||||
|
name: httpd
|
||||||
|
enabled: true
|
||||||
|
state: started
|
||||||
|
|
||||||
|
- name: Touching run file that ansible has run here
|
||||||
|
file:
|
||||||
|
path: /var/log/ansible.run
|
||||||
|
state: touch
|
||||||
|
mode: '0644'
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
...
|
41
init-rocky-chrony.yml
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
---
|
||||||
|
# Sets Up Chrony Server/Client
|
||||||
|
# Created: @derekmpage
|
||||||
|
# Kudos: @danielkubat @Darkbat91
|
||||||
|
# Fixes: @nazunalika
|
||||||
|
- name: Rocky Chrony Runbook
|
||||||
|
hosts: all
|
||||||
|
become: true
|
||||||
|
vars_files:
|
||||||
|
- vars/chrony.yml
|
||||||
|
|
||||||
|
# This is to try to avoid the handler issue in pre/post tasks
|
||||||
|
handlers:
|
||||||
|
- import_tasks: handlers/main.yml
|
||||||
|
|
||||||
|
pre_tasks:
|
||||||
|
- name: Check if ansible cannot be run here
|
||||||
|
stat:
|
||||||
|
path: /etc/no-ansible
|
||||||
|
register: no_ansible
|
||||||
|
|
||||||
|
- name: Verify if we can run ansible
|
||||||
|
assert:
|
||||||
|
that:
|
||||||
|
- "not no_ansible.stat.exists"
|
||||||
|
success_msg: "We are able to run on this node"
|
||||||
|
fail_msg: "/etc/no-ansible exists - skipping run on this node"
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
- name: Configure Chrony
|
||||||
|
import_tasks: tasks/chrony.yml
|
||||||
|
|
||||||
|
post_tasks:
|
||||||
|
- name: Touching run file that ansible has run here
|
||||||
|
file:
|
||||||
|
path: /var/log/ansible.run
|
||||||
|
state: touch
|
||||||
|
mode: '0644'
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
...
|
60
init-rocky-install-kvm-hosts.yml
Normal file
@ -0,0 +1,60 @@
|
|||||||
|
---
|
||||||
|
# Creates a standalone KVM host
|
||||||
|
# Created: @SherifNagy
|
||||||
|
# Modified to current standards: @nazunalika
|
||||||
|
- name: Configure KVM host
|
||||||
|
hosts: kvm
|
||||||
|
become: true
|
||||||
|
|
||||||
|
pre_tasks:
|
||||||
|
- name: Check if ansible cannot be run here
|
||||||
|
stat:
|
||||||
|
path: /etc/no-ansible
|
||||||
|
register: no_ansible
|
||||||
|
|
||||||
|
- name: Verify if we can run ansible
|
||||||
|
assert:
|
||||||
|
that:
|
||||||
|
- "not no_ansible.stat.exists"
|
||||||
|
success_msg: "We are able to run on this node"
|
||||||
|
fail_msg: "/etc/no-ansible exists - skipping run on this node"
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
- name: Check for CPU Virtualization
|
||||||
|
shell: "set -o pipefail; lscpu | grep -i virtualization"
|
||||||
|
register: result
|
||||||
|
changed_when: false
|
||||||
|
failed_when: "result.rc != 0"
|
||||||
|
|
||||||
|
# Install KVM packages
|
||||||
|
- name: Installing KVM Packages
|
||||||
|
package:
|
||||||
|
name:
|
||||||
|
- qemu-kvm
|
||||||
|
- libvirt
|
||||||
|
- libvirt-python
|
||||||
|
- libguestfs-tools
|
||||||
|
- virt-install
|
||||||
|
state: present
|
||||||
|
|
||||||
|
- name: Enable and Start libvirtd
|
||||||
|
systemd:
|
||||||
|
name: libvirtd
|
||||||
|
state: started
|
||||||
|
enabled: true
|
||||||
|
|
||||||
|
- name: Verify KVM module is loaded
|
||||||
|
shell: "set -o pipefail; lsmod | grep -i kvm"
|
||||||
|
register: result
|
||||||
|
changed_when: false
|
||||||
|
failed_when: "result.rc != 0"
|
||||||
|
|
||||||
|
post_tasks:
|
||||||
|
- name: Touching run file that ansible has run here
|
||||||
|
file:
|
||||||
|
path: /var/log/ansible.run
|
||||||
|
state: touch
|
||||||
|
mode: '0644'
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
...
|
59
init-rocky-mantisbt.yml
Normal file
@ -0,0 +1,59 @@
|
|||||||
|
---
|
||||||
|
# Installs the mantis bug tracker
|
||||||
|
# This requires information from the vault
|
||||||
|
- name: Configure MantisBT
|
||||||
|
hosts: all
|
||||||
|
become: true
|
||||||
|
vars_files:
|
||||||
|
- vars/common.yml
|
||||||
|
- vars/mantis.yml
|
||||||
|
|
||||||
|
handlers:
|
||||||
|
- import_tasks: handlers/main.yml
|
||||||
|
|
||||||
|
pre_tasks:
|
||||||
|
- name: Check if ansible cannot be run here
|
||||||
|
stat:
|
||||||
|
path: /etc/no-ansible
|
||||||
|
register: no_ansible
|
||||||
|
|
||||||
|
- name: Verify if we can run ansible
|
||||||
|
assert:
|
||||||
|
that:
|
||||||
|
- "not no_ansible.stat.exists"
|
||||||
|
success_msg: "We are not able to run on this node"
|
||||||
|
fail_msg: "/etc/no-ansible exists - skipping run on this node"
|
||||||
|
|
||||||
|
roles:
|
||||||
|
- role: rockylinux.ipagetcert
|
||||||
|
state: present
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
- name: Deploy Mantis
|
||||||
|
import_tasks: tasks/mantis.yml
|
||||||
|
|
||||||
|
post_tasks:
|
||||||
|
- name: Open firewalld ports
|
||||||
|
ansible.posix.firewalld:
|
||||||
|
service: "{{ item }}"
|
||||||
|
permanent: true
|
||||||
|
immediate: true
|
||||||
|
state: enabled
|
||||||
|
with_items:
|
||||||
|
- http
|
||||||
|
- https
|
||||||
|
|
||||||
|
- name: Ensure httpd is enabled and running
|
||||||
|
service:
|
||||||
|
name: httpd
|
||||||
|
enabled: true
|
||||||
|
state: started
|
||||||
|
|
||||||
|
- name: Touching run file that ansible has run here
|
||||||
|
file:
|
||||||
|
path: /var/log/ansible.run
|
||||||
|
state: touch
|
||||||
|
mode: '0644'
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
...
|
41
init-rocky-noggin-theme.yml
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
---
|
||||||
|
# (Re)deploys the noggin theme
|
||||||
|
- name: Deploy Noggin Theme
|
||||||
|
hosts: all
|
||||||
|
become: true
|
||||||
|
|
||||||
|
handlers:
|
||||||
|
- import_tasks: handlers/main.yml
|
||||||
|
|
||||||
|
pre_tasks:
|
||||||
|
- name: Check if ansible cannot be run here
|
||||||
|
stat:
|
||||||
|
path: /etc/no-ansible
|
||||||
|
register: no_ansible
|
||||||
|
|
||||||
|
- name: Verify if we can run ansible
|
||||||
|
assert:
|
||||||
|
that:
|
||||||
|
- "not no_ansible.stat.exists"
|
||||||
|
success_msg: "We are not able to run on this node"
|
||||||
|
fail_msg: "/etc/no-ansible exists - skipping run on this node"
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
- name: Deploy Rocky Noggin Theme
|
||||||
|
git:
|
||||||
|
repo: https://github.com/rocky-linux/noggin-theme.git
|
||||||
|
dest: /opt/noggin/noggin/noggin/themes/rocky
|
||||||
|
update: true
|
||||||
|
version: main
|
||||||
|
become_user: noggin
|
||||||
|
notify: restart_noggin
|
||||||
|
|
||||||
|
post_tasks:
|
||||||
|
- name: Touching run file that ansible has run here
|
||||||
|
file:
|
||||||
|
path: /var/log/ansible.run
|
||||||
|
state: touch
|
||||||
|
mode: '0644'
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
...
|
35
init-rocky-noggin.yml
Normal file
@ -0,0 +1,35 @@
|
|||||||
|
---
|
||||||
|
# (Re)deploys noggin
|
||||||
|
- name: Deploy Noggin
|
||||||
|
hosts: all
|
||||||
|
become: true
|
||||||
|
|
||||||
|
handlers:
|
||||||
|
- import_tasks: handlers/main.yml
|
||||||
|
|
||||||
|
pre_tasks:
|
||||||
|
- name: Check if ansible cannot be run here
|
||||||
|
stat:
|
||||||
|
path: /etc/no-ansible
|
||||||
|
register: no_ansible
|
||||||
|
|
||||||
|
- name: Verify if we can run ansible
|
||||||
|
assert:
|
||||||
|
that:
|
||||||
|
- "not no_ansible.stat.exists"
|
||||||
|
success_msg: "We are not able to run on this node"
|
||||||
|
fail_msg: "/etc/no-ansible exists - skipping run on this node"
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
- name: Deploy Noggin
|
||||||
|
import_tasks: "tasks/noggin.yml"
|
||||||
|
|
||||||
|
post_tasks:
|
||||||
|
- name: Touching run file that ansible has run here
|
||||||
|
file:
|
||||||
|
path: /var/log/ansible.run
|
||||||
|
state: touch
|
||||||
|
mode: '0644'
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
...
|
53
init-rocky-openqa-developer-host.yml
Normal file
@ -0,0 +1,53 @@
|
|||||||
|
# Sets up local OpenQA testing environment
|
||||||
|
# This playbook is *NOT* intended for WAN-facing systems!
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# # Install and configure an openQA developer host, download all current Rocky ISOs,
|
||||||
|
# # and POST a test job
|
||||||
|
# ansible-playbook playbooks/init-rocky-openqa-developer-host.yml
|
||||||
|
#
|
||||||
|
# # Only perform ISO download tasks
|
||||||
|
# ansible-playbook playbooks/init-rocky-openqa-developer-host.yml --tags=download_isos
|
||||||
|
#
|
||||||
|
# # Only perform configuration, do not download ISOs or POST a job
|
||||||
|
# ansible-playbook playbooks/init-rocky-openqa-developer-host.yml --tags=configure
|
||||||
|
#
|
||||||
|
# Created: @akatch
|
||||||
|
---
|
||||||
|
- name: Rocky OpenQA Runbook
|
||||||
|
hosts: localhost
|
||||||
|
connection: local
|
||||||
|
become: true
|
||||||
|
vars_files:
|
||||||
|
- vars/openqa.yml
|
||||||
|
|
||||||
|
# This is to try to avoid the handler issue in pre/post tasks
|
||||||
|
handlers:
|
||||||
|
- import_tasks: handlers/main.yml
|
||||||
|
|
||||||
|
pre_tasks:
|
||||||
|
- name: Check if ansible cannot be run here
|
||||||
|
stat:
|
||||||
|
path: /etc/no-ansible
|
||||||
|
register: no_ansible
|
||||||
|
|
||||||
|
- name: Verify if we can run ansible
|
||||||
|
assert:
|
||||||
|
that:
|
||||||
|
- "not no_ansible.stat.exists"
|
||||||
|
success_msg: "We are able to run on this node"
|
||||||
|
fail_msg: "/etc/no-ansible exists - skipping run on this node"
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
- name: Install and configure OpenQA
|
||||||
|
import_tasks: tasks/openqa.yml
|
||||||
|
|
||||||
|
post_tasks:
|
||||||
|
- name: Touching run file that ansible has run here
|
||||||
|
file:
|
||||||
|
path: /var/log/ansible.run
|
||||||
|
state: touch
|
||||||
|
mode: '0644'
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
...
|
37
init-rocky-postfix-relay.yml
Normal file
@ -0,0 +1,37 @@
|
|||||||
|
---
|
||||||
|
# Configures postfix on a system to relay mail
|
||||||
|
# NOTE: smtp vars will be in vaults - originally they were available directly
|
||||||
|
# on the ansible host. This was never a viable or secure option.
|
||||||
|
- name: Configure Postfix Relay
|
||||||
|
hosts: all
|
||||||
|
become: true
|
||||||
|
|
||||||
|
handlers:
|
||||||
|
- import_tasks: handlers/main.yml
|
||||||
|
|
||||||
|
pre_tasks:
|
||||||
|
- name: Check if ansible cannot be run here
|
||||||
|
stat:
|
||||||
|
path: /etc/no-ansible
|
||||||
|
register: no_ansible
|
||||||
|
|
||||||
|
- name: Verify if we can run ansible
|
||||||
|
assert:
|
||||||
|
that:
|
||||||
|
- "not no_ansible.stat.exists"
|
||||||
|
success_msg: "We are not able to run on this node"
|
||||||
|
fail_msg: "/etc/no-ansible exists - skipping run on this node"
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
- name: Deploy Postfix Relay
|
||||||
|
import_tasks: tasks/postfix_relay.yml
|
||||||
|
|
||||||
|
post_tasks:
|
||||||
|
- name: Touching run file that ansible has run here
|
||||||
|
file:
|
||||||
|
path: /var/log/ansible.run
|
||||||
|
state: touch
|
||||||
|
mode: '0644'
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
...
|
35
init-rocky-repo-servers.yml
Normal file
@ -0,0 +1,35 @@
|
|||||||
|
---
|
||||||
|
# Preps a system to be a repository
|
||||||
|
- name: Configure repository system
|
||||||
|
hosts: all
|
||||||
|
become: true
|
||||||
|
|
||||||
|
handlers:
|
||||||
|
- import_tasks: handlers/main.yml
|
||||||
|
|
||||||
|
pre_tasks:
|
||||||
|
- name: Check if ansible cannot be run here
|
||||||
|
stat:
|
||||||
|
path: /etc/no-ansible
|
||||||
|
register: no_ansible
|
||||||
|
|
||||||
|
- name: Verify if we can run ansible
|
||||||
|
assert:
|
||||||
|
that:
|
||||||
|
- "not no_ansible.stat.exists"
|
||||||
|
success_msg: "We are not able to run on this node"
|
||||||
|
fail_msg: "/etc/no-ansible exists - skipping run on this node"
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
- name: Configure repository system
|
||||||
|
import_tasks: tasks/repository.yml
|
||||||
|
|
||||||
|
post_tasks:
|
||||||
|
- name: Touching run file that ansible has run here
|
||||||
|
file:
|
||||||
|
path: /var/log/ansible.run
|
||||||
|
state: touch
|
||||||
|
mode: '0644'
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
...
|
57
init-rocky-system-config.yml
Normal file
@ -0,0 +1,57 @@
|
|||||||
|
---
|
||||||
|
# Basic system configuration. All hardening should also be imported here.
|
||||||
|
# Use --extra-vars="host=..." and specify a hostname in the inventory or
|
||||||
|
# provide an ansible host group name. You can also just use "all" if you
|
||||||
|
# want to ensure all systems are up to date on the configuration.
|
||||||
|
- name: Configure system
|
||||||
|
hosts: all
|
||||||
|
become: true
|
||||||
|
|
||||||
|
# This is to try to avoid the handler issue in pre/post tasks
|
||||||
|
handlers:
|
||||||
|
- import_tasks: handlers/main.yml
|
||||||
|
|
||||||
|
pre_tasks:
|
||||||
|
- name: Check if ansible cannot be run here
|
||||||
|
stat:
|
||||||
|
path: /etc/no-ansible
|
||||||
|
register: no_ansible
|
||||||
|
|
||||||
|
- name: Verify if we can run ansible
|
||||||
|
assert:
|
||||||
|
that:
|
||||||
|
- "not no_ansible.stat.exists"
|
||||||
|
success_msg: "We are able to run on this node"
|
||||||
|
fail_msg: "/etc/no-ansible exists - skipping run on this node"
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
- name: Loading Variables from OS Common
|
||||||
|
import_tasks: tasks/variable_loader_common.yml
|
||||||
|
|
||||||
|
- name: Configure SSH
|
||||||
|
import_tasks: tasks/ssh_config.yml
|
||||||
|
|
||||||
|
- name: Configure harden settings
|
||||||
|
import_tasks: tasks/harden.yml
|
||||||
|
|
||||||
|
- name: Configure PAM
|
||||||
|
import_tasks: tasks/authentication.yml
|
||||||
|
|
||||||
|
- name: Configure auditd
|
||||||
|
import_tasks: tasks/auditd.yml
|
||||||
|
|
||||||
|
- name: Configure grub
|
||||||
|
import_tasks: tasks/grub.yml
|
||||||
|
|
||||||
|
- name: Configure common scripts
|
||||||
|
import_tasks: tasks/scripts.yml
|
||||||
|
|
||||||
|
post_tasks:
|
||||||
|
- name: Touching run file that ansible has run here
|
||||||
|
file:
|
||||||
|
path: /var/log/ansible.run
|
||||||
|
state: touch
|
||||||
|
mode: '0644'
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
...
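Following the playbook's header comment, a run scoped to part of the inventory might look like this; the group name is a placeholder, and since the play targets `hosts: all` as written, `--limit` is the switch that actually narrows the run:

```
ansible-playbook init-rocky-system-config.yml --limit repo_servers
```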
|
76
local-ansible.cfg
Normal file
@ -0,0 +1,76 @@
|
|||||||
|
[defaults]
|
||||||
|
|
||||||
|
########################################
|
||||||
|
# Display settings
|
||||||
|
########################################
|
||||||
|
|
||||||
|
# Output display
|
||||||
|
force_color = 1
|
||||||
|
nocows = True
|
||||||
|
|
||||||
|
|
||||||
|
# Note: http://docs.ansible.com/ansible/intro_configuration.html#ansible-managed
|
||||||
|
ansible_managed = Ansible managed
|
||||||
|
#ansible_managed = Ansible managed - {file} on {host}
|
||||||
|
|
||||||
|
|
||||||
|
# Warn when Ansible thinks it is better to use a module.
|
||||||
|
# Note: http://docs.ansible.com/ansible/intro_configuration.html#id88
|
||||||
|
command_warnings = True
|
||||||
|
|
||||||
|
# Enable this to debug tasks calls
|
||||||
|
display_args_to_stdout = False
|
||||||
|
display_skipped_hosts = false
|
||||||
|
|
||||||
|
########################################
|
||||||
|
# Playbook settings
|
||||||
|
########################################
|
||||||
|
|
||||||
|
|
||||||
|
# Default strategy
|
||||||
|
strategy = free
|
||||||
|
|
||||||
|
# Number of hosts processed in parallel
|
||||||
|
forks = 20
|
||||||
|
|
||||||
|
|
||||||
|
########################################
|
||||||
|
# Behaviour settings
|
||||||
|
########################################
|
||||||
|
|
||||||
|
|
||||||
|
# Create .retry files when a playbook run fails
|
||||||
|
retry_files_enabled = True
|
||||||
|
|
||||||
|
# Fact options
|
||||||
|
gathering = smart
|
||||||
|
#gathering = !all
|
||||||
|
#gathering = smart,network,hardware,virtual,ohai,facter
|
||||||
|
#gathering = network,!hardware,virtual,!ohai,!facter
|
||||||
|
|
||||||
|
# facts caching
|
||||||
|
#fact_caching_connection = tmp/facts_cache
|
||||||
|
#fact_caching = json
|
||||||
|
fact_caching = memory
|
||||||
|
fact_caching_timeout = 1800
|
||||||
|
|
||||||
|
# Enable or disable logs
|
||||||
|
# Note put to false in prod
|
||||||
|
no_log = False
|
||||||
|
|
||||||
|
|
||||||
|
########################################
|
||||||
|
# Common destinations
|
||||||
|
########################################
|
||||||
|
|
||||||
|
log_path = tmp/ansible.log
|
||||||
|
known_hosts = tmp/known_hosts
|
||||||
|
roles_path = roles/local:roles/public
|
||||||
|
collections_paths = collections
|
||||||
|
|
||||||
|
########################################
|
||||||
|
# SSH Configuration
|
||||||
|
########################################
|
||||||
|
[ssh_connection]
|
||||||
|
# Disable GSSAPI, which slows down SSH connections for ansible
|
||||||
|
ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s -o GSSAPIAuthentication=no
|
32
mantis.yml
Normal file
@ -0,0 +1,32 @@
|
|||||||
|
---
|
||||||
|
# mantis vars
|
||||||
|
mantis_version: 2.25.0
|
||||||
|
mantis_checksum: "sha256:d8973d3677ecb2ccbfee95e2267b3128049fbdcc59aa1f007686a342d93a4c0a"
|
||||||
|
mantis_pkg:
|
||||||
|
- php
|
||||||
|
- php-ldap
|
||||||
|
- httpd
|
||||||
|
- mod_ssl
|
||||||
|
- php-pgsql
|
||||||
|
- php-mbstring
|
||||||
|
- php-curl
|
||||||
|
- openldap
|
||||||
|
- php-json
|
||||||
|
mantis_db_host: db.rockylinux.org
|
||||||
|
mantis_db_name: mantisdb
|
||||||
|
mantis_db_user: mantis
|
||||||
|
mantis_binder_user: "{{ rocky_ldap_bind_dn }}"
|
||||||
|
mantis_binder_pass: "{{ rocky_ldap_bind_pw }}"
|
||||||
|
|
||||||
|
# Vault
|
||||||
|
# mantis_db_pass: ThisIsNotThePassword!
|
||||||
|
|
||||||
|
ipa_getcert_requested_hostnames:
|
||||||
|
- name: "{{ ansible_fqdn }}"
|
||||||
|
owner: apache
|
||||||
|
key_location: "/etc/pki/tls/private/bugs.rockylinux.org.key"
|
||||||
|
cert_location: "/etc/pki/tls/certs/bugs.rockylinux.org.crt"
|
||||||
|
postcmd: "/bin/systemctl reload httpd"
|
||||||
|
cnames:
|
||||||
|
- "bugs.rockylinux.org"
|
||||||
|
...
|
41
role-rocky-bootstrap_staging.yml
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
---
|
||||||
|
# Manage bootstrap hosts
|
||||||
|
#
|
||||||
|
- name: Manage and configure bootstrap hosts
|
||||||
|
hosts: all
|
||||||
|
become: true
|
||||||
|
vars_files:
|
||||||
|
- vars/mounts/bootstrap_staging.yml
|
||||||
|
|
||||||
|
# This is to try to avoid the handler issue in pre/post tasks
|
||||||
|
handlers:
|
||||||
|
- import_tasks: handlers/main.yml
|
||||||
|
|
||||||
|
pre_tasks:
|
||||||
|
- name: Check if ansible cannot be run here
|
||||||
|
stat:
|
||||||
|
path: /etc/no-ansible
|
||||||
|
register: no_ansible
|
||||||
|
|
||||||
|
- name: Verify if we can run ansible
|
||||||
|
assert:
|
||||||
|
that:
|
||||||
|
- "not no_ansible.stat.exists"
|
||||||
|
success_msg: "We are able to run on this node"
|
||||||
|
fail_msg: "/etc/no-ansible exists - skipping run on this node"
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
- include_tasks: tasks/efs_mount.yml
|
||||||
|
loop: "{{ mounts }}"
|
||||||
|
|
||||||
|
- include_tasks: tasks/srpmproc.yml
|
||||||
|
|
||||||
|
post_tasks:
|
||||||
|
- name: Touching run file that ansible has run here
|
||||||
|
file:
|
||||||
|
path: /var/log/ansible.run
|
||||||
|
state: touch
|
||||||
|
mode: '0644'
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
...
|
67
role-rocky-graylog.yml
Normal file
@ -0,0 +1,67 @@
|
|||||||
|
---
|
||||||
|
# Configure and setup graylog
|
||||||
|
# Recommended specs
|
||||||
|
# CPU: 2 cores
|
||||||
|
# Memory: 4GB
|
||||||
|
# Storage: Yes
|
||||||
|
- name: Install Graylog
|
||||||
|
hosts: all
|
||||||
|
become: true
|
||||||
|
vars_files:
|
||||||
|
# Vaults required
|
||||||
|
# vars/vaults/encpass.yml
|
||||||
|
# vars/vaults/hostman.yml
|
||||||
|
- vars/graylog.yml
|
||||||
|
|
||||||
|
# This is to try to avoid the handler issue in pre/post tasks
|
||||||
|
handlers:
|
||||||
|
- import_tasks: handlers/main.yml
|
||||||
|
|
||||||
|
pre_tasks:
|
||||||
|
- name: Check if ansible cannot be run here
|
||||||
|
stat:
|
||||||
|
path: /etc/no-ansible
|
||||||
|
register: no_ansible
|
||||||
|
|
||||||
|
- name: Verify if we can run ansible
|
||||||
|
assert:
|
||||||
|
that:
|
||||||
|
- "not no_ansible.stat.exists"
|
||||||
|
success_msg: "We are able to run on this node"
|
||||||
|
fail_msg: "/etc/no-ansible exists - skipping run on this node"
|
||||||
|
|
||||||
|
- name: Install SELinux packages
|
||||||
|
package:
|
||||||
|
name: python3-policycoreutils.noarch
|
||||||
|
state: present
|
||||||
|
|
||||||
|
- name: "Creating DNS Record for ord-prod-graylog.rockylinux.org"
|
||||||
|
freeipa.ansible_freeipa.ipadnsrecord:
|
||||||
|
ipaadmin_principal: "{{ ipa_admin|default('admin') }}"
|
||||||
|
ipaadmin_password: "{{ ipaadmin_password }}"
|
||||||
|
zone_name: "{{ graylog_ipa_dnsrecord_zone_name }}"
|
||||||
|
name: "{{ graylog_ipa_dnsrecord_name }}"
|
||||||
|
record_type: "{{ graylog_ipa_dnsrecord_record_type }}"
|
||||||
|
record_value: "{{ graylog_ipa_dnsrecord_record_value }}"
|
||||||
|
state: "{{ graylog_ipa_dnsrecord_state }}"
|
||||||
|
|
||||||
|
roles:
|
||||||
|
- role: rockylinux.ipagetcert
|
||||||
|
state: present
|
||||||
|
|
||||||
|
post_tasks:
|
||||||
|
- name: Open firewalld ports
|
||||||
|
ansible.posix.firewalld:
|
||||||
|
port: "{{ item.port }}"
|
||||||
|
permanent: "{{ item.permanent }}"
|
||||||
|
state: "{{ item.state }}"
|
||||||
|
loop: "{{ graylog_server_firewall_rules }}"
|
||||||
|
|
||||||
|
- name: Touching run file that ansible has run here
|
||||||
|
file:
|
||||||
|
path: /var/log/ansible.run
|
||||||
|
state: touch
|
||||||
|
mode: '0644'
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
...
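Since the vault files noted in the play's comments are not loaded by the play itself, one way to supply them on an ad-hoc run is via extra-vars files plus a vault password prompt; the exact vault layout here is an assumption, not something shown in this commit:

```
ansible-playbook role-rocky-graylog.yml \
  -e @vars/vaults/encpass.yml \
  -e @vars/vaults/hostman.yml \
  --ask-vault-pass
```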
|
72
role-rocky-mirrormanager.yml
Normal file
@ -0,0 +1,72 @@
|
|||||||
|
---
|
||||||
|
# MirrorManager2
|
||||||
|
- name: Install and configure mirrormanager
|
||||||
|
hosts: all
|
||||||
|
become: false
|
||||||
|
vars_files:
|
||||||
|
# This playbook requires vaults!
|
||||||
|
# vars/vaults/hostman.yml
|
||||||
|
# vars/vaults/mirrormanager.yml
|
||||||
|
- vars/mounts/mirrormanager.yml
|
||||||
|
- vars/mirrormanager.yml
|
||||||
|
|
||||||
|
# This is to try to avoid the handler issue in pre/post tasks
|
||||||
|
handlers:
|
||||||
|
- import_tasks: handlers/main.yml
|
||||||
|
|
||||||
|
pre_tasks:
|
||||||
|
- name: Check if ansible cannot be run here
|
||||||
|
stat:
|
||||||
|
path: /etc/no-ansible
|
||||||
|
register: no_ansible
|
||||||
|
|
||||||
|
- name: Verify if we can run ansible
|
||||||
|
assert:
|
||||||
|
that:
|
||||||
|
- "not no_ansible.stat.exists"
|
||||||
|
success_msg: "We are able to run on this node"
|
||||||
|
fail_msg: "/etc/no-ansible exists - skipping run on this node"
|
||||||
|
|
||||||
|
- name: Install git
|
||||||
|
become: true
|
||||||
|
package:
|
||||||
|
name: git
|
||||||
|
state: present
|
||||||
|
|
||||||
|
- name: Install SELinux packages
|
||||||
|
become: true
|
||||||
|
package:
|
||||||
|
name: python3-policycoreutils.noarch
|
||||||
|
state: present
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
#- include_tasks: tasks/mirrormanager.yml
|
||||||
|
- include_tasks: tasks/efs_mount.yml
|
||||||
|
loop: "{{ mounts }}"
|
||||||
|
tags: ["mounts"]
|
||||||
|
|
||||||
|
roles:
|
||||||
|
- role: rockylinux.ipagetcert
|
||||||
|
become: true
|
||||||
|
state: present
|
||||||
|
tags: ['certs']
|
||||||
|
|
||||||
|
post_tasks:
|
||||||
|
- name: Open firewalld ports
|
||||||
|
become: true
|
||||||
|
ansible.posix.firewalld:
|
||||||
|
port: "{{ item.port }}"
|
||||||
|
permanent: "{{ item.permanent }}"
|
||||||
|
state: "{{ item.state }}"
|
||||||
|
immediate: yes
|
||||||
|
loop: "{{ firewall_rules }}"
|
||||||
|
|
||||||
|
- name: Touching run file that ansible has run here
|
||||||
|
become: true
|
||||||
|
file:
|
||||||
|
path: /var/log/ansible.run
|
||||||
|
state: touch
|
||||||
|
mode: '0644'
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
...
|
64
role-rocky-monitoring.yml
Normal file
64
role-rocky-monitoring.yml
Normal file
@ -0,0 +1,64 @@
|
|||||||
|
---
|
||||||
|
# Creates the first monitoring server
|
||||||
|
# Recommended specs
|
||||||
|
# CPU: 2 cores
|
||||||
|
# Memory: 2GB
|
||||||
|
# Storage: a piece of string
|
||||||
|
- name: Install Prometheus
|
||||||
|
hosts: all
|
||||||
|
become: true
|
||||||
|
vars_files:
|
||||||
|
# vars/vaults/encpass.yml
|
||||||
|
- vars/monitoring.yml
|
||||||
|
- vars/monitoring/alertmanager.yml
|
||||||
|
- vars/monitoring/grafana.yml
|
||||||
|
- vars/monitoring/prometheus.yml
|
||||||
|
|
||||||
|
# This is to try to avoid the handler issue in pre/post tasks
|
||||||
|
handlers:
|
||||||
|
- import_tasks: handlers/main.yml
|
||||||
|
|
||||||
|
pre_tasks:
|
||||||
|
- name: Check if ansible cannot be run here
|
||||||
|
stat:
|
||||||
|
path: /etc/no-ansible
|
||||||
|
register: no_ansible
|
||||||
|
|
||||||
|
- name: Verify if we can run ansible
|
||||||
|
assert:
|
||||||
|
that:
|
||||||
|
- "not no_ansible.stat.exists"
|
||||||
|
success_msg: "We are able to run on this node"
|
||||||
|
fail_msg: "/etc/no-ansible exists - skipping run on this node"
|
||||||
|
|
||||||
|
- name: Install SELinux packages
|
||||||
|
package:
|
||||||
|
name: python3-policycoreutils.noarch
|
||||||
|
state: present
|
||||||
|
|
||||||
|
roles:
|
||||||
|
# - role: rockylinux.ipagetcert
|
||||||
|
# state: present
|
||||||
|
- role: cloudalchemy.prometheus
|
||||||
|
state: present
|
||||||
|
- role: cloudalchemy.alertmanager
|
||||||
|
state: present
|
||||||
|
- role: cloudalchemy.grafana
|
||||||
|
state: present
|
||||||
|
|
||||||
|
post_tasks:
|
||||||
|
- name: Open firewalld ports
|
||||||
|
ansible.posix.firewalld:
|
||||||
|
port: "{{ item.port }}"
|
||||||
|
permanent: "{{ item.permanent }}"
|
||||||
|
state: "{{ item.state }}"
|
||||||
|
loop: "{{ monitoring_server_firewall_rules }}"
|
||||||
|
|
||||||
|
- name: Touching run file that ansible has run here
|
||||||
|
file:
|
||||||
|
path: /var/log/ansible.run
|
||||||
|
state: touch
|
||||||
|
mode: '0644'
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
...
|
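The cloudalchemy roles above are driven entirely by variables pulled in through `vars_files`. As an assumed sketch (variable names follow the upstream cloudalchemy.prometheus role; targets and labels here are illustrative), `vars/monitoring/prometheus.yml` could look roughly like:

```
# Illustrative only - real targets and jobs come from the monitoring vars files
prometheus_targets:
  node:
    - targets:
        - localhost:9100
      labels:
        env: production
prometheus_scrape_configs:
  - job_name: node
    file_sd_configs:
      - files:
          - /etc/prometheus/file_sd/node.yml
```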
62
role-rocky-mqtt.yml
Normal file
62
role-rocky-mqtt.yml
Normal file
@ -0,0 +1,62 @@
|
|||||||
|
---
|
||||||
|
# Stands up an mqtt instance
|
||||||
|
- name: Configure mqtt
|
||||||
|
hosts: all
|
||||||
|
become: true
|
||||||
|
vars_files:
|
||||||
|
# vars/vaults/encpass.yml
|
||||||
|
- vars/mqtt.yml
|
||||||
|
|
||||||
|
# This is to try to avoid the handler issue in pre/post tasks
|
||||||
|
handlers:
|
||||||
|
- import_tasks: handlers/main.yml
|
||||||
|
|
||||||
|
pre_tasks:
|
||||||
|
- name: Check if ansible cannot be run here
|
||||||
|
stat:
|
||||||
|
path: /etc/no-ansible
|
||||||
|
register: no_ansible
|
||||||
|
|
||||||
|
- name: Verify if we can run ansible
|
||||||
|
assert:
|
||||||
|
that:
|
||||||
|
- "not no_ansible.stat.exists"
|
||||||
|
success_msg: "We are able to run on this node"
|
||||||
|
fail_msg: "/etc/no-ansible exists - skipping run on this node"
|
||||||
|
|
||||||
|
# EPEL and PowerTools are required for ipsilon to function
|
||||||
|
# I also couldn't find an ansible built-in to do this
|
||||||
|
- name: Enable the PowerTools repository
|
||||||
|
community.general.ini_file:
|
||||||
|
dest: /etc/yum.repos.d/Rocky-PowerTools.repo
|
||||||
|
section: powertools
|
||||||
|
option: enabled
|
||||||
|
value: 1
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0644'
|
||||||
|
|
||||||
|
# The CentOS extras repos has epel-release provided
|
||||||
|
- name: Enable the EPEL repository
|
||||||
|
yum:
|
||||||
|
name: epel-release
|
||||||
|
state: present
|
||||||
|
tags:
|
||||||
|
- packages
|
||||||
|
|
||||||
|
roles:
|
||||||
|
- role: rockylinux.ipagetcert
|
||||||
|
state: present
|
||||||
|
|
||||||
|
- role: rockylinux.mqtt
|
||||||
|
state: present
|
||||||
|
|
||||||
|
post_tasks:
|
||||||
|
- name: Touching run file that ansible has run here
|
||||||
|
file:
|
||||||
|
path: /var/log/ansible.run
|
||||||
|
state: touch
|
||||||
|
mode: '0644'
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
...
|
22
role-rocky-node_exporter.yml
Normal file
22
role-rocky-node_exporter.yml
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
---
|
||||||
|
- name: Install Prometheus Node Exporter
|
||||||
|
hosts: all
|
||||||
|
become: true
|
||||||
|
|
||||||
|
pre_tasks:
|
||||||
|
- name: Install SELinux packages
|
||||||
|
package:
|
||||||
|
name: python3-policycoreutils.noarch
|
||||||
|
state: present
|
||||||
|
|
||||||
|
roles:
|
||||||
|
- role: cloudalchemy.node_exporter
|
||||||
|
state: present
|
||||||
|
|
||||||
|
post_tasks:
|
||||||
|
- name: Open firewall for node-exporter
|
||||||
|
ansible.posix.firewalld:
|
||||||
|
port: 9100/tcp
|
||||||
|
permanent: true
|
||||||
|
state: enabled
|
||||||
|
...
|
67
role-rocky-pinnwand.yml
Normal file
67
role-rocky-pinnwand.yml
Normal file
@ -0,0 +1,67 @@
|
|||||||
|
---
|
||||||
|
# pinnwand
|
||||||
|
- name: Install pinnwand
|
||||||
|
hosts: all
|
||||||
|
become: true
|
||||||
|
vars_files:
|
||||||
|
- vars/pinnwand.yml
|
||||||
|
# vars/vaults/hostman.yml
|
||||||
|
# vars/vaults/pinnwand.yml
|
||||||
|
|
||||||
|
# This is to try to avoid the handler issue in pre/post tasks
|
||||||
|
handlers:
|
||||||
|
- import_tasks: handlers/main.yml
|
||||||
|
|
||||||
|
pre_tasks:
|
||||||
|
- name: Check if ansible cannot be run here
|
||||||
|
stat:
|
||||||
|
path: /etc/no-ansible
|
||||||
|
register: no_ansible
|
||||||
|
|
||||||
|
- name: Verify if we can run ansible
|
||||||
|
assert:
|
||||||
|
that:
|
||||||
|
- "not no_ansible.stat.exists"
|
||||||
|
success_msg: "We are able to run on this node"
|
||||||
|
fail_msg: "/etc/no-ansible exists - skipping run on this node"
|
||||||
|
|
||||||
|
- name: Install SELinux packages
|
||||||
|
package:
|
||||||
|
name: python3-policycoreutils.noarch
|
||||||
|
state: present
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
# - include_tasks: tasks/pinnwand.yml
|
||||||
|
# tags: ['includetasks']
|
||||||
|
|
||||||
|
roles:
|
||||||
|
- role: rockylinux.ipagetcert
|
||||||
|
state: present
|
||||||
|
tags: ['certs']
|
||||||
|
|
||||||
|
- role: rockylinux.pinnwand
|
||||||
|
state: present
|
||||||
|
tags: ['role_pinnwand']
|
||||||
|
|
||||||
|
# Define variables in vars/matomo/nginx.yml
|
||||||
|
- role: nginxinc.nginx_core.nginx
|
||||||
|
tags: ['nginx']
|
||||||
|
# - role: nginxinc.nginx_core.nginx_config
|
||||||
|
# tags: ['nginx']
|
||||||
|
|
||||||
|
post_tasks:
|
||||||
|
- name: Open firewalld ports
|
||||||
|
ansible.posix.firewalld:
|
||||||
|
port: "{{ item.port }}"
|
||||||
|
permanent: "{{ item.permanent | default(yes) }}"
|
||||||
|
state: "{{ item.state | default(present) }}"
|
||||||
|
loop: "{{ firewall_rules }}"
|
||||||
|
|
||||||
|
- name: Touching run file that ansible has run here
|
||||||
|
file:
|
||||||
|
path: /var/log/ansible.run
|
||||||
|
state: touch
|
||||||
|
mode: '0644'
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
...
|
78
role-rocky-rabbitmq.yml
Normal file
78
role-rocky-rabbitmq.yml
Normal file
@ -0,0 +1,78 @@
|
|||||||
|
---
|
||||||
|
# Stands up a RabbitMQ Cluster
|
||||||
|
- name: Configure RabbitMQ
|
||||||
|
hosts: all
|
||||||
|
become: true
|
||||||
|
vars_files:
|
||||||
|
- vars/common.yml
|
||||||
|
# vars/vaults/encpass.yml
|
||||||
|
- vars/rabbitmq.yml
|
||||||
|
|
||||||
|
# This is to try to avoid the handler issue in pre/post tasks
|
||||||
|
handlers:
|
||||||
|
- import_tasks: handlers/main.yml
|
||||||
|
|
||||||
|
pre_tasks:
|
||||||
|
- name: Check if ansible cannot be run here
|
||||||
|
stat:
|
||||||
|
path: /etc/no-ansible
|
||||||
|
register: no_ansible
|
||||||
|
|
||||||
|
- name: Verify if we can run ansible
|
||||||
|
assert:
|
||||||
|
that:
|
||||||
|
- "not no_ansible.stat.exists"
|
||||||
|
success_msg: "We are able to run on this node"
|
||||||
|
fail_msg: "/etc/no-ansible exists - skipping run on this node"
|
||||||
|
|
||||||
|
# We have separate passwords per rabbitmq env
|
||||||
|
- name: Import rabbitmq passwords
|
||||||
|
include_vars:
|
||||||
|
file: "vars/vaults/rabbitmq_{{ rabbitmq_env }}.yml"
|
||||||
|
|
||||||
|
# EPEL and PowerTools are required for ipsilon to function
|
||||||
|
# I also couldn't find an ansible built-in to do this
|
||||||
|
- name: Enable the PowerTools repository
|
||||||
|
community.general.ini_file:
|
||||||
|
dest: /etc/yum.repos.d/Rocky-PowerTools.repo
|
||||||
|
section: powertools
|
||||||
|
option: enabled
|
||||||
|
value: 1
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0644'
|
||||||
|
|
||||||
|
# The CentOS extras repos has epel-release provided
|
||||||
|
- name: Enable the EPEL repository
|
||||||
|
yum:
|
||||||
|
name: epel-release
|
||||||
|
state: present
|
||||||
|
tags:
|
||||||
|
- packages
|
||||||
|
|
||||||
|
# This will change eventually to a rocky-release-messaging repo or to a
|
||||||
|
# rocky-release-rabbitmq repo
|
||||||
|
#- name: Install centos rabbitmq
|
||||||
|
# yum:
|
||||||
|
# name: centos-release-rabbitmq-38
|
||||||
|
# state: present
|
||||||
|
# tags:
|
||||||
|
# - packages
|
||||||
|
|
||||||
|
roles:
|
||||||
|
- role: rockylinux.ipagetcert
|
||||||
|
state: present
|
||||||
|
when: rabbitmq_private
|
||||||
|
|
||||||
|
- role: rockylinux.rabbitmq
|
||||||
|
state: present
|
||||||
|
|
||||||
|
post_tasks:
|
||||||
|
- name: Touching run file that ansible has run here
|
||||||
|
file:
|
||||||
|
path: /var/log/ansible.run
|
||||||
|
state: touch
|
||||||
|
mode: '0644'
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
...
|
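The `include_vars` task above builds the vault filename from `rabbitmq_env`, so that variable has to be defined before the play runs (typically in inventory or `vars/rabbitmq.yml`). An assumed example of the wiring:

```
# Assumed example - the real value is set per environment
rabbitmq_env: production   # makes the play load vars/vaults/rabbitmq_production.yml
```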
42
role-rocky-repopool.yml
Normal file
42
role-rocky-repopool.yml
Normal file
@ -0,0 +1,42 @@
|
|||||||
|
---
|
||||||
|
# Configures an instance to function as a HTTP serving member of repopool
|
||||||
|
- name: Configure Repo Pool hosts
|
||||||
|
hosts: all
|
||||||
|
become: true
|
||||||
|
vars_files:
|
||||||
|
# vars/vaults/encpass.yml
|
||||||
|
- vars/common.yml
|
||||||
|
- vars/mounts/repopool.yml
|
||||||
|
|
||||||
|
# This is to try to avoid the handler issue in pre/post tasks
|
||||||
|
handlers:
|
||||||
|
- import_tasks: handlers/main.yml
|
||||||
|
|
||||||
|
pre_tasks:
|
||||||
|
- name: Check if ansible cannot be run here
|
||||||
|
stat:
|
||||||
|
path: /etc/no-ansible
|
||||||
|
register: no_ansible
|
||||||
|
|
||||||
|
- name: Verify if we can run ansible
|
||||||
|
assert:
|
||||||
|
that:
|
||||||
|
- "not no_ansible.stat.exists"
|
||||||
|
success_msg: "We are able to run on this node"
|
||||||
|
fail_msg: "/etc/no-ansible exists - skipping run on this node"
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
- name: "Setup shared filesystem mount"
|
||||||
|
include_tasks: tasks/efs_mount.yml
|
||||||
|
with_items: "{{ mounts }}"
|
||||||
|
tags: ["koji_efs_mount"]
|
||||||
|
|
||||||
|
post_tasks:
|
||||||
|
- name: Touching run file that ansible has run here
|
||||||
|
file:
|
||||||
|
path: /var/log/ansible.run
|
||||||
|
state: touch
|
||||||
|
mode: '0644'
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
...
|
14
roles/README.md
Normal file
14
roles/README.md
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
# Roles
|
||||||
|
|
||||||
|
If you want to use a role specifically for this repository, you will need to define it in a `requirements.yml`; otherwise AWX will not install what you need to run your tasks.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```
|
||||||
|
---
|
||||||
|
# Roles
|
||||||
|
roles:
|
||||||
|
- name: rockylinux.ipagetcert
|
||||||
|
src: https://github.com/rocky-linux/ansible-role-ipa-getcert
|
||||||
|
version: main
|
||||||
|
```
|
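For local runs outside AWX, the same file can be consumed from the repository root with `ansible-galaxy install -r roles/requirements.yml -p roles/`.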
44
roles/requirements.yml
Normal file
44
roles/requirements.yml
Normal file
@ -0,0 +1,44 @@
|
|||||||
|
---
|
||||||
|
roles:
|
||||||
|
- name: geerlingguy.mysql
|
||||||
|
# monitoring
|
||||||
|
- name: cloudalchemy.node_exporter
|
||||||
|
- name: cloudalchemy.prometheus
|
||||||
|
- name: cloudalchemy.alertmanager
|
||||||
|
- name: cloudalchemy.grafana
|
||||||
|
- name: geerlingguy.gitlab
|
||||||
|
- name: geerlingguy.postgresql
|
||||||
|
- name: geerlingguy.php
|
||||||
|
- name: geerlingguy.nodejs
|
||||||
|
- name: geerlingguy.certbot
|
||||||
|
- name: riemers.gitlab-runner
|
||||||
|
|
||||||
|
- name: rockylinux.ipagetcert
|
||||||
|
src: https://github.com/rocky-linux/ansible-role-ipa-getcert
|
||||||
|
version: main
|
||||||
|
- name: rockylinux.ipsilon
|
||||||
|
src: https://github.com/rocky-linux/ansible-role-ipsilon
|
||||||
|
version: main
|
||||||
|
- name: rockylinux.kojihub
|
||||||
|
src: https://github.com/rocky-linux/ansible-role-kojihub
|
||||||
|
version: main
|
||||||
|
- name: rockylinux.kojid
|
||||||
|
src: https://github.com/rocky-linux/ansible-role-kojid
|
||||||
|
version: main
|
||||||
|
- name: rockylinux.rabbitmq
|
||||||
|
src: https://github.com/rocky-linux/ansible-role-rabbitmq
|
||||||
|
version: main
|
||||||
|
- name: rockylinux.sigul
|
||||||
|
src: https://github.com/rocky-linux/ansible-role-sigul
|
||||||
|
version: main
|
||||||
|
- name: rockylinux.matterbridge
|
||||||
|
src: https://github.com/NeilHanlon/ansible-role-matterbridge
|
||||||
|
version: master
|
||||||
|
- name: rockylinux.pinnwand
|
||||||
|
src: https://github.com/rocky-linux/ansible-role-pinnwand
|
||||||
|
version: main
|
||||||
|
- name: rockylinux.wikijs
|
||||||
|
src: https://git.rockylinux.org/infrastructure/public/ansible/ansible-role-wikijs.git
|
||||||
|
scm: git
|
||||||
|
version: develop
|
||||||
|
...
|
27
tasks/account_services.yml
Normal file
27
tasks/account_services.yml
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
---
|
||||||
|
# Account Services
|
||||||
|
- name: Install packages
|
||||||
|
package:
|
||||||
|
name:
|
||||||
|
- httpd
|
||||||
|
- mod_ssl
|
||||||
|
- python3
|
||||||
|
- python3-setuptools
|
||||||
|
- python3-kdcproxy
|
||||||
|
state: present
|
||||||
|
|
||||||
|
- name: Deploy relevant httpd configuration
|
||||||
|
template:
|
||||||
|
src: "etc/httpd/conf.d/id.conf.j2"
|
||||||
|
dest: "/etc/httpd/conf.d/id.conf"
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0644'
|
||||||
|
notify: restart_httpd
|
||||||
|
|
||||||
|
- name: Enable and start
|
||||||
|
systemd:
|
||||||
|
name: httpd
|
||||||
|
state: started
|
||||||
|
enabled: true
|
||||||
|
...
|
36
tasks/auditd.yml
Normal file
36
tasks/auditd.yml
Normal file
@ -0,0 +1,36 @@
|
|||||||
|
---
|
||||||
|
- name: Ensure auditd is installed
|
||||||
|
package:
|
||||||
|
name: audit
|
||||||
|
state: present
|
||||||
|
tags:
|
||||||
|
- harden
|
||||||
|
|
||||||
|
- name: Ensure auditd is enabled
|
||||||
|
service:
|
||||||
|
name: auditd
|
||||||
|
enabled: true
|
||||||
|
|
||||||
|
- name: Ensure auditd buffer is OK
|
||||||
|
replace:
|
||||||
|
path: /etc/audit/rules.d/audit.rules
|
||||||
|
regexp: '-b \d+'
|
||||||
|
replace: '-b {{ audit_buffer }}'
|
||||||
|
notify:
|
||||||
|
- regenerate_auditd_rules
|
||||||
|
tags:
|
||||||
|
- harden
|
||||||
|
|
||||||
|
- name: Ensure collection audit rules are available
|
||||||
|
template:
|
||||||
|
src: "etc/audit/rules.d/collection.rules.j2"
|
||||||
|
dest: "/etc/audit/rules.d/collection.rules"
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0600'
|
||||||
|
backup: true
|
||||||
|
notify:
|
||||||
|
- regenerate_auditd_rules
|
||||||
|
tags:
|
||||||
|
- harden
|
||||||
|
...
|
55
tasks/authentication.yml
Normal file
55
tasks/authentication.yml
Normal file
@ -0,0 +1,55 @@
|
|||||||
|
---
|
||||||
|
# Configures PAM and SSSD post-ipa client installation. It is recommended that
|
||||||
|
# we use a custom authselect profile and build it out from there.
|
||||||
|
- name: Enterprise Linux 8+ PAM Configuration
|
||||||
|
block:
|
||||||
|
- name: Ensure Custom Profile is removed
|
||||||
|
file:
|
||||||
|
path: /etc/authselect/custom/sssd-rocky
|
||||||
|
state: absent
|
||||||
|
|
||||||
|
- name: Create custom authselect profile based on sssd
|
||||||
|
command: >
|
||||||
|
/usr/bin/authselect create-profile sssd-rocky
|
||||||
|
--base-on sssd
|
||||||
|
--symlink-dconf
|
||||||
|
--symlink-meta
|
||||||
|
--symlink=postlogin
|
||||||
|
--symlink=smartcard-auth
|
||||||
|
--symlink=fingerprint-auth
|
||||||
|
changed_when: false
|
||||||
|
|
||||||
|
- name: Override system-auth and password-auth
|
||||||
|
copy:
|
||||||
|
src: "etc/authselect/custom/sssd-rocky/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}-system-auth"
|
||||||
|
dest: "{{ item }}"
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0644'
|
||||||
|
loop:
|
||||||
|
- /etc/authselect/custom/sssd-rocky/system-auth
|
||||||
|
- /etc/authselect/custom/sssd-rocky/password-auth
|
||||||
|
|
||||||
|
- name: Select New Profile
|
||||||
|
command: >
|
||||||
|
/usr/bin/authselect select custom/sssd-rocky
|
||||||
|
without-nullok
|
||||||
|
with-faillock
|
||||||
|
with-mkhomedir
|
||||||
|
with-sudo
|
||||||
|
--force
|
||||||
|
changed_when: false
|
||||||
|
|
||||||
|
- name: Apply new settings
|
||||||
|
command: /usr/bin/authselect apply-changes
|
||||||
|
changed_when: false
|
||||||
|
|
||||||
|
- name: Enable oddjobd
|
||||||
|
service:
|
||||||
|
name: oddjobd
|
||||||
|
state: started
|
||||||
|
enabled: true
|
||||||
|
when:
|
||||||
|
- ansible_facts['os_family'] == 'RedHat'
|
||||||
|
- ansible_facts['distribution_major_version']|int >= 8
|
||||||
|
...
|
55
tasks/bugzilla.yml
Normal file
55
tasks/bugzilla.yml
Normal file
@ -0,0 +1,55 @@
|
|||||||
|
---
|
||||||
|
# Configure Bugzilla
|
||||||
|
- name: Configure SELinux booleans
|
||||||
|
ansible.posix.seboolean:
|
||||||
|
name: "{{ item }}"
|
||||||
|
persistent: true
|
||||||
|
state: true
|
||||||
|
with_items:
|
||||||
|
- httpd_can_network_connect_db
|
||||||
|
- httpd_can_network_connect
|
||||||
|
- httpd_can_sendmail
|
||||||
|
|
||||||
|
- name: Install necessary packages
|
||||||
|
yum:
|
||||||
|
name: "{{ bugzilla_pkg }}"
|
||||||
|
state: present
|
||||||
|
tags:
|
||||||
|
- packages
|
||||||
|
|
||||||
|
- name: Download the bugtracker
|
||||||
|
get_url:
|
||||||
|
url: "https://ftp.mozilla.org/pub/mozilla.org/webtools/bugzilla-{{ bugzilla_version }}.tar.gz"
|
||||||
|
dest: "/tmp/bugzilla-{{ bugzilla_version }}.tar.gz"
|
||||||
|
checksum: "{{ bugzilla_checksum }}"
|
||||||
|
|
||||||
|
- name: Create initial directory
|
||||||
|
file:
|
||||||
|
path: "{{ bugzilla_dir }}"
|
||||||
|
state: directory
|
||||||
|
mode: '0750'
|
||||||
|
owner: root
|
||||||
|
group: apache
|
||||||
|
|
||||||
|
- name: Extract bugzilla
|
||||||
|
unarchive:
|
||||||
|
src: "/tmp/bugzilla-{{ bugzilla_version }}.tar.gz"
|
||||||
|
dest: "{{ bugzilla_dir }}"
|
||||||
|
owner: root
|
||||||
|
group: apache
|
||||||
|
mode: '0640'
|
||||||
|
remote_src: true
|
||||||
|
extra_opts:
|
||||||
|
- '--strip-components=1'
|
||||||
|
|
||||||
|
- name: Configure httpd
|
||||||
|
template:
|
||||||
|
src: "etc/httpd/conf.d/bugzilla.conf.j2"
|
||||||
|
dest: "/etc/httpd/conf.d/bugzilla.conf"
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0644'
|
||||||
|
|
||||||
|
- name: Install necessary pieces
|
||||||
|
import_tasks: bugzilla_install.yml
|
||||||
|
...
|
60
tasks/bugzilla_install.yml
Normal file
60
tasks/bugzilla_install.yml
Normal file
@ -0,0 +1,60 @@
|
|||||||
|
---
|
||||||
|
# Install bugzilla properly, including modules and stuff
|
||||||
|
|
||||||
|
- name: Check for a localconfig file
|
||||||
|
stat:
|
||||||
|
path: "{{ bugzilla_dir }}/localconfig"
|
||||||
|
register: conf_result
|
||||||
|
|
||||||
|
- name: Deploy answer file
|
||||||
|
template:
|
||||||
|
src: "var/www/bugzilla/answer"
|
||||||
|
dest: "{{ bugzilla_dir }}/answer"
|
||||||
|
owner: root
|
||||||
|
group: apache
|
||||||
|
mode: "0640"
|
||||||
|
when: not conf_result.stat.exists
|
||||||
|
|
||||||
|
- name: Run checksetup.pl
|
||||||
|
shell: "set -o pipefail && /usr/bin/perl checksetup.pl {{ bugzilla_dir }}/answer"
|
||||||
|
args:
|
||||||
|
chdir: "{{ bugzilla_dir }}"
|
||||||
|
changed_when: "1 != 1"
|
||||||
|
when: not conf_result.stat.exists
|
||||||
|
|
||||||
|
- name: Deploy proper configuration
|
||||||
|
template:
|
||||||
|
src: "var/www/bugzilla/localconfig.j2"
|
||||||
|
dest: "{{ bugzilla_dir }}/localconfig"
|
||||||
|
owner: root
|
||||||
|
group: apache
|
||||||
|
mode: '0640'
|
||||||
|
|
||||||
|
- name: Install the proper modules
|
||||||
|
shell: "set -o pipefail && /usr/bin/perl install-module.pl {{ item }}"
|
||||||
|
changed_when: "1 != 1"
|
||||||
|
args:
|
||||||
|
chdir: "{{ bugzilla_dir }}"
|
||||||
|
with_items:
|
||||||
|
- 'Net::SAML2'
|
||||||
|
- 'Template'
|
||||||
|
- 'Template::Plugin::GD::Image'
|
||||||
|
- 'HTML::FormatText::WithLinks'
|
||||||
|
- 'PatchReader'
|
||||||
|
- 'Crypt::OpenSSL::Verify'
|
||||||
|
- 'Crypt::OpenSSL::RSA'
|
||||||
|
- 'JSON::RPC'
|
||||||
|
- 'XML::Twig'
|
||||||
|
- 'Test::Taint'
|
||||||
|
|
||||||
|
- name: Re-run checksetup.pl
|
||||||
|
shell: "set -o pipefail && /usr/bin/perl checksetup.pl"
|
||||||
|
args:
|
||||||
|
chdir: "{{ bugzilla_dir }}"
|
||||||
|
changed_when: "1 != 1"
|
||||||
|
|
||||||
|
- name: Remove answer file
|
||||||
|
file:
|
||||||
|
path: "{{ bugzilla_dir }}/answer"
|
||||||
|
state: absent
|
||||||
|
...
|
33
tasks/chrony.yml
Normal file
33
tasks/chrony.yml
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
---
|
||||||
|
- name: Create overrides if we're an IPA Replica
|
||||||
|
include_vars: "{{ item }}"
|
||||||
|
with_first_found:
|
||||||
|
- "chronyserver.yml"
|
||||||
|
when: "'chronyservers' in group_names"
|
||||||
|
|
||||||
|
- name: Install chrony packages
|
||||||
|
yum:
|
||||||
|
name: "{{ chrony_packages }}"
|
||||||
|
state: present
|
||||||
|
|
||||||
|
- name: Fix permissions for chrony home directory
|
||||||
|
file:
|
||||||
|
path: "{{ chrony_homedir }}"
|
||||||
|
mode: '0750'
|
||||||
|
state: directory
|
||||||
|
|
||||||
|
- name: Deploy configuration
|
||||||
|
template:
|
||||||
|
src: chrony.conf.j2
|
||||||
|
dest: "{{ chrony_config_file }}"
|
||||||
|
owner: "{{ chrony_owner }}"
|
||||||
|
group: "{{ chrony_group }}"
|
||||||
|
mode: "{{ chrony_mode }}"
|
||||||
|
notify: "chrony service restart"
|
||||||
|
|
||||||
|
- name: Manage the state of service
|
||||||
|
systemd:
|
||||||
|
name: "{{ chrony_service_name }}"
|
||||||
|
state: "{{ chrony_service_state }}"
|
||||||
|
enabled: "{{ chrony_service_enabled }}"
|
||||||
|
...
|
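The chrony tasks above reference a handful of variables (`chrony_packages`, `chrony_homedir`, `chrony_config_file`, ownership and service settings) that come from the OS-specific vars files. A rough sketch with assumed, typical EL values:

```
# Assumed defaults - actual values come from the distribution vars files
chrony_packages:
  - chrony
chrony_homedir: /var/lib/chrony
chrony_config_file: /etc/chrony.conf
chrony_owner: root
chrony_group: root
chrony_mode: '0644'
chrony_service_name: chronyd
chrony_service_state: started
chrony_service_enabled: true
```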
45
tasks/efs_mount.yml
Normal file
45
tasks/efs_mount.yml
Normal file
@ -0,0 +1,45 @@
|
|||||||
|
---
|
||||||
|
# Requires amazon-efs-utils; included, but should probably be split out?
|
||||||
|
#
|
||||||
|
|
||||||
|
- name: "Installing amazon-efs-utils"
|
||||||
|
become: true
|
||||||
|
become_user: root
|
||||||
|
yum:
|
||||||
|
name: 'https://kojidev.rockylinux.org/kojifiles/packages/amazon-efs-utils/1.31.3/1.5c58a2f.el8/noarch/amazon-efs-utils-1.31.3-1.5c58a2f.el8.noarch.rpm'
|
||||||
|
disable_gpg_check: true
|
||||||
|
validate_certs: true
|
||||||
|
state: present
|
||||||
|
tags:
|
||||||
|
- amazon_efs_utils
|
||||||
|
- packages
|
||||||
|
- mounts
|
||||||
|
|
||||||
|
- name: "Gathering ec2 facts"
|
||||||
|
amazon.aws.ec2_metadata_facts:
|
||||||
|
tags:
|
||||||
|
- mounts
|
||||||
|
|
||||||
|
# "you can use /etc/hosts" https://github.com/aws/efs-utils/issues/1
|
||||||
|
- name: "Install custom hosts file because fmlC-w amazon said so."
|
||||||
|
become: true
|
||||||
|
become_user: root
|
||||||
|
ansible.builtin.lineinfile:
|
||||||
|
path: /etc/hosts
|
||||||
|
line: "{{ item.ip_map[ansible_ec2_placement_availability_zone] }} {{ item.fsid }}.efs.{{ ansible_ec2_placement_region }}.amazonaws.com"
|
||||||
|
create: true
|
||||||
|
tags:
|
||||||
|
- mounts
|
||||||
|
|
||||||
|
- name: "Creating and mounting {{ item.fsid }} at {{ item.mount_point }}"
|
||||||
|
become: true
|
||||||
|
become_user: root
|
||||||
|
ansible.posix.mount:
|
||||||
|
path: "{{ item.mount_point }}"
|
||||||
|
src: "{{ item.fsid }}:/"
|
||||||
|
fstype: "{{ item.fstype }}"
|
||||||
|
opts: "{{ item.fsopts | join(',') }}"
|
||||||
|
state: "{{ item.state | default('mounted') }}"
|
||||||
|
tags:
|
||||||
|
- mounts
|
||||||
|
...
|
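Each item passed into this task file needs the keys the tasks dereference: `fsid`, `mount_point`, `fstype`, `fsopts`, and an `ip_map` keyed by availability zone for the /etc/hosts workaround. A sketch of one entry in the `mounts` list (IDs, addresses, and paths are made up):

```
# Shape only - filesystem IDs and IPs here are placeholders
mounts:
  - fsid: fs-0123456789abcdef0
    mount_point: /mnt/shared
    fstype: efs
    fsopts:
      - tls
      - _netdev
    ip_map:
      us-east-2a: 10.0.1.25
      us-east-2b: 10.0.2.25
```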
5
tasks/grub.yml
Normal file
5
tasks/grub.yml
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
---
|
||||||
|
- name: Add kernel boot options to all kernels and default config
|
||||||
|
command: /usr/sbin/grubby --update-kernel=ALL --args "{{ grub_boot_options }}"
|
||||||
|
changed_when: "1 != 1"
|
||||||
|
...
|
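`grub_boot_options` is expected to hold the kernel arguments to append; an assumed example in the spirit of the CIS hardening elsewhere in this repository:

```
# Assumed example value
grub_boot_options: "audit=1 audit_backlog_limit=8192"
```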
217
tasks/harden.yml
Normal file
217
tasks/harden.yml
Normal file
@ -0,0 +1,217 @@
|
|||||||
|
---
|
||||||
|
# Initial hardening ideas from CIS
|
||||||
|
- name: sysctl hardening and limits
|
||||||
|
block:
|
||||||
|
- name: create combined sysctl-dict if overwrites are defined
|
||||||
|
set_fact:
|
||||||
|
sysctl_config: '{{ sysctl_config | combine(sysctl_overwrite) }}'
|
||||||
|
when: sysctl_overwrite | default()
|
||||||
|
|
||||||
|
- name: Kernel parameters
|
||||||
|
sysctl:
|
||||||
|
name: "{{ item.key }}"
|
||||||
|
value: "{{ item.value }}"
|
||||||
|
state: present
|
||||||
|
ignoreerrors: true
|
||||||
|
sysctl_set: true
|
||||||
|
sysctl_file: /etc/sysctl.d/99-ansible.conf
|
||||||
|
with_dict: "{{ sysctl_config }}"
|
||||||
|
tags:
|
||||||
|
- harden
|
||||||
|
- kernel
|
||||||
|
|
||||||
|
- name: Security limits
|
||||||
|
pam_limits:
|
||||||
|
dest: "/etc/security/limits.d/cis.conf"
|
||||||
|
domain: "{{ item.domain }}"
|
||||||
|
limit_type: "{{ item.limit_type }}"
|
||||||
|
limit_item: "{{ item.limit_item }}"
|
||||||
|
value: "{{ item.value }}"
|
||||||
|
with_items: "{{ limits }}"
|
||||||
|
tags:
|
||||||
|
- harden
|
||||||
|
|
||||||
|
- name: Standard login settings
|
||||||
|
block:
|
||||||
|
- name: useradd defaults
|
||||||
|
lineinfile:
|
||||||
|
line: "INACTIVE=30"
|
||||||
|
regexp: "^INACTIVE=.*"
|
||||||
|
path: "/etc/login.defs"
|
||||||
|
tags:
|
||||||
|
- harden
|
||||||
|
|
||||||
|
- name: login defs maximum days
|
||||||
|
replace:
|
||||||
|
path: /etc/login.defs
|
||||||
|
regexp: '(PASS_MAX_DAYS).*\d+'
|
||||||
|
replace: '\1\t{{ login_max_days }}'
|
||||||
|
tags:
|
||||||
|
- harden
|
||||||
|
|
||||||
|
- name: login defs minimum days
|
||||||
|
replace:
|
||||||
|
path: /etc/login.defs
|
||||||
|
regexp: '(PASS_MIN_DAYS).*\d+'
|
||||||
|
replace: '\1\t{{ login_min_days }}'
|
||||||
|
tags:
|
||||||
|
- harden
|
||||||
|
|
||||||
|
- name: login defs minimum length
|
||||||
|
replace:
|
||||||
|
path: /etc/login.defs
|
||||||
|
regexp: '(PASS_MIN_LEN).*\d+'
|
||||||
|
replace: '\1\t{{ login_min_len }}'
|
||||||
|
tags:
|
||||||
|
- harden
|
||||||
|
|
||||||
|
- name: login defs warn age
|
||||||
|
replace:
|
||||||
|
path: /etc/login.defs
|
||||||
|
regexp: '(PASS_WARN_AGE).*\d+'
|
||||||
|
replace: '\1\t{{ login_warn_age }}'
|
||||||
|
tags:
|
||||||
|
- harden
|
||||||
|
|
||||||
|
- name: cron directories permissions
|
||||||
|
file:
|
||||||
|
path: '{{ item }}'
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0700'
|
||||||
|
state: directory
|
||||||
|
loop: '{{ login_cron_directories }}'
|
||||||
|
tags:
|
||||||
|
- harden
|
||||||
|
|
||||||
|
- name: Create cron/at allows
|
||||||
|
file:
|
||||||
|
path: '{{ item }}'
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0600'
|
||||||
|
state: touch
|
||||||
|
loop: '{{ login_cron_allows }}'
|
||||||
|
tags:
|
||||||
|
- harden
|
||||||
|
|
||||||
|
- name: Remove cron/at denies
|
||||||
|
file:
|
||||||
|
path: '{{ item }}'
|
||||||
|
state: absent
|
||||||
|
loop: '{{ login_cron_denies }}'
|
||||||
|
tags:
|
||||||
|
- harden
|
||||||
|
|
||||||
|
# TODO: Use pamd module to establish password policy
|
||||||
|
- name: pwquality - minlen
|
||||||
|
lineinfile:
|
||||||
|
line: "minlen = 14"
|
||||||
|
regexp: "^# minlen =.*"
|
||||||
|
path: "/etc/security/pwquality.conf"
|
||||||
|
tags:
|
||||||
|
- harden
|
||||||
|
|
||||||
|
- name: pwquality - dcredit
|
||||||
|
lineinfile:
|
||||||
|
line: "dcredit = -1"
|
||||||
|
regexp: "^# dcredit =.*"
|
||||||
|
path: "/etc/security/pwquality.conf"
|
||||||
|
tags:
|
||||||
|
- harden
|
||||||
|
|
||||||
|
- name: pwquality - ucredit
|
||||||
|
lineinfile:
|
||||||
|
line: "ucredit = -1"
|
||||||
|
regexp: "^# ucredit =.*"
|
||||||
|
path: "/etc/security/pwquality.conf"
|
||||||
|
tags:
|
||||||
|
- harden
|
||||||
|
|
||||||
|
- name: pwquality - lcredit
|
||||||
|
lineinfile:
|
||||||
|
line: "lcredit = -1"
|
||||||
|
regexp: "^# lcredit =.*"
|
||||||
|
path: "/etc/security/pwquality.conf"
|
||||||
|
tags:
|
||||||
|
- harden
|
||||||
|
|
||||||
|
- name: pwquality - ocredit
|
||||||
|
lineinfile:
|
||||||
|
line: "ocredit = -1"
|
||||||
|
regexp: "^# ocredit =.*"
|
||||||
|
path: "/etc/security/pwquality.conf"
|
||||||
|
tags:
|
||||||
|
- harden
|
||||||
|
|
||||||
|
- name: Remove packages not allowed by CIS
|
||||||
|
package:
|
||||||
|
name: "{{ remove_packages }}"
|
||||||
|
state: absent
|
||||||
|
tags:
|
||||||
|
- harden
|
||||||
|
|
||||||
|
- name: Disable Services
|
||||||
|
service:
|
||||||
|
name: "{{ item }}"
|
||||||
|
enabled: false
|
||||||
|
state: stopped
|
||||||
|
loop: "{{ disable_svc }}"
|
||||||
|
register: service_check
|
||||||
|
failed_when: service_check is failed and not 'Could not find the requested service' in service_check.msg
|
||||||
|
tags:
|
||||||
|
- services
|
||||||
|
- harden
|
||||||
|
|
||||||
|
- name: modprobe settings
|
||||||
|
block:
|
||||||
|
- name: remove vfat from filesystem list if we are EFI
|
||||||
|
set_fact:
|
||||||
|
modprobe_unused_filesystems: "{{ modprobe_unused_filesystems | difference(['vfat']) }}"
|
||||||
|
when:
|
||||||
|
- efi_installed.stat.isdir is defined
|
||||||
|
- efi_installed.stat.isdir
|
||||||
|
tags:
|
||||||
|
- efi
|
||||||
|
|
||||||
|
- name: disable unused filesystems
|
||||||
|
template:
|
||||||
|
src: "etc/modprobe.d/cis.conf.j2"
|
||||||
|
dest: "/etc/modprobe.d/cis.conf"
|
||||||
|
owner: 'root'
|
||||||
|
group: 'root'
|
||||||
|
mode: '0644'
|
||||||
|
tags:
|
||||||
|
- harden
|
||||||
|
|
||||||
|
- name: Set init umask
|
||||||
|
lineinfile:
|
||||||
|
dest: /etc/sysconfig/init
|
||||||
|
state: present
|
||||||
|
regexp: ^umask
|
||||||
|
line: "umask 027"
|
||||||
|
create: true
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0644'
|
||||||
|
when: ansible_distribution_major_version == '7'
|
||||||
|
tags:
|
||||||
|
- harden
|
||||||
|
|
||||||
|
- name: CIS sudoers configuration
|
||||||
|
copy:
|
||||||
|
src: "etc/sudoers.d/cis"
|
||||||
|
dest: "/etc/sudoers.d/cis"
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0440'
|
||||||
|
tags:
|
||||||
|
- harden
|
||||||
|
|
||||||
|
- name: Remove packages not allowed by CIS
|
||||||
|
package:
|
||||||
|
name: "{{ remove_packages }}"
|
||||||
|
state: absent
|
||||||
|
tags:
|
||||||
|
- harden
|
||||||
|
...
|
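The sysctl and limits blocks above consume `sysctl_config` (a flat key/value dict, optionally merged with `sysctl_overwrite`) and `limits` (a list of pam_limits entries). An assumed, abbreviated sketch of those structures:

```
# Illustrative values only
sysctl_config:
  net.ipv4.ip_forward: 0
  kernel.randomize_va_space: 2
sysctl_overwrite: {}
limits:
  - domain: '*'
    limit_type: hard
    limit_item: core
    value: 0
```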
4
tasks/main.yml
Normal file
4
tasks/main.yml
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
---
|
||||||
|
# No tasks
|
||||||
|
- debug: msg="No tasks are provided here. Please import the task as needed in your playbook."
|
||||||
|
...
|
100
tasks/mantis.yml
Normal file
100
tasks/mantis.yml
Normal file
@ -0,0 +1,100 @@
|
|||||||
|
---
|
||||||
|
# Configure mantis
|
||||||
|
- name: Configure SELinux booleans
|
||||||
|
ansible.posix.seboolean:
|
||||||
|
name: "{{ item }}"
|
||||||
|
persistent: true
|
||||||
|
state: true
|
||||||
|
with_items:
|
||||||
|
- httpd_can_network_connect_db
|
||||||
|
- httpd_can_network_connect
|
||||||
|
- httpd_can_sendmail
|
||||||
|
|
||||||
|
- name: Install necessary packages
|
||||||
|
yum:
|
||||||
|
name: "{{ mantis_pkg }}"
|
||||||
|
state: present
|
||||||
|
tags:
|
||||||
|
- packages
|
||||||
|
|
||||||
|
- name: Download the bugtracker
|
||||||
|
get_url:
|
||||||
|
url: "http://downloads.sourceforge.net/mantisbt/mantisbt-{{ mantis_version }}.tar.gz"
|
||||||
|
dest: "/tmp/mantisbt-{{ mantis_version }}.tar.gz"
|
||||||
|
checksum: "{{ mantis_checksum }}"
|
||||||
|
|
||||||
|
- name: Extract mantis
|
||||||
|
unarchive:
|
||||||
|
src: "/tmp/mantisbt-{{ mantis_version }}.tar.gz"
|
||||||
|
dest: "/var/www"
|
||||||
|
owner: apache
|
||||||
|
group: apache
|
||||||
|
mode: '0644'
|
||||||
|
remote_src: true
|
||||||
|
|
||||||
|
- name: Generate crypto salt
|
||||||
|
shell: "set -o pipefail && cat /dev/urandom | head -c 64 | base64 --wrap=0"
|
||||||
|
changed_when: "1 != 1"
|
||||||
|
register: cryptosalt_string
|
||||||
|
|
||||||
|
- name: Configure mantis
|
||||||
|
template:
|
||||||
|
src: "var/www/mantis/config/config_inc.php.j2"
|
||||||
|
dest: "/var/www/mantisbt-{{ mantis_version }}/config/config_inc.php"
|
||||||
|
owner: apache
|
||||||
|
group: apache
|
||||||
|
mode: '0640'
|
||||||
|
|
||||||
|
- name: Deploy plugins from Mantis GitHub
|
||||||
|
git:
|
||||||
|
repo: "https://github.com/mantisbt-plugins/{{ item }}.git"
|
||||||
|
dest: "/var/www/mantisbt-{{ mantis_version }}/plugins/{{ item }}"
|
||||||
|
update: true
|
||||||
|
version: master
|
||||||
|
with_items:
|
||||||
|
- Snippets
|
||||||
|
|
||||||
|
- name: Deploy custom libravatar plugin
|
||||||
|
git:
|
||||||
|
repo: "https://github.com/nazunalika/mantisbt-libravatar.git"
|
||||||
|
dest: "/var/www/mantisbt-{{ mantis_version }}/plugins/Libravatar"
|
||||||
|
update: true
|
||||||
|
version: main
|
||||||
|
|
||||||
|
- name: Deploy custom mattermost plugin
|
||||||
|
git:
|
||||||
|
repo: "https://github.com/nazunalika/mantisbt-mattermost.git"
|
||||||
|
dest: "/var/www/mantisbt-{{ mantis_version }}/plugins/Mattermost"
|
||||||
|
update: true
|
||||||
|
version: main
|
||||||
|
|
||||||
|
- name: Configure httpd
|
||||||
|
template:
|
||||||
|
src: "etc/httpd/conf.d/mantis.conf.j2"
|
||||||
|
dest: "/etc/httpd/conf.d/mantis.conf"
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0644'
|
||||||
|
|
||||||
|
- name: Database import template
|
||||||
|
template:
|
||||||
|
src: "tmp/mantis_import.sql.j2"
|
||||||
|
dest: "/tmp/mantis_import.sql.j2"
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0600'
|
||||||
|
|
||||||
|
# We will need to generate this
|
||||||
|
# name: Import database if required
|
||||||
|
# community.general.postgresql_db:
|
||||||
|
# name: "{{ mantis_db_name }}"
|
||||||
|
# target: /tmp/mantis_import.sql
|
||||||
|
# owner: "{{ mantis_db_user }}"
|
||||||
|
# state: restore
|
||||||
|
# login_host: "{{ mantis_db_host }}"
|
||||||
|
# login_user: "{{ mantis_db_user }}"
|
||||||
|
# login_password: "{{ mantis_db_pass }}"
|
||||||
|
|
||||||
|
- name: Patch up some pages
|
||||||
|
import_tasks: mantispatch.yml
|
||||||
|
...
|
26
tasks/mantispatch.yml
Normal file
26
tasks/mantispatch.yml
Normal file
@ -0,0 +1,26 @@
|
|||||||
|
---
|
||||||
|
# Patch up various pieces of mantis to customize it. We do not rely on local
|
||||||
|
# bug tracker accounts. We are doing regex instead of just replacing the
|
||||||
|
# file as a whole. Should make it easier to deal with upgrades in theory.
|
||||||
|
- name: Change signup_page.php to Account Services
|
||||||
|
replace:
|
||||||
|
path: "/var/www/mantisbt-{{ mantis_version }}/{{ item }}"
|
||||||
|
regexp: 'signup_page.php'
|
||||||
|
replace: 'https://accounts.rockylinux.org'
|
||||||
|
with_items:
|
||||||
|
- core/print_api.php
|
||||||
|
- lost_pwd_page.php
|
||||||
|
- login_page.php
|
||||||
|
|
||||||
|
- name: Change special signup_page.php reference
|
||||||
|
replace:
|
||||||
|
path: "/var/www/mantisbt-{{ mantis_version }}/core/layout_api.php"
|
||||||
|
regexp: "' . helper_mantis_url( 'signup_page.php' ) . '"
|
||||||
|
replace: 'https://accounts.rockylinux.org'
|
||||||
|
|
||||||
|
- name: Remove LDAP from checks for signup button
|
||||||
|
lineinfile:
|
||||||
|
path: "/var/www/mantisbt-{{ mantis_version }}/login_page.php"
|
||||||
|
state: absent
|
||||||
|
regex: 'LDAP != config_get_global'
|
||||||
|
...
|
68
tasks/mirrormanager.yml
Normal file
68
tasks/mirrormanager.yml
Normal file
@ -0,0 +1,68 @@
|
|||||||
|
---
|
||||||
|
# Mirrormanager tasks
|
||||||
|
- name: Configure SELinux booleans
|
||||||
|
become: true
|
||||||
|
ansible.posix.seboolean:
|
||||||
|
name: "{{ item }}"
|
||||||
|
persistent: true
|
||||||
|
state: true
|
||||||
|
with_items:
|
||||||
|
- httpd_can_network_connect_db
|
||||||
|
- httpd_can_network_connect
|
||||||
|
|
||||||
|
- name: Create mirrormanager group
|
||||||
|
become: true
|
||||||
|
ansible.builtin.group:
|
||||||
|
name: "{{ (mirrormanager_user | default({})).group }}"
|
||||||
|
gid: "{{ (mirrormanager_user | default({})).gid | default(omit) }}"
|
||||||
|
system: "{{ (mirrormanager_user | default({})).system | default('yes') }}"
|
||||||
|
when: (mirrormanager_user | default({})).group is defined
|
||||||
|
|
||||||
|
- name: Create mirrormanager user
|
||||||
|
become: true
|
||||||
|
ansible.builtin.user:
|
||||||
|
name: "{{ (mirrormanager_user | default({})).name | default(_wiki_defaultusr) }}"
|
||||||
|
comment: "{{ (mirrormanager_user | default({})).comment | default(omit) }}"
|
||||||
|
uid: "{{ (mirrormanager_user | default({})).uid | default(omit) }}"
|
||||||
|
group: "{{ (mirrormanager_user | default({})).group | default(omit) }}"
|
||||||
|
groups: "{{ (mirrormanager_user | default({})).groups | default(omit) }}"
|
||||||
|
home: "{{ (mirrormanager_user | default({})).home | default(mirrormanager_dir) }}"
|
||||||
|
create_home: "{{ (mirrormanager_user | default({})).create_home | default('no') }}"
|
||||||
|
shell: "{{ (mirrormanager_user | default({})).shell | default(omit) }}"
|
||||||
|
system: "{{ (mirrormanager_user | default({})).system | default('no') }}"
|
||||||
|
|
||||||
|
- name: Create webroot directory
|
||||||
|
become: true
|
||||||
|
file:
|
||||||
|
path: "{{ mirrormanager_dir }}"
|
||||||
|
state: directory
|
||||||
|
group: "{{ mirrormanager_user.group }}"
|
||||||
|
owner: "{{ mirrormanager_user.name }}"
|
||||||
|
mode: "u=rwX,g=rX,o=rX" # 755 folders, 644 files
|
||||||
|
recurse: yes
|
||||||
|
seuser: system_u
|
||||||
|
serole: object_r
|
||||||
|
setype: httpd_sys_content_t
|
||||||
|
|
||||||
|
#- name: Checkout git repository at version
|
||||||
|
# become: true
|
||||||
|
# ansible.builtin.git:
|
||||||
|
# repo: "https://github.com/fedora-infra/mirrormanager2.git"
|
||||||
|
# dest: "{{ mirrormanager_dir }}/app"
|
||||||
|
# depth: 1
|
||||||
|
# version: "ee381257fcfef2eb38705d98f992d2ae8fb7bb8c"
|
||||||
|
# update: no
|
||||||
|
|
||||||
|
- name: Deploy MM2 config
|
||||||
|
become: true
|
||||||
|
template:
|
||||||
|
src: "opt/mirrormanager/mirrormanager2.cfg.j2"
|
||||||
|
dest: "{{ mirrormanager_dir }}/app/mirrormanager2.cfg"
|
||||||
|
group: "{{ mirrormanager_user.group }}"
|
||||||
|
owner: "{{ mirrormanager_user.name }}"
|
||||||
|
mode: '0700'
|
||||||
|
seuser: system_u
|
||||||
|
serole: object_r
|
||||||
|
setype: httpd_sys_rw_content_t
|
||||||
|
tags:
|
||||||
|
- config
|
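The mirrormanager tasks (and the mirrormanager2.cfg template further down in this commit) expect `mirrormanager_dir`, a `mirrormanager_user` mapping, and a `mirrormanager_db` mapping with connection details. A sketch with assumed values; the real secrets are vaulted:

```
# Shape only - real values live in vars/mirrormanager.yml and the vaults
mirrormanager_dir: /opt/mirrormanager
mirrormanager_user:
  name: mirrormanager
  group: mirrormanager
mirrormanager_db:
  user: mirrormanager
  password: "{{ vault_mirrormanager_db_password }}"
  host: db.example.org
  port: 5432
  dbname: mirrormanager
```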
89
tasks/noggin.yml
Normal file
89
tasks/noggin.yml
Normal file
@ -0,0 +1,89 @@
|
|||||||
|
---
|
||||||
|
- name: Ensure python is installed
|
||||||
|
yum:
|
||||||
|
name:
|
||||||
|
- python3
|
||||||
|
- python3-pip
|
||||||
|
state: present
|
||||||
|
|
||||||
|
- name: Ensure noggin user exists
|
||||||
|
user:
|
||||||
|
name: noggin
|
||||||
|
comment: "Noggin FAS"
|
||||||
|
|
||||||
|
- name: Create noggin directory
|
||||||
|
file:
|
||||||
|
path: /opt/noggin
|
||||||
|
state: directory
|
||||||
|
mode: '0700'
|
||||||
|
owner: noggin
|
||||||
|
group: noggin
|
||||||
|
|
||||||
|
- name: Deploy noggin
|
||||||
|
git:
|
||||||
|
repo: https://github.com/fedora-infra/noggin.git
|
||||||
|
dest: /opt/noggin/noggin
|
||||||
|
update: true
|
||||||
|
version: main
|
||||||
|
become: true
|
||||||
|
become_user: noggin
|
||||||
|
|
||||||
|
- name: Noggin user must install poetry
|
||||||
|
pip:
|
||||||
|
name: poetry
|
||||||
|
executable: pip3
|
||||||
|
become: true
|
||||||
|
become_user: noggin
|
||||||
|
|
||||||
|
- name: Remove any pycache
|
||||||
|
file:
|
||||||
|
path: "/home/noggin/.cache/pypoetry"
|
||||||
|
state: absent
|
||||||
|
|
||||||
|
- name: Noggin installation
|
||||||
|
command: "/home/noggin/.local/bin/poetry install --no-dev --extras deploy"
|
||||||
|
become: true
|
||||||
|
become_user: noggin
|
||||||
|
changed_when: "1 != 1"
|
||||||
|
args:
|
||||||
|
chdir: "/opt/noggin/noggin"
|
||||||
|
|
||||||
|
- name: Get the noggin poetry virtualenv
|
||||||
|
shell:
|
||||||
|
cmd: "poetry env list | awk '{print $1}'"
|
||||||
|
chdir: "/opt/noggin/noggin"
|
||||||
|
become: true
|
||||||
|
become_user: noggin
|
||||||
|
changed_when: "1 != 1"
|
||||||
|
register: virtualenv_location
|
||||||
|
|
||||||
|
- name: Deploy start up script
|
||||||
|
template:
|
||||||
|
src: "opt/noggin/start_noggin.sh.j2"
|
||||||
|
dest: "/opt/noggin/start_noggin.sh"
|
||||||
|
mode: '0750'
|
||||||
|
owner: noggin
|
||||||
|
group: noggin
|
||||||
|
|
||||||
|
- name: Deploy systemd unit
|
||||||
|
copy:
|
||||||
|
src: "etc/systemd/system/noggin.service"
|
||||||
|
dest: "/etc/systemd/system/noggin.service"
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0644'
|
||||||
|
|
||||||
|
- name: Deploy noggin configuration
|
||||||
|
template:
|
||||||
|
src: "opt/noggin/noggin.cfg.j2"
|
||||||
|
dest: "/opt/noggin/noggin.cfg"
|
||||||
|
owner: noggin
|
||||||
|
group: noggin
|
||||||
|
mode: '0600'
|
||||||
|
|
||||||
|
# At least on EL8, the only way to run it properly is to add this line
|
||||||
|
- name: Add missing create_app call
|
||||||
|
lineinfile:
|
||||||
|
path: "/opt/noggin/noggin/noggin/app.py"
|
||||||
|
line: "app = create_app()"
|
||||||
|
...
|
192
tasks/openqa.yml
Normal file
192
tasks/openqa.yml
Normal file
@ -0,0 +1,192 @@
|
|||||||
|
---
|
||||||
|
- name: Install OpenQA packages
|
||||||
|
yum:
|
||||||
|
name: "{{ openqa_packages }}"
|
||||||
|
state: present
|
||||||
|
tags:
|
||||||
|
- packages
|
||||||
|
|
||||||
|
- name: Copy httpd configuration files
|
||||||
|
copy:
|
||||||
|
remote_src: true
|
||||||
|
src: /etc/httpd/conf.d/{{ item }}.template
|
||||||
|
dest: /etc/httpd/conf.d/{{ item }}
|
||||||
|
mode: '0644'
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
loop:
|
||||||
|
- openqa.conf
|
||||||
|
- openqa-ssl.conf
|
||||||
|
notify: restart_httpd
|
||||||
|
tags:
|
||||||
|
- configure
|
||||||
|
|
||||||
|
- name: Template OpenQA configuration files
|
||||||
|
template:
|
||||||
|
src: etc/openqa/{{ item }}.j2
|
||||||
|
dest: /etc/openqa/{{ item }}
|
||||||
|
owner: "{{ openqa_user }}"
|
||||||
|
group: "{{ openqa_group }}"
|
||||||
|
mode: "0444"
|
||||||
|
loop:
|
||||||
|
- openqa.ini
|
||||||
|
- client.conf
|
||||||
|
tags:
|
||||||
|
- configure
|
||||||
|
|
||||||
|
- name: Get service facts
|
||||||
|
service_facts:
|
||||||
|
|
||||||
|
- name: Check for non-empty postgres data directory
|
||||||
|
stat:
|
||||||
|
path: /var/lib/pgsql/data/base
|
||||||
|
register: postgres_data_dir
|
||||||
|
|
||||||
|
- name: If postgresql is not already running, initialize database
|
||||||
|
command: postgresql-setup --initdb
|
||||||
|
when: not ( ansible_facts.services["postgresql.service"]["state"] == "running" )
|
||||||
|
and not postgres_data_dir.stat.exists
|
||||||
|
|
||||||
|
- name: Enable and start postgresql service
|
||||||
|
systemd:
|
||||||
|
name: postgresql
|
||||||
|
state: started
|
||||||
|
enabled: true
|
||||||
|
when: not ( ansible_facts.services["postgresql.service"]["state"] == "running" )
|
||||||
|
and not postgres_data_dir.stat.exists
|
||||||
|
|
||||||
|
- name: Configure SELinux to allow httpd connection to network
|
||||||
|
seboolean:
|
||||||
|
name: httpd_can_network_connect
|
||||||
|
state: true
|
||||||
|
persistent: true
|
||||||
|
tags:
|
||||||
|
- configure
|
||||||
|
|
||||||
|
- name: Enable and start OpenQA services
|
||||||
|
systemd:
|
||||||
|
name: "{{ item }}"
|
||||||
|
state: started
|
||||||
|
enabled: true
|
||||||
|
loop: "{{ openqa_services }}"
|
||||||
|
tags:
|
||||||
|
- configure
|
||||||
|
|
||||||
|
- name: Create openqa-vnc firewalld service
|
||||||
|
template:
|
||||||
|
src: etc/firewalld/services/openqa-vnc.xml.j2
|
||||||
|
dest: /etc/firewalld/services/openqa-vnc.xml
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: "0644"
|
||||||
|
tags:
|
||||||
|
- configure
|
||||||
|
|
||||||
|
- name: Load openqa-vnc firewalld service
|
||||||
|
systemd:
|
||||||
|
name: firewalld
|
||||||
|
state: reloaded
|
||||||
|
tags:
|
||||||
|
- configure
|
||||||
|
|
||||||
|
- name: Permit traffic for {{ item }} service
|
||||||
|
ansible.posix.firewalld:
|
||||||
|
service: "{{ item }}"
|
||||||
|
permanent: true
|
||||||
|
state: enabled
|
||||||
|
loop:
|
||||||
|
- http
|
||||||
|
- openqa-vnc
|
||||||
|
tags:
|
||||||
|
- configure
|
||||||
|
|
||||||
|
- name: Reload FirewallD
|
||||||
|
systemd:
|
||||||
|
name: firewalld
|
||||||
|
state: reloaded
|
||||||
|
tags:
|
||||||
|
- configure
|
||||||
|
|
||||||
|
- name: Check for existing repository
|
||||||
|
stat:
|
||||||
|
path: "{{ openqa_homedir }}/share/tests/rocky"
|
||||||
|
register: rocky_testing_repo
|
||||||
|
tags:
|
||||||
|
- configure
|
||||||
|
|
||||||
|
- name: Clone repository if it does not already exist
|
||||||
|
git:
|
||||||
|
accept_hostkey: true
|
||||||
|
dest: "{{ openqa_homedir }}/share/tests/rocky"
|
||||||
|
repo: "{{ openqa_rocky_testing_repo }}"
|
||||||
|
version: develop
|
||||||
|
when: not rocky_testing_repo.stat.exists
|
||||||
|
tags:
|
||||||
|
- configure
|
||||||
|
|
||||||
|
- name: Set owner/group/permissions on repo contents
|
||||||
|
file:
|
||||||
|
path: "{{ openqa_homedir }}/share/tests/rocky"
|
||||||
|
recurse: true
|
||||||
|
owner: "{{ openqa_user }}"
|
||||||
|
group: "{{ openqa_group }}"
|
||||||
|
mode: "u+rwX,g+rwX,o+rX,o-w"
|
||||||
|
tags:
|
||||||
|
- configure
|
||||||
|
|
||||||
|
# fifloader.py will fail if the Demo user is not logged in
|
||||||
|
- name: Authenticate to web UI the first time
|
||||||
|
uri:
|
||||||
|
url: "http://{{ openqa_host }}/login"
|
||||||
|
|
||||||
|
- name: Run fifloader.py
|
||||||
|
command: ./fifloader.py -l -c templates.fif.json templates-updates.fif.json
|
||||||
|
changed_when: "1 != 1"
|
||||||
|
args:
|
||||||
|
chdir: "{{ openqa_homedir }}/share/tests/rocky"
|
||||||
|
|
||||||
|
- name: Create ISO directory
|
||||||
|
file:
|
||||||
|
path: "{{ openqa_homedir }}/share/factory/iso/fixed"
|
||||||
|
state: directory
|
||||||
|
owner: "{{ openqa_user }}"
|
||||||
|
group: "{{ openqa_group }}"
|
||||||
|
mode: "0775"
|
||||||
|
tags:
|
||||||
|
- download_isos
|
||||||
|
|
||||||
|
- name: Download ISOs
|
||||||
|
get_url:
|
||||||
|
dest: "{{ openqa_homedir }}/share/factory/iso/fixed/{{ item.name }}"
|
||||||
|
url: "{{ rocky_iso_download_url }}/{{ item.name }}"
|
||||||
|
checksum: "{{ item.checksum }}"
|
||||||
|
owner: "{{ openqa_user }}"
|
||||||
|
group: "{{ openqa_group }}"
|
||||||
|
tmp_dest: "/var/tmp"
|
||||||
|
mode: "0644"
|
||||||
|
loop: "{{ openqa_isos }}"
|
||||||
|
tags:
|
||||||
|
- download_isos
|
||||||
|
|
||||||
|
- name: Start {{ openqa_worker_count }} OpenQA workers
|
||||||
|
ansible.builtin.systemd:
|
||||||
|
name: "openqa-worker@{{ item }}"
|
||||||
|
state: started
|
||||||
|
enabled: true
|
||||||
|
# range 'end' parameter is exclusive, so add 1
|
||||||
|
loop: "{{ range(1, (openqa_worker_count|int + 1)) | list }}"
|
||||||
|
tags:
|
||||||
|
- start_workers
|
||||||
|
- configure
|
||||||
|
|
||||||
|
- name: POST a job
|
||||||
|
command: |
|
||||||
|
openqa-cli api -X POST isos \
|
||||||
|
ISO=Rocky-{{ rocky_version }}-{{ rocky_arch }}-minimal.iso \
|
||||||
|
ARCH={{ rocky_arch }} \
|
||||||
|
DISTRI=rocky \
|
||||||
|
FLAVOR=minimal-iso \
|
||||||
|
VERSION={{ rocky_version }} \
|
||||||
|
BUILD="{{ '%Y%m%d.%H%M%S' | strftime }}.0"
|
||||||
|
changed_when: "1 != 1"
|
||||||
|
...
|
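The ISO download loop above expects `openqa_isos` items with `name` and `checksum` keys, plus `rocky_iso_download_url` as the base URL. An assumed example (URL and checksum are placeholders, reusing the `rocky_version`/`rocky_arch` variables from the job POST task):

```
# Placeholder values only
rocky_iso_download_url: "https://download.rockylinux.org/pub/rocky/{{ rocky_version }}/isos/{{ rocky_arch }}"
openqa_isos:
  - name: "Rocky-{{ rocky_version }}-{{ rocky_arch }}-minimal.iso"
    checksum: "sha256:<expected-digest>"
openqa_worker_count: 2
```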
38
tasks/postfix_relay.yml
Normal file
38
tasks/postfix_relay.yml
Normal file
@ -0,0 +1,38 @@
|
|||||||
|
---
|
||||||
|
# Configure relay
|
||||||
|
- name: Ensure postfix is installed
|
||||||
|
yum:
|
||||||
|
name:
|
||||||
|
- postfix
|
||||||
|
- cyrus-sasl-plain
|
||||||
|
state: present
|
||||||
|
|
||||||
|
- name: Add password map
|
||||||
|
template:
|
||||||
|
src: etc/postfix/sasl_passwd.j2
|
||||||
|
dest: /etc/postfix/sasl_passwd
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0600'
|
||||||
|
notify: rehash_postfix_sasl
|
||||||
|
|
||||||
|
- name: Add relay information to postfix
|
||||||
|
blockinfile:
|
||||||
|
path: /etc/postfix/main.cf
|
||||||
|
marker: "## ANSIBLE MANAGED ##"
|
||||||
|
block: |
|
||||||
|
smtp_tls_note_starttls_offer = yes
|
||||||
|
relayhost = [{{ smtp_relayhost }}]:587
|
||||||
|
smtp_use_tls = yes
|
||||||
|
smtp_sasl_auth_enable = yes
|
||||||
|
smtp_sasl_security_options =
|
||||||
|
smtp_sasl_password_maps = hash:/etc/postfix/sasl_passwd
|
||||||
|
smtp_tls_CAfile = /etc/pki/tls/certs/ca-bundle.crt
|
||||||
|
notify: restart_postfix
|
||||||
|
|
||||||
|
- name: Ensure postfix is running and enabled
|
||||||
|
service:
|
||||||
|
name: postfix
|
||||||
|
state: restarted
|
||||||
|
enabled: true
|
||||||
|
...
|
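The relay configuration above and the sasl_passwd template near the end of this commit rely on `smtp_relayhost`, `smtp_user_name`, and `smtp_user_pass`. An assumed example; credentials would normally come from a vault:

```
# Assumed example values
smtp_relayhost: smtp.example.org
smtp_user_name: relay-user
smtp_user_pass: "{{ vault_smtp_user_pass }}"
```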
3
tasks/repository.yml
Normal file
3
tasks/repository.yml
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
---
|
||||||
|
# no tasks yet
|
||||||
|
...
|
18
tasks/scripts.yml
Normal file
18
tasks/scripts.yml
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
---
|
||||||
|
# Common scripts that rocky uses on nodes
|
||||||
|
- name: Lock Wrapper script
|
||||||
|
copy:
|
||||||
|
src: "usr/local/bin/lock-wrapper"
|
||||||
|
dest: "/usr/local/bin/lock-wrapper"
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0755'
|
||||||
|
|
||||||
|
- name: dmidecode pretty script
|
||||||
|
copy:
|
||||||
|
src: "usr/local/bin/dmidecode-pretty"
|
||||||
|
dest: "/usr/local/bin/dmidecode-pretty"
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0755'
|
||||||
|
...
|
10
tasks/srpmproc.yml
Normal file
10
tasks/srpmproc.yml
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
---
|
||||||
|
- name: Configure SELinux booleans
|
||||||
|
ansible.posix.seboolean:
|
||||||
|
name: "{{ item }}"
|
||||||
|
persistent: true
|
||||||
|
state: true
|
||||||
|
with_items:
|
||||||
|
- httpd_can_network_connect_db
|
||||||
|
- httpd_can_network_connect
|
||||||
|
...
|
46
tasks/ssh_config.yml
Normal file
46
tasks/ssh_config.yml
Normal file
@ -0,0 +1,46 @@
|
|||||||
|
---
|
||||||
|
- name: Ensure SSH server is installed
|
||||||
|
package:
|
||||||
|
name: openssh-server
|
||||||
|
state: present
|
||||||
|
|
||||||
|
- name: Ensure SSH daemon is enabled
|
||||||
|
service:
|
||||||
|
name: sshd
|
||||||
|
enabled: true
|
||||||
|
|
||||||
|
# TODO: Prepare for /etc/ssh/sshd_config.d/* style of configuration
|
||||||
|
- name: SSH daemon configuration - global
|
||||||
|
block:
|
||||||
|
- name: SSH daemon configuration - base
|
||||||
|
template:
|
||||||
|
src: "etc/ssh/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}-sshd_config.j2"
|
||||||
|
dest: "/etc/ssh/sshd_config"
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0600'
|
||||||
|
validate: /usr/sbin/sshd -t -f %s
|
||||||
|
backup: true
|
||||||
|
notify: restart_sshd
|
||||||
|
rescue:
|
||||||
|
- name: Print errors for configuration and validation
|
||||||
|
debug:
|
||||||
|
msg: "Error in SSH daemon configuration or template"
|
||||||
|
|
||||||
|
- name: SSH banner
|
||||||
|
copy:
|
||||||
|
src: "etc/rockybanner"
|
||||||
|
dest: "/etc/rockybanner"
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0644'
|
||||||
|
notify: restart_sshd
|
||||||
|
|
||||||
|
- name: Remove DSA keys
|
||||||
|
file:
|
||||||
|
path: "{{ item }}"
|
||||||
|
state: absent
|
||||||
|
with_items:
|
||||||
|
- /etc/ssh/ssh_host_dsa_key.pub
|
||||||
|
- /etc/ssh/ssh_host_dsa_key
|
||||||
|
...
|
22
tasks/variable_loader_common.yml
Normal file
22
tasks/variable_loader_common.yml
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
---
|
||||||
|
- name: Standard System Configuration Variables
|
||||||
|
block:
|
||||||
|
- name: Loading Variables from OS Common
|
||||||
|
include_vars: "{{ item }}"
|
||||||
|
with_items:
|
||||||
|
- "{{ ansible_distribution }}.yml"
|
||||||
|
|
||||||
|
- name: Create overrides if we're an IPA Replica
|
||||||
|
include_vars: "{{ item }}"
|
||||||
|
with_first_found:
|
||||||
|
- "ipaserver.yml"
|
||||||
|
when: "'ipaserver' in group_names"
|
||||||
|
|
||||||
|
- name: Check if system is EFI
|
||||||
|
stat:
|
||||||
|
path: "/sys/firmware/efi"
|
||||||
|
register: efi_installed
|
||||||
|
|
||||||
|
always:
|
||||||
|
- debug: msg="Variables are now loaded"
|
||||||
|
...
|
1
templates/README.md
Normal file
1
templates/README.md
Normal file
@ -0,0 +1 @@
|
|||||||
|
Templates go here
|
37
templates/etc/httpd/conf.d/bugzilla.conf.j2
Normal file
37
templates/etc/httpd/conf.d/bugzilla.conf.j2
Normal file
@ -0,0 +1,37 @@
|
|||||||
|
<VirtualHost *:80>
|
||||||
|
ServerAdmin infrastructure@rockylinux.org
|
||||||
|
DocumentRoot "{{ bugzilla_dir }}"
|
||||||
|
ServerName bugs.rockylinux.org
|
||||||
|
TransferLog /var/log/httpd/bugzilla_access.log
|
||||||
|
ErrorLog /var/log/httpd/bugzilla_error.log
|
||||||
|
<Directory "{{ bugzilla_dir }}/">
|
||||||
|
AddHandler cgi-script .cgi
|
||||||
|
DirectoryIndex index.cgi
|
||||||
|
Options MultiViews FollowSymLinks ExecCGI
|
||||||
|
AllowOverride All
|
||||||
|
Order allow,deny
|
||||||
|
Allow from all
|
||||||
|
</Directory>
|
||||||
|
</VirtualHost>
|
||||||
|
|
||||||
|
<VirtualHost *:443>
|
||||||
|
SSLEngine on
|
||||||
|
SSLHonorCipherOrder on
|
||||||
|
SSLCipherSuite PROFILE=SYSTEM
|
||||||
|
SSLProxyCipherSuite PROFILE=SYSTEM
|
||||||
|
SSLCertificateFile /etc/pki/tls/certs/bugs.rockylinux.org.crt
|
||||||
|
SSLCertificateKeyFile /etc/pki/tls/private/bugs.rockylinux.org.key
|
||||||
|
ServerAdmin infrastructure@rockylinux.org
|
||||||
|
DocumentRoot "{{ bugzilla_dir }}"
|
||||||
|
ServerName bugs.rockylinux.org
|
||||||
|
TransferLog /var/log/httpd/bugzilla_access.log
|
||||||
|
ErrorLog /var/log/httpd/bugzilla_error.log
|
||||||
|
<Directory "{{ bugzilla_dir }}/">
|
||||||
|
AddHandler cgi-script .cgi
|
||||||
|
DirectoryIndex index.cgi
|
||||||
|
Options MultiViews FollowSymLinks ExecCGI
|
||||||
|
AllowOverride All
|
||||||
|
Order allow,deny
|
||||||
|
Allow from all
|
||||||
|
</Directory>
|
||||||
|
</VirtualHost>
|
33
templates/etc/httpd/conf.d/mantis.conf.j2
Normal file
33
templates/etc/httpd/conf.d/mantis.conf.j2
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
<VirtualHost *:80>
|
||||||
|
ServerAdmin infrastructure@rockylinux.org
|
||||||
|
DocumentRoot "/var/www/mantisbt-{{ mantis_version }}"
|
||||||
|
ServerName bugs.rockylinux.org
|
||||||
|
TransferLog /var/log/httpd/mantis_access.log
|
||||||
|
ErrorLog /var/log/httpd/mantis_error.log
|
||||||
|
<Directory "/var/www/mantisbt-{{ mantis_version }}/">
|
||||||
|
Options MultiViews FollowSymlinks
|
||||||
|
AllowOverride All
|
||||||
|
Order allow,deny
|
||||||
|
Allow from all
|
||||||
|
</Directory>
|
||||||
|
</VirtualHost>
|
||||||
|
|
||||||
|
<VirtualHost *:443>
|
||||||
|
SSLEngine on
|
||||||
|
SSLHonorCipherOrder on
|
||||||
|
SSLCipherSuite PROFILE=SYSTEM
|
||||||
|
SSLProxyCipherSuite PROFILE=SYSTEM
|
||||||
|
SSLCertificateFile /etc/pki/tls/certs/bugs.rockylinux.org.crt
|
||||||
|
SSLCertificateKeyFile /etc/pki/tls/private/bugs.rockylinux.org.key
|
||||||
|
ServerAdmin infrastructure@rockylinux.org
|
||||||
|
DocumentRoot "/var/www/mantisbt-{{ mantis_version }}"
|
||||||
|
ServerName bugs.rockylinux.org
|
||||||
|
TransferLog /var/log/httpd/mantis_access.log
|
||||||
|
ErrorLog /var/log/httpd/mantis_error.log
|
||||||
|
<Directory "/var/www/mantisbt-{{ mantis_version }}/">
|
||||||
|
Options MultiViews FollowSymlinks
|
||||||
|
AllowOverride All
|
||||||
|
Order allow,deny
|
||||||
|
Allow from all
|
||||||
|
</Directory>
|
||||||
|
</VirtualHost>
|
1
templates/etc/postfix/sasl_passwd.j2
Normal file
1
templates/etc/postfix/sasl_passwd.j2
Normal file
@ -0,0 +1 @@
|
|||||||
|
[{{ smtp_relayhost }}]:587 {{ smtp_user_name }}:{{ smtp_user_pass }}
|
169
templates/opt/mirrormanager/mirrormanager2.cfg.j2
Normal file
@ -0,0 +1,169 @@
# -*- coding: utf-8 -*-

'''
MirrorManager2 sample configuration.
'''

###
# Most important configuration items
###

# the number of items to display on the search pages
# Default: ``50``.
ITEMS_PER_PAGE = 50


# URL to the database server:
DB_URL='postgresql://{{ mirrormanager_db.user }}:{{ mirrormanager_db.password }}@{{ mirrormanager_db.host }}:{{ mirrormanager_db.port }}/{{ mirrormanager_db.dbname }}'

# secret key used to generate unique csrf token
SECRET_KEY = '{{ mirrormanager_secret_key }}'

# Seed used to make the password harder to brute force in case of leaking
# This should be kept really secret!
PASSWORD_SEED = "{{ mirrormanager_password_seed }}"

# Make browsers send session cookie only via HTTPS
SESSION_COOKIE_SECURE=True

###
# Other configuration items for the web-app
###

from datetime import timedelta

# Set the time after which the session expires. Flask's default is 31 days.
# Default: ``timedelta(hours=1)`` corresponds to 1 hour.
PERMANENT_SESSION_LIFETIME = timedelta(hours=1)

# Folder containing the theme to use.
# Default: ``fedora``.
THEME_FOLDER = 'fedora'

# Which authentication method to use; can be `fas` or `local`.
# Default: ``fas``.
MM_AUTHENTICATION = 'fas'

# If the authentication method is `fas`, groups the user must belong to
# to be recognized as an admin.
ADMIN_GROUP = ['sysadmin-main', 'sysadmin-web']

# Email addresses of the admins to which notifications or errors are sent
ADMIN_EMAIL = ['admin@rockylinux.org', 'neil@rockylinux.org']

# Email address used in the 'From' field of the emails sent.
# Default: ``nobody@fedoraproject.org``.
EMAIL_FROM = 'nobody@rockylinux.org'

# SMTP server to use.
# Default: ``localhost``.
SMTP_SERVER = 'localhost'

# If the SMTP server requires authentication, fill in the information here
# SMTP_USERNAME = 'username'
# SMTP_PASSWORD = 'password'

# When this is set to True, an additional menu item is shown which can
# be used to browse the different statistics generated by
# mirrorlist_statistics.py.
SHOW_STATISTICS = True

# This is the directory the code enabled by SHOW_STATISTICS will use
# to locate the statistics files and display them.
STATISTICS_BASE = '/var/www/mirrormanager-statistics/data'

# Countries which have to be excluded.
EMBARGOED_COUNTRIES = ['CU', 'IR', 'KP', 'SD', 'SY']

# When this is set to True, an additional menu item is shown which
# displays the maps generated with mm2_generate-worldmap.
SHOW_MAPS = True

# Location of the static map displayed in the map tab.
STATIC_MAP = '/map/map.png'

# Location of the interactive openstreetmap based map.
INTERACTIVE_MAP = '/map/mirrors.html'

# The crawler can generate propagation statistics which can be
# converted into svg/pdf with mm2_propagation. These files
# can be displayed next to the statistics and maps tab if desired.
SHOW_PROPAGATION = True

# Where to look for the above mentioned propagation images.
PROPAGATION_BASE = '/var/www/mirrormanager-statistics/data/propagation'

# Disable master rsync server ACL
# Fedora does not use it and therefore it is set to False
MASTER_RSYNC_ACL = False

# When this is set to True, the session cookie will only be returned to the
# server via ssl (https). If you connect to the server via plain http, the
# cookie will not be sent. This prevents sniffing of the cookie contents.
# This may be set to False when testing your application but should always
# be set to True in production.
# Default: ``True``.
MM_COOKIE_REQUIRES_HTTPS = True

# The name of the cookie used to store the session id.
# Default: ``.MirrorManager``.
MM_COOKIE_NAME = 'MirrorManager'

# If this variable is set (and the directory exists) the crawler
# will create per host log files in MM_LOG_DIR/crawler/<hostid>.log
# which can then be used in the web interface by the mirror admins.
# Other parts besides the crawler are also using this variable to
# decide where to store log files.
MM_LOG_DIR = '/var/log/mirrormanager'

# This is used to exclude certain protocols from being entered
# for host category URLs at all.
# The following is the default for Fedora to exclude FTP based
# mirrors from being added. Removing this configuration option
# or setting it to '' removes any protocol restrictions.
MM_PROTOCOL_REGEX = '^(?!ftp)(.*)$'

# The netblock size parameters define which netblock sizes can be
# added by a site administrator. Larger networks can only be added by
# mirrormanager admins.
MM_IPV4_NETBLOCK_SIZE = '/16'
MM_IPV6_NETBLOCK_SIZE = '/32'

# If not specified the application will rely on the root_url when sending
# emails, otherwise it will use this URL
# Default: ``None``.
APPLICATION_URL = None

# Boolean specifying whether to check the user's IP address when retrieving
# its session. This makes things more secure (and is therefore on by default)
# but under certain setups it might not work (for example if there are proxies
# in front of the application).
CHECK_SESSION_IP = True

# Specify additional rsync parameters for the crawler
# # --timeout 14400: abort rsync crawl after 4 hours
# # --no-human-readable: because rsync made things pretty by default in 3.1.x
CRAWLER_RSYNC_PARAMETERS = '--no-motd --timeout 14400 --exclude=lost+found --no-human-readable'

# This is a list of directories which MirrorManager will ignore while guessing
# the version and architecture from a path.
SKIP_PATHS_FOR_VERSION = [
    'pub/alt',
    'pub/archive',
]

###
# Configuration options used by the crons
###

# Specify whether the crawler should send a report by email
CRAWLER_SEND_EMAIL = False

# If a host fails for CRAWLER_AUTO_DISABLE times in a row
# the host will be disabled automatically (user_active)
CRAWLER_AUTO_DISABLE = 4

UMDL_PREFIX = '/srv/'

umdl_master_directories = [
]
1
templates/tmp/mantis_import.sql.j2
Normal file
@ -0,0 +1 @@
# Empty
11
templates/var/www/bugzilla/answer
Normal file
@ -0,0 +1,11 @@
$answer{'db_host'} = '{{ bugzilla_db_host }}';
$answer{'db_driver'} = 'pg';
$answer{'db_port'} = 0;
$answer{'db_name'} = '{{ bugzilla_db_name }}';
$answer{'db_user'} = '{{ bugzilla_db_user }}';
$answer{'db_pass'} = '{{ bugzilla_db_pass }}';
$answer{'urlbase'} = 'https://bugs.rockylinux.org/';
$answer{'ADMIN_EMAIL'} = 'infrastructure@rockylinux.org';
$answer{'ADMIN_PASSWORD'} = '{{ bugzilla_admin_password }}';
$answer{'ADMIN_REALNAME'} = 'Infrastructure';
$answer{'NO_PAUSE'} = 1;
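This answers file only takes effect when it is passed to Bugzilla's `checksetup.pl`, which accepts an answers file for unattended installs. A minimal sketch of how the role might render and consume it (the task names and the temporary destination path are illustrative, not part of this commit; `bugzilla_dir` comes from vars/bugzilla.yml):

```
# Sketch only: render the answers file and run checksetup.pl unattended.
- name: Render the Bugzilla answers file
  ansible.builtin.template:
    src: var/www/bugzilla/answer
    dest: /tmp/bugzilla_answer
    mode: "0600"

- name: Run checksetup.pl non-interactively against the answers file
  ansible.builtin.command: ./checksetup.pl /tmp/bugzilla_answer
  args:
    chdir: "{{ bugzilla_dir }}"
```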
19
templates/var/www/bugzilla/localconfig.j2
Normal file
@ -0,0 +1,19 @@
$create_htaccess = 1;
$webservergroup = 'apache';
$use_suexec = 0;
$db_driver = 'pg';
$db_host = '{{ bugzilla_db_host }}';
$db_name = '{{ bugzilla_db_name }}';
$db_user = '{{ bugzilla_db_user }}';
$db_pass = '{{ bugzilla_db_pass }}';
$db_port = 0;
$db_sock = '';
$db_check = 1;
$db_mysql_ssl_ca_file = '';
$db_mysql_ssl_ca_path = '';
$db_mysql_ssl_client_cert = '';
$db_mysql_ssl_client_key = '';
$index_html = 0;
$interdiffbin = '/usr/bin/interdiff';
$diffpath = '/usr/bin';
$site_wide_secret = '{{ lookup('password', '/dev/null length=54 chars=ascii_letters') }}';
46
templates/var/www/mantis/config/config_inc.php.j2
Normal file
@ -0,0 +1,46 @@
<?php
$g_hostname = '{{ mantis_db_host }}';
$g_db_type = 'pgsql';
$g_database_name = '{{ mantis_db_name }}';
$g_db_username = '{{ mantis_db_user }}';
$g_db_password = '{{ mantis_db_pass }}';

$g_default_timezone = 'UTC';

$g_crypto_master_salt = '{{ cryptosalt_string.stdout }}';

# Added
$g_login_method = 'LDAP';
$g_ldap_server = '{{ rocky_ipaserver_lb }}';
$g_ldap_port = '389';
$g_ldap_root_dn = '{{ rocky_ldap_user_basedn }}';
#$g_ldap_organization = '(objectClass=posixAccount)';
$g_ldap_protocol_version = 3;
$g_ldap_network_timeout = 30;
$g_ldap_follow_referrals = ON;
$g_ldap_bind_dn = '{{ mantis_binder_user }}';
$g_ldap_bind_passwd = '{{ mantis_binder_pass }}';
$g_ldap_uid_field = 'uid';
$g_ldap_realname_field = 'cn';
$g_use_ldap_realname = ON;
$g_use_ldap_email = ON;

$g_webmaster_email = 'infrastructure@rockylinux.org';
$g_from_email = 'auto@rockylinux.org';
$g_return_path_email = 'auto@rockylinux.org';
$g_from_name = 'Rocky Linux BugTracker';
$g_allow_file_upload = ON;
$g_file_upload_method = DATABASE; # or DISK
$g_dropzone_enabled = ON;
$g_show_realname = ON;
$g_show_avatar = ON;
$g_allowed_files = 'log,patch,txt';
$g_disallowed_files = 'exe,pl,sh,py,c,cpp,rar,zip,rpm';
$g_window_title = 'Rocky BugTracker';
$g_allow_signup = ON;
$g_allow_anonymous_login = ON;
$g_anonymous_account = 'anonymous';
$g_enable_email_notification = ON;

# Cookie problems
$g_allow_permanent_cookie = OFF;
3
tests/README.md
Normal file
@ -0,0 +1,3 @@
# Tests

Basic tests for the playbooks and tasks go here. Generally you need a `test.yml` and an `inventory` file containing at least `localhost`.
1
tests/inventory
Normal file
@ -0,0 +1 @@
localhost
5
tests/test.yml
Normal file
@ -0,0 +1,5 @@
---
- hosts: localhost
  remote_user: root
  tasks:
    - import_tasks: example.yml
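This scaffold can be exercised locally with something like `ansible-playbook -i tests/inventory tests/test.yml`; note that `example.yml` is only a placeholder import and has to exist (or be swapped for a real task file) before that command will run. The invocation is illustrative and not part of this commit.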
1
vars/CentOS.yml
Symbolic link
@ -0,0 +1 @@
RedHat.yml
162
vars/RedHat.yml
Normal file
@ -0,0 +1,162 @@
# Variables for our common module for RedHat
---

bin_su: /usr/bin/su
bin_sudo: /usr/bin/sudo

# grub variables
grub_boot_options: audit=1
grub_config_path_link: /etc/grub2.cfg
grub_config_path_efi: /etc/grub2-efi.cfg

ipatype: client

# Removing TFTP for now because there will likely be tftp/pxe servers
remove_packages:
  - nc
  - wireshark
  - prelink
  - talk
  - talk-server
  - rsh
  - lftp

# security limits
limits:
  - {domain: '*', limit_type: hard, limit_item: core, value: 0}

# sysctl settings
sysctl_config:
  net.ipv4.ip_forward: 0
  net.ipv4.conf.all.rp_filter: 1
  net.ipv4.conf.default.rp_filter: 1
  net.ipv4.conf.all.accept_source_route: 0
  net.ipv4.conf.default.accept_source_route: 0
  net.ipv4.conf.all.log_martians: 1
  net.ipv4.conf.default.log_martians: 1
  net.ipv4.icmp_echo_ignore_broadcasts: 1
  net.ipv4.icmp_ignore_bogus_error_responses: 1
  net.ipv4.tcp_syncookies: 1
  net.ipv4.conf.all.accept_redirects: 0
  net.ipv4.conf.default.accept_redirects: 0
  net.ipv4.conf.all.send_redirects: 0
  net.ipv4.conf.default.send_redirects: 0
  net.ipv4.conf.all.secure_redirects: 0
  net.ipv4.conf.default.secure_redirects: 0
  net.ipv6.conf.all.accept_redirects: 0
  net.ipv6.conf.default.accept_redirects: 0
  net.ipv6.conf.all.forwarding: 0
  net.ipv6.conf.all.accept_ra: 0
  net.ipv6.conf.default.accept_ra: 0
  net.ipv6.conf.all.accept_source_route: 0
  net.ipv6.conf.default.accept_source_route: 0
  kernel.randomize_va_space: 2
  fs.suid_dumpable: 0

# login.defs
login_umask: 077
login_create_home: "yes"
login_encrypt_method: SHA512
login_md5_crypt_enab: "no"
login_max_days: 84
login_min_days: 7
login_min_len: 14
login_warn_age: 7
login_dcredit: -1
login_lcredit: -1
login_ucredit: -1
login_ocredit: -1
login_cron_directories:
  - /etc/cron.hourly
  - /etc/cron.daily
  - /etc/cron.weekly
  - /etc/cron.monthly
  - /etc/cron.d
login_cron_allows:
  - /etc/cron.allow
  - /etc/at.allow
login_cron_denies:
  - /etc/cron.deny
  - /etc/at.deny

# modprobe
modprobe_unused_filesystems:
  - dccp
  - sctp
  - bluetooth
  - freevxfs
  - cramfs
  - jffs2
  - hfs
  - hfsplus
  - squashfs
  - udf
  - tipc
  - usb_storage
  - vfat

# auditd
audit_package: audit
audit_auid: 1000
audit_buffer: 8192
audit_identity_list:
  - /etc/group
  - /etc/passwd
  - /etc/gshadow
  - /etc/shadow
  - /etc/security/opasswd
audit_logins:
  - /var/log/faillog
  - /var/log/lastlog
  - /var/log/tallylog
  - /var/log/faillock/
  - /var/log/wtmp
  - /var/log/btmp
audit_session:
  - /var/run/utmp
audit_suid_list:
  - /usr/libexec/sssd/proxy_child
  - /usr/libexec/sssd/ldap_child
  - /usr/libexec/sssd/krb5_child
  - /usr/libexec/sssd/selinux_child
  - /usr/libexec/dbus-1/dbus-daemon-launch-helper
  - /usr/libexec/utempter/utempter
  - /usr/libexec/openssh/ssh-keysign
  - /usr/lib/polkit-1/polkit-agent-helper-1
  - /usr/sbin/usernetctl
  - /usr/sbin/postqueue
  - /usr/sbin/unix_chkpwd
  - /usr/sbin/postdrop
  - /usr/sbin/pam_timestamp_check
  - /usr/sbin/netreport
  - /usr/sbin/mount.nfs
  - /usr/bin/su
  - /usr/bin/ksu
  - /usr/bin/write
  - /usr/bin/newgrp
  - /usr/bin/chage
  - /usr/bin/mount
  - /usr/bin/ssh-agent
  - /usr/bin/sudo
  - /usr/bin/passwd
  - /usr/bin/gpasswd
  - /usr/bin/at
  - /usr/bin/wall
  - /usr/bin/chsh
  - /usr/bin/locate
  - /usr/bin/chfn
  - /usr/bin/umount
  - /usr/bin/crontab
  - /usr/bin/pkexec

disable_svc:
  - cups
  - nfs-server
  - avahi-daemon

enable_svc:
  - postfix

syslog_packages:
  - rsyslog
...
1
vars/Rocky.yml
Symbolic link
@ -0,0 +1 @@
RedHat.yml
53
vars/bugzilla.yml
Normal file
@ -0,0 +1,53 @@
---
# bugzilla vars
bugzilla_version: 5.0.6
bugzilla_checksum: "sha256:dd41a4b0a3a1df0d193bc056f2e3711d7b5605718a00bf6e5d4177bf1be86f77"
bugzilla_dir: "/var/www/bugzilla"
bugzilla_pkg:
  - perl
  - perl-CPAN
  - perl-DBD-Pg
  - perl-LDAP
  - perl-JSON-RPC-CGI
  - perl-JSON-RPC-Daemon
  - perl-JSON-RPC-Apache2
  - perl-JSON-XS
  - perl-XMLRPC-Lite
  - perl-CGI
  - perl-DateTime
  - perl-DateTime-TimeZone
  - perl-Template-Toolkit
  - perl-Email-Sender
  - perl-Email-MIME
  - perl-List-MoreUtils
  - perl-Math-Random-ISAAC
  - perl-GD
  - patchutils
  - httpd
  - mod_ssl
  - mod_perl
  - mod_perl-devel
  - httpd-devel
  - gd-devel
  - graphviz
  - patchutils
  - gcc
  - openssl-devel
  - wget
  - curl
bugzilla_db_host: db.rockylinux.org
bugzilla_db_name: bugzilla_db
bugzilla_db_user: bugzilla

# Vault
# bugzilla_db_pass: ThisIsNotThePassword!

ipa_getcert_requested_hostnames:
  - name: "{{ ansible_fqdn }}"
    owner: apache
    key_location: "/etc/pki/tls/private/bugs.rockylinux.org.key"
    cert_location: "/etc/pki/tls/certs/bugs.rockylinux.org.crt"
    postcmd: "/bin/systemctl reload httpd"
    cnames:
      - "bugs.rockylinux.org"
...
23
vars/common.yml
Normal file
@ -0,0 +1,23 @@
---
rocky_ipa_realm: "ROCKYLINUX.ORG"
rocky_ldap_bind_dn: "uid=binder,cn=sysaccounts,cn=etc,dc=rockylinux,dc=org"
rocky_ldap_user_basedn: "cn=users,cn=accounts,dc=rockylinux,dc=org"
rocky_ldap_group_basedn: "cn=groups,cn=accounts,dc=rockylinux,dc=org"
rocky_ldap_account_basedn: "cn=accounts,dc=rockylinux,dc=org"
# Requires Jinja 2.9+
rocky_ipaserver_list: "{{ groups['ipaserver'] + groups['ipareplicas'] }}"
rocky_ipaserver_lb: "ipa-us-east-2.rockylinux.org"
# These will be in a vault
rocky_ldap_bind_pw: "{{ ipa_binder_password }}"

rocky_smtp_address: "email-smtp.us-east-2.amazonaws.com"
rocky_smtp_port: "587"
# username / pw still need to be set up
rocky_smtp_domain: "rockylinux.org"
rocky_smtp_authentication: "login"
rocky_smtp_enable_starttls_auto: "true"
rocky_smtp_tls: "true"
rocky_smtp_openssl_verify_mode: "none"
rocky_smtp_ca_path: "/etc/pki/tls/certs"
rocky_smtp_ca_file: "/etc/pki/tls/certs/ca-bundle.crt"
...
3
vars/ipaserver.yml
Normal file
@ -0,0 +1,3 @@
---
ipatype: server
...
2
vars/main.yml
Normal file
@ -0,0 +1,2 @@
---
# Vars that should not be overridden
32
vars/mantis.yml
Normal file
@ -0,0 +1,32 @@
---
# mantis vars
mantis_version: 2.25.2
mantis_checksum: "sha256:8b087e71594fd70d2d2e39d2d2e1285b47a9919d8d8fd268df009df76ebc3671"
mantis_pkg:
  - php
  - php-ldap
  - httpd
  - mod_ssl
  - php-pgsql
  - php-mbstring
  - php-curl
  - openldap
  - php-json
mantis_db_host: db.rockylinux.org
mantis_db_name: mantisdb
mantis_db_user: mantis
mantis_binder_user: "{{ rocky_ldap_bind_dn }}"
mantis_binder_pass: "{{ rocky_ldap_bind_pw }}"

# Vault
# mantis_db_pass: ThisIsNotThePassword!

ipa_getcert_requested_hostnames:
  - name: "{{ ansible_fqdn }}"
    owner: apache
    key_location: "/etc/pki/tls/private/bugs.rockylinux.org.key"
    cert_location: "/etc/pki/tls/certs/bugs.rockylinux.org.crt"
    postcmd: "/bin/systemctl reload httpd"
    cnames:
      - "bugs.rockylinux.org"
...
117
vars/mirrormanager.yml
Normal file
@ -0,0 +1,117 @@
---
firewall_rules:
  - port: 443/tcp
    permanent: true
    state: enabled
  - port: 9100/tcp
    permanent: true
    state: enabled

tls_ca_cert: "/etc/pki/tls/certs/ca-bundle.crt"
tls_cert: "/etc/pki/tls/certs/{{ ansible_fqdn }}.crt"
tls_key: "/etc/pki/tls/private/{{ ansible_fqdn }}.key"

ipa_getcert_requested_hostnames:
  - name: "{{ ansible_fqdn }}"
    owner: nginx
    key_location: "{{ tls_key }}"
    cert_location: "{{ tls_cert }}"
    postcmd: "systemctl reload httpd"

mirrormanager_dbtype: postgres
mirrormanager_db:
  host: "db.rockylinux.org"
  port: 5432
  user: mirrormanager
  password: "{{ _mirrormanager_db_rw_pass }}"
  dbname: mirrormanager_db
  ssl: true

mirrormanager_user:
  name: mirrormanager
  comment: "Mirrormanager user"
  group: mirrormanager
  gid: 10005
  uid: 10004

mirrormanager_dir: /opt/mirrormanager

####################
### NGINX CONFIG ###
####################

# no demo config/template
nginx_config_html_demo_template_enable: false

nginx_config_selinux: true
nginx_config_selinux_enforcing: true

nginx_config_start: true

nginx_config_debug_output: true
nginx_config_debug_tasks: true

# nginx_config_cleanup: true

nginx_config_http_template_enable: true
nginx_config_main_template_enable: true

nginx_config_http_template:
  default:
    template_file: http/default.conf.j2
    conf_file_name: default.conf
    conf_file_location: /etc/nginx/conf.d/
    servers:
      redirect_https:
        listen:
          v6:
            ip: '[::]'  # Wrap in square brackets for IPv6 addresses
            port: 80
            opts: ['default_server']
          v4:
            ip: ''  # Wrap in square brackets for IPv6 addresses
            port: 80
            opts: ['default_server']
        server_name: "{{ ansible_fqdn }}"
        error_page: /usr/share/nginx/html
        access_log:
          - name: main
            location: /var/log/nginx/access.log
        error_log:
          location: /var/log/nginx/error.log
          level: warn
        root: "{{ mirrormanager_dir }}"
        https_redirect: $host
      mirrormanager:
        listen:
          v6:
            ip: '[::]'  # Wrap in square brackets for IPv6 addresses
            port: 443
            ssl: true
            opts: ['http2', 'default_server']
          v4:
            ip: ''  # Wrap in square brackets for IPv6 addresses
            port: 443
            ssl: true
            opts: ['http2', 'default_server']
        ssl:
          cert: "{{ tls_cert }}"
          key: "{{ tls_key }}"
        server_name: "{{ ansible_fqdn }}"
        error_page: /usr/share/nginx/html
        access_log:
          - name: main
            location: /var/log/nginx/access.log
        error_log:
          location: /var/log/nginx/error.log
          level: warn
        root: "{{ mirrormanager_dir }}"
        web_server:
          locations:
            default:
              location: /
              custom_options:
                - "proxy_pass http://localhost:3000/;"
          http_demo_conf: false
...
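The `firewall_rules` list at the top of this file maps one-to-one onto parameters of the `ansible.posix.firewalld` module, so a consuming task can simply loop over it. A minimal sketch, assuming the role applies the rules this way (the task itself is illustrative and not part of this commit):

```
# Sketch only: apply the firewall_rules list with firewalld.
- name: Apply firewall rules
  ansible.posix.firewalld:
    port: "{{ item.port }}"
    permanent: "{{ item.permanent }}"
    state: "{{ item.state }}"
    immediate: true
  loop: "{{ firewall_rules }}"
```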
19
vars/mounts/bootstrap_staging.yml
Normal file
@ -0,0 +1,19 @@
---
x-efs_fs_opts_common: &common_fs_opts
  fstype: efs
  fsopts:
    - _netdev
    - tls
    - iam
    - rw

mounts:
  - name: prod-build-compose
    <<: *common_fs_opts
    fsid: fs-XXXXXXXX
    mount_point: /mnt/compose
    ip_map:
      us-east-2a: 10.100.100.250
      us-east-2b: 10.100.101.250
      us-east-2c: 10.100.102.250
...
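The `x-efs_fs_opts_common` key in these mount files exists only to carry a YAML anchor; each entry under `mounts` pulls the shared EFS options in through the `<<: *common_fs_opts` merge key. For illustration, the entry above is equivalent to the following expanded form once the merge is applied (values are the ones from this file):

```
# Expanded form of the merged mount entry (illustration only)
mounts:
  - name: prod-build-compose
    fstype: efs
    fsopts:
      - _netdev
      - tls
      - iam
      - rw
    fsid: fs-XXXXXXXX
    mount_point: /mnt/compose
    ip_map:
      us-east-2a: 10.100.100.250
      us-east-2b: 10.100.101.250
      us-east-2c: 10.100.102.250
```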
27
vars/mounts/mirrormanager.yml
Normal file
@ -0,0 +1,27 @@
---
x-efs_fs_opts_common: &common_fs_opts
  fstype: efs
  fsopts:
    - _netdev
    - tls
    - iam
    - rw

mounts:
  - name: build-repos-internal
    <<: *common_fs_opts
    fsid: fs-XXXXXXXX
    mount_point: /mnt/repos-internal
    ip_map:
      us-east-2a: 10.100.100.248
      us-east-2b: 10.100.101.248
      us-east-2c: 10.100.102.248
  - name: build-repos-production
    <<: *common_fs_opts
    fsid: fs-XXXXXXXX
    mount_point: /mnt/repos-production
    ip_map:
      us-east-2a: 10.100.100.248
      us-east-2b: 10.100.101.248
      us-east-2c: 10.100.102.248
...
27
vars/mounts/repopool.yml
Normal file
@ -0,0 +1,27 @@
---
x-efs_fs_opts_common: &common_fs_opts
  fstype: efs
  fsopts:
    - _netdev
    - tls
    - iam
    - rw

mounts:
  - name: prod-build-repos-staging
    <<: *common_fs_opts
    fsid: fs-XXXXXXXX
    mount_point: /mnt/repos-staging
    ip_map:
      us-east-2a: 10.101.100.249
      us-east-2b: 10.101.101.249
      us-east-2c: 10.101.102.249
  - name: prod-build-repos-production
    <<: *common_fs_opts
    fsid: fs-YYYYYYYY
    mount_point: /mnt/repos-production
    ip_map:
      us-east-2a: 10.101.100.246
      us-east-2b: 10.101.101.246
      us-east-2c: 10.101.102.246
...
51
vars/mounts/srpmproc.yml
Normal file
@ -0,0 +1,51 @@
---
x-efs_fs_opts_common: &common_fs_opts
  fstype: efs
  fsopts:
    - _netdev
    - tls
    - iam
    - rw

mounts:
  - name: prod-build-repos-internal
    <<: *common_fs_opts
    fsid: fs-XXXXXXX1
    mount_point: /mnt/repos-internal
    ip_map:
      us-east-2a: 10.101.100.248
      us-east-2b: 10.101.101.248
      us-east-2c: 10.101.102.248
  - name: prod-koji
    <<: *common_fs_opts
    fsid: fs-XXXXXXX2
    mount_point: /mnt/koji
    ip_map:
      us-east-2a: 10.101.100.247
      us-east-2b: 10.101.101.247
      us-east-2c: 10.101.102.247
  - name: prod-build-compose
    <<: *common_fs_opts
    fsid: fs-XXXXXXX3
    mount_point: /mnt/compose
    ip_map:
      us-east-2a: 10.101.100.250
      us-east-2b: 10.101.101.250
      us-east-2c: 10.101.102.250
  - name: prod-build-repos-staging
    <<: *common_fs_opts
    fsid: fs-XXXXXXX4
    mount_point: /mnt/repos-staging
    ip_map:
      us-east-2a: 10.101.100.249
      us-east-2b: 10.101.101.249
      us-east-2c: 10.101.102.249
  - name: prod-build-repos-production
    <<: *common_fs_opts
    fsid: fs-XXXXXXX5
    mount_point: /mnt/repos-production
    ip_map:
      us-east-2a: 10.101.100.246
      us-east-2b: 10.101.101.246
      us-east-2c: 10.101.102.246
...
6
vars/mqtt.yml
Normal file
@ -0,0 +1,6 @@
---
# mqtt settings
mqtt_tls_ca_cert: "/etc/pki/tls/certs/ca-bundle.crt"
mqtt_tls_cert: "/etc/pki/tls/certs/{{ ansible_fqdn }}.crt"
mqtt_tls_key: "/etc/pki/tls/private/{{ ansible_fqdn }}.key"
...
77
vars/openqa.yml
Normal file
@ -0,0 +1,77 @@
---
# Default OpenQA user and group
openqa_user: geekotest
openqa_group: geekotest

# OpenQA data directory
openqa_homedir: /var/lib/openqa

# URL for the repository containing the RockyLinux test automation
openqa_rocky_testing_repo: "https://github.com/rocky-linux/os-autoinst-distri-rocky.git"

# The RockyLinux version to fetch for testing
rocky_version: 8.5

# The RockyLinux architecture to fetch for testing
rocky_arch: x86_64

# Public download URL for RockyLinux ISOs
rocky_iso_download_url: "https://download.rockylinux.org/pub/rocky/8.5/isos/{{ rocky_arch }}"

# Rocky Linux ISOs
openqa_isos:
  - name: "Rocky-{{ rocky_version }}-{{ rocky_arch }}-boot.iso"
    checksum: "sha256:5a0dc65d1308e47b51a49e23f1030b5ee0f0ece3702483a8a6554382e893333c"
  - name: "Rocky-{{ rocky_version }}-{{ rocky_arch }}-dvd1.iso"
    checksum: "sha256:0081f8b969d0cef426530f6d618b962c7a01e71eb12a40581a83241f22dfdc25"
  - name: "Rocky-{{ rocky_version }}-{{ rocky_arch }}-minimal.iso"
    checksum: "sha256:4eb2ae6b06876205f2209e4504110fe4115b37540c21ecfbbc0ebc11084cb779"

# The host the openqa-cli should access when it runs.
# Change this if you want to access your OpenQA via an
# alternative URL
openqa_host: localhost

# These are the default client credentials.
# They will expire 24 hours after installation and must
# be replaced with new ones.
openqa_client_key: 1234567890ABCDEF
openqa_client_secret: 1234567890ABCDEF

# The number of workers to enable on this system
openqa_worker_count: 1

# Port range to open for VNC access to local workers.
# The max port should be 5990 + n where n is the total
# number of workers you want to enable on your system.
openqa_min_vnc_port: 5991
openqa_max_vnc_port: "{{ 5990 + openqa_worker_count|int }}"

# Packages to install
openqa_packages:
  - git
  - vim-enhanced
  - openqa
  - openqa-httpd
  - openqa-worker
  - fedora-messaging
  - guestfs-tools
  - libguestfs-xfs
  - python3-fedfind
  - python3-libguestfs
  - libvirt-daemon-config-network
  - virt-install
  - withlock
  - postgresql-server
  - perl-REST-Client

# Services to start and enable
openqa_services:
  - sshd
  - httpd
  - openqa-gru
  - openqa-scheduler
  - openqa-websockets
  - openqa-webui
  - fm-consumer@fedora_openqa_scheduler
...
65
vars/pinnwand.yml
Normal file
@ -0,0 +1,65 @@
---
# pinnwand

firewall_rules:
  - port: 443/tcp
    permanent: true
    state: enabled
  - port: 9100/tcp
    permanent: true
    state: enabled

tls_ca_cert: "/etc/pki/tls/certs/ca-bundle.crt"
tls_cert: "/etc/pki/tls/certs/{{ ansible_fqdn }}.crt"
tls_key: "/etc/pki/tls/private/{{ ansible_fqdn }}.key"

ipa_getcert_requested_hostnames:
  - name: "{{ ansible_fqdn }}"
    owner: nginx
    key_location: "{{ tls_key }}"
    cert_location: "{{ tls_cert }}"
    postcmd: "/bin/systemctl reload nginx"

pinnwand_config:
  database:
    scheme: postgresql
    username: pinnwand
    password: "{{ _pinnwand_db_rw_pass }}"
    hostname: "db.rockylinux.org"
    port: 5432
    database: pinnwand_db
  paste_size: 10485760
  preferred_lexers: []
  logo_path: /opt/pinnwand/logo.png
  page_path: /tmp
  page_list:
    - about
    - removal
    - expiry
  footer: ''
  paste_help: ''
  report_email: 'abuse@rockylinux.org'
  expiries:
    - name: 1hour
      time: 3600
    - name: 1day
      time: 86400
    - name: 1week
      time: 604800
    - name: forever
      time: 4294967294
  ratelimits:
    - name: read
      capacity: 100
      consume: 1
      refill: 2
    - name: create
      capacity: 2
      consume: 2
      refill: 1
    - name: delete
      capacity: 2
      consume: 2
      refill: 1
  spamscore: 50
...
45
vars/rabbitmq.yml
Normal file
@ -0,0 +1,45 @@
---
# rabbitmq settings
rabbitmq_tls_ca_cert: "/etc/pki/tls/certs/ca-bundle.crt"
rabbitmq_tls_cert: "/etc/pki/tls/certs/{{ ansible_fqdn }}.crt"
rabbitmq_tls_key: "/etc/pki/tls/private/{{ ansible_fqdn }}.key"

# These should be in a vault, with a different value. Generated by:
# dd if=/dev/urandom bs=30 count=1 | base64
# rabbitmq_cookie: ...

# Admin passwords - these should be in a vault
# rabbitmq_admin_password: ...

# rabbitmq cluster list and information should be defined in hostvars to ensure
# that the configuration is idempotent.
# rabbitmq_cluster_name:
# rabbitmq_env:

# Federation / Public Queues
rabbitmq_enable_public: false
# pubsub_federation_pass:

# THIS IS DYNAMIC. IT'S ADVISED IT NOT BE STATIC.
# This should be changed depending on how inventory is managed. For example, if
# it's not possible to have "staging inventory" as opposed to a "production"
# inventory, you would likely have a different name than just "rabbitmq". It is
# also possible there will be more than one cluster, so these must be taken
# into account when setting this variable.
rabbitmq_cluster_list: "{{ groups['rabbitmq'] }}"
rabbitmq_ldap_servers: "{{ rocky_ipaserver_list }}"
rabbitmq_ldap_bind_dn: "uid=rabbitmq_binder,cn=sysaccounts,cn=etc,dc=rockylinux,dc=org"
rabbitmq_ldap_bind_pw: "{{ rabbitmq_binder_password }}"
rabbitmq_ldap_basedn: "{{ rocky_ldap_account_basedn }}"

# Messaging queues are generally private
rabbitmq_private: true
ipa_getcert_requested_hostnames:
  - name: "{{ ansible_fqdn }}"
    owner: rabbitmq
    key_location: "{{ rabbitmq_tls_key }}"
    cert_location: "{{ rabbitmq_tls_cert }}"
    postcmd: "/bin/systemctl restart rabbitmq-server"
    cnames:
      - "rabbitmq-{{ rabbitmq_env }}.rockylinux.org"
...