This commit is contained in:
nazunalika 2022-02-26 20:19:20 -07:00
commit 699ec2e2f0
Signed by: label
GPG Key ID: 6735C0E1BD65D048
97 changed files with 4067 additions and 0 deletions

6
.ansible-lint Normal file
View File

@ -0,0 +1,6 @@
# .ansible-lint
warn_list:
- '204' # Lines should be less than 160 characters
- '701' # meta/main.yml should contain relevant info
skip_list:
- '106' # Role name must match ^[a-z][a-z0-9_]+$ pattern

33
.pre-commit-config.yaml Normal file
View File

@ -0,0 +1,33 @@
---
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v3.4.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-added-large-files
- id: check-case-conflict
- id: check-executables-have-shebangs
- id: check-json
- id: pretty-format-json
- id: detect-private-key
- repo: local
hooks:
- id: ansible-lint
name: Ansible-lint
description: This hook runs ansible-lint.
entry: ansible-lint --force-color
language: python
# do not pass files to ansible-lint, see:
# https://github.com/ansible/ansible-lint/issues/611
pass_filenames: false
always_run: true
- repo: https://github.com/adrienverge/yamllint.git
rev: v1.26.0
hooks:
- id: yamllint
files: \.(yaml|yml)$
types: [file, yaml]
entry: yamllint

7
.yamllint Normal file
View File

@ -0,0 +1,7 @@
---
extends: default
rules:
line-length:
max: 140
level: warning

37
README.md Normal file
View File

@ -0,0 +1,37 @@
# Ansible AWX Template: Ops Management
Ansible AWX is the method used for the Rocky Linux infrastructure, as a replacement for using the CLI. This template is used specifically for management of systems and infrastructure and takes bits and pieces from the original infrastructure git repository on GitHub.
This repository may include duplicate playbooks from other ansible management repositories. Some pieces may also be removed and put into their own repository.
## Notes on local runs and playbooks for local development systems
There are some playbooks that are meant to be run locally. There are also cases where AWX is not feasible. To run said playbooks, these are things to keep in mind:
* local-ansible.cfg will need to be used
* `init-rocky-ansible-host.yml` will need to be run using that configuration file (if there are roles/collections needed)
## Provides / Information
This repository is for Infrastructure operations.
```
.
├── README.md
├── defaults
│   └── main.yml
├── files
│   └── README.md
├── handlers
│   └── main.yml
├── tasks
│   └── main.yml
├── templates
│   └── README.md
├── tests
│   ├── README.md
│   ├── inventory
│   └── test.yml
└── vars
└── main.yml
```

8
adhoc-facts-refresh.yml Normal file
View File

@ -0,0 +1,8 @@
---
- hosts: all
become: true
tasks:
- name: Force a fact refresh to have those available in local cache
setup:
gather_timeout: 30
...

87
adhoc-rabbitmqqueue.yml Normal file
View File

@ -0,0 +1,87 @@
---
# This playbook is meant to be used with callable variables, like adhoc or AWX.
# What: Creates RabbitMQ Users
# Required parameters:
# -> username: The username to create in RabbitMQ, which should match an LDAP
# name or the CN of a certificate. Note that if it's a hostname
# it must be the FQDN.
# -> queue_name: Name of the queue to create. This should be setup with a
# prefix_suffix name, where prefix is the username, and
# the suffix is a service name.
# -> routing_keys: A list to be used as routing keys.
# Optional:
# -> write_queues: A list of queue name prefixes to which the user will
# be allowed to publish.
# -> thresholds: A dictionary with two keys "warning" and "critical" - The
# values are numbers. In the event we have a monitoring system
# this can be a number of messages that could cause an alert.
# -> vhost: The vhost this queue will be part of. The default is /pubsub.
- name: Create a User
hosts: all
become: false
gather_facts: false
vars_files:
- vars/rabbitmq.yml
tasks:
- name: "Checking for user variables"
assert:
that:
- username != "admin"
- username != "guest"
- username != "mq-monitoring"
success_msg: "Required variables provided"
fail_msg: "Username is reserved"
tags:
- rabbitmq
- name: "Validate username queue name"
assert:
that:
- "queue_name.startswith(username)"
tags:
- rabbitmq
- name: "Creating User Account"
community.rabbitmq.rabbitmq_user:
user: "{{ username }}"
vhost: "{{ vhost|default('/pubsub') }}"
read_priv: "^(zmq\\.topic)|^(amq\\.topic)|({{ username }}.*)$"
write_priv: "^(amq\\.topic)|({{ username }}.*){% for queue in write_queues|default([]) %}|({{ queue }}.*){% endfor %}$"
configure_priv: "^$"
state: present
tags:
- rabbitmq
- name: "Create {{ queue_name }}"
delegate_to: "{{ rabbitmq_cluster_list[0] }}"
community.rabbitmq.rabbitmq_queue:
name: "{{ queue_name }}"
vhost: "{{ vhost|default('/pubsub') }}"
auto_delete: false
durable: true
message_ttl: "{{ message_ttl|default('null') }}"
state: present
login_user: admin
login_password: "{{ rabbitmq_admin_password }}"
tags:
- rabbitmq
- name: "Bind {{ queue_name }} to amq.topic exchange"
delegate_to: "{{ rabbitmq_cluster_list[0] }}"
community.rabbitmq.rabbitmq_binding:
name: "amq.topic"
destination: "{{ queue_name }}"
destination_type: queue
routing_key: "{{ routing_item }}"
vhost: "{{ vhost|default('/pubsub') }}"
state: present
login_user: admin
login_password: "{{ rabbitmq_admin_password }}"
loop: "{{ routing_keys }}"
loop_control:
loop_var: routing_item
tags:
- rabbitmq
...

35
adhoc-rabbitmquser.yml Normal file
View File

@ -0,0 +1,35 @@
---
# This playbook is meant to be used with callable variables, like adhoc or AWX.
# What: Creates RabbitMQ Users
# The username is the required parameter
- name: Create a User
hosts: all
become: false
gather_facts: false
vars_files:
- vars/rabbitmq.yml
tasks:
- name: "Checking for user variables"
assert:
that:
- username != "admin"
- username != "guest"
- username != "mq-monitoring"
success_msg: "Required variables provided"
fail_msg: "Username is reserved"
tags:
- rabbitmq
- name: "Creating User Account"
community.rabbitmq.rabbitmq_user:
user: "{{ username }}"
vhost: "{{ vhost }}"
read_priv: "^$"
write_priv: "amq\\.topic"
configure_priv: "^$"
state: present
tags:
- rabbitmq
...

14
collections/README.md Normal file
View File

@ -0,0 +1,14 @@
# Collections
If you are wanting to use a collection specifically for this, you will need to define it in a `requirements.yml`, otherwise AWX will not install what you need to run your tasks.
Example:
```
---
# Roles
collections:
- netbox.netbox
- community.aws
- containers.podman
```

View File

@ -0,0 +1,18 @@
---
collections:
# freeipa
- name: freeipa.ansible_freeipa
version: 1.6.3
- name: community.general
- name: community.mysql
- name: community.rabbitmq
- name: ansible.posix
- name: ansible.utils
- name: ktdreyer.koji_ansible
- name: netbox.netbox
- name: community.aws
- name: community.libvirt
- name: containers.podman
- name: nginxinc.nginx_core
version: 0.3.0
...

2
defaults/main.yml Normal file
View File

@ -0,0 +1,2 @@
---
# Defaults

1
files/README.md Normal file
View File

@ -0,0 +1 @@
Files come here

View File

@ -0,0 +1 @@
RedHat-8-system-auth

View File

@ -0,0 +1,40 @@
{imply "with-smartcard" if "with-smartcard-required"}
auth required pam_env.so
auth required pam_faildelay.so delay=2000000
auth required pam_faillock.so preauth audit silent deny=5 unlock_time=900 {include if "with-faillock"}
auth [success=1 default=ignore] pam_succeed_if.so service notin login:gdm:xdm:kdm:xscreensaver:gnome-screensaver:kscreensaver quiet use_uid {include if "with-smartcard-required"}
auth [success=done ignore=ignore default=die] pam_sss.so require_cert_auth ignore_authinfo_unavail {include if "with-smartcard-required"}
auth sufficient pam_fprintd.so {include if "with-fingerprint"}
auth sufficient pam_u2f.so cue {include if "with-pam-u2f"}
auth required pam_u2f.so cue nouserok {include if "with-pam-u2f-2fa"}
auth [default=1 ignore=ignore success=ok] pam_usertype.so isregular
auth [default=1 ignore=ignore success=ok] pam_localuser.so {exclude if "with-smartcard"}
auth [default=2 ignore=ignore success=ok] pam_localuser.so {include if "with-smartcard"}
auth [success=done authinfo_unavail=ignore ignore=ignore default=die] pam_sss.so try_cert_auth {include if "with-smartcard"}
auth sufficient pam_unix.so {if not "without-nullok":nullok} try_first_pass
auth [default=1 ignore=ignore success=ok] pam_usertype.so isregular
auth sufficient pam_sss.so forward_pass
auth required pam_faillock.so authfail audit deny=5 unlock_time=900 fail_interval=900 {include if "with-faillock"}
auth required pam_deny.so
account required pam_access.so {include if "with-pamaccess"}
account required pam_faillock.so {include if "with-faillock"}
account required pam_unix.so
account sufficient pam_localuser.so {exclude if "with-files-access-provider"}
account sufficient pam_usertype.so issystem
account [default=bad success=ok user_unknown=ignore] pam_sss.so
account required pam_permit.so
password requisite pam_pwquality.so try_first_pass local_users_only minlen=14 dcredit=-1 lcredit=-1 ucredit=-1 ocredit=-1 retry=3
password requisite pam_pwhistory.so use_authtok remember=5
password sufficient pam_unix.so sha512 shadow {if not "without-nullok":nullok} try_first_pass use_authtok
password sufficient pam_sss.so use_authtok
password required pam_deny.so
session optional pam_keyinit.so revoke
session required pam_limits.so
-session optional pam_systemd.so
session optional pam_oddjob_mkhomedir.so umask=0077 {include if "with-mkhomedir"}
session [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid
session required pam_unix.so
session optional pam_sss.so

View File

@ -0,0 +1 @@
RedHat-8-system-auth

View File

@ -0,0 +1 @@
RedHat-7-system-auth-ac

View File

@ -0,0 +1,34 @@
#%PAM-1.0
# This file is auto-generated.
# User changes will be destroyed the next time authconfig is run.
auth required pam_env.so
auth required pam_faildelay.so delay=2000000
auth required pam_faillock.so preauth audit silent deny=5 unlock_time=900
auth [default=1 success=ok] pam_localuser.so
auth [success=done ignore=ignore default=bad] pam_unix.so nullok try_first_pass
auth requisite pam_succeed_if.so uid >= 1000 quiet_success
auth sufficient pam_sss.so forward_pass
auth [default=die] pam_faillock.so authfail audit deny=5 unlock_time=900
auth required pam_deny.so
account required pam_faillock.so
account required pam_unix.so
account sufficient pam_localuser.so
account sufficient pam_succeed_if.so uid < 1000 quiet
account [default=bad success=ok user_unknown=ignore] pam_sss.so
account required pam_permit.so
password requisite pam_pwquality.so try_first_pass minlen=14 dcredit=-1 lcredit=-1 ucredit=-1 ocredit=-1 local_users_only retry=3
password requisite pam_pwhistory.so use_authtok remember=5
password sufficient pam_unix.so sha512 shadow try_first_pass use_authtok
password sufficient pam_sss.so use_authtok
password required pam_deny.so
session optional pam_keyinit.so revoke
session required pam_limits.so
-session optional pam_systemd.so
session optional pam_oddjob_mkhomedir.so umask=0077
session [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid
session required pam_unix.so
session optional pam_sss.so

3
files/etc/rockybanner Normal file
View File

@ -0,0 +1,3 @@
This is a Rocky Linux system
All access is logged and monitored. Unauthorized access is prohibited.

2
files/etc/sudoers.d/cis Normal file
View File

@ -0,0 +1,2 @@
Defaults use_pty
Defaults logfile="/var/log/sudo.log"

View File

@ -0,0 +1,16 @@
[Unit]
Description=noggin
After=network-online.target
Wants=network-online.target
[Service]
Environment=FLASK_APP=/opt/noggin/noggin/noggin/app.py
Environment=NOGGIN_CONFIG_PATH=/opt/noggin/noggin.cfg
Environment=FLASK_DEBUG=1
User=noggin
WorkingDirectory=/opt/noggin/noggin
ExecStart=/bin/bash /opt/noggin/start_noggin.sh
PrivateTmp=true
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,185 @@
#!/usr/bin/perl -w
# Louis Abel <tucklesepk@gmail.com>
# Hardware inventory report: parses dmidecode output (with /proc/cpuinfo
# and lscpu as fallbacks) and prints make/model/serial, CPU, memory and
# on-board network information to stdout.
use strict;

# Check for version of dmidecode: very old (RHEL4-era) dmidecode has no -q flag
my $dmi_test = `dmidecode -q 2>/dev/null; echo \$?`;
chomp($dmi_test);
our $dmi_ver = "rhel8";
our $dmidecode_cmd = "dmidecode -q";
if( $dmi_test eq "1" ) { $dmi_ver = "rhel4"; $dmidecode_cmd = "dmidecode"; }

# Figure out number of cores per cpu
my $c_cpuinfo = `grep -c processor /proc/cpuinfo`;
chomp($c_cpuinfo);
my $c_dmidecode = `$dmidecode_cmd | grep -c 'Processor Information'`;
chomp($c_dmidecode);

# Figure out hyperthreaded cores
my $htt;
my $lscpu_test = `lscpu 2>/dev/null; echo \$?`;
chomp($lscpu_test);
if( $lscpu_test eq "127" ) {
    # NOTE(review): this placeholder string is later used as a divisor when
    # computing $num_cores; on hosts without lscpu that division degrades.
    # Preserved as-is, only flagged here.
    $htt = "Cannot Detect Threads";
} else {
    $htt = `lscpu | awk -F':' '/Thread/ {print \$2}'`;
    chomp($htt);
}
$htt =~ s/^\s+|\s+$//g;

my $cores;
if( $c_cpuinfo eq $c_dmidecode ) {
    $cores = "single core";
} elsif ( $c_cpuinfo > $c_dmidecode ) {
    my $num_cores = $c_cpuinfo / $c_dmidecode / $htt;
    $cores = "$num_cores cores";
} else {
    $cores = "failed to determine number of cores";
}

# Parse dmidecode output
our %manufacturer;
our %cpu;
our %memory;
our %network;
open( FH, "$dmidecode_cmd |") or die "Couldn't run $dmidecode_cmd: $!\n\n";
my ($section, $dim, $dim_size);
my $dims_used = 0;
my $dims_total = 0;
my $eths_section = 0;
my $eths_total = 0;
# BUGFIX: initialize the NIC counter once, BEFORE the parse loop. The
# original assigned $network{total} = 0 on every loop iteration, which
# wiped the running total whenever the current line was not in an
# Ethernet section, so the report almost always ended with zero NICs.
$network{total} = 0;
while( my $line = <FH> ) {
    chomp($line);
    # Store section information (dmidecode -q section headers start in column 0)
    if( $line =~ /^\S+/ ) { $section = $line; }
    # BIOS Information
    if( $section eq "BIOS Information" || $section =~ /Handle 0x0000/ ) {
        if( $line =~ /^\s+Version:\s+(.+)\s*$/ ) { $manufacturer{bios} = $1; }
    }
    # System Information
    if( $section eq "System Information" || $section =~ /Handle 0x0100/ ) {
        if( $line =~ /^\s+Manufacturer:\s+(.+)\s*$/ ) { if( $1 =~ /Dell Computer Corporation/ ) { $manufacturer{make} = "Dell Inc."; } else { $manufacturer{make} = $1; } }
        if( $line =~ /^\s+Product Name:\s+(.+)\s*$/ ) { my $tmp = $1; $tmp =~ s/\s+$//g; $manufacturer{model} = $tmp; }
        if( $line =~ /^\s+Serial Number:\s+(.+)\s*$/ ) { $manufacturer{serial} = $1; }
    }
    # Chassis Information
    if( $section eq "Chassis Information" || $section =~ /Handle 0x0300/ ) {
        if( $line =~ /^\s+Type:\s+(.+)\s*$/ ) { $manufacturer{chassis_type} = $1; }
        if( $line =~ /^\s+Height:\s+(.+)\s*$/ ) { $manufacturer{chassis_height} = $1; }
    }
    # Processor Information
    if( $section eq "Processor Information" || $section =~ /Handle 0x040/ ) {
        if( $line =~ /^\s+Version:\s+(.+)\s*$/ ) {
            my $cpu_model = $1;
            # Some BIOSes report "Not Specified"; fall back to /proc/cpuinfo
            if( $cpu_model =~ /Not Specified/ ) {
                $cpu_model = `cat /proc/cpuinfo | grep 'model name' | awk -F: {'print \$2'} | head -n 1`;
                chomp( $cpu_model );
                $cpu_model =~ s/^\s*//g;
            }
            $cpu_model =~ s/\s+/ /g;
            $cpu{physical} = $c_dmidecode;
            $cpu{virtual} = $c_cpuinfo;
            $cpu{model} = "$cpu_model ($cores) (Threads: $htt)";
        }
        if( $line =~ /^\s+Speed:\s+(.+)\s*$/ ) { $cpu{speed} = $1; }
    }
    # Physical Memory Array
    if( $section eq "Physical Memory Array" || $section =~ /Handle 0x1000/ ) {
        if( $line =~ /^\s+Error Correction Type:\s+(.+)\s*$/ ) { $memory{error} = $1; }
        if( $line =~ /^\s+Maximum Capacity:\s+(.+)\s*$/ ) { $memory{max} = $1; }
        if( $line =~ /^\s+Number Of Devices:\s+(.+)\s*$/ ) { $memory{count} = $1; }
    }
    # Memory Device (one stanza per DIMM slot)
    if( $section eq "Memory Device" || $section =~ /Handle 0x110/ ) {
        if( $line =~ /^\s+Locator:\s+(.+)\s*$/ ) { $dim = $1; $dim =~ s/\s+//g; $dims_total++}
        if( $line =~ /^\s+Size:\s+(.+)\s*$/ ) { $dim_size = $1; }
        if( $line =~ /^\s+Speed:\s+(.+)\s*$/ ) { next if( $dim_size =~ /No Module Installed/ ); $memory{$dims_total}{location} = $dim; $memory{$dims_total}{size} = $dim_size; $memory{$dims_total}{speed} = $1; $dims_used++; }
        if( $line =~ /^\s+Type:\s+(.+)\s*$/ ) { $memory{type} = $1; }
    }
    # On-board Ethernet devices
    if( $section =~ /^On Board Device/ || $section =~ /Handle 0x0A00/ || $section =~ /^Onboard Device/ ) {
        if( $line =~ /^\s+Type:\s+Ethernet\s*$/ ) { $eths_section = 1; $eths_total++; $network{total} = $eths_total; }
        next if( $eths_section == 0 );
        if( $line =~ /^\s+Status:\s+(.+)\s*$/ ) { $network{$eths_total}{status} = $1; }
        if( $line =~ /^\s+Description:\s+(.+)\s*$/ ) { $network{$eths_total}{desc} = $1; }
    }
}
close(FH);

# Clean up missing data
$manufacturer{chassis_height} = "<UNKNOWN>" unless( defined($manufacturer{chassis_height}) );
$memory{used} = $dims_total;

# Print Data
print "Make: $manufacturer{make}\n";
print "Model: $manufacturer{model}\n";
print "Serial: $manufacturer{serial}\n";
print "Bios Rev: $manufacturer{bios}\n";
print "Chassis Type: $manufacturer{chassis_type}\n";
print "Chassis Height: $manufacturer{chassis_height}\n";
print "$cpu{physical} x $cpu{model}\n";
print_memory_info();
print_network_info();
#### Functions ####
# Summarize DIMM population from the %memory hash filled by the parse
# loop above: prints "<size> @ <speed> x <used> = <total>" plus the
# array's maximum capacity, memory type and ECC mode. Appends error
# code "1" when DIMM sizes are mixed and/or "2" when speeds are mixed.
sub print_memory_info {
my ($maxsize, $max_unit) = $memory{max} =~ /^\s*(\d+)\s*(\w+)\s*$/;
my $dim_count = $memory{count};
# Maximum capacity spread evenly over all physical slots
my $max_per_dim = $maxsize / $dim_count;
my $size_error = "";
my $speed_error = "";
my $common_size;
my $common_speed;
# Check whether every populated DIMM shares one size and one speed;
# any mismatch latches the corresponding error code for the summary line.
for( my $i = 1; $i < $dims_used + 1; $i++ ) {
my $size = $memory{$i}{size} || 0;
my $speed = $memory{$i}{speed} || 0;
if( defined($common_size) && $common_size ne $size ) { $size_error = 1; }
else { $common_size = $size; }
if( defined($common_speed) && $common_speed ne $speed ) { $speed_error = 2; }
else { $common_speed = $speed; }
}
my ($mem_size, $mem_unit) = $common_size =~ /^\s*(\d+)\s*(\w+)\s*$/;
my $total_mem_unit = "MB";
if( $mem_unit eq "MB" ) { $total_mem_unit = "GB"; }
# NOTE(review): total is size * dims * 1024 while the unit label is
# promoted MB->GB; the multiplier direction looks inverted (MB -> GB
# should divide by 1024) — confirm against real dmidecode output.
my $mem_total = ($mem_size * $dims_used) * 1024 ;
# Trim fractional sizes to two decimal places for display
if( $common_size =~ /(\d+\.\d{2})\d+/ ) { $common_size = $1; }
if( $mem_size >= 1024 ) { my $gb_size = $mem_size / 1024; $common_size = "$gb_size GB"; }
print "$common_size @ $common_speed x $dims_used = $mem_total $total_mem_unit";
if( $size_error || $speed_error ) { print " $size_error$speed_error"; }
print "\n";
if( $max_per_dim =~ /(\d+\.\d{2})\d+/ ) { $max_per_dim = $1; }
print "$max_per_dim $max_unit x $dim_count dims = $maxsize $max_unit maximum capacity\n";
print "$memory{type}\n$memory{error}\n";
}
# Print one "description [status]" line for each on-board Ethernet
# device recorded in the %network hash by the parse loop above.
sub print_network_info {
    foreach my $idx ( 1 .. $network{total} ) {
        print "$network{$idx}{desc} [$network{$idx}{status}]\n";
    }
}

View File

@ -0,0 +1,51 @@
#!/bin/bash
# Borrowed from Fedora Infra for Rocky Linux
# lock-wrapper: run SCRIPT under an exclusive /var/tmp/NAME lock directory
# so that only one instance runs at a time. Remaining arguments (after an
# optional --silent) are passed through to SCRIPT.
# Usage: $0 [name] [script] [--silent] [args...]
if [ $# -lt 2 ]; then
    echo "Usage: $0 [name] [script]"
    exit 1;
fi
NAME=$1
SCRIPT=$2
SILENT="no"
# POSIX-style test chaining; the obsolescent "[ ... -a ... ]" form is avoided
if [ $# -ge 3 ] && [ "$3" == "--silent" ]; then
    SILENT="yes"
    shift
fi
shift 2
LOCKDIR="/var/tmp/$NAME"
PIDFILE="$LOCKDIR/pid"
function cleanup {
    rm -rf "$LOCKDIR"
}
RESTORE_UMASK=$(umask -p)
# Restrictive umask so the lock dir and pidfile are owner-only
umask 0077
# mkdir is the atomic lock acquisition; failure means another holder exists
if ! mkdir "$LOCKDIR" >& /dev/null; then
    # Pidfile may not exist if a previous holder died mid-setup
    PID=$(cat "$PIDFILE" 2>/dev/null)
    if [ -n "$PID" ] && /bin/ps "$PID" > /dev/null
    then
        if [ "$SILENT" != "yes" ]; then
            echo "$PID is still running"
            /bin/ps -o user,pid,start,time,comm "$PID"
        fi
        exit 1;
    else
        echo "$LOCKDIR exists but $PID is dead"
        echo "Removing lockdir and re-running"
        /bin/rm -rf "$LOCKDIR"
        mkdir "$LOCKDIR" || exit
    fi
fi
trap cleanup EXIT SIGQUIT SIGHUP SIGTERM
echo $$ > "$PIDFILE"
# RESTORE_UMASK holds a full "umask NNNN" command emitted by `umask -p`;
# expanding it re-applies the caller's original umask before running SCRIPT
$RESTORE_UMASK
eval "$SCRIPT $*"

49
handlers/main.yml Normal file
View File

@ -0,0 +1,49 @@
---
# Handlers
- name: restart_sshd
service:
name: sshd
state: restarted
- name: restart_httpd
service:
name: httpd
state: restarted
- name: restart_nginx
service:
name: nginx
state: restarted
- name: reload_networkmanager
service:
name: NetworkManager
state: reloaded
- name: regenerate_auditd_rules
command: /sbin/augenrules
- name: reload_chrony
systemd:
name: "{{ chrony_service_name }}"
state: restarted
listen: "chrony service restart"
- name: restart_gitlab
command: gitlab-ctl reconfigure
register: gitlab_restart
failed_when: gitlab_restart_handler_failed_when | bool
- name: restart_noggin
service:
name: noggin
state: restarted
- name: rehash_postfix_sasl
command: "postmap /etc/postfix/sasl_passwd"
- name: restart_postfix
service:
name: postfix
state: restarted
...

View File

@ -0,0 +1,35 @@
---
# Preps a system to be part of Account Services
- name: Configure Account Services
hosts: all
become: true
handlers:
- import_tasks: handlers/main.yml
pre_tasks:
- name: Check if ansible cannot be run here
stat:
path: /etc/no-ansible
register: no_ansible
- name: Verify if we can run ansible
assert:
that:
- "not no_ansible.stat.exists"
success_msg: "We are able to run on this node"
fail_msg: "/etc/no-ansible exists - skipping run on this node"
tasks:
- name: Deploy Account Services
import_tasks: tasks/account_services.yml
post_tasks:
- name: Touching run file that ansible has ran here
file:
path: /var/log/ansible.run
state: touch
mode: '0644'
owner: root
group: root
...

View File

@ -0,0 +1,57 @@
---
- hosts: localhost
connection: local
vars:
force_purge: true
roles_installation_dir: roles/public
collection_installation_dir: collections
installation_prefix: ./
pre_tasks:
# example prepare ansible box for execution
# - name: install required pip modules on the host running ansible
# pip:
# name:
# - jmespath
# - netaddr
# - python-consul
# - pyvmomi
# - python-ldap
# - twine
- name: Remove existing public roles
file:
path: "{{ installation_prefix }}{{ roles_installation_dir }}"
state: absent
when: force_purge | bool
- name: Install all public roles
command: >
ansible-galaxy role install
{{ ( force_purge | bool ) | ternary('--force','') }}
--role-file {{ installation_prefix }}roles/requirements.yml
--roles-path {{ installation_prefix }}{{ roles_installation_dir }}
register: galaxy_install_role
changed_when: '"Installing " in galaxy_install_role.stdout'
- name: Install needed collections
command: >
ansible-galaxy collection install
{{ ( force_purge | bool ) | ternary('--force-with-deps','') }}
-r {{ installation_prefix }}collections/requirements.yml
-p {{ installation_prefix }}{{ collection_installation_dir }}
register: galaxy_install_collection
changed_when: '"Installing " in galaxy_install_collection.stdout'
- name: cleanup old ssh known_hosts - remove
file:
path: "../tmp/known_hosts"
state: absent
mode: "0644"
- name: cleanup old ssh known_hosts - blank
file:
path: "../tmp/known_hosts"
state: touch
mode: "0644"
...

58
init-rocky-bugzilla.yml Normal file
View File

@ -0,0 +1,58 @@
---
# Installs Bugzilla
- name: Configure Bugzilla
hosts: all
become: true
vars_files:
- vars/common.yml
- vars/bugzilla.yml
handlers:
- import_tasks: handlers/main.yml
pre_tasks:
- name: Check if ansible cannot be run here
stat:
path: /etc/no-ansible
register: no_ansible
- name: Verify if we can run ansible
assert:
that:
- "not no_ansible.stat.exists"
success_msg: "We are able to run on this node"
fail_msg: "/etc/no-ansible exists - skipping run on this node"
roles:
- role: rockylinux.ipagetcert
state: present
tasks:
- name: Deploy Mantis
import_tasks: tasks/bugzilla.yml
post_tasks:
- name: Open firewalld ports
ansible.posix.firewalld:
service: "{{ item }}"
permanent: true
immediate: true
state: enabled
with_items:
- http
- https
- name: Ensure httpd is enabled and running
service:
name: httpd
enabled: true
state: started
- name: Touching run file that ansible has ran here
file:
path: /var/log/ansible.run
state: touch
mode: '0644'
owner: root
group: root
...

41
init-rocky-chrony.yml Normal file
View File

@ -0,0 +1,41 @@
---
# Sets Up Chrony Server/Client
# Created: @derekmpage
# Kudos: @danielkubat @Darkbat91
# Fixes: @nazunalika
- name: Rocky Chrony Runbook
hosts: all
become: true
vars_files:
- vars/chrony.yml
# This is to try to avoid the handler issue in pre/post tasks
handlers:
- import_tasks: handlers/main.yml
pre_tasks:
- name: Check if ansible cannot be run here
stat:
path: /etc/no-ansible
register: no_ansible
- name: Verify if we can run ansible
assert:
that:
- "not no_ansible.stat.exists"
success_msg: "We are able to run on this node"
fail_msg: "/etc/no-ansible exists - skipping run on this node"
tasks:
- name: Configure Chrony
import_tasks: tasks/chrony.yml
post_tasks:
- name: Touching run file that ansible has ran here
file:
path: /var/log/ansible.run
state: touch
mode: '0644'
owner: root
group: root
...

View File

@ -0,0 +1,60 @@
---
# Creates a standalone KVM host
# Created: @SherifNagy
# Modified to current standards: @nazunalika
- name: Configure KVM host
hosts: kvm
become: true
pre_tasks:
- name: Check if ansible cannot be run here
stat:
path: /etc/no-ansible
register: no_ansible
- name: Verify if we can run ansible
assert:
that:
- "not no_ansible.stat.exists"
success_msg: "We are able to run on this node"
fail_msg: "/etc/no-ansible exists - skipping run on this node"
tasks:
- name: Check for CPU Virtualization
shell: "set -o pipefail; lscpu | grep -i virtualization"
register: result
changed_when: false
failed_when: "result.rc != 0"
# Install KVM packages
- name: Installing KVM Packages
package:
name:
- qemu-kvm
- libvirt
- libvirt-python
- libguestfs-tools
- virt-install
state: present
- name: Enable and Start libvirtd
systemd:
name: libvirtd
state: started
enabled: true
- name: Verify KVM module is loaded
shell: "set -o pipefail; lsmod | grep -i kvm"
register: result
changed_when: false
failed_when: "result.rc != 0"
post_tasks:
- name: Touching run file that ansible has ran here
file:
path: /var/log/ansible.run
state: touch
mode: '0644'
owner: root
group: root
...

59
init-rocky-mantisbt.yml Normal file
View File

@ -0,0 +1,59 @@
---
# Installs the mantis bug tracker
# This requires information from the vault
- name: Configure MantisBT
hosts: all
become: true
vars_files:
- vars/common.yml
- vars/mantis.yml
handlers:
- import_tasks: handlers/main.yml
pre_tasks:
- name: Check if ansible cannot be run here
stat:
path: /etc/no-ansible
register: no_ansible
- name: Verify if we can run ansible
assert:
that:
- "not no_ansible.stat.exists"
success_msg: "We are able to run on this node"
fail_msg: "/etc/no-ansible exists - skipping run on this node"
roles:
- role: rockylinux.ipagetcert
state: present
tasks:
- name: Deploy Mantis
import_tasks: tasks/mantis.yml
post_tasks:
- name: Open firewalld ports
ansible.posix.firewalld:
service: "{{ item }}"
permanent: true
immediate: true
state: enabled
with_items:
- http
- https
- name: Ensure httpd is enabled and running
service:
name: httpd
enabled: true
state: started
- name: Touching run file that ansible has ran here
file:
path: /var/log/ansible.run
state: touch
mode: '0644'
owner: root
group: root
...

View File

@ -0,0 +1,41 @@
---
# (Re)deploys the noggin theme
- name: Deploy Noggin Theme
hosts: all
become: true
handlers:
- import_tasks: handlers/main.yml
pre_tasks:
- name: Check if ansible cannot be run here
stat:
path: /etc/no-ansible
register: no_ansible
- name: Verify if we can run ansible
assert:
that:
- "not no_ansible.stat.exists"
success_msg: "We are able to run on this node"
fail_msg: "/etc/no-ansible exists - skipping run on this node"
tasks:
- name: Deploy Rocky Noggin Theme
git:
repo: https://github.com/rocky-linux/noggin-theme.git
dest: /opt/noggin/noggin/noggin/themes/rocky
update: true
version: main
become_user: noggin
notify: restart_noggin
post_tasks:
- name: Touching run file that ansible has ran here
file:
path: /var/log/ansible.run
state: touch
mode: '0644'
owner: root
group: root
...

35
init-rocky-noggin.yml Normal file
View File

@ -0,0 +1,35 @@
---
# (Re)deploys the noggin theme
- name: Deploy Noggin
hosts: all
become: true
handlers:
- import_tasks: handlers/main.yml
pre_tasks:
- name: Check if ansible cannot be run here
stat:
path: /etc/no-ansible
register: no_ansible
- name: Verify if we can run ansible
assert:
that:
- "not no_ansible.stat.exists"
success_msg: "We are able to run on this node"
fail_msg: "/etc/no-ansible exists - skipping run on this node"
tasks:
- name: Deploy Noggin
import_tasks: "tasks/noggin.yml"
post_tasks:
- name: Touching run file that ansible has ran here
file:
path: /var/log/ansible.run
state: touch
mode: '0644'
owner: root
group: root
...

View File

@ -0,0 +1,53 @@
# Sets up local OpenQA testing environment
# This playbook is *NOT* intended for WAN-facing systems!
#
# Usages:
# # Install and configure an openQA developer host, download all current Rocky ISOs,
# # and POST a test job
# ansible-playbook playbooks/init-rocky-openqa-developer-host.yml
#
# # Only perform ISO download tasks
# ansible-playbook playbooks/init-rocky-openqa-developer-host.yml --tags=download_isos
#
# # Only perform configuration, do not download ISOs or POST a job
# ansible-playbook playbooks/init-rocky-openqa-developer-host.yml --tags=configure
#
# Created: @akatch
---
- name: Rocky OpenQA Runbook
hosts: localhost
connection: local
become: true
vars_files:
- vars/openqa.yml
# This is to try to avoid the handler issue in pre/post tasks
handlers:
- import_tasks: handlers/main.yml
pre_tasks:
- name: Check if ansible cannot be run here
stat:
path: /etc/no-ansible
register: no_ansible
- name: Verify if we can run ansible
assert:
that:
- "not no_ansible.stat.exists"
success_msg: "We are able to run on this node"
fail_msg: "/etc/no-ansible exists - skipping run on this node"
tasks:
- name: Install and configure OpenQA
import_tasks: tasks/openqa.yml
post_tasks:
- name: Touching run file that ansible has ran here
file:
path: /var/log/ansible.run
state: touch
mode: '0644'
owner: root
group: root
...

View File

@ -0,0 +1,37 @@
---
# Configures postfix on a system to relay mail
# NOTE: smtp vars will be in vaults - originally they were available directly
# on the ansible host. This was never a viable and secure option.
- name: Configure Postfix Relay
hosts: all
become: true
handlers:
- import_tasks: handlers/main.yml
pre_tasks:
- name: Check if ansible cannot be run here
stat:
path: /etc/no-ansible
register: no_ansible
- name: Verify if we can run ansible
assert:
that:
- "not no_ansible.stat.exists"
success_msg: "We are able to run on this node"
fail_msg: "/etc/no-ansible exists - skipping run on this node"
tasks:
- name: Deploy Postfix Relay
import_tasks: tasks/postfix_relay.yml
post_tasks:
- name: Touching run file that ansible has ran here
file:
path: /var/log/ansible.run
state: touch
mode: '0644'
owner: root
group: root
...

View File

@ -0,0 +1,35 @@
---
# Preps a system to be a repository
- name: Configure repository system
hosts: all
become: true
handlers:
- import_tasks: handlers/main.yml
pre_tasks:
- name: Check if ansible cannot be run here
stat:
path: /etc/no-ansible
register: no_ansible
- name: Verify if we can run ansible
assert:
that:
- "not no_ansible.stat.exists"
success_msg: "We are able to run on this node"
fail_msg: "/etc/no-ansible exists - skipping run on this node"
tasks:
- name: Configure repository system
import_tasks: tasks/repository.yml
post_tasks:
- name: Touching run file that ansible has ran here
file:
path: /var/log/ansible.run
state: touch
mode: '0644'
owner: root
group: root
...

View File

@ -0,0 +1,57 @@
---
# Basic system configuration. All hardening should also be imported here.
# Use --extra-vars="host=..." and specify a hostname in the inventory or
# provide an ansible host group name. You can also just use "all" if you
# want to ensure all systems are up to date on the configuration.
- name: Configure system
hosts: all
become: true
# This is to try to avoid the handler issue in pre/post tasks
handlers:
- import_tasks: handlers/main.yml
pre_tasks:
- name: Check if ansible cannot be run here
stat:
path: /etc/no-ansible
register: no_ansible
- name: Verify if we can run ansible
assert:
that:
- "not no_ansible.stat.exists"
success_msg: "We are able to run on this node"
fail_msg: "/etc/no-ansible exists - skipping run on this node"
tasks:
- name: Loading Variables from OS Common
import_tasks: tasks/variable_loader_common.yml
- name: Configure SSH
import_tasks: tasks/ssh_config.yml
- name: Configure harden settings
import_tasks: tasks/harden.yml
- name: Configure PAM
import_tasks: tasks/authentication.yml
- name: Configure auditd
import_tasks: tasks/auditd.yml
- name: Configure grub
import_tasks: tasks/grub.yml
- name: Configure common scripts
import_tasks: tasks/scripts.yml
post_tasks:
- name: Touching run file that ansible has ran here
file:
path: /var/log/ansible.run
state: touch
mode: '0644'
owner: root
group: root
...

76
local-ansible.cfg Normal file
View File

@ -0,0 +1,76 @@
[defaults]
########################################
# Display settings
########################################
# Output display
force_color = 1
nocows = True
# Note: http://docs.ansible.com/ansible/intro_configuration.html#ansible-managed
ansible_managed = Ansible managed
#ansible_managed = Ansible managed - {file} on {host}
# Warn when ansible thinks it is better to use a module instead of a raw command.
# Note: http://docs.ansible.com/ansible/intro_configuration.html#id88
# NOTE(review): command_warnings was deprecated and later removed in newer
# ansible-core releases — confirm the targeted ansible version still accepts it.
command_warnings = True
# Enable this to debug task calls
display_args_to_stdout = False
display_skipped_hosts = false
########################################
# Playbook settings
########################################
# Default strategy
strategy = free
# Number of hosts processed in parallel
forks = 20
########################################
# Behaviour settings
########################################
# Keep *.retry files (lists of failed hosts) after an unsuccessful run
retry_files_enabled = True
# Fact options
gathering = smart
#gathering = !all
#gathering = smart,network,hardware,virtual,ohai,facter
#gathering = network,!hardware,virtual,!ohai,!facter
# facts caching
#fact_caching_connection = tmp/facts_cache
#fact_caching = json
fact_caching = memory
fact_caching_timeout = 1800
# Enable or disable logs
# Note: set to False in prod
no_log = False
########################################
# Common destinations
########################################
log_path = tmp/ansible.log
known_hosts = tmp/known_hosts
roles_path = roles/local:roles/public
# NOTE(review): newer ansible-core spells this 'collections_path' (singular);
# the plural form still works but emits a deprecation warning — verify.
collections_paths = collections
########################################
# SSH Configuration
########################################
[ssh_connection]
# Disable GSSAPI, which slows down SSH connections for ansible
ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s -o GSSAPIAuthentication=no

32
mantis.yml Normal file
View File

@ -0,0 +1,32 @@
---
# mantis vars
# MantisBT release to deploy; mantis_checksum verifies the downloaded tarball.
mantis_version: 2.25.0
mantis_checksum: "sha256:d8973d3677ecb2ccbfee95e2267b3128049fbdcc59aa1f007686a342d93a4c0a"
# Packages required to serve MantisBT via httpd with PostgreSQL and LDAP support.
mantis_pkg:
  - php
  - php-ldap
  - httpd
  - mod_ssl
  - php-pgsql
  - php-mbstring
  - php-curl
  - openldap
  - php-json
# Database connection settings; the DB password comes from a vault (see below).
mantis_db_host: db.rockylinux.org
mantis_db_name: mantisdb
mantis_db_user: mantis
# LDAP bind credentials are pulled from the shared rocky_ldap_* variables.
mantis_binder_user: "{{ rocky_ldap_bind_dn }}"
mantis_binder_pass: "{{ rocky_ldap_bind_pw }}"
# Vault
# mantis_db_pass: ThisIsNotThePassword!
# TLS certificate request for the bugs.rockylinux.org vhost, issued via IPA
# (consumed by the rockylinux.ipagetcert role).
ipa_getcert_requested_hostnames:
  - name: "{{ ansible_fqdn }}"
    owner: apache
    key_location: "/etc/pki/tls/private/bugs.rockylinux.org.key"
    cert_location: "/etc/pki/tls/certs/bugs.rockylinux.org.crt"
    postcmd: "/bin/systemctl reload httpd"
    cnames:
      - "bugs.rockylinux.org"
...

View File

@ -0,0 +1,41 @@
---
# Manage bootstrap hosts
#
- name: Manage and configure bootstrap hosts
  hosts: all
  become: true
  vars_files:
    - vars/mounts/bootstrap_staging.yml
  # This is to try to avoid the handler issue in pre/post tasks
  handlers:
    - ansible.builtin.import_tasks: handlers/main.yml
  pre_tasks:
    # /etc/no-ansible acts as an opt-out flag: its presence aborts the run.
    - name: Check if ansible cannot be run here
      ansible.builtin.stat:
        path: /etc/no-ansible
      register: no_ansible
    - name: Verify if we can run ansible
      ansible.builtin.assert:
        that:
          - "not no_ansible.stat.exists"
        success_msg: "We are able to run on this node"
        fail_msg: "/etc/no-ansible exists - skipping run on this node"
  tasks:
    # Names added to the include_tasks tasks (ansible-lint: all tasks named).
    - name: Mount each EFS filesystem defined in the mounts variable
      ansible.builtin.include_tasks: tasks/efs_mount.yml
      loop: "{{ mounts }}"
    - name: Set up srpmproc
      ansible.builtin.include_tasks: tasks/srpmproc.yml
  post_tasks:
    # Marker file recording that ansible completed a run on this host.
    - name: Touching run file that ansible has ran here
      ansible.builtin.file:
        path: /var/log/ansible.run
        state: touch
        mode: '0644'
        owner: root
        group: root
...

67
role-rocky-graylog.yml Normal file
View File

@ -0,0 +1,67 @@
---
# Configure and setup graylog
# Reccommended specs
# CPU: 2 cores
# Memory: 4GB
# Storage: Yes
- name: Install Graylog
hosts: all
become: true
vars_files:
# Vaults required
# vars/vaults/encpass.yml
# vars/vaults/hostman.yml
- vars/graylog.yml
# This is to try to avoid the handler issue in pre/post tasks
handlers:
- import_tasks: handlers/main.yml
pre_tasks:
- name: Check if ansible cannot be run here
stat:
path: /etc/no-ansible
register: no_ansible
- name: Verify if we can run ansible
assert:
that:
- "not no_ansible.stat.exists"
success_msg: "We are able to run on this node"
fail_msg: "/etc/no-ansible exists - skipping run on this node"
- name: Install SELinux packages
package:
name: python3-policycoreutils.noarch
state: present
- name: "Creating DNS Record for ord-prod-graylog.rockylinux.org"
freeipa.ansible_freeipa.ipadnsrecord:
ipaadmin_principal: "{{ ipa_admin|default('admin') }}"
ipaadmin_password: "{{ ipaadmin_password }}"
zone_name: "{{ graylog_ipa_dnsrecord_zone_name }}"
name: "{{ graylog_ipa_dnsrecord_name }}"
record_type: "{{ graylog_ipa_dnsrecord_record_type }}"
record_value: "{{ graylog_ipa_dnsrecord_record_value }}"
state: "{{ graylog_ipa_dnsrecord_state }}"
roles:
- role: rockylinux.ipagetcert
state: present
post_tasks:
- name: Open firewalld ports
ansible.posix.firewalld:
port: "{{ item.port }}"
permanent: "{{ item.permanent }}"
state: "{{ item.state }}"
loop: "{{ graylog_server_firewall_rules }}"
- name: Touching run file that ansible has ran here
file:
path: /var/log/ansible.run
state: touch
mode: '0644'
owner: root
group: root
...

View File

@ -0,0 +1,72 @@
---
# MirrorManager2
- name: Install and configure mirrormanager
  hosts: all
  become: false
  vars_files:
    # This playbook requires vaults!
    # vars/vaults/hostman.yml
    # vars/vaults/mirrormanager.yml
    - vars/mounts/mirrormanager.yml
    - vars/mirrormanager.yml
  # This is to try to avoid the handler issue in pre/post tasks
  handlers:
    - ansible.builtin.import_tasks: handlers/main.yml
  pre_tasks:
    # /etc/no-ansible acts as an opt-out flag: its presence aborts the run.
    - name: Check if ansible cannot be run here
      ansible.builtin.stat:
        path: /etc/no-ansible
      register: no_ansible
    - name: Verify if we can run ansible
      ansible.builtin.assert:
        that:
          - "not no_ansible.stat.exists"
        success_msg: "We are able to run on this node"
        fail_msg: "/etc/no-ansible exists - skipping run on this node"
    # The play runs unprivileged (become: false), so individual tasks that
    # need root escalate explicitly.
    - name: Install git
      become: true
      ansible.builtin.package:
        name: git
        state: present
    - name: Install SELinux packages
      become: true
      ansible.builtin.package:
        name: python3-policycoreutils.noarch
        state: present
  tasks:
    # - include_tasks: tasks/mirrormanager.yml
    - name: Mount each EFS filesystem defined in the mounts variable
      ansible.builtin.include_tasks: tasks/efs_mount.yml
      loop: "{{ mounts }}"
      tags: ["mounts"]
  roles:
    - role: rockylinux.ipagetcert
      become: true
      state: present
      tags: ['certs']
  post_tasks:
    # 'immediate: true' applies the rule to the running firewall as well as
    # the permanent config (was 'yes'; canonical lowercase boolean per yamllint).
    - name: Open firewalld ports
      become: true
      ansible.posix.firewalld:
        port: "{{ item.port }}"
        permanent: "{{ item.permanent }}"
        state: "{{ item.state }}"
        immediate: true
      loop: "{{ firewall_rules }}"
    # Marker file recording that ansible completed a run on this host.
    - name: Touching run file that ansible has ran here
      become: true
      ansible.builtin.file:
        path: /var/log/ansible.run
        state: touch
        mode: '0644'
        owner: root
        group: root
...