Attempt Sync

This attempts a sync of some content from our ansible server. Some
data is either missing or obfuscated.
nazunalika 2021-07-11 21:16:19 -07:00
parent d9c5608349
commit f118ec9571
Signed by GPG Key ID: 6735C0E1BD65D048
13 changed files with 412 additions and 24 deletions

@@ -188,6 +188,15 @@ role-rocky-ipa-client.yml
init-rocky-system-config.yml
```
### Initializing a base system
```
# All clients should be listed under [ipaclients]
role-rocky-ipa-client.yml
# All systems should be hardened
init-rocky-system-config.yml
```
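
For illustration, a minimal sketch of the inventory grouping these plays expect (the `ipaclients` group name comes from the comment above; host names are placeholders):
```
all:
  children:
    ipaclients:
      hosts:
        client01.example.com:
        client02.example.com:
```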
## Current Set
```

@@ -5,7 +5,7 @@
    ipaadmin_password: "{{ ipaadmin_password }}"
    name: "{{ item.group }}"
    minlife: "{{ item.minlife | default(0) }}"
-   maxlife: "{{ item.maxlife | default(84) }}"
+   maxlife: "{{ item.maxlife | default(0) }}"
    history: "{{ item.history | default(5) }}"
    priority: "{{ item.priority | default(1) }}"
    lockouttime: "{{ item.lockout | default(300) }}"
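
A hedged sketch of the list this task presumably loops over (the variable name and group values here are assumptions); note that a `maxlife` of 0 in an IPA password policy means passwords do not expire:
```
# Hypothetical shape of the loop variable consumed by the ipapwpolicy task above
ipa_pwpolicies:
  - group: admins
    maxlife: 0        # never expires
  - group: ipausers
    history: 10       # overrides the default of 5; maxlife falls back to 0
```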

@@ -0,0 +1,40 @@
---
# Manage bootstrap hosts
#
- name: Manage and configure bootstrap hosts
  hosts: bootstrap_staging
  become: true
  vars_files:
    - vars/mounts/bootstrap_staging.yml

  # This is to try to avoid the handler issue in pre/post tasks
  handlers:
    - import_tasks: handlers/main.yml

  pre_tasks:
    - name: Check if ansible cannot be run here
      stat:
        path: /etc/no-ansible
      register: no_ansible

    - name: Verify if we can run ansible
      assert:
        that:
          - "not no_ansible.stat.exists"
        success_msg: "We are able to run on this node"
        fail_msg: "/etc/no-ansible exists - skipping run on this node"

  tasks:
    - include_tasks: tasks/efs_mount.yml
      loop: "{{ mounts }}"

    - include_tasks: tasks/srpmproc.yml

  post_tasks:
    - name: Touching run file that ansible has ran here
      file:
        path: /var/log/ansible.run
        state: touch
        mode: '0644'
        owner: root
        group: root
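
The `/etc/no-ansible` check in `pre_tasks` acts as an opt-out guard for a host. A minimal sketch of a helper play that sets the guard before maintenance (the play itself is an assumption; only the guard path comes from the playbooks in this commit):
```
---
# Hypothetical helper play: creates the guard file the pre_tasks assert on
- name: Fence a host off from ansible runs
  hosts: bootstrap_staging
  become: true
  tasks:
    - name: Create the /etc/no-ansible guard file
      file:
        path: /etc/no-ansible
        state: touch
        mode: '0644'
        owner: root
        group: root
```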

@@ -37,8 +37,8 @@
        state: present
  roles:
-    - role: rockylinux.ipagetcert
-      state: present
+    #- role: rockylinux.ipagetcert
+    #  state: present
    - role: cloudalchemy.prometheus
      state: present
    - role: cloudalchemy.alertmanager
@@ -61,24 +61,3 @@
        mode: '0644'
        owner: root
        group: root
-
-- name: Install Prometheus Node Exporter
-  hosts: all
-  become: true
-  pre_tasks:
-    - name: Install SELinux packages
-      package:
-        name: python3-policycoreutils.noarch
-        state: present
-  roles:
-    - role: cloudalchemy.node-exporter
-      state: present
-  post_tasks:
-    - name: Open firewall for node-exporter
-      ansible.posix.firewalld:
-        port: 9100/tcp
-        permanent: true
-        state: enabled

@@ -0,0 +1,66 @@
---
# pinnwand
- name: Install pinnwand
  hosts: pinnwand
  become: true
  vars_files:
    - vars/vaults/hostman.yml
    - vars/vaults/pinnwand.yml
    - vars/pinnwand.yml

  # This is to try to avoid the handler issue in pre/post tasks
  handlers:
    - import_tasks: handlers/main.yml

  pre_tasks:
    - name: Check if ansible cannot be run here
      stat:
        path: /etc/no-ansible
      register: no_ansible

    - name: Verify if we can run ansible
      assert:
        that:
          - "not no_ansible.stat.exists"
        success_msg: "We are able to run on this node"
        fail_msg: "/etc/no-ansible exists - skipping run on this node"

    - name: Install SELinux packages
      package:
        name: python3-policycoreutils.noarch
        state: present

  tasks:
    #- include_tasks: tasks/pinnwand.yml
    #  tags: ['includetasks']

  roles:
    - role: rockylinux.ipagetcert
      state: present
      tags: ['certs']
    - role: rockylinux.pinnwand
      state: present
      tags: ['role_pinnwand']
    # Define variables in vars/matomo/nginx.yml
    - role: nginxinc.nginx_core.nginx
      tags: ['nginx']
    #- role: nginxinc.nginx_core.nginx_config
    #  tags: ['nginx']

  post_tasks:
    - name: Open firewalld ports
      ansible.posix.firewalld:
        port: "{{ item.port }}"
        permanent: "{{ item.permanent | default(true) }}"
        state: "{{ item.state | default('enabled') }}"
      loop: "{{ firewall_rules }}"

    - name: Touching run file that ansible has ran here
      file:
        path: /var/log/ansible.run
        state: touch
        mode: '0644'
        owner: root
        group: root

@@ -0,0 +1,41 @@
---
# Configures an instance to function as an HTTP-serving member of repopool
- name: Configure Repo Pool hosts
  hosts: repopool
  become: true
  vars_files:
    - vars/vaults/encpass.yml
    - vars/common.yml
    - vars/mounts/repopool.yml

  # This is to try to avoid the handler issue in pre/post tasks
  handlers:
    - import_tasks: handlers/main.yml

  pre_tasks:
    - name: Check if ansible cannot be run here
      stat:
        path: /etc/no-ansible
      register: no_ansible

    - name: Verify if we can run ansible
      assert:
        that:
          - "not no_ansible.stat.exists"
        success_msg: "We are able to run on this node"
        fail_msg: "/etc/no-ansible exists - skipping run on this node"

  tasks:
    - name: "Setup shared filesystem mount"
      include_tasks: tasks/efs_mount.yml
      with_items: "{{ mounts }}"
      tags: ["koji_efs_mount"]

  post_tasks:
    - name: Touching run file that ansible has ran here
      file:
        path: /var/log/ansible.run
        state: touch
        mode: '0644'
        owner: root
        group: root

@@ -0,0 +1,40 @@
---
# Manage srpmproc
#
- name: Manage and configure srpmproc
  hosts: srpmproc
  become: true
  vars_files:
    - vars/mounts/srpmproc.yml

  # This is to try to avoid the handler issue in pre/post tasks
  handlers:
    - import_tasks: handlers/main.yml

  pre_tasks:
    - name: Check if ansible cannot be run here
      stat:
        path: /etc/no-ansible
      register: no_ansible

    - name: Verify if we can run ansible
      assert:
        that:
          - "not no_ansible.stat.exists"
        success_msg: "We are able to run on this node"
        fail_msg: "/etc/no-ansible exists - skipping run on this node"

  tasks:
    - include_tasks: tasks/efs_mount.yml
      loop: "{{ mounts }}"

    - include_tasks: tasks/srpmproc.yml

  post_tasks:
    - name: Touching run file that ansible has ran here
      file:
        path: /var/log/ansible.run
        state: touch
        mode: '0644'
        owner: root
        group: root

@@ -0,0 +1,46 @@
---
# Requires amazon-efs-utils; included, but should probably be split out?
#
- name: "Installing amazon-efs-utils"
  become: yes
  become_user: root
  yum:
    name: 'https://git.rockylinux.org/neil/efs-utils/-/jobs/5/artifacts/raw/build/amazon-efs-utils-1.30.1-1.el8.noarch.rpm?inline=false'
    disable_gpg_check: yes
    validate_certs: yes
    state: present
  tags:
    - amazon_efs_utils
    - packages
    - mounts

- name: "Gathering ec2 facts"
  amazon.aws.ec2_metadata_facts:
  tags:
    - mounts

# "you can use /etc/hosts" https://github.com/aws/efs-utils/issues/1
- name: "Install custom hosts file because fmlC-w amazon said so."
  become: yes
  become_user: root
  ansible.builtin.lineinfile:
    path: /etc/hosts
    line: "{{ item.ip_map[ansible_ec2_placement_availability_zone] }} {{ item.fsid }}.efs.{{ ansible_ec2_placement_region }}.amazonaws.com"
    create: yes
  tags:
    - mounts

- name: "Creating and mounting {{ item.fsid }} at {{ item.mount_point }}"
  become: yes
  become_user: root
  ansible.posix.mount:
    path: "{{ item.mount_point }}"
    src: "{{ item.fsid }}:/"
    fstype: "{{ item.fstype }}"
    opts: "{{ item.fsopts | join(',') }}"
    state: "{{ item.state | default('mounted') }}"
  tags:
    - mounts

@@ -0,0 +1,9 @@
---
- name: Configure SELinux booleans
  ansible.posix.seboolean:
    name: "{{ item }}"
    persistent: true
    state: true
  with_items:
    - httpd_can_network_connect_db
    - httpd_can_network_connect
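
A minimal sketch of how a play could pull these booleans in (the task file path is an assumption about this repository's layout):
```
---
# Hypothetical wiring; tasks/selinux_booleans.yml is an assumed path
- name: Apply web-related SELinux booleans
  hosts: pinnwand
  become: true
  tasks:
    - include_tasks: tasks/selinux_booleans.yml
```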

@@ -0,0 +1,18 @@
---
x-efs_fs_opts_common: &common_fs_opts
  fstype: efs
  fsopts:
    - _netdev
    - tls
    - iam
    - rw

mounts:
  - name: prod-build-compose
    <<: *common_fs_opts
    fsid: fs-XXXXXXXX
    mount_point: /mnt/compose
    ip_map:
      us-east-2a: 10.100.100.250
      us-east-2b: 10.100.101.250
      us-east-2c: 10.100.102.250
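
Because the `<<: *common_fs_opts` merge key copies the shared EFS options into each entry, the mount above expands to roughly:
```
mounts:
  - name: prod-build-compose
    fstype: efs
    fsopts:
      - _netdev
      - tls
      - iam
      - rw
    fsid: fs-XXXXXXXX
    mount_point: /mnt/compose
    ip_map:
      us-east-2a: 10.100.100.250
      us-east-2b: 10.100.101.250
      us-east-2c: 10.100.102.250
```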

@@ -0,0 +1,26 @@
---
x-efs_fs_opts_common: &common_fs_opts
  fstype: efs
  fsopts:
    - _netdev
    - tls
    - iam
    - rw

mounts:
  - name: prod-build-repos-staging
    <<: *common_fs_opts
    fsid: fs-XXXXXXXX
    mount_point: /mnt/repos-staging
    ip_map:
      us-east-2a: 10.101.100.249
      us-east-2b: 10.101.101.249
      us-east-2c: 10.101.102.249
  - name: prod-build-repos-production
    <<: *common_fs_opts
    fsid: fs-YYYYYYYY
    mount_point: /mnt/repos-production
    ip_map:
      us-east-2a: 10.101.100.246
      us-east-2b: 10.101.101.246
      us-east-2c: 10.101.102.246

@@ -0,0 +1,50 @@
---
x-efs_fs_opts_common: &common_fs_opts
  fstype: efs
  fsopts:
    - _netdev
    - tls
    - iam
    - rw

mounts:
  - name: prod-build-repos-internal
    <<: *common_fs_opts
    fsid: fs-XXXXXXX1
    mount_point: /mnt/repos-internal
    ip_map:
      us-east-2a: 10.101.100.248
      us-east-2b: 10.101.101.248
      us-east-2c: 10.101.102.248
  - name: prod-koji
    <<: *common_fs_opts
    fsid: fs-XXXXXXX2
    mount_point: /mnt/koji
    ip_map:
      us-east-2a: 10.101.100.247
      us-east-2b: 10.101.101.247
      us-east-2c: 10.101.102.247
  - name: prod-build-compose
    <<: *common_fs_opts
    fsid: fs-XXXXXXX3
    mount_point: /mnt/compose
    ip_map:
      us-east-2a: 10.101.100.250
      us-east-2b: 10.101.101.250
      us-east-2c: 10.101.102.250
  - name: prod-build-repos-staging
    <<: *common_fs_opts
    fsid: fs-XXXXXXX4
    mount_point: /mnt/repos-staging
    ip_map:
      us-east-2a: 10.101.100.249
      us-east-2b: 10.101.101.249
      us-east-2c: 10.101.102.249
  - name: prod-build-repos-production
    <<: *common_fs_opts
    fsid: fs-XXXXXXX5
    mount_point: /mnt/repos-production
    ip_map:
      us-east-2a: 10.101.100.246
      us-east-2b: 10.101.101.246
      us-east-2c: 10.101.102.246

@@ -0,0 +1,64 @@
---
# pinnwand
firewall_rules:
  - port: 443/tcp
    permanent: true
    state: enabled
  - port: 9100/tcp
    permanent: true
    state: enabled

tls_ca_cert: "/etc/pki/tls/certs/ca-bundle.crt"
tls_cert: "/etc/pki/tls/certs/{{ ansible_fqdn }}.crt"
tls_key: "/etc/pki/tls/private/{{ ansible_fqdn }}.key"

ipa_getcert_requested_hostnames:
  - name: "{{ ansible_fqdn }}"
    owner: nginx
    key_location: "{{ tls_key }}"
    cert_location: "{{ tls_cert }}"
    postcmd: "/bin/systemctl reload nginx"

pinnwand_config:
  database:
    scheme: postgresql
    username: pinnwand
    password: "{{ _pinnwand_db_rw_pass }}"
    hostname: "db.rockylinux.org"
    port: 5432
    database: pinnwand_db
  paste_size: 10485760
  preferred_lexers: []
  logo_path: /opt/pinnwand/logo.png
  page_path: /tmp
  page_list:
    - about
    - removal
    - expiry
  footer: ''
  paste_help: ''
  report_email: 'abuse@rockylinux.org'
  expiries:
    - name: 1hour
      time: 3600
    - name: 1day
      time: 86400
    - name: 1week
      time: 604800
    - name: forever
      time: 4294967294
  ratelimits:
    - name: read
      capacity: 100
      consume: 1
      refill: 2
    - name: create
      capacity: 2
      consume: 2
      refill: 1
    - name: delete
      capacity: 2
      consume: 2
      refill: 1
  spamscore: 50
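
The `_pinnwand_db_rw_pass` reference above is supplied by the vaulted vars file the pinnwand play loads (vars/vaults/pinnwand.yml). A minimal sketch of its expected shape, with a placeholder value since the real file is ansible-vault encrypted:
```
---
# vars/vaults/pinnwand.yml (placeholder value; real file is ansible-vault encrypted)
_pinnwand_db_rw_pass: "CHANGEME"
```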