From 699ec2e2f08b1aaca16d0c7291983bb835aa929a Mon Sep 17 00:00:00 2001 From: nazunalika Date: Sat, 26 Feb 2022 20:19:20 -0700 Subject: [PATCH] init --- .ansible-lint | 6 + .pre-commit-config.yaml | 33 +++ .yamllint | 7 + README.md | 37 +++ adhoc-facts-refresh.yml | 8 + adhoc-rabbitmqqueue.yml | 87 +++++++ adhoc-rabbitmquser.yml | 35 +++ collections/README.md | 14 ++ collections/requirements.yml | 18 ++ defaults/main.yml | 2 + files/README.md | 1 + .../custom/sssd-rocky/CentOS-8-system-auth | 1 + .../custom/sssd-rocky/RedHat-8-system-auth | 40 ++++ .../custom/sssd-rocky/Rocky-8-system-auth | 1 + files/etc/pam.d/CentOS-7-system-auth-ac | 1 + files/etc/pam.d/RedHat-7-system-auth-ac | 34 +++ files/etc/rockybanner | 3 + files/etc/sudoers.d/cis | 2 + files/etc/systemd/system/noggin.service | 16 ++ files/usr/local/bin/dmidecode-pretty | 185 +++++++++++++++ files/usr/local/bin/lock-wrapper | 51 ++++ handlers/main.yml | 49 ++++ init-rocky-account-services.yml | 35 +++ init-rocky-ansible-host.yml | 57 +++++ init-rocky-bugzilla.yml | 58 +++++ init-rocky-chrony.yml | 41 ++++ init-rocky-install-kvm-hosts.yml | 60 +++++ init-rocky-mantisbt.yml | 59 +++++ init-rocky-noggin-theme.yml | 41 ++++ init-rocky-noggin.yml | 35 +++ init-rocky-openqa-developer-host.yml | 53 +++++ init-rocky-postfix-relay.yml | 37 +++ init-rocky-repo-servers.yml | 35 +++ init-rocky-system-config.yml | 57 +++++ local-ansible.cfg | 76 ++++++ mantis.yml | 32 +++ role-rocky-bootstrap_staging.yml | 41 ++++ role-rocky-graylog.yml | 67 ++++++ role-rocky-mirrormanager.yml | 72 ++++++ role-rocky-monitoring.yml | 64 ++++++ role-rocky-mqtt.yml | 62 +++++ role-rocky-node_exporter.yml | 22 ++ role-rocky-pinnwand.yml | 67 ++++++ role-rocky-rabbitmq.yml | 78 +++++++ role-rocky-repopool.yml | 42 ++++ roles/README.md | 14 ++ roles/requirements.yml | 44 ++++ tasks/account_services.yml | 27 +++ tasks/auditd.yml | 36 +++ tasks/authentication.yml | 55 +++++ tasks/bugzilla.yml | 55 +++++ tasks/bugzilla_install.yml | 60 +++++ tasks/chrony.yml | 33 +++ tasks/efs_mount.yml | 45 ++++ tasks/grub.yml | 5 + tasks/harden.yml | 217 ++++++++++++++++++ tasks/main.yml | 4 + tasks/mantis.yml | 100 ++++++++ tasks/mantispatch.yml | 26 +++ tasks/mirrormanager.yml | 68 ++++++ tasks/noggin.yml | 89 +++++++ tasks/openqa.yml | 192 ++++++++++++++++ tasks/postfix_relay.yml | 38 +++ tasks/repository.yml | 3 + tasks/scripts.yml | 18 ++ tasks/srpmproc.yml | 10 + tasks/ssh_config.yml | 46 ++++ tasks/variable_loader_common.yml | 22 ++ templates/README.md | 1 + templates/etc/httpd/conf.d/bugzilla.conf.j2 | 37 +++ templates/etc/httpd/conf.d/mantis.conf.j2 | 33 +++ templates/etc/postfix/sasl_passwd.j2 | 1 + .../opt/mirrormanager/mirrormanager2.cfg.j2 | 169 ++++++++++++++ templates/tmp/mantis_import.sql.j2 | 1 + templates/var/www/bugzilla/answer | 11 + templates/var/www/bugzilla/localconfig.j2 | 19 ++ .../var/www/mantis/config/config_inc.php.j2 | 46 ++++ tests/README.md | 3 + tests/inventory | 1 + tests/test.yml | 5 + vars/CentOS.yml | 1 + vars/RedHat.yml | 162 +++++++++++++ vars/Rocky.yml | 1 + vars/bugzilla.yml | 53 +++++ vars/common.yml | 23 ++ vars/ipaserver.yml | 3 + vars/main.yml | 2 + vars/mantis.yml | 32 +++ vars/mirrormanager.yml | 117 ++++++++++ vars/mounts/bootstrap_staging.yml | 19 ++ vars/mounts/mirrormanager.yml | 27 +++ vars/mounts/repopool.yml | 27 +++ vars/mounts/srpmproc.yml | 51 ++++ vars/mqtt.yml | 6 + vars/openqa.yml | 77 +++++++ vars/pinnwand.yml | 65 ++++++ vars/rabbitmq.yml | 45 ++++ 97 files changed, 4067 insertions(+) create mode 100644 .ansible-lint create 
mode 100644 .pre-commit-config.yaml create mode 100644 .yamllint create mode 100644 README.md create mode 100644 adhoc-facts-refresh.yml create mode 100644 adhoc-rabbitmqqueue.yml create mode 100644 adhoc-rabbitmquser.yml create mode 100644 collections/README.md create mode 100644 collections/requirements.yml create mode 100644 defaults/main.yml create mode 100644 files/README.md create mode 120000 files/etc/authselect/custom/sssd-rocky/CentOS-8-system-auth create mode 100644 files/etc/authselect/custom/sssd-rocky/RedHat-8-system-auth create mode 120000 files/etc/authselect/custom/sssd-rocky/Rocky-8-system-auth create mode 120000 files/etc/pam.d/CentOS-7-system-auth-ac create mode 100644 files/etc/pam.d/RedHat-7-system-auth-ac create mode 100644 files/etc/rockybanner create mode 100644 files/etc/sudoers.d/cis create mode 100644 files/etc/systemd/system/noggin.service create mode 100644 files/usr/local/bin/dmidecode-pretty create mode 100644 files/usr/local/bin/lock-wrapper create mode 100644 handlers/main.yml create mode 100644 init-rocky-account-services.yml create mode 100644 init-rocky-ansible-host.yml create mode 100644 init-rocky-bugzilla.yml create mode 100644 init-rocky-chrony.yml create mode 100644 init-rocky-install-kvm-hosts.yml create mode 100644 init-rocky-mantisbt.yml create mode 100644 init-rocky-noggin-theme.yml create mode 100644 init-rocky-noggin.yml create mode 100644 init-rocky-openqa-developer-host.yml create mode 100644 init-rocky-postfix-relay.yml create mode 100644 init-rocky-repo-servers.yml create mode 100644 init-rocky-system-config.yml create mode 100644 local-ansible.cfg create mode 100644 mantis.yml create mode 100644 role-rocky-bootstrap_staging.yml create mode 100644 role-rocky-graylog.yml create mode 100644 role-rocky-mirrormanager.yml create mode 100644 role-rocky-monitoring.yml create mode 100644 role-rocky-mqtt.yml create mode 100644 role-rocky-node_exporter.yml create mode 100644 role-rocky-pinnwand.yml create mode 100644 role-rocky-rabbitmq.yml create mode 100644 role-rocky-repopool.yml create mode 100644 roles/README.md create mode 100644 roles/requirements.yml create mode 100644 tasks/account_services.yml create mode 100644 tasks/auditd.yml create mode 100644 tasks/authentication.yml create mode 100644 tasks/bugzilla.yml create mode 100644 tasks/bugzilla_install.yml create mode 100644 tasks/chrony.yml create mode 100644 tasks/efs_mount.yml create mode 100644 tasks/grub.yml create mode 100644 tasks/harden.yml create mode 100644 tasks/main.yml create mode 100644 tasks/mantis.yml create mode 100644 tasks/mantispatch.yml create mode 100644 tasks/mirrormanager.yml create mode 100644 tasks/noggin.yml create mode 100644 tasks/openqa.yml create mode 100644 tasks/postfix_relay.yml create mode 100644 tasks/repository.yml create mode 100644 tasks/scripts.yml create mode 100644 tasks/srpmproc.yml create mode 100644 tasks/ssh_config.yml create mode 100644 tasks/variable_loader_common.yml create mode 100644 templates/README.md create mode 100644 templates/etc/httpd/conf.d/bugzilla.conf.j2 create mode 100644 templates/etc/httpd/conf.d/mantis.conf.j2 create mode 100644 templates/etc/postfix/sasl_passwd.j2 create mode 100644 templates/opt/mirrormanager/mirrormanager2.cfg.j2 create mode 100644 templates/tmp/mantis_import.sql.j2 create mode 100644 templates/var/www/bugzilla/answer create mode 100644 templates/var/www/bugzilla/localconfig.j2 create mode 100644 templates/var/www/mantis/config/config_inc.php.j2 create mode 100644 tests/README.md create mode 100644 
tests/inventory create mode 100644 tests/test.yml create mode 120000 vars/CentOS.yml create mode 100644 vars/RedHat.yml create mode 120000 vars/Rocky.yml create mode 100644 vars/bugzilla.yml create mode 100644 vars/common.yml create mode 100644 vars/ipaserver.yml create mode 100644 vars/main.yml create mode 100644 vars/mantis.yml create mode 100644 vars/mirrormanager.yml create mode 100644 vars/mounts/bootstrap_staging.yml create mode 100644 vars/mounts/mirrormanager.yml create mode 100644 vars/mounts/repopool.yml create mode 100644 vars/mounts/srpmproc.yml create mode 100644 vars/mqtt.yml create mode 100644 vars/openqa.yml create mode 100644 vars/pinnwand.yml create mode 100644 vars/rabbitmq.yml diff --git a/.ansible-lint b/.ansible-lint new file mode 100644 index 0000000..2394b2a --- /dev/null +++ b/.ansible-lint @@ -0,0 +1,6 @@ +# .ansible-lint +warn_list: + - '204' # Lines should be less than 160 characters + - '701' # meta/main.yml should contain relevant info +skip_list: + - '106' # Role name must match ^[a-z][a-z0-9_]+$ pattern diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..5f5065c --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,33 @@ +--- +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v3.4.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-added-large-files + - id: check-case-conflict + - id: check-executables-have-shebangs + - id: check-json + - id: pretty-format-json + - id: detect-private-key + + - repo: local + hooks: + - id: ansible-lint + name: Ansible-lint + description: This hook runs ansible-lint. + entry: ansible-lint --force-color + language: python + # do not pass files to ansible-lint, see: + # https://github.com/ansible/ansible-lint/issues/611 + pass_filenames: false + always_run: true + + - repo: https://github.com/adrienverge/yamllint.git + rev: v1.26.0 + hooks: + - id: yamllint + files: \.(yaml|yml)$ + types: [file, yaml] + entry: yamllint diff --git a/.yamllint b/.yamllint new file mode 100644 index 0000000..04c5633 --- /dev/null +++ b/.yamllint @@ -0,0 +1,7 @@ +--- +extends: default + +rules: + line-length: + max: 140 + level: warning diff --git a/README.md b/README.md new file mode 100644 index 0000000..9810dfa --- /dev/null +++ b/README.md @@ -0,0 +1,37 @@ +# Ansible AWX Template: Ops Management + +Ansible AWX is the method used for the Rocky Linux infrastructure, as a replacement for using the CLI. This template is used specifically for management of systems and infrastructure and takes bits and pieces from the original infrastructure git repository on GitHub. + +This repository may include duplicate playbooks from other ansible management repositories. Some pieces may also be removed and put into their own repository. + +## Notes on local runs and playbooks for local development systems + +There are some playbooks that are meant to be run locally. There are also cases where AWX is not feasible. To run said playbooks, these are things to keep in mind: + +* local-ansible.cfg will need to be used +* `init-rocky-ansible-host.yml` will need to be run using that configuration file (if there are roles/collections needed) + +## Provides / Information + +This repository is for Infrastructure operations. + +``` +.
+├── README.md +├── defaults +│   └── main.yml +├── files +│   └── README.md +├── handlers +│   └── main.yml +├── tasks +│   └── main.yml +├── templates +│   └── README.md +├── tests +│   ├── README.md +│   ├── inventory +│   └── test.yml +└── vars + └── main.yml +``` diff --git a/adhoc-facts-refresh.yml b/adhoc-facts-refresh.yml new file mode 100644 index 0000000..b43b928 --- /dev/null +++ b/adhoc-facts-refresh.yml @@ -0,0 +1,8 @@ +--- +- hosts: all + become: true + tasks: + - name: Force a fact refresh to have those available in local cache + setup: + gather_timeout: 30 +... diff --git a/adhoc-rabbitmqqueue.yml b/adhoc-rabbitmqqueue.yml new file mode 100644 index 0000000..ba5bdb2 --- /dev/null +++ b/adhoc-rabbitmqqueue.yml @@ -0,0 +1,87 @@ +--- +# This playbook is meant to be used with callable variables, like adhoc or AWX. +# What: Creates RabbitMQ Users +# Required parameters: +# -> username: The username to create in RabbitMQ, which should match an LDAP +# name or the CN of a certificate. Note that if it's a hostname +# it must be the FQDN. +# -> queue_name: Name of the queue to create. This should be setup with a +# prefix_suffix name, where prefix is the username, and +# the suffix is a service name. +# -> routing_keys: A list to be used as routing keys. +# Optional: +# -> write_queues: A list of queues name prefixes that which the user will +# be allowed to publish. +# -> thresholds: A dictionary with two keys "warning" and "critical" - The +# values are numbers. In the event we have a monitoring system +# this can be a number of messages that could cause an alert. +# -> vhost: The vhost this queue will be part of. The default is /pubsub. + +- name: Create a User + hosts: all + become: false + gather_facts: false + vars_files: + - vars/rabbitmq.yml + + tasks: + - name: "Checking for user variables" + assert: + that: + - username != "admin" + - username != "guest" + - username != "mq-monitoring" + success_msg: "Required variables provided" + fail_msg: "Username is reserved" + tags: + - rabbitmq + + - name: "Validate username queue name" + assert: + that: + - "queue_name.startswith(username)" + tags: + - rabbitmq + + - name: "Creating User Account" + community.rabbitmq.rabbitmq_user: + user: "{{ username }}" + vhost: "{{ vhost|default('/pubsub') }}" + read_priv: "^(zmq\\.topic)|^(amq\\.topic)|({{ username }}.*)$" + write_priv: "^(amq\\.topic)|({{ username }}.*){% for queue in write_queues|default([]) %}|({{ queue }}.*){% endfor %}$" + configure_priv: "^$" + state: present + tags: + - rabbitmq + + - name: "Create {{ queue_name }}" + delegate_to: "{{ rabbitmq_cluster_list[0] }}" + community.rabbitmq.rabbitmq_queue: + name: "{{ queue_name }}" + vhost: "{{ vhost|default('/pubsub') }}" + auto_delete: false + durable: true + message_ttl: "{{ message_ttl|default('null') }}" + state: present + login_user: admin + login_password: "{{ rabbitmq_admin_password }}" + tags: + - rabbitmq + + - name: "Bind {{ queue_name }} to amq.topic exchange" + delegate_to: "{{ rabbitmq_cluster_list[0] }}" + community.rabbitmq.rabbitmq_binding: + name: "amq.topic" + destination: "{{ queue_name }}" + destination_type: queue + routing_key: "{{ routing_item }}" + vhost: "{{ vhost|default('/pubsub') }}" + state: present + login_user: admin + login_password: "{{ rabbitmq_admin_password }}" + loop: "{{ routing_keys }}" + loop_control: + loop_var: routing_item + tags: + - rabbitmq +... 
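For reference, a minimal sketch of how this queue playbook might be invoked outside of AWX, passing the documented parameters as extra-vars. The hostname, queue name, and routing keys below are hypothetical examples, not values taken from this repository:

```
ansible-playbook adhoc-rabbitmqqueue.yml \
  -e username=build01.example.rockylinux.org \
  -e queue_name=build01.example.rockylinux.org_koji \
  -e '{"routing_keys": ["build.status", "build.complete"]}' \
  -e vhost=/pubsub
```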
diff --git a/adhoc-rabbitmquser.yml b/adhoc-rabbitmquser.yml new file mode 100644 index 0000000..09768c4 --- /dev/null +++ b/adhoc-rabbitmquser.yml @@ -0,0 +1,35 @@ +--- +# This playbook is meant to be used with callable variables, like adhoc or AWX. +# What: Creates RabbitMQ Users +# The username is the required parameter + +- name: Create a User + hosts: all + become: false + gather_facts: false + vars_files: + - vars/rabbitmq.yml + + tasks: + - name: "Checking for user variables" + assert: + that: + - username != "admin" + - username != "guest" + - username != "mq-monitoring" + success_msg: "Required variables provided" + fail_msg: "Username is reserved" + tags: + - rabbitmq + + - name: "Creating User Account" + community.rabbitmq.rabbitmq_user: + user: "{{ username }}" + vhost: "{{ vhost }}" + read_priv: "^$" + write_priv: "amq\\.topic" + configure_priv: "^$" + state: present + tags: + - rabbitmq +... diff --git a/collections/README.md b/collections/README.md new file mode 100644 index 0000000..a70c7ef --- /dev/null +++ b/collections/README.md @@ -0,0 +1,14 @@ +# Collections + +If you want to use a collection specifically for this, you will need to define it in a `requirements.yml`; otherwise AWX will not install what you need to run your tasks. + +Example: + +``` +--- +# Collections +collections: + - netbox.netbox + - community.aws + - containers.podman +``` diff --git a/collections/requirements.yml b/collections/requirements.yml new file mode 100644 index 0000000..7fa8ac2 --- /dev/null +++ b/collections/requirements.yml @@ -0,0 +1,18 @@ +--- +collections: + # freeipa + - name: freeipa.ansible_freeipa + version: 1.6.3 + - name: community.general + - name: community.mysql + - name: community.rabbitmq + - name: ansible.posix + - name: ansible.utils + - name: ktdreyer.koji_ansible + - name: netbox.netbox + - name: community.aws + - name: community.libvirt + - name: containers.podman + - name: nginxinc.nginx_core + version: 0.3.0 +...
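For local runs outside AWX, the collections listed above can be installed ahead of time with ansible-galaxy; a sketch using the same collections path that init-rocky-ansible-host.yml targets:

```
ansible-galaxy collection install -r collections/requirements.yml -p collections
```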
diff --git a/defaults/main.yml b/defaults/main.yml new file mode 100644 index 0000000..858c8da --- /dev/null +++ b/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# Defaults diff --git a/files/README.md b/files/README.md new file mode 100644 index 0000000..f154f20 --- /dev/null +++ b/files/README.md @@ -0,0 +1 @@ +Files come here diff --git a/files/etc/authselect/custom/sssd-rocky/CentOS-8-system-auth b/files/etc/authselect/custom/sssd-rocky/CentOS-8-system-auth new file mode 120000 index 0000000..62848fb --- /dev/null +++ b/files/etc/authselect/custom/sssd-rocky/CentOS-8-system-auth @@ -0,0 +1 @@ +RedHat-8-system-auth \ No newline at end of file diff --git a/files/etc/authselect/custom/sssd-rocky/RedHat-8-system-auth b/files/etc/authselect/custom/sssd-rocky/RedHat-8-system-auth new file mode 100644 index 0000000..37ec715 --- /dev/null +++ b/files/etc/authselect/custom/sssd-rocky/RedHat-8-system-auth @@ -0,0 +1,40 @@ +{imply "with-smartcard" if "with-smartcard-required"} +auth required pam_env.so +auth required pam_faildelay.so delay=2000000 +auth required pam_faillock.so preauth audit silent deny=5 unlock_time=900 {include if "with-faillock"} +auth [success=1 default=ignore] pam_succeed_if.so service notin login:gdm:xdm:kdm:xscreensaver:gnome-screensaver:kscreensaver quiet use_uid {include if "with-smartcard-required"} +auth [success=done ignore=ignore default=die] pam_sss.so require_cert_auth ignore_authinfo_unavail {include if "with-smartcard-required"} +auth sufficient pam_fprintd.so {include if "with-fingerprint"} +auth sufficient pam_u2f.so cue {include if "with-pam-u2f"} +auth required pam_u2f.so cue nouserok {include if "with-pam-u2f-2fa"} +auth [default=1 ignore=ignore success=ok] pam_usertype.so isregular +auth [default=1 ignore=ignore success=ok] pam_localuser.so {exclude if "with-smartcard"} +auth [default=2 ignore=ignore success=ok] pam_localuser.so {include if "with-smartcard"} +auth [success=done authinfo_unavail=ignore ignore=ignore default=die] pam_sss.so try_cert_auth {include if "with-smartcard"} +auth sufficient pam_unix.so {if not "without-nullok":nullok} try_first_pass +auth [default=1 ignore=ignore success=ok] pam_usertype.so isregular +auth sufficient pam_sss.so forward_pass +auth required pam_faillock.so authfail audit deny=5 unlock_time=900 fail_interval=900 {include if "with-faillock"} +auth required pam_deny.so + +account required pam_access.so {include if "with-pamaccess"} +account required pam_faillock.so {include if "with-faillock"} +account required pam_unix.so +account sufficient pam_localuser.so {exclude if "with-files-access-provider"} +account sufficient pam_usertype.so issystem +account [default=bad success=ok user_unknown=ignore] pam_sss.so +account required pam_permit.so + +password requisite pam_pwquality.so try_first_pass local_users_only minlen=14 dcredit=-1 lcredit=-1 ucredit=-1 ocredit=-1 retry=3 +password requisite pam_pwhistory.so use_authok remember=5 +password sufficient pam_unix.so sha512 shadow {if not "without-nullok":nullok} try_first_pass use_authtok +password sufficient pam_sss.so use_authtok +password required pam_deny.so + +session optional pam_keyinit.so revoke +session required pam_limits.so +-session optional pam_systemd.so +session optional pam_oddjob_mkhomedir.so umask=0077 {include if "with-mkhomedir"} +session [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid +session required pam_unix.so +session optional pam_sss.so diff --git a/files/etc/authselect/custom/sssd-rocky/Rocky-8-system-auth 
b/files/etc/authselect/custom/sssd-rocky/Rocky-8-system-auth new file mode 120000 index 0000000..62848fb --- /dev/null +++ b/files/etc/authselect/custom/sssd-rocky/Rocky-8-system-auth @@ -0,0 +1 @@ +RedHat-8-system-auth \ No newline at end of file diff --git a/files/etc/pam.d/CentOS-7-system-auth-ac b/files/etc/pam.d/CentOS-7-system-auth-ac new file mode 120000 index 0000000..456a8fc --- /dev/null +++ b/files/etc/pam.d/CentOS-7-system-auth-ac @@ -0,0 +1 @@ +RedHat-7-system-auth-ac \ No newline at end of file diff --git a/files/etc/pam.d/RedHat-7-system-auth-ac b/files/etc/pam.d/RedHat-7-system-auth-ac new file mode 100644 index 0000000..c20a81b --- /dev/null +++ b/files/etc/pam.d/RedHat-7-system-auth-ac @@ -0,0 +1,34 @@ +#%PAM-1.0 +# This file is auto-generated. +# User changes will be destroyed the next time authconfig is run. +auth required pam_env.so +auth required pam_faildelay.so delay=2000000 +auth required pam_faillock.so preauth audit silent deny=5 unlock_time=900 +auth [default=1 success=ok] pam_localuser.so +auth [success=done ignore=ignore default=bad] pam_unix.so nullok try_first_pass +auth requisite pam_succeed_if.so uid >= 1000 quiet_success +auth sufficient pam_sss.so forward_pass +auth [default=die] pam_faillock.so authfail audit deny=5 unlock_time=900 +auth required pam_deny.so + +account required pam_faillock.so +account required pam_unix.so +account sufficient pam_localuser.so +account sufficient pam_succeed_if.so uid < 1000 quiet +account [default=bad success=ok user_unknown=ignore] pam_sss.so +account required pam_permit.so + +password requisite pam_pwquality.so try_first_pass minlen=14 dcredit=-1 lcredit=-1 ucredit=-1 ocredit=-1 local_users_only retry=3 +password requisite pam_pwhistory.so use_authok remember=5 +password sufficient pam_unix.so sha512 shadow try_first_pass use_authtok +password sufficient pam_sss.so use_authtok +password required pam_deny.so + +session optional pam_keyinit.so revoke +session required pam_limits.so +-session optional pam_systemd.so +session optional pam_oddjob_mkhomedir.so umask=0077 +session [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid +session required pam_unix.so +session optional pam_sss.so + diff --git a/files/etc/rockybanner b/files/etc/rockybanner new file mode 100644 index 0000000..283b178 --- /dev/null +++ b/files/etc/rockybanner @@ -0,0 +1,3 @@ +This is a Rocky Linux system + +All access is logged and monitored. Unauthorized access is prohibited. 
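The RedHat-8-system-auth file above (with its CentOS and Rocky symlinks) is laid out as a custom authselect profile. Assuming it is deployed to /etc/authselect/custom/sssd-rocky by the authentication tasks (not shown in this section), enabling it would look roughly like:

```
authselect select custom/sssd-rocky with-faillock with-mkhomedir --force
```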
diff --git a/files/etc/sudoers.d/cis b/files/etc/sudoers.d/cis new file mode 100644 index 0000000..9f41de1 --- /dev/null +++ b/files/etc/sudoers.d/cis @@ -0,0 +1,2 @@ +Defaults use_pty +Defaults logfile="/var/log/sudo.log" diff --git a/files/etc/systemd/system/noggin.service b/files/etc/systemd/system/noggin.service new file mode 100644 index 0000000..aedea69 --- /dev/null +++ b/files/etc/systemd/system/noggin.service @@ -0,0 +1,16 @@ +[Unit] +Description=noggin +After=network-online.target +Wants=network-online.target + +[Service] +Environment=FLASK_APP=/opt/noggin/noggin/noggin/app.py +Environment=NOGGIN_CONFIG_PATH=/opt/noggin/noggin.cfg +Environment=FLASK_DEBUG=1 +User=noggin +WorkingDirectory=/opt/noggin/noggin +ExecStart=/bin/bash /opt/noggin/start_noggin.sh +PrivateTmp=true + +[Install] +WantedBy=multi-user.target diff --git a/files/usr/local/bin/dmidecode-pretty b/files/usr/local/bin/dmidecode-pretty new file mode 100644 index 0000000..a66d71a --- /dev/null +++ b/files/usr/local/bin/dmidecode-pretty @@ -0,0 +1,185 @@ +#!/usr/bin/perl -w +# Louis Abel +use strict; + +# Check for version of dmidecode +my $dmi_test = `dmidecode -q 2>/dev/null; echo \$?`; +chomp($dmi_test); +our $dmi_ver = "rhel8"; +our $dmidecode_cmd = "dmidecode -q"; +if( $dmi_test eq "1" ) { $dmi_ver = "rhel4"; $dmidecode_cmd = "dmidecode"; } + +# Figure out number of cores per cpu +my $c_cpuinfo = `grep -c processor /proc/cpuinfo`; +chomp($c_cpuinfo); +my $c_dmidecode = `$dmidecode_cmd | grep -c 'Processor Information'`; +chomp($c_dmidecode); + +# Figure out hyperthreaded cores +my $htt; +my $lscpu_test = `lscpu 2>/dev/null; echo \$?`; +chomp($lscpu_test); +if( $lscpu_test eq "127" ) { + $htt = "Cannot Detect Threads"; +} else { + $htt = `lscpu | awk -F':' '/Thread/ {print \$2}'`; + chomp($htt); +} +$htt =~ s/^\s+|\s+$//g; + +my $cores; +if( $c_cpuinfo eq $c_dmidecode ) { + $cores = "single core"; +} elsif ( $c_cpuinfo > $c_dmidecode ) { + my $num_cores = $c_cpuinfo / $c_dmidecode / $htt; + $cores = "$num_cores cores"; +} else { + $cores = "failed to determine number of cores"; +} + +# Parse dmidecode output +our %manufacturer; +our %cpu; +our %memory; +our %network; + +open( FH, "$dmidecode_cmd |") or die "Couldn't run $dmidecode_cmd: $!\n\n"; +my ($section, $dim, $dim_size); +my $dims_used = 0; +my $dims_total = 0; +my $eths_section = 0; +my $eths_total = 0; +while( my $line = ) { + chomp($line); + + # Store section information + if( $line =~ /^\S+/ ) { $section = $line; } + + # Print Bios Information + if( $section eq "BIOS Information" || $section =~ /Handle 0x0000/ ) { + if( $line =~ /^\s+Version:\s+(.+)\s*$/ ) { $manufacturer{bios} = $1; } + } + + # Print System Information + if( $section eq "System Information" || $section =~ /Handle 0x0100/ ) { + if( $line =~ /^\s+Manufacturer:\s+(.+)\s*$/ ) { if( $1 =~ /Dell Computer Corporation/ ) { $manufacturer{make} = "Dell Inc."; } else { $manufacturer{make} = $1; } } + if( $line =~ /^\s+Product Name:\s+(.+)\s*$/ ) { my $tmp = $1; $tmp =~ s/\s+$//g; $manufacturer{model} = $tmp; } + if( $line =~ /^\s+Serial Number:\s+(.+)\s*$/ ) { $manufacturer{serial} = $1; } + } + + # Print Chassis Information + if( $section eq "Chassis Information" || $section =~ /Handle 0x0300/ ) { + if( $line =~ /^\s+Type:\s+(.+)\s*$/ ) { $manufacturer{chassis_type} = $1; } + if( $line =~ /^\s+Height:\s+(.+)\s*$/ ) { $manufacturer{chassis_height} = $1; } + } + + # Print Processor Information + if( $section eq "Processor Information" || $section =~ /Handle 0x040/ ) { + if( $line =~ 
/^\s+Version:\s+(.+)\s*$/ ) { + my $cpu_model = $1; + + if( $cpu_model =~ /Not Specified/ ) { + $cpu_model = `cat /proc/cpuinfo | grep 'model name' | awk -F: {'print \$2'} | head -n 1`; + chomp( $cpu_model ); + $cpu_model =~ s/^\s*//g; + } + + $cpu_model =~ s/\s+/ /g; + + $cpu{physical} = $c_dmidecode; + $cpu{virtual} = $c_cpuinfo; + $cpu{model} = "$cpu_model ($cores) (Threads: $htt)"; + } + + if( $line =~ /^\s+Speed:\s+(.+)\s*$/ ) { $cpu{speed} = $1; } + + } + + # Print Physical Memory Array + if( $section eq "Physical Memory Array" || $section =~ /Handle 0x1000/ ) { + if( $line =~ /^\s+Error Correction Type:\s+(.+)\s*$/ ) { $memory{error} = $1; } + if( $line =~ /^\s+Maximum Capacity:\s+(.+)\s*$/ ) { $memory{max} = $1; } + if( $line =~ /^\s+Number Of Devices:\s+(.+)\s*$/ ) { $memory{count} = $1; } + } + + # Print Memory Device + if( $section eq "Memory Device" || $section =~ /Handle 0x110/ ) { + if( $line =~ /^\s+Locator:\s+(.+)\s*$/ ) { $dim = $1; $dim =~ s/\s+//g; $dims_total++} + if( $line =~ /^\s+Size:\s+(.+)\s*$/ ) { $dim_size = $1; } + if( $line =~ /^\s+Speed:\s+(.+)\s*$/ ) { next if( $dim_size =~ /No Module Installed/ ); $memory{$dims_total}{location} = $dim; $memory{$dims_total}{size} = $dim_size; $memory{$dims_total}{speed} = $1; $dims_used++; } + if( $line =~ /^\s+Type:\s+(.+)\s*$/ ) { $memory{type} = $1; } + } + + # Print Ethernet Devices + $network{total} = 0; + if( $section =~ /^On Board Device/ || $section =~ /Handle 0x0A00/ || $section =~ /^Onboard Device/ ) { + if( $line =~ /^\s+Type:\s+Ethernet\s*$/ ) { $eths_section = 1; $eths_total++; $network{total} = $eths_total; } + next if( $eths_section == 0 ); + + if( $line =~ /^\s+Status:\s+(.+)\s*$/ ) { $network{$eths_total}{status} = $1; } + if( $line =~ /^\s+Description:\s+(.+)\s*$/ ) { $network{$eths_total}{desc} = $1; } + } +} +close(FH); + + +# Clean up missing data +$manufacturer{chassis_height} = "" unless( defined($manufacturer{chassis_height}) ); +$memory{used} = $dims_total; + +#Print Data +print "Make: $manufacturer{make}\n"; +print "Model: $manufacturer{model}\n"; +print "Serial: $manufacturer{serial}\n"; +print "Bios Rev: $manufacturer{bios}\n"; +print "Chassis Type: $manufacturer{chassis_type}\n"; +print "Chassis Height: $manufacturer{chassis_height}\n"; +print "$cpu{physical} x $cpu{model}\n"; +print_memory_info(); +print_network_info(); + + +#### Functions #### + +sub print_memory_info { + my ($maxsize, $max_unit) = $memory{max} =~ /^\s*(\d+)\s*(\w+)\s*$/; + my $dim_count = $memory{count}; + my $max_per_dim = $maxsize / $dim_count; + + my $size_error = ""; + my $speed_error = ""; + my $common_size; + my $common_speed; + for( my $i = 1; $i < $dims_used + 1; $i++ ) { + my $size = $memory{$i}{size} || 0; + my $speed = $memory{$i}{speed} || 0; + + if( defined($common_size) && $common_size ne $size ) { $size_error = 1; } + else { $common_size = $size; } + if( defined($common_speed) && $common_speed ne $speed ) { $speed_error = 2; } + else { $common_speed = $speed; } + } + + my ($mem_size, $mem_unit) = $common_size =~ /^\s*(\d+)\s*(\w+)\s*$/; + my $total_mem_unit = "MB"; + if( $mem_unit eq "MB" ) { $total_mem_unit = "GB"; } + my $mem_total = ($mem_size * $dims_used) * 1024 ; + + if( $common_size =~ /(\d+\.\d{2})\d+/ ) { $common_size = $1; } + if( $mem_size >= 1024 ) { my $gb_size = $mem_size / 1024; $common_size = "$gb_size GB"; } + + print "$common_size @ $common_speed x $dims_used = $mem_total $total_mem_unit"; + if( $size_error || $speed_error ) { print " $size_error$speed_error"; } + print "\n"; + + if( 
$max_per_dim =~ /(\d+\.\d{2})\d+/ ) { $max_per_dim = $1; } + print "$max_per_dim $max_unit x $dim_count dims = $maxsize $max_unit maximum capacity\n"; + print "$memory{type}\n$memory{error}\n"; +} + +sub print_network_info { + my $num_devices = $network{total}; + for( my $i=1; $i < $num_devices + 1; $i++ ) { + print "$network{$i}{desc} [$network{$i}{status}]\n"; + } +} diff --git a/files/usr/local/bin/lock-wrapper b/files/usr/local/bin/lock-wrapper new file mode 100644 index 0000000..17c96ff --- /dev/null +++ b/files/usr/local/bin/lock-wrapper @@ -0,0 +1,51 @@ +#!/bin/bash +# Borrowed from Fedora Infra for Rocky Linux + +if [ $# -lt 2 ]; then + echo "Usage: $0 [name] [script]" + exit 1; +fi + +NAME=$1 +SCRIPT=$2 + +SILENT="no" +if [ $# -ge 3 -a "$3" == "--silent" ]; then + SILENT="yes" + shift +fi + +shift 2 + +LOCKDIR="/var/tmp/$NAME" +PIDFILE="$LOCKDIR/pid" + +function cleanup { + rm -rf "$LOCKDIR" +} + +RESTORE_UMASK=$(umask -p) +umask 0077 +mkdir "$LOCKDIR" >& /dev/null +if [ $? != 0 ]; then + PID=$(cat "$PIDFILE") + if [ -n "$PID" ] && /bin/ps $PID > /dev/null + then + if [ "$SILENT" != "yes" ]; then + echo "$PID is still running" + /bin/ps -o user,pid,start,time,comm $PID + fi + exit 1; + else + echo "$LOCKDIR exists but $PID is dead" + echo "Removing lockdir and re-running" + /bin/rm -rf $LOCKDIR + mkdir $LOCKDIR || exit + fi +fi + +trap cleanup EXIT SIGQUIT SIGHUP SIGTERM +echo $$ > "$PIDFILE" + +$RESTORE_UMASK +eval "$SCRIPT $*" diff --git a/handlers/main.yml b/handlers/main.yml new file mode 100644 index 0000000..69cbdb4 --- /dev/null +++ b/handlers/main.yml @@ -0,0 +1,49 @@ +--- +# Handlers +- name: restart_sshd + service: + name: sshd + state: restarted + +- name: restart_httpd + service: + name: httpd + state: restarted + +- name: restart_nginx + service: + name: nginx + state: restarted + +- name: reload_networkmanager + service: + name: NetworkManager + state: reloaded + +- name: regenerate_auditd_rules + command: /sbin/augenrules + +- name: reload_chrony + systemd: + name: "{{ chrony_service_name }}" + state: restarted + listen: "chrony service restart" + +- name: restart_gitlab + command: gitlab-ctl reconfigure + register: gitlab_restart + failed_when: gitlab_restart_handler_failed_when | bool + +- name: restart_noggin + service: + name: noggin + state: restarted + +- name: rehash_postfix_sasl + command: "postmap /etc/postfix/sasl_passwd" + +- name: restart_postfix + service: + name: postfix + state: restarted +... diff --git a/init-rocky-account-services.yml b/init-rocky-account-services.yml new file mode 100644 index 0000000..a2c302b --- /dev/null +++ b/init-rocky-account-services.yml @@ -0,0 +1,35 @@ +--- +# Preps a system to be part of Account Services +- name: Configure Account Services + hosts: all + become: true + + handlers: + - import_tasks: handlers/main.yml + + pre_tasks: + - name: Check if ansible cannot be run here + stat: + path: /etc/no-ansible + register: no_ansible + + - name: Verify if we can run ansible + assert: + that: + - "not no_ansible.stat.exists" + success_msg: "We are not able to run on this node" + fail_msg: "/etc/no-ansible exists - skipping run on this node" + + tasks: + - name: Deploy Account Services + import_tasks: tasks/account_services.yml + + post_tasks: + - name: Touching run file that ansible has ran here + file: + path: /var/log/ansible.run + state: touch + mode: '0644' + owner: root + group: root +... 
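Every playbook in this repository gates on the /etc/no-ansible marker checked in the pre_tasks above and touches /var/log/ansible.run when it finishes, so opting a host out of (or back into) Ansible management is simply:

```
# Exclude a host from ansible runs
touch /etc/no-ansible

# Allow ansible runs again
rm -f /etc/no-ansible
```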
diff --git a/init-rocky-ansible-host.yml b/init-rocky-ansible-host.yml new file mode 100644 index 0000000..3c48e82 --- /dev/null +++ b/init-rocky-ansible-host.yml @@ -0,0 +1,57 @@ +--- + +- hosts: localhost + connection: local + vars: + force_purge: true + roles_installation_dir: roles/public + collection_installation_dir: collections + installation_prefix: ./ + pre_tasks: + # example prepare ansible box for execution + # - name: install required pip modules on the host running ansible + # pip: + # name: + # - jmespath + # - netaddr + # - python-consul + # - pyvmomi + # - python-ldap + # - twine + + - name: Remove existing public roles + file: + path: "{{ installation_prefix }}{{ roles_installation_dir }}" + state: absent + when: force_purge | bool + + - name: Install all public roles + command: > + ansible-galaxy role install + {{ ( force_purge | bool ) | ternary('--force','') }} + --role-file {{ installation_prefix }}roles/requirements.yml + --roles-path {{ installation_prefix }}{{ roles_installation_dir }} + register: galaxy_install_role + changed_when: '"Installing " in galaxy_install_role.stdout' + + - name: Install needed collections + command: > + ansible-galaxy collection install + {{ ( force_purge | bool ) | ternary('--force-with-deps','') }} + -r {{ installation_prefix }}collections/requirements.yml + -p {{ installation_prefix }}{{ collection_installation_dir }} + register: galaxy_install_collection + changed_when: '"Installing " in galaxy_install_collection.stdout' + + - name: cleanup old ssh known_hosts - remove + file: + path: "../tmp/known_hosts" + state: absent + mode: "0644" + + - name: cleanup old ssh known_hosts - blank + file: + path: "../tmp/known_hosts" + state: touch + mode: "0644" +... diff --git a/init-rocky-bugzilla.yml b/init-rocky-bugzilla.yml new file mode 100644 index 0000000..f7a866e --- /dev/null +++ b/init-rocky-bugzilla.yml @@ -0,0 +1,58 @@ +--- +# Installs Bugzilla +- name: Configure Bugzilla + hosts: all + become: true + vars_files: + - vars/common.yml + - vars/bugzilla.yml + + handlers: + - import_tasks: handlers/main.yml + + pre_tasks: + - name: Check if ansible cannot be run here + stat: + path: /etc/no-ansible + register: no_ansible + + - name: Verify if we can run ansible + assert: + that: + - "not no_ansible.stat.exists" + success_msg: "We are able to run on this node" + fail_msg: "/etc/no-ansible exists - skipping run on this node" + + roles: + - role: rockylinux.ipagetcert + state: present + + tasks: + - name: Deploy Bugzilla + import_tasks: tasks/bugzilla.yml + + post_tasks: + - name: Open firewalld ports + ansible.posix.firewalld: + service: "{{ item }}" + permanent: true + immediate: true + state: enabled + with_items: + - http + - https + + - name: Ensure httpd is enabled and running + service: + name: httpd + enabled: true + state: started + + - name: Touching run file that ansible has ran here + file: + path: /var/log/ansible.run + state: touch + mode: '0644' + owner: root + group: root +...
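As the README notes, init-rocky-ansible-host.yml above is meant to be run locally with local-ansible.cfg; one way to point ansible-playbook at that configuration file (a sketch, paths assumed relative to the repository root):

```
ANSIBLE_CONFIG=local-ansible.cfg ansible-playbook init-rocky-ansible-host.yml
```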
diff --git a/init-rocky-chrony.yml b/init-rocky-chrony.yml new file mode 100644 index 0000000..dd0f6fa --- /dev/null +++ b/init-rocky-chrony.yml @@ -0,0 +1,41 @@ +--- +# Sets Up Chrony Server/Client +# Created: @derekmpage +# Kudos: @danielkubat @Darkbat91 +# Fixes: @nazunalika +- name: Rocky Chrony Runbook + hosts: all + become: true + vars_files: + - vars/chrony.yml + + # This is to try to avoid the handler issue in pre/post tasks + handlers: + - import_tasks: handlers/main.yml + + pre_tasks: + - name: Check if ansible cannot be run here + stat: + path: /etc/no-ansible + register: no_ansible + + - name: Verify if we can run ansible + assert: + that: + - "not no_ansible.stat.exists" + success_msg: "We are able to run on this node" + fail_msg: "/etc/no-ansible exists - skipping run on this node" + + tasks: + - name: Configure Chrony + import_tasks: tasks/chrony.yml + + post_tasks: + - name: Touching run file that ansible has ran here + file: + path: /var/log/ansible.run + state: touch + mode: '0644' + owner: root + group: root +... diff --git a/init-rocky-install-kvm-hosts.yml b/init-rocky-install-kvm-hosts.yml new file mode 100644 index 0000000..fe5826d --- /dev/null +++ b/init-rocky-install-kvm-hosts.yml @@ -0,0 +1,60 @@ +--- +# Creates a standalone KVM hosts +# Created: @SherifNagy +# Modified to current standards: @nazunalika +- name: Configure KVM host + hosts: kvm + become: true + + pre_tasks: + - name: Check if ansible cannot be run here + stat: + path: /etc/no-ansible + register: no_ansible + + - name: Verify if we can run ansible + assert: + that: + - "not no_ansible.stat.exists" + success_msg: "We are able to run on this node" + fail_msg: "/etc/no-ansible exists - skipping run on this node" + + tasks: + - name: Check for CPU Virtualization + shell: "set -o pipefail; lscpu | grep -i virtualization" + register: result + changed_when: false + failed_when: "result.rc != 0" + + # Install KVM packages + - name: Installing KVM Packages + package: + name: + - qemu-kvm + - libvirt + - libvirt-python + - libguestfs-tools + - virt-install + state: present + + - name: Enable and Start libvirtd + systemd: + name: libvirtd + state: started + enabled: true + + - name: Verify KVM module is loaded + shell: "set -o pipefail; lsmod | grep -i kvm" + register: result + changed_when: false + failed_when: "result.rc != 0" + + post_tasks: + - name: Touching run file that ansible has ran here + file: + path: /var/log/ansible.run + state: touch + mode: '0644' + owner: root + group: root +... 
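The KVM playbook above fails early unless the CPU exposes virtualization extensions and the kvm module is loaded. The same checks it runs can be performed by hand before targeting a host:

```
lscpu | grep -i virtualization
lsmod | grep -i kvm
```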
diff --git a/init-rocky-mantisbt.yml b/init-rocky-mantisbt.yml new file mode 100644 index 0000000..68aab0e --- /dev/null +++ b/init-rocky-mantisbt.yml @@ -0,0 +1,59 @@ +--- +# Installs the mantis bug tracker +# This requires information from the vault +- name: Configure MantisBT + hosts: all + become: true + vars_files: + - vars/common.yml + - vars/mantis.yml + + handlers: + - import_tasks: handlers/main.yml + + pre_tasks: + - name: Check if ansible cannot be run here + stat: + path: /etc/no-ansible + register: no_ansible + + - name: Verify if we can run ansible + assert: + that: + - "not no_ansible.stat.exists" + success_msg: "We are able to run on this node" + fail_msg: "/etc/no-ansible exists - skipping run on this node" + + roles: + - role: rockylinux.ipagetcert + state: present + + tasks: + - name: Deploy Mantis + import_tasks: tasks/mantis.yml + + post_tasks: + - name: Open firewalld ports + ansible.posix.firewalld: + service: "{{ item }}" + permanent: true + immediate: true + state: enabled + with_items: + - http + - https + + - name: Ensure httpd is enabled and running + service: + name: httpd + enabled: true + state: started + + - name: Touching run file that ansible has ran here + file: + path: /var/log/ansible.run + state: touch + mode: '0644' + owner: root + group: root +... diff --git a/init-rocky-noggin-theme.yml b/init-rocky-noggin-theme.yml new file mode 100644 index 0000000..05c29c3 --- /dev/null +++ b/init-rocky-noggin-theme.yml @@ -0,0 +1,41 @@ +--- +# (Re)deploys the noggin theme +- name: Deploy Noggin Theme + hosts: all + become: true + + handlers: + - import_tasks: handlers/main.yml + + pre_tasks: + - name: Check if ansible cannot be run here + stat: + path: /etc/no-ansible + register: no_ansible + + - name: Verify if we can run ansible + assert: + that: + - "not no_ansible.stat.exists" + success_msg: "We are able to run on this node" + fail_msg: "/etc/no-ansible exists - skipping run on this node" + + tasks: + - name: Deploy Rocky Noggin Theme + git: + repo: https://github.com/rocky-linux/noggin-theme.git + dest: /opt/noggin/noggin/noggin/themes/rocky + update: true + version: main + become_user: noggin + notify: restart_noggin + + post_tasks: + - name: Touching run file that ansible has ran here + file: + path: /var/log/ansible.run + state: touch + mode: '0644' + owner: root + group: root +... diff --git a/init-rocky-noggin.yml b/init-rocky-noggin.yml new file mode 100644 index 0000000..89031d2 --- /dev/null +++ b/init-rocky-noggin.yml @@ -0,0 +1,35 @@ +--- +# (Re)deploys noggin +- name: Deploy Noggin + hosts: all + become: true + + handlers: + - import_tasks: handlers/main.yml + + pre_tasks: + - name: Check if ansible cannot be run here + stat: + path: /etc/no-ansible + register: no_ansible + + - name: Verify if we can run ansible + assert: + that: + - "not no_ansible.stat.exists" + success_msg: "We are able to run on this node" + fail_msg: "/etc/no-ansible exists - skipping run on this node" + + tasks: + - name: Deploy Noggin + import_tasks: "tasks/noggin.yml" + + post_tasks: + - name: Touching run file that ansible has ran here + file: + path: /var/log/ansible.run + state: touch + mode: '0644' + owner: root + group: root +... diff --git a/init-rocky-openqa-developer-host.yml b/init-rocky-openqa-developer-host.yml new file mode 100644 index 0000000..f462e9b --- /dev/null +++ b/init-rocky-openqa-developer-host.yml @@ -0,0 +1,53 @@ +# Sets up local OpenQA testing environment +# This playbook is *NOT* intended for WAN-facing systems!
+# +# Usage: +# # Install and configure an openQA developer host, download all current Rocky ISOs, +# # and POST a test job +# ansible-playbook playbooks/init-rocky-openqa-developer-host.yml +# +# # Only perform ISO download tasks +# ansible-playbook playbooks/init-rocky-openqa-developer-host.yml --tags=download_isos +# +# # Only perform configuration, do not download ISOs or POST a job +# ansible-playbook playbooks/init-rocky-openqa-developer-host.yml --tags=configure +# +# Created: @akatch +--- +- name: Rocky OpenQA Runbook + hosts: localhost + connection: local + become: true + vars_files: + - vars/openqa.yml + + # This is to try to avoid the handler issue in pre/post tasks + handlers: + - import_tasks: handlers/main.yml + + pre_tasks: + - name: Check if ansible cannot be run here + stat: + path: /etc/no-ansible + register: no_ansible + + - name: Verify if we can run ansible + assert: + that: + - "not no_ansible.stat.exists" + success_msg: "We are able to run on this node" + fail_msg: "/etc/no-ansible exists - skipping run on this node" + + tasks: + - name: Install and configure OpenQA + import_tasks: tasks/openqa.yml + + post_tasks: + - name: Touching run file that ansible has ran here + file: + path: /var/log/ansible.run + state: touch + mode: '0644' + owner: root + group: root +... diff --git a/init-rocky-postfix-relay.yml b/init-rocky-postfix-relay.yml new file mode 100644 index 0000000..781affd --- /dev/null +++ b/init-rocky-postfix-relay.yml @@ -0,0 +1,37 @@ +--- +# Configures postfix on a system to relay mail +# NOTE: smtp vars will be in vaults - originally they were available directly +# on the ansible host. This was never a viable or secure option. +- name: Configure Postfix Relay + hosts: all + become: true + + handlers: + - import_tasks: handlers/main.yml + + pre_tasks: + - name: Check if ansible cannot be run here + stat: + path: /etc/no-ansible + register: no_ansible + + - name: Verify if we can run ansible + assert: + that: + - "not no_ansible.stat.exists" + success_msg: "We are able to run on this node" + fail_msg: "/etc/no-ansible exists - skipping run on this node" + + tasks: + - name: Deploy Postfix Relay + import_tasks: tasks/postfix_relay.yml + + post_tasks: + - name: Touching run file that ansible has ran here + file: + path: /var/log/ansible.run + state: touch + mode: '0644' + owner: root + group: root +... diff --git a/init-rocky-repo-servers.yml b/init-rocky-repo-servers.yml new file mode 100644 index 0000000..bfc994b --- /dev/null +++ b/init-rocky-repo-servers.yml @@ -0,0 +1,35 @@ +--- +# Preps a system to be a repository +- name: Configure repository system + hosts: all + become: true + + handlers: + - import_tasks: handlers/main.yml + + pre_tasks: + - name: Check if ansible cannot be run here + stat: + path: /etc/no-ansible + register: no_ansible + + - name: Verify if we can run ansible + assert: + that: + - "not no_ansible.stat.exists" + success_msg: "We are able to run on this node" + fail_msg: "/etc/no-ansible exists - skipping run on this node" + + tasks: + - name: Configure repository system + import_tasks: tasks/repository.yml + + post_tasks: + - name: Touching run file that ansible has ran here + file: + path: /var/log/ansible.run + state: touch + mode: '0644' + owner: root + group: root +... diff --git a/init-rocky-system-config.yml b/init-rocky-system-config.yml new file mode 100644 index 0000000..a337c87 --- /dev/null +++ b/init-rocky-system-config.yml @@ -0,0 +1,57 @@ +--- +# Basic system configuration.
All hardening should also be imported here. +# Use --extra-vars="host=..." and specify a hostname in the inventory or +# provide an ansible host group name. You can also just use "all" if you +# want to ensure all systems are up to date on the configuration. +- name: Configure system + hosts: all + become: true + + # This is to try to avoid the handler issue in pre/post tasks + handlers: + - import_tasks: handlers/main.yml + + pre_tasks: + - name: Check if ansible cannot be run here + stat: + path: /etc/no-ansible + register: no_ansible + + - name: Verify if we can run ansible + assert: + that: + - "not no_ansible.stat.exists" + success_msg: "We are able to run on this node" + fail_msg: "/etc/no-ansible exists - skipping run on this node" + + tasks: + - name: Loading Variables from OS Common + import_tasks: tasks/variable_loader_common.yml + + - name: Configure SSH + import_tasks: tasks/ssh_config.yml + + - name: Configure harden settings + import_tasks: tasks/harden.yml + + - name: Configure PAM + import_tasks: tasks/authentication.yml + + - name: Configure auditd + import_tasks: tasks/auditd.yml + + - name: Configure grub + import_tasks: tasks/grub.yml + + - name: Configure common scripts + import_tasks: tasks/scripts.yml + + post_tasks: + - name: Touching run file that ansible has ran here + file: + path: /var/log/ansible.run + state: touch + mode: '0644' + owner: root + group: root +... diff --git a/local-ansible.cfg b/local-ansible.cfg new file mode 100644 index 0000000..3167296 --- /dev/null +++ b/local-ansible.cfg @@ -0,0 +1,76 @@ +[defaults] + +######################################## +# Display settings +######################################## + +# Output display +force_color = 1 +nocows = True + + +# Note: http://docs.ansible.com/ansible/intro_configuration.html#ansible-managed +ansible_managed = Ansible managed +#ansible_managed = Ansible managed - {file} on {host} + + +# Warn when ansible think it is better to use module. 
+# Note: http://docs.ansible.com/ansible/intro_configuration.html#id88 +command_warnings = True + +# Enable this to debug tasks calls +display_args_to_stdout = False +display_skipped_hosts = false + +######################################## +# Playbook settings +######################################## + + +# Default strategy +strategy = free + +# Number of hosts processed in parallel +forks = 20 + + +######################################## +# Behaviour settings +######################################## + + +# Make role variables private +retry_files_enabled = True + +# Fact options +gathering = smart +#gathering = !all +#gathering = smart,network,hardware,virtual,ohai,facter +#gathering = network,!hardware,virtual,!ohai,!facter + +# facts caching +#fact_caching_connection = tmp/facts_cache +#fact_caching = json +fact_caching = memory +fact_caching_timeout = 1800 + +# Enable or disable logs +# Note put to false in prod +no_log = False + + +######################################## +# Common destinations +######################################## + +log_path = tmp/ansible.log +known_hosts = tmp/known_hosts +roles_path = roles/local:roles/public +collections_paths = collections + +######################################## +# SSH Configuration +######################################## +[ssh_connection] +# Disable GSSAPI, which slows down SSH connections for ansible +ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s -o GSSAPIAuthentication=no diff --git a/mantis.yml b/mantis.yml new file mode 100644 index 0000000..c54af44 --- /dev/null +++ b/mantis.yml @@ -0,0 +1,32 @@ +--- +# mantis vars +mantis_version: 2.25.0 +mantis_checksum: "sha256:d8973d3677ecb2ccbfee95e2267b3128049fbdcc59aa1f007686a342d93a4c0a" +mantis_pkg: + - php + - php-ldap + - httpd + - mod_ssl + - php-pgsql + - php-mbstring + - php-curl + - openldap + - php-json +mantis_db_host: db.rockylinux.org +mantis_db_name: mantisdb +mantis_db_user: mantis +mantis_binder_user: "{{ rocky_ldap_bind_dn }}" +mantis_binder_pass: "{{ rocky_ldap_bind_pw }}" + +# Vault +# mantis_db_pass: ThisIsNotThePassword! + +ipa_getcert_requested_hostnames: + - name: "{{ ansible_fqdn }}" + owner: apache + key_location: "/etc/pki/tls/private/bugs.rockylinux.org.key" + cert_location: "/etc/pki/tls/certs/bugs.rockylinux.org.crt" + postcmd: "/bin/systemctl reload httpd" + cnames: + - "bugs.rockylinux.org" +... diff --git a/role-rocky-bootstrap_staging.yml b/role-rocky-bootstrap_staging.yml new file mode 100644 index 0000000..6b4d087 --- /dev/null +++ b/role-rocky-bootstrap_staging.yml @@ -0,0 +1,41 @@ +--- +# Manage bootstrap hosts +# +- name: Manage and configure bootstrap hosts + hosts: all + become: true + vars_files: + - vars/mounts/bootstrap_staging.yml + + # This is to try to avoid the handler issue in pre/post tasks + handlers: + - import_tasks: handlers/main.yml + + pre_tasks: + - name: Check if ansible cannot be run here + stat: + path: /etc/no-ansible + register: no_ansible + + - name: Verify if we can run ansible + assert: + that: + - "not no_ansible.stat.exists" + success_msg: "We are able to run on this node" + fail_msg: "/etc/no-ansible exists - skipping run on this node" + + tasks: + - include_tasks: tasks/efs_mount.yml + loop: "{{ mounts }}" + + - include_tasks: tasks/srpmproc.yml + + post_tasks: + - name: Touching run file that ansible has ran here + file: + path: /var/log/ansible.run + state: touch + mode: '0644' + owner: root + group: root +... 
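Several of the role playbooks that follow pull credentials from vault files such as vars/vaults/encpass.yml (for example, mantis_db_pass above is expected to come from a vault). Assuming standard ansible-vault usage, such a file would be created or edited along these lines:

```
ansible-vault create vars/vaults/encpass.yml
ansible-vault edit vars/vaults/encpass.yml
```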
diff --git a/role-rocky-graylog.yml b/role-rocky-graylog.yml new file mode 100644 index 0000000..43869f5 --- /dev/null +++ b/role-rocky-graylog.yml @@ -0,0 +1,67 @@ +--- +# Configure and setup graylog +# Reccommended specs +# CPU: 2 cores +# Memory: 4GB +# Storage: Yes +- name: Install Graylog + hosts: all + become: true + vars_files: + # Vaults required + # vars/vaults/encpass.yml + # vars/vaults/hostman.yml + - vars/graylog.yml + + # This is to try to avoid the handler issue in pre/post tasks + handlers: + - import_tasks: handlers/main.yml + + pre_tasks: + - name: Check if ansible cannot be run here + stat: + path: /etc/no-ansible + register: no_ansible + + - name: Verify if we can run ansible + assert: + that: + - "not no_ansible.stat.exists" + success_msg: "We are able to run on this node" + fail_msg: "/etc/no-ansible exists - skipping run on this node" + + - name: Install SELinux packages + package: + name: python3-policycoreutils.noarch + state: present + + - name: "Creating DNS Record for ord-prod-graylog.rockylinux.org" + freeipa.ansible_freeipa.ipadnsrecord: + ipaadmin_principal: "{{ ipa_admin|default('admin') }}" + ipaadmin_password: "{{ ipaadmin_password }}" + zone_name: "{{ graylog_ipa_dnsrecord_zone_name }}" + name: "{{ graylog_ipa_dnsrecord_name }}" + record_type: "{{ graylog_ipa_dnsrecord_record_type }}" + record_value: "{{ graylog_ipa_dnsrecord_record_value }}" + state: "{{ graylog_ipa_dnsrecord_state }}" + + roles: + - role: rockylinux.ipagetcert + state: present + + post_tasks: + - name: Open firewalld ports + ansible.posix.firewalld: + port: "{{ item.port }}" + permanent: "{{ item.permanent }}" + state: "{{ item.state }}" + loop: "{{ graylog_server_firewall_rules }}" + + - name: Touching run file that ansible has ran here + file: + path: /var/log/ansible.run + state: touch + mode: '0644' + owner: root + group: root +... diff --git a/role-rocky-mirrormanager.yml b/role-rocky-mirrormanager.yml new file mode 100644 index 0000000..37493d4 --- /dev/null +++ b/role-rocky-mirrormanager.yml @@ -0,0 +1,72 @@ +--- +# MirrorManager2 +- name: Install and configure mirrormanager + hosts: all + become: false + vars_files: + # This playbook requires vaults! 
+ # vars/vaults/hostman.yml + # vars/vaults/mirrormanager.yml + - vars/mounts/mirrormanager.yml + - vars/mirrormanager.yml + + # This is to try to avoid the handler issue in pre/post tasks + handlers: + - import_tasks: handlers/main.yml + + pre_tasks: + - name: Check if ansible cannot be run here + stat: + path: /etc/no-ansible + register: no_ansible + + - name: Verify if we can run ansible + assert: + that: + - "not no_ansible.stat.exists" + success_msg: "We are able to run on this node" + fail_msg: "/etc/no-ansible exists - skipping run on this node" + + - name: Install git + become: true + package: + name: git + state: present + + - name: Install SELinux packages + become: true + package: + name: python3-policycoreutils.noarch + state: present + + tasks: + #- include_tasks: tasks/mirrormanager.yml + - include_tasks: tasks/efs_mount.yml + loop: "{{ mounts }}" + tags: ["mounts"] + + roles: + - role: rockylinux.ipagetcert + become: true + state: present + tags: ['certs'] + + post_tasks: + - name: Open firewalld ports + become: true + ansible.posix.firewalld: + port: "{{ item.port }}" + permanent: "{{ item.permanent }}" + state: "{{ item.state }}" + immediate: yes + loop: "{{ firewall_rules }}" + + - name: Touching run file that ansible has ran here + become: true + file: + path: /var/log/ansible.run + state: touch + mode: '0644' + owner: root + group: root +... diff --git a/role-rocky-monitoring.yml b/role-rocky-monitoring.yml new file mode 100644 index 0000000..431ffab --- /dev/null +++ b/role-rocky-monitoring.yml @@ -0,0 +1,64 @@ +--- +# Creates the first monitoring server +# Reccommended specs +# CPU: 2 cores +# Memory: 2GB +# Storage: a piece of string +- name: Install Prometheus + hosts: all + become: true + vars_files: + # vars/vaults/encpass.yml + - vars/monitoring.yml + - vars/monitoring/alertmanager.yml + - vars/monitoring/grafana.yml + - vars/monitoring/prometheus.yml + + # This is to try to avoid the handler issue in pre/post tasks + handlers: + - import_tasks: handlers/main.yml + + pre_tasks: + - name: Check if ansible cannot be run here + stat: + path: /etc/no-ansible + register: no_ansible + + - name: Verify if we can run ansible + assert: + that: + - "not no_ansible.stat.exists" + success_msg: "We are able to run on this node" + fail_msg: "/etc/no-ansible exists - skipping run on this node" + + - name: Install SELinux packages + package: + name: python3-policycoreutils.noarch + state: present + + roles: + # - role: rockylinux.ipagetcert + # state: present + - role: cloudalchemy.prometheus + state: present + - role: cloudalchemy.alertmanager + state: present + - role: cloudalchemy.grafana + state: present + + post_tasks: + - name: Open firewalld ports + ansible.posix.firewalld: + port: "{{ item.port }}" + permanent: "{{ item.permanent }}" + state: "{{ item.state }}" + loop: "{{ monitoring_server_firewall_rules }}" + + - name: Touching run file that ansible has ran here + file: + path: /var/log/ansible.run + state: touch + mode: '0644' + owner: root + group: root +... 
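The post_tasks above loop over monitoring_server_firewall_rules and read item.port, item.permanent, and item.state. vars/monitoring.yml is not shown in this patch, so the following is only a hypothetical sketch of the expected shape; the ports are the stock Prometheus and Grafana defaults, not values taken from this repository:

```
monitoring_server_firewall_rules:
  - port: 9090/tcp      # prometheus web UI (assumed default)
    permanent: true
    state: enabled
  - port: 3000/tcp      # grafana (assumed default)
    permanent: true
    state: enabled
```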
diff --git a/role-rocky-mqtt.yml b/role-rocky-mqtt.yml new file mode 100644 index 0000000..4ea8963 --- /dev/null +++ b/role-rocky-mqtt.yml @@ -0,0 +1,62 @@ +--- +# Stands up an mqtt instance +- name: Configure mqtt + hosts: all + become: true + vars_files: + # vars/vaults/encpass.yml + - vars/mqtt.yml + + # This is to try to avoid the handler issue in pre/post tasks + handlers: + - import_tasks: handlers/main.yml + + pre_tasks: + - name: Check if ansible cannot be run here + stat: + path: /etc/no-ansible + register: no_ansible + + - name: Verify if we can run ansible + assert: + that: + - "not no_ansible.stat.exists" + success_msg: "We are able to run on this node" + fail_msg: "/etc/no-ansible exists - skipping run on this node" + + # EPEL and PowerTools are required for ipsilon to function + # I also couldn't find an ansible built-in to do this + - name: Enable the PowerTools repository + community.general.ini_file: + dest: /etc/yum.repos.d/Rocky-PowerTools.repo + section: powertools + option: enabled + value: 1 + owner: root + group: root + mode: '0644' + + # The CentOS extras repos has epel-release provided + - name: Enable the EPEL repository + yum: + name: epel-release + state: present + tags: + - packages + + roles: + - role: rockylinux.ipagetcert + state: present + + - role: rockylinux.mqtt + state: present + + post_tasks: + - name: Touching run file that ansible has ran here + file: + path: /var/log/ansible.run + state: touch + mode: '0644' + owner: root + group: root +... diff --git a/role-rocky-node_exporter.yml b/role-rocky-node_exporter.yml new file mode 100644 index 0000000..0457451 --- /dev/null +++ b/role-rocky-node_exporter.yml @@ -0,0 +1,22 @@ +--- +- name: Install Prometheus Node Exporter + hosts: all + become: true + + pre_tasks: + - name: Install SELinux packages + package: + name: python3-policycoreutils.noarch + state: present + + roles: + - role: cloudalchemy.node-exporter + state: present + + post_tasks: + - name: Open firewall for node-exporter + ansible.posix.firewalld: + port: 9100/tcp + permanent: true + state: enabled +... 
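Since the node_exporter playbook above only opens 9100/tcp, a quick way to confirm the exporter answers after the role runs (9100 is the standard node_exporter port, matching the firewall rule above):

```
curl -s http://localhost:9100/metrics | head
```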
diff --git a/role-rocky-pinnwand.yml b/role-rocky-pinnwand.yml new file mode 100644 index 0000000..123f9fd --- /dev/null +++ b/role-rocky-pinnwand.yml @@ -0,0 +1,67 @@ +--- +# pinnwand +- name: Install pinnwand + hosts: all + become: true + vars_files: + - vars/pinnwand.yml + # vars/vaults/hostman.yml + # vars/vaults/pinnwand.yml + + # This is to try to avoid the handler issue in pre/post tasks + handlers: + - import_tasks: handlers/main.yml + + pre_tasks: + - name: Check if ansible cannot be run here + stat: + path: /etc/no-ansible + register: no_ansible + + - name: Verify if we can run ansible + assert: + that: + - "not no_ansible.stat.exists" + success_msg: "We are able to run on this node" + fail_msg: "/etc/no-ansible exists - skipping run on this node" + + - name: Install SELinux packages + package: + name: python3-policycoreutils.noarch + state: present + + tasks: + # - include_tasks: tasks/pinnwand.yml + # tags: ['includetasks'] + + roles: + - role: rockylinux.ipagetcert + state: present + tags: ['certs'] + + - role: rockylinux.pinnwand + state: present + tags: ['role_pinnwand'] + + # Define variables in vars/matomo/nginx.yml + - role: nginxinc.nginx_core.nginx + tags: ['nginx'] + # - role: nginxinc.nginx_core.nginx_config + # tags: ['nginx'] + + post_tasks: + - name: Open firewalld ports + ansible.posix.firewalld: + port: "{{ item.port }}" + permanent: "{{ item.permanent | default(yes) }}" + state: "{{ item.state | default(present) }}" + loop: "{{ firewall_rules }}" + + - name: Touching run file that ansible has ran here + file: + path: /var/log/ansible.run + state: touch + mode: '0644' + owner: root + group: root +... diff --git a/role-rocky-rabbitmq.yml b/role-rocky-rabbitmq.yml new file mode 100644 index 0000000..857e4e5 --- /dev/null +++ b/role-rocky-rabbitmq.yml @@ -0,0 +1,78 @@ +--- +# Stands up a RabbitMQ Cluster +- name: Configure RabbitMQ + hosts: all + become: true + vars_files: + - vars/common.yml + # vars/vaults/encpass.yml + - vars/rabbitmq.yml + + # This is to try to avoid the handler issue in pre/post tasks + handlers: + - import_tasks: handlers/main.yml + + pre_tasks: + - name: Check if ansible cannot be run here + stat: + path: /etc/no-ansible + register: no_ansible + + - name: Verify if we can run ansible + assert: + that: + - "not no_ansible.stat.exists" + success_msg: "We are able to run on this node" + fail_msg: "/etc/no-ansible exists - skipping run on this node" + + # We have separate passwords per rabbitmq env + - name: Import rabbitmq passwords + include_vars: + file: "vars/vaults/rabbitmq_{{ rabbitmq_env }}.yml" + + # EPEL and PowerTools are required for ipsilon to function + # I also couldn't find an ansible built-in to do this + - name: Enable the PowerTools repository + community.general.ini_file: + dest: /etc/yum.repos.d/Rocky-PowerTools.repo + section: powertools + option: enabled + value: 1 + owner: root + group: root + mode: '0644' + + # The CentOS extras repos has epel-release provided + - name: Enable the EPEL repository + yum: + name: epel-release + state: present + tags: + - packages + + # This will change eventually to a rocky-release-messaging repo or to a + # rocky-release-rabbitmq repo + #- name: Install centos rabbitmq + # yum: + # name: centos-release-rabbitmq-38 + # state: present + # tags: + # - packages + + roles: + - role: rockylinux.ipagetcert + state: present + when: rabbitmq_private + + - role: rockylinux.rabbitmq + state: present + + post_tasks: + - name: Touching run file that ansible has ran here + file: + path: 
/var/log/ansible.run + state: touch + mode: '0644' + owner: root + group: root +... diff --git a/role-rocky-repopool.yml b/role-rocky-repopool.yml new file mode 100644 index 0000000..c05e697 --- /dev/null +++ b/role-rocky-repopool.yml @@ -0,0 +1,42 @@ +--- +# Configures an instance to function as a HTTP serving member of repopool +- name: Configure Repo Pool hosts + hosts: all + become: true + vars_files: + # vars/vaults/encpass.yml + - vars/common.yml + - vars/mounts/repopool.yml + + # This is to try to avoid the handler issue in pre/post tasks + handlers: + - import_tasks: handlers/main.yml + + pre_tasks: + - name: Check if ansible cannot be run here + stat: + path: /etc/no-ansible + register: no_ansible + + - name: Verify if we can run ansible + assert: + that: + - "not no_ansible.stat.exists" + success_msg: "We are able to run on this node" + fail_msg: "/etc/no-ansible exists - skipping run on this node" + + tasks: + - name: "Setup shared filesystem mount" + include_tasks: tasks/efs_mount.yml + with_items: "{{ mounts }}" + tags: ["koji_efs_mount"] + + post_tasks: + - name: Touching run file that ansible has ran here + file: + path: /var/log/ansible.run + state: touch + mode: '0644' + owner: root + group: root +... diff --git a/roles/README.md b/roles/README.md new file mode 100644 index 0000000..169dbf3 --- /dev/null +++ b/roles/README.md @@ -0,0 +1,14 @@ +# Roles + +If you want to use a role specific to this repository, you will need to define it in a `requirements.yml`; otherwise, AWX will not install what you need to run your tasks. + +Example: + +``` +--- +# Roles +roles: + - name: rockylinux.ipagetcert + src: https://github.com/rocky-linux/ansible-role-ipa-getcert + version: main +``` diff --git a/roles/requirements.yml b/roles/requirements.yml new file mode 100644 index 0000000..350e8a0 --- /dev/null +++ b/roles/requirements.yml @@ -0,0 +1,44 @@ +--- +roles: + - name: geerlingguy.mysql + # monitoring + - name: cloudalchemy.node_exporter + - name: cloudalchemy.prometheus + - name: cloudalchemy.alertmanager + - name: cloudalchemy.grafana + - name: geerlingguy.gitlab + - name: geerlingguy.postgresql + - name: geerlingguy.php + - name: geerlingguy.nodejs + - name: geerlingguy.certbot + - name: riemers.gitlab-runner + + - name: rockylinux.ipagetcert + src: https://github.com/rocky-linux/ansible-role-ipa-getcert + version: main + - name: rockylinux.ipsilon + src: https://github.com/rocky-linux/ansible-role-ipsilon + version: main + - name: rockylinux.kojihub + src: https://github.com/rocky-linux/ansible-role-kojihub + version: main + - name: rockylinux.kojid + src: https://github.com/rocky-linux/ansible-role-kojid + version: main + - name: rockylinux.rabbitmq + src: https://github.com/rocky-linux/ansible-role-rabbitmq + version: main + - name: rockylinux.sigul + src: https://github.com/rocky-linux/ansible-role-sigul + version: main + - name: rockylinux.matterbridge + src: https://github.com/NeilHanlon/ansible-role-matterbridge + version: master + - name: rockylinux.pinnwand + src: https://github.com/rocky-linux/ansible-role-pinnwand + version: main + - name: rockylinux.wikijs + src: https://git.rockylinux.org/infrastructure/public/ansible/ansible-role-wikijs.git + scm: git + version: develop +...
diff --git a/tasks/account_services.yml b/tasks/account_services.yml new file mode 100644 index 0000000..4bd38a8 --- /dev/null +++ b/tasks/account_services.yml @@ -0,0 +1,27 @@ +--- +# Account Services +- name: Install packages + package: + name: + - httpd + - mod_ssl + - python3 + - python3-setuptools + - python3-kdcproxy + state: present + +- name: Deploy relevant httpd configuration + template: + src: "etc/httpd/conf.d/id.conf.j2" + dest: "/etc/httpd/conf.d/id.conf" + owner: root + group: root + mode: '0644' + notify: restart_httpd + +- name: Enable and start + systemd: + name: httpd + state: started + enabled: true +... diff --git a/tasks/auditd.yml b/tasks/auditd.yml new file mode 100644 index 0000000..455f99a --- /dev/null +++ b/tasks/auditd.yml @@ -0,0 +1,36 @@ +--- +- name: Ensure auditd is installed + package: + name: audit + state: present + tags: + - harden + +- name: Ensure auditd is enabled + service: + name: auditd + enabled: true + +- name: Ensure auditd buffer is OK + replace: + path: /etc/audit/rules.d/audit.rules + regexp: '-b \d+' + replace: '-b {{ audit_buffer }}' + notify: + - regenerate_auditd_rules + tags: + - harden + +- name: Ensure collection audit rules are available + template: + src: "etc/audit/rules.d/collection.rules.j2" + dest: "/etc/audit/rules.d/collection.rules" + owner: root + group: root + mode: '0600' + backup: true + notify: + - regenerate_auditd_rules + tags: + - harden +... diff --git a/tasks/authentication.yml b/tasks/authentication.yml new file mode 100644 index 0000000..1d9bf33 --- /dev/null +++ b/tasks/authentication.yml @@ -0,0 +1,55 @@ +--- +# Configures PAM and SSSD post-ipa client installation. It is recommended +# that we use a custom authselect profile and build it out from there. +- name: Enterprise Linux 8+ PAM Configuration + block: + - name: Ensure Custom Profile is removed + file: + path: /etc/authselect/custom/sssd-rocky + state: absent + + - name: Create custom authselect profile based on sssd + command: > + /usr/bin/authselect create-profile sssd-rocky + --base-on sssd + --symlink-dconf + --symlink-meta + --symlink=postlogin + --symlink=smartcard-auth + --symlink=fingerprint-auth + changed_when: false + + - name: Override system-auth and password-auth + copy: + src: "etc/authselect/custom/sssd-rocky/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}-system-auth" + dest: "{{ item }}" + owner: root + group: root + mode: '0644' + loop: + - /etc/authselect/custom/sssd-rocky/system-auth + - /etc/authselect/custom/sssd-rocky/password-auth + + - name: Select New Profile + command: > + /usr/bin/authselect select custom/sssd-rocky + without-nullok + with-faillock + with-mkhomedir + with-sudo + --force + changed_when: false + + - name: Apply new settings + command: /usr/bin/authselect apply-changes + changed_when: false + + - name: Enable oddjobd + service: + name: oddjobd + state: started + enabled: true + when: + - ansible_facts['os_family'] == 'RedHat' + - ansible_facts['distribution_major_version']|int >= 8 +...
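These task files are meant to be imported by a playbook rather than run on their own. A minimal wrapper play (the play name and the selection of task files here are assumptions for illustration):

```yaml
---
# Illustrative wrapper play
- name: Apply common system configuration
  hosts: all
  become: true
  tasks:
    - import_tasks: tasks/authentication.yml
    - import_tasks: tasks/auditd.yml
```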
diff --git a/tasks/bugzilla.yml b/tasks/bugzilla.yml new file mode 100644 index 0000000..f514496 --- /dev/null +++ b/tasks/bugzilla.yml @@ -0,0 +1,55 @@ +--- +# Configure Bugzilla +- name: Configure SELinux booleans + ansible.posix.seboolean: + name: "{{ item }}" + persistent: true + state: true + with_items: + - httpd_can_network_connect_db + - httpd_can_network_connect + - httpd_can_sendmail + +- name: Install necessary packages + yum: + name: "{{ bugzilla_pkg }}" + state: present + tags: + - packages + +- name: Download the bugtracker + get_url: + url: "https://ftp.mozilla.org/pub/mozilla.org/webtools/bugzilla-{{ bugzilla_version }}.tar.gz" + dest: "/tmp/bugzilla-{{ bugzilla_version }}.tar.gz" + checksum: "{{ bugzilla_checksum }}" + +- name: Create initial directory + file: + path: "{{ bugzilla_dir }}" + state: directory + mode: '0750' + owner: root + group: apache + +- name: Extract bugzilla + unarchive: + src: "/tmp/bugzilla-{{ bugzilla_version }}.tar.gz" + dest: "{{ bugzilla_dir }}" + owner: root + group: apache + mode: '0640' + remote_src: true + extra_opts: + - '--strip-components=1' + +- name: Configure httpd + template: + src: "etc/httpd/conf.d/bugzilla.conf.j2" + dest: "/etc/httpd/conf.d/bugzilla.conf" + owner: root + group: root + mode: '0644' + +- name: Install necessary pieces + import_tasks: bugzilla_install.yml +... diff --git a/tasks/bugzilla_install.yml b/tasks/bugzilla_install.yml new file mode 100644 index 0000000..0d7213e --- /dev/null +++ b/tasks/bugzilla_install.yml @@ -0,0 +1,60 @@ +--- +# Install bugzilla properly, including modules and stuff + +- name: Check for a localconfig file + stat: + path: "{{ bugzilla_dir }}/localconfig" + register: conf_result + +- name: Deploy answer file + template: + src: "var/www/bugzilla/answer" + dest: "{{ bugzilla_dir }}/answer" + owner: root + group: apache + mode: "0640" + when: not conf_result.stat.exists + +- name: Run checksetup.pl + shell: "set -o pipefail && /usr/bin/perl checksetup.pl {{ bugzilla_dir }}/answer" + args: + chdir: "{{ bugzilla_dir }}" + changed_when: "1 != 1" + when: not conf_result.stat.exists + +- name: Deploy proper configuration + template: + src: "var/www/bugzilla/localconfig.j2" + dest: "{{ bugzilla_dir }}/localconfig" + owner: root + group: apache + mode: '0640' + +- name: Install the proper modules + shell: "set -o pipefail && /usr/bin/perl install-module.pl {{ item }}" + changed_when: "1 != 1" + args: + chdir: "{{ bugzilla_dir }}" + with_items: + - 'Net::SAML2' + - 'Template' + - 'Template::Plugin::GD::Image' + - 'HTML::FormatText::WithLinks' + - 'PatchReader' + - 'Crypt::OpenSSL::Verify' + - 'Crypt::OpenSSL::RSA' + - 'JSON::RPC' + - 'XML::Twig' + - 'Test::Taint' + +- name: Re-run checksetup.pl + shell: "set -o pipefail && /usr/bin/perl checksetup.pl" + args: + chdir: "{{ bugzilla_dir }}" + changed_when: "1 != 1" + +- name: Remove answer file + file: + path: "{{ bugzilla_dir }}/answer" + state: absent +... 
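The Bugzilla tasks above pull their settings from variables such as `bugzilla_pkg`, `bugzilla_version`, `bugzilla_checksum`, and `bugzilla_dir`. A sketch of the shape those variables take; every value below is a placeholder, not taken from this patch:

```yaml
---
# Shape only; all values are placeholders
bugzilla_dir: /var/www/bugzilla
bugzilla_version: "5.0.6"                      # placeholder
bugzilla_checksum: "sha256:0123456789abcdef"   # placeholder
bugzilla_pkg:
  - httpd
  - mod_ssl
  - perl
```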
diff --git a/tasks/chrony.yml b/tasks/chrony.yml new file mode 100644 index 0000000..005fb2a --- /dev/null +++ b/tasks/chrony.yml @@ -0,0 +1,33 @@ +--- +- name: Create overrides if we're an IPA Replica + include_vars: "{{ item }}" + with_first_found: + - "chronyserver.yml" + when: "'chronyservers' in group_names" + +- name: Install chrony packages + yum: + name: "{{ chrony_packages }}" + state: present + +- name: Fix permissions for chrony home directory + file: + path: "{{ chrony_homedir }}" + mode: 0750 + state: directory + +- name: Deploy configuration + template: + src: chrony.conf.j2 + dest: "{{ chrony_config_file }}" + owner: "{{ chrony_owner }}" + group: "{{ chrony_group }}" + mode: "{{ chrony_mode }}" + notify: "chrony service restart" + +- name: Manage the state of service + systemd: + name: "{{ chrony_service_name }}" + state: "{{ chrony_service_state }}" + enabled: "{{ chrony_service_enabled }}" +... diff --git a/tasks/efs_mount.yml b/tasks/efs_mount.yml new file mode 100644 index 0000000..3dd5886 --- /dev/null +++ b/tasks/efs_mount.yml @@ -0,0 +1,45 @@ +--- +# Requires amazon-efs-utils; included, but should probably be split out? +# + +- name: "Installing amazon-efs-utils" + become: true + become_user: root + yum: + name: 'https://kojidev.rockylinux.org/kojifiles/packages/amazon-efs-utils/1.31.3/1.5c58a2f.el8/noarch/amazon-efs-utils-1.31.3-1.5c58a2f.el8.noarch.rpm' + disable_gpg_check: true + validate_certs: true + state: present + tags: + - amazon_efs_utils + - packages + - mounts + +- name: "Gathering ec2 facts" + amazon.aws.ec2_metadata_facts: + tags: + - mounts + +# "you can use /etc/hosts" https://github.com/aws/efs-utils/issues/1 +- name: "Install custom hosts file because fmlC-w amazon said so." + become: true + become_user: root + ansible.builtin.lineinfile: + path: /etc/hosts + line: "{{ item.ip_map[ansible_ec2_placement_availability_zone] }} {{ item.fsid }}.efs.{{ ansible_ec2_placement_region }}.amazonaws.com" + create: true + tags: + - mounts + +- name: "Creating and mounting {{ item.fsid }} at {{ item.mount_point }}" + become: true + become_user: root + ansible.posix.mount: + path: "{{ item.mount_point }}" + src: "{{ item.fsid }}:/" + fstype: "{{ item.fstype }}" + opts: "{{ item.fsopts | join(',') }}" + state: "{{ item.state | default('mounted') }}" + tags: + - mounts +... diff --git a/tasks/grub.yml b/tasks/grub.yml new file mode 100644 index 0000000..b06e7f1 --- /dev/null +++ b/tasks/grub.yml @@ -0,0 +1,5 @@ +--- +- name: Add kernel boot options to all kernels and default config + command: /usr/sbin/grubby --update-kernel=ALL --args "{{ grub_boot_options }}" + changed_when: "1 != 1" +... 
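`tasks/efs_mount.yml` reads `fsid`, `mount_point`, `fstype`, `fsopts`, an `ip_map` keyed by availability zone, and an optional `state` from each item in `mounts` (see the `vars/mounts/*.yml` files referenced by the playbooks). A sketch of one such entry, with placeholder identifiers and addresses:

```yaml
---
# Shape of a vars/mounts/*.yml entry; all values are placeholders
mounts:
  - fsid: fs-0123456789abcdef0
    mount_point: /mnt/repopool
    fstype: efs
    fsopts:
      - _netdev
      - tls
    ip_map:
      us-east-2a: 10.0.0.10
      us-east-2b: 10.0.1.10
```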
diff --git a/tasks/harden.yml b/tasks/harden.yml new file mode 100644 index 0000000..8bca3f9 --- /dev/null +++ b/tasks/harden.yml @@ -0,0 +1,217 @@ +--- +# Initial hardening ideas from CIS +- name: sysctl hardening and limits + block: + - name: create combined sysctl-dict if overwrites are defined + set_fact: + sysctl_config: '{{ sysctl_config | combine(sysctl_overwrite) }}' + when: sysctl_overwrite | default() + + - name: Kernel parameters + sysctl: + name: "{{ item.key }}" + value: "{{ item.value }}" + state: present + ignoreerrors: true + sysctl_set: true + sysctl_file: /etc/sysctl.d/99-ansible.conf + with_dict: "{{ sysctl_config }}" + tags: + - harden + - kernel + + - name: Security limits + pam_limits: + dest: "/etc/security/limits.d/cis.conf" + domain: "{{ item.domain }}" + limit_type: "{{ item.limit_type }}" + limit_item: "{{ item.limit_item }}" + value: "{{ item.value }}" + with_items: "{{ limits }}" + tags: + - harden + +- name: Standard login settings + block: + - name: useradd defaults + lineinfile: + line: "INACTIVE=30" + regexp: "^INACTIVE=.*" + path: "/etc/login.defs" + tags: + - harden + + - name: login defs maximum days + replace: + path: /etc/login.defs + regexp: '(PASS_MAX_DAYS).*\d+' + replace: '\1\t{{ login_max_days }}' + tags: + - harden + + - name: login defs minimum days + replace: + path: /etc/login.defs + regexp: '(PASS_MIN_DAYS).*\d+' + replace: '\1\t{{ login_min_days }}' + tags: + - harden + + - name: login defs minimum length + replace: + path: /etc/login.defs + regexp: '(PASS_MIN_LEN).*\d+' + replace: '\1\t{{ login_min_len }}' + tags: + - harden + + - name: login defs warn age + replace: + path: /etc/login.defs + regexp: '(PASS_WARN_AGE).*\d+' + replace: '\1\t{{ login_warn_age }}' + tags: + - harden + + - name: cron directories permissions + file: + path: '{{ item }}' + owner: root + group: root + mode: '0700' + state: directory + loop: '{{ login_cron_directories }}' + tags: + - harden + + - name: Create cron/at allows + file: + path: '{{ item }}' + owner: root + group: root + mode: '0600' + state: touch + loop: '{{ login_cron_allows }}' + tags: + - harden + + - name: Remove cron/at denies + file: + path: '{{ item }}' + state: absent + loop: '{{ login_cron_denies }}' + tags: + - harden + + # TODO: Use pamd module to establish password policy + - name: pwquality - minlen + lineinfile: + line: "minlen = 14" + regexp: "^# minlen =.*" + path: "/etc/security/pwquality.conf" + tags: + - harden + + - name: pwquality - dcredit + lineinfile: + line: "dcredit = -1" + regexp: "^# dcredit =.*" + path: "/etc/security/pwquality.conf" + tags: + - harden + + - name: pwquality - ucredit + lineinfile: + line: "ucredit = -1" + regexp: "^# ucredit =.*" + path: "/etc/security/pwquality.conf" + tags: + - harden + + - name: pwquality - lcredit + lineinfile: + line: "lcredit = -1" + regexp: "^# lcredit =.*" + path: "/etc/security/pwquality.conf" + tags: + - harden + + - name: pwquality - ocredit + lineinfile: + line: "ocredit = -1" + regexp: "^# ocredit =.*" + path: "/etc/security/pwquality.conf" + tags: + - harden + +- name: Remove packages not allowed by CIS + package: + name: "{{ remove_packages }}" + state: absent + tags: + - harden + +- name: Disable Services + service: + name: "{{ item }}" + enabled: false + state: stopped + loop: "{{ disable_svc }}" + register: service_check + failed_when: service_check is failed and not 'Could not find the requested service' in service_check.msg + tags: + - services + - harden + +- name: modprobe settings + block: + - name: remove vfat from 
filesystem list if we are EFI + set_fact: + modprobe_unused_filesystems: "{{ modprobe_unused_filesystems | difference('vfat') }}" + when: + - efi_installed.stat.isdir is defined + - efi_installed.stat.isdir + tags: + - efi + + - name: disable unused filesystems + template: + src: "etc/modprobe.d/cis.conf.j2" + dest: "/etc/modprobe.d/cis.conf" + owner: 'root' + group: 'root' + mode: '0644' + tags: + - harden + +- name: Set init umask + lineinfile: + dest: /etc/sysconfig/init + state: present + regexp: ^umask + line: "umask 027" + create: true + owner: root + group: root + mode: '0644' + when: ansible_distribution_major_version == '7' + tags: + - harden + +- name: CIS sudoers configuration + copy: + src: "etc/sudoers.d/cis" + dest: "/etc/sudoers.d/cis" + owner: root + group: root + mode: '0440' + tags: + - harden + +- name: Remove packages not allowed by CIS + package: + name: "{{ remove_packages }}" + state: absent + tags: + - harden +... diff --git a/tasks/main.yml b/tasks/main.yml new file mode 100644 index 0000000..68a6567 --- /dev/null +++ b/tasks/main.yml @@ -0,0 +1,4 @@ +--- +# No tasks +- debug: msg="No tasks are provided here. Please import the task as needed in your playbook." +... diff --git a/tasks/mantis.yml b/tasks/mantis.yml new file mode 100644 index 0000000..efa181d --- /dev/null +++ b/tasks/mantis.yml @@ -0,0 +1,100 @@ +--- +# Configure mantis +- name: Configure SELinux booleans + ansible.posix.seboolean: + name: "{{ item }}" + persistent: true + state: true + with_items: + - httpd_can_network_connect_db + - httpd_can_network_connect + - httpd_can_sendmail + +- name: Install necessary packages + yum: + name: "{{ mantis_pkg }}" + state: present + tags: + - packages + +- name: Download the bugtracker + get_url: + url: "http://downloads.sourceforge.net/mantisbt/mantisbt-{{ mantis_version }}.tar.gz" + dest: "/tmp/mantisbt-{{ mantis_version }}.tar.gz" + checksum: "{{ mantis_checksum }}" + +- name: Extract mantis + unarchive: + src: "/tmp/mantisbt-{{ mantis_version }}.tar.gz" + dest: "/var/www" + owner: apache + group: apache + mode: '0644' + remote_src: true + +- name: Generate crypto salt + shell: "set -o pipefail && cat /dev/urandom | head -c 64 | base64 --wrap=0" + changed_when: "1 != 1" + register: cryptosalt_string + +- name: Configure mantis + template: + src: "var/www/mantis/config/config_inc.php.j2" + dest: "/var/www/mantisbt-{{ mantis_version }}/config/config_inc.php" + owner: apache + group: apache + mode: '0640' + +- name: Deploy plugins from Mantis GitHub + git: + repo: "https://github.com/mantisbt-plugins/{{ item }}.git" + dest: "/var/www/mantisbt-{{ mantis_version }}/plugins/{{ item }}" + update: true + version: master + with_items: + - Snippets + +- name: Deploy custom libravatar plugin + git: + repo: "https://github.com/nazunalika/mantisbt-libravatar.git" + dest: "/var/www/mantisbt-{{ mantis_version }}/plugins/Libravatar" + update: true + version: main + +- name: Deploy custom mattermost plugin + git: + repo: "https://github.com/nazunalika/mantisbt-mattermost.git" + dest: "/var/www/mantisbt-{{ mantis_version }}/plugins/Mattermost" + update: true + version: main + +- name: Configure httpd + template: + src: "etc/httpd/conf.d/mantis.conf.j2" + dest: "/etc/httpd/conf.d/mantis.conf" + owner: root + group: root + mode: '0644' + +- name: Database import template + template: + src: "tmp/mantis_import.sql.j2" + dest: "/tmp/mantis_import.sql.j2" + owner: root + group: root + mode: '0600' + +# We will need to generate this +# name: Import database if required +# 
community.general.postgresql_db: +# name: "{{ mantis_db_name }}" +# target: /tmp/mantis_import.sql +# owner: "{{ mantis_db_user }}" +# state: restore +# login_host: "{{ mantis_db_host }}" +# login_user: "{{ mantis_db_user }}" +# login_password: "{{ mantis_db_pass }}" + +- name: Patch up some pages + import_tasks: mantispatch.yml +... diff --git a/tasks/mantispatch.yml b/tasks/mantispatch.yml new file mode 100644 index 0000000..5ccbe0f --- /dev/null +++ b/tasks/mantispatch.yml @@ -0,0 +1,26 @@ +--- +# Patch up various pieces of mantis to customize it. We do not rely on local +# bug tracker accounts. We are doing regex instead of just replacing the +# file as a whole. Should make it easier to deal with upgrades in theory. +- name: Change signup_page.php to Account Services + replace: + path: "/var/www/mantisbt-{{ mantis_version }}/{{ item }}" + regexp: 'signup_page.php' + replace: 'https://accounts.rockylinux.org' + with_items: + - core/print_api.php + - lost_pwd_page.php + - login_page.php + +- name: Change special signup_page.php reference + replace: + path: "/var/www/mantisbt-{{ mantis_version }}/core/layout_api.php" + regexp: "' . helper_mantis_url( 'signup_page.php' ) . '" + replace: 'https://accounts.rockylinux.org' + +- name: Remove LDAP from checks for signup button + lineinfile: + path: "/var/www/mantisbt-{{ mantis_version }}/login_page.php" + state: absent + regex: 'LDAP != config_get_global' +... diff --git a/tasks/mirrormanager.yml b/tasks/mirrormanager.yml new file mode 100644 index 0000000..fee6a1f --- /dev/null +++ b/tasks/mirrormanager.yml @@ -0,0 +1,68 @@ +--- +# Mirrormanager tasks +- name: Configure SELinux booleans + become: true + ansible.posix.seboolean: + name: "{{ item }}" + persistent: true + state: true + with_items: + - httpd_can_network_connect_db + - httpd_can_network_connect + +- name: Create mirrormanager group + become: true + ansible.builtin.group: + name: "{{ (mirrormanager_user | default({})).group }}" + gid: "{{ (mirrormanager_user | default({})).gid | default(omit) }}" + system: "{{ (mirrormanager_user | default({})).system | default('yes') }}" + when: (mirrormanager_user | default({})).group is defined + +- name: Create mirrormanager user + become: true + ansible.builtin.user: + name: "{{ (mirrormanager_user | default({})).name | default(_wiki_defaultusr) }}" + comment: "{{ (mirrormanager_user | default({})).comment | default(omit) }}" + uid: "{{ (mirrormanager_user | default({})).uid | default(omit) }}" + group: "{{ (mirrormanager_user | default({})).group | default(omit) }}" + groups: "{{ (mirrormanager_user | default({})).groups | default(omit) }}" + home: "{{ (mirrormanager_user | default({})).home | default(mirrormanager_dir) }}" + create_home: "{{ (mirrormanager_user | default({})).create_home | default('no') }}" + shell: "{{ (mirrormanager_user | default({})).shell | default(omit) }}" + system: "{{ (mirrormanager_user | default({})).system | default('no') }}" + +- name: Create webroot directory + become: true + file: + path: "{{ mirrormanager_dir }}" + state: directory + group: "{{ mirrormanager_user.group }}" + owner: "{{ mirrormanager_user.name }}" + mode: "u=rwX,g=rX,o=rX" # 755 folders, 644 files + recurse: yes + seuser: system_u + serole: object_r + setype: httpd_sys_content_t + + #- name: Checkout git repository at version + # become: true + # ansible.builtin.git: + # repo: "https://github.com/fedora-infra/mirrormanager2.git" + # dest: "{{ mirrormanager_dir }}/app" + # depth: 1 + # version: "ee381257fcfef2eb38705d98f992d2ae8fb7bb8c" + # 
update: no + +- name: Deploy MM2 config + become: true + template: + src: "opt/mirrormanager/mirrormanager2.cfg.j2" + dest: "{{ mirrormanager_dir }}/app/mirrormanager2.cfg" + group: "{{ mirrormanager_user.group }}" + owner: "{{ mirrormanager_user.name }}" + mode: 0700 + seuser: system_u + serole: object_r + setype: httpd_sys_rw_content_t + tags: + - config diff --git a/tasks/noggin.yml b/tasks/noggin.yml new file mode 100644 index 0000000..735afcd --- /dev/null +++ b/tasks/noggin.yml @@ -0,0 +1,89 @@ +--- +- name: Ensure python is installed + yum: + name: + - python3 + - python3-pip + state: present + +- name: Ensure noggin user exists + user: + name: noggin + comment: "Noggin FAS" + +- name: Create noggin directory + file: + path: /opt/noggin + state: directory + mode: '0700' + owner: noggin + group: noggin + +- name: Deploy noggin + git: + repo: https://github.com/fedora-infra/noggin.git + dest: /opt/noggin/noggin + update: true + version: main + become: true + become_user: noggin + +- name: Noggin user must install poetry + pip: + name: poetry + executable: pip3 + become: true + become_user: noggin + +- name: Remove any pycache + file: + path: "/home/noggin/.cache/pypoetry" + state: absent + +- name: Noggin installation + command: "/home/noggin/.local/bin/poetry install --no-dev --extras deploy" + become: true + become_user: noggin + changed_when: "1 != 1" + args: + chdir: "/opt/noggin/noggin" + +- name: Get the noggin poetry virtualenv + shell: + cmd: "poetry env list | awk '{print $1}'" + chdir: "/opt/noggin/noggin" + become: true + become_user: noggin + changed_when: "1 != 1" + register: virtualenv_location + +- name: Deploy start up script + template: + src: "opt/noggin/start_noggin.sh.j2" + dest: "/opt/noggin/start_noggin.sh" + mode: '0750' + owner: noggin + group: noggin + +- name: Deploy systemd unit + copy: + src: "etc/systemd/system/noggin.service" + dest: "/etc/systemd/system/noggin.service" + owner: root + group: root + mode: '0644' + +- name: Deploy noggin configuration + template: + src: "opt/noggin/noggin.cfg.j2" + dest: "/opt/noggin/noggin.cfg" + owner: noggin + group: noggin + mode: '0600' + +# The only way to run it properly, at least on EL8, is to add this line +- name: Add missing create_app call + lineinfile: + path: "/opt/noggin/noggin/noggin/app.py" + line: "app = create_app()" +...
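The noggin tasks above deploy the unit file and configuration but do not start the service. A follow-up task along these lines (an assumption for illustration, not part of this patch) would typically bring it up once configuration is in place:

```yaml
- name: Enable and start noggin
  systemd:
    name: noggin
    daemon_reload: true
    state: started
    enabled: true
```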
diff --git a/tasks/openqa.yml b/tasks/openqa.yml new file mode 100644 index 0000000..27d3585 --- /dev/null +++ b/tasks/openqa.yml @@ -0,0 +1,192 @@ +--- +- name: Install OpenQA packages + yum: + name: "{{ openqa_packages }}" + state: present + tags: + - packages + +- name: Copy httpd configuration files + copy: + remote_src: true + src: /etc/httpd/conf.d/{{ item }}.template + dest: /etc/httpd/conf.d/{{ item }} + mode: '0644' + owner: root + group: root + loop: + - openqa.conf + - openqa-ssl.conf + notify: restart_httpd + tags: + - configure + +- name: Template OpenQA configuration files + template: + src: etc/openqa/{{ item }}.j2 + dest: /etc/openqa/{{ item }} + owner: "{{ openqa_user }}" + group: "{{ openqa_group }}" + mode: "0444" + loop: + - openqa.ini + - client.conf + tags: + - configure + +- name: Get service facts + service_facts: + +- name: Check for non-empty postgres data directory + stat: + path: /var/lib/pgsql/data/base + register: postgres_data_dir + +- name: If postgresql is not already running, initialize database + command: postgresql-setup --initdb + when: not ( ansible_facts.services["postgresql.service"]["state"] == "running" ) + and not postgres_data_dir.stat.exists + +- name: Enable and start postgresql service + systemd: + name: postgresql + state: started + enabled: true + when: not ( ansible_facts.services["postgresql.service"]["state"] == "running" ) + and not postgres_data_dir.stat.exists + +- name: Configure SELinux to allow httpd connection to network + seboolean: + name: httpd_can_network_connect + state: true + persistent: true + tags: + - configure + +- name: Enable and start OpenQA services + systemd: + name: "{{ item }}" + state: started + enabled: true + loop: "{{ openqa_services }}" + tags: + - configure + +- name: Create openqa-vnc firewalld service + template: + src: etc/firewalld/services/openqa-vnc.xml.j2 + dest: /etc/firewalld/services/openqa-vnc.xml + owner: root + group: root + mode: "0644" + tags: + - configure + +- name: Load openqa-vnc firewalld service + systemd: + name: firewalld + state: reloaded + tags: + - configure + +- name: Permit traffic for {{ item }} service + ansible.posix.firewalld: + service: "{{ item }}" + permanent: true + state: enabled + loop: + - http + - openqa-vnc + tags: + - configure + +- name: Reload FirewallD + systemd: + name: firewalld + state: reloaded + tags: + - configure + +- name: Check for existing repository + stat: + path: "{{ openqa_homedir }}/share/tests/rocky" + register: rocky_testing_repo + tags: + - configure + +- name: Clone repository if it does not already exist + git: + accept_hostkey: true + dest: "{{ openqa_homedir }}/share/tests/rocky" + repo: "{{ openqa_rocky_testing_repo }}" + version: develop + when: not rocky_testing_repo.stat.exists + tags: + - configure + +- name: Set owner/group/permissions on repo contents + file: + path: "{{ openqa_homedir }}/share/tests/rocky" + recurse: true + owner: "{{ openqa_user }}" + group: "{{ openqa_group }}" + mode: "u+rwX,g+rwX,o+rX,o-w" + tags: + - configure + +# fifloader.py will fail if the Demo user is not logged in +- name: Authenticate to web UI the first time + uri: + url: "http://{{ openqa_host }}/login" + +- name: Run fifloader.py + command: ./fifloader.py -l -c templates.fif.json templates-updates.fif.json + changed_when: "1 != 1" + args: + chdir: "{{ openqa_homedir }}/share/tests/rocky" + +- name: Create ISO directory + file: + path: "{{ openqa_homedir }}/share/factory/iso/fixed" + state: directory + owner: "{{ openqa_user }}" + group: "{{ 
openqa_group }}" + mode: "0775" + tags: + - download_isos + +- name: Download ISOs + get_url: + dest: "{{ openqa_homedir }}/share/factory/iso/fixed/{{ item.name }}" + url: "{{ rocky_iso_download_url }}/{{ item.name }}" + checksum: "{{ item.checksum }}" + owner: "{{ openqa_user }}" + group: "{{ openqa_group }}" + tmp_dest: "/var/tmp" + mode: "0644" + loop: "{{ openqa_isos }}" + tags: + - download_isos + +- name: Start {{ openqa_worker_count }} OpenQA workers + ansible.builtin.systemd: + name: "openqa-worker@{{ item }}" + state: started + enabled: true + # range 'end' parameter is exclusive, so add 1 + loop: "{{ range(1, (openqa_worker_count|int + 1)) | list }}" + tags: + - start_workers + - configure + +- name: POST a job + command: | + openqa-cli api -X POST isos \ + ISO=Rocky-{{ rocky_version }}-{{ rocky_arch }}-minimal.iso \ + ARCH={{ rocky_arch }} \ + DISTRI=rocky \ + FLAVOR=minimal-iso \ + VERSION={{ rocky_version }} \ + BUILD="{{ '%Y%m%d.%H%M%S' | strftime }}.0" + changed_when: "1 != 1" +... diff --git a/tasks/postfix_relay.yml b/tasks/postfix_relay.yml new file mode 100644 index 0000000..c25b5b7 --- /dev/null +++ b/tasks/postfix_relay.yml @@ -0,0 +1,38 @@ +--- +# Configure relay +- name: Ensure postfix is installed + yum: + name: + - postfix + - cyrus-sasl-plain + state: present + +- name: Add password map + template: + src: etc/postfix/sasl_passwd.j2 + dest: /etc/postfix/sasl_passwd + owner: root + group: root + mode: '0600' + notify: rehash_postfix_sasl + +- name: Add relay information to postfix + blockinfile: + path: /etc/postfix/main.cf + marker: "## ANSIBLE MANAGED ##" + block: | + smtp_tls_note_starttls_offer = yes + relayhost = [{{ smtp_relayhost }}]:587 + smtp_use_tls = yes + smtp_sasl_auth_enable = yes + smtp_sasl_security_options = + smtp_sasl_password_maps = hash:/etc/postfix/sasl_passwd + smtp_tls_CAfile = /etc/pki/tls/certs/ca-bundle.crt + notify: restart_postfix + +- name: Ensure postfix is running and enabled + service: + name: postfix + state: restarted + enabled: true +... diff --git a/tasks/repository.yml b/tasks/repository.yml new file mode 100644 index 0000000..ca86fa3 --- /dev/null +++ b/tasks/repository.yml @@ -0,0 +1,3 @@ +--- +# no tasks yet +... diff --git a/tasks/scripts.yml b/tasks/scripts.yml new file mode 100644 index 0000000..7e555f6 --- /dev/null +++ b/tasks/scripts.yml @@ -0,0 +1,18 @@ +--- +# Common scripts that rocky uses on nodes +- name: Lock Wrapper script + copy: + src: "usr/local/bin/lock-wrapper" + dest: "/usr/local/bin/lock-wrapper" + owner: root + group: root + mode: '0755' + +- name: dmidecode pretty script + copy: + src: "usr/local/bin/dmidecode-pretty" + dest: "/usr/local/bin/dmidecode-pretty" + owner: root + group: root + mode: '0755' +... diff --git a/tasks/srpmproc.yml b/tasks/srpmproc.yml new file mode 100644 index 0000000..23a0ae2 --- /dev/null +++ b/tasks/srpmproc.yml @@ -0,0 +1,10 @@ +--- +- name: Configure SELinux booleans + ansible.posix.seboolean: + name: "{{ item }}" + persistent: true + state: true + with_items: + - httpd_can_network_connect_db + - httpd_can_network_connect +... 
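The OpenQA tasks above expect `openqa_isos` entries carrying a `name` and `checksum`, plus an `openqa_worker_count` for the worker units. A sketch of that shape; the version, architecture, and checksum values are placeholders:

```yaml
---
# Shape of the OpenQA variables used above; values are placeholders
openqa_worker_count: 2
openqa_isos:
  - name: Rocky-8.5-x86_64-minimal.iso            # placeholder
    checksum: "sha256:0123456789abcdef"           # placeholder
```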
diff --git a/tasks/ssh_config.yml b/tasks/ssh_config.yml new file mode 100644 index 0000000..15941ac --- /dev/null +++ b/tasks/ssh_config.yml @@ -0,0 +1,46 @@ +--- +- name: Ensure SSH server is installed + package: + name: openssh-server + state: present + +- name: Ensure SSH daemon is enabled + service: + name: sshd + enabled: true + +# TODO: Prepare for /etc/ssh/sshd_config.d/* style of configuration +- name: SSH daemon configuration - global + block: + - name: SSH daemon configuration - base + template: + src: "etc/ssh/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}-sshd_config.j2" + dest: "/etc/ssh/sshd_config" + owner: root + group: root + mode: '0600' + validate: /usr/sbin/sshd -t -f %s + backup: true + notify: restart_sshd + rescue: + - name: Print errors for configuration and validation + debug: + msg: "Error in SSH daemon configuration or template" + +- name: SSH banner + copy: + src: "etc/rockybanner" + dest: "/etc/rockybanner" + owner: root + group: root + mode: '0644' + notify: restart_sshd + +- name: Remove DSA keys + file: + path: "{{ item }}" + state: absent + with_items: + - /etc/ssh/ssh_host_dsa_key.pub + - /etc/ssh/ssh_host_dsa_key +... diff --git a/tasks/variable_loader_common.yml b/tasks/variable_loader_common.yml new file mode 100644 index 0000000..ab182af --- /dev/null +++ b/tasks/variable_loader_common.yml @@ -0,0 +1,22 @@ +--- +- name: Standard System Configuration Variables + block: + - name: Loading Variables from OS Common + include_vars: "{{ item }}" + with_items: + - "{{ ansible_distribution }}.yml" + + - name: Create overrides if we're an IPA Replica + include_vars: "{{ item }}" + with_first_found: + - "ipaserver.yml" + when: "'ipaserver' in group_names" + + - name: Check if system is EFI + stat: + path: "/sys/firmware/efi" + register: efi_installed + + always: + - debug: msg="Variables are now loaded" +... 
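`tasks/chrony.yml` and the variable loader above switch their overrides on inventory group membership (`chronyservers`, `ipaserver`). A minimal YAML inventory sketch that would trigger those branches; the hostnames are placeholders:

```yaml
all:
  children:
    ipaserver:
      hosts:
        ipa001.example.com:
    chronyservers:
      hosts:
        ntp001.example.com:
```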
diff --git a/templates/README.md b/templates/README.md new file mode 100644 index 0000000..25a2632 --- /dev/null +++ b/templates/README.md @@ -0,0 +1 @@ +Templates go here diff --git a/templates/etc/httpd/conf.d/bugzilla.conf.j2 b/templates/etc/httpd/conf.d/bugzilla.conf.j2 new file mode 100644 index 0000000..b994fbf --- /dev/null +++ b/templates/etc/httpd/conf.d/bugzilla.conf.j2 @@ -0,0 +1,37 @@ + + ServerAdmin infrastructure@rockylinux.org + DocumentRoot "{{ bugzilla_dir }}" + ServerName bugs.rockylinux.org + TransferLog /var/log/httpd/bugzilla_access.log + ErrorLog /var/log/httpd/bugzilla_error.log + + AddHandler cgi-script .cgi + DirectoryIndex index.cgi + Options MultiViews FollowSymlinks ExecCGI FollowSymLinks + AllowOverride All + Order allow,deny + Allow from all + + + + + SSLEngine on + SSLHonorCipherOrder on + SSLCipherSuite PROFILE=SYSTEM + SSLProxyCipherSuite PROFILE=SYSTEM + SSLCertificateFile /etc/pki/tls/certs/bugs.rockylinux.org.crt + SSLCertificateKeyFile /etc/pki/tls/private/bugs.rockylinux.org.key + ServerAdmin infrastructure@rockylinux.org + DocumentRoot "{{ bugzilla_dir }}" + ServerName bugs.rockylinux.org + TransferLog /var/log/httpd/bugzilla_access.log + ErrorLog /var/log/httpd/bugzilla_error.log + + AddHandler cgi-script .cgi + DirectoryIndex index.cgi + Options MultiViews FollowSymlinks ExecCGI FollowSymLinks + AllowOverride All + Order allow,deny + Allow from all + + diff --git a/templates/etc/httpd/conf.d/mantis.conf.j2 b/templates/etc/httpd/conf.d/mantis.conf.j2 new file mode 100644 index 0000000..5d08ddd --- /dev/null +++ b/templates/etc/httpd/conf.d/mantis.conf.j2 @@ -0,0 +1,33 @@ + + ServerAdmin infrastructure@rockylinux.org + DocumentRoot "/var/www/mantisbt-{{ mantis_version }}" + ServerName bugs.rockylinux.org + TransferLog /var/log/httpd/mantis_access.log + ErrorLog /var/log/httpd/mantis_error.log + + Options MultiViews FollowSymlinks + AllowOverride All + Order allow,deny + Allow from all + + + + + SSLEngine on + SSLHonorCipherOrder on + SSLCipherSuite PROFILE=SYSTEM + SSLProxyCipherSuite PROFILE=SYSTEM + SSLCertificateFile /etc/pki/tls/certs/bugs.rockylinux.org.crt + SSLCertificateKeyFile /etc/pki/tls/private/bugs.rockylinux.org.key + ServerAdmin infrastructure@rockylinux.org + DocumentRoot "/var/www/mantisbt-{{ mantis_version }}" + ServerName bugs.rockylinux.org + TransferLog /var/log/httpd/mantis_access.log + ErrorLog /var/log/httpd/mantis_error.log + + Options MultiViews FollowSymlinks + AllowOverride All + Order allow,deny + Allow from all + + diff --git a/templates/etc/postfix/sasl_passwd.j2 b/templates/etc/postfix/sasl_passwd.j2 new file mode 100644 index 0000000..87a6034 --- /dev/null +++ b/templates/etc/postfix/sasl_passwd.j2 @@ -0,0 +1 @@ +[{{ smtp_relayhost }}]:587 {{ smtp_user_name }}:{{ smtp_user_pass }} diff --git a/templates/opt/mirrormanager/mirrormanager2.cfg.j2 b/templates/opt/mirrormanager/mirrormanager2.cfg.j2 new file mode 100644 index 0000000..a9ce9c0 --- /dev/null +++ b/templates/opt/mirrormanager/mirrormanager2.cfg.j2 @@ -0,0 +1,169 @@ +# -*- coding: utf-8 -*- + +''' +MirrorManager2 sample configuration. +''' + +### +# Most important configuration items +### + +# the number of items to display on the search pages +# Default: ``50``. 
+ITEMS_PER_PAGE = 50 + + +# url to the database server: +DB_URL='postgresql://{{ mirrormanager_db.user }}:{{ mirrormanager_db.password }}@{{ mirrormanager_db.host }}:{{ mirrormanager_db.port }}/{{ mirrormanager_db.dbname }}' + +# secret key used to generate unique csrf token +SECRET_KEY = '{{ mirrormanager_secret_key }}' + +# Seed used to make the password harder to brute force in case of leaking +# This should be kept really secret! +PASSWORD_SEED = "{{ mirrormanager_password_seed }}" + +# Make browsers send session cookie only via HTTPS +SESSION_COOKIE_SECURE=True + +### +# Other configuration items for the web-app +### + +from datetime import timedelta + +# Set the time after which the session expires. Flask's default is 31 days. +# Default: ``timedelta(hours=1)`` corresponds to 1 hour. +PERMANENT_SESSION_LIFETIME = timedelta(hours=1) + +# Folder containing the theme to use. +# Default: ``fedora``. +THEME_FOLDER = 'fedora' + +# Which authentication method to use, defaults to `fas` can be or `local` +# Default: ``fas``. +MM_AUTHENTICATION = 'fas' + +# If the authentication method is `fas`, groups in which should be the user +# to be recognized as an admin. +ADMIN_GROUP = ['sysadmin-main', 'sysadmin-web'] + +# Email of the admin to which send notification or error +ADMIN_EMAIL = ['admin@rockylinux.org', 'neil@rockylinux.org'] + +# Email address used in the 'From' field of the emails sent. +# Default: ``nobody@fedoraproject.org``. +EMAIL_FROM = 'nobody@rockylinux.org' + +# SMTP server to use, +# Default: ``localhost``. +SMTP_SERVER = 'localhost' + +# If the SMTP server requires authentication, fill in the information here +# SMTP_USERNAME = 'username' +# SMTP_PASSWORD = 'password' + +# When this is set to True, an additional menu item is shown which can +# be used to browse the different statistics generated by +# mirrorlist_statistics.py. +SHOW_STATISTICS = True + +# This is the directory the code enabled by SHOW_STATISTICS will use +# to locate the statistics files and display them. +STATISTICS_BASE = '/var/www/mirrormanager-statistics/data' + +# Countries which have to be excluded. +EMBARGOED_COUNTRIES = ['CU', 'IR', 'KP', 'SD', 'SY'] + +# When this is set to True, an additional menu item is shown which +# displays the maps generated with mm2_generate-worldmap. +SHOW_MAPS = True + +# Location of the static map displayed in the map tab. +STATIC_MAP = '/map/map.png' + +# Location of the interactive openstreetmap based map. +INTERACTIVE_MAP = '/map/mirrors.html' + +# The crawler can generate propagation statistics which can be +# converted into svg/pdf with mm2_propagation. These files +# can be displayed next to the statistics and maps tab if desired. +SHOW_PROPAGATION = True + +# Where to look for the above mentioned propagation images. +PROPAGATION_BASE = '/var/www/mirrormanager-statistics/data/propagation' + +# Disable master rsync server ACL +# Fedora does not use it and therefore it is set to False +MASTER_RSYNC_ACL = False + +# When this is set to True, the session cookie will only be returned to the +# server via ssl (https). If you connect to the server via plain http, the +# cookie will not be sent. This prevents sniffing of the cookie contents. +# This may be set to False when testing your application but should always +# be set to True in production. +# Default: ``True``. +MM_COOKIE_REQUIRES_HTTPS = True + +# The name of the cookie used to store the session id. +# Default: ``.MirrorManager``. 
+MM_COOKIE_NAME = 'MirrorManager' + +# If this variable is set (and the directory exists) the crawler +# will create per host log files in MM_LOG_DIR/crawler/.log +# which can the be used in the web interface by the mirror admins. +# Other parts besides the crawler are also using this variable to +# decide where to store log files. +MM_LOG_DIR = '/var/log/mirrormanager' + +# This is used to exclude certain protocols to be entered +# for host category URLs at all. +# The following is the default for Fedora to exclude FTP based +# mirrors to be added. Removing this confguration option +# or setting it to '' removes any protocol restrictions. +MM_PROTOCOL_REGEX = '^(?!ftp)(.*)$' + +# The netblock size parameters define which netblock sizes can be +# added by a site administrator. Larger networks can only be added by +# mirrormanager admins. +MM_IPV4_NETBLOCK_SIZE = '/16' +MM_IPV6_NETBLOCK_SIZE = '/32' + +# If not specified the application will rely on the root_url when sending +# emails, otherwise it will use this URL +# Default: ``None``. +APPLICATION_URL = None + +# Boolean specifying wether to check the user's IP address when retrieving +# its session. This make things more secure (thus is on by default) but +# under certain setup it might not work (for example is there are proxies +# in front of the application). +CHECK_SESSION_IP = True + +# Specify additional rsync parameters for the crawler +# # --timeout 14400: abort rsync crawl after 4 hours +# # --no-human-readable: because rsync made things pretty by default in 3.1.x +CRAWLER_RSYNC_PARAMETERS = '--no-motd --timeout 14400 --exclude=lost+found --no-human-readable' + +# This is a list of directories which MirrorManager will ignore while guessing +# the version and architecture from a path. +SKIP_PATHS_FOR_VERSION = [ + 'pub/alt', + 'pub/archive', +] + +### +# Configuration options used by the crons +### + +# Specify whether the crawler should send a report by email +CRAWLER_SEND_EMAIL = False + +# If a host fails for CRAWLER_AUTO_DISABLE times in a row +# the host will be disable automatically (user_active) +CRAWLER_AUTO_DISABLE = 4 + +UMDL_PREFIX = '/srv/' + +umdl_master_directories = [ +] diff --git a/templates/tmp/mantis_import.sql.j2 b/templates/tmp/mantis_import.sql.j2 new file mode 100644 index 0000000..b7db254 --- /dev/null +++ b/templates/tmp/mantis_import.sql.j2 @@ -0,0 +1 @@ +# Empty diff --git a/templates/var/www/bugzilla/answer b/templates/var/www/bugzilla/answer new file mode 100644 index 0000000..4fb4ccc --- /dev/null +++ b/templates/var/www/bugzilla/answer @@ -0,0 +1,11 @@ +$answer{'db_host'} = '{{ bugzilla_db_host }}'; +$answer{'db_driver'} = 'pg'; +$answer{'db_port'} = 0; +$answer{'db_name'} = '{{ bugzilla_db_name }}'; +$answer{'db_user'} = '{{ bugzilla_db_user }}'; +$answer{'db_pass'} = '{{ bugzilla_db_pass }}'; +$answer{'urlbase'} = 'https://bugs.rockylinux.org/'; +$answer{'ADMIN_EMAIL'} = 'infrastructure@rockylinux.org'; +$answer{'ADMIN_PASSWORD'} = '{{ bugzilla_admin_password }}'; +$answer{'ADMIN_REALNAME'} = 'Infrastructure'; +$answer{'NO_PAUSE'} = 1 diff --git a/templates/var/www/bugzilla/localconfig.j2 b/templates/var/www/bugzilla/localconfig.j2 new file mode 100644 index 0000000..f053ef6 --- /dev/null +++ b/templates/var/www/bugzilla/localconfig.j2 @@ -0,0 +1,19 @@ +$create_htaccess = 1; +$webservergroup = 'apache'; +$use_suexec = 0; +$db_driver = 'pg'; +$db_host = '{{ bugzilla_db_host }}'; +$db_name = '{{ bugzilla_db_name }}'; +$db_user = '{{ bugzilla_db_user }}'; +$db_pass = '{{ bugzilla_db_pass }}'; 
+$db_port = 0; +$db_sock = ''; +$db_check = 1; +$db_mysql_ssl_ca_file = ''; +$db_mysql_ssl_ca_path = ''; +$db_mysql_ssl_client_cert = ''; +$db_mysql_ssl_client_key = ''; +$index_html = 0; +$interdiffbin = '/usr/bin/interdiff'; +$diffpath = '/usr/bin'; +$site_wide_secret = '{{ lookup('password', '/dev/null length=54 chars=ascii_letters') }}'; diff --git a/templates/var/www/mantis/config/config_inc.php.j2 b/templates/var/www/mantis/config/config_inc.php.j2 new file mode 100644 index 0000000..a653d09 --- /dev/null +++ b/templates/var/www/mantis/config/config_inc.php.j2 @@ -0,0 +1,46 @@ +