diff --git a/README.md b/README.md
index c37f25c..ebdf2f4 100644
--- a/README.md
+++ b/README.md
@@ -1,12 +1,10 @@
-ansible-role-nebula
-===================
+# ansible-role-nebula
 
 This role helps setup Nebula on applicable nodes in the RESF. Most settings for this role are specifically for the RESF and its projects. However, it is perfectly possible to use this on your own. Note that this relies specifically on `rocky-release-core` being installable, which means Rocky Linux will work without issues. Fedora Linux will work as nebula is available in their base repositories. Other distributions may not work. If there are issues with this role for your use case, please file an issue or a PR if you would like to enhance this role.
 
-Requirements
-------------
+## Requirements
 
 Requirements are as follows:
 
@@ -14,25 +12,75 @@ Requirements are as follows:
 * Ansible collections: community.general
 * ansible-core >= 2.14
 
-Role Variables
---------------
+## Role Variables
 
 A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
 
-Dependencies
-------------
+## Dependencies
 
 * `community.general`
+* `ansible.posix` (only for tests)
 
-Example Playbook
-----------------
+## Variables
 
-License
--------
+This is not an all-inclusive list. For additional variables, check `defaults/main.yml`.
+
+| Variable | Default Value | Required | Description |
+|---------------------------------------------|--------------------------------|-------------|-------------|
+| `nebula_am_lighthouse` | `false` | Conditional | Sets this node as a lighthouse. |
+| `nebula_lighthouse_internal` | 60 | No | How often (in seconds) a node should report to a lighthouse. |
+| `nebula_routable_ip` | | No | The publicly routable IP that nebula needs to know about. If not set, it will be determined automatically. |
+| `nebula_ip` | | Yes | IP required by nebula in the form of `X.X.X.X/X` (for example, `10.100.0.44/24`). |
+| `nebula_ca_host` | | Yes | The hostname of the host which should be used as the CA. Exactly one (1) MUST be a CA. Required if `nebula_am_lighthouse` is `true`. |
+| `nebula_is_ca` | `false` | Conditional | Whether the host is the CA. If `nebula_ca_host` is not defined, exactly one play host will need this set to `true`. Required if `nebula_am_lighthouse` is `true`. |
+| `nebula_is_member` | `true` | Yes | Whether this node is a member of the mesh. |
+| `nebula_ca_name` | RESF Nebula CA | Yes | Sets the name of the CA. |
+| `nebula_ca_life` | 175200h | No | Sets the lifetime of the CA certificate. |
+| `nebula_ca_wait_timeout_secs` | 300 | No | Timeout in seconds for members to wait until the CA is ready to issue certificates. |
+| `nebula_nodename` | `{{ ansible_facts.hostname }}` | No | Name of this nebula member. Defaults to the hostname, but it can be set explicitly. |
+| `nebula_groups` | `[]` | Conditional | List of groups that a node is assigned to. This is added to the issued certificate for the node. |
+| `nebula_listen_host` | 0.0.0.0 | Conditional | The IP of the interface nebula should bind to. Default is all IPv4 interfaces. Use `[::]` if you want to enable IPv6. |
+| `nebula_listen_port` | 4242 | Conditional | The port to bind to. Default is `4242`, matching the nebula documentation. |
+| `nebula_listen_batch` | | No | Max number of packets to pull from the kernel on each syscall. |
+| `nebula_listen_read_buffer` | | No | Read socket buffer for the UDP side. Values will be doubled in the kernel. Default is `net.core.rmem_default` on the system. |
+| `nebula_listen_write_buffer` | | No | Write socket buffer for the UDP side. Values will be doubled in the kernel. Default is `net.core.wmem_default` on the system. |
+| `nebula_listen_send_recv_error` | | No | Nebula will reply to packets it has no tunnel for with a recv_error packet. This helps speed up reconnection in cases where nebula did not shut down cleanly. The caveat is that this can be abused to check whether nebula is running on a host. |
+| `nebula_punchy_punch` | `true` | Conditional | Used for NAT situations. In most cases NAT exists, so this is set to `true`. Enabling this causes the node to send small packets at an interval. |
+| `nebula_punchy_respond` | | No | Set this to `true` if the node is unable to receive handshakes and should attempt to initiate one (for the case where hole punching fails in one direction). Useful if a host is behind a difficult NAT (like symmetric NAT). |
+| `nebula_punchy_respond_delay` | | No | Number of seconds to delay before attempting a punch. Only valid if `nebula_punchy_respond` is `true`. |
+| `nebula_punchy_delay` | | No | Number of seconds to delay/slow down punch responses. This is helpful if NAT is unable to handle certain race conditions. Only valid if `nebula_punchy_respond` is `true`. |
+| `nebula_cipher` | aes | No | Unless you know what you're doing, avoid touching this setting. Refer to the nebula documentation. |
+| `nebula_tun_disabled` | `false` | Conditional | Set to `true` if you do not want the tunnel up. Most people want a tunnel. |
+| `nebula_tun_dev` | rneb01 | No | Sets the tunnel device name. |
+| `nebula_tun_drop_local_broadcast` | `false` | No | Toggles forwarding of local broadcast packets. This depends on the CIDR in the certificate for the node. |
+| `nebula_tun_drop_multicast` | `false` | No | Toggles forwarding of multicast packets. |
+| `nebula_tun_tx_queue` | 500 | No | Transmit queue length. Raise this number if there are a lot of transmit drops. |
+| `nebula_tun_mtu` | 1300 | No | Default MTU for every packet. The safest setting is 1300 for internet-routed packets. |
+| `nebula_tun_use_system_route_table` | `false` | No | Exactly as it says: set to `true` if you want to manage unsafe routes directly on the system route table with gateway routes instead of in nebula. |
+| `nebula_routes` | `[]` | No | List of dictionaries. Use this to create route-based MTU overrides. If you have a known path that can support a larger MTU, you can set it this way. |
+| `nebula_unsafe_routes` | `[]` | No | List of dictionaries. Allows you to route traffic over nebula to non-nebula nodes. This should be avoided unless you have hosts that cannot run nebula. See the nebula documentation. |
+| `nebula_logging_level` | info | No | Sets the log level. |
+| `nebula_logging_format` | text | No | Formatting of the logs. Can be `text` or `json`. |
+| `nebula_logging_disable_timestamp` | `false` | No | Disables timestamp logging. If the output is redirected to some logging system, set to `true`. |
+| `nebula_logging_timestamp_format` | | No | Sets the timestamp format. Default is RFC3339 unless the format is `text` and the output is attached to a TTY. |
+| `nebula_firewall_conntrack_tcp_timeout` | 12m | No | Sets the connection tracking TCP timeout. |
+| `nebula_firewall_conntrack_udp_timeout` | 3m | No | Sets the connection tracking UDP timeout. |
+| `nebula_firewall_conntrack_default_timeout` | 10m | No | Sets the default connection tracking timeout. |
+| `nebula_firewall_inbound_rules` | `any` | No | List of dictionaries. Sets the appropriate inbound rules for this node. |
+| `nebula_firewall_outbound_rules` | `any` | No | List of dictionaries. Sets the appropriate outbound rules for this node. |
+| `nebula_pki_disconnect_invalid` | `true` | No | Forcefully disconnects a client if its certificate is expired or invalid. |
+| `nebula_pki_blocklist` | `[]` | No | List of certificate fingerprints that should be blocked even if they are valid. |
+| `nebula_cert_public_key` | | No | Nebula node public key to use. If defined, no key pair is generated on the CA; this key is signed and used. Requires `nebula_cert_private_key` to be set. |
+| `nebula_cert_private_key` | | No | Nebula node private key to use. If defined, no key pair is generated on the CA; this key is used. Requires `nebula_cert_public_key` to be set. |
+| `nebula_preferred_ranges` | | No | Sets the priority order for underlay IP addresses. See [the documentation](https://nebula.defined.net/docs/config/preferred-ranges/). |
+| `nebula_routines` | | No | Number of thread pairs to run that consume from the tun and UDP queues. The default is `1`, which means there is one tun and one UDP queue reader. The maximum recommended setting is half the available CPU cores. |
+
+## Example Playbook
+
+## License
 
 ...
 
-Author Information
-------------------
+## Author Information
 
 Louis Abel
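The `## Example Playbook` section added above is left empty. As a starting point, a play along these lines would exercise the variables documented in the table; it is a minimal sketch and not part of the diff. The role name `rockylinux.nebula` and the variable names come from this repository, while the inventory group and CA hostname are assumptions:

```yaml
---
# Minimal sketch only. The inventory group "nebula" and the hostname
# lighthouse01.example.com are placeholders for illustration.
- name: Apply nebula to the mesh
  hosts: nebula
  become: true
  vars:
    # Exactly one play host must act as the CA (see nebula_ca_host / nebula_is_ca).
    nebula_ca_host: lighthouse01.example.com
  roles:
    - rockylinux.nebula
```

Per-host values such as `nebula_ip`, `nebula_am_lighthouse`, and `nebula_is_ca` would normally live in host_vars rather than at the play level.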
diff --git a/defaults/main.yml b/defaults/main.yml
index e6885f4..ffa116e 100644
--- a/defaults/main.yml
+++ b/defaults/main.yml
@@ -93,7 +93,8 @@ nebula_logging_disable_timestamp: false
 nebula_firewall_conntrack_tcp_timeout: "12m"
 nebula_firewall_conntrack_udp_timeout: "3m"
 nebula_firewall_conntrack_default_timeout: "10m"
-nebula_firewall_conntrack_max_connections: "100000"
+# nebula_firewall_outbound_action: "drop"
+# nebula_firewall_inbound_action: "drop"
 
 nebula_firewall_inbound_rules:
   - port: any
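The commented-out action variables introduced here pair with the `is defined` guards added to `templates/config.yml.j2` further down: the actions are only rendered when a caller opts in. A hedged sketch of group_vars that would exercise them, assuming the defaults above (the `drop` value mirrors the commented default, and the inbound rule is purely illustrative):

```yaml
---
# Sketch of group_vars/nebula.yml (assumed path), opting in to the new
# firewall action variables and overriding the default inbound rules.
nebula_firewall_outbound_action: "drop"
nebula_firewall_inbound_action: "drop"
nebula_firewall_inbound_rules:
  - port: 22
    proto: tcp
    host: any
```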
diff --git a/meta/main.yml b/meta/main.yml
index 08f5158..3957b54 100644
--- a/meta/main.yml
+++ b/meta/main.yml
@@ -17,7 +17,7 @@ galaxy_info:
   # - GPL-3.0-only
   # - Apache-2.0
   # - CC-BY-4.0
-  # license: GPL-3.0-only
+  license: GPL-3.0-only
   min_ansible_version: 2.14
   platforms:
     - name: EL
diff --git a/tasks/determine_os.yml b/tasks/determine_os.yml
index 46ce128..3017f49 100644
--- a/tasks/determine_os.yml
+++ b/tasks/determine_os.yml
@@ -1,29 +1,34 @@
 ---
-- name: Check that this system is in the Red Hat family
-  ansible.builtin.assert:
-    that:
-      - ansible_os_family == "RedHat"
-    success_msg: "This is a RedHat family system"
-    fail_msg: "This is NOT a RedHat family system. Goodbye."
+- name: Check Red Hat distributions
+  when: ansible_os_family == "RedHat"
+  block:
+    - name: Check that this system is in the Red Hat family
+      ansible.builtin.assert:
+        that:
+          - ansible_os_family == "RedHat"
+        success_msg: "This is a RedHat family system"
+        fail_msg: "This is NOT a RedHat family system. Goodbye."
 
-- name: Supported distributions only
-  ansible.builtin.assert:
-    that:
-      - (ansible_distribution == "Rocky") or (ansible_distribution == "Fedora")
-    success_msg: "System is supported"
-    fail_msg: "System is NOT supported"
+    - name: Supported distributions only
+      ansible.builtin.assert:
+        that:
+          - (ansible_distribution == "Rocky") or (ansible_distribution == "Fedora")
+        success_msg: "System is supported"
+        fail_msg: "System is NOT supported"
 
-- name: EL - Check that major versions are valid
-  ansible.builtin.assert:
-    that:
-      - ansible_distribution_major_version|int >= 8
-    success_msg: "Supported major version of Enterprise Linux"
-    fail_msg: "This major version is not supported"
+    - name: EL - Check that major versions are valid
+      when: ansible_distribution == "Rocky"
+      ansible.builtin.assert:
+        that:
+          - ansible_distribution_major_version|int >= 8
+        success_msg: "Supported major version of Enterprise Linux"
+        fail_msg: "This major version is not supported"
 
-- name: Fedora - Check that major versions are valid
-  ansible.builtin.assert:
-    that:
-      - ansible_distribution_major_version|int >= 39
-    success_msg: "Supported major version of Fedora"
-    fail_msg: "This major version is not supported"
+    - name: Fedora - Check that major versions are valid
+      when: ansible_distribution == "Fedora"
+      ansible.builtin.assert:
+        that:
+          - ansible_distribution_major_version|int >= 39
+        success_msg: "Supported major version of Fedora"
+        fail_msg: "This major version is not supported"
 ...
diff --git a/tasks/download.yml b/tasks/install_download.yml
similarity index 100%
rename from tasks/download.yml
rename to tasks/install_download.yml
diff --git a/tasks/install.yml b/tasks/install_pkg.yml
similarity index 100%
rename from tasks/install.yml
rename to tasks/install_pkg.yml
diff --git a/tasks/main.yml b/tasks/main.yml
index be313f2..29cbc6d 100644
--- a/tasks/main.yml
+++ b/tasks/main.yml
@@ -17,19 +17,19 @@
   when: nebula_is_member|bool
 
 - name: Install nebula via package manager
-  ansible.builtin.import_tasks: install.yml
+  ansible.builtin.import_tasks: install_pkg.yml
   when: nebula_use_native_package|bool
 
 - name: Install nebula via download
-  ansible.builtin.import_tasks: download.yml
+  ansible.builtin.import_tasks: install_download.yml
   when:
     - not nebula_use_native_package|bool
 
 - name: Install nebula CA
-  ansible.builtin.import_tasks: ca.yml
+  ansible.builtin.import_tasks: setup_ca.yml
   when: nebula_is_ca|bool
 
 - name: Configure member of mesh
-  ansible.builtin.import_tasks: member.yml
+  ansible.builtin.import_tasks: setup_member.yml
   when: nebula_is_member|bool
 ...
diff --git a/tasks/ca.yml b/tasks/setup_ca.yml
similarity index 100%
rename from tasks/ca.yml
rename to tasks/setup_ca.yml
diff --git a/tasks/member.yml b/tasks/setup_member.yml
similarity index 100%
rename from tasks/member.yml
rename to tasks/setup_member.yml
diff --git a/templates/config.yml.j2 b/templates/config.yml.j2
index ec885e2..beb4efc 100644
--- a/templates/config.yml.j2
+++ b/templates/config.yml.j2
@@ -1,4 +1,4 @@
-# Nebula Configuration {{ ansible_managed }}
+# Nebula Configuration ({{ ansible_managed }})
 
 # PKI
 pki:
@@ -9,7 +9,8 @@ pki:
   disconnect_invalid: {{ nebula_pki_disconnect_invalid }}
 {% endif %}
 {% if nebula_pki_blocklist|length >= 1 %}
-  {{ nebula_pki_blocklist | to_nice_yaml(indent=2) | indent(width=2) }}
+  blocklist:
+    {{ nebula_pki_blocklist | to_nice_yaml(indent=2) | indent(width=4) }}
 {% endif %}
 
 # static host map
@@ -41,7 +42,7 @@ lighthouse:
 {% if not nebula_am_lighthouse %}
 {% for host in ansible_play_hosts_all %}
 {% if (hostvars[host]['nebula_am_lighthouse']|default(false)) and (hostvars[host]['nebula_is_member']|default(true)) %}
-    - {{ hostvars[host]['nebula_ip'].split('/')[0] }}
+    - '{{ hostvars[host]['nebula_ip'].split('/')[0] }}'
 {% endif %}
 {% endfor %}
 {% endif %}
@@ -117,11 +118,16 @@ logging:
   disable_timestamp: {{ nebula_logging_disable_timestamp }}
 
 firewall:
+{% if nebula_firewall_outbound_action is defined %}
+  outbound_action: {{ nebula_firewall_outbound_action }}
+{% endif %}
+{% if nebula_firewall_inbound_action is defined %}
+  inbound_action: {{ nebula_firewall_inbound_action }}
+{% endif %}
   conntrack:
     tcp_timeout: {{ nebula_firewall_conntrack_tcp_timeout }}
     udp_timeout: {{ nebula_firewall_conntrack_udp_timeout }}
     default_timeout: {{ nebula_firewall_conntrack_default_timeout }}
-    max_connections: {{ nebula_firewall_conntrack_max_connections }}
   inbound:
 {{ nebula_firewall_inbound_rules | to_nice_yaml(indent=2) | indent(width=4) }}
   outbound:
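With the group_vars sketched earlier and the role's conntrack defaults, the firewall block of the rendered config would be expected to come out roughly as follows. This is an approximation of the template output, not captured from a real run, and the outbound rule shown assumes the role's permissive default:

```yaml
# Approximate render of the firewall section of the generated nebula config.
firewall:
  outbound_action: drop
  inbound_action: drop
  conntrack:
    tcp_timeout: 12m
    udp_timeout: 3m
    default_timeout: 10m
  inbound:
    - port: 22
      proto: tcp
      host: any
  outbound:
    - port: any
      proto: any
      host: any
```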
diff --git a/tests/ansible.cfg b/tests/ansible.cfg
new file mode 100644
index 0000000..2f3b0ae
--- /dev/null
+++ b/tests/ansible.cfg
@@ -0,0 +1,45 @@
+[defaults]
+host_key_checking = False
+retry_files_enabled = False
+roles_path = ../../
+collections_paths = ../../../collections
+remote_user = ansible
+ansible_managed = RESF
+timeout = 3
+callbacks_enabled = ansible.posix.profile_roles
+
+[privilege_escalation]
+;become=True
+;become_method=sudo
+;become_user=root
+;become_ask_pass=False
+
+[persistent_connection]
+
+[connection]
+
+[colors]
+
+[selinux]
+
+[diff]
+
+[galaxy]
+
+[inventory]
+enable_plugins = host_list, virtualbox, yaml, constructed, script, ini, auto
+
+[netconf_connection]
+
+[paramiko_connection]
+record_host_keys = False
+
+[jinja2]
+
+[tags]
+
+[ssh_connection]
+ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s -o StrictHostKeyChecking=no
+pipelining = True
+control_path = /tmp/ansible-role-nebula-%%h%%p%%r
+retries = 10
diff --git a/tests/test.yml b/tests/test.yml
index c79f336..b1bcb8f 100644
--- a/tests/test.yml
+++ b/tests/test.yml
@@ -1,6 +1,31 @@
 ---
-- hosts: localhost
-  remote_user: root
+- name: Check that nebula hosts are not empty
+  hosts: localhost
+  any_errors_fatal: true
+  tasks:
+    - name: Check for one host
+      ansible.builtin.assert:
+        that: (groups['nebula'] | default([])) | length > 0
+        fail_msg: "No hosts configured. Ending test."
+        success_msg: "There are hosts found in the group."
+
+- name: Setup nebula
+  hosts: nebula
+  strategy: free
+  become: true
   roles:
     - rockylinux.nebula
+
+- name: Verify they can ping
+  hosts: nebula
+  strategy: free
+  tasks:
+    - name: Ping all nebula hosts
+      ansible.builtin.command: "ping -W 1 -c 3 {{ hostvars[item]['nebula_ip'].split('/')[0] }}"
+      changed_when: "1 != 1"
+      register: ping_check
+      until: ping_check is succeeded
+      retries: 15
+      delay: 10
+      loop: "{{ ansible_play_hosts_all }}"
 ...
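The reworked `tests/test.yml` now refuses to run unless the inventory defines a `nebula` group. A minimal YAML inventory along these lines would satisfy the first play's assertion; the hostnames, addresses, and file name are assumptions, while the group name and variables match what the test and role expect. Running `ansible-playbook -i inventory.yml test.yml` from the `tests/` directory also picks up the new `tests/ansible.cfg`.

```yaml
---
# inventory.yml (assumed name): one lighthouse/CA and one ordinary member.
nebula:
  hosts:
    lighthouse01.example.com:
      nebula_am_lighthouse: true
      nebula_is_ca: true
      nebula_ip: 10.100.0.1/24
    node01.example.com:
      nebula_ip: 10.100.0.2/24
  vars:
    nebula_ca_host: lighthouse01.example.com
```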