commit 1234ef51c02be0a5bff8b4bce72ab5bebd8a7a24 Author: Ryan Cuda Date: Sun Jan 25 08:31:56 2026 -0700 first commit diff --git a/README.md b/README.md new file mode 100644 index 0000000..f93769d --- /dev/null +++ b/README.md @@ -0,0 +1,1113 @@ +# RHCE Practice Lab + +This repo contains all the files needed to deploy an RHCE practice lab. The target infrastructure is Openshift Virtualuzation, and network services (eg; DNS) are handled by OPNsense. Once deployed, the lab consists of 7 VMs: + +- controller +- utility +- node1 +- node2 +- node3 +- node4 +- node5 + +The lab uses the domain name `lab.example.com`. + +All of the files needed to complete the tasks on the exam are hosted on the utility server, eg; [http://utility.lab.example.com](http://utility.lab.example.com/files) + +You will perform all tasks as the `ansible` user on the `controller` node from the directory `/home/ansible/ansible`. + +The `ansible` user's password is `ansible` (really original, I know). + +Unless otherwise specified, the password for any vaulted files is `redhat`. + +The lab is easily deployed with the following command: + +`ansible-playbook create-lab.yml -e @vault.yml --vault-password-file vault-password` + +The lab can be torn down by running the command: + +`ansible-playbook destroy-lab.yml` + +**Helpful hints:** + +`ansible localhost -m setup` to print system facts. You may want to pipe that out to a text file to avoid having to run the command repeatedly and save yourself some time. + +`ansible-config init --disabled > ansible.cfg` to generate a config file with all options commented. 
+ +You can use `ansible.builtin.debug` to print out things like facts to make sure your syntax is correct, eg; + +``` +# printfacts.yml +- name: Print facts + hosts: jump01.lab.cudanet.org + gather_facts: true + remote_user: root + + tasks: + - name: print facts + ansible.builtin.debug: + msg: "The default IPv4 address for {{ inventory_hostname }} is {{ ansible_default_ipv4.address }}" +``` + +## Task 1. + +**install and configure ansible:** + +i) Install podman, ansible-core and ansible-navigator. /etc/yum.redos.d/rhce.repo should already be configured to pull packages from utility.lab.example.com. + +
+ +solution + +`dnf -y install podman ansible-core ansible-navigator` + +
+ +ii) configure ansible.cfg to install collections by default to `~/ansible/mycollections` and roles to `~/ansible/roles` + +
+ +solution + +``` +# ansible.cfg +[defaults] +inventory = /home/ansible/ansible/inventory +remote_user = ansible +roles_path = /home/ansible/ansible/roles +collections_path = /home/ansible/ansible/mycollections +``` + +
+ + +iii) configure `inventory` as follows: + +node1 is in the dev group. +node2 is in the test group. +nodes 3 and 4 are in the prod group. +node5 is in the balancers group. +the prod group is in the webservers group. + +
+ +solution + +``` +# inventory +[dev] +node1 + +[test] +node2 + +[prod] +node3 +node4 + +[balancers] +node5 + +[webservers:children] +prod +``` + +
+ +iv) ansible-navigator.yml is configured to pull the EE image from the utility server if missing. The registry is located at utility.lab.example.com:5000 + +
+ +solution + +``` +# ansible-navigator.yml +--- +ansible-navigator: + execution-environment: + image: utility.lab.example.com:5000/ee-supported-rhel9:latest + pull: + policy: missing + playbook-artifact: + enable: false +``` + +
+ +*NOTE: You're basically going to have to memorize the contents of this file, because unlike `ansible.cfg` there is no way to generate an ansible-navigator.yml file with dummy values.* + +## Task 2. + +**manage repositories:** + +Write a playbook called `repos.yml` to add the BaseOS and AppStream repos to all managed hosts with GPG check enabled. Mirror is located at http://utility.lab.example.com/rhel9/ + +
+ +solution + +``` +--- +# repos.yml +- name: Add BaseOS and AppStream repos to all hosts + hosts: all + become: true + + vars: + repos: + - BaseOS + - AppStream + + baseurl: http://utility.lab.example.com/rhel9 + gpgkey_url: http://utility.lab.example.com/rhel9/RPM-GPG-KEY-redhat-release + repo_file: /etc/yum.repos.d/rhce + + tasks: + - name: Add {{ item }} repository + ansible.builtin.yum_repository: + name: "EX294_{{ item }}" + description: "EX294 {{ item }} Repository" + baseurl: "{{ baseurl }}/{{ item }}" + enabled: true + gpgcheck: true + gpgkey: "{{ gpgkey_url }}" + file: "{{ repo_file }}" + loop: "{{ repos }}" + +``` + +
+ +## Task 3. +**install roles and collections:** + +i) Install collections for `ansible.posix`, `community.general` and `redhat.rhel_system_roles` to '~/ansible/mycollections/'. Collections are hosted at [http://utility.lab.example.com/files/](http://utility.lab.example.com/files/) + +ii) install the `balancer` and `phpinfo` roles from [http://utility.lab.example.com/files](http://utility.lab.example.com/files) using a `requirements.yml` file. + +*NOTE: although, not a requirement, you can specify both roles and collections in your requirements file* + +
+ +solution + +``` +# requirements.yml +--- +roles: + - name: phpinfo + src: http://utility.lab.example.com/files/phpinfo.tar.gz + path: /home/ansible/ansible/roles + + - name: balancer + src: http://utility.lab.example.com/files/haproxy.tar.gz + path: /home/ansible/ansible/roles + +collections: + - name: ansible.posix + source: http://utility.lab.example.com/files/ansible-posix-2.1.0.tar.gz + type: url + + - name: redhat.rhel_system_roles + source: http://utility.lab.example.com/files/redhat-rhel_system_roles-1.108.6.tar.gz + type: url + + - name: community.general + source: http://utility.lab.example.com/files/community-general-12.1.0.tar.gz + type: url +``` +``` +# bash +mkdir -p /home/ansible/ansible/{roles,mycollections} +ansible-galaxy role install -r requirements.yml +ansible-galaxy collection install -r requirements.yml -p /home/ansible/ansible/mycollections +``` + +
+ +## Task 4: + +**install packages and groups:** + +Write a playbook called `install.yml` to install `php` and `httpd` on the `test` group, and `RPM Development Tools` group in `dev` group only + +
+ +solution + +``` +# install.yml +--- +- name: Install Packages and Groups + hosts: all + + become: true + + tasks: + - name: Install packages on test group + ansible.builtin.dnf: + name: + - httpd + - php + state: latest + when: inventory_hostname in groups['test'] + + - name: Install RPM Development Tools group on dev group + ansible.builtin.dnf: + name: "@RPM Development Tools" + state: latest + when: inventory_hostname in groups['dev'] +``` + +
+ +## Task 5. + +**create a role:** + +i) Create a role called `apache` to install, start and persistently enable `httpd` and `firewalld`. + +
+ +solution + +``` +# defaults/main.yml +--- +apache_packages: + - httpd + - firewalld +``` +``` +# handlers/main.yml +--- +- name: restart httpd + ansible.builtin.service: + name: httpd + state: restarted +``` + +
+ +*NOTE: You can create the basic filestructure of the role with `ansible-galaxy role init apache`* + +
+ +solution + +``` +apache/ +├── defaults/ +│ └── main.yml +├── handlers/ +│ └── main.yml +├── tasks/ +│ └── main.yml +├── templates/ +│ └── index.html.j2 +└── meta/ + └── main.yml +``` + +
+ +ii) Allow the HTTP traffic through the firewall. + +
+ +solution + +``` +# tasks/main.yml +--- +- name: Install httpd and firewalld + ansible.builtin.package: + name: "{{ apache_packages }}" + state: present + +- name: Enable and start firewalld + ansible.builtin.service: + name: firewalld + state: started + enabled: true + +- name: Enable and start httpd + ansible.builtin.service: + name: httpd + state: started + enabled: true + +- name: Allow HTTP service through firewalld + ansible.posix.firewalld: + service: http + permanent: true + state: enabled + immediate: true + +- name: Deploy index.html with FQDN and IPv4 + ansible.builtin.template: + src: index.html.j2 + dest: /var/www/html/index.html + owner: root + group: root + mode: '0644' + notify: restart httpd +``` +``` +# handlers/main.yml +--- +- name: restart httpd + ansible.builtin.service: + name: httpd + state: restarted +``` + +
+ +iii) Populate out `index.html` with FQDN and IPv4 address using a jinja2 template, pulling those variables from ansible facts. + +
+ +solution + +``` +# templates/index.html.j2 + + + + Apache Test Page + + +

Apache is working

+

FQDN: {{ ansible_facts.fqdn }}

+

IPv4 Address: {{ ansible_facts.default_ipv4.address }}

+ + +``` + +
+ +iv) Finally, run the role against the `dev` group + +
+ +solution + +``` +# apache.yml +--- +- name: Configure Apache web servers + hosts: dev + become: true + roles: + - apache +``` + +
+ +## Task 6. + +**use a role:** + +i) Use roles to apply the `balancer` role to the `balancers` group and `phpinfo` role to `webservers` group. Servers with the `phpinfo` role applied should report the FQDN and IP address of the web server, and refreshing the web browser should round robin between nodes 3 and 4. You should have already installed these roles in task 3. + +
+ +solution + +``` +# roles.yml +--- +- name: Configure load balancer + hosts: balancers + become: yes + roles: + - balancer + +- name: Configure web servers + hosts: webservers + become: yes + roles: + - phpinfo +``` + +
+ +## Task 7. + +**manage SELinux:** + +i) Use the `ansible.posix.selinux` module to configure SELinux to be enabled and enforcing on all managed hosts. Don't forget - changes to SELinux require a reboot to take effect. + +
+ +solution + +``` +--- +- name: Ensure SELinux is enabled and enforcing + hosts: all + become: true + + tasks: + - name: Set SELinux to enforcing + ansible.posix.selinux: + policy: targeted + state: enforcing + notify: Reboot if SELinux state changed + + handlers: + - name: Reboot if SELinux state changed + ansible.builtin.reboot: + msg: "Rebooting to apply SELinux changes" + reboot_timeout: 600 +``` + +
+ +## Task 8. + +**manage file content:** + +i) Populate `/etc/issue` with the name of the lifecycle environment, eg; "Development" for `dev`, "Testing" for `test` and "Production" for `prod`. + +
+ +solution + +``` +# issue.yml +--- +- name: Automatically populate /etc/issue with environment name + hosts: + - dev + - test + - prod + become: true + + tasks: + - name: Determine environment name from inventory groups + ansible.builtin.set_fact: + env_name: >- + {% if 'prod' in group_names %} + Production + {% elif 'test' in group_names %} + Testing + {% elif 'dev' in group_names %} + Development + {% endif %} + + - name: Populate /etc/issue + ansible.builtin.copy: + dest: /etc/issue + content: | + {{ env_name }} + owner: root + group: root + mode: '0644' +``` + +
+ +## Task 9. + +**manage storage:** + +i) Write a playbook called `partition.yml`. It should create a 1500MiB partition on vdb as ext4 mounted at /devmount, a 1500MiB partition on vdc as ext4 mounted at /devmount1, unless there isn't enough space on vdc, in which case make it 800MiB and print a message stating such. Check for vde. If there is no vde present, print message stating there's no such drive. + +*NOTE: My exam said to create partitions, but all examples I've seen point to logical volumes. Maybe practice both?* + +
+ +solution + +``` +# partition.yml +--- +- name: Configure disk partitions and mounts + hosts: all + become: true + gather_facts: true + + tasks: + #################################################################### + # /dev/vdb — always create 1500MB partition mounted at /devmount + #################################################################### + - name: Create 1500MB partition on /dev/vdb + community.general.parted: + device: /dev/vdb + number: 1 + state: present + part_end: 1500MiB + + - name: Create XFS filesystem on /dev/vdb1 + ansible.builtin.filesystem: + fstype: xfs + dev: /dev/vdb1 + + - name: Mount /dev/vdb1 at /devmount + ansible.builtin.mount: + path: /devmount + src: /dev/vdb1 + fstype: xfs + state: mounted + + #################################################################### + # /dev/vdc — size-based logic (1500MB or 800MB) + #################################################################### + - name: Determine size of /dev/vdc partition + ansible.builtin.set_fact: + vdc_part_size: >- + {{ '1500MiB' + if (ansible_facts.devices.vdc.sectors | int * + ansible_facts.devices.vdc.sectorsize | int) >= (1500 * 1024 * 1024) + else '800MiB' }} + when: "'vdc' in ansible_facts.devices" + + - name: Create partition on /dev/vdc + community.general.parted: + device: /dev/vdc + number: 1 + state: present + part_end: "{{ vdc_part_size }}" + when: "'vdc' in ansible_facts.devices" + + - name: Create XFS filesystem on /dev/vdc1 + ansible.builtin.filesystem: + fstype: xfs + dev: /dev/vdc1 + when: "'vdc' in ansible_facts.devices" + + - name: Mount /dev/vdc1 + ansible.builtin.mount: + path: >- + {{ '/devmount1' + if vdc_part_size == '1500MiB' + else '/dev/mount' }} + src: /dev/vdc1 + fstype: xfs + state: mounted + when: "'vdc' in ansible_facts.devices" + + #################################################################### + # /dev/vde presence check + #################################################################### + - name: Warn if /dev/vde is not 
present + ansible.builtin.debug: + msg: "Disk /dev/vde is not present" + when: "'vde' not in ansible_facts.devices" +``` + +
+ +## Task 10. + +**manage directories and symlinks:** + +i) create the directory `/webdev` with `U=RWX,G=RWX,O=RX` permissions. It should be owned by `webdev` group. It should have special permissions `set group id` (I think that means 2775 in octal). Symlink from `/webdev > /var/www/html/webdev`

create `/webdev/index.html` to report hostname and ip address. + +Allow traffic through the firewall for http. + +It should be browseable by the dev group. + +
+ +solution + +``` +# webcontent.yml +--- +- name: Configure restricted web content for dev hosts + hosts: dev + become: true + gather_facts: true + + tasks: + # ---------------- SELinux ---------------- + - name: Ensure SELinux is enforcing + ansible.posix.selinux: + policy: targeted + state: enforcing + + - name: Install SELinux utilities + ansible.builtin.package: + name: policycoreutils-python-utils + state: present + + # ---------------- Groups & Users ---------------- + - name: Ensure webdev group exists + ansible.builtin.group: + name: webdev + state: present + + - name: Add ansible user to webdev group + ansible.builtin.user: + name: ansible + groups: webdev + append: true + + # ---------------- Web Content ---------------- + - name: Create /webdev directory with setgid permissions + ansible.builtin.file: + path: /webdev + state: directory + owner: root + group: webdev + mode: "2775" + + - name: Create index.html using Ansible facts + ansible.builtin.copy: + dest: /webdev/index.html + owner: root + group: webdev + mode: "0644" + content: | + + + WebDev Host Info + +

WebDev Page

+

Hostname: {{ ansible_facts['hostname'] }}

+

IP Address: {{ ansible_facts['default_ipv4']['address'] }}

+ + + + # ---------------- Apache + Symlink ---------------- + - name: Create symlink from /webdev to /var/www/html/webdev + ansible.builtin.file: + src: /webdev + dest: /var/www/html/webdev + state: link + force: true + + # ---------------- SELinux Context ---------------- + - name: Allow Apache to read /webdev via SELinux + ansible.builtin.command: + cmd: semanage fcontext -a -t httpd_sys_content_t "/webdev(/.*)?" + register: semanage_result + failed_when: semanage_result.rc not in [0,1] + + - name: Apply SELinux context + ansible.builtin.command: restorecon -Rv /webdev + changed_when: false + + # ---------------- Firewall ---------------- + - name: Ensure firewalld is started and enabled + ansible.builtin.service: + name: firewalld + state: started + enabled: true + + - name: Allow HTTP through firewall + ansible.posix.firewalld: + service: http + permanent: true + immediate: true + state: enabled + + # ---------------- Apache Access Control ---------------- + - name: Restrict access to webdev content to node1 only + ansible.builtin.copy: + dest: /etc/httpd/conf.d/webdev.conf + owner: root + group: root + mode: "0644" + content: | + + Options FollowSymLinks + Require all granted + Require ip 127.0.0.1 + Require ip {{ ansible_facts['default_ipv4']['address'] }} + + + # ---------------- Services ---------------- + - name: Ensure httpd is started and enabled + ansible.builtin.service: + name: httpd + state: started + enabled: true + + - name: Restart httpd to apply configuration + ansible.builtin.service: + name: httpd + state: restarted +``` + +
+ +## Task 11. + +**manage file content with templates:** + +populate /etc/myhosts using hosts.j2 template and hosts.yml. Do not modify hosts.yml at all, it should handle all of the looping through the hosts in the template file +use a for loop on the j2 template to loop through each host + +
+ +solution + +``` +# hosts.j2 +127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 +::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 +{% for node in groups['all'] %} +{{ hostvars[node]['ansible_facts']['default_ipv4']['address'] }} {{ hostvars[node]['ansible_facts']['fqdn'] }} {{ hostvars[node]['ansible_facts']['hostname'] }} +{% endfor %} +``` + +``` +# hosts.yml +- name: Hosts config deploy + hosts: all + become: true + tasks: + + - name: Template a file to /etc/myhosts + when: inventory_hostname in groups['dev'] + ansible.builtin.template: + src: ./hosts.j2 + dest: /etc/myhosts +``` + +
+ +## Task 12. + +**modify file contents:** + +Download `hwreport.empty` from `utility.lab.example.com` to `/root/hwreport.txt` on all hosts. + +Replace key value pairs for hostname, bios version, memoryMiB, size of vda, vdb and vdc. If device does not exist, put NONE. + +
+ +solution + +``` +# hwreport.yml +--- +- name: Generate hardware report + hosts: all + become: yes + + tasks: + - name: Download empty hwreport file + get_url: + url: http://utility.lab.example.com/files/hwreport.empty + dest: /root/hwreport.txt + mode: '0644' + + - name: Set hostname + lineinfile: + path: /root/hwreport.txt + regexp: '^HOST=' + line: "HOST={{ ansible_hostname }}" + + - name: Set BIOS version + lineinfile: + path: /root/hwreport.txt + regexp: '^BIOS=' + line: "BIOS={{ ansible_bios_version | default('NONE') }}" + + - name: Set memory size + lineinfile: + path: /root/hwreport.txt + regexp: '^MEMORY=' + line: "MEMORY={{ ansible_memtotal_mb }} MB" + + - name: Set vdb disk size + lineinfile: + path: /root/hwreport.txt + regexp: '^VDB=' + line: "VDB={{ ansible_devices.vdb.size | default('NONE') }}" + + - name: Set vdc disk size + lineinfile: + path: /root/hwreport.txt + regexp: '^VDC=' + line: "VDC={{ ansible_devices.vdc.size | default('NONE') }}" + + - name: Set vdd disk size (NONE if missing) + lineinfile: + path: /root/hwreport.txt + regexp: '^VDD=' + line: >- + VDD={{ ansible_devices.vdd.size if 'vdd' in ansible_devices else 'NONE' }} +``` + +
+ +## Task 13. + +**use ansible vault to encrypt a file:** + +Create an encrypted variable file called `locker.yml` which should contain two variables and their values. + +*pw_developer is value imadev* +*pw_manager is value imamgr* + +`locker.yml` file should be encrypted using the password `whenyouwishuponastar` + +store the password in a file named `secret.txt`, which is used to encrypt the variable file. + +
+ +solution + +``` +# secret.txt +echo "whenyouwishuponastar" > secret.txt +chmod 600 secret.txt +``` +``` +# locker.yml +pw_developer: imadev +pw_manager: imamgr +``` +`ansible-vault encrypt locker.yml --vault-password-file secret.txt` + +
+ +## Task 14. + +**manage users:** + +Download the variable file "http://utility.lab.example.com/files/user_list.yml" and write a playbook named "users.yml" and then run the playbook on all the nodes using two variable files user_list.yml and locker.yml. + +i) + + * Create a group opsdev + + * Create users from the users variable whose job is equal to developer; they need to be in the opsdev group + + * Assign a password using SHA512 format and run playbook on dev and test group. + + * User password is {{ pw_developer }} + +ii) + + * Create a group opsmgr + + * Create users from the users variable whose job is equal to manager; they need to be in the opsmgr group + + * Assign a password using SHA512 format and run playbook on prod group. + + * User password is {{ pw_manager }} + +iii) Use when condition for each play + +
+ +solution + +``` +# user_list.yml +users: + - name: Fred + role: manager + + - name: Wilma + role: manager + + - name: Barney + role: developer + + - name: Betty + role: developer +``` +``` +# users.yml +--- +- name: Download user_list.yml variable file + hosts: all + gather_facts: false + tasks: + - name: Download user_list.yml + ansible.builtin.get_url: + url: http://utility.lab.example.com/files/user_list.yml + dest: ./user_list.yml + run_once: true + delegate_to: localhost + +- name: Create developer users on dev and test + hosts: dev:test + become: true + vars_files: + - user_list.yml + - locker.yml + tasks: + - name: Ensure opsdev group exists + ansible.builtin.group: + name: opsdev + state: present + + - name: Create developer users + ansible.builtin.user: + name: "{{ item.name }}" + groups: opsdev + append: yes + password: "{{ pw_developer | password_hash('sha512') }}" + state: present + loop: "{{ users }}" + when: item.role == "developer" + + +- name: Create manager users on prod + hosts: prod + become: true + vars_files: + - user_list.yml + - locker.yml + tasks: + - name: Ensure opsmgr group exists + ansible.builtin.group: + name: opsmgr + state: present + + - name: Create manager users + ansible.builtin.user: + name: "{{ item.name }}" + groups: opsmgr + append: yes + password: "{{ pw_manager | password_hash('sha512') }}" + state: present + loop: "{{ users }}" + when: item.role == "manager" +``` + +`ansible-navigator run -m stdout users.yml --vault-password-file secret.txt` + +
+ +## Task 15. + +**re-encrypt a vaulted file:** + +Rekey variable file from [http://utility.lab.example.com/files/salaries.yml](http://utility.lab.example.com/files/salaries.yml) + +i) Old password: changeme + +ii) New password: redhat + +
+ +solution + +``` +# salaries.yml +fred: $100000 +wilma: $100000 +barney: $100000 +betty: $100000 +``` + +`wget http://utility.lab.example.com/files/salaries.yml` + +`ansible-vault rekey salaries.yml` + +``` +Vault password: changeme +New Vault password: redhat +Confirm New Vault password: redhat +``` + +
+ +## Task 16. + +**manage cron:** + +Create a cronjob for the user ansible on all nodes, playbook name is crontab.yml and the job details are below: + + i) Every 2 minutes the job will execute logger "EX294 in progress". + +
+ +solution + +``` +# cron.yml +--- +- name: Create cron job for user ansible + hosts: all + become: true + tasks: + - name: Ensure cron job runs every 2 minutes + ansible.builtin.cron: + name: "EX294 progress log" + user: ansible + minute: "*/2" + job: 'logger "EX294 in progress"' + state: present +``` + +
+ +## Task 17. + +**Use the RHEL timesync system role:** + + i) Create a playbook called "timesync.yml" that: + - Runs on all managed nodes + - Uses the timesync role + - Configures the role to use the currently active NTP provider + - Configure the role to use the time server utility.lab.example.com + - Configure the role to enable the iburst parameter + +
+ +solution + +``` +# timesync.yml +- name: Configure time synchronization using RHEL timesync role + hosts: all + become: true + roles: + - role: redhat.rhel_system_roles.timesync + vars: + timesync_ntp_provider: auto + timesync_ntp_servers: + - hostname: utility.lab.example.com + iburst: true +``` + +
+ +## Task 18. + +**configure MOTD:** + +Create a playbook called motd.yml. + + i) Run the playbook. + ii) Whenever you ssh into any node (node1 here), the message will be as follows: + Welcome to node1 + OS: RedHat 9.4 + Architecture: x86_64 + +
+ +solution + +``` +# motd.yml +--- +- name: Configure MOTD for all nodes + hosts: all + become: true + gather_facts: true + tasks: + - name: Set MOTD file + ansible.builtin.copy: + dest: /etc/motd + content: | + Welcome to {{ inventory_hostname }} + OS: {{ ansible_distribution }} {{ ansible_distribution_version }} + Architecture: {{ ansible_architecture }} + owner: root + group: root + mode: '0644' +``` + +
diff --git a/ansible-posix-2.1.0.tar.gz b/ansible-posix-2.1.0.tar.gz new file mode 100644 index 0000000..11a626d Binary files /dev/null and b/ansible-posix-2.1.0.tar.gz differ diff --git a/apache.tar.gz b/apache.tar.gz new file mode 100644 index 0000000..9bdd0fe Binary files /dev/null and b/apache.tar.gz differ diff --git a/community-general-12.1.0.tar.gz b/community-general-12.1.0.tar.gz new file mode 100644 index 0000000..aa65ca5 Binary files /dev/null and b/community-general-12.1.0.tar.gz differ diff --git a/create-lab.yml b/create-lab.yml new file mode 100644 index 0000000..bb0c9e1 --- /dev/null +++ b/create-lab.yml @@ -0,0 +1,575 @@ +--- +############################################################################### +# Combined Ansible Playbook +############################################################################### + +############################################################################### +# Play 1: Deploy multiple KubeVirt VMs from Block PVC template with cloud-init +############################################################################### +- name: Deploy multiple VMs from Block template PVC with cloud-init ISO + hosts: localhost + gather_facts: false + collections: + - kubernetes.core + + vars: + namespace: default + vm_domain: lab.example.com + + rootdisk_size: 64Gi + disk2_size: 2Gi + disk3_size: 1Gi + + vm_list: + - name: controller + ip: 10.4.0.100 + source_pvc: rhce-template + - name: node1 + ip: 10.4.0.101 + source_pvc: rhce-template + - name: node2 + ip: 10.4.0.102 + source_pvc: rhce-template + - name: node3 + ip: 10.4.0.103 + source_pvc: rhce-template + - name: node4 + ip: 10.4.0.104 + source_pvc: rhce-template + - name: node5 + ip: 10.4.0.105 + source_pvc: rhce-template + - name: utility + ip: 10.4.0.106 + source_pvc: utility-template + + tasks: + ########################################################################### + # Create PVCs + ########################################################################### + - name: 
Create rootdisk PVC from template + k8s: + state: present + definition: + apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: "{{ item.name }}-rootdisk" + namespace: "{{ namespace }}" + spec: + storageClassName: ocs-storagecluster-ceph-rbd-virtualization + accessModes: + - ReadWriteMany + volumeMode: Block + resources: + requests: + storage: "{{ rootdisk_size }}" + dataSource: + name: "{{ item.source_pvc }}" + kind: PersistentVolumeClaim + apiGroup: "" + loop: "{{ vm_list }}" + loop_control: + label: "{{ item.name }}" + + - name: Create disk2 PVC (2Gi) + k8s: + state: present + definition: + apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: "{{ item.name }}-disk2" + namespace: "{{ namespace }}" + spec: + storageClassName: ocs-storagecluster-ceph-rbd-virtualization + accessModes: + - ReadWriteMany + volumeMode: Block + resources: + requests: + storage: "{{ disk2_size }}" + loop: "{{ vm_list }}" + loop_control: + label: "{{ item.name }}-disk2" + + - name: Create disk3 PVC (1Gi) + k8s: + state: present + definition: + apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: "{{ item.name }}-disk3" + namespace: "{{ namespace }}" + spec: + storageClassName: ocs-storagecluster-ceph-rbd-virtualization + accessModes: + - ReadWriteMany + volumeMode: Block + resources: + requests: + storage: "{{ disk3_size }}" + loop: "{{ vm_list }}" + loop_control: + label: "{{ item.name }}-disk3" + + ########################################################################### + # Wait for PVCs + ########################################################################### + - name: Wait for all PVCs to be bound + k8s_info: + api_version: v1 + kind: PersistentVolumeClaim + name: "{{ item.0.name }}-{{ item.1 }}" + namespace: "{{ namespace }}" + register: pvc_status + until: pvc_status.resources[0].status.phase == "Bound" + retries: 30 + delay: 5 + loop: "{{ vm_list | product(['rootdisk', 'disk2', 'disk3']) | list }}" + loop_control: + label: "{{ item.0.name 
}}-{{ item.1 }}" + + ########################################################################### + # Create VirtualMachines + ########################################################################### + - name: Create VirtualMachine with additional raw disks + k8s: + state: present + definition: + apiVersion: kubevirt.io/v1 + kind: VirtualMachine + metadata: + name: "{{ item.name }}" + namespace: "{{ namespace }}" + spec: + running: true + template: + metadata: + labels: + kubevirt.io/domain: "{{ item.name }}" + spec: + domain: + cpu: + cores: 1 + resources: + requests: + memory: 2Gi + devices: + disks: + - name: rootdisk + disk: + bus: virtio + - name: disk2 + disk: + bus: virtio + - name: disk3 + disk: + bus: virtio + - name: cloudinitdisk + disk: + bus: virtio + interfaces: + - name: default + bridge: {} + networks: + - name: default + multus: + networkName: rhce + volumes: + - name: rootdisk + persistentVolumeClaim: + claimName: "{{ item.name }}-rootdisk" + - name: disk2 + persistentVolumeClaim: + claimName: "{{ item.name }}-disk2" + - name: disk3 + persistentVolumeClaim: + claimName: "{{ item.name }}-disk3" + - name: cloudinitdisk + cloudInitNoCloud: + hostname: "{{ item.name }}" + fqdn: "{{ item.name }}.{{ vm_domain }}" + manage_etc_hosts: true + networkData: | + version: 2 + ethernets: + enp1s0: + dhcp4: false + addresses: + - "{{ item.ip }}/24" + gateway4: 10.4.0.1 + nameservers: + search: + - "{{ vm_domain }}" + addresses: + - 10.1.0.1 + userData: | + #cloud-config + users: + - name: redhat + sudo: ALL=(ALL) NOPASSWD:ALL + lock_passwd: false + chpasswd: + list: | + redhat:redhat + expire: false + ssh_pwauth: true + user: redhat + password: redhat + loop: "{{ vm_list }}" + loop_control: + label: "{{ item.name }}" + +############################################################################### +# Play 2: Add static DNS entries to dnsmasq on OPNsense +############################################################################### +- name: Add static DNS 
entries to dnsmasq on OPNsense + hosts: opnsense.lab.cudanet.org + become: true + remote_user: root + gather_facts: false + + vars: + ansible_python_interpreter: /usr/local/bin/python3 + dnsmasq_hosts_file: /usr/local/etc/dnsmasq.conf.d/lab.conf + vms: + - ip: "10.4.0.100" + hostname: "controller.lab.example.com" + - ip: "10.4.0.101" + hostname: "node1.lab.example.com" + - ip: "10.4.0.102" + hostname: "node2.lab.example.com" + - ip: "10.4.0.103" + hostname: "node3.lab.example.com" + - ip: "10.4.0.104" + hostname: "node4.lab.example.com" + - ip: "10.4.0.105" + hostname: "node5.lab.example.com" + - ip: "10.4.0.106" + hostname: "utility.lab.example.com" + + tasks: + - name: Ensure dnsmasq hosts file exists + file: + path: "{{ dnsmasq_hosts_file }}" + state: touch + owner: root + group: wheel + mode: "0644" + + - name: Add static DNS entries to dnsmasq hosts file + lineinfile: + path: "{{ dnsmasq_hosts_file }}" + line: "address=/{{ item.hostname }}/{{ item.ip }}" + state: present + create: yes + backup: yes + loop: "{{ vms }}" + + - name: Reload dnsmasq service + ansible.builtin.shell: pluginctl dns + + - name: Ping each host from OPNsense to verify connectivity + ansible.builtin.shell: ping -c 3 {{ item.ip }} + register: ping_result + ignore_errors: yes + loop: "{{ vms }}" + + - name: Show ping results + debug: + msg: | + Ping to {{ item.item.hostname }} returned (rc={{ item.rc }}): + {{ item.stdout }} + loop: "{{ ping_result.results }}" + +############################################################################### +# Play 3: Register system, configure services, and mirror EE to local registry +############################################################################### +- name: Register system, configure services, and mirror EE to local registry + hosts: utility + become: true + + vars: + sat_user: "{{ vault_sat_user }}" + sat_passwd: "{{ vault_sat_passwd }}" + sat_orgid: "{{ vault_sat_orgid }}" + redhat_env: "{{ vault_redhat_env }}" + registry_host: 
utility.lab.example.com + registry_port: 5000 + host_port: 5000 + registry_image: docker.io/library/registry:2 + podman_user: "{{ vault_podman_user }}" + podman_passwd: "{{ vault_podman_passwd }}" + ee_source_image: registry.redhat.io/ansible-automation-platform-25/ee-supported-rhel9:latest + ee_target_image: "{{ registry_host }}:{{ registry_port }}/ee-supported-rhel9:latest" + + tasks: + - name: Register system with Red Hat Subscription Management + community.general.redhat_subscription: + username: "{{ sat_user }}" + password: "{{ sat_passwd }}" + org_id: "{{ sat_orgid }}" + environment: "{{ redhat_env }}" + state: present + + - name: Install required packages + ansible.builtin.dnf: + name: + - httpd + - firewalld + - podman + - policycoreutils-python-utils + state: present + + - name: Enable and start httpd + ansible.builtin.service: + name: httpd + state: started + enabled: true + + - name: Enable and start firewalld + ansible.builtin.service: + name: firewalld + state: started + enabled: true + + - name: Allow HTTP service through firewall + ansible.posix.firewalld: + service: http + permanent: true + state: enabled + immediate: true + + - name: Allow registry port through firewall + ansible.posix.firewalld: + port: "{{ registry_port }}/tcp" + permanent: true + state: enabled + immediate: true + + - name: Ensure correct permissions on web root + ansible.builtin.file: + path: /var/www/html + recurse: true + mode: "0755" + + - name: Set SELinux context for Ansible Automation Platform content + community.general.sefcontext: + target: "/var/www/html/ansible-automation-platform(/.*)?" + setype: httpd_sys_content_t + state: present + + - name: Set SELinux context for RHEL 9 content + community.general.sefcontext: + target: "/var/www/html/rhel9(/.*)?" + setype: httpd_sys_content_t + state: present + + - name: Set SELinux context for files + community.general.sefcontext: + target: "/var/www/html/files(/.*)?" 
+ setype: httpd_sys_content_t + state: present + + - name: Restore SELinux contexts + ansible.builtin.command: restorecon -Rv /var/www/html + changed_when: false + + - name: Create registry quadlet file + ansible.builtin.copy: + dest: /etc/containers/systemd/registry.container + mode: "0644" + content: | + [Unit] + Description=Registry + + [Container] + ContainerName=registry + Image={{ registry_image }} + PublishPort={{ registry_port }}:{{ host_port }} + + [Install] + WantedBy=multi-user.target + + - name: Reload Systemd Daemons + ansible.builtin.systemd: + daemon_reload: yes + become: true + + - name: Start registry.service + ansible.builtin.systemd: + name: registry.service + state: started + become: true + + - name: Create containers config directory + ansible.builtin.file: + path: /root/.config/containers + state: directory + mode: "0700" + + - name: Configure insecure registry + ansible.builtin.copy: + dest: /root/.config/containers/registries.conf + mode: "0600" + content: | + [[registry]] + location = "{{ registry_host }}:{{ registry_port }}" + insecure = true + + - name: Login to Red Hat registry + containers.podman.podman_login: + username: "{{ podman_user }}" + password: "{{ podman_passwd }}" + registry: registry.redhat.io + + - name: Pull Execution Environment image + containers.podman.podman_image: + name: "{{ ee_source_image }}" + state: present + + - name: Tag EE image for local registry + ansible.builtin.command: + cmd: podman tag {{ ee_source_image }} {{ ee_target_image }} + changed_when: true + + - name: Push EE image to local registry + ansible.builtin.command: + cmd: podman push --remove-signatures {{ ee_target_image }} + changed_when: true + + - name: Install chrony package + ansible.builtin.package: + name: chrony + state: present + + - name: Configure chrony as NTP server + ansible.builtin.lineinfile: + path: /etc/chrony.conf + regexp: '^allow' + line: 'allow 0.0.0.0/0' + state: present + + - name: Ensure chrony service is enabled and started 
+ ansible.builtin.service: + name: chronyd + state: started + enabled: true + + - name: Open NTP service in firewall + ansible.builtin.firewalld: + service: ntp + permanent: true + state: enabled + immediate: true + when: ansible_facts.services['firewalld.service'] is defined + +############################################################################### +# Play 4: Configure the Controller node +############################################################################### +- name: Configure the Controller node + hosts: controller + become: true + + vars: + registry_host: utility.lab.example.com + registry_port: 5000 + + tasks: + - name: Create repo file + ansible.builtin.copy: + content: | + [ansible-automation-platform-2.5] + name=Ansible Automation Platform 2.5 + metadata_expire=-1 + gpgcheck=1 + enabled=1 + baseurl=http://utility.lab.example.com/ansible-automation-platform/2.5 + gpgkey=http://utility.lab.example.com/rhel9/RPM-GPG-KEY-redhat-release + + [BaseOS] + name=BaseOS Packages Red Hat Enterprise Linux 9 + metadata_expire=-1 + gpgcheck=1 + enabled=1 + baseurl=http://utility.lab.example.com/rhel9/BaseOS + gpgkey=http://utility.lab.example.com/rhel9/RPM-GPG-KEY-redhat-release + + [AppStream] + name=AppStream Packages Red Hat Enterprise Linux 9 + metadata_expire=-1 + gpgcheck=1 + enabled=1 + baseurl=http://utility.lab.example.com/rhel9/AppStream/ + gpgkey=http://utility.lab.example.com/rhel9/rpm-gpg/RPM-GPG-KEY-redhat-release + dest: /etc/yum.repos.d/rhce.repo + + - name: Install required packages + ansible.builtin.dnf: + name: + - podman + - ansible-core + - ansible-navigator + state: present + + - name: Create directories + ansible.builtin.file: + path: "{{ item }}" + state: directory + mode: "0700" + owner: ansible + group: ansible + loop: + - /home/ansible/.config + - /home/ansible/.config/containers + - /home/ansible/ansible + - /home/ansible/ansible/roles + - /home/ansible/ansible/mycollections + + - name: Configure insecure registry + 
ansible.builtin.copy: + dest: /home/ansible/.config/containers/registries.conf + mode: "0600" + content: | + [[registry]] + location = "{{ registry_host }}:{{ registry_port }}" + insecure = true + owner: ansible + group: ansible + + - name: Configure ansible.cfg + ansible.builtin.copy: + dest: /home/ansible/ansible/ansible.cfg + content: | + [defaults] + inventory = /home/ansible/ansible/inventory + remote_user = ansible + roles_path = /home/ansible/ansible/roles + collections_path = /home/ansible/ansible/mycollections + + - name: Configure ansible-navigator.yml + ansible.builtin.copy: + dest: /home/ansible/ansible/ansible-navigator.yml + content: | + --- + ansible-navigator: + execution-environment: + image: utility.lab.example.com:5000/ee-supported-rhel9:latest + pull: + policy: missing + playbook-artifact: + enable: false + + - name: Create test.yml + ansible.builtin.copy: + dest: /home/ansible/ansible/test.yml + content: | + --- + - name: A simple playbook to test that Ansible is configured + hosts: localhost + tasks: + - name: Run test playbook + ansible.builtin.debug: + msg: "If you're reading this, Ansible is configured on your system." 
diff --git a/debug.yml b/debug.yml new file mode 100644 index 0000000..0e1da3d --- /dev/null +++ b/debug.yml @@ -0,0 +1,9 @@ +- name: Print facts + hosts: jump01.lab.cudanet.org + gather_facts: true + remote_user: root + + tasks: + - name: print facts + ansible.builtin.debug: + msg: "The default IPv4 address for {{ inventory_hostname }} is {{ ansible_default_ipv4.address }}" diff --git a/destroy-lab.yml b/destroy-lab.yml new file mode 100644 index 0000000..903d570 --- /dev/null +++ b/destroy-lab.yml @@ -0,0 +1,69 @@ +--- +- name: Delete VMs and all associated disks (PVCs) + hosts: localhost + gather_facts: false + collections: + - kubernetes.core + + vars: + namespace: default + + vm_names: + - controller + - node1 + - node2 + - node3 + - node4 + - node5 + - utility + + disks: + - rootdisk + - disk2 + - disk3 + + tasks: + ########################################################################### + # Delete VMs + ########################################################################### + - name: Delete VirtualMachines + k8s: + api_version: kubevirt.io/v1 + kind: VirtualMachine + name: "{{ item }}" + namespace: "{{ namespace }}" + state: absent + loop: "{{ vm_names }}" + + ########################################################################### + # Wait until VMs are actually gone (IMPORTANT) + ########################################################################### + - name: Wait for VMs to be fully deleted + k8s_info: + api_version: kubevirt.io/v1 + kind: VirtualMachine + name: "{{ item }}" + namespace: "{{ namespace }}" + register: vm_check + until: vm_check.resources | length == 0 + retries: 30 + delay: 5 + loop: "{{ vm_names }}" + loop_control: + label: "{{ item }}" + + ########################################################################### + # Delete PVCs (no pre-check, safe & idempotent) + ########################################################################### + - name: Delete PVCs for all VM disks + k8s: + api_version: v1 + kind: 
PersistentVolumeClaim + name: "{{ item.0 }}-{{ item.1 }}" + namespace: "{{ namespace }}" + state: absent + loop: "{{ vm_names | product(disks) | list }}" + loop_control: + label: "{{ item.0 }}-{{ item.1 }}" + failed_when: false + diff --git a/haproxy.tar.gz b/haproxy.tar.gz new file mode 100644 index 0000000..0292b60 Binary files /dev/null and b/haproxy.tar.gz differ diff --git a/hwreport.empty b/hwreport.empty new file mode 100644 index 0000000..4c93995 --- /dev/null +++ b/hwreport.empty @@ -0,0 +1,7 @@ +HOST= +BIOS= +MEMORY= +VDB= +VDC= +VDE= + diff --git a/inventory b/inventory new file mode 100644 index 0000000..59f2e2b --- /dev/null +++ b/inventory @@ -0,0 +1,14 @@ +[satellite] +satellite.lab.cudanet.org + +[workstation] +jump01.lab.cudanet.org + +[routers] +opnsense.lab.cudanet.org + +[utility] +utility.lab.example.com + +[controller] +controller.lab.example.com diff --git a/myhosts.txt b/myhosts.txt new file mode 100644 index 0000000..849c10d --- /dev/null +++ b/myhosts.txt @@ -0,0 +1,2 @@ +127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 +::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 diff --git a/phpinfo.tar.gz b/phpinfo.tar.gz new file mode 100644 index 0000000..f4eaf04 Binary files /dev/null and b/phpinfo.tar.gz differ diff --git a/redhat-rhel_system_roles-1.108.6.tar.gz b/redhat-rhel_system_roles-1.108.6.tar.gz new file mode 100644 index 0000000..f43eb36 Binary files /dev/null and b/redhat-rhel_system_roles-1.108.6.tar.gz differ diff --git a/salaries.yml b/salaries.yml new file mode 100644 index 0000000..3945250 --- /dev/null +++ b/salaries.yml @@ -0,0 +1,9 @@ +$ANSIBLE_VAULT;1.1;AES256 +32373538383562646335653030373731386462393436643032363863653637396334376537386337 +3434323231616637643462373664313661363037343534660a373236663364363138376435646231 +39666263666362623536393261303966393434316130336436613636393137656233616364396532 
+3036366663333263650a356561386439356364383131356533313834653164346262353934316238 +64323934376437663435636666326465393336663535353335353864353663333064343031383264 +30393361336533383533316538386539333437316165303964313665353466346164326638326132 +33333830646262333662653739306662613662653032626166373730623062373936333239643532 +61353739353830626137 diff --git a/solutions/ansible-navigator.yml b/solutions/ansible-navigator.yml new file mode 100644 index 0000000..503bad7 --- /dev/null +++ b/solutions/ansible-navigator.yml @@ -0,0 +1,8 @@ +--- +ansible-navigator: + execution-environment: + image: utility.lab.example.com:5000/ee-supported-rhel9:latest + pull: + policy: missing + playbook-artifact: + enable: false diff --git a/solutions/ansible.cfg b/solutions/ansible.cfg new file mode 100644 index 0000000..2947b85 --- /dev/null +++ b/solutions/ansible.cfg @@ -0,0 +1,21 @@ +[defaults] +inventory = ./inventory +interpreter_python = /usr/bin/python3 +remote_user = ansible +host_key_checking = false +roles_path = /root/ansible/roles + +[galaxy] +server_list = automation_hub + +[galaxy_server.automation_hub] +url = https://console.redhat.com/api/automation-hub/ +auth_url = https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token +token = 
"eyJhbGciOiJIUzUxMiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICI0NzQzYTkzMC03YmJiLTRkZGQtOTgzMS00ODcxNGRlZDc0YjUifQ.eyJpYXQiOjE3NjYzMzM3MDgsImp0aSI6ImYzMGQ4ZDQ4LTJlYTAtNGI2OS1iZmFhLWViZTNhOGIxZDA0MyIsImlzcyI6Imh0dHBzOi8vc3NvLnJlZGhhdC5jb20vYXV0aC9yZWFsbXMvcmVkaGF0LWV4dGVybmFsIiwiYXVkIjoiaHR0cHM6Ly9zc28ucmVkaGF0LmNvbS9hdXRoL3JlYWxtcy9yZWRoYXQtZXh0ZXJuYWwiLCJzdWIiOiI1NDY5MjM3NiIsInR5cCI6Ik9mZmxpbmUiLCJhenAiOiJjbG91ZC1zZXJ2aWNlcyIsIm5vbmNlIjoiODI2YWZmNTktOTZmNC00ODcyLTg0MDUtNWYzODY0M2M3YzMwIiwic2lkIjoiMjdiZWY4NjMtOGFiZS00YWFlLTk4NGUtN2M5YzU3ODVmM2Y3Iiwic2NvcGUiOiJvcGVuaWQgYXBpLmNvbnNvbGUgYmFzaWMgcm9sZXMgd2ViLW9yaWdpbnMgY2xpZW50X3R5cGUucHJlX2tjMjUgYXBpLmFza19yZWRfaGF0IG9mZmxpbmVfYWNjZXNzIn0.75q6N-IJiOGSxWmMOraXYmeJxmPU4p6iSFJj99jqhOOpKvgDk9_gD-MnM8FU_AGvuifbYn8_zj2QgTSaLlo8hw" + +[galaxy_server.galaxy_hub] +url=https://galaxy.ansible.com/ + +[privilege_escalation] +become = true +become_method = sudo diff --git a/solutions/apache.yml b/solutions/apache.yml new file mode 100644 index 0000000..a301f5e --- /dev/null +++ b/solutions/apache.yml @@ -0,0 +1,7 @@ +# apache_role.yml +--- +- name: Configure Apache web servers + hosts: dev + become: true + roles: + - apache diff --git a/solutions/hwreport.yml b/solutions/hwreport.yml new file mode 100644 index 0000000..9dffaf3 --- /dev/null +++ b/solutions/hwreport.yml @@ -0,0 +1,49 @@ +--- +- name: Generate hardware report + hosts: all + become: yes + + tasks: + - name: Download empty hwreport file + get_url: + url: http://utility.lab.example.com/files/hwreport.empty + dest: /root/hwreport.txt + mode: '0644' + + - name: Set hostname + lineinfile: + path: /root/hwreport.txt + regexp: '^HOST=' + line: "HOST={{ ansible_hostname }}" + + - name: Set BIOS version + lineinfile: + path: /root/hwreport.txt + regexp: '^BIOS=' + line: "BIOS={{ ansible_bios_version | default('NONE') }}" + + - name: Set memory size + lineinfile: + path: /root/hwreport.txt + regexp: '^MEMORY=' + line: "MEMORY={{ ansible_memtotal_mb }} MB" + + - name: Set vdb disk size + 
lineinfile: + path: /root/hwreport.txt + regexp: '^VDB=' + line: "VDB={{ ansible_devices.vdb.size | default('NONE') }}" + + - name: Set vdc disk size + lineinfile: + path: /root/hwreport.txt + regexp: '^VDC=' + line: "VDC={{ ansible_devices.vdc.size | default('NONE') }}" + + - name: Set vdd disk size (NONE if missing) + lineinfile: + path: /root/hwreport.txt + regexp: '^VDD=' + line: >- + VDD={{ ansible_devices.vdd.size if 'vdd' in ansible_devices else 'NONE' }} + diff --git a/solutions/install.yml b/solutions/install.yml new file mode 100644 index 0000000..61bedb7 --- /dev/null +++ b/solutions/install.yml @@ -0,0 +1,20 @@ +# playbook.yml +- name: Install Packages and Groups + hosts: all + + become: true + + tasks: + - name: Install packages on test group + ansible.builtin.dnf: + name: + - httpd + - php + state: latest + when: inventory_hostname in groups['test'] + + - name: Install RPM Development Tools group on dev group + ansible.builtin.dnf: + name: "@RPM Development Tools" + state: latest + when: inventory_hostname in groups['dev'] diff --git a/solutions/inventory b/solutions/inventory new file mode 100644 index 0000000..ab000d5 --- /dev/null +++ b/solutions/inventory @@ -0,0 +1,16 @@ +[dev] +node1 + +[test] +node2 + +[prod] +node3 +node4 + +[balancers] +node5 + +[webservers:children] +prod + diff --git a/solutions/issue.yml b/solutions/issue.yml new file mode 100644 index 0000000..1cb7428 --- /dev/null +++ b/solutions/issue.yml @@ -0,0 +1,28 @@ +--- +- name: Automatically populate /etc/issue with environment name + hosts: + - dev + - test + - prod + become: true + + tasks: + - name: Determine environment name from inventory groups + ansible.builtin.set_fact: + env_name: >- + {% if 'prod' in group_names %} + Production + {% elif 'test' in group_names %} + Testing + {% elif 'dev' in group_names %} + Development + {% endif %} + + - name: Populate /etc/issue + ansible.builtin.copy: + dest: /etc/issue + content: | + {{ env_name }} + owner: root + group: root + 
mode: '0644' diff --git a/solutions/partition.yml b/solutions/partition.yml new file mode 100644 index 0000000..2dd04bf --- /dev/null +++ b/solutions/partition.yml @@ -0,0 +1,74 @@ +--- +- name: Configure disk partitions and mounts + hosts: all + become: true + gather_facts: true + + tasks: + #################################################################### + # /dev/vdb — always create 1500MB partition mounted at /devmount + #################################################################### + - name: Create 1500MB partition on /dev/vdb + community.general.parted: + device: /dev/vdb + number: 1 + state: present + part_end: 1500MiB + + - name: Create XFS filesystem on /dev/vdb1 + ansible.builtin.filesystem: + fstype: xfs + dev: /dev/vdb1 + + - name: Mount /dev/vdb1 at /devmount + ansible.builtin.mount: + path: /devmount + src: /dev/vdb1 + fstype: xfs + state: mounted + + #################################################################### + # /dev/vdc — size-based logic (1500MB or 800MB) + #################################################################### + - name: Determine size of /dev/vdc partition + ansible.builtin.set_fact: + vdc_part_size: >- + {{ '1500MiB' + if (ansible_facts.devices.vdc.sectors | int * + ansible_facts.devices.vdc.sectorsize | int) >= (1500 * 1024 * 1024) + else '800MiB' }} + when: "'vdc' in ansible_facts.devices" + + - name: Create partition on /dev/vdc + community.general.parted: + device: /dev/vdc + number: 1 + state: present + part_end: "{{ vdc_part_size }}" + when: "'vdc' in ansible_facts.devices" + + - name: Create XFS filesystem on /dev/vdc1 + ansible.builtin.filesystem: + fstype: xfs + dev: /dev/vdc1 + when: "'vdc' in ansible_facts.devices" + + - name: Mount /dev/vdc1 + ansible.builtin.mount: + path: >- + {{ '/devmount1' + if vdc_part_size == '1500MiB' + else '/dev/mount' }} + src: /dev/vdc1 + fstype: xfs + state: mounted + when: "'vdc' in ansible_facts.devices" + + 
#################################################################### + # /dev/vde presence check + #################################################################### + - name: Warn if /dev/vde is not present + ansible.builtin.debug: + msg: "Disk /dev/vde is not present" + when: "'vde' not in ansible_facts.devices" + diff --git a/solutions/repos.yml b/solutions/repos.yml new file mode 100644 index 0000000..afe57a7 --- /dev/null +++ b/solutions/repos.yml @@ -0,0 +1,27 @@ +--- +# repos.yml +- name: Add BaseOS and AppStream repos to all hosts + hosts: all + become: true + + vars: + repos: + - BaseOS + - AppStream + + baseurl: http://utility.lab.example.com/rhel9 + gpgkey_url: http://utility.lab.example.com/rhel9/RPM-GPG-KEY-redhat-release + repo_file: /etc/yum.repos.d/rhce + + tasks: + - name: Add {{ item }} repository + ansible.builtin.yum_repository: + name: "EX294_{{ item }}" + description: "EX294 {{ item }} Repository" + baseurl: "{{ baseurl }}/{{ item }}" + enabled: true + gpgcheck: true + gpgkey: "{{ gpgkey_url }}" + file: "{{ repo_file }}" + loop: "{{ repos }}" + diff --git a/solutions/requirements.yml b/solutions/requirements.yml new file mode 100644 index 0000000..d8df76b --- /dev/null +++ b/solutions/requirements.yml @@ -0,0 +1,23 @@ +# requirements.yml +--- +roles: + - name: phpinfo + src: http://utility.lab.example.com/files/phpinfo.tar.gz + path: /home/ansible/ansible/roles + + - name: balancer + src: http://utility.lab.example.com/files/haproxy.tar.gz + path: /home/ansible/ansible/roles + +collections: + - name: ansible.posix + source: http://utility.lab.example.com/files/ansible-posix-2.1.0.tar.gz + type: url + + - name: redhat.rhel_system_roles + source: http://utility.lab.example.com/files/redhat-rhel_system_roles-1.108.6.tar.gz + type: url + + - name: community.general + source: http://utility.lab.example.com/files/community-general-12.1.0.tar.gz + type: url diff --git a/solutions/rhce.repo b/solutions/rhce.repo new file mode 100644 index 
0000000..d73dfa1 --- /dev/null +++ b/solutions/rhce.repo @@ -0,0 +1,15 @@ +[BaseOS] +name=BaseOS Packages Red Hat Enterprise Linux 9 +metadata_expire=-1 +gpgcheck=1 +enabled=1 +baseurl=http://utility.lab.example.com/rhel9/BaseOS +gpgkey=http://utility.lab.example.com/rhel9/RPM-GPG-KEY-redhat-release + +[AppStream] +name=AppStream Packages Red Hat Enterprise Linux 9 +metadata_expire=-1 +gpgcheck=1 +enabled=1 +baseurl=http://utility.lab.example.com/rhel9/AppStream/ +gpgkey=http://utility.lab.example.com/rhel9/rpm-gpg/RPM-GPG-KEY-redhat-release diff --git a/solutions/roles.yml b/solutions/roles.yml new file mode 100644 index 0000000..3b7dd74 --- /dev/null +++ b/solutions/roles.yml @@ -0,0 +1,13 @@ +# roles.yml +--- +- name: Configure load balancer + hosts: balancers + become: yes + roles: + - balancer + +- name: Configure web servers + hosts: webservers + become: yes + roles: + - phpinfo diff --git a/solutions/roles/apache/README.md b/solutions/roles/apache/README.md new file mode 100644 index 0000000..225dd44 --- /dev/null +++ b/solutions/roles/apache/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 
+ +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/solutions/roles/apache/defaults/main.yml b/solutions/roles/apache/defaults/main.yml new file mode 100644 index 0000000..60034e9 --- /dev/null +++ b/solutions/roles/apache/defaults/main.yml @@ -0,0 +1,5 @@ +# defaults/main.yml +--- +apache_packages: + - httpd + - firewalld diff --git a/solutions/roles/apache/handlers/main.yml b/solutions/roles/apache/handlers/main.yml new file mode 100644 index 0000000..bc770a3 --- /dev/null +++ b/solutions/roles/apache/handlers/main.yml @@ -0,0 +1,6 @@ +# handlers/main.yml +--- +- name: restart httpd + ansible.builtin.service: + name: httpd + state: restarted diff --git a/solutions/roles/apache/meta/main.yml b/solutions/roles/apache/meta/main.yml new file mode 100644 index 0000000..c572acc --- /dev/null +++ b/solutions/roles/apache/meta/main.yml @@ -0,0 +1,52 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.1 + 
+ # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
diff --git a/solutions/roles/apache/tasks/main.yml b/solutions/roles/apache/tasks/main.yml new file mode 100644 index 0000000..8712316 --- /dev/null +++ b/solutions/roles/apache/tasks/main.yml @@ -0,0 +1,34 @@ +# tasks/main.yml +--- +- name: Install httpd and firewalld + ansible.builtin.package: + name: "{{ apache_packages }}" + state: present + +- name: Enable and start firewalld + ansible.builtin.service: + name: firewalld + state: started + enabled: true + +- name: Enable and start httpd + ansible.builtin.service: + name: httpd + state: started + enabled: true + +- name: Allow HTTP service through firewalld + ansible.posix.firewalld: + service: http + permanent: true + state: enabled + immediate: true + +- name: Deploy index.html with FQDN and IPv4 + ansible.builtin.template: + src: index.html.j2 + dest: /var/www/html/index.html + owner: root + group: root + mode: '0644' + notify: restart httpd diff --git a/solutions/roles/apache/templates/index.html.j2 b/solutions/roles/apache/templates/index.html.j2 new file mode 100644 index 0000000..90dd40b --- /dev/null +++ b/solutions/roles/apache/templates/index.html.j2 @@ -0,0 +1,12 @@ +# templates/index.html.j2 + + + + Apache Test Page + + +

Apache is working

+

FQDN: {{ ansible_facts.fqdn }}

+

IPv4 Address: {{ ansible_facts.default_ipv4.address }}

+ + diff --git a/solutions/roles/apache/tests/inventory b/solutions/roles/apache/tests/inventory new file mode 100644 index 0000000..878877b --- /dev/null +++ b/solutions/roles/apache/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/solutions/roles/apache/tests/test.yml b/solutions/roles/apache/tests/test.yml new file mode 100644 index 0000000..191e731 --- /dev/null +++ b/solutions/roles/apache/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - apache diff --git a/solutions/roles/apache/vars/main.yml b/solutions/roles/apache/vars/main.yml new file mode 100644 index 0000000..2aa5032 --- /dev/null +++ b/solutions/roles/apache/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for apache diff --git a/solutions/roles/balancer/MANIFEST.json b/solutions/roles/balancer/MANIFEST.json new file mode 100644 index 0000000..a07fe9a --- /dev/null +++ b/solutions/roles/balancer/MANIFEST.json @@ -0,0 +1,19 @@ +{ + "collection_info": null, + "dependencies": [], + "format": 1, + "license": "MIT", + "license_file": null, + "name": "balancer", + "namespace": "local", + "readme": "README.md", + "repository": null, + "tags": [ + "haproxy", + "loadbalancer", + "networking", + "web" + ], + "version": "1.0.0" +} + diff --git a/solutions/roles/balancer/defaults/main.yml b/solutions/roles/balancer/defaults/main.yml new file mode 100644 index 0000000..8fe8ba0 --- /dev/null +++ b/solutions/roles/balancer/defaults/main.yml @@ -0,0 +1,11 @@ +--- +haproxy_frontend_port: 80 + +haproxy_backend_servers: + - name: node3 + address: node3 + port: 80 + - name: node4 + address: node4 + port: 80 + diff --git a/solutions/roles/balancer/handlers/main.yml b/solutions/roles/balancer/handlers/main.yml new file mode 100644 index 0000000..516c8a0 --- /dev/null +++ b/solutions/roles/balancer/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: Restart haproxy + ansible.builtin.service: + name: haproxy + state: restarted + diff --git 
a/solutions/roles/balancer/meta/.galaxy_install_info b/solutions/roles/balancer/meta/.galaxy_install_info new file mode 100644 index 0000000..bd9eeef --- /dev/null +++ b/solutions/roles/balancer/meta/.galaxy_install_info @@ -0,0 +1,2 @@ +install_date: 'Tue 30 Dec 2025 12:53:11 AM ' +version: '' diff --git a/solutions/roles/balancer/meta/main.yml b/solutions/roles/balancer/meta/main.yml new file mode 100644 index 0000000..689c74d --- /dev/null +++ b/solutions/roles/balancer/meta/main.yml @@ -0,0 +1,21 @@ +--- +galaxy_info: + role_name: balancer + author: lab + description: Installs and configures HAProxy to load balance Apache servers + license: MIT + min_ansible_version: "2.9" + + platforms: + - name: EL + versions: + - "9" + + galaxy_tags: + - haproxy + - loadbalancer + - networking + - web + +dependencies: [] + diff --git a/solutions/roles/balancer/tasks/main.yml b/solutions/roles/balancer/tasks/main.yml new file mode 100644 index 0000000..b3914c8 --- /dev/null +++ b/solutions/roles/balancer/tasks/main.yml @@ -0,0 +1,28 @@ +--- +- name: Install HAProxy + ansible.builtin.dnf: + name: haproxy + state: present + +- name: Enable and start HAProxy + ansible.builtin.service: + name: haproxy + state: started + enabled: true + +- name: Deploy HAProxy configuration + ansible.builtin.template: + src: haproxy.cfg.j2 + dest: /etc/haproxy/haproxy.cfg + owner: root + group: root + mode: '0644' + notify: Restart haproxy + +- name: Allow port 80 through firewalld + ansible.posix.firewalld: + service: http + permanent: true + immediate: true + state: enabled + diff --git a/solutions/roles/balancer/templates/haproxy.cfg.j2 b/solutions/roles/balancer/templates/haproxy.cfg.j2 new file mode 100644 index 0000000..446b200 --- /dev/null +++ b/solutions/roles/balancer/templates/haproxy.cfg.j2 @@ -0,0 +1,25 @@ +global + log /dev/log local0 + log /dev/log local1 notice + daemon + maxconn 2048 + +defaults + log global + mode http + option httplog + option dontlognull + timeout connect 5s + 
timeout client 50s + timeout server 50s + +frontend http_front + bind *:80 + default_backend webservers + +backend webservers + balance roundrobin + option httpchk + server node3 node3:80 check + server node4 node4:80 check + diff --git a/solutions/roles/phpinfo/README.md b/solutions/roles/phpinfo/README.md new file mode 100644 index 0000000..9506b06 --- /dev/null +++ b/solutions/roles/phpinfo/README.md @@ -0,0 +1,17 @@ +# phpinfo Role + +This role installs Apache, PHP, and Firewalld, opens HTTP access, fixes SELinux +contexts, and deploys a PHP info page. + +## Requirements +- RHEL / Rocky / Alma 9 +- SELinux enforcing +- firewalld enabled + +## Usage +```yaml +- hosts: web + become: true + roles: + - phpinfo + diff --git a/solutions/roles/phpinfo/defaults/main.yml b/solutions/roles/phpinfo/defaults/main.yml new file mode 100644 index 0000000..3323778 --- /dev/null +++ b/solutions/roles/phpinfo/defaults/main.yml @@ -0,0 +1,4 @@ +--- +phpinfo_webroot: /var/www/html +phpinfo_file: index.php + diff --git a/solutions/roles/phpinfo/handlers/main.yml b/solutions/roles/phpinfo/handlers/main.yml new file mode 100644 index 0000000..6d3384d --- /dev/null +++ b/solutions/roles/phpinfo/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: Restart httpd + ansible.builtin.service: + name: httpd + state: restarted + diff --git a/solutions/roles/phpinfo/meta/.galaxy_install_info b/solutions/roles/phpinfo/meta/.galaxy_install_info new file mode 100644 index 0000000..bd9eeef --- /dev/null +++ b/solutions/roles/phpinfo/meta/.galaxy_install_info @@ -0,0 +1,2 @@ +install_date: 'Tue 30 Dec 2025 12:53:11 AM ' +version: '' diff --git a/solutions/roles/phpinfo/meta/main.yml b/solutions/roles/phpinfo/meta/main.yml new file mode 100644 index 0000000..bc99f96 --- /dev/null +++ b/solutions/roles/phpinfo/meta/main.yml @@ -0,0 +1,22 @@ +--- +galaxy_info: + role_name: phpinfo + author: lab + description: Installs Apache, PHP, configures firewalld, fixes SELinux, and deploys a PHP info page + license: 
MIT + min_ansible_version: "2.9" + + platforms: + - name: EL + versions: + - "9" + + galaxy_tags: + - php + - httpd + - firewalld + - selinux + - web + +dependencies: [] + diff --git a/solutions/roles/phpinfo/tasks/main.yml b/solutions/roles/phpinfo/tasks/main.yml new file mode 100644 index 0000000..3bfec36 --- /dev/null +++ b/solutions/roles/phpinfo/tasks/main.yml @@ -0,0 +1,41 @@ +--- +- name: Install required packages + ansible.builtin.dnf: + name: + - httpd + - firewalld + - php + state: present + +- name: Enable and start httpd + ansible.builtin.service: + name: httpd + state: started + enabled: true + +- name: Enable and start firewalld + ansible.builtin.service: + name: firewalld + state: started + enabled: true + +- name: Allow HTTP traffic through firewalld (persistent and immediate) + ansible.posix.firewalld: + service: http + state: enabled + permanent: true + immediate: true + +- name: Fix SELinux context on webroot + ansible.builtin.command: restorecon -Rv {{ phpinfo_webroot }} + changed_when: false + +- name: Deploy PHP info page + ansible.builtin.template: + src: index.php.j2 + dest: "{{ phpinfo_webroot }}/{{ phpinfo_file }}" + owner: root + group: root + mode: '0644' + notify: Restart httpd + diff --git a/solutions/roles/phpinfo/templates/index.php.j2 b/solutions/roles/phpinfo/templates/index.php.j2 new file mode 100644 index 0000000..c1b42ed --- /dev/null +++ b/solutions/roles/phpinfo/templates/index.php.j2 @@ -0,0 +1,4 @@ + + diff --git a/solutions/roles/phpinfo/tests/inventory b/solutions/roles/phpinfo/tests/inventory new file mode 100644 index 0000000..878877b --- /dev/null +++ b/solutions/roles/phpinfo/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/solutions/roles/phpinfo/tests/test.yml b/solutions/roles/phpinfo/tests/test.yml new file mode 100644 index 0000000..7f977bc --- /dev/null +++ b/solutions/roles/phpinfo/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - phpinfo diff --git 
a/solutions/roles/phpinfo/vars/main.yml b/solutions/roles/phpinfo/vars/main.yml new file mode 100644 index 0000000..3dd96f6 --- /dev/null +++ b/solutions/roles/phpinfo/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for phpinfo diff --git a/solutions/selinux.yml b/solutions/selinux.yml new file mode 100644 index 0000000..9fa4d57 --- /dev/null +++ b/solutions/selinux.yml @@ -0,0 +1,17 @@ +--- +- name: Ensure SELinux is enabled and enforcing + hosts: all + become: true + + tasks: + - name: Set SELinux to enforcing + ansible.posix.selinux: + policy: targeted + state: enforcing + notify: Reboot if SELinux state changed + + handlers: + - name: Reboot if SELinux state changed + ansible.builtin.reboot: + msg: "Rebooting to apply SELinux changes" + reboot_timeout: 600 diff --git a/test.yml b/test.yml new file mode 100644 index 0000000..422de65 --- /dev/null +++ b/test.yml @@ -0,0 +1,7 @@ +--- +- name: A simple playbook to test that Ansible is configured + hosts: localhost + tasks: + - name: Run test playbook + ansible.builtin.debug: + msg: "If you're reading this, Ansible is configured on your system." 
diff --git a/user_list.yml b/user_list.yml new file mode 100644 index 0000000..af9ba95 --- /dev/null +++ b/user_list.yml @@ -0,0 +1,12 @@ +users: + - name: Fred + role: manager + + - name: Wilma + role: manager + + - name: Barney + role: developer + + - name: Betty + role: developer diff --git a/vault-password b/vault-password new file mode 100644 index 0000000..8e9ab21 --- /dev/null +++ b/vault-password @@ -0,0 +1 @@ +redhat diff --git a/vault.yml b/vault.yml new file mode 100644 index 0000000..bc13e43 --- /dev/null +++ b/vault.yml @@ -0,0 +1,51 @@ +$ANSIBLE_VAULT;1.1;AES256 +62613361613532643339306531396533643738343365376535346463663731306536373139306163 +6138386139623031656265326165386634323435353261610a303939356634386436366132306663 +65616566386162616435626464373063393463666435623832303533333662303966386636613737 +3365656432323739620a313463343436326463323632333337373437386237353862353564343438 +63346634393830386233343536396563306138626264306330623765393530373763323764363534 +38323666633631646666663966303365646266373830636664373366613330346662363134623235 +65356537306462643735363033306537613462313236623062623633356132343161373032613264 +34323865373564336532663361336239363939333239346539326434316435366362373734303337 +32343835373063376530646161333365626138366530313430623265306466353430366537353937 +39313032616265616633383061363432366236353434663639343463366435633839363733366134 +31356231653162653362303535643666633839633031633763346336376465383030303961393662 +38353261313935626565303961663065323030303265626562393830323430353331663361333232 +61373664313930363033303334653238666237373562343664366463646338316434346131663230 +32303364646432313039383833656436633637633631376335333962633966326332353061393761 +65396436636266336662303234663735333838303961373365656132363162353332613133386633 +33303633323030383236383630616335663430336363613234643938623962316433646637613032 +63323935613766623031303536383663623065353261663132303339626130393635393934363539 
+63653733353635663664323334346138336231396333366233323766383931313236373365373432 +37393934666661373263373538366663636430623131363664333063373338633639623264323334 +38623330663531383639323464623933306431373764386461326136323964623465663630663335 +38623361633239356637616637393438393139636431373266383330343039666663666366393036 +34373239313638623861393663366535613133323036366634336131316537333734636538396138 +63393963346536623831373765626232356130323361383936636661323966653764376439653032 +64326439643036653534383236623735353336316666623037366338336466323662353065613835 +32663566323538346463323033306430643834333330393966653437356564653065613637323836 +61666262306265313362383130346636333136396635636234653830333038616566313736633838 +65353062613538613761626164346534656161356435313562353130626662366530363037626461 +61656461326239633739393564343665346364303437323532343533346537316333333561636331 +34613866646562373833313161636431393532313465366562383037396435333366613264343662 +35356431313232646162663665316563653461623266613631646435383033343632386166636233 +35323934383466323734633230653164373231383861346336316566663231376133653763346130 +61373135373738653831363963383831646663383532363038333636303934666333623061373337 +31363137366136313937376437663963373963646331393564393661363565346234646230383239 +65323564613334623238346638636132346437306637666230646438623236373035313263306163 +61616335653164313466386435376437386561333531343632366463313931323366353261613738 +62646236333932356433633864313138343764333065396537353438623336616235643536333938 +66326137313033333334626437613132323336613335636439333964626631306337383862363730 +64396466303064343863336639323335313165303339386639306431633135636665666433623433 +36623239623466343163316563386435313136333662643564656566303036386430656637646633 +33366139323630653936336463333031613433373139663631306638656463626431383535626237 +31386264396461373865353033333237643365653933663532613465613564326333613335383130 
+32643965393736363236366432353733373764643731373536336633343139366365666366373437 +63313664316562623566306663306132376461376433326361626135653933366232303430333933 +63373537633764636463326538663939396431353732376239373166316236363431393230353631 +65613739366265353637633031646532643934353839393334383332353865343330363537323462 +64393662646335366133646239376166633833393861343561353165616132363330626537336461 +33353761663134643934393866393235613532316139356335633233326537333163633931356162 +65653264333164303438646130636435373261396333366531393432366231633661303534643961 +39353939323937643934373332316461353361323863616164373038633261343034636632313362 +3931653566633839386332373363663365333765306639353961