# create-lab.yml — combined lab provisioning playbook
# (recovered from a web-UI commit page; page chrome removed)
---
###############################################################################
# Combined Ansible Playbook
###############################################################################

###############################################################################
# Play 1: Deploy multiple KubeVirt VMs from Block PVC template with cloud-init
#
# For each entry in vm_list: clone a rootdisk PVC from a template PVC, create
# two blank data PVCs, wait for all three to bind, then create a running
# VirtualMachine with a cloudInitNoCloud disk carrying static network config.
###############################################################################
- name: Deploy multiple VMs from Block template PVC with cloud-init ISO
  hosts: localhost
  gather_facts: false

  vars:
    namespace: default
    vm_domain: lab.example.com

    rootdisk_size: 64Gi
    disk2_size: 2Gi
    disk3_size: 1Gi

    # One entry per VM: name, static IP on the rhce network, and the
    # template PVC the root disk is cloned from.
    vm_list:
      - name: controller
        ip: 10.4.0.100
        source_pvc: rhce-template
      - name: node1
        ip: 10.4.0.101
        source_pvc: rhce-template
      - name: node2
        ip: 10.4.0.102
        source_pvc: rhce-template
      - name: node3
        ip: 10.4.0.103
        source_pvc: rhce-template
      - name: node4
        ip: 10.4.0.104
        source_pvc: rhce-template
      - name: node5
        ip: 10.4.0.105
        source_pvc: rhce-template
      - name: utility
        ip: 10.4.0.106
        source_pvc: utility-template

  tasks:
    ###########################################################################
    # Create PVCs
    ###########################################################################
    - name: Create rootdisk PVC from template
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: v1
          kind: PersistentVolumeClaim
          metadata:
            name: "{{ item.name }}-rootdisk"
            namespace: "{{ namespace }}"
          spec:
            storageClassName: ocs-storagecluster-ceph-rbd-virtualization
            accessModes:
              - ReadWriteMany
            volumeMode: Block
            resources:
              requests:
                storage: "{{ rootdisk_size }}"
            # CSI clone of the template PVC. apiGroup is deliberately omitted:
            # for a core-group dataSource (PersistentVolumeClaim) the field
            # must be unset, not the empty string.
            dataSource:
              name: "{{ item.source_pvc }}"
              kind: PersistentVolumeClaim
      loop: "{{ vm_list }}"
      loop_control:
        label: "{{ item.name }}"

    - name: Create disk2 PVC (2Gi)
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: v1
          kind: PersistentVolumeClaim
          metadata:
            name: "{{ item.name }}-disk2"
            namespace: "{{ namespace }}"
          spec:
            storageClassName: ocs-storagecluster-ceph-rbd-virtualization
            accessModes:
              - ReadWriteMany
            volumeMode: Block
            resources:
              requests:
                storage: "{{ disk2_size }}"
      loop: "{{ vm_list }}"
      loop_control:
        label: "{{ item.name }}-disk2"

    - name: Create disk3 PVC (1Gi)
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: v1
          kind: PersistentVolumeClaim
          metadata:
            name: "{{ item.name }}-disk3"
            namespace: "{{ namespace }}"
          spec:
            storageClassName: ocs-storagecluster-ceph-rbd-virtualization
            accessModes:
              - ReadWriteMany
            volumeMode: Block
            resources:
              requests:
                storage: "{{ disk3_size }}"
      loop: "{{ vm_list }}"
      loop_control:
        label: "{{ item.name }}-disk3"

    ###########################################################################
    # Wait for PVCs
    ###########################################################################
    - name: Wait for all PVCs to be bound
      kubernetes.core.k8s_info:
        api_version: v1
        kind: PersistentVolumeClaim
        name: "{{ item.0.name }}-{{ item.1 }}"
        namespace: "{{ namespace }}"
      register: pvc_status
      # Guard on list length first: on the first poll the query can return an
      # empty list, and indexing [0] would abort the retry loop with an error.
      until:
        - pvc_status.resources | length > 0
        - pvc_status.resources[0].status.phase == "Bound"
      retries: 30
      delay: 5
      loop: "{{ vm_list | product(['rootdisk', 'disk2', 'disk3']) | list }}"
      loop_control:
        label: "{{ item.0.name }}-{{ item.1 }}"

    ###########################################################################
    # Create VirtualMachines
    ###########################################################################
    - name: Create VirtualMachine with additional raw disks
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: kubevirt.io/v1
          kind: VirtualMachine
          metadata:
            name: "{{ item.name }}"
            namespace: "{{ namespace }}"
          spec:
            running: true
            template:
              metadata:
                labels:
                  kubevirt.io/domain: "{{ item.name }}"
              spec:
                domain:
                  cpu:
                    cores: 1
                  resources:
                    requests:
                      memory: 2Gi
                  devices:
                    disks:
                      - name: rootdisk
                        disk:
                          bus: virtio
                      - name: disk2
                        disk:
                          bus: virtio
                      - name: disk3
                        disk:
                          bus: virtio
                      - name: cloudinitdisk
                        disk:
                          bus: virtio
                    interfaces:
                      - name: default
                        bridge: {}
                networks:
                  - name: default
                    multus:
                      networkName: rhce
                volumes:
                  - name: rootdisk
                    persistentVolumeClaim:
                      claimName: "{{ item.name }}-rootdisk"
                  - name: disk2
                    persistentVolumeClaim:
                      claimName: "{{ item.name }}-disk2"
                  - name: disk3
                    persistentVolumeClaim:
                      claimName: "{{ item.name }}-disk3"
                  - name: cloudinitdisk
                    # cloudInitNoCloud only accepts userData/networkData (and
                    # their base64/secret variants). hostname, fqdn and
                    # manage_etc_hosts are cloud-config keys, not API fields,
                    # so they live inside userData below.
                    # NOTE(review): nameserver 10.1.0.1 is outside the VM's
                    # 10.4.0.0/24 network -- confirm that is intended.
                    cloudInitNoCloud:
                      networkData: |
                        version: 2
                        ethernets:
                          enp1s0:
                            dhcp4: false
                            addresses:
                              - "{{ item.ip }}/24"
                            gateway4: 10.4.0.1
                            nameservers:
                              search:
                                - "{{ vm_domain }}"
                              addresses:
                                - 10.1.0.1
                      userData: |
                        #cloud-config
                        hostname: {{ item.name }}
                        fqdn: {{ item.name }}.{{ vm_domain }}
                        manage_etc_hosts: true
                        users:
                          - name: redhat
                            sudo: ALL=(ALL) NOPASSWD:ALL
                            lock_passwd: false
                        chpasswd:
                          list: |
                            redhat:redhat
                          expire: false
                        ssh_pwauth: true
                        user: redhat
                        password: redhat
      loop: "{{ vm_list }}"
      loop_control:
        label: "{{ item.name }}"

###############################################################################
# Play 2: Add static DNS entries to dnsmasq on OPNsense
#
# Writes one dnsmasq address= override per VM, reloads the DNS plugin, then
# pings each VM from the firewall as a smoke test.
###############################################################################
- name: Add static DNS entries to dnsmasq on OPNsense
  hosts: opnsense.lab.cudanet.org
  become: true
  remote_user: root
  gather_facts: false

  vars:
    # OPNsense ships Python under /usr/local, not the Linux default path.
    ansible_python_interpreter: /usr/local/bin/python3
    dnsmasq_hosts_file: /usr/local/etc/dnsmasq.conf.d/lab.conf
    vms:
      - ip: "10.4.0.100"
        hostname: "controller.lab.example.com"
      - ip: "10.4.0.101"
        hostname: "node1.lab.example.com"
      - ip: "10.4.0.102"
        hostname: "node2.lab.example.com"
      - ip: "10.4.0.103"
        hostname: "node3.lab.example.com"
      - ip: "10.4.0.104"
        hostname: "node4.lab.example.com"
      - ip: "10.4.0.105"
        hostname: "node5.lab.example.com"
      - ip: "10.4.0.106"
        hostname: "utility.lab.example.com"

  tasks:
    # NOTE(review): state=touch reports "changed" on every run; harmless but
    # not idempotent. lineinfile below also has create=true, so this task is
    # mainly here to pin ownership and mode.
    - name: Ensure dnsmasq hosts file exists
      ansible.builtin.file:
        path: "{{ dnsmasq_hosts_file }}"
        state: touch
        owner: root
        group: wheel
        mode: "0644"

    - name: Add static DNS entries to dnsmasq hosts file
      ansible.builtin.lineinfile:
        path: "{{ dnsmasq_hosts_file }}"
        line: "address=/{{ item.hostname }}/{{ item.ip }}"
        state: present
        create: true
        backup: true
      loop: "{{ vms }}"
      loop_control:
        label: "{{ item.hostname }}"

    # pluginctl restarts the DNS plugin; it always mutates service state, so
    # report it as changed explicitly. command is sufficient -- no shell
    # features are used.
    - name: Reload dnsmasq service
      ansible.builtin.command: pluginctl dns
      changed_when: true

    - name: Ping each host from OPNsense to verify connectivity
      ansible.builtin.command: ping -c 3 {{ item.ip }}
      register: ping_result
      ignore_errors: true
      changed_when: false  # read-only probe
      loop: "{{ vms }}"
      loop_control:
        label: "{{ item.hostname }}"

    - name: Show ping results
      ansible.builtin.debug:
        msg: |
          Ping to {{ item.item.hostname }} returned (rc={{ item.rc }}):
          {{ item.stdout }}
      loop: "{{ ping_result.results }}"
      loop_control:
        label: "{{ item.item.hostname }}"

###############################################################################
# Play 3: Register system, configure services, and mirror EE to local registry
#
# On the utility host: RHSM registration, httpd + firewalld setup, SELinux
# contexts for the web content, a Podman-quadlet container registry, mirroring
# the AAP execution environment image into it, and a chrony NTP server.
###############################################################################
- name: Register system, configure services, and mirror EE to local registry
  hosts: utility
  become: true

  vars:
    # All credentials come from vault variables; never hard-code them here.
    sat_user: "{{ vault_sat_user }}"
    sat_passwd: "{{ vault_sat_passwd }}"
    sat_orgid: "{{ vault_sat_orgid }}"
    redhat_env: "{{ vault_redhat_env }}"
    registry_host: utility.lab.example.com
    registry_port: 5000
    host_port: 5000
    registry_image: docker.io/library/registry:2
    podman_user: "{{ vault_podman_user }}"
    podman_passwd: "{{ vault_podman_passwd }}"
    ee_source_image: registry.redhat.io/ansible-automation-platform-25/ee-supported-rhel9:latest
    ee_target_image: "{{ registry_host }}:{{ registry_port }}/ee-supported-rhel9:latest"

  tasks:
    - name: Register system with Red Hat Subscription Management
      community.general.redhat_subscription:
        username: "{{ sat_user }}"
        password: "{{ sat_passwd }}"
        org_id: "{{ sat_orgid }}"
        environment: "{{ redhat_env }}"
        state: present

    - name: Install required packages
      ansible.builtin.dnf:
        name:
          - httpd
          - firewalld
          - podman
          - policycoreutils-python-utils
        state: present

    - name: Enable and start httpd
      ansible.builtin.service:
        name: httpd
        state: started
        enabled: true

    - name: Enable and start firewalld
      ansible.builtin.service:
        name: firewalld
        state: started
        enabled: true

    - name: Allow HTTP service through firewall
      ansible.posix.firewalld:
        service: http
        permanent: true
        state: enabled
        immediate: true

    - name: Allow registry port through firewall
      ansible.posix.firewalld:
        port: "{{ registry_port }}/tcp"
        permanent: true
        state: enabled
        immediate: true

    - name: Ensure correct permissions on web root
      ansible.builtin.file:
        path: /var/www/html
        recurse: true
        mode: "0755"

    - name: Set SELinux context for Ansible Automation Platform content
      community.general.sefcontext:
        target: "/var/www/html/ansible-automation-platform(/.*)?"
        setype: httpd_sys_content_t
        state: present

    - name: Set SELinux context for RHEL 9 content
      community.general.sefcontext:
        target: "/var/www/html/rhel9(/.*)?"
        setype: httpd_sys_content_t
        state: present

    - name: Set SELinux context for files
      community.general.sefcontext:
        target: "/var/www/html/files(/.*)?"
        setype: httpd_sys_content_t
        state: present

    - name: Restore SELinux contexts
      ansible.builtin.command: restorecon -Rv /var/www/html
      changed_when: false

    # Quadlet unit: systemd generates registry.service from this file after a
    # daemon-reload. [Install] WantedBy covers boot-time activation, so the
    # unit only needs to be started, not enabled.
    - name: Create registry quadlet file
      ansible.builtin.copy:
        dest: /etc/containers/systemd/registry.container
        mode: "0644"
        content: |
          [Unit]
          Description=Registry

          [Container]
          ContainerName=registry
          Image={{ registry_image }}
          # PublishPort is hostPort:containerPort
          PublishPort={{ host_port }}:{{ registry_port }}

          [Install]
          WantedBy=multi-user.target

    # Required so systemd picks up the quadlet-generated unit.
    - name: Reload Systemd Daemons
      ansible.builtin.systemd:
        daemon_reload: true

    - name: Start registry.service
      ansible.builtin.systemd:
        name: registry.service
        state: started

    - name: Create containers config directory
      ansible.builtin.file:
        path: /root/.config/containers
        state: directory
        mode: "0700"

    # The local registry serves plain HTTP; tell podman (running as root) not
    # to require TLS for it.
    - name: Configure insecure registry
      ansible.builtin.copy:
        dest: /root/.config/containers/registries.conf
        mode: "0600"
        content: |
          [[registry]]
          location = "{{ registry_host }}:{{ registry_port }}"
          insecure = true

    - name: Login to Red Hat registry
      containers.podman.podman_login:
        username: "{{ podman_user }}"
        password: "{{ podman_passwd }}"
        registry: registry.redhat.io

    - name: Pull Execution Environment image
      containers.podman.podman_image:
        name: "{{ ee_source_image }}"
        state: present

    - name: Tag EE image for local registry
      ansible.builtin.command:
        cmd: podman tag {{ ee_source_image }} {{ ee_target_image }}
      changed_when: true

    - name: Push EE image to local registry
      ansible.builtin.command:
        cmd: podman push --remove-signatures {{ ee_target_image }}
      changed_when: true

    - name: Install chrony package
      ansible.builtin.package:
        name: chrony
        state: present

    - name: Configure chrony as NTP server
      ansible.builtin.lineinfile:
        path: /etc/chrony.conf
        regexp: '^allow'
        line: 'allow 0.0.0.0/0'
        state: present

    - name: Ensure chrony service is enabled and started
      ansible.builtin.service:
        name: chronyd
        state: started
        enabled: true

    # firewalld lives in ansible.posix (there is no ansible.builtin.firewalld).
    # The previous guard on ansible_facts.services could never pass because no
    # service_facts task runs in this play; firewalld is installed and started
    # by earlier tasks in this play, so no condition is needed.
    - name: Open NTP service in firewall
      ansible.posix.firewalld:
        service: ntp
        permanent: true
        state: enabled
        immediate: true

###############################################################################
# Play 4: Configure the Controller node
#
# Points dnf at the utility mirror, installs the Ansible tooling, and lays
# down per-user configuration (registries.conf, ansible.cfg,
# ansible-navigator.yml, a smoke-test playbook) for the ansible user.
###############################################################################
- name: Configure the Controller node
  hosts: controller
  become: true

  vars:
    registry_host: utility.lab.example.com
    registry_port: 5000

  tasks:
    # NOTE(review): the AppStream gpgkey path (rhel9/rpm-gpg/...) differs from
    # the other two stanzas (rhel9/...) -- confirm which path exists on the
    # utility web server.
    - name: Create repo file
      ansible.builtin.copy:
        dest: /etc/yum.repos.d/rhce.repo
        mode: "0644"
        content: |
          [ansible-automation-platform-2.5]
          name=Ansible Automation Platform 2.5
          metadata_expire=-1
          gpgcheck=1
          enabled=1
          baseurl=http://utility.lab.example.com/ansible-automation-platform/2.5
          gpgkey=http://utility.lab.example.com/rhel9/RPM-GPG-KEY-redhat-release

          [BaseOS]
          name=BaseOS Packages Red Hat Enterprise Linux 9
          metadata_expire=-1
          gpgcheck=1
          enabled=1
          baseurl=http://utility.lab.example.com/rhel9/BaseOS
          gpgkey=http://utility.lab.example.com/rhel9/RPM-GPG-KEY-redhat-release

          [AppStream]
          name=AppStream Packages Red Hat Enterprise Linux 9
          metadata_expire=-1
          gpgcheck=1
          enabled=1
          baseurl=http://utility.lab.example.com/rhel9/AppStream/
          gpgkey=http://utility.lab.example.com/rhel9/rpm-gpg/RPM-GPG-KEY-redhat-release

    - name: Install required packages
      ansible.builtin.dnf:
        name:
          - podman
          - ansible-core
          - ansible-navigator
        state: present

    - name: Create directories
      ansible.builtin.file:
        path: "{{ item }}"
        state: directory
        mode: "0700"
        owner: ansible
        group: ansible
      loop:
        - /home/ansible/.config
        - /home/ansible/.config/containers
        - /home/ansible/ansible
        - /home/ansible/ansible/roles
        - /home/ansible/ansible/mycollections

    # Per-user registries.conf so rootless podman accepts the plain-HTTP
    # lab registry.
    - name: Configure insecure registry
      ansible.builtin.copy:
        dest: /home/ansible/.config/containers/registries.conf
        mode: "0600"
        owner: ansible
        group: ansible
        content: |
          [[registry]]
          location = "{{ registry_host }}:{{ registry_port }}"
          insecure = true

    # owner/group must be set explicitly: the play runs with become, so
    # without them these files end up owned by root inside ansible's home.
    - name: Configure ansible.cfg
      ansible.builtin.copy:
        dest: /home/ansible/ansible/ansible.cfg
        mode: "0644"
        owner: ansible
        group: ansible
        content: |
          [defaults]
          inventory = /home/ansible/ansible/inventory
          remote_user = ansible
          roles_path = /home/ansible/ansible/roles
          collections_path = /home/ansible/ansible/mycollections

    - name: Configure ansible-navigator.yml
      ansible.builtin.copy:
        dest: /home/ansible/ansible/ansible-navigator.yml
        mode: "0644"
        owner: ansible
        group: ansible
        content: |
          ---
          ansible-navigator:
            execution-environment:
              image: utility.lab.example.com:5000/ee-supported-rhel9:latest
              pull:
                policy: missing
            playbook-artifact:
              enable: false

    - name: Create test.yml
      ansible.builtin.copy:
        dest: /home/ansible/ansible/test.yml
        mode: "0644"
        owner: ansible
        group: ansible
        content: |
          ---
          - name: A simple playbook to test that Ansible is configured
            hosts: localhost
            tasks:
              - name: Run test playbook
                ansible.builtin.debug:
                  msg: "If you're reading this, Ansible is configured on your system."