diff --git a/docs/dictionary/en-custom.txt b/docs/dictionary/en-custom.txt
index 8ee71b697e..c7b548e78c 100644
--- a/docs/dictionary/en-custom.txt
+++ b/docs/dictionary/en-custom.txt
@@ -458,6 +458,7 @@ svg
 svgrepo
 svm
 systemd
+tayga
 tcib
 tdciagigtlesa
 tempestconf
diff --git a/roles/config_drive/templates/network-config.j2 b/roles/config_drive/templates/network-config.j2
index 4c96d4445e..59efc6e4ac 100644
--- a/roles/config_drive/templates/network-config.j2
+++ b/roles/config_drive/templates/network-config.j2
@@ -1 +1 @@
-{{ cifmw_config_drive_networkconfig | to_nice_yaml(indent=2, default_style="\"") }}
+{{ cifmw_config_drive_networkconfig | to_nice_yaml(indent=2) }}
diff --git a/roles/nat64_appliance/README.md b/roles/nat64_appliance/README.md
index 56738ee679..0599477e48 100644
--- a/roles/nat64_appliance/README.md
+++ b/roles/nat64_appliance/README.md
@@ -1,12 +1,31 @@
 # nat64_appliance
 
-`diskimage-builder` definition and element to build a NAT64 + DNS64 appliance VM image.
+* `main.yml`: Tasks to build a NAT64 + DNS64 appliance VM image using `diskimage-builder`.
+* `deploy.yml`: Tasks to deploy the networks and the appliance VM on a libvirt hypervisor.
+* `cleanup.yml`: Tasks to destroy and undefine the VM and networks on a libvirt hypervisor, and delete the built image.
 
 ## Parameters
 
 * `cifmw_nat64_appliance_basedir`: (String) Base directory. Defaults to `{{ cifmw_basedir }}` which defaults to `~/ci-framework-data`.
 * `cifmw_nat64_appliance_workdir`: (String) Working directory. Defaults to `{{ cifmw_nat64_appliance_basedir }}/nat64_appliance`.
 * `cifmw_nat64_appliance_venv_dir`: (String) Python virtual environment directory. Defaults to `{{ cifmw_nat64_appliance_workdir }}/venv`.
+* `cifmw_nat64_libvirt_uri`: (String) The libvirt URI for the hypervisor to deploy on. Defaults to `qemu:///system`.
+* `cifmw_nat64_network_ipv4_name`: (String) Name of the nat64 IPv4 libvirt network. Defaults to: `nat64-net-v4`.
+* `cifmw_nat64_network_ipv4_bridge_name`: (String) Bridge name for the nat64 IPv4 libvirt network. Defaults to: `br-64v4`.
+* `cifmw_nat64_network_ipv4_address`: (String) IP address for the nat64 IPv4 libvirt network. Defaults to: `172.31.255.1`.
+* `cifmw_nat64_network_ipv4_prefix`: (Integer) IP prefix length for the nat64 IPv4 libvirt network. Defaults to: `24`.
+* `cifmw_nat64_network_ipv6_name`: (String) Name of the nat64 IPv6 libvirt network. Defaults to: `nat64-net-v6`.
+* `cifmw_nat64_network_ipv6_bridge_name`: (String) The bridge name for the nat64 IPv6 libvirt network. Defaults to: `br-64v6`.
+* `cifmw_nat64_network_ipv6_address`: (String) IP address for the nat64 IPv6 libvirt network. Defaults to: `fd00:abcd:abcd:fc00::1`.
+* `cifmw_nat64_network_ipv6_prefix`: (Integer) IP prefix length for the nat64 IPv6 libvirt network. Defaults to: `64`.
+* `cifmw_nat64_appliance_name`: (String) Name and hostname for the nat64 appliance VM. Defaults to: `nat64-appliance`.
+* `cifmw_nat64_appliance_ipv4_address`: (String) IPv4 address for the nat64 appliance VM. Defaults to: `172.31.255.2`.
+* `cifmw_nat64_appliance_ipv6_address`: (String) IPv6 address for the nat64 appliance VM. Defaults to: `fd00:abcd:abcd:fc00::2`.
+* `cifmw_nat64_appliance_memory`: (Integer) Memory in GiB for the nat64 appliance VM. Defaults to: `2`.
+* `cifmw_nat64_appliance_cpus`: (Integer) Virtual CPUs for the nat64 appliance VM. Defaults to: `2`.
+* `cifmw_nat64_appliance_ssh_pub_keys`: (List) List of SSH public keys for the nat64 appliance VM. Defaults to: `[]`.
+* `cifmw_nat64_ipv6_prefix`: (String) IPv6 prefix for nat64. Defaults to: `fd00:abcd:abcd:fc00::/64`.
+* `cifmw_nat64_ipv6_tayga_address`: (String) Tayga IPv6 address. Defaults to: `fd00:abcd:abcd:fc00::3`.
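+
+All of the above have working defaults. As a rough sketch (the values below are illustrative only), individual settings can be overridden where the role is included:
+
+```
+- name: "Deploy the nat64 appliance and networks"
+  vars:
+    cifmw_nat64_appliance_memory: 4
+    cifmw_nat64_appliance_cpus: 4
+    cifmw_nat64_network_ipv6_name: my-nat64-net-v6
+    cifmw_nat64_network_ipv6_bridge_name: br-my64v6
+  ansible.builtin.include_role:
+    name: nat64_appliance
+    tasks_from: deploy.yml
+```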
 
 ## Building the image
 
@@ -18,12 +37,34 @@ Include the `nat64_appliance` role in a playbook. For example:
   roles:
     - nat64_appliance
 ```
-The built image will be in: `{{ cifmw_basedir }}/artifacts/roles/nat64-appliance/nat64-appliance.qcow2`
+
+The built image will be in: `{{ cifmw_nat64_appliance_workdir }}/nat64-appliance.qcow2`
 
 ## Using the nat64-appliance
 
-- [With Openstack cloud](#with-openstack-cloud){#toc-with-openstack-cloud}
 - [With Libvirt](#with-libvirt){#toc-with-libvirt}
+- [With Openstack cloud](#with-openstack-cloud){#toc-with-openstack-cloud}
+
+### With Libvirt
+
+```
+- name: "Build nat64 appliance image"
+  ansible.builtin.include_role:
+    name: nat64_appliance
+- name: "Deploy the nat64 appliance and networks"
+  ansible.builtin.include_role:
+    name: nat64_appliance
+    tasks_from: deploy.yml
+```
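+
+To be able to log in to the deployed appliance VM, pass one or more SSH public keys at deploy time. A minimal sketch (the key below is a placeholder, not a real key):
+```
+- name: "Deploy the nat64 appliance and networks"
+  vars:
+    cifmw_nat64_appliance_ssh_pub_keys:
+      - "ssh-ed25519 AAAAC3Nza... user@example.com"
+  ansible.builtin.include_role:
+    name: nat64_appliance
+    tasks_from: deploy.yml
+```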
+
+To clean up the libvirt nat64 deployment:
+```
+- name: "Cleanup the nat64 appliance and networks"
+  ansible.builtin.include_role:
+    name: nat64_appliance
+    tasks_from: cleanup.yml
+```
+
 
 ### With Openstack cloud
@@ -220,7 +261,3 @@ $ ssh -J fedora@192.168.254.164 fedora@fd00:abcd:aaaa:fc00::2b8
 PING sunet.se(fd00:abcd:abcd:fcff::259c:c033 (fd00:abcd:abcd:fcff::259c:c033)) 56 data bytes
 64 bytes from fd00:abcd:abcd:fcff::259c:c033 (fd00:abcd:abcd:fcff::259c:c033): icmp_seq=1 ttl=53 time=4.91 ms
 ```
-
-### With Libvirt
-
-TODO
diff --git a/roles/nat64_appliance/defaults/main.yml b/roles/nat64_appliance/defaults/main.yml
index 98b58e14f0..610f8ff9a9 100644
--- a/roles/nat64_appliance/defaults/main.yml
+++ b/roles/nat64_appliance/defaults/main.yml
@@ -20,3 +20,25 @@ cifmw_nat64_appliance_basedir: >-
   }}
 cifmw_nat64_appliance_workdir: "{{ cifmw_nat64_appliance_basedir }}/nat64_appliance"
 cifmw_nat64_appliance_venv_dir: "{{ cifmw_nat64_appliance_workdir }}/venv"
+
+cifmw_nat64_libvirt_uri: "qemu:///system"
+cifmw_nat64_network_ipv4_name: nat64-net-v4
+cifmw_nat64_network_ipv4_bridge_name: br-64v4
+cifmw_nat64_network_ipv4_address: 172.31.255.1
+cifmw_nat64_network_ipv4_prefix: 24
+
+cifmw_nat64_network_ipv6_name: nat64-net-v6
+cifmw_nat64_network_ipv6_bridge_name: br-64v6
+cifmw_nat64_network_ipv6_address: fd00:abcd:abcd:fc00::1
+cifmw_nat64_network_ipv6_prefix: 64
+cifmw_nat64_appliance_name: nat64-appliance
+cifmw_nat64_appliance_ipv4_address: 172.31.255.2
+cifmw_nat64_appliance_ipv6_address: fd00:abcd:abcd:fc00::2
+
+cifmw_nat64_appliance_memory: 2
+cifmw_nat64_appliance_cpus: 2
+cifmw_nat64_appliance_ssh_pub_keys: []
+
+
+cifmw_nat64_ipv6_prefix: "fd00:abcd:abcd:fc00::/64"
+cifmw_nat64_ipv6_tayga_address: "fd00:abcd:abcd:fc00::3"
diff --git a/roles/nat64_appliance/files/nat64-appliance.yaml b/roles/nat64_appliance/files/nat64-appliance.yaml
index 62f3e5e7ea..76163ff4a0 100644
--- a/roles/nat64_appliance/files/nat64-appliance.yaml
+++ b/roles/nat64_appliance/files/nat64-appliance.yaml
@@ -8,12 +8,12 @@
     - block-device-efi
     - package-installs
     - nat64-router
+    - reset-bls-entries # Requires edpm-image-builder elements.
   environment:
     DIB_RELEASE: '9-stream'
     DIB_PYTHON_VERSION: '3'
     DIB_IMAGE_SIZE: '2'
     COMPRESS_IMAGE: '1'
-    TMP_DIR: '/var/tmp'
     DIB_BLOCK_DEVICE_CONFIG: |
       - local_loop:
           name: image0
@@ -34,6 +34,16 @@
             - name: BSP
              type: 'EF02'
              size: 8MiB
+            - name: boot
+              type: '8300'
+              size: 512MiB
+              mkfs:
+                type: xfs
+                mount:
+                  mount_point: /boot
+                  fstab:
+                    options: "defaults"
+                    fsck-passno: 1
             - name: root
              type: '8300'
              size: 100%
diff --git a/roles/nat64_appliance/molecule/default/cleanup.yml b/roles/nat64_appliance/molecule/default/cleanup.yml
new file mode 100644
index 0000000000..395b8b1fda
--- /dev/null
+++ b/roles/nat64_appliance/molecule/default/cleanup.yml
@@ -0,0 +1,38 @@
+- name: Cleanup
+  hosts: instance
+  vars:
+    ansible_user_dir: "{{ lookup('env', 'HOME') }}"
+  tasks:
+    - name: "Destroy the test-node"
+      community.libvirt.virt:
+        command: destroy
+        name: test-node
+        uri: 'qemu:///system'
+
+    - name: "Undefine the test-node"
+      community.libvirt.virt:
+        command: undefine
+        name: test-node
+        force: true
+        uri: 'qemu:///system'
+
+    - name: Destroy the test network
+      register: net_destroy
+      community.libvirt.virt_net:
+        command: destroy
+        name: br-mol
+        uri: 'qemu:///system'
+      failed_when:
+        - net_destroy.rc is defined
+        - net_destroy.rc > 1
+
+    - name: Undefine the test network
+      community.libvirt.virt_net:
+        command: undefine
+        name: br-mol
+        uri: 'qemu:///system'
+
+    - name: "Cleanup the nat64 appliance and networks"
+      ansible.builtin.include_role:
+        name: nat64_appliance
+        tasks_from: cleanup.yml
diff --git a/roles/nat64_appliance/molecule/default/converge.yml b/roles/nat64_appliance/molecule/default/converge.yml
index 321ee0c8d1..07eb6be415 100644
--- a/roles/nat64_appliance/molecule/default/converge.yml
+++ b/roles/nat64_appliance/molecule/default/converge.yml
@@ -15,10 +15,273 @@
 # under the License.
 
 - name: Converge
-  hosts: all
+  hosts: instance
+  vars:
+    ansible_user_dir: "{{ lookup('env', 'HOME') }}"
+    cifmw_basedir: "/opt/basedir"
   tasks:
+    - name: Create SSH keypair
+      register: _test_key
+      community.crypto.openssh_keypair:
+        comment: "test-key"
+        path: "{{ (ansible_user_dir, '.ssh/id_test') | path_join }}"
+        type: "ecdsa"
+
+    - name: Discover latest image
+      when:
+        - cifmw_discovered_image_url is not defined
+      ansible.builtin.include_role:
+        name: discover_latest_image
+
+    - name: Download latest image
+      ansible.builtin.get_url:
+        url: "{{ cifmw_discovered_image_url }}"
+        dest: "{{ cifmw_basedir }}"
+        timeout: 20
+      register: result
+      until: result is success
+      retries: 60
+      delay: 10
+
     - name: Build nat64 appliance image
       vars:
-        extra_args: "--dry-run"
+        # TODO(hjensas): Running as root should not be required here.
+        # But the CI job fails with permission issue unless using root.
+        # This permission error is only seen in CI and when using the
+        # ci-framework reproducer.
+        cifmw_nat64_appliance_run_dib_as_root: true
       ansible.builtin.include_role:
         name: nat64_appliance
+
+    - name: Fix permissions on nat64_appliance dir - because we ran dib as root
+      become: true
+      ansible.builtin.file:
+        path: "{{ cifmw_basedir }}/nat64_appliance"
+        state: directory
+        recurse: true
+        owner: "{{ ansible_user_id }}"
+        group: "{{ ansible_user_gid }}"
+
+    - name: "Deploy the nat64 appliance and networks"
+      vars:
+        cifmw_nat64_appliance_ssh_pub_keys:
+          - "{{ _test_key.public_key }}"
+      ansible.builtin.include_role:
+        name: nat64_appliance
+        tasks_from: deploy.yml
+
+    - name: Set MAC address facts
+      ansible.builtin.set_fact:
+        test_node_mac_address: "{{ '52:54:00' | community.general.random_mac }}"
+
+    - name: Define an IPv6 network for test node
+      community.libvirt.virt_net:
+        command: define
+        name: br-mol
+        xml: |-
+          br-mol
+        uri: 'qemu:///system'
+
+    - name: Create an IPv6 network for test node
+      community.libvirt.virt_net:
+        command: create
+        name: br-mol
+        uri: 'qemu:///system'
+
+    - name: Ensure the IPv6 network for test node is active
+      community.libvirt.virt_net:
+        state: active
+        name: br-mol
+        uri: 'qemu:///system'
+
+    - name: Ensure the IPv6 network for test node is enabled to autostart
+      community.libvirt.virt_net:
+        autostart: true
+        name: br-mol
+        uri: 'qemu:///system'
+
+    - name: Generate test node UUID
+      ansible.builtin.set_fact:
+        test_node_uuid: "{{ 99999999 | random | to_uuid | lower }}"
+
+    - name: Make a copy of the discovered/downloaded image
+      ansible.builtin.copy:
+        src: "{{ cifmw_basedir }}/{{ cifmw_discovered_image_name }}"
+        dest: "{{ cifmw_basedir }}/{{ test_node_uuid }}.qcow2"
+        owner: "{{ ansible_user_id }}"
+        group: "{{ ansible_user_gid }}"
+        mode: '0644'
+
+    - name: Create the config-drive ISO for the test node
+      vars:
+        cifmw_config_drive_iso_image: "{{ cifmw_basedir }}/{{ test_node_uuid }}.iso"
+        cifmw_config_drive_uuid: "{{ test_node_uuid }}"
+        cifmw_config_drive_name: mol-test-node
+        cifmw_config_drive_hostname: mol-test-node
+        cifmw_config_drive_userdata:
+          ssh_authorized_keys:
+            - "{{ _test_key.public_key }}"
+        cifmw_config_drive_networkconfig:
+          network:
+            version: 2
+            ethernets:
+              id0:
+                match:
+                  macaddress: "{{ test_node_mac_address }}"
+                addresses:
+                  - 'fd00:abcd:aaaa::101/64'
+                routes:
+                  - to: '::/0'
+                    via: 'fd00:abcd:aaaa::1'
+                    on-link: true
+                nameservers:
+                  addresses:
+                    - 'fd00:abcd:aaaa::1'
+      ansible.builtin.include_role:
+        name: config_drive
+
+    - name: Define test-node VM
+      community.libvirt.virt:
+        command: define
+        xml: |
+          test-node
+          {{ test_node_uuid }}
+          1
+          2
+          hvm
+          /usr/libexec/qemu-kvm