diff --git a/docs/dictionary/en-custom.txt b/docs/dictionary/en-custom.txt
index 8ee71b697e..c7b548e78c 100644
--- a/docs/dictionary/en-custom.txt
+++ b/docs/dictionary/en-custom.txt
@@ -458,6 +458,7 @@ svg
svgrepo
svm
systemd
+tayga
tcib
tdciagigtlesa
tempestconf
diff --git a/roles/config_drive/templates/network-config.j2 b/roles/config_drive/templates/network-config.j2
index 4c96d4445e..59efc6e4ac 100644
--- a/roles/config_drive/templates/network-config.j2
+++ b/roles/config_drive/templates/network-config.j2
@@ -1 +1 @@
-{{ cifmw_config_drive_networkconfig | to_nice_yaml(indent=2, default_style="\"") }}
+{{ cifmw_config_drive_networkconfig | to_nice_yaml(indent=2) }}
diff --git a/roles/nat64_appliance/README.md b/roles/nat64_appliance/README.md
index 56738ee679..0599477e48 100644
--- a/roles/nat64_appliance/README.md
+++ b/roles/nat64_appliance/README.md
@@ -1,12 +1,31 @@
# nat64_appliance
-`diskimage-builder` definition and element to build a NAT64 + DNS64 appliance VM image.
+`main.yml`: Tasks to build a NAT64 + DNS64 appliance VM image, uses `diskimage-builder`.
+`deploy.yml`: Tasks to deploy networks and appliance VM on a libvirt hypervisor.
+`cleanup.yml`: Tasks to destroy and undefine the VM and networks on a libvirt hypervisor, and delete the built image.
## Parameters
* `cifmw_nat64_appliance_basedir`: (String) Base directory. Defaults to `{{ cifmw_basedir }}` which defaults to `~/ci-framework-data`.
* `cifmw_nat64_appliance_workdir`: (String) Working directory. Defaults to `{{ cifmw_nat64_appliance_basedir }}/nat64_appliance`.
* `cifmw_nat64_appliance_venv_dir`: (String) Python virtual environment directory. Defaults to `{{ cifmw_nat64_appliance_workdir }}/venv`.
+* `cifmw_nat64_libvirt_uri`: (String) The libvirt URI for the hypervisor to deploy on. Defaults to `qemu:///system`.
+* `cifmw_nat64_network_ipv4_name`: (String) Name of the nat64 IPv4 libvirt network. Defaults to: `nat64-net-v4`.
+* `cifmw_nat64_network_ipv4_bridge_name`: (String) Bridge name for the nat64 IPv4 libvirt network. Defaults to: `br-64v4`.
+* `cifmw_nat64_network_ipv4_address`: (String) IP address for the nat64 IPv4 libvirt network. Defaults to: `172.31.255.1`.
+* `cifmw_nat64_network_ipv4_prefix`: (Integer) IP prefix length for the nat64 IPv4 libvirt network. Defaults to: `24`.
+* `cifmw_nat64_network_ipv6_name`: (String) Name of the nat64 IPv6 libvirt network. Defaults to: `nat64-net-v6`.
+* `cifmw_nat64_network_ipv6_bridge_name`: (String) The bridge name for the nat64 IPv6 libvirt network. Defaults to: `br-64v6`.
+* `cifmw_nat64_network_ipv6_address`: (String) IP address for the nat64 IPv6 libvirt network. Defaults to: `fd00:abcd:abcd:fc00::1`.
+* `cifmw_nat64_network_ipv6_prefix`: (Integer) IP prefix length for the nat64 IPv6 libvirt network. Defaults to: `64`.
+* `cifmw_nat64_appliance_name`: (String) Name and hostname for the nat64 appliance VM. Defaults to: `nat64-appliance`.
+* `cifmw_nat64_appliance_ipv4_address`: (String) IPv4 address for the nat64 appliance VM. Defaults to: `172.31.255.2`.
+* `cifmw_nat64_appliance_ipv6_address`: (String) IPv6 address for the nat64 appliance VM. Defaults to: `fd00:abcd:abcd:fc00::2`.
+* `cifmw_nat64_appliance_memory`: (Integer) Memory in GiB for the nat64 appliance VM. Defaults to: `2`.
+* `cifmw_nat64_appliance_cpus`: (Integer) Virtual CPUs for the nat64 appliance VM. Defaults to: `2`.
+* `cifmw_nat64_appliance_ssh_pub_keys`: (List) List of SSH public keys for the nat64 appliance VM. Defaults to: `[]`.
+* `cifmw_nat64_ipv6_prefix`: (String) IPv6 prefix for nat64. Defaults to: `fd00:abcd:abcd:fc00::/64`.
+* `cifmw_nat64_ipv6_tayga_address`: (String) Tayga IPv6 address. Defaults to: `fd00:abcd:abcd:fc00::3`.
## Building the image
@@ -18,12 +37,34 @@ Include the `nat64_appliance` role in a playbook. For example:
roles:
- nat64_appliance
```
-The built image will be in: `{{ cifmw_basedir }}/artifacts/roles/nat64-appliance/nat64-appliance.qcow2`
+
+The built image will be in: `{{ cifmw_nat64_appliance_workdir }}/nat64-appliance.qcow2`
## Using the nat64-appliance
-- [With Openstack cloud](#with-openstack-cloud){#toc-with-openstack-cloud}
- [With Libvirt](#with-libvirt){#toc-with-libvirt}
+- [With Openstack cloud](#with-openstack-cloud){#toc-with-openstack-cloud}
+
+### With Libvirt
+
+```
+- name: "Build nat64 appliance image"
+ ansible.builtin.include_role:
+ name: nat64_appliance
+- name: "Deploy the nat64 appliance and networks"
+ ansible.builtin.include_role:
+ name: nat64_appliance
+ tasks_from: deploy.yml
+```
+
+To cleanup the libvirt nat64 deployment:
+```
+- name: "Build nat64 appliance image"
+ ansible.builtin.include_role:
+ name: nat64_appliance
+ tasks_from: cleanup.yml
+```
+
### With Openstack cloud
@@ -220,7 +261,3 @@ $ ssh -J fedora@192.168.254.164 fedora@fd00:abcd:aaaa:fc00::2b8
PING sunet.se(fd00:abcd:abcd:fcff::259c:c033 (fd00:abcd:abcd:fcff::259c:c033)) 56 data bytes
64 bytes from fd00:abcd:abcd:fcff::259c:c033 (fd00:abcd:abcd:fcff::259c:c033): icmp_seq=1 ttl=53 time=4.91 ms
```
-
-### With Libvirt
-
-TODO
diff --git a/roles/nat64_appliance/defaults/main.yml b/roles/nat64_appliance/defaults/main.yml
index 98b58e14f0..610f8ff9a9 100644
--- a/roles/nat64_appliance/defaults/main.yml
+++ b/roles/nat64_appliance/defaults/main.yml
@@ -20,3 +20,25 @@ cifmw_nat64_appliance_basedir: >-
}}
cifmw_nat64_appliance_workdir: "{{ cifmw_nat64_appliance_basedir }}/nat64_appliance"
cifmw_nat64_appliance_venv_dir: "{{ cifmw_nat64_appliance_workdir }}/venv"
+
+cifmw_nat64_libvirt_uri: "qemu:///system"
+cifmw_nat64_network_ipv4_name: nat64-net-v4
+cifmw_nat64_network_ipv4_bridge_name: br-64v4
+cifmw_nat64_network_ipv4_address: 172.31.255.1
+cifmw_nat64_network_ipv4_prefix: 24
+
+cifmw_nat64_network_ipv6_name: nat64-net-v6
+cifmw_nat64_network_ipv6_bridge_name: br-64v6
+cifmw_nat64_network_ipv6_address: fd00:abcd:abcd:fc00::1
+cifmw_nat64_network_ipv6_prefix: 64
+cifmw_nat64_appliance_name: nat64-appliance
+cifmw_nat64_appliance_ipv4_address: 172.31.255.2
+cifmw_nat64_appliance_ipv6_address: fd00:abcd:abcd:fc00::2
+
+cifmw_nat64_appliance_memory: 2
+cifmw_nat64_appliance_cpus: 2
+cifmw_nat64_appliance_ssh_pub_keys: []
+
+
+cifmw_nat64_ipv6_prefix: "fd00:abcd:abcd:fc00::/64"
+cifmw_nat64_ipv6_tayga_address: "fd00:abcd:abcd:fc00::3"
diff --git a/roles/nat64_appliance/files/nat64-appliance.yaml b/roles/nat64_appliance/files/nat64-appliance.yaml
index 62f3e5e7ea..76163ff4a0 100644
--- a/roles/nat64_appliance/files/nat64-appliance.yaml
+++ b/roles/nat64_appliance/files/nat64-appliance.yaml
@@ -8,12 +8,12 @@
- block-device-efi
- package-installs
- nat64-router
+ - reset-bls-entries # Requires edpm-image-builder elements.
environment:
DIB_RELEASE: '9-stream'
DIB_PYTHON_VERSION: '3'
DIB_IMAGE_SIZE: '2'
COMPRESS_IMAGE: '1'
- TMP_DIR: '/var/tmp'
DIB_BLOCK_DEVICE_CONFIG: |
- local_loop:
name: image0
@@ -34,6 +34,16 @@
- name: BSP
type: 'EF02'
size: 8MiB
+ - name: boot
+ type: '8300'
+ size: 512MiB
+ mkfs:
+ type: xfs
+ mount:
+ mount_point: /boot
+ fstab:
+ options: "defaults"
+ fsck-passno: 1
- name: root
type: '8300'
size: 100%
diff --git a/roles/nat64_appliance/molecule/default/cleanup.yml b/roles/nat64_appliance/molecule/default/cleanup.yml
new file mode 100644
index 0000000000..395b8b1fda
--- /dev/null
+++ b/roles/nat64_appliance/molecule/default/cleanup.yml
@@ -0,0 +1,38 @@
+- name: Cleanup
+ hosts: instance
+ vars:
+ ansible_user_dir: "{{ lookup('env', 'HOME') }}"
+ tasks:
+ - name: "Destroy the test-node"
+ community.libvirt.virt:
+ command: destroy
+ name: test-node
+ uri: 'qemu:///system'
+
+ - name: "Undefine the test-node"
+ community.libvirt.virt:
+ command: undefine
+ name: test-node
+ force: true
+ uri: 'qemu:///system'
+
+ - name: Destroy the test network
+ register: net_destroy
+ community.libvirt.virt_net:
+ command: destroy
+ name: br-mol
+ uri: 'qemu:///system'
+ failed_when:
+ - net_destroy.rc is defined
+ - net_destroy.rc > 1
+
+ - name: Undefine the test network
+ community.libvirt.virt_net:
+ command: undefine
+ name: br-mol
+ uri: 'qemu:///system'
+
+ - name: "Cleanup the nat64 appliance and networks"
+ ansible.builtin.include_role:
+ name: nat64_appliance
+ tasks_from: cleanup.yml
diff --git a/roles/nat64_appliance/molecule/default/converge.yml b/roles/nat64_appliance/molecule/default/converge.yml
index 321ee0c8d1..07eb6be415 100644
--- a/roles/nat64_appliance/molecule/default/converge.yml
+++ b/roles/nat64_appliance/molecule/default/converge.yml
@@ -15,10 +15,273 @@
# under the License.
- name: Converge
- hosts: all
+ hosts: instance
+ vars:
+ ansible_user_dir: "{{ lookup('env', 'HOME') }}"
+ cifmw_basedir: "/opt/basedir"
tasks:
+ - name: Create SSH keypair
+ register: _test_key
+ community.crypto.openssh_keypair:
+ comment: "test-key"
+ path: "{{ (ansible_user_dir, '.ssh/id_test') | path_join }}"
+ type: "ecdsa"
+
+ - name: Discover latest image
+ when:
+ - cifmw_discovered_image_url is not defined
+ ansible.builtin.include_role:
+ name: discover_latest_image
+
+ - name: Download latest image
+ ansible.builtin.get_url:
+ url: "{{ cifmw_discovered_image_url }}"
+ dest: "{{ cifmw_basedir }}"
+ timeout: 20
+ register: result
+ until: result is success
+ retries: 60
+ delay: 10
+
- name: Build nat64 appliance image
vars:
- extra_args: "--dry-run"
+ # TODO(hjensas): Running as root should not be required here.
+ # But the CI job fails with permission issue unless using root.
+ # This permission error is only seen in CI and when using the
+ # ci-framework reproducer.
+ cifmw_nat64_appliance_run_dib_as_root: true
ansible.builtin.include_role:
name: nat64_appliance
+
+ - name: Fix permissions on nat64_appliance dir - because we ran dib as root
+ become: true
+ ansible.builtin.file:
+ path: "{{ cifmw_basedir }}/nat64_appliance"
+ state: directory
+ recurse: true
+ owner: "{{ ansible_user_id }}"
+ group: "{{ ansible_user_gid }}"
+
+ - name: "Deploy the nat64 appliance and networks"
+ vars:
+ cifmw_nat64_appliance_ssh_pub_keys:
+ - "{{ _test_key.public_key }}"
+ ansible.builtin.include_role:
+ name: nat64_appliance
+ tasks_from: deploy.yml
+
+ - name: Set MAC address facts
+ ansible.builtin.set_fact:
+ test_node_mac_address: "{{ '52:54:00' | community.general.random_mac }}"
+
+ - name: Define an IPv6 network for test node
+ community.libvirt.virt_net:
+ command: define
+ name: br-mol
+ xml: |-
+
+ br-mol
+
+
+
+
+
+
+
+ uri: 'qemu:///system'
+
+ - name: Create an IPv6 network for test node
+ community.libvirt.virt_net:
+ command: create
+ name: br-mol
+ uri: 'qemu:///system'
+
+ - name: Ensure the IPv6 network for test node is active
+ community.libvirt.virt_net:
+ state: active
+ name: br-mol
+ uri: 'qemu:///system'
+
+ - name: Ensure the IPv6 network for test node is enabled to autostart
+ community.libvirt.virt_net:
+ autostart: true
+ name: br-mol
+ uri: 'qemu:///system'
+
+ - name: Generate test node UUID
+ ansible.builtin.set_fact:
+ test_node_uuid: "{{ 99999999 | random | to_uuid | lower }}"
+
+ - name: Make a copy of the discovered/downloaded image
+ ansible.builtin.copy:
+ src: "{{ cifmw_basedir }}/{{ cifmw_discovered_image_name }}"
+ dest: "{{ cifmw_basedir }}/{{ test_node_uuid }}.qcow2"
+ owner: "{{ ansible_user_id }}"
+ group: "{{ ansible_user_gid }}"
+ mode: '0644'
+
+ - name: Create the config-drive ISO for the test node
+ vars:
+ cifmw_config_drive_iso_image: "{{ cifmw_basedir }}/{{ test_node_uuid }}.iso"
+ cifmw_config_drive_uuid: "{{ test_node_uuid }}"
+ cifmw_config_drive_name: mol-test-node
+ cifmw_config_drive_hostname: mol-test-node
+ cifmw_config_drive_userdata:
+ ssh_authorized_keys:
+ - "{{ _test_key.public_key }}"
+ cifmw_config_drive_networkconfig:
+ network:
+ version: 2
+ ethernets:
+ id0:
+ match:
+ macaddress: "{{ test_node_mac_address }}"
+ addresses:
+ - 'fd00:abcd:aaaa::101/64'
+ routes:
+ - to: '::/0'
+ via: 'fd00:abcd:aaaa::1'
+ on-link: true
+ nameservers:
+ addresses:
+ - 'fd00:abcd:aaaa::1'
+ ansible.builtin.include_role:
+ name: config_drive
+
+ - name: Define test-node VM
+ community.libvirt.virt:
+ command: define
+ xml: |
+
+ test-node
+ {{ test_node_uuid }}
+ 1
+ 2
+
+ hvm
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ /usr/libexec/qemu-kvm
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ /dev/urandom
+
+
+
+
+
+ destroy
+ restart
+ destroy
+
+
+
+
+
+ uri: 'qemu:///system'
+
+ - name: Start test-node VM
+ community.libvirt.virt:
+ state: running
+ name: test-node
+ uri: 'qemu:///system'
+
+ - name: Wait for test node to be reachable via ssh
+ ansible.builtin.wait_for:
+ host: 'fd00:abcd:aaaa::101'
+ port: 22
+ state: present
+ delay: 10
+
+ - name: Add test node to inventory
+ ansible.builtin.add_host:
+ name: test-node
+ groups:
+ - test_nodes
+ ansible_host: 'fd00:abcd:aaaa::101'
+ ansible_ssh_user: 'cloud-user'
+ ansible_ssh_private_key_file: "{{ _test_key.filename }}"
+ ansible_ssh_extra_args: '-o StrictHostKeyChecking=no'
+
+ # Even though the node responds on port 22, allow some time to ensure
+ # ssh access for users is available.
+ - name: Wait a little to let the test instance boot.
+ ansible.builtin.pause:
+ seconds: 20
+
+ - name: Ping example.com (delegate to test-node)
+ delegate_to: test-node
+ register: _ping_example_com
+ ansible.builtin.command: "ping -c 2 example.com"
+
+ - name: Debug the ping example.com result
+ ansible.builtin.debug:
+ msg: "{{ item }}"
+ loop:
+ - "{{ _ping_example_com.rc }}"
+ - "{{ _ping_example_com.stdout_lines }}"
+ - "{{ _ping_example_com.stderr_lines }}"
diff --git a/roles/nat64_appliance/molecule/default/prepare.yml b/roles/nat64_appliance/molecule/default/prepare.yml
index d3594acc41..3c69c9256f 100644
--- a/roles/nat64_appliance/molecule/default/prepare.yml
+++ b/roles/nat64_appliance/molecule/default/prepare.yml
@@ -14,8 +14,19 @@
# License for the specific language governing permissions and limitations
# under the License.
-
- name: Prepare
hosts: all
+ vars:
+ cifmw_basedir: "/opt/basedir"
+ pre_tasks:
+ - name: Create custom basedir
+ become: true
+ ansible.builtin.file:
+ path: "{{ cifmw_basedir }}"
+ state: directory
+ owner: zuul
+ group: zuul
+ mode: "0755"
roles:
- role: test_deps
+ - role: libvirt_manager
diff --git a/roles/nat64_appliance/tasks/cleanup.yml b/roles/nat64_appliance/tasks/cleanup.yml
index 6461d1f587..0e1c2f6680 100644
--- a/roles/nat64_appliance/tasks/cleanup.yml
+++ b/roles/nat64_appliance/tasks/cleanup.yml
@@ -18,3 +18,38 @@
ansible.builtin.file:
state: absent
path: "{{ cifmw_nat64_appliance_workdir }}/nat64-appliance.qcow2"
+
+- name: Stop the nat64_appliance VM
+ community.libvirt.virt:
+ command: destroy
+ name: "{{ cifmw_nat64_appliance_name }}"
+ uri: "{{ cifmw_nat64_libvirt_uri }}"
+
+- name: Undefine the nat64_appliance VM
+ community.libvirt.virt:
+ command: undefine
+ name: "{{ cifmw_nat64_appliance_name }}"
+ force: true
+ uri: "{{ cifmw_nat64_libvirt_uri }}"
+
+- name: Destroy the nat64 networks
+ register: net_destroy
+ community.libvirt.virt_net:
+ command: destroy
+ name: "{{ item }}"
+ uri: "{{ cifmw_nat64_libvirt_uri }}"
+ loop:
+ - "{{ cifmw_nat64_network_ipv4_name }}"
+ - "{{ cifmw_nat64_network_ipv6_name }}"
+ failed_when:
+ - net_destroy.rc is defined
+ - net_destroy.rc > 1
+
+- name: Undefine the nat64 networks
+ community.libvirt.virt_net:
+ command: undefine
+ name: "{{ item }}"
+ uri: "{{ cifmw_nat64_libvirt_uri }}"
+ loop:
+ - "{{ cifmw_nat64_network_ipv4_name }}"
+ - "{{ cifmw_nat64_network_ipv6_name }}"
diff --git a/roles/nat64_appliance/tasks/deploy.yml b/roles/nat64_appliance/tasks/deploy.yml
new file mode 100644
index 0000000000..4a440f84ce
--- /dev/null
+++ b/roles/nat64_appliance/tasks/deploy.yml
@@ -0,0 +1,111 @@
+---
+- name: Set MAC address facts
+ ansible.builtin.set_fact:
+ cifmw_nat64_appliance_ipv4_mac_address: "{{ '52:54:00' | community.general.random_mac }}"
+ cifmw_nat64_appliance_ipv6_mac_address: "{{ '52:54:00' | community.general.random_mac }}"
+
+- name: Create the IPv4 libvirt network for nat64
+ community.libvirt.virt_net:
+ command: define
+ name: "{{ cifmw_nat64_network_ipv4_name }}"
+ xml: "{{ lookup('template', 'ipv4_network.xml.j2') }}"
+ uri: "{{ cifmw_nat64_libvirt_uri }}"
+
+- name: Ensure the IPv4 libvirt network for nat64 is created/started
+ community.libvirt.virt_net:
+ command: create
+ name: "{{ cifmw_nat64_network_ipv4_name }}"
+ uri: "{{ cifmw_nat64_libvirt_uri }}"
+
+- name: Ensure the IPv4 libvirt network for nat64 is active
+ community.libvirt.virt_net:
+ state: active
+ name: "{{ cifmw_nat64_network_ipv4_name }}"
+ uri: "{{ cifmw_nat64_libvirt_uri }}"
+
+- name: Ensure the IPv4 libvirt network for nat64 is enabled to autostart
+ community.libvirt.virt_net:
+ autostart: true
+ name: "{{ cifmw_nat64_network_ipv4_name }}"
+ uri: "{{ cifmw_nat64_libvirt_uri }}"
+
+- name: Create the IPv6 libvirt network for nat64
+ community.libvirt.virt_net:
+ command: define
+ name: "{{ cifmw_nat64_network_ipv6_name }}"
+ xml: "{{ lookup('template', 'ipv6_network.xml.j2') }}"
+ uri: "{{ cifmw_nat64_libvirt_uri }}"
+
+- name: Ensure the IPv6 libvirt network for nat64 is created/started
+ community.libvirt.virt_net:
+ command: create
+ name: "{{ cifmw_nat64_network_ipv6_name }}"
+ uri: "{{ cifmw_nat64_libvirt_uri }}"
+
+- name: Ensure the IPv6 libvirt network for nat64 is active
+ community.libvirt.virt_net:
+ state: active
+ name: "{{ cifmw_nat64_network_ipv6_name }}"
+ uri: "{{ cifmw_nat64_libvirt_uri }}"
+
+- name: Ensure the IPv6 libvirt network for nat64 is enabled to autostart
+ community.libvirt.virt_net:
+ autostart: true
+ name: "{{ cifmw_nat64_network_ipv6_name }}"
+ uri: "{{ cifmw_nat64_libvirt_uri }}"
+
+- name: "Generate nat64-appliance UUID"
+ ansible.builtin.set_fact:
+ nat64_appliance_uuid: "{{ 99999999 | random | to_uuid | lower }}"
+
+- name: "Create the config-drive ISO for the nat64-appliance"
+ vars:
+ cifmw_config_drive_iso_image: "{{ cifmw_nat64_appliance_workdir }}/{{ nat64_appliance_uuid }}.iso"
+ cifmw_config_drive_uuid: "{{ nat64_appliance_uuid }}"
+ cifmw_config_drive_name: "{{ cifmw_nat64_appliance_name }}"
+ cifmw_config_drive_hostname: "{{ cifmw_nat64_appliance_name }}"
+ cifmw_config_drive_userdata:
+ ssh_authorized_keys: "{{ cifmw_nat64_appliance_ssh_pub_keys }}"
+ write_files:
+ - path: "/etc/nat64/config-data"
+ owner: "root:root"
+ content: "{{ lookup('template', 'config-data.j2') }}"
+ cifmw_config_drive_networkconfig:
+ network:
+ version: 2
+ ethernets:
+ id0:
+ match:
+ macaddress: "{{ cifmw_nat64_appliance_ipv4_mac_address }}"
+ addresses:
+ - "{{ cifmw_nat64_appliance_ipv4_address }}/{{ cifmw_nat64_network_ipv4_prefix }}"
+ routes:
+ - to: '0.0.0.0/0'
+ via: "{{ cifmw_nat64_network_ipv4_address }}"
+ on-link: true
+ nameservers:
+ addresses:
+ - "{{ cifmw_nat64_network_ipv4_address }}"
+ id1:
+ match:
+ macaddress: "{{ cifmw_nat64_appliance_ipv6_mac_address }}"
+ addresses:
+ - "{{ cifmw_nat64_appliance_ipv6_address }}/{{ cifmw_nat64_network_ipv6_prefix }}"
+ routes:
+ - to: '::/0'
+ via: "{{ cifmw_nat64_network_ipv6_address }}"
+ on-link: true
+ ansible.builtin.include_role:
+ name: config_drive
+
+- name: "Define nat64-appliance VM"
+ community.libvirt.virt:
+ command: define
+ xml: "{{ lookup('template', 'domain.xml.j2') }}"
+ uri: "{{ cifmw_nat64_libvirt_uri }}"
+
+- name: "Start nat64-appliance VM"
+ community.libvirt.virt:
+ state: running
+ name: "{{ cifmw_nat64_appliance_name }}"
+ uri: "{{ cifmw_nat64_libvirt_uri }}"
diff --git a/roles/nat64_appliance/tasks/main.yml b/roles/nat64_appliance/tasks/main.yml
index 29fc8f5e78..2a9aa941ce 100644
--- a/roles/nat64_appliance/tasks/main.yml
+++ b/roles/nat64_appliance/tasks/main.yml
@@ -25,11 +25,10 @@
- name: Ensure working directory exists
ansible.builtin.file:
- path: "{{ cifmw_nat64_appliance_workdir }}"
+ path: "{{ cifmw_nat64_appliance_workdir }}/tmp"
state: directory
mode: "0755"
-
- name: Install required RPM packages
tags:
- packages
@@ -61,12 +60,21 @@
- "elements/"
- nat64-appliance.yaml
+- name: Clone edpm-image-builder (reset-bls-entries dib element)
+ ansible.builtin.git:
+ repo: https://github.com/openstack-k8s-operators/edpm-image-builder.git
+ dest: "{{ cifmw_nat64_appliance_workdir }}/edpm-image-builder"
+ version: main
+
- name: Build the nat64-appliance image using DIB
+ become: "{{ cifmw_nat64_appliance_run_dib_as_root | default(false) | bool }}"
environment:
- ELEMENTS_PATH: "{{ cifmw_nat64_appliance_workdir }}/elements"
+ ELEMENTS_PATH: "{{ cifmw_nat64_appliance_workdir }}/elements:{{ cifmw_nat64_appliance_workdir }}/edpm-image-builder/dib/"
DIB_IMAGE_CACHE: "{{ cifmw_nat64_appliance_workdir }}/cache"
+ DIB_DEBUG_TRACE: '1'
cifmw.general.ci_script:
chdir: "{{ cifmw_nat64_appliance_workdir }}"
output_dir: "{{ cifmw_nat64_appliance_basedir }}/artifacts"
creates: "{{ cifmw_nat64_appliance_workdir }}/nat64-appliance.qcow2"
script: "{{ cifmw_nat64_appliance_venv_dir }}/bin/diskimage-builder nat64-appliance.yaml {{ extra_args | default('') }}"
+ executable: "/bin/bash"
diff --git a/roles/nat64_appliance/templates/config-data.j2 b/roles/nat64_appliance/templates/config-data.j2
new file mode 100644
index 0000000000..65ffacb5fc
--- /dev/null
+++ b/roles/nat64_appliance/templates/config-data.j2
@@ -0,0 +1,8 @@
+# The IPv6 ip subnet, for example: fd00:abcd:abcd:fc00::/64
+NAT64_IPV6_PREFIX={{ cifmw_nat64_ipv6_prefix }}
+
+# The IPv6 host address, for example: fd00:abcd:abcd:fc00::2
+NAT64_HOST_IPV6={{ cifmw_nat64_appliance_ipv6_address }}
+
+# The IPv6 address used for the tayga tun interface, for example: fd00:abcd:abcd:fc00::3
+NAT64_TAYGA_IPV6={{ cifmw_nat64_ipv6_tayga_address }}
diff --git a/roles/nat64_appliance/templates/domain.xml.j2 b/roles/nat64_appliance/templates/domain.xml.j2
new file mode 100644
index 0000000000..12bf2b3cff
--- /dev/null
+++ b/roles/nat64_appliance/templates/domain.xml.j2
@@ -0,0 +1,123 @@
+
+ {{ cifmw_nat64_appliance_name }}
+ {{ nat64_appliance_uuid }}
+ {{ cifmw_nat64_appliance_memory }}
+ {{ cifmw_nat64_appliance_cpus }}
+
+ hvm
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ /usr/libexec/qemu-kvm
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ /dev/urandom
+
+
+
+
+
+ destroy
+ restart
+ destroy
+
+
+
+
+
diff --git a/roles/nat64_appliance/templates/ipv4_network.xml.j2 b/roles/nat64_appliance/templates/ipv4_network.xml.j2
new file mode 100644
index 0000000000..30e78ee269
--- /dev/null
+++ b/roles/nat64_appliance/templates/ipv4_network.xml.j2
@@ -0,0 +1,16 @@
+
+ {{ cifmw_nat64_network_ipv4_name }}
+
+
+
+
diff --git a/roles/nat64_appliance/templates/ipv6_network.xml.j2 b/roles/nat64_appliance/templates/ipv6_network.xml.j2
new file mode 100644
index 0000000000..e063f153be
--- /dev/null
+++ b/roles/nat64_appliance/templates/ipv6_network.xml.j2
@@ -0,0 +1,25 @@
+
+ {{ cifmw_nat64_network_ipv6_name }}
+
+
+
+
+
+