Skip to content

Commit 0acc406

Browse files
committed
WIP: tests: add test for bare-metal with IPv6
IPv6 brings some new complexities, particularly around IPAM.
1 parent 80806a8 commit 0acc406

File tree

4 files changed

+363
-27
lines changed

4 files changed

+363
-27
lines changed

.github/workflows/e2e.yml

+26
Original file line numberDiff line numberDiff line change
@@ -38,3 +38,29 @@ jobs:
3838
with:
3939
name: tests-e2e-scenarios-bare-metal
4040
path: /tmp/artifacts/
41+
42+
tests-e2e-scenarios-bare-metal-ipv6:
43+
runs-on: ubuntu-24.04
44+
timeout-minutes: 70
45+
steps:
46+
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
47+
with:
48+
path: ${{ env.GOPATH }}/src/k8s.io/kops
49+
50+
- name: Set up go
51+
uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed
52+
with:
53+
go-version-file: '${{ env.GOPATH }}/src/k8s.io/kops/go.mod'
54+
55+
- name: tests/e2e/scenarios/bare-metal/run-test
56+
working-directory: ${{ env.GOPATH }}/src/k8s.io/kops
57+
run: |
58+
timeout 60m tests/e2e/scenarios/bare-metal/scenario-ipv6
59+
env:
60+
ARTIFACTS: /tmp/artifacts
61+
- name: Archive production artifacts
62+
if: always()
63+
uses: actions/upload-artifact@v4
64+
with:
65+
name: tests-e2e-scenarios-bare-metal-ipv6
66+
path: /tmp/artifacts/

tests/e2e/scenarios/bare-metal/cleanup

+2
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,8 @@ sudo ip link del dev tap-vm0 || true
3838
sudo ip link del dev tap-vm1 || true
3939
sudo ip link del dev tap-vm2 || true
4040

41+
sudo ip link del dev br0 || true
42+
4143
rm -rf .build/vm0
4244
rm -rf .build/vm1
4345
rm -rf .build/vm2

tests/e2e/scenarios/bare-metal/run-test

+37-27
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,16 @@ fi
4646
rm -rf ${WORKDIR}/s3
4747
mkdir -p ${WORKDIR}/s3/
4848

49+
IPV4_PREFIX=10.123.45.
50+
51+
VM0_IP=${IPV4_PREFIX}10
52+
VM1_IP=${IPV4_PREFIX}11
53+
VM2_IP=${IPV4_PREFIX}12
54+
55+
# Start an SSH agent; enroll assumes SSH connectivity to the VMs with the key in the agent
56+
eval $(ssh-agent)
57+
ssh-add ${REPO_ROOT}/.build/.ssh/id_ed25519
58+
4959
# Start our VMs
5060
${REPO_ROOT}/tests/e2e/scenarios/bare-metal/start-vms
5161

@@ -55,13 +65,14 @@ echo "Waiting 10 seconds for VMs to start"
5565
sleep 10
5666

5767
# Remove from known-hosts in case of reuse
58-
ssh-keygen -f ~/.ssh/known_hosts -R 10.123.45.10 || true
59-
ssh-keygen -f ~/.ssh/known_hosts -R 10.123.45.11 || true
60-
ssh-keygen -f ~/.ssh/known_hosts -R 10.123.45.12 || true
68+
ssh-keygen -f ~/.ssh/known_hosts -R ${VM0_IP} || true
69+
ssh-keygen -f ~/.ssh/known_hosts -R ${VM1_IP} || true
70+
ssh-keygen -f ~/.ssh/known_hosts -R ${VM2_IP} || true
6171

62-
ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] uptime
63-
ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] uptime
64-
ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] uptime
72+
# Check SSH is working and accept the keys
73+
ssh -o StrictHostKeyChecking=accept-new root@${VM0_IP} uptime
74+
ssh -o StrictHostKeyChecking=accept-new root@${VM1_IP} uptime
75+
ssh -o StrictHostKeyChecking=accept-new root@${VM2_IP} uptime
6576

6677
cd ${REPO_ROOT}
6778

@@ -91,7 +102,7 @@ ${KOPS} create cluster --cloud=metal metal.k8s.local --zones main --networking c
91102

92103
# Set the IP ingress, required for metal cloud
93104
# TODO: is this the best option?
94-
${KOPS} edit cluster metal.k8s.local --set spec.api.publicName=10.123.45.10
105+
${KOPS} edit cluster metal.k8s.local --set spec.api.publicName=${VM0_IP}
95106

96107
# Use latest etcd-manager image (while we're adding features)
97108
${KOPS} edit cluster metal.k8s.local --set 'spec.etcdClusters[*].manager.image=us-central1-docker.pkg.dev/k8s-staging-images/etcd-manager/etcd-manager-static:latest'
@@ -112,32 +123,31 @@ ${KOPS} get ig --name metal.k8s.local -oyaml
112123
${KOPS} update cluster metal.k8s.local
113124
${KOPS} update cluster metal.k8s.local --yes --admin
114125

115-
# Start an SSH agent; enroll assumes SSH connectivity to the VMs with the key in the agent
116-
eval $(ssh-agent)
117-
ssh-add ${REPO_ROOT}/.build/.ssh/id_ed25519
118-
119126
# Enroll the control-plane VM
120-
${KOPS} toolbox enroll --cluster metal.k8s.local --instance-group control-plane-main --host 10.123.45.10 --v=2
127+
${KOPS} toolbox enroll --cluster metal.k8s.local --instance-group control-plane-main --host ${VM0_IP} --v=2
121128

122129
# Manual creation of "volumes" for etcd, and setting up peer nodes
123-
cat <<EOF | ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] tee -a /etc/hosts
130+
cat <<EOF | ssh root@${VM0_IP} tee -a /etc/hosts
124131
125132
# Hosts added for etcd discovery
126-
10.123.45.10 node0.main.metal.k8s.local
127-
10.123.45.10 node0.events.metal.k8s.local
133+
${VM0_IP} node0.main.metal.k8s.local
134+
${VM0_IP} node0.events.metal.k8s.local
128135
EOF
129136

130-
ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] cat /etc/hosts
137+
ssh root@${VM0_IP} cat /etc/hosts
131138

132-
ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] mkdir -p /mnt/disks/metal.k8s.local--main--0/mnt
133-
ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] touch /mnt/disks/metal.k8s.local--main--0/mnt/please-create-new-cluster
139+
ssh root@${VM0_IP} mkdir -p /mnt/disks/metal.k8s.local--main--0/mnt
140+
ssh root@${VM0_IP} touch /mnt/disks/metal.k8s.local--main--0/mnt/please-create-new-cluster
134141

135-
ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] mkdir -p /mnt/disks/metal.k8s.local--events--0/mnt
136-
ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] touch /mnt/disks/metal.k8s.local--events--0/mnt/please-create-new-cluster
142+
ssh root@${VM0_IP} mkdir -p /mnt/disks/metal.k8s.local--events--0/mnt
143+
ssh root@${VM0_IP} touch /mnt/disks/metal.k8s.local--events--0/mnt/please-create-new-cluster
137144

138145

139-
echo "Waiting 300 seconds for kube to start"
140-
sleep 300
146+
echo "Waiting for kube to start"
147+
until kubectl get nodes; do
148+
echo "waiting for kube to start"
149+
sleep 10
150+
done
141151

142152
kubectl get nodes
143153
kubectl get pods -A
@@ -196,18 +206,18 @@ function enroll_node() {
196206

197207
# Manual "discovery" for control-plane endpoints
198208
# TODO: Replace with well-known IP
199-
cat <<EOF | ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 root@${node_ip} tee -a /etc/hosts
209+
cat <<EOF | ssh root@${node_ip} tee -a /etc/hosts
200210
201211
# Hosts added for leader discovery
202-
10.123.45.10 kops-controller.internal.metal.k8s.local
203-
10.123.45.10 api.internal.metal.k8s.local
212+
${VM0_IP} kops-controller.internal.metal.k8s.local
213+
${VM0_IP} api.internal.metal.k8s.local
204214
EOF
205215

206216
timeout 10m ${KOPS} toolbox enroll --cluster metal.k8s.local --instance-group nodes-main --host ${node_ip} --v=2
207217
}
208218

209-
enroll_node 10.123.45.11
210-
enroll_node 10.123.45.12
219+
enroll_node ${VM1_IP}
220+
enroll_node ${VM2_IP}
211221

212222
echo "Waiting 30 seconds for nodes to be ready"
213223
sleep 30

0 commit comments

Comments (0)