# Reconstructed from diff residue: removed fused diff line numbers, +/- markers,
# and scrape-inserted spaces that broke shell syntax (e.g. "VM0_IP=${IPV4_PREFIX} 10"
# would have exported VM0_IP for a command named "10" instead of assigning the IP).

# Fresh scratch space for the test run (WORKDIR is provided by the harness).
rm -rf ${WORKDIR}/s3
mkdir -p ${WORKDIR}/s3/

# Single place to define the VM subnet; all node addresses derive from it.
IPV4_PREFIX=10.123.45.

VM0_IP=${IPV4_PREFIX}10
VM1_IP=${IPV4_PREFIX}11
VM2_IP=${IPV4_PREFIX}12

# Start our VMs
${REPO_ROOT}/tests/e2e/scenarios/bare-metal/start-vms

# Start an SSH agent; enroll assumes SSH connectivity to the VMs with the key in the agent
eval $(ssh-agent)
ssh-add ${REPO_ROOT}/.build/.ssh/id_ed25519

. hack/dev-build-metal.sh

echo "Waiting 10 seconds for VMs to start"
sleep 10

# Remove from known-hosts in case of reuse (stale keys would make ssh fail hard)
ssh-keygen -f ~/.ssh/known_hosts -R ${VM0_IP} || true
ssh-keygen -f ~/.ssh/known_hosts -R ${VM1_IP} || true
ssh-keygen -f ~/.ssh/known_hosts -R ${VM2_IP} || true

# Check SSH is working and accept the keys (accept-new records them without prompting;
# the agent started above supplies the identity, so no -i flag is needed)
ssh -o StrictHostKeyChecking=accept-new root@${VM0_IP} uptime
ssh -o StrictHostKeyChecking=accept-new root@${VM1_IP} uptime
ssh -o StrictHostKeyChecking=accept-new root@${VM2_IP} uptime

cd ${REPO_ROOT}
@@ -93,7 +104,7 @@ ${KOPS} create cluster --cloud=metal metal.k8s.local --zones main --networking c
93104
94105# Set the IP ingress, required for metal cloud
95106# TODO: is this the best option?
96- ${KOPS} edit cluster metal.k8s.local --set spec.api.publicName=10.123.45.10
107+ ${KOPS} edit cluster metal.k8s.local --set spec.api.publicName=${VM0_IP}
97108
98109# Use latest etcd-manager image (while we're adding features)
99110${KOPS} edit cluster metal.k8s.local --set ' spec.etcdClusters[*].manager.image=us-central1-docker.pkg.dev/k8s-staging-images/etcd-manager/etcd-manager-static:latest'
# Reconstructed post-patch side of this hunk. Also resolves the unresolved merge
# conflict (<<<<<<< HEAD / ======= / >>>>>>>) left in the source: we keep the HEAD
# side — a bounded retry loop — because the alternative `until kubectl get nodes`
# loop never times out, and the surrounding comment explicitly promises a
# 10-minute timeout.
${KOPS} get ig --name metal.k8s.local -oyaml

${KOPS} update cluster metal.k8s.local
${KOPS} update cluster metal.k8s.local --yes --admin

# Enroll the control-plane VM
${KOPS} toolbox enroll --cluster metal.k8s.local --instance-group control-plane-main --host ${VM0_IP} --v=2

# Manual creation of "volumes" for etcd, and setting up peer nodes.
# Heredoc delimiter is unquoted on purpose: ${VM0_IP} must expand locally
# before being appended to the VM's /etc/hosts.
cat <<EOF | ssh root@${VM0_IP} tee -a /etc/hosts

# Hosts added for etcd discovery
${VM0_IP} node0.main.metal.k8s.local
${VM0_IP} node0.events.metal.k8s.local
EOF

ssh root@${VM0_IP} cat /etc/hosts

# The marker files tell etcd-manager these are empty volumes for a new cluster.
ssh root@${VM0_IP} mkdir -p /mnt/disks/metal.k8s.local--main--0/mnt
ssh root@${VM0_IP} touch /mnt/disks/metal.k8s.local--main--0/mnt/please-create-new-cluster

ssh root@${VM0_IP} mkdir -p /mnt/disks/metal.k8s.local--events--0/mnt
ssh root@${VM0_IP} touch /mnt/disks/metal.k8s.local--events--0/mnt/please-create-new-cluster


echo "Waiting for kube to start"
# Wait for kube-apiserver to be ready, timeout after 10 minutes (60 tries x 10s)
for i in {1..60}; do
  if kubectl get nodes; then
    break
  fi
  sleep 10
done
@@ -204,18 +216,18 @@ function enroll_node() {
204216
205217# Manual "discovery" for control-plane endpoints
206218# TODO: Replace with well-known IP
207- cat << EOF | ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT} /.build/.ssh/id_ed25519 root@${node_ip} tee -a /etc/hosts
219+ cat << EOF | ssh root@${node_ip} tee -a /etc/hosts
208220
209221# Hosts added for leader discovery
210- 10.123.45.10 kops-controller.internal.metal.k8s.local
211- 10.123.45.10 api.internal.metal.k8s.local
222+ ${VM0_IP} kops-controller.internal.metal.k8s.local
223+ ${VM0_IP} api.internal.metal.k8s.local
212224EOF
213225
214226timeout 10m ${KOPS} toolbox enroll --cluster metal.k8s.local --instance-group nodes-main --host ${node_ip} --v=2
215227}
216228
217- enroll_node 10.123.45.11
218- enroll_node 10.123.45.12
229+ enroll_node ${VM1_IP}
230+ enroll_node ${VM2_IP}
219231
220232echo " Waiting 30 seconds for nodes to be ready"
221233sleep 30
0 commit comments