rm -rf ${WORKDIR}/s3
mkdir -p ${WORKDIR}/s3/

+IPV4_PREFIX=10.123.45.
+
+VM0_IP=${IPV4_PREFIX}10
+VM1_IP=${IPV4_PREFIX}11
+VM2_IP=${IPV4_PREFIX}12
+
+# Start an SSH agent; enroll assumes SSH connectivity to the VMs with the key in the agent
+eval $(ssh-agent)
+ssh-add ${REPO_ROOT}/.build/.ssh/id_ed25519
+
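# A sketch, not part of the change: the three addresses follow one pattern,
# so they could equally be derived in a loop; `ssh-add -l` then confirms the
# agent really holds the enroll key.
for i in 0 1 2; do
  eval "VM${i}_IP=${IPV4_PREFIX}$((10 + i))"
done
ssh-add -l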
# Start our VMs
${REPO_ROOT}/tests/e2e/scenarios/bare-metal/start-vms

@@ -55,13 +65,14 @@ echo "Waiting 10 seconds for VMs to start"
sleep 10

# Remove from known-hosts in case of reuse
-ssh-keygen -f ~/.ssh/known_hosts -R 10.123.45.10 || true
-ssh-keygen -f ~/.ssh/known_hosts -R 10.123.45.11 || true
-ssh-keygen -f ~/.ssh/known_hosts -R 10.123.45.12 || true
+ssh-keygen -f ~/.ssh/known_hosts -R ${VM0_IP} || true
+ssh-keygen -f ~/.ssh/known_hosts -R ${VM1_IP} || true
+ssh-keygen -f ~/.ssh/known_hosts -R ${VM2_IP} || true

-ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] uptime
-ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] uptime
-ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] uptime
+# Check SSH is working and accept the keys
+ssh -o StrictHostKeyChecking=accept-new root@${VM0_IP} uptime
+ssh -o StrictHostKeyChecking=accept-new root@${VM1_IP} uptime
+ssh -o StrictHostKeyChecking=accept-new root@${VM2_IP} uptime
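# A sketch, assuming the fixed 10-second sleep above ever proves flaky: SSH
# readiness can be polled per VM with a bounded retry instead.
for ip in ${VM0_IP} ${VM1_IP} ${VM2_IP}; do
  for attempt in $(seq 1 30); do
    ssh -o StrictHostKeyChecking=accept-new -o ConnectTimeout=5 root@${ip} uptime && break
    sleep 5
  done
done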

cd ${REPO_ROOT}

@@ -91,7 +102,7 @@ ${KOPS} create cluster --cloud=metal metal.k8s.local --zones main --networking c

# Set the IP ingress, required for metal cloud
# TODO: is this the best option?
-${KOPS} edit cluster metal.k8s.local --set spec.api.publicName=10.123.45.10
+${KOPS} edit cluster metal.k8s.local --set spec.api.publicName=${VM0_IP}

# Use latest etcd-manager image (while we're adding features)
${KOPS} edit cluster metal.k8s.local --set 'spec.etcdClusters[*].manager.image=us-central1-docker.pkg.dev/k8s-staging-images/etcd-manager/etcd-manager-static:latest'
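# A sketch: both edits above can be checked against the rendered spec before
# bringing the cluster up (the grep pattern is illustrative).
${KOPS} get cluster metal.k8s.local -oyaml | grep -E 'publicName|etcd-manager'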
@@ -112,32 +123,31 @@ ${KOPS} get ig --name metal.k8s.local -oyaml
${KOPS} update cluster metal.k8s.local
${KOPS} update cluster metal.k8s.local --yes --admin

-# Start an SSH agent; enroll assumes SSH connectivity to the VMs with the key in the agent
-eval $(ssh-agent)
-ssh-add ${REPO_ROOT}/.build/.ssh/id_ed25519
-
# Enroll the control-plane VM
-${KOPS} toolbox enroll --cluster metal.k8s.local --instance-group control-plane-main --host 10.123.45.10 --v=2
+${KOPS} toolbox enroll --cluster metal.k8s.local --instance-group control-plane-main --host ${VM0_IP} --v=2
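# A sketch: the control-plane enrollment could be bounded with timeout(1),
# mirroring the worker enrollment later in this script, so a hung SSH session
# fails the job quickly instead of stalling it.
timeout 10m ${KOPS} toolbox enroll --cluster metal.k8s.local --instance-group control-plane-main --host ${VM0_IP} --v=2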

# Manual creation of "volumes" for etcd, and setting up peer nodes
-cat <<EOF | ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] tee -a /etc/hosts
+cat <<EOF | ssh root@${VM0_IP} tee -a /etc/hosts

# Hosts added for etcd discovery
-10.123.45.10 node0.main.metal.k8s.local
-10.123.45.10 node0.events.metal.k8s.local
+${VM0_IP} node0.main.metal.k8s.local
+${VM0_IP} node0.events.metal.k8s.local
EOF

-ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] cat /etc/hosts
+ssh root@${VM0_IP} cat /etc/hosts
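# A sketch: beyond dumping /etc/hosts, the discovery names can be resolved
# explicitly on the VM (getent consults /etc/hosts as well as DNS).
ssh root@${VM0_IP} getent hosts node0.main.metal.k8s.local node0.events.metal.k8s.local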

-ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] mkdir -p /mnt/disks/metal.k8s.local--main--0/mnt
-ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] touch /mnt/disks/metal.k8s.local--main--0/mnt/please-create-new-cluster
+ssh root@${VM0_IP} mkdir -p /mnt/disks/metal.k8s.local--main--0/mnt
+ssh root@${VM0_IP} touch /mnt/disks/metal.k8s.local--main--0/mnt/please-create-new-cluster

-ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] mkdir -p /mnt/disks/metal.k8s.local--events--0/mnt
-ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] touch /mnt/disks/metal.k8s.local--events--0/mnt/please-create-new-cluster
+ssh root@${VM0_IP} mkdir -p /mnt/disks/metal.k8s.local--events--0/mnt
+ssh root@${VM0_IP} touch /mnt/disks/metal.k8s.local--events--0/mnt/please-create-new-cluster
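# A sketch: the two etcd stores share one naming pattern
# (<cluster>--<store>--<index>), so the volume scaffolding could be a loop.
for store in main events; do
  ssh root@${VM0_IP} "mkdir -p /mnt/disks/metal.k8s.local--${store}--0/mnt && touch /mnt/disks/metal.k8s.local--${store}--0/mnt/please-create-new-cluster"
done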


-echo "Waiting 300 seconds for kube to start"
-sleep 300
+echo "Waiting for kube to start"
+until kubectl get nodes; do
+  echo "waiting for kube to start"
+  sleep 10
+done
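# A sketch, assuming an unreachable API server should fail the job rather
# than hang: the new poll loop can be given an upper bound with timeout(1).
timeout 15m bash -c 'until kubectl get nodes; do echo "waiting for kube to start"; sleep 10; done'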

kubectl get nodes
kubectl get pods -A
@@ -196,18 +206,18 @@ function enroll_node() {

# Manual "discovery" for control-plane endpoints
# TODO: Replace with well-known IP
-cat <<EOF | ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 root@${node_ip} tee -a /etc/hosts
+cat <<EOF | ssh root@${node_ip} tee -a /etc/hosts

# Hosts added for leader discovery
-10.123.45.10 kops-controller.internal.metal.k8s.local
-10.123.45.10 api.internal.metal.k8s.local
+${VM0_IP} kops-controller.internal.metal.k8s.local
+${VM0_IP} api.internal.metal.k8s.local
EOF
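# A sketch: before enrolling, the discovery entries just appended can be
# resolved from the node itself.
ssh root@${node_ip} getent hosts kops-controller.internal.metal.k8s.local api.internal.metal.k8s.local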

timeout 10m ${KOPS} toolbox enroll --cluster metal.k8s.local --instance-group nodes-main --host ${node_ip} --v=2
}

-enroll_node 10.123.45.11
-enroll_node 10.123.45.12
+enroll_node ${VM1_IP}
+enroll_node ${VM2_IP}

echo "Waiting 30 seconds for nodes to be ready"
sleep 30
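# A sketch, as an alternative to the fixed sleep: readiness can be waited on
# explicitly (the 5m bound is an arbitrary choice).
kubectl wait --for=condition=Ready node --all --timeout=5m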