#!/usr/bin/env bash

# Copyright 2024 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail
set -o xtrace

REPO_ROOT=$(git rev-parse --show-toplevel)
cd ${REPO_ROOT}

WORKDIR=${REPO_ROOT}/.build/

BINDIR=${WORKDIR}/bin
mkdir -p "${BINDIR}"
go build -o ${BINDIR}/kops ./cmd/kops

KOPS=${BINDIR}/kops
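
# Sanity-check the freshly built binary (an added step, not part of the
# upstream flow; 'kops version' runs offline and fails fast on a bad build).
${KOPS} version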

function cleanup() {
  echo "running dump-artifacts"
  ${REPO_ROOT}/tests/e2e/scenarios/bare-metal/dump-artifacts || true

  echo "running cleanup"
  ${REPO_ROOT}/tests/e2e/scenarios/bare-metal/cleanup || true
}

if [[ -z "${SKIP_CLEANUP:-}" ]]; then
  trap cleanup EXIT
fi

# Create the directory that will back our mock s3 storage
rm -rf ${WORKDIR}/s3
mkdir -p ${WORKDIR}/s3/

# Start our VMs
${REPO_ROOT}/tests/e2e/scenarios/bare-metal/start-vms

. hack/dev-build-metal.sh

echo "Waiting 10 seconds for VMs to start"
sleep 10
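
# A best-effort extra guard (added; assumes netcat is installed on the host):
# poll each VM's SSH port so a slow boot does not fail the run outright.
for ip in 10.123.45.10 10.123.45.11 10.123.45.12; do
  timeout 120 bash -c "until nc -z ${ip} 22; do sleep 2; done" || true
done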

# Remove from known-hosts in case of reuse
ssh-keygen -f ~/.ssh/known_hosts -R 10.123.45.10 || true
ssh-keygen -f ~/.ssh/known_hosts -R 10.123.45.11 || true
ssh-keygen -f ~/.ssh/known_hosts -R 10.123.45.12 || true

ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 root@10.123.45.10 uptime
ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 root@10.123.45.11 uptime
ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 root@10.123.45.12 uptime

cd ${REPO_ROOT}

# Enable feature flag for bare metal
export KOPS_FEATURE_FLAGS=Metal

# Set up the AWS credentials
export AWS_SECRET_ACCESS_KEY=secret
export AWS_ACCESS_KEY_ID=accesskey
export AWS_ENDPOINT_URL=http://10.123.45.1:8443
export AWS_REGION=us-east-1

export S3_ENDPOINT=${AWS_ENDPOINT_URL}
export S3_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
export S3_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}

# Create the state-store bucket in our mock s3 server
export KOPS_STATE_STORE=s3://kops-state-store/
aws --version
aws s3 ls s3://kops-state-store || aws s3 mb s3://kops-state-store

export CLUSTER_NAME=metalipv6.k8s.local

# List clusters (there should not be any yet)
${KOPS} get cluster || true

# Create a cluster
${KOPS} create cluster --cloud=metal ${CLUSTER_NAME} --zones main --networking cni --ipv6

# Set the IP ingress, required for metal cloud
# TODO: is this the best option?
${KOPS} edit cluster ${CLUSTER_NAME} --set spec.api.publicName=10.123.45.10

# Use latest etcd-manager image (while we're adding features)
${KOPS} edit cluster ${CLUSTER_NAME} --set 'spec.etcdClusters[*].manager.image=us-central1-docker.pkg.dev/k8s-staging-images/etcd-manager/etcd-manager-static:latest'

# Use 1.31 kubernetes so we get kube-apiserver fixes
export KOPS_RUN_TOO_NEW_VERSION=1
"${KOPS}" edit cluster ${CLUSTER_NAME} "--set=cluster.spec.kubernetesVersion=1.31.0"

# List clusters
${KOPS} get cluster
${KOPS} get cluster -oyaml

# List instance groups
${KOPS} get ig --name ${CLUSTER_NAME}
${KOPS} get ig --name ${CLUSTER_NAME} -oyaml

# Apply basic configuration
${KOPS} update cluster ${CLUSTER_NAME}
${KOPS} update cluster ${CLUSTER_NAME} --yes --admin
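
# An added sanity check: '--admin' should have exported an admin kubeconfig
# context for the new cluster; confirm it is the active context.
kubectl config current-context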

# Start an SSH agent; enroll assumes SSH connectivity to the VMs with the key in the agent
eval $(ssh-agent)
ssh-add ${REPO_ROOT}/.build/.ssh/id_ed25519
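# Confirm the key actually landed in the agent (an added check).
ssh-add -l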

# Enroll the control-plane VM
${KOPS} toolbox enroll --cluster ${CLUSTER_NAME} --instance-group control-plane-main --host 10.123.45.10 --v=2

# Manual creation of "volumes" for etcd, and setting up peer nodes
cat <<EOF | ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 root@10.123.45.10 tee -a /etc/hosts

# Hosts added for etcd discovery
10.123.45.10 node0.main.${CLUSTER_NAME}
10.123.45.10 node0.events.${CLUSTER_NAME}
EOF

ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 root@10.123.45.10 cat /etc/hosts

ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 root@10.123.45.10 mkdir -p /mnt/disks/${CLUSTER_NAME}--main--0/mnt
ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 root@10.123.45.10 touch /mnt/disks/${CLUSTER_NAME}--main--0/mnt/please-create-new-cluster

ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 root@10.123.45.10 mkdir -p /mnt/disks/${CLUSTER_NAME}--events--0/mnt
ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 root@10.123.45.10 touch /mnt/disks/${CLUSTER_NAME}--events--0/mnt/please-create-new-cluster


echo "Waiting 300 seconds for kube to start"
sleep 300
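
# A best-effort extra wait (added): keep polling the API server in case five
# minutes was not quite enough; give up quietly and let the checks below report.
timeout 300 bash -c 'until kubectl get nodes; do sleep 10; done' || true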

kubectl get nodes
kubectl get pods -A

# Install kindnet
kubectl create -f https://raw.githubusercontent.com/aojea/kindnet/main/install-kindnet.yaml
echo "Waiting 10 seconds for kindnet to start"
sleep 10
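# An added, best-effort readiness check; assumes the manifest above creates a
# DaemonSet named 'kindnet' in kube-system (skip gracefully if it differs).
kubectl -n kube-system rollout status daemonset/kindnet --timeout=120s || true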
kubectl get nodes
kubectl get pods -A

# For host records
kubectl create ns kops-system
kubectl apply -f ${REPO_ROOT}/k8s/crds/kops.k8s.io_hosts.yaml
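# An added guard: wait for the Host CRD to be established before anything
# tries to create Host objects.
kubectl wait --for=condition=Established crd/hosts.kops.k8s.io --timeout=60s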

# kops-controller extra permissions
kubectl apply --server-side -f - <<EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kops-controller:pki-verifier
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kops-controller:pki-verifier
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:serviceaccount:kube-system:kops-controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: kops-controller:pki-verifier
rules:
- apiGroups:
  - "kops.k8s.io"
  resources:
  - hosts
  verbs:
  - get
  - list
  - watch
# Must be able to set node addresses
# TODO: Move out?
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
EOF
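
# An added verification: impersonate the bound user and confirm the new
# ClusterRole grants access to Host objects.
kubectl auth can-i list hosts.kops.k8s.io --as=system:serviceaccount:kube-system:kops-controller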

function enroll_node() {
  local node_ip=$1

  # Manual "discovery" for control-plane endpoints
  # TODO: Replace with well-known IP
  cat <<EOF | ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 root@${node_ip} tee -a /etc/hosts

# Hosts added for leader discovery
10.123.45.10 kops-controller.internal.${CLUSTER_NAME}
10.123.45.10 api.internal.${CLUSTER_NAME}
EOF

  timeout 10m ${KOPS} toolbox enroll --cluster ${CLUSTER_NAME} --instance-group nodes-main --host ${node_ip} --v=2
}

enroll_node 10.123.45.11
enroll_node 10.123.45.12

echo "Waiting 30 seconds for nodes to be ready"
sleep 30
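
# A best-effort extra wait (added): the cluster should converge to three nodes
# (one control-plane plus two enrolled workers); poll rather than fail fast.
timeout 300 bash -c 'until [[ $(kubectl get nodes --no-headers | wc -l) -ge 3 ]]; do sleep 10; done' || true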

kubectl get nodes
kubectl get pods -A


echo "Test successful"