#!/usr/bin/env bash

# Copyright 2024 The Karmada Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

## This script deploys the Karmada control plane to a cluster using the helm chart, and then deploys several member clusters.
## Some member clusters are joined in Push mode, using the 'karmadactl join' command.
## Other member clusters are joined in Pull mode, using the helm chart of karmada-agent.
# VERSION: which version of Karmada you want to install.
VERSION=${VERSION:-"latest"}

# CHARTDIR: the relative path of the Karmada charts.
CHARTDIR=${CHARTDIR:-"./charts/karmada"}

# NEED_CREATE_KIND_CLUSTER / NEED_CREATE_KIND_MEMBER_CLUSTER: whether the host /
# member clusters shall be created by kind. If you already have clusters, set
# these to any value other than "true".
NEED_CREATE_KIND_CLUSTER=${NEED_CREATE_KIND_CLUSTER:-"true"}
NEED_CREATE_KIND_MEMBER_CLUSTER=${NEED_CREATE_KIND_MEMBER_CLUSTER:-"true"}
CLUSTER_VERSION=${CLUSTER_VERSION:-"kindest/node:v1.27.3"}

# IMAGE_FROM: whether to fetch images in advance. Optional values:
#   pull-in-advance: use 'docker pull' to fetch needed images to the local node in
#                    advance (in case of network trouble when pulling automatically)
#   make:            build karmada images from source code with the latest tag, and
#                    pull non-karmada images in advance (to try the latest code)
#   anything else:   ignored; images are pulled by the container runtime on demand
IMAGE_FROM=${IMAGE_FROM:-"make"}

# LOAD_IMAGE_IN_ADVANCE: if images were fetched in advance and KinD clusters are
# used, also load them into KinD in advance. Set to a value other than "true" to skip.
LOAD_IMAGE_IN_ADVANCE=${LOAD_IMAGE_IN_ADVANCE:-"true"}

# KARMADA_HOST_NAME: cluster name and context name of the karmada-host cluster;
# if you already have a cluster, replace it with your real name.
KARMADA_HOST_NAME=${KARMADA_HOST_NAME:-"karmada-host"}
# KARMADA_HOST_KUBECONFIG: kubeconfig path of the karmada-host cluster;
# if you already have a cluster, replace it with your real kubeconfig path.
KARMADA_HOST_KUBECONFIG=${KARMADA_HOST_KUBECONFIG:-"${HOME}/.kube/karmada-host.config"}

# PUSH_MODE_MEMBERS: a map describing your push mode member clusters.
#   key:   cluster name / context name of the member cluster
#   value: corresponding kubeconfig path of the member cluster
# If you already have member clusters, replace keys/values with the real names and
# kubeconfig paths. Append entries as in the examples to add any number of clusters.
declare -A PUSH_MODE_MEMBERS
PUSH_MODE_MEMBERS["member1"]="${HOME}/.kube/member1.config"
PUSH_MODE_MEMBERS["member2"]="${HOME}/.kube/member2.config"

# PULL_MODE_MEMBERS: a map describing your pull mode member clusters;
# same key/value convention as PUSH_MODE_MEMBERS.
declare -A PULL_MODE_MEMBERS
PULL_MODE_MEMBERS["member3"]="${HOME}/.kube/member3.config"

# INSTALL_ESTIMATOR: whether to install karmada-scheduler-estimator.
INSTALL_ESTIMATOR=${INSTALL_ESTIMATOR:-"true"}
| 66 | + |
echo "########## start installing karmada control plane ##########"
set -ex

# 1. create the karmada-host KinD cluster if NEED_CREATE_KIND_CLUSTER is enabled
# NOTE: compare against "true" explicitly instead of executing the variable as a
# command ('if ${VAR}' would run arbitrary text stored in the variable).
if [[ "${NEED_CREATE_KIND_CLUSTER}" == "true" ]]; then
  # 1.1 recreate karmada-host from scratch: drop any previous cluster and its kubeconfig
  # (use ${KARMADA_HOST_NAME} consistently rather than the hard-coded "karmada-host")
  kind delete clusters "${KARMADA_HOST_NAME}"
  rm -f "${KARMADA_HOST_KUBECONFIG}"
  # exported so that hack/create-cluster.sh picks up the node image version
  export CLUSTER_VERSION=${CLUSTER_VERSION}
  hack/create-cluster.sh "${KARMADA_HOST_NAME}" "${KARMADA_HOST_KUBECONFIG}"
fi
| 78 | + |
# 2. fetch images in advance if you set IMAGE_FROM
if [ "${IMAGE_FROM}" == "pull-in-advance" ]; then
  ## 2.1 use 'docker pull' to fetch target images to the local node in advance.
  ## Parse "registry/repository:tag" triples out of the chart values.yaml, replacing
  ## the *karmadaImageVersion YAML anchor with the requested VERSION.
  imgs=$(grep -C 1 'repository:' "${CHARTDIR}/values.yaml" | sed "s/*karmadaImageVersion/${VERSION}/g" | awk -F ':' '{print $2}' | sed 's/\"//g' | xargs -n3 | awk '{print $1"/"$2":"$3}')
  for img in ${imgs}; do
    docker pull "${img}"
  done
  docker pull registry.k8s.io/metrics-server/metrics-server:v0.6.3
elif [ "${IMAGE_FROM}" == "make" ]; then
  ## 2.2 build karmada images from source code with the latest tag,
  ## and pull non-karmada images in advance
  imgs=$(grep -v 'karmada' "${CHARTDIR}/values.yaml" | grep -C 1 'repository: ' | awk -F ':' '{print $2}' | sed 's/\"//g' | xargs -n3 | awk '{print $1"/"$2":"$3}')
  for img in ${imgs}; do
    docker pull "${img}"
  done
  docker pull registry.k8s.io/metrics-server/metrics-server:v0.6.3

  # exported so the Makefile tags/pushes with the requested version and registry
  export VERSION=${VERSION}
  export REGISTRY="docker.io/karmada"
  # NOTE(review): the trailing '.' is passed to make as an extra target — it looks
  # like it was meant to be '--directory=.'; confirm against the karmada build docs.
  make images GOOS="linux" .
fi
| 99 | + |
# 3. load images into the KinD karmada-host cluster in advance if LOAD_IMAGE_IN_ADVANCE is enabled
# BUGFIX: the previous form '[[ ${A} && ${B} ]]' only tested that both strings are
# non-empty, so setting either option to "false" still loaded the images; compare
# against "true" explicitly instead.
if [[ "${NEED_CREATE_KIND_CLUSTER}" == "true" && "${LOAD_IMAGE_IN_ADVANCE}" == "true" ]]; then
  # same values.yaml parsing as step 2.1: extract "registry/repository:tag" triples
  imgs=$(grep -C 1 'repository:' "${CHARTDIR}/values.yaml" | sed "s/*karmadaImageVersion/${VERSION}/g" | awk -F ':' '{print $2}' | sed 's/\"//g' | xargs -n3 | awk '{print $1"/"$2":"$3}')
  for img in ${imgs}; do
    kind load docker-image "${img}" --name "${KARMADA_HOST_NAME}"
  done
fi
| 107 | + |
# 4. this script deploys karmada-apiserver with host networking, so it needs the
# host-network ip (node ip) backing kube-apiserver, which is then inserted into
# values.yaml as an extra SAN of the certificate.
export KUBECONFIG=${KARMADA_HOST_KUBECONFIG}
HOST_IP=$(kubectl get ep kubernetes -o jsonpath='{.subsets[0].addresses[0].ip}')
# insert "<HOST_IP>", on a new line right before the existing "127.0.0.1 entry
# (GNU sed syntax; on BSD/macOS 'sed -i' needs a separate backup-suffix argument)
sed -i'' -e "/localhost/{n;s/ \"127.0.0.1/ \"${HOST_IP}\",\n&/g}" "${CHARTDIR}/values.yaml"

# 5. install karmada into the karmada-host cluster by helm
# to install other components, pass e.g. --set components={"search,descheduler"}
helm install karmada -n karmada-system \
  --kubeconfig "${KARMADA_HOST_KUBECONFIG}" \
  --create-namespace \
  --dependency-update \
  --set apiServer.hostNetwork=true,components={"metricsAdapter,search,descheduler"} \
  "${CHARTDIR}"
| 122 | + |
# 6. verify: wait until every pod in karmada-system is Running
# (compare the pod count of the namespace against its Running count)
while [[ $(kubectl get po -A | grep -c karmada-system ) -ne $(kubectl get po -n karmada-system | grep -c Running) ]]; do
  echo "waiting for karmada control plane ready..."; sleep 10;
done
kubectl get po -n karmada-system -o wide

# 7. export the kubeconfig of karmada-apiserver to a local path, rewriting the
# in-cluster service address to the reachable endpoint address
KARMADA_APISERVER_KUBECONFIG="${HOME}/.kube/karmada-apiserver.config"
kubectl get secret -n karmada-system karmada-kubeconfig -o jsonpath='{.data.kubeconfig}' | base64 -d > "${KARMADA_APISERVER_KUBECONFIG}"
KARMADA_APISERVER_ADDR=$(kubectl get ep karmada-apiserver -n karmada-system | tail -n 1 | awk '{print $2}')
sed -i'' -e "s/karmada-apiserver.karmada-system.svc.*:5443/${KARMADA_APISERVER_ADDR}/g" "${KARMADA_APISERVER_KUBECONFIG}"

echo "########## end installing karmada control plane success ##########"
| 136 | + |
echo "########## start deploying member clusters ##########"

#######################################
# Recreate a member cluster by KinD and deploy metrics-server into it.
# (Factored out: the push-mode and pull-mode loops were identical.)
# Globals:   CLUSTER_VERSION (exported for hack/create-cluster.sh)
# Arguments: $1 - cluster name / context name
#            $2 - kubeconfig path of the member cluster
#######################################
recreate_member_cluster() {
  local clustername=$1
  local clusterconfig=$2
  kind delete clusters "${clustername}"
  rm -f "${clusterconfig}"
  export CLUSTER_VERSION=${CLUSTER_VERSION}
  hack/create-cluster.sh "${clustername}" "${clusterconfig}"

  kind load docker-image "registry.k8s.io/metrics-server/metrics-server:v0.6.3" --name "${clustername}"
  hack/deploy-k8s-metrics-server.sh "${clusterconfig}" "${clustername}"
}

# 1. create member KinD clusters if NEED_CREATE_KIND_MEMBER_CLUSTER is enabled
if [[ "${NEED_CREATE_KIND_MEMBER_CLUSTER}" == "true" ]]; then
  ## 1.1 create push mode member clusters by KinD
  for clustername in "${!PUSH_MODE_MEMBERS[@]}"; do
    recreate_member_cluster "${clustername}" "${PUSH_MODE_MEMBERS[$clustername]}"
  done

  ## 1.2 create pull mode member clusters by KinD
  for clustername in "${!PULL_MODE_MEMBERS[@]}"; do
    recreate_member_cluster "${clustername}" "${PULL_MODE_MEMBERS[$clustername]}"
  done
fi
| 163 | + |
# 2. load the karmada-agent image into pull mode member clusters in advance if LOAD_IMAGE_IN_ADVANCE is enabled
# BUGFIX: the previous form '[[ ${A} && ${B} ]]' only tested that both strings are
# non-empty, so "false" was still treated as enabled; compare against "true" explicitly.
if [[ "${NEED_CREATE_KIND_MEMBER_CLUSTER}" == "true" && "${LOAD_IMAGE_IN_ADVANCE}" == "true" ]]; then
  # resolve the "registry/repository:tag" of karmada-agent from the chart values
  agentImage=$(grep -C 1 'repository: karmada/karmada-agent' "${CHARTDIR}/values.yaml" | sed "s/*karmadaImageVersion/${VERSION}/g" | awk -F ':' '{print $2}' | sed 's/\"//g' | xargs -n3 | awk '{print $1"/"$2":"$3}')
  for clustername in "${!PULL_MODE_MEMBERS[@]}"; do
    kind load docker-image "${agentImage}" --name "${clustername}"
  done
fi
| 171 | + |
# 3. download karmadactl
GO111MODULE=on go install "github.com/karmada-io/karmada/cmd/karmadactl"
# take the first GOPATH entry in case it is a colon-separated list
GOPATH=$(go env GOPATH | awk -F ':' '{print $1}')
KARMADACTL_BIN="${GOPATH}/bin/karmadactl"

# ALL_MEMBERS collects every member cluster (push and pull mode) so the
# scheduler-estimator step below can iterate over all of them
declare -A ALL_MEMBERS

# 4. join push mode member clusters via the 'karmadactl join' command
for clustername in "${!PUSH_MODE_MEMBERS[@]}"; do
  ALL_MEMBERS[$clustername]=${PUSH_MODE_MEMBERS[$clustername]}
  "${KARMADACTL_BIN}" join "${clustername}" --kubeconfig "${KARMADA_APISERVER_KUBECONFIG}" --karmada-context karmada-apiserver --cluster-kubeconfig "${PUSH_MODE_MEMBERS[$clustername]}" --cluster-context "${clustername}"
done
| 184 | + |
# 5. deploying karmada-agent by helm chart requires the karmada-apiserver certificates
# to be filled into values, so extract them from karmada-apiserver.config.
# Tracing is disabled first so the key material is not echoed to the terminal.
set +x
CA_CRT=$(grep certificate-authority-data "${KARMADA_APISERVER_KUBECONFIG}" | awk -F ': ' '{print $2}' | base64 -d)
AGENT_CRT=$(grep client-certificate-data "${KARMADA_APISERVER_KUBECONFIG}" | awk -F ': ' '{print $2}' | base64 -d)
AGENT_KEY=$(grep client-key-data "${KARMADA_APISERVER_KUBECONFIG}" | awk -F ': ' '{print $2}' | base64 -d)

# 6. join pull mode member clusters by installing the karmada-agent helm chart into each
for clustername in "${!PULL_MODE_MEMBERS[@]}"; do
  clusterConfig=${PULL_MODE_MEMBERS[$clustername]}
  ALL_MEMBERS[$clustername]=$clusterConfig

  # endpoint of the member cluster's own kube-apiserver, used as agent.clusterEndpoint
  MEMBER_APISERVER_ADDR=$(kubectl get ep kubernetes --kubeconfig "${clusterConfig}" --context "${clustername}" | tail -n 1 | awk '{print $2}')

  helm install karmada-agent -n karmada-system \
    --kubeconfig "${clusterConfig}" \
    --kube-context "${clustername}" \
    --create-namespace \
    --dependency-update \
    --set installMode=agent,agent.clusterName="${clustername}",agent.clusterEndpoint=https://"${MEMBER_APISERVER_ADDR}",agent.kubeconfig.server=https://"${KARMADA_APISERVER_ADDR}",agent.kubeconfig.caCrt="${CA_CRT}",agent.kubeconfig.crt="${AGENT_CRT}",agent.kubeconfig.key="${AGENT_KEY}" \
    "${CHARTDIR}"
done
| 207 | + |
echo "########## end deploying member clusters success ##########"

# compare against "true" explicitly instead of executing the variable as a command
if [[ "${INSTALL_ESTIMATOR}" == "true" ]]; then
  echo "########## start deploying karmada-scheduler-estimator ##########"

  # 1. install one karmada-scheduler-estimator per member cluster, wiring in that
  # cluster's apiserver address and client certificates from its kubeconfig
  for clustername in "${!ALL_MEMBERS[@]}"; do
    MEMBER_APISERVER=$(grep server: "${ALL_MEMBERS[$clustername]}" | awk -F ': ' '{print $2}')
    MEMBER_CA_CRT=$(grep certificate-authority-data "${ALL_MEMBERS[$clustername]}" | awk -F ': ' '{print $2}' | base64 -d)
    MEMBER_CRT=$(grep client-certificate-data "${ALL_MEMBERS[$clustername]}" | awk -F ': ' '{print $2}' | base64 -d)
    MEMBER_KEY=$(grep client-key-data "${ALL_MEMBERS[$clustername]}" | awk -F ': ' '{print $2}' | base64 -d)

    # 'upgrade -i' keeps the loop idempotent if an estimator release already exists
    helm upgrade -i "karmada-scheduler-estimator-${clustername}" -n karmada-system \
      --kubeconfig "${KARMADA_HOST_KUBECONFIG}" \
      --set installMode=component,components={"schedulerEstimator"},schedulerEstimator.memberClusters[0].clusterName="${clustername}",schedulerEstimator.memberClusters[0].kubeconfig.server="${MEMBER_APISERVER}",schedulerEstimator.memberClusters[0].kubeconfig.caCrt="${MEMBER_CA_CRT}",schedulerEstimator.memberClusters[0].kubeconfig.crt="${MEMBER_CRT}",schedulerEstimator.memberClusters[0].kubeconfig.key="${MEMBER_KEY}" \
      "${CHARTDIR}"
  done

  echo "########## end deploying karmada-scheduler-estimator success ##########"
fi
| 228 | + |
# merge karmada-host.config and karmada-apiserver.config into ${KARMADA_MERGE_KUBECONFIG},
# keeping parity with the other installation methods
KARMADA_MERGE_KUBECONFIG="${HOME}/.kube/karmada.config"
export KUBECONFIG="${KARMADA_HOST_KUBECONFIG}":"${KARMADA_APISERVER_KUBECONFIG}"
kubectl config view --flatten > "${KARMADA_MERGE_KUBECONFIG}"

# verify: wait until every member cluster reports Ready=True, then print them
MEMBERS_NUMBER=$(( ${#PUSH_MODE_MEMBERS[*]} + ${#PULL_MODE_MEMBERS[*]} ))
while [[ "$(kubectl --context karmada-apiserver get clusters -o wide | grep -c "True")" -ne ${MEMBERS_NUMBER} ]]; do
  echo "waiting for member clusters ready..."; sleep 2;
done
kubectl --context karmada-apiserver get cluster -o wide