
Commit 6bce7f8

Merge pull request #5288 from nawazkh/peer-aks-vnet-with-in-tiltfile
Peer AKS VNet for apiserver-ilb template
2 parents 177964e + 73cd188

File tree

8 files changed: +150 -36 lines changed

Tiltfile

Lines changed: 107 additions & 15 deletions
@@ -23,8 +23,8 @@ settings = {
     "capi_version": "v1.8.5",
     "caaph_version": "v0.2.5",
     "cert_manager_version": "v1.16.1",
-    "kubernetes_version": "v1.28.3",
-    "aks_kubernetes_version": "v1.28.3",
+    "kubernetes_version": "v1.28.15",
+    "aks_kubernetes_version": "v1.28.15",
     "flatcar_version": "3374.2.1",
     "azure_location": "eastus",
     "control_plane_machine_count": "1",
@@ -213,10 +213,10 @@ def capz():
     yaml = str(kustomizesub("./hack/observability")) # build an observable kind deployment by default

     # add extra_args if they are defined
-    if settings.get("extra_args"):
-        azure_extra_args = settings.get("extra_args").get("azure")
+    if settings.get("container_args"):
+        capz_container_args = settings.get("container_args").get("capz-controller-manager")
         yaml_dict = decode_yaml_stream(yaml)
-        append_arg_for_container_in_deployment(yaml_dict, "capz-controller-manager", "capz-system", "cluster-api-azure-controller", azure_extra_args)
+        append_arg_for_container_in_deployment(yaml_dict, "capz-controller-manager", "capz-system", "cluster-api-azure-controller", capz_container_args)
         yaml = str(encode_yaml_stream(yaml_dict))
         yaml = fixup_yaml_empty_arrays(yaml)
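
The settings key changes from extra_args (keyed by provider, e.g. "azure") to container_args (keyed by deployment name), so an existing extra_args stanza silently stops taking effect. A minimal sketch of the new shape, assuming the same tilt-settings.yaml override file; the --v flag is only an example argument:

    # container_args maps a deployment name to a list of extra container
    # args; the Tiltfile reads the "capz-controller-manager" entry.
    printf '%s\n' \
      'container_args:' \
      '  capz-controller-manager:' \
      '  - "--v=4"' >> tilt-settings.yaml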

@@ -318,9 +318,14 @@ def flavors():
     for template in template_list:
         deploy_worker_templates(template, substitutions)

+    delete_all_workload_clusters = kubectl_cmd + " delete clusters --all --wait=false;"
+
+    if "aks" in settings.get("kustomize_substitutions", {}).get("MGMT_CLUSTER_NAME", ""):
+        delete_all_workload_clusters += clear_aks_vnet_peerings()
+
     local_resource(
         name = "delete-all-workload-clusters",
-        cmd = kubectl_cmd + " delete clusters --all --wait=false",
+        cmd = ["sh", "-ec", delete_all_workload_clusters],
         auto_init = False,
         trigger_mode = TRIGGER_MODE_MANUAL,
         labels = ["flavors"],
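
Because cmd is now the argv list ["sh", "-ec", delete_all_workload_clusters], the manual delete-all-workload-clusters resource runs a single shell script: cluster deletion first, then, when the management cluster name contains "aks", the peering cleanup appended by clear_aks_vnet_peerings(). A rough expansion of what the resource executes, assuming kubectl_cmd resolves to plain kubectl:

    # Hypothetical expansion; with -e, the peering cleanup only runs if
    # the cluster deletion command succeeds.
    sh -ec '
    kubectl delete clusters --all --wait=false;
    echo "--------Clearing AKS MGMT VNETs Peerings--------";
    # ...peering deletions appended by clear_aks_vnet_peerings()...
    '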
@@ -383,16 +388,35 @@ def deploy_worker_templates(template, substitutions):

     yaml = shlex.quote(yaml)
     flavor_name = os.path.basename(flavor)
-    flavor_cmd = "RANDOM=$(bash -c 'echo $RANDOM'); export CLUSTER_NAME=" + flavor.replace("windows", "win") + "-$RANDOM; make generate-flavors; echo " + yaml + "> ./.tiltbuild/" + flavor + "; cat ./.tiltbuild/" + flavor + " | " + envsubst_cmd + " | " + kubectl_cmd + " apply -f -; echo \"Cluster \'$CLUSTER_NAME\' created, don't forget to delete\""
+    flavor_cmd = "RANDOM=$(bash -c 'echo $RANDOM'); "
+
+    apiserver_lb_private_ip = os.getenv("AZURE_INTERNAL_LB_PRIVATE_IP", "")
+    if "windows-apiserver-ilb" in flavor and apiserver_lb_private_ip == "":
+        flavor_cmd += "export AZURE_INTERNAL_LB_PRIVATE_IP=\"40.0.11.100\"; "
+    elif "apiserver-ilb" in flavor and apiserver_lb_private_ip == "":
+        flavor_cmd += "export AZURE_INTERNAL_LB_PRIVATE_IP=\"30.0.11.100\"; "
+
+    flavor_cmd += "export CLUSTER_NAME=" + flavor.replace("windows", "win") + "-$RANDOM; echo " + yaml + "> ./.tiltbuild/" + flavor + "; cat ./.tiltbuild/" + flavor + " | " + envsubst_cmd + " | " + kubectl_cmd + " apply -f -; "
+    flavor_cmd += "echo \"Cluster ${CLUSTER_NAME} created, don't forget to delete\"; "

     # wait for kubeconfig to be available
-    flavor_cmd += "; until " + kubectl_cmd + " get secret ${CLUSTER_NAME}-kubeconfig > /dev/null 2>&1; do sleep 5; done; " + kubectl_cmd + " get secret ${CLUSTER_NAME}-kubeconfig -o jsonpath={.data.value} | base64 --decode > ./${CLUSTER_NAME}.kubeconfig; chmod 600 ./${CLUSTER_NAME}.kubeconfig; until " + kubectl_cmd + " --kubeconfig=./${CLUSTER_NAME}.kubeconfig get nodes > /dev/null 2>&1; do sleep 5; done"
+    flavor_cmd += "echo \"Waiting for kubeconfig to be available\"; "
+    flavor_cmd += "until " + kubectl_cmd + " get secret ${CLUSTER_NAME}-kubeconfig > /dev/null 2>&1; do sleep 5; done; "
+    flavor_cmd += kubectl_cmd + " get secret ${CLUSTER_NAME}-kubeconfig -o jsonpath={.data.value} | base64 --decode > ./${CLUSTER_NAME}.kubeconfig; "
+    flavor_cmd += "chmod 600 ./${CLUSTER_NAME}.kubeconfig; "
+    flavor_cmd += "echo \"Kubeconfig for ${CLUSTER_NAME} created and saved in the local\"; "
+    flavor_cmd += "echo \"Waiting for ${CLUSTER_NAME} API Server to be accessible\"; "
+    flavor_cmd += "until " + kubectl_cmd + " --kubeconfig=./${CLUSTER_NAME}.kubeconfig get nodes > /dev/null 2>&1; do sleep 5; done; "
+    flavor_cmd += "echo \"API Server of ${CLUSTER_NAME} is accessible\"; "

     # copy the kubeadm configmap to the calico-system namespace.
     # This is a workaround needed for the calico-node-windows daemonset to be able to run in the calico-system namespace.
     if "windows" in flavor_name:
-        flavor_cmd += "; until " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig get configmap kubeadm-config --namespace=kube-system > /dev/null 2>&1; do sleep 5; done"
-        flavor_cmd += "; " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig create namespace calico-system --dry-run=client -o yaml | " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig apply -f -; " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig get configmap kubeadm-config --namespace=kube-system -o yaml | sed 's/namespace: kube-system/namespace: calico-system/' | " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig apply -f -"
+        flavor_cmd += "until " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig get configmap kubeadm-config --namespace=kube-system > /dev/null 2>&1; do sleep 5; done; "
+        flavor_cmd += kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig create namespace calico-system --dry-run=client -o yaml | " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig apply -f -; " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig get configmap kubeadm-config --namespace=kube-system -o yaml | sed 's/namespace: kube-system/namespace: calico-system/' | " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig apply -f -; "
+
+    if "aks" in settings.get("kustomize_substitutions", {}).get("MGMT_CLUSTER_NAME", ""):
+        flavor_cmd += peer_vnets()

     flavor_cmd += get_addons(flavor_name)
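
For the apiserver-ilb flavors, AZURE_INTERNAL_LB_PRIVATE_IP is now defaulted when the caller leaves it unset: 40.0.11.100 for the Windows template and 30.0.11.100 for the Linux one, matching their different VNet address spaces. A sketch of the prelude that flavor_cmd emits for a Linux apiserver-ilb flavor with the variable unset (the cluster name shown is illustrative):

    RANDOM=$(bash -c 'echo $RANDOM');
    export AZURE_INTERNAL_LB_PRIVATE_IP="30.0.11.100";
    export CLUSTER_NAME=apiserver-ilb-$RANDOM;   # hypothetical flavor name
    # ...then: apply the rendered template, poll for the kubeconfig
    # secret, write ./${CLUSTER_NAME}.kubeconfig, and wait for nodes.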

@@ -410,14 +434,15 @@ def get_addons(flavor_name):
     if "aks" in flavor_name:
         return ""

-    addon_cmd = "; export CIDRS=$(" + kubectl_cmd + " get cluster ${CLUSTER_NAME} -o jsonpath='{.spec.clusterNetwork.pods.cidrBlocks[*]}')"
-    addon_cmd += "; export CIDR_LIST=$(bash -c 'echo $CIDRS' | tr ' ' ',')"
-    addon_cmd += "; " + helm_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig install --repo https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo cloud-provider-azure --generate-name --set infra.clusterName=${CLUSTER_NAME} --set cloudControllerManager.clusterCIDR=${CIDR_LIST}"
+    addon_cmd = "export CIDRS=$(" + kubectl_cmd + " get cluster ${CLUSTER_NAME} -o jsonpath='{.spec.clusterNetwork.pods.cidrBlocks[*]}'); "
+    addon_cmd += "export CIDR_LIST=$(bash -c 'echo $CIDRS' | tr ' ' ','); "
+    addon_cmd += helm_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig install --repo https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo cloud-provider-azure --generate-name --set infra.clusterName=${CLUSTER_NAME} --set cloudControllerManager.clusterCIDR=${CIDR_LIST}"
     if "flatcar" in flavor_name: # append caCertDir location to the cloud-provider-azure helm install command for flatcar flavor
         addon_cmd += " --set-string cloudControllerManager.caCertDir=/usr/share/ca-certificates"
+    addon_cmd += "; "

     if "azure-cni-v1" in flavor_name:
-        addon_cmd += "; " + kubectl_cmd + " apply -f ./templates/addons/azure-cni-v1.yaml --kubeconfig ./${CLUSTER_NAME}.kubeconfig"
+        addon_cmd += kubectl_cmd + " apply -f ./templates/addons/azure-cni-v1.yaml --kubeconfig ./${CLUSTER_NAME}.kubeconfig; "
     else:
         # install calico
         if "ipv6" in flavor_name:
@@ -426,7 +451,7 @@ def get_addons(flavor_name):
             calico_values = "./templates/addons/calico-dual-stack/values.yaml"
         else:
             calico_values = "./templates/addons/calico/values.yaml"
-        addon_cmd += "; " + helm_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig install --repo https://docs.tigera.io/calico/charts --version ${CALICO_VERSION} calico tigera-operator -f " + calico_values + " --namespace tigera-operator --create-namespace"
+        addon_cmd += helm_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig install --repo https://docs.tigera.io/calico/charts --version ${CALICO_VERSION} calico tigera-operator -f " + calico_values + " --namespace tigera-operator --create-namespace; "

     return addon_cmd
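
get_addons() now terminates each command with "; " instead of prefixing it, which keeps the concatenated flavor_cmd a valid script regardless of what precedes it. Once the resource has run, one quick way to confirm the addons landed, assuming the kubeconfig that flavor_cmd writes to the repo root:

    # Expect cloud-provider-azure plus calico (or azure-cni-v1) here.
    helm --kubeconfig ./${CLUSTER_NAME}.kubeconfig list --all-namespaces
    kubectl --kubeconfig ./${CLUSTER_NAME}.kubeconfig get pods -n tigera-operator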

@@ -455,6 +480,73 @@ def waitforsystem():
     local(kubectl_cmd + " wait --for=condition=ready --timeout=300s pod --all -n capi-kubeadm-control-plane-system")
     local(kubectl_cmd + " wait --for=condition=ready --timeout=300s pod --all -n capi-system")

+def peer_vnets():
+    # TODO: check for az cli to be installed in local
+    # wait for AKS VNet to be in the state created
+    peering_cmd = '''
+    echo \"--------Peering VNETs--------\";
+    az network vnet wait --resource-group ${AKS_RESOURCE_GROUP} --name ${AKS_MGMT_VNET_NAME} --created --timeout 180;
+    export MGMT_VNET_ID=$(az network vnet show --resource-group ${AKS_RESOURCE_GROUP} --name ${AKS_MGMT_VNET_NAME} --query id --output tsv);
+    echo \" 1/8 ${AKS_MGMT_VNET_NAME} found \"; '''
+
+    # wait for workload VNet to be created
+    peering_cmd += '''
+    az network vnet wait --resource-group ${CLUSTER_NAME} --name ${CLUSTER_NAME}-vnet --created --timeout 180;
+    export WORKLOAD_VNET_ID=$(az network vnet show --resource-group ${CLUSTER_NAME} --name ${CLUSTER_NAME}-vnet --query id --output tsv);
+    echo \" 2/8 ${CLUSTER_NAME}-vnet found \"; '''
+
+    # peer mgmt vnet
+    peering_cmd += '''
+    az network vnet peering create --name mgmt-to-${CLUSTER_NAME} --resource-group ${AKS_RESOURCE_GROUP} --vnet-name ${AKS_MGMT_VNET_NAME} --remote-vnet \"${WORKLOAD_VNET_ID}\" --allow-vnet-access true --allow-forwarded-traffic true --only-show-errors --output none;
+    az network vnet peering wait --name mgmt-to-${CLUSTER_NAME} --resource-group ${AKS_RESOURCE_GROUP} --vnet-name ${AKS_MGMT_VNET_NAME} --created --timeout 300 --only-show-errors --output none;
+    echo \" 3/8 mgmt-to-${CLUSTER_NAME} peering created in ${AKS_MGMT_VNET_NAME}\"; '''
+
+    # peer workload vnet
+    peering_cmd += '''
+    az network vnet peering create --name ${CLUSTER_NAME}-to-mgmt --resource-group ${CLUSTER_NAME} --vnet-name ${CLUSTER_NAME}-vnet --remote-vnet \"${MGMT_VNET_ID}\" --allow-vnet-access true --allow-forwarded-traffic true --only-show-errors --output none;
+    az network vnet peering wait --name ${CLUSTER_NAME}-to-mgmt --resource-group ${CLUSTER_NAME} --vnet-name ${CLUSTER_NAME}-vnet --created --timeout 300 --only-show-errors --output none;
+    echo \" 4/8 ${CLUSTER_NAME}-to-mgmt peering created in ${CLUSTER_NAME}-vnet\"; '''
+
+    # create private DNS zone
+    peering_cmd += '''
+    az network private-dns zone create --resource-group ${CLUSTER_NAME} --name ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com --only-show-errors --output none;
+    az network private-dns zone wait --resource-group ${CLUSTER_NAME} --name ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com --created --timeout 300 --only-show-errors --output none;
+    echo \" 5/8 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com private DNS zone created in ${CLUSTER_NAME}\"; '''
+
+    # link private DNS Zone to workload vnet
+    peering_cmd += '''
+    az network private-dns link vnet create --resource-group ${CLUSTER_NAME} --zone-name ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com --name ${CLUSTER_NAME}-to-mgmt --virtual-network \"${WORKLOAD_VNET_ID}\" --registration-enabled false --only-show-errors --output none;
+    az network private-dns link vnet wait --resource-group ${CLUSTER_NAME} --zone-name ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com --name ${CLUSTER_NAME}-to-mgmt --created --timeout 300 --only-show-errors --output none;
+    echo \" 6/8 workload cluster vnet ${CLUSTER_NAME}-vnet linked with private DNS zone\"; '''
+
+    # link private DNS Zone to mgmt vnet
+    peering_cmd += '''
+    az network private-dns link vnet create --resource-group ${CLUSTER_NAME} --zone-name ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com --name mgmt-to-${CLUSTER_NAME} --virtual-network \"${MGMT_VNET_ID}\" --registration-enabled false --only-show-errors --output none;
+    az network private-dns link vnet wait --resource-group ${CLUSTER_NAME} --zone-name ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com --name mgmt-to-${CLUSTER_NAME} --created --timeout 300 --only-show-errors --output none;
+    echo \" 7/8 management cluster vnet ${AKS_MGMT_VNET_NAME} linked with private DNS zone\"; '''
+
+    # create private DNS zone record
+    peering_cmd += '''
+    az network private-dns record-set a add-record --resource-group ${CLUSTER_NAME} --zone-name ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com --record-set-name \"@\" --ipv4-address ${AZURE_INTERNAL_LB_PRIVATE_IP} --only-show-errors --output none;
+    echo \" 8/8 \"@\" private DNS zone record created to point ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com to ${AZURE_INTERNAL_LB_PRIVATE_IP}\"; '''
+
+    return peering_cmd
+
+def clear_aks_vnet_peerings():
+    #
+    delete_peering_cmd = '''
+    echo \"--------Clearing AKS MGMT VNETs Peerings--------\";
+    az network vnet wait --resource-group ${AKS_RESOURCE_GROUP} --name ${AKS_MGMT_VNET_NAME} --created --timeout 180;
+    echo \" VNet ${AKS_MGMT_VNET_NAME} found \"; '''
+
+    # List all peering names and store them in an array
+    delete_peering_cmd += '''
+    PEERING_NAMES=$(az network vnet peering list --resource-group ${AKS_RESOURCE_GROUP} --vnet-name ${AKS_MGMT_VNET_NAME} --query \"[].name\" --output tsv);
+    for PEERING_NAME in ${PEERING_NAMES[@]}; do echo \"Deleting peering: ${PEERING_NAME}\"; az network vnet peering delete --name ${PEERING_NAME} --resource-group ${AKS_RESOURCE_GROUP} --vnet-name ${AKS_MGMT_VNET_NAME}; done;
+    echo \"All VNETs Peerings deleted in ${AKS_MGMT_VNET_NAME}\"; '''
+
+    return delete_peering_cmd
+
 ##############################
 # Actual work happens here
 ##############################
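
peer_vnets() connects the AKS management VNet and the workload VNet in both directions, then publishes the internal load balancer's private IP in a private DNS zone linked to both VNets, so the management cluster can resolve and reach the workload API server's ILB endpoint; clear_aks_vnet_peerings() removes only the management-side peerings. To inspect the results by hand, assuming an authenticated az CLI and the same environment variables the Tiltfile uses:

    # Peerings on the management VNet (expect mgmt-to-<cluster>, Connected).
    az network vnet peering list --resource-group ${AKS_RESOURCE_GROUP} \
        --vnet-name ${AKS_MGMT_VNET_NAME} \
        --query "[].{name:name, state:peeringState}" --output table

    # VNet links on the workload cluster's private DNS zone (expect two).
    az network private-dns link vnet list --resource-group ${CLUSTER_NAME} \
        --zone-name ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com \
        --output table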
