# awscli-cp-with-vpc.yaml (forked from awslabs/kubernetes-iteration-toolkit)
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: awscli-eks-cluster-create-with-vpc-stack
  namespace: scalability
spec:
  description: |
    Create an EKS cluster.
    This Task can be used to create an EKS cluster for a given service role and region in an AWS account.
    An illustrative TaskRun is sketched in a comment at the end of this file.
  params:
    - name: cluster-name
      description: The name of the EKS cluster you want to spin up.
    - name: kubernetes-version
      default: "1.23"
      description: The EKS version to install.
    - name: region
      default: "us-west-2"
      description: The region to create the cluster in.
    - name: endpoint
      default: ""
      description: "The AWS EKS endpoint to create clusters against."
    - name: service-role-name
      description: Name of the service role the EKS cluster uses to perform operations in the customer account while setting up the cluster.
    - name: vpc-stack-name
      description: The name of the VPC CloudFormation stack to use for the EKS cluster.
    - name: aws-ebs-csi-driver-version
      default: release-1.13
      description: The release version of the aws-ebs-csi-driver.
  workspaces:
    - name: config
      mountPath: /config/
  stepTemplate:
    env:
      - name: KUBECONFIG
        value: /config/kubeconfig
  steps:
    - name: create-eks
      image: alpine/k8s:1.23.7
      script: |
        ENDPOINT_FLAG=""
        if [ -n "$(params.endpoint)" ]; then
          ENDPOINT_FLAG="--endpoint $(params.endpoint)"
        fi
        SERVICE_ROLE_NAME=$(params.service-role-name)
        SERVICE_ROLE_ARN=$(aws iam get-role --role-name $SERVICE_ROLE_NAME --query 'Role.[Arn]' --output text)
        # Check whether a cluster with this name already exists before creating one.
        CREATED_CLUSTER=$(aws eks $ENDPOINT_FLAG list-clusters --region $(params.region) --query 'clusters[?@==`'$(params.cluster-name)'`]' --output text)
        echo "CREATED_CLUSTER=$CREATED_CLUSTER"
        # Read the subnet IDs and security group from the VPC CloudFormation stack outputs.
        subnets=$(aws cloudformation --region $(params.region) describe-stacks --stack-name $(params.vpc-stack-name) --query='Stacks[].Outputs[?OutputKey==`SubnetIds`].OutputValue' --output text | sed -e 's/ /,/')
        echo "subnets=$subnets"
        sg=$(aws cloudformation --region $(params.region) describe-stacks --stack-name $(params.vpc-stack-name) --query='Stacks[].Outputs[?OutputKey==`SecurityGroups`].OutputValue' --output text)
        echo "securitygroup=$sg"
        if [ "$CREATED_CLUSTER" == "" ]; then
          aws eks create-cluster --name $(params.cluster-name) --region $(params.region) --kubernetes-version $(params.kubernetes-version) --role-arn $SERVICE_ROLE_ARN --resources-vpc-config subnetIds=$subnets,securityGroupIds=$sg $ENDPOINT_FLAG
        fi
        aws eks $ENDPOINT_FLAG --region $(params.region) wait cluster-active --name $(params.cluster-name)
    - name: write-kubeconfig
      image: alpine/k8s:1.23.7
      script: |
        ENDPOINT_FLAG=""
        if [ -n "$(params.endpoint)" ]; then
          ENDPOINT_FLAG="--endpoint $(params.endpoint)"
        fi
        # Write the kubeconfig for the new cluster to the config workspace (see KUBECONFIG in stepTemplate).
        aws eks $ENDPOINT_FLAG update-kubeconfig --name $(params.cluster-name) --region $(params.region)
    - name: install-addons-and-validate
      image: alpine/k8s:1.23.7
      script: |
        kubectl api-versions
        kubectl api-resources
        # The apiserver sometimes does not recognize the daemonset right away; poll /readyz until it reports healthy.
        while true; do date && kubectl get --raw "/readyz" --v=10 && break ; sleep 5; done
        # TODO: remove this; added to help EKS networking evaluate ec2 DescribeNetworkInterfaces performance.
        kubectl set image daemonset/aws-node -n kube-system aws-node=public.ecr.aws/e4l5e7p8/amazon/amazon-k8s-cni:v1.18.3-scalability-rc1
        # Enable prefix delegation (PD) on the cluster.
        kubectl set env daemonset/aws-node -n kube-system ENABLE_PREFIX_DELEGATION=true
        # Install the EBS CSI driver.
        kubectl apply -k "github.com/kubernetes-sigs/aws-ebs-csi-driver/deploy/kubernetes/overlays/stable/?ref=$(params.aws-ebs-csi-driver-version)"
        # TODO: Calculate replicas based on the cluster size going forward.
        # Patch coredns so it does not get scheduled on the monitoring nodes.
        kubectl patch deployment coredns --patch '{
          "spec": {
            "template": {
              "spec": {
                "affinity": {
                  "podAntiAffinity": {
                    "requiredDuringSchedulingIgnoredDuringExecution": [
                      {
                        "labelSelector": {
                          "matchExpressions": [
                            {
                              "key": "eks.amazonaws.com/nodegroup",
                              "operator": "In",
                              "values": ["monitoring-$(params.cluster-name)-nodes-1"]
                            }
                          ]
                        },
                        "topologyKey": "kubernetes.io/hostname"
                      }
                    ]
                  }
                }
              }
            }
          }
        }' -n kube-system
        kubectl scale --replicas 1000 deploy coredns -n kube-system
        # TODO: remove these comments after experimentation.
        # Install EKS Pod Identity Agent
        # ENDPOINT_FLAG=""
        # if [ -n "$(params.endpoint)" ]; then
        #   ENDPOINT_FLAG="--endpoint $(params.endpoint)"
        # fi
        # aws eks $ENDPOINT_FLAG create-addon --cluster-name $(params.cluster-name) --addon-name eks-pod-identity-agent --addon-version v1.0.0-eksbuild.1
        # aws eks $ENDPOINT_FLAG --region $(params.region) wait cluster-active --name $(params.cluster-name)
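
# A minimal example TaskRun for this Task, kept as a comment so it is not applied with the manifest.
# This is an illustrative sketch, not part of the upstream file: the cluster name, IAM role name, and
# CloudFormation stack name below are hypothetical placeholders. The role must be assumable by EKS, and
# the stack must expose SubnetIds and SecurityGroups outputs, as the create-eks step above expects.
#
# apiVersion: tekton.dev/v1beta1
# kind: TaskRun
# metadata:
#   generateName: awscli-eks-cluster-create-with-vpc-stack-run-
#   namespace: scalability
# spec:
#   taskRef:
#     name: awscli-eks-cluster-create-with-vpc-stack
#   params:
#     - name: cluster-name
#       value: example-cluster           # hypothetical cluster name
#     - name: service-role-name
#       value: example-eks-service-role  # hypothetical IAM role name
#     - name: vpc-stack-name
#       value: example-vpc-stack         # hypothetical CloudFormation stack name
#   workspaces:
#     - name: config
#       emptyDir: {}                     # holds the kubeconfig shared between steps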