This repository has been archived by the owner on Nov 22, 2023. It is now read-only.
cluster.tf
resource "random_pet" "cluster_name" {
length = 2
}
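
# The resources below iterate over var.node. A minimal, hypothetical sketch of
# its assumed shape (the real definition lives elsewhere in this repository,
# e.g. variables.tf): the attribute names appear in the code below, while the
# map(object) type and the per-attribute types are assumptions.
#
# variable "node" {
#   type = map(object({
#     name         = string # machine pool / machine config name
#     vcpu         = number # CPU count per node
#     vram         = number # memory size per node
#     hdd_capacity = number # disk size per node
#     vm_network   = string # vSphere network (port group) for the VM
#     quantity     = number # number of nodes in the pool
#   }))
# }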
resource "rancher2_machine_config_v2" "nodes" {
for_each = var.node
generate_name = replace(each.value.name, "_", "-")
vsphere_config {
cfgparam = ["disk.enableUUID=TRUE"]
clone_from = var.vsphere_env.cloud_image_name
cloud_config = templatefile("${path.cwd}/files/user_data.tftmpl",
{
ssh_user = "rancher",
ssh_public_key = file("${path.cwd}/files/.ssh-public-key")
}) # End of templatefile
content_library = var.vsphere_env.library_name
cpu_count = each.value.vcpu
creation_type = "library"
datacenter = var.vsphere_env.datacenter
datastore = var.vsphere_env.datastore
disk_size = each.value.hdd_capacity
memory_size = each.value.vram
network = [each.value.vm_network]
vcenter = var.vsphere_env.server
}
} # End of rancher2_machine_config_v2
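
# The cluster below references data sources defined elsewhere in this
# repository. A hypothetical sketch of how they might be declared; the URLs
# and the credential name are assumptions, not taken from this file:
#
# data "http" "kube_vip_rbac" {
#   url = "https://kube-vip.io/manifests/rbac.yaml"
# }
#
# data "http" "kube_vip_version" {
#   url = "https://api.github.com/repos/kube-vip/kube-vip/releases/latest"
# }
#
# data "rancher2_cloud_credential" "auth" {
#   name = "vsphere-credentials" # hypothetical credential name in Rancher
# }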
resource "rancher2_cluster_v2" "rke2" {
annotations = var.rancher_env.cluster_annotations
kubernetes_version = var.rancher_env.rke2_version
labels = var.rancher_env.cluster_labels
name = random_pet.cluster_name.id
rke_config {
additional_manifest = templatefile("${path.cwd}/files/additional_manifests.tftmpl",
{
interface_name = var.kubevip.interface_name
kube_vip_rbac = data.http.kube_vip_rbac.response_body
kube_vip_version = jsondecode(data.http.kube_vip_version.response_body)["tag_name"]
svc_lb_vip = var.kubevip.lb_vip
})
chart_values = <<-EOF
rke2-cilium:
bandwidthManager:
bbr: true
enabled: true
bpf:
hostLegacyRouting: false
masquerade: true
cluster:
name: ${random_pet.cluster_name.id}
installNoConntrackIptablesRules: true
k8sServiceHost: 127.0.0.1
k8sServicePort: 6443
kubeProxyReplacement: true
operator:
enabled: true
replicas: 1
EOF
machine_global_config = <<EOF
cni: "cilium"
disable: [ "rke2-ingress-nginx" ]
disable-kube-proxy: "true"
etcd-arg: [ "experimental-initial-corrupt-check=true" ] # Can be removed once etcd v3.6 enables corruption check by default (see: https://github.com/etcd-io/etcd/issues/13766)
kubelet-arg: [ "cgroup-driver=systemd" ]
write-kubeconfig-mode: "0644"
EOF
dynamic "machine_pools" {
for_each = var.node
content {
cloud_credential_secret_name = data.rancher2_cloud_credential.auth.id
control_plane_role = machine_pools.key == "ctl_plane" ? true : false
etcd_role = machine_pools.key == "ctl_plane" ? true : false
name = machine_pools.value.name
quantity = machine_pools.value.quantity
worker_role = machine_pools.key != "ctl_plane" ? true : false
machine_config {
kind = rancher2_machine_config_v2.nodes[machine_pools.key].kind
name = replace(rancher2_machine_config_v2.nodes[machine_pools.key].name, "_", "-")
}
} # End of dynamic for_each content
} # End of machine_pools
machine_selector_config {
config = null
} # End machine_selector_config
} # End of rke_config
} # End of rancher2_cluster_v2
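
# Providers this file relies on (rancher2, random, and http for the data
# sources referenced above). A sketch of the required_providers block that
# would normally live in a separate versions.tf; version constraints are
# deliberately omitted since they are not known from this file:
#
# terraform {
#   required_providers {
#     rancher2 = { source = "rancher/rancher2" }
#     random   = { source = "hashicorp/random" }
#     http     = { source = "hashicorp/http" }
#   }
# }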