From 3d5bbb105af6326004956298a7662030023d91aa Mon Sep 17 00:00:00 2001
From: Abhinav Pandey
Date: Tue, 17 Oct 2023 11:16:10 -0700
Subject: [PATCH] Add support for etcd encryption in vSphere (#6831)

* Add support for etcd encryption in vSphere

* Add e2e test for vsphere etcd encryption
---
 .../buildspecs/vsphere-test-eks-a-cli.yml     |   7 +
 pkg/providers/vsphere/config/template-cp.yaml |  16 +
 pkg/providers/vsphere/template.go             |   9 +
 .../cluster_ubuntu_etcd_encryption.yaml       | 132 ++++
 ...ted_results_ubuntu_etcd_encryption_cp.yaml | 742 ++++++++++++++++++
 pkg/providers/vsphere/vsphere_test.go         | 100 +++
 test/e2e/vsphere_test.go                      |  20 +
 test/framework/etcdencryption.go              |  13 +-
 8 files changed, 1038 insertions(+), 1 deletion(-)
 create mode 100644 pkg/providers/vsphere/testdata/cluster_ubuntu_etcd_encryption.yaml
 create mode 100644 pkg/providers/vsphere/testdata/expected_results_ubuntu_etcd_encryption_cp.yaml

diff --git a/cmd/integration_test/build/buildspecs/vsphere-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/vsphere-test-eks-a-cli.yml
index 7a5be8903d16..1dff3a58475b 100644
--- a/cmd/integration_test/build/buildspecs/vsphere-test-eks-a-cli.yml
+++ b/cmd/integration_test/build/buildspecs/vsphere-test-eks-a-cli.yml
@@ -77,6 +77,13 @@ env:
     T_PRIVATE_REGISTRY_MIRROR_CA_CERT: "harbor-registry-data:authenticated_caCert"
     T_NTP_SERVERS: "ntp:servers"
     T_AWS_IAM_ROLE_ARN: "aws-iam-auth-role:ec2_role_arn"
+    T_IRSA_S3_BUCKET: "etcd-encryption:irsa_s3_bucket"
+    T_KMS_IAM_ROLE: "etcd-encryption:kms_iam_role_arn"
+    T_KMS_IMAGE: "etcd-encryption:kms_image"
+    T_KMS_KEY_ARN: "etcd-encryption:kms_key_arn"
+    T_KMS_KEY_REGION: "etcd-encryption:region"
+    T_KMS_SOCKET: "etcd-encryption:socket"
+    T_SSH_PRIVATE_KEY: "vsphere_ci_beta_connection:base64_encoded_ssh_private_key"
 phases:
   pre_build:
     commands:
diff --git a/pkg/providers/vsphere/config/template-cp.yaml b/pkg/providers/vsphere/config/template-cp.yaml
index 83c55133e53f..2bbabfcf305e 100644
--- a/pkg/providers/vsphere/config/template-cp.yaml
+++ b/pkg/providers/vsphere/config/template-cp.yaml
@@ -182,6 +182,16 @@ spec:
           name: awsiamcert
           readOnly: false
 {{- end}}
+{{- if .encryptionProviderConfig }}
+        - hostPath: /etc/kubernetes/enc
+          mountPath: /etc/kubernetes/enc
+          name: encryption-config
+          readOnly: false
+        - hostPath: /var/run/kmsplugin/
+          mountPath: /var/run/kmsplugin/
+          name: kms-plugin
+          readOnly: false
+{{- end }}
     controllerManager:
       extraArgs:
         cloud-provider: external
@@ -213,6 +223,12 @@ spec:
       certificatesDir: /var/lib/kubeadm/pki
 {{- end }}
     files:
+{{- if .encryptionProviderConfig }}
+    - content: |
+{{ .encryptionProviderConfig | indent 8}}
+      owner: root:root
+      path: /etc/kubernetes/enc/encryption-config.yaml
+{{- end }}
     - content: |
         apiVersion: v1
         kind: Pod
diff --git a/pkg/providers/vsphere/template.go b/pkg/providers/vsphere/template.go
index 32acb8f47e99..fceff2194875 100644
--- a/pkg/providers/vsphere/template.go
+++ b/pkg/providers/vsphere/template.go
@@ -134,6 +134,7 @@ func buildTemplateMapCP(
 	apiServerExtraArgs := clusterapi.OIDCToExtraArgs(clusterSpec.OIDCConfig).
 		Append(clusterapi.AwsIamAuthExtraArgs(clusterSpec.AWSIamConfig)).
 		Append(clusterapi.PodIAMAuthExtraArgs(clusterSpec.Cluster.Spec.PodIAMConfig)).
+		Append(clusterapi.EtcdEncryptionExtraArgs(clusterSpec.Cluster.Spec.EtcdEncryption)).
 		Append(sharedExtraArgs)
 	controllerManagerExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs().
 		Append(clusterapi.NodeCIDRMaskExtraArgs(&clusterSpec.Cluster.Spec.ClusterNetwork))
@@ -324,6 +325,14 @@ func buildTemplateMapCP(
 		values["bottlerocketSettings"] = brSettings
 	}
 
+	if clusterSpec.Cluster.Spec.EtcdEncryption != nil && len(*clusterSpec.Cluster.Spec.EtcdEncryption) != 0 {
+		conf, err := common.GenerateKMSEncryptionConfiguration(clusterSpec.Cluster.Spec.EtcdEncryption)
+		if err != nil {
+			return nil, err
+		}
+		values["encryptionProviderConfig"] = conf
+	}
+
 	return values, nil
 }
 
diff --git a/pkg/providers/vsphere/testdata/cluster_ubuntu_etcd_encryption.yaml b/pkg/providers/vsphere/testdata/cluster_ubuntu_etcd_encryption.yaml
new file mode 100644
index 000000000000..a7b84e7dc8c3
--- /dev/null
+++ b/pkg/providers/vsphere/testdata/cluster_ubuntu_etcd_encryption.yaml
@@ -0,0 +1,132 @@
+apiVersion: anywhere.eks.amazonaws.com/v1alpha1
+kind: Cluster
+metadata:
+  name: test
+  namespace: test-namespace
+spec:
+  controlPlaneConfiguration:
+    count: 3
+    endpoint:
+      host: 1.2.3.4
+    machineGroupRef:
+      name: test-cp
+      kind: VSphereMachineConfig
+  kubernetesVersion: "1.21"
+  etcdEncryption:
+  - providers:
+    - kms:
+        name: config1
+        socketListenAddress: unix:///var/run/kmsplugin/socket1-new.sock
+    - kms:
+        name: config2
+        socketListenAddress: unix:///var/run/kmsplugin/socket1-old.sock
+    resources:
+    - secrets
+    - resource1.anywhere.eks.amazonsaws.com
+  - providers:
+    - kms:
+        name: config3
+        socketListenAddress: unix:///var/run/kmsplugin/socket2-new.sock
+    - kms:
+        name: config4
+        socketListenAddress: unix:///var/run/kmsplugin/socket2-old.sock
+    resources:
+    - configmaps
+    - resource2.anywhere.eks.amazonsaws.com
+  workerNodeGroupConfigurations:
+  - count: 3
+    machineGroupRef:
+      name: test-wn
+      kind: VSphereMachineConfig
+    name: md-0
+  externalEtcdConfiguration:
+    count: 3
+    machineGroupRef:
+      name: test-etcd
+      kind: VSphereMachineConfig
+  datacenterRef:
+    kind: VSphereDatacenterConfig
+    name: test
+  clusterNetwork:
+    cni: "cilium"
+    pods:
+      cidrBlocks:
+      - 192.168.0.0/16
+    services:
+      cidrBlocks:
+      - 10.96.0.0/12
+---
+apiVersion: anywhere.eks.amazonaws.com/v1alpha1
+kind: VSphereMachineConfig
+metadata:
+  name: test-cp
+  namespace: test-namespace
+spec:
+  diskGiB: 25
+  cloneMode: linkedClone
+  datastore: "/SDDC-Datacenter/datastore/WorkloadDatastore"
+  folder: "/SDDC-Datacenter/vm"
+  memoryMiB: 8192
+  numCPUs: 2
+  osFamily: ubuntu
+  resourcePool: "*/Resources"
+  storagePolicyName: "vSAN Default Storage Policy"
+  template: "/SDDC-Datacenter/vm/Templates/ubuntu-2004-kube-v1.21.2"
+  users:
+  - name: capv
+    sshAuthorizedKeys:
+    - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com"
+---
+apiVersion: anywhere.eks.amazonaws.com/v1alpha1
+kind: VSphereMachineConfig
+metadata:
+  name: test-wn
+  namespace: test-namespace
+spec:
+  diskGiB: 25
+  cloneMode: linkedClone
+  datastore: "/SDDC-Datacenter/datastore/WorkloadDatastore"
+  folder: "/SDDC-Datacenter/vm"
+  memoryMiB: 4096
+  numCPUs: 3
+  osFamily: ubuntu
+  resourcePool: "*/Resources"
+  storagePolicyName: "vSAN Default Storage Policy"
+  template: "/SDDC-Datacenter/vm/Templates/ubuntu-2004-kube-v1.21.2"
+  users:
+  - name: capv
+    sshAuthorizedKeys:
+    - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com"
+---
+apiVersion: anywhere.eks.amazonaws.com/v1alpha1
+kind: VSphereMachineConfig
+metadata:
+  name: test-etcd
+  namespace: test-namespace
+spec:
+  diskGiB: 25
+  cloneMode: linkedClone
+  datastore: "/SDDC-Datacenter/datastore/WorkloadDatastore"
+  folder: "/SDDC-Datacenter/vm"
+  memoryMiB: 4096
+  numCPUs: 3
+  osFamily: ubuntu
+  resourcePool: "*/Resources"
+  storagePolicyName: "vSAN Default Storage Policy"
+  template: "/SDDC-Datacenter/vm/Templates/ubuntu-2004-kube-v1.21.2"
+  users:
+  - name: capv
+    sshAuthorizedKeys:
+    - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com"
+---
+apiVersion: anywhere.eks.amazonaws.com/v1alpha1
+kind: VSphereDatacenterConfig
+metadata:
+  name: test
+  namespace: test-namespace
+spec:
+  datacenter: "SDDC-Datacenter"
+  network: "/SDDC-Datacenter/network/sddc-cgw-network-1"
+  server: "vsphere_server"
+  thumbprint: "ABCDEFG"
+  insecure: false
diff --git a/pkg/providers/vsphere/testdata/expected_results_ubuntu_etcd_encryption_cp.yaml b/pkg/providers/vsphere/testdata/expected_results_ubuntu_etcd_encryption_cp.yaml
new file mode 100644
index 000000000000..ff9706bb6acb
--- /dev/null
+++ b/pkg/providers/vsphere/testdata/expected_results_ubuntu_etcd_encryption_cp.yaml
@@ -0,0 +1,742 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+  labels:
+    cluster.x-k8s.io/cluster-name: test
+  name: test
+  namespace: eksa-system
+spec:
+  clusterNetwork:
+    pods:
+      cidrBlocks: [192.168.0.0/16]
+    services:
+      cidrBlocks: [10.96.0.0/12]
+  controlPlaneRef:
+    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+    kind: KubeadmControlPlane
+    name: test
+  infrastructureRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+    kind: VSphereCluster
+    name: test
+  managedExternalEtcdRef:
+    apiVersion: etcdcluster.cluster.x-k8s.io/v1beta1
+    kind: EtcdadmCluster
+    name: test-etcd
+    namespace: eksa-system
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: VSphereCluster
+metadata:
+  name: test
+  namespace: eksa-system
+spec:
+  controlPlaneEndpoint:
+    host: 1.2.3.4
+    port: 6443
+  identityRef:
+    kind: Secret
+    name: test-vsphere-credentials
+  server: vsphere_server
+  thumbprint: 'ABCDEFG'
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: VSphereMachineTemplate
+metadata:
+  name: test-control-plane-template-1234567890000
+  namespace: eksa-system
+spec:
+  template:
+    spec:
+      cloneMode: linkedClone
+      datacenter: 'SDDC-Datacenter'
+      datastore: /SDDC-Datacenter/datastore/WorkloadDatastore
+      diskGiB: 25
+      folder: '/SDDC-Datacenter/vm'
+      memoryMiB: 8192
+      network:
+        devices:
+        - dhcp4: true
+          networkName: /SDDC-Datacenter/network/sddc-cgw-network-1
+      numCPUs: 2
+      resourcePool: '*/Resources'
+      server: vsphere_server
+      storagePolicyName: "vSAN Default Storage Policy"
+      template: /SDDC-Datacenter/vm/Templates/ubuntu-2004-kube-v1.21.2
+      thumbprint: 'ABCDEFG'
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+  name: test
+  namespace: eksa-system
+spec:
+  machineTemplate:
+    infrastructureRef:
+      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+      kind: VSphereMachineTemplate
+      name: test-control-plane-template-1234567890000
+  kubeadmConfigSpec:
+    clusterConfiguration:
+      imageRepository: public.ecr.aws/eks-distro/kubernetes
+      etcd:
+        external:
+          endpoints: []
+          caFile: "/etc/kubernetes/pki/etcd/ca.crt"
+          certFile: "/etc/kubernetes/pki/apiserver-etcd-client.crt"
+          keyFile: "/etc/kubernetes/pki/apiserver-etcd-client.key"
+      dns:
+        imageRepository: public.ecr.aws/eks-distro/coredns
+        imageTag: v1.8.3-eks-1-21-4
+      apiServer:
+        extraArgs:
+          cloud-provider: external
+          audit-policy-file: /etc/kubernetes/audit-policy.yaml
+          audit-log-path: /var/log/kubernetes/api-audit.log
+          audit-log-maxage: "30"
+          audit-log-maxbackup: "10"
+          audit-log-maxsize: "512"
+          profiling: "false"
+          encryption-provider-config: /etc/kubernetes/enc/encryption-config.yaml
+          tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+        extraVolumes:
+        - hostPath: /etc/kubernetes/audit-policy.yaml
+          mountPath: /etc/kubernetes/audit-policy.yaml
+          name: audit-policy
+          pathType: File
+          readOnly: true
+        - hostPath: /var/log/kubernetes
+          mountPath: /var/log/kubernetes
+          name: audit-log-dir
+          pathType: DirectoryOrCreate
+          readOnly: false
+        - hostPath: /etc/kubernetes/enc
+          mountPath: /etc/kubernetes/enc
+          name: encryption-config
+          readOnly: false
+        - hostPath: /var/run/kmsplugin/
+          mountPath: /var/run/kmsplugin/
+          name: kms-plugin
+          readOnly: false
+      controllerManager:
+        extraArgs:
+          cloud-provider: external
+          profiling: "false"
+          tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+      scheduler:
+        extraArgs:
+          profiling: "false"
+          tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+    files:
+    - content: |
+        apiVersion: apiserver.config.k8s.io/v1
+        kind: EncryptionConfiguration
+        resources:
+        - providers:
+          - kms:
+              apiVersion: v1
+              cachesize: 1000
+              endpoint: unix:///var/run/kmsplugin/socket1-new.sock
+              name: config1
+              timeout: 3s
+          - kms:
+              apiVersion: v1
+              cachesize: 1000
+              endpoint: unix:///var/run/kmsplugin/socket1-old.sock
+              name: config2
+              timeout: 3s
+          - identity: {}
+          resources:
+          - secrets
+          - resource1.anywhere.eks.amazonsaws.com
+        - providers:
+          - kms:
+              apiVersion: v1
+              cachesize: 1000
+              endpoint: unix:///var/run/kmsplugin/socket2-new.sock
+              name: config3
+              timeout: 3s
+          - kms:
+              apiVersion: v1
+              cachesize: 1000
+              endpoint: unix:///var/run/kmsplugin/socket2-old.sock
+              name: config4
+              timeout: 3s
+          - identity: {}
+          resources:
+          - configmaps
+          - resource2.anywhere.eks.amazonsaws.com
+      owner: root:root
+      path: /etc/kubernetes/enc/encryption-config.yaml
+    - content: |
+        apiVersion: v1
+        kind: Pod
+        metadata:
+          creationTimestamp: null
+          name: kube-vip
+          namespace: kube-system
+        spec:
+          containers:
+          - args:
+            - manager
+            env:
+            - name: vip_arp
+              value: "true"
+            - name: port
+              value: "6443"
+            - name: vip_cidr
+              value: "32"
+            - name: cp_enable
+              value: "true"
+            - name: cp_namespace
+              value: kube-system
+            - name: vip_ddns
+              value: "false"
+            - name: vip_leaderelection
+              value: "true"
+            - name: vip_leaseduration
+              value: "15"
+            - name: vip_renewdeadline
+              value: "10"
+            - name: vip_retryperiod
+              value: "2"
+            - name: address
+              value: 1.2.3.4
+            image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.7-eks-a-v0.0.0-dev-build.158
+            imagePullPolicy: IfNotPresent
+            name: kube-vip
+            resources: {}
+            securityContext:
+              capabilities:
+                add:
+                - NET_ADMIN
+                - NET_RAW
+            volumeMounts:
+            - mountPath: /etc/kubernetes/admin.conf
+              name: kubeconfig
+          hostNetwork: true
+          volumes:
+          - hostPath:
+              path: /etc/kubernetes/admin.conf
+            name: kubeconfig
+        status: {}
+      owner: root:root
+      path: /etc/kubernetes/manifests/kube-vip.yaml
+    - content: |
+        apiVersion: audit.k8s.io/v1beta1
+        kind: Policy
+        rules:
+        # Log aws-auth configmap changes
+        - level: RequestResponse
+          namespaces: ["kube-system"]
+          verbs: ["update", "patch", "delete"]
+          resources:
+          - group: "" # core
+            resources: ["configmaps"]
+            resourceNames: ["aws-auth"]
+          omitStages:
+          - "RequestReceived"
+        # The following requests were manually identified as high-volume and low-risk,
+        # so drop them.
+        - level: None
+          users: ["system:kube-proxy"]
+          verbs: ["watch"]
+          resources:
+          - group: "" # core
+            resources: ["endpoints", "services", "services/status"]
+        - level: None
+          users: ["kubelet"] # legacy kubelet identity
+          verbs: ["get"]
+          resources:
+          - group: "" # core
+            resources: ["nodes", "nodes/status"]
+        - level: None
+          userGroups: ["system:nodes"]
+          verbs: ["get"]
+          resources:
+          - group: "" # core
+            resources: ["nodes", "nodes/status"]
+        - level: None
+          users:
+          - system:kube-controller-manager
+          - system:kube-scheduler
+          - system:serviceaccount:kube-system:endpoint-controller
+          verbs: ["get", "update"]
+          namespaces: ["kube-system"]
+          resources:
+          - group: "" # core
+            resources: ["endpoints"]
+        - level: None
+          users: ["system:apiserver"]
+          verbs: ["get"]
+          resources:
+          - group: "" # core
+            resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
+        # Don't log HPA fetching metrics.
+        - level: None
+          users:
+          - system:kube-controller-manager
+          verbs: ["get", "list"]
+          resources:
+          - group: "metrics.k8s.io"
+        # Don't log these read-only URLs.
+        - level: None
+          nonResourceURLs:
+          - /healthz*
+          - /version
+          - /swagger*
+        # Don't log events requests.
+        - level: None
+          resources:
+          - group: "" # core
+            resources: ["events"]
+        # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
+        - level: Request
+          users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
+          verbs: ["update","patch"]
+          resources:
+          - group: "" # core
+            resources: ["nodes/status", "pods/status"]
+          omitStages:
+          - "RequestReceived"
+        - level: Request
+          userGroups: ["system:nodes"]
+          verbs: ["update","patch"]
+          resources:
+          - group: "" # core
+            resources: ["nodes/status", "pods/status"]
+          omitStages:
+          - "RequestReceived"
+        # deletecollection calls can be large, don't log responses for expected namespace deletions
+        - level: Request
+          users: ["system:serviceaccount:kube-system:namespace-controller"]
+          verbs: ["deletecollection"]
+          omitStages:
+          - "RequestReceived"
+        # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
+        # so only log at the Metadata level.
+        - level: Metadata
+          resources:
+          - group: "" # core
+            resources: ["secrets", "configmaps"]
+          - group: authentication.k8s.io
+            resources: ["tokenreviews"]
+          omitStages:
+          - "RequestReceived"
+        - level: Request
+          resources:
+          - group: ""
+            resources: ["serviceaccounts/token"]
+        # Get repsonses can be large; skip them.
+        - level: Request
+          verbs: ["get", "list", "watch"]
+          resources:
+          - group: "" # core
+          - group: "admissionregistration.k8s.io"
+          - group: "apiextensions.k8s.io"
+          - group: "apiregistration.k8s.io"
+          - group: "apps"
+          - group: "authentication.k8s.io"
+          - group: "authorization.k8s.io"
+          - group: "autoscaling"
+          - group: "batch"
+          - group: "certificates.k8s.io"
+          - group: "extensions"
+          - group: "metrics.k8s.io"
+          - group: "networking.k8s.io"
+          - group: "policy"
+          - group: "rbac.authorization.k8s.io"
+          - group: "scheduling.k8s.io"
+          - group: "settings.k8s.io"
+          - group: "storage.k8s.io"
+          omitStages:
+          - "RequestReceived"
+        # Default level for known APIs
+        - level: RequestResponse
+          resources:
+          - group: "" # core
+          - group: "admissionregistration.k8s.io"
+          - group: "apiextensions.k8s.io"
+          - group: "apiregistration.k8s.io"
+          - group: "apps"
+          - group: "authentication.k8s.io"
+          - group: "authorization.k8s.io"
+          - group: "autoscaling"
+          - group: "batch"
+          - group: "certificates.k8s.io"
+          - group: "extensions"
+          - group: "metrics.k8s.io"
+          - group: "networking.k8s.io"
+          - group: "policy"
+          - group: "rbac.authorization.k8s.io"
+          - group: "scheduling.k8s.io"
+          - group: "settings.k8s.io"
+          - group: "storage.k8s.io"
+          omitStages:
+          - "RequestReceived"
+        # Default level for all other requests.
+        - level: Metadata
+          omitStages:
+          - "RequestReceived"
+      owner: root:root
+      path: /etc/kubernetes/audit-policy.yaml
+    initConfiguration:
+      nodeRegistration:
+        criSocket: /var/run/containerd/containerd.sock
+        kubeletExtraArgs:
+          cloud-provider: external
+          read-only-port: "0"
+          anonymous-auth: "false"
+          tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+        name: '{{ ds.meta_data.hostname }}'
+    joinConfiguration:
+      nodeRegistration:
+        criSocket: /var/run/containerd/containerd.sock
+        kubeletExtraArgs:
+          cloud-provider: external
+          read-only-port: "0"
+          anonymous-auth: "false"
+          tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+        name: '{{ ds.meta_data.hostname }}'
+    preKubeadmCommands:
+    - hostname "{{ ds.meta_data.hostname }}"
+    - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts
+    - echo "127.0.0.1 localhost" >>/etc/hosts
+    - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts
+    - echo "{{ ds.meta_data.hostname }}" >/etc/hostname
+    useExperimentalRetryJoin: true
+    users:
+    - name: capv
+      sshAuthorizedKeys:
+      - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ=='
+      sudo: ALL=(ALL) NOPASSWD:ALL
+    format: cloud-config
+  replicas: 3
+  version: v1.21.2-eks-1-21-4
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+  labels:
+    cluster.x-k8s.io/cluster-name: test
+  name: test-cpi
+  namespace: eksa-system
+spec:
+  strategy: Reconcile
+  clusterSelector:
+    matchLabels:
+      cluster.x-k8s.io/cluster-name: test
+  resources:
+  - kind: Secret
+    name: test-cloud-controller-manager
+  - kind: Secret
+    name: test-cloud-provider-vsphere-credentials
+  - kind: ConfigMap
+    name: test-cpi-manifests
+---
+kind: EtcdadmCluster
+apiVersion: etcdcluster.cluster.x-k8s.io/v1beta1
+metadata:
+  name: test-etcd
+  namespace: eksa-system
+spec:
+  replicas: 3
+  etcdadmConfigSpec:
+    etcdadmBuiltin: true
+    format: cloud-config
+    cloudInitConfig:
+      version: 3.4.16
+      installDir: "/usr/bin"
+      etcdReleaseURL: https://distro.eks.amazonaws.com/kubernetes-1-21/releases/4/artifacts/etcd/v3.4.16/etcd-linux-amd64-v3.4.16.tar.gz
+    preEtcdadmCommands:
+    - hostname "{{ ds.meta_data.hostname }}"
+    - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts
+    - echo "127.0.0.1 localhost" >>/etc/hosts
+    - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts
+    - echo "{{ ds.meta_data.hostname }}" >/etc/hostname
+    cipherSuites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+    users:
+    - name: capv
+      sshAuthorizedKeys:
+      - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ=='
+      sudo: ALL=(ALL) NOPASSWD:ALL
+  infrastructureTemplate:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+    kind: VSphereMachineTemplate
+    name: test-etcd-template-1234567890000
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: VSphereMachineTemplate
+metadata:
+  name: test-etcd-template-1234567890000
+  namespace: 'eksa-system'
+spec:
+  template:
+    spec:
+      cloneMode: linkedClone
+      datacenter: 'SDDC-Datacenter'
+      datastore: /SDDC-Datacenter/datastore/WorkloadDatastore
+      diskGiB: 25
+      folder: '/SDDC-Datacenter/vm'
+      memoryMiB: 8192
+      network:
+        devices:
+        - dhcp4: true
+          networkName: /SDDC-Datacenter/network/sddc-cgw-network-1
+      numCPUs: 3
+      resourcePool: '*/Resources'
+      server: vsphere_server
+      storagePolicyName: "vSAN Default Storage Policy"
+      template: /SDDC-Datacenter/vm/Templates/ubuntu-2004-kube-v1.21.2
+      thumbprint: 'ABCDEFG'
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: test-vsphere-credentials
+  namespace: eksa-system
+  labels:
+    clusterctl.cluster.x-k8s.io/move: "true"
+stringData:
+  username: "vsphere_username"
+  password: "vsphere_password"
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: test-cloud-controller-manager
+  namespace: eksa-system
+stringData:
+  data: |
+    apiVersion: v1
+    kind: ServiceAccount
+    metadata:
+      name: cloud-controller-manager
+      namespace: kube-system
+type: addons.cluster.x-k8s.io/resource-set
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: test-cloud-provider-vsphere-credentials
+  namespace: eksa-system
+stringData:
+  data: |
+    apiVersion: v1
+    kind: Secret
+    metadata:
+      name: cloud-provider-vsphere-credentials
+      namespace: kube-system
+    stringData:
+      vsphere_server.password: "vsphere_password"
+      vsphere_server.username: "vsphere_username"
+    type: Opaque
+type: addons.cluster.x-k8s.io/resource-set
+---
+apiVersion: v1
+data:
+  data: |
+    ---
+    apiVersion: rbac.authorization.k8s.io/v1
+    kind: ClusterRole
+    metadata:
+      name: system:cloud-controller-manager
+    rules:
+    - apiGroups:
+      - ""
+      resources:
+      - events
+      verbs:
+      - create
+      - patch
+      - update
+    - apiGroups:
+      - ""
+      resources:
+      - nodes
+      verbs:
+      - '*'
+    - apiGroups:
+      - ""
+      resources:
+      - nodes/status
+      verbs:
+      - patch
+    - apiGroups:
+      - ""
+      resources:
+      - services
+      verbs:
+      - list
+      - patch
+      - update
+      - watch
+    - apiGroups:
+      - ""
+      resources:
+      - serviceaccounts
+      verbs:
+      - create
+      - get
+      - list
+      - watch
+      - update
+    - apiGroups:
+      - ""
+      resources:
+      - persistentvolumes
+      verbs:
+      - get
+      - list
+      - watch
+      - update
+    - apiGroups:
+      - ""
+      resources:
+      - endpoints
+      verbs:
+      - create
+      - get
+      - list
+      - watch
+      - update
+    - apiGroups:
+      - ""
+      resources:
+      - secrets
+      verbs:
+      - get
+      - list
+      - watch
+    - apiGroups:
+      - coordination.k8s.io
+      resources:
+      - leases
+      verbs:
+      - get
+      - watch
+      - list
+      - delete
+      - update
+      - create
+    ---
+    apiVersion: rbac.authorization.k8s.io/v1
+    kind: ClusterRoleBinding
+    metadata:
+      name: system:cloud-controller-manager
+    roleRef:
+      apiGroup: rbac.authorization.k8s.io
+      kind: ClusterRole
+      name: system:cloud-controller-manager
+    subjects:
+    - kind: ServiceAccount
+      name: cloud-controller-manager
+      namespace: kube-system
+    - kind: User
+      name: cloud-controller-manager
+    ---
+    apiVersion: v1
+    data:
+      vsphere.conf: |
+        global:
+          secretName: cloud-provider-vsphere-credentials
+          secretNamespace: kube-system
+          thumbprint: "ABCDEFG"
+          insecureFlag: false
+        vcenter:
+          vsphere_server:
+            datacenters:
+            - 'SDDC-Datacenter'
+            secretName: cloud-provider-vsphere-credentials
+            secretNamespace: kube-system
+            server: 'vsphere_server'
+            thumbprint: 'ABCDEFG'
+    kind: ConfigMap
+    metadata:
+      name: vsphere-cloud-config
+      namespace: kube-system
+    ---
+    apiVersion: rbac.authorization.k8s.io/v1
+    kind: RoleBinding
+    metadata:
+      name: servicecatalog.k8s.io:apiserver-authentication-reader
+      namespace: kube-system
+    roleRef:
+      apiGroup: rbac.authorization.k8s.io
+      kind: Role
+      name: extension-apiserver-authentication-reader
+    subjects:
+    - kind: ServiceAccount
+      name: cloud-controller-manager
+      namespace: kube-system
+    - kind: User
+      name: cloud-controller-manager
+    ---
+    apiVersion: v1
+    kind: Service
+    metadata:
+      labels:
+        component: cloud-controller-manager
+      name: cloud-controller-manager
+      namespace: kube-system
+    spec:
+      ports:
+      - port: 443
+        protocol: TCP
+        targetPort: 43001
+      selector:
+        component: cloud-controller-manager
+      type: NodePort
+    ---
+    apiVersion: apps/v1
+    kind: DaemonSet
+    metadata:
+      labels:
+        k8s-app: vsphere-cloud-controller-manager
+      name: vsphere-cloud-controller-manager
+      namespace: kube-system
+    spec:
+      selector:
+        matchLabels:
+          k8s-app: vsphere-cloud-controller-manager
+      template:
+        metadata:
+          labels:
+            k8s-app: vsphere-cloud-controller-manager
+        spec:
+          containers:
+          - args:
+            - --v=2
+            - --cloud-provider=vsphere
+            - --cloud-config=/etc/cloud/vsphere.conf
+            image: public.ecr.aws/l0g8r8j6/kubernetes/cloud-provider-vsphere/cpi/manager:v1.21.0-eks-d-1-21-eks-a-v0.0.0-dev-build.158
+            name: vsphere-cloud-controller-manager
+            resources:
+              requests:
+                cpu: 200m
+            volumeMounts:
+            - mountPath: /etc/cloud
+              name: vsphere-config-volume
+              readOnly: true
+          hostNetwork: true
+          serviceAccountName: cloud-controller-manager
+          tolerations:
+          - effect: NoSchedule
+            key: node.cloudprovider.kubernetes.io/uninitialized
+            value: "true"
+          - effect: NoSchedule
+            key: node-role.kubernetes.io/master
+          - effect: NoSchedule
+            key: node-role.kubernetes.io/control-plane
+          - effect: NoSchedule
+            key: node.kubernetes.io/not-ready
+          volumes:
+          - configMap:
+              name: vsphere-cloud-config
+            name: vsphere-config-volume
+      updateStrategy:
+        type: RollingUpdate
+kind: ConfigMap
+metadata:
+  name: test-cpi-manifests
+  namespace: eksa-system
diff --git a/pkg/providers/vsphere/vsphere_test.go b/pkg/providers/vsphere/vsphere_test.go
index 46d050d53f3e..d8d31acfdaf3 100644
--- a/pkg/providers/vsphere/vsphere_test.go
+++ b/pkg/providers/vsphere/vsphere_test.go
@@ -3950,3 +3950,103 @@ func TestProviderGenerateDeploymentFileForBottlerocketWithTrustedCertBundles(t *
 	test.AssertContentToFile(t, string(cp), "testdata/expected_results_bottlerocket_cert_bundles_config_cp.yaml")
 	test.AssertContentToFile(t, string(md), "testdata/expected_results_bottlerocket_cert_bundles_config_md.yaml")
 }
+
+func TestProviderGenerateCAPISpecForUpgradeEtcdEncryption(t *testing.T) {
+	tests := []struct {
+		testName          string
+		clusterconfigFile string
+		wantCPFile        string
+		wantMDFile        string
+	}{
+		{
+			testName:          "etcd-encryption",
+			clusterconfigFile: "cluster_ubuntu_etcd_encryption.yaml",
+			wantCPFile:        "testdata/expected_results_ubuntu_etcd_encryption_cp.yaml",
+			wantMDFile:        "testdata/expected_results_main_121_md.yaml",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.testName, func(t *testing.T) {
+			mockCtrl := gomock.NewController(t)
+			setupContext(t)
+			ctx := context.Background()
+			kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
+			cluster := &types.Cluster{
+				Name: "test",
+			}
+			bootstrapCluster := &types.Cluster{
+				Name: "bootstrap-test",
+			}
+			clusterSpec := givenClusterSpec(t, tt.clusterconfigFile)
+
+			oldCP := &controlplanev1.KubeadmControlPlane{
+				Spec: controlplanev1.KubeadmControlPlaneSpec{
+					MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
+						InfrastructureRef: v1.ObjectReference{
+							Name: "test-control-plane-template-1234567890000",
+						},
+					},
+				},
+			}
+			oldMD := &clusterv1.MachineDeployment{
+				Spec: clusterv1.MachineDeploymentSpec{
+					Template: clusterv1.MachineTemplateSpec{
+						Spec: clusterv1.MachineSpec{
+							InfrastructureRef: v1.ObjectReference{
+								Name: "test-md-0-1234567890000",
+							},
+							Bootstrap: clusterv1.Bootstrap{
+								ConfigRef: &v1.ObjectReference{
+									Name: "test-md-0-template-1234567890000",
+								},
+							},
+						},
+					},
+				},
+			}
+			etcdadmCluster := &etcdv1.EtcdadmCluster{
+				Spec: etcdv1.EtcdadmClusterSpec{
+					InfrastructureTemplate: v1.ObjectReference{
+						Name: "test-etcd-template-1234567890000",
+					},
+				},
+			}
+
+			ipValidator := mocks.NewMockIPValidator(mockCtrl)
+			ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
+
+			datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
+			provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, ipValidator)
+			if provider == nil {
+				t.Fatalf("provider object is nil")
+			}
+
+			err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
+			if err != nil {
+				t.Fatalf("failed to setup and validate: %v", err)
+			}
+
+			controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
+			workerNodeMachineConfigName := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
+			machineDeploymentName := fmt.Sprintf("%s-%s", clusterSpec.Cluster.Name, clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Name)
+			etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
+
+			kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.Name).Return(clusterSpec.Cluster, nil)
+			kubectl.EXPECT().GetEksaVSphereDatacenterConfig(ctx, cluster.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(datacenterConfig, nil)
+			kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, controlPlaneMachineConfigName, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName], nil)
+			kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, workerNodeMachineConfigName, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName], nil)
+			kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, etcdMachineConfigName, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(clusterSpec.VSphereMachineConfigs[etcdMachineConfigName], nil)
+			kubectl.EXPECT().GetKubeadmControlPlane(ctx, cluster, clusterSpec.Cluster.Name, gomock.AssignableToTypeOf(executables.WithCluster(bootstrapCluster))).Return(oldCP, nil)
+			kubectl.EXPECT().GetMachineDeployment(ctx, machineDeploymentName, gomock.AssignableToTypeOf(executables.WithCluster(bootstrapCluster))).Return(oldMD, nil).Times(2)
+			kubectl.EXPECT().GetEtcdadmCluster(ctx, cluster, clusterSpec.Cluster.Name, gomock.AssignableToTypeOf(executables.WithCluster(bootstrapCluster))).Return(etcdadmCluster, nil)
+
+			cp, md, err := provider.GenerateCAPISpecForUpgrade(context.Background(), bootstrapCluster, cluster, clusterSpec, clusterSpec.DeepCopy())
+			if err != nil {
+				t.Fatalf("failed to generate cluster api spec contents: %v", err)
+			}
+
+			test.AssertContentToFile(t, string(cp), tt.wantCPFile)
+			test.AssertContentToFile(t, string(md), tt.wantMDFile)
+		})
+	}
+}
diff --git a/test/e2e/vsphere_test.go b/test/e2e/vsphere_test.go
index fc40000e17ca..50eb17c41624 100644
--- a/test/e2e/vsphere_test.go
+++ b/test/e2e/vsphere_test.go
@@ -3200,6 +3200,26 @@ func TestVSphereKubernetes128UbuntuAirgappedRegistryMirror(t *testing.T) {
 	runAirgapConfigFlow(test, "195.18.0.1/16,196.18.0.1/16")
 }
 
+func TestVSphereKubernetes128EtcdEncryption(t *testing.T) {
+	test := framework.NewClusterE2ETest(
+		t,
+		framework.NewVSphere(t, framework.WithUbuntu128()),
+		framework.WithClusterFiller(
+			api.WithKubernetesVersion(v1alpha1.Kube128),
+			api.WithExternalEtcdTopology(1),
+			api.WithControlPlaneCount(1),
+		),
+		framework.WithPodIamConfig(),
+	)
+	test.GenerateClusterConfig()
+	test.CreateCluster()
+	test.PostClusterCreateEtcdEncryptionSetup()
+	test.UpgradeClusterWithNewConfig([]framework.ClusterE2ETestOpt{framework.WithEtcdEncrytion()})
+	test.StopIfFailed()
+	test.ValidateEtcdEncryption()
+	test.DeleteCluster()
+}
+
 func ubuntu128ProviderWithLabels(t *testing.T) *framework.VSphere {
 	return framework.NewVSphere(t,
 		framework.WithVSphereWorkerNodeGroup(
diff --git a/test/framework/etcdencryption.go b/test/framework/etcdencryption.go
index eb893b7e2060..6cd3185c90e1 100644
--- a/test/framework/etcdencryption.go
+++ b/test/framework/etcdencryption.go
@@ -145,7 +145,7 @@ func (e *ClusterE2ETest) ValidateEtcdEncryption() {
 		"get", fmt.Sprintf("/registry/secrets/default/%s", secretName), "| hexdump -C",
 	}
 	for _, etcdIP := range etcdIPs {
-		out, err := ssh.RunCommand(ctx, SSHKeyPath, "capc", etcdIP, cmd...)
+		out, err := ssh.RunCommand(ctx, SSHKeyPath, getSSHUsernameByProvider(e.Provider.Name()), etcdIP, cmd...)
 		if err != nil {
 			e.T.Fatalf("Error verifying the secret is encrypted in etcd: %v", err)
 		}
@@ -157,6 +157,17 @@ func (e *ClusterE2ETest) ValidateEtcdEncryption() {
 	}
 }
 
+func getSSHUsernameByProvider(provider string) string {
+	switch provider {
+	case "cloudstack":
+		return "capc"
+	case "nutanix":
+		return "eksa"
+	default:
+		return "ec2-user"
+	}
+}
+
 // PostClusterCreateEtcdEncryptionSetup performs operations on the cluster to prepare it for etcd encryption.
 // These operations include:
 // - Adding Cluster SA cert to the OIDC provider's keys.
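
Note for reviewers: ValidateEtcdEncryption above hexdumps the raw value stored in etcd
under /registry/secrets/default/<name>. With a KMS v1 provider active, Kubernetes writes
such values with a "k8s:enc:kms:v1:<provider-name>:" prefix instead of the plaintext
"k8s\x00" protobuf envelope; that prefix is what the validation can assert on. A minimal,
self-contained Go sketch of that check follows — the helper name, sample strings, and
main() harness are illustrative only and are not part of this patch:

package main

import (
	"fmt"
	"strings"
)

// kmsV1Prefix is the prefix Kubernetes prepends to resource values written
// through a KMS v1 encryption provider; the provider name from the
// EncryptionConfiguration (e.g. "config1") follows it.
const kmsV1Prefix = "k8s:enc:kms:v1:"

// isKMSEncrypted reports whether a raw value dumped from etcd at
// /registry/secrets/<namespace>/<name> was written through a KMS v1 provider.
func isKMSEncrypted(raw string) bool {
	return strings.Contains(raw, kmsV1Prefix)
}

func main() {
	// Illustrative samples of what the dumped etcd value decodes to.
	encrypted := "k8s:enc:kms:v1:config1:..." // encrypted via provider "config1"
	plaintext := "k8s\x00..."                 // unencrypted protobuf envelope

	fmt.Println(isKMSEncrypted(encrypted)) // true
	fmt.Println(isKMSEncrypted(plaintext)) // false
}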