Skip to content

Commit

Permalink
Fix missing updates of server certificates (#219)
Browse files Browse the repository at this point in the history
* merge

* wip test

* added test for restart

* tests reorg

* simplified tests
  • Loading branch information
enrichman authored Feb 4, 2025
1 parent 997216f commit 2a7541c
Show file tree
Hide file tree
Showing 5 changed files with 239 additions and 149 deletions.
41 changes: 33 additions & 8 deletions pkg/controller/cluster/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ import (
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
Expand Down Expand Up @@ -76,6 +77,7 @@ func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgent
WithOptions(ctrlruntimecontroller.Options{
MaxConcurrentReconciles: maxConcurrentReconciles,
}).
Owns(&apps.StatefulSet{}).
Complete(&reconciler)
}

Expand Down Expand Up @@ -212,24 +214,47 @@ func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alp
}
}

bootstrapSecret, err := bootstrap.Generate(ctx, cluster, serviceIP, token)
if err != nil {
if err := c.ensureBootstrapSecret(ctx, cluster, serviceIP, token); err != nil {
return err
}

if err := c.Client.Create(ctx, bootstrapSecret); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}

if err := c.bindNodeProxyClusterRole(ctx, cluster); err != nil {
return err
}

return nil
}

// ensureBootstrapSecret creates or updates the Secret that stores the
// bootstrap data retrieved from the k3s server, keeping it owned by the
// given cluster so it is garbage-collected with it.
func (c *ClusterReconciler) ensureBootstrapSecret(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP, token string) error {
	log := ctrl.LoggerFrom(ctx)
	log.Info("ensuring bootstrap secret")

	data, err := bootstrap.GenerateBootstrapData(ctx, cluster, serviceIP, token)
	if err != nil {
		return err
	}

	secret := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      controller.SafeConcatNameWithPrefix(cluster.Name, "bootstrap"),
			Namespace: cluster.Namespace,
		},
	}

	// Re-apply the owner reference and payload on every reconcile so the
	// secret always reflects the latest server certificates.
	_, err = controllerutil.CreateOrUpdate(ctx, c.Client, secret, func() error {
		if err := controllerutil.SetControllerReference(cluster, secret, c.Scheme); err != nil {
			return err
		}

		secret.Data = map[string][]byte{"bootstrap": data}

		return nil
	})

	return err
}

func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v1alpha1.Cluster, server *server.Server, serviceIP string) error {
// create init node config
initServerConfig, err := server.Config(true, serviceIP)
Expand Down
29 changes: 2 additions & 27 deletions pkg/controller/cluster/server/bootstrap/bootstrap.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@ import (
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
)
Expand All @@ -35,7 +34,7 @@ type content struct {
// Generate generates the bootstrap for the cluster:
// 1- use the server token to get the bootstrap data from k3s
// 2- save the bootstrap data as a secret
func Generate(ctx context.Context, cluster *v1alpha1.Cluster, ip, token string) (*v1.Secret, error) {
func GenerateBootstrapData(ctx context.Context, cluster *v1alpha1.Cluster, ip, token string) ([]byte, error) {
bootstrap, err := requestBootstrap(token, ip)
if err != nil {
return nil, fmt.Errorf("failed to request bootstrap secret: %w", err)
Expand All @@ -45,31 +44,7 @@ func Generate(ctx context.Context, cluster *v1alpha1.Cluster, ip, token string)
return nil, fmt.Errorf("failed to decode bootstrap secret: %w", err)
}

bootstrapData, err := json.Marshal(bootstrap)
if err != nil {
return nil, err
}
return &v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: controller.SafeConcatNameWithPrefix(cluster.Name, "bootstrap"),
Namespace: cluster.Namespace,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: cluster.APIVersion,
Kind: cluster.Kind,
Name: cluster.Name,
UID: cluster.UID,
},
},
},
Data: map[string][]byte{
"bootstrap": bootstrapData,
},
}, nil
return json.Marshal(bootstrap)

}

Expand Down
188 changes: 110 additions & 78 deletions tests/cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,23 +2,46 @@ package k3k_test

import (
"context"
"crypto/x509"
"errors"
"fmt"
"strings"
"time"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

var _ = When("k3k is installed", func() {
	It("is in Running status", func() {
		// Check that at least one k3k controller pod reaches the Running
		// phase. The polled function accepts a Gomega parameter and uses
		// g.Expect so that a transient API/list error is treated as a failed
		// poll and retried by Eventually, instead of aborting the whole spec
		// immediately (the documented pattern for assertions inside
		// Eventually).
		Eventually(func(g Gomega) bool {
			opts := v1.ListOptions{LabelSelector: "app.kubernetes.io/name=k3k"}
			podList, err := k8s.CoreV1().Pods("k3k-system").List(context.Background(), opts)

			g.Expect(err).To(Not(HaveOccurred()))
			g.Expect(podList.Items).To(Not(BeEmpty()))

			for _, pod := range podList.Items {
				if pod.Status.Phase == corev1.PodRunning {
					return true
				}
			}

			return false
		}).
			WithTimeout(time.Second * 10).
			WithPolling(time.Second).
			Should(BeTrue())
	})
})

var _ = When("a cluster is installed", func() {

var namespace string
Expand All @@ -30,28 +53,29 @@ var _ = When("a cluster is installed", func() {
namespace = createdNS.Name
})

It("will be created in shared mode", func() {
It("can create a nginx pod", func() {
ctx := context.Background()
containerIP, err := k3sContainer.ContainerIP(ctx)
Expect(err).To(Not(HaveOccurred()))

fmt.Fprintln(GinkgoWriter, "K3s containerIP: "+containerIP)

cluster := v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
TLSSANs: []string{containerIP},
TLSSANs: []string{hostIP},
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{
Enabled: true,
},
},
},
}
virtualK8sClient := CreateCluster(containerIP, cluster)

By(fmt.Sprintf("Creating virtual cluster %s/%s", cluster.Namespace, cluster.Name))
NewVirtualCluster(cluster)

By("Waiting to get a kubernetes client for the virtual cluster")
virtualK8sClient := NewVirtualK8sClient(cluster)

nginxPod := &corev1.Pod{
ObjectMeta: v1.ObjectMeta{
Expand All @@ -65,7 +89,7 @@ var _ = When("a cluster is installed", func() {
}},
},
}
nginxPod, err = virtualK8sClient.CoreV1().Pods(nginxPod.Namespace).Create(ctx, nginxPod, v1.CreateOptions{})
nginxPod, err := virtualK8sClient.CoreV1().Pods(nginxPod.Namespace).Create(ctx, nginxPod, v1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))

// check that the nginx Pod is up and running in the host cluster
Expand All @@ -78,12 +102,12 @@ var _ = When("a cluster is installed", func() {
resourceName := pod.Annotations[translate.ResourceNameAnnotation]
resourceNamespace := pod.Annotations[translate.ResourceNamespaceAnnotation]

fmt.Fprintf(GinkgoWriter,
"pod=%s resource=%s/%s status=%s\n",
pod.Name, resourceNamespace, resourceName, pod.Status.Phase,
)

if resourceName == nginxPod.Name && resourceNamespace == nginxPod.Namespace {
fmt.Fprintf(GinkgoWriter,
"pod=%s resource=%s/%s status=%s\n",
pod.Name, resourceNamespace, resourceName, pod.Status.Phase,
)

return pod.Status.Phase == corev1.PodRunning
}
}
Expand All @@ -94,73 +118,81 @@ var _ = When("a cluster is installed", func() {
WithPolling(time.Second * 5).
Should(BeTrue())
})
})

func CreateCluster(hostIP string, cluster v1alpha1.Cluster) *kubernetes.Clientset {
GinkgoHelper()
It("regenerates the bootstrap secret after a restart", func() {
ctx := context.Background()

By(fmt.Sprintf("Creating virtual cluster %s/%s", cluster.Namespace, cluster.Name))
cluster := v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
TLSSANs: []string{hostIP},
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{
Enabled: true,
},
},
},
}

By(fmt.Sprintf("Creating virtual cluster %s/%s", cluster.Namespace, cluster.Name))
NewVirtualCluster(cluster)

ctx := context.Background()
err := k8sClient.Create(ctx, &cluster)
Expect(err).To(Not(HaveOccurred()))
By("Waiting to get a kubernetes client for the virtual cluster")
virtualK8sClient := NewVirtualK8sClient(cluster)

By("Waiting for server and kubelet to be ready")
_, err := virtualK8sClient.DiscoveryClient.ServerVersion()
Expect(err).To(Not(HaveOccurred()))

// check that the server Pod and the Kubelet are in Ready state
Eventually(func() bool {
podList, err := k8s.CoreV1().Pods(cluster.Namespace).List(ctx, v1.ListOptions{})
labelSelector := "cluster=" + cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))

serverRunning := false
kubeletRunning := false
Expect(len(serverPods.Items)).To(Equal(1))
serverPod := serverPods.Items[0]

for _, pod := range podList.Items {
imageName := pod.Spec.Containers[0].Image
imageName = strings.Split(imageName, ":")[0] // remove tag
fmt.Fprintf(GinkgoWriter, "deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
// GracePeriodSeconds: ptr.To[int64](0)
err = k8s.CoreV1().Pods(namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))

switch imageName {
case "rancher/k3s":
serverRunning = pod.Status.Phase == corev1.PodRunning
case "rancher/k3k-kubelet":
kubeletRunning = pod.Status.Phase == corev1.PodRunning
}
By("Deleting server pod")

if serverRunning && kubeletRunning {
return true
}
}
// check that the server pods restarted
Eventually(func() any {
serverPods, err = k8s.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
return serverPods.Items[0].DeletionTimestamp
}).
WithTimeout(time.Minute).
WithPolling(time.Second * 5).
Should(BeNil())

By("Server pod up and running again")

By("Using old k8s client configuration should fail")

Eventually(func() bool {
_, err = virtualK8sClient.DiscoveryClient.ServerVersion()
var unknownAuthorityErr x509.UnknownAuthorityError
return errors.As(err, &unknownAuthorityErr)
}).
WithTimeout(time.Minute * 2).
WithPolling(time.Second * 5).
Should(BeTrue())

return false
}).
WithTimeout(time.Minute).
WithPolling(time.Second * 5).
Should(BeTrue())

By("Waiting for server to be up and running")

var config *clientcmdapi.Config
Eventually(func() error {
vKubeconfig := kubeconfig.New()
vKubeconfig.AltNames = certs.AddSANs([]string{hostIP, "k3k-mycluster-kubelet"})
config, err = vKubeconfig.Extract(ctx, k8sClient, &cluster, hostIP)
return err
}).
WithTimeout(time.Minute * 2).
WithPolling(time.Second * 5).
Should(BeNil())

configData, err := clientcmd.Write(*config)
Expect(err).To(Not(HaveOccurred()))

restcfg, err := clientcmd.RESTConfigFromKubeConfig(configData)
Expect(err).To(Not(HaveOccurred()))
virtualK8sClient, err := kubernetes.NewForConfig(restcfg)
Expect(err).To(Not(HaveOccurred()))

serverVersion, err := virtualK8sClient.DiscoveryClient.ServerVersion()
Expect(err).To(Not(HaveOccurred()))
fmt.Fprintf(GinkgoWriter, "serverVersion: %+v\n", serverVersion)

return virtualK8sClient
}
By("Recover new config should succeed")

Eventually(func() error {
virtualK8sClient = NewVirtualK8sClient(cluster)
_, err = virtualK8sClient.DiscoveryClient.ServerVersion()
return err
}).
WithTimeout(time.Minute * 2).
WithPolling(time.Second * 5).
Should(BeNil())
})
})
Loading

0 comments on commit 2a7541c

Please sign in to comment.