diff --git a/config/crd/bases/operator.kcp.io_frontproxies.yaml b/config/crd/bases/operator.kcp.io_frontproxies.yaml
index c5801de..0644363 100644
--- a/config/crd/bases/operator.kcp.io_frontproxies.yaml
+++ b/config/crd/bases/operator.kcp.io_frontproxies.yaml
@@ -18,6 +18,9 @@ spec:
     - jsonPath: .spec.rootShard.ref.name
       name: RootShard
       type: string
+    - jsonPath: .spec.externalHostname
+      name: ExternalHostname
+      type: string
     - jsonPath: .status.phase
       name: Phase
      type: string
diff --git a/config/crd/bases/operator.kcp.io_rootshards.yaml b/config/crd/bases/operator.kcp.io_rootshards.yaml
index 7b152c4..93c5914 100644
--- a/config/crd/bases/operator.kcp.io_rootshards.yaml
+++ b/config/crd/bases/operator.kcp.io_rootshards.yaml
@@ -66,11 +66,16 @@ spec:
                     type: object
                 type: object
               certificates:
+                description: |-
+                  Certificates configures how the operator should create the kcp root CA, from which it will
+                  then create all other sub CAs and leaf certificates.
                 properties:
                   caSecretRef:
                     description: |-
-                      LocalObjectReference contains enough information to let you locate the
-                      referenced object inside the same namespace.
+                      CASecretRef can be used as an alternative to the IssuerRef: This field allows configuring
+                      a pre-existing CA certificate that should be used to sign kcp certificates.
+                      This Secret must contain both the certificate and the private key so that new sub certificates
+                      can be signed and created from this CA. This field is mutually exclusive with issuerRef.
                     properties:
                       name:
                         default: ""
@@ -84,8 +89,9 @@ spec:
                     type: object
                     x-kubernetes-map-type: atomic
                   issuerRef:
-                    description: ObjectReference is a reference to an object with
-                      a given name, kind and group.
+                    description: |-
+                      IssuerRef points to a pre-existing cert-manager Issuer or ClusterIssuer that shall be used
+                      to acquire new certificates. This field is mutually exclusive with caSecretRef.
                     properties:
                       group:
                         description: Group of the object being referred to.
diff --git a/config/crd/bases/operator.kcp.io_shards.yaml b/config/crd/bases/operator.kcp.io_shards.yaml
index 9dbfb68..8df60a2 100644
--- a/config/crd/bases/operator.kcp.io_shards.yaml
+++ b/config/crd/bases/operator.kcp.io_shards.yaml
@@ -14,7 +14,17 @@ spec:
     singular: shard
   scope: Namespaced
   versions:
-  - name: v1alpha1
+  - additionalPrinterColumns:
+    - jsonPath: .spec.rootShard.ref.name
+      name: RootShard
+      type: string
+    - jsonPath: .status.phase
+      name: Phase
+      type: string
+    - jsonPath: .metadata.creationTimestamp
+      name: Age
+      type: date
+    name: v1alpha1
     schema:
       openAPIV3Schema:
         description: Shard is the Schema for the shards API
diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh
index db46380..b65a62b 100755
--- a/hack/update-codegen.sh
+++ b/hack/update-codegen.sh
@@ -33,7 +33,7 @@ _tools/reconciler-gen --config hack/reconciling.yaml > internal/reconciling/zz_g
 # generate CRDs
 go run sigs.k8s.io/controller-tools/cmd/controller-gen \
-	rbac:roleName=manager-role crd webhook \
+	rbac:roleName=manager-role crd webhook object \
 	paths="./..." \
 	output:crd:artifacts:config=config/crd/bases
diff --git a/internal/controller/frontproxy_controller.go b/internal/controller/frontproxy_controller.go
index 40b3a0d..d366592 100644
--- a/internal/controller/frontproxy_controller.go
+++ b/internal/controller/frontproxy_controller.go
@@ -31,9 +31,12 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
 	"sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
 	"github.com/kcp-dev/kcp-operator/internal/reconciling"
 	"github.com/kcp-dev/kcp-operator/internal/resources"
@@ -49,6 +52,25 @@ type FrontProxyReconciler struct {
 
 // SetupWithManager sets up the controller with the Manager.
 func (r *FrontProxyReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	rootShardHandler := handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
+		rootShard := obj.(*operatorv1alpha1.RootShard)
+
+		var fpList operatorv1alpha1.FrontProxyList
+		if err := mgr.GetClient().List(ctx, &fpList, &client.ListOptions{Namespace: rootShard.Namespace}); err != nil {
+			utilruntime.HandleError(err)
+			return nil
+		}
+
+		var requests []reconcile.Request
+		for _, frontProxy := range fpList.Items {
+			if ref := frontProxy.Spec.RootShard.Reference; ref != nil && ref.Name == rootShard.Name {
+				requests = append(requests, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&frontProxy)})
+			}
+		}
+
+		return requests
+	})
+
 	return ctrl.NewControllerManagedBy(mgr).
 		For(&operatorv1alpha1.FrontProxy{}).
 		Owns(&appsv1.Deployment{}).
@@ -56,6 +78,7 @@ func (r *FrontProxyReconciler) SetupWithManager(mgr ctrl.Manager) error {
 		Owns(&corev1.Secret{}).
 		Owns(&corev1.Service{}).
 		Owns(&certmanagerv1.Certificate{}).
+		Watches(&operatorv1alpha1.RootShard{}, rootShardHandler).
 		Complete(r)
 }
 
@@ -80,29 +103,29 @@ func (r *FrontProxyReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 		return ctrl.Result{}, nil
 	}
 
-	defer func() {
-		if err := r.reconcileStatus(ctx, &frontProxy); err != nil {
-			recErr = kerrors.NewAggregate([]error{recErr, err})
-		}
-	}()
+	conditions, recErr := r.reconcile(ctx, &frontProxy)
+
+	if err := r.reconcileStatus(ctx, &frontProxy, conditions); err != nil {
+		recErr = kerrors.NewAggregate([]error{recErr, err})
+	}
 
-	return ctrl.Result{}, r.reconcile(ctx, &frontProxy)
+	return ctrl.Result{}, recErr
 }
 
-func (r *FrontProxyReconciler) reconcile(ctx context.Context, frontProxy *operatorv1alpha1.FrontProxy) error {
-	var errs []error
+func (r *FrontProxyReconciler) reconcile(ctx context.Context, frontProxy *operatorv1alpha1.FrontProxy) ([]metav1.Condition, error) {
+	var (
+		errs       []error
+		conditions []metav1.Condition
+	)
 
-	ownerRefWrapper := k8creconciling.OwnerRefWrapper(*metav1.NewControllerRef(frontProxy, operatorv1alpha1.SchemeGroupVersion.WithKind("FrontProxy")))
+	cond, rootShard := fetchRootShard(ctx, r.Client, frontProxy.Namespace, frontProxy.Spec.RootShard.Reference)
+	conditions = append(conditions, cond)
 
-	ref := frontProxy.Spec.RootShard.Reference
-	if ref == nil {
-		return fmt.Errorf("no valid RootShard in FrontProxy spec defined")
+	if rootShard == nil {
+		return conditions, nil
 	}
 
-	rootShard := &operatorv1alpha1.RootShard{}
-	if err := r.Client.Get(ctx, types.NamespacedName{Name: ref.Name, Namespace: frontProxy.Namespace}, rootShard); err != nil {
-		return fmt.Errorf("referenced RootShard '%s' could not be fetched", ref.Name)
-	}
+	ownerRefWrapper := k8creconciling.OwnerRefWrapper(*metav1.NewControllerRef(frontProxy, operatorv1alpha1.SchemeGroupVersion.WithKind("FrontProxy")))
 
 	configMapReconcilers := []k8creconciling.NamedConfigMapReconcilerFactory{
 		frontproxy.PathMappingConfigMapReconciler(frontProxy, rootShard),
@@ -147,27 +170,36 @@ func (r *FrontProxyReconciler) reconcile(ctx context.Context, frontProxy *operat
 		errs = append(errs, err)
 	}
 
-	return kerrors.NewAggregate(errs)
+	return conditions, kerrors.NewAggregate(errs)
 }
 
-func (r *FrontProxyReconciler) reconcileStatus(ctx context.Context, oldFrontProxy *operatorv1alpha1.FrontProxy) error {
+func (r *FrontProxyReconciler) reconcileStatus(ctx context.Context, oldFrontProxy *operatorv1alpha1.FrontProxy, conditions []metav1.Condition) error {
 	frontProxy := oldFrontProxy.DeepCopy()
 
 	var errs []error
 
-	if frontProxy.Status.Phase == "" {
-		frontProxy.Status.Phase = operatorv1alpha1.FrontProxyPhaseProvisioning
-	}
-
-	if frontProxy.DeletionTimestamp != nil {
-		frontProxy.Status.Phase = operatorv1alpha1.FrontProxyPhaseDeleting
+	depKey := types.NamespacedName{Namespace: frontProxy.Namespace, Name: resources.GetFrontProxyDeploymentName(frontProxy)}
+	cond, err := getDeploymentAvailableCondition(ctx, r.Client, depKey)
+	if err != nil {
+		errs = append(errs, err)
+	} else {
+		conditions = append(conditions, cond)
 	}
 
-	if err := r.setAvailableCondition(ctx, frontProxy); err != nil {
-		errs = append(errs, err)
+	for _, condition := range conditions {
+		condition.ObservedGeneration = frontProxy.Generation
+		frontProxy.Status.Conditions = updateCondition(frontProxy.Status.Conditions, condition)
 	}
 
-	if cond := apimeta.FindStatusCondition(frontProxy.Status.Conditions, string(operatorv1alpha1.RootShardConditionTypeAvailable)); cond.Status == metav1.ConditionTrue {
+	availableCond := apimeta.FindStatusCondition(frontProxy.Status.Conditions, string(operatorv1alpha1.ConditionTypeAvailable))
+	switch {
+	case availableCond != nil && availableCond.Status == metav1.ConditionTrue:
 		frontProxy.Status.Phase = operatorv1alpha1.FrontProxyPhaseRunning
+
+	case frontProxy.DeletionTimestamp != nil:
+		frontProxy.Status.Phase = operatorv1alpha1.FrontProxyPhaseDeleting
+
+	case frontProxy.Status.Phase == "":
+		frontProxy.Status.Phase = operatorv1alpha1.FrontProxyPhaseProvisioning
 	}
 
 	// only patch the status if there are actual changes.
@@ -179,35 +211,3 @@ func (r *FrontProxyReconciler) reconcileStatus(ctx context.Context, oldFrontProx
 
 	return kerrors.NewAggregate(errs)
 }
-
-func (r *FrontProxyReconciler) setAvailableCondition(ctx context.Context, frontProxy *operatorv1alpha1.FrontProxy) error {
-	var dep appsv1.Deployment
-	depKey := types.NamespacedName{Namespace: frontProxy.Namespace, Name: resources.GetFrontProxyDeploymentName(frontProxy)}
-	if err := r.Client.Get(ctx, depKey, &dep); client.IgnoreNotFound(err) != nil {
-		return err
-	}
-
-	available := metav1.ConditionFalse
-	reason := operatorv1alpha1.FrontProxyConditionReasonDeploymentUnavailable
-	msg := deploymentStatusString(dep, depKey)
-
-	if dep.Name != "" {
-		if deploymentReady(dep) {
-			available = metav1.ConditionTrue
-			reason = operatorv1alpha1.FrontProxyConditionReasonReplicasUp
-		} else {
-			available = metav1.ConditionFalse
-			reason = operatorv1alpha1.FrontProxyConditionReasonReplicasUnavailable
-		}
-	}
-
-	frontProxy.Status.Conditions = updateCondition(frontProxy.Status.Conditions, metav1.Condition{
-		Type:               string(operatorv1alpha1.FrontProxyConditionTypeAvailable),
-		Status:             available,
-		ObservedGeneration: frontProxy.Generation,
-		Reason:             string(reason),
-		Message:            msg,
-	})
-
-	return nil
-}
diff --git a/internal/controller/rootshard_controller.go b/internal/controller/rootshard_controller.go
index 9f30fb9..aa79c2b 100644
--- a/internal/controller/rootshard_controller.go
+++ b/internal/controller/rootshard_controller.go
@@ -90,17 +90,21 @@ func (r *RootShardReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 		return ctrl.Result{}, nil
 	}
 
-	defer func() {
-		if err := r.reconcileStatus(ctx, &rootShard); err != nil {
-			recErr = kerrors.NewAggregate([]error{recErr, err})
-		}
-	}()
+	conditions, recErr := r.reconcile(ctx, &rootShard)
+
+	if err := r.reconcileStatus(ctx, &rootShard, conditions); err != nil {
+		recErr = kerrors.NewAggregate([]error{recErr, err})
+	}
 
-	return ctrl.Result{}, r.reconcile(ctx, &rootShard)
+	return ctrl.Result{}, recErr
 }
 
-func (r *RootShardReconciler) reconcile(ctx context.Context, rootShard *operatorv1alpha1.RootShard) error {
-	var errs []error
+//nolint:unparam // Keep the controller working the same as all the others, even though it currently always returns nil conditions.
+func (r *RootShardReconciler) reconcile(ctx context.Context, rootShard *operatorv1alpha1.RootShard) ([]metav1.Condition, error) {
+	var (
+		errs       []error
+		conditions []metav1.Condition
+	)
 
 	ownerRefWrapper := k8creconciling.OwnerRefWrapper(*metav1.NewControllerRef(rootShard, operatorv1alpha1.SchemeGroupVersion.WithKind("RootShard")))
 
@@ -160,28 +164,37 @@ func (r *RootShardReconciler) reconcile(ctx context.Context, rootShard *operator
 		errs = append(errs, err)
 	}
 
-	return kerrors.NewAggregate(errs)
+	return conditions, kerrors.NewAggregate(errs)
 }
 
 // reconcileStatus sets both phase and conditions on the reconciled RootShard object.
-func (r *RootShardReconciler) reconcileStatus(ctx context.Context, oldRootShard *operatorv1alpha1.RootShard) error {
+func (r *RootShardReconciler) reconcileStatus(ctx context.Context, oldRootShard *operatorv1alpha1.RootShard, conditions []metav1.Condition) error {
 	rootShard := oldRootShard.DeepCopy()
 
 	var errs []error
 
-	if rootShard.Status.Phase == "" {
-		rootShard.Status.Phase = operatorv1alpha1.RootShardPhaseProvisioning
-	}
-
-	if rootShard.DeletionTimestamp != nil {
-		rootShard.Status.Phase = operatorv1alpha1.RootShardPhaseDeleting
+	depKey := types.NamespacedName{Namespace: rootShard.Namespace, Name: resources.GetRootShardDeploymentName(rootShard)}
+	cond, err := getDeploymentAvailableCondition(ctx, r.Client, depKey)
+	if err != nil {
+		errs = append(errs, err)
+	} else {
+		conditions = append(conditions, cond)
 	}
 
-	if err := r.setAvailableCondition(ctx, rootShard); err != nil {
-		errs = append(errs, err)
+	for _, condition := range conditions {
+		condition.ObservedGeneration = rootShard.Generation
+		rootShard.Status.Conditions = updateCondition(rootShard.Status.Conditions, condition)
 	}
 
-	if cond := apimeta.FindStatusCondition(rootShard.Status.Conditions, string(operatorv1alpha1.RootShardConditionTypeAvailable)); cond.Status == metav1.ConditionTrue {
+	availableCond := apimeta.FindStatusCondition(rootShard.Status.Conditions, string(operatorv1alpha1.ConditionTypeAvailable))
+	switch {
+	case availableCond != nil && availableCond.Status == metav1.ConditionTrue:
 		rootShard.Status.Phase = operatorv1alpha1.RootShardPhaseRunning
+
+	case rootShard.DeletionTimestamp != nil:
+		rootShard.Status.Phase = operatorv1alpha1.RootShardPhaseDeleting
+
+	case rootShard.Status.Phase == "":
+		rootShard.Status.Phase = operatorv1alpha1.RootShardPhaseProvisioning
 	}
 
 	// only patch the status if there are actual changes.
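All three controllers now derive their phase from the Available condition in the same way. As a minimal standalone sketch of that mapping (a hypothetical helper built on the RootShard types from this change, not part of the diff itself; the actual logic lives inline in each reconcileStatus):

package controller

import (
	apimeta "k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1"
)

// derivePhase mirrors the switch above: a True Available condition marks the
// object Running; otherwise a pending deletion marks it Deleting; otherwise an
// unset phase is initialized to Provisioning. Illustrative sketch only.
func derivePhase(rootShard *operatorv1alpha1.RootShard) operatorv1alpha1.RootShardPhase {
	availableCond := apimeta.FindStatusCondition(rootShard.Status.Conditions, string(operatorv1alpha1.ConditionTypeAvailable))

	switch {
	case availableCond != nil && availableCond.Status == metav1.ConditionTrue:
		return operatorv1alpha1.RootShardPhaseRunning
	case rootShard.DeletionTimestamp != nil:
		return operatorv1alpha1.RootShardPhaseDeleting
	case rootShard.Status.Phase == "":
		return operatorv1alpha1.RootShardPhaseProvisioning
	default:
		return rootShard.Status.Phase
	}
}

Note the case ordering: a deployment that is still ready keeps the object in Running even while deletion is pending; Deleting only applies once availability is lost.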
@@ -193,35 +206,3 @@ func (r *RootShardReconciler) reconcileStatus(ctx context.Context, oldRootShard
 
 	return kerrors.NewAggregate(errs)
 }
-
-func (r *RootShardReconciler) setAvailableCondition(ctx context.Context, rootShard *operatorv1alpha1.RootShard) error {
-	var dep appsv1.Deployment
-	depKey := types.NamespacedName{Namespace: rootShard.Namespace, Name: resources.GetRootShardDeploymentName(rootShard)}
-	if err := r.Client.Get(ctx, depKey, &dep); client.IgnoreNotFound(err) != nil {
-		return err
-	}
-
-	available := metav1.ConditionFalse
-	reason := operatorv1alpha1.RootShardConditionReasonDeploymentUnavailable
-	msg := deploymentStatusString(dep, depKey)
-
-	if dep.Name != "" {
-		if deploymentReady(dep) {
-			available = metav1.ConditionTrue
-			reason = operatorv1alpha1.RootShardConditionReasonReplicasUp
-		} else {
-			available = metav1.ConditionFalse
-			reason = operatorv1alpha1.RootShardConditionReasonReplicasUnavailable
-		}
-	}
-
-	rootShard.Status.Conditions = updateCondition(rootShard.Status.Conditions, metav1.Condition{
-		Type:               string(operatorv1alpha1.RootShardConditionTypeAvailable),
-		Status:             available,
-		ObservedGeneration: rootShard.Generation,
-		Reason:             string(reason),
-		Message:            msg,
-	})
-
-	return nil
-}
diff --git a/internal/controller/shard_controller.go b/internal/controller/shard_controller.go
index 9920ef3..3d9b872 100644
--- a/internal/controller/shard_controller.go
+++ b/internal/controller/shard_controller.go
@@ -18,7 +18,6 @@ package controller
 
 import (
 	"context"
-	"errors"
 	"fmt"
 
 	certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1"
@@ -32,9 +31,12 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
 	"sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
 	"github.com/kcp-dev/kcp-operator/internal/reconciling"
 	"github.com/kcp-dev/kcp-operator/internal/resources"
@@ -50,12 +52,32 @@ type ShardReconciler struct {
 
 // SetupWithManager sets up the controller with the Manager.
 func (r *ShardReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	rootShardHandler := handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
+		rootShard := obj.(*operatorv1alpha1.RootShard)
+
+		var shards operatorv1alpha1.ShardList
+		if err := mgr.GetClient().List(ctx, &shards, &client.ListOptions{Namespace: rootShard.Namespace}); err != nil {
+			utilruntime.HandleError(err)
+			return nil
+		}
+
+		var requests []reconcile.Request
+		for _, shard := range shards.Items {
+			if ref := shard.Spec.RootShard.Reference; ref != nil && ref.Name == rootShard.Name {
+				requests = append(requests, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&shard)})
+			}
+		}
+
+		return requests
+	})
+
 	return ctrl.NewControllerManagedBy(mgr).
 		For(&operatorv1alpha1.Shard{}).
 		Owns(&appsv1.Deployment{}).
 		Owns(&corev1.Secret{}).
 		Owns(&corev1.Service{}).
 		Owns(&certmanagerv1.Certificate{}).
+		Watches(&operatorv1alpha1.RootShard{}, rootShardHandler).
 		Complete(r)
 }
 
@@ -79,31 +101,27 @@ func (r *ShardReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
 		return ctrl.Result{}, nil
 	}
 
-	defer func() {
-		if err := r.reconcileStatus(ctx, &s); err != nil {
-			recErr = kerrors.NewAggregate([]error{recErr, err})
-		}
-	}()
-
-	var rootShard operatorv1alpha1.RootShard
-	if ref := s.Spec.RootShard.Reference; ref != nil {
-		rootShardRef := types.NamespacedName{
-			Namespace: s.Namespace,
-			Name:      ref.Name,
-		}
+	conditions, recErr := r.reconcile(ctx, &s)
 
-		if err := r.Client.Get(ctx, rootShardRef, &rootShard); err != nil {
-			return ctrl.Result{}, fmt.Errorf("failed to get root shard: %w", err)
-		}
-	} else {
-		return ctrl.Result{}, errors.New("no RootShard reference specified in Shard spec")
+	if err := r.reconcileStatus(ctx, &s, conditions); err != nil {
+		recErr = kerrors.NewAggregate([]error{recErr, err})
 	}
 
-	return ctrl.Result{}, r.reconcile(ctx, &s, &rootShard)
+	return ctrl.Result{}, recErr
 }
 
-func (r *ShardReconciler) reconcile(ctx context.Context, s *operatorv1alpha1.Shard, rootShard *operatorv1alpha1.RootShard) error {
-	var errs []error
+func (r *ShardReconciler) reconcile(ctx context.Context, s *operatorv1alpha1.Shard) ([]metav1.Condition, error) {
+	var (
+		errs       []error
+		conditions []metav1.Condition
+	)
+
+	cond, rootShard := fetchRootShard(ctx, r.Client, s.Namespace, s.Spec.RootShard.Reference)
+	conditions = append(conditions, cond)
+
+	if rootShard == nil {
+		return conditions, nil
+	}
 
 	ownerRefWrapper := k8creconciling.OwnerRefWrapper(*metav1.NewControllerRef(s, operatorv1alpha1.SchemeGroupVersion.WithKind("Shard")))
 
@@ -136,28 +154,37 @@ func (r *ShardReconciler) reconcile(ctx context.Context, s *operatorv1alpha1.Sha
 		errs = append(errs, err)
 	}
 
-	return kerrors.NewAggregate(errs)
+	return conditions, kerrors.NewAggregate(errs)
 }
 
 // reconcileStatus sets both phase and conditions on the reconciled Shard object.
-func (r *ShardReconciler) reconcileStatus(ctx context.Context, oldShard *operatorv1alpha1.Shard) error {
+func (r *ShardReconciler) reconcileStatus(ctx context.Context, oldShard *operatorv1alpha1.Shard, conditions []metav1.Condition) error {
 	newShard := oldShard.DeepCopy()
 
 	var errs []error
 
-	if newShard.Status.Phase == "" {
-		newShard.Status.Phase = operatorv1alpha1.ShardPhaseProvisioning
-	}
-
-	if newShard.DeletionTimestamp != nil {
-		newShard.Status.Phase = operatorv1alpha1.ShardPhaseDeleting
+	depKey := types.NamespacedName{Namespace: newShard.Namespace, Name: resources.GetShardDeploymentName(newShard)}
+	cond, err := getDeploymentAvailableCondition(ctx, r.Client, depKey)
+	if err != nil {
+		errs = append(errs, err)
+	} else {
+		conditions = append(conditions, cond)
 	}
 
-	if err := r.setAvailableCondition(ctx, newShard); err != nil {
-		errs = append(errs, err)
+	for _, condition := range conditions {
+		condition.ObservedGeneration = newShard.Generation
+		newShard.Status.Conditions = updateCondition(newShard.Status.Conditions, condition)
 	}
 
-	if cond := apimeta.FindStatusCondition(newShard.Status.Conditions, string(operatorv1alpha1.ShardConditionTypeAvailable)); cond.Status == metav1.ConditionTrue {
+	availableCond := apimeta.FindStatusCondition(newShard.Status.Conditions, string(operatorv1alpha1.ConditionTypeAvailable))
+	switch {
+	case availableCond != nil && availableCond.Status == metav1.ConditionTrue:
 		newShard.Status.Phase = operatorv1alpha1.ShardPhaseRunning
+
+	case newShard.DeletionTimestamp != nil:
+		newShard.Status.Phase = operatorv1alpha1.ShardPhaseDeleting
+
+	case newShard.Status.Phase == "":
+		newShard.Status.Phase = operatorv1alpha1.ShardPhaseProvisioning
 	}
 
 	// only patch the status if there are actual changes.
@@ -169,35 +196,3 @@ func (r *ShardReconciler) reconcileStatus(ctx context.Context, oldShard *operato
 
 	return kerrors.NewAggregate(errs)
 }
-
-func (r *ShardReconciler) setAvailableCondition(ctx context.Context, s *operatorv1alpha1.Shard) error {
-	var dep appsv1.Deployment
-	depKey := types.NamespacedName{Namespace: s.Namespace, Name: resources.GetShardDeploymentName(s)}
-	if err := r.Client.Get(ctx, depKey, &dep); client.IgnoreNotFound(err) != nil {
-		return err
-	}
-
-	available := metav1.ConditionFalse
-	reason := operatorv1alpha1.ShardConditionReasonDeploymentUnavailable
-	msg := deploymentStatusString(dep, depKey)
-
-	if dep.Name != "" {
-		if deploymentReady(dep) {
-			available = metav1.ConditionTrue
-			reason = operatorv1alpha1.ShardConditionReasonReplicasUp
-		} else {
-			available = metav1.ConditionFalse
-			reason = operatorv1alpha1.ShardConditionReasonReplicasUnavailable
-		}
-	}
-
-	s.Status.Conditions = updateCondition(s.Status.Conditions, metav1.Condition{
-		Type:               string(operatorv1alpha1.ShardConditionTypeAvailable),
-		Status:             available,
-		ObservedGeneration: s.Generation,
-		Reason:             string(reason),
-		Message:            msg,
-	})
-
-	return nil
-}
diff --git a/internal/controller/util.go b/internal/controller/util.go
index 373ddb3..e0397df 100644
--- a/internal/controller/util.go
+++ b/internal/controller/util.go
@@ -17,33 +17,54 @@ limitations under the License.
 package controller
 
 import (
+	"context"
 	"fmt"
 
 	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
 	apimeta "k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/utils/ptr"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1"
 )
 
 func deploymentReady(dep appsv1.Deployment) bool {
 	return dep.Status.UpdatedReplicas == dep.Status.ReadyReplicas && dep.Status.ReadyReplicas == ptr.Deref(dep.Spec.Replicas, 0)
 }
 
-func deploymentStatusString(dep appsv1.Deployment, key types.NamespacedName) string {
+func getDeploymentAvailableCondition(ctx context.Context, c client.Client, key types.NamespacedName) (metav1.Condition, error) {
+	var dep appsv1.Deployment
+	if err := c.Get(ctx, key, &dep); client.IgnoreNotFound(err) != nil {
+		return metav1.Condition{}, err
+	}
+
+	available := metav1.ConditionFalse
+	reason := operatorv1alpha1.ConditionReasonDeploymentUnavailable
 	msg := fmt.Sprintf("Deployment %s", key)
 	if dep.Name != "" {
 		if deploymentReady(dep) {
-			msg += " is fully up and running"
+			available = metav1.ConditionTrue
+			reason = operatorv1alpha1.ConditionReasonReplicasUp
+			msg += " is fully up and running."
 		} else {
-			msg += " is not in desired replica state"
+			available = metav1.ConditionFalse
+			reason = operatorv1alpha1.ConditionReasonReplicasUnavailable
+			msg += " is not in desired replica state."
 		}
 	} else {
-		msg += " does not exist"
+		msg += " does not exist."
 	}
 
-	return msg
+	return metav1.Condition{
+		Type:    string(operatorv1alpha1.ConditionTypeAvailable),
+		Status:  available,
+		Reason:  string(reason),
+		Message: msg,
+	}, nil
 }
 
 func updateCondition(conditions []metav1.Condition, newCondition metav1.Condition) []metav1.Condition {
@@ -68,3 +89,31 @@ func updateCondition(conditions []metav1.Condition, newCondition metav1.Conditio
 
 	return conditions
 }
+
+func fetchRootShard(ctx context.Context, c client.Client, namespace string, ref *corev1.LocalObjectReference) (metav1.Condition, *operatorv1alpha1.RootShard) {
+	if ref == nil {
+		return metav1.Condition{
+			Type:    string(operatorv1alpha1.ConditionTypeRootShard),
+			Status:  metav1.ConditionFalse,
+			Reason:  string(operatorv1alpha1.ConditionReasonRootShardRefInvalid),
+			Message: "No valid RootShard defined in spec.",
+		}, nil
+	}
+
+	rootShard := &operatorv1alpha1.RootShard{}
+	if err := c.Get(ctx, types.NamespacedName{Name: ref.Name, Namespace: namespace}, rootShard); err != nil {
+		return metav1.Condition{
+			Type:    string(operatorv1alpha1.ConditionTypeRootShard),
+			Status:  metav1.ConditionFalse,
+			Reason:  string(operatorv1alpha1.ConditionReasonRootShardRefInvalid),
+			Message: fmt.Sprintf("Failed to retrieve RootShard: %v.", err),
+		}, nil
+	}
+
+	return metav1.Condition{
+		Type:    string(operatorv1alpha1.ConditionTypeRootShard),
+		Status:  metav1.ConditionTrue,
+		Reason:  string(operatorv1alpha1.ConditionReasonRootShardRefValid),
+		Message: "RootShard reference is valid.",
+	}, rootShard
+}
diff --git a/internal/resources/rootshard/ca_certificates.go b/internal/resources/rootshard/ca_certificates.go
index 0e05fd4..20b0a40 100644
--- a/internal/resources/rootshard/ca_certificates.go
+++ b/internal/resources/rootshard/ca_certificates.go
@@ -17,8 +17,6 @@ limitations under the License.
 package rootshard
 
 import (
-	"fmt"
-
 	certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1"
 	certmanagermetav1 "github.com/cert-manager/cert-manager/pkg/apis/meta/v1"
 
@@ -31,14 +29,14 @@ import (
 
 func RootCACertificateReconciler(rootShard *operatorv1alpha1.RootShard) reconciling.NamedCertificateReconcilerFactory {
 	name := resources.GetRootShardCAName(rootShard, operatorv1alpha1.RootCA)
 
+	if rootShard.Spec.Certificates.IssuerRef == nil {
+		panic("RootCACertificateReconciler must not be called if no issuerRef is specified.")
+	}
+
 	return func() (string, reconciling.CertificateReconciler) {
 		return name, func(cert *certmanagerv1.Certificate) (*certmanagerv1.Certificate, error) {
 			cert.SetLabels(resources.GetRootShardResourceLabels(rootShard))
 
-			if rootShard.Spec.Certificates.IssuerRef == nil {
-				return nil, fmt.Errorf("no issuer ref configured in RootShard '%s/%s'", rootShard.Namespace, rootShard.Name)
-			}
-
 			cert.Spec = certmanagerv1.CertificateSpec{
 				IsCA:       true,
 				CommonName: name,
diff --git a/sdk/apis/operator/v1alpha1/common.go b/sdk/apis/operator/v1alpha1/common.go
index d14ced1..9a1675a 100644
--- a/sdk/apis/operator/v1alpha1/common.go
+++ b/sdk/apis/operator/v1alpha1/common.go
@@ -97,3 +97,26 @@ const (
 	FrontProxyClientCA    CA = "front-proxy-client"
 	RequestHeaderClientCA CA = "requestheader-client"
 )
+
+type ConditionType string
+
+const (
+	ConditionTypeAvailable ConditionType = "Available"
+	ConditionTypeRootShard ConditionType = "RootShard"
+)
+
+type ConditionReason string
+
+const (
+	// reasons for ConditionTypeAvailable
+
+	ConditionReasonDeploymentUnavailable ConditionReason = "DeploymentUnavailable"
+	ConditionReasonReplicasUp            ConditionReason = "ReplicasUp"
+	ConditionReasonReplicasUnavailable   ConditionReason = "ReplicasUnavailable"
+
+	// reasons for ConditionTypeRootShard
+
+	ConditionReasonRootShardRefInvalid  ConditionReason = "InvalidReference"
+	ConditionReasonRootShardRefNotFound ConditionReason = "RootShardNotFound"
+	ConditionReasonRootShardRefValid    ConditionReason = "Valid"
+)
diff --git a/sdk/apis/operator/v1alpha1/frontproxy_types.go b/sdk/apis/operator/v1alpha1/frontproxy_types.go
index 88b7324..3dad92d 100644
--- a/sdk/apis/operator/v1alpha1/frontproxy_types.go
+++ b/sdk/apis/operator/v1alpha1/frontproxy_types.go
@@ -66,24 +66,11 @@ const (
 	FrontProxyPhaseDeleting FrontProxyPhase = "Deleting"
 )
 
-type FrontProxyConditionType string
-
-const (
-	FrontProxyConditionTypeAvailable FrontProxyConditionType = "Available"
-)
-
-type FrontProxyConditionReason string
-
-const (
-	FrontProxyConditionReasonDeploymentUnavailable FrontProxyConditionReason = "DeploymentUnavailable"
-	FrontProxyConditionReasonReplicasUp            FrontProxyConditionReason = "ReplicasUp"
-	FrontProxyConditionReasonReplicasUnavailable   FrontProxyConditionReason = "ReplicasUnavailable"
-)
-
 // +genclient
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
 // +kubebuilder:printcolumn:JSONPath=".spec.rootShard.ref.name",name="RootShard",type="string"
+// +kubebuilder:printcolumn:JSONPath=".spec.externalHostname",name="ExternalHostname",type="string"
 // +kubebuilder:printcolumn:JSONPath=".status.phase",name="Phase",type="string"
 // +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type="date"
diff --git a/sdk/apis/operator/v1alpha1/rootshard_types.go b/sdk/apis/operator/v1alpha1/rootshard_types.go
index c12c534..7461288 100644
--- a/sdk/apis/operator/v1alpha1/rootshard_types.go
+++ b/sdk/apis/operator/v1alpha1/rootshard_types.go
@@ -30,6 +30,8 @@ type RootShardSpec struct {
 	// Cache configures the cache server (with a Kubernetes-like API) used by a sharded kcp instance.
 	Cache CacheConfig `json:"cache"`
 
+	// Certificates configures how the operator should create the kcp root CA, from which it will
+	// then create all other sub CAs and leaf certificates.
 	Certificates Certificates `json:"certificates"`
 }
 
@@ -40,8 +42,16 @@ type ExternalConfig struct {
 	Port uint32 `json:"port"`
 }
 
+// Certificates configures how certificates for kcp should be created.
 type Certificates struct {
-	IssuerRef *ObjectReference `json:"issuerRef,omitempty"`
+	// IssuerRef points to a pre-existing cert-manager Issuer or ClusterIssuer that shall be used
+	// to acquire new certificates. This field is mutually exclusive with caSecretRef.
+	IssuerRef *ObjectReference `json:"issuerRef,omitempty"`
+
+	// CASecretRef can be used as an alternative to the IssuerRef: This field allows configuring
+	// a pre-existing CA certificate that should be used to sign kcp certificates.
+	// This Secret must contain both the certificate and the private key so that new sub certificates
+	// can be signed and created from this CA. This field is mutually exclusive with issuerRef.
 	CASecretRef *corev1.LocalObjectReference `json:"caSecretRef,omitempty"`
 }
 
@@ -97,20 +107,6 @@ const (
 	RootShardPhaseDeleting RootShardPhase = "Deleting"
 )
 
-type RootShardConditionType string
-
-const (
-	RootShardConditionTypeAvailable RootShardConditionType = "Available"
-)
-
-type RootShardConditionReason string
-
-const (
-	RootShardConditionReasonDeploymentUnavailable RootShardConditionReason = "DeploymentUnavailable"
-	RootShardConditionReasonReplicasUp            RootShardConditionReason = "ReplicasUp"
-	RootShardConditionReasonReplicasUnavailable   RootShardConditionReason = "ReplicasUnavailable"
-)
-
 // +genclient
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
diff --git a/sdk/apis/operator/v1alpha1/shard_types.go b/sdk/apis/operator/v1alpha1/shard_types.go
index fdf17bc..5756548 100644
--- a/sdk/apis/operator/v1alpha1/shard_types.go
+++ b/sdk/apis/operator/v1alpha1/shard_types.go
@@ -56,23 +56,12 @@ const (
 	ShardPhaseDeleting ShardPhase = "Deleting"
 )
 
-type ShardConditionType string
-
-const (
-	ShardConditionTypeAvailable ShardConditionType = "Available"
-)
-
-type ShardConditionReason string
-
-const (
-	ShardConditionReasonDeploymentUnavailable ShardConditionReason = "DeploymentUnavailable"
-	ShardConditionReasonReplicasUp            ShardConditionReason = "ReplicasUp"
-	ShardConditionReasonReplicasUnavailable   ShardConditionReason = "ReplicasUnavailable"
-)
-
 // +genclient
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:JSONPath=".spec.rootShard.ref.name",name="RootShard",type="string"
+// +kubebuilder:printcolumn:JSONPath=".status.phase",name="Phase",type="string"
+// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type="date"
 
 // Shard is the Schema for the shards API
 type Shard struct {
diff --git a/sdk/apis/operator/v1alpha1/zz_generated.deepcopy.go b/sdk/apis/operator/v1alpha1/zz_generated.deepcopy.go
index a1593e2..abd8184 100644
--- a/sdk/apis/operator/v1alpha1/zz_generated.deepcopy.go
+++ b/sdk/apis/operator/v1alpha1/zz_generated.deepcopy.go
@@ -1,21 +1,5 @@
 //go:build !ignore_autogenerated
 
-/*
-Copyright 2024 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
 // Code generated by controller-gen. DO NOT EDIT.
 
 package v1alpha1
@@ -289,7 +273,7 @@ func (in *FrontProxy) DeepCopyInto(out *FrontProxy) {
 	out.TypeMeta = in.TypeMeta
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	in.Spec.DeepCopyInto(&out.Spec)
-	out.Status = in.Status
+	in.Status.DeepCopyInto(&out.Status)
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontProxy.
@@ -366,6 +350,11 @@ func (in *FrontProxySpec) DeepCopyInto(out *FrontProxySpec) {
 		*out = new(ImageSpec)
 		(*in).DeepCopyInto(*out)
 	}
+	if in.Service != nil {
+		in, out := &in.Service, &out.Service
+		*out = new(ServiceSpec)
+		**out = **in
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontProxySpec.
@@ -381,6 +370,13 @@ func (in *FrontProxySpec) DeepCopy() *FrontProxySpec {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *FrontProxyStatus) DeepCopyInto(out *FrontProxyStatus) {
 	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]metav1.Condition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontProxyStatus.
@@ -705,13 +701,28 @@ func (in *RootShardStatus) DeepCopy() *RootShardStatus {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec.
+func (in *ServiceSpec) DeepCopy() *ServiceSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Shard) DeepCopyInto(out *Shard) {
 	*out = *in
 	out.TypeMeta = in.TypeMeta
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	in.Spec.DeepCopyInto(&out.Spec)
-	out.Status = in.Status
+	in.Status.DeepCopyInto(&out.Status)
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Shard.
@@ -784,6 +795,13 @@ func (in *ShardSpec) DeepCopy() *ShardSpec {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ShardStatus) DeepCopyInto(out *ShardStatus) {
 	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]metav1.Condition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardStatus.
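The new Certificates API added above admits exactly one of two configurations. A usage sketch under the types from this change (the issuer and Secret names are hypothetical, and ObjectReference is assumed to carry the group/kind/name fields its CRD description mentions):

package main

import (
	corev1 "k8s.io/api/core/v1"

	operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1"
)

// exampleCertificates shows both variants of RootShardSpec.Certificates.
// Exactly one of IssuerRef and CASecretRef may be set; they are mutually
// exclusive. Illustrative sketch only, not part of this diff.
func exampleCertificates() (viaIssuer, viaCA operatorv1alpha1.Certificates) {
	// Variant A: delegate signing of the kcp root CA to a pre-existing
	// cert-manager Issuer or ClusterIssuer.
	viaIssuer = operatorv1alpha1.Certificates{
		IssuerRef: &operatorv1alpha1.ObjectReference{
			Group: "cert-manager.io",
			Kind:  "ClusterIssuer",
			Name:  "my-ca-issuer", // hypothetical issuer name
		},
	}

	// Variant B: bring a pre-existing CA instead. The referenced Secret must
	// contain both the certificate and the private key so that sub-CAs and
	// leaf certificates can be signed from it.
	viaCA = operatorv1alpha1.Certificates{
		CASecretRef: &corev1.LocalObjectReference{
			Name: "kcp-root-ca", // hypothetical Secret name
		},
	}

	return viaIssuer, viaCA
}

With IssuerRef the operator acquires its root CA through cert-manager, while CASecretRef short-circuits that step by supplying the CA keypair directly; note that RootCACertificateReconciler now panics when called without an issuerRef, so callers are expected to branch on which field is set.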