✨ Add RootShard condition #31

Merged: 7 commits, Feb 1, 2025
3 changes: 3 additions & 0 deletions config/crd/bases/operator.kcp.io_frontproxies.yaml
@@ -18,6 +18,9 @@ spec:
- jsonPath: .spec.rootShard.ref.name
name: RootShard
type: string
- jsonPath: .spec.externalHostname
name: ExternalHostname
type: string
- jsonPath: .status.phase
name: Phase
type: string
14 changes: 10 additions & 4 deletions config/crd/bases/operator.kcp.io_rootshards.yaml
@@ -66,11 +66,16 @@ spec:
type: object
type: object
certificates:
description: |-
Certificates configures how the operator should create the kcp root CA, from which it will
then create all other sub CAs and leaf certificates.
properties:
caSecretRef:
description: |-
LocalObjectReference contains enough information to let you locate the
referenced object inside the same namespace.
CASecretRef can be used as an alternative to the IssuerRef: This field allows to configure
a pre-existing CA certificate that should be used to sign kcp certificates.
This Secret must contain both the certificate and the private key so that new sub certificates
can be signed and created from this CA. This field is mutually exclusive with issuerRef.
properties:
name:
default: ""
@@ -84,8 +89,9 @@ spec:
type: object
x-kubernetes-map-type: atomic
issuerRef:
description: ObjectReference is a reference to an object with
a given name, kind and group.
description: |-
IssuerRef points to a pre-existing cert-manager Issuer or ClusterIssuer that shall be used
to acquire new certificates. This field is mutually exclusive with caSecretRef.
properties:
group:
description: Group of the object being referred to.
12 changes: 11 additions & 1 deletion config/crd/bases/operator.kcp.io_shards.yaml
@@ -14,7 +14,17 @@ spec:
singular: shard
scope: Namespaced
versions:
- name: v1alpha1
- additionalPrinterColumns:
- jsonPath: .spec.rootShard.ref.name
name: RootShard
type: string
- jsonPath: .status.phase
name: Phase
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1alpha1
schema:
openAPIV3Schema:
description: Shard is the Schema for the shards API
2 changes: 1 addition & 1 deletion hack/update-codegen.sh
@@ -33,7 +33,7 @@ _tools/reconciler-gen --config hack/reconciling.yaml > internal/reconciling/zz_g

# generate CRDs
go run sigs.k8s.io/controller-tools/cmd/controller-gen \
rbac:roleName=manager-role crd webhook \
rbac:roleName=manager-role crd webhook object \
paths="./..." \
output:crd:artifacts:config=config/crd/bases
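
For context on the one-word change above: controller-gen's "object" generator emits deep-copy helpers (DeepCopyInto, DeepCopy, DeepCopyObject) for the API types, typically into a zz_generated.deepcopy.go file, so the types keep satisfying runtime.Object as their fields change. Purely as an illustration of the shape of that generated code (the Widget type below is a hypothetical stand-in, not one of the operator's APIs):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// Widget is a stand-in API type; the real targets are the operator's FrontProxy, RootShard and Shard types.
type Widget struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Replicas          *int32 `json:"replicas,omitempty"`
}

// DeepCopyInto writes a deep copy of the receiver into out, duplicating pointer fields.
func (in *Widget) DeepCopyInto(out *Widget) {
	*out = *in
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	if in.Replicas != nil {
		replicas := *in.Replicas
		out.Replicas = &replicas
	}
}

// DeepCopy allocates a new Widget and deep-copies the receiver into it.
func (in *Widget) DeepCopy() *Widget {
	if in == nil {
		return nil
	}
	out := new(Widget)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject satisfies runtime.Object so the type works with controller-runtime clients and caches.
func (in *Widget) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}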

118 changes: 59 additions & 59 deletions internal/controller/frontproxy_controller.go
@@ -31,9 +31,12 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
kerrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"

"github.com/kcp-dev/kcp-operator/internal/reconciling"
"github.com/kcp-dev/kcp-operator/internal/resources"
@@ -49,13 +52,33 @@ type FrontProxyReconciler struct {

// SetupWithManager sets up the controller with the Manager.
func (r *FrontProxyReconciler) SetupWithManager(mgr ctrl.Manager) error {
rootShardHandler := handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
rootShard := obj.(*operatorv1alpha1.RootShard)

var fpList operatorv1alpha1.FrontProxyList
if err := mgr.GetClient().List(ctx, &fpList, &client.ListOptions{Namespace: rootShard.Namespace}); err != nil {
utilruntime.HandleError(err)
return nil
}

var requests []reconcile.Request
for _, frontProxy := range fpList.Items {
if ref := frontProxy.Spec.RootShard.Reference; ref != nil && ref.Name == rootShard.Name {
requests = append(requests, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&frontProxy)})
}
}

return requests
})

return ctrl.NewControllerManagedBy(mgr).
For(&operatorv1alpha1.FrontProxy{}).
Owns(&appsv1.Deployment{}).
Owns(&corev1.ConfigMap{}).
Owns(&corev1.Secret{}).
Owns(&corev1.Service{}).
Owns(&certmanagerv1.Certificate{}).
Watches(&operatorv1alpha1.RootShard{}, rootShardHandler).
Complete(r)
}

@@ -80,29 +103,29 @@ func (r *FrontProxyReconciler) Reconcile(ctx context.Context, req ctrl.Request)
return ctrl.Result{}, nil
}

defer func() {
if err := r.reconcileStatus(ctx, &frontProxy); err != nil {
recErr = kerrors.NewAggregate([]error{recErr, err})
}
}()
conditions, recErr := r.reconcile(ctx, &frontProxy)

if err := r.reconcileStatus(ctx, &frontProxy, conditions); err != nil {
recErr = kerrors.NewAggregate([]error{recErr, err})
}

return ctrl.Result{}, r.reconcile(ctx, &frontProxy)
return ctrl.Result{}, recErr
}

func (r *FrontProxyReconciler) reconcile(ctx context.Context, frontProxy *operatorv1alpha1.FrontProxy) error {
var errs []error
func (r *FrontProxyReconciler) reconcile(ctx context.Context, frontProxy *operatorv1alpha1.FrontProxy) ([]metav1.Condition, error) {
var (
errs []error
conditions []metav1.Condition
)

ownerRefWrapper := k8creconciling.OwnerRefWrapper(*metav1.NewControllerRef(frontProxy, operatorv1alpha1.SchemeGroupVersion.WithKind("FrontProxy")))
cond, rootShard := fetchRootShard(ctx, r.Client, frontProxy.Namespace, frontProxy.Spec.RootShard.Reference)
conditions = append(conditions, cond)

ref := frontProxy.Spec.RootShard.Reference
if ref == nil {
return fmt.Errorf("no valid RootShard in FrontProxy spec defined")
if rootShard == nil {
return conditions, nil
}

rootShard := &operatorv1alpha1.RootShard{}
if err := r.Client.Get(ctx, types.NamespacedName{Name: ref.Name, Namespace: frontProxy.Namespace}, rootShard); err != nil {
return fmt.Errorf("referenced RootShard '%s' could not be fetched", ref.Name)
}
ownerRefWrapper := k8creconciling.OwnerRefWrapper(*metav1.NewControllerRef(frontProxy, operatorv1alpha1.SchemeGroupVersion.WithKind("FrontProxy")))

configMapReconcilers := []k8creconciling.NamedConfigMapReconcilerFactory{
frontproxy.PathMappingConfigMapReconciler(frontProxy, rootShard),
@@ -147,27 +170,36 @@ func (r *FrontProxyReconciler) reconcile(ctx context.Context, frontProxy *operat
errs = append(errs, err)
}

return kerrors.NewAggregate(errs)
return conditions, kerrors.NewAggregate(errs)
}

func (r *FrontProxyReconciler) reconcileStatus(ctx context.Context, oldFrontProxy *operatorv1alpha1.FrontProxy) error {
func (r *FrontProxyReconciler) reconcileStatus(ctx context.Context, oldFrontProxy *operatorv1alpha1.FrontProxy, conditions []metav1.Condition) error {
frontProxy := oldFrontProxy.DeepCopy()
var errs []error

if frontProxy.Status.Phase == "" {
frontProxy.Status.Phase = operatorv1alpha1.FrontProxyPhaseProvisioning
}

if frontProxy.DeletionTimestamp != nil {
frontProxy.Status.Phase = operatorv1alpha1.FrontProxyPhaseDeleting
depKey := types.NamespacedName{Namespace: frontProxy.Namespace, Name: resources.GetFrontProxyDeploymentName(frontProxy)}
cond, err := getDeploymentAvailableCondition(ctx, r.Client, depKey)
if err != nil {
errs = append(errs, err)
} else {
conditions = append(conditions, cond)
}

if err := r.setAvailableCondition(ctx, frontProxy); err != nil {
errs = append(errs, err)
for _, condition := range conditions {
condition.ObservedGeneration = frontProxy.Generation
frontProxy.Status.Conditions = updateCondition(frontProxy.Status.Conditions, condition)
}

if cond := apimeta.FindStatusCondition(frontProxy.Status.Conditions, string(operatorv1alpha1.RootShardConditionTypeAvailable)); cond.Status == metav1.ConditionTrue {
availableCond := apimeta.FindStatusCondition(frontProxy.Status.Conditions, string(operatorv1alpha1.ConditionTypeAvailable))
switch {
case availableCond.Status == metav1.ConditionTrue:
frontProxy.Status.Phase = operatorv1alpha1.FrontProxyPhaseRunning

case frontProxy.DeletionTimestamp != nil:
frontProxy.Status.Phase = operatorv1alpha1.FrontProxyPhaseDeleting

case frontProxy.Status.Phase == "":
frontProxy.Status.Phase = operatorv1alpha1.FrontProxyPhaseProvisioning
}

// only patch the status if there are actual changes.
@@ -179,35 +211,3 @@ func (r *FrontProxyReconciler) reconcileStatus(ctx context.Context, oldFrontProx

return kerrors.NewAggregate(errs)
}

func (r *FrontProxyReconciler) setAvailableCondition(ctx context.Context, frontProxy *operatorv1alpha1.FrontProxy) error {
var dep appsv1.Deployment
depKey := types.NamespacedName{Namespace: frontProxy.Namespace, Name: resources.GetFrontProxyDeploymentName(frontProxy)}
if err := r.Client.Get(ctx, depKey, &dep); client.IgnoreNotFound(err) != nil {
return err
}

available := metav1.ConditionFalse
reason := operatorv1alpha1.FrontProxyConditionReasonDeploymentUnavailable
msg := deploymentStatusString(dep, depKey)

if dep.Name != "" {
if deploymentReady(dep) {
available = metav1.ConditionTrue
reason = operatorv1alpha1.FrontProxyConditionReasonReplicasUp
} else {
available = metav1.ConditionFalse
reason = operatorv1alpha1.FrontProxyConditionReasonReplicasUnavailable
}
}

frontProxy.Status.Conditions = updateCondition(frontProxy.Status.Conditions, metav1.Condition{
Type: string(operatorv1alpha1.FrontProxyConditionTypeAvailable),
Status: available,
ObservedGeneration: frontProxy.Generation,
Reason: string(reason),
Message: msg,
})

return nil
}
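
The reworked reconcile above delegates resolving the referenced RootShard (and producing the new RootShard condition) to a fetchRootShard helper that is not part of this diff. Below is a minimal sketch of what such a helper could look like, inferred only from the call site; it assumes the same package and imports as frontproxy_controller.go, and the reference type (corev1.LocalObjectReference) as well as the condition type and reason strings are guesses, not the repository's actual constants.

// fetchRootShard resolves a RootShard reference and reports the outcome as a condition.
// A nil RootShard return value means the reference was missing or could not be resolved.
func fetchRootShard(ctx context.Context, c client.Client, namespace string, ref *corev1.LocalObjectReference) (metav1.Condition, *operatorv1alpha1.RootShard) {
	cond := metav1.Condition{
		Type:   "RootShard", // assumed; the repository likely defines a typed constant for this
		Status: metav1.ConditionFalse,
	}

	if ref == nil {
		cond.Reason = "InvalidReference" // assumed reason value
		cond.Message = "No RootShard reference is configured in the spec."
		return cond, nil
	}

	rootShard := &operatorv1alpha1.RootShard{}
	if err := c.Get(ctx, types.NamespacedName{Name: ref.Name, Namespace: namespace}, rootShard); err != nil {
		cond.Reason = "RootShardNotFound" // assumed reason value
		cond.Message = fmt.Sprintf("Referenced RootShard %q could not be fetched: %v.", ref.Name, err)
		return cond, nil
	}

	cond.Status = metav1.ConditionTrue
	cond.Reason = "RootShardFound" // assumed reason value
	cond.Message = fmt.Sprintf("Referenced RootShard %q exists.", ref.Name)
	return cond, rootShard
}
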
83 changes: 32 additions & 51 deletions internal/controller/rootshard_controller.go
@@ -90,17 +90,21 @@ func (r *RootShardReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
return ctrl.Result{}, nil
}

defer func() {
if err := r.reconcileStatus(ctx, &rootShard); err != nil {
recErr = kerrors.NewAggregate([]error{recErr, err})
}
}()
conditions, recErr := r.reconcile(ctx, &rootShard)

if err := r.reconcileStatus(ctx, &rootShard, conditions); err != nil {
recErr = kerrors.NewAggregate([]error{recErr, err})
}

return ctrl.Result{}, r.reconcile(ctx, &rootShard)
return ctrl.Result{}, recErr
}

func (r *RootShardReconciler) reconcile(ctx context.Context, rootShard *operatorv1alpha1.RootShard) error {
var errs []error
//nolint:unparam // Keep the controller working the same as all the others, even though currently it does always return nil conditions.
func (r *RootShardReconciler) reconcile(ctx context.Context, rootShard *operatorv1alpha1.RootShard) ([]metav1.Condition, error) {
var (
errs []error
conditions []metav1.Condition
)

ownerRefWrapper := k8creconciling.OwnerRefWrapper(*metav1.NewControllerRef(rootShard, operatorv1alpha1.SchemeGroupVersion.WithKind("RootShard")))

@@ -160,28 +164,37 @@ func (r *RootShardReconciler) reconcile(ctx context.Context, rootShard *operator
errs = append(errs, err)
}

return kerrors.NewAggregate(errs)
return conditions, kerrors.NewAggregate(errs)
}

// reconcileStatus sets both phase and conditions on the reconciled RootShard object.
func (r *RootShardReconciler) reconcileStatus(ctx context.Context, oldRootShard *operatorv1alpha1.RootShard) error {
func (r *RootShardReconciler) reconcileStatus(ctx context.Context, oldRootShard *operatorv1alpha1.RootShard, conditions []metav1.Condition) error {
rootShard := oldRootShard.DeepCopy()
var errs []error

if rootShard.Status.Phase == "" {
rootShard.Status.Phase = operatorv1alpha1.RootShardPhaseProvisioning
}

if rootShard.DeletionTimestamp != nil {
rootShard.Status.Phase = operatorv1alpha1.RootShardPhaseDeleting
depKey := types.NamespacedName{Namespace: rootShard.Namespace, Name: resources.GetRootShardDeploymentName(rootShard)}
cond, err := getDeploymentAvailableCondition(ctx, r.Client, depKey)
if err != nil {
errs = append(errs, err)
} else {
conditions = append(conditions, cond)
}

if err := r.setAvailableCondition(ctx, rootShard); err != nil {
errs = append(errs, err)
for _, condition := range conditions {
condition.ObservedGeneration = rootShard.Generation
rootShard.Status.Conditions = updateCondition(rootShard.Status.Conditions, condition)
}

if cond := apimeta.FindStatusCondition(rootShard.Status.Conditions, string(operatorv1alpha1.RootShardConditionTypeAvailable)); cond.Status == metav1.ConditionTrue {
availableCond := apimeta.FindStatusCondition(rootShard.Status.Conditions, string(operatorv1alpha1.ConditionTypeAvailable))
switch {
case availableCond.Status == metav1.ConditionTrue:
rootShard.Status.Phase = operatorv1alpha1.RootShardPhaseRunning

case rootShard.DeletionTimestamp != nil:
rootShard.Status.Phase = operatorv1alpha1.RootShardPhaseDeleting

case rootShard.Status.Phase == "":
rootShard.Status.Phase = operatorv1alpha1.RootShardPhaseProvisioning
}

// only patch the status if there are actual changes.
@@ -193,35 +206,3 @@ func (r *RootShardReconciler) reconcileStatus(ctx context.Context, oldRootShard

return kerrors.NewAggregate(errs)
}

func (r *RootShardReconciler) setAvailableCondition(ctx context.Context, rootShard *operatorv1alpha1.RootShard) error {
var dep appsv1.Deployment
depKey := types.NamespacedName{Namespace: rootShard.Namespace, Name: resources.GetRootShardDeploymentName(rootShard)}
if err := r.Client.Get(ctx, depKey, &dep); client.IgnoreNotFound(err) != nil {
return err
}

available := metav1.ConditionFalse
reason := operatorv1alpha1.RootShardConditionReasonDeploymentUnavailable
msg := deploymentStatusString(dep, depKey)

if dep.Name != "" {
if deploymentReady(dep) {
available = metav1.ConditionTrue
reason = operatorv1alpha1.RootShardConditionReasonReplicasUp
} else {
available = metav1.ConditionFalse
reason = operatorv1alpha1.RootShardConditionReasonReplicasUnavailable
}
}

rootShard.Status.Conditions = updateCondition(rootShard.Status.Conditions, metav1.Condition{
Type: string(operatorv1alpha1.RootShardConditionTypeAvailable),
Status: available,
ObservedGeneration: rootShard.Generation,
Reason: string(reason),
Message: msg,
})

return nil
}
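
Both controllers additionally call getDeploymentAvailableCondition and updateCondition, which are likewise defined outside this diff. The sketch below is only a hedged reconstruction consistent with the call sites and with the removed setAvailableCondition code; it assumes the apimachinery condition helpers and the same imports as the controller files above, and the reason strings are illustrative rather than the repository's actual constants.

// getDeploymentAvailableCondition derives an Available condition from the referenced Deployment.
func getDeploymentAvailableCondition(ctx context.Context, c client.Client, key types.NamespacedName) (metav1.Condition, error) {
	var dep appsv1.Deployment
	if err := c.Get(ctx, key, &dep); client.IgnoreNotFound(err) != nil {
		return metav1.Condition{}, err
	}

	cond := metav1.Condition{
		Type:    string(operatorv1alpha1.ConditionTypeAvailable),
		Status:  metav1.ConditionFalse,
		Reason:  "DeploymentUnavailable", // assumed reason value
		Message: fmt.Sprintf("Deployment %s is not available.", key),
	}

	// Treat the Deployment as available once all desired replicas report ready.
	if dep.Name != "" && dep.Spec.Replicas != nil && dep.Status.ReadyReplicas == *dep.Spec.Replicas {
		cond.Status = metav1.ConditionTrue
		cond.Reason = "ReplicasUp" // assumed reason value
		cond.Message = fmt.Sprintf("Deployment %s has all replicas ready.", key)
	}

	return cond, nil
}

// updateCondition upserts a condition into the slice; apimeta.SetStatusCondition keeps the
// existing LastTransitionTime when the condition status does not change.
func updateCondition(conditions []metav1.Condition, cond metav1.Condition) []metav1.Condition {
	apimeta.SetStatusCondition(&conditions, cond)
	return conditions
}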