From 0a8bce382b6d2010952025beb7878ac36537d538 Mon Sep 17 00:00:00 2001 From: josvaz Date: Wed, 19 Feb 2025 12:34:53 +0100 Subject: [PATCH] CLOUDP-299197: AtlasNetworkContainer CRD Controller (#2100) * Add AtlasNetworkContainer CRD Signed-off-by: jose.vazquez * update rebased helm chart * Add Network Container controller * Add deletion protection on network containers --------- Signed-off-by: jose.vazquez --- .github/workflows/test-e2e.yml | 1 + .mockery.yaml | 1 + PROJECT | 9 + api/condition.go | 5 + api/v1/atlasnetworkcontainer_types.go | 114 +++ api/v1/atlasnetworkcontainer_types_test.go | 186 ++++ api/v1/project_reference_cel_test.go | 14 +- api/v1/status/atlasnetworkcontainer.go | 20 + api/v1/status/atlasnetworkpeering.go | 25 + api/v1/status/zz_generated.deepcopy.go | 61 ++ api/v1/zz_generated.deepcopy.go | 91 ++ ...as.mongodb.com_atlasnetworkcontainers.yaml | 197 ++++ config/crd/kustomization.yaml | 1 + .../atlasnetworkcontainer_editor_role.yaml | 27 + .../atlasnetworkcontainer_viewer_role.yaml | 23 + config/rbac/clusterwide/role.yaml | 3 + config/rbac/kustomization.yaml | 3 +- .../atlas_v1_atlasnetworkcontainer.yaml | 15 + config/samples/kustomization.yaml | 1 + helm-charts | 2 +- .../atlasipaccesslist/state_test.go | 4 +- .../atlasnetworkcontainer_controller.go | 126 +++ .../atlasnetworkcontainer_controller_test.go | 110 +++ .../controller/atlasnetworkcontainer/state.go | 101 ++ .../atlasnetworkcontainer/state_test.go | 871 ++++++++++++++++++ .../atlasnetworkcontainer/transitions.go | 112 +++ .../atlasprivateendpoint_controller_test.go | 2 +- internal/controller/reconciler/reconciler.go | 45 + .../controller/reconciler/reconciler_test.go | 5 +- internal/controller/registry.go | 2 + internal/controller/workflow/reason.go | 14 + .../atlasnetworkcontainercredentials.go | 24 + .../indexer/atlasnetworkcontainerprojects.go | 30 + .../atlasnetworkcontainerprojects_test.go | 46 + internal/indexer/atlasreferredprojects.go | 30 + internal/indexer/indexer.go | 2 + internal/indexer/localcredentials_test.go | 54 ++ .../translation/network_container_service.go | 327 +++++++ .../networkcontainer/conversion.go | 75 ++ .../networkcontainer/conversion_test.go | 37 + .../networkcontainer/networkcontainer.go | 117 +++ .../networkcontainer/networkcontainer_test.go | 621 +++++++++++++ test/e2e/network_container_controller_test.go | 251 +++++ test/helper/cel/cel.go | 11 + test/helper/e2e/actions/steps.go | 19 + 45 files changed, 3822 insertions(+), 13 deletions(-) create mode 100644 api/v1/atlasnetworkcontainer_types.go create mode 100644 api/v1/atlasnetworkcontainer_types_test.go create mode 100644 api/v1/status/atlasnetworkcontainer.go create mode 100644 api/v1/status/atlasnetworkpeering.go create mode 100644 config/crd/bases/atlas.mongodb.com_atlasnetworkcontainers.yaml create mode 100644 config/rbac/atlasnetworkcontainer_editor_role.yaml create mode 100644 config/rbac/atlasnetworkcontainer_viewer_role.yaml create mode 100644 config/samples/atlas_v1_atlasnetworkcontainer.yaml create mode 100644 internal/controller/atlasnetworkcontainer/atlasnetworkcontainer_controller.go create mode 100644 internal/controller/atlasnetworkcontainer/atlasnetworkcontainer_controller_test.go create mode 100644 internal/controller/atlasnetworkcontainer/state.go create mode 100644 internal/controller/atlasnetworkcontainer/state_test.go create mode 100644 internal/controller/atlasnetworkcontainer/transitions.go create mode 100644 internal/indexer/atlasnetworkcontainercredentials.go create mode 100644 
internal/indexer/atlasnetworkcontainerprojects.go create mode 100644 internal/indexer/atlasnetworkcontainerprojects_test.go create mode 100644 internal/mocks/translation/network_container_service.go create mode 100644 internal/translation/networkcontainer/conversion.go create mode 100644 internal/translation/networkcontainer/conversion_test.go create mode 100644 internal/translation/networkcontainer/networkcontainer.go create mode 100644 internal/translation/networkcontainer/networkcontainer_test.go create mode 100644 test/e2e/network_container_controller_test.go diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index 5c8a7c7ffb..80148f4424 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -185,6 +185,7 @@ jobs: "flex", "ip-access-list", "dry-run", + "networkcontainer-controller", ] steps: - name: Get repo files from cache diff --git a/.mockery.yaml b/.mockery.yaml index af49f0f615..9e1d37f88f 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -16,3 +16,4 @@ packages: github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/privateendpoint: github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/maintenancewindow: github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/encryptionatrest: + github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/networkcontainer: diff --git a/PROJECT b/PROJECT index 994554d2ac..0e847c6f1a 100644 --- a/PROJECT +++ b/PROJECT @@ -126,4 +126,13 @@ resources: kind: AtlasIPAccessList path: github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1 version: v1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: mongodb.com + group: atlas + kind: AtlasNetworkContainer + path: github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1 + version: v1 version: "3" diff --git a/api/condition.go b/api/condition.go index 4ba3089d8c..f3a33219f5 100644 --- a/api/condition.go +++ b/api/condition.go @@ -106,6 +106,11 @@ const ( IPAccessListReady ConditionType = "IPAccessListReady" ) +// Atlas Network Container condition types +const ( + NetworkContainerReady ConditionType = "NetworkContainerReady" +) + // Generic condition type const ( ResourceVersionStatus ConditionType = "ResourceVersionIsValid" diff --git a/api/v1/atlasnetworkcontainer_types.go b/api/v1/atlasnetworkcontainer_types.go new file mode 100644 index 0000000000..f38497ce80 --- /dev/null +++ b/api/v1/atlasnetworkcontainer_types.go @@ -0,0 +1,114 @@ +/* +Copyright 2025 MongoDB. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/api"
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/status"
+)
+
+func init() {
+	SchemeBuilder.Register(&AtlasNetworkContainer{}, &AtlasNetworkContainerList{})
+}
+
+// AtlasNetworkContainer is the Schema for the AtlasNetworkContainer API
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].status`
+// +kubebuilder:printcolumn:name="Provider",type=string,JSONPath=`.spec.provider`
+// +kubebuilder:printcolumn:name="Id",type=string,JSONPath=`.status.id`
+// +kubebuilder:subresource:status
+// +groupName:=atlas.mongodb.com
+// +kubebuilder:resource:categories=atlas,shortName=anc
+type AtlasNetworkContainer struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   AtlasNetworkContainerSpec          `json:"spec,omitempty"`
+	Status status.AtlasNetworkContainerStatus `json:"status,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+
+// AtlasNetworkContainerList contains a list of AtlasNetworkContainer
+type AtlasNetworkContainerList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []AtlasNetworkContainer `json:"items"`
+}
+
+// +kubebuilder:validation:XValidation:rule="(has(self.externalProjectRef) && !has(self.projectRef)) || (!has(self.externalProjectRef) && has(self.projectRef))",message="must define only one project reference through externalProjectRef or projectRef"
+// +kubebuilder:validation:XValidation:rule="(has(self.externalProjectRef) && has(self.connectionSecret)) || !has(self.externalProjectRef)",message="must define a local connection secret when referencing an external project"
+// +kubebuilder:validation:XValidation:rule="(self.provider == 'GCP' && !has(self.region)) || (self.provider != 'GCP')",message="must not set region for GCP containers"
+// +kubebuilder:validation:XValidation:rule="((self.provider == 'AWS' || self.provider == 'AZURE') && has(self.region)) || (self.provider == 'GCP')",message="must set region for AWS and Azure containers"
+// +kubebuilder:validation:XValidation:rule="(self.id == oldSelf.id) || (!has(self.id) && !has(oldSelf.id))",message="id is immutable"
+// +kubebuilder:validation:XValidation:rule="(self.region == oldSelf.region) || (!has(self.region) && !has(oldSelf.region))",message="region is immutable"
+
+// AtlasNetworkContainerSpec defines the desired state of an AtlasNetworkContainer
+type AtlasNetworkContainerSpec struct {
+	ProjectDualReference `json:",inline"`
+
+	// Provider is the name of the cloud provider hosting the network container
+	// +kubebuilder:validation:Enum=AWS;GCP;AZURE
+	// +kubebuilder:validation:Required
+	Provider string `json:"provider"`
+
+	AtlasNetworkContainerConfig `json:",inline"`
+}
+
+// AtlasNetworkContainerConfig defines the Atlas specifics of the desired state of a Network Container
+type AtlasNetworkContainerConfig struct {
+	// ID is the container identifier of an already existing network container to be managed by the operator.
+	// This field can be used in conjunction with cidrBlock to update the cidrBlock of an existing container.
+	// This field is immutable.
+	// +optional
+	ID string `json:"id,omitempty"`
+
+	// Region is the provider region name of the Atlas network peering container, in Atlas region format.
+	// It is required for AWS and Azure, but not used by GCP.
+	// This field is immutable, as Atlas does not allow network container region changes.
+	// +optional
+	Region string `json:"region,omitempty"`
+
+	// CIDRBlock is the Atlas CIDR block. It needs to be set if ID is not set.
+	// +optional
+	CIDRBlock string `json:"cidrBlock"`
+}
+
+func (np *AtlasNetworkContainer) GetStatus() api.Status {
+	return np.Status
+}
+
+func (np *AtlasNetworkContainer) Credentials() *api.LocalObjectReference {
+	return np.Spec.ConnectionSecret
+}
+
+func (np *AtlasNetworkContainer) ProjectDualRef() *ProjectDualReference {
+	return &np.Spec.ProjectDualReference
+}
+
+func (np *AtlasNetworkContainer) UpdateStatus(conditions []api.Condition, options ...api.Option) {
+	np.Status.Conditions = conditions
+	np.Status.ObservedGeneration = np.ObjectMeta.Generation
+
+	for _, o := range options {
+		v := o.(status.AtlasNetworkContainerStatusOption)
+		v(&np.Status)
+	}
+}
diff --git a/api/v1/atlasnetworkcontainer_types_test.go b/api/v1/atlasnetworkcontainer_types_test.go
new file mode 100644
index 0000000000..faf86a2e43
--- /dev/null
+++ b/api/v1/atlasnetworkcontainer_types_test.go
@@ -0,0 +1,186 @@
+package v1
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"k8s.io/apimachinery/pkg/runtime"
+
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/common"
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/provider"
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/cel"
+)
+
+func TestContainerCELChecks(t *testing.T) {
+	for _, tc := range []struct {
+		title          string
+		old, obj       *AtlasNetworkContainer
+		expectedErrors []string
+	}{
+		{
+			title: "GCP fails with a region",
+			obj: &AtlasNetworkContainer{
+				Spec: AtlasNetworkContainerSpec{
+					Provider: string(provider.ProviderGCP),
+					AtlasNetworkContainerConfig: AtlasNetworkContainerConfig{
+						Region: "some-region",
+					},
+				},
+			},
+			expectedErrors: []string{"spec: Invalid value: \"object\": must not set region for GCP containers"},
+		},
+		{
+			title: "GCP succeeds without a region",
+			obj: &AtlasNetworkContainer{
+				Spec: AtlasNetworkContainerSpec{
+					Provider: string(provider.ProviderGCP),
+				},
+			},
+		},
+		{
+			title: "AWS succeeds with a region",
+			obj: &AtlasNetworkContainer{
+				Spec: AtlasNetworkContainerSpec{
+					Provider: string(provider.ProviderAWS),
+					AtlasNetworkContainerConfig: AtlasNetworkContainerConfig{
+						Region: "some-region",
+					},
+				},
+			},
+		},
+		{
+			title: "Azure succeeds with a region",
+			obj: &AtlasNetworkContainer{
+				Spec: AtlasNetworkContainerSpec{
+					Provider: string(provider.ProviderAzure),
+					AtlasNetworkContainerConfig: AtlasNetworkContainerConfig{
+						Region: "some-region",
+					},
+				},
+			},
+		},
+		{
+			title: "AWS fails without a region",
+			obj: &AtlasNetworkContainer{
+				Spec: AtlasNetworkContainerSpec{
+					Provider: string(provider.ProviderAWS),
+				},
+			},
+			expectedErrors: []string{"spec: Invalid value: \"object\": must set region for AWS and Azure containers"},
+		},
+		{
+			title: "Azure fails without a region",
+			obj: &AtlasNetworkContainer{
+				Spec: AtlasNetworkContainerSpec{
+					Provider: string(provider.ProviderAzure),
+				},
+			},
+			expectedErrors: []string{"spec: Invalid value: \"object\": must set region for AWS and Azure containers"},
+		},
+		{
+			title: "ID cannot be changed",
+			old: &AtlasNetworkContainer{
+				Spec: AtlasNetworkContainerSpec{
+					Provider: 
string(provider.ProviderGCP), + AtlasNetworkContainerConfig: AtlasNetworkContainerConfig{ + ID: "old-id", + }, + }, + }, + obj: &AtlasNetworkContainer{ + Spec: AtlasNetworkContainerSpec{ + Provider: string(provider.ProviderGCP), + AtlasNetworkContainerConfig: AtlasNetworkContainerConfig{ + ID: "new-id", + }, + }, + }, + expectedErrors: []string{"spec: Invalid value: \"object\": id is immutable"}, + }, + { + title: "ID can be unset", + old: &AtlasNetworkContainer{ + Spec: AtlasNetworkContainerSpec{ + Provider: string(provider.ProviderGCP), + }, + }, + obj: &AtlasNetworkContainer{ + Spec: AtlasNetworkContainerSpec{ + Provider: string(provider.ProviderGCP), + }, + }, + }, + { + title: "ID can be set", + obj: &AtlasNetworkContainer{ + Spec: AtlasNetworkContainerSpec{ + Provider: string(provider.ProviderGCP), + AtlasNetworkContainerConfig: AtlasNetworkContainerConfig{ + ID: "new-id", + }, + }, + }, + }, + { + title: "Region cannot be changed", + old: &AtlasNetworkContainer{ + Spec: AtlasNetworkContainerSpec{ + Provider: string(provider.ProviderAWS), + AtlasNetworkContainerConfig: AtlasNetworkContainerConfig{ + Region: "old-region", + }, + }, + }, + obj: &AtlasNetworkContainer{ + Spec: AtlasNetworkContainerSpec{ + Provider: string(provider.ProviderAWS), + AtlasNetworkContainerConfig: AtlasNetworkContainerConfig{ + Region: "new-region", + }, + }, + }, + expectedErrors: []string{"spec: Invalid value: \"object\": region is immutable"}, + }, + { + title: "Region can be unset (for GCP)", + old: &AtlasNetworkContainer{ + Spec: AtlasNetworkContainerSpec{ + Provider: string(provider.ProviderGCP), + }, + }, + obj: &AtlasNetworkContainer{ + Spec: AtlasNetworkContainerSpec{ + Provider: string(provider.ProviderGCP), + }, + }, + }, + { + title: "Region can be set", + obj: &AtlasNetworkContainer{ + Spec: AtlasNetworkContainerSpec{ + Provider: string(provider.ProviderAWS), + AtlasNetworkContainerConfig: AtlasNetworkContainerConfig{ + Region: "new-region", + }, + }, + }, + }, + } { + t.Run(tc.title, func(t *testing.T) { + // inject a project to avoid other CEL validations being hit + tc.obj.Spec.ProjectRef = &common.ResourceRefNamespaced{Name: "some-project"} + unstructuredOldObject, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&tc.old) + require.NoError(t, err) + unstructuredObject, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&tc.obj) + require.NoError(t, err) + + crdPath := "../../config/crd/bases/atlas.mongodb.com_atlasnetworkcontainers.yaml" + validator, err := cel.VersionValidatorFromFile(t, crdPath, "v1") + assert.NoError(t, err) + errs := validator(unstructuredObject, unstructuredOldObject) + + require.Equal(t, tc.expectedErrors, cel.ErrorListAsStrings(errs)) + }) + } +} diff --git a/api/v1/project_reference_cel_test.go b/api/v1/project_reference_cel_test.go index 3707b54069..1b366593ab 100644 --- a/api/v1/project_reference_cel_test.go +++ b/api/v1/project_reference_cel_test.go @@ -47,6 +47,14 @@ var dualRefCRDs = []struct { obj: &AtlasIPAccessList{}, filename: "atlas.mongodb.com_atlasipaccesslists.yaml", }, + { + obj: &AtlasNetworkContainer{ + Spec: AtlasNetworkContainerSpec{ + Provider: "GCP", // Avoid triggering container specific validations + }, + }, + filename: "atlas.mongodb.com_atlasnetworkcontainers.yaml", + }, } var testCases = []struct { @@ -157,11 +165,7 @@ func TestProjectDualReferenceCELValidations(t *testing.T) { assert.NoError(t, err) errs := validator(unstructuredObject, unstructuredOldObject) - require.Equal(t, len(tc.expectedErrors), len(errs)) - - for i, 
err := range errs {
-				assert.Equal(t, tc.expectedErrors[i], err.Error())
-			}
+			require.Equal(t, tc.expectedErrors, cel.ErrorListAsStrings(errs))
 		})
 	}
 }
diff --git a/api/v1/status/atlasnetworkcontainer.go b/api/v1/status/atlasnetworkcontainer.go
new file mode 100644
index 0000000000..4ec6a4607b
--- /dev/null
+++ b/api/v1/status/atlasnetworkcontainer.go
@@ -0,0 +1,20 @@
+package status
+
+import "github.com/mongodb/mongodb-atlas-kubernetes/v2/api"
+
+// AtlasNetworkContainerStatus is a status for the AtlasNetworkContainer Custom resource.
+// Not the one included in the AtlasProject
+type AtlasNetworkContainerStatus struct {
+	api.Common `json:",inline"`
+
+	// ID records the identifier of the container in Atlas
+	ID string `json:"id,omitempty"`
+
+	// Provisioned is true when clusters have been deployed to the container before
+	// the last reconciliation
+	Provisioned bool `json:"provisioned,omitempty"`
+}
+
+// +kubebuilder:object:generate=false
+
+type AtlasNetworkContainerStatusOption func(s *AtlasNetworkContainerStatus)
diff --git a/api/v1/status/atlasnetworkpeering.go b/api/v1/status/atlasnetworkpeering.go
new file mode 100644
index 0000000000..63114d67d2
--- /dev/null
+++ b/api/v1/status/atlasnetworkpeering.go
@@ -0,0 +1,25 @@
+package status
+
+// AWSContainerStatus contains AWS only related status information
+type AWSContainerStatus struct {
+	// VpcID is AWS VPC id on the Atlas side
+	VpcID string `json:"vpcId,omitempty"`
+}
+
+// AzureContainerStatus contains Azure only related status information
+type AzureContainerStatus struct {
+	// AzureSubscriptionID is Azure Subscription id on the Atlas side
+	AzureSubscriptionID string `json:"azureSubscriptionId,omitempty"`
+
+	// VnetName is Azure network on the Atlas side
+	VnetName string `json:"vNetName,omitempty"`
+}
+
+// GCPContainerStatus contains GCP only related status information
+type GCPContainerStatus struct {
+	// GCPProjectID is GCP project on the Atlas side
+	GCPProjectID string `json:"gcpProjectID,omitempty"`
+
+	// NetworkName is GCP network on the Atlas side
+	NetworkName string `json:"networkName,omitempty"`
+}
diff --git a/api/v1/status/zz_generated.deepcopy.go b/api/v1/status/zz_generated.deepcopy.go
index 21be0ea4df..70389d39a2 100644
--- a/api/v1/status/zz_generated.deepcopy.go
+++ b/api/v1/status/zz_generated.deepcopy.go
@@ -18,6 +18,21 @@ import (
 	"github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/project"
 )
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSContainerStatus) DeepCopyInto(out *AWSContainerStatus) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSContainerStatus.
+func (in *AWSContainerStatus) DeepCopy() *AWSContainerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(AWSContainerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *AlertConfiguration) DeepCopyInto(out *AlertConfiguration) {
 	*out = *in
@@ -182,6 +197,22 @@ func (in *AtlasIPAccessListStatus) DeepCopy() *AtlasIPAccessListStatus {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AtlasNetworkContainerStatus) DeepCopyInto(out *AtlasNetworkContainerStatus) { + *out = *in + in.Common.DeepCopyInto(&out.Common) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AtlasNetworkContainerStatus. +func (in *AtlasNetworkContainerStatus) DeepCopy() *AtlasNetworkContainerStatus { + if in == nil { + return nil + } + out := new(AtlasNetworkContainerStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AtlasNetworkPeer) DeepCopyInto(out *AtlasNetworkPeer) { *out = *in @@ -355,6 +386,21 @@ func (in *AtlasStreamInstanceStatus) DeepCopy() *AtlasStreamInstanceStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureContainerStatus) DeepCopyInto(out *AzureContainerStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureContainerStatus. +func (in *AzureContainerStatus) DeepCopy() *AzureContainerStatus { + if in == nil { + return nil + } + out := new(AzureContainerStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BackupCompliancePolicyStatus) DeepCopyInto(out *BackupCompliancePolicyStatus) { *out = *in @@ -588,6 +634,21 @@ func (in *FeatureUsage) DeepCopy() *FeatureUsage { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPContainerStatus) DeepCopyInto(out *GCPContainerStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPContainerStatus. +func (in *GCPContainerStatus) DeepCopy() *GCPContainerStatus { + if in == nil { + return nil + } + out := new(GCPContainerStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GCPEndpoint) DeepCopyInto(out *GCPEndpoint) { *out = *in diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 7b8ea02724..44cbc2da99 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -1106,6 +1106,97 @@ func (in *AtlasIPAccessListSpec) DeepCopy() *AtlasIPAccessListSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AtlasNetworkContainer) DeepCopyInto(out *AtlasNetworkContainer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AtlasNetworkContainer. +func (in *AtlasNetworkContainer) DeepCopy() *AtlasNetworkContainer { + if in == nil { + return nil + } + out := new(AtlasNetworkContainer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AtlasNetworkContainer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AtlasNetworkContainerConfig) DeepCopyInto(out *AtlasNetworkContainerConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AtlasNetworkContainerConfig. +func (in *AtlasNetworkContainerConfig) DeepCopy() *AtlasNetworkContainerConfig { + if in == nil { + return nil + } + out := new(AtlasNetworkContainerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AtlasNetworkContainerList) DeepCopyInto(out *AtlasNetworkContainerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AtlasNetworkContainer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AtlasNetworkContainerList. +func (in *AtlasNetworkContainerList) DeepCopy() *AtlasNetworkContainerList { + if in == nil { + return nil + } + out := new(AtlasNetworkContainerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AtlasNetworkContainerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AtlasNetworkContainerSpec) DeepCopyInto(out *AtlasNetworkContainerSpec) { + *out = *in + in.ProjectDualReference.DeepCopyInto(&out.ProjectDualReference) + out.AtlasNetworkContainerConfig = in.AtlasNetworkContainerConfig +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AtlasNetworkContainerSpec. +func (in *AtlasNetworkContainerSpec) DeepCopy() *AtlasNetworkContainerSpec { + if in == nil { + return nil + } + out := new(AtlasNetworkContainerSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AtlasNetworkPeeringConfig) DeepCopyInto(out *AtlasNetworkPeeringConfig) { *out = *in diff --git a/config/crd/bases/atlas.mongodb.com_atlasnetworkcontainers.yaml b/config/crd/bases/atlas.mongodb.com_atlasnetworkcontainers.yaml new file mode 100644 index 0000000000..1c17e342f9 --- /dev/null +++ b/config/crd/bases/atlas.mongodb.com_atlasnetworkcontainers.yaml @@ -0,0 +1,197 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: atlasnetworkcontainers.atlas.mongodb.com +spec: + group: atlas.mongodb.com + names: + categories: + - atlas + kind: AtlasNetworkContainer + listKind: AtlasNetworkContainerList + plural: atlasnetworkcontainers + shortNames: + - anc + singular: atlasnetworkcontainer + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .spec.provider + name: Provider + type: string + - jsonPath: .status.id + name: Id + type: string + name: v1 + schema: + openAPIV3Schema: + description: AtlasNetworkContainer is the Schema for the AtlasNetworkContainer + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: AtlasNetworkContainerSpec defines the desired state of an
+              AtlasNetworkContainer
+            properties:
+              cidrBlock:
+                description: CIDRBlock is the Atlas CIDR block. It needs to be set
+                  if ID is not set.
+                type: string
+              connectionSecret:
+                description: Name of the secret containing Atlas API private and public
+                  keys
+                properties:
+                  name:
+                    description: |-
+                      Name of the resource being referred to
+                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                    type: string
+                required:
+                - name
+                type: object
+              externalProjectRef:
+                description: |-
+                  "externalProjectRef" holds the parent Atlas project ID.
+                  Mutually exclusive with the "projectRef" field
+                properties:
+                  id:
+                    description: ID is the Atlas project ID
+                    type: string
+                required:
+                - id
+                type: object
+              id:
+                description: |-
+                  ID is the container identifier of an already existing network container to be managed by the operator.
+                  This field can be used in conjunction with cidrBlock to update the cidrBlock of an existing container.
+                  This field is immutable.
+                type: string
+              projectRef:
+                description: |-
+                  "projectRef" is a reference to the parent AtlasProject resource.
+                  Mutually exclusive with the "externalProjectRef" field
+                properties:
+                  name:
+                    description: Name is the name of the Kubernetes Resource
+                    type: string
+                  namespace:
+                    description: Namespace is the namespace of the Kubernetes Resource
+                    type: string
+                required:
+                - name
+                type: object
+              provider:
+                description: Provider is the name of the cloud provider hosting the
+                  network container
+                enum:
+                - AWS
+                - GCP
+                - AZURE
+                type: string
+              region:
+                description: |-
+                  Region is the provider region name of the Atlas network peering container, in Atlas region format.
+                  It is required for AWS and Azure, but not used by GCP.
+                  This field is immutable, as Atlas does not allow network container region changes.
+                type: string
+            required:
+            - provider
+            type: object
+            x-kubernetes-validations:
+            - message: must define only one project reference through externalProjectRef
+                or projectRef
+              rule: (has(self.externalProjectRef) && !has(self.projectRef)) || (!has(self.externalProjectRef)
+                && has(self.projectRef))
+            - message: must define a local connection secret when referencing an external
+                project
+              rule: (has(self.externalProjectRef) && has(self.connectionSecret)) ||
+                !has(self.externalProjectRef)
+            - message: must not set region for GCP containers
+              rule: (self.provider == 'GCP' && !has(self.region)) || (self.provider
+                != 'GCP')
+            - message: must set region for AWS and Azure containers
+              rule: ((self.provider == 'AWS' || self.provider == 'AZURE') && has(self.region))
+                || (self.provider == 'GCP')
+            - message: id is immutable
+              rule: (self.id == oldSelf.id) || (!has(self.id) && !has(oldSelf.id))
+            - message: region is immutable
+              rule: (self.region == oldSelf.region) || (!has(self.region) && !has(oldSelf.region))
+          status:
+            description: |-
+              AtlasNetworkContainerStatus is a status for the AtlasNetworkContainer Custom resource.
+              Not the one included in the AtlasProject
+            properties:
+              conditions:
+                description: Conditions is the list of statuses showing the current
+                  state of the Atlas Custom Resource
+                items:
+                  description: Condition describes the state of an Atlas Custom Resource
+                    at a certain point.
+                  properties:
+                    lastTransitionTime:
+                      description: Last time the condition transitioned from one status
+                        to another.
+                      format: date-time
+                      type: string
+                    message:
+                      description: A human readable message indicating details about
+                        the transition.
+                      type: string
+                    reason:
+                      description: The reason for the condition's last transition.
+                      type: string
+                    status:
+                      description: Status of the condition, one of True, False, Unknown.
+                      type: string
+                    type:
+                      description: Type of Atlas Custom Resource condition.
+                      type: string
+                  required:
+                  - status
+                  - type
+                  type: object
+                type: array
+              id:
+                description: ID records the identifier of the container in Atlas
+                type: string
+              observedGeneration:
+                description: |-
+                  ObservedGeneration indicates the generation of the resource specification that the Atlas Operator is aware of.
+                  The Atlas Operator updates this field to the 'metadata.generation' as soon as it starts reconciliation of the resource.
+                format: int64
+                type: integer
+              provisioned:
+                description: |-
+                  Provisioned is true when clusters have been deployed to the container before
+                  the last reconciliation
+                type: boolean
+            required:
+            - conditions
+            type: object
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml
index 911c4e25d3..ca157a1f8a 100644
--- a/config/crd/kustomization.yaml
+++ b/config/crd/kustomization.yaml
@@ -17,5 +17,6 @@ resources:
   - bases/atlas.mongodb.com_atlasprivateendpoints.yaml
   - bases/atlas.mongodb.com_atlascustomroles.yaml
   - bases/atlas.mongodb.com_atlasipaccesslists.yaml
+  - bases/atlas.mongodb.com_atlasnetworkcontainers.yaml
 configurations:
   - kustomizeconfig.yaml
diff --git a/config/rbac/atlasnetworkcontainer_editor_role.yaml b/config/rbac/atlasnetworkcontainer_editor_role.yaml
new file mode 100644
index 0000000000..37922896de
--- /dev/null
+++ b/config/rbac/atlasnetworkcontainer_editor_role.yaml
@@ -0,0 +1,27 @@
+# permissions for end users to edit atlasnetworkcontainers.
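+# A matching read-only role is defined in atlasnetworkcontainer_viewer_role.yaml.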
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: mongodb-atlas-kubernetes + app.kubernetes.io/managed-by: kustomize + name: atlasnetworkcontainer-editor-role +rules: +- apiGroups: + - atlas.mongodb.com + resources: + - atlasnetworkcontainers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - atlas.mongodb.com + resources: + - atlasnetworkcontainers/status + verbs: + - get diff --git a/config/rbac/atlasnetworkcontainer_viewer_role.yaml b/config/rbac/atlasnetworkcontainer_viewer_role.yaml new file mode 100644 index 0000000000..3ba47c0090 --- /dev/null +++ b/config/rbac/atlasnetworkcontainer_viewer_role.yaml @@ -0,0 +1,23 @@ +# permissions for end users to view atlasnetworkcontainers. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: mongodb-atlas-kubernetes + app.kubernetes.io/managed-by: kustomize + name: atlasnetworkcontainer-viewer-role +rules: +- apiGroups: + - atlas.mongodb.com + resources: + - atlasnetworkcontainers + verbs: + - get + - list + - watch +- apiGroups: + - atlas.mongodb.com + resources: + - atlasnetworkcontainers/status + verbs: + - get diff --git a/config/rbac/clusterwide/role.yaml b/config/rbac/clusterwide/role.yaml index 650a73bb60..ddbdf1007f 100644 --- a/config/rbac/clusterwide/role.yaml +++ b/config/rbac/clusterwide/role.yaml @@ -35,6 +35,7 @@ rules: - atlasdeployments - atlasfederatedauths - atlasipaccesslists + - atlasnetworkcontainers - atlasprivateendpoints - atlasprojects - atlassearchindexconfigs @@ -61,6 +62,7 @@ rules: - atlasdeployments/status - atlasfederatedauths/status - atlasipaccesslists/status + - atlasnetworkcontainers/status - atlasprivateendpoints/status - atlasprojects/status - atlassearchindexconfigs/status @@ -75,5 +77,6 @@ rules: - atlas.mongodb.com resources: - atlasipaccesslists/finalizers + - atlasnetworkcontainers/finalizers verbs: - update diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index 4694142350..dd79bd08fc 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -22,6 +22,8 @@ resources: # default, aiding admins in cluster management. Those roles are # not used by the Project itself. You can comment the following lines # if you do not want those helpers be installed with your Project. 
+- atlasnetworkcontainer_editor_role.yaml
+- atlasnetworkcontainer_viewer_role.yaml
 - atlasipaccesslist_editor_role.yaml
 - atlasipaccesslist_viewer_role.yaml
 - atlascustomrole_editor_role.yaml
@@ -48,4 +50,3 @@ resources:
 - atlasproject_viewer_role.yaml
 - atlasdeployment_editor_role.yaml
 - atlasdeployment_viewer_role.yaml
-
diff --git a/config/samples/atlas_v1_atlasnetworkcontainer.yaml b/config/samples/atlas_v1_atlasnetworkcontainer.yaml
new file mode 100644
index 0000000000..8502c0597f
--- /dev/null
+++ b/config/samples/atlas_v1_atlasnetworkcontainer.yaml
@@ -0,0 +1,15 @@
+apiVersion: atlas.mongodb.com/v1
+kind: AtlasNetworkContainer
+metadata:
+  labels:
+    app.kubernetes.io/name: mongodb-atlas-kubernetes
+    app.kubernetes.io/managed-by: kustomize
+  name: atlasnetworkcontainer-sample
+spec:
+  externalProjectRef:
+    id: 66e2f2b621571b7e69a89b66
+  connectionSecret:
+    name: atlas-connection-secret
+  provider: AWS
+  cidrBlock: 10.11.0.0/16
+  region: US_EAST_1
diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml
index c02899cacd..d7cdad3549 100644
--- a/config/samples/kustomization.yaml
+++ b/config/samples/kustomization.yaml
@@ -9,4 +9,5 @@ resources:
   - atlas_v1_atlasbackupschedule.yaml
   - atlas_v1_atlasteam.yaml
   - atlas_v1_atlasipaccesslist.yaml
+  - atlas_v1_atlasnetworkcontainer.yaml
 # +kubebuilder:scaffold:manifestskustomizesamples
diff --git a/helm-charts b/helm-charts
index e340dce7d9..178ec1afd6 160000
--- a/helm-charts
+++ b/helm-charts
@@ -1 +1 @@
-Subproject commit e340dce7d91f567724e220831ce8aa7ea3e89b91
+Subproject commit 178ec1afd67ae55fa652e55f68d72cc22d9a1977
diff --git a/internal/controller/atlasipaccesslist/state_test.go b/internal/controller/atlasipaccesslist/state_test.go
index 8833efdb67..f43d29997e 100644
--- a/internal/controller/atlasipaccesslist/state_test.go
+++ b/internal/controller/atlasipaccesslist/state_test.go
@@ -152,7 +152,7 @@ func TestHandleCustomResource(t *testing.T) {
 			expectedConditions: []api.Condition{
 				api.FalseCondition(api.ReadyType).
 					WithReason(string(workflow.AtlasAPIAccessNotConfigured)).
-					WithMessageRegexp("can not fetch AtlasProject: atlasprojects.atlas.mongodb.com \"my-no-existing-project\" not found"),
+					WithMessageRegexp("missing Kubernetes Atlas Project\natlasprojects.atlas.mongodb.com \"my-no-existing-project\" not found"),
 				api.TrueCondition(api.ResourceVersionStatus),
 			},
 		},
@@ -225,7 +225,7 @@ func TestHandleCustomResource(t *testing.T) {
 			expectedConditions: []api.Condition{
 				api.FalseCondition(api.ReadyType).
 					WithReason(string(workflow.AtlasAPIAccessNotConfigured)).
-					WithMessageRegexp("failed to query Kubernetes: failed to get Project from Kubernetes: can not fetch AtlasProject: atlasprojects.atlas.mongodb.com \"my-no-existing-project\" not found"),
+					WithMessageRegexp("failed to query Kubernetes: failed to get Project from Kubernetes: missing Kubernetes Atlas Project\natlasprojects.atlas.mongodb.com \"my-no-existing-project\" not found"),
 				api.TrueCondition(api.ResourceVersionStatus),
 			},
 		},
diff --git a/internal/controller/atlasnetworkcontainer/atlasnetworkcontainer_controller.go b/internal/controller/atlasnetworkcontainer/atlasnetworkcontainer_controller.go
new file mode 100644
index 0000000000..ce58ffadfb
--- /dev/null
+++ b/internal/controller/atlasnetworkcontainer/atlasnetworkcontainer_controller.go
@@ -0,0 +1,126 @@
+/*
+Copyright (C) MongoDB, Inc. 2020-present.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may
+not use this file except in compliance with the License. 
You may obtain
+a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+*/
+
+package atlasnetworkcontainer
+
+import (
+	"context"
+	"time"
+
+	"go.uber.org/zap"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/tools/record"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/builder"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/cluster"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1"
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlas"
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/customresource"
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/reconciler"
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/indexer"
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/pointer"
+)
+
+// AtlasNetworkContainerReconciler reconciles an AtlasNetworkContainer object
+type AtlasNetworkContainerReconciler struct {
+	reconciler.AtlasReconciler
+	AtlasProvider            atlas.Provider
+	Scheme                   *runtime.Scheme
+	EventRecorder            record.EventRecorder
+	GlobalPredicates         []predicate.Predicate
+	ObjectDeletionProtection bool
+	independentSyncPeriod    time.Duration
+}
+
+// +kubebuilder:rbac:groups=atlas.mongodb.com,resources=atlasnetworkcontainers,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=atlas.mongodb.com,resources=atlasnetworkcontainers/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=atlas.mongodb.com,resources=atlasnetworkcontainers/finalizers,verbs=update
+
+// Reconcile Atlas Network Container resources
+func (r *AtlasNetworkContainerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	r.Log.Infow("-> Starting AtlasNetworkContainer reconciliation")
+
+	networkContainer := akov2.AtlasNetworkContainer{}
+	result := customresource.PrepareResource(ctx, r.Client, req, &networkContainer, r.Log)
+	if !result.IsOk() {
+		return result.ReconcileResult(), nil
+	}
+	return r.handleCustomResource(ctx, &networkContainer)
+}
+
+// For prepares the controller for its target Custom Resource: AtlasNetworkContainer
+func (r *AtlasNetworkContainerReconciler) For() (client.Object, builder.Predicates) {
+	return &akov2.AtlasNetworkContainer{}, builder.WithPredicates(r.GlobalPredicates...)
+}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *AtlasNetworkContainerReconciler) SetupWithManager(mgr ctrl.Manager, skipNameValidation bool) error {
+	return ctrl.NewControllerManagedBy(mgr).
+		For(r.For()).
+		Watches(
+			&akov2.AtlasProject{},
+			handler.EnqueueRequestsFromMapFunc(r.networkContainerForProjectMapFunc()),
+			builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}),
+		).
+		Watches(
+			&corev1.Secret{},
+			handler.EnqueueRequestsFromMapFunc(r.networkContainerForCredentialMapFunc()),
+			builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}),
+		).
+		WithOptions(controller.TypedOptions[reconcile.Request]{SkipNameValidation: pointer.MakePtr(skipNameValidation)}).
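+		// Note: skipNameValidation is typically only enabled in tests, where
+		// several reconciler instances may be registered under the same controller name.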
+ Complete(r) +} + +func (r *AtlasNetworkContainerReconciler) networkContainerForProjectMapFunc() handler.MapFunc { + return indexer.ProjectsIndexMapperFunc( + indexer.AtlasNetworkContainerByProjectIndex, + func() *akov2.AtlasNetworkContainerList { return &akov2.AtlasNetworkContainerList{} }, + indexer.NetworkContainerRequests, + r.Client, + r.Log, + ) +} + +func (r *AtlasNetworkContainerReconciler) networkContainerForCredentialMapFunc() handler.MapFunc { + return indexer.CredentialsIndexMapperFunc( + indexer.AtlasNetworkContainerCredentialsIndex, + func() *akov2.AtlasNetworkContainerList { return &akov2.AtlasNetworkContainerList{} }, + indexer.NetworkContainerRequests, + r.Client, + r.Log, + ) +} + +func NewAtlasNetworkContainerReconciler( + c cluster.Cluster, + predicates []predicate.Predicate, + atlasProvider atlas.Provider, + deletionProtection bool, + logger *zap.Logger, + independentSyncPeriod time.Duration, +) *AtlasNetworkContainerReconciler { + return &AtlasNetworkContainerReconciler{ + AtlasReconciler: reconciler.AtlasReconciler{ + Client: c.GetClient(), + Log: logger.Named("controllers").Named("AtlasNetworkContainer").Sugar(), + }, + Scheme: c.GetScheme(), + EventRecorder: c.GetEventRecorderFor("AtlasNetworkContainer"), + AtlasProvider: atlasProvider, + GlobalPredicates: predicates, + ObjectDeletionProtection: deletionProtection, + independentSyncPeriod: independentSyncPeriod, + } +} diff --git a/internal/controller/atlasnetworkcontainer/atlasnetworkcontainer_controller_test.go b/internal/controller/atlasnetworkcontainer/atlasnetworkcontainer_controller_test.go new file mode 100644 index 0000000000..524cb54475 --- /dev/null +++ b/internal/controller/atlasnetworkcontainer/atlasnetworkcontainer_controller_test.go @@ -0,0 +1,110 @@ +/* +Copyright (C) MongoDB, Inc. 2025-present. + +Licensed under the Apache License, Version 2.0 (the "License"); you may +not use this file except in compliance with the License. 
You may obtain +a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +*/ + +package atlasnetworkcontainer + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zaptest/observer" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api" + akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/status" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/customresource" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/reconciler" +) + +const ( + testProjectID = "project-id" +) + +func TestReconcile(t *testing.T) { + ctx := context.Background() + + testScheme := runtime.NewScheme() + require.NoError(t, akov2.AddToScheme(testScheme)) + + tests := map[string]struct { + request reconcile.Request + expectedResult reconcile.Result + expectedLogs []string + }{ + "failed to prepare resource": { + request: reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "default", Name: "nc0"}}, + expectedResult: reconcile.Result{}, + expectedLogs: []string{ + "-> Starting AtlasNetworkContainer reconciliation", + "Object default/nc0 doesn't exist, was it deleted after reconcile request?", + }, + }, + "prepare resource for reconciliation": { + request: reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "default", Name: "nc1"}}, + expectedResult: reconcile.Result{}, + expectedLogs: []string{ + "-> Starting AtlasNetworkContainer reconciliation", + "-> Skipping AtlasNetworkContainer reconciliation as annotation mongodb.com/atlas-reconciliation-policy=skip", + }, + }, + } + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + core, logs := observer.New(zap.DebugLevel) + fakeClient := fake.NewClientBuilder(). + WithScheme(testScheme). + WithObjects(testNetworkContainer()). 
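+				// only nc1 is seeded in the fake client, so the nc0 request
+				// exercises the "object doesn't exist" path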
+ Build() + r := &AtlasNetworkContainerReconciler{ + AtlasReconciler: reconciler.AtlasReconciler{ + Client: fakeClient, + Log: zap.New(core).Sugar(), + }, + } + result, _ := r.Reconcile(ctx, tc.request) + assert.Equal(t, tc.expectedResult, result) + assert.Equal(t, len(tc.expectedLogs), logs.Len()) + for i, log := range logs.All() { + assert.Equal(t, tc.expectedLogs[i], log.Message) + } + }) + } +} + +func testNetworkContainer() *akov2.AtlasNetworkContainer { + return &akov2.AtlasNetworkContainer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nc1", + Namespace: "default", + Annotations: map[string]string{ + customresource.ReconciliationPolicyAnnotation: customresource.ReconciliationPolicySkip, + }, + }, + Spec: akov2.AtlasNetworkContainerSpec{ + ProjectDualReference: akov2.ProjectDualReference{ + ExternalProjectRef: &akov2.ExternalProjectReference{ + ID: testProjectID, + }, + ConnectionSecret: &api.LocalObjectReference{}, + }, + Provider: "AWS", + }, + Status: status.AtlasNetworkContainerStatus{ + ID: "container-id", + Provisioned: true, + }, + } +} diff --git a/internal/controller/atlasnetworkcontainer/state.go b/internal/controller/atlasnetworkcontainer/state.go new file mode 100644 index 0000000000..c8d842cf14 --- /dev/null +++ b/internal/controller/atlasnetworkcontainer/state.go @@ -0,0 +1,101 @@ +package atlasnetworkcontainer + +import ( + "context" + "errors" + "fmt" + "reflect" + + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api" + akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/customresource" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/statushandler" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/workflow" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/networkcontainer" +) + +type reconcileRequest struct { + projectID string + networkContainer *akov2.AtlasNetworkContainer + service networkcontainer.NetworkContainerService +} + +func (r *AtlasNetworkContainerReconciler) handleCustomResource(ctx context.Context, networkContainer *akov2.AtlasNetworkContainer) (ctrl.Result, error) { + typeName := reflect.TypeOf(*networkContainer).Name() + if customresource.ReconciliationShouldBeSkipped(networkContainer) { + return r.Skip(ctx, typeName, networkContainer, networkContainer.Spec) + } + + conditions := api.InitCondition(networkContainer, api.FalseCondition(api.ReadyType)) + workflowCtx := workflow.NewContext(r.Log, conditions, ctx, networkContainer) + defer statushandler.Update(workflowCtx, r.Client, r.EventRecorder, networkContainer) + + isValid := customresource.ValidateResourceVersion(workflowCtx, networkContainer, r.Log) + if !isValid.IsOk() { + return r.Invalidate(typeName, isValid) + } + + if !r.AtlasProvider.IsResourceSupported(networkContainer) { + return r.Unsupport(workflowCtx, typeName) + } + + credentials, err := r.ResolveCredentials(ctx, networkContainer) + if err != nil { + return r.release(workflowCtx, networkContainer, err), nil + } + sdkClientSet, _, err := r.AtlasProvider.SdkClientSet(ctx, credentials, r.Log) + if err != nil { + return r.terminate(workflowCtx, networkContainer, workflow.NetworkContainerNotConfigured, err), nil + } + project, err := r.ResolveProject(ctx, sdkClientSet.SdkClient20231115008, networkContainer) + if err != nil { + return r.release(workflowCtx, networkContainer, err), nil + } + return r.handle(workflowCtx, &reconcileRequest{ + projectID: project.ID, 
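+		// the request bundles everything the state handlers need: the resolved
+		// project ID, the custom resource, and the Atlas translation service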
+ networkContainer: networkContainer, + service: networkcontainer.NewNetworkContainerServiceFromClientSet(sdkClientSet), + }) +} + +func (r *AtlasNetworkContainerReconciler) handle(workflowCtx *workflow.Context, req *reconcileRequest) (ctrl.Result, error) { + atlasContainer, err := discover(workflowCtx.Context, req) + if err != nil { + return r.terminate(workflowCtx, req.networkContainer, workflow.NetworkContainerNotConfigured, err), nil + } + inAtlas := atlasContainer != nil + deleted := req.networkContainer.DeletionTimestamp != nil + switch { + case !deleted && !inAtlas: + return r.create(workflowCtx, req) + case !deleted && inAtlas: + return r.sync(workflowCtx, req, atlasContainer) + case deleted && inAtlas: + return r.delete(workflowCtx, req, atlasContainer) + default: // deleted && !inAtlas: + return r.unmanage(workflowCtx, req.networkContainer) + } +} + +func discover(ctx context.Context, req *reconcileRequest) (*networkcontainer.NetworkContainer, error) { + id := req.networkContainer.Spec.ID + if id == "" { + id = req.networkContainer.Status.ID + } + if id != "" { + container, err := req.service.Get(ctx, req.projectID, id) + if err != nil { + return nil, fmt.Errorf("failed to get container %s from project %s: %w", id, req.projectID, err) + } + return container, nil + } + cfg := networkcontainer.NewNetworkContainerConfig( + req.networkContainer.Spec.Provider, &req.networkContainer.Spec.AtlasNetworkContainerConfig) + container, err := req.service.Find(ctx, req.projectID, cfg) + if err != nil && !errors.Is(err, networkcontainer.ErrNotFound) { + return nil, fmt.Errorf("failed to find container from project %s: %w", req.projectID, err) + } + return container, nil +} diff --git a/internal/controller/atlasnetworkcontainer/state_test.go b/internal/controller/atlasnetworkcontainer/state_test.go new file mode 100644 index 0000000000..d0619b718d --- /dev/null +++ b/internal/controller/atlasnetworkcontainer/state_test.go @@ -0,0 +1,871 @@ +package atlasnetworkcontainer + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.mongodb.org/atlas-sdk/v20231115008/admin" + "go.mongodb.org/atlas-sdk/v20231115008/mockadmin" + "go.uber.org/zap" + "go.uber.org/zap/zaptest" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api" + akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/common" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlas" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/customresource" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/reconciler" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/workflow" + atlasmock "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/mocks/atlas" + akomock "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/mocks/translation" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/pointer" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/networkcontainer" +) + +var ( + // sample error test + ErrTestFail = errors.New("failure") +) + +const ( + 
testContainerID = "container-id" +) + +func TestHandleCustomResource(t *testing.T) { + deletionTime := metav1.Now() + tests := []struct { + title string + networkContainer *akov2.AtlasNetworkContainer + provider atlas.Provider + wantResult ctrl.Result + wantFinalizers []string + wantConditions []api.Condition + }{ + { + title: "should skip reconciliation", + networkContainer: &akov2.AtlasNetworkContainer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "network-container", + Namespace: "default", + Annotations: map[string]string{ + customresource.ReconciliationPolicyAnnotation: customresource.ReconciliationPolicySkip, + }, + Finalizers: []string{customresource.FinalizerLabel}, + }, + Spec: akov2.AtlasNetworkContainerSpec{ + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: "11.10.0.0/21", + }, + }, + }, + wantResult: ctrl.Result{}, + wantFinalizers: []string{customresource.FinalizerLabel}, + }, + { + title: "should fail to validate resource", + networkContainer: &akov2.AtlasNetworkContainer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "network-container", + Namespace: "default", + Labels: map[string]string{ + customresource.ResourceVersion: "wrong", + }, + }, + Spec: akov2.AtlasNetworkContainerSpec{ + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: "11.10.0.0/21", + }, + }, + }, + wantResult: ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + wantConditions: []api.Condition{ + api.FalseCondition(api.ReadyType), + api.FalseCondition(api.ResourceVersionStatus). + WithReason(string(workflow.AtlasResourceVersionIsInvalid)). + WithMessageRegexp("wrong is not a valid semver version for label mongodb.com/atlas-resource-version"), + }, + }, + { + title: "should fail when not supported", + networkContainer: &akov2.AtlasNetworkContainer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "network-container", + Namespace: "default", + }, + Spec: akov2.AtlasNetworkContainerSpec{ + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: "11.10.0.0/21", + }, + }, + }, + provider: &atlasmock.TestProvider{ + IsSupportedFunc: func() bool { + return false + }, + }, + wantResult: ctrl.Result{}, + wantConditions: []api.Condition{ + api.FalseCondition(api.ReadyType). + WithReason(string(workflow.AtlasGovUnsupported)). + WithMessageRegexp("the AtlasNetworkContainer is not supported by Atlas for government"), + api.TrueCondition(api.ResourceVersionStatus), + }, + }, + { + title: "should fail to resolve credentials and remove finalizer", + networkContainer: &akov2.AtlasNetworkContainer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "network-container", + Namespace: "default", + Finalizers: []string{customresource.FinalizerLabel}, + }, + Spec: akov2.AtlasNetworkContainerSpec{ + ProjectDualReference: akov2.ProjectDualReference{ + ProjectRef: &common.ResourceRefNamespaced{ + Name: "my-no-existing-project", + }, + }, + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: "11.10.0.0/21", + }, + }, + }, + provider: &atlasmock.TestProvider{ + IsSupportedFunc: func() bool { + return true + }, + }, + wantResult: ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + wantFinalizers: nil, + wantConditions: []api.Condition{ + api.FalseCondition(api.ReadyType). + WithReason(string(workflow.NetworkContainerNotConfigured)). 
+ WithMessageRegexp("missing Kubernetes Atlas Project\natlasprojects.atlas.mongodb.com \"my-no-existing-project\" not found"), + api.TrueCondition(api.ResourceVersionStatus), + }, + }, + { + title: "should fail to create sdk", + networkContainer: &akov2.AtlasNetworkContainer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "network-container", + Namespace: "default", + }, + Spec: akov2.AtlasNetworkContainerSpec{ + ProjectDualReference: akov2.ProjectDualReference{ + ConnectionSecret: &api.LocalObjectReference{ + Name: "my-secret", + }, + }, + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: "11.10.0.0/21", + }, + }, + }, + provider: &atlasmock.TestProvider{ + IsSupportedFunc: func() bool { + return true + }, + SdkSetClientFunc: func(secretRef *client.ObjectKey, log *zap.SugaredLogger) (*atlas.ClientSet, string, error) { + return nil, "", errors.New("failed to create sdk") + }, + }, + wantResult: ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + wantConditions: []api.Condition{ + api.FalseCondition(api.ReadyType). + WithReason(string(workflow.NetworkContainerNotConfigured)). + WithMessageRegexp("failed to create sdk"), + api.TrueCondition(api.ResourceVersionStatus), + }, + }, + { + title: "should fail to resolve project and remove finalizers", + networkContainer: &akov2.AtlasNetworkContainer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "network-container", + Namespace: "default", + Finalizers: []string{customresource.FinalizerLabel}, + }, + Spec: akov2.AtlasNetworkContainerSpec{ + ProjectDualReference: akov2.ProjectDualReference{ + ConnectionSecret: &api.LocalObjectReference{ + Name: "my-secret", + }, + ProjectRef: &common.ResourceRefNamespaced{ + Name: "my-no-existing-project", + }, + }, + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: "11.10.0.0/21", + }, + }, + }, + provider: &atlasmock.TestProvider{ + IsSupportedFunc: func() bool { + return true + }, + SdkSetClientFunc: func(secretRef *client.ObjectKey, log *zap.SugaredLogger) (*atlas.ClientSet, string, error) { + pAPI := mockadmin.NewProjectsApi(t) + return &atlas.ClientSet{ + SdkClient20231115008: &admin.APIClient{ProjectsApi: pAPI}, + }, "", nil + }, + }, + wantResult: ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + wantFinalizers: nil, + wantConditions: []api.Condition{ + api.FalseCondition(api.ReadyType). + WithReason(string(workflow.NetworkContainerNotConfigured)). 
+ WithMessageRegexp("failed to query Kubernetes: failed to get Project from Kubernetes: missing Kubernetes Atlas Project\natlasprojects.atlas.mongodb.com \"my-no-existing-project\" not found"), + api.TrueCondition(api.ResourceVersionStatus), + }, + }, + { + title: "should handle network container with unmanage", + networkContainer: &akov2.AtlasNetworkContainer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "network-container", + Namespace: "default", + Finalizers: []string{customresource.FinalizerLabel}, + DeletionTimestamp: &deletionTime, + }, + Spec: akov2.AtlasNetworkContainerSpec{ + ProjectDualReference: akov2.ProjectDualReference{ + ConnectionSecret: &api.LocalObjectReference{ + Name: "my-secret", + }, + ProjectRef: &common.ResourceRefNamespaced{ + Name: "my-project", + }, + }, + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: "11.10.0.0/21", + }, + }, + }, + provider: &atlasmock.TestProvider{ + IsSupportedFunc: func() bool { + return true + }, + SdkSetClientFunc: func(secretRef *client.ObjectKey, log *zap.SugaredLogger) (*atlas.ClientSet, string, error) { + ncAPI := mockadmin.NewNetworkPeeringApi(t) + ncAPI.EXPECT().ListPeeringContainerByCloudProvider(mock.Anything, mock.Anything).Return( + admin.ListPeeringContainerByCloudProviderApiRequest{ApiService: ncAPI}, + ) + ncAPI.EXPECT().ListPeeringContainerByCloudProviderExecute(mock.AnythingOfType("admin.ListPeeringContainerByCloudProviderApiRequest")).Return( + &admin.PaginatedCloudProviderContainer{ + Results: &[]admin.CloudProviderContainer{}, + }, nil, nil, + ) + pAPI := mockadmin.NewProjectsApi(t) + pAPI.EXPECT().GetProjectByName(mock.Anything, mock.Anything).Return( + admin.GetProjectByNameApiRequest{ApiService: pAPI}, + ) + pAPI.EXPECT().GetProjectByNameExecute(mock.AnythingOfType("admin.GetProjectByNameApiRequest")).Return( + &admin.Group{ + Id: pointer.MakePtr(testProjectID), + }, nil, nil, + ) + return &atlas.ClientSet{ + SdkClient20231115008: &admin.APIClient{ + NetworkPeeringApi: ncAPI, + ProjectsApi: pAPI, + }, + }, "", nil + }, + }, + wantResult: ctrl.Result{}, + wantFinalizers: nil, + wantConditions: nil, + }, + } + for _, tc := range tests { + t.Run(tc.title, func(t *testing.T) { + project := &akov2.AtlasProject{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-project", + Namespace: "default", + }, + } + testScheme := runtime.NewScheme() + require.NoError(t, akov2.AddToScheme(testScheme)) + k8sClient := fake.NewClientBuilder(). + WithScheme(testScheme). + WithObjects(project, tc.networkContainer). + WithStatusSubresource(tc.networkContainer). 
+ Build() + logger := zaptest.NewLogger(t) + ctx := context.Background() + r := testReconciler(k8sClient, tc.provider, logger) + result, err := r.handleCustomResource(ctx, tc.networkContainer) + nc := getNetworkContainer(t, ctx, k8sClient, client.ObjectKeyFromObject(tc.networkContainer)) + require.NoError(t, err) + assert.Equal(t, tc.wantResult, result) + assert.Equal(t, tc.wantFinalizers, getFinalizers(nc)) + assert.Equal(t, cleanConditions(tc.wantConditions), cleanConditions(getConditions(nc))) + }) + } +} + +func TestHandle(t *testing.T) { + deletionTime := metav1.Now() + emptyProvider := &atlasmock.TestProvider{} + logger := zaptest.NewLogger(t) + for _, tc := range []struct { + title string + req *reconcileRequest + wantResult ctrl.Result + wantErr error + wantFinalizers []string + wantConditions []api.Condition + }{ + { + title: "create succeeds", + req: &reconcileRequest{ + projectID: testProjectID, + networkContainer: &akov2.AtlasNetworkContainer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-container", + }, + Spec: akov2.AtlasNetworkContainerSpec{ + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: "10.11.0.0/21", + }, + }, + }, + service: func() networkcontainer.NetworkContainerService { + ncs := akomock.NewNetworkContainerServiceMock(t) + ncs.EXPECT().Find(mock.Anything, testProjectID, mock.Anything).Return( + nil, networkcontainer.ErrNotFound, + ) + ncs.EXPECT().Create(mock.Anything, testProjectID, mock.Anything).Return( + &networkcontainer.NetworkContainer{ + NetworkContainerConfig: networkcontainer.NetworkContainerConfig{ + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: "10.11.0.0/21", + }, + }, + ID: testContainerID, + Provisioned: false, + }, + nil, + ) + return ncs + }(), + }, + wantResult: ctrl.Result{}, + wantFinalizers: []string{customresource.FinalizerLabel}, + wantConditions: []api.Condition{ + api.TrueCondition(api.NetworkContainerReady). + WithMessageRegexp(fmt.Sprintf("Network Container %s is ready", testContainerID)), + api.TrueCondition(api.ReadyType), + }, + }, + + { + title: "create fails", + req: &reconcileRequest{ + projectID: testProjectID, + networkContainer: &akov2.AtlasNetworkContainer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-container", + }, + Spec: akov2.AtlasNetworkContainerSpec{ + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: "10.11.0.0/21", + }, + }, + }, + service: func() networkcontainer.NetworkContainerService { + ncs := akomock.NewNetworkContainerServiceMock(t) + ncs.EXPECT().Find(mock.Anything, testProjectID, mock.Anything).Return( + nil, networkcontainer.ErrNotFound, + ) + ncs.EXPECT().Create(mock.Anything, testProjectID, mock.Anything).Return( + nil, + ErrTestFail, + ) + return ncs + }(), + }, + wantResult: ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + wantFinalizers: nil, + wantConditions: []api.Condition{ + api.FalseCondition(api.ReadyType).WithReason(string(workflow.NetworkContainerNotConfigured)). 
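// The create path is driven entirely by the service mock contract above:
// Find() answering ErrNotFound makes handle() treat the container as absent
// (!inAtlas), after which exactly one Create() call is expected:
//
//	ncs.EXPECT().Find(mock.Anything, testProjectID, mock.Anything).
//		Return(nil, networkcontainer.ErrNotFound)
//	ncs.EXPECT().Create(mock.Anything, testProjectID, mock.Anything).
//		Return(&networkcontainer.NetworkContainer{ID: testContainerID}, nil)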
+ WithMessageRegexp(fmt.Sprintf("failed to create container: %v", ErrTestFail)), + }, + }, + + { + title: "in sync", + req: &reconcileRequest{ + projectID: testProjectID, + networkContainer: &akov2.AtlasNetworkContainer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-container", + Finalizers: []string{customresource.FinalizerLabel}, + }, + Spec: akov2.AtlasNetworkContainerSpec{ + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: "10.11.0.0/21", + }, + }, + }, + service: func() networkcontainer.NetworkContainerService { + ncs := akomock.NewNetworkContainerServiceMock(t) + ncs.EXPECT().Find(mock.Anything, testProjectID, mock.Anything).Return( + &networkcontainer.NetworkContainer{ + NetworkContainerConfig: networkcontainer.NetworkContainerConfig{ + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: "10.11.0.0/21", + }, + }, + ID: testContainerID, + Provisioned: true, + }, nil, + ) + return ncs + }(), + }, + wantResult: ctrl.Result{}, + wantFinalizers: []string{customresource.FinalizerLabel}, + wantConditions: []api.Condition{ + api.TrueCondition(api.NetworkContainerReady). + WithMessageRegexp(fmt.Sprintf("Network Container %s is ready", testContainerID)), + api.TrueCondition(api.ReadyType), + }, + }, + + { + title: "existent container in sync", + req: &reconcileRequest{ + projectID: testProjectID, + networkContainer: &akov2.AtlasNetworkContainer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-container", + Finalizers: []string{customresource.FinalizerLabel}, + }, + Spec: akov2.AtlasNetworkContainerSpec{ + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + ID: testContainerID, + Region: "US_EAST_1", + CIDRBlock: "10.11.0.0/21", + }, + }, + }, + service: func() networkcontainer.NetworkContainerService { + ncs := akomock.NewNetworkContainerServiceMock(t) + ncs.EXPECT().Get(mock.Anything, testProjectID, testContainerID).Return( + &networkcontainer.NetworkContainer{ + NetworkContainerConfig: networkcontainer.NetworkContainerConfig{ + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: "10.11.0.0/21", + }, + }, + ID: testContainerID, + Provisioned: true, + }, nil, + ) + return ncs + }(), + }, + wantResult: ctrl.Result{}, + wantFinalizers: []string{customresource.FinalizerLabel}, + wantConditions: []api.Condition{ + api.TrueCondition(api.NetworkContainerReady). 
+ WithMessageRegexp(fmt.Sprintf("Network Container %s is ready", testContainerID)), + api.TrueCondition(api.ReadyType), + }, + }, + + { + title: "update succeeds", + req: &reconcileRequest{ + projectID: testProjectID, + networkContainer: &akov2.AtlasNetworkContainer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-container", + }, + Spec: akov2.AtlasNetworkContainerSpec{ + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: "10.12.0.0/21", + }, + }, + }, + service: func() networkcontainer.NetworkContainerService { + ncs := akomock.NewNetworkContainerServiceMock(t) + ncs.EXPECT().Find(mock.Anything, testProjectID, mock.Anything).Return( + &networkcontainer.NetworkContainer{ + NetworkContainerConfig: networkcontainer.NetworkContainerConfig{ + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: "10.11.0.0/21", + }, + }, + ID: testContainerID, + Provisioned: true, + }, nil, + ) + ncs.EXPECT().Update(mock.Anything, testProjectID, testContainerID, mock.Anything).Return( + &networkcontainer.NetworkContainer{ + NetworkContainerConfig: networkcontainer.NetworkContainerConfig{ + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: "10.12.0.0/21", + }, + }, + ID: testContainerID, + Provisioned: false, + }, nil, + ) + return ncs + }(), + }, + wantResult: ctrl.Result{}, + wantFinalizers: []string{customresource.FinalizerLabel}, + wantConditions: []api.Condition{ + api.TrueCondition(api.NetworkContainerReady). + WithMessageRegexp(fmt.Sprintf("Network Container %s is ready", testContainerID)), + api.TrueCondition(api.ReadyType), + }, + }, + + { + title: "update fails", + req: &reconcileRequest{ + projectID: testProjectID, + networkContainer: &akov2.AtlasNetworkContainer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-container", + }, + Spec: akov2.AtlasNetworkContainerSpec{ + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: "10.12.0.0/21", + }, + }, + }, + service: func() networkcontainer.NetworkContainerService { + ncs := akomock.NewNetworkContainerServiceMock(t) + ncs.EXPECT().Find(mock.Anything, testProjectID, mock.Anything).Return( + &networkcontainer.NetworkContainer{ + NetworkContainerConfig: networkcontainer.NetworkContainerConfig{ + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: "10.11.0.0/21", + }, + }, + ID: testContainerID, + Provisioned: true, + }, nil, + ) + ncs.EXPECT().Update(mock.Anything, testProjectID, testContainerID, mock.Anything).Return( + nil, ErrTestFail, + ) + return ncs + }(), + }, + wantResult: ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + wantFinalizers: nil, + wantConditions: []api.Condition{ + api.FalseCondition(api.ReadyType).WithReason(string(workflow.NetworkContainerNotConfigured)). 
+ WithMessageRegexp(fmt.Sprintf("failed to update container: %v", ErrTestFail)), + }, + }, + + { + title: "delete succeeds", + req: &reconcileRequest{ + projectID: testProjectID, + networkContainer: &akov2.AtlasNetworkContainer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-container", + Finalizers: []string{customresource.FinalizerLabel}, + DeletionTimestamp: &deletionTime, + }, + Spec: akov2.AtlasNetworkContainerSpec{ + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: "10.12.0.0/21", + }, + }, + }, + service: func() networkcontainer.NetworkContainerService { + ncs := akomock.NewNetworkContainerServiceMock(t) + ncs.EXPECT().Find(mock.Anything, testProjectID, mock.Anything).Return( + &networkcontainer.NetworkContainer{ + NetworkContainerConfig: networkcontainer.NetworkContainerConfig{ + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + // different CIDR, but it should not matter as we are removing + CIDRBlock: "10.11.0.0/21", + }, + }, + ID: testContainerID, + Provisioned: true, + }, nil, + ) + ncs.EXPECT().Delete(mock.Anything, testProjectID, testContainerID).Return( + nil, + ) + return ncs + }(), + }, + wantResult: ctrl.Result{}, + wantFinalizers: nil, + wantConditions: []api.Condition{}, + }, + + { + title: "delete fails", + req: &reconcileRequest{ + projectID: testProjectID, + networkContainer: &akov2.AtlasNetworkContainer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-another-container", + Finalizers: []string{customresource.FinalizerLabel}, + DeletionTimestamp: &deletionTime, + }, + Spec: akov2.AtlasNetworkContainerSpec{ + Provider: "Azure", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_2", + CIDRBlock: "10.14.0.0/21", + }, + }, + }, + service: func() networkcontainer.NetworkContainerService { + ncs := akomock.NewNetworkContainerServiceMock(t) + ncs.EXPECT().Find(mock.Anything, testProjectID, mock.Anything).Return( + &networkcontainer.NetworkContainer{ + NetworkContainerConfig: networkcontainer.NetworkContainerConfig{ + Provider: "Azure", // almost empty, but we are removing anyways + }, + ID: testContainerID, + }, nil, + ) + ncs.EXPECT().Delete(mock.Anything, testProjectID, testContainerID).Return( + ErrTestFail, + ) + return ncs + }(), + }, + wantResult: ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + wantFinalizers: []string{customresource.FinalizerLabel}, + wantConditions: []api.Condition{ + api.FalseCondition(api.ReadyType).WithReason(string(workflow.NetworkContainerNotDeleted)). 
+ WithMessageRegexp(fmt.Sprintf("failed to delete container: %v", ErrTestFail)), + }, + }, + + { + title: "discover find fails abnormally", + req: &reconcileRequest{ + projectID: testProjectID, + networkContainer: &akov2.AtlasNetworkContainer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-container", + Finalizers: []string{customresource.FinalizerLabel}, + }, + Spec: akov2.AtlasNetworkContainerSpec{ + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: "10.11.0.0/21", + }, + }, + }, + service: func() networkcontainer.NetworkContainerService { + ncs := akomock.NewNetworkContainerServiceMock(t) + ncs.EXPECT().Find(mock.Anything, testProjectID, mock.Anything).Return(nil, ErrTestFail) + return ncs + }(), + }, + wantResult: ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + wantFinalizers: []string{customresource.FinalizerLabel}, + wantConditions: []api.Condition{ + api.FalseCondition(api.ReadyType).WithReason(string(workflow.NetworkContainerNotConfigured)). + WithMessageRegexp(fmt.Sprintf("failed to find container from project %s: %v", + testProjectID, ErrTestFail)), + }, + }, + + { + title: "discover get fails abnormally", + req: &reconcileRequest{ + projectID: testProjectID, + networkContainer: &akov2.AtlasNetworkContainer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-container", + Finalizers: []string{customresource.FinalizerLabel}, + }, + Spec: akov2.AtlasNetworkContainerSpec{ + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + ID: testContainerID, + Region: "US_EAST_1", + CIDRBlock: "10.11.0.0/21", + }, + }, + }, + service: func() networkcontainer.NetworkContainerService { + ncs := akomock.NewNetworkContainerServiceMock(t) + ncs.EXPECT().Get(mock.Anything, testProjectID, testContainerID).Return(nil, ErrTestFail) + return ncs + }(), + }, + wantResult: ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + wantFinalizers: []string{customresource.FinalizerLabel}, + wantConditions: []api.Condition{ + api.FalseCondition(api.ReadyType).WithReason(string(workflow.NetworkContainerNotConfigured)). + WithMessageRegexp(fmt.Sprintf("failed to get container %s from project %s: %v", + testContainerID, testProjectID, ErrTestFail)), + }, + }, + + { + title: "discover get fails with not found", + req: &reconcileRequest{ + projectID: testProjectID, + networkContainer: &akov2.AtlasNetworkContainer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-container", + Finalizers: []string{customresource.FinalizerLabel}, + }, + Spec: akov2.AtlasNetworkContainerSpec{ + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + ID: testContainerID, + Region: "US_EAST_1", + CIDRBlock: "10.11.0.0/21", + }, + }, + }, + service: func() networkcontainer.NetworkContainerService { + ncs := akomock.NewNetworkContainerServiceMock(t) + ncs.EXPECT().Get(mock.Anything, testProjectID, testContainerID).Return( + nil, + networkcontainer.ErrNotFound, + ) + return ncs + }(), + }, + wantResult: ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + wantFinalizers: []string{customresource.FinalizerLabel}, + wantConditions: []api.Condition{ + api.FalseCondition(api.ReadyType).WithReason(string(workflow.NetworkContainerNotConfigured)). 
+ WithMessageRegexp(fmt.Sprintf("failed to get container %s from project %s: %v", + testContainerID, testProjectID, networkcontainer.ErrNotFound)), + }, + }, + } { + t.Run(tc.title, func(t *testing.T) { + workflowCtx := &workflow.Context{ + Context: context.Background(), + } + testScheme := runtime.NewScheme() + require.NoError(t, akov2.AddToScheme(testScheme)) + k8sClient := fake.NewClientBuilder(). + WithScheme(testScheme). + WithObjects(tc.req.networkContainer). + Build() + r := testReconciler(k8sClient, emptyProvider, logger) + result, err := r.handle(workflowCtx, tc.req) + assert.ErrorIs(t, err, tc.wantErr) + assert.Equal(t, tc.wantResult, result) + nc := getNetworkContainer(t, workflowCtx.Context, k8sClient, client.ObjectKeyFromObject(tc.req.networkContainer)) + assert.Equal(t, tc.wantFinalizers, getFinalizers(nc)) + assert.Equal(t, cleanConditions(tc.wantConditions), cleanConditions(workflowCtx.Conditions())) + }) + } +} + +func getNetworkContainer(t *testing.T, ctx context.Context, k8sClient client.Client, key client.ObjectKey) *akov2.AtlasNetworkContainer { + networkContainer := &akov2.AtlasNetworkContainer{} + if err := k8sClient.Get(ctx, key, networkContainer); err != nil && !k8serrors.IsNotFound(err) { + require.NoError(t, err) + } + return networkContainer +} + +func getFinalizers(networkContainer *akov2.AtlasNetworkContainer) []string { + if networkContainer == nil { + return nil + } + return networkContainer.GetFinalizers() +} + +func getConditions(networkContainer *akov2.AtlasNetworkContainer) []api.Condition { + if networkContainer == nil { + return nil + } + return networkContainer.Status.GetConditions() +} + +func testReconciler(k8sClient client.Client, provider atlas.Provider, logger *zap.Logger) *AtlasNetworkContainerReconciler { + return &AtlasNetworkContainerReconciler{ + AtlasReconciler: reconciler.AtlasReconciler{ + Client: k8sClient, + Log: logger.Sugar(), + }, + AtlasProvider: provider, + EventRecorder: record.NewFakeRecorder(10), + } +} + +func cleanConditions(inputs []api.Condition) []api.Condition { + outputs := make([]api.Condition, 0, len(inputs)) + for _, condition := range inputs { + clean := condition + clean.LastTransitionTime = metav1.Time{} + outputs = append(outputs, clean) + } + return outputs +} diff --git a/internal/controller/atlasnetworkcontainer/transitions.go b/internal/controller/atlasnetworkcontainer/transitions.go new file mode 100644 index 0000000000..407edd6828 --- /dev/null +++ b/internal/controller/atlasnetworkcontainer/transitions.go @@ -0,0 +1,112 @@ +package atlasnetworkcontainer + +import ( + "errors" + "fmt" + + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api" + akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/status" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/customresource" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/reconciler" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/workflow" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/networkcontainer" +) + +func (r *AtlasNetworkContainerReconciler) create(workflowCtx *workflow.Context, req *reconcileRequest) (ctrl.Result, error) { + cfg := networkcontainer.NewNetworkContainerConfig( + req.networkContainer.Spec.Provider, + &req.networkContainer.Spec.AtlasNetworkContainerConfig, + ) + createdContainer, err := req.service.Create(workflowCtx.Context, req.projectID, cfg) + if 
err != nil { + wrappedErr := fmt.Errorf("failed to create container: %w", err) + return r.terminate(workflowCtx, req.networkContainer, workflow.NetworkContainerNotConfigured, wrappedErr), nil + } + return r.ready(workflowCtx, req.networkContainer, createdContainer) +} + +func (r *AtlasNetworkContainerReconciler) sync(workflowCtx *workflow.Context, req *reconcileRequest, container *networkcontainer.NetworkContainer) (ctrl.Result, error) { + cfg := networkcontainer.NewNetworkContainerConfig( + req.networkContainer.Spec.Provider, &req.networkContainer.Spec.AtlasNetworkContainerConfig) + // only the CIDR block can be updated in a container + if cfg.CIDRBlock != container.NetworkContainerConfig.CIDRBlock { + return r.update(workflowCtx, req, container) + } + return r.ready(workflowCtx, req.networkContainer, container) +} + +func (r *AtlasNetworkContainerReconciler) update(workflowCtx *workflow.Context, req *reconcileRequest, container *networkcontainer.NetworkContainer) (ctrl.Result, error) { + updatedContainer, err := req.service.Update(workflowCtx.Context, req.projectID, container.ID, &container.NetworkContainerConfig) + if err != nil { + wrappedErr := fmt.Errorf("failed to update container: %w", err) + return r.terminate(workflowCtx, req.networkContainer, workflow.NetworkContainerNotConfigured, wrappedErr), nil + } + return r.ready(workflowCtx, req.networkContainer, updatedContainer) +} + +func (r *AtlasNetworkContainerReconciler) delete(workflowCtx *workflow.Context, req *reconcileRequest, container *networkcontainer.NetworkContainer) (ctrl.Result, error) { + if customresource.IsResourcePolicyKeepOrDefault(req.networkContainer, r.ObjectDeletionProtection) { + return r.unmanage(workflowCtx, req.networkContainer) + } + err := req.service.Delete(workflowCtx.Context, req.projectID, container.ID) + if err != nil { + wrappedErr := fmt.Errorf("failed to delete container: %w", err) + return r.terminate(workflowCtx, req.networkContainer, workflow.NetworkContainerNotDeleted, wrappedErr), nil + } + return r.unmanage(workflowCtx, req.networkContainer) +} + +func (r *AtlasNetworkContainerReconciler) ready(workflowCtx *workflow.Context, networkContainer *akov2.AtlasNetworkContainer, container *networkcontainer.NetworkContainer) (ctrl.Result, error) { + if err := customresource.ManageFinalizer(workflowCtx.Context, r.Client, networkContainer, customresource.SetFinalizer); err != nil { + return r.terminate(workflowCtx, networkContainer, workflow.AtlasFinalizerNotSet, err), nil + } + + workflowCtx.SetConditionTrueMsg(api.NetworkContainerReady, fmt.Sprintf("Network Container %s is ready", container.ID)). 
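// EnsureStatusOption registers a deferred status mutation: the closure built
// by updateNetworkContainerStatusOption (bottom of this file) runs when the
// workflow context flushes the CR status, keeping the Atlas-to-status mapping
// inside the networkcontainer translation package:
//
//	func(containerStatus *status.AtlasNetworkContainerStatus) {
//		networkcontainer.ApplyNetworkContainerStatus(containerStatus, container)
//	}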
+ SetConditionTrue(api.ReadyType).EnsureStatusOption(updateNetworkContainerStatusOption(container)) + + if networkContainer.Spec.ExternalProjectRef != nil { + return workflow.Requeue(r.independentSyncPeriod).ReconcileResult(), nil + } + + return workflow.OK().ReconcileResult(), nil +} + +func (r *AtlasNetworkContainerReconciler) unmanage(workflowCtx *workflow.Context, networkContainer *akov2.AtlasNetworkContainer) (ctrl.Result, error) { + if err := customresource.ManageFinalizer(workflowCtx.Context, r.Client, networkContainer, customresource.UnsetFinalizer); err != nil { + return r.terminate(workflowCtx, networkContainer, workflow.AtlasFinalizerNotRemoved, err), nil + } + return workflow.Deleted().ReconcileResult(), nil +} + +func (r *AtlasNetworkContainerReconciler) release(workflowCtx *workflow.Context, networkContainer *akov2.AtlasNetworkContainer, err error) ctrl.Result { + if errors.Is(err, reconciler.ErrMissingKubeProject) { + if finalizerErr := customresource.ManageFinalizer(workflowCtx.Context, r.Client, networkContainer, customresource.UnsetFinalizer); finalizerErr != nil { + err = errors.Join(err, finalizerErr) + } + } + return r.terminate(workflowCtx, networkContainer, workflow.NetworkContainerNotConfigured, err) +} + +func (r *AtlasNetworkContainerReconciler) terminate( + ctx *workflow.Context, + resource api.AtlasCustomResource, + reason workflow.ConditionReason, + err error, +) ctrl.Result { + condition := api.ReadyType + r.Log.Errorf("resource %T(%s/%s) failed on condition %s: %s", + resource, resource.GetNamespace(), resource.GetName(), condition, err) + result := workflow.Terminate(reason, err) + ctx.SetConditionFalse(api.ReadyType).SetConditionFromResult(condition, result) + + return result.ReconcileResult() +} + +func updateNetworkContainerStatusOption(container *networkcontainer.NetworkContainer) status.AtlasNetworkContainerStatusOption { + return func(containerStatus *status.AtlasNetworkContainerStatus) { + networkcontainer.ApplyNetworkContainerStatus(containerStatus, container) + } +} diff --git a/internal/controller/atlasprivateendpoint/atlasprivateendpoint_controller_test.go b/internal/controller/atlasprivateendpoint/atlasprivateendpoint_controller_test.go index edb1e99087..8f4883b9d4 100644 --- a/internal/controller/atlasprivateendpoint/atlasprivateendpoint_controller_test.go +++ b/internal/controller/atlasprivateendpoint/atlasprivateendpoint_controller_test.go @@ -293,7 +293,7 @@ func TestEnsureCustomResource(t *testing.T) { expectedResult: reconcile.Result{RequeueAfter: workflow.DefaultRetry}, expectedLogs: []string{ "resource 'pe1' version is valid", - "resource *v1.AtlasPrivateEndpoint(default/pe1) failed on condition Ready: can not fetch AtlasProject: atlasprojects.atlas.mongodb.com \"my-project\" not found", + "resource *v1.AtlasPrivateEndpoint(default/pe1) failed on condition Ready: missing Kubernetes Atlas Project\natlasprojects.atlas.mongodb.com \"my-project\" not found", "Status update", }, }, diff --git a/internal/controller/reconciler/reconciler.go b/internal/controller/reconciler/reconciler.go index 23d7462088..93e93a2ec3 100644 --- a/internal/controller/reconciler/reconciler.go +++ b/internal/controller/reconciler/reconciler.go @@ -2,16 +2,27 @@ package reconciler import ( "context" + "errors" "fmt" "go.mongodb.org/atlas-sdk/v20231115008/admin" "go.uber.org/zap" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api" 
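// ErrMissingKubeProject (declared below) is the hinge between fetchProject()
// and per-controller release() helpers such as the one in transitions.go
// above: fetchProject joins the sentinel onto the NotFound error, and callers
// match it to decide that dropping their finalizer is safe:
//
//	if errors.Is(err, reconciler.ErrMissingKubeProject) {
//		// the referenced AtlasProject is gone; unset the finalizer
//	}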
akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/customresource" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/workflow" "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/project" ) +var ( + // ErrMissingProject marks a project is gone from Kubernetes + ErrMissingKubeProject = errors.New("missing Kubernetes Atlas Project") +) + type AtlasReconciler struct { Client client.Client Log *zap.SugaredLogger @@ -61,6 +72,37 @@ func (r *AtlasReconciler) ResolveCredentials(ctx context.Context, pro project.Pr return project.ConnectionSecretObjectKey(), nil } +func (r *AtlasReconciler) Skip(ctx context.Context, typeName string, resource api.AtlasCustomResource, spec any) (ctrl.Result, error) { + msg := fmt.Sprintf("-> Skipping %s reconciliation as annotation %s=%s", + typeName, customresource.ReconciliationPolicyAnnotation, customresource.ReconciliationPolicySkip) + r.Log.Infow(msg, "spec", spec) + if !resource.GetDeletionTimestamp().IsZero() { + if err := customresource.ManageFinalizer(ctx, r.Client, resource, customresource.UnsetFinalizer); err != nil { + result := workflow.Terminate(workflow.Internal, err) + r.Log.Errorw("Failed to remove finalizer", "terminate", err) + + return result.ReconcileResult(), nil + } + } + + return workflow.OK().ReconcileResult(), nil +} + +func (r *AtlasReconciler) Invalidate(typeName string, invalid workflow.Result) (ctrl.Result, error) { + // note: ValidateResourceVersion already set the state so we don't have to do it here. + r.Log.Debugf("%T is invalid: %v", typeName, invalid) + return invalid.ReconcileResult(), nil +} + +func (r *AtlasReconciler) Unsupport(ctx *workflow.Context, typeName string) (ctrl.Result, error) { + unsupported := workflow.Terminate( + workflow.AtlasGovUnsupported, + fmt.Errorf("the %s is not supported by Atlas for government", typeName), + ).WithoutRetry() + ctx.SetConditionFromResult(api.ReadyType, unsupported) + return unsupported.ReconcileResult(), nil +} + func (r *AtlasReconciler) credentialsFor(pro project.ProjectReferrerObject) *client.ObjectKey { key := client.ObjectKeyFromObject(pro) pdr := pro.ProjectDualRef() @@ -84,6 +126,9 @@ func (r *AtlasReconciler) fetchProject(ctx context.Context, pro project.ProjectR key := client.ObjectKey{Name: pdr.ProjectRef.Name, Namespace: ns} err := r.Client.Get(ctx, key, &project) if err != nil { + if k8serrors.IsNotFound(err) { + return nil, errors.Join(ErrMissingKubeProject, err) + } return nil, fmt.Errorf("can not fetch AtlasProject: %w", err) } return &project, nil diff --git a/internal/controller/reconciler/reconciler_test.go b/internal/controller/reconciler/reconciler_test.go index 6b76125b1c..f0491d911e 100644 --- a/internal/controller/reconciler/reconciler_test.go +++ b/internal/controller/reconciler/reconciler_test.go @@ -2,7 +2,6 @@ package reconciler_test import ( "context" - "fmt" "testing" "github.com/stretchr/testify/assert" @@ -131,7 +130,7 @@ func TestResolveCredentials(t *testing.T) { ProjectRef: &common.ResourceRefNamespaced{Name: "project", Namespace: "project-ns"}, }, }, - expectedError: fmt.Errorf("can not fetch AtlasProject"), + expectedError: reconciler.ErrMissingKubeProject, }, { title: "should select CustomRoleCredentials from CustomRole when externalProjectID is set", @@ -273,7 +272,7 @@ func TestResolveCredentials(t *testing.T) { }, }, }, - expectedError: fmt.Errorf("can not fetch AtlasProject"), + expectedError: reconciler.ErrMissingKubeProject, }, { 
title: "should NOT select credentials when both projectRef and externalProjectId are empty", diff --git a/internal/controller/registry.go b/internal/controller/registry.go index efe6bd6da5..40d4470e1a 100644 --- a/internal/controller/registry.go +++ b/internal/controller/registry.go @@ -20,6 +20,7 @@ import ( "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlasdeployment" "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlasfederatedauth" "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlasipaccesslist" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlasnetworkcontainer" "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlasprivateendpoint" "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlasproject" "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlassearchindexconfig" @@ -94,6 +95,7 @@ func (r *Registry) registerControllers(c cluster.Cluster, ap atlas.Provider) { reconcilers = append(reconcilers, atlascustomrole.NewAtlasCustomRoleReconciler(c, r.deprecatedPredicates(), ap, r.deletionProtection, r.independentSyncPeriod, r.logger)) reconcilers = append(reconcilers, atlasprivateendpoint.NewAtlasPrivateEndpointReconciler(c, r.defaultPredicates(), ap, r.deletionProtection, r.independentSyncPeriod, r.logger)) reconcilers = append(reconcilers, atlasipaccesslist.NewAtlasIPAccessListReconciler(c, r.defaultPredicates(), ap, r.deletionProtection, r.independentSyncPeriod, r.logger)) + reconcilers = append(reconcilers, atlasnetworkcontainer.NewAtlasNetworkContainerReconciler(c, r.defaultPredicates(), ap, r.deletionProtection, r.logger, r.independentSyncPeriod)) r.reconcilers = reconcilers } diff --git a/internal/controller/workflow/reason.go b/internal/controller/workflow/reason.go index cf83de3e2b..acba5a311d 100644 --- a/internal/controller/workflow/reason.go +++ b/internal/controller/workflow/reason.go @@ -160,3 +160,17 @@ const ( IPAccessListFailedToGetState ConditionReason = "IPAccessListFailedToGetState" IPAccessListPending ConditionReason = "IPAccessListPending" ) + +// Atlas Network Peering reasons +const ( + NetworkPeeringConnectionCreating ConditionReason = "NetworkPeeringConnectionCreating" + NetworkPeeringConnectionPending ConditionReason = "NetworkPeeringConnectionPending" + NetworkPeeringRemovingContainer ConditionReason = "NetworkPeeringRemovingContainer" +) + +// Atlas Network Container reasons +const ( + NetworkContainerNotConfigured ConditionReason = "NetworkContainerNotConfigured" + NetworkContainerCreated ConditionReason = "NetworkContainerCreated" + NetworkContainerNotDeleted ConditionReason = "NetworkContainerNotDeleted" +) diff --git a/internal/indexer/atlasnetworkcontainercredentials.go b/internal/indexer/atlasnetworkcontainercredentials.go new file mode 100644 index 0000000000..f59eec20e2 --- /dev/null +++ b/internal/indexer/atlasnetworkcontainercredentials.go @@ -0,0 +1,24 @@ +package indexer + +import ( + "go.uber.org/zap" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" +) + +const ( + AtlasNetworkContainerCredentialsIndex = "atlasnetworkcontainer.credentials" +) + +func NewAtlasNetworkContainerByCredentialIndexer(logger *zap.Logger) *LocalCredentialIndexer { + return NewLocalCredentialsIndexer(AtlasNetworkContainerCredentialsIndex, &akov2.AtlasNetworkContainer{}, logger) +} + +func NetworkContainerRequests(list *akov2.AtlasNetworkContainerList) 
[]reconcile.Request { + requests := make([]reconcile.Request, 0, len(list.Items)) + for _, item := range list.Items { + requests = append(requests, toRequest(&item)) + } + return requests +} diff --git a/internal/indexer/atlasnetworkcontainerprojects.go b/internal/indexer/atlasnetworkcontainerprojects.go new file mode 100644 index 0000000000..d7d16a6652 --- /dev/null +++ b/internal/indexer/atlasnetworkcontainerprojects.go @@ -0,0 +1,30 @@ +//nolint:dupl +package indexer + +import ( + "go.uber.org/zap" + "sigs.k8s.io/controller-runtime/pkg/client" + + akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" +) + +const ( + AtlasNetworkContainerByProjectIndex = "atlasnetworkcontainer.spec.projectRef" +) + +type AtlasNetworkContainerByProjectIndexer struct { + AtlasReferrerByProjectIndexerBase +} + +func NewAtlasNetworkContainerByProjectIndexer(logger *zap.Logger) *AtlasNetworkContainerByProjectIndexer { + return &AtlasNetworkContainerByProjectIndexer{ + AtlasReferrerByProjectIndexerBase: *NewAtlasReferrerByProjectIndexer( + logger, + AtlasNetworkContainerByProjectIndex, + ), + } +} + +func (*AtlasNetworkContainerByProjectIndexer) Object() client.Object { + return &akov2.AtlasNetworkContainer{} +} diff --git a/internal/indexer/atlasnetworkcontainerprojects_test.go b/internal/indexer/atlasnetworkcontainerprojects_test.go new file mode 100644 index 0000000000..6c41ec54c3 --- /dev/null +++ b/internal/indexer/atlasnetworkcontainerprojects_test.go @@ -0,0 +1,46 @@ +package indexer + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap/zaptest" + + akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/common" +) + +func TestAtlasNetworkContainerByProjectIndices(t *testing.T) { + t.Run("should return nil when instance has no project associated to it", func(t *testing.T) { + pe := &akov2.AtlasNetworkContainer{ + Spec: akov2.AtlasNetworkContainerSpec{}, + } + + indexer := NewAtlasNetworkContainerByProjectIndexer(zaptest.NewLogger(t)) + keys := indexer.Keys(pe) + assert.Nil(t, keys) + }) + + t.Run("should return indexes slice when instance has project associated to it", func(t *testing.T) { + pe := &akov2.AtlasNetworkContainer{ + Spec: akov2.AtlasNetworkContainerSpec{ + ProjectDualReference: akov2.ProjectDualReference{ + ProjectRef: &common.ResourceRefNamespaced{ + Name: "project-1", + Namespace: "default", + }, + }, + }, + } + + indexer := NewAtlasNetworkContainerByProjectIndexer(zaptest.NewLogger(t)) + keys := indexer.Keys(pe) + assert.Equal( + t, + []string{ + "default/project-1", + }, + keys, + ) + }) +} diff --git a/internal/indexer/atlasreferredprojects.go b/internal/indexer/atlasreferredprojects.go index be133043db..15907a8b00 100644 --- a/internal/indexer/atlasreferredprojects.go +++ b/internal/indexer/atlasreferredprojects.go @@ -1,9 +1,15 @@ package indexer import ( + "context" + "go.uber.org/zap" + "k8s.io/apimachinery/pkg/fields" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/project" ) @@ -37,3 +43,27 @@ func (rb *AtlasReferrerByProjectIndexerBase) Keys(object client.Object) []string return []string{pdr.ProjectRef.GetObject(pro.GetNamespace()).String()} } + +func ProjectsIndexMapperFunc[L client.ObjectList](indexerName string, listGenFn func() L, reqsFn requestsFunc[L], 
kubeClient client.Client, logger *zap.SugaredLogger) handler.MapFunc { + return func(ctx context.Context, obj client.Object) []reconcile.Request { + project, ok := obj.(*akov2.AtlasProject) + if !ok { + logger.Warnf("watching AtlasProject but got %T", obj) + return nil + } + + listOpts := &client.ListOptions{ + FieldSelector: fields.OneTermEqualSelector( + indexerName, + client.ObjectKeyFromObject(project).String(), + ), + } + list := listGenFn() + err := kubeClient.List(ctx, list, listOpts) + if err != nil { + logger.Errorf("failed to list from indexer %s: %v", indexerName, err) + return nil + } + return reqsFn(list) + } +} diff --git a/internal/indexer/indexer.go b/internal/indexer/indexer.go index f977296ac5..ada2708124 100644 --- a/internal/indexer/indexer.go +++ b/internal/indexer/indexer.go @@ -42,6 +42,8 @@ func RegisterAll(ctx context.Context, c cluster.Cluster, logger *zap.Logger) err NewAtlasPrivateEndpointByProjectIndexer(logger), NewAtlasIPAccessListCredentialsByCredentialIndexer(logger), NewAtlasIPAccessListByProjectIndexer(logger), + NewAtlasNetworkContainerByCredentialIndexer(logger), + NewAtlasNetworkContainerByProjectIndexer(logger), ) } diff --git a/internal/indexer/localcredentials_test.go b/internal/indexer/localcredentials_test.go index 48dafa85cd..76f4623e26 100644 --- a/internal/indexer/localcredentials_test.go +++ b/internal/indexer/localcredentials_test.go @@ -130,6 +130,23 @@ func TestLocalCredentialsIndexer(t *testing.T) { wantKeys: []string{"ns/secret-ref"}, wantObject: &akov2.AtlasPrivateEndpoint{}, }, + { + name: "should return keys when there is a reference on a network container", + object: &akov2.AtlasNetworkContainer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "user", + Namespace: "ns", + }, + Spec: akov2.AtlasNetworkContainerSpec{ + ProjectDualReference: akov2.ProjectDualReference{ + ConnectionSecret: &api.LocalObjectReference{Name: "secret-ref"}, + }, + }, + }, + index: AtlasNetworkContainerCredentialsIndex, + wantKeys: []string{"ns/secret-ref"}, + wantObject: &akov2.AtlasNetworkContainer{}, + }, } { indexers := testIndexers(t) t.Run(tc.name, func(t *testing.T) { @@ -311,6 +328,42 @@ func TestCredentialsIndexMapperFunc(t *testing.T) { }}, }, }, + { + name: "matching input credentials renders matching network container", + index: AtlasNetworkContainerCredentialsIndex, + output: &akov2.AtlasNetworkContainer{}, + mapperFn: func(kubeClient client.Client, logger *zap.SugaredLogger) handler.MapFunc { + return CredentialsIndexMapperFunc[*akov2.AtlasNetworkContainerList]( + AtlasNetworkContainerCredentialsIndex, + func() *akov2.AtlasNetworkContainerList { return &akov2.AtlasNetworkContainerList{} }, + NetworkContainerRequests, + kubeClient, + logger, + ) + }, + input: newTestSecret("matching-container-secret-ref"), + objects: []client.Object{ + &akov2.AtlasNetworkContainer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "matching-container", + Namespace: "ns", + }, + Spec: akov2.AtlasNetworkContainerSpec{ + ProjectDualReference: akov2.ProjectDualReference{ + ConnectionSecret: &api.LocalObjectReference{ + Name: "matching-container-secret-ref", + }, + }, + }, + }, + }, + want: []reconcile.Request{ + {NamespacedName: types.NamespacedName{ + Name: "matching-container", + Namespace: "ns", + }}, + }, + }, } { scheme := runtime.NewScheme() assert.NoError(t, corev1.AddToScheme(scheme)) @@ -417,5 +470,6 @@ func testIndexers(t *testing.T) map[string]*LocalCredentialIndexer { indexers[AtlasDeploymentCredentialsIndex] = NewAtlasDeploymentByCredentialIndexer(logger) 
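// ProjectsIndexMapperFunc above is the generic glue for AtlasProject watches;
// hypothetical controller wiring (names assumed, not part of this patch)
// could look like:
//
//	Watches(&akov2.AtlasProject{}, handler.EnqueueRequestsFromMapFunc(
//		indexer.ProjectsIndexMapperFunc(
//			indexer.AtlasNetworkContainerByProjectIndex,
//			func() *akov2.AtlasNetworkContainerList { return &akov2.AtlasNetworkContainerList{} },
//			indexer.NetworkContainerRequests,
//			mgr.GetClient(),
//			logger,
//		),
//	))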
indexers[AtlasCustomRoleCredentialsIndex] = NewAtlasCustomRoleByCredentialIndexer(logger) indexers[AtlasPrivateEndpointCredentialsIndex] = NewAtlasPrivateEndpointByCredentialIndexer(logger) + indexers[AtlasNetworkContainerCredentialsIndex] = NewAtlasNetworkContainerByCredentialIndexer(logger) return indexers } diff --git a/internal/mocks/translation/network_container_service.go b/internal/mocks/translation/network_container_service.go new file mode 100644 index 0000000000..915c0260d7 --- /dev/null +++ b/internal/mocks/translation/network_container_service.go @@ -0,0 +1,327 @@ +// Code generated by mockery. DO NOT EDIT. + +package translation + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + + networkcontainer "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/networkcontainer" +) + +// NetworkContainerServiceMock is an autogenerated mock type for the NetworkContainerService type +type NetworkContainerServiceMock struct { + mock.Mock +} + +type NetworkContainerServiceMock_Expecter struct { + mock *mock.Mock +} + +func (_m *NetworkContainerServiceMock) EXPECT() *NetworkContainerServiceMock_Expecter { + return &NetworkContainerServiceMock_Expecter{mock: &_m.Mock} +} + +// Create provides a mock function with given fields: ctx, projectID, cfg +func (_m *NetworkContainerServiceMock) Create(ctx context.Context, projectID string, cfg *networkcontainer.NetworkContainerConfig) (*networkcontainer.NetworkContainer, error) { + ret := _m.Called(ctx, projectID, cfg) + + if len(ret) == 0 { + panic("no return value specified for Create") + } + + var r0 *networkcontainer.NetworkContainer + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, *networkcontainer.NetworkContainerConfig) (*networkcontainer.NetworkContainer, error)); ok { + return rf(ctx, projectID, cfg) + } + if rf, ok := ret.Get(0).(func(context.Context, string, *networkcontainer.NetworkContainerConfig) *networkcontainer.NetworkContainer); ok { + r0 = rf(ctx, projectID, cfg) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*networkcontainer.NetworkContainer) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, *networkcontainer.NetworkContainerConfig) error); ok { + r1 = rf(ctx, projectID, cfg) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NetworkContainerServiceMock_Create_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Create' +type NetworkContainerServiceMock_Create_Call struct { + *mock.Call +} + +// Create is a helper method to define mock.On call +// - ctx context.Context +// - projectID string +// - cfg *networkcontainer.NetworkContainerConfig +func (_e *NetworkContainerServiceMock_Expecter) Create(ctx interface{}, projectID interface{}, cfg interface{}) *NetworkContainerServiceMock_Create_Call { + return &NetworkContainerServiceMock_Create_Call{Call: _e.mock.On("Create", ctx, projectID, cfg)} +} + +func (_c *NetworkContainerServiceMock_Create_Call) Run(run func(ctx context.Context, projectID string, cfg *networkcontainer.NetworkContainerConfig)) *NetworkContainerServiceMock_Create_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(*networkcontainer.NetworkContainerConfig)) + }) + return _c +} + +func (_c *NetworkContainerServiceMock_Create_Call) Return(_a0 *networkcontainer.NetworkContainer, _a1 error) *NetworkContainerServiceMock_Create_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *NetworkContainerServiceMock_Create_Call) 
RunAndReturn(run func(context.Context, string, *networkcontainer.NetworkContainerConfig) (*networkcontainer.NetworkContainer, error)) *NetworkContainerServiceMock_Create_Call { + _c.Call.Return(run) + return _c +} + +// Delete provides a mock function with given fields: ctx, projectID, containerID +func (_m *NetworkContainerServiceMock) Delete(ctx context.Context, projectID string, containerID string) error { + ret := _m.Called(ctx, projectID, containerID) + + if len(ret) == 0 { + panic("no return value specified for Delete") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { + r0 = rf(ctx, projectID, containerID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NetworkContainerServiceMock_Delete_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Delete' +type NetworkContainerServiceMock_Delete_Call struct { + *mock.Call +} + +// Delete is a helper method to define mock.On call +// - ctx context.Context +// - projectID string +// - containerID string +func (_e *NetworkContainerServiceMock_Expecter) Delete(ctx interface{}, projectID interface{}, containerID interface{}) *NetworkContainerServiceMock_Delete_Call { + return &NetworkContainerServiceMock_Delete_Call{Call: _e.mock.On("Delete", ctx, projectID, containerID)} +} + +func (_c *NetworkContainerServiceMock_Delete_Call) Run(run func(ctx context.Context, projectID string, containerID string)) *NetworkContainerServiceMock_Delete_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string)) + }) + return _c +} + +func (_c *NetworkContainerServiceMock_Delete_Call) Return(_a0 error) *NetworkContainerServiceMock_Delete_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *NetworkContainerServiceMock_Delete_Call) RunAndReturn(run func(context.Context, string, string) error) *NetworkContainerServiceMock_Delete_Call { + _c.Call.Return(run) + return _c +} + +// Find provides a mock function with given fields: ctx, projectID, cfg +func (_m *NetworkContainerServiceMock) Find(ctx context.Context, projectID string, cfg *networkcontainer.NetworkContainerConfig) (*networkcontainer.NetworkContainer, error) { + ret := _m.Called(ctx, projectID, cfg) + + if len(ret) == 0 { + panic("no return value specified for Find") + } + + var r0 *networkcontainer.NetworkContainer + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, *networkcontainer.NetworkContainerConfig) (*networkcontainer.NetworkContainer, error)); ok { + return rf(ctx, projectID, cfg) + } + if rf, ok := ret.Get(0).(func(context.Context, string, *networkcontainer.NetworkContainerConfig) *networkcontainer.NetworkContainer); ok { + r0 = rf(ctx, projectID, cfg) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*networkcontainer.NetworkContainer) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, *networkcontainer.NetworkContainerConfig) error); ok { + r1 = rf(ctx, projectID, cfg) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NetworkContainerServiceMock_Find_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Find' +type NetworkContainerServiceMock_Find_Call struct { + *mock.Call +} + +// Find is a helper method to define mock.On call +// - ctx context.Context +// - projectID string +// - cfg *networkcontainer.NetworkContainerConfig +func (_e *NetworkContainerServiceMock_Expecter) Find(ctx interface{}, projectID interface{}, cfg interface{}) 
*NetworkContainerServiceMock_Find_Call { + return &NetworkContainerServiceMock_Find_Call{Call: _e.mock.On("Find", ctx, projectID, cfg)} +} + +func (_c *NetworkContainerServiceMock_Find_Call) Run(run func(ctx context.Context, projectID string, cfg *networkcontainer.NetworkContainerConfig)) *NetworkContainerServiceMock_Find_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(*networkcontainer.NetworkContainerConfig)) + }) + return _c +} + +func (_c *NetworkContainerServiceMock_Find_Call) Return(_a0 *networkcontainer.NetworkContainer, _a1 error) *NetworkContainerServiceMock_Find_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *NetworkContainerServiceMock_Find_Call) RunAndReturn(run func(context.Context, string, *networkcontainer.NetworkContainerConfig) (*networkcontainer.NetworkContainer, error)) *NetworkContainerServiceMock_Find_Call { + _c.Call.Return(run) + return _c +} + +// Get provides a mock function with given fields: ctx, projectID, containerID +func (_m *NetworkContainerServiceMock) Get(ctx context.Context, projectID string, containerID string) (*networkcontainer.NetworkContainer, error) { + ret := _m.Called(ctx, projectID, containerID) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 *networkcontainer.NetworkContainer + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) (*networkcontainer.NetworkContainer, error)); ok { + return rf(ctx, projectID, containerID) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) *networkcontainer.NetworkContainer); ok { + r0 = rf(ctx, projectID, containerID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*networkcontainer.NetworkContainer) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, projectID, containerID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NetworkContainerServiceMock_Get_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Get' +type NetworkContainerServiceMock_Get_Call struct { + *mock.Call +} + +// Get is a helper method to define mock.On call +// - ctx context.Context +// - projectID string +// - containerID string +func (_e *NetworkContainerServiceMock_Expecter) Get(ctx interface{}, projectID interface{}, containerID interface{}) *NetworkContainerServiceMock_Get_Call { + return &NetworkContainerServiceMock_Get_Call{Call: _e.mock.On("Get", ctx, projectID, containerID)} +} + +func (_c *NetworkContainerServiceMock_Get_Call) Run(run func(ctx context.Context, projectID string, containerID string)) *NetworkContainerServiceMock_Get_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string)) + }) + return _c +} + +func (_c *NetworkContainerServiceMock_Get_Call) Return(_a0 *networkcontainer.NetworkContainer, _a1 error) *NetworkContainerServiceMock_Get_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *NetworkContainerServiceMock_Get_Call) RunAndReturn(run func(context.Context, string, string) (*networkcontainer.NetworkContainer, error)) *NetworkContainerServiceMock_Get_Call { + _c.Call.Return(run) + return _c +} + +// Update provides a mock function with given fields: ctx, projectID, containerID, cfg +func (_m *NetworkContainerServiceMock) Update(ctx context.Context, projectID string, containerID string, cfg *networkcontainer.NetworkContainerConfig) (*networkcontainer.NetworkContainer, error) { + ret := 
_m.Called(ctx, projectID, containerID, cfg) + + if len(ret) == 0 { + panic("no return value specified for Update") + } + + var r0 *networkcontainer.NetworkContainer + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, *networkcontainer.NetworkContainerConfig) (*networkcontainer.NetworkContainer, error)); ok { + return rf(ctx, projectID, containerID, cfg) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, *networkcontainer.NetworkContainerConfig) *networkcontainer.NetworkContainer); ok { + r0 = rf(ctx, projectID, containerID, cfg) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*networkcontainer.NetworkContainer) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, *networkcontainer.NetworkContainerConfig) error); ok { + r1 = rf(ctx, projectID, containerID, cfg) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NetworkContainerServiceMock_Update_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Update' +type NetworkContainerServiceMock_Update_Call struct { + *mock.Call +} + +// Update is a helper method to define mock.On call +// - ctx context.Context +// - projectID string +// - containerID string +// - cfg *networkcontainer.NetworkContainerConfig +func (_e *NetworkContainerServiceMock_Expecter) Update(ctx interface{}, projectID interface{}, containerID interface{}, cfg interface{}) *NetworkContainerServiceMock_Update_Call { + return &NetworkContainerServiceMock_Update_Call{Call: _e.mock.On("Update", ctx, projectID, containerID, cfg)} +} + +func (_c *NetworkContainerServiceMock_Update_Call) Run(run func(ctx context.Context, projectID string, containerID string, cfg *networkcontainer.NetworkContainerConfig)) *NetworkContainerServiceMock_Update_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(*networkcontainer.NetworkContainerConfig)) + }) + return _c +} + +func (_c *NetworkContainerServiceMock_Update_Call) Return(_a0 *networkcontainer.NetworkContainer, _a1 error) *NetworkContainerServiceMock_Update_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *NetworkContainerServiceMock_Update_Call) RunAndReturn(run func(context.Context, string, string, *networkcontainer.NetworkContainerConfig) (*networkcontainer.NetworkContainer, error)) *NetworkContainerServiceMock_Update_Call { + _c.Call.Return(run) + return _c +} + +// NewNetworkContainerServiceMock creates a new instance of NetworkContainerServiceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewNetworkContainerServiceMock(t interface { + mock.TestingT + Cleanup(func()) +}) *NetworkContainerServiceMock { + mock := &NetworkContainerServiceMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/translation/networkcontainer/conversion.go b/internal/translation/networkcontainer/conversion.go new file mode 100644 index 0000000000..52b281792b --- /dev/null +++ b/internal/translation/networkcontainer/conversion.go @@ -0,0 +1,75 @@ +package networkcontainer + +import ( + "go.mongodb.org/atlas-sdk/v20231115008/admin" + + akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/provider" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/status" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/pointer" +) + +type NetworkContainerConfig struct { + Provider string + akov2.AtlasNetworkContainerConfig +} + +type NetworkContainer struct { + NetworkContainerConfig + ID string + Provisioned bool +} + +func NewNetworkContainerConfig(provider string, config *akov2.AtlasNetworkContainerConfig) *NetworkContainerConfig { + return &NetworkContainerConfig{ + Provider: provider, + AtlasNetworkContainerConfig: *config, + } +} + +func ApplyNetworkContainerStatus(containerStatus *status.AtlasNetworkContainerStatus, container *NetworkContainer) { + containerStatus.ID = container.ID + containerStatus.Provisioned = container.Provisioned +} + +func toAtlas(container *NetworkContainer) *admin.CloudProviderContainer { + cpc := toAtlasConfig(&container.NetworkContainerConfig) + cpc.Id = pointer.SetOrNil(container.ID, "") + return cpc +} + +func toAtlasConfig(cfg *NetworkContainerConfig) *admin.CloudProviderContainer { + cpc := &admin.CloudProviderContainer{ + ProviderName: pointer.SetOrNil(cfg.Provider, ""), + AtlasCidrBlock: pointer.SetOrNil(cfg.CIDRBlock, ""), + } + if cpc.GetProviderName() == string(provider.ProviderAWS) { + cpc.RegionName = pointer.SetOrNil(cfg.Region, "") + } else { + cpc.Region = pointer.SetOrNil(cfg.Region, "") + } + return cpc +} + +func fromAtlas(container *admin.CloudProviderContainer) *NetworkContainer { + pc := fromAtlasNoStatus(container) + pc.Provisioned = container.GetProvisioned() + return pc +} + +func fromAtlasNoStatus(container *admin.CloudProviderContainer) *NetworkContainer { + region := container.GetRegion() + if container.GetProviderName() == string(provider.ProviderAWS) { + region = container.GetRegionName() + } + return &NetworkContainer{ + NetworkContainerConfig: NetworkContainerConfig{ + Provider: container.GetProviderName(), + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + CIDRBlock: container.GetAtlasCidrBlock(), + Region: region, + }, + }, + ID: container.GetId(), + } +} diff --git a/internal/translation/networkcontainer/conversion_test.go b/internal/translation/networkcontainer/conversion_test.go new file mode 100644 index 0000000000..ee5fb3b160 --- /dev/null +++ b/internal/translation/networkcontainer/conversion_test.go @@ -0,0 +1,37 @@ +package networkcontainer + +import ( + "fmt" + "testing" + + gofuzz "github.com/google/gofuzz" + "github.com/stretchr/testify/assert" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/provider" +) + +const fuzzIterations = 100 + +var providerNames = []string{ + string(provider.ProviderAWS), + string(provider.ProviderAzure), + string(provider.ProviderGCP), +} + +func FuzzConvertContainer(f *testing.F) { + for i := uint(0); i < fuzzIterations; i++ { + 
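+		// Seed the corpus with deterministic byte slices paired with an index;
+		// the fuzz body derives the container fields from the bytes and picks
+		// the provider name from the index.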
f.Add(([]byte)(fmt.Sprintf("seed sample %x", i)), i)
+	}
+	f.Fuzz(func(t *testing.T, data []byte, index uint) {
+		containerData := NetworkContainer{}
+		gofuzz.NewFromGoFuzz(data).Fuzz(&containerData)
+		containerData.Provider = providerNames[index%3]
+		cleanupContainer(&containerData)
+		result := fromAtlas(toAtlas(&containerData))
+		assert.Equal(t, &containerData, result, "failed for index=%d", index)
+	})
+}
+
+func cleanupContainer(container *NetworkContainer) {
+	container.AtlasNetworkContainerConfig.ID = ""
+}
diff --git a/internal/translation/networkcontainer/networkcontainer.go b/internal/translation/networkcontainer/networkcontainer.go
new file mode 100644
index 0000000000..3c2fe8f3d8
--- /dev/null
+++ b/internal/translation/networkcontainer/networkcontainer.go
@@ -0,0 +1,117 @@
+package networkcontainer
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+
+	"go.mongodb.org/atlas-sdk/v20231115008/admin"
+
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/provider"
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlas"
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/paging"
+)
+
+var (
+	// ErrNotFound means a resource is missing
+	ErrNotFound = errors.New("not found")
+
+	// ErrContainerInUse is a failure to remove a container that is still in use
+	ErrContainerInUse = errors.New("container still in use")
+
+	// ErrAmbigousFind fails when a find result is ambiguous,
+	// usually more than one result was found when either one or none was expected
+	ErrAmbigousFind = errors.New("ambiguous find results")
+)
+
+type NetworkContainerService interface {
+	Create(ctx context.Context, projectID string, cfg *NetworkContainerConfig) (*NetworkContainer, error)
+	Get(ctx context.Context, projectID, containerID string) (*NetworkContainer, error)
+	Find(ctx context.Context, projectID string, cfg *NetworkContainerConfig) (*NetworkContainer, error)
+	Update(ctx context.Context, projectID, containerID string, cfg *NetworkContainerConfig) (*NetworkContainer, error)
+	Delete(ctx context.Context, projectID, containerID string) error
+}
+
+type networkContainerService struct {
+	peeringAPI admin.NetworkPeeringApi
+}
+
+func NewNetworkContainerServiceFromClientSet(clientSet *atlas.ClientSet) NetworkContainerService {
+	return NewNetworkContainerService(clientSet.SdkClient20231115008.NetworkPeeringApi)
+}
+
+func NewNetworkContainerService(peeringAPI admin.NetworkPeeringApi) NetworkContainerService {
+	return &networkContainerService{peeringAPI: peeringAPI}
+}
+
+func (np *networkContainerService) Create(ctx context.Context, projectID string, cfg *NetworkContainerConfig) (*NetworkContainer, error) {
+	newContainer, _, err := np.peeringAPI.CreatePeeringContainer(ctx, projectID, toAtlasConfig(cfg)).Execute()
+	if err != nil {
+		return nil, fmt.Errorf("failed to create peering container at project %s: %w", projectID, err)
+	}
+	return fromAtlas(newContainer), nil
+}
+
+func (np *networkContainerService) Get(ctx context.Context, projectID, containerID string) (*NetworkContainer, error) {
+	container, _, err := np.peeringAPI.GetPeeringContainer(ctx, projectID, containerID).Execute()
+	if admin.IsErrorCode(err, "CLOUD_PROVIDER_CONTAINER_NOT_FOUND") {
+		return nil, errors.Join(err, ErrNotFound)
+	}
+	if err != nil {
+		return nil, fmt.Errorf("failed to get container %s: %w", containerID, err)
+	}
+	return fromAtlas(container), nil
+}
+
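+// Find matches an existing container by provider and CIDR block; for AWS and
+// Azure the region must match as well, while GCP containers carry no region.
+// Exactly one match is expected: zero matches yield ErrNotFound and more than
+// one yields ErrAmbigousFind. A hypothetical usage sketch follows (caller
+// names are assumed, not part of this patch):
+//
+//	svc := networkcontainer.NewNetworkContainerService(peeringAPI)
+//	cfg := networkcontainer.NewNetworkContainerConfig(
+//		string(provider.ProviderAWS),
+//		&akov2.AtlasNetworkContainerConfig{Region: "US_EAST_1", CIDRBlock: "10.8.0.0/21"},
+//	)
+//	container, err := svc.Find(ctx, projectID, cfg)
+//	if errors.Is(err, networkcontainer.ErrNotFound) {
+//		container, err = svc.Create(ctx, projectID, cfg)
+//	}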
+func (np *networkContainerService) Find(ctx context.Context, projectID string, cfg *NetworkContainerConfig) (*NetworkContainer, error) {
+	atlasContainers, err := paging.ListAll(ctx, func(ctx context.Context, pageNum int) (paging.Response[admin.CloudProviderContainer], *http.Response, error) {
+		return np.peeringAPI.ListPeeringContainerByCloudProvider(ctx, projectID).ProviderName(cfg.Provider).PageNum(pageNum).Execute()
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to list containers at project %s: %w", projectID, err)
+	}
+	containers := []*NetworkContainer{}
+	for _, atlasContainer := range atlasContainers {
+		container := fromAtlas(&atlasContainer)
+		switch provider.ProviderName(cfg.Provider) {
+		case provider.ProviderGCP:
+			if container.CIDRBlock == cfg.CIDRBlock {
+				containers = append(containers, container)
+			}
+		default:
+			if container.CIDRBlock == cfg.CIDRBlock && container.Region == cfg.Region {
+				containers = append(containers, container)
+			}
+		}
+	}
+	if len(containers) < 1 {
+		return nil, ErrNotFound
+	}
+	if len(containers) > 1 {
+		return nil, ErrAmbigousFind
+	}
+	return containers[0], nil
+}
+
+func (np *networkContainerService) Update(ctx context.Context, projectID, containerID string, cfg *NetworkContainerConfig) (*NetworkContainer, error) {
+	updatedContainer, _, err := np.peeringAPI.UpdatePeeringContainer(ctx, projectID, containerID, toAtlasConfig(cfg)).Execute()
+	if err != nil {
+		return nil, fmt.Errorf("failed to update peering container %s: %w", containerID, err)
+	}
+	return fromAtlas(updatedContainer), nil
+}
+
+func (np *networkContainerService) Delete(ctx context.Context, projectID, containerID string) error {
+	_, _, err := np.peeringAPI.DeletePeeringContainer(ctx, projectID, containerID).Execute()
+	if admin.IsErrorCode(err, "CLOUD_PROVIDER_CONTAINER_NOT_FOUND") {
+		return errors.Join(err, ErrNotFound)
+	}
+	if admin.IsErrorCode(err, "CONTAINERS_IN_USE") {
+		return fmt.Errorf("failed to remove container %s as it is still in use: %w", containerID, ErrContainerInUse)
+	}
+	if err != nil {
+		return fmt.Errorf("failed to delete container: %w", err)
+	}
+	return nil
+}
diff --git a/internal/translation/networkcontainer/networkcontainer_test.go b/internal/translation/networkcontainer/networkcontainer_test.go
new file mode 100644
index 0000000000..c16e52ee65
--- /dev/null
+++ b/internal/translation/networkcontainer/networkcontainer_test.go
@@ -0,0 +1,621 @@
+package networkcontainer_test
+
+import (
+	"context"
+	"errors"
+	"testing"
+
+	"github.com/stretchr/testify/mock"
+	"go.mongodb.org/atlas-sdk/v20231115008/admin"
+	"go.mongodb.org/atlas-sdk/v20231115008/mockadmin"
+
+	"github.com/stretchr/testify/assert"
+
+	akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1"
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/provider"
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/pointer"
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/networkcontainer"
+)
+
+const (
+	testProjectID = "fake-test-project-id"
+
+	testContainerID = "fake-container-id"
+
+	testVpcID = "fake-vpc-id"
+
+	testAzureSubcriptionID = "fake-azure-subcription-id"
+
+	testVnet = "fake-vnet"
+
+	testGCPProjectID = "fake-test-project"
+
+	testNetworkName = "fake-test-network"
+)
+
+var (
+	ErrFakeFailure = errors.New("fake-failure")
+)
+
+func TestNetworkContainerCreate(t *testing.T) {
+	for _, tc := range []struct {
+		title             string
+		cfg               *networkcontainer.NetworkContainerConfig
+		api               admin.NetworkPeeringApi
+		expectedContainer *networkcontainer.NetworkContainer
+		expectedError     error
+	}{
+		{
+			title: "successful api create for AWS returns success",
+			cfg: 
&networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderAWS), + AtlasNetworkContainerConfig: testContainerConfig(), + }, + api: testCreateNetworkContainerAPI( + &admin.CloudProviderContainer{ + Id: pointer.MakePtr(testContainerID), + ProviderName: pointer.MakePtr(string(provider.ProviderAWS)), + Provisioned: pointer.MakePtr(false), + AtlasCidrBlock: pointer.MakePtr(testContainerConfig().CIDRBlock), + RegionName: pointer.MakePtr(testContainerConfig().Region), + VpcId: pointer.MakePtr(testVpcID), + }, + nil, + ), + expectedContainer: &networkcontainer.NetworkContainer{ + NetworkContainerConfig: networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderAWS), + AtlasNetworkContainerConfig: testContainerConfig(), + }, + ID: testContainerID, + }, + expectedError: nil, + }, + + { + title: "successful api create for AWS returns success without VPC ID", + cfg: &networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderAWS), + AtlasNetworkContainerConfig: testContainerConfig(), + }, + api: testCreateNetworkContainerAPI( + &admin.CloudProviderContainer{ + Id: pointer.MakePtr(testContainerID), + ProviderName: pointer.MakePtr(string(provider.ProviderAWS)), + Provisioned: pointer.MakePtr(false), + AtlasCidrBlock: pointer.MakePtr(testContainerConfig().CIDRBlock), + RegionName: pointer.MakePtr(testContainerConfig().Region), + }, + nil, + ), + expectedContainer: &networkcontainer.NetworkContainer{ + NetworkContainerConfig: networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderAWS), + AtlasNetworkContainerConfig: testContainerConfig(), + }, + ID: testContainerID, + }, + expectedError: nil, + }, + + { + title: "successful api create for Azure returns success", + cfg: &networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderAzure), + AtlasNetworkContainerConfig: testContainerConfig(), + }, + api: testCreateNetworkContainerAPI(&admin.CloudProviderContainer{ + Id: pointer.MakePtr(testContainerID), + ProviderName: pointer.MakePtr(string(provider.ProviderAzure)), + Provisioned: pointer.MakePtr(false), + AtlasCidrBlock: pointer.MakePtr(testContainerConfig().CIDRBlock), + Region: pointer.MakePtr(testContainerConfig().Region), + AzureSubscriptionId: pointer.MakePtr(string(testAzureSubcriptionID)), + VnetName: pointer.MakePtr(testVnet), + }, + nil, + ), + expectedContainer: &networkcontainer.NetworkContainer{ + NetworkContainerConfig: networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderAzure), + AtlasNetworkContainerConfig: testContainerConfig(), + }, + ID: testContainerID, + }, + expectedError: nil, + }, + + { + title: "successful api create for Azure without status updates returns success", + cfg: &networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderAzure), + AtlasNetworkContainerConfig: testContainerConfig(), + }, + api: testCreateNetworkContainerAPI(&admin.CloudProviderContainer{ + Id: pointer.MakePtr(testContainerID), + ProviderName: pointer.MakePtr(string(provider.ProviderAzure)), + Provisioned: pointer.MakePtr(false), + AtlasCidrBlock: pointer.MakePtr(testContainerConfig().CIDRBlock), + Region: pointer.MakePtr(testContainerConfig().Region), + }, + nil, + ), + expectedContainer: &networkcontainer.NetworkContainer{ + NetworkContainerConfig: networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderAzure), + AtlasNetworkContainerConfig: testContainerConfig(), + }, + ID: testContainerID, + }, + expectedError: nil, + }, + + { + title: 
"successful api create for GCP returns success", + cfg: &networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderGCP), + AtlasNetworkContainerConfig: testContainerConfig(), + }, + api: testCreateNetworkContainerAPI(&admin.CloudProviderContainer{ + Id: pointer.MakePtr(testContainerID), + ProviderName: pointer.MakePtr(string(provider.ProviderGCP)), + Provisioned: pointer.MakePtr(false), + AtlasCidrBlock: pointer.MakePtr(testContainerConfig().CIDRBlock), + GcpProjectId: pointer.MakePtr(testGCPProjectID), + NetworkName: pointer.MakePtr(testNetworkName), + }, + nil, + ), + expectedContainer: &networkcontainer.NetworkContainer{ + NetworkContainerConfig: networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderGCP), + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{CIDRBlock: "1.1.1.1/2"}, + }, + ID: testContainerID, + }, + expectedError: nil, + }, + + { + title: "successful api create for GCP without status returns success", + cfg: &networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderGCP), + AtlasNetworkContainerConfig: testContainerConfig(), + }, + api: testCreateNetworkContainerAPI(&admin.CloudProviderContainer{ + Id: pointer.MakePtr(testContainerID), + ProviderName: pointer.MakePtr(string(provider.ProviderGCP)), + Provisioned: pointer.MakePtr(false), + AtlasCidrBlock: pointer.MakePtr(testContainerConfig().CIDRBlock), + }, + nil, + ), + expectedContainer: &networkcontainer.NetworkContainer{ + NetworkContainerConfig: networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderGCP), + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{CIDRBlock: "1.1.1.1/2"}, + }, + ID: testContainerID, + }, + expectedError: nil, + }, + + { + title: "failed api create returns failure", + cfg: &networkcontainer.NetworkContainerConfig{ + Provider: "bad-provider", + AtlasNetworkContainerConfig: testContainerConfig(), + }, + api: testCreateNetworkContainerAPI(nil, ErrFakeFailure), + expectedContainer: nil, + expectedError: ErrFakeFailure, + }, + } { + ctx := context.Background() + t.Run(tc.title, func(t *testing.T) { + s := networkcontainer.NewNetworkContainerService(tc.api) + container, err := s.Create(ctx, testProjectID, tc.cfg) + assert.Equal(t, tc.expectedContainer, container) + assert.ErrorIs(t, err, tc.expectedError) + }) + } +} + +func TestNetworkContainerGet(t *testing.T) { + for _, tc := range []struct { + title string + api admin.NetworkPeeringApi + expectedContainer *networkcontainer.NetworkContainer + expectedError error + }{ + { + title: "successful api get returns success", + api: testGetNetworkContainerAPI( + &admin.CloudProviderContainer{ + Id: pointer.MakePtr(testContainerID), + ProviderName: pointer.MakePtr(string(provider.ProviderAWS)), + Provisioned: pointer.MakePtr(false), + AtlasCidrBlock: pointer.MakePtr(testContainerConfig().CIDRBlock), + RegionName: pointer.MakePtr(testContainerConfig().Region), + VpcId: pointer.MakePtr(testVpcID), + }, + nil, + ), + expectedContainer: &networkcontainer.NetworkContainer{ + NetworkContainerConfig: networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderAWS), + AtlasNetworkContainerConfig: testContainerConfig(), + }, + ID: testContainerID, + }, + expectedError: nil, + }, + + { + title: "not found api get returns wrapped not found error", + api: testGetNetworkContainerAPI(nil, testAPIError("CLOUD_PROVIDER_CONTAINER_NOT_FOUND")), + expectedContainer: nil, + expectedError: networkcontainer.ErrNotFound, + }, + + { + title: "other api 
get failure returns wrapped error", + api: testGetNetworkContainerAPI(nil, ErrFakeFailure), + expectedContainer: nil, + expectedError: ErrFakeFailure, + }, + } { + ctx := context.Background() + t.Run(tc.title, func(t *testing.T) { + s := networkcontainer.NewNetworkContainerService(tc.api) + container, err := s.Get(ctx, testProjectID, testContainerID) + assert.Equal(t, tc.expectedContainer, container) + assert.ErrorIs(t, err, tc.expectedError) + }) + } +} + +func TestNetworkContainerFind(t *testing.T) { + for _, tc := range []struct { + title string + cfg *networkcontainer.NetworkContainerConfig + api admin.NetworkPeeringApi + expectedContainer *networkcontainer.NetworkContainer + expectedError error + }{ + { + title: "successful find returns success", + cfg: &networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderAWS), + AtlasNetworkContainerConfig: testContainerConfig(), + }, + api: testFindNetworkContainerAPI( + []admin.CloudProviderContainer{ + { + Id: pointer.MakePtr(testContainerID), + ProviderName: pointer.MakePtr(string(provider.ProviderAWS)), + Provisioned: pointer.MakePtr(false), + AtlasCidrBlock: pointer.MakePtr(testContainerConfig().CIDRBlock), + RegionName: pointer.MakePtr(testContainerConfig().Region), + VpcId: pointer.MakePtr(testVpcID), + }, + }, + nil, + ), + expectedContainer: &networkcontainer.NetworkContainer{ + NetworkContainerConfig: networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderAWS), + AtlasNetworkContainerConfig: testContainerConfig(), + }, + ID: testContainerID, + }, + expectedError: nil, + }, + + { + title: "find fails other error", + cfg: &networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderAWS), + AtlasNetworkContainerConfig: testContainerConfig(), + }, + api: testFindNetworkContainerAPI( + nil, + ErrFakeFailure, + ), + expectedContainer: nil, + expectedError: ErrFakeFailure, + }, + + { + title: "find fails not found", + cfg: &networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderAWS), + AtlasNetworkContainerConfig: testContainerConfig(), + }, + api: testFindNetworkContainerAPI( + []admin.CloudProviderContainer{}, + nil, + ), + expectedContainer: nil, + expectedError: networkcontainer.ErrNotFound, + }, + + { + title: "successful find on GCP", + cfg: &networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderGCP), + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + CIDRBlock: "18.18.192.0/18", + }, + }, + api: testFindNetworkContainerAPI( + []admin.CloudProviderContainer{ + { + Id: pointer.MakePtr(testContainerID), + ProviderName: pointer.MakePtr(string(provider.ProviderGCP)), + Provisioned: pointer.MakePtr(false), + AtlasCidrBlock: pointer.MakePtr("18.18.192.0/18"), + GcpProjectId: pointer.MakePtr(testGCPProjectID), + NetworkName: pointer.MakePtr(testNetworkName), + }, + }, + nil, + ), + expectedContainer: &networkcontainer.NetworkContainer{ + NetworkContainerConfig: networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderGCP), + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + CIDRBlock: "18.18.192.0/18", + }, + }, + ID: testContainerID, + }, + expectedError: nil, + }, + + { + title: "successful find on Azure", + cfg: &networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderAzure), + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + CIDRBlock: "11.11.0.0/16", + Region: "US_EAST_2", + }, + }, + api: testFindNetworkContainerAPI( + 
[]admin.CloudProviderContainer{ + { + Id: pointer.MakePtr(testContainerID), + ProviderName: pointer.MakePtr(string(provider.ProviderAzure)), + Provisioned: pointer.MakePtr(false), + AtlasCidrBlock: pointer.MakePtr("11.11.0.0/16"), + Region: pointer.MakePtr("US_EAST_2"), + }, + }, + nil, + ), + expectedContainer: &networkcontainer.NetworkContainer{ + NetworkContainerConfig: networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderAzure), + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + CIDRBlock: "11.11.0.0/16", + Region: "US_EAST_2", + }, + }, + ID: testContainerID, + }, + expectedError: nil, + }, + + { + title: "not found on Azure", + cfg: &networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderAzure), + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + CIDRBlock: "11.11.0.0/16", + Region: "US_CENTRAL_5", + }, + }, + api: testFindNetworkContainerAPI( + []admin.CloudProviderContainer{ + { + Id: pointer.MakePtr(testContainerID), + ProviderName: pointer.MakePtr(string(provider.ProviderAzure)), + Provisioned: pointer.MakePtr(false), + AtlasCidrBlock: pointer.MakePtr("11.11.0.0/16"), + Region: pointer.MakePtr("US_EAST_2"), + }, + }, + nil, + ), + expectedContainer: nil, + expectedError: networkcontainer.ErrNotFound, + }, + } { + ctx := context.Background() + t.Run(tc.title, func(t *testing.T) { + s := networkcontainer.NewNetworkContainerService(tc.api) + container, err := s.Find(ctx, testProjectID, tc.cfg) + assert.Equal(t, tc.expectedContainer, container) + assert.ErrorIs(t, err, tc.expectedError) + }) + } +} + +func TestNetworkContainerUpdate(t *testing.T) { + for _, tc := range []struct { + title string + cfg *networkcontainer.NetworkContainerConfig + api admin.NetworkPeeringApi + expectedContainer *networkcontainer.NetworkContainer + expectedError error + }{ + { + title: "successful api update returns success", + cfg: &networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderAWS), + AtlasNetworkContainerConfig: testContainerConfig(), + }, + api: testUpdateNetworkContainerAPI( + &admin.CloudProviderContainer{ + Id: pointer.MakePtr(testContainerID), + ProviderName: pointer.MakePtr(string(provider.ProviderAWS)), + Provisioned: pointer.MakePtr(false), + AtlasCidrBlock: pointer.MakePtr(testContainerConfig().CIDRBlock), + RegionName: pointer.MakePtr(testContainerConfig().Region), + VpcId: pointer.MakePtr(testVpcID), + }, + nil, + ), + expectedContainer: &networkcontainer.NetworkContainer{ + NetworkContainerConfig: networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderAWS), + AtlasNetworkContainerConfig: testContainerConfig(), + }, + ID: testContainerID, + }, + expectedError: nil, + }, + + { + title: "api update failure returns wrapped error", + cfg: &networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderAWS), + AtlasNetworkContainerConfig: testContainerConfig(), + }, + api: testUpdateNetworkContainerAPI(nil, ErrFakeFailure), + expectedContainer: nil, + expectedError: ErrFakeFailure, + }, + } { + ctx := context.Background() + t.Run(tc.title, func(t *testing.T) { + s := networkcontainer.NewNetworkContainerService(tc.api) + container, err := s.Update(ctx, testProjectID, testContainerID, tc.cfg) + assert.Equal(t, tc.expectedContainer, container) + assert.ErrorIs(t, err, tc.expectedError) + }) + } +} + +func TestNetworkContainerDelete(t *testing.T) { + for _, tc := range []struct { + title string + api admin.NetworkPeeringApi + expectedError error + }{ + { + 
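+			// each case drives Delete through a mocked NetworkPeeringApi and
+			// asserts the sentinel error the service is expected to wrap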
title: "successful api delete returns success", + api: testDeleteNetworkContainerAPI(nil), + expectedError: nil, + }, + + { + title: "not found api delete failure returns wrapped not found error", + api: testDeleteNetworkContainerAPI(testAPIError("CLOUD_PROVIDER_CONTAINER_NOT_FOUND")), + expectedError: networkcontainer.ErrNotFound, + }, + + { + title: "container in api delete failure returns wrapped container in use", + api: testDeleteNetworkContainerAPI(testAPIError("CONTAINERS_IN_USE")), + expectedError: networkcontainer.ErrContainerInUse, + }, + + { + title: "other api get failure returns wrapped error", + api: testDeleteNetworkContainerAPI(ErrFakeFailure), + expectedError: ErrFakeFailure, + }, + } { + ctx := context.Background() + t.Run(tc.title, func(t *testing.T) { + s := networkcontainer.NewNetworkContainerService(tc.api) + err := s.Delete(ctx, testProjectID, testContainerID) + assert.ErrorIs(t, err, tc.expectedError) + }) + } +} + +func testContainerConfig() akov2.AtlasNetworkContainerConfig { + return akov2.AtlasNetworkContainerConfig{ + Region: "sample-region", + CIDRBlock: "1.1.1.1/2", + } +} + +func testCreateNetworkContainerAPI(apiContainer *admin.CloudProviderContainer, err error) admin.NetworkPeeringApi { + var apiMock mockadmin.NetworkPeeringApi + + apiMock.EXPECT().CreatePeeringContainer( + mock.Anything, testProjectID, mock.Anything, + ).Return(admin.CreatePeeringContainerApiRequest{ApiService: &apiMock}) + + apiMock.EXPECT().CreatePeeringContainerExecute( + mock.AnythingOfType("admin.CreatePeeringContainerApiRequest"), + ).Return(apiContainer, nil, err) + return &apiMock +} + +func testGetNetworkContainerAPI(apiContainer *admin.CloudProviderContainer, err error) admin.NetworkPeeringApi { + var apiMock mockadmin.NetworkPeeringApi + + apiMock.EXPECT().GetPeeringContainer( + mock.Anything, testProjectID, mock.Anything, + ).Return(admin.GetPeeringContainerApiRequest{ApiService: &apiMock}) + + apiMock.EXPECT().GetPeeringContainerExecute( + mock.AnythingOfType("admin.GetPeeringContainerApiRequest"), + ).Return(apiContainer, nil, err) + return &apiMock +} + +func testFindNetworkContainerAPI(apiContainers []admin.CloudProviderContainer, err error) admin.NetworkPeeringApi { + var apiMock mockadmin.NetworkPeeringApi + + apiMock.EXPECT().ListPeeringContainerByCloudProvider(mock.Anything, testProjectID).Return( + admin.ListPeeringContainerByCloudProviderApiRequest{ApiService: &apiMock}, + ) + + results := admin.PaginatedCloudProviderContainer{ + Results: &apiContainers, + } + apiMock.EXPECT().ListPeeringContainerByCloudProviderExecute( + mock.AnythingOfType("admin.ListPeeringContainerByCloudProviderApiRequest"), + ).Return(&results, nil, err) + return &apiMock +} + +func testAPIError(code string) error { + err := &admin.GenericOpenAPIError{} + err.SetModel(admin.ApiError{ + ErrorCode: pointer.MakePtr(code), + }) + return err +} + +func testUpdateNetworkContainerAPI(apiContainer *admin.CloudProviderContainer, err error) admin.NetworkPeeringApi { + var apiMock mockadmin.NetworkPeeringApi + + apiMock.EXPECT().UpdatePeeringContainer( + mock.Anything, testProjectID, testContainerID, mock.Anything, + ).Return(admin.UpdatePeeringContainerApiRequest{ApiService: &apiMock}) + + apiMock.EXPECT().UpdatePeeringContainerExecute( + mock.AnythingOfType("admin.UpdatePeeringContainerApiRequest"), + ).Return(apiContainer, nil, err) + return &apiMock +} + +func testDeleteNetworkContainerAPI(err error) admin.NetworkPeeringApi { + var apiMock mockadmin.NetworkPeeringApi + + 
+func testDeleteNetworkContainerAPI(err error) admin.NetworkPeeringApi {
+	var apiMock mockadmin.NetworkPeeringApi
+
+	apiMock.EXPECT().DeletePeeringContainer(
+		mock.Anything, testProjectID, testContainerID,
+	).Return(admin.DeletePeeringContainerApiRequest{ApiService: &apiMock})
+
+	apiMock.EXPECT().DeletePeeringContainerExecute(
+		mock.AnythingOfType("admin.DeletePeeringContainerApiRequest"),
+	).Return(nil, nil, err)
+	return &apiMock
+}
diff --git a/test/e2e/network_container_controller_test.go b/test/e2e/network_container_controller_test.go
new file mode 100644
index 0000000000..9e4874a459
--- /dev/null
+++ b/test/e2e/network_container_controller_test.go
@@ -0,0 +1,251 @@
+package e2e_test
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/types"
+
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/api"
+	akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1"
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/common"
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/provider"
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/networkcontainer"
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e/actions"
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e/api/atlas"
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e/config"
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e/data"
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e/model"
+)
+
+const (
+	createMeID = "create-me"
+)
+
+var _ = Describe("NetworkContainerController", Label("networkcontainer-controller"), func() {
+	var testData *model.TestDataProvider
+
+	_ = BeforeEach(OncePerOrdered, func() {
+		checkUpAWSEnvironment()
+		checkUpAzureEnvironment()
+		checkNSetUpGCPEnvironment()
+	})
+
+	_ = AfterEach(func() {
+		GinkgoWriter.Write([]byte("\n"))
+		GinkgoWriter.Write([]byte("===============================================\n"))
+		GinkgoWriter.Write([]byte("Network Container Controller Test\n"))
+		GinkgoWriter.Write([]byte("Operator namespace: " + testData.Resources.Namespace + "\n"))
+		GinkgoWriter.Write([]byte("===============================================\n"))
+		if CurrentSpecReport().Failed() {
+			Expect(actions.SaveProjectsToFile(testData.Context, testData.K8SClient, testData.Resources.Namespace)).Should(Succeed())
+		}
+		By("Delete Resources, Project with NetworkContainer", func() {
+			actions.DeleteTestDataNetworkContainers(testData)
+			actions.DeleteTestDataProject(testData)
+			actions.AfterEachFinalCleanup([]model.TestDataProvider{*testData})
+		})
+	})
+
+	DescribeTable("NetworkContainerController",
+		func(test *model.TestDataProvider, useProjectID bool, containers []*akov2.AtlasNetworkContainer) {
+			testData = test
+			actions.ProjectCreationFlow(test)
+			networkContainerControllerFlow(test, useProjectID, containers)
+		},
+		Entry("Test[networkcontainer-aws-1]: New AWS Network Container is created successfully",
+			Label("network-container-cr-aws-1"),
+			model.DataProvider(
+				"networkcontainer-cr-aws-1",
+				model.NewEmptyAtlasKeyType().UseDefaultFullAccess(),
+				40000,
+				[]func(*model.TestDataProvider){},
+			).WithProject(data.DefaultProject()),
+			false,
+			[]*akov2.AtlasNetworkContainer{
+				{
+					Spec: akov2.AtlasNetworkContainerSpec{
+						ProjectDualReference: akov2.ProjectDualReference{
+							ProjectRef: &common.ResourceRefNamespaced{Name: data.ProjectName},
+						},
+						Provider: string(provider.ProviderAWS),
+						AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{
+							Region:    "US_EAST_1",
+							CIDRBlock: "10.128.0.0/21",
+						},
+					},
+				},
+			},
+		),
+		Entry("Test[networkcontainer-azure-2]: New Azure Network Container is created successfully",
+			Label("network-container-cr-azure-2"),
+			model.DataProvider(
+				"networkcontainer-cr-azure-2",
+				model.NewEmptyAtlasKeyType().UseDefaultFullAccess(),
+				40000,
+				[]func(*model.TestDataProvider){},
+			).WithProject(data.DefaultProject()),
+			false,
+			[]*akov2.AtlasNetworkContainer{
+				{
+					Spec: akov2.AtlasNetworkContainerSpec{
+						ProjectDualReference: akov2.ProjectDualReference{
+							ProjectRef: &common.ResourceRefNamespaced{Name: data.ProjectName},
+						},
+						Provider: string(provider.ProviderAzure),
+						AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{
+							Region:    "US_EAST_2",
+							CIDRBlock: "10.128.0.0/21",
+						},
+					},
+				},
+			},
+		),
+		Entry("Test[networkcontainer-gcp-3]: New GCP Network Container is created successfully",
+			Label("network-container-cr-gcp-3"),
+			model.DataProvider(
+				"networkcontainer-cr-gcp-3",
+				model.NewEmptyAtlasKeyType().UseDefaultFullAccess(),
+				40000,
+				[]func(*model.TestDataProvider){},
+			).WithProject(data.DefaultProject()),
+			false,
+			[]*akov2.AtlasNetworkContainer{
+				{
+					Spec: akov2.AtlasNetworkContainerSpec{
+						ProjectDualReference: akov2.ProjectDualReference{
+							ProjectRef: &common.ResourceRefNamespaced{Name: data.ProjectName},
+						},
+						Provider: string(provider.ProviderGCP),
+						AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{
+							CIDRBlock: "10.128.0.0/18",
+						},
+					},
+				},
+			},
+		),
+		Entry("Test[networkcontainer-all-5]: Existing Network Containers from all providers with direct ids are taken over successfully",
+			Label("network-container-cr-all-5"),
+			model.DataProvider(
+				"networkcontainer-cr-all-5",
+				model.NewEmptyAtlasKeyType().UseDefaultFullAccess(),
+				40000,
+				[]func(*model.TestDataProvider){},
+			).WithProject(data.DefaultProject()),
+			true,
+			[]*akov2.AtlasNetworkContainer{
+				{
+					Spec: akov2.AtlasNetworkContainerSpec{
+						ProjectDualReference: akov2.ProjectDualReference{
+							ProjectRef: &common.ResourceRefNamespaced{Name: data.ProjectName},
+						},
+						Provider: string(provider.ProviderAWS),
+						AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{
+							ID:        createMeID,
+							Region:    "US_EAST_1",
+							CIDRBlock: "10.128.0.0/21",
+						},
+					},
+				},
+				{
+					Spec: akov2.AtlasNetworkContainerSpec{
+						ProjectDualReference: akov2.ProjectDualReference{
+							ProjectRef: &common.ResourceRefNamespaced{Name: data.ProjectName},
+						},
+						Provider: string(provider.ProviderAzure),
+						AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{
+							ID:        createMeID,
+							Region:    "US_EAST_2",
+							CIDRBlock: "10.128.0.0/21",
+						},
+					},
+				},
+				{
+					Spec: akov2.AtlasNetworkContainerSpec{
+						ProjectDualReference: akov2.ProjectDualReference{
+							ProjectRef: &common.ResourceRefNamespaced{Name: data.ProjectName},
+						},
+						Provider: string(provider.ProviderGCP),
+						AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{
+							ID:        createMeID,
+							CIDRBlock: "10.128.0.0/18",
+						},
+					},
+				},
+			},
+		),
+	)
+})
+
+func networkContainerControllerFlow(userData *model.TestDataProvider, useProjectID bool, containers []*akov2.AtlasNetworkContainer) {
+	By("Create network containers from CRs", func() {
+		atlasClient, err := atlas.AClient()
+		Expect(err).To(Succeed())
+		projectID := ""
+		if useProjectID {
+			createdProject := akov2.AtlasProject{}
+			key := types.NamespacedName{Name: userData.Project.Name, Namespace: userData.Project.Namespace}
+			Expect(userData.K8SClient.Get(userData.Context, key, &createdProject)).Should(Succeed())
+			projectID = createdProject.Status.ID
+		}
+		for i, container := range containers {
+			if 
useProjectID {
+				container.Spec.ExternalProjectRef = &akov2.ExternalProjectReference{
+					ID: projectID,
+				}
+				container.Spec.ProjectRef = nil
+				container.Spec.ConnectionSecret = &api.LocalObjectReference{
+					Name: config.DefaultOperatorGlobalKey,
+				}
+			} else {
+				container.Spec.ProjectRef = &common.ResourceRefNamespaced{
+					Name:      userData.Project.Name,
+					Namespace: userData.Project.Namespace,
+				}
+				container.Spec.ExternalProjectRef = nil
+			}
+			container.Name = fmt.Sprintf("%s-item-%d", userData.Prefix, i)
+			container.Namespace = userData.Project.Namespace
+			if container.Spec.ID == createMeID {
+				id, err := createTestContainer(userData.Context, atlasClient, userData.Project.Status.ID, &container.Spec)
+				Expect(err).To(Succeed())
+				container.Spec.ID = id
+			}
+			Expect(userData.K8SClient.Create(userData.Context, container)).Should(Succeed())
+		}
+	})
+
+	By("Check network container CRs to be Ready", func() {
+		for _, container := range containers {
+			key := types.NamespacedName{Name: container.Name, Namespace: container.Namespace}
+			Eventually(func(g Gomega) bool {
+				Expect(userData.K8SClient.Get(userData.Context, key, container)).Should(Succeed())
+				return networkContainerReady(container)
+			}).WithTimeout(2 * time.Minute).WithPolling(5 * time.Second).Should(BeTrue())
+		}
+	})
+}
+
+func networkContainerReady(container *akov2.AtlasNetworkContainer) bool {
+	for _, condition := range container.Status.Conditions {
+		if condition.Type == api.ReadyType && condition.Status == v1.ConditionTrue {
+			return true
+		}
+	}
+	return false
+}
+
+func createTestContainer(ctx context.Context, atlasClient atlas.Atlas, projectID string, container *akov2.AtlasNetworkContainerSpec) (string, error) {
+	service := networkcontainer.NewNetworkContainerService(atlasClient.Client.NetworkPeeringApi)
+	cfg := networkcontainer.NewNetworkContainerConfig(container.Provider, &container.AtlasNetworkContainerConfig)
+	createdContainer, err := service.Create(ctx, projectID, cfg)
+	if err != nil {
+		return "", fmt.Errorf("failed to pre-provision test container for %s config %v: %w",
+			container.Provider, container.AtlasNetworkContainerConfig, err)
+	}
+	return createdContainer.ID, nil
+}
diff --git a/test/helper/cel/cel.go b/test/helper/cel/cel.go
index 5ab3af7ce8..6d3ae4e0b0 100644
--- a/test/helper/cel/cel.go
+++ b/test/helper/cel/cel.go
@@ -144,3 +144,14 @@ func findCEL(t *testing.T, s *schema.Structural, root bool, pth *field.Path) (ma
 
 	return ret, nil
 }
+
+func ErrorListAsStrings(errs field.ErrorList) []string {
+	if len(errs) == 0 {
+		return nil
+	}
+	errMsgs := make([]string, 0, len(errs))
+	for _, err := range errs {
+		errMsgs = append(errMsgs, err.Error())
+	}
+	return errMsgs
+}
diff --git a/test/helper/e2e/actions/steps.go b/test/helper/e2e/actions/steps.go
index 79378a2124..441af602f7 100644
--- a/test/helper/e2e/actions/steps.go
+++ b/test/helper/e2e/actions/steps.go
@@ -11,6 +11,7 @@ import (
	. 
"github.com/onsi/gomega/gstruct" "go.mongodb.org/atlas-sdk/v20231115008/admin" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -514,6 +515,24 @@ func DeleteAtlasGlobalKeyIfExist(data model.TestDataProvider) { } } +func DeleteTestDataNetworkContainers(data *model.TestDataProvider) { + By("Delete network containers", func() { + containers := &akov2.AtlasNetworkContainerList{} + Expect(data.K8SClient.List(data.Context, containers, &client.ListOptions{Namespace: data.Resources.Namespace})).Should(Succeed()) + for _, container := range containers.Items { + key := client.ObjectKey{Name: container.Name, Namespace: container.Namespace} + Expect(data.K8SClient.Delete(data.Context, &container)).Should(Succeed()) + Eventually( + func() bool { + foundContainer := &akov2.AtlasNetworkContainer{} + err := data.K8SClient.Get(data.Context, key, foundContainer) + return err != nil && errors.IsNotFound(err) + }, + ).WithTimeout(10*time.Minute).WithPolling(20*time.Second).Should(BeTrue(), "Network container should be deleted from Atlas") + } + }) +} + func AfterEachFinalCleanup(datas []model.TestDataProvider) { for i := range datas { data := datas[i]