From b7b508b59933a4a6532f895f35977b6f22e0e8fe Mon Sep 17 00:00:00 2001 From: Anuj Agrawal Date: Sun, 22 Sep 2024 20:47:48 +0530 Subject: [PATCH] Added tests for multiclusterservice controller Signed-off-by: Anuj Agrawal --- .../endpointslice_collect_controller_test.go | 387 ++++++ .../endpointslice_dispatch_controller_test.go | 905 ++++++++++++++ .../mcs_controller_test.go | 1103 +++++++++++++++++ 3 files changed, 2395 insertions(+) create mode 100644 pkg/controllers/multiclusterservice/endpointslice_collect_controller_test.go create mode 100644 pkg/controllers/multiclusterservice/endpointslice_dispatch_controller_test.go create mode 100644 pkg/controllers/multiclusterservice/mcs_controller_test.go diff --git a/pkg/controllers/multiclusterservice/endpointslice_collect_controller_test.go b/pkg/controllers/multiclusterservice/endpointslice_collect_controller_test.go new file mode 100644 index 000000000000..011f8e4ac4d8 --- /dev/null +++ b/pkg/controllers/multiclusterservice/endpointslice_collect_controller_test.go @@ -0,0 +1,387 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package multiclusterservice + +import ( + "context" + "reflect" + "sort" + "strings" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + discoveryv1 "k8s.io/api/discovery/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" + "github.com/karmada-io/karmada/pkg/util" +) + +func TestGetEventHandler(t *testing.T) { + testCases := []struct { + name string + clusterName string + existingHandler bool + }{ + { + name: "New handler", + clusterName: "cluster1", + existingHandler: false, + }, + { + name: "Existing handler", + clusterName: "cluster2", + existingHandler: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + controller := &EndpointSliceCollectController{ + eventHandlers: sync.Map{}, + worker: &mockAsyncWorker{}, + } + if tc.existingHandler { + controller.eventHandlers.Store(tc.clusterName, &mockResourceEventHandler{}) + } + handler := controller.getEventHandler(tc.clusterName) + assert.NotNil(t, handler, "Handler should not be nil") + storedHandler, exists := controller.eventHandlers.Load(tc.clusterName) + assert.True(t, exists, "Handler should be stored in eventHandlers") + assert.Equal(t, handler, storedHandler, "Stored handler should match returned handler") + if !tc.existingHandler { + assert.IsType(t, &cache.ResourceEventHandlerFuncs{}, handler, "New handler should be of type *cache.ResourceEventHandlerFuncs") + } else { + assert.IsType(t, &mockResourceEventHandler{}, handler, "Existing handler should be of type 
*mockResourceEventHandler") + } + }) + } +} + +func TestGenHandlerFuncs(t *testing.T) { + clusterName := "test-cluster" + testObj := createTestEndpointSlice("test-object", "test-namespace") + + t.Run("AddFunc", func(t *testing.T) { + mockWorker := &mockAsyncWorker{} + controller := &EndpointSliceCollectController{ + worker: mockWorker, + } + addFunc := controller.genHandlerAddFunc(clusterName) + addFunc(testObj) + assert.Equal(t, 1, mockWorker.addCount, "Add function should be called once") + }) + + t.Run("UpdateFunc", func(t *testing.T) { + mockWorker := &mockAsyncWorker{} + controller := &EndpointSliceCollectController{ + worker: mockWorker, + } + updateFunc := controller.genHandlerUpdateFunc(clusterName) + newObj := createTestEndpointSlice("test-object", "test-namespace") + newObj.SetLabels(map[string]string{"new-label": "new-value"}) + + updateFunc(testObj, newObj) + assert.Equal(t, 1, mockWorker.addCount, "Update function should be called once when objects are different") + + updateFunc(testObj, testObj) + assert.Equal(t, 1, mockWorker.addCount, "Update function should not be called when objects are the same") + }) + + t.Run("DeleteFunc", func(t *testing.T) { + mockWorker := &mockAsyncWorker{} + controller := &EndpointSliceCollectController{ + worker: mockWorker, + } + deleteFunc := controller.genHandlerDeleteFunc(clusterName) + deleteFunc(testObj) + assert.Equal(t, 1, mockWorker.addCount, "Delete function should be called once") + + deletedObj := cache.DeletedFinalStateUnknown{Obj: testObj} + deleteFunc(deletedObj) + assert.Equal(t, 2, mockWorker.addCount, "Delete function should be called for DeletedFinalStateUnknown") + }) +} + +func TestGetEndpointSliceWorkMeta(t *testing.T) { + testCases := []struct { + name string + existingWork *workv1alpha1.Work + endpointSlice *unstructured.Unstructured + expectedMeta metav1.ObjectMeta + expectedError bool + }{ + { + name: "New work for EndpointSlice", + endpointSlice: createEndpointSliceForTest("test-eps", "default", "test-service", false), + expectedMeta: metav1.ObjectMeta{ + Name: "endpointslice-test-eps-default", + Namespace: "test-cluster", + Labels: map[string]string{ + util.MultiClusterServiceNamespaceLabel: "default", + util.MultiClusterServiceNameLabel: "test-service", + util.PropagationInstruction: util.PropagationInstructionSuppressed, + util.EndpointSliceWorkManagedByLabel: util.MultiClusterServiceKind, + }, + }, + }, + { + name: "Existing work for EndpointSlice", + existingWork: createExistingWork("endpointslice-test-eps-default", "test-cluster", "ExistingController"), + endpointSlice: createEndpointSliceForTest("test-eps", "default", "test-service", false), + expectedMeta: metav1.ObjectMeta{ + Name: "endpointslice-test-eps-default", + Namespace: "test-cluster", + Labels: map[string]string{ + util.MultiClusterServiceNamespaceLabel: "default", + util.MultiClusterServiceNameLabel: "test-service", + util.PropagationInstruction: util.PropagationInstructionSuppressed, + util.EndpointSliceWorkManagedByLabel: "ExistingController.MultiClusterService", + }, + Finalizers: []string{util.MCSEndpointSliceDispatchControllerFinalizer}, + }, + }, + { + name: "Nil EndpointSlice", + endpointSlice: nil, + expectedError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + fakeClient := createFakeClient(tc.existingWork) + testFunc := func() (metav1.ObjectMeta, error) { + return getEndpointSliceWorkMeta(context.TODO(), fakeClient, "test-cluster", "endpointslice-test-eps-default", tc.endpointSlice) + } + if 
tc.expectedError { + assert.Panics(t, func() { + _, err := testFunc() + require.Error(t, err) + }, "Expected a panic for nil EndpointSlice") + } else { + meta, err := testFunc() + require.NoError(t, err) + assert.Equal(t, tc.expectedMeta.Name, meta.Name) + assert.Equal(t, tc.expectedMeta.Namespace, meta.Namespace) + assert.Equal(t, tc.expectedMeta.Finalizers, meta.Finalizers) + assert.True(t, compareLabels(meta.Labels, tc.expectedMeta.Labels), + "Labels do not match. Expected: %v, Got: %v", tc.expectedMeta.Labels, meta.Labels) + } + }) + } +} + +func TestCleanProviderClustersEndpointSliceWork(t *testing.T) { + testCases := []struct { + name string + existingWork *workv1alpha1.Work + expectedWork *workv1alpha1.Work + expectedDelete bool + }{ + { + name: "Work managed by multiple controllers", + existingWork: &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "test-cluster", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-service", + util.MultiClusterServiceNamespaceLabel: "default", + util.EndpointSliceWorkManagedByLabel: "MultiClusterService.OtherController", + }, + }, + }, + expectedWork: &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "test-cluster", + Labels: map[string]string{ + util.EndpointSliceWorkManagedByLabel: "OtherController", + }, + }, + }, + expectedDelete: false, + }, + { + name: "Work managed only by MultiClusterService", + existingWork: &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "test-cluster", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-service", + util.MultiClusterServiceNamespaceLabel: "default", + util.EndpointSliceWorkManagedByLabel: "MultiClusterService", + }, + }, + }, + expectedWork: nil, + expectedDelete: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + scheme := setupSchemeEndpointCollect() + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tc.existingWork).Build() + err := cleanProviderClustersEndpointSliceWork(context.TODO(), fakeClient, tc.existingWork) + assert.NoError(t, err, "Unexpected error in cleanProviderClustersEndpointSliceWork") + + if tc.expectedDelete { + err = fakeClient.Get(context.TODO(), types.NamespacedName{Name: tc.existingWork.Name, Namespace: tc.existingWork.Namespace}, &workv1alpha1.Work{}) + assert.Error(t, err, "Expected Work to be deleted, but it still exists") + assert.True(t, apierrors.IsNotFound(err), "Expected NotFound error, got %v", err) + } else { + updatedWork := &workv1alpha1.Work{} + err = fakeClient.Get(context.TODO(), types.NamespacedName{Name: tc.existingWork.Name, Namespace: tc.existingWork.Namespace}, updatedWork) + assert.NoError(t, err, "Failed to get updated Work") + assert.True(t, compareLabels(updatedWork.Labels, tc.expectedWork.Labels), + "Labels mismatch. 
Expected %v, but got %v", tc.expectedWork.Labels, updatedWork.Labels) + } + }) + } +} + +// Helper Functions + +// Helper function to set up a scheme for EndpointSlice collection tests +func setupSchemeEndpointCollect() *runtime.Scheme { + scheme := runtime.NewScheme() + _ = workv1alpha1.Install(scheme) + _ = discoveryv1.AddToScheme(scheme) + return scheme +} + +// Helper function to create a test EndpointSlice +func createTestEndpointSlice(name, namespace string) *unstructured.Unstructured { + endpointSlice := &discoveryv1.EndpointSlice{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "discovery.k8s.io/v1", + Kind: "EndpointSlice", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + unstructuredObj, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(endpointSlice) + return &unstructured.Unstructured{Object: unstructuredObj} +} + +// Helper function to create an EndpointSlice for testing with specific properties +func createEndpointSliceForTest(name, namespace, serviceName string, isManaged bool) *unstructured.Unstructured { + labels := map[string]interface{}{ + discoveryv1.LabelServiceName: serviceName, + } + if isManaged { + labels[discoveryv1.LabelManagedBy] = util.EndpointSliceDispatchControllerLabelValue + } + return &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "discovery.k8s.io/v1", + "kind": "EndpointSlice", + "metadata": map[string]interface{}{ + "name": name, + "namespace": namespace, + "labels": labels, + }, + }, + } +} + +// Helper function to create an existing Work resource for testing +func createExistingWork(name, namespace, managedBy string) *workv1alpha1.Work { + return &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{ + util.EndpointSliceWorkManagedByLabel: managedBy, + }, + }, + } +} + +// Helper function to create a fake client with an optional existing Work +func createFakeClient(existingWork *workv1alpha1.Work) client.Client { + scheme := setupSchemeEndpointCollect() + objs := []client.Object{} + if existingWork != nil { + objs = append(objs, existingWork) + } + return fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build() +} + +// Helper function to compare two label maps, considering special handling for EndpointSliceWorkManagedByLabel +func compareLabels(actual, expected map[string]string) bool { + if len(actual) != len(expected) { + return false + } + for k, v := range expected { + actualV, exists := actual[k] + if !exists { + return false + } + if k == util.EndpointSliceWorkManagedByLabel { + actualParts := strings.Split(actualV, ".") + expectedParts := strings.Split(v, ".") + sort.Strings(actualParts) + sort.Strings(expectedParts) + if !reflect.DeepEqual(actualParts, expectedParts) { + return false + } + } else if actualV != v { + return false + } + } + return true +} + +// Mock implementations + +type mockAsyncWorker struct { + addCount int +} + +func (m *mockAsyncWorker) Add(_ interface{}) { + m.addCount++ +} + +func (m *mockAsyncWorker) AddAfter(_ interface{}, _ time.Duration) {} + +func (m *mockAsyncWorker) Enqueue(_ interface{}) {} + +func (m *mockAsyncWorker) Run(_ int, _ <-chan struct{}) {} + +type mockResourceEventHandler struct{} + +func (m *mockResourceEventHandler) OnAdd(_ interface{}, _ bool) {} + +func (m *mockResourceEventHandler) OnUpdate(_, _ interface{}) {} + +func (m *mockResourceEventHandler) OnDelete(_ interface{}) {} diff --git 
a/pkg/controllers/multiclusterservice/endpointslice_dispatch_controller_test.go b/pkg/controllers/multiclusterservice/endpointslice_dispatch_controller_test.go new file mode 100644 index 000000000000..ec12fff7942a --- /dev/null +++ b/pkg/controllers/multiclusterservice/endpointslice_dispatch_controller_test.go @@ -0,0 +1,905 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package multiclusterservice + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + discoveryv1 "k8s.io/api/discovery/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1" + workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" + "github.com/karmada-io/karmada/pkg/util" +) + +func TestUpdateEndpointSliceDispatched(t *testing.T) { + tests := []struct { + name string + mcs *networkingv1alpha1.MultiClusterService + status metav1.ConditionStatus + reason string + message string + expectedCondition metav1.Condition + }{ + { + name: "update status to true", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + }, + status: metav1.ConditionTrue, + reason: "EndpointSliceDispatchedSucceed", + message: "EndpointSlice are dispatched successfully", + expectedCondition: metav1.Condition{ + Type: networkingv1alpha1.EndpointSliceDispatched, + Status: metav1.ConditionTrue, + Reason: "EndpointSliceDispatchedSucceed", + Message: "EndpointSlice are dispatched successfully", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockClient := new(MockClient) + mockStatusWriter := new(MockStatusWriter) + + // Expectations Setup + mockClient.On("Status").Return(mockStatusWriter) + mockClient.On("Get", mock.Anything, mock.AnythingOfType("types.NamespacedName"), mock.AnythingOfType("*v1alpha1.MultiClusterService"), mock.Anything). + Run(func(args mock.Arguments) { + arg := args.Get(2).(*networkingv1alpha1.MultiClusterService) + *arg = *tt.mcs // Copy the input MCS to the output + }).Return(nil) + + mockStatusWriter.On("Update", mock.Anything, mock.AnythingOfType("*v1alpha1.MultiClusterService"), mock.Anything). 
+ Run(func(args mock.Arguments) { + mcs := args.Get(1).(*networkingv1alpha1.MultiClusterService) + mcs.Status.Conditions = []metav1.Condition{tt.expectedCondition} + }).Return(nil) + + c := &EndpointsliceDispatchController{ + Client: mockClient, + EventRecorder: record.NewFakeRecorder(100), + } + + err := c.updateEndpointSliceDispatched(context.Background(), tt.mcs, tt.status, tt.reason, tt.message) + assert.NoError(t, err, "updateEndpointSliceDispatched should not return an error") + + mockClient.AssertExpectations(t) + mockStatusWriter.AssertExpectations(t) + + assert.Len(t, tt.mcs.Status.Conditions, 1, "MCS should have one condition") + if len(tt.mcs.Status.Conditions) > 0 { + condition := tt.mcs.Status.Conditions[0] + assert.Equal(t, tt.expectedCondition.Type, condition.Type) + assert.Equal(t, tt.expectedCondition.Status, condition.Status) + assert.Equal(t, tt.expectedCondition.Reason, condition.Reason) + assert.Equal(t, tt.expectedCondition.Message, condition.Message) + } + }) + } +} + +func TestNewClusterFunc(t *testing.T) { + tests := []struct { + name string + existingObjs []client.Object + inputObj client.Object + expectedResult []reconcile.Request + }{ + { + name: "new cluster, matching MCS", + existingObjs: []client.Object{ + &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + ConsumerClusters: []networkingv1alpha1.ClusterSelector{ + {Name: "cluster1"}, + }, + }, + }, + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + }, + }, + inputObj: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedResult: []reconcile.Request{ + {NamespacedName: types.NamespacedName{Namespace: "karmada-es-cluster1", Name: "test-work"}}, + }, + }, + { + name: "new cluster, no matching MCS", + existingObjs: []client.Object{ + &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + ConsumerClusters: []networkingv1alpha1.ClusterSelector{ + {Name: "cluster2"}, + }, + }, + }, + }, + inputObj: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedResult: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := setupController(tt.existingObjs...) 
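+			// newClusterFunc is expected to map a Cluster event to reconcile requests
+			// for the EndpointSlice Works of every MCS that lists that cluster as a consumer.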
+ result := c.newClusterFunc()(context.Background(), tt.inputObj) + assert.Equal(t, tt.expectedResult, result) + }) + } +} + +func TestGetClusterEndpointSliceWorks(t *testing.T) { + tests := []struct { + name string + existingObjs []client.Object + mcsNamespace string + mcsName string + expectedWorks int + expectedError bool + listError error + }{ + { + name: "find matching works", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work1", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + }, + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work2", + Namespace: "karmada-es-cluster2", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + }, + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work3", + Namespace: "karmada-es-cluster3", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "other-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + }, + }, + mcsNamespace: "default", + mcsName: "test-mcs", + expectedWorks: 2, + expectedError: false, + }, + { + name: "no matching works", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work1", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "other-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + }, + }, + mcsNamespace: "default", + mcsName: "test-mcs", + expectedWorks: 0, + expectedError: false, + }, + { + name: "works in different namespace", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work1", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "test-namespace", + }, + }, + }, + }, + mcsNamespace: "test-namespace", + mcsName: "test-mcs", + expectedWorks: 1, + expectedError: false, + }, + { + name: "list error", + existingObjs: []client.Object{}, + mcsNamespace: "default", + mcsName: "test-mcs", + expectedWorks: 0, + expectedError: true, + listError: errors.New("fake list error"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := setupController(tt.existingObjs...) 
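+			// Swap in the error-injecting wrapper (fakeClient, defined at the bottom
+			// of this file) when the case simulates a failed List call.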
+ if tt.listError != nil { + c.Client = &fakeClient{Client: c.Client, listError: tt.listError} + } + works, err := c.getClusterEndpointSliceWorks(context.Background(), tt.mcsNamespace, tt.mcsName) + if tt.expectedError { + assert.Error(t, err) + assert.Nil(t, works) + } else { + assert.NoError(t, err) + assert.Len(t, works, tt.expectedWorks) + for _, work := range works { + assert.Equal(t, tt.mcsName, work.Labels[util.MultiClusterServiceNameLabel]) + assert.Equal(t, tt.mcsNamespace, work.Labels[util.MultiClusterServiceNamespaceLabel]) + } + } + }) + } +} + +func TestNewMultiClusterServiceFunc(t *testing.T) { + tests := []struct { + name string + existingObjs []client.Object + inputObj client.Object + expectedResult []reconcile.Request + }{ + { + name: "MCS with matching works", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work-1", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + }, + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work-2", + Namespace: "karmada-es-cluster2", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + Annotations: map[string]string{ + util.EndpointSliceProvisionClusterAnnotation: "cluster2", + }, + }, + }, + }, + inputObj: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + }, + expectedResult: []reconcile.Request{ + {NamespacedName: types.NamespacedName{Namespace: "karmada-es-cluster1", Name: "test-work-1"}}, + }, + }, + { + name: "MCS with no matching works", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "other-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + }, + }, + inputObj: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + }, + expectedResult: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := setupController(tt.existingObjs...) 
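+			// Works carrying EndpointSliceProvisionClusterAnnotation are dispatched copies
+			// on consumer clusters; newMultiClusterServiceFunc is expected to skip them.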
+ result := c.newMultiClusterServiceFunc()(context.Background(), tt.inputObj) + assert.Equal(t, tt.expectedResult, result) + }) + } +} + +func TestCleanOrphanDispatchedEndpointSlice(t *testing.T) { + tests := []struct { + name string + existingObjs []client.Object + mcs *networkingv1alpha1.MultiClusterService + expectedDeletes int + expectedError bool + }{ + { + name: "clean orphan works", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work1", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + Annotations: map[string]string{ + util.EndpointSliceProvisionClusterAnnotation: "provider", + }, + }, + }, + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work2", + Namespace: "karmada-es-cluster2", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + Annotations: map[string]string{ + util.EndpointSliceProvisionClusterAnnotation: "provider", + }, + }, + }, + }, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + ConsumerClusters: []networkingv1alpha1.ClusterSelector{ + {Name: "cluster1"}, + }, + }, + }, + expectedDeletes: 1, + expectedError: false, + }, + { + name: "no orphan works", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work1", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + Annotations: map[string]string{ + util.EndpointSliceProvisionClusterAnnotation: "provider", + }, + }, + }, + }, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + ConsumerClusters: []networkingv1alpha1.ClusterSelector{ + {Name: "cluster1"}, + }, + }, + }, + expectedDeletes: 0, + expectedError: false, + }, + { + name: "work without provision cluster annotation", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work1", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + }, + }, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + ConsumerClusters: []networkingv1alpha1.ClusterSelector{ + {Name: "cluster2"}, + }, + }, + }, + expectedDeletes: 0, + expectedError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := setupSchemeEndpointDispatch() + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.existingObjs...).Build() + c := &EndpointsliceDispatchController{ + Client: fakeClient, + } + err := c.cleanOrphanDispatchedEndpointSlice(context.Background(), tt.mcs) + if tt.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + // Check if the expected number of works were deleted + remainingWorks := &workv1alpha1.WorkList{} + err = fakeClient.List(context.Background(), remainingWorks, &client.ListOptions{}) + assert.NoError(t, err) + 
assert.Len(t, remainingWorks.Items, len(tt.existingObjs)-tt.expectedDeletes) + } + }) + } +} + +func TestEnsureEndpointSliceWork(t *testing.T) { + tests := []struct { + name string + mcs *networkingv1alpha1.MultiClusterService + work *workv1alpha1.Work + providerCluster string + consumerCluster string + expectedError bool + expectedWork *workv1alpha1.Work + }{ + { + name: "create new work", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + }, + work: &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "karmada-es-provider", + }, + Spec: workv1alpha1.WorkSpec{ + Workload: workv1alpha1.WorkloadTemplate{ + Manifests: []workv1alpha1.Manifest{ + { + RawExtension: runtime.RawExtension{ + Raw: []byte(`{ + "apiVersion": "discovery.k8s.io/v1", + "kind": "EndpointSlice", + "metadata": { + "name": "test-eps" + }, + "endpoints": [ + { + "addresses": ["10.0.0.1"] + } + ], + "ports": [ + { + "port": 80 + } + ] + }`), + }, + }, + }, + }, + }, + }, + providerCluster: "provider", + consumerCluster: "consumer", + expectedError: false, + expectedWork: &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "karmada-es-consumer", + Finalizers: []string{util.ExecutionControllerFinalizer}, + Annotations: map[string]string{ + util.EndpointSliceProvisionClusterAnnotation: "provider", + }, + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + Spec: workv1alpha1.WorkSpec{ + Workload: workv1alpha1.WorkloadTemplate{ + Manifests: []workv1alpha1.Manifest{ + { + RawExtension: runtime.RawExtension{ + Raw: []byte(`{ + "apiVersion": "discovery.k8s.io/v1", + "kind": "EndpointSlice", + "metadata": { + "name": "provider-test-eps", + "labels": { + "kubernetes.io/service-name": "test-mcs", + "endpointslice.kubernetes.io/managed-by": "endpointslice-dispatch-controller.karmada.io", + "karmada.io/managed": "true" + }, + "annotations": { + "endpointslice.karmada.io/provision-cluster": "provider", + "work.karmada.io/name": "test-work", + "work.karmada.io/namespace": "karmada-es-consumer", + "resourcetemplate.karmada.io/uid": "" + } + }, + "endpoints": [ + { + "addresses": ["10.0.0.1"] + } + ], + "ports": [ + { + "port": 80 + } + ] + }`), + }, + }, + }, + }, + }, + }, + }, + { + name: "empty manifest", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + }, + work: &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "karmada-es-provider", + }, + Spec: workv1alpha1.WorkSpec{ + Workload: workv1alpha1.WorkloadTemplate{ + Manifests: []workv1alpha1.Manifest{}, + }, + }, + }, + providerCluster: "provider", + consumerCluster: "consumer", + expectedError: false, + expectedWork: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := setupSchemeEndpointDispatch() + fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build() + c := &EndpointsliceDispatchController{ + Client: fakeClient, + } + + err := c.ensureEndpointSliceWork(context.Background(), tt.mcs, tt.work, tt.providerCluster, tt.consumerCluster) + + if tt.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + + if tt.expectedWork != nil { + createdWork := &workv1alpha1.Work{} + err = fakeClient.Get(context.Background(), types.NamespacedName{ + Name: tt.expectedWork.Name, + 
Namespace: tt.expectedWork.Namespace, + }, createdWork) + assert.NoError(t, err) + + assert.Equal(t, tt.expectedWork.ObjectMeta.Name, createdWork.ObjectMeta.Name) + assert.Equal(t, tt.expectedWork.ObjectMeta.Namespace, createdWork.ObjectMeta.Namespace) + assert.Equal(t, tt.expectedWork.ObjectMeta.Finalizers, createdWork.ObjectMeta.Finalizers) + assert.Equal(t, tt.expectedWork.ObjectMeta.Annotations, createdWork.ObjectMeta.Annotations) + assert.Equal(t, tt.expectedWork.ObjectMeta.Labels, createdWork.ObjectMeta.Labels) + + // Comparing manifests + assert.Equal(t, len(tt.expectedWork.Spec.Workload.Manifests), len(createdWork.Spec.Workload.Manifests)) + if len(tt.expectedWork.Spec.Workload.Manifests) > 0 { + expectedManifest := &unstructured.Unstructured{} + createdManifest := &unstructured.Unstructured{} + + err = expectedManifest.UnmarshalJSON(tt.expectedWork.Spec.Workload.Manifests[0].Raw) + assert.NoError(t, err) + err = createdManifest.UnmarshalJSON(createdWork.Spec.Workload.Manifests[0].Raw) + assert.NoError(t, err) + + assert.Equal(t, expectedManifest.GetName(), createdManifest.GetName()) + assert.Equal(t, expectedManifest.GetLabels(), createdManifest.GetLabels()) + assert.Equal(t, expectedManifest.GetAnnotations(), createdManifest.GetAnnotations()) + } + } else { + workList := &workv1alpha1.WorkList{} + err = fakeClient.List(context.Background(), workList) + assert.NoError(t, err) + assert.Empty(t, workList.Items) + } + } + }) + } +} + +func TestCleanupEndpointSliceFromConsumerClusters(t *testing.T) { + tests := []struct { + name string + existingObjs []client.Object + inputWork *workv1alpha1.Work + expectedErr bool + }{ + { + name: "cleanup works in consumer clusters", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work-1", + Namespace: "karmada-es-cluster1", + Annotations: map[string]string{ + util.EndpointSliceProvisionClusterAnnotation: "cluster1", + }, + }, + }, + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work-2", + Namespace: "karmada-es-cluster2", + Annotations: map[string]string{ + util.EndpointSliceProvisionClusterAnnotation: "cluster1", + }, + }, + }, + }, + inputWork: &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "karmada-es-cluster1", + Finalizers: []string{ + util.MCSEndpointSliceDispatchControllerFinalizer, + }, + }, + }, + expectedErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := setupSchemeEndpointDispatch() + c := &EndpointsliceDispatchController{ + Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(append(tt.existingObjs, tt.inputWork)...).Build(), + } + + err := c.cleanupEndpointSliceFromConsumerClusters(context.Background(), tt.inputWork) + if tt.expectedErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + + // Check if works are deleted + for _, obj := range tt.existingObjs { + work := obj.(*workv1alpha1.Work) + err := c.Client.Get(context.Background(), types.NamespacedName{Namespace: work.Namespace, Name: work.Name}, &workv1alpha1.Work{}) + assert.True(t, client.IgnoreNotFound(err) == nil) + } + + // Check if the finalizer is removed + updatedWork := &workv1alpha1.Work{} + err := c.Client.Get(context.Background(), types.NamespacedName{Namespace: tt.inputWork.Namespace, Name: tt.inputWork.Name}, updatedWork) + assert.NoError(t, err) + assert.NotContains(t, updatedWork.Finalizers, util.MCSEndpointSliceDispatchControllerFinalizer) + } + }) + } +} + +// Helper Functions + 
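+// Optional compile-time assertions (a small addition, not required by the tests)
+// that the mocks defined below satisfy the controller-runtime interfaces they
+// stand in for; if client.Client gains a method upstream, the mismatch surfaces
+// at build time instead of as a runtime type-assertion failure inside a test.
+var _ client.Client = (*MockClient)(nil)
+var _ client.StatusWriter = (*MockStatusWriter)(nil)
+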
+// Helper function to create and configure a runtime scheme for the controller +func setupSchemeEndpointDispatch() *runtime.Scheme { + scheme := runtime.NewScheme() + _ = networkingv1alpha1.Install(scheme) + _ = workv1alpha1.Install(scheme) + _ = clusterv1alpha1.Install(scheme) + _ = discoveryv1.AddToScheme(scheme) + return scheme +} + +// Helper function to create a new EndpointsliceDispatchController with a fake client for testing +func setupController(objs ...client.Object) *EndpointsliceDispatchController { + scheme := setupSchemeEndpointDispatch() + return &EndpointsliceDispatchController{ + Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build(), + EventRecorder: record.NewFakeRecorder(100), + } +} + +// Mock implementations + +// MockClient is a mock of client.Client interface +type MockClient struct { + mock.Mock +} + +func (m *MockClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + args := m.Called(ctx, key, obj, opts) + return args.Error(0) +} + +func (m *MockClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + args := m.Called(ctx, list, opts) + return args.Error(0) +} + +func (m *MockClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +func (m *MockClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +func (m *MockClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +func (m *MockClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + args := m.Called(ctx, obj, patch, opts) + return args.Error(0) +} + +func (m *MockClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +func (m *MockClient) Status() client.StatusWriter { + args := m.Called() + return args.Get(0).(client.StatusWriter) +} + +func (m *MockClient) SubResource(subResource string) client.SubResourceClient { + args := m.Called(subResource) + return args.Get(0).(client.SubResourceClient) +} + +func (m *MockClient) Scheme() *runtime.Scheme { + args := m.Called() + return args.Get(0).(*runtime.Scheme) +} + +func (m *MockClient) RESTMapper() meta.RESTMapper { + args := m.Called() + return args.Get(0).(meta.RESTMapper) +} + +func (m *MockClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + args := m.Called(obj) + return args.Get(0).(schema.GroupVersionKind), args.Error(1) +} + +func (m *MockClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + args := m.Called(obj) + return args.Bool(0), args.Error(1) +} + +// MockStatusWriter is a mock of client.StatusWriter interface +type MockStatusWriter struct { + mock.Mock +} + +func (m *MockStatusWriter) Create(ctx context.Context, obj client.Object, subResource client.Object, opts ...client.SubResourceCreateOption) error { + args := m.Called(ctx, obj, subResource, opts) + return args.Error(0) +} + +func (m *MockStatusWriter) Update(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +func (m *MockStatusWriter) Patch(ctx context.Context, obj client.Object, 
patch client.Patch, opts ...client.SubResourcePatchOption) error { + args := m.Called(ctx, obj, patch, opts) + return args.Error(0) +} + +// Custom fake client that can simulate list errors +type fakeClient struct { + client.Client + listError error +} + +func (f *fakeClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + if f.listError != nil { + return f.listError + } + return f.Client.List(ctx, list, opts...) +} diff --git a/pkg/controllers/multiclusterservice/mcs_controller_test.go b/pkg/controllers/multiclusterservice/mcs_controller_test.go new file mode 100644 index 000000000000..738022942574 --- /dev/null +++ b/pkg/controllers/multiclusterservice/mcs_controller_test.go @@ -0,0 +1,1103 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package multiclusterservice + +import ( + "context" + "fmt" + "sort" + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1" + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/util" + "github.com/karmada-io/karmada/pkg/util/names" +) + +func TestHandleMultiClusterServiceDelete(t *testing.T) { + tests := []struct { + name string + mcs *networkingv1alpha1.MultiClusterService + existingService *corev1.Service + existingResourceBinding *workv1alpha2.ResourceBinding + expectedServiceLabels map[string]string + expectedServiceAnnotations map[string]string + expectedRBLabels map[string]string + expectedRBAnnotations map[string]string + }{ + { + name: "Delete MCS and clean up Service and ResourceBinding", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Finalizers: []string{util.MCSControllerFinalizer}, + }, + }, + existingService: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + util.ResourceTemplateClaimedByLabel: util.MultiClusterServiceKind, + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + Annotations: map[string]string{ + networkingv1alpha1.MultiClusterServiceNameAnnotation: "test-mcs", + networkingv1alpha1.MultiClusterServiceNamespaceAnnotation: "default", + }, + }, + }, + existingResourceBinding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
"service-test-mcs", + Namespace: "default", + Labels: map[string]string{ + workv1alpha2.BindingManagedByLabel: util.MultiClusterServiceKind, + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + Annotations: map[string]string{ + networkingv1alpha1.MultiClusterServiceNameAnnotation: "test-mcs", + networkingv1alpha1.MultiClusterServiceNamespaceAnnotation: "default", + }, + }, + }, + expectedServiceLabels: nil, + expectedServiceAnnotations: map[string]string{ + networkingv1alpha1.MultiClusterServiceNameAnnotation: "test-mcs", + networkingv1alpha1.MultiClusterServiceNamespaceAnnotation: "default", + }, + expectedRBLabels: map[string]string{ + workv1alpha2.BindingManagedByLabel: util.MultiClusterServiceKind, + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + expectedRBAnnotations: map[string]string{ + networkingv1alpha1.MultiClusterServiceNameAnnotation: "test-mcs", + networkingv1alpha1.MultiClusterServiceNamespaceAnnotation: "default", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := newFakeController(tt.mcs, tt.existingService, tt.existingResourceBinding) + + _, err := controller.handleMultiClusterServiceDelete(context.Background(), tt.mcs) + assert.NoError(t, err) + + updatedService := &corev1.Service{} + err = controller.Client.Get(context.Background(), types.NamespacedName{Namespace: tt.mcs.Namespace, Name: tt.mcs.Name}, updatedService) + assert.NoError(t, err) + + updatedRB := &workv1alpha2.ResourceBinding{} + err = controller.Client.Get(context.Background(), types.NamespacedName{Namespace: tt.mcs.Namespace, Name: "service-" + tt.mcs.Name}, updatedRB) + assert.NoError(t, err) + + assert.Equal(t, tt.expectedServiceLabels, updatedService.Labels) + assert.Equal(t, tt.expectedServiceAnnotations, updatedService.Annotations) + assert.Equal(t, tt.expectedRBLabels, updatedRB.Labels) + assert.Equal(t, tt.expectedRBAnnotations, updatedRB.Annotations) + + updatedMCS := &networkingv1alpha1.MultiClusterService{} + err = controller.Client.Get(context.Background(), types.NamespacedName{Namespace: tt.mcs.Namespace, Name: tt.mcs.Name}, updatedMCS) + assert.NoError(t, err) + assert.NotContains(t, updatedMCS.Finalizers, util.MCSControllerFinalizer) + }) + } +} + +func TestRetrieveMultiClusterService(t *testing.T) { + tests := []struct { + name string + mcs *networkingv1alpha1.MultiClusterService + existingWorks []*workv1alpha1.Work + providerClusters sets.Set[string] + clusters []*clusterv1alpha1.Cluster + expectedWorks int + }{ + { + name: "Remove work for non-provider cluster", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + existingWorks: []*workv1alpha1.Work{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: names.GenerateWorkName("MultiClusterService", "test-mcs", "default"), + Namespace: names.GenerateExecutionSpaceName("cluster1"), + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + Spec: workv1alpha1.WorkSpec{ + Workload: workv1alpha1.WorkloadTemplate{ + Manifests: []workv1alpha1.Manifest{ + { + RawExtension: runtime.RawExtension{Raw: []byte(`{"apiVersion":"networking.karmada.io/v1alpha1","kind":"MultiClusterService"}`)}, + }, + }, + }, + }, + }, + }, + providerClusters: sets.New("cluster2"), + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: 
metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + }, + }, + }, + expectedWorks: 0, + }, + { + name: "Keep work for provider cluster", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + existingWorks: []*workv1alpha1.Work{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: names.GenerateWorkName("MultiClusterService", "test-mcs", "default"), + Namespace: names.GenerateExecutionSpaceName("cluster1"), + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + Spec: workv1alpha1.WorkSpec{ + Workload: workv1alpha1.WorkloadTemplate{ + Manifests: []workv1alpha1.Manifest{ + { + RawExtension: runtime.RawExtension{Raw: []byte(`{"apiVersion":"networking.karmada.io/v1alpha1","kind":"MultiClusterService"}`)}, + }, + }, + }, + }, + }, + }, + providerClusters: sets.New("cluster1"), + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + }, + }, + }, + expectedWorks: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + objs := []runtime.Object{tt.mcs} + objs = append(objs, toRuntimeObjects(tt.existingWorks)...) + objs = append(objs, toRuntimeObjects(tt.clusters)...) + + controller := newFakeController(objs...) + + err := controller.retrieveMultiClusterService(context.Background(), tt.mcs, tt.providerClusters) + assert.NoError(t, err) + + workList := &workv1alpha1.WorkList{} + err = controller.Client.List(context.Background(), workList) + assert.NoError(t, err) + + assert.Equal(t, tt.expectedWorks, len(workList.Items)) + }) + } +} + +func TestPropagateMultiClusterService(t *testing.T) { + tests := []struct { + name string + mcs *networkingv1alpha1.MultiClusterService + providerClusters sets.Set[string] + clusters []*clusterv1alpha1.Cluster + expectedWorks int + }{ + { + name: "Propagate to one ready cluster", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + providerClusters: sets.New("cluster1"), + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + APIEnablements: []clusterv1alpha1.APIEnablement{ + { + GroupVersion: "discovery.k8s.io/v1", + Resources: []clusterv1alpha1.APIResource{ + {Kind: "EndpointSlice"}, + }, + }, + }, + }, + }, + }, + expectedWorks: 1, + }, + { + name: "No propagation to unready cluster", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + providerClusters: sets.New("cluster1"), + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{ + 
Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionFalse}, + }, + }, + }, + }, + expectedWorks: 0, + }, + { + name: "No propagation to cluster without EndpointSlice support", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + providerClusters: sets.New("cluster1"), + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + APIEnablements: []clusterv1alpha1.APIEnablement{ + { + GroupVersion: "v1", + Resources: []clusterv1alpha1.APIResource{ + {Kind: "Pod"}, + }, + }, + }, + }, + }, + }, + expectedWorks: 0, + }, + { + name: "Propagate to multiple ready clusters", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + providerClusters: sets.New("cluster1", "cluster2"), + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + APIEnablements: []clusterv1alpha1.APIEnablement{ + { + GroupVersion: "discovery.k8s.io/v1", + Resources: []clusterv1alpha1.APIResource{ + {Kind: "EndpointSlice"}, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster2"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + APIEnablements: []clusterv1alpha1.APIEnablement{ + { + GroupVersion: "discovery.k8s.io/v1", + Resources: []clusterv1alpha1.APIResource{ + {Kind: "EndpointSlice"}, + }, + }, + }, + }, + }, + }, + expectedWorks: 2, + }, + { + name: "Mixed cluster readiness and API support", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + providerClusters: sets.New("cluster1", "cluster2", "cluster3"), + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + APIEnablements: []clusterv1alpha1.APIEnablement{ + { + GroupVersion: "discovery.k8s.io/v1", + Resources: []clusterv1alpha1.APIResource{ + {Kind: "EndpointSlice"}, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster2"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionFalse}, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster3"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + APIEnablements: []clusterv1alpha1.APIEnablement{ + { + GroupVersion: "v1", + Resources: []clusterv1alpha1.APIResource{ + {Kind: 
"Pod"}, + }, + }, + }, + }, + }, + }, + expectedWorks: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + objs := []runtime.Object{tt.mcs} + objs = append(objs, toRuntimeObjects(tt.clusters)...) + + controller := newFakeController(objs...) + + err := controller.propagateMultiClusterService(context.Background(), tt.mcs, tt.providerClusters) + assert.NoError(t, err) + + workList := &workv1alpha1.WorkList{} + err = controller.Client.List(context.Background(), workList) + assert.NoError(t, err) + assert.Equal(t, tt.expectedWorks, len(workList.Items)) + + if tt.expectedWorks > 0 { + for _, work := range workList.Items { + assert.Equal(t, names.GenerateWorkName(tt.mcs.Kind, tt.mcs.Name, tt.mcs.Namespace), work.Name) + clusterName, err := names.GetClusterName(work.Namespace) + assert.NoError(t, err) + assert.Contains(t, tt.providerClusters, clusterName) + assert.Equal(t, "test-id", work.Labels[networkingv1alpha1.MultiClusterServicePermanentIDLabel]) + } + } + }) + } +} + +func TestBuildResourceBinding(t *testing.T) { + tests := []struct { + name string + svc *corev1.Service + mcs *networkingv1alpha1.MultiClusterService + providerClusters sets.Set[string] + consumerClusters sets.Set[string] + }{ + { + name: "Build ResourceBinding with non-overlapping clusters", + svc: &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + UID: "test-uid", + ResourceVersion: "1234", + }, + }, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + providerClusters: sets.New("cluster1", "cluster2"), + consumerClusters: sets.New("cluster3", "cluster4"), + }, + { + name: "Build ResourceBinding with empty consumer clusters", + svc: &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + UID: "test-uid", + ResourceVersion: "1234", + }, + }, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + providerClusters: sets.New("cluster1", "cluster2"), + consumerClusters: sets.New[string](), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := newFakeController() + rb, err := controller.buildResourceBinding(tt.svc, tt.mcs, tt.providerClusters, tt.consumerClusters) + + assert.NoError(t, err) + assert.NotNil(t, rb) + + // ObjectMeta Check + assert.Equal(t, names.GenerateBindingName(tt.svc.Kind, tt.svc.Name), rb.Name) + assert.Equal(t, tt.svc.Namespace, rb.Namespace) + + // Annotations Check + assert.Equal(t, tt.mcs.Name, rb.Annotations[networkingv1alpha1.MultiClusterServiceNameAnnotation]) + assert.Equal(t, tt.mcs.Namespace, rb.Annotations[networkingv1alpha1.MultiClusterServiceNamespaceAnnotation]) + + // Labels Check + assert.Equal(t, util.MultiClusterServiceKind, rb.Labels[workv1alpha2.BindingManagedByLabel]) + assert.Equal(t, "test-id", rb.Labels[networkingv1alpha1.MultiClusterServicePermanentIDLabel]) + + // OwnerReferences Check + assert.Len(t, rb.OwnerReferences, 1) + assert.Equal(t, tt.svc.APIVersion, rb.OwnerReferences[0].APIVersion) + assert.Equal(t, 
tt.svc.Kind, rb.OwnerReferences[0].Kind) + assert.Equal(t, tt.svc.Name, rb.OwnerReferences[0].Name) + assert.Equal(t, tt.svc.UID, rb.OwnerReferences[0].UID) + + // Finalizers Check + assert.Contains(t, rb.Finalizers, util.BindingControllerFinalizer) + + // Spec Check + expectedClusters := tt.providerClusters.Union(tt.consumerClusters).UnsortedList() + actualClusters := rb.Spec.Placement.ClusterAffinity.ClusterNames + + // Sort both slices before comparison + sort.Strings(expectedClusters) + sort.Strings(actualClusters) + + assert.Equal(t, expectedClusters, actualClusters, "Cluster names should match regardless of order") + + // Resource reference Check + assert.Equal(t, tt.svc.APIVersion, rb.Spec.Resource.APIVersion) + assert.Equal(t, tt.svc.Kind, rb.Spec.Resource.Kind) + assert.Equal(t, tt.svc.Namespace, rb.Spec.Resource.Namespace) + assert.Equal(t, tt.svc.Name, rb.Spec.Resource.Name) + assert.Equal(t, tt.svc.UID, rb.Spec.Resource.UID) + assert.Equal(t, tt.svc.ResourceVersion, rb.Spec.Resource.ResourceVersion) + }) + } +} + +func TestClaimMultiClusterServiceForService(t *testing.T) { + tests := []struct { + name string + svc *corev1.Service + mcs *networkingv1alpha1.MultiClusterService + updateError bool + expectedError bool + }{ + { + name: "Claim service for MCS - basic case", + svc: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + }, + }, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + }, + { + name: "Claim service for MCS - with existing labels and annotations", + svc: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + Labels: map[string]string{ + "existing-label": "value", + policyv1alpha1.PropagationPolicyPermanentIDLabel: "should-be-removed", + }, + Annotations: map[string]string{ + "existing-annotation": "value", + policyv1alpha1.PropagationPolicyNameAnnotation: "should-be-removed", + }, + }, + }, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + }, + { + name: "Claim service for MCS - update error", + svc: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + }, + }, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + updateError: true, + expectedError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := newFakeController(tt.svc) + if tt.updateError { + controller.Client = newFakeClientWithUpdateError(tt.svc, true) + } + + err := controller.claimMultiClusterServiceForService(context.Background(), tt.svc, tt.mcs) + + if tt.expectedError { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + + updatedSvc := &corev1.Service{} + err = controller.Client.Get(context.Background(), types.NamespacedName{Namespace: tt.svc.Namespace, Name: tt.svc.Name}, updatedSvc) + assert.NoError(t, err) + + // Added labels and annotations check + assert.Equal(t, util.MultiClusterServiceKind, 
+
+func TestIsClusterReady(t *testing.T) {
+    tests := []struct {
+        name          string
+        clusterName   string
+        clusterObj    *clusterv1alpha1.Cluster
+        expectedReady bool
+    }{
+        {
+            name:        "cluster is ready",
+            clusterName: "ready-cluster",
+            clusterObj: &clusterv1alpha1.Cluster{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name: "ready-cluster",
+                },
+                Status: clusterv1alpha1.ClusterStatus{
+                    Conditions: []metav1.Condition{
+                        {
+                            Type:   clusterv1alpha1.ClusterConditionReady,
+                            Status: metav1.ConditionTrue,
+                        },
+                    },
+                },
+            },
+            expectedReady: true,
+        },
+        {
+            name:        "cluster is not ready",
+            clusterName: "not-ready-cluster",
+            clusterObj: &clusterv1alpha1.Cluster{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name: "not-ready-cluster",
+                },
+                Status: clusterv1alpha1.ClusterStatus{
+                    Conditions: []metav1.Condition{
+                        {
+                            Type:   clusterv1alpha1.ClusterConditionReady,
+                            Status: metav1.ConditionFalse,
+                        },
+                    },
+                },
+            },
+            expectedReady: false,
+        },
+        {
+            name:          "cluster does not exist",
+            clusterName:   "non-existent-cluster",
+            clusterObj:    nil,
+            expectedReady: false,
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            var objects []runtime.Object
+            if tt.clusterObj != nil {
+                objects = append(objects, tt.clusterObj)
+            }
+
+            controller := newFakeController(objects...)
+
+            ready := controller.IsClusterReady(context.Background(), tt.clusterName)
+            assert.Equal(t, tt.expectedReady, ready, "IsClusterReady() result does not match expected")
+        })
+    }
+}
+
+func TestServiceHasCrossClusterMultiClusterService(t *testing.T) {
+    tests := []struct {
+        name     string
+        svc      *corev1.Service
+        mcs      *networkingv1alpha1.MultiClusterService
+        expected bool
+    }{
+        {
+            name: "Service has cross-cluster MCS",
+            svc:  &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-svc", Namespace: "default"}},
+            mcs: &networkingv1alpha1.MultiClusterService{
+                ObjectMeta: metav1.ObjectMeta{Name: "test-svc", Namespace: "default"},
+                Spec: networkingv1alpha1.MultiClusterServiceSpec{
+                    Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("CrossCluster")},
+                },
+            },
+            expected: true,
+        },
+        {
+            name: "Service has no cross-cluster MCS",
+            svc:  &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-svc", Namespace: "default"}},
+            mcs: &networkingv1alpha1.MultiClusterService{
+                ObjectMeta: metav1.ObjectMeta{Name: "test-svc", Namespace: "default"},
+                Spec: networkingv1alpha1.MultiClusterServiceSpec{
+                    Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("LocalCluster")},
+                },
+            },
+            expected: false,
+        },
+        {
+            name:     "Service has no MCS",
+            svc:      &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-svc", Namespace: "default"}},
+            mcs:      nil,
+            expected: false,
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            objs := []runtime.Object{tt.svc}
+            if tt.mcs != nil {
+                objs = append(objs, tt.mcs)
+            }
+
+            controller := newFakeController(objs...)
+
+            result := controller.serviceHasCrossClusterMultiClusterService(tt.svc)
+            assert.Equal(t, tt.expected, result)
+        })
+    }
+}
+
+func TestClusterMapFunc(t *testing.T) {
+    tests := []struct {
+        name             string
+        object           client.Object
+        mcsList          []*networkingv1alpha1.MultiClusterService
+        expectedRequests []reconcile.Request
+    }{
+        {
+            name:   "Cluster matches one MCS as provider and the other via its empty consumer list",
+            object: &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}},
+            mcsList: []*networkingv1alpha1.MultiClusterService{
+                {
+                    ObjectMeta: metav1.ObjectMeta{Name: "mcs1", Namespace: "default"},
+                    Spec: networkingv1alpha1.MultiClusterServiceSpec{
+                        Types:            []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("CrossCluster")},
+                        ProviderClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster1"}},
+                    },
+                },
+                {
+                    ObjectMeta: metav1.ObjectMeta{Name: "mcs2", Namespace: "default"},
+                    Spec: networkingv1alpha1.MultiClusterServiceSpec{
+                        Types:            []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("CrossCluster")},
+                        ProviderClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster2"}},
+                    },
+                },
+            },
+            expectedRequests: []reconcile.Request{
+                {NamespacedName: types.NamespacedName{Namespace: "default", Name: "mcs1"}},
+                {NamespacedName: types.NamespacedName{Namespace: "default", Name: "mcs2"}},
+            },
+        },
+        {
+            name:   "Cluster not named as provider is still enqueued via the MCS's empty consumer list",
+            object: &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster3"}},
+            mcsList: []*networkingv1alpha1.MultiClusterService{
+                {
+                    ObjectMeta: metav1.ObjectMeta{Name: "mcs1", Namespace: "default"},
+                    Spec: networkingv1alpha1.MultiClusterServiceSpec{
+                        Types:            []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("CrossCluster")},
+                        ProviderClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster1"}},
+                    },
+                },
+            },
+            expectedRequests: []reconcile.Request{
+                {NamespacedName: types.NamespacedName{Namespace: "default", Name: "mcs1"}},
+            },
+        },
+        {
+            name:             "Empty MCS list",
+            object:           &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}},
+            mcsList:          []*networkingv1alpha1.MultiClusterService{},
+            expectedRequests: []reconcile.Request{},
+        },
+        {
+            name:             "Non-Cluster object",
+            object:           &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1"}},
+            mcsList:          []*networkingv1alpha1.MultiClusterService{},
+            expectedRequests: nil,
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            objs := []runtime.Object{tt.object}
+            objs = append(objs, toRuntimeObjects(tt.mcsList)...)
+
+            controller := newFakeController(objs...)
+            mapFunc := controller.clusterMapFunc()
+
+            requests := mapFunc(context.Background(), tt.object)
+
+            assert.Equal(t, len(tt.expectedRequests), len(requests), "Number of requests does not match expected")
+            assert.ElementsMatch(t, tt.expectedRequests, requests, "Requests do not match expected")
+
+            if _, ok := tt.object.(*clusterv1alpha1.Cluster); ok {
+                for _, request := range requests {
+                    found := false
+                    for _, mcs := range tt.mcsList {
+                        if mcs.Name == request.Name && mcs.Namespace == request.Namespace {
+                            found = true
+                            break
+                        }
+                    }
+                    assert.True(t, found, "Generated request does not correspond to any MCS in the list")
+                }
+            }
+        })
+    }
+}
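+
+// Note on cluster matching in the cases above and below: the expected
+// results assume that an empty ProviderClusters or ConsumerClusters list
+// selects every cluster. That is why a Cluster event can enqueue an MCS
+// that does not name the cluster explicitly, and why the empty-selector
+// case in TestNeedSyncMultiClusterService expects need to be true.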
"default", Name: "mcs1"}}, + }, + }, + { + name: "Empty MCS list", + object: &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}}, + mcsList: []*networkingv1alpha1.MultiClusterService{}, + expectedRequests: []reconcile.Request{}, + }, + { + name: "Non-Cluster object", + object: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1"}}, + mcsList: []*networkingv1alpha1.MultiClusterService{}, + expectedRequests: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + objs := []runtime.Object{tt.object} + objs = append(objs, toRuntimeObjects(tt.mcsList)...) + + controller := newFakeController(objs...) + mapFunc := controller.clusterMapFunc() + + requests := mapFunc(context.Background(), tt.object) + + assert.Equal(t, len(tt.expectedRequests), len(requests), "Number of requests does not match expected") + assert.ElementsMatch(t, tt.expectedRequests, requests, "Requests do not match expected") + + if _, ok := tt.object.(*clusterv1alpha1.Cluster); ok { + for _, request := range requests { + found := false + for _, mcs := range tt.mcsList { + if mcs.Name == request.Name && mcs.Namespace == request.Namespace { + found = true + break + } + } + assert.True(t, found, "Generated request does not correspond to any MCS in the list") + } + } + }) + } +} + +func TestNeedSyncMultiClusterService(t *testing.T) { + tests := []struct { + name string + mcs *networkingv1alpha1.MultiClusterService + clusterName string + expectedNeed bool + expectedErr bool + }{ + { + name: "MCS with CrossCluster type and matching provider cluster", + mcs: &networkingv1alpha1.MultiClusterService{ + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("CrossCluster")}, + ProviderClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster1"}}, + ConsumerClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster2"}}, + }, + }, + clusterName: "cluster1", + expectedNeed: true, + expectedErr: false, + }, + { + name: "MCS with CrossCluster type and matching consumer cluster", + mcs: &networkingv1alpha1.MultiClusterService{ + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("CrossCluster")}, + ProviderClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster1"}}, + ConsumerClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster2"}}, + }, + }, + clusterName: "cluster2", + expectedNeed: true, + expectedErr: false, + }, + { + name: "MCS without CrossCluster type", + mcs: &networkingv1alpha1.MultiClusterService{ + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("LocalCluster")}, + }, + }, + clusterName: "cluster1", + expectedNeed: false, + expectedErr: false, + }, + { + name: "MCS with empty ProviderClusters and ConsumerClusters", + mcs: &networkingv1alpha1.MultiClusterService{ + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("CrossCluster")}, + }, + }, + clusterName: "cluster1", + expectedNeed: true, + expectedErr: false, + }, + { + name: "Cluster doesn't match ProviderClusters or ConsumerClusters", + mcs: &networkingv1alpha1.MultiClusterService{ + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("CrossCluster")}, + ProviderClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster1"}}, + 
+
+// Helper Functions
+
+// Helper function to create fake Cluster objects based on the MCS spec
+func createClustersFromMCS(mcs *networkingv1alpha1.MultiClusterService) []*clusterv1alpha1.Cluster {
+    var clusters []*clusterv1alpha1.Cluster
+    for _, pc := range mcs.Spec.ProviderClusters {
+        clusters = append(clusters, &clusterv1alpha1.Cluster{
+            ObjectMeta: metav1.ObjectMeta{Name: pc.Name},
+        })
+    }
+    for _, cc := range mcs.Spec.ConsumerClusters {
+        clusters = append(clusters, &clusterv1alpha1.Cluster{
+            ObjectMeta: metav1.ObjectMeta{Name: cc.Name},
+        })
+    }
+    return clusters
+}
+
+// Helper function to set up a scheme with all necessary types
+func setupScheme() *runtime.Scheme {
+    s := runtime.NewScheme()
+    _ = corev1.AddToScheme(s)
+    _ = networkingv1alpha1.Install(s)
+    _ = workv1alpha1.Install(s)
+    _ = workv1alpha2.Install(s)
+    _ = clusterv1alpha1.Install(s)
+    _ = scheme.AddToScheme(s)
+    return s
+}
+
+// Helper function to create a new MCSController with a fake client
+func newFakeController(objs ...runtime.Object) *MCSController {
+    s := setupScheme()
+    fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(objs...).Build()
+    return &MCSController{
+        Client:        fakeClient,
+        EventRecorder: record.NewFakeRecorder(100),
+    }
+}
+
+// Helper function to convert a typed slice of API objects to a slice of
+// runtime.Object. The generic form accepts any slice whose element type
+// implements runtime.Object, so new object kinds need no extra case here
+// and cannot be silently dropped.
+func toRuntimeObjects[T runtime.Object](objs []T) []runtime.Object {
+    result := make([]runtime.Object, 0, len(objs))
+    for _, obj := range objs {
+        result = append(result, obj)
+    }
+    return result
+}
+
+// Helper function to create a fake client that can simulate update errors
+func newFakeClientWithUpdateError(svc *corev1.Service, shouldError bool) client.Client {
+    s := runtime.NewScheme()
+    _ = corev1.AddToScheme(s)
+    _ = networkingv1alpha1.Install(s)
+
+    fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(svc).Build()
+
+    return &errorInjectingClient{
+        Client:      fakeClient,
+        shouldError: shouldError,
+    }
+}
+
+// errorInjectingClient wraps a client.Client and fails Update calls on
+// demand, delegating every other operation to the embedded client.
+type errorInjectingClient struct {
+    client.Client
+    shouldError bool
+}
+
+func (c *errorInjectingClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error {
+    if c.shouldError {
+        return fmt.Errorf("simulated update error")
+    }
+    return c.Client.Update(ctx, obj, opts...)
+}
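+
+// Compile-time interface check (an optional, illustrative addition): if a
+// future controller-runtime release extends client.Client, this line turns
+// the mismatch into a build error instead of a test-time surprise.
+var _ client.Client = &errorInjectingClient{}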