|
| 1 | +// Copyright © 2025 Kube logging authors |
| 2 | +// |
| 3 | +// Licensed under the Apache License, Version 2.0 (the "License"); |
| 4 | +// you may not use this file except in compliance with the License. |
| 5 | +// You may obtain a copy of the License at |
| 6 | +// |
| 7 | +// http://www.apache.org/licenses/LICENSE-2.0 |
| 8 | +// |
| 9 | +// Unless required by applicable law or agreed to in writing, software |
| 10 | +// distributed under the License is distributed on an "AS IS" BASIS, |
| 11 | +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 12 | +// See the License for the specific language governing permissions and |
| 13 | +// limitations under the License. |
| 14 | + |
| 15 | +package watch_selector |
| 16 | + |
| 17 | +import ( |
| 18 | + "context" |
| 19 | + "fmt" |
| 20 | + "os" |
| 21 | + "path/filepath" |
| 22 | + "testing" |
| 23 | + "time" |
| 24 | + |
| 25 | + appsv1 "k8s.io/api/apps/v1" |
| 26 | + batchv1 "k8s.io/api/batch/v1" |
| 27 | + corev1 "k8s.io/api/core/v1" |
| 28 | + rbacv1 "k8s.io/api/rbac/v1" |
| 29 | + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" |
| 30 | + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" |
| 31 | + "k8s.io/apimachinery/pkg/runtime" |
| 32 | + "sigs.k8s.io/controller-runtime/pkg/client" |
| 33 | + "sigs.k8s.io/controller-runtime/pkg/cluster" |
| 34 | + "sigs.k8s.io/e2e-framework/third_party/helm" |
| 35 | + |
| 36 | + "github.com/kube-logging/logging-operator/e2e/common" |
| 37 | + "github.com/kube-logging/logging-operator/e2e/common/cond" |
| 38 | + "github.com/kube-logging/logging-operator/e2e/common/setup" |
| 39 | + "github.com/kube-logging/logging-operator/pkg/sdk/logging/api/v1beta1" |
| 40 | + "github.com/stretchr/testify/require" |
| 41 | +) |
| 42 | + |
// TestTempDir is the directory where cluster logs and other test
// artifacts produced by this suite are written.
var TestTempDir string

// init resolves TestTempDir from the PROJECT_DIR environment variable
// (falling back to the repository root relative to this package) and
// makes sure the directory exists before any test runs.
func init() {
	base, ok := os.LookupEnv("PROJECT_DIR")
	if !ok {
		// Running outside the make harness: assume we sit two levels
		// below the project root.
		base = "../.."
	}
	TestTempDir = filepath.Join(base, "build/_test")
	if err := os.MkdirAll(TestTempDir, os.FileMode(0755)); err != nil {
		panic(err)
	}
}
| 57 | + |
| 58 | +func TestWatchSelectors(t *testing.T) { |
| 59 | + common.Initialize(t) |
| 60 | + ns := "test" |
| 61 | + releaseNameOverride := "e2e" |
| 62 | + common.WithCluster("watch-selector", t, func(t *testing.T, c common.Cluster) { |
| 63 | + setup.LoggingOperator(t, c, setup.LoggingOperatorOptionFunc(func(options *setup.LoggingOperatorOptions) { |
| 64 | + options.Namespace = ns |
| 65 | + options.NameOverride = releaseNameOverride |
| 66 | + options.Args = []string{"-enable-leader-election=true", "-watch-labeled-children=true", "-watch-labeled-secrets=true"} |
| 67 | + })) |
| 68 | + |
| 69 | + ctx := context.Background() |
| 70 | + |
| 71 | + // Managed logging resource which creates a fluentd pod with a secret named: watch-selector-test-fluentd |
| 72 | + logging := v1beta1.Logging{ |
| 73 | + ObjectMeta: metav1.ObjectMeta{ |
| 74 | + Name: "watch-selector-test", |
| 75 | + Namespace: ns, |
| 76 | + }, |
| 77 | + Spec: v1beta1.LoggingSpec{ |
| 78 | + ControlNamespace: ns, |
| 79 | + FluentbitSpec: &v1beta1.FluentbitSpec{}, |
| 80 | + FluentdSpec: &v1beta1.FluentdSpec{ |
| 81 | + Image: v1beta1.ImageSpec{ |
| 82 | + Repository: common.FluentdImageRepo, |
| 83 | + Tag: common.FluentdImageTag, |
| 84 | + }, |
| 85 | + }, |
| 86 | + }, |
| 87 | + } |
| 88 | + common.RequireNoError(t, c.GetClient().Create(ctx, &logging)) |
| 89 | + |
| 90 | + // Unmanaged resources |
| 91 | + common.RequireNoError(t, installFluentdSts(c)) |
| 92 | + |
| 93 | + unmanagedSecret := &corev1.Secret{ |
| 94 | + ObjectMeta: metav1.ObjectMeta{ |
| 95 | + Name: "unmanaged-fluentd-secret", |
| 96 | + Namespace: ns, |
| 97 | + Labels: map[string]string{ |
| 98 | + "app": "fluentd", |
| 99 | + }, |
| 100 | + }, |
| 101 | + Data: map[string][]byte{ |
| 102 | + "key": []byte("value"), |
| 103 | + }, |
| 104 | + } |
| 105 | + common.RequireNoError(t, c.GetClient().Create(ctx, unmanagedSecret)) |
| 106 | + |
| 107 | + require.Eventually(t, func() bool { |
| 108 | + if isManagedFluentdPodRunning := cond.PodShouldBeRunning(t, c.GetClient(), client.ObjectKey{Namespace: ns, Name: logging.Name + "-fluentd-0"}); !isManagedFluentdPodRunning() { |
| 109 | + t.Logf("managed fluentd pod is not running") |
| 110 | + return false |
| 111 | + } |
| 112 | + |
| 113 | + if isUnmanagedFluentdPodRunning := cond.PodShouldBeRunning(t, c.GetClient(), client.ObjectKey{Namespace: "fluentd", Name: "fluentd-0"}); !isUnmanagedFluentdPodRunning() { |
| 114 | + t.Logf("unmanaged fluentd pod is not running") |
| 115 | + return false |
| 116 | + } |
| 117 | + |
| 118 | + return true |
| 119 | + }, 5*time.Minute, 3*time.Second) |
| 120 | + |
| 121 | + deployedLogging := &v1beta1.Logging{} |
| 122 | + common.RequireNoError(t, c.GetClient().Get(ctx, client.ObjectKeyFromObject(&logging), deployedLogging)) |
| 123 | + |
| 124 | + // Check if the managed resources are actually controlled by the logging resource |
| 125 | + managedSts := &appsv1.StatefulSet{} |
| 126 | + common.RequireNoError(t, c.GetClient().Get(ctx, client.ObjectKey{Namespace: ns, Name: deployedLogging.Name + "-fluentd"}, managedSts)) |
| 127 | + stsOwnerRefMeta := metav1.GetControllerOf(managedSts) |
| 128 | + require.NotNil(t, stsOwnerRefMeta) |
| 129 | + |
| 130 | + require.Equal(t, deployedLogging.APIVersion, stsOwnerRefMeta.APIVersion) |
| 131 | + require.Equal(t, deployedLogging.Kind, stsOwnerRefMeta.Kind) |
| 132 | + require.Equal(t, deployedLogging.Name, stsOwnerRefMeta.Name) |
| 133 | + require.True(t, *stsOwnerRefMeta.Controller) |
| 134 | + |
| 135 | + managedSecret := &corev1.Secret{} |
| 136 | + common.RequireNoError(t, c.GetClient().Get(ctx, client.ObjectKey{Namespace: ns, Name: deployedLogging.Name + "-fluentd"}, managedSecret)) |
| 137 | + secretOwnerRefMeta := metav1.GetControllerOf(managedSecret) |
| 138 | + require.NotNil(t, secretOwnerRefMeta) |
| 139 | + |
| 140 | + require.Equal(t, deployedLogging.APIVersion, secretOwnerRefMeta.APIVersion) |
| 141 | + require.Equal(t, deployedLogging.Kind, secretOwnerRefMeta.Kind) |
| 142 | + require.Equal(t, deployedLogging.Name, secretOwnerRefMeta.Name) |
| 143 | + require.True(t, *secretOwnerRefMeta.Controller) |
| 144 | + |
| 145 | + // Check if the unmanaged resources are actually not controlled by the operator |
| 146 | + unmanagedSts := &appsv1.StatefulSet{} |
| 147 | + common.RequireNoError(t, c.GetClient().Get(ctx, client.ObjectKey{Namespace: "fluentd", Name: "fluentd"}, unmanagedSts)) |
| 148 | + secretOwnerRefMeta = metav1.GetControllerOf(unmanagedSts) |
| 149 | + require.Nil(t, secretOwnerRefMeta) |
| 150 | + |
| 151 | + secret := &corev1.Secret{} |
| 152 | + common.RequireNoError(t, c.GetClient().Get(ctx, client.ObjectKeyFromObject(unmanagedSecret), secret)) |
| 153 | + secretOwnerRefMeta = metav1.GetControllerOf(secret) |
| 154 | + require.Nil(t, secretOwnerRefMeta) |
| 155 | + |
| 156 | + }, func(t *testing.T, c common.Cluster) error { |
| 157 | + path := filepath.Join(TestTempDir, fmt.Sprintf("cluster-%s.log", t.Name())) |
| 158 | + t.Logf("Printing cluster logs to %s", path) |
| 159 | + err := c.PrintLogs(common.PrintLogConfig{ |
| 160 | + Namespaces: []string{ns, "default"}, |
| 161 | + FilePath: path, |
| 162 | + Limit: 100 * 1000, |
| 163 | + }) |
| 164 | + if err != nil { |
| 165 | + return err |
| 166 | + } |
| 167 | + |
| 168 | + loggingOperatorName := "logging-operator-" + releaseNameOverride |
| 169 | + t.Logf("Collecting coverage files from logging-operator: %s/%s", ns, loggingOperatorName) |
| 170 | + err = c.CollectTestCoverageFiles(ns, loggingOperatorName) |
| 171 | + if err != nil { |
| 172 | + t.Logf("Failed collecting coverage files: %s", err) |
| 173 | + } |
| 174 | + return err |
| 175 | + |
| 176 | + }, func(o *cluster.Options) { |
| 177 | + if o.Scheme == nil { |
| 178 | + o.Scheme = runtime.NewScheme() |
| 179 | + } |
| 180 | + common.RequireNoError(t, v1beta1.AddToScheme(o.Scheme)) |
| 181 | + common.RequireNoError(t, apiextensionsv1.AddToScheme(o.Scheme)) |
| 182 | + common.RequireNoError(t, appsv1.AddToScheme(o.Scheme)) |
| 183 | + common.RequireNoError(t, batchv1.AddToScheme(o.Scheme)) |
| 184 | + common.RequireNoError(t, corev1.AddToScheme(o.Scheme)) |
| 185 | + common.RequireNoError(t, rbacv1.AddToScheme(o.Scheme)) |
| 186 | + }) |
| 187 | +} |
| 188 | + |
| 189 | +func installFluentdSts(c common.Cluster) error { |
| 190 | + manager := helm.New(c.KubeConfigFilePath()) |
| 191 | + |
| 192 | + if err := manager.RunRepo(helm.WithArgs("add", "fluent", "https://fluent.github.io/helm-charts")); err != nil { |
| 193 | + return fmt.Errorf("failed to add fluent repo: %v", err) |
| 194 | + } |
| 195 | + |
| 196 | + if err := manager.RunInstall( |
| 197 | + helm.WithName("fluentd"), |
| 198 | + helm.WithChart("fluent/fluentd"), |
| 199 | + helm.WithArgs("--create-namespace"), |
| 200 | + helm.WithNamespace("fluentd"), |
| 201 | + helm.WithArgs("--set", "kind=StatefulSet"), |
| 202 | + helm.WithWait(), |
| 203 | + ); err != nil { |
| 204 | + return fmt.Errorf("failed to install fluentd: %v", err) |
| 205 | + } |
| 206 | + |
| 207 | + return nil |
| 208 | +} |
0 commit comments