diff --git a/pkg/daemon/ceph/client/pool.go b/pkg/daemon/ceph/client/pool.go
index 4677007fddfb..95d778ec8077 100644
--- a/pkg/daemon/ceph/client/pool.go
+++ b/pkg/daemon/ceph/client/pool.go
@@ -468,6 +468,10 @@ func updatePoolCrushRule(context *clusterd.Context, clusterInfo *ClusterInfo, cl
 		logger.Debugf("Skipping crush rule update for pool %q: EnableCrushUpdates is disabled", pool.Name)
 		return nil
 	}
+	if clusterSpec.IsStretchCluster() {
+		logger.Debugf("skipping crush rule update for pool %q in a stretch cluster", pool.Name)
+		return nil
+	}
 
 	if pool.FailureDomain == "" && pool.DeviceClass == "" {
 		logger.Debugf("skipping check for failure domain and deviceClass on pool %q as it is not specified", pool.Name)
diff --git a/pkg/daemon/ceph/client/pool_test.go b/pkg/daemon/ceph/client/pool_test.go
index db29c26867b3..28bacae2109a 100644
--- a/pkg/daemon/ceph/client/pool_test.go
+++ b/pkg/daemon/ceph/client/pool_test.go
@@ -251,10 +251,12 @@ func TestUpdateFailureDomain(t *testing.T) {
 	currentFailureDomain := "rack"
 	currentDeviceClass := "default"
 	testCrushRuleName := "test_rule"
+	cephCommandCalled := false
 	executor := &exectest.MockExecutor{}
 	context := &clusterd.Context{Executor: executor}
 	executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
 		logger.Infof("Command: %s %v", command, args)
+		cephCommandCalled = true
 		if args[1] == "pool" {
 			if args[2] == "get" {
 				assert.Equal(t, "mypool", args[3])
@@ -335,6 +337,27 @@ func TestUpdateFailureDomain(t *testing.T) {
 		assert.NoError(t, err)
 		assert.Equal(t, "mypool_zone", newCrushRule)
 	})
+
+	t.Run("stretch cluster skips crush rule update", func(t *testing.T) {
+		p := cephv1.NamedPoolSpec{
+			Name: "mypool",
+			PoolSpec: cephv1.PoolSpec{
+				FailureDomain:      "zone",
+				Replicated:         cephv1.ReplicatedSpec{Size: 3},
+				EnableCrushUpdates: true,
+			},
+		}
+		clusterSpec := &cephv1.ClusterSpec{
+			Mon:     cephv1.MonSpec{StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.MonZoneSpec{{Name: "zone1"}, {Name: "zone2"}, {Name: "zone3", Arbiter: true}}}},
+			Storage: cephv1.StorageScopeSpec{},
+		}
+		newCrushRule = ""
+		cephCommandCalled = false
+		err := updatePoolCrushRule(context, AdminTestClusterInfo("mycluster"), clusterSpec, p)
+		assert.NoError(t, err)
+		assert.Equal(t, "", newCrushRule)
+		assert.False(t, cephCommandCalled)
+	})
 }
 
 func TestExtractPoolDetails(t *testing.T) {