Commit 2846830

Merge pull request #629 from red-hat-storage/sync_us--master
Syncing latest changes from upstream master for rook
travisn authored Apr 22, 2024
2 parents 29e3436 + 44b8499 commit 2846830
Showing 20 changed files with 254 additions and 92 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/canary-test-config/action.yaml
@@ -19,7 +19,7 @@ runs:
   - name: Setup Minikube
     shell: bash --noprofile --norc -eo pipefail -x {0}
     run: |
-      tests/scripts/github-action-helper.sh install_minikube_with_none_driver v1.29.2
+      tests/scripts/github-action-helper.sh install_minikube_with_none_driver v1.30.0
   - name: install deps
     shell: bash --noprofile --norc -eo pipefail -x {0}
2 changes: 1 addition & 1 deletion .github/workflows/integration-test-helm-suite.yaml
@@ -25,7 +25,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        kubernetes-versions: ["v1.25.16", "v1.29.2"]
+        kubernetes-versions: ["v1.25.16", "v1.30.0"]
     steps:
       - name: checkout
         uses: actions/checkout@v4
2 changes: 1 addition & 1 deletion .github/workflows/integration-test-mgr-suite.yaml
@@ -24,7 +24,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        kubernetes-versions: ["v1.29.2"]
+        kubernetes-versions: ["v1.30.0"]
     steps:
       - name: checkout
         uses: actions/checkout@v4
(file header not captured)
@@ -25,7 +25,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        kubernetes-versions: ["v1.29.2"]
+        kubernetes-versions: ["v1.30.0"]
     steps:
       - name: checkout
         uses: actions/checkout@v4
2 changes: 1 addition & 1 deletion .github/workflows/integration-test-object-suite.yaml
@@ -25,7 +25,7 @@ jobs:
     strategy:
       fail-fast: false
      matrix:
-        kubernetes-versions: ["v1.25.16", "v1.29.2"]
+        kubernetes-versions: ["v1.25.16", "v1.30.0"]
     steps:
       - name: checkout
         uses: actions/checkout@v4
2 changes: 1 addition & 1 deletion .github/workflows/integration-test-smoke-suite.yaml
@@ -25,7 +25,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        kubernetes-versions: ["v1.25.16", "v1.29.2"]
+        kubernetes-versions: ["v1.25.16", "v1.30.0"]
     steps:
       - name: checkout
         uses: actions/checkout@v4
4 changes: 2 additions & 2 deletions .github/workflows/integration-test-upgrade-suite.yaml
@@ -25,7 +25,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        kubernetes-versions: ["v1.25.16", "v1.29.2"]
+        kubernetes-versions: ["v1.25.16", "v1.30.0"]
     steps:
       - name: checkout
         uses: actions/checkout@v4
@@ -69,7 +69,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        kubernetes-versions: ["v1.25.16", "v1.29.2"]
+        kubernetes-versions: ["v1.25.16", "v1.30.0"]
     steps:
       - name: checkout
         uses: actions/checkout@v4
12 changes: 6 additions & 6 deletions .github/workflows/integration-tests-on-release.yaml
@@ -18,7 +18,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        kubernetes-versions: ["v1.25.16", "v1.27.11", "v1.28.7", "v1.29.2"]
+        kubernetes-versions: ["v1.25.16", "v1.27.13", "v1.28.9", "v1.30.0"]
     steps:
       - name: checkout
         uses: actions/checkout@v4
@@ -58,7 +58,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        kubernetes-versions: ["v1.25.16", "v1.27.11", "v1.28.7", "v1.29.2"]
+        kubernetes-versions: ["v1.25.16", "v1.27.13", "v1.28.9", "v1.30.0"]
     steps:
       - name: checkout
         uses: actions/checkout@v4
@@ -99,7 +99,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        kubernetes-versions: ["v1.25.16", "v1.27.11", "v1.28.7", "v1.29.2"]
+        kubernetes-versions: ["v1.25.16", "v1.27.13", "v1.28.9", "v1.30.0"]
     steps:
       - name: checkout
         uses: actions/checkout@v4
@@ -137,7 +137,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        kubernetes-versions: ["v1.25.16", "v1.27.11", "v1.28.7", "v1.29.2"]
+        kubernetes-versions: ["v1.25.16", "v1.27.13", "v1.28.9", "v1.30.0"]
     steps:
       - name: checkout
         uses: actions/checkout@v4
@@ -175,7 +175,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        kubernetes-versions: ["v1.25.16", "v1.27.11", "v1.28.7", "v1.29.2"]
+        kubernetes-versions: ["v1.25.16", "v1.27.13", "v1.28.9", "v1.30.0"]
     steps:
       - name: checkout
         uses: actions/checkout@v4
@@ -216,7 +216,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        kubernetes-versions: ["v1.25.16", "v1.29.2"]
+        kubernetes-versions: ["v1.25.16", "v1.30.0"]
     steps:
       - name: checkout
         uses: actions/checkout@v4
54 changes: 27 additions & 27 deletions .mergify.yml
@@ -238,37 +238,37 @@ pull_request_rules:
       - "check-success=crds-gen"
       - "check-success=docs-check"
       - "check-success=pylint"
-      - "check-success=canary (quay.io/ceph/ceph:v18)"
-      - "check-success=raw-disk-with-object (quay.io/ceph/ceph:v18)"
-      - "check-success=two-osds-in-device (quay.io/ceph/ceph:v18)"
-      - "check-success=osd-with-metadata-partition-device (quay.io/ceph/ceph:v18)"
-      - "check-success=osd-with-metadata-device (quay.io/ceph/ceph:v18)"
-      - "check-success=encryption (quay.io/ceph/ceph:v18)"
-      - "check-success=lvm (quay.io/ceph/ceph:v18)"
-      - "check-success=pvc (quay.io/ceph/ceph:v18)"
-      - "check-success=pvc-db (quay.io/ceph/ceph:v18)"
-      - "check-success=pvc-db-wal (quay.io/ceph/ceph:v18)"
-      - "check-success=encryption-pvc (quay.io/ceph/ceph:v18)"
-      - "check-success=encryption-pvc-db (quay.io/ceph/ceph:v18)"
-      - "check-success=encryption-pvc-db-wal (quay.io/ceph/ceph:v18)"
-      - "check-success=encryption-pvc-kms-vault-token-auth (quay.io/ceph/ceph:v18)"
-      - "check-success=encryption-pvc-kms-vault-k8s-auth (quay.io/ceph/ceph:v18)"
-      - "check-success=lvm-pvc (quay.io/ceph/ceph:v18)"
-      - "check-success=multi-cluster-mirroring (quay.io/ceph/ceph:v18)"
-      - "check-success=rgw-multisite-testing (quay.io/ceph/ceph:v18)"
-      - "check-success=encryption-pvc-kms-ibm-kp (quay.io/ceph/ceph:v18)"
-      - "check-success=multus-cluster-network (quay.io/ceph/ceph:v18)"
-      - "check-success=csi-hostnetwork-disabled (quay.io/ceph/ceph:v18)"
+      - "check-success=canary-tests / canary (quay.io/ceph/ceph:v18)"
+      - "check-success=canary-tests / raw-disk-with-object (quay.io/ceph/ceph:v18)"
+      - "check-success=canary-tests / two-osds-in-device (quay.io/ceph/ceph:v18)"
+      - "check-success=canary-tests / osd-with-metadata-partition-device (quay.io/ceph/ceph:v18)"
+      - "check-success=canary-tests / osd-with-metadata-device (quay.io/ceph/ceph:v18)"
+      - "check-success=canary-tests / encryption (quay.io/ceph/ceph:v18)"
+      - "check-success=canary-tests / lvm (quay.io/ceph/ceph:v18)"
+      - "check-success=canary-tests / pvc (quay.io/ceph/ceph:v18)"
+      - "check-success=canary-tests / pvc-db (quay.io/ceph/ceph:v18)"
+      - "check-success=canary-tests / pvc-db-wal (quay.io/ceph/ceph:v18)"
+      - "check-success=canary-tests / encryption-pvc (quay.io/ceph/ceph:v18)"
+      - "check-success=canary-tests / encryption-pvc-db (quay.io/ceph/ceph:v18)"
+      - "check-success=canary-tests / encryption-pvc-db-wal (quay.io/ceph/ceph:v18)"
+      - "check-success=canary-tests / encryption-pvc-kms-vault-token-auth (quay.io/ceph/ceph:v18)"
+      - "check-success=canary-tests / encryption-pvc-kms-vault-k8s-auth (quay.io/ceph/ceph:v18)"
+      - "check-success=canary-tests / lvm-pvc (quay.io/ceph/ceph:v18)"
+      - "check-success=canary-tests / multi-cluster-mirroring (quay.io/ceph/ceph:v18)"
+      - "check-success=canary-tests / rgw-multisite-testing (quay.io/ceph/ceph:v18)"
+      - "check-success=canary-tests / encryption-pvc-kms-ibm-kp (quay.io/ceph/ceph:v18)"
+      - "check-success=canary-tests / multus-cluster-network (quay.io/ceph/ceph:v18)"
+      - "check-success=canary-tests / csi-hostnetwork-disabled (quay.io/ceph/ceph:v18)"
       - "check-success=TestCephSmokeSuite (v1.25.16)"
-      - "check-success=TestCephSmokeSuite (v1.29.2)"
+      - "check-success=TestCephSmokeSuite (v1.30.0)"
       - "check-success=TestCephHelmSuite (v1.25.16)"
-      - "check-success=TestCephHelmSuite (v1.29.2)"
-      - "check-success=TestCephMultiClusterDeploySuite (v1.29.2)"
-      - "check-success=TestCephObjectSuite (v1.29.2)"
+      - "check-success=TestCephHelmSuite (v1.30.0)"
+      - "check-success=TestCephMultiClusterDeploySuite (v1.30.0)"
+      - "check-success=TestCephObjectSuite (v1.30.0)"
       - "check-success=TestCephUpgradeSuite (v1.25.16)"
-      - "check-success=TestCephUpgradeSuite (v1.29.2)"
+      - "check-success=TestCephUpgradeSuite (v1.30.0)"
       - "check-success=TestHelmUpgradeSuite (v1.25.16)"
-      - "check-success=TestHelmUpgradeSuite (v1.29.2)"
+      - "check-success=TestHelmUpgradeSuite (v1.30.0)"
     actions:
       merge:
         method: merge
(file header not captured)
@@ -7,7 +7,7 @@ and Rook is granted the required privileges (see below for more information).
 
 ## Kubernetes Version
 
-Kubernetes versions **v1.25** through **v1.29** are supported.
+Kubernetes versions **v1.25** through **v1.30** are supported.
 
 ## CPU Architecture
 
2 changes: 1 addition & 1 deletion Documentation/Getting-Started/quickstart.md
@@ -12,7 +12,7 @@ This guide will walk through the basic setup of a Ceph cluster and enable K8s ap
 
 ## Kubernetes Version
 
-Kubernetes versions **v1.25** through **v1.29** are supported.
+Kubernetes versions **v1.25** through **v1.30** are supported.
 
 ## CPU Architecture
 
(file header not captured)
@@ -33,8 +33,8 @@ provisioner: {{ $root.Values.operatorNamespace }}.rbd.csi.ceph.com
 {{- end }}
 parameters:
   clusterID: {{ $ecblockpool.parameters.clusterID }}
-  dataPool: {{ $ecblockpool.name }}-metadata
-  pool: {{ $ecblockpool.name }}
+  pool: {{ $ecblockpool.name }}-metadata
+  dataPool: {{ $ecblockpool.name }}
   imageFormat: "{{ $ecblockpool.parameters.imageFormat }}"
   imageFeatures: {{ $ecblockpool.parameters.imageFeatures }}
 
@@ -58,4 +58,4 @@ parameters:
 allowVolumeExpansion: {{ $ecblockpool.storageClass.allowVolumeExpansion }}
 reclaimPolicy: {{ $ecblockpool.storageClass.reclaimPolicy }}
 {{ end }}
-{{ end }}
+{{ end }}
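
For context on the swap above: in ceph-csi's RBD provisioner, `pool` names the replicated pool that holds the RBD image metadata, while `dataPool` names the erasure-coded pool that holds the data objects, which is why the chart now maps `<name>-metadata` to `pool` and `<name>` to `dataPool`. The following is a minimal Go sketch of the corrected parameter layout; the pool names, clusterID, and provisioner value are illustrative examples, not values taken from the chart.

```go
package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Hypothetical names: an erasure-coded CephBlockPool "ec-pool" with its
	// replicated metadata companion "ec-pool-metadata".
	sc := storagev1.StorageClass{
		ObjectMeta:  metav1.ObjectMeta{Name: "ec-pool"},
		Provisioner: "rook-ceph.rbd.csi.ceph.com", // <operatorNamespace>.rbd.csi.ceph.com
		Parameters: map[string]string{
			"clusterID": "rook-ceph",
			// "pool" must point at the replicated pool that stores RBD image metadata...
			"pool": "ec-pool-metadata",
			// ...while "dataPool" points at the erasure-coded pool that stores the data objects.
			"dataPool": "ec-pool",
		},
	}
	fmt.Printf("%s -> pool=%s dataPool=%s\n", sc.Name, sc.Parameters["pool"], sc.Parameters["dataPool"])
}
```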
6 changes: 0 additions & 6 deletions pkg/operator/ceph/cluster/cluster_external.go
@@ -108,12 +108,6 @@ func (c *ClusterController) configureExternalCephCluster(cluster *cluster) error
         }
     }
 
-    // Create CSI config map
-    err = csi.CreateCsiConfigMap(c.OpManagerCtx, c.namespacedName.Namespace, c.context.Clientset, cluster.ownerInfo)
-    if err != nil {
-        return errors.Wrap(err, "failed to create csi config map")
-    }
-
     // update the msgr2 flag
     for _, m := range cluster.ClusterInfo.Monitors {
         // m.Endpoint=10.1.115.104:3300
3 changes: 2 additions & 1 deletion pkg/operator/ceph/cluster/osd/spec.go
@@ -580,8 +580,9 @@ func (c *Cluster) makeDeployment(osdProps osdProperties, osd OSDInfo, provisionC
             initContainers = append(initContainers, c.getExpandEncryptedPVCInitContainer(osdDataDirPath, osdProps))
         }
         initContainers = append(initContainers, c.getActivatePVCInitContainer(osdProps, osdID))
+        // The expand init container fails for legacy LVM-based OSDs, so only supported expansion for raw mode OSDs
+        initContainers = append(initContainers, c.getExpandPVCInitContainer(osdProps, osdID))
     }
-    initContainers = append(initContainers, c.getExpandPVCInitContainer(osdProps, osdID))
 } else {
     // Add the volume to the spec and the mount to the daemon container
     // so that it can pick the already mounted/activated osd metadata path
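
The effect of this change is that the `expand-bluefs` init container is appended only for raw-mode OSDs on PVC, since expansion fails for legacy LVM-based OSDs; the test update below drops it from the expected init container list accordingly. A rough, self-contained sketch of the resulting ordering follows — `buildOSDInitContainers` and its flags are simplified stand-ins, not Rook's actual helpers, and the container names are taken from the test below.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// buildOSDInitContainers is a simplified stand-in for Rook's makeDeployment logic:
// the expand-bluefs init container is appended only for raw-mode OSDs on PVC,
// because PVC expansion fails for legacy LVM-based OSDs.
func buildOSDInitContainers(onPVC, lvmBackedOSD bool) []corev1.Container {
	initContainers := []corev1.Container{
		{Name: "config-init"},
		{Name: "copy-bins"},
	}
	if onPVC {
		initContainers = append(initContainers, corev1.Container{Name: "blkdevmapper"})
		if !lvmBackedOSD {
			// raw-mode OSDs support expanding the underlying PVC
			initContainers = append(initContainers, corev1.Container{Name: "expand-bluefs"})
		}
	}
	initContainers = append(initContainers, corev1.Container{Name: "chown-container-data-dir"})
	return initContainers
}

func names(cs []corev1.Container) []string {
	out := make([]string, 0, len(cs))
	for _, c := range cs {
		out = append(out, c.Name)
	}
	return out
}

func main() {
	fmt.Println("raw mode:", names(buildOSDInitContainers(true, false)))
	fmt.Println("lvm mode:", names(buildOSDInitContainers(true, true)))
}
```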
5 changes: 2 additions & 3 deletions pkg/operator/ceph/cluster/osd/spec_test.go
@@ -198,12 +198,11 @@ func testPodDevices(t *testing.T, dataDir, deviceName string, allDevices bool) {
     deployment, err = c.makeDeployment(osdProp, osd, dataPathMap)
     assert.Nil(t, err)
     assert.NotNil(t, deployment)
-    assert.Equal(t, 5, len(deployment.Spec.Template.Spec.InitContainers), deployment.Spec.Template.Spec.InitContainers[2].Name)
+    assert.Equal(t, 4, len(deployment.Spec.Template.Spec.InitContainers), deployment.Spec.Template.Spec.InitContainers[2].Name)
     assert.Equal(t, "config-init", deployment.Spec.Template.Spec.InitContainers[0].Name)
     assert.Equal(t, "copy-bins", deployment.Spec.Template.Spec.InitContainers[1].Name)
     assert.Equal(t, "blkdevmapper", deployment.Spec.Template.Spec.InitContainers[2].Name)
-    assert.Equal(t, "expand-bluefs", deployment.Spec.Template.Spec.InitContainers[3].Name)
-    assert.Equal(t, "chown-container-data-dir", deployment.Spec.Template.Spec.InitContainers[4].Name)
+    assert.Equal(t, "chown-container-data-dir", deployment.Spec.Template.Spec.InitContainers[3].Name)
     assert.Equal(t, 1, len(deployment.Spec.Template.Spec.Containers))
     initCont = deployment.Spec.Template.Spec.InitContainers[0]
     assert.Equal(t, 5, len(initCont.VolumeMounts), initCont.VolumeMounts)
43 changes: 39 additions & 4 deletions pkg/operator/ceph/csi/cluster_config.go
@@ -254,12 +254,50 @@ func CreateCsiConfigMap(ctx context.Context, namespace string, clientset kuberne
         if !k8serrors.IsAlreadyExists(err) {
             return errors.Wrapf(err, "failed to create initial csi config map %q (in %q)", configMap.Name, namespace)
         }
+        // CM already exists; update owner refs to it if needed
+        // this corrects issues where the csi config map was sometimes created with CephCluster
+        // owner ref, which would result in the cm being deleted if that cluster was deleted
+        if err := updateCsiConfigMapOwnerRefs(ctx, namespace, clientset, ownerInfo); err != nil {
+            return errors.Wrapf(err, "failed to ensure csi config map %q (in %q) owner references", configMap.Name, namespace)
+        }
     }
 
     logger.Infof("successfully created csi config map %q", configMap.Name)
     return nil
 }
 
+// check the owner references on the csi config map, and fix incorrect references if needed
+func updateCsiConfigMapOwnerRefs(ctx context.Context, namespace string, clientset kubernetes.Interface, expectedOwnerInfo *k8sutil.OwnerInfo) error {
+    cm, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, ConfigName, metav1.GetOptions{})
+    if err != nil {
+        return errors.Wrapf(err, "failed to fetch csi config map %q (in %q) which already exists", ConfigName, namespace)
+    }
+
+    existingOwners := cm.GetOwnerReferences()
+    var currentOwner *metav1.OwnerReference = nil
+    if len(existingOwners) == 1 {
+        currentOwner = &existingOwners[0] // currentOwner is nil unless there is exactly one owner on the cm
+    }
+    // if there is exactly one owner, and it is correct --> no fix needed
+    if currentOwner != nil && (currentOwner.UID == expectedOwnerInfo.GetUID()) {
+        logger.Debugf("csi config map %q (in %q) has the expected owner; owner id: %q", ConfigName, namespace, currentOwner.UID)
+        return nil
+    }
+
+    // must fix owner refs
+    logger.Infof("updating csi configmap %q (in %q) owner info", ConfigName, namespace)
+    cm.OwnerReferences = []metav1.OwnerReference{}
+    if err := expectedOwnerInfo.SetControllerReference(cm); err != nil {
+        return errors.Wrapf(err, "failed to set updated owner reference on csi config map %q (in %q)", ConfigName, namespace)
+    }
+    _, err = clientset.CoreV1().ConfigMaps(namespace).Update(ctx, cm, metav1.UpdateOptions{})
+    if err != nil {
+        return errors.Wrapf(err, "failed to update csi config map %q (in %q) to update its owner reference", ConfigName, namespace)
+    }
+
+    return nil
+}
+
 // SaveClusterConfig updates the config map used to provide ceph-csi with
 // basic cluster configuration. The clusterNamespace and clusterInfo are
 // used to determine what "cluster" in the config map will be updated and
@@ -292,10 +330,7 @@ func SaveClusterConfig(clientset kubernetes.Interface, clusterNamespace string,
     configMap, err := clientset.CoreV1().ConfigMaps(csiNamespace).Get(clusterInfo.Context, ConfigName, metav1.GetOptions{})
     if err != nil {
         if k8serrors.IsNotFound(err) {
-            err = CreateCsiConfigMap(clusterInfo.Context, csiNamespace, clientset, clusterInfo.OwnerInfo)
-            if err != nil {
-                return errors.Wrap(err, "failed creating csi config map")
-            }
+            return errors.Wrap(err, "waiting for CSI config map to be created")
         }
         return errors.Wrap(err, "failed to fetch current csi config map")
     }
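
Taken together with the cluster_external.go change above, only `CreateCsiConfigMap` creates the CSI config map (using the owner info passed by the operator), `SaveClusterConfig` now simply returns a "waiting" error until the map exists, and a map that was previously created with a CephCluster owner reference has its owner repaired so deleting that cluster no longer garbage-collects it. Below is a standalone sketch of the repair idea against a fake clientset; the config map name `rook-ceph-csi-config`, the `rook-ceph` namespace, and the UIDs are illustrative assumptions rather than values taken from this diff.

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	ctx := context.TODO()

	// Start from a config map that was wrongly created with a CephCluster owner reference.
	clientset := fake.NewSimpleClientset(&corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "rook-ceph-csi-config", // illustrative name
			Namespace: "rook-ceph",
			OwnerReferences: []metav1.OwnerReference{
				{APIVersion: "ceph.rook.io/v1", Kind: "CephCluster", Name: "my-cluster", UID: types.UID("cluster-uid")},
			},
		},
	})

	expectedUID := types.UID("operator-deployment-uid") // hypothetical owner the operator expects

	cm, err := clientset.CoreV1().ConfigMaps("rook-ceph").Get(ctx, "rook-ceph-csi-config", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}

	owners := cm.GetOwnerReferences()
	if len(owners) == 1 && owners[0].UID == expectedUID {
		fmt.Println("owner reference already correct, nothing to do")
		return
	}

	// Replace the owner references with the expected controller owner so the config map
	// is no longer garbage-collected when the CephCluster is deleted.
	controller := true
	cm.OwnerReferences = []metav1.OwnerReference{{
		APIVersion: "apps/v1",
		Kind:       "Deployment",
		Name:       "rook-ceph-operator",
		UID:        expectedUID,
		Controller: &controller,
	}}
	if _, err := clientset.CoreV1().ConfigMaps("rook-ceph").Update(ctx, cm, metav1.UpdateOptions{}); err != nil {
		panic(err)
	}
	fmt.Println("owner reference repaired")
}
```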
(The remaining changed files were not loaded in this view.)
