diff --git a/Documentation/CRDs/Block-Storage/ceph-block-pool-rados-namespace-crd.md b/Documentation/CRDs/Block-Storage/ceph-block-pool-rados-namespace-crd.md
index 8be7f73b2215..4d169438f289 100644
--- a/Documentation/CRDs/Block-Storage/ceph-block-pool-rados-namespace-crd.md
+++ b/Documentation/CRDs/Block-Storage/ceph-block-pool-rados-namespace-crd.md
@@ -49,3 +49,35 @@ If any setting is unspecified, a suitable default will be used automatically.
 ### Spec
 
 - `blockPoolName`: The metadata name of the CephBlockPool CR where the rados namespace will be created.
+
+## Creating a Storage Class
+
+Once the RADOS namespace is created, an RBD-based StorageClass can be created to
+provision PVs in this RADOS namespace. For this purpose, set the `clusterID` field of
+the StorageClass spec to the `clusterID` value reported in the
+CephBlockPoolRadosNamespace status.
+
+Extract the clusterID from the CephBlockPoolRadosNamespace CR:
+
+```console
+$ kubectl -n rook-ceph get cephblockpoolradosnamespace/namespace-a -o jsonpath='{.status.info.clusterID}'
+80fc4f4bacc064be641633e6ed25ba7e
+```
+
+In this example, replace `namespace-a` with the actual name of the RADOS namespace
+created earlier.
+Now set the `clusterID` field of the StorageClass to the value retrieved in the previous step.
+
+Example:
+
+```yaml
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: rook-ceph-block-rados-ns
+provisioner: rook-ceph.rbd.csi.ceph.com # csi-provisioner-name
+parameters:
+  clusterID: 80fc4f4bacc064be641633e6ed25ba7e
+  pool: replicapool
+  ...
+```
diff --git a/pkg/operator/ceph/controller/cleanup.go b/pkg/operator/ceph/controller/cleanup.go
index 01c4b23047d6..99a4a60efd71 100644
--- a/pkg/operator/ceph/controller/cleanup.go
+++ b/pkg/operator/ceph/controller/cleanup.go
@@ -69,9 +69,8 @@ func (c *ResourceCleanup) StartJob(ctx context.Context, clientset kubernetes.Int
 	podSpec := c.jobTemplateSpec()
 	job := &batch.Job{
 		ObjectMeta: metav1.ObjectMeta{
-			Name:            jobName,
-			Namespace:       c.resource.GetNamespace(),
-			OwnerReferences: c.resource.GetOwnerReferences(),
+			Name:      jobName,
+			Namespace: c.resource.GetNamespace(),
 		},
 		Spec: batch.JobSpec{
 			Template: podSpec,
diff --git a/pkg/operator/ceph/file/subvolumegroup/controller.go b/pkg/operator/ceph/file/subvolumegroup/controller.go
index 3c01bd0cb42f..a213e2d80cef 100644
--- a/pkg/operator/ceph/file/subvolumegroup/controller.go
+++ b/pkg/operator/ceph/file/subvolumegroup/controller.go
@@ -399,8 +399,13 @@ func buildClusterID(cephFilesystemSubVolumeGroup *cephv1.CephFilesystemSubVolume
 func (r *ReconcileCephFilesystemSubVolumeGroup) cleanup(svg *cephv1.CephFilesystemSubVolumeGroup, cephCluster *cephv1.CephCluster) error {
 	logger.Infof("starting cleanup of the ceph resources for subVolumeGroup %q in namespace %q", svg.Name, svg.Namespace)
+	svgName := svg.Spec.Name
+	// use resource name if `spec.Name` is empty in the subvolumeGroup CR.
+	if svgName == "" {
+		svgName = svg.Name
+	}
 	cleanupConfig := map[string]string{
-		opcontroller.CephFSSubVolumeGroupNameEnv: svg.Spec.Name,
+		opcontroller.CephFSSubVolumeGroupNameEnv: svgName,
 		opcontroller.CephFSNameEnv:               svg.Spec.FilesystemName,
 		opcontroller.CSICephFSRadosNamesaceEnv:   "csi",
 		opcontroller.CephFSMetaDataPoolNameEnv:   file.GenerateMetaDataPoolName(svg.Spec.FilesystemName),
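
To illustrate how the documentation change above is consumed, a PVC can request a volume from the StorageClass shown in the example, and the backing RBD image is then created inside the RADOS namespace. This is a minimal sketch, not part of the change itself: the PVC name and requested size are placeholder values, and only the `storageClassName` is taken from the documentation example.

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  # Illustrative name only; not part of this change.
  name: rbd-rados-ns-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      # Placeholder size for the example.
      storage: 1Gi
  # Matches the StorageClass defined in the documentation example above.
  storageClassName: rook-ceph-block-rados-ns
```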