Merge pull request #779 from red-hat-storage/sync_ds--master
Syncing latest changes from master for rook
openshift-merge-bot[bot] authored Nov 20, 2024
2 parents 400b307 + 5b176fc commit 20473b3
Showing 7 changed files with 67 additions and 8 deletions.
15 changes: 15 additions & 0 deletions .github/workflows/canary-integration-test.yml
@@ -70,6 +70,21 @@ jobs:
mgr_raw=$(kubectl -n rook-ceph exec $toolbox -- ceph mgr dump -f json|jq --raw-output .active_addr)
timeout 60 sh -c "until kubectl -n rook-ceph exec $toolbox -- curl --silent --show-error ${mgr_raw%%:*}:9283; do echo 'waiting for mgr prometheus exporter to be ready' && sleep 1; done"
- name: test osd.0 auth recovery from keyring file
run: |
toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[*].metadata.name}')
osd_id=0
osd_pod=$(kubectl get pod -l app=rook-ceph-osd,osd=$osd_id -n rook-ceph -o jsonpath='{.items[*].metadata.name}')
if [ $osd_pod ]; then
timeout 15 sh -c "until kubectl -n rook-ceph exec $toolbox -- ceph auth del osd.$osd_id ; do sleep 1 && echo 'waiting for osd auth to be deleted'; done";
kubectl -n rook-ceph delete pod $osd_pod;
timeout 60 sh -c "until kubectl -n rook-ceph exec $toolbox -- ceph auth get osd.$osd_id ; do sleep 1 && echo 'waiting for osd auth to be recovered'; done";
osd_pod=$(kubectl get pod -l app=rook-ceph-osd,osd=$osd_id -n rook-ceph -o jsonpath='{.items[*].metadata.name}');
kubectl -n rook-ceph wait --for=condition=Ready pod/$osd_pod --timeout=120s;
else
echo "osd $osd_id not found, skipping test";
fi
- name: test external script create-external-cluster-resources.py
run: |
toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[*].metadata.name}')
2 changes: 1 addition & 1 deletion .github/workflows/docs-check.yml
@@ -36,7 +36,7 @@ jobs:
with:
python-version: 3.9

- uses: DavidAnson/markdownlint-cli2-action@db43aef879112c3119a410d69f66701e0d530809 # v17.0.0
- uses: DavidAnson/markdownlint-cli2-action@eb5ca3ab411449c66620fe7f1b3c9e10547144b0 # v18.0.0
with:
globs: |
Documentation/**/*.md
2 changes: 1 addition & 1 deletion .github/workflows/unit-test.yml
@@ -46,7 +46,7 @@ jobs:
go-version: "1.22"

- name: Setup jq
uses: dcarbone/install-jq-action@8867ddb4788346d7c22b72ea2e2ffe4d514c7bcb # v2.1.0
uses: dcarbone/install-jq-action@e397bd87438d72198f81efd21f876461183d383a # v3.0.1
with:
version: "${{ inputs.version }}"

@@ -4,7 +4,7 @@ In order to configure an external Ceph cluster with Rook, we need to extract som

## 1. Create all users and keys

Run the python script [create-external-cluster-resources.py](https://github.com/rook/rook/blob/master/deploy/examples/external/create-external-cluster-resources.py) for creating all users and keys.
Run the python script [create-external-cluster-resources.py](https://github.com/rook/rook/blob/master/deploy/examples/external/create-external-cluster-resources.py) in the provider Ceph cluster's cephadm shell, so that it has the access needed to create the necessary users and keys.

```console
python3 create-external-cluster-resources.py --rbd-data-pool-name <pool_name> --cephfs-filesystem-name <filesystem-name> --rgw-endpoint <rgw-endpoint> --namespace <namespace> --format bash
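As a hedged usage sketch (not part of this diff): the `--format bash` output above is typically pasted into the consumer-side shell and then consumed by the `import-external-cluster.sh` helper shipped in `deploy/examples`; the exact variable names and invocation below are illustrative assumptions.

```console
# Assumption: the --format bash run prints `export VAR=value` lines.
# Paste/source that output into the current shell first, for example:
export NAMESPACE=<namespace>          # value printed by the script
export ROOK_EXTERNAL_FSID=<fsid>      # value printed by the script
# ...remaining export lines from the script output...

# Then run the import helper, which reads the exported variables and
# creates the corresponding secrets/configmaps in the consumer cluster.
bash import-external-cluster.sh
```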
1 change: 1 addition & 0 deletions deploy/charts/rook-ceph-cluster/templates/deployment.yaml
@@ -114,6 +114,7 @@ spec:
mountPath: /etc/rook
- name: ceph-admin-secret
mountPath: /var/lib/rook-ceph-mon
serviceAccountName: rook-ceph-default
volumes:
- name: ceph-admin-secret
secret:
35 changes: 33 additions & 2 deletions pkg/operator/ceph/cluster/osd/spec.go
@@ -107,11 +107,42 @@ OSD_ID="$ROOK_OSD_ID"
OSD_UUID=%s
OSD_STORE_FLAG="%s"
OSD_DATA_DIR=/var/lib/ceph/osd/ceph-"$OSD_ID"
KEYRING_FILE="$OSD_DATA_DIR"/keyring
CV_MODE=%s
DEVICE="$%s"
# create new keyring
ceph -n client.admin auth get-or-create osd."$OSD_ID" mon 'allow profile osd' mgr 'allow profile osd' osd 'allow *' -k /etc/ceph/admin-keyring-store/keyring
# In rare cases the keyring file is created by prepare-osd but does not
# get stored in the ceph auth system, so we need to import it from the
# keyring file instead of creating a new one
if ! ceph -n client.admin auth get osd."$OSD_ID" -k /etc/ceph/admin-keyring-store/keyring; then
if [ -f "$KEYRING_FILE" ]; then
# import keyring from existing file
TMP_DIR=$(mktemp -d)
python3 -c "
import configparser
config = configparser.ConfigParser()
config.read('$KEYRING_FILE')
if not config.has_section('osd.$OSD_ID'):
exit()
config['osd.$OSD_ID'] = {'key': config['osd.$OSD_ID']['key'], 'caps mon': '\"allow profile osd\"', 'caps mgr': '\"allow profile osd\"', 'caps osd': '\"allow *\"'}
with open('$TMP_DIR/keyring', 'w') as configfile:
config.write(configfile)
"
cat "$TMP_DIR"/keyring
ceph -n client.admin auth import -i "$TMP_DIR"/keyring -k /etc/ceph/admin-keyring-store/keyring
rm --recursive --force "$TMP_DIR"
else
# create new keyring if no keyring file found
ceph -n client.admin auth get-or-create osd."$OSD_ID" mon 'allow profile osd' mgr 'allow profile osd' osd 'allow *' -k /etc/ceph/admin-keyring-store/keyring
fi
fi
# activate the osd with ceph-volume
if [[ "$CV_MODE" == "lvm" ]]; then
18 changes: 15 additions & 3 deletions pkg/operator/ceph/cluster/osd/topology/topology.go
@@ -23,6 +23,7 @@ import (
"fmt"
"strings"

"github.com/coreos/pkg/capnslog"
"github.com/rook/rook/pkg/daemon/ceph/client"
corev1 "k8s.io/api/core/v1"
)
@@ -39,6 +40,8 @@ var (

// The list of supported failure domains in the CRUSH map, ordered from lowest to highest
CRUSHMapLevelsOrdered = append([]string{"host"}, append(CRUSHTopologyLabels, KubernetesTopologyLabels...)...)

logger = capnslog.NewPackageLogger("github.com/rook/rook", "osd-topology")
)

const (
@@ -107,18 +110,27 @@ func extractTopologyFromLabels(labels map[string]string) (map[string]string, str
}
// iterate in lowest to highest order as the lowest level should be retained and any higher-level duplicate
// should be removed
duplicateTopology := make(map[string]int)
duplicateTopology := make(map[string][]string)
for i := len(allKubernetesTopologyLabels) - 1; i >= 0; i-- {
topologyLabel := allKubernetesTopologyLabels[i]
if value, ok := labels[topologyLabel]; ok {
if _, ok := duplicateTopology[value]; ok {
delete(topology, kubernetesTopologyLabelToCRUSHLabel(topologyLabel))
} else {
duplicateTopology[value] = 1
}
duplicateTopology[value] = append(duplicateTopology[value], topologyLabel)
}
}

// remove non-duplicate entries, and report if any duplicate entries were found
for value, duplicateKeys := range duplicateTopology {
if len(duplicateKeys) <= 1 {
delete(duplicateTopology, value)
}
}
if len(duplicateTopology) != 0 {
logger.Warningf("Found duplicate location values with labels: %v", duplicateTopology)
}

return topology, topologyAffinity
}

Expand Down

0 comments on commit 20473b3

Please sign in to comment.