osd: drop openstack related tasks
All of this should be addressed in custom separate playbooks if needed.

Signed-off-by: Guillaume Abrioux <[email protected]>
guits committed Mar 9, 2024
1 parent 77be56d commit a6b4926
Showing 27 changed files with 4 additions and 315 deletions.
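
For operators who still need these resources, the "custom separate playbook" approach the commit message suggests could look like the sketch below. It is illustrative only and not part of this change: the 'mons' inventory group name, the availability of the ceph CLI and an admin keyring on that host, and the use of plain ceph commands (instead of the removed role tasks) are assumptions; pool names and client caps mirror the removed defaults.

# Hypothetical standalone playbook (not shipped with ceph-ansible) recreating
# the pools and client key the dropped tasks used to manage. Adjust group name,
# pool names and caps to your environment.
- name: Create OpenStack pools and client keys in a separate playbook
  hosts: mons[0]            # run once, on the first monitor
  gather_facts: false
  vars:
    openstack_pools:
      - { name: images, application: rbd }
      - { name: volumes, application: rbd }
      - { name: vms, application: rbd }
      - { name: backups, application: rbd }
  tasks:
    - name: Create pools (safe to re-run, an existing pool is left untouched)
      ansible.builtin.command: "ceph osd pool create {{ item.name }}"
      loop: "{{ openstack_pools }}"
      changed_when: false

    - name: Enable the rbd application on each pool
      ansible.builtin.command: "ceph osd pool application enable {{ item.name }} {{ item.application }}"
      loop: "{{ openstack_pools }}"
      changed_when: false

    - name: Create one client key with rbd profiles on all pools
      ansible.builtin.command: >
        ceph auth get-or-create client.openstack
        mon 'profile rbd'
        osd 'profile rbd pool=images, profile rbd pool=volumes, profile rbd pool=vms, profile rbd pool=backups'
      changed_when: false
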
58 changes: 0 additions & 58 deletions group_vars/all.yml.sample
@@ -554,64 +554,6 @@ dummy:
#docker_pull_timeout: "300s"


#############
# OPENSTACK #
#############
#openstack_config: false
# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
# `pg_num` and `pgp_num` keys will be ignored, even if specified.
# eg:
#  openstack_glance_pool:
#    name: "images"
#    rule_name: "my_replicated_rule"
#    application: "rbd"
#    pg_autoscale_mode: false
#    pg_num: 16
#    pgp_num: 16
#    target_size_ratio: 0.2
#openstack_glance_pool:
#  name: "images"
#  application: "rbd"
#openstack_cinder_pool:
#  name: "volumes"
#  application: "rbd"
#openstack_nova_pool:
#  name: "vms"
#  application: "rbd"
#openstack_cinder_backup_pool:
#  name: "backups"
#  application: "rbd"
#openstack_gnocchi_pool:
#  name: "metrics"
#  application: "rbd"
#openstack_cephfs_data_pool:
#  name: "manila_data"
#  application: "cephfs"
#openstack_cephfs_metadata_pool:
#  name: "manila_metadata"
#  application: "cephfs"
#openstack_pools:
#  - "{{ openstack_glance_pool }}"
#  - "{{ openstack_cinder_pool }}"
#  - "{{ openstack_nova_pool }}"
#  - "{{ openstack_cinder_backup_pool }}"
#  - "{{ openstack_gnocchi_pool }}"
#  - "{{ openstack_cephfs_data_pool }}"
#  - "{{ openstack_cephfs_metadata_pool }}"


# The value for 'key' can be a pre-generated key,
# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
# By default, keys will be auto-generated.
#
#openstack_keys:
#  - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
#  - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
#  - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
#  - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
#  - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }


#############
# DASHBOARD #
#############
58 changes: 0 additions & 58 deletions roles/ceph-defaults/defaults/main.yml
@@ -546,64 +546,6 @@ docker_pull_retry: 3
docker_pull_timeout: "300s"


#############
# OPENSTACK #
#############
openstack_config: false
# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
# `pg_num` and `pgp_num` keys will be ignored, even if specified.
# eg:
#  openstack_glance_pool:
#    name: "images"
#    rule_name: "my_replicated_rule"
#    application: "rbd"
#    pg_autoscale_mode: false
#    pg_num: 16
#    pgp_num: 16
#    target_size_ratio: 0.2
openstack_glance_pool:
  name: "images"
  application: "rbd"
openstack_cinder_pool:
  name: "volumes"
  application: "rbd"
openstack_nova_pool:
  name: "vms"
  application: "rbd"
openstack_cinder_backup_pool:
  name: "backups"
  application: "rbd"
openstack_gnocchi_pool:
  name: "metrics"
  application: "rbd"
openstack_cephfs_data_pool:
  name: "manila_data"
  application: "cephfs"
openstack_cephfs_metadata_pool:
  name: "manila_metadata"
  application: "cephfs"
openstack_pools:
  - "{{ openstack_glance_pool }}"
  - "{{ openstack_cinder_pool }}"
  - "{{ openstack_nova_pool }}"
  - "{{ openstack_cinder_backup_pool }}"
  - "{{ openstack_gnocchi_pool }}"
  - "{{ openstack_cephfs_data_pool }}"
  - "{{ openstack_cephfs_metadata_pool }}"


# The value for 'key' can be a pre-generated key,
# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
# By default, keys will be auto-generated.
#
openstack_keys:
  - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
  - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
  - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
  - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
  - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }


#############
# DASHBOARD #
#############
9 changes: 0 additions & 9 deletions roles/ceph-osd/tasks/main.yml
@@ -100,12 +100,3 @@
  when: hostvars[groups[mon_group_name][0]]['crush_rule_config'] | default(crush_rule_config) | bool
  tags: wait_all_osds_up

# Create the pools listed in openstack_pools
- name: Include openstack_config.yml
  ansible.builtin.include_tasks: openstack_config.yml
  when:
    - not add_osd | bool
    - not rolling_update | default(False) | bool
    - openstack_config | bool
    - inventory_hostname == groups[osd_group_name] | last
  tags: wait_all_osds_up
68 changes: 0 additions & 68 deletions roles/ceph-osd/tasks/openstack_config.yml

This file was deleted.

1 change: 0 additions & 1 deletion roles/ceph-validate/tasks/check_pools.yml
@@ -3,7 +3,6 @@
  ansible.builtin.fail:
    msg: "You must set a target_size_ratio value on following pool: {{ item.name }}."
  with_items:
    - "{{ openstack_pools | default([]) }}"
    - "{{ cephfs_pools | default([]) }}"
    - "{{ pools | default([]) }}"
  when:
1 change: 0 additions & 1 deletion tests/functional/all-in-one/container/group_vars/all
@@ -11,7 +11,6 @@ public_network: "192.168.19.0/24"
cluster_network: "192.168.20.0/24"
rgw_override_bucket_index_max_shards: 16
rgw_bucket_default_quota_max_objects: 1638400
openstack_config: True
dashboard_enabled: false
ceph_conf_overrides:
  global:
1 change: 0 additions & 1 deletion tests/functional/all-in-one/group_vars/all
@@ -4,7 +4,6 @@ ceph_origin: repository
ceph_repository: community
radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
ceph_mon_docker_subnet: "{{ public_network }}"
openstack_config: True
dashboard_enabled: False
public_network: "192.168.17.0/24"
cluster_network: "192.168.18.0/24"
12 changes: 0 additions & 12 deletions tests/functional/all_daemons/container/group_vars/all
@@ -17,18 +17,6 @@ ceph_conf_overrides:
    mon_warn_on_pool_no_redundancy: false
    osd_pool_default_size: 1
    mon_max_pg_per_osd: 300
openstack_config: True
openstack_glance_pool:
  name: "images"
  size: 1
  target_size_ratio: 0.2
openstack_cinder_pool:
  name: "volumes"
  rule_name: "HDD"
  size: 1
openstack_pools:
  - "{{ openstack_glance_pool }}"
  - "{{ openstack_cinder_pool }}"
docker_pull_timeout: 600s
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
14 changes: 0 additions & 14 deletions tests/functional/all_daemons/group_vars/all
@@ -11,20 +11,6 @@ ceph_conf_overrides:
    mon_warn_on_pool_no_redundancy: false
    osd_pool_default_size: 1
    mon_max_pg_per_osd: 300
openstack_config: True
openstack_glance_pool:
  name: "images"
  size: 1
  application: rbd
  target_size_ratio: 0.2
openstack_cinder_pool:
  name: "volumes"
  rule_name: "HDD"
  size: 1
  application: rbd
openstack_pools:
  - "{{ openstack_glance_pool }}"
  - "{{ openstack_cinder_pool }}"
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
mds_max_mds: 2
12 changes: 0 additions & 12 deletions tests/functional/all_daemons_ipv6/container/group_vars/all
@@ -18,18 +18,6 @@ ceph_conf_overrides:
    mon_warn_on_pool_no_redundancy: false
    osd_pool_default_size: 1
    mon_max_pg_per_osd: 300
openstack_config: True
openstack_glance_pool:
  name: "images"
  size: 1
  target_size_ratio: 0.2
openstack_cinder_pool:
  name: "volumes"
  rule_name: "HDD"
  size: 1
openstack_pools:
  - "{{ openstack_glance_pool }}"
  - "{{ openstack_cinder_pool }}"
docker_pull_timeout: 600s
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
14 changes: 0 additions & 14 deletions tests/functional/all_daemons_ipv6/group_vars/all
@@ -12,20 +12,6 @@ ceph_conf_overrides:
    mon_warn_on_pool_no_redundancy: false
    osd_pool_default_size: 1
    mon_max_pg_per_osd: 300
openstack_config: True
openstack_glance_pool:
  name: "images"
  size: 1
  application: rbd
  target_size_ratio: 0.2
openstack_cinder_pool:
  name: "volumes"
  rule_name: "HDD"
  size: 1
  application: rbd
openstack_pools:
  - "{{ openstack_glance_pool }}"
  - "{{ openstack_cinder_pool }}"
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
mds_max_mds: 2
14 changes: 1 addition & 13 deletions tests/functional/docker2podman/group_vars/all
@@ -16,18 +16,6 @@ ceph_conf_overrides:
    mon_allow_pool_size_one: true
    mon_warn_on_pool_no_redundancy: false
    osd_pool_default_size: 1
openstack_config: False
openstack_glance_pool:
  name: "images"
  rule_name: "HDD"
  size: 1
openstack_cinder_pool:
  name: "volumes"
  rule_name: "HDD"
  size: 1
openstack_pools:
  - "{{ openstack_glance_pool }}"
  - "{{ openstack_cinder_pool }}"
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
dashboard_admin_password: $sX!cD$rYU6qR^B!
@@ -38,4 +26,4 @@ ceph_docker_image_tag: latest-main
node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4"
grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4"
@@ -11,7 +11,6 @@ public_network: "192.168.31.0/24"
cluster_network: "192.168.32.0/24"
rgw_override_bucket_index_max_shards: 16
rgw_bucket_default_quota_max_objects: 1638400
openstack_config: True
dashboard_enabled: false
ceph_conf_overrides:
  global:
1 change: 0 additions & 1 deletion tests/functional/external_clients/inventory/group_vars/all
@@ -4,7 +4,6 @@ ceph_origin: repository
ceph_repository: community
radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
ceph_mon_docker_subnet: "{{ public_network }}"
openstack_config: True
dashboard_enabled: False
public_network: "192.168.31.0/24"
cluster_network: "192.168.32.0/24"
16 changes: 1 addition & 15 deletions tests/functional/lvm-osds/container/group_vars/all
@@ -21,20 +21,6 @@ ceph_conf_overrides:
dashboard_enabled: False
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
openstack_config: True
openstack_glance_pool:
  name: "images"
  type: 3
  size: 1
  application: rbd
  target_size_ratio: 0.2
openstack_cinder_pool:
  name: "volumes"
  size: 1
  application: rbd
openstack_pools:
  - "{{ openstack_glance_pool }}"
  - "{{ openstack_cinder_pool }}"
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
ceph_docker_image_tag: latest-main
ceph_docker_image_tag: latest-main
