diff --git a/.ansible-lint b/.ansible-lint
index 796dfa158e..f9ae078ac5 100644
--- a/.ansible-lint
+++ b/.ansible-lint
@@ -1,4 +1,5 @@
---
+strict: true
use_default_rules: true
skip_list:
# [E301] Commands should not change things if nothing needs doing
@@ -24,3 +25,14 @@ skip_list:
- fqcn[action]
# role name check matching ^*$
- role-name
+ # TODO(frickler): Discuss these in detail, skipping for now to unblock things
+ - key-order[task]
+ - no-free-form
+ - name[play]
+ - var-naming[no-role-prefix]
+ - risky-file-permissions
+ - risky-shell-pipe
+ - command-instead-of-shell
+ - command-instead-of-module
+ - ignore-errors
+ - jinja[spacing]
diff --git a/.codespell-ignore b/.codespell-ignore
new file mode 100644
index 0000000000..4bb4cb538b
--- /dev/null
+++ b/.codespell-ignore
@@ -0,0 +1,3 @@
+assertIn
+ist
+solum
diff --git a/README.rst b/README.rst
index 9af0e9467a..07746f5791 100644
--- a/README.rst
+++ b/README.rst
@@ -48,7 +48,6 @@ Kolla Ansible deploys containers for the following OpenStack projects:
- `CloudKitty `__
- `Cyborg `__
- `Designate `__
-- `Freezer `__
- `Glance `__
- `Heat `__
- `Horizon `__
@@ -59,18 +58,14 @@ Kolla Ansible deploys containers for the following OpenStack projects:
- `Manila `__
- `Masakari `__
- `Mistral `__
-- `Monasca `__
-- `Murano `__
- `Neutron `__
- `Nova `__
- `Octavia `__
-- `Sahara `__
-- `Senlin `__
-- `Solum `__
+- Skyline (`APIServer `__ and `Console `__)
- `Swift `__
- `Tacker `__
- `Trove `__
-- `Vitrage `__
+- `Venus `__
- `Watcher `__
- `Zun `__
@@ -84,8 +79,8 @@ Kolla Ansible deploys containers for the following infrastructure components:
`InfluxDB `__,
`Prometheus `__, and
`Grafana `__ for performance monitoring.
-- `Elasticsearch `__ and
- `Kibana `__ to search, analyze,
+- `OpenSearch `__ and
+ `OpenSearch Dashboards `__ to search, analyze,
and visualize log messages.
- `Etcd `__ a distributed reliable key-value store.
- `Fluentd `__ as an open source data collector
@@ -101,8 +96,6 @@ Kolla Ansible deploys containers for the following infrastructure components:
- `RabbitMQ `__ as a messaging backend for
communication between services.
- `Redis `__ an in-memory data structure store.
-- `Zookeeper `__ an open-source server which enables
- highly reliable distributed coordination.
Directories
===========
@@ -135,16 +128,16 @@ workflow `__.
- File bugs, blueprints, track releases, etc on
`Launchpad `__.
- Attend weekly
- `meetings `__.
+ `meetings `__.
- Contribute `code `__.
Contributors
============
Check out who's `contributing
-code `__ and
+code `__ and
`contributing
-reviews `__.
+reviews `__.
Notices
=======
diff --git a/ansible/action_plugins/merge_configs.py b/ansible/action_plugins/merge_configs.py
index 6ad1e3a219..a825835506 100644
--- a/ansible/action_plugins/merge_configs.py
+++ b/ansible/action_plugins/merge_configs.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
# Copyright 2015 Sam Yaple
# Copyright 2017 99Cloud Inc.
#
@@ -173,12 +171,12 @@ def run(self, tmp=None, task_vars=None):
del tmp # not used
sources = self._task.args.get('sources', None)
+ whitespace = self._task.args.get('whitespace', True)
if not isinstance(sources, list):
sources = [sources]
- config = OverrideConfigParser(
- whitespace=self._task.args.get('whitespace', True))
+ config = OverrideConfigParser(whitespace=whitespace)
for source in sources:
self.read_config(source, config)
@@ -215,7 +213,11 @@ def run(self, tmp=None, task_vars=None):
loader=self._loader,
templar=self._templar,
shared_loader_obj=self._shared_loader_obj)
- result.update(copy_action.run(task_vars=task_vars))
+ copy_result = copy_action.run(task_vars=task_vars)
+ copy_result['invocation']['module_args'].update({
+ 'src': result_file, 'sources': sources,
+ 'whitespace': whitespace})
+ result.update(copy_result)
finally:
shutil.rmtree(local_tempdir)
return result
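
A side note on the `whitespace` argument hoisted above: judging from its use with OverrideConfigParser, it appears to control whether rendered INI options carry spaces around the equals sign. A minimal sketch of that assumed behaviour (hypothetical helper, not the plugin's actual code):

```python
# Hypothetical rendering helper; assumes whitespace toggles
# "key = value" (default) versus "key=value" in the merged INI output.
def render_option(key, value, whitespace=True):
    sep = ' = ' if whitespace else '='
    return f'{key}{sep}{value}'

assert render_option('debug', 'True') == 'debug = True'
assert render_option('debug', 'True', whitespace=False) == 'debug=True'
```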
diff --git a/ansible/action_plugins/merge_yaml.py b/ansible/action_plugins/merge_yaml.py
old mode 100755
new mode 100644
index a4170b3b23..ea7350bf73
--- a/ansible/action_plugins/merge_yaml.py
+++ b/ansible/action_plugins/merge_yaml.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
# Copyright 2015 Sam Yaple
# Copyright 2016 intel
#
@@ -19,8 +17,7 @@
import shutil
import tempfile
-from yaml import dump
-from yaml import safe_load
+import yaml
from ansible import constants
from ansible import errors as ansible_errors
@@ -58,6 +55,14 @@
default: False
required: False
type: bool
+ yaml_width:
+ description:
+      - The maximum line width of the rendered YAML document. Ansible uses
+        the PyYAML library, which wraps lines at 80 characters by default.
+        Set this option to raise that limit.
+ default: None
+ required: False
+ type: int
author: Sean Mooney
'''
@@ -71,6 +76,7 @@
sources:
- "/tmp/default.yml"
- "/tmp/override.yml"
+ yaml_width: 131072
dest:
- "/tmp/out.yml"
'''
@@ -83,7 +89,7 @@ class ActionModule(action.ActionBase):
def read_config(self, source):
result = None
# Only use config if present
- if os.access(source, os.R_OK):
+ if source and os.access(source, os.R_OK):
with open(source, 'r') as f:
template_data = f.read()
@@ -96,7 +102,7 @@ def read_config(self, source):
self._templar.environment.loader.searchpath = searchpath
template_data = self._templar.template(template_data)
- result = safe_load(template_data)
+ result = yaml.safe_load(template_data)
return result or {}
def run(self, tmp=None, task_vars=None):
@@ -116,6 +122,7 @@ def run(self, tmp=None, task_vars=None):
output = {}
sources = self._task.args.get('sources', None)
extend_lists = self._task.args.get('extend_lists', False)
+ yaml_width = self._task.args.get('yaml_width', None)
if not isinstance(sources, list):
sources = [sources]
for source in sources:
@@ -130,11 +137,13 @@ def run(self, tmp=None, task_vars=None):
try:
result_file = os.path.join(local_tempdir, 'source')
with open(result_file, 'w') as f:
- f.write(dump(output, default_flow_style=False))
+ f.write(yaml.dump(output, default_flow_style=False,
+ width=yaml_width))
new_task = self._task.copy()
new_task.args.pop('sources', None)
new_task.args.pop('extend_lists', None)
+ new_task.args.pop('yaml_width', None)
new_task.args.update(
dict(
src=result_file
@@ -149,7 +158,11 @@ def run(self, tmp=None, task_vars=None):
loader=self._loader,
templar=self._templar,
shared_loader_obj=self._shared_loader_obj)
- result.update(copy_action.run(task_vars=task_vars))
+ copy_result = copy_action.run(task_vars=task_vars)
+ copy_result['invocation']['module_args'].update({
+ 'src': result_file, 'sources': sources,
+ 'extend_lists': extend_lists})
+ result.update(copy_result)
finally:
shutil.rmtree(local_tempdir)
return result
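
The `yaml_width` option exists because PyYAML wraps long lines when dumping. A standalone illustration of the behaviour the option exposes (not plugin code; the plugin's default of `None` keeps PyYAML's own limit):

```python
import yaml

# PyYAML wraps scalars at roughly 80 columns by default; a large width
# (such as 131072 in the EXAMPLES above) effectively disables wrapping.
doc = {'command': ('word ' * 30).strip()}
print(yaml.dump(doc, default_flow_style=False))                # wrapped
print(yaml.dump(doc, default_flow_style=False, width=131072))  # one line
```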
diff --git a/ansible/destroy.yml b/ansible/destroy.yml
index 9d302bb34c..8603bda226 100644
--- a/ansible/destroy.yml
+++ b/ansible/destroy.yml
@@ -1,5 +1,9 @@
---
- name: Apply role destroy
hosts: all
+ max_fail_percentage: >-
+ {{ destroy_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- destroy
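
The chained `default` filters resolve in order, with an undefined fallback simply propagating to the next `default`. A rough Python mirror of the lookup:

```python
# Rough equivalent of: destroy_max_fail_percentage |
#   default(kolla_max_fail_percentage) | default(100)
def resolve(destroy=None, kolla=None):
    if destroy is not None:
        return destroy          # play-specific knob wins
    if kolla is not None:
        return kolla            # then the global knob
    return 100                  # then the built-in default

assert resolve() == 100
assert resolve(kolla=20) == 20
assert resolve(destroy=0, kolla=20) == 0
```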
diff --git a/ansible/filter_plugins/address.py b/ansible/filter_plugins/address.py
index 3757ee8f0e..44ddfa830c 100644
--- a/ansible/filter_plugins/address.py
+++ b/ansible/filter_plugins/address.py
@@ -15,6 +15,7 @@
# limitations under the License.
from kolla_ansible.kolla_address import kolla_address
+from kolla_ansible.kolla_url import kolla_url
from kolla_ansible.put_address_in_context import put_address_in_context
@@ -24,5 +25,6 @@ class FilterModule(object):
def filters(self):
return {
'kolla_address': kolla_address,
+ 'kolla_url': kolla_url,
'put_address_in_context': put_address_in_context,
}
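
For orientation, a hypothetical re-implementation of what the new `kolla_url` filter is used for in group_vars/all.yml (the real code lives in `kolla_ansible.kolla_url`; this sketch only assumes it composes protocol://host:port URLs and brackets IPv6 literals):

```python
import ipaddress

def kolla_url(fqdn, protocol, port, path=''):
    # Assumed behaviour: wrap IPv6 literals in brackets for URL context.
    try:
        if ipaddress.ip_address(fqdn).version == 6:
            fqdn = f'[{fqdn}]'
    except ValueError:
        pass  # plain hostname, leave as-is
    return f'{protocol}://{fqdn}:{port}{path}'

assert kolla_url('2001:db8::1', 'https', 9200) == 'https://[2001:db8::1]:9200'
assert kolla_url('controller0', 'http', 8042) == 'http://controller0:8042'
```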
diff --git a/ansible/gather-facts.yml b/ansible/gather-facts.yml
index 0c7c792982..a30c7f4ed2 100644
--- a/ansible/gather-facts.yml
+++ b/ansible/gather-facts.yml
@@ -4,20 +4,25 @@
# building their configurations.
- name: Gather facts for all hosts
hosts: all
+ max_fail_percentage: >-
+ {{ gather_facts_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
serial: '{{ kolla_serial|default("0") }}'
gather_facts: false
tasks:
+ - name: Group hosts to determine when using --limit
+ group_by:
+ key: "all_using_limit_{{ (ansible_play_batch | length) != (groups['all'] | length) }}"
+ changed_when: false
+
- name: Gather facts
setup:
filter: "{{ kolla_ansible_setup_filter }}"
gather_subset: "{{ kolla_ansible_setup_gather_subset }}"
when:
+ # Don't gather if fact caching is in use
- not ansible_facts
-
- - name: Group hosts to determine when using --limit
- group_by:
- key: "all_using_limit_{{ (ansible_play_batch | length) != (groups['all'] | length) }}"
- changed_when: false
tags: always
# NOTE(pbourke): This case covers deploying subsets of hosts using --limit. The
@@ -28,6 +33,10 @@
# the limit.
- name: Gather facts for all hosts (if using --limit)
hosts: all_using_limit_True
+ max_fail_percentage: >-
+ {{ gather_facts_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
serial: '{{ kolla_serial|default("0") }}'
gather_facts: false
vars:
@@ -44,7 +53,10 @@
delegate_facts: True
delegate_to: "{{ item }}"
with_items: "{{ delegate_hosts }}"
- # We gathered facts for all hosts in the batch during the first play.
when:
+ # We gathered facts for all hosts in the batch during the first play.
+ # Ensure that we don't try again if they failed.
+ - item not in groups["all_using_limit_True"]
+ # Don't gather if fact caching is in use
- not hostvars[item].ansible_facts
tags: always
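
The `group_by` key above is just a batch-size comparison; rendered in Python with illustrative values:

```python
# With --limit control01 the play batch is smaller than the inventory,
# so every host lands in the group "all_using_limit_True".
ansible_play_batch = ['control01']
groups_all = ['control01', 'compute01', 'compute02']
using_limit = len(ansible_play_batch) != len(groups_all)
assert f'all_using_limit_{using_limit}' == 'all_using_limit_True'
```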
diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml
index 648537ad3e..90dbc53596 100644
--- a/ansible/group_vars/all.yml
+++ b/ansible/group_vars/all.yml
@@ -48,7 +48,7 @@ kolla_base_distro: "rocky"
kolla_internal_vip_address: "{{ kolla_internal_address | default('') }}"
kolla_internal_fqdn: "{{ kolla_internal_vip_address }}"
kolla_external_vip_address: "{{ kolla_internal_vip_address }}"
-kolla_same_external_internal_vip: "{{ kolla_external_vip_address == kolla_internal_vip_address }}"
+kolla_same_external_internal_vip: "{{ kolla_external_vip_address | ansible.utils.ipaddr('address') == kolla_internal_vip_address | ansible.utils.ipaddr('address') }}"
kolla_external_fqdn: "{{ kolla_internal_fqdn if kolla_same_external_internal_vip | bool else kolla_external_vip_address }}"
kolla_dev_repos_directory: "/opt/stack/"
@@ -85,6 +85,8 @@ database_user: "root"
database_port: "3306"
database_connection_recycle_time: 10
database_max_pool_size: 1
+database_enable_tls_backend: "{{ 'yes' if ((kolla_enable_tls_backend | bool ) and ( enable_proxysql | bool)) else 'no' }}"
+database_enable_tls_internal: "{{ 'yes' if ((kolla_enable_tls_internal | bool ) and ( enable_proxysql | bool)) else 'no' }}"
####################
# Container engine options
@@ -97,6 +99,7 @@ kolla_container_engine: "docker"
docker_registry_email:
docker_registry: "quay.io"
docker_namespace: "openstack.kolla"
+docker_image_name_prefix: ""
docker_registry_username:
# Please read the docs carefully before applying docker_registry_insecure.
docker_registry_insecure: "no"
@@ -122,8 +125,7 @@ docker_restart_policy_retry: "10"
# Extra docker options for Zun
docker_configure_for_zun: "no"
docker_zun_options: -H tcp://{{ api_interface_address | put_address_in_context('url') }}:2375
-docker_zun_config:
- cluster-store: etcd://{% for host in groups.get('etcd', []) %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['etcd_client_port'] }}{% if not loop.last %},{% endif %}{% endfor %}
+docker_zun_config: {}
# Extra containerd options for Zun
containerd_configure_for_zun: "no"
@@ -151,6 +153,33 @@ docker_common_options:
restart_retries: "{{ docker_restart_policy_retry }}"
graceful_timeout: "{{ docker_graceful_timeout }}"
client_timeout: "{{ docker_client_timeout }}"
+ container_engine: "{{ kolla_container_engine }}"
+
+# Container engine specific volume paths
+docker_volumes_path: "{{ docker_runtime_directory or '/var/lib/docker' }}/volumes"
+podman_volumes_path: "{{ docker_runtime_directory or '/var/lib/containers' }}/storage/volumes"
+container_engine_volumes_path: "{{ docker_volumes_path if kolla_container_engine == 'docker' else podman_volumes_path }}"
+
+#####################
+# Volumes under /run
+#####################
+# Podman has a problem with mounting the whole /run directory,
+# described here: https://github.com/containers/podman/issues/16305
+run_default_volumes_podman:
+ - '/run/netns:/run/netns:shared'
+ - '/run/lock/nova:/run/lock/nova:shared'
+ - "/run/libvirt:/run/libvirt:shared"
+ - "/run/nova:/run/nova:shared"
+ - "/run/openvswitch:/run/openvswitch:shared"
+
+run_default_volumes_docker: []
+
+run_default_subdirectories:
+ - '/run/netns'
+ - '/run/lock/nova'
+ - "/run/libvirt"
+ - "/run/nova"
+ - "/run/openvswitch"
####################
# Dimensions options
@@ -159,11 +188,20 @@ docker_common_options:
# NOTE(mnasiadka): Lower 1073741816 nofile limit on EL9 (RHEL9/CentOS Stream 9/Rocky Linux 9)
# fixes at least rabbitmq and mariadb
default_container_dimensions: "{{ default_container_dimensions_el9 if ansible_facts.os_family == 'RedHat' else '{}' }}"
-default_container_dimensions_el9:
+default_container_dimensions_el9: "{{ default_docker_dimensions_el9 if kolla_container_engine == 'docker' else default_podman_dimensions_el9 }}"
+default_docker_dimensions_el9:
ulimits:
nofile:
soft: 1048576
hard: 1048576
+default_podman_dimensions_el9:
+ ulimits:
+ RLIMIT_NOFILE:
+ soft: 1048576
+ hard: 1048576
+ RLIMIT_NPROC:
+ soft: 1048576
+ hard: 1048576
#####################
# Healthcheck options
@@ -190,11 +228,17 @@ keepalived_virtual_router_id: "51"
#######################
-# Elasticsearch Options
-#######################
-elasticsearch_datadir_volume: "elasticsearch"
-
-elasticsearch_internal_endpoint: "{{ internal_protocol }}://{{ elasticsearch_address | put_address_in_context('url') }}:{{ elasticsearch_port }}"
+## Opensearch Options
+########################
+opensearch_datadir_volume: "opensearch"
+
+opensearch_internal_endpoint: "{{ opensearch_address | kolla_url(internal_protocol, opensearch_port) }}"
+opensearch_dashboards_internal_fqdn: "{{ kolla_internal_fqdn }}"
+opensearch_dashboards_external_fqdn: "{{ kolla_external_fqdn }}"
+opensearch_dashboards_internal_endpoint: "{{ opensearch_dashboards_internal_fqdn | kolla_url(internal_protocol, opensearch_dashboards_port) }}"
+opensearch_dashboards_external_endpoint: "{{ opensearch_dashboards_external_fqdn | kolla_url(public_protocol, opensearch_dashboards_port_external) }}"
+opensearch_dashboards_user: "opensearch"
+opensearch_log_index_prefix: "{{ kibana_log_prefix if kibana_log_prefix is defined else 'flog' }}"
###################
# Messaging options
@@ -224,6 +268,10 @@ om_enable_rabbitmq_tls: "{{ rabbitmq_enable_tls | bool }}"
# CA certificate bundle in containers using oslo.messaging with RabbitMQ TLS.
om_rabbitmq_cacert: "{{ rabbitmq_cacert }}"
+om_enable_rabbitmq_high_availability: false
+# Only enable quorum queues if you disable om_enable_rabbitmq_high_availability
+om_enable_rabbitmq_quorum_queues: true
+
####################
# Networking options
####################
@@ -240,6 +288,7 @@ bifrost_network_interface: "{{ network_interface }}"
dns_interface: "{{ network_interface }}"
dpdk_tunnel_interface: "{{ neutron_external_interface }}"
ironic_http_interface: "{{ api_interface }}"
+ironic_tftp_interface: "{{ api_interface }}"
# Configure the address family (AF) per network.
# Valid options are [ ipv4, ipv6 ]
@@ -255,12 +304,14 @@ bifrost_network_address_family: "{{ network_address_family }}"
dns_address_family: "{{ network_address_family }}"
dpdk_tunnel_address_family: "{{ network_address_family }}"
ironic_http_address_family: "{{ api_address_family }}"
+ironic_tftp_address_family: "{{ api_address_family }}"
migration_interface_address: "{{ 'migration' | kolla_address }}"
tunnel_interface_address: "{{ 'tunnel' | kolla_address }}"
octavia_network_interface_address: "{{ 'octavia_network' | kolla_address }}"
dpdk_tunnel_interface_address: "{{ 'dpdk_tunnel' | kolla_address }}"
ironic_http_interface_address: "{{ 'ironic_http' | kolla_address }}"
+ironic_tftp_interface_address: "{{ 'ironic_tftp' | kolla_address }}"
# Valid options are [ openvswitch, ovn, linuxbridge, vmware_nsxv, vmware_nsxv3, vmware_nsxp, vmware_dvs ]
# Do note linuxbridge is *EXPERIMENTAL* in Neutron since Zed and it requires extra tweaks to config to be usable.
@@ -274,41 +325,70 @@ neutron_ipam_driver: "internal"
# The list should be in alphabetical order
aodh_internal_fqdn: "{{ kolla_internal_fqdn }}"
aodh_external_fqdn: "{{ kolla_external_fqdn }}"
+aodh_internal_endpoint: "{{ aodh_internal_fqdn | kolla_url(internal_protocol, aodh_api_port) }}"
+aodh_public_endpoint: "{{ aodh_external_fqdn | kolla_url(public_protocol, aodh_api_public_port) }}"
aodh_api_port: "8042"
+aodh_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else aodh_api_port }}"
aodh_api_listen_port: "{{ aodh_api_port }}"
barbican_internal_fqdn: "{{ kolla_internal_fqdn }}"
barbican_external_fqdn: "{{ kolla_external_fqdn }}"
+barbican_internal_endpoint: "{{ barbican_internal_fqdn | kolla_url(internal_protocol, barbican_api_port) }}"
+barbican_public_endpoint: "{{ barbican_external_fqdn | kolla_url(public_protocol, barbican_api_public_port) }}"
barbican_api_port: "9311"
+barbican_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else barbican_api_port }}"
barbican_api_listen_port: "{{ barbican_api_port }}"
+blazar_internal_fqdn: "{{ kolla_internal_fqdn }}"
+blazar_external_fqdn: "{{ kolla_external_fqdn }}"
+blazar_internal_base_endpoint: "{{ blazar_internal_fqdn | kolla_url(internal_protocol, blazar_api_port) }}"
+blazar_public_base_endpoint: "{{ blazar_external_fqdn | kolla_url(public_protocol, blazar_api_public_port) }}"
blazar_api_port: "1234"
+blazar_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else blazar_api_port }}"
+blazar_api_listen_port: "{{ blazar_api_port }}"
ceph_rgw_internal_fqdn: "{{ kolla_internal_fqdn }}"
ceph_rgw_external_fqdn: "{{ kolla_external_fqdn }}"
+ceph_rgw_internal_base_endpoint: "{{ ceph_rgw_internal_fqdn | kolla_url(internal_protocol, ceph_rgw_port) }}"
+ceph_rgw_public_base_endpoint: "{{ ceph_rgw_external_fqdn | kolla_url(public_protocol, ceph_rgw_public_port) }}"
ceph_rgw_port: "6780"
+ceph_rgw_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else ceph_rgw_port }}"
cinder_internal_fqdn: "{{ kolla_internal_fqdn }}"
cinder_external_fqdn: "{{ kolla_external_fqdn }}"
+cinder_internal_base_endpoint: "{{ cinder_internal_fqdn | kolla_url(internal_protocol, cinder_api_port) }}"
+cinder_public_base_endpoint: "{{ cinder_external_fqdn | kolla_url(public_protocol, cinder_api_public_port) }}"
cinder_api_port: "8776"
+cinder_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else cinder_api_port }}"
cinder_api_listen_port: "{{ cinder_api_port }}"
+cloudkitty_internal_fqdn: "{{ kolla_internal_fqdn }}"
+cloudkitty_external_fqdn: "{{ kolla_external_fqdn }}"
+cloudkitty_internal_endpoint: "{{ cloudkitty_internal_fqdn | kolla_url(internal_protocol, cloudkitty_api_port) }}"
+cloudkitty_public_endpoint: "{{ cloudkitty_external_fqdn | kolla_url(public_protocol, cloudkitty_api_public_port) }}"
cloudkitty_api_port: "8889"
+cloudkitty_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else cloudkitty_api_port }}"
+cloudkitty_api_listen_port: "{{ cloudkitty_api_port }}"
collectd_udp_port: "25826"
+cyborg_internal_fqdn: "{{ kolla_internal_fqdn }}"
+cyborg_external_fqdn: "{{ kolla_external_fqdn }}"
cyborg_api_port: "6666"
+cyborg_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else cyborg_api_port }}"
+cyborg_api_listen_port: "{{ cyborg_api_port }}"
designate_internal_fqdn: "{{ kolla_internal_fqdn }}"
designate_external_fqdn: "{{ kolla_external_fqdn }}"
+designate_internal_endpoint: "{{ designate_internal_fqdn | kolla_url(internal_protocol, designate_api_port) }}"
+designate_public_endpoint: "{{ designate_external_fqdn | kolla_url(public_protocol, designate_api_public_port) }}"
designate_api_port: "9001"
designate_api_listen_port: "{{ designate_api_port }}"
+designate_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else designate_api_port }}"
designate_bind_port: "53"
designate_mdns_port: "{{ '53' if designate_backend == 'infoblox' else '5354' }}"
designate_rndc_port: "953"
-elasticsearch_port: "9200"
-
etcd_client_port: "2379"
etcd_peer_port: "2380"
etcd_enable_tls: "{{ kolla_enable_tls_backend }}"
@@ -316,33 +396,76 @@ etcd_protocol: "{{ 'https' if etcd_enable_tls | bool else 'http' }}"
fluentd_syslog_port: "5140"
-freezer_api_port: "9090"
-
glance_internal_fqdn: "{{ kolla_internal_fqdn }}"
glance_external_fqdn: "{{ kolla_external_fqdn }}"
+glance_internal_endpoint: "{{ glance_internal_fqdn | kolla_url(internal_protocol, glance_api_port) }}"
+glance_public_endpoint: "{{ glance_external_fqdn | kolla_url(public_protocol, glance_api_public_port) }}"
glance_api_port: "9292"
glance_api_listen_port: "{{ glance_api_port }}"
+glance_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else glance_api_port }}"
glance_tls_proxy_stats_port: "9293"
gnocchi_internal_fqdn: "{{ kolla_internal_fqdn }}"
gnocchi_external_fqdn: "{{ kolla_external_fqdn }}"
+gnocchi_internal_endpoint: "{{ gnocchi_internal_fqdn | kolla_url(internal_protocol, gnocchi_api_port) }}"
+gnocchi_public_endpoint: "{{ gnocchi_external_fqdn | kolla_url(public_protocol, gnocchi_api_public_port) }}"
gnocchi_api_port: "8041"
gnocchi_api_listen_port: "{{ gnocchi_api_port }}"
+gnocchi_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else gnocchi_api_port }}"
+grafana_internal_fqdn: "{{ kolla_internal_fqdn }}"
+grafana_external_fqdn: "{{ kolla_external_fqdn }}"
+grafana_internal_endpoint: "{{ grafana_internal_fqdn | kolla_url(internal_protocol, grafana_server_port) }}"
+grafana_public_endpoint: "{{ grafana_external_fqdn | kolla_url(public_protocol, grafana_server_public_port) }}"
grafana_server_port: "3000"
+grafana_server_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else grafana_server_port }}"
+grafana_server_listen_port: "{{ grafana_server_port }}"
haproxy_stats_port: "1984"
haproxy_monitor_port: "61313"
+haproxy_ssh_port: "2985"
+# configure SSL/TLS settings for haproxy config, one of [modern, intermediate, legacy]:
+kolla_haproxy_ssl_settings: "modern"
+
+haproxy_ssl_settings: "{{ ssl_legacy_settings if kolla_haproxy_ssl_settings == 'legacy' else ssl_intermediate_settings if kolla_haproxy_ssl_settings == 'intermediate' else ssl_modern_settings | default(ssl_modern_settings) }}"
+
+ssl_legacy_settings: |
+ ssl-default-bind-ciphers DEFAULT:!MEDIUM:!3DES
+ ssl-default-bind-options no-sslv3 no-tlsv10 no-tlsv11
+
+ssl_intermediate_settings: |
+ ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305
+ ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
+ ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets
+ ssl-default-server-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305
+ ssl-default-server-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
+ ssl-default-server-options no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets
+
+ssl_modern_settings: |
+ ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
+ ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tlsv12 no-tls-tickets
+ ssl-default-server-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
+ ssl-default-server-options no-sslv3 no-tlsv10 no-tlsv11 no-tlsv12 no-tls-tickets
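
The `haproxy_ssl_settings` selector above reads more easily as a mapping; note that its trailing `| default(ssl_modern_settings)` is a no-op fallback to the same value. A rough mirror:

```python
# Rough mirror of the Jinja selector: unknown values fall through to
# "modern", matching the no-op default() in the original expression.
profiles = {'legacy': 'ssl_legacy_settings',
            'intermediate': 'ssl_intermediate_settings',
            'modern': 'ssl_modern_settings'}
kolla_haproxy_ssl_settings = 'modern'
chosen = profiles.get(kolla_haproxy_ssl_settings, profiles['modern'])
assert chosen == 'ssl_modern_settings'
```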
heat_internal_fqdn: "{{ kolla_internal_fqdn }}"
heat_external_fqdn: "{{ kolla_external_fqdn }}"
+heat_internal_base_endpoint: "{{ heat_internal_fqdn | kolla_url(internal_protocol, heat_api_port) }}"
+heat_public_base_endpoint: "{{ heat_external_fqdn | kolla_url(public_protocol, heat_api_public_port) }}"
heat_api_port: "8004"
heat_api_listen_port: "{{ heat_api_port }}"
+heat_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else heat_api_port }}"
heat_cfn_internal_fqdn: "{{ kolla_internal_fqdn }}"
heat_cfn_external_fqdn: "{{ kolla_external_fqdn }}"
+heat_cfn_internal_base_endpoint: "{{ heat_cfn_internal_fqdn | kolla_url(internal_protocol, heat_api_cfn_port) }}"
+heat_cfn_public_base_endpoint: "{{ heat_cfn_external_fqdn | kolla_url(public_protocol, heat_api_cfn_public_port) }}"
heat_api_cfn_port: "8000"
heat_api_cfn_listen_port: "{{ heat_api_cfn_port }}"
+heat_api_cfn_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else heat_api_cfn_port }}"
+horizon_internal_fqdn: "{{ kolla_internal_fqdn }}"
+horizon_external_fqdn: "{{ kolla_external_fqdn }}"
+horizon_internal_endpoint: "{{ kolla_internal_fqdn | kolla_url(internal_protocol, horizon_tls_port if kolla_enable_tls_internal | bool else horizon_port) }}"
+horizon_public_endpoint: "{{ kolla_external_fqdn | kolla_url(public_protocol, horizon_tls_port if kolla_enable_tls_external | bool else horizon_port) }}"
horizon_port: "80"
horizon_tls_port: "443"
horizon_listen_port: "{{ horizon_tls_port if horizon_enable_tls_backend | bool else horizon_port }}"
@@ -351,40 +474,84 @@ influxdb_http_port: "8086"
ironic_internal_fqdn: "{{ kolla_internal_fqdn }}"
ironic_external_fqdn: "{{ kolla_external_fqdn }}"
+ironic_internal_endpoint: "{{ ironic_internal_fqdn | kolla_url(internal_protocol, ironic_api_port) }}"
+ironic_public_endpoint: "{{ ironic_external_fqdn | kolla_url(public_protocol, ironic_api_public_port) }}"
ironic_api_port: "6385"
ironic_api_listen_port: "{{ ironic_api_port }}"
+ironic_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else ironic_api_port }}"
ironic_inspector_internal_fqdn: "{{ kolla_internal_fqdn }}"
ironic_inspector_external_fqdn: "{{ kolla_external_fqdn }}"
+ironic_inspector_internal_endpoint: "{{ ironic_inspector_internal_fqdn | kolla_url(internal_protocol, ironic_inspector_port) }}"
+ironic_inspector_public_endpoint: "{{ ironic_inspector_external_fqdn | kolla_url(public_protocol, ironic_inspector_public_port) }}"
ironic_inspector_port: "5050"
+ironic_inspector_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else ironic_inspector_port }}"
ironic_inspector_listen_port: "{{ ironic_inspector_port }}"
ironic_http_port: "8089"
+ironic_prometheus_exporter_port: "9608"
iscsi_port: "3260"
-kafka_port: "9092"
+keystone_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else keystone_public_listen_port }}"
+keystone_public_listen_port: "5000"
+keystone_internal_port: "5000"
+keystone_internal_listen_port: "{{ keystone_internal_port }}"
-keystone_public_port: "5000"
-keystone_public_listen_port: "{{ keystone_public_port }}"
-# NOTE(yoctozepto): Admin port settings are kept only for upgrade compatibility.
-# TODO(yoctozepto): Remove after Zed.
-keystone_admin_port: "35357"
-keystone_admin_listen_port: "{{ keystone_admin_port }}"
keystone_ssh_port: "8023"
-kibana_server_port: "5601"
-
kuryr_port: "23750"
+letsencrypt_webserver_port: "8081"
+letsencrypt_managed_certs: "{{ '' if not enable_letsencrypt | bool else ('internal' if letsencrypt_internal_cert_server != '' and kolla_same_external_internal_vip | bool else ('internal,external' if letsencrypt_internal_cert_server != '' and letsencrypt_external_cert_server != '' else ('internal' if letsencrypt_internal_cert_server != '' else ('external' if letsencrypt_external_cert_server != '' and not kolla_same_external_internal_vip | bool else '')))) }}"
+letsencrypt_external_cert_server: ""
+letsencrypt_internal_cert_server: ""
+
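
The nested conditional in `letsencrypt_managed_certs` is hard to scan; the same decision ladder sketched in Python (simplified names, same outcomes):

```python
def managed_certs(enable_le, internal_srv, external_srv, same_vip):
    # Mirrors the nested Jinja ternaries above, outermost first.
    if not enable_le:
        return ''
    if internal_srv and same_vip:
        return 'internal'
    if internal_srv and external_srv:
        return 'internal,external'
    if internal_srv:
        return 'internal'
    if external_srv and not same_vip:
        return 'external'
    return ''

assert managed_certs(True, 'https://acme-int', '', False) == 'internal'
assert managed_certs(True, 'https://a', 'https://b', False) == 'internal,external'
```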
+magnum_internal_fqdn: "{{ kolla_internal_fqdn }}"
+magnum_external_fqdn: "{{ kolla_external_fqdn }}"
+magnum_internal_base_endpoint: "{{ magnum_internal_fqdn | kolla_url(internal_protocol, magnum_api_port) }}"
+magnum_public_base_endpoint: "{{ magnum_external_fqdn | kolla_url(public_protocol, magnum_api_public_port) }}"
magnum_api_port: "9511"
+magnum_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else magnum_api_port }}"
+magnum_api_listen_port: "{{ magnum_api_port }}"
+manila_internal_fqdn: "{{ kolla_internal_fqdn }}"
+manila_external_fqdn: "{{ kolla_external_fqdn }}"
+manila_internal_base_endpoint: "{{ manila_internal_fqdn | kolla_url(internal_protocol, manila_api_port) }}"
+manila_public_base_endpoint: "{{ manila_external_fqdn | kolla_url(public_protocol, manila_api_public_port) }}"
manila_api_port: "8786"
+manila_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else manila_api_port }}"
+manila_api_listen_port: "{{ manila_api_port }}"
mariadb_port: "{{ database_port }}"
mariadb_wsrep_port: "4567"
mariadb_ist_port: "4568"
mariadb_sst_port: "4444"
mariadb_clustercheck_port: "4569"
+mariadb_enable_tls_backend: "{{ database_enable_tls_backend }}"
+
mariadb_monitor_user: "{{ 'monitor' if enable_proxysql | bool else 'haproxy' }}"
+mariadb_monitor_connect_interval: "2000"
+mariadb_monitor_galera_healthcheck_interval: "4000"
+mariadb_monitor_galera_healthcheck_timeout: "1000"
+mariadb_monitor_galera_healthcheck_max_timeout_count: "2"
+mariadb_monitor_ping_interval: "3000"
+mariadb_monitor_ping_timeout: "2000"
+mariadb_monitor_ping_max_failures: "2"
+#
+# Defaults preserved for multinode setup
+# Tweaked for single-node
+#
+# ProxySQL shuns servers on MySQL errors, which can cause failures
+# during upgrades or restarts. In single-node setups, ProxySQL can't reroute
+# traffic, leading to "Max connect timeout" errors. To avoid this in CI and
+# single-node environments, delay error responses to clients by 10 seconds,
+# giving the backend time to recover without immediate failures.
+#
+# See ProxySQL docs for more: https://proxysql.com/documentation/global-variables/mysql-variables/#mysql-shun_on_failures
+mariadb_shun_on_failures: "{{ '10' if mariadb_shards_info.shards.values() | map(attribute='hosts') | map('length') | select('<=', 1) | list | length > 0 else '5' }}"
+mariadb_connect_retries_delay: "{{ '1000' if mariadb_shards_info.shards.values() | map(attribute='hosts') | map('length') | select('<=', 1) | list | length > 0 else '1' }}"
+mariadb_connect_retries_on_failure: "{{ '20' if mariadb_shards_info.shards.values() | map(attribute='hosts') | map('length') | select('<=', 1) | list | length > 0 else '10' }}"
+
+mariadb_datadir_volume: "mariadb"
mariadb_default_database_shard_id: 0
mariadb_default_database_shard_hosts: "{% set default_shard = [] %}{% for host in groups['mariadb'] %}{% if hostvars[host]['mariadb_shard_id'] is not defined or hostvars[host]['mariadb_shard_id'] == mariadb_default_database_shard_id %}{{ default_shard.append(host) }}{% endif %}{% endfor %}{{ default_shard }}"
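
The single-node detection shared by `mariadb_shun_on_failures`, `mariadb_connect_retries_delay` and `mariadb_connect_retries_on_failure` above boils down to "does any shard have at most one host"; a rough Python mirror:

```python
# Relaxed (single-node/CI) values apply when any shard has <= 1 host.
mariadb_shards_info = {'shards': {'shard_0': {'hosts': ['control01']}}}
single_node = any(len(shard['hosts']) <= 1
                  for shard in mariadb_shards_info['shards'].values())
mariadb_shun_on_failures = '10' if single_node else '5'
assert mariadb_shun_on_failures == '10'
```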
@@ -392,72 +559,105 @@ mariadb_shard_id: "{{ mariadb_default_database_shard_id }}"
mariadb_shard_name: "shard_{{ mariadb_shard_id }}"
mariadb_shard_group: "mariadb_{{ mariadb_shard_name }}"
mariadb_loadbalancer: "{{ 'proxysql' if enable_proxysql | bool else 'haproxy' }}"
+mariadb_backup_target: "{{ 'active' if mariadb_loadbalancer == 'haproxy' else 'replica' }}"
mariadb_shard_root_user_prefix: "root_shard_"
mariadb_shard_backup_user_prefix: "backup_shard_"
mariadb_shards_info: "{{ groups['mariadb'] | database_shards_info() }}"
+masakari_internal_fqdn: "{{ kolla_internal_fqdn }}"
+masakari_external_fqdn: "{{ kolla_external_fqdn }}"
+masakari_internal_endpoint: "{{ masakari_internal_fqdn | kolla_url(internal_protocol, masakari_api_port) }}"
+masakari_public_endpoint: "{{ masakari_external_fqdn | kolla_url(public_protocol, masakari_api_public_port) }}"
masakari_api_port: "15868"
+masakari_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else masakari_api_port }}"
+masakari_api_listen_port: "{{ masakari_api_port }}"
+masakari_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if enable_etcd | bool else '' }}"
memcached_port: "11211"
+memcache_security_strategy: "ENCRYPT"
+mistral_internal_fqdn: "{{ kolla_internal_fqdn }}"
+mistral_external_fqdn: "{{ kolla_external_fqdn }}"
+mistral_internal_base_endpoint: "{{ mistral_internal_fqdn | kolla_url(internal_protocol, mistral_api_port) }}"
+mistral_public_base_endpoint: "{{ mistral_external_fqdn | kolla_url(public_protocol, mistral_api_public_port) }}"
mistral_api_port: "8989"
-
-monasca_api_port: "8070"
-monasca_log_api_port: "{{ monasca_api_port }}"
-monasca_agent_forwarder_port: "17123"
-monasca_agent_statsd_port: "8125"
-
-murano_api_port: "8082"
+mistral_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else mistral_api_port }}"
+mistral_api_listen_port: "{{ mistral_api_port }}"
neutron_internal_fqdn: "{{ kolla_internal_fqdn }}"
neutron_external_fqdn: "{{ kolla_external_fqdn }}"
+neutron_internal_endpoint: "{{ neutron_internal_fqdn | kolla_url(internal_protocol, neutron_server_port) }}"
+neutron_public_endpoint: "{{ neutron_external_fqdn | kolla_url(public_protocol, neutron_server_public_port) }}"
neutron_server_port: "9696"
neutron_server_listen_port: "{{ neutron_server_port }}"
+neutron_server_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else neutron_server_port }}"
neutron_tls_proxy_stats_port: "9697"
nova_internal_fqdn: "{{ kolla_internal_fqdn }}"
nova_external_fqdn: "{{ kolla_external_fqdn }}"
+nova_internal_base_endpoint: "{{ nova_internal_fqdn | kolla_url(internal_protocol, nova_api_port) }}"
+nova_public_base_endpoint: "{{ nova_external_fqdn | kolla_url(public_protocol, nova_api_public_port) }}"
nova_api_port: "8774"
nova_api_listen_port: "{{ nova_api_port }}"
+nova_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_api_port }}"
+nova_metadata_internal_fqdn: "{{ kolla_internal_fqdn }}"
+nova_metadata_external_fqdn: "{{ kolla_external_fqdn }}"
nova_metadata_port: "8775"
nova_metadata_listen_port: "{{ nova_metadata_port }}"
nova_novncproxy_fqdn: "{{ kolla_external_fqdn }}"
nova_novncproxy_port: "6080"
nova_novncproxy_listen_port: "{{ nova_novncproxy_port }}"
+nova_novncproxy_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_novncproxy_port }}"
nova_spicehtml5proxy_fqdn: "{{ kolla_external_fqdn }}"
nova_spicehtml5proxy_port: "6082"
nova_spicehtml5proxy_listen_port: "{{ nova_spicehtml5proxy_port }}"
+nova_spicehtml5proxy_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_spicehtml5proxy_port }}"
nova_serialproxy_fqdn: "{{ kolla_external_fqdn }}"
nova_serialproxy_port: "6083"
nova_serialproxy_listen_port: "{{ nova_serialproxy_port }}"
+nova_serialproxy_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_serialproxy_port }}"
nova_serialproxy_protocol: "{{ 'wss' if kolla_enable_tls_external | bool else 'ws' }}"
octavia_internal_fqdn: "{{ kolla_internal_fqdn }}"
octavia_external_fqdn: "{{ kolla_external_fqdn }}"
+octavia_internal_endpoint: "{{ octavia_internal_fqdn | kolla_url(internal_protocol, octavia_api_port) }}"
+octavia_public_endpoint: "{{ octavia_external_fqdn | kolla_url(public_protocol, octavia_api_public_port) }}"
octavia_api_port: "9876"
octavia_api_listen_port: "{{ octavia_api_port }}"
+octavia_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else octavia_api_port }}"
octavia_health_manager_port: "5555"
+# NOTE: If an external ElasticSearch cluster port is specified,
+# we default to using that port in services with ElasticSearch
+# endpoints. This is for backwards compatibility.
+opensearch_port: "{{ elasticsearch_port | default('9200') }}"
+opensearch_dashboards_port: "5601"
+opensearch_dashboards_port_external: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else opensearch_dashboards_port }}"
+opensearch_dashboards_listen_port: "{{ opensearch_dashboards_port }}"
+
ovn_nb_db_port: "6641"
ovn_sb_db_port: "6642"
ovn_nb_connection: "{% for host in groups['ovn-nb-db'] %}tcp:{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ ovn_nb_db_port }}{% if not loop.last %},{% endif %}{% endfor %}"
ovn_sb_connection: "{% for host in groups['ovn-sb-db'] %}tcp:{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ ovn_sb_db_port }}{% if not loop.last %},{% endif %}{% endfor %}"
-outward_rabbitmq_port: "5674"
-outward_rabbitmq_management_port: "15674"
-outward_rabbitmq_cluster_port: "25674"
-outward_rabbitmq_epmd_port: "4371"
-outward_rabbitmq_prometheus_port: "15694"
-
ovsdb_port: "6640"
placement_internal_fqdn: "{{ kolla_internal_fqdn }}"
placement_external_fqdn: "{{ kolla_external_fqdn }}"
+placement_internal_endpoint: "{{ placement_internal_fqdn | kolla_url(internal_protocol, placement_api_port) }}"
+placement_public_endpoint: "{{ placement_external_fqdn | kolla_url(public_protocol, placement_api_public_port) }}"
# Default Placement API port of 8778 already in use
placement_api_port: "8780"
placement_api_listen_port: "{{ placement_api_port }}"
+placement_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else placement_api_port }}"
+prometheus_internal_fqdn: "{{ kolla_internal_fqdn }}"
+prometheus_external_fqdn: "{{ kolla_external_fqdn }}"
+prometheus_internal_endpoint: "{{ prometheus_internal_fqdn | kolla_url(internal_protocol, prometheus_port) }}"
+prometheus_public_endpoint: "{{ prometheus_external_fqdn | kolla_url(public_protocol, prometheus_public_port) }}"
prometheus_port: "9091"
+prometheus_listen_port: "{{ prometheus_port }}"
+prometheus_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else prometheus_port }}"
prometheus_node_exporter_port: "9100"
prometheus_mysqld_exporter_port: "9104"
prometheus_haproxy_exporter_port: "9101"
@@ -468,13 +668,17 @@ prometheus_cadvisor_port: "18080"
prometheus_fluentd_integration_port: "24231"
prometheus_libvirt_exporter_port: "9177"
prometheus_etcd_integration_port: "{{ etcd_client_port }}"
+proxysql_prometheus_exporter_port: "6070"
# Prometheus alertmanager ports
+prometheus_alertmanager_internal_fqdn: "{{ kolla_internal_fqdn }}"
+prometheus_alertmanager_external_fqdn: "{{ kolla_external_fqdn }}"
+prometheus_alertmanager_internal_endpoint: "{{ prometheus_alertmanager_internal_fqdn | kolla_url(internal_protocol, prometheus_alertmanager_port) }}"
+prometheus_alertmanager_public_endpoint: "{{ prometheus_alertmanager_external_fqdn | kolla_url(public_protocol, prometheus_alertmanager_public_port) }}"
prometheus_alertmanager_port: "9093"
prometheus_alertmanager_cluster_port: "9094"
-
-# Prometheus MSTeams port
-prometheus_msteams_port: "9095"
+prometheus_alertmanager_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else prometheus_alertmanager_port }}"
+prometheus_alertmanager_listen_port: "{{ prometheus_alertmanager_port }}"
# Prometheus openstack-exporter ports
prometheus_openstack_exporter_port: "9198"
@@ -483,6 +687,9 @@ prometheus_elasticsearch_exporter_port: "9108"
# Prometheus blackbox-exporter ports
prometheus_blackbox_exporter_port: "9115"
+# Prometheus instance label to use for metrics
+prometheus_instance_label:
+
proxysql_admin_port: "6032"
rabbitmq_port: "{{ '5671' if rabbitmq_enable_tls | bool else '5672' }}"
@@ -494,30 +701,26 @@ rabbitmq_prometheus_port: "15692"
redis_port: "6379"
redis_sentinel_port: "26379"
-sahara_api_port: "8386"
-
-senlin_internal_fqdn: "{{ kolla_internal_fqdn }}"
-senlin_external_fqdn: "{{ kolla_external_fqdn }}"
-senlin_api_port: "8778"
-senlin_api_listen_port: "{{ senlin_api_port }}"
-
-skydive_analyzer_port: "8085"
-skydive_agents_port: "8090"
-
-solum_application_deployment_port: "9777"
-solum_image_builder_port: "9778"
-
-storm_nimbus_thrift_port: 6627
-storm_supervisor_thrift_port: 6628
-# Storm will run up to (end - start) + 1 workers per worker host. Here
-# we reserve ports for those workers, and implicitly define the maximum
-# number of workers per host.
-storm_worker_port_range:
- start: 6700
- end: 6703
+skyline_apiserver_internal_fqdn: "{{ kolla_internal_fqdn }}"
+skyline_apiserver_external_fqdn: "{{ kolla_external_fqdn }}"
+skyline_apiserver_internal_endpoint: "{{ skyline_apiserver_internal_fqdn | kolla_url(internal_protocol, skyline_apiserver_port) }}"
+skyline_apiserver_public_endpoint: "{{ skyline_apiserver_external_fqdn | kolla_url(public_protocol, skyline_apiserver_public_port) }}"
+skyline_console_internal_fqdn: "{{ kolla_internal_fqdn }}"
+skyline_console_external_fqdn: "{{ kolla_external_fqdn }}"
+skyline_console_internal_endpoint: "{{ skyline_console_internal_fqdn | kolla_url(internal_protocol, skyline_console_port) }}"
+skyline_console_public_endpoint: "{{ skyline_console_external_fqdn | kolla_url(public_protocol, skyline_console_public_port) }}"
+skyline_apiserver_port: "9998"
+skyline_apiserver_listen_port: "{{ skyline_apiserver_port }}"
+skyline_apiserver_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else skyline_apiserver_port }}"
+skyline_console_port: "9999"
+skyline_console_listen_port: "{{ skyline_console_port }}"
+skyline_console_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else skyline_console_port }}"
+skyline_enable_sso: "{{ enable_keystone_federation | bool and keystone_identity_providers | selectattr('protocol', 'equalto', 'openid') | list | count > 0 }}"
swift_internal_fqdn: "{{ kolla_internal_fqdn }}"
swift_external_fqdn: "{{ kolla_external_fqdn }}"
+swift_internal_base_endpoint: "{{ swift_internal_fqdn | kolla_url(internal_protocol, swift_proxy_server_port) }}"
+swift_public_base_endpoint: "{{ swift_external_fqdn | kolla_url(public_protocol, swift_proxy_server_port) }}"
swift_proxy_server_port: "8080"
swift_proxy_server_listen_port: "{{ swift_proxy_server_port }}"
swift_object_server_port: "6000"
@@ -527,29 +730,53 @@ swift_rsync_port: "10873"
syslog_udp_port: "{{ fluentd_syslog_port }}"
+tacker_internal_fqdn: "{{ kolla_internal_fqdn }}"
+tacker_external_fqdn: "{{ kolla_external_fqdn }}"
+tacker_internal_endpoint: "{{ tacker_internal_fqdn | kolla_url(internal_protocol, tacker_server_port) }}"
+tacker_public_endpoint: "{{ tacker_external_fqdn | kolla_url(public_protocol, tacker_server_public_port) }}"
tacker_server_port: "9890"
+tacker_server_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else tacker_server_port }}"
+tacker_server_listen_port: "{{ tacker_server_port }}"
+trove_internal_fqdn: "{{ kolla_internal_fqdn }}"
+trove_external_fqdn: "{{ kolla_external_fqdn }}"
+trove_internal_base_endpoint: "{{ trove_internal_fqdn | kolla_url(internal_protocol, trove_api_port) }}"
+trove_public_base_endpoint: "{{ trove_external_fqdn | kolla_url(public_protocol, trove_api_public_port) }}"
trove_api_port: "8779"
+trove_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else trove_api_port }}"
+trove_api_listen_port: "{{ trove_api_port }}"
+venus_internal_fqdn: "{{ kolla_internal_fqdn }}"
+venus_external_fqdn: "{{ kolla_external_fqdn }}"
+venus_internal_endpoint: "{{ venus_internal_fqdn | kolla_url(internal_protocol, venus_api_port) }}"
+venus_public_endpoint: "{{ venus_external_fqdn | kolla_url(public_protocol, venus_api_public_port) }}"
venus_api_port: "10010"
+venus_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else venus_api_port }}"
+venus_api_listen_port: "{{ venus_api_port }}"
+watcher_internal_fqdn: "{{ kolla_internal_fqdn }}"
+watcher_external_fqdn: "{{ kolla_external_fqdn }}"
+watcher_internal_endpoint: "{{ watcher_internal_fqdn | kolla_url(internal_protocol, watcher_api_port) }}"
+watcher_public_endpoint: "{{ watcher_external_fqdn | kolla_url(public_protocol, watcher_api_public_port) }}"
watcher_api_port: "9322"
-
-zookeeper_client_port: "2181"
-zookeeper_peer_port: "2888"
-zookeeper_quorum_port: "3888"
+watcher_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else watcher_api_port }}"
+watcher_api_listen_port: "{{ watcher_api_port }}"
zun_api_port: "9517"
+zun_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else zun_api_port }}"
+zun_api_listen_port: "{{ zun_api_port }}"
+zun_wsproxy_internal_fqdn: "{{ kolla_internal_fqdn }}"
+zun_wsproxy_external_fqdn: "{{ kolla_external_fqdn }}"
zun_wsproxy_port: "6784"
zun_wsproxy_protocol: "{{ 'wss' if kolla_enable_tls_external | bool else 'ws' }}"
zun_cni_daemon_port: "9036"
-
-vitrage_api_port: "8999"
+zun_internal_fqdn: "{{ kolla_internal_fqdn }}"
+zun_external_fqdn: "{{ kolla_external_fqdn }}"
+zun_internal_base_endpoint: "{{ zun_internal_fqdn | kolla_url(internal_protocol, zun_api_port) }}"
+zun_public_base_endpoint: "{{ zun_external_fqdn | kolla_url(public_protocol, zun_api_public_port) }}"
public_protocol: "{{ 'https' if kolla_enable_tls_external | bool else 'http' }}"
internal_protocol: "{{ 'https' if kolla_enable_tls_internal | bool else 'http' }}"
-# TODO(yoctozepto): Remove after Zed. Kept for compatibility only.
-admin_protocol: "{{ internal_protocol }}"
##################
# Firewall options
@@ -569,7 +796,7 @@ openstack_logging_debug: "False"
openstack_region_name: "RegionOne"
# Variable defining the pin_release_version, applied during the rolling upgrade process
-openstack_previous_release_name: "yoga"
+openstack_previous_release_name: "2024.1"
# A list of policy file formats that are supported by Oslo.policy
supported_policy_format_list:
@@ -614,7 +841,6 @@ enable_memcached: "yes"
enable_neutron: "{{ enable_openstack_core | bool }}"
enable_nova: "{{ enable_openstack_core | bool }}"
enable_rabbitmq: "{{ 'yes' if om_rpc_transport == 'rabbit' or om_notify_transport == 'rabbit' else 'no' }}"
-enable_outward_rabbitmq: "{{ enable_murano | bool }}"
# NOTE: Most memcached clients handle load-balancing via client side
# hashing (consistent or not) logic, so going under the covers and messing
@@ -634,23 +860,24 @@ enable_ceph_rgw: "no"
enable_ceph_rgw_loadbalancer: "{{ enable_ceph_rgw | bool }}"
enable_cinder: "no"
enable_cinder_backup: "yes"
-enable_cinder_backend_hnas_nfs: "no"
enable_cinder_backend_iscsi: "{{ enable_cinder_backend_lvm | bool }}"
enable_cinder_backend_lvm: "no"
enable_cinder_backend_nfs: "no"
enable_cinder_backend_quobyte: "no"
enable_cinder_backend_pure_iscsi: "no"
enable_cinder_backend_pure_fc: "no"
+enable_cinder_backend_pure_roce: "no"
+enable_cinder_backend_pure_nvme_tcp: "no"
enable_cloudkitty: "no"
enable_collectd: "no"
enable_cyborg: "no"
enable_designate: "no"
enable_etcd: "no"
enable_fluentd: "yes"
-enable_freezer: "no"
+enable_fluentd_systemd: "{{ (enable_fluentd | bool) and (enable_central_logging | bool) }}"
enable_gnocchi: "no"
enable_gnocchi_statsd: "no"
-enable_grafana: "{{ enable_monasca | bool }}"
+enable_grafana: "no"
enable_grafana_external: "{{ enable_grafana | bool }}"
enable_hacluster: "{{ enable_masakari_hostmonitor | bool }}"
enable_heat: "{{ enable_openstack_core | bool }}"
@@ -658,33 +885,27 @@ enable_horizon: "{{ enable_openstack_core | bool }}"
enable_horizon_blazar: "{{ enable_blazar | bool }}"
enable_horizon_cloudkitty: "{{ enable_cloudkitty | bool }}"
enable_horizon_designate: "{{ enable_designate | bool }}"
-enable_horizon_freezer: "{{ enable_freezer | bool }}"
+enable_horizon_fwaas: "{{ enable_neutron_fwaas | bool }}"
enable_horizon_heat: "{{ enable_heat | bool }}"
enable_horizon_ironic: "{{ enable_ironic | bool }}"
enable_horizon_magnum: "{{ enable_magnum | bool }}"
enable_horizon_manila: "{{ enable_manila | bool }}"
enable_horizon_masakari: "{{ enable_masakari | bool }}"
enable_horizon_mistral: "{{ enable_mistral | bool }}"
-enable_horizon_monasca: "{{ enable_monasca | bool }}"
-enable_horizon_murano: "{{ enable_murano | bool }}"
enable_horizon_neutron_vpnaas: "{{ enable_neutron_vpnaas | bool }}"
enable_horizon_octavia: "{{ enable_octavia | bool }}"
-enable_horizon_sahara: "{{ enable_sahara | bool }}"
-enable_horizon_senlin: "{{ enable_senlin | bool }}"
-enable_horizon_solum: "{{ enable_solum | bool }}"
enable_horizon_tacker: "{{ enable_tacker | bool }}"
enable_horizon_trove: "{{ enable_trove | bool }}"
-enable_horizon_vitrage: "{{ enable_vitrage | bool }}"
enable_horizon_watcher: "{{ enable_watcher | bool }}"
enable_horizon_zun: "{{ enable_zun | bool }}"
-enable_influxdb: "{{ enable_monasca | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'influxdb') }}"
+enable_influxdb: "{{ enable_cloudkitty | bool and cloudkitty_storage_backend == 'influxdb' }}"
enable_ironic: "no"
+enable_ironic_dnsmasq: "{{ enable_ironic | bool }}"
enable_ironic_neutron_agent: "{{ enable_neutron | bool and enable_ironic | bool }}"
-# TODO(yoctozepto): Remove the deprecated enable_ironic_pxe_uefi in Zed.
-enable_ironic_pxe_uefi: "no"
+enable_ironic_prometheus_exporter: "{{ enable_ironic | bool and enable_prometheus | bool }}"
enable_iscsid: "{{ enable_cinder | bool and enable_cinder_backend_iscsi | bool }}"
-enable_kafka: "{{ enable_monasca | bool }}"
enable_kuryr: "no"
+enable_letsencrypt: "no"
enable_magnum: "no"
enable_manila: "no"
enable_manila_backend_generic: "no"
@@ -697,13 +918,12 @@ enable_masakari: "no"
enable_masakari_instancemonitor: "{{ enable_masakari | bool }}"
enable_masakari_hostmonitor: "{{ enable_masakari | bool }}"
enable_mistral: "no"
-enable_monasca: "no"
enable_multipathd: "no"
-enable_murano: "no"
enable_neutron_vpnaas: "no"
enable_neutron_sriov: "no"
enable_neutron_mlnx: "no"
enable_neutron_dvr: "no"
+enable_neutron_fwaas: "no"
enable_neutron_qos: "no"
enable_neutron_agent_ha: "no"
enable_neutron_bgp_dragent: "no"
@@ -711,6 +931,7 @@ enable_neutron_provider_networks: "no"
enable_neutron_segments: "no"
enable_neutron_packet_logging: "no"
enable_neutron_sfc: "no"
+enable_neutron_taas: "no"
enable_neutron_trunk: "no"
enable_neutron_metering: "no"
enable_neutron_infoblox_ipam_agent: "no"
@@ -720,19 +941,16 @@ enable_nova_serialconsole_proxy: "no"
enable_nova_ssh: "yes"
enable_octavia: "no"
enable_octavia_driver_agent: "{{ enable_octavia | bool and neutron_plugin_agent == 'ovn' }}"
+enable_octavia_jobboard: "{{ enable_octavia | bool and 'amphora' in octavia_provider_drivers }}"
enable_openvswitch: "{{ enable_neutron | bool and neutron_plugin_agent != 'linuxbridge' }}"
enable_ovn: "{{ enable_neutron | bool and neutron_plugin_agent == 'ovn' }}"
enable_ovs_dpdk: "no"
enable_osprofiler: "no"
enable_placement: "{{ enable_nova | bool or enable_zun | bool }}"
enable_prometheus: "no"
-enable_proxysql: "no"
+enable_proxysql: "yes"
enable_redis: "no"
-enable_sahara: "no"
-enable_senlin: "no"
-enable_skydive: "no"
-enable_solum: "no"
-enable_storm: "{{ enable_monasca | bool and monasca_enable_alerting_pipeline | bool }}"
+enable_skyline: "no"
enable_swift: "no"
enable_swift_s3api: "no"
enable_swift_recon: "no"
@@ -741,9 +959,7 @@ enable_telegraf: "no"
enable_trove: "no"
enable_trove_singletenant: "no"
enable_venus: "no"
-enable_vitrage: "no"
enable_watcher: "no"
-enable_zookeeper: "{{ enable_kafka | bool or enable_storm | bool }}"
enable_zun: "no"
ovs_datapath: "{{ 'netdev' if enable_ovs_dpdk | bool else 'system' }}"
@@ -752,7 +968,6 @@ ironic_keystone_user: "ironic"
neutron_keystone_user: "neutron"
nova_keystone_user: "nova"
placement_keystone_user: "placement"
-murano_keystone_user: "murano"
cinder_keystone_user: "cinder"
glance_keystone_user: "glance"
@@ -763,31 +978,6 @@ num_nova_fake_per_node: 5
# Clean images options are specified here
enable_destroy_images: "no"
-####################
-# Monasca options
-####################
-monasca_enable_alerting_pipeline: True
-
-# Send logs from the control plane to the Monasca API. Monasca will then persist
-# them in Elasticsearch. If this is disabled, control plane logs will be sent
-# directly to Elasticsearch.
-monasca_ingest_control_plane_logs: True
-
-monasca_api_internal_base_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ monasca_api_port }}"
-monasca_api_public_base_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ monasca_api_port }}"
-
-monasca_log_api_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ monasca_log_api_port }}"
-monasca_log_api_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ monasca_log_api_port }}"
-
-# The OpenStack username used by the Monasca Agent and the Fluentd Monasca
-# plugin to post logs and metrics from the control plane to Monasca.
-monasca_agent_user: "monasca-agent"
-
-# The OpenStack project to which the control plane logs and metrics are
-# tagged with. Only users with the monasca read only user role, or higher
-# can access these from the Monasca APIs.
-monasca_control_plane_project: "monasca_control_plane"
-
####################
# Global Options
####################
@@ -801,20 +991,18 @@ skip_stop_containers: []
# Logging options
####################
-elasticsearch_address: "{{ kolla_internal_fqdn }}"
-enable_elasticsearch: "{{ 'yes' if enable_central_logging | bool or enable_osprofiler | bool or enable_skydive | bool or enable_monasca | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'elasticsearch') else 'no' }}"
-
-# If using Curator an actions file will need to be defined. Please see
-# the documentation.
-enable_elasticsearch_curator: "no"
-
-enable_kibana: "{{ 'yes' if enable_central_logging | bool or enable_monasca | bool else 'no' }}"
-enable_kibana_external: "{{ enable_kibana | bool }}"
+# NOTE: If an external ElasticSearch cluster address is configured, all
+# services with ElasticSearch endpoints should be configured to log
+# to the external cluster by default. This is for backwards compatibility.
+opensearch_address: "{{ elasticsearch_address if elasticsearch_address is defined else kolla_internal_fqdn }}"
+enable_opensearch: "{{ enable_central_logging | bool or enable_osprofiler | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'opensearch') }}"
+enable_opensearch_dashboards: "{{ enable_opensearch | bool }}"
+enable_opensearch_dashboards_external: "{{ enable_opensearch_dashboards | bool }}"
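
The backwards-compatibility fallback described in the NOTE above (and for `opensearch_port` earlier in this file) amounts to letting old Elasticsearch overrides keep winning; sketched with illustrative values:

```python
# Illustrative only: operator overrides carried over from the
# Elasticsearch era take precedence over the new OpenSearch defaults.
overrides = {'elasticsearch_address': 'es.example.com'}  # e.g. globals.yml
opensearch_address = overrides.get('elasticsearch_address', 'kolla.internal')
opensearch_port = overrides.get('elasticsearch_port', '9200')
assert (opensearch_address, opensearch_port) == ('es.example.com', '9200')
```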
####################
# Redis options
####################
-redis_connection_string: "redis://{% for host in groups['redis'] %}{% if host == groups['redis'][0] %}admin:{{ redis_master_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ redis_sentinel_port }}?sentinel=kolla{% else %}&sentinel_fallback={{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ redis_sentinel_port }}{% endif %}{% endfor %}{{ redis_connection_string_extras }}"
+redis_connection_string: "redis://{% for host in groups['redis'] %}{% if host == groups['redis'][0] %}default:{{ redis_master_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ redis_sentinel_port }}?sentinel=kolla{% else %}&sentinel_fallback={{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ redis_sentinel_port }}{% endif %}{% endfor %}{{ redis_connection_string_extras }}"
redis_connection_string_extras: "&db=0&socket_timeout=60&retry_on_timeout=yes"
####################
@@ -822,25 +1010,27 @@ redis_connection_string_extras: "&db=0&socket_timeout=60&retry_on_timeout=yes"
####################
# valid values: ["elasticsearch", "redis"]
osprofiler_backend: "elasticsearch"
-elasticsearch_connection_string: "elasticsearch://{{ elasticsearch_address | put_address_in_context('url') }}:{{ elasticsearch_port }}"
-osprofiler_backend_connection_string: "{{ redis_connection_string if osprofiler_backend == 'redis' else elasticsearch_connection_string }}"
+opensearch_connection_string: "elasticsearch://{{ opensearch_address | put_address_in_context('url') }}:{{ opensearch_port }}"
+osprofiler_backend_connection_string: "{{ redis_connection_string if osprofiler_backend == 'redis' else opensearch_connection_string }}"
####################
# RabbitMQ options
####################
rabbitmq_user: "openstack"
rabbitmq_monitoring_user: ""
-outward_rabbitmq_user: "openstack"
# Whether to enable TLS encryption for RabbitMQ client-server communication.
rabbitmq_enable_tls: "no"
# CA certificate bundle in RabbitMQ container.
rabbitmq_cacert: "/etc/ssl/certs/{{ 'ca-certificates.crt' if kolla_base_distro in ['debian', 'ubuntu'] else 'ca-bundle.trust.crt' }}"
+rabbitmq_datadir_volume: "rabbitmq"
####################
# HAProxy options
####################
haproxy_user: "openstack"
haproxy_enable_external_vip: "{{ 'no' if kolla_same_external_internal_vip | bool else 'yes' }}"
+haproxy_enable_http2: "yes"
+haproxy_http2_protocol: "alpn h2,http/1.1"
kolla_enable_tls_internal: "no"
kolla_enable_tls_external: "{{ kolla_enable_tls_internal if kolla_same_external_internal_vip | bool else 'no' }}"
kolla_certificates_dir: "{{ node_config }}/certificates"
@@ -850,6 +1040,8 @@ kolla_admin_openrc_cacert: ""
kolla_copy_ca_into_containers: "no"
haproxy_backend_cacert: "{{ 'ca-certificates.crt' if kolla_base_distro in ['debian', 'ubuntu'] else 'ca-bundle.trust.crt' }}"
haproxy_backend_cacert_dir: "/etc/ssl/certs"
+haproxy_single_external_frontend: false
+haproxy_single_external_frontend_public_port: "{{ '443' if kolla_enable_tls_external | bool else '80' }}"
##################
# Backend options
@@ -868,13 +1060,8 @@ kolla_tls_backend_key: "{{ kolla_certificates_dir }}/backend-key.pem"
#####################
# ACME client options
#####################
-acme_client_servers: []
-
-####################
-# Kibana options
-####################
-kibana_user: "kibana"
-kibana_log_prefix: "flog"
+acme_client_lego: "server lego {{ api_interface_address }}:{{ letsencrypt_webserver_port }}"
+acme_client_servers: "{% set arr = [] %}{% if enable_letsencrypt | bool %}{{ arr.append(acme_client_lego) }}{% endif %}{{ arr }}"
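
The `arr.append(...)` expression above is a Jinja trick for building a list conditionally: `append` returns None, which renders as nothing under Ansible's templating, so only the final `{{ arr }}` emits output, and Ansible evaluates that back into a list. A rough stand-alone illustration, with a stand-in for Ansible's `bool` filter, None suppressed as Ansible arranges, and a placeholder lego endpoint:

    from jinja2 import Environment

    env = Environment(finalize=lambda v: "" if v is None else v)
    env.filters["bool"] = lambda v: str(v).lower() in ("1", "true", "yes", "on")

    tmpl = env.from_string(
        "{% set arr = [] %}"
        "{% if enable_letsencrypt | bool %}{{ arr.append(acme_client_lego) }}"
        "{% endif %}{{ arr }}")

    print(tmpl.render(enable_letsencrypt="yes",
                      acme_client_lego="server lego 10.0.0.10:8081"))
    # -> ['server lego 10.0.0.10:8081']
    print(tmpl.render(enable_letsencrypt="no"))
    # -> []
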
####################
# Keystone options
@@ -882,14 +1069,16 @@ kibana_log_prefix: "flog"
keystone_internal_fqdn: "{{ kolla_internal_fqdn }}"
keystone_external_fqdn: "{{ kolla_external_fqdn }}"
-# TODO(yoctozepto): Remove after Zed. Kept for compatibility only.
-keystone_admin_url: "{{ keystone_internal_url }}"
-keystone_internal_url: "{{ internal_protocol }}://{{ keystone_internal_fqdn | put_address_in_context('url') }}:{{ keystone_public_port }}"
-keystone_public_url: "{{ public_protocol }}://{{ keystone_external_fqdn | put_address_in_context('url') }}:{{ keystone_public_port }}"
+keystone_internal_url: "{{ keystone_internal_fqdn | kolla_url(internal_protocol, keystone_internal_port) }}"
+keystone_public_url: "{{ keystone_external_fqdn | kolla_url(public_protocol, keystone_public_port) }}"
keystone_admin_user: "admin"
keystone_admin_project: "admin"
+# Whether to apply changes to service user passwords when services are
+# reconfigured.
+update_keystone_service_user_passwords: true
+
default_project_domain_name: "Default"
default_project_domain_id: "default"
@@ -906,7 +1095,7 @@ fernet_token_allow_expired_window: 172800
# expiry and allow expired window, multiple active keys will be necessary.
fernet_key_rotation_interval: "{{ fernet_token_expiry + fernet_token_allow_expired_window }}"
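
For instance, assuming the default token expiry of 86400 seconds (set elsewhere in this file, not shown in this hunk), the rotation interval works out to three days:

    fernet_token_expiry = 86400                 # assumed default: 1 day
    fernet_token_allow_expired_window = 172800  # 2 days, as set above
    print(fernet_token_expiry + fernet_token_allow_expired_window)
    # 259200 seconds, i.e. rotate keys every 3 days
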
-keystone_default_user_role: "_member_"
+keystone_default_user_role: "member"
# OpenStack authentication string. You should only need to override these if you
# are changing the admin tenant/project or user.
@@ -914,15 +1103,17 @@ openstack_auth:
auth_url: "{{ keystone_internal_url }}"
username: "{{ keystone_admin_user }}"
password: "{{ keystone_admin_password }}"
- user_domain_name: "{{ default_user_domain_name }}"
- system_scope: "all"
+ project_name: "{{ keystone_admin_project }}"
+ domain_name: "default"
+ user_domain_name: "default"
#######################
# Glance options
#######################
-glance_backend_file: "{{ not (glance_backend_ceph | bool or glance_backend_swift | bool or glance_backend_vmware | bool) }}"
+glance_backend_file: "{{ not (glance_backend_ceph | bool or glance_backend_s3 | bool or glance_backend_swift | bool or glance_backend_vmware | bool) }}"
glance_backend_ceph: "no"
glance_backend_vmware: "no"
+glance_backend_s3: "no"
enable_glance_image_cache: "no"
glance_backend_swift: "{{ enable_swift | bool }}"
glance_file_datadir_volume: "glance"
@@ -933,9 +1124,6 @@ glance_api_hosts: "{{ [groups['glance-api'] | first] if glance_backend_file | bo
# NOTE(mnasiadka): For use in common role
glance_enable_tls_backend: "{{ kolla_enable_tls_backend }}"
-glance_internal_endpoint: "{{ internal_protocol }}://{{ glance_internal_fqdn | put_address_in_context('url') }}:{{ glance_api_port }}"
-glance_public_endpoint: "{{ public_protocol }}://{{ glance_external_fqdn | put_address_in_context('url') }}:{{ glance_api_port }}"
-
#######################
# Barbican options
#######################
@@ -943,9 +1131,6 @@ glance_public_endpoint: "{{ public_protocol }}://{{ glance_external_fqdn | put_a
barbican_crypto_plugin: "simple_crypto"
barbican_library_path: "/usr/lib/libCryptoki2_64.so"
-barbican_internal_endpoint: "{{ internal_protocol }}://{{ barbican_internal_fqdn | put_address_in_context('url') }}:{{ barbican_api_port }}"
-barbican_public_endpoint: "{{ public_protocol }}://{{ barbican_external_fqdn | put_address_in_context('url') }}:{{ barbican_api_port }}"
-
#################
# Gnocchi options
#################
@@ -960,6 +1145,8 @@ gnocchi_metric_datadir_volume: "gnocchi"
# Cinder options
#################################
cinder_backend_ceph: "no"
+cinder_backend_huawei: "no"
+cinder_backend_huawei_xml_files: []
cinder_backend_vmwarevc_vmdk: "no"
cinder_backend_vmware_vstorage_object: "no"
cinder_volume_group: "cinder-volumes"
@@ -967,7 +1154,7 @@ cinder_target_helper: "{{ 'lioadm' if ansible_facts.os_family == 'RedHat' else '
# Valid options are [ '', redis, etcd ]
cinder_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if enable_etcd | bool else '' }}"
-# Valid options are [ nfs, swift, ceph ]
+# Valid options are [ nfs, swift, ceph, s3 ]
cinder_backup_driver: "ceph"
cinder_backup_share: ""
cinder_backup_mount_options_nfs: ""
@@ -994,10 +1181,7 @@ designate_backend_external_bind9_nameservers: ""
# Valid options are [ '', redis ]
designate_coordination_backend: "{{ 'redis' if enable_redis | bool else '' }}"
-designate_internal_endpoint: "{{ internal_protocol }}://{{ designate_internal_fqdn | put_address_in_context('url') }}:{{ designate_api_port }}"
-designate_public_endpoint: "{{ public_protocol }}://{{ designate_external_fqdn | put_address_in_context('url') }}:{{ designate_api_port }}"
-
-designate_enable_notifications_sink: "{{ enable_designate | bool }}"
+designate_enable_notifications_sink: "no"
designate_notifications_topic_name: "notifications_designate"
#######################
@@ -1005,6 +1189,7 @@ designate_notifications_topic_name: "notifications_designate"
#######################
neutron_bgp_router_id: "1.1.1.1"
neutron_bridge_name: "{{ 'br-dvs' if neutron_plugin_agent == 'vmware_dvs' else 'br_dpdk' if enable_ovs_dpdk | bool else 'br-ex' }}"
+neutron_physical_networks: "{% for bridge in neutron_bridge_name.split(',') %}physnet{{ loop.index }}{% if not loop.last %},{% endif %}{% endfor %}"
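
A quick render of this new default (bridge names are placeholders) shows how each comma-separated bridge is numbered into its own physnet:

    from jinja2 import Environment

    tmpl = Environment().from_string(
        "{% for bridge in neutron_bridge_name.split(',') %}"
        "physnet{{ loop.index }}{% if not loop.last %},{% endif %}"
        "{% endfor %}")

    print(tmpl.render(neutron_bridge_name="br-ex"))         # physnet1
    print(tmpl.render(neutron_bridge_name="br-ex,br-ex2"))  # physnet1,physnet2
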
# Comma-separated type of enabled ml2 type drivers
neutron_type_drivers: "flat,vlan,vxlan{% if neutron_plugin_agent == 'ovn' %},geneve{% endif %}"
# Comma-separated types of tenant networks (should be listed in 'neutron_type_drivers')
@@ -1024,15 +1209,18 @@ neutron_legacy_iptables: "no"
# Enable distributed floating ip for OVN deployments
neutron_ovn_distributed_fip: "no"
-neutron_internal_endpoint: "{{ internal_protocol }}://{{ neutron_internal_fqdn | put_address_in_context('url') }}:{{ neutron_server_port }}"
-neutron_public_endpoint: "{{ public_protocol }}://{{ neutron_external_fqdn | put_address_in_context('url') }}:{{ neutron_server_port }}"
-
# SRIOV physnet:interface mappings when SRIOV is enabled
# "sriovnet1" and tunnel_interface used here as placeholders
neutron_sriov_physnet_mappings:
sriovnet1: "{{ tunnel_interface }}"
neutron_enable_tls_backend: "{{ kolla_enable_tls_backend }}"
+# Set OVN network availability zones
+neutron_ovn_availability_zones: []
+
+# Enable OVN agent
+neutron_enable_ovn_agent: "no"
+
#######################
# Nova options
#######################
@@ -1050,17 +1238,10 @@ nova_console: "novnc"
#######################
nova_database_shard_id: "{{ mariadb_default_database_shard_id | int }}"
-#######################
-# Murano options
-#######################
-murano_agent_rabbitmq_vhost: "muranoagent"
-murano_agent_rabbitmq_user: "muranoagent"
-
-
#######################
# Horizon options
#######################
-horizon_backend_database: "{{ enable_murano | bool }}"
+horizon_backend_database: false
horizon_keystone_multidomain: False
# Enable deploying custom horizon policy files for services that don't have a
@@ -1077,15 +1258,14 @@ enable_nova_horizon_policy_file: "{{ enable_nova }}"
horizon_enable_tls_backend: "{{ kolla_enable_tls_backend }}"
-horizon_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ horizon_tls_port if kolla_enable_tls_internal | bool else horizon_port }}"
-horizon_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ horizon_tls_port if kolla_enable_tls_external | bool else horizon_port }}"
-
###################
# External Ceph options
###################
# External Ceph - cephx auth enabled (this is the standard nowadays, defaults to yes)
external_ceph_cephx_enabled: "yes"
+ceph_cluster: "ceph"
+
# External Ceph pool names
ceph_cinder_pool_name: "volumes"
ceph_cinder_backup_pool_name: "backups"
@@ -1100,14 +1280,6 @@ ceph_gnocchi_user: "gnocchi"
ceph_manila_user: "manila"
ceph_nova_user: "{{ ceph_cinder_user }}"
-# External Ceph keyrings
-ceph_cinder_keyring: "ceph.client.cinder.keyring"
-ceph_cinder_backup_keyring: "ceph.client.cinder-backup.keyring"
-ceph_glance_keyring: "ceph.client.glance.keyring"
-ceph_gnocchi_keyring: "ceph.client.gnocchi.keyring"
-ceph_manila_keyring: "ceph.client.manila.keyring"
-ceph_nova_keyring: "{{ ceph_cinder_keyring }}"
-
#####################
# VMware support
######################
@@ -1143,14 +1315,17 @@ enable_prometheus_alertmanager_external: "{{ enable_prometheus_alertmanager | bo
enable_prometheus_ceph_mgr_exporter: "no"
enable_prometheus_openstack_exporter: "{{ enable_prometheus | bool }}"
enable_prometheus_openstack_exporter_external: "no"
-enable_prometheus_elasticsearch_exporter: "{{ enable_prometheus | bool and enable_elasticsearch | bool }}"
+enable_prometheus_elasticsearch_exporter: "{{ enable_prometheus | bool and enable_opensearch | bool }}"
enable_prometheus_blackbox_exporter: "{{ enable_prometheus | bool }}"
enable_prometheus_rabbitmq_exporter: "{{ enable_prometheus | bool and enable_rabbitmq | bool }}"
enable_prometheus_libvirt_exporter: "{{ enable_prometheus | bool and enable_nova | bool and nova_compute_virt_type in ['kvm', 'qemu'] }}"
enable_prometheus_etcd_integration: "{{ enable_prometheus | bool and enable_etcd | bool }}"
-enable_prometheus_msteams: "no"
+enable_prometheus_proxysql_exporter: "{{ enable_prometheus | bool and enable_proxysql | bool }}"
prometheus_alertmanager_user: "admin"
+prometheus_ceph_exporter_interval: "{{ prometheus_scrape_interval }}"
+prometheus_grafana_user: "grafana"
+prometheus_skyline_user: "skyline"
prometheus_scrape_interval: "60s"
prometheus_openstack_exporter_interval: "{{ prometheus_scrape_interval }}"
prometheus_openstack_exporter_timeout: "45s"
@@ -1160,12 +1335,6 @@ prometheus_ceph_mgr_exporter_endpoints: []
prometheus_openstack_exporter_endpoint_type: "internal"
prometheus_openstack_exporter_compute_api_version: "latest"
prometheus_libvirt_exporter_interval: "60s"
-prometheus_msteams_webhook_url:
-
-############
-# Vitrage
-############
-enable_vitrage_prometheus_datasource: "{{ enable_prometheus | bool }}"
####################
@@ -1174,39 +1343,31 @@ enable_vitrage_prometheus_datasource: "{{ enable_prometheus | bool }}"
influxdb_address: "{{ kolla_internal_fqdn }}"
influxdb_datadir_volume: "influxdb"
-influxdb_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ influxdb_http_port }}"
-
-#################
-# Kafka options
-#################
-kafka_datadir_volume: "kafka"
-
-# The number of brokers in a Kafka cluster. This is used for automatically
-# setting quantities such as topic replicas and it is not recommended to
-# change it unless you know what you are doing.
-kafka_broker_count: "{{ groups['kafka'] | length }}"
+influxdb_internal_endpoint: "{{ kolla_internal_fqdn | kolla_url(internal_protocol, influxdb_http_port) }}"
#########################
# Internal Image options
#########################
-distro_python_version_map: {
- "centos": "3.9",
- "debian": "3.9",
- "rocky": "3.9",
- "ubuntu": "3.10"
-}
-
kolla_base_distro_version_default_map: {
"centos": "stream9",
- "debian": "bullseye",
+ "debian": "bookworm",
"rocky": "9",
- "ubuntu": "jammy",
+ "ubuntu": "noble",
}
-distro_python_version: "{{ distro_python_version_map[kolla_base_distro] }}"
+distro_python_version: "3"
kolla_base_distro_version: "{{ kolla_base_distro_version_default_map[kolla_base_distro] }}"
+#############
+# S3 options
+#############
+# Common options for S3 Cinder Backup and Glance S3 backend.
+s3_url:
+s3_bucket:
+s3_access_key:
+s3_secret_key:
+
##########
# Telegraf
##########
@@ -1214,55 +1375,29 @@ kolla_base_distro_version: "{{ kolla_base_distro_version_default_map[kolla_base_
# telemetry data.
telegraf_enable_docker_input: "no"
-vitrage_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ vitrage_api_port }}"
-vitrage_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ vitrage_api_port }}"
-
-####################
-# Grafana
-####################
-grafana_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ grafana_server_port }}"
-grafana_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ grafana_server_port }}"
-
-#############
-# Ironic
-#############
-ironic_internal_endpoint: "{{ internal_protocol }}://{{ ironic_internal_fqdn | put_address_in_context('url') }}:{{ ironic_api_port }}"
-ironic_public_endpoint: "{{ public_protocol }}://{{ ironic_external_fqdn | put_address_in_context('url') }}:{{ ironic_api_port }}"
-
# Valid options are [ '', redis, etcd ]
ironic_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if enable_etcd | bool else '' }}"
-########
-# Swift
-########
-swift_internal_base_endpoint: "{{ internal_protocol }}://{{ swift_internal_fqdn | put_address_in_context('url') }}:{{ swift_proxy_server_port }}"
-
-swift_internal_endpoint: "{{ swift_internal_base_endpoint }}/v1/AUTH_%(tenant_id)s"
-swift_public_endpoint: "{{ public_protocol }}://{{ swift_external_fqdn | put_address_in_context('url') }}:{{ swift_proxy_server_port }}/v1/AUTH_%(tenant_id)s"
-
##########
# Octavia
##########
# Whether to run Kolla-Ansible's automatic configuration for Octavia.
# NOTE: if you upgrade from Ussuri, you must set `octavia_auto_configure` to `no`
# and keep your other Octavia config as before.
-octavia_auto_configure: yes
+octavia_auto_configure: "{{ 'amphora' in octavia_provider_drivers }}"
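
Note the new default is a plain substring test on the provider driver setting, so auto-configuration only runs when the amphora driver is enabled. A sketch, assuming octavia_provider_drivers keeps its usual "name:description" format (values below are hypothetical):

    for drivers in ("amphora:Amphora provider", "ovn:OVN provider"):
        octavia_auto_configure = "amphora" in drivers
        print(f"{drivers!r} -> {octavia_auto_configure}")
    # 'amphora:Amphora provider' -> True
    # 'ovn:OVN provider' -> False
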
# Octavia network type options are [ tenant, provider ]
# * tenant indicates that we will create a tenant network and a network
# interface on the Octavia worker nodes for communication with amphorae.
# * provider indicates that we will create a flat or vlan provider network.
# In this case octavia_network_interface should be set to a network interface
-# on the Octavia woker nodes on the same provider network.
+# on the Octavia worker nodes on the same provider network.
octavia_network_type: "provider"
-octavia_internal_endpoint: "{{ internal_protocol }}://{{ octavia_internal_fqdn | put_address_in_context('url') }}:{{ octavia_api_port }}"
-octavia_public_endpoint: "{{ public_protocol }}://{{ octavia_external_fqdn | put_address_in_context('url') }}:{{ octavia_api_port }}"
-
###################################
# Identity federation configuration
###################################
-# Here we configure all of the IdPs meta informations that will be required to implement identity federation with OpenStack Keystone.
+# Here we configure all of the IdPs' meta information that will be required to implement identity federation with OpenStack Keystone.
# We require the administrator to enter the following metadata:
# * name (internal name of the IdP in Keystone);
# * openstack_domain (the domain in Keystone that the IdP belongs to)
diff --git a/ansible/inventory/all-in-one b/ansible/inventory/all-in-one
index 73a4ec82b8..6e1bbe67b4 100644
--- a/ansible/inventory/all-in-one
+++ b/ansible/inventory/all-in-one
@@ -43,12 +43,6 @@ monitoring
[etcd:children]
control
-[kafka:children]
-control
-
-[kibana:children]
-control
-
[telegraf:children]
compute
control
@@ -56,9 +50,6 @@ monitoring
network
storage
-[elasticsearch:children]
-control
-
[hacluster:children]
control
@@ -74,22 +65,6 @@ control
[rabbitmq:children]
control
-[outward-rabbitmq:children]
-control
-
-[monasca-agent:children]
-compute
-control
-monitoring
-network
-storage
-
-[monasca:children]
-monitoring
-
-[storm:children]
-monitoring
-
[keystone:children]
control
@@ -113,9 +88,6 @@ control
[cloudkitty:children]
control
-[freezer:children]
-control
-
[memcached:children]
control
@@ -131,9 +103,6 @@ control
[heat:children]
control
-[murano:children]
-control
-
[ironic:children]
control
@@ -146,12 +115,6 @@ monitoring
[magnum:children]
control
-[sahara:children]
-control
-
-[solum:children]
-control
-
[mistral:children]
control
@@ -174,12 +137,6 @@ compute
[tacker:children]
control
-[vitrage:children]
-control
-
-[senlin:children]
-control
-
[trove:children]
control
@@ -198,14 +155,11 @@ control
[bifrost:children]
deployment
-[zookeeper:children]
-control
-
[zun:children]
control
-[skydive:children]
-monitoring
+[skyline:children]
+control
[redis:children]
control
@@ -216,6 +170,9 @@ control
[venus:children]
monitoring
+[letsencrypt:children]
+loadbalancer
+
# Additional control implemented here. These groups allow you to control which
# services run on which hosts at a per-service level.
#
@@ -236,9 +193,12 @@ common
[kolla-toolbox:children]
common
-# Elasticsearch Curator
-[elasticsearch-curator:children]
-elasticsearch
+[opensearch:children]
+control
+
+# OpenSearch Dashboards
+[opensearch-dashboards:children]
+opensearch
# Glance
[glance-api:children]
@@ -251,6 +211,9 @@ nova
[nova-conductor:children]
nova
+[nova-metadata:children]
+nova
+
[nova-super-conductor:children]
nova
@@ -286,6 +249,9 @@ neutron
compute
network
+[neutron-ovn-agent:children]
+compute
+
[neutron-bgp-dragent:children]
neutron
@@ -318,13 +284,6 @@ cloudkitty
[cloudkitty-processor:children]
cloudkitty
-# Freezer
-[freezer-api:children]
-freezer
-
-[freezer-scheduler:children]
-freezer
-
# iSCSI
[iscsid:children]
compute
@@ -390,48 +349,6 @@ heat
[heat-engine:children]
heat
-# Murano
-[murano-api:children]
-murano
-
-[murano-engine:children]
-murano
-
-# Monasca
-[monasca-agent-collector:children]
-monasca-agent
-
-[monasca-agent-forwarder:children]
-monasca-agent
-
-[monasca-agent-statsd:children]
-monasca-agent
-
-[monasca-api:children]
-monasca
-
-[monasca-log-persister:children]
-monasca
-
-[monasca-log-metrics:children]
-monasca
-
-[monasca-thresh:children]
-monasca
-
-[monasca-notification:children]
-monasca
-
-[monasca-persister:children]
-monasca
-
-# Storm
-[storm-worker:children]
-storm
-
-[storm-nimbus:children]
-storm
-
# Ironic
[ironic-api:children]
ironic
@@ -455,25 +372,6 @@ magnum
[magnum-conductor:children]
magnum
-# Solum
-[solum-api:children]
-solum
-
-[solum-worker:children]
-solum
-
-[solum-deployer:children]
-solum
-
-[solum-conductor:children]
-solum
-
-[solum-application-deployment:children]
-solum
-
-[solum-image-builder:children]
-solum
-
# Mistral
[mistral-api:children]
mistral
@@ -520,13 +418,6 @@ gnocchi
[gnocchi-metricd:children]
gnocchi
-# Sahara
-[sahara-api:children]
-sahara
-
-[sahara-engine:children]
-sahara
-
# Ceilometer
[ceilometer-central:children]
ceilometer
@@ -555,19 +446,6 @@ watcher
[watcher-applier:children]
watcher
-# Senlin
-[senlin-api:children]
-senlin
-
-[senlin-conductor:children]
-senlin
-
-[senlin-engine:children]
-senlin
-
-[senlin-health-manager:children]
-senlin
-
# Octavia
[octavia-api:children]
octavia
@@ -623,13 +501,12 @@ compute
[zun-cni-daemon:children]
compute
-# Skydive
-[skydive-analyzer:children]
-skydive
+# Skyline
+[skyline-apiserver:children]
+skyline
-[skydive-agent:children]
-compute
-network
+[skyline-console:children]
+skyline
# Tacker
[tacker-server:children]
@@ -638,22 +515,6 @@ tacker
[tacker-conductor:children]
tacker
-# Vitrage
-[vitrage-api:children]
-vitrage
-
-[vitrage-notifier:children]
-vitrage
-
-[vitrage-graph:children]
-vitrage
-
-[vitrage-ml:children]
-vitrage
-
-[vitrage-persistor:children]
-vitrage
-
# Blazar
[blazar-api:children]
blazar
@@ -672,9 +533,6 @@ storage
[prometheus-mysqld-exporter:children]
mariadb
-[prometheus-haproxy-exporter:children]
-loadbalancer
-
[prometheus-memcached-exporter:children]
memcached
@@ -692,7 +550,7 @@ monitoring
monitoring
[prometheus-elasticsearch-exporter:children]
-elasticsearch
+opensearch
[prometheus-blackbox-exporter:children]
monitoring
@@ -700,9 +558,6 @@ monitoring
[prometheus-libvirt-exporter:children]
compute
-[prometheus-msteams:children]
-prometheus-alertmanager
-
[masakari-api:children]
control
@@ -742,3 +597,9 @@ venus
[venus-manager:children]
venus
+
+[letsencrypt-webserver:children]
+letsencrypt
+
+[letsencrypt-lego:children]
+letsencrypt
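
Both inventories lean entirely on Ansible's `[group:children]` mechanism: hosts placed in `control` flow into `opensearch`, and from there into `opensearch-dashboards`, so operators only ever edit the top-level groups. A rough illustration of that resolution with a toy inventory (configparser stands in for Ansible's real INI parser):

    import configparser

    ini = (
        "[control]\n"
        "controller01\n"
        "\n"
        "[opensearch:children]\n"
        "control\n"
        "\n"
        "[opensearch-dashboards:children]\n"
        "opensearch\n"
    )

    cp = configparser.ConfigParser(allow_no_value=True, delimiters=("=",))
    cp.read_string(ini)

    def hosts(group):
        children = f"{group}:children"
        if cp.has_section(children):
            return [h for child in cp.options(children) for h in hosts(child)]
        return list(cp.options(group))

    print(hosts("opensearch-dashboards"))  # ['controller01']
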
diff --git a/ansible/inventory/multinode b/ansible/inventory/multinode
index 171d357b01..0485a33b2c 100644
--- a/ansible/inventory/multinode
+++ b/ansible/inventory/multinode
@@ -67,12 +67,6 @@ monitoring
[prometheus:children]
monitoring
-[kafka:children]
-control
-
-[kibana:children]
-control
-
[telegraf:children]
compute
control
@@ -80,9 +74,6 @@ monitoring
network
storage
-[elasticsearch:children]
-control
-
[hacluster:children]
control
@@ -98,22 +89,6 @@ control
[rabbitmq:children]
control
-[outward-rabbitmq:children]
-control
-
-[monasca-agent:children]
-compute
-control
-monitoring
-network
-storage
-
-[monasca:children]
-monitoring
-
-[storm:children]
-monitoring
-
[keystone:children]
control
@@ -137,9 +112,6 @@ control
[cloudkitty:children]
control
-[freezer:children]
-control
-
[memcached:children]
control
@@ -155,21 +127,12 @@ control
[heat:children]
control
-[murano:children]
-control
-
-[solum:children]
-control
-
[ironic:children]
control
[magnum:children]
control
-[sahara:children]
-control
-
[mistral:children]
control
@@ -195,12 +158,6 @@ control
[trove:children]
control
-[senlin:children]
-control
-
-[vitrage:children]
-control
-
[watcher:children]
control
@@ -216,14 +173,11 @@ control
[bifrost:children]
deployment
-[zookeeper:children]
-control
-
[zun:children]
control
-[skydive:children]
-monitoring
+[skyline:children]
+control
[redis:children]
control
@@ -234,6 +188,9 @@ control
[venus:children]
monitoring
+[letsencrypt:children]
+loadbalancer
+
# Additional control implemented here. These groups allow you to control which
# services run on which hosts at a per-service level.
#
@@ -254,9 +211,12 @@ common
[kolla-toolbox:children]
common
-# Elasticsearch Curator
-[elasticsearch-curator:children]
-elasticsearch
+[opensearch:children]
+control
+
+# OpenSearch Dashboards
+[opensearch-dashboards:children]
+opensearch
# Glance
[glance-api:children]
@@ -269,6 +229,9 @@ nova
[nova-conductor:children]
nova
+[nova-metadata:children]
+nova
+
[nova-super-conductor:children]
nova
@@ -316,6 +279,10 @@ neutron
[ironic-neutron-agent:children]
neutron
+[neutron-ovn-agent:children]
+compute
+network
+
# Cinder
[cinder-api:children]
cinder
@@ -336,13 +303,6 @@ cloudkitty
[cloudkitty-processor:children]
cloudkitty
-# Freezer
-[freezer-api:children]
-freezer
-
-[freezer-scheduler:children]
-freezer
-
# iSCSI
[iscsid:children]
compute
@@ -398,48 +358,6 @@ heat
[heat-engine:children]
heat
-# Murano
-[murano-api:children]
-murano
-
-[murano-engine:children]
-murano
-
-# Monasca
-[monasca-agent-collector:children]
-monasca-agent
-
-[monasca-agent-forwarder:children]
-monasca-agent
-
-[monasca-agent-statsd:children]
-monasca-agent
-
-[monasca-api:children]
-monasca
-
-[monasca-log-persister:children]
-monasca
-
-[monasca-log-metrics:children]
-monasca
-
-[monasca-thresh:children]
-monasca
-
-[monasca-notification:children]
-monasca
-
-[monasca-persister:children]
-monasca
-
-# Storm
-[storm-worker:children]
-storm
-
-[storm-nimbus:children]
-storm
-
# Ironic
[ironic-api:children]
ironic
@@ -463,32 +381,6 @@ magnum
[magnum-conductor:children]
magnum
-# Sahara
-[sahara-api:children]
-sahara
-
-[sahara-engine:children]
-sahara
-
-# Solum
-[solum-api:children]
-solum
-
-[solum-worker:children]
-solum
-
-[solum-deployer:children]
-solum
-
-[solum-conductor:children]
-solum
-
-[solum-application-deployment:children]
-solum
-
-[solum-image-builder:children]
-solum
-
# Mistral
[mistral-api:children]
mistral
@@ -573,19 +465,6 @@ watcher
[watcher-applier:children]
watcher
-# Senlin
-[senlin-api:children]
-senlin
-
-[senlin-conductor:children]
-senlin
-
-[senlin-engine:children]
-senlin
-
-[senlin-health-manager:children]
-senlin
-
# Octavia
[octavia-api:children]
octavia
@@ -641,13 +520,12 @@ compute
[zun-cni-daemon:children]
compute
-# Skydive
-[skydive-analyzer:children]
-skydive
+# Skyline
+[skyline-apiserver:children]
+skyline
-[skydive-agent:children]
-compute
-network
+[skyline-console:children]
+skyline
# Tacker
[tacker-server:children]
@@ -656,22 +534,6 @@ tacker
[tacker-conductor:children]
tacker
-# Vitrage
-[vitrage-api:children]
-vitrage
-
-[vitrage-notifier:children]
-vitrage
-
-[vitrage-graph:children]
-vitrage
-
-[vitrage-ml:children]
-vitrage
-
-[vitrage-persistor:children]
-vitrage
-
# Blazar
[blazar-api:children]
blazar
@@ -690,9 +552,6 @@ storage
[prometheus-mysqld-exporter:children]
mariadb
-[prometheus-haproxy-exporter:children]
-loadbalancer
-
[prometheus-memcached-exporter:children]
memcached
@@ -710,7 +569,7 @@ monitoring
monitoring
[prometheus-elasticsearch-exporter:children]
-elasticsearch
+opensearch
[prometheus-blackbox-exporter:children]
monitoring
@@ -718,9 +577,6 @@ monitoring
[prometheus-libvirt-exporter:children]
compute
-[prometheus-msteams:children]
-prometheus-alertmanager
-
[masakari-api:children]
control
@@ -760,3 +616,9 @@ venus
[venus-manager:children]
venus
+
+[letsencrypt-webserver:children]
+letsencrypt
+
+[letsencrypt-lego:children]
+letsencrypt
diff --git a/ansible/kolla-host.yml b/ansible/kolla-host.yml
index b6a196afca..37ab5cc1de 100644
--- a/ansible/kolla-host.yml
+++ b/ansible/kolla-host.yml
@@ -5,6 +5,10 @@
hosts: baremetal
serial: '{{ kolla_serial|default("0") }}'
gather_facts: false
+ max_fail_percentage: >-
+ {{ baremetal_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: openstack.kolla.baremetal,
tags: baremetal }
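
The chained default() filters above give a per-group override precedence: a host-group-specific baremetal_max_fail_percentage wins over the global kolla_max_fail_percentage, which in turn wins over the fallback of 100 (never abort). A stand-alone sketch of the same resolution order in plain Jinja2 (values hypothetical):

    from jinja2 import Environment

    tmpl = Environment().from_string(
        "{{ baremetal_max_fail_percentage |"
        " default(kolla_max_fail_percentage) | default(100) }}")

    print(tmpl.render())                              # 100
    print(tmpl.render(kolla_max_fail_percentage=50))  # 50
    print(tmpl.render(baremetal_max_fail_percentage=20,
                      kolla_max_fail_percentage=50))  # 20
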
diff --git a/ansible/library/kolla_container.py b/ansible/library/kolla_container.py
new file mode 100644
index 0000000000..a4eaaaedd1
--- /dev/null
+++ b/ansible/library/kolla_container.py
@@ -0,0 +1,435 @@
+# Copyright 2015 Sam Yaple
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# FIXME(yoctozepto): this module does *not* validate "common_options" which are
+# a hacky way to seed most usages of kolla_container in kolla-ansible ansible
+# playbooks - caution has to be exercised when setting "common_options"
+
+# FIXME(yoctozepto): restart_policy is *not* checked in the container
+
+from ansible.module_utils.basic import AnsibleModule
+import traceback
+
+from ansible.module_utils.kolla_container_worker import ContainerWorker
+
+
+DOCUMENTATION = '''
+---
+module: kolla_container
+short_description: Module for controlling containers
+description:
+ - A module for controlling the container engine as used by Kolla.
+options:
+ common_options:
+ description:
+ - A dict containing common params such as login info
+ required: False
+ type: dict
+ default: dict()
+ action:
+ description:
+ - The action the module should take
+ required: True
+ type: str
+ choices:
+ - compare_container
+ - compare_image
+ - create_volume
+ - ensure_image
+ - get_container_env
+ - get_container_state
+ - pull_image
+ - remove_container
+ - remove_image
+ - remove_volume
+ - recreate_or_restart_container
+ - restart_container
+ - start_container
+ - stop_container
+ - stop_and_remove_container
+ api_version:
+ description:
+ - The API version for docker-py to use when contacting Docker
+ required: False
+ type: str
+ default: auto
+ auth_email:
+ description:
+ - The email address used to authenticate
+ required: False
+ type: str
+ auth_password:
+ description:
+ - The password used to authenticate
+ required: False
+ type: str
+ auth_registry:
+ description:
+ - The registry to authenticate
+ required: False
+ type: str
+ auth_username:
+ description:
+ - The username used to authenticate
+ required: False
+ type: str
+ command:
+ description:
+ - The command to execute in the container
+ required: False
+ type: str
+ container_engine:
+ description:
+ - Name of container engine to use
+ required: False
+ type: str
+ default: docker
+ detach:
+ description:
+ - Detach from the container after it is created
+ required: False
+ default: True
+ type: bool
+ name:
+ description:
+ - Name of the container or volume to manage
+ required: False
+ type: str
+ environment:
+ description:
+ - The environment to set for the container
+ required: False
+ type: dict
+ image:
+ description:
+ - Name of the docker image
+ required: False
+ type: str
+ ipc_mode:
+ description:
+ - Set docker ipc namespace
+ required: False
+ type: str
+ default: None
+ choices:
+ - host
+ - private
+ - shareable
+ cap_add:
+ description:
+ - Add capabilities to docker container
+ required: False
+ type: list
+ default: list()
+ security_opt:
+ description:
+ - Set container security profile
+ required: False
+ type: list
+ default: list()
+ labels:
+ description:
+ - List of labels to apply to container
+ required: False
+ type: dict
+ default: dict()
+ pid_mode:
+ description:
+ - Set docker pid namespace
+ required: False
+ type: str
+ default: None
+ choices:
+ - host
+ - private
+ cgroupns_mode:
+ description:
+ - Set docker cgroups namespace (default depends on Docker config)
+ - Supported only with Docker 20.10 (Docker API 1.41) and later
+ required: False
+ type: str
+ default: None
+ choices:
+ - private
+ - host
+ privileged:
+ description:
+ - Set the container to privileged
+ required: False
+ default: False
+ type: bool
+ remove_on_exit:
+ description:
+ - When not detaching from container, remove on successful exit
+ required: False
+ default: True
+ type: bool
+ restart_policy:
+ description:
+ - When docker restarts the container (does not affect checks)
+ required: False
+ type: str
+ choices:
+ - no
+ - on-failure
+ - oneshot
+ - always
+ - unless-stopped
+ restart_retries:
+ description:
+ - How many times to attempt a restart if 'on-failure' policy is set
+ type: int
+ default: 10
+ tmpfs:
+ description:
+ - List of paths to mount as tmpfs.
+ required: False
+ type: list
+ volumes:
+ description:
+ - Set volumes for docker to use
+ required: False
+ type: list
+ volumes_from:
+ description:
+ - Name or ID of container(s) to use volumes from
+ required: False
+ type: list
+ state:
+ description:
+ - Check container status
+ required: False
+ type: str
+ choices:
+ - running
+ - exited
+ - paused
+ tty:
+ description:
+ - Allocate TTY to container
+ required: False
+ default: False
+ type: bool
+ client_timeout:
+ description:
+ - Docker client timeout in seconds
+ required: False
+ default: 120
+ type: int
+ healthcheck:
+ description:
+ - Container healthcheck configuration
+ required: False
+ default: dict()
+ type: dict
+author: Sam Yaple
+'''
+
+EXAMPLES = '''
+- hosts: kolla_container
+ tasks:
+ - name: Start container
+ kolla_container:
+ image: ubuntu
+ name: test_container
+ action: start_container
+ - name: Remove container
+ kolla_container:
+ name: test_container
+ action: remove_container
+ - name: Pull image without starting container
+ kolla_container:
+ action: pull_image
+ image: private-registry.example.com:5000/ubuntu
+ - name: Create named volume
+ kolla_container:
+ action: create_volume
+ name: name_of_volume
+ - name: Remove named volume
+ kolla_container:
+ action: remove_volume
+ name: name_of_volume
+ - name: Remove image
+ kolla_container:
+ action: remove_image
+ image: name_of_image
+'''
+
+
+def generate_module():
+ # NOTE(jeffrey4l): adding the empty string '' to choices lets us use
+ # pid_mode: "{{ service.pid_mode | default ('') }}" in yaml
+ # NOTE(r-krcek): argument_spec should also be reflected in the list of
+ # arguments in the service-check-containers role
+ argument_spec = dict(
+ common_options=dict(required=False, type='dict', default=dict()),
+ action=dict(required=True, type='str',
+ choices=['compare_container',
+ 'compare_image',
+ 'create_volume',
+ 'ensure_image',
+ 'get_container_env',
+ 'get_container_state',
+ 'pull_image',
+ 'recreate_or_restart_container',
+ 'remove_container',
+ 'remove_image',
+ 'remove_volume',
+ 'restart_container',
+ 'start_container',
+ 'stop_container',
+ 'stop_and_remove_container']),
+ api_version=dict(required=False, type='str'),
+ auth_email=dict(required=False, type='str'),
+ auth_password=dict(required=False, type='str', no_log=True),
+ auth_registry=dict(required=False, type='str'),
+ auth_username=dict(required=False, type='str'),
+ command=dict(required=False, type='str'),
+ container_engine=dict(required=False, type='str'),
+ detach=dict(required=False, type='bool', default=True),
+ labels=dict(required=False, type='dict', default=dict()),
+ name=dict(required=False, type='str'),
+ environment=dict(required=False, type='dict'),
+ healthcheck=dict(required=False, type='dict'),
+ image=dict(required=False, type='str'),
+ ipc_mode=dict(required=False, type='str', choices=['',
+ 'host',
+ 'private',
+ 'shareable']),
+ cap_add=dict(required=False, type='list', default=list()),
+ security_opt=dict(required=False, type='list', default=list()),
+ pid_mode=dict(required=False, type='str', choices=['',
+ 'host',
+ 'private']),
+ cgroupns_mode=dict(required=False, type='str',
+ choices=['private', 'host']),
+ privileged=dict(required=False, type='bool', default=False),
+ graceful_timeout=dict(required=False, type='int'),
+ remove_on_exit=dict(required=False, type='bool', default=True),
+ restart_policy=dict(required=False, type='str', choices=[
+ 'no',
+ 'on-failure',
+ 'oneshot',
+ 'always',
+ 'unless-stopped']),
+ restart_retries=dict(required=False, type='int'),
+ state=dict(required=False, type='str', default='running',
+ choices=['running',
+ 'exited',
+ 'paused']),
+ tls_verify=dict(required=False, type='bool', default=False),
+ tls_cert=dict(required=False, type='str'),
+ tls_key=dict(required=False, type='str'),
+ tls_cacert=dict(required=False, type='str'),
+ tmpfs=dict(required=False, type='list'),
+ volumes=dict(required=False, type='list'),
+ volumes_from=dict(required=False, type='list'),
+ dimensions=dict(required=False, type='dict', default=dict()),
+ tty=dict(required=False, type='bool', default=False),
+ client_timeout=dict(required=False, type='int'),
+ ignore_missing=dict(required=False, type='bool', default=False),
+ )
+ required_if = [
+ ['action', 'pull_image', ['image']],
+ ['action', 'start_container', ['image', 'name']],
+ ['action', 'compare_container', ['name']],
+ ['action', 'compare_image', ['name']],
+ ['action', 'create_volume', ['name']],
+ ['action', 'ensure_image', ['image']],
+ ['action', 'get_container_env', ['name']],
+ ['action', 'get_container_state', ['name']],
+ ['action', 'recreate_or_restart_container', ['name']],
+ ['action', 'remove_container', ['name']],
+ ['action', 'remove_image', ['image']],
+ ['action', 'remove_volume', ['name']],
+ ['action', 'restart_container', ['name']],
+ ['action', 'stop_container', ['name']],
+ ['action', 'stop_and_remove_container', ['name']],
+ ]
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=required_if,
+ bypass_checks=False
+ )
+
+ common_options_defaults = {
+ 'auth_email': None,
+ 'auth_password': None,
+ 'auth_registry': None,
+ 'auth_username': None,
+ 'environment': None,
+ 'restart_policy': None,
+ 'restart_retries': 10,
+ 'api_version': 'auto',
+ 'graceful_timeout': 10,
+ 'client_timeout': 120,
+ 'container_engine': 'docker',
+ }
+
+ new_args = module.params.pop('common_options', dict()) or dict()
+ env_module_environment = module.params.pop('environment', dict()) or dict()
+
+ for k, v in module.params.items():
+ if v is None:
+ if k in common_options_defaults:
+ if k in new_args:
+ # Common options coming from Ansible group vars
+ # may arrive as strings or ints
+ if isinstance(new_args[k], str) and new_args[k].isdigit():
+ new_args[k] = int(new_args[k])
+ continue
+ else:
+ if common_options_defaults[k] is not None:
+ new_args[k] = common_options_defaults[k]
+ else:
+ continue
+ if v is not None:
+ new_args[k] = v
+
+ env_module_common_options = new_args.pop('environment', dict()) or dict()
+ new_args['environment'] = env_module_common_options
+ new_args['environment'].update(env_module_environment)
+
+ # if pid_mode = ""/None/False, remove it
+ if not new_args.get('pid_mode', False):
+ new_args.pop('pid_mode', None)
+ # if ipc_mode = ""/None/False, remove it
+ if not new_args.get('ipc_mode', False):
+ new_args.pop('ipc_mode', None)
+
+ module.params = new_args
+ return module
+
+
+def main():
+ module = generate_module()
+
+ cw: ContainerWorker = None
+ try:
+ if module.params.get('container_engine') == 'docker':
+ from ansible.module_utils.kolla_docker_worker import DockerWorker
+ cw = DockerWorker(module)
+ else:
+ from ansible.module_utils.kolla_podman_worker import PodmanWorker
+ cw = PodmanWorker(module)
+
+ # TODO(inc0): We keep it bool to have ansible deal with consistent
+ # types. If we ever add a method that has to return
+ # meaningful data, we need to refactor all methods to return dicts.
+ result = bool(getattr(cw, module.params.get('action'))())
+ module.exit_json(changed=cw.changed, result=result, **cw.result)
+ except Exception:
+ module.fail_json(changed=True, msg=repr(traceback.format_exc()),
+ **getattr(cw, 'result', {}))
+
+
+if __name__ == '__main__':
+ main()
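
To make the merge in generate_module() concrete: explicit task arguments win, then common_options, then the built-in defaults, with digit strings from group vars coerced to int. A simplified stand-alone re-implementation of just that precedence (not the module itself):

    defaults = {"client_timeout": 120, "restart_retries": 10,
                "container_engine": "docker"}

    def resolve(params, common_options):
        merged = dict(common_options)
        for key, value in params.items():
            if value is None:
                if key in merged:
                    # Group vars may carry ints as strings; coerce them.
                    if isinstance(merged[key], str) and merged[key].isdigit():
                        merged[key] = int(merged[key])
                elif key in defaults:
                    merged[key] = defaults[key]
            else:
                merged[key] = value  # explicit argument wins
        return merged

    print(resolve({"client_timeout": None, "restart_retries": 5},
                  {"client_timeout": "300"}))
    # {'client_timeout': 300, 'restart_retries': 5}
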
diff --git a/ansible/library/kolla_container_facts.py b/ansible/library/kolla_container_facts.py
index 589755104b..7e2254e514 100644
--- a/ansible/library/kolla_container_facts.py
+++ b/ansible/library/kolla_container_facts.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python3
-
# Copyright 2016 99cloud
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,10 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
-import docker
-
+from abc import ABC
+from abc import abstractmethod
from ansible.module_utils.basic import AnsibleModule
+from traceback import format_exc
+
DOCUMENTATION = '''
---
@@ -27,6 +26,11 @@
- A module targeting at collecting Docker container facts. It is used for
detecting whether the container is running on host in Kolla.
options:
+ container_engine:
+ description:
+ - Name of container engine to use
+ required: True
+ type: str
api_version:
description:
- The version of the api for docker-py to use when contacting docker
@@ -38,6 +42,11 @@
- Name or names of the containers
required: False
type: str or list
+ action:
+ description:
+ - The action to perform
+ required: True
+ type: str
author: Jeffrey Zhang
'''
@@ -46,42 +55,113 @@
tasks:
- name: Gather docker facts
kolla_container_facts:
+ container_engine: docker
+ action: get_containers
- name: Gather glance container facts
kolla_container_facts:
+ container_engine: docker
name:
- glance_api
- glance_registry
+ action: get_containers
'''
-def get_docker_client():
- return docker.APIClient
+class ContainerFactsWorker(ABC):
+ def __init__(self, module):
+ self.module = module
+ self.results = dict(changed=False, _containers=[])
+ self.params = module.params
+
+ @abstractmethod
+ def get_containers(self):
+ pass
+
+
+class DockerFactsWorker(ContainerFactsWorker):
+ def __init__(self, module):
+ super().__init__(module)
+ try:
+ import docker
+ except ImportError:
+ self.module.fail_json(
+ msg="The docker library could not be imported")
+ self.client = docker.APIClient(version=module.params.get(
+ 'api_version'))
+
+ def get_containers(self):
+ containers = self.client.containers()
+ names = self.params.get('name')
+ if names and not isinstance(names, list):
+ names = [names]
+ for container in containers:
+ for container_name in container['Names']:
+ # remove '/' prefix character
+ container_name = container_name[1:]
+ if names and container_name not in names:
+ continue
+ self.results['_containers'].append(container)
+ self.results[container_name] = container
+
+
+class PodmanFactsWorker(ContainerFactsWorker):
+ def __init__(self, module):
+ try:
+ import podman.errors as podmanError
+ from podman import PodmanClient
+ except ImportError:
+ self.module.fail_json(
+ msg="The podman library could not be imported")
+ self.podmanError = podmanError
+ super().__init__(module)
+ self.client = PodmanClient(
+ base_url="http+unix:/run/podman/podman.sock")
+
+ def get_containers(self):
+ try:
+ containers = self.client.containers.list(
+ all=True, ignore_removed=True)
+ except self.podmanError.APIError as e:
+ self.module.fail_json(failed=True,
+ msg=f"Internal error: {e.explanation}")
+ names = self.params.get('name')
+ if names and not isinstance(names, list):
+ names = [names]
+
+ for container in containers:
+ container.reload()
+ container_name = container.attrs['Name']
+ # Match the Docker worker: no name filter means all containers.
+ if names and container_name not in names:
+ continue
+ self.results['_containers'].append(container.attrs)
+ self.results[container_name] = container.attrs
def main():
argument_spec = dict(
name=dict(required=False, type='list', default=[]),
- api_version=dict(required=False, type='str', default='auto')
+ api_version=dict(required=False, type='str', default='auto'),
+ container_engine=dict(required=True, type='str'),
+ action=dict(required=True, type='str',
+ choices=['get_containers']),
)
module = AnsibleModule(argument_spec=argument_spec)
- results = dict(changed=False, _containers=[])
- client = get_docker_client()(version=module.params.get('api_version'))
- containers = client.containers()
- names = module.params.get('name')
- if names and not isinstance(names, list):
- names = [names]
- for container in containers:
- for container_name in container['Names']:
- # remove '/' prefix character
- container_name = container_name[1:]
- if names and container_name not in names:
- continue
- results['_containers'].append(container)
- results[container_name] = container
- module.exit_json(**results)
+ cw: ContainerFactsWorker = None
+ try:
+ if module.params.get('container_engine') == 'docker':
+ cw = DockerFactsWorker(module)
+ else:
+ cw = PodmanFactsWorker(module)
+
+ result = bool(getattr(cw, module.params.get('action'))())
+ module.exit_json(result=result, **cw.results)
+ except Exception:
+ module.fail_json(changed=True, msg=repr(format_exc()),
+ **getattr(cw, 'results', {}))
if __name__ == "__main__":
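
The Docker worker filters on names as reported by the engine, which Docker prefixes with '/'. A minimal sketch of that normalization and filtering over stubbed API output (container data is hypothetical):

    # Shaped like docker.APIClient().containers() output.
    containers = [
        {"Names": ["/glance_api"], "State": "running"},
        {"Names": ["/keystone"], "State": "running"},
    ]

    def filter_containers(containers, names=None):
        results = {"_containers": []}
        for container in containers:
            for name in container["Names"]:
                name = name[1:]  # strip Docker's leading '/'
                if names and name not in names:
                    continue
                results["_containers"].append(container)
                results[name] = container
        return results

    print(sorted(filter_containers(containers, ["glance_api"])))
    # ['_containers', 'glance_api']
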
diff --git a/ansible/library/kolla_container_volume_facts.py b/ansible/library/kolla_container_volume_facts.py
new file mode 100644
index 0000000000..a5aba0c9e1
--- /dev/null
+++ b/ansible/library/kolla_container_volume_facts.py
@@ -0,0 +1,108 @@
+# Copyright 2023 StackHPC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from ansible.module_utils.basic import AnsibleModule
+
+DOCUMENTATION = '''
+---
+module: kolla_container_volume_facts
+short_description: Module for collecting container volume facts
+description:
+ - A module targeted at collecting container volume facts. It is used
+ for detecting whether the container volume exists on a host.
+options:
+ container_engine:
+ description:
+ - Name of container engine to use
+ required: True
+ type: str
+ api_version:
+ description:
+ - The API version for docker-py to use when contacting Docker
+ required: False
+ type: str
+ default: auto
+ name:
+ description:
+ - Name or names of the container volumes
+ required: False
+ type: str or list
+author: Jeffrey Zhang / Michal Nasiadka
+'''
+
+EXAMPLES = '''
+- hosts: all
+ tasks:
+ - name: Gather docker facts
+ kolla_container_volume_facts:
+
+ - name: Gather glance container facts
+ kolla_container_volume_facts:
+ container_engine: docker
+ name:
+ - glance_api
+ - glance_registry
+'''
+
+
+def get_docker_client():
+ import docker
+ return docker.APIClient
+
+
+def get_docker_volumes(api_version):
+ client = get_docker_client()(version=api_version)
+ return client.volumes()['Volumes']
+
+
+def get_podman_volumes():
+ from podman import PodmanClient
+
+ client = PodmanClient(base_url="http+unix:/run/podman/podman.sock")
+ volumes = []
+ for volume in client.volumes.list():
+ volumes.append(volume.attrs)
+ return volumes
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=False, type='list', default=[]),
+ api_version=dict(required=False, type='str', default='auto'),
+ container_engine=dict(required=True, type='str')
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ results = dict(changed=False, _volumes=[])
+ if module.params.get('container_engine') == 'docker':
+ volumes = get_docker_volumes(module.params.get('api_version'))
+ else:
+ volumes = get_podman_volumes()
+
+ names = module.params.get('name')
+ if names and not isinstance(names, list):
+ names = [names]
+ for volume in volumes:
+ volume_name = volume['Name']
+ if names and volume_name not in names:
+ continue
+ results['_volumes'].append(volume)
+ results[volume_name] = volume
+ module.exit_json(**results)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible/library/kolla_docker.py b/ansible/library/kolla_docker.py
deleted file mode 100644
index 49a5bdf887..0000000000
--- a/ansible/library/kolla_docker.py
+++ /dev/null
@@ -1,389 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright 2015 Sam Yaple
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# FIXME(yoctozepto): this module does *not* validate "common_options" which are
-# a hacky way to seed most usages of kolla_docker in kolla-ansible ansible
-# playbooks - caution has to be exerted when setting "common_options"
-
-import traceback
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.kolla_docker_worker import DockerWorker
-
-DOCUMENTATION = '''
----
-module: kolla_docker
-short_description: Module for controlling Docker
-description:
- - A module targeting at controlling Docker as used by Kolla.
-options:
- common_options:
- description:
- - A dict containing common params such as login info
- required: False
- type: dict
- default: dict()
- action:
- description:
- - The action the module should take
- required: True
- type: str
- choices:
- - compare_container
- - compare_image
- - create_volume
- - ensure_image
- - get_container_env
- - get_container_state
- - pull_image
- - remove_container
- - remove_image
- - remove_volume
- - recreate_or_restart_container
- - restart_container
- - start_container
- - stop_container
- - stop_container_and_remove_container
- api_version:
- description:
- - The version of the api for docker-py to use when contacting docker
- required: False
- type: str
- default: auto
- auth_email:
- description:
- - The email address used to authenticate
- required: False
- type: str
- auth_password:
- description:
- - The password used to authenticate
- required: False
- type: str
- auth_registry:
- description:
- - The registry to authenticate
- required: False
- type: str
- auth_username:
- description:
- - The username used to authenticate
- required: False
- type: str
- command:
- description:
- - The command to execute in the container
- required: False
- type: str
- detach:
- description:
- - Detach from the container after it is created
- required: False
- default: True
- type: bool
- name:
- description:
- - Name of the container or volume to manage
- required: False
- type: str
- environment:
- description:
- - The environment to set for the container
- required: False
- type: dict
- image:
- description:
- - Name of the docker image
- required: False
- type: str
- ipc_mode:
- description:
- - Set docker ipc namespace
- required: False
- type: str
- default: None
- choices:
- - host
- cap_add:
- description:
- - Add capabilities to docker container
- required: False
- type: list
- default: list()
- security_opt:
- description:
- - Set container security profile
- required: False
- type: list
- default: list()
- labels:
- description:
- - List of labels to apply to container
- required: False
- type: dict
- default: dict()
- pid_mode:
- description:
- - Set docker pid namespace
- required: False
- type: str
- default: None
- choices:
- - host
- cgroupns_mode:
- description:
- - Set docker cgroups namespace (default depends on Docker config)
- - Supported only with Docker 20.10 (Docker API 1.41) and later
- required: False
- type: str
- default: None
- choices:
- - private
- - host
- privileged:
- description:
- - Set the container to privileged
- required: False
- default: False
- type: bool
- remove_on_exit:
- description:
- - When not detaching from container, remove on successful exit
- required: False
- default: True
- type: bool
- restart_policy:
- description:
- - When docker restarts the container (does not affect checks)
- required: False
- type: str
- choices:
- - no
- - on-failure
- - always
- - unless-stopped
- restart_retries:
- description:
- - How many times to attempt a restart if 'on-failure' policy is set
- type: int
- default: 10
- tmpfs:
- description:
- - List of paths to mount as tmpfs.
- required: False
- type: list
- volumes:
- description:
- - Set volumes for docker to use
- required: False
- type: list
- volumes_from:
- description:
- - Name or id of container(s) to use volumes from
- required: True
- type: list
- state:
- description:
- - Check container status
- required: False
- type: str
- choices:
- - running
- - exited
- - paused
- tty:
- description:
- - Allocate TTY to container
- required: False
- default: False
- type: bool
- client_timeout:
- description:
- - Docker client timeout in seconds
- required: False
- default: 120
- type: int
- healthcheck:
- description:
- - Container healthcheck configuration
- required: False
- default: dict()
- type: dict
-author: Sam Yaple
-'''
-
-EXAMPLES = '''
-- hosts: kolla_docker
- tasks:
- - name: Start container
- kolla_docker:
- image: ubuntu
- name: test_container
- action: start_container
- - name: Remove container
- kolla_docker:
- name: test_container
- action: remove_container
- - name: Pull image without starting container
- kolla_docker:
- action: pull_image
- image: private-registry.example.com:5000/ubuntu
- - name: Create named volume
- kolla_docker:
- action: create_volume
- name: name_of_volume
- - name: Remove named volume
- kolla_docker:
- action: remove_volume
- name: name_of_volume
- - name: Remove image
- kolla_docker:
- action: remove_image
- image: name_of_image
-'''
-
-
-def generate_module():
- # NOTE(jeffrey4l): add empty string '' to choices let us use
- # pid_mode: "{{ service.pid_mode | default ('') }}" in yaml
- argument_spec = dict(
- common_options=dict(required=False, type='dict', default=dict()),
- action=dict(required=True, type='str',
- choices=['compare_container',
- 'compare_image',
- 'create_volume',
- 'ensure_image',
- 'get_container_env',
- 'get_container_state',
- 'pull_image',
- 'recreate_or_restart_container',
- 'remove_container',
- 'remove_image',
- 'remove_volume',
- 'restart_container',
- 'start_container',
- 'stop_container',
- 'stop_and_remove_container']),
- api_version=dict(required=False, type='str', default='auto'),
- auth_email=dict(required=False, type='str'),
- auth_password=dict(required=False, type='str', no_log=True),
- auth_registry=dict(required=False, type='str'),
- auth_username=dict(required=False, type='str'),
- command=dict(required=False, type='str'),
- detach=dict(required=False, type='bool', default=True),
- labels=dict(required=False, type='dict', default=dict()),
- name=dict(required=False, type='str'),
- environment=dict(required=False, type='dict'),
- healthcheck=dict(required=False, type='dict'),
- image=dict(required=False, type='str'),
- ipc_mode=dict(required=False, type='str', choices=['',
- 'host',
- 'private',
- 'shareable']),
- cap_add=dict(required=False, type='list', default=list()),
- security_opt=dict(required=False, type='list', default=list()),
- pid_mode=dict(required=False, type='str', choices=['host', '']),
- cgroupns_mode=dict(required=False, type='str',
- choices=['private', 'host']),
- privileged=dict(required=False, type='bool', default=False),
- graceful_timeout=dict(required=False, type='int', default=10),
- remove_on_exit=dict(required=False, type='bool', default=True),
- restart_policy=dict(required=False, type='str', choices=[
- 'no',
- 'on-failure',
- 'always',
- 'unless-stopped']),
- restart_retries=dict(required=False, type='int', default=10),
- state=dict(required=False, type='str', default='running',
- choices=['running',
- 'exited',
- 'paused']),
- tls_verify=dict(required=False, type='bool', default=False),
- tls_cert=dict(required=False, type='str'),
- tls_key=dict(required=False, type='str'),
- tls_cacert=dict(required=False, type='str'),
- tmpfs=dict(required=False, type='list'),
- volumes=dict(required=False, type='list'),
- volumes_from=dict(required=False, type='list'),
- dimensions=dict(required=False, type='dict', default=dict()),
- tty=dict(required=False, type='bool', default=False),
- client_timeout=dict(required=False, type='int', default=120),
- ignore_missing=dict(required=False, type='bool', default=False),
- )
- required_if = [
- ['action', 'pull_image', ['image']],
- ['action', 'start_container', ['image', 'name']],
- ['action', 'compare_container', ['name']],
- ['action', 'compare_image', ['name']],
- ['action', 'create_volume', ['name']],
- ['action', 'ensure_image', ['image']],
- ['action', 'get_container_env', ['name']],
- ['action', 'get_container_state', ['name']],
- ['action', 'recreate_or_restart_container', ['name']],
- ['action', 'remove_container', ['name']],
- ['action', 'remove_image', ['image']],
- ['action', 'remove_volume', ['name']],
- ['action', 'restart_container', ['name']],
- ['action', 'stop_container', ['name']],
- ['action', 'stop_and_remove_container', ['name']],
- ]
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_if=required_if,
- bypass_checks=False
- )
-
- new_args = module.params.pop('common_options', dict())
-
- # NOTE(jeffrey4l): merge the environment
- env = module.params.pop('environment', dict())
- if env:
- new_args['environment'].update(env)
-
- for key, value in module.params.items():
- if key in new_args and value is None:
- continue
- new_args[key] = value
-
- # if pid_mode = ""/None/False, remove it
- if not new_args.get('pid_mode', False):
- new_args.pop('pid_mode', None)
- # if ipc_mode = ""/None/False, remove it
- if not new_args.get('ipc_mode', False):
- new_args.pop('ipc_mode', None)
-
- module.params = new_args
- return module
-
-
-def main():
- module = generate_module()
-
- dw = None
- try:
- dw = DockerWorker(module)
- # TODO(inc0): We keep it bool to have ansible deal with consistent
- # types. If we ever add method that will have to return some
- # meaningful data, we need to refactor all methods to return dicts.
- result = bool(getattr(dw, module.params.get('action'))())
- module.exit_json(changed=dw.changed, result=result, **dw.result)
- except Exception:
- module.fail_json(changed=True, msg=repr(traceback.format_exc()),
- **getattr(dw, 'result', {}))
-
-
-if __name__ == '__main__':
- main()
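The block above removes the tail of the old monolithic kolla_docker module, whose per-action parameter validation was wired through AnsibleModule's required_if rules. For readers unfamiliar with that mechanism, here is a minimal self-contained sketch; the mini-module and its argument spec are hypothetical illustrations, not part of this change:

    # Hypothetical mini-module illustrating AnsibleModule's required_if rules.
    from ansible.module_utils.basic import AnsibleModule


    def main():
        module = AnsibleModule(
            argument_spec=dict(
                action=dict(required=True, type='str',
                            choices=['pull_image', 'remove_image']),
                image=dict(required=False, type='str'),
            ),
            # each rule reads: when 'action' equals 'pull_image',
            # the 'image' parameter must be supplied
            required_if=[
                ['action', 'pull_image', ['image']],
                ['action', 'remove_image', ['image']],
            ],
        )
        module.exit_json(changed=False, action=module.params['action'])


    if __name__ == '__main__':
        main()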
diff --git a/ansible/library/kolla_toolbox.py b/ansible/library/kolla_toolbox.py
index 7ad3aa99f1..d0493a24f4 100644
--- a/ansible/library/kolla_toolbox.py
+++ b/ansible/library/kolla_toolbox.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python3
-
# Copyright 2016 99cloud Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,15 +12,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from distutils.version import StrictVersion
-import docker
import json
import re
-from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.basic import AnsibleModule
from ast import literal_eval
+from shlex import split
DOCUMENTATION = '''
---
@@ -33,6 +29,11 @@
- A module for invoking an Ansible module in the kolla_toolbox
container, as used by the Kolla project.
options:
+ container_engine:
+ description:
+ - Name of container engine to use
+ required: True
+ type: str
module_name:
description:
- The module name to invoke
@@ -73,10 +74,12 @@
tasks:
- name: Ensure the directory is absent
kolla_toolbox:
+ container_engine: docker
module_name: file
module_args: path=/tmp/a state=absent
- name: Create mysql database
kolla_toolbox:
+ container_engine: docker
module_name: mysql_db
module_args:
login_host: 192.168.1.10
@@ -85,9 +88,10 @@
name: testdb
- name: Creating default user role
kolla_toolbox:
- module_name: os_keystone_role
+ container_engine: docker
+ module_name: openstack.cloud.identity_role
module_args:
- name: _member_
+ name: member
auth: "{{ '{{ openstack_keystone_auth }}' }}"
module_extra_vars:
openstack_keystone_auth:
@@ -111,13 +115,19 @@ def gen_commandline(params):
if params.get('module_name'):
command.extend(['-m', params.get('module_name')])
if params.get('module_args'):
- if StrictVersion(ansible_version) < StrictVersion('2.11.0'):
- module_args = params.get('module_args')
- else:
+ try:
module_args = literal_eval(params.get('module_args'))
+ except SyntaxError:
+ if not isinstance(params.get('module_args'), str):
+ raise
+
+ # account for string arguments
+ module_args = split(params.get('module_args'))
if isinstance(module_args, dict):
module_args = ' '.join("{}='{}'".format(key, value)
for key, value in module_args.items())
+ if isinstance(module_args, list):
+ module_args = ' '.join(module_args)
command.extend(['-a', module_args])
if params.get('module_extra_vars'):
extra_vars = params.get('module_extra_vars')
@@ -128,24 +138,11 @@ def gen_commandline(params):
def get_docker_client():
+ import docker
return docker.APIClient
-def docker_supports_environment_in_exec(client):
- docker_version = StrictVersion(client.api_version)
- return docker_version >= StrictVersion('1.25')
-
-
-def main():
- specs = dict(
- module_name=dict(required=True, type='str'),
- module_args=dict(type='str'),
- module_extra_vars=dict(type='json'),
- api_version=dict(required=False, type='str', default='auto'),
- timeout=dict(required=False, type='int', default=180),
- user=dict(required=False, type='str'),
- )
- module = AnsibleModule(argument_spec=specs, bypass_checks=True)
+def use_docker(module):
client = get_docker_client()(
version=module.params.get('api_version'),
timeout=module.params.get('timeout'))
@@ -160,80 +157,122 @@ def main():
if 'user' in module.params:
kwargs['user'] = module.params['user']
- # NOTE(mgoddard): Docker 1.12 has API version 1.24, and was installed by
- # kolla-ansible bootstrap-servers on Rocky and earlier releases. This API
- # version does not have support for specifying environment variables for
- # exec jobs, which is necessary to use the Ansible JSON output formatter.
- # While we continue to support this version of Docker, fall back to the old
- # regex-based method for API version 1.24 and earlier.
- # TODO(mgoddard): Remove this conditional (keep the if) when we require
- # Docker API version 1.25+.
- if docker_supports_environment_in_exec(client):
- # Use the JSON output formatter, so that we can parse it.
+ # Use the JSON output formatter, so that we can parse it.
+ environment = {"ANSIBLE_STDOUT_CALLBACK": "json",
+ "ANSIBLE_LOAD_CALLBACK_PLUGINS": "True"}
+ job = client.exec_create(kolla_toolbox, command_line,
+ environment=environment, **kwargs)
+ json_output, error = client.exec_start(job, demux=True)
+ if error:
+ module.log(msg='Inner module stderr: %s' % error)
+
+ try:
+ output = json.loads(json_output)
+ except Exception:
+ module.fail_json(
+ msg='Cannot parse the inner module output: %s' % json_output)
+
+ # Expected format is the following:
+ # {
+ #   "plays": [
+ #     {
+ #       "tasks": [
+ #         {
+ #           "hosts": {
+ #             "localhost": {
+ #               ...
+ #             }
+ #           }
+ #         }
+ #       ]
+ #     }
+ #   ]
+ # }
+ try:
+ ret = output['plays'][0]['tasks'][0]['hosts']['localhost']
+ except (KeyError, IndexError):
+ module.fail_json(
+ msg='Ansible JSON output has unexpected format: %s' % output)
+
+ # Remove Ansible's internal variables from returned fields.
+ ret.pop('_ansible_no_log', None)
+ return ret
+
+
+def get_kolla_toolbox():
+ from podman import PodmanClient
+
+ with PodmanClient(base_url="http+unix:/run/podman/podman.sock") as client:
+ for cont in client.containers.list(all=True):
+ cont.reload()
+ if cont.name == 'kolla_toolbox' and cont.status == 'running':
+ return cont
+
+
+def use_podman(module):
+ from podman.errors.exceptions import APIError
+
+ try:
+ kolla_toolbox = get_kolla_toolbox()
+ if not kolla_toolbox:
+ module.fail_json(msg='kolla_toolbox container is not running.')
+
+ kwargs = {}
+ if 'user' in module.params:
+ kwargs['user'] = module.params['user']
environment = {"ANSIBLE_STDOUT_CALLBACK": "json",
"ANSIBLE_LOAD_CALLBACK_PLUGINS": "True"}
- job = client.exec_create(kolla_toolbox, command_line,
- environment=environment, **kwargs)
- json_output = client.exec_start(job)
+ command_line = gen_commandline(module.params)
- try:
- output = json.loads(json_output)
- except Exception:
- module.fail_json(
- msg='Can not parse the inner module output: %s' % json_output)
-
- # Expected format is the following:
- # {
- # "plays": [
- # {
- # "tasks": [
- # {
- # "hosts": {
- # "localhost": {
- #
- # }
- # }
- # }
- # ]
- # {
- # ]
- # }
- try:
- ret = output['plays'][0]['tasks'][0]['hosts']['localhost']
- except (KeyError, IndexError):
- module.fail_json(
- msg='Ansible JSON output has unexpected format: %s' % output)
+ _, raw_output = kolla_toolbox.exec_run(
+ command_line,
+ environment=environment,
+ tty=True,
+ **kwargs
+ )
+ except APIError as e:
+ module.fail_json(msg=f'Encountered Podman API error: {e.explanation}')
+
+ try:
+ json_output = raw_output.decode('utf-8')
+ output = json.loads(json_output)
+ except Exception:
+ module.fail_json(
+ msg='Cannot parse the inner module output: %s' % json_output)
+
+ try:
+ ret = output['plays'][0]['tasks'][0]['hosts']['localhost']
+ except (KeyError, IndexError):
+ module.fail_json(
+ msg='Ansible JSON output has unexpected format: %s' % output)
+
+ # Remove Ansible's internal variables from returned fields.
+ ret.pop('_ansible_no_log', None)
+
+ return ret
+
+
+def main():
+ specs = dict(
+ container_engine=dict(required=True, type='str'),
+ module_name=dict(required=True, type='str'),
+ module_args=dict(type='str'),
+ module_extra_vars=dict(type='json'),
+ api_version=dict(required=False, type='str', default='auto'),
+ timeout=dict(required=False, type='int', default=180),
+ user=dict(required=False, type='str'),
+ )
+ module = AnsibleModule(argument_spec=specs, bypass_checks=True)
- # Remove Ansible's internal variables from returned fields.
- ret.pop('_ansible_no_log', None)
+ container_engine = module.params.get('container_engine').lower()
+ if container_engine == 'docker':
+ result = use_docker(module)
+ elif container_engine == 'podman':
+ result = use_podman(module)
else:
- job = client.exec_create(kolla_toolbox, command_line, **kwargs)
- output = client.exec_start(job)
-
- for exp in [JSON_REG, NON_JSON_REG]:
- m = exp.match(output)
- if m:
- inner_output = m.groupdict().get('stdout')
- status = m.groupdict().get('status')
- break
- else:
- module.fail_json(
- msg='Can not parse the inner module output: %s' % output)
-
- ret = dict()
- try:
- ret = json.loads(inner_output)
- except ValueError:
- # Some modules (e.g. command) do not produce a JSON output.
- # Instead, check the status, and assume changed on success.
- ret['stdout'] = inner_output
- if status != "SUCCESS":
- ret['failed'] = True
- else:
- # No way to know whether changed - assume yes.
- ret['changed'] = True
-
- module.exit_json(**ret)
+ module.fail_json(msg='Missing or invalid container engine.')
+
+ module.exit_json(**result)
if __name__ == "__main__":
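Both the Docker and Podman code paths above run the inner module under the Ansible JSON stdout callback and then pluck the per-host result out of the plays/tasks/hosts tree sketched in the comment. A minimal standalone sketch of that extraction, with an invented sample payload:

    import json

    # invented sample shaped like ANSIBLE_STDOUT_CALLBACK=json output
    raw = ('{"plays": [{"tasks": [{"hosts": {"localhost": '
           '{"changed": true, "_ansible_no_log": false}}}]}]}')

    output = json.loads(raw)
    try:
        ret = output['plays'][0]['tasks'][0]['hosts']['localhost']
    except (KeyError, IndexError):
        raise SystemExit('Ansible JSON output has unexpected format')

    # strip Ansible-internal keys before returning, as the module does
    ret.pop('_ansible_no_log', None)
    print(ret)  # {'changed': True}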
diff --git a/ansible/mariadb.yml b/ansible/mariadb.yml
new file mode 100644
index 0000000000..ab1c90b0f7
--- /dev/null
+++ b/ansible/mariadb.yml
@@ -0,0 +1,92 @@
+---
+# For MariaDB we need to be careful about restarting services, to avoid losing quorum.
+- name: Apply role mariadb
+ gather_facts: false
+ hosts:
+ - mariadb
+ - '&enable_mariadb_True'
+ max_fail_percentage: >-
+ {{ mariadb_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
+ tags:
+ - mariadb
+ tasks:
+ - import_role:
+ name: mariadb
+
+- name: Restart mariadb services
+ gather_facts: false
+ hosts:
+ - mariadb_restart
+ - '&enable_mariadb_True'
+ # Restart in batches
+ serial: "33%"
+ max_fail_percentage: >-
+ {{ mariadb_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
+ tags:
+ - mariadb
+ tasks:
+ - import_role:
+ name: mariadb
+ tasks_from: restart_services.yml
+
+- name: Start mariadb services
+ gather_facts: false
+ hosts:
+ - mariadb_start
+ - '&enable_mariadb_True'
+ # Start in batches
+ serial: "33%"
+ max_fail_percentage: >-
+ {{ mariadb_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
+ tags:
+ - mariadb
+ tasks:
+ - import_role:
+ name: mariadb
+ tasks_from: restart_services.yml
+
+- name: Restart bootstrap mariadb service
+ gather_facts: false
+ hosts:
+ - mariadb_bootstrap_restart
+ - '&enable_mariadb_True'
+ max_fail_percentage: >-
+ {{ mariadb_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
+ tags:
+ - mariadb
+ tasks:
+ - import_role:
+ name: mariadb
+ tasks_from: restart_services.yml
+
+- name: Apply mariadb post-configuration
+ gather_facts: false
+ hosts:
+ - mariadb
+ - '&enable_mariadb_True'
+ max_fail_percentage: >-
+ {{ mariadb_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
+ tags:
+ - mariadb
+ tasks:
+ - name: Include mariadb post-deploy.yml
+ include_role:
+ name: mariadb
+ tasks_from: post-deploy.yml
+ when: kolla_action in ['deploy', 'reconfigure', 'upgrade']
+
+ - name: Include mariadb post-upgrade.yml
+ include_role:
+ name: mariadb
+ tasks_from: post-upgrade.yml
+ when: kolla_action == 'upgrade'
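Each play above resolves its failure threshold through a chain of Jinja2 default() filters: the MariaDB-specific variable wins, then the global kolla_max_fail_percentage, then the literal 100. A sketch of the equivalent precedence in Python, treating unset variables as None for simplicity (Jinja2's default() actually tests for undefined variables):

    def resolve_max_fail_percentage(mariadb_max_fail_percentage=None,
                                    kolla_max_fail_percentage=None):
        # mirrors: {{ mariadb_max_fail_percentage |
        #             default(kolla_max_fail_percentage) | default(100) }}
        if mariadb_max_fail_percentage is not None:
            return mariadb_max_fail_percentage
        if kolla_max_fail_percentage is not None:
            return kolla_max_fail_percentage
        return 100


    assert resolve_max_fail_percentage() == 100
    assert resolve_max_fail_percentage(kolla_max_fail_percentage=20) == 20
    assert resolve_max_fail_percentage(10, 20) == 10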
diff --git a/ansible/mariadb_backup.yml b/ansible/mariadb_backup.yml
index e143819be2..dbe6070c2f 100644
--- a/ansible/mariadb_backup.yml
+++ b/ansible/mariadb_backup.yml
@@ -1,6 +1,10 @@
---
- name: Backup MariaDB
hosts: mariadb
+ max_fail_percentage: >-
+ {{ mariadb_backup_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: mariadb,
tags: mariadb,
diff --git a/ansible/mariadb_recovery.yml b/ansible/mariadb_recovery.yml
index 68b489cebb..a3d72d40dd 100644
--- a/ansible/mariadb_recovery.yml
+++ b/ansible/mariadb_recovery.yml
@@ -1,6 +1,10 @@
---
- name: Recover mariadb
hosts: mariadb
+ max_fail_percentage: >-
+ {{ mariadb_recovery_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: mariadb,
tags: mariadb,
diff --git a/ansible/module_utils/kolla_container_worker.py b/ansible/module_utils/kolla_container_worker.py
new file mode 100644
index 0000000000..bc0f3b4715
--- /dev/null
+++ b/ansible/module_utils/kolla_container_worker.py
@@ -0,0 +1,578 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import ABC
+from abc import abstractmethod
+import logging
+import shlex
+
+from ansible.module_utils.kolla_systemd_worker import SystemdWorker
+
+COMPARE_CONFIG_CMD = ['/usr/local/bin/kolla_set_configs', '--check']
+LOG = logging.getLogger(__name__)
+
+
+class ContainerWorker(ABC):
+ def __init__(self, module):
+ self.module = module
+ self.params = self.module.params
+ self.changed = False
+ # Use this to store arguments to pass to exit_json().
+ self.result = {}
+
+ self.systemd = SystemdWorker(self.params)
+
+ # NOTE(mgoddard): The names used by Docker are inconsistent between
+ # configuration of a container's resources and the resources in
+ # container_info['HostConfig']. This provides a mapping between the
+ # two.
+ self.dimension_map = {
+ 'mem_limit': 'Memory', 'mem_reservation': 'MemoryReservation',
+ 'memswap_limit': 'MemorySwap', 'cpu_period': 'CpuPeriod',
+ 'cpu_quota': 'CpuQuota', 'cpu_shares': 'CpuShares',
+ 'cpuset_cpus': 'CpusetCpus', 'cpuset_mems': 'CpusetMems',
+ 'kernel_memory': 'KernelMemory', 'blkio_weight': 'BlkioWeight',
+ 'ulimits': 'Ulimits'}
+
+ @abstractmethod
+ def check_image(self):
+ pass
+
+ @abstractmethod
+ def get_container_info(self):
+ pass
+
+ @abstractmethod
+ def check_container(self):
+ pass
+
+ def compare_container(self):
+ container = self.check_container()
+ if (not container or
+ self.check_container_differs() or
+ self.compare_config() or
+ self.systemd.check_unit_change()):
+ self.changed = True
+ return self.changed
+
+ def check_container_differs(self):
+ container_info = self.get_container_info()
+ if not container_info:
+ return True
+
+ return (
+ self.compare_cap_add(container_info) or
+ self.compare_security_opt(container_info) or
+ self.compare_image(container_info) or
+ self.compare_ipc_mode(container_info) or
+ self.compare_labels(container_info) or
+ self.compare_privileged(container_info) or
+ self.compare_pid_mode(container_info) or
+ self.compare_cgroupns_mode(container_info) or
+ self.compare_tmpfs(container_info) or
+ self.compare_volumes(container_info) or
+ self.compare_volumes_from(container_info) or
+ self.compare_environment(container_info) or
+ self.compare_container_state(container_info) or
+ self.compare_dimensions(container_info) or
+ self.compare_command(container_info) or
+ self.compare_healthcheck(container_info)
+ )
+
+ def compare_ipc_mode(self, container_info):
+ new_ipc_mode = self.params.get('ipc_mode')
+ current_ipc_mode = container_info['HostConfig'].get('IpcMode')
+ if not current_ipc_mode:
+ current_ipc_mode = None
+
+ # only check IPC mode if it is specified
+ if new_ipc_mode is not None and new_ipc_mode != current_ipc_mode:
+ return True
+ return False
+
+ def compare_cap_add(self, container_info):
+ new_cap_add = self.params.get('cap_add', list())
+ try:
+ current_cap_add = container_info['HostConfig'].get('CapAdd', None)
+ except (KeyError, TypeError):
+     current_cap_add = None
+
+ if not current_cap_add:
+ current_cap_add = list()
+ if set(new_cap_add).symmetric_difference(set(current_cap_add)):
+ return True
+
+ def compare_security_opt(self, container_info):
+ ipc_mode = self.params.get('ipc_mode')
+ pid_mode = self.params.get('pid_mode')
+ privileged = self.params.get('privileged', False)
+ # NOTE(jeffrey4l) security opt is disabled when using host ipc mode or
+ # host pid mode or privileged. So no need to compare security opts
+ if ipc_mode == 'host' or pid_mode == 'host' or privileged:
+ return False
+ new_sec_opt = self.params.get('security_opt', list())
+ try:
+ current_sec_opt = container_info['HostConfig'].get('SecurityOpt',
+ list())
+ except (KeyError, TypeError):
+     current_sec_opt = None
+
+ if not current_sec_opt:
+ current_sec_opt = list()
+ if set(new_sec_opt).symmetric_difference(set(current_sec_opt)):
+ return True
+
+ @abstractmethod
+ def compare_pid_mode(self, container_info):
+ pass
+
+ def compare_cgroupns_mode(self, container_info):
+ new_cgroupns_mode = self.params.get('cgroupns_mode')
+ if new_cgroupns_mode is None:
+ # means we don't care what it is
+ return False
+ current_cgroupns_mode = (container_info['HostConfig']
+ .get('CgroupnsMode')) or \
+ (container_info['HostConfig']
+ .get('CgroupMode'))
+ if current_cgroupns_mode in ('', None):
+ # means the container was created on Docker pre-20.10
+ # it behaves like 'host'
+ current_cgroupns_mode = 'host'
+ return new_cgroupns_mode != current_cgroupns_mode
+
+ def compare_privileged(self, container_info):
+ new_privileged = self.params.get('privileged')
+ current_privileged = container_info['HostConfig']['Privileged']
+ if new_privileged != current_privileged:
+ return True
+
+ @abstractmethod
+ def compare_image(self, container_info=None):
+ pass
+
+ def compare_labels(self, container_info):
+ new_labels = self.params.get('labels')
+ current_labels = container_info['Config'].get('Labels', dict())
+ image_labels = self.check_image().get('Labels', dict())
+ for k, v in image_labels.items():
+ if k in new_labels:
+ if v != new_labels[k]:
+ return True
+ else:
+ del current_labels[k]
+
+ if new_labels != current_labels:
+ return True
+
+ def compare_tmpfs(self, container_info):
+ new_tmpfs = self.generate_tmpfs()
+ current_tmpfs = container_info['HostConfig'].get('Tmpfs')
+ if not new_tmpfs:
+ new_tmpfs = []
+ if not current_tmpfs:
+ current_tmpfs = []
+
+ if set(current_tmpfs).symmetric_difference(set(new_tmpfs)):
+ return True
+
+ def compare_volumes_from(self, container_info):
+ new_vols_from = self.params.get('volumes_from')
+ current_vols_from = container_info['HostConfig'].get('VolumesFrom')
+ if not new_vols_from:
+ new_vols_from = list()
+ if not current_vols_from:
+ current_vols_from = list()
+
+ if set(current_vols_from).symmetric_difference(set(new_vols_from)):
+ return True
+
+ @abstractmethod
+ def compare_volumes(self, container_info):
+ pass
+
+ def dimensions_differ(self, a, b, key):
+ """Compares two docker dimensions
+
+ As there are two representations of dimensions in docker, we should
+ normalize them to compare if they are the same.
+
+ If the dimension is no more supported due docker update,
+ an error is thrown to operator to fix the dimensions' config.
+
+ The available representations can be found at:
+
+ https://docs.docker.com/config/containers/resource_constraints/
+
+
+ :param a: Integer or String that represents a number followed or not
+ by "b", "k", "m" or "g".
+ :param b: Integer or String that represents a number followed or not
+ by "b", "k", "m" or "g".
+ :return: True if 'a' has the same logical value as 'b' or else
+ False.
+ """
+
+ if a is None or b is None:
+ msg = ("The dimension [%s] is no more supported by Docker, "
+ "please remove it from yours configs or change "
+ "to the new one.") % key
+ LOG.error(msg)
+ self.module.fail_json(
+ failed=True,
+ msg=msg
+ )
+ return
+
+ unit_sizes = {
+ 'b': 1,
+ 'k': 1024
+ }
+ unit_sizes['m'] = unit_sizes['k'] * 1024
+ unit_sizes['g'] = unit_sizes['m'] * 1024
+ a = str(a)
+ b = str(b)
+ a_last_char = a[-1].lower()
+ b_last_char = b[-1].lower()
+ error_msg = ("The docker dimension unit [%s] is not supported for "
+ "the dimension [%s]. The currently supported units "
+ "are ['b', 'k', 'm', 'g'].")
+ if not a_last_char.isnumeric():
+ if a_last_char in unit_sizes:
+ a = str(int(a[:-1]) * unit_sizes[a_last_char])
+ else:
+ LOG.error(error_msg, a_last_char, key)
+ self.module.fail_json(
+     failed=True,
+     msg=error_msg % (a_last_char, key)
+ )
+
+ if not b_last_char.isnumeric():
+ if b_last_char in unit_sizes:
+ b = str(int(b[:-1]) * unit_sizes[b_last_char])
+ else:
+ LOG.error(error_msg, b_last_char, key)
+ self.module.fail_json(
+     failed=True,
+     msg=error_msg % (b_last_char, key)
+ )
+ return a != b
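+
+ # For illustration (values invented): dimensions_differ('1g', 1073741824,
+ # 'mem_limit') is False, since both normalize to the same byte count,
+ # while dimensions_differ('512m', '1g', 'mem_limit') is True and marks
+ # the container for recreation.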
+
+ def compare_dimensions(self, container_info):
+ new_dimensions = self.params.get('dimensions')
+
+ if not self._dimensions_kernel_memory_removed:
+ self.dimension_map['kernel_memory'] = 'KernelMemory'
+
+ unsupported = set(new_dimensions.keys()) - \
+ set(self.dimension_map.keys())
+ if unsupported:
+ self.module.exit_json(
+ failed=True, msg=repr("Unsupported dimensions"),
+ unsupported_dimensions=unsupported)
+ current_dimensions = container_info['HostConfig']
+ for key1, key2 in self.dimension_map.items():
+ # NOTE(mgoddard): If a resource has been explicitly requested,
+ # check for a match. Otherwise, ensure it is set to the default.
+ if key1 in new_dimensions:
+ if key1 == 'ulimits':
+ if self.compare_ulimits(new_dimensions.get(key1),
+ current_dimensions.get(key2)):
+ return True
+ elif self.dimensions_differ(new_dimensions.get(key1),
+ current_dimensions.get(key2),
+ key1):
+ return True
+ elif current_dimensions.get(key2):
+ # The default values of all currently supported resources are
+ # '' or 0 - both falsy.
+ return True
+
+ def compare_environment(self, container_info):
+ if self.params.get('environment'):
+ current_env = dict()
+ for kv in container_info['Config'].get('Env', list()):
+ k, v = kv.split('=', 1)
+ current_env.update({k: v})
+
+ for k, v in self.params.get('environment').items():
+ if k not in current_env:
+ return True
+ if current_env[k] != v:
+ return True
+
+ def compare_container_state(self, container_info):
+ new_state = self.params.get('state')
+ current_state = container_info['State'].get('Status')
+
+ if new_state == "started" and current_state == "running":
+ return False
+ if new_state != current_state:
+ return True
+
+ def compare_ulimits(self, new_ulimits, current_ulimits):
+ # new_ulimits is a dict; convert it to a list of Ulimit
+ # instances.
+ new_ulimits = self.build_ulimits(new_ulimits)
+
+ def key(ulimit):
+ return ulimit['Name']
+
+ if current_ulimits is None:
+ current_ulimits = []
+ return sorted(new_ulimits, key=key) != sorted(current_ulimits, key=key)
+
+ def compare_command(self, container_info):
+ new_command = self.params.get('command')
+ if new_command is not None:
+ new_command_split = shlex.split(new_command)
+ new_path = new_command_split[0]
+ new_args = new_command_split[1:]
+ if (new_path != container_info['Path'] or
+ new_args != container_info['Args']):
+ return True
+
+ def compare_healthcheck(self, container_info):
+ new_healthcheck = self.parse_healthcheck(
+ self.params.get('healthcheck'))
+ current_healthcheck = container_info['Config'].get('Healthcheck')
+
+ healthcheck_map = {
+ 'test': 'Test',
+ 'retries': 'Retries',
+ 'interval': 'Interval',
+ 'start_period': 'StartPeriod',
+ 'timeout': 'Timeout'}
+
+ if new_healthcheck:
+ new_healthcheck = new_healthcheck['healthcheck']
+ if current_healthcheck:
+ new_healthcheck = dict((healthcheck_map.get(k, k), v)
+ for (k, v) in new_healthcheck.items())
+ return new_healthcheck != current_healthcheck
+ else:
+ return True
+ else:
+ if current_healthcheck:
+ return True
+
+ def parse_image(self):
+ full_image = self.params.get('image')
+
+ if '/' in full_image:
+ registry, image = full_image.split('/', 1)
+ else:
+ image = full_image
+
+ if ':' in image:
+ return full_image.rsplit(':', 1)
+ else:
+ return full_image, 'latest'
+
+ @abstractmethod
+ def pull_image(self):
+ pass
+
+ @abstractmethod
+ def remove_container(self):
+ pass
+
+ def generate_tmpfs(self):
+ tmpfs = self.params.get('tmpfs')
+ if tmpfs:
+ # NOTE(mgoddard): Filter out any empty strings.
+ tmpfs = [t for t in tmpfs if t]
+ return tmpfs
+
+ def generate_volumes(self, binds=None):
+ if not binds:
+ volumes = self.params.get('volumes') or self.params.get('volume')
+ else:
+ volumes = binds
+
+ if not volumes:
+ return None, None
+
+ vol_list = list()
+ vol_dict = dict()
+
+ for vol in volumes:
+ if len(vol) == 0:
+ continue
+
+ if ':' not in vol:
+ vol_list.append(vol)
+ continue
+
+ split_vol = vol.split(':')
+
+ if (len(split_vol) == 2 and
+ ('/' not in split_vol[0] or '/' in split_vol[1])):
+ split_vol.append('rw')
+
+ vol_list.append(split_vol[1])
+ vol_dict.update({
+ split_vol[0]: {
+ 'bind': split_vol[1],
+ 'mode': split_vol[2]
+ }
+ })
+
+ return vol_list, vol_dict
+
+ @abstractmethod
+ def build_ulimits(self, ulimits):
+ pass
+
+ @abstractmethod
+ def create_container(self):
+ pass
+
+ @abstractmethod
+ def recreate_or_restart_container(self):
+ pass
+
+ @abstractmethod
+ def start_container(self):
+ pass
+
+ def get_container_env(self):
+ name = self.params.get('name')
+ info = self.get_container_info()
+ if not info:
+ self.module.fail_json(msg="No such container: {}".format(name))
+ else:
+ envs = dict()
+ for env in info['Config']['Env']:
+ if '=' in env:
+ key, value = env.split('=', 1)
+ else:
+ key, value = env, ''
+ envs[key] = value
+
+ self.module.exit_json(**envs)
+
+ def get_container_state(self):
+ name = self.params.get('name')
+ info = self.get_container_info()
+ if not info:
+ self.module.fail_json(msg="No such container: {}".format(name))
+ else:
+ self.module.exit_json(**info['State'])
+
+ def parse_healthcheck(self, healthcheck):
+ if not healthcheck:
+ return None
+
+ result = dict(healthcheck={})
+
+ # All supported healthcheck parameters
+ supported = set(['test', 'interval', 'timeout', 'start_period',
+ 'retries'])
+ unsupported = set(healthcheck) - supported
+ missing = supported - set(healthcheck)
+ duration_options = set(['interval', 'timeout', 'start_period'])
+
+ if unsupported:
+ self.module.exit_json(failed=True,
+ msg=repr("Unsupported healthcheck options"),
+ unsupported_healthcheck=unsupported)
+
+ if missing:
+ self.module.exit_json(failed=True,
+ msg=repr("Missing healthcheck option"),
+ missing_healthcheck=missing)
+
+ for key in healthcheck:
+ value = healthcheck.get(key)
+ if key in duration_options:
+ try:
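+     # durations are converted from seconds to nanoseconds,
+     # the unit the container engine healthcheck API expects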
+ result['healthcheck'][key] = int(value) * 1000000000
+ except TypeError:
+ raise TypeError(
+ 'Cannot parse healthcheck "{0}". '
+ 'Expected an integer, got "{1}".'
+ .format(value, type(value).__name__)
+ )
+ except ValueError:
+ raise ValueError(
+ 'Cannot parse healthcheck "{0}". '
+ 'Expected an integer, got "{1}".'
+ .format(value, type(value).__name__)
+ )
+ else:
+ if key == 'test':
+ # If the user explicitly disables the healthcheck,
+ # return None as the healthcheck object
+ if value in (['NONE'], 'NONE'):
+ return None
+ else:
+ if isinstance(value, (tuple, list)):
+ result['healthcheck'][key] = \
+ [str(e) for e in value]
+ else:
+ result['healthcheck'][key] = \
+ ['CMD-SHELL', str(value)]
+ elif key == 'retries':
+ try:
+ result['healthcheck'][key] = int(value)
+ except ValueError:
+ raise ValueError(
+ 'Cannot parse healthcheck number of retries. '
+ 'Expected an integer, got "{0}".'
+ .format(type(value).__name__)
+ )
+
+ return result
+
+ @abstractmethod
+ def stop_container(self):
+ pass
+
+ @abstractmethod
+ def stop_and_remove_container(self):
+ pass
+
+ @abstractmethod
+ def restart_container(self):
+ pass
+
+ @abstractmethod
+ def create_volume(self):
+ pass
+
+ @abstractmethod
+ def remove_volume(self):
+ pass
+
+ @abstractmethod
+ def remove_image(self):
+ pass
+
+ @abstractmethod
+ def ensure_image(self):
+ pass
+
+ def _inject_env_var(self, environment_info):
+ newenv = {
+ 'KOLLA_SERVICE_NAME': self.params.get('name').replace('_', '-')
+ }
+ environment_info.update(newenv)
+ return environment_info
+
+ def _format_env_vars(self):
+ env = self._inject_env_var(self.params.get('environment'))
+ return {k: "" if env[k] is None else env[k] for k in env}
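generate_volumes() above turns each 'src:dest[:mode]' string into the volume list and bind dictionary the client APIs expect, defaulting the access mode to 'rw' and skipping empty entries. A standalone restatement of the mapping rule, with example paths only:

    # standalone restatement of the generate_volumes() mapping rule;
    # example paths only
    def volumes_to_binds(volumes):
        vol_list, vol_dict = [], {}
        for vol in volumes:
            if not vol:
                continue                      # empty strings are skipped
            if ':' not in vol:
                vol_list.append(vol)          # anonymous volume
                continue
            parts = vol.split(':')
            if len(parts) == 2 and ('/' not in parts[0] or '/' in parts[1]):
                parts.append('rw')            # default access mode
            vol_list.append(parts[1])
            vol_dict[parts[0]] = {'bind': parts[1], 'mode': parts[2]}
        return vol_list, vol_dict


    vols, binds = volumes_to_binds(
        ['/etc/kolla/haproxy/:/var/lib/kolla/config_files/:ro',
         'kolla_logs:/var/log/kolla/', ''])
    assert binds['kolla_logs'] == {'bind': '/var/log/kolla/', 'mode': 'rw'}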
diff --git a/ansible/module_utils/kolla_docker_worker.py b/ansible/module_utils/kolla_docker_worker.py
index e9da040931..cd8fdbb572 100644
--- a/ansible/module_utils/kolla_docker_worker.py
+++ b/ansible/module_utils/kolla_docker_worker.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
# Copyright 2015 Sam Yaple
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,33 +12,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# FIXME(yoctozepto): restart_policy is *not* checked in the container
-
import docker
import json
import os
-import shlex
-
-from distutils.version import StrictVersion
-COMPARE_CONFIG_CMD = ['/usr/local/bin/kolla_set_configs', '--check']
+from ansible.module_utils.kolla_container_worker import COMPARE_CONFIG_CMD
+from ansible.module_utils.kolla_container_worker import ContainerWorker
def get_docker_client():
return docker.APIClient
-class DockerWorker(object):
+class DockerWorker(ContainerWorker):
def __init__(self, module):
- self.module = module
- self.params = self.module.params
- self.changed = False
- # Use this to store arguments to pass to exit_json().
- self.result = {}
-
- # TLS not fully implemented
- # tls_config = self.generate_tls()
+ super().__init__(module)
options = {
'version': self.params.get('api_version'),
@@ -49,8 +36,8 @@ def __init__(self, module):
self.dc = get_docker_client()(**options)
- self._cgroupns_mode_supported = (
- StrictVersion(self.dc._version) >= StrictVersion('1.41'))
+ self._dimensions_kernel_memory_removed = True
+ self.dimension_map.pop('kernel_memory', None)
def generate_tls(self):
tls = {'verify': self.params.get('tls_verify')}
@@ -108,71 +95,6 @@ def get_container_info(self):
return None
return self.dc.inspect_container(self.params.get('name'))
- def compare_container(self):
- container = self.check_container()
- if (not container or
- self.check_container_differs() or
- self.compare_config()):
- self.changed = True
- return self.changed
-
- def check_container_differs(self):
- container_info = self.get_container_info()
- return (
- self.compare_cap_add(container_info) or
- self.compare_security_opt(container_info) or
- self.compare_image(container_info) or
- self.compare_ipc_mode(container_info) or
- self.compare_labels(container_info) or
- self.compare_privileged(container_info) or
- self.compare_pid_mode(container_info) or
- self.compare_cgroupns_mode(container_info) or
- self.compare_tmpfs(container_info) or
- self.compare_volumes(container_info) or
- self.compare_volumes_from(container_info) or
- self.compare_environment(container_info) or
- self.compare_container_state(container_info) or
- self.compare_dimensions(container_info) or
- self.compare_command(container_info) or
- self.compare_healthcheck(container_info)
- )
-
- def compare_ipc_mode(self, container_info):
- new_ipc_mode = self.params.get('ipc_mode')
- current_ipc_mode = container_info['HostConfig'].get('IpcMode')
- if not current_ipc_mode:
- current_ipc_mode = None
-
- # only check IPC mode if it is specified
- if new_ipc_mode is not None and new_ipc_mode != current_ipc_mode:
- return True
- return False
-
- def compare_cap_add(self, container_info):
- new_cap_add = self.params.get('cap_add', list())
- current_cap_add = container_info['HostConfig'].get('CapAdd',
- list())
- if not current_cap_add:
- current_cap_add = list()
- if set(new_cap_add).symmetric_difference(set(current_cap_add)):
- return True
-
- def compare_security_opt(self, container_info):
- ipc_mode = self.params.get('ipc_mode')
- pid_mode = self.params.get('pid_mode')
- privileged = self.params.get('privileged', False)
- # NOTE(jeffrey4l) security opt is disabled when using host ipc mode or
- # host pid mode or privileged. So no need to compare security opts
- if ipc_mode == 'host' or pid_mode == 'host' or privileged:
- return False
- new_sec_opt = self.params.get('security_opt', list())
- current_sec_opt = container_info['HostConfig'].get('SecurityOpt',
- list())
- if not current_sec_opt:
- current_sec_opt = list()
- if set(new_sec_opt).symmetric_difference(set(current_sec_opt)):
- return True
-
def compare_pid_mode(self, container_info):
new_pid_mode = self.params.get('pid_mode')
current_pid_mode = container_info['HostConfig'].get('PidMode')
@@ -182,27 +104,6 @@ def compare_pid_mode(self, container_info):
if new_pid_mode != current_pid_mode:
return True
- def compare_cgroupns_mode(self, container_info):
- if not self._cgroupns_mode_supported:
- return False
- new_cgroupns_mode = self.params.get('cgroupns_mode')
- if new_cgroupns_mode is None:
- # means we don't care what it is
- return False
- current_cgroupns_mode = (container_info['HostConfig']
- .get('CgroupnsMode'))
- if current_cgroupns_mode == '':
- # means the container was created on Docker pre-20.10
- # it behaves like 'host'
- current_cgroupns_mode = 'host'
- return new_cgroupns_mode != current_cgroupns_mode
-
- def compare_privileged(self, container_info):
- new_privileged = self.params.get('privileged')
- current_privileged = container_info['HostConfig']['Privileged']
- if new_privileged != current_privileged:
- return True
-
def compare_image(self, container_info=None):
container_info = container_info or self.get_container_info()
parse_repository_tag = docker.utils.parse_repository_tag
@@ -220,42 +121,6 @@ def compare_image(self, container_info=None):
parse_repository_tag(self.params.get('image'))):
return True
- def compare_labels(self, container_info):
- new_labels = self.params.get('labels')
- current_labels = container_info['Config'].get('Labels', dict())
- image_labels = self.check_image().get('Labels', dict())
- for k, v in image_labels.items():
- if k in new_labels:
- if v != new_labels[k]:
- return True
- else:
- del current_labels[k]
-
- if new_labels != current_labels:
- return True
-
- def compare_tmpfs(self, container_info):
- new_tmpfs = self.generate_tmpfs()
- current_tmpfs = container_info['HostConfig'].get('Tmpfs')
- if not new_tmpfs:
- new_tmpfs = []
- if not current_tmpfs:
- current_tmpfs = []
-
- if set(current_tmpfs).symmetric_difference(set(new_tmpfs)):
- return True
-
- def compare_volumes_from(self, container_info):
- new_vols_from = self.params.get('volumes_from')
- current_vols_from = container_info['HostConfig'].get('VolumesFrom')
- if not new_vols_from:
- new_vols_from = list()
- if not current_vols_from:
- current_vols_from = list()
-
- if set(current_vols_from).symmetric_difference(set(new_vols_from)):
- return True
-
def compare_volumes(self, container_info):
volumes, binds = self.generate_volumes()
current_vols = container_info['Config'].get('Volumes')
@@ -278,106 +143,6 @@ def compare_volumes(self, container_info):
if set(new_binds).symmetric_difference(set(current_binds)):
return True
- def compare_environment(self, container_info):
- if self.params.get('environment'):
- current_env = dict()
- for kv in container_info['Config'].get('Env', list()):
- k, v = kv.split('=', 1)
- current_env.update({k: v})
-
- for k, v in self.params.get('environment').items():
- if k not in current_env:
- return True
- if current_env[k] != v:
- return True
-
- def compare_container_state(self, container_info):
- new_state = self.params.get('state')
- current_state = container_info['State'].get('Status')
- if new_state != current_state:
- return True
-
- def compare_dimensions(self, container_info):
- new_dimensions = self.params.get('dimensions')
- # NOTE(mgoddard): The names used by Docker are inconsisent between
- # configuration of a container's resources and the resources in
- # container_info['HostConfig']. This provides a mapping between the
- # two.
- dimension_map = {
- 'mem_limit': 'Memory', 'mem_reservation': 'MemoryReservation',
- 'memswap_limit': 'MemorySwap', 'cpu_period': 'CpuPeriod',
- 'cpu_quota': 'CpuQuota', 'cpu_shares': 'CpuShares',
- 'cpuset_cpus': 'CpusetCpus', 'cpuset_mems': 'CpusetMems',
- 'kernel_memory': 'KernelMemory', 'blkio_weight': 'BlkioWeight',
- 'ulimits': 'Ulimits'}
- unsupported = set(new_dimensions.keys()) - \
- set(dimension_map.keys())
- if unsupported:
- self.module.exit_json(
- failed=True, msg=repr("Unsupported dimensions"),
- unsupported_dimensions=unsupported)
- current_dimensions = container_info['HostConfig']
- for key1, key2 in dimension_map.items():
- # NOTE(mgoddard): If a resource has been explicitly requested,
- # check for a match. Otherwise, ensure it is set to the default.
- if key1 in new_dimensions:
- if key1 == 'ulimits':
- if self.compare_ulimits(new_dimensions[key1],
- current_dimensions[key2]):
- return True
- elif new_dimensions[key1] != current_dimensions[key2]:
- return True
- elif current_dimensions[key2]:
- # The default values of all currently supported resources are
- # '' or 0 - both falsey.
- return True
-
- def compare_ulimits(self, new_ulimits, current_ulimits):
- # The new_ulimits is dict, we need make it to a list of Ulimit
- # instance.
- new_ulimits = self.build_ulimits(new_ulimits)
-
- def key(ulimit):
- return ulimit['Name']
-
- if current_ulimits is None:
- current_ulimits = []
- return sorted(new_ulimits, key=key) != sorted(current_ulimits, key=key)
-
- def compare_command(self, container_info):
- new_command = self.params.get('command')
- if new_command is not None:
- new_command_split = shlex.split(new_command)
- new_path = new_command_split[0]
- new_args = new_command_split[1:]
- if (new_path != container_info['Path'] or
- new_args != container_info['Args']):
- return True
-
- def compare_healthcheck(self, container_info):
- new_healthcheck = self.parse_healthcheck(
- self.params.get('healthcheck'))
- current_healthcheck = container_info['Config'].get('Healthcheck')
-
- healthcheck_map = {
- 'test': 'Test',
- 'retries': 'Retries',
- 'interval': 'Interval',
- 'start_period': 'StartPeriod',
- 'timeout': 'Timeout'}
-
- if new_healthcheck:
- new_healthcheck = new_healthcheck['healthcheck']
- if current_healthcheck:
- new_healthcheck = dict((healthcheck_map.get(k, k), v)
- for (k, v) in new_healthcheck.items())
- return new_healthcheck != current_healthcheck
- else:
- return True
- else:
- if current_healthcheck:
- return True
-
def compare_config(self):
try:
job = self.dc.exec_create(
@@ -415,19 +180,6 @@ def compare_config(self):
'ExitCode: %s Message: %s' %
(exec_inspect['ExitCode'], output))
- def parse_image(self):
- full_image = self.params.get('image')
-
- if '/' in full_image:
- registry, image = full_image.split('/', 1)
- else:
- image = full_image
-
- if ':' in image:
- return full_image.rsplit(':', 1)
- else:
- return full_image, 'latest'
-
def get_image_id(self):
full_image = self.params.get('image')
@@ -471,6 +223,7 @@ def pull_image(self):
self.changed = old_image_id != new_image_id
def remove_container(self):
+ self.changed |= self.systemd.remove_unit_file()
if self.check_container():
self.changed = True
# NOTE(jeffrey4l): in some case, docker failed to remove container
@@ -482,49 +235,11 @@ def remove_container(self):
container=self.params.get('name'),
force=True
)
+ self.systemd.remove_unit_file()
except docker.errors.APIError:
if self.check_container():
raise
- def generate_tmpfs(self):
- tmpfs = self.params.get('tmpfs')
- if tmpfs:
- # NOTE(mgoddard): Filter out any empty strings.
- tmpfs = [t for t in tmpfs if t]
- return tmpfs
-
- def generate_volumes(self):
- volumes = self.params.get('volumes')
- if not volumes:
- return None, None
-
- vol_list = list()
- vol_dict = dict()
-
- for vol in volumes:
- if len(vol) == 0:
- continue
-
- if ':' not in vol:
- vol_list.append(vol)
- continue
-
- split_vol = vol.split(':')
-
- if (len(split_vol) == 2 and
- ('/' not in split_vol[0] or '/' in split_vol[1])):
- split_vol.append('rw')
-
- vol_list.append(split_vol[1])
- vol_dict.update({
- split_vol[0]: {
- 'bind': split_vol[1],
- 'mode': split_vol[2]
- }
- })
-
- return vol_list, vol_dict
-
def parse_dimensions(self, dimensions):
# When the data object contains types such as
# docker.types.Ulimit, Ansible will fail when these are
@@ -577,42 +292,23 @@ def build_host_config(self, binds):
dimensions = self.parse_dimensions(dimensions)
options.update(dimensions)
- restart_policy = self.params.get('restart_policy')
-
- if restart_policy is not None:
- restart_policy = {'Name': restart_policy}
- # NOTE(Jeffrey4l): MaximumRetryCount is only needed for on-failure
- # policy
- if restart_policy['Name'] == 'on-failure':
- retries = self.params.get('restart_retries')
- if retries is not None:
- restart_policy['MaximumRetryCount'] = retries
- options['restart_policy'] = restart_policy
-
if binds:
options['binds'] = binds
+ # detached containers should only log to journald; this must be
+ # applied before create_host_config consumes the options dict
+ if self.params.get('detach'):
+     options['log_config'] = docker.types.LogConfig(
+         type=docker.types.LogConfig.types.NONE)
+
host_config = self.dc.create_host_config(**options)
- if self._cgroupns_mode_supported:
- # NOTE(yoctozepto): python-docker does not support CgroupnsMode
- # natively so we stuff it in manually.
- cgroupns_mode = self.params.get('cgroupns_mode')
- if cgroupns_mode is not None:
- host_config['CgroupnsMode'] = cgroupns_mode
+ # NOTE(yoctozepto): python-docker does not support CgroupnsMode
+ # natively so we stuff it in manually.
+ cgroupns_mode = self.params.get('cgroupns_mode')
+ if cgroupns_mode is not None:
+     host_config['CgroupnsMode'] = cgroupns_mode
- return host_config
- def _inject_env_var(self, environment_info):
- newenv = {
- 'KOLLA_SERVICE_NAME': self.params.get('name').replace('_', '-')
- }
- environment_info.update(newenv)
- return environment_info
-
- def _format_env_vars(self):
- env = self._inject_env_var(self.params.get('environment'))
- return {k: "" if env[k] is None else env[k] for k in env}
+ return host_config
def build_container_options(self):
volumes, binds = self.generate_volumes()
@@ -639,6 +335,8 @@ def create_container(self):
self.changed = True
options = self.build_container_options()
self.dc.create_container(**options)
+ if self.params.get('restart_policy') != 'oneshot':
+ self.changed |= self.systemd.create_unit_file()
def recreate_or_restart_container(self):
self.changed = True
@@ -680,7 +378,15 @@ def start_container(self):
if not container['Status'].startswith('Up '):
self.changed = True
- self.dc.start(container=self.params.get('name'))
+ if self.params.get('restart_policy') == 'oneshot':
+ self.dc.start(container=self.params.get('name'))
+ else:
+ self.systemd.create_unit_file()
+ if not self.systemd.start():
+ self.module.fail_json(
+ changed=True,
+ msg="Container timed out",
+ **self.check_container())
# We do not want to detach so we wait around for container to exit
if not self.params.get('detach'):
@@ -706,95 +412,6 @@ def start_container(self):
**self.result
)
- def get_container_env(self):
- name = self.params.get('name')
- info = self.get_container_info()
- if not info:
- self.module.fail_json(msg="No such container: {}".format(name))
- else:
- envs = dict()
- for env in info['Config']['Env']:
- if '=' in env:
- key, value = env.split('=', 1)
- else:
- key, value = env, ''
- envs[key] = value
-
- self.module.exit_json(**envs)
-
- def get_container_state(self):
- name = self.params.get('name')
- info = self.get_container_info()
- if not info:
- self.module.fail_json(msg="No such container: {}".format(name))
- else:
- self.module.exit_json(**info['State'])
-
- def parse_healthcheck(self, healthcheck):
- if not healthcheck:
- return None
-
- result = dict(healthcheck={})
-
- # All supported healthcheck parameters
- supported = set(['test', 'interval', 'timeout', 'start_period',
- 'retries'])
- unsupported = set(healthcheck) - supported
- missing = supported - set(healthcheck)
- duration_options = set(['interval', 'timeout', 'start_period'])
-
- if unsupported:
- self.module.exit_json(failed=True,
- msg=repr("Unsupported healthcheck options"),
- unsupported_healthcheck=unsupported)
-
- if missing:
- self.module.exit_json(failed=True,
- msg=repr("Missing healthcheck option"),
- missing_healthcheck=missing)
-
- for key in healthcheck:
- value = healthcheck.get(key)
- if key in duration_options:
- try:
- result['healthcheck'][key] = int(value) * 1000000000
- except TypeError:
- raise TypeError(
- 'Cannot parse healthcheck "{0}". '
- 'Expected an integer, got "{1}".'
- .format(value, type(value).__name__)
- )
- except ValueError:
- raise ValueError(
- 'Cannot parse healthcheck "{0}". '
- 'Expected an integer, got "{1}".'
- .format(value, type(value).__name__)
- )
- else:
- if key == 'test':
- # If the user explicitly disables the healthcheck,
- # return None as the healthcheck object
- if value in (['NONE'], 'NONE'):
- return None
- else:
- if isinstance(value, (tuple, list)):
- result['healthcheck'][key] = \
- [str(e) for e in value]
- else:
- result['healthcheck'][key] = \
- ['CMD-SHELL', str(value)]
- elif key == 'retries':
- try:
- result['healthcheck'][key] = int(value)
- except ValueError:
- raise ValueError(
- 'Cannot parse healthcheck number of retries.'
- 'Expected an integer, got "{0}".'
- .format(type(value))
- )
-
- return result
-
def stop_container(self):
name = self.params.get('name')
graceful_timeout = self.params.get('graceful_timeout')
@@ -808,7 +425,10 @@ def stop_container(self):
msg="No such container: {} to stop".format(name))
elif not container['Status'].startswith('Exited '):
self.changed = True
- self.dc.stop(name, timeout=graceful_timeout)
+ if not self.systemd.check_unit_file():
+ self.dc.stop(name, timeout=graceful_timeout)
+ else:
+ self.systemd.stop()
def stop_and_remove_container(self):
container = self.check_container()
@@ -827,8 +447,16 @@ def restart_container(self):
msg="No such container: {}".format(name))
else:
self.changed = True
- self.dc.stop(name, timeout=graceful_timeout)
- self.dc.start(name)
+ if self.params.get('restart_policy') != 'oneshot':
+ self.systemd.create_unit_file()
+ if not self.systemd.restart():
+ self.module.fail_json(
+ changed=True,
+ msg="Container timed out",
+ **self.check_container())
+ else:
+ self.dc.stop(name, timeout=graceful_timeout)
+ self.dc.start(name)
def create_volume(self):
if not self.check_volume():
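The DockerWorker changes above hand the container lifecycle over to systemd units whenever restart_policy is anything other than 'oneshot'; only oneshot containers are stopped and started directly through the Docker client. A condensed sketch of that dispatch, where worker is a hypothetical stand-in for a DockerWorker instance (attribute names mirror the diff above):

    # condensed restatement of the restart dispatch introduced above;
    # 'worker' is a hypothetical stand-in for a DockerWorker instance
    def restart_container(worker, name, graceful_timeout):
        if worker.params.get('restart_policy') != 'oneshot':
            # systemd owns the unit: (re)write it, then restart through it
            worker.systemd.create_unit_file()
            if not worker.systemd.restart():
                worker.module.fail_json(changed=True,
                                        msg="Container timed out")
        else:
            # oneshot containers bypass systemd entirely
            worker.dc.stop(name, timeout=graceful_timeout)
            worker.dc.start(name)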
diff --git a/ansible/module_utils/kolla_podman_worker.py b/ansible/module_utils/kolla_podman_worker.py
new file mode 100644
index 0000000000..f0073cef69
--- /dev/null
+++ b/ansible/module_utils/kolla_podman_worker.py
@@ -0,0 +1,685 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from podman.errors import APIError
+from podman import PodmanClient
+
+import shlex
+
+from ansible.module_utils.kolla_container_worker import COMPARE_CONFIG_CMD
+from ansible.module_utils.kolla_container_worker import ContainerWorker
+
+uri = "http+unix:/run/podman/podman.sock"
+
+CONTAINER_PARAMS = [
+ 'name', # string
+ 'cap_add', # list
+ 'cgroupns', # 'str',choices=['private', 'host']
+ 'command', # array of strings -- docker string
+
+ # this part is hidden inside dimensions
+ 'cpu_period', # int
+ 'cpu_quota', # int
+ 'cpuset_cpus', # str
+ 'cpu_shares', # int
+ 'cpuset_mems', # str
+ 'kernel_memory', # int or string
+ 'mem_limit', # (Union[int, str])
+ 'mem_reservation', # (Union[int, str]): Memory soft limit.
+ 'memswap_limit', # (Union[int, str]): Maximum amount of memory
+ # + swap a container is allowed to consume.
+ 'ulimits', # List[Ulimit]
+ 'blkio_weight', # int between 10 and 1000
+
+
+ 'detach', # bool
+ 'entrypoint', # string
+ 'environment', # dict docker - environment - dictionary
+ 'healthcheck', # same schema as docker -- healthcheck
+ 'image', # string
+ 'ipc_mode', # string only option is host
+
+ 'labels', # dict
+ 'netns', # dict
+ 'network_options', # string - none,bridge,host,container:id,
+ # missing in docker but needs to be host
+ 'pid_mode', # "string" host, private or ''
+ 'privileged', # bool
+ 'restart_policy', # set to none, handled by systemd
+ 'remove', # bool
+ 'restart_tries',  # int, ignored - restarts are handled by systemd
+ 'stop_timeout', # int
+ 'tty', # bool
+ # volumes need to be parsed, see parse_volumes() for more info
+ 'volumes', # array of dict
+ 'volumes_from', # array of strings
+]
+
+
+class PodmanWorker(ContainerWorker):
+
+ def __init__(self, module) -> None:
+ super().__init__(module)
+
+ self.pc = PodmanClient(base_url=uri)
+
+ def prepare_container_args(self):
+ args = dict(
+ network_mode='host'
+ )
+
+ command = self.params.pop('command', '')
+ if command:
+ self.params['command'] = shlex.split(command)
+
+ # we have to transform volumes into mounts because podman-py
+ # functionality is broken
+ mounts = []
+ filtered_volumes = {}
+ volumes = self.params.get('volumes', [])
+ if volumes:
+ self.parse_volumes(volumes, mounts, filtered_volumes)
+ # drop the original volumes so they do not raise an error later
+ self.params.pop('volumes', None)
+
+ args['mounts'] = mounts
+ args['volumes'] = filtered_volumes
+
+ env = self._format_env_vars()
+ args['environment'] = {k: str(v) for k, v in env.items()}
+ self.params.pop('environment', None)
+
+ healthcheck = self.params.get('healthcheck')
+ if healthcheck:
+ healthcheck = self.parse_healthcheck(healthcheck)
+ self.params.pop('healthcheck', None)
+ if healthcheck:
+ args.update(healthcheck)
+
+ # getting dimensions into separate parameters
+ dimensions = self.params.get('dimensions')
+ if dimensions:
+ dimensions = self.parse_dimensions(dimensions)
+ args.update(dimensions)
+
+ # NOTE(m.hiner): currently unsupported by Podman API
+ # args['tmpfs'] = self.generate_tmpfs()
+ self.params.pop('tmpfs', None)
+
+ # NOTE(m.hiner): in case containers are not privileged,
+ # they need this capability
+ if not self.params.get('privileged', False):
+ args['cap_add'] = self.params.pop('cap_add', []) + ['AUDIT_WRITE']
+
+ # this remapping could be done inline, but it originally covered
+ # around six keys, which is why it is structured this way
+ convert_keys = dict(
+ graceful_timeout='stop_timeout',
+ cgroupns_mode='cgroupns'
+ )
+
+ # remap differing args
+ for key_orig, key_new in convert_keys.items():
+ if key_orig in self.params:
+ value = self.params.get(key_orig, None)
+
+ if value is not None:
+ args[key_new] = value
+
+ # record remaining args
+ for key, value in self.params.items():
+ if key in CONTAINER_PARAMS and value is not None:
+ args[key] = value
+
+ args.pop('restart_policy', None) # handled by systemd
+
+ return args
+
+ # NOTE(i.halomi): Podman encounters issues parsing and setting
+ # permissions for a mix of volumes and binds when sent together.
+ # Therefore, we must parse them and set the permissions ourselves
+ # and send them to the API separately.
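+ # For example (paths illustrative): '/dev/pts:/dev/pts:shared' becomes
+ # a bind mount with shared propagation, 'devpts:/dev/pts' becomes a
+ # devpts mount, and 'kolla_logs:/var/log/kolla/' stays a named volume
+ # with the default 'rw' mode.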
+ def parse_volumes(self, volumes, mounts, filtered_volumes):
+ # we can ignore empty strings
+ volumes = [item for item in volumes if item.strip()]
+
+ for item in volumes:
+ # entries starting with '/' are bind mounts, not named volumes
+ if item[0] == '/':
+ mode = None
+ try:
+ if item.count(':') == 2:
+ src, dest, mode = item.split(':')
+ else:
+ src, dest = item.split(':')
+ except ValueError:
+ self.module.fail_json(
+ msg="Wrong format of volume: {}".format(item),
+ failed=True
+ )
+
+ mount_item = dict(
+ source=src,
+ target=dest,
+ type='bind',
+ propagation='rprivate'
+ )
+ if mode == 'ro':
+ mount_item['read_only'] = True
+ if mode == 'shared':
+ mount_item['propagation'] = 'shared'
+ mounts.append(mount_item)
+ else:
+ try:
+ mode = 'rw'
+ if item.count(':') == 2:
+ src, dest, mode = item.split(':')
+ else:
+ src, dest = item.split(':')
+ except ValueError:
+ self.module.fail_json(
+ msg="Wrong format of volume: {}".format(item),
+ failed=True
+ )
+ if src == 'devpts':
+ mount_item = dict(
+ target=dest,
+ type='devpts'
+ )
+ mounts.append(mount_item)
+ else:
+ filtered_volumes[src] = dict(
+ bind=dest,
+ mode=mode
+ )
+
+ def parse_dimensions(self, dimensions):
+ dimensions = dimensions.copy()
+
+ supported = {'cpu_period', 'cpu_quota', 'cpu_shares',
+ 'cpuset_cpus', 'cpuset_mems', 'mem_limit',
+ 'mem_reservation', 'memswap_limit',
+ 'kernel_memory', 'blkio_weight', 'ulimits'}
+ unsupported = set(dimensions) - supported
+ if unsupported:
+ self.module.exit_json(failed=True,
+ msg=repr("Unsupported dimensions"),
+ unsupported_dimensions=unsupported)
+
+ ulimits = dimensions.get('ulimits', {})
+ if ulimits:
+ # NOTE(m.hiner): default ulimits have to be filtered out because
+ # Podman would treat them as new ulimits and break the container
+ # as a result. Names are a copy of
+ # default_podman_dimensions_el9 in /ansible/group_vars/all.yml
+ for name in ['RLIMIT_NOFILE', 'RLIMIT_NPROC']:
+ ulimits.pop(name, None)
+
+ dimensions['ulimits'] = self.build_ulimits(ulimits)
+
+ return dimensions
+
+ def parse_healthcheck(self, healthcheck):
+ hc = super().parse_healthcheck(healthcheck)
+
+ # rename key to right format
+ if hc:
+ sp = hc['healthcheck'].pop('start_period', None)
+ if sp:
+ hc['healthcheck']['StartPeriod'] = sp
+
+ return hc
+
+ def prepare_image_args(self):
+ image, tag = self.parse_image()
+
+ args = dict(
+ repository=image,
+ tag=tag,
+ tls_verify=self.params.get('tls_verify', False),
+ stream=False
+ )
+
+ if self.params.get('auth_username', False):
+ args['auth_config'] = dict(
+ username=self.params.get('auth_username'),
+ password=self.params.get('auth_password', "")
+ )
+
+ if '/' not in image and self.params.get('auth_registry', False):
+ args['image'] = self.params['auth_registry'] + '/' + image
+ return args
+
+ def check_image(self):
+ try:
+ image = self.pc.images.get(self.params.get('image'))
+ return image.attrs
+ except APIError as e:
+ if e.status_code == 404:
+ return {}
+ else:
+ self.module.fail_json(
+ failed=True,
+ msg="Internal error: {}".format(
+ e.explanation
+ )
+ )
+
+ def check_volume(self):
+ try:
+ vol = self.pc.volumes.get(self.params.get('name'))
+ return vol.attrs
+ except APIError as e:
+ if e.status_code == 404:
+ return {}
+
+ def check_container(self):
+ name = self.params.get("name")
+ for cont in self.pc.containers.list(all=True):
+ cont.reload()
+ if name == cont.name:
+ return cont
+
+ def get_container_info(self):
+ container = self.check_container()
+ if not container:
+ return None
+
+ return container.attrs
+
+ def compare_container(self):
+ container = self.check_container()
+ if (not container or
+ self.check_container_differs() or
+ self.compare_config() or
+ self.systemd.check_unit_change()):
+ self.changed = True
+ return self.changed
+
+ def compare_pid_mode(self, container_info):
+ new_pid_mode = self.params.get('pid_mode') or self.params.get('pid')
+ current_pid_mode = container_info['HostConfig'].get('PidMode')
+
+ if not current_pid_mode:
+ current_pid_mode = None
+
+ # 'private' is the Podman default pid_mode
+ if new_pid_mode is None and current_pid_mode == 'private':
+ return False
+
+ if new_pid_mode != current_pid_mode:
+ return True
+
+ def compare_image(self, container_info=None):
+ def parse_tag(tag):
+ splits = tag.rsplit('/', 1)
+ return splits[-1]
+
+ container_info = container_info or self.get_container_info()
+ if not container_info:
+ return True
+
+ new_image = self.check_image()
+ current_image = container_info['Image']
+ if not new_image:
+ return True
+ if new_image['Id'] != current_image:
+ return True
+ # compare name:tag
+ elif (parse_tag(self.params.get('image')) !=
+ parse_tag(container_info['Config']['Image'])):
+ return True
+
+ def compare_volumes(self, container_info):
+ def check_slash(string):
+ if not string:
+ return string
+ if string[-1] != '/':
+ return string + '/'
+ else:
+ return string
+
+ raw_volumes, binds = self.generate_volumes()
+ raw_vols, current_binds = self.generate_volumes(
+ container_info['HostConfig'].get('Binds'))
+
+ current_vols = [check_slash(vol) for vol in raw_vols if vol]
+ volumes = [check_slash(vol) for vol in raw_volumes if vol]
+
+ if not volumes:
+ volumes = list()
+ if not current_vols:
+ current_vols = list()
+ if not current_binds:
+ current_binds = list()
+
+ volumes.sort()
+ current_vols.sort()
+
+ if set(volumes).symmetric_difference(set(current_vols)):
+ return True
+
+ new_binds = list()
+ new_current_binds = list()
+ if binds:
+ for k, v in binds.items():
+ k = check_slash(k)
+ v['bind'] = check_slash(v['bind'])
+ new_binds.append(
+ "{}:{}:{}".format(k, v['bind'], v['mode']))
+
+ if current_binds:
+ for k, v in current_binds.items():
+ k = check_slash(k)
+ v['bind'] = check_slash(v['bind'])
+ if 'ro' in v['mode']:
+ v['mode'] = 'ro'
+ else:
+ v['mode'] = 'rw'
+ new_current_binds.append(
+ "{}:{}:{}".format(k, v['bind'], v['mode'][0:2]))
+
+ new_binds.sort()
+ new_current_binds.sort()
+
+ if set(new_binds).symmetric_difference(set(new_current_binds)):
+ return True
+
+ def compare_dimensions(self, container_info):
+ new_dimensions = self.params.get('dimensions')
+
+ # NOTE(mgoddard): The names used by Docker/Podman are inconsistent
+ # between configuration of a container's resources and
+ # the resources in container_info['HostConfig'].
+ # This provides a mapping between the two.
+ dimension_map = {
+ 'mem_limit': 'Memory', 'mem_reservation': 'MemoryReservation',
+ 'memswap_limit': 'MemorySwap', 'cpu_period': 'CpuPeriod',
+ 'cpu_quota': 'CpuQuota', 'cpu_shares': 'CpuShares',
+ 'cpuset_cpus': 'CpusetCpus', 'cpuset_mems': 'CpusetMems',
+ 'kernel_memory': 'KernelMemory', 'blkio_weight': 'BlkioWeight',
+ 'ulimits': 'Ulimits'}
+ unsupported = set(new_dimensions.keys()) - \
+ set(dimension_map.keys())
+ if unsupported:
+ self.module.exit_json(
+ failed=True, msg=repr("Unsupported dimensions"),
+ unsupported_dimensions=unsupported)
+ current_dimensions = container_info['HostConfig']
+ for key1, key2 in dimension_map.items():
+ # NOTE(mgoddard): If a resource has been explicitly requested,
+ # check for a match. Otherwise, ensure it is set to the default.
+ if key1 in new_dimensions:
+ if key1 == 'ulimits':
+ if self.compare_ulimits(new_dimensions[key1],
+ current_dimensions[key2]):
+ return True
+ elif new_dimensions[key1] != current_dimensions[key2]:
+ return True
+ elif current_dimensions[key2]:
+ # The default values of all (except ulimits) currently
+ # supported resources are '' or 0 - both falsey.
+ return True
+
+ def compare_config(self):
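+ # Run COMPARE_CONFIG_CMD inside the container as root; its exit
+ # code tells us whether the copied-in configuration is stale.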
+ try:
+ container = self.pc.containers.get(self.params['name'])
+ container.reload()
+ if container.status != 'running':
+ return True
+
+ rc, raw_output = container.exec_run(COMPARE_CONFIG_CMD,
+ user='root')
+ # APIError means either the container doesn't exist or the exec
+ # command failed; in both cases the container is in a bad state,
+ # so we assume the config is stale, return True and recreate the
+ # container
+ except APIError as e:
+ if e.is_client_error():
+ return True
+ else:
+ raise
+ # Exit codes:
+ # 0: not changed
+ # 1: changed
+ # else: error
+ if rc == 0:
+ return False
+ elif rc == 1:
+ return True
+ else:
+ raise Exception('Failed to compare container configuration: '
+ 'ExitCode: %s Message: %s' %
+ (rc, raw_output.decode('utf-8')))
+
+ def pull_image(self):
+ args = self.prepare_image_args()
+ old_image = self.check_image()
+
+ try:
+ image = self.pc.images.pull(**args)
+
+ if image.attrs == {}:
+ self.module.fail_json(
+ msg="The requested image does not exist: {}".format(
+ self.params['image']),
+ failed=True
+ )
+ self.changed = old_image != image.attrs
+ except APIError as e:
+ self.module.fail_json(
+ msg="Unknown error message: {}".format(
+ str(e)),
+ failed=True
+ )
+
+ def remove_container(self):
+ self.changed |= self.systemd.remove_unit_file()
+ container = self.check_container()
+ if container:
+ try:
+ container.remove(force=True)
+ except APIError:
+ if self.check_container():
+ raise
+
+ def build_ulimits(self, ulimits):
+ ulimits_opt = []
+ for key, value in ulimits.items():
+ soft = value.get('soft')
+ hard = value.get('hard')
+ # Use a plain dictionary instead of the Ulimit type
+ ulimits_opt.append(dict(Name=key,
+ Soft=soft,
+ Hard=hard))
+ return ulimits_opt
+
+ def create_container(self):
+ args = self.prepare_container_args()
+ container = self.pc.containers.create(**args)
+ if container.attrs == {}:
+ data = container.to_dict()
+ self.module.fail_json(failed=True, msg="Creation failed", **data)
+ else:
+ self.changed |= self.systemd.create_unit_file()
+ return container
+
+ def recreate_or_restart_container(self):
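+ # COPY_ONCE containers receive config only at creation, so config
+ # or image changes require a full recreate; COPY_ALWAYS containers
+ # re-copy config on every start, so a restart is sufficient.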
+ strategy = self.params.get(
+ 'environment', dict()).get('KOLLA_CONFIG_STRATEGY')
+
+ container = self.get_container_info()
+ if not container:
+ self.start_container()
+ return
+
+ if strategy == 'COPY_ONCE' or self.check_container_differs():
+ self.ensure_image()
+
+ self.stop_container()
+ self.remove_container()
+ self.start_container()
+
+ elif strategy == 'COPY_ALWAYS':
+ self.restart_container()
+
+ def start_container(self):
+ self.ensure_image()
+
+ container = self.check_container()
+ if container and self.check_container_differs():
+ self.stop_container()
+ self.remove_container()
+ container = self.check_container()
+
+ if not container:
+ self.create_container()
+ container = self.check_container()
+
+ if container.status != 'running':
+ self.changed = True
+ if self.params.get('restart_policy') == 'oneshot':
+ container = self.check_container()
+ container.start()
+ else:
+ self.systemd.create_unit_file()
+ if not self.systemd.start():
+ self.module.fail_json(
+ changed=True,
+ msg="Container timed out",
+ **self.check_container().attrs)
+
+ if not self.params.get('detach'):
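+ # Non-detached mode: block until the container exits and capture
+ # its exit code and logs for the caller.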
+ container = self.check_container()
+ rc = container.wait()
+
+ stdout = [line.decode() for line in container.logs(stdout=True,
+ stderr=False)]
+ stderr = [line.decode() for line in container.logs(stdout=False,
+ stderr=True)]
+
+ self.result['rc'] = rc
+ self.result['stdout'] = "\n".join(stdout) if len(stdout) else ""
+ self.result['stderr'] = "\n".join(stderr) if len(stderr) else ""
+
+ if self.params.get('remove_on_exit'):
+ self.stop_container()
+ self.remove_container()
+ if rc != 0:
+ self.module.fail_json(
+ changed=True,
+ msg="Container exited with non-zero return code %s" % rc,
+ **self.result
+ )
+
+ def stop_container(self):
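+ # Containers managed by a systemd unit are stopped through systemd;
+ # oneshot containers are stopped directly via the Podman API.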
+ name = self.params.get('name')
+ graceful_timeout = self.params.get('graceful_timeout')
+ if not graceful_timeout:
+ graceful_timeout = 10
+ container = self.check_container()
+ if not container:
+ ignore_missing = self.params.get('ignore_missing')
+ if not ignore_missing:
+ self.module.fail_json(
+ msg="No such container: {} to stop".format(name))
+ elif container.status not in ('exited', 'stopped'):
+ self.changed = True
+ if self.params.get('restart_policy') != 'oneshot':
+ self.systemd.create_unit_file()
+ self.systemd.stop()
+ else:
+ container.stop(timeout=str(graceful_timeout))
+
+ def stop_and_remove_container(self):
+ container = self.check_container()
+
+ if container:
+ self.stop_container()
+ self.remove_container()
+
+ def restart_container(self):
+ container = self.check_container()
+
+ if not container:
+ self.module.fail_json(
+ msg="No such container: {}".format(self.params.get('name'))
+ )
+ else:
+ self.changed = True
+ self.systemd.create_unit_file()
+
+ if not self.systemd.restart():
+ self.module.fail_json(
+ changed=True,
+ msg="Container timed out",
+ **container.attrs)
+
+ def create_volume(self):
+ if not self.check_volume():
+ self.changed = True
+ args = dict(
+ name=self.params.get('name'),
+ driver='local'
+ )
+
+ vol = self.pc.volumes.create(**args)
+ self.result = vol.attrs
+
+ def remove_volume(self):
+ if self.check_volume():
+ self.changed = True
+ try:
+ self.pc.volumes.remove(self.params.get('name'))
+ except APIError as e:
+ if e.status_code == 409:
+ self.module.fail_json(
+ failed=True,
+ msg="Volume named '{}' is currently in-use".format(
+ self.params.get('name')
+ )
+ )
+ else:
+ self.module.fail_json(
+ failed=True,
+ msg="Internal error: {}".format(
+ e.explanation
+ )
+ )
+ raise
+
+ def remove_image(self):
+ if self.check_image():
+ image = self.pc.images.get(self.params['image'])
+ self.changed = True
+ try:
+ image.remove()
+ except APIError as e:
+ if e.status_code == 409:
+ self.module.fail_json(
+ failed=True,
+ msg="Image '{}' is currently in-use".format(
+ self.params.get('image')
+ )
+ )
+ else:
+ self.module.fail_json(
+ failed=True,
+ msg="Internal error: {}".format(
+ str(e)
+ )
+ )
+ raise
+
+ def ensure_image(self):
+ if not self.check_image():
+ self.pull_image()
diff --git a/ansible/module_utils/kolla_systemd_worker.py b/ansible/module_utils/kolla_systemd_worker.py
new file mode 100644
index 0000000000..b807d2a929
--- /dev/null
+++ b/ansible/module_utils/kolla_systemd_worker.py
@@ -0,0 +1,216 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from string import Template
+from time import sleep
+
+import dbus
+
+
+TEMPLATE = '''# ${service_name}
+# autogenerated by Kolla-Ansible
+
+[Unit]
+Description=${engine} ${service_name}
+After=${deps}
+Wants=${deps}
+StartLimitInterval=${restart_timeout}
+StartLimitBurst=${restart_retries}
+
+[Service]
+ExecStart=/usr/bin/${engine} start -a ${name}
+ExecStop=/usr/bin/${engine} stop ${name} -t ${graceful_timeout}
+Restart=${restart_policy}
+RestartSec=${restart_duration}
+SuccessExitStatus=143
+
+[Install]
+WantedBy=multi-user.target
+'''
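+# NOTE: the unit file above is rendered with string.Template; each
+# ${...} placeholder is filled from container_dict built in
+# SystemdWorker.__init__.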
+
+
+class SystemdWorker(object):
+ def __init__(self, params):
+ name = params.get('name', None)
+
+ # no container name given, so there is nothing for systemd to manage
+ if not name:
+ return None
+
+ container_engine = params.get('container_engine')
+ if container_engine == 'docker':
+ dependencies = 'docker.service'
+ else:
+ dependencies = 'network-online.target'
+
+ restart_policy = params.get('restart_policy', 'no')
+ if restart_policy == 'unless-stopped':
+ restart_policy = 'always'
+
+ # NOTE(hinermar): duration * retries should be less than the timeout,
+ # otherwise the service will try to restart indefinitely.
+ # Also, correct timeout and retries values should probably be
+ # checked at the module level inside kolla_container.py
+ restart_timeout = params.get('client_timeout', 120)
+ restart_retries = params.get('restart_retries', 10)
+ restart_duration = (restart_timeout // restart_retries) - 1
+
+ # container info
+ self.container_dict = dict(
+ name=name,
+ service_name='kolla-' + name + '-container.service',
+ engine=container_engine,
+ deps=dependencies,
+ graceful_timeout=params.get('graceful_timeout'),
+ restart_policy=restart_policy,
+ restart_timeout=restart_timeout,
+ restart_retries=restart_retries,
+ restart_duration=restart_duration
+ )
+
+ # systemd
+ self.manager = self.get_manager()
+ self.job_mode = 'replace'
+ self.sysdir = '/etc/systemd/system/'
+
+ # templating
+ self.template = Template(TEMPLATE)
+
+ def get_manager(self):
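+ # Obtain systemd's Manager interface on the system D-Bus; all unit
+ # operations below go through this proxy object.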
+ sysbus = dbus.SystemBus()
+ systemd1 = sysbus.get_object(
+ 'org.freedesktop.systemd1',
+ '/org/freedesktop/systemd1'
+ )
+ return dbus.Interface(systemd1, 'org.freedesktop.systemd1.Manager')
+
+ def start(self):
+ if self.perform_action(
+ 'StartUnit',
+ self.container_dict['service_name'],
+ self.job_mode
+ ):
+ return self.wait_for_unit(self.container_dict['restart_timeout'])
+ return False
+
+ def restart(self):
+ if self.perform_action(
+ 'RestartUnit',
+ self.container_dict['service_name'],
+ self.job_mode
+ ):
+ return self.wait_for_unit(self.container_dict['restart_timeout'])
+ return False
+
+ def stop(self):
+ if self.perform_action(
+ 'StopUnit',
+ self.container_dict['service_name'],
+ self.job_mode
+ ):
+ return self.wait_for_unit(
+ self.container_dict['restart_timeout'],
+ state='dead'
+ )
+ return False
+
+ def reload(self):
+ return self.perform_action(
+ 'Reload',
+ self.container_dict['service_name'],
+ self.job_mode
+ )
+
+ def enable(self):
+ return self.perform_action(
+ 'EnableUnitFiles',
+ [self.container_dict['service_name']],
+ False,
+ True
+ )
+
+ def perform_action(self, function, *args):
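+ # Generic dispatcher for Manager methods: returns True on success
+ # and False on any D-Bus error, leaving handling to the caller.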
+ try:
+ getattr(self.manager, function)(*args)
+ return True
+ except Exception:
+ return False
+
+ def check_unit_file(self):
+ return os.path.isfile(
+ self.sysdir + self.container_dict['service_name']
+ )
+
+ def check_unit_change(self, new_content=''):
+ if not new_content:
+ new_content = self.generate_unit_file()
+
+ if self.check_unit_file():
+ with open(
+ self.sysdir + self.container_dict['service_name'], 'r'
+ ) as f:
+ curr_content = f.read()
+
+ # return whether the unit file content has changed
+ return curr_content != new_content
+
+ return True
+
+ def generate_unit_file(self):
+ return self.template.substitute(self.container_dict)
+
+ def create_unit_file(self):
+ file_content = self.generate_unit_file()
+
+ if self.check_unit_change(file_content):
+ with open(
+ self.sysdir + self.container_dict['service_name'], 'w'
+ ) as f:
+ f.write(file_content)
+
+ self.reload()
+ self.enable()
+ return True
+
+ return False
+
+ def remove_unit_file(self):
+ if self.check_unit_file():
+ os.remove(self.sysdir + self.container_dict['service_name'])
+ self.reload()
+
+ return True
+ else:
+ return False
+
+ def get_unit_state(self):
+ unit_list = self.manager.ListUnits()
+
+ for service in unit_list:
+ if str(service[0]) == self.container_dict['service_name']:
+ return str(service[4])
+
+ return None
+
+ def wait_for_unit(self, timeout, state='running'):
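+ # Poll the unit state every 5 seconds until it reaches the desired
+ # state or the timeout elapses.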
+ delay = 5
+ elapsed = 0
+
+ while True:
+ if self.get_unit_state() == state:
+ return True
+ elif elapsed > timeout:
+ return False
+ else:
+ sleep(delay)
+ elapsed += delay
diff --git a/ansible/monasca_cleanup.yml b/ansible/monasca_cleanup.yml
deleted file mode 100644
index 11c50a7c95..0000000000
--- a/ansible/monasca_cleanup.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Cleanup unused Monasca services
- hosts:
- - monasca-api
- - monasca-log-persister
- - monasca-log-metrics
- - monasca-thresh
- - monasca-notification
- - monasca-persister
- roles:
- - { role: monasca,
- tags: monasca }
-
-- name: Cleanup unused Storm services
- hosts:
- - storm-worker
- - storm-nimbus
- roles:
- - { role: storm,
- tags: storm }
diff --git a/ansible/nova-libvirt-cleanup.yml b/ansible/nova-libvirt-cleanup.yml
index 5124b37d44..5a69a8bfa6 100644
--- a/ansible/nova-libvirt-cleanup.yml
+++ b/ansible/nova-libvirt-cleanup.yml
@@ -6,6 +6,10 @@
hosts:
- compute
serial: '{{ kolla_serial|default("0") }}'
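+ # Most specific limit wins: the per-play variable, then the global
+ # kolla_max_fail_percentage, then 100 (never abort the play).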
+ max_fail_percentage: >-
+ {{ nova_libvirt_cleanup_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
tags:
- nova-libvirt-cleanup
tasks:
diff --git a/ansible/nova.yml b/ansible/nova.yml
index 5e12479d7c..c41d854075 100644
--- a/ansible/nova.yml
+++ b/ansible/nova.yml
@@ -37,6 +37,10 @@
- nova-api
- nova-api-bootstrap
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ nova_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
tasks:
# * Create nova API & cell0 DBs & users
# * API DB schema migrations
@@ -109,6 +113,10 @@
- nova-api-deploy
- nova-api-upgrade
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ nova_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- role: nova
when: enable_nova | bool
@@ -130,6 +138,10 @@
- nova-cell-deploy
- nova-cell-upgrade
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ nova_cell_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- role: nova-cell
when: enable_nova | bool
@@ -147,6 +159,10 @@
- nova-api
- nova-refresh-scheduler-cell-cache
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ nova_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
tasks:
- import_role:
name: nova
@@ -172,6 +188,10 @@
- nova-api
- nova-api-reload
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ nova_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
tasks:
- import_role:
name: nova
@@ -196,6 +216,10 @@
- nova-cell
- nova-cell-reload
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ nova_cell_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
tasks:
- import_role:
name: nova-cell
@@ -216,6 +240,10 @@
- nova-api
- nova-api-reload
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ nova_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
tasks:
- import_role:
name: nova
@@ -238,6 +266,8 @@
- nova-online-data-migrations
- nova-api-online-data-migrations
serial: '{{ kolla_serial|default("0") }}'
+ # Fail all hosts if any of these once-per-cell tasks fails.
+ any_errors_fatal: true
tasks:
- import_role:
name: nova
diff --git a/ansible/post-deploy.yml b/ansible/post-deploy.yml
index ec83bd11f4..1203ebf901 100644
--- a/ansible/post-deploy.yml
+++ b/ansible/post-deploy.yml
@@ -32,6 +32,33 @@
group: "{{ ansible_facts.user_gid }}"
mode: 0600
+ - name: Template out admin-openrc-system.sh
+ become: true
+ template:
+ src: "roles/common/templates/admin-openrc-system.sh.j2"
+ dest: "{{ node_config }}/admin-openrc-system.sh"
+ owner: "{{ ansible_facts.user_uid }}"
+ group: "{{ ansible_facts.user_gid }}"
+ mode: 0600
+
+ - name: Template out public-openrc.sh
+ become: true
+ template:
+ src: "roles/common/templates/public-openrc.sh.j2"
+ dest: "{{ node_config }}/public-openrc.sh"
+ owner: "{{ ansible_facts.user_uid }}"
+ group: "{{ ansible_facts.user_gid }}"
+ mode: 0600
+
+ - name: Template out public-openrc-system.sh
+ become: true
+ template:
+ src: "roles/common/templates/public-openrc-system.sh.j2"
+ dest: "{{ node_config }}/public-openrc-system.sh"
+ owner: "{{ ansible_facts.user_uid }}"
+ group: "{{ ansible_facts.user_gid }}"
+ mode: 0600
+
- import_role:
name: octavia
tasks_from: openrc.yml
diff --git a/ansible/prune-images.yml b/ansible/prune-images.yml
index 2b6a3adae3..20c1e1c0dc 100644
--- a/ansible/prune-images.yml
+++ b/ansible/prune-images.yml
@@ -5,5 +5,9 @@
hosts: baremetal
serial: '{{ kolla_serial|default("0") }}'
gather_facts: false
+ max_fail_percentage: >-
+ {{ prune_images_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- prune-images
diff --git a/ansible/rabbitmq-reset-state.yml b/ansible/rabbitmq-reset-state.yml
new file mode 100644
index 0000000000..c9d77915db
--- /dev/null
+++ b/ansible/rabbitmq-reset-state.yml
@@ -0,0 +1,10 @@
+---
+- name: Reset RabbitMQ state
+ hosts: rabbitmq
+ tasks:
+ - name: Include RabbitMQ reset-state tasks
+ include_role:
+ name: rabbitmq
+ tasks_from: reset-state
+ when:
+ - enable_rabbitmq | bool
diff --git a/ansible/rabbitmq-upgrade.yml b/ansible/rabbitmq-upgrade.yml
new file mode 100644
index 0000000000..3677676500
--- /dev/null
+++ b/ansible/rabbitmq-upgrade.yml
@@ -0,0 +1,21 @@
+---
+- import_playbook: gather-facts.yml
+
+- name: Group hosts based on configuration (RabbitMQ Only)
+ hosts: all
+ gather_facts: false
+ max_fail_percentage: >-
+ {{ group_hosts_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
+ tasks:
+ - name: Group hosts based on enabled services (RabbitMQ Only)
+ group_by:
+ key: "enable_rabbitmq_{{ enable_rabbitmq | bool }}"
+ changed_when: false
+ tags: always
+
+- import_playbook: rabbitmq.yml
+ vars:
+ kolla_action: upgrade
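+ # Dots in the version suffix become dashes in the image name
+ # (e.g. a suffix of "3.13" selects the rabbitmq-3-13 image).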
+ rabbitmq_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/rabbitmq-{{ rabbitmq_version_suffix | regex_replace('\\.', '-') }}"
diff --git a/ansible/rabbitmq.yml b/ansible/rabbitmq.yml
new file mode 100644
index 0000000000..6b405cc626
--- /dev/null
+++ b/ansible/rabbitmq.yml
@@ -0,0 +1,70 @@
+---
+# For RabbitMQ we need to be careful about restarting services, to avoid losing quorum.
+- name: Apply role rabbitmq
+ gather_facts: false
+ hosts:
+ - rabbitmq
+ - '&enable_rabbitmq_True'
+ max_fail_percentage: >-
+ {{ rabbitmq_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
+ tags:
+ - rabbitmq
+ tasks:
+ - import_role:
+ name: rabbitmq
+ vars:
+ role_rabbitmq_cluster_cookie: '{{ rabbitmq_cluster_cookie }}'
+ role_rabbitmq_cluster_port: '{{ rabbitmq_cluster_port }}'
+ role_rabbitmq_epmd_port: '{{ rabbitmq_epmd_port }}'
+ role_rabbitmq_groups: rabbitmq
+ role_rabbitmq_management_port: '{{ rabbitmq_management_port }}'
+ role_rabbitmq_monitoring_password: '{{ rabbitmq_monitoring_password }}'
+ role_rabbitmq_monitoring_user: '{{ rabbitmq_monitoring_user }}'
+ role_rabbitmq_password: '{{ rabbitmq_password }}'
+ role_rabbitmq_port: '{{ rabbitmq_port }}'
+ role_rabbitmq_prometheus_port: '{{ rabbitmq_prometheus_port }}'
+ role_rabbitmq_user: '{{ rabbitmq_user }}'
+
+- name: Restart rabbitmq services
+ gather_facts: false
+ hosts:
+ - rabbitmq_restart
+ - '&enable_rabbitmq_True'
+ # Restart in batches
+ serial: "33%"
+ max_fail_percentage: >-
+ {{ rabbitmq_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
+ tags:
+ - rabbitmq
+ tasks:
+ - import_role:
+ name: rabbitmq
+ tasks_from: restart_services.yml
+ vars:
+ role_rabbitmq_cluster_cookie: '{{ rabbitmq_cluster_cookie }}'
+ role_rabbitmq_groups: rabbitmq
+
+- name: Apply rabbitmq post-configuration
+ gather_facts: false
+ hosts:
+ - rabbitmq
+ - '&enable_rabbitmq_True'
+ max_fail_percentage: >-
+ {{ rabbitmq_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
+ tags:
+ - rabbitmq
+ tasks:
+ - name: Include rabbitmq post-deploy.yml
+ include_role:
+ name: rabbitmq
+ tasks_from: post-deploy.yml
+ when: kolla_action in ['deploy', 'reconfigure', 'upgrade']
+ vars:
+ role_rabbitmq_cluster_cookie: '{{ rabbitmq_cluster_cookie }}'
+ role_rabbitmq_groups: rabbitmq
diff --git a/ansible/roles/aodh/defaults/main.yml b/ansible/roles/aodh/defaults/main.yml
index 0f8a29d718..bb18d9cc30 100644
--- a/ansible/roles/aodh/defaults/main.yml
+++ b/ansible/roles/aodh/defaults/main.yml
@@ -19,7 +19,8 @@ aodh_services:
enabled: "{{ enable_aodh }}"
mode: "http"
external: true
- port: "{{ aodh_api_port }}"
+ external_fqdn: "{{ aodh_external_fqdn }}"
+ port: "{{ aodh_api_public_port }}"
listen_port: "{{ aodh_api_listen_port }}"
aodh-evaluator:
container_name: aodh_evaluator
@@ -55,6 +56,12 @@ aodh_services:
# change this option.
aodh_evaluation_interval: 300
+####################
+# Config Validate
+####################
+aodh_config_validation:
+ - generator: "/aodh/aodh/cmd/aodh-config-generator.conf"
+ config: "/etc/aodh/aodh.conf"
####################
# Database
@@ -83,8 +90,6 @@ aodh_database_shard:
aodh_notification_topics:
- name: notifications
enabled: "{{ enable_ceilometer | bool }}"
- - name: vitrage_notifications
- enabled: "{{ enable_vitrage | bool }}"
aodh_enabled_notification_topics: "{{ aodh_notification_topics | selectattr('enabled', 'equalto', true) | list }}"
@@ -93,19 +98,19 @@ aodh_enabled_notification_topics: "{{ aodh_notification_topics | selectattr('ena
####################
aodh_tag: "{{ openstack_tag }}"
-aodh_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/aodh-api"
+aodh_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}aodh-api"
aodh_api_tag: "{{ aodh_tag }}"
aodh_api_image_full: "{{ aodh_api_image }}:{{ aodh_api_tag }}"
-aodh_evaluator_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/aodh-evaluator"
+aodh_evaluator_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}aodh-evaluator"
aodh_evaluator_tag: "{{ aodh_tag }}"
aodh_evaluator_image_full: "{{ aodh_evaluator_image }}:{{ aodh_evaluator_tag }}"
-aodh_listener_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/aodh-listener"
+aodh_listener_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}aodh-listener"
aodh_listener_tag: "{{ aodh_tag }}"
aodh_listener_image_full: "{{ aodh_listener_image }}:{{ aodh_listener_tag }}"
-aodh_notifier_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/aodh-notifier"
+aodh_notifier_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}aodh-notifier"
aodh_notifier_tag: "{{ aodh_tag }}"
aodh_notifier_image_full: "{{ aodh_notifier_image }}:{{ aodh_notifier_tag }}"
@@ -172,25 +177,25 @@ aodh_api_default_volumes:
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "aodh:/var/lib/aodh/"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/aodh/aodh:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/aodh' if aodh_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/aodh/:/dev-mode/aodh' if aodh_dev_mode | bool else '' }}"
aodh_evaluator_default_volumes:
- "{{ node_config_directory }}/aodh-evaluator/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/aodh/aodh:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/aodh' if aodh_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/aodh/:/dev-mode/aodh' if aodh_dev_mode | bool else '' }}"
aodh_listener_default_volumes:
- "{{ node_config_directory }}/aodh-listener/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/aodh/aodh:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/aodh' if aodh_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/aodh/:/dev-mode/aodh' if aodh_dev_mode | bool else '' }}"
aodh_notifier_default_volumes:
- "{{ node_config_directory }}/aodh-notifier/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/aodh/aodh:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/aodh' if aodh_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/aodh/:/dev-mode/aodh' if aodh_dev_mode | bool else '' }}"
aodh_extra_volumes: "{{ default_extra_volumes }}"
aodh_api_extra_volumes: "{{ aodh_extra_volumes }}"
@@ -201,9 +206,6 @@ aodh_notifier_extra_volumes: "{{ aodh_extra_volumes }}"
####################
# OpenStack
####################
-aodh_internal_endpoint: "{{ internal_protocol }}://{{ aodh_internal_fqdn | put_address_in_context('url') }}:{{ aodh_api_port }}"
-aodh_public_endpoint: "{{ public_protocol }}://{{ aodh_external_fqdn | put_address_in_context('url') }}:{{ aodh_api_port }}"
-
aodh_logging_debug: "{{ openstack_logging_debug }}"
aodh_keystone_user: "aodh"
diff --git a/ansible/roles/aodh/handlers/main.yml b/ansible/roles/aodh/handlers/main.yml
index 4e7202503a..d7aac03f60 100644
--- a/ansible/roles/aodh/handlers/main.yml
+++ b/ansible/roles/aodh/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "aodh-api"
service: "{{ aodh_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -12,15 +12,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart aodh-evaluator container
vars:
service_name: "aodh-evaluator"
service: "{{ aodh_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -28,15 +26,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart aodh-listener container
vars:
service_name: "aodh-listener"
service: "{{ aodh_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -44,15 +40,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart aodh-notifier container
vars:
service_name: "aodh-notifier"
service: "{{ aodh_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -60,5 +54,3 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/aodh/tasks/bootstrap.yml b/ansible/roles/aodh/tasks/bootstrap.yml
index a78c1c2afb..6ac0f93438 100644
--- a/ansible/roles/aodh/tasks/bootstrap.yml
+++ b/ansible/roles/aodh/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating aodh database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -17,6 +18,7 @@
- name: Creating aodh database user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
diff --git a/ansible/roles/aodh/tasks/bootstrap_service.yml b/ansible/roles/aodh/tasks/bootstrap_service.yml
index b3be49e933..24bca33dce 100644
--- a/ansible/roles/aodh/tasks/bootstrap_service.yml
+++ b/ansible/roles/aodh/tasks/bootstrap_service.yml
@@ -3,7 +3,7 @@
vars:
aodh_api: "{{ aodh_services['aodh-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -14,7 +14,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_aodh"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ aodh_api.volumes | reject('equalto', '') | list }}"
run_once: True
delegate_to: "{{ groups[aodh_api.group][0] }}"
diff --git a/ansible/roles/aodh/tasks/check-containers.yml b/ansible/roles/aodh/tasks/check-containers.yml
index a849226757..b7e2f7c29f 100644
--- a/ansible/roles/aodh/tasks/check-containers.yml
+++ b/ansible/roles/aodh/tasks/check-containers.yml
@@ -1,17 +1,3 @@
---
-- name: Check aodh containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ aodh_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/aodh/tasks/config.yml b/ansible/roles/aodh/tasks/config.yml
index 0f5d0c1e7e..cbb918243c 100644
--- a/ansible/roles/aodh/tasks/config.yml
+++ b/ansible/roles/aodh/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ aodh_services }}"
+ with_dict: "{{ aodh_services | select_services_enabled_and_mapped_to_host }}"
- name: Check if policies shall be overwritten
stat:
@@ -39,11 +36,7 @@
become: true
when:
- aodh_policy_file is defined
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ aodh_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ aodh_services | select_services_enabled_and_mapped_to_host }}"
- include_tasks: copy-certs.yml
when:
@@ -55,12 +48,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ aodh_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ aodh_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over aodh.conf
vars:
@@ -75,12 +63,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/aodh.conf"
mode: "0660"
become: true
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ aodh_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ aodh_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over wsgi-aodh files for services
vars:
@@ -90,8 +73,4 @@
dest: "{{ node_config_directory }}/aodh-api/wsgi-aodh.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - "Restart aodh-api container"
+ when: service | service_enabled_and_mapped_to_host
diff --git a/ansible/roles/aodh/tasks/config_validate.yml b/ansible/roles/aodh/tasks/config_validate.yml
new file mode 100644
index 0000000000..887b3cf0e5
--- /dev/null
+++ b/ansible/roles/aodh/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ aodh_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ aodh_config_validation }}"
diff --git a/ansible/roles/aodh/tasks/precheck.yml b/ansible/roles/aodh/tasks/precheck.yml
index 48dafbe961..8351e43594 100644
--- a/ansible/roles/aodh/tasks/precheck.yml
+++ b/ansible/roles/aodh/tasks/precheck.yml
@@ -8,8 +8,11 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- aodh_api
+ check_mode: false
register: container_facts
- name: Checking free port for Aodh API
diff --git a/ansible/roles/aodh/templates/aodh-api.json.j2 b/ansible/roles/aodh/templates/aodh-api.json.j2
index f9aa6cdfb0..b7d4feff77 100644
--- a/ansible/roles/aodh/templates/aodh-api.json.j2
+++ b/ansible/roles/aodh/templates/aodh-api.json.j2
@@ -20,6 +20,12 @@
"dest": "/etc/aodh/{{ aodh_policy_file }}",
"owner": "aodh",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/aodh/templates/aodh-evaluator.json.j2 b/ansible/roles/aodh/templates/aodh-evaluator.json.j2
index 995802a8b4..24dda5250b 100644
--- a/ansible/roles/aodh/templates/aodh-evaluator.json.j2
+++ b/ansible/roles/aodh/templates/aodh-evaluator.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/aodh/{{ aodh_policy_file }}",
"owner": "aodh",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/aodh/templates/aodh-listener.json.j2 b/ansible/roles/aodh/templates/aodh-listener.json.j2
index 31d1af6c92..44f74cea97 100644
--- a/ansible/roles/aodh/templates/aodh-listener.json.j2
+++ b/ansible/roles/aodh/templates/aodh-listener.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/aodh/{{ aodh_policy_file }}",
"owner": "aodh",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/aodh/templates/aodh-notifier.json.j2 b/ansible/roles/aodh/templates/aodh-notifier.json.j2
index 49339eaa36..dcf23d1eb3 100644
--- a/ansible/roles/aodh/templates/aodh-notifier.json.j2
+++ b/ansible/roles/aodh/templates/aodh-notifier.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/aodh/{{ aodh_policy_file }}",
"owner": "aodh",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/aodh/templates/aodh.conf.j2 b/ansible/roles/aodh/templates/aodh.conf.j2
index b92ffdd334..7aa1d664fb 100644
--- a/ansible/roles/aodh/templates/aodh.conf.j2
+++ b/ansible/roles/aodh/templates/aodh.conf.j2
@@ -17,7 +17,7 @@ max_pool_size = {{ database_max_pool_size }}
[keystone_authtoken]
service_type = alarming
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
www_authenticate_uri = {{ keystone_internal_url }}
@@ -60,8 +60,15 @@ topics = {{ aodh_enabled_notification_topics | map(attribute='name') | join(',')
driver = noop
{% endif %}
-{% if om_enable_rabbitmq_tls | bool %}
[oslo_messaging_rabbit]
+heartbeat_in_pthread = {{ service_name == 'aodh-api' }}
+{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
diff --git a/ansible/roles/barbican/defaults/main.yml b/ansible/roles/barbican/defaults/main.yml
index 0f7778175a..07e79ece6d 100644
--- a/ansible/roles/barbican/defaults/main.yml
+++ b/ansible/roles/barbican/defaults/main.yml
@@ -20,7 +20,8 @@ barbican_services:
enabled: "{{ enable_barbican }}"
mode: "http"
external: true
- port: "{{ barbican_api_port }}"
+ external_fqdn: "{{ barbican_external_fqdn }}"
+ port: "{{ barbican_api_public_port }}"
listen_port: "{{ barbican_api_listen_port }}"
tls_backend: "{{ barbican_enable_tls_backend }}"
barbican-keystone-listener:
@@ -40,6 +41,12 @@ barbican_services:
dimensions: "{{ barbican_worker_dimensions }}"
healthcheck: "{{ barbican_worker_healthcheck }}"
+####################
+# Config Validate
+####################
+barbican_config_validation:
+ - generator: "/barbican/etc/oslo-config-generator/barbican.conf"
+ config: "/etc/barbican/barbican.conf"
####################
# Database
@@ -67,15 +74,15 @@ barbican_database_shard:
####################
barbican_tag: "{{ openstack_tag }}"
-barbican_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/barbican-api"
+barbican_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}barbican-api"
barbican_api_tag: "{{ barbican_tag }}"
barbican_api_image_full: "{{ barbican_api_image }}:{{ barbican_api_tag }}"
-barbican_keystone_listener_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/barbican-keystone-listener"
+barbican_keystone_listener_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}barbican-keystone-listener"
barbican_keystone_listener_tag: "{{ barbican_tag }}"
barbican_keystone_listener_image_full: "{{ barbican_keystone_listener_image }}:{{ barbican_keystone_listener_tag }}"
-barbican_worker_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/barbican-worker"
+barbican_worker_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}barbican-worker"
barbican_worker_tag: "{{ barbican_tag }}"
barbican_worker_image_full: "{{ barbican_worker_image }}:{{ barbican_worker_tag }}"
@@ -128,19 +135,19 @@ barbican_api_default_volumes:
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "barbican:/var/lib/barbican/"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/barbican/barbican:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/barbican' if barbican_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/barbican:/dev-mode/barbican' if barbican_dev_mode | bool else '' }}"
barbican_keystone_listener_default_volumes:
- "{{ node_config_directory }}/barbican-keystone-listener/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/barbican/barbican:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/barbican' if barbican_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/barbican:/dev-mode/barbican' if barbican_dev_mode | bool else '' }}"
barbican_worker_default_volumes:
- "{{ node_config_directory }}/barbican-worker/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/barbican/barbican:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/barbican' if barbican_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/barbican:/dev-mode/barbican' if barbican_dev_mode | bool else '' }}"
barbican_extra_volumes: "{{ default_extra_volumes }}"
barbican_api_extra_volumes: "{{ barbican_extra_volumes }}"
@@ -206,3 +213,5 @@ barbican_enabled_notification_topics: "{{ barbican_notification_topics | selecta
# TLS
####################
barbican_enable_tls_backend: "{{ kolla_enable_tls_backend }}"
+
+barbican_copy_certs: "{{ kolla_copy_ca_into_containers | bool or barbican_enable_tls_backend | bool }}"
diff --git a/ansible/roles/barbican/handlers/main.yml b/ansible/roles/barbican/handlers/main.yml
index 256ad1f529..44be9fd5d3 100644
--- a/ansible/roles/barbican/handlers/main.yml
+++ b/ansible/roles/barbican/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "barbican-api"
service: "{{ barbican_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -12,15 +12,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart barbican-keystone-listener container
vars:
service_name: "barbican-keystone-listener"
service: "{{ barbican_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -28,15 +26,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart barbican-worker container
vars:
service_name: "barbican-worker"
service: "{{ barbican_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -44,5 +40,3 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/barbican/tasks/bootstrap.yml b/ansible/roles/barbican/tasks/bootstrap.yml
index 7f80093f20..4ace947a6c 100644
--- a/ansible/roles/barbican/tasks/bootstrap.yml
+++ b/ansible/roles/barbican/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating barbican database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -17,6 +18,7 @@
- name: Creating barbican database user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
diff --git a/ansible/roles/barbican/tasks/bootstrap_service.yml b/ansible/roles/barbican/tasks/bootstrap_service.yml
index 3e05024410..7081abca54 100644
--- a/ansible/roles/barbican/tasks/bootstrap_service.yml
+++ b/ansible/roles/barbican/tasks/bootstrap_service.yml
@@ -3,7 +3,7 @@
vars:
barbican_api: "{{ barbican_services['barbican-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -14,7 +14,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_barbican"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ barbican_api.volumes | reject('equalto', '') | list }}"
run_once: True
delegate_to: "{{ groups[barbican_api.group][0] }}"
diff --git a/ansible/roles/barbican/tasks/check-containers.yml b/ansible/roles/barbican/tasks/check-containers.yml
index e70d795667..b7e2f7c29f 100644
--- a/ansible/roles/barbican/tasks/check-containers.yml
+++ b/ansible/roles/barbican/tasks/check-containers.yml
@@ -1,17 +1,3 @@
---
-- name: Check barbican containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ barbican_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/barbican/tasks/config.yml b/ansible/roles/barbican/tasks/config.yml
index 4d48f4bd06..5a9394451f 100644
--- a/ansible/roles/barbican/tasks/config.yml
+++ b/ansible/roles/barbican/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ barbican_services }}"
+ with_dict: "{{ barbican_services | select_services_enabled_and_mapped_to_host }}"
- name: Ensuring vassals config directories exist
vars:
@@ -22,9 +19,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_items:
- "barbican-api/vassals"
@@ -49,7 +44,7 @@
- include_tasks: copy-certs.yml
when:
- - kolla_copy_ca_into_containers | bool or barbican_enable_tls_backend | bool
+ - barbican_copy_certs
- name: Copying over config.json files for services
template:
@@ -57,12 +52,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ barbican_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ barbican_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over barbican-api.ini
vars:
@@ -75,11 +65,7 @@
dest: "{{ node_config_directory }}/barbican-api/vassals/barbican-api.ini"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart barbican-api container
+ when: service | service_enabled_and_mapped_to_host
- name: Checking whether barbican-api-paste.ini file exists
vars:
@@ -89,9 +75,7 @@
run_once: True
delegate_to: localhost
register: check_barbican_api_paste_ini
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over barbican-api-paste.ini
vars:
@@ -102,11 +86,8 @@
mode: "0660"
become: true
when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ - service | service_enabled_and_mapped_to_host
- check_barbican_api_paste_ini.stat.exists
- notify:
- - Restart barbican-api container
- name: Copying over barbican.conf
vars:
@@ -121,12 +102,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/barbican.conf"
mode: "0660"
become: true
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ barbican_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ barbican_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over existing policy file
template:
@@ -136,8 +112,4 @@
become: true
when:
- barbican_policy_file is defined
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ barbican_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ barbican_services | select_services_enabled_and_mapped_to_host }}"
diff --git a/ansible/roles/barbican/tasks/config_validate.yml b/ansible/roles/barbican/tasks/config_validate.yml
new file mode 100644
index 0000000000..31143931f7
--- /dev/null
+++ b/ansible/roles/barbican/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ barbican_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ barbican_config_validation }}"
diff --git a/ansible/roles/barbican/tasks/precheck.yml b/ansible/roles/barbican/tasks/precheck.yml
index da3e4e4077..b33d069879 100644
--- a/ansible/roles/barbican/tasks/precheck.yml
+++ b/ansible/roles/barbican/tasks/precheck.yml
@@ -8,8 +8,11 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- barbican_api
+ check_mode: false
register: container_facts
- name: Checking free port for Barbican API
diff --git a/ansible/roles/barbican/templates/barbican-api.json.j2 b/ansible/roles/barbican/templates/barbican-api.json.j2
index a807c17ea4..b8c305ca07 100644
--- a/ansible/roles/barbican/templates/barbican-api.json.j2
+++ b/ansible/roles/barbican/templates/barbican-api.json.j2
@@ -37,6 +37,12 @@
"dest": "/etc/barbican/{{ barbican_policy_file }}",
"owner": "barbican",
"perm": "0600"
+ }{% endif %}{% if barbican_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/barbican/templates/barbican-keystone-listener.json.j2 b/ansible/roles/barbican/templates/barbican-keystone-listener.json.j2
index e0f1f15618..40d896cf72 100644
--- a/ansible/roles/barbican/templates/barbican-keystone-listener.json.j2
+++ b/ansible/roles/barbican/templates/barbican-keystone-listener.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/barbican/{{ barbican_policy_file }}",
"owner": "barbican",
"perm": "0600"
+ }{% endif %}{% if barbican_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/barbican/templates/barbican-worker.json.j2 b/ansible/roles/barbican/templates/barbican-worker.json.j2
index 81a0ca7b17..1e26e0cb41 100644
--- a/ansible/roles/barbican/templates/barbican-worker.json.j2
+++ b/ansible/roles/barbican/templates/barbican-worker.json.j2
@@ -13,6 +13,13 @@
"owner": "barbican",
"perm": "0600"
}{% endif %}
+ {% if barbican_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/barbican/templates/barbican.conf.j2 b/ansible/roles/barbican/templates/barbican.conf.j2
index 4ea9f53b46..f7085f155e 100644
--- a/ansible/roles/barbican/templates/barbican.conf.j2
+++ b/ansible/roles/barbican/templates/barbican.conf.j2
@@ -12,10 +12,15 @@ host_href = {{ barbican_public_endpoint }}
backlog = 4096
db_auto_create = False
-sql_connection = mysql+pymysql://{{ barbican_database_user }}:{{ barbican_database_password }}@{{ barbican_database_address }}/{{ barbican_database_name }}
transport_url = {{ rpc_transport_url }}
+[database]
+connection = mysql+pymysql://{{ barbican_database_user }}:{{ barbican_database_password }}@{{ barbican_database_address }}/{{ barbican_database_name }}
+connection_recycle_time = {{ database_connection_recycle_time }}
+max_pool_size = {{ database_max_pool_size }}
+max_retries = -1
+
# ================= Secret Store Plugin ===================
[secretstore]
namespace = barbican.secretstore.plugin
@@ -65,7 +70,7 @@ auth_type = password
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
@@ -78,11 +83,18 @@ topics = {{ barbican_enabled_notification_topics | map(attribute='name') | join(
driver = noop
{% endif %}
-{% if om_enable_rabbitmq_tls | bool %}
[oslo_messaging_rabbit]
+heartbeat_in_pthread = false
+{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
[oslo_middleware]
enable_proxy_headers_parsing = True
diff --git a/ansible/roles/bifrost/defaults/main.yml b/ansible/roles/bifrost/defaults/main.yml
index c64f939a4e..60b74d330b 100644
--- a/ansible/roles/bifrost/defaults/main.yml
+++ b/ansible/roles/bifrost/defaults/main.yml
@@ -4,10 +4,17 @@
####################
bifrost_tag: "{{ openstack_tag }}"
-bifrost_deploy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/bifrost-deploy"
+bifrost_deploy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}bifrost-deploy"
bifrost_deploy_tag: "{{ bifrost_tag }}"
bifrost_deploy_image_full: "{{ bifrost_deploy_image }}:{{ bifrost_deploy_tag }}"
bifrost_deploy_container_proxy: "{{ container_proxy }}"
bifrost_deploy_verbosity: "-vvvv"
+
+# Whether to enable the legacy ironic-inspector service
+# NOTE(wszumski): Bifrost plans to remove this option once the native in-band
+# inspection reaches feature parity. Please see:
+# https://bugs.launchpad.net/kolla/+bug/2054685 which contains links for
+# tracking the progress.
+bifrost_enable_ironic_inspector: true
diff --git a/ansible/roles/bifrost/tasks/config.yml b/ansible/roles/bifrost/tasks/config.yml
index 3ca431bfea..6a996193a0 100644
--- a/ansible/roles/bifrost/tasks/config.yml
+++ b/ansible/roles/bifrost/tasks/config.yml
@@ -17,6 +17,7 @@
- "{{ node_custom_config }}/{{ item }}.yml"
- "{{ node_custom_config }}/bifrost/{{ item }}.yml"
dest: "{{ node_config_directory }}/bifrost/{{ item }}.yml"
+ yaml_width: 131072
mode: "0660"
become: true
with_items:
diff --git a/ansible/roles/elasticsearch/tasks/check.yml b/ansible/roles/bifrost/tasks/config_validate.yml
similarity index 100%
rename from ansible/roles/elasticsearch/tasks/check.yml
rename to ansible/roles/bifrost/tasks/config_validate.yml
diff --git a/ansible/roles/bifrost/tasks/deploy-servers.yml b/ansible/roles/bifrost/tasks/deploy-servers.yml
index 7e71973393..9907bf78e9 100644
--- a/ansible/roles/bifrost/tasks/deploy-servers.yml
+++ b/ansible/roles/bifrost/tasks/deploy-servers.yml
@@ -5,7 +5,7 @@
{{ kolla_container_engine }} exec bifrost_deploy
bash -c 'export OS_CLOUD=bifrost &&
export BIFROST_INVENTORY_SOURCE=/etc/bifrost/servers.yml &&
- ansible-playbook {{ bifrost_deploy_verbosity }}}
+ ansible-playbook {{ bifrost_deploy_verbosity }}
-i /bifrost/playbooks/inventory/bifrost_inventory.py
/bifrost/playbooks/enroll-dynamic.yaml -e @/etc/bifrost/bifrost.yml'
diff --git a/ansible/roles/bifrost/tasks/reconfigure.yml b/ansible/roles/bifrost/tasks/reconfigure.yml
index d19a455fc4..63f746e714 100644
--- a/ansible/roles/bifrost/tasks/reconfigure.yml
+++ b/ansible/roles/bifrost/tasks/reconfigure.yml
@@ -1,7 +1,8 @@
---
- name: Ensuring the containers up
become: true
- kolla_docker:
+ kolla_container:
+ common_options: "{{ docker_common_options }}"
name: "{{ item.name }}"
action: "get_container_state"
register: container_state
@@ -27,7 +28,8 @@
# just remove the container and start again
- name: Containers config strategy
become: true
- kolla_docker:
+ kolla_container:
+ common_options: "{{ docker_common_options }}"
name: "{{ item.name }}"
action: "get_container_env"
register: container_envs
@@ -37,7 +39,8 @@
- name: Remove the containers
become: true
- kolla_docker:
+ kolla_container:
+ common_options: "{{ docker_common_options }}"
name: "{{ item[0]['name'] }}"
action: "remove_container"
register: remove_containers
@@ -55,7 +58,8 @@
- name: Restart containers
become: true
- kolla_docker:
+ kolla_container:
+ common_options: "{{ docker_common_options }}"
name: "{{ item[0]['name'] }}"
action: "restart_container"
when:
diff --git a/ansible/roles/bifrost/tasks/start.yml b/ansible/roles/bifrost/tasks/start.yml
index 53446f9df6..3c7022c80e 100644
--- a/ansible/roles/bifrost/tasks/start.yml
+++ b/ansible/roles/bifrost/tasks/start.yml
@@ -1,7 +1,7 @@
---
- name: Starting bifrost deploy container
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
image: "{{ bifrost_deploy_image_full }}"
diff --git a/ansible/roles/bifrost/tasks/stop.yml b/ansible/roles/bifrost/tasks/stop.yml
index 8220dbf580..1c961d2432 100644
--- a/ansible/roles/bifrost/tasks/stop.yml
+++ b/ansible/roles/bifrost/tasks/stop.yml
@@ -3,6 +3,8 @@
- name: Check if bifrost_deploy container is running
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- bifrost_deploy
register: container_facts
@@ -21,7 +23,7 @@
- name: Stopping bifrost_deploy container
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "bifrost_deploy"
diff --git a/ansible/roles/bifrost/templates/bifrost.yml.j2 b/ansible/roles/bifrost/templates/bifrost.yml.j2
index 07f5c3e57e..f2174cb201 100644
--- a/ansible/roles/bifrost/templates/bifrost.yml.j2
+++ b/ansible/roles/bifrost/templates/bifrost.yml.j2
@@ -33,3 +33,6 @@ generate_tls: true
# NOTE: Needs to be world-readable, writeable by root, and persistent, which
# the default /etc/bifrost is not.
tls_root: "/etc/bifrost-certs"
+
+# Whether to enable the legacy ironic-inspector service.
+enable_inspector: "{{ bifrost_enable_ironic_inspector }}"
diff --git a/ansible/roles/blazar/defaults/main.yml b/ansible/roles/blazar/defaults/main.yml
index 36d9d24a13..81e8325b99 100644
--- a/ansible/roles/blazar/defaults/main.yml
+++ b/ansible/roles/blazar/defaults/main.yml
@@ -14,11 +14,14 @@ blazar_services:
mode: "http"
external: false
port: "{{ blazar_api_port }}"
+ listen_port: "{{ blazar_api_listen_port }}"
blazar_api_external:
enabled: "{{ enable_blazar }}"
mode: "http"
external: true
- port: "{{ blazar_api_port }}"
+ external_fqdn: "{{ blazar_external_fqdn }}"
+ port: "{{ blazar_api_public_port }}"
+ listen_port: "{{ blazar_api_listen_port }}"
blazar-manager:
container_name: blazar_manager
group: blazar-manager
@@ -34,6 +37,13 @@ blazar_services:
####################
blazar_aggregate_pool_name: "freepool"
+####################
+# Config Validate
+####################
+blazar_config_validation:
+ - generator: "/blazar/etc/blazar/blazar-config-generator.conf"
+ config: "/etc/blazar/blazar.conf"
+
####################
# Database
####################
@@ -60,11 +70,11 @@ blazar_database_shard:
####################
blazar_tag: "{{ openstack_tag }}"
-blazar_manager_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/blazar-manager"
+blazar_manager_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}blazar-manager"
blazar_manager_tag: "{{ blazar_tag }}"
blazar_manager_image_full: "{{ blazar_manager_image }}:{{ blazar_manager_tag }}"
-blazar_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/blazar-api"
+blazar_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}blazar-api"
blazar_api_tag: "{{ blazar_tag }}"
blazar_api_image_full: "{{ blazar_api_image }}:{{ blazar_api_tag }}"
@@ -104,13 +114,13 @@ blazar_api_default_volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/blazar/blazar:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/blazar' if blazar_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/blazar:/dev-mode/blazar' if blazar_dev_mode | bool else '' }}"
blazar_manager_default_volumes:
- "{{ node_config_directory }}/blazar-manager/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/blazar/blazar:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/blazar' if blazar_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/blazar:/dev-mode/blazar' if blazar_dev_mode | bool else '' }}"
blazar_extra_volumes: "{{ default_extra_volumes }}"
blazar_api_extra_volumes: "{{ blazar_extra_volumes }}"
@@ -119,8 +129,8 @@ blazar_manager_extra_volumes: "{{ blazar_extra_volumes }}"
####################
# OpenStack
####################
-blazar_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ blazar_api_port }}/v1"
-blazar_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ blazar_api_port }}/v1"
+blazar_internal_endpoint: "{{ blazar_internal_base_endpoint }}/v1"
+blazar_public_endpoint: "{{ blazar_public_base_endpoint }}/v1"
blazar_logging_debug: "{{ openstack_logging_debug }}"
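
Note the split between port and listen_port in the external haproxy entry above: the public endpoint can now advertise a different port (and FQDN, via blazar_external_fqdn) than the one haproxy binds for the backends. A hedged sketch, assuming the blazar_api_public_port variable referenced in the service dict:

    ---
    # globals.yml (hypothetical override): expose blazar publicly on 443
    # while the internal API/listen port keeps its default.
    blazar_api_public_port: 443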
diff --git a/ansible/roles/blazar/handlers/main.yml b/ansible/roles/blazar/handlers/main.yml
index 4c08ddaef4..324672d0fb 100644
--- a/ansible/roles/blazar/handlers/main.yml
+++ b/ansible/roles/blazar/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "blazar-api"
service: "{{ blazar_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -12,15 +12,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart blazar-manager container
vars:
service_name: "blazar-manager"
service: "{{ blazar_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -28,5 +26,3 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/blazar/tasks/bootstrap.yml b/ansible/roles/blazar/tasks/bootstrap.yml
index c46a8ee993..46e51a2304 100644
--- a/ansible/roles/blazar/tasks/bootstrap.yml
+++ b/ansible/roles/blazar/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating blazar database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -17,6 +18,7 @@
- name: Creating blazar database user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
@@ -36,6 +38,7 @@
- name: Creating blazar host aggregate
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: os_nova_host_aggregate
module_args:
auth: "{{ openstack_auth }}"
diff --git a/ansible/roles/blazar/tasks/bootstrap_service.yml b/ansible/roles/blazar/tasks/bootstrap_service.yml
index ac90c642f3..0e4b552194 100644
--- a/ansible/roles/blazar/tasks/bootstrap_service.yml
+++ b/ansible/roles/blazar/tasks/bootstrap_service.yml
@@ -3,7 +3,7 @@
vars:
blazar_api: "{{ blazar_services['blazar-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -14,7 +14,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_blazar"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ blazar_api.volumes | reject('equalto', '') | list }}"
run_once: True
delegate_to: "{{ groups[blazar_api.group][0] }}"
diff --git a/ansible/roles/blazar/tasks/check-containers.yml b/ansible/roles/blazar/tasks/check-containers.yml
index 3553a0b3f7..b7e2f7c29f 100644
--- a/ansible/roles/blazar/tasks/check-containers.yml
+++ b/ansible/roles/blazar/tasks/check-containers.yml
@@ -1,17 +1,3 @@
---
-- name: Check blazar containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ blazar_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/blazar/tasks/config.yml b/ansible/roles/blazar/tasks/config.yml
index 24c9e0c536..652f2c1c5f 100644
--- a/ansible/roles/blazar/tasks/config.yml
+++ b/ansible/roles/blazar/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ blazar_services }}"
+ with_dict: "{{ blazar_services | select_services_enabled_and_mapped_to_host }}"
- name: Check if policies shall be overwritten
stat:
@@ -41,12 +38,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ blazar_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ blazar_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over blazar.conf
vars:
@@ -61,12 +53,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/blazar.conf"
mode: "0660"
become: true
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ blazar_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ blazar_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over existing policy file
template:
@@ -76,8 +63,4 @@
become: true
when:
- blazar_policy_file is defined
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ blazar_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ blazar_services | select_services_enabled_and_mapped_to_host }}"
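
The recurring refactor in this file replaces a when/with_dict pair with the select_services_enabled_and_mapped_to_host filter; the two forms are equivalent, but the filter prunes disabled or unmapped services before the loop runs, so skipped items no longer clutter play output. Side by side, using the variable names from the tasks above:

    # Before: filter each item at iteration time.
    with_dict: "{{ blazar_services }}"
    when:
      - inventory_hostname in groups[item.value.group]
      - item.value.enabled | bool

    # After: prune the dict up front.
    with_dict: "{{ blazar_services | select_services_enabled_and_mapped_to_host }}"

The dropped notify handlers are deliberate: restart decisions now live in the shared service-check-containers role imported from check-containers.yml.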
diff --git a/ansible/roles/blazar/tasks/config_validate.yml b/ansible/roles/blazar/tasks/config_validate.yml
new file mode 100644
index 0000000000..9a18ef3d5f
--- /dev/null
+++ b/ansible/roles/blazar/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ blazar_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ blazar_config_validation }}"
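
Each entry in a *_config_validation list pairs an oslo-config generator file with the rendered config it guards; the shared service-config-validate role iterates over the list. A hedged sketch of extending it (the second pair below is purely hypothetical; any generator/config pair shipped in the service image would do):

    ---
    blazar_config_validation:
      - generator: "/blazar/etc/blazar/blazar-config-generator.conf"
        config: "/etc/blazar/blazar.conf"
      # hypothetical extra entry for a second rendered file
      - generator: "/blazar/etc/blazar/other-generator.conf"
        config: "/etc/blazar/other.conf"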
diff --git a/ansible/roles/blazar/tasks/precheck.yml b/ansible/roles/blazar/tasks/precheck.yml
index f47b01e52b..c77486e814 100644
--- a/ansible/roles/blazar/tasks/precheck.yml
+++ b/ansible/roles/blazar/tasks/precheck.yml
@@ -8,8 +8,11 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- blazar_api
+ check_mode: false
register: container_facts
- name: Checking free port for blazar API
diff --git a/ansible/roles/blazar/templates/blazar-api.json.j2 b/ansible/roles/blazar/templates/blazar-api.json.j2
index 02a8e07591..50fb62b38b 100644
--- a/ansible/roles/blazar/templates/blazar-api.json.j2
+++ b/ansible/roles/blazar/templates/blazar-api.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/blazar/{{ blazar_policy_file }}",
"owner": "blazar",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/blazar/templates/blazar-manager.json.j2 b/ansible/roles/blazar/templates/blazar-manager.json.j2
index 8dda3afbd4..de550dc52f 100644
--- a/ansible/roles/blazar/templates/blazar-manager.json.j2
+++ b/ansible/roles/blazar/templates/blazar-manager.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/blazar/{{ blazar_policy_file }}",
"owner": "blazar",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/blazar/templates/blazar.conf.j2 b/ansible/roles/blazar/templates/blazar.conf.j2
index 7311b66ecc..4538420804 100644
--- a/ansible/roles/blazar/templates/blazar.conf.j2
+++ b/ansible/roles/blazar/templates/blazar.conf.j2
@@ -11,6 +11,7 @@ os_admin_username = {{ blazar_keystone_user }}
os_admin_password = {{ blazar_keystone_password }}
os_admin_project_name = service
identity_service = identity
+cafile = {{ openstack_cacert }}
[api]
api_v2_controllers = oshosts,leases
@@ -32,7 +33,7 @@ service_token_roles_required = True
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
@@ -53,11 +54,18 @@ topics = {{ blazar_enabled_notification_topics | map(attribute='name') | join(',
driver = noop
{% endif %}
-{% if om_enable_rabbitmq_tls | bool %}
[oslo_messaging_rabbit]
+heartbeat_in_pthread = false
+{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
{% if blazar_policy_file is defined %}
[oslo_policy]
diff --git a/ansible/roles/ceilometer/defaults/main.yml b/ansible/roles/ceilometer/defaults/main.yml
index 7cbcd5674a..04cf4bbc32 100644
--- a/ansible/roles/ceilometer/defaults/main.yml
+++ b/ansible/roles/ceilometer/defaults/main.yml
@@ -22,7 +22,7 @@ ceilometer_services:
enabled: True
privileged: True
image: "{{ ceilometer_compute_image_full }}"
- volumes: "{{ ceilometer_compute_default_volumes + ceilometer_compute_extra_volumes }}"
+ volumes: "{{ ceilometer_compute_default_volumes + ceilometer_compute_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
dimensions: "{{ ceilometer_compute_dimensions }}"
healthcheck: "{{ ceilometer_compute_healthcheck }}"
ceilometer-ipmi:
@@ -34,25 +34,31 @@ ceilometer_services:
dimensions: "{{ ceilometer_ipmi_dimensions }}"
healthcheck: "{{ ceilometer_ipmi_healthcheck }}"
+####################
+# Config Validate
+####################
+ceilometer_config_validation:
+ - generator: "/ceilometer/etc/ceilometer/ceilometer-config-generator.conf"
+ config: "/etc/ceilometer/ceilometer.conf"
####################
# Docker
####################
ceilometer_tag: "{{ openstack_tag }}"
-ceilometer_notification_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/ceilometer-notification"
+ceilometer_notification_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}ceilometer-notification"
ceilometer_notification_tag: "{{ ceilometer_tag }}"
ceilometer_notification_image_full: "{{ ceilometer_notification_image }}:{{ ceilometer_notification_tag }}"
-ceilometer_central_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/ceilometer-central"
+ceilometer_central_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}ceilometer-central"
ceilometer_central_tag: "{{ ceilometer_tag }}"
ceilometer_central_image_full: "{{ ceilometer_central_image }}:{{ ceilometer_central_tag }}"
-ceilometer_compute_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/ceilometer-compute"
+ceilometer_compute_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}ceilometer-compute"
ceilometer_compute_tag: "{{ ceilometer_tag }}"
ceilometer_compute_image_full: "{{ ceilometer_compute_image }}:{{ ceilometer_compute_tag }}"
-ceilometer_ipmi_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/ceilometer-ipmi"
+ceilometer_ipmi_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}ceilometer-ipmi"
ceilometer_ipmi_tag: "{{ ceilometer_tag }}"
ceilometer_ipmi_image_full: "{{ ceilometer_ipmi_image }}:{{ ceilometer_ipmi_tag }}"
@@ -118,30 +124,30 @@ ceilometer_notification_default_volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/ceilometer/ceilometer:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/ceilometer' if ceilometer_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/ceilometer:/dev-mode/ceilometer' if ceilometer_dev_mode | bool else '' }}"
ceilometer_central_default_volumes:
- "{{ node_config_directory }}/ceilometer-central/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "ceilometer:/var/lib/ceilometer/"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/ceilometer/ceilometer:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/ceilometer' if ceilometer_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/ceilometer:/dev-mode/ceilometer' if ceilometer_dev_mode | bool else '' }}"
ceilometer_compute_default_volumes:
- "{{ node_config_directory }}/ceilometer-compute/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "/run/:/run/:shared"
+ - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
- "ceilometer:/var/lib/ceilometer/"
- "kolla_logs:/var/log/kolla/"
- "{{ ceilometer_libvirt_volume }}:/var/lib/libvirt"
- - "{{ kolla_dev_repos_directory ~ '/ceilometer/ceilometer:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/ceilometer' if ceilometer_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/ceilometer:/dev-mode/ceilometer' if ceilometer_dev_mode | bool else '' }}"
ceilometer_ipmi_default_volumes:
- "{{ node_config_directory }}/ceilometer-ipmi/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "ceilometer:/var/lib/ceilometer/"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/ceilometer/ceilometer:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/ceilometer' if ceilometer_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/ceilometer:/dev-mode/ceilometer' if ceilometer_dev_mode | bool else '' }}"
ceilometer_extra_volumes: "{{ default_extra_volumes }}"
ceilometer_notification_extra_volumes: "{{ ceilometer_extra_volumes }}"
diff --git a/ansible/roles/ceilometer/handlers/main.yml b/ansible/roles/ceilometer/handlers/main.yml
index a8cff1655a..6d3ae24350 100644
--- a/ansible/roles/ceilometer/handlers/main.yml
+++ b/ansible/roles/ceilometer/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "ceilometer-notification"
service: "{{ ceilometer_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -12,15 +12,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart ceilometer-central container
vars:
service_name: "ceilometer-central"
service: "{{ ceilometer_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -28,15 +26,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart ceilometer-compute container
vars:
service_name: "ceilometer-compute"
service: "{{ ceilometer_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -45,15 +41,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart ceilometer-ipmi container
vars:
service_name: "ceilometer-ipmi"
service: "{{ ceilometer_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -61,5 +55,3 @@
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/ceilometer/tasks/bootstrap_service.yml b/ansible/roles/ceilometer/tasks/bootstrap_service.yml
index c5cb839201..c62b567653 100644
--- a/ansible/roles/ceilometer/tasks/bootstrap_service.yml
+++ b/ansible/roles/ceilometer/tasks/bootstrap_service.yml
@@ -3,7 +3,7 @@
vars:
ceilometer_notification: "{{ ceilometer_services['ceilometer-notification'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -16,7 +16,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_ceilometer"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ ceilometer_notification.volumes | reject('equalto', '') | list }}"
run_once: True
delegate_to: "{{ groups[ceilometer_notification.group][0] }}"
diff --git a/ansible/roles/ceilometer/tasks/check-containers.yml b/ansible/roles/ceilometer/tasks/check-containers.yml
index 95829411fe..b7e2f7c29f 100644
--- a/ansible/roles/ceilometer/tasks/check-containers.yml
+++ b/ansible/roles/ceilometer/tasks/check-containers.yml
@@ -1,18 +1,3 @@
---
-- name: Check ceilometer containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- privileged: "{{ item.value.privileged | default(False) }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ ceilometer_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/ceilometer/tasks/config.yml b/ansible/roles/ceilometer/tasks/config.yml
index a4b46e96fd..218cef78ad 100644
--- a/ansible/roles/ceilometer/tasks/config.yml
+++ b/ansible/roles/ceilometer/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ ceilometer_services }}"
+ with_dict: "{{ ceilometer_services | select_services_enabled_and_mapped_to_host }}"
- name: Check if the folder for custom meter definitions exist
stat:
@@ -42,11 +39,9 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- with_dict: "{{ ceilometer_services }}"
+ with_dict: "{{ ceilometer_services | select_services_enabled_and_mapped_to_host }}"
when:
- should_copy_custom_meter_definitions
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- name: Copying custom meter definitions to Ceilometer
copy:
@@ -57,11 +52,7 @@
become: true
when:
- should_copy_custom_meter_definitions
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ ceilometer_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ ceilometer_services | select_services_enabled_and_mapped_to_host }}"
- name: Check if the folder ["{{ node_custom_config }}/ceilometer/{{ ceilometer_dynamic_pollsters_local_folder }}"] for dynamic pollsters definitions exist
stat:
@@ -100,8 +91,6 @@
when:
- should_copy_dynamic_pollster_definitions
- inventory_hostname in groups['ceilometer-central']
- notify:
- - "Restart ceilometer-central container"
- name: Check if custom polling.yaml exists
stat:
@@ -118,11 +107,7 @@
become: true
when:
- ceilometer_polling_file.stat.exists
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ ceilometer_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ ceilometer_services | select_services_enabled_and_mapped_to_host }}"
- name: Set ceilometer polling file's path
set_fact:
@@ -145,11 +130,7 @@
become: true
when:
- ceilometer_gnocchi_resources_file.stat.exists
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ ceilometer_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ ceilometer_services | select_services_enabled_and_mapped_to_host }}"
- name: Set ceilometer gnocchi_resources file's path
set_fact:
@@ -186,12 +167,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ ceilometer_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ ceilometer_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over ceilometer.conf
vars:
@@ -206,12 +182,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/ceilometer.conf"
mode: "0660"
become: true
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ ceilometer_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ ceilometer_services | select_services_enabled_and_mapped_to_host }}"
- name: Check custom event_definitions.yaml exists
stat:
@@ -231,10 +202,7 @@
register: ceilometer_event_definitions_overwriting
when:
- ceilometer_event_definitions_file.stat.exists
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart ceilometer-notification container
+ - service | service_enabled_and_mapped_to_host
- name: Copying over event_definitions.yaml for notification service
vars:
@@ -246,49 +214,21 @@
become: true
register: ceilometer_event_definitions
when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ - service | service_enabled_and_mapped_to_host
- not ceilometer_event_definitions_file.stat.exists
- notify:
- - Restart ceilometer-notification container
-
-- name: Check custom event_pipeline.yaml exists
- stat:
- path: "{{ node_custom_config }}/ceilometer/event_pipeline.yaml"
- delegate_to: localhost
- register: ceilometer_event_pipeline_file
- name: Copying over event_pipeline.yaml
vars:
service: "{{ ceilometer_services['ceilometer-notification'] }}"
- copy:
- src: "{{ node_custom_config }}/ceilometer/event_pipeline.yaml"
- dest: "{{ node_config_directory }}/ceilometer-notification/event_pipeline.yaml"
- force: True
- mode: "0660"
- become: true
- register: ceilometer_event_pipeline_overwriting
- when:
- - ceilometer_event_pipeline_file.stat.exists
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart ceilometer-notification container
-
-- name: Copying over event_pipeline.yaml for notification service
- vars:
- service: "{{ ceilometer_services['ceilometer-notification'] }}"
- template:
- src: "event_pipeline.yaml.j2"
+ merge_yaml:
+ sources:
+ - "{{ role_path }}/templates/event_pipeline.yaml.j2"
+ - "{{ node_custom_config }}/ceilometer/event_pipeline.yaml"
+ - "{{ node_custom_config }}/ceilometer/{{ inventory_hostname }}/event_pipeline.yaml"
dest: "{{ node_config_directory }}/ceilometer-notification/event_pipeline.yaml"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- - not ceilometer_event_pipeline_file.stat.exists
- notify:
- - Restart ceilometer-notification container
+ when: service | service_enabled_and_mapped_to_host
- name: Check custom pipeline.yaml exists
stat:
@@ -296,13 +236,13 @@
delegate_to: localhost
register: ceilometer_pipeline_file
-- name: Copying over pipeline.yaml
+- name: Copying over custom pipeline.yaml file
vars:
services_require_pipeline:
- ceilometer-compute
- ceilometer-central
- ceilometer-notification
- copy:
+ template:
src: "{{ node_custom_config }}/ceilometer/pipeline.yaml"
dest: "{{ node_config_directory }}/{{ item.key }}/pipeline.yaml"
force: True
@@ -311,12 +251,8 @@
register: ceilometer_pipeline_overwriting
when:
- ceilometer_pipeline_file.stat.exists
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- item.key in services_require_pipeline
- with_dict: "{{ ceilometer_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ ceilometer_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over pipeline.yaml file
vars:
@@ -330,13 +266,9 @@
mode: "0660"
become: true
when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- item.key in services_require_pipeline
- not ceilometer_pipeline_file.stat.exists
- with_dict: "{{ ceilometer_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ ceilometer_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying VMware vCenter CA file
become: true
@@ -349,10 +281,7 @@
when:
- nova_compute_virt_type == "vmware"
- not vmware_vcenter_insecure | bool
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart ceilometer-compute container
+ - service | service_enabled_and_mapped_to_host
- name: Copying over existing policy file
template:
@@ -362,8 +291,4 @@
become: true
when:
- ceilometer_policy_file is defined
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ ceilometer_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ ceilometer_services | select_services_enabled_and_mapped_to_host }}"
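
The event_pipeline.yaml handling above is the most significant change in this file: instead of an all-or-nothing copy/template pair, merge_yaml now layers the role template, a global override and a per-host override, in that order. A hedged sketch of a global override, assuming the stock ceilometer sources/sinks schema (publisher URL illustrative):

    ---
    # {{ node_custom_config }}/ceilometer/event_pipeline.yaml (hypothetical):
    # merged over the role template rather than replacing it wholesale.
    sources:
      - name: event_source
        events:
          - "*"
        sinks:
          - event_sink
    sinks:
      - name: event_sink
        publishers:
          - gnocchi://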
diff --git a/ansible/roles/ceilometer/tasks/config_validate.yml b/ansible/roles/ceilometer/tasks/config_validate.yml
new file mode 100644
index 0000000000..ef646be6b3
--- /dev/null
+++ b/ansible/roles/ceilometer/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ ceilometer_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ ceilometer_config_validation }}"
diff --git a/ansible/roles/ceilometer/tasks/precheck.yml b/ansible/roles/ceilometer/tasks/precheck.yml
index 96240ca94d..4f5580f135 100644
--- a/ansible/roles/ceilometer/tasks/precheck.yml
+++ b/ansible/roles/ceilometer/tasks/precheck.yml
@@ -5,11 +5,10 @@
service_precheck_services: "{{ ceilometer_services }}"
service_name: "{{ project_name }}"
-- name: Checking gnocchi backend for ceilometer
- fail:
- msg: "gnocchi is required but not enabled"
+- name: Checking Ceilometer publishers
+ assert:
+ that:
+ - not (enable_ceilometer | bool) or enable_gnocchi | bool or enable_ceilometer_prometheus_pushgateway | bool
+ msg: "At least one Ceilometer publisher must be enabled"
run_once: True
changed_when: false
- when:
- - enable_ceilometer | bool
- - not (enable_gnocchi | bool or enable_ceilometer_prometheus_pushgateway | bool)
diff --git a/ansible/roles/ceilometer/tasks/register.yml b/ansible/roles/ceilometer/tasks/register.yml
index de5a675022..d4f0961b83 100644
--- a/ansible/roles/ceilometer/tasks/register.yml
+++ b/ansible/roles/ceilometer/tasks/register.yml
@@ -8,7 +8,8 @@
- name: Associate the ResellerAdmin role and ceilometer user
become: true
kolla_toolbox:
- module_name: "os_user_role"
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: openstack.cloud.role_assignment
module_args:
project: "service"
user: "{{ ceilometer_keystone_user }}"
diff --git a/ansible/roles/ceilometer/templates/ceilometer-central.json.j2 b/ansible/roles/ceilometer/templates/ceilometer-central.json.j2
index 5912248190..dbb2e2dc08 100644
--- a/ansible/roles/ceilometer/templates/ceilometer-central.json.j2
+++ b/ansible/roles/ceilometer/templates/ceilometer-central.json.j2
@@ -42,7 +42,13 @@
"dest": "/etc/ceilometer/pipeline.yaml",
"owner": "ceilometer",
"perm": "0600"
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/ceilometer/templates/ceilometer-compute.json.j2 b/ansible/roles/ceilometer/templates/ceilometer-compute.json.j2
index 4cd4f45c01..8b00500eed 100644
--- a/ansible/roles/ceilometer/templates/ceilometer-compute.json.j2
+++ b/ansible/roles/ceilometer/templates/ceilometer-compute.json.j2
@@ -42,6 +42,12 @@
"dest": "/etc/ceilometer/vmware_ca",
"owner": "ceilometer",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/ceilometer/templates/ceilometer-ipmi.json.j2 b/ansible/roles/ceilometer/templates/ceilometer-ipmi.json.j2
index 5c44f8c79e..fca9dbc481 100644
--- a/ansible/roles/ceilometer/templates/ceilometer-ipmi.json.j2
+++ b/ansible/roles/ceilometer/templates/ceilometer-ipmi.json.j2
@@ -30,6 +30,12 @@
"dest": "/etc/ceilometer/meters.d",
"owner": "ceilometer",
"perm": "0700"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/ceilometer/templates/ceilometer-notification.json.j2 b/ansible/roles/ceilometer/templates/ceilometer-notification.json.j2
index a7b2492a55..828fcfbe6a 100644
--- a/ansible/roles/ceilometer/templates/ceilometer-notification.json.j2
+++ b/ansible/roles/ceilometer/templates/ceilometer-notification.json.j2
@@ -42,6 +42,12 @@
"dest": "/etc/ceilometer/{{ ceilometer_policy_file }}",
"owner": "ceilometer",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/ceilometer/templates/ceilometer.conf.j2 b/ansible/roles/ceilometer/templates/ceilometer.conf.j2
index a02ad6ed14..309c5379b7 100644
--- a/ansible/roles/ceilometer/templates/ceilometer.conf.j2
+++ b/ansible/roles/ceilometer/templates/ceilometer.conf.j2
@@ -35,11 +35,18 @@ ca_file = /etc/ceilometer/vmware_ca
[oslo_messaging_notifications]
transport_url = {{ notify_transport_url }}
-{% if om_enable_rabbitmq_tls | bool %}
[oslo_messaging_rabbit]
+heartbeat_in_pthread = false
+{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
{% if ceilometer_policy_file is defined %}
[oslo_policy]
diff --git a/ansible/roles/ceilometer/templates/event_definitions.yaml.j2 b/ansible/roles/ceilometer/templates/event_definitions.yaml.j2
index d87e1dca86..051afb7a7b 100644
--- a/ansible/roles/ceilometer/templates/event_definitions.yaml.j2
+++ b/ansible/roles/ceilometer/templates/event_definitions.yaml.j2
@@ -144,31 +144,6 @@
fields: ['_context_trustor_user_id', '_context_user_id']
resource_id:
fields: payload.stack_identity
-- event_type: sahara.cluster.*
- traits: &sahara_crud
- project_id:
- fields: payload.project_id
- user_id:
- fields: _context_user_id
- resource_id:
- fields: payload.cluster_id
-- event_type: sahara.cluster.health
- traits: &sahara_health
- <<: *sahara_crud
- verification_id:
- fields: payload.verification_id
- health_check_status:
- fields: payload.health_check_status
- health_check_name:
- fields: payload.health_check_name
- health_check_description:
- fields: payload.health_check_description
- created_at:
- type: datetime
- fields: payload.created_at
- updated_at:
- type: datetime
- fields: payload.updated_at
- event_type: ['identity.user.*', 'identity.project.*', 'identity.group.*', 'identity.role.*', 'identity.OS-TRUST:trust.*',
'identity.region.*', 'identity.service.*', 'identity.endpoint.*', 'identity.policy.*']
traits: &identity_crud
diff --git a/ansible/roles/ceph-rgw/defaults/main.yml b/ansible/roles/ceph-rgw/defaults/main.yml
index dfa53a9106..8d916f22bf 100644
--- a/ansible/roles/ceph-rgw/defaults/main.yml
+++ b/ansible/roles/ceph-rgw/defaults/main.yml
@@ -16,7 +16,8 @@ ceph_rgw_services:
enabled: "{{ enable_ceph_rgw_loadbalancer | bool }}"
mode: "http"
external: true
- port: "{{ ceph_rgw_port }}"
+ external_fqdn: "{{ ceph_rgw_external_fqdn }}"
+ port: "{{ ceph_rgw_public_port }}"
custom_member_list: "{{ ceph_rgw_haproxy_members }}"
####################
@@ -59,8 +60,8 @@ ceph_rgw_swift_account_in_url: false
ceph_rgw_endpoint_path: "{{ '/' if ceph_rgw_swift_compatibility | bool else '/swift/' }}v1{% if ceph_rgw_swift_account_in_url | bool %}/AUTH_%(project_id)s{% endif %}"
-ceph_rgw_internal_endpoint: "{{ internal_protocol }}://{{ ceph_rgw_internal_fqdn | put_address_in_context('url') }}:{{ ceph_rgw_port }}{{ ceph_rgw_endpoint_path }}"
-ceph_rgw_public_endpoint: "{{ public_protocol }}://{{ ceph_rgw_external_fqdn | put_address_in_context('url') }}:{{ ceph_rgw_port }}{{ ceph_rgw_endpoint_path }}"
+ceph_rgw_internal_endpoint: "{{ ceph_rgw_internal_base_endpoint + ceph_rgw_endpoint_path }}"
+ceph_rgw_public_endpoint: "{{ ceph_rgw_public_base_endpoint + ceph_rgw_endpoint_path }}"
ceph_rgw_keystone_user: "ceph_rgw"
diff --git a/ansible/roles/freezer/tasks/check.yml b/ansible/roles/ceph-rgw/tasks/config_validate.yml
similarity index 100%
rename from ansible/roles/freezer/tasks/check.yml
rename to ansible/roles/ceph-rgw/tasks/config_validate.yml
diff --git a/ansible/roles/certificates/tasks/generate-backend.yml b/ansible/roles/certificates/tasks/generate-backend.yml
index edb7789134..8b9b0600cf 100644
--- a/ansible/roles/certificates/tasks/generate-backend.yml
+++ b/ansible/roles/certificates/tasks/generate-backend.yml
@@ -77,3 +77,17 @@
dest: "{{ kolla_certificates_dir }}/rabbitmq-key.pem"
when:
- rabbitmq_enable_tls | bool
+
+- name: Copy backend TLS certificate and key for MariaDB
+ copy:
+ src: "{{ item.src }}"
+ dest: "{{ item.dest }}"
+ mode: "0660"
+ remote_src: true
+ with_items:
+ - src: "{{ kolla_tls_backend_cert }}"
+ dest: "{{ kolla_certificates_dir }}/mariadb-cert.pem"
+ - src: "{{ kolla_tls_backend_key }}"
+ dest: "{{ kolla_certificates_dir }}/mariadb-key.pem"
+ when:
+ - database_enable_tls_backend | bool
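
The new MariaDB branch mirrors the RabbitMQ one above it: when backend database TLS is requested, the operator-supplied backend certificate and key are copied under the names the mariadb role expects. A minimal sketch of enabling it, assuming the flag used in this change (kolla_tls_backend_cert and kolla_tls_backend_key must already point at a valid pair):

    ---
    # globals.yml (hypothetical override)
    database_enable_tls_backend: "yes"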
diff --git a/ansible/roles/certificates/tasks/generate.yml b/ansible/roles/certificates/tasks/generate.yml
index b38f8ab41f..ec24316efa 100644
--- a/ansible/roles/certificates/tasks/generate.yml
+++ b/ansible/roles/certificates/tasks/generate.yml
@@ -59,16 +59,36 @@
path: "{{ external_dir }}/external.key"
mode: "0660"
state: file
+ when:
+ - letsencrypt_managed_certs == 'internal' or letsencrypt_managed_certs == '' or database_enable_tls_internal | bool
+ - kolla_enable_tls_external | bool or database_enable_tls_internal | bool
- - name: Creating external Server PEM File
- assemble:
- regexp: \.(crt|key)$
- src: "{{ external_dir }}"
- dest: "{{ kolla_external_fqdn_cert }}"
- mode: "0660"
+- name: Creating external Server PEM File
+ assemble:
+ regexp: \.(crt|key)$
+ src: "{{ external_dir }}"
+ dest: "{{ kolla_external_fqdn_cert }}"
+ mode: "0660"
when:
+ - letsencrypt_managed_certs == 'internal' or letsencrypt_managed_certs == ''
- kolla_enable_tls_external | bool
+- block:
+ - name: Copy Certificate for ProxySQL
+ copy:
+ src: "{{ external_dir }}/external.crt"
+ dest: "{{ kolla_certificates_dir }}/proxysql-cert.pem"
+ mode: "0660"
+
+ - name: Copy Key for ProxySQL
+ copy:
+ src: "{{ external_dir }}/external.key"
+ dest: "{{ kolla_certificates_dir }}/proxysql-key.pem"
+ mode: "0660"
+ when:
+ - database_enable_tls_internal | bool
+ - kolla_same_external_internal_vip | bool
+
- block:
- name: Copy the external PEM file to be the internal when internal + external are same network
copy:
@@ -77,6 +97,7 @@
remote_src: yes
mode: "0660"
when:
+ - letsencrypt_managed_certs == 'external' or letsencrypt_managed_certs == ''
- kolla_enable_tls_external | bool
- kolla_enable_tls_internal | bool
- kolla_same_external_internal_vip | bool
@@ -129,13 +150,34 @@
path: "{{ internal_dir }}/internal.key"
mode: "0660"
state: file
+ when:
+ - letsencrypt_managed_certs == 'external' or letsencrypt_managed_certs == '' or database_enable_tls_internal | bool
+ - kolla_enable_tls_internal | bool or database_enable_tls_internal | bool
+ - not kolla_same_external_internal_vip | bool
- - name: Creating internal Server PEM File
- assemble:
- regexp: \.(crt|key)$
- src: "{{ internal_dir }}"
- dest: "{{ kolla_internal_fqdn_cert }}"
- mode: "0660"
+- name: Creating internal Server PEM File
+ assemble:
+ regexp: \.(crt|key)$
+ src: "{{ internal_dir }}"
+ dest: "{{ kolla_internal_fqdn_cert }}"
+ mode: "0660"
when:
+ - letsencrypt_managed_certs == 'external' or letsencrypt_managed_certs == ''
- kolla_enable_tls_internal | bool
- not kolla_same_external_internal_vip | bool
+
+- block:
+ - name: Copy Certificate for ProxySQL
+ copy:
+ src: "{{ internal_dir }}/internal.crt"
+ dest: "{{ kolla_certificates_dir }}/proxysql-cert.pem"
+ mode: "0660"
+
+ - name: Copy Key for ProxySQL
+ copy:
+ src: "{{ internal_dir }}/internal.key"
+ dest: "{{ kolla_certificates_dir }}/proxysql-key.pem"
+ mode: "0660"
+ when:
+ - database_enable_tls_internal | bool
+ - not kolla_same_external_internal_vip | bool
diff --git a/ansible/roles/certificates/tasks/main.yml b/ansible/roles/certificates/tasks/main.yml
index eccd6d668d..e0aa890982 100644
--- a/ansible/roles/certificates/tasks/main.yml
+++ b/ansible/roles/certificates/tasks/main.yml
@@ -3,6 +3,6 @@
- include_tasks: generate.yml
- include_tasks: generate-backend.yml
when:
- - kolla_enable_tls_backend | bool or rabbitmq_enable_tls | bool
+ - kolla_enable_tls_backend | bool or rabbitmq_enable_tls | bool or database_enable_tls_backend | bool
- include_tasks: generate-libvirt.yml
when: certificates_generate_libvirt | bool
diff --git a/ansible/roles/cinder/defaults/main.yml b/ansible/roles/cinder/defaults/main.yml
index d7d52cc106..6e5d30dad0 100644
--- a/ansible/roles/cinder/defaults/main.yml
+++ b/ansible/roles/cinder/defaults/main.yml
@@ -20,7 +20,8 @@ cinder_services:
enabled: "{{ enable_cinder }}"
mode: "http"
external: true
- port: "{{ cinder_api_port }}"
+ external_fqdn: "{{ cinder_external_fqdn }}"
+ port: "{{ cinder_api_public_port }}"
listen_port: "{{ cinder_api_listen_port }}"
tls_backend: "{{ cinder_enable_tls_backend }}"
cinder-scheduler:
@@ -39,7 +40,7 @@ cinder_services:
privileged: True
ipc_mode: "host"
tmpfs: "{{ cinder_volume_tmpfs }}"
- volumes: "{{ cinder_volume_default_volumes + cinder_volume_extra_volumes }}"
+ volumes: "{{ cinder_volume_default_volumes + cinder_volume_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
dimensions: "{{ cinder_volume_dimensions }}"
healthcheck: "{{ cinder_volume_healthcheck }}"
cinder-backup:
@@ -48,10 +49,17 @@ cinder_services:
enabled: "{{ enable_cinder_backup | bool }}"
image: "{{ cinder_backup_image_full }}"
privileged: True
- volumes: "{{ cinder_backup_default_volumes + cinder_backup_extra_volumes }}"
+ volumes: "{{ cinder_backup_default_volumes + cinder_backup_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
dimensions: "{{ cinder_backup_dimensions }}"
healthcheck: "{{ cinder_backup_healthcheck }}"
+####################
+# Config Validate
+####################
+cinder_config_validation:
+ - generator: "/cinder/tools/config/cinder-config-generator.conf"
+ config: "/etc/cinder/cinder.conf"
+
####################
# Database
####################
@@ -78,19 +86,19 @@ cinder_database_shard:
####################
cinder_tag: "{{ openstack_tag }}"
-cinder_volume_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/cinder-volume"
+cinder_volume_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}cinder-volume"
cinder_volume_tag: "{{ cinder_tag }}"
cinder_volume_image_full: "{{ cinder_volume_image }}:{{ cinder_volume_tag }}"
-cinder_scheduler_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/cinder-scheduler"
+cinder_scheduler_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}cinder-scheduler"
cinder_scheduler_tag: "{{ cinder_tag }}"
cinder_scheduler_image_full: "{{ cinder_scheduler_image }}:{{ cinder_scheduler_tag }}"
-cinder_backup_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/cinder-backup"
+cinder_backup_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}cinder-backup"
cinder_backup_tag: "{{ cinder_tag }}"
cinder_backup_image_full: "{{ cinder_backup_image }}:{{ cinder_backup_tag }}"
-cinder_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/cinder-api"
+cinder_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}cinder-api"
cinder_api_tag: "{{ cinder_tag }}"
cinder_api_image_full: "{{ cinder_api_image }}:{{ cinder_api_tag }}"
@@ -156,36 +164,36 @@ cinder_api_default_volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/cinder/cinder:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/cinder' if cinder_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/cinder:/dev-mode/cinder' if cinder_dev_mode | bool else '' }}"
cinder_backup_default_volumes:
- "{{ node_config_directory }}/cinder-backup/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "/dev/:/dev/"
- "/lib/modules:/lib/modules:ro"
- - "/run/:/run/:shared"
+ - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
- "cinder:/var/lib/cinder"
- "{% if enable_iscsid | bool %}iscsi_info:/etc/iscsi{% endif %}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/cinder/cinder:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/cinder' if cinder_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/cinder:/dev-mode/cinder' if cinder_dev_mode | bool else '' }}"
cinder_scheduler_default_volumes:
- "{{ node_config_directory }}/cinder-scheduler/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/cinder/cinder:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/cinder' if cinder_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/cinder:/dev-mode/cinder' if cinder_dev_mode | bool else '' }}"
cinder_volume_default_volumes:
- "{{ node_config_directory }}/cinder-volume/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "/dev/:/dev/"
- "/lib/modules:/lib/modules:ro"
- - "/run/:/run/:shared"
+ - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
- "cinder:/var/lib/cinder"
- "{% if enable_iscsid | bool %}iscsi_info:/etc/iscsi{% endif %}"
- "{% if enable_cinder_backend_lvm | bool and cinder_target_helper == 'lioadm' %}target_config:/etc/target{% endif %}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/cinder/cinder:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/cinder' if cinder_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/cinder:/dev-mode/cinder' if cinder_dev_mode | bool else '' }}"
cinder_extra_volumes: "{{ default_extra_volumes }}"
cinder_api_extra_volumes: "{{ cinder_extra_volumes }}"
@@ -202,9 +210,6 @@ cinder_enable_conversion_tmpfs: false
####################
# OpenStack
####################
-cinder_internal_base_endpoint: "{{ internal_protocol }}://{{ cinder_internal_fqdn | put_address_in_context('url') }}:{{ cinder_api_port }}"
-cinder_public_base_endpoint: "{{ public_protocol }}://{{ cinder_external_fqdn | put_address_in_context('url') }}:{{ cinder_api_port }}"
-
cinder_v3_internal_endpoint: "{{ cinder_internal_base_endpoint }}/v3/%(tenant_id)s"
cinder_v3_public_endpoint: "{{ cinder_public_base_endpoint }}/v3/%(tenant_id)s"
@@ -227,14 +232,10 @@ cinder_api_workers: "{{ openstack_service_workers }}"
# Cinder
####################
cinder_backends:
- - name: "{{ cinder_backend_ceph_name }}"
- enabled: "{{ cinder_backend_ceph | bool }}"
- name: "{{ cinder_backend_lvm_name }}"
enabled: "{{ enable_cinder_backend_lvm | bool }}"
- name: "{{ cinder_backend_nfs_name }}"
enabled: "{{ enable_cinder_backend_nfs | bool }}"
- - name: "{{ cinder_backend_hnas_nfs_name }}"
- enabled: "{{ enable_cinder_backend_hnas_nfs | bool }}"
- name: "{{ cinder_backend_vmwarevc_vmdk_name }}"
enabled: "{{ cinder_backend_vmwarevc_vmdk | bool }}"
- name: "{{ cinder_backend_vmware_vstorage_object_name }}"
@@ -245,20 +246,35 @@ cinder_backends:
enabled: "{{ enable_cinder_backend_pure_iscsi | bool }}"
- name: "{{ cinder_backend_pure_fc_name }}"
enabled: "{{ enable_cinder_backend_pure_fc | bool }}"
+ - name: "{{ cinder_backend_pure_roce_name }}"
+ enabled: "{{ enable_cinder_backend_pure_roce | bool }}"
+ - name: "{{ cinder_backend_pure_nvme_tcp_name }}"
+ enabled: "{{ enable_cinder_backend_pure_nvme_tcp | bool }}"
cinder_backend_ceph_name: "rbd-1"
cinder_backend_lvm_name: "lvm-1"
cinder_backend_nfs_name: "nfs-1"
-cinder_backend_hnas_nfs_name: "hnas-nfs"
cinder_backend_vmwarevc_vmdk_name: "vmwarevc-vmdk"
cinder_backend_vmware_vstorage_object_name: "vmware-vstorage-object"
cinder_backend_quobyte_name: "QuobyteHD"
cinder_backend_pure_iscsi_name: "Pure-FlashArray-iscsi"
cinder_backend_pure_fc_name: "Pure-FlashArray-fc"
+cinder_backend_pure_roce_name: "Pure-FlashArray-roce"
+cinder_backend_pure_nvme_tcp_name: "Pure-FlashArray-nvme-tcp"
+
+cinder_ceph_backends:
+ - name: "{{ cinder_backend_ceph_name }}"
+ cluster: "{{ ceph_cluster }}"
+ user: "{{ ceph_cinder_user }}"
+ pool: "{{ ceph_cinder_pool_name }}"
+ enabled: "{{ cinder_backend_ceph | bool }}"
+
+cinder_backup_backend_ceph_name: "rbd-1"
+cinder_backup_ceph_backend: "{{ cinder_ceph_backends | selectattr('name', 'equalto', cinder_backup_backend_ceph_name) | list | first | combine({'pool': ceph_cinder_backup_pool_name, 'user': ceph_cinder_backup_user }) }}"
skip_cinder_backend_check: False
-cinder_enabled_backends: "{{ cinder_backends | selectattr('enabled', 'equalto', true) | list }}"
+cinder_enabled_backends: "{{ cinder_backends | selectattr('enabled', 'equalto', true) | list + cinder_ceph_backends | selectattr('enabled', 'equalto', true) | list }}"
####################
# Notification
@@ -266,21 +282,9 @@ cinder_enabled_backends: "{{ cinder_backends | selectattr('enabled', 'equalto',
cinder_notification_topics:
- name: notifications
enabled: "{{ enable_ceilometer | bool }}"
- - name: vitrage_notifications
- enabled: "{{ enable_vitrage | bool }}"
cinder_enabled_notification_topics: "{{ cinder_notification_topics | selectattr('enabled', 'equalto', true) | list }}"
-##################################
-# Hitachi NAS Platform NFS drivers
-##################################
-# nfs
-hnas_nfs_backend: "hnas_nfs_backend"
-hnas_nfs_username:
-hnas_nfs_mgmt_ip0:
-hnas_nfs_svc0_volume_type:
-hnas_nfs_svc0_hdp:
-
#########################
# Quobyte Storage Driver
#########################
@@ -296,6 +300,14 @@ pure_fc_backend: "pure_fc_backend"
pure_api_token:
pure_san_ip:
+################################
+# Cinder Backup S3
+################################
+cinder_backup_s3_url: "{{ s3_url }}"
+cinder_backup_s3_bucket: "{{ s3_bucket }}"
+cinder_backup_s3_access_key: "{{ s3_access_key }}"
+cinder_backup_s3_secret_key: "{{ s3_secret_key }}"
+
####################
# Kolla
####################
@@ -321,7 +333,19 @@ cinder_ks_users:
password: "{{ cinder_keystone_password }}"
role: "admin"
+cinder_ks_user_roles:
+ - project: "service"
+ user: "{{ cinder_keystone_user }}"
+ role: "service"
+
####################
# TLS
####################
cinder_enable_tls_backend: "{{ kolla_enable_tls_backend }}"
+cinder_copy_certs: "{{ kolla_copy_ca_into_containers | bool or cinder_enable_tls_backend | bool }}"
+
+############
+# Clustering
+############
+cinder_cluster_name: ""
+cinder_cluster_skip_precheck: false
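
Splitting the Ceph entry out into cinder_ceph_backends gives every RBD backend its own cluster/user/pool triple, and cinder_enabled_backends now concatenates the enabled items of both lists. A hedged sketch of adding a second Ceph cluster, reusing the structure above (names and pools illustrative):

    ---
    # globals.yml (hypothetical): a second RBD backend on another cluster.
    cinder_ceph_backends:
      - name: "rbd-1"
        cluster: "{{ ceph_cluster }}"
        user: "{{ ceph_cinder_user }}"
        pool: "{{ ceph_cinder_pool_name }}"
        enabled: "{{ cinder_backend_ceph | bool }}"
      - name: "rbd-2"
        cluster: "ceph2"
        user: "cinder"
        pool: "volumes"
        enabled: true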
diff --git a/ansible/roles/cinder/handlers/main.yml b/ansible/roles/cinder/handlers/main.yml
index 59a08ca8d0..51583bbcc4 100644
--- a/ansible/roles/cinder/handlers/main.yml
+++ b/ansible/roles/cinder/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "cinder-api"
service: "{{ cinder_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -12,15 +12,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart cinder-scheduler container
vars:
service_name: "cinder-scheduler"
service: "{{ cinder_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -28,15 +26,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart cinder-volume container
vars:
service_name: "cinder-volume"
service: "{{ cinder_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -47,15 +43,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart cinder-backup container
vars:
service_name: "cinder-backup"
service: "{{ cinder_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -64,8 +58,6 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
# NOTE(mgoddard): After upgrading cinder, services will have an RPC version cap
# in place. We need to restart all services in order to allow them to use the
diff --git a/ansible/roles/cinder/tasks/bootstrap.yml b/ansible/roles/cinder/tasks/bootstrap.yml
index 25f2e37338..36b8bfe27e 100644
--- a/ansible/roles/cinder/tasks/bootstrap.yml
+++ b/ansible/roles/cinder/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating Cinder database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -17,6 +18,7 @@
- name: Creating Cinder database user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
diff --git a/ansible/roles/cinder/tasks/bootstrap_service.yml b/ansible/roles/cinder/tasks/bootstrap_service.yml
index 3ee985c7f1..a203f7d5b1 100644
--- a/ansible/roles/cinder/tasks/bootstrap_service.yml
+++ b/ansible/roles/cinder/tasks/bootstrap_service.yml
@@ -3,7 +3,7 @@
vars:
cinder_api: "{{ cinder_services['cinder-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -14,7 +14,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_cinder"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ cinder_api.volumes | reject('equalto', '') | list }}"
run_once: True
delegate_to: "{{ groups[cinder_api.group][0] }}"
diff --git a/ansible/roles/cinder/tasks/check-containers.yml b/ansible/roles/cinder/tasks/check-containers.yml
index 3f71fa8f49..b7e2f7c29f 100644
--- a/ansible/roles/cinder/tasks/check-containers.yml
+++ b/ansible/roles/cinder/tasks/check-containers.yml
@@ -1,20 +1,3 @@
---
-- name: Check cinder containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- tmpfs: "{{ item.value.tmpfs | default(omit) }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- privileged: "{{ item.value.privileged | default(False) }}"
- ipc_mode: "{{ item.value.ipc_mode | default('') }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ cinder_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/cinder/tasks/config.yml b/ansible/roles/cinder/tasks/config.yml
index 8e430755ba..5e7485a032 100644
--- a/ansible/roles/cinder/tasks/config.yml
+++ b/ansible/roles/cinder/tasks/config.yml
@@ -7,10 +7,13 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
+ with_dict: "{{ cinder_services | select_services_enabled_and_mapped_to_host }}"
+
+- include_tasks: external_huawei.yml
when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ cinder_services }}"
+ - cinder_backend_huawei | bool
+ - cinder_backend_huawei_xml_files | length > 0
+ - inventory_hostname in groups['cinder-volume']
- include_tasks: external_ceph.yml
when:
@@ -39,7 +42,7 @@
- include_tasks: copy-certs.yml
when:
- - kolla_copy_ca_into_containers | bool or cinder_enable_tls_backend | bool
+ - cinder_copy_certs
- name: Copying over config.json files for services
template:
@@ -47,12 +50,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ cinder_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ cinder_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over cinder-wsgi.conf
vars:
@@ -66,11 +64,7 @@
- "{{ node_custom_config }}/cinder/{{ inventory_hostname }}/cinder-wsgi.conf"
- "{{ node_custom_config }}/cinder/cinder-wsgi.conf"
- "cinder-wsgi.conf.j2"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart cinder-api container
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over cinder.conf
vars:
@@ -85,12 +79,18 @@
dest: "{{ node_config_directory }}/{{ item.key }}/cinder.conf"
mode: "0660"
become: true
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ cinder_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ cinder_services | select_services_enabled_and_mapped_to_host }}"
+
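+# NVMe-oF backends identify hosts by their NQN; generate a stable value by
+# deriving a UUID from the host name.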
+- name: Generating 'hostnqn' file for cinder_volume
+ vars:
+ service: "{{ cinder_services['cinder-volume'] }}"
+ hostnqn: "nqn.2014-08.org.nvmexpress:uuid:{{ ansible_facts.hostname | to_uuid }}"
+ template:
+ src: "templates/hostnqn.j2"
+ dest: "{{ node_config_directory }}/cinder-volume/hostnqn"
+ mode: "0660"
+ become: true
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over existing policy file
become: true
@@ -99,14 +99,12 @@
dest: "{{ node_config_directory }}/{{ item.key }}/{{ cinder_policy_file }}"
mode: "0660"
when:
- - item.value.enabled | bool
- cinder_policy_file is defined
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ cinder_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ cinder_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over nfs_shares files for cinder_volume
+ vars:
+ service: "{{ cinder_services['cinder-volume'] }}"
become: true
template:
src: "{{ item }}"
@@ -122,7 +120,5 @@
- "{{ node_custom_config }}/cinder/nfs_shares"
- "{{ node_custom_config }}/cinder/cinder-volume/nfs_shares"
- "{{ node_custom_config }}/cinder/{{ inventory_hostname }}/nfs_shares"
- skip: "{{ not enable_cinder_backend_nfs | bool and not enable_cinder_backend_hnas_nfs | bool }}"
- when: inventory_hostname in groups['cinder-volume']
- notify:
- - Restart cinder-volume container
+ skip: "{{ not enable_cinder_backend_nfs | bool }}"
+ when: service | service_enabled_and_mapped_to_host
diff --git a/ansible/roles/cinder/tasks/config_validate.yml b/ansible/roles/cinder/tasks/config_validate.yml
new file mode 100644
index 0000000000..1c9b602455
--- /dev/null
+++ b/ansible/roles/cinder/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ cinder_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ cinder_config_validation }}"
diff --git a/ansible/roles/cinder/tasks/external_ceph.yml b/ansible/roles/cinder/tasks/external_ceph.yml
index c4da309d28..cf9c34fd13 100644
--- a/ansible/roles/cinder/tasks/external_ceph.yml
+++ b/ansible/roles/cinder/tasks/external_ceph.yml
@@ -1,53 +1,70 @@
---
-- name: Copying over ceph.conf for Cinder
+- name: Ensuring cinder service ceph config subdirs exist
+ vars:
+ service: "{{ cinder_services[item] }}"
+ file:
+ path: "{{ node_config_directory }}/{{ item }}/ceph"
+ state: "directory"
+ owner: "{{ config_owner_user }}"
+ group: "{{ config_owner_group }}"
+ mode: "0770"
+ become: true
+ when: service | service_enabled_and_mapped_to_host
+ with_items:
+ - "cinder-volume"
+ - "cinder-backup"
+
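+# Render one ceph config per (service, backend) pair, so that cinder-volume
+# and cinder-backup each receive a <cluster>.conf for every Ceph cluster
+# they use.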
+- name: Copying over multiple ceph.conf for cinder services
vars:
services_need_config:
- "cinder-volume"
- "cinder-backup"
+ service_name: "{{ item.0.key }}"
+ service: "{{ item.0.value }}"
+ cluster: "{{ item.1.cluster }}"
merge_configs:
sources:
- - "{{ node_custom_config }}/cinder/ceph.conf"
- - "{{ node_custom_config }}/cinder/{{ item.key }}/ceph.conf"
- dest: "{{ node_config_directory }}/{{ item.key }}/ceph.conf"
+ - "{{ node_custom_config }}/cinder/{{ cluster }}.conf"
+ - "{{ node_custom_config }}/cinder/{{ service_name }}/{{ cluster }}.conf"
+ dest: "{{ node_config_directory }}/{{ service_name }}/ceph/{{ cluster }}.conf"
mode: "0660"
become: true
when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- - item.key in services_need_config
- with_dict: "{{ cinder_services }}"
- notify:
- - Restart {{ item.key }} container
+ - service | service_enabled_and_mapped_to_host
+ - service_name in services_need_config
+ with_nested:
+ - "{{ cinder_services | dict2items }}"
+ - "{{ cinder_ceph_backends + [cinder_backup_ceph_backend] }}"
- name: Copy over Ceph keyring files for cinder-volume
+ vars:
+ keyring: "{{ item.cluster }}.client.{{ item.user }}.keyring"
+ service: "{{ cinder_services['cinder-volume'] }}"
template:
- src: "{{ node_custom_config }}/cinder/cinder-volume/{{ ceph_cinder_keyring }}"
- dest: "{{ node_config_directory }}/cinder-volume/"
+ src: "{{ node_custom_config }}/cinder/cinder-volume/{{ keyring }}"
+ dest: "{{ node_config_directory }}/cinder-volume/ceph/{{ keyring }}"
mode: "0660"
become: true
+ with_items: "{{ cinder_ceph_backends }}"
when:
- external_ceph_cephx_enabled | bool
- - inventory_hostname in groups['cinder-volume']
- - cinder_services['cinder-volume'].enabled | bool
- notify:
- - Restart cinder-volume container
+ - service | service_enabled_and_mapped_to_host
- name: Copy over Ceph keyring files for cinder-backup
+ vars:
+ service: "{{ cinder_services['cinder-backup'] }}"
+ keyring: "{{ item.cluster }}.client.{{ item.user }}.keyring"
template:
- src: "{{ node_custom_config }}/cinder/{{ item }}"
- dest: "{{ node_config_directory }}/cinder-backup/"
+ src: "{{ node_custom_config }}/cinder/cinder-backup/{{ keyring }}"
+ dest: "{{ node_config_directory }}/cinder-backup/ceph/{{ keyring }}"
mode: "0660"
become: true
- register: cinder_backup_ceph_keyring
with_items:
- - "cinder-backup/{{ ceph_cinder_keyring }}"
- - "cinder-backup/{{ ceph_cinder_backup_keyring }}"
+ - "{{ cinder_ceph_backends }}"
+ - "{{ cinder_backup_ceph_backend }}"
when:
- external_ceph_cephx_enabled | bool
- - inventory_hostname in groups['cinder-backup']
- - cinder_services['cinder-backup'].enabled | bool
- notify:
- - Restart cinder-backup container
+ - service | service_enabled_and_mapped_to_host
- name: Ensuring config directory has correct owner and permission
become: true
diff --git a/ansible/roles/cinder/tasks/external_huawei.yml b/ansible/roles/cinder/tasks/external_huawei.yml
new file mode 100644
index 0000000000..a4ff781095
--- /dev/null
+++ b/ansible/roles/cinder/tasks/external_huawei.yml
@@ -0,0 +1,11 @@
+---
+- name: Copying over Huawei XML files
+ copy:
+ src: "{{ node_custom_config }}/cinder/{{ item }}"
+ dest: "{{ node_config_directory }}/cinder-volume/{{ item }}"
+ mode: "0660"
+ become: true
+ with_items:
+ - "{{ cinder_backend_huawei_xml_files }}"
+ notify:
+ - Restart cinder-volume container
diff --git a/ansible/roles/cinder/tasks/precheck.yml b/ansible/roles/cinder/tasks/precheck.yml
index 0daba33867..cd1361e4c7 100644
--- a/ansible/roles/cinder/tasks/precheck.yml
+++ b/ansible/roles/cinder/tasks/precheck.yml
@@ -8,8 +8,11 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- cinder_api
+ check_mode: false
register: container_facts
- name: Checking free port for Cinder API
@@ -29,7 +32,6 @@
msg: "Please enable at least one backend when enabling Cinder"
when:
- not skip_cinder_backend_check | bool
- - not enable_cinder_backend_hnas_nfs | bool
- not enable_cinder_backend_iscsi | bool
- not enable_cinder_backend_lvm | bool
- not enable_cinder_backend_nfs | bool
@@ -39,6 +41,8 @@
- not enable_cinder_backend_quobyte | bool
- not enable_cinder_backend_pure_iscsi | bool
- not enable_cinder_backend_pure_fc | bool
+ - not enable_cinder_backend_pure_roce | bool
+ - not enable_cinder_backend_pure_nvme_tcp | bool
- name: Checking LVM volume group exists for Cinder
become: true
@@ -46,7 +50,45 @@
register: result
changed_when: false
failed_when: result is failed
+ check_mode: false
when:
- enable_cinder | bool
- enable_cinder_backend_lvm | bool
- inventory_hostname in groups['cinder-volume']
+
+- name: Check if S3 configurations are defined
+ assert:
+ that:
+ - vars[item] is defined
+    msg: "Cinder backup S3 backend is enabled; either the {{ item }} or {{ item | replace('cinder_backup_', '') }} variable must be defined."
+ with_items:
+ - cinder_backup_s3_url
+ - cinder_backup_s3_bucket
+ - cinder_backup_s3_access_key
+ - cinder_backup_s3_secret_key
+ when: cinder_backup_driver == "s3"
+
+- name: Check if cinder_cluster_name is configured for HA configurations
+ assert:
+ that:
+ - cinder_cluster_name != ""
+ msg: |
+      Multiple cinder-volume instances detected but cinder_cluster_name is not set - please see
+ https://docs.openstack.org/kolla-ansible/latest/reference/storage/cinder-guide.html#ha
+ for guidance.
+ when:
+ - not cinder_cluster_skip_precheck
+ - groups['cinder-volume'] | length > 1
+
+- name: Check if cinder_cluster_name is configured and configuration is non-HA
+ assert:
+ that:
+ - cinder_cluster_name == ""
+ msg: |
+ Single cinder-volume instance detected but cinder_cluster_name is set (cluster
+ configuration will not be applied) - please see
+ https://docs.openstack.org/kolla-ansible/latest/reference/storage/cinder-guide.html#ha
+ for guidance.
+ when:
+ - not cinder_cluster_skip_precheck
+ - groups['cinder-volume'] | length == 1
diff --git a/ansible/roles/cinder/tasks/register.yml b/ansible/roles/cinder/tasks/register.yml
index 86511bc411..d090b30d8e 100644
--- a/ansible/roles/cinder/tasks/register.yml
+++ b/ansible/roles/cinder/tasks/register.yml
@@ -5,3 +5,4 @@
service_ks_register_auth: "{{ openstack_cinder_auth }}"
service_ks_register_services: "{{ cinder_ks_services }}"
service_ks_register_users: "{{ cinder_ks_users }}"
+ service_ks_register_user_roles: "{{ cinder_ks_user_roles }}"
diff --git a/ansible/roles/cinder/tasks/reload.yml b/ansible/roles/cinder/tasks/reload.yml
index a8d6dd3289..6c9497f862 100644
--- a/ansible/roles/cinder/tasks/reload.yml
+++ b/ansible/roles/cinder/tasks/reload.yml
@@ -3,7 +3,7 @@
vars:
service: "{{ item.value }}"
become: true
- kolla_docker:
+ kolla_container:
action: "restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
diff --git a/ansible/roles/cinder/tasks/upgrade.yml b/ansible/roles/cinder/tasks/upgrade.yml
index a30a1c726b..e12f771598 100644
--- a/ansible/roles/cinder/tasks/upgrade.yml
+++ b/ansible/roles/cinder/tasks/upgrade.yml
@@ -10,6 +10,13 @@
- import_tasks: check-containers.yml
+# TODO(bbezak): Remove this task in the Dalmatian cycle.
+- import_role:
+ name: service-ks-register
+ vars:
+ service_ks_register_auth: "{{ openstack_cinder_auth }}"
+ service_ks_register_user_roles: "{{ cinder_ks_user_roles }}"
+
- name: Flush handlers
meta: flush_handlers
@@ -19,7 +26,7 @@
vars:
cinder_api: "{{ cinder_services['cinder-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -30,7 +37,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_cinder"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ cinder_api.volumes }}"
run_once: True
delegate_to: "{{ groups[cinder_api.group][0] }}"
diff --git a/ansible/roles/cinder/templates/cinder-api.json.j2 b/ansible/roles/cinder/templates/cinder-api.json.j2
index bd00a9a3e3..a5ae5a3e9e 100644
--- a/ansible/roles/cinder/templates/cinder-api.json.j2
+++ b/ansible/roles/cinder/templates/cinder-api.json.j2
@@ -32,7 +32,13 @@
"dest": "/etc/cinder/certs/cinder-key.pem",
"owner": "cinder",
"perm": "0600"
- }
+ }{% if cinder_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
{% endif %}],
"permissions": [
{
diff --git a/ansible/roles/cinder/templates/cinder-backup.json.j2 b/ansible/roles/cinder/templates/cinder-backup.json.j2
index c5d8dc15a9..d921a04142 100644
--- a/ansible/roles/cinder/templates/cinder-backup.json.j2
+++ b/ansible/roles/cinder/templates/cinder-backup.json.j2
@@ -14,25 +14,16 @@
"perm": "0600"
}{% endif %}{% if cinder_backend_ceph | bool %},
{
- "source": "{{ container_config_directory }}/ceph.conf",
- "dest": "/etc/ceph/ceph.conf",
+ "source": "{{ container_config_directory }}/ceph",
+ "dest": "/etc/ceph",
"owner": "cinder",
- "perm": "0600",
- "optional": {{ (not cinder_backend_ceph | bool) | string | lower }}
- },
- {
- "source": "{{ container_config_directory }}/{{ ceph_cinder_keyring }}",
- "dest": "/etc/ceph/{{ ceph_cinder_keyring }}",
- "owner": "cinder",
- "perm": "0600",
- "optional": {{ (not cinder_backend_ceph | bool) | string | lower }}
- },
+ "perm": "0600"
+ }{% endif %}{% if cinder_copy_certs | bool %},
{
- "source": "{{ container_config_directory }}/{{ ceph_cinder_backup_keyring }}",
- "dest": "/etc/ceph/{{ ceph_cinder_backup_keyring }}",
- "owner": "cinder",
- "perm": "0600",
- "optional": {{ (not cinder_backend_ceph | bool) | string | lower }}
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/cinder/templates/cinder-scheduler.json.j2 b/ansible/roles/cinder/templates/cinder-scheduler.json.j2
index cd4a5124a6..e99b9080fa 100644
--- a/ansible/roles/cinder/templates/cinder-scheduler.json.j2
+++ b/ansible/roles/cinder/templates/cinder-scheduler.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/cinder/{{ cinder_policy_file }}",
"owner": "cinder",
"perm": "0600"
+ }{% endif %}{% if cinder_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/cinder/templates/cinder-volume.json.j2 b/ansible/roles/cinder/templates/cinder-volume.json.j2
index 6bd55850a9..3de38cddb6 100644
--- a/ansible/roles/cinder/templates/cinder-volume.json.j2
+++ b/ansible/roles/cinder/templates/cinder-volume.json.j2
@@ -6,33 +6,44 @@
"dest": "/etc/cinder/cinder.conf",
"owner": "cinder",
"perm": "0600"
- },
+ },{% if cinder_backend_huawei | bool and cinder_backend_huawei_xml_files | length > 0 %}{% for file in cinder_backend_huawei_xml_files %}
{
- "source": "{{ container_config_directory }}/{{ ceph_cinder_keyring }}",
- "dest": "/etc/ceph/{{ ceph_cinder_keyring }}",
+ "source": "{{ container_config_directory }}/{{ file }}",
+ "dest": "/etc/cinder/{{ file }}",
"owner": "cinder",
"perm": "0600",
- "optional": {{ (not cinder_backend_ceph | bool) | string | lower }}
- },
+ "optional": {{ (not cinder_backend_huawei | bool) | string | lower }}
+    },{% endfor %}{% endif %}{% if cinder_backend_ceph | bool %}
{
- "source": "{{ container_config_directory }}/ceph.conf",
- "dest": "/etc/ceph/ceph.conf",
+ "source": "{{ container_config_directory }}/ceph",
+ "dest": "/etc/ceph",
"owner": "cinder",
- "perm": "0600",
- "optional": {{ (not cinder_backend_ceph | bool) | string | lower }}
- },
+ "perm": "0600"
+ },{% endif %}
{
"source": "{{ container_config_directory }}/nfs_shares",
"dest": "/etc/cinder/nfs_shares",
"owner": "cinder",
"perm": "0600",
"optional": {{ (not enable_cinder_backend_nfs | bool) | string | lower }}
+ },
+ {
+ "source": "{{ container_config_directory }}/hostnqn",
+ "dest": "/etc/nvme/hostnqn",
+ "owner": "root",
+ "perm": "0644"
}{% if cinder_policy_file is defined %},
{
"source": "{{ container_config_directory }}/{{ cinder_policy_file }}",
"dest": "/etc/cinder/{{ cinder_policy_file }}",
"owner": "cinder",
"perm": "0600"
+ }{% endif %}{% if cinder_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/cinder/templates/cinder.conf.j2 b/ansible/roles/cinder/templates/cinder.conf.j2
index 565ab44376..d7efe8cdc7 100644
--- a/ansible/roles/cinder/templates/cinder.conf.j2
+++ b/ansible/roles/cinder/templates/cinder.conf.j2
@@ -20,17 +20,23 @@ glance_api_servers = {{ glance_internal_endpoint }}
glance_num_retries = {{ groups['glance-api'] | length }}
glance_ca_certificates_file = {{ openstack_cacert }}
+{% if service_name == "cinder-volume" and cinder_cluster_name != "" %}
+cluster = {{ cinder_cluster_name }}
+{% endif %}
+
{% if cinder_enabled_backends %}
+{% if service_name == 'cinder-volume' %}
enabled_backends = {{ cinder_enabled_backends|map(attribute='name')|join(',') }}
{% endif %}
+{% endif %}
{% if service_name == "cinder-backup" and enable_cinder_backup | bool %}
{% if cinder_backup_driver == "ceph" %}
backup_driver = cinder.backup.drivers.ceph.CephBackupDriver
-backup_ceph_conf = /etc/ceph/ceph.conf
-backup_ceph_user = {{ ceph_cinder_backup_user }}
+backup_ceph_conf = /etc/ceph/{{ cinder_backup_ceph_backend['cluster'] }}.conf
+backup_ceph_user = {{ cinder_backup_ceph_backend['user'] }}
backup_ceph_chunk_size = 134217728
-backup_ceph_pool = {{ ceph_cinder_backup_pool_name }}
+backup_ceph_pool = {{ cinder_backup_ceph_backend['pool'] }}
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
restore_discard_excess_bytes = true
@@ -43,10 +49,17 @@ backup_file_size = 327680000
{% elif enable_swift | bool and cinder_backup_driver == "swift" %}
backup_driver = cinder.backup.drivers.swift.SwiftBackupDriver
backup_swift_url = {{ swift_internal_base_endpoint }}/v1/AUTH_
+backup_swift_ca_cert_file = {{ openstack_cacert }}
backup_swift_auth = per_user
backup_swift_auth_version = 1
backup_swift_user =
backup_swift_key =
+{% elif cinder_backup_driver == "s3" %}
+backup_driver = cinder.backup.drivers.s3.S3BackupDriver
+backup_s3_endpoint_url = {{ cinder_backup_s3_url }}
+backup_s3_store_bucket = {{ cinder_backup_s3_bucket }}
+backup_s3_store_access_key = {{ cinder_backup_s3_access_key }}
+backup_s3_store_secret_key = {{ cinder_backup_s3_secret_key }}
{% endif %}
{% endif %}
@@ -65,11 +78,18 @@ topics = {{ cinder_enabled_notification_topics | map(attribute='name') | join(',
driver = noop
{% endif %}
-{% if om_enable_rabbitmq_tls | bool %}
[oslo_messaging_rabbit]
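+# The RabbitMQ heartbeat must run in a native thread for the WSGI-based
+# cinder-api service; the other cinder services run under eventlet.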
+heartbeat_in_pthread = {{ service_name == 'cinder-api' }}
+{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
[oslo_middleware]
enable_proxy_headers_parsing = True
@@ -99,6 +119,10 @@ max_retries = -1
[keystone_authtoken]
service_type = volume
+# Security fix: always validate service tokens
+# see: https://security.openstack.org/ossa/OSSA-2023-003.html
+# and: https://docs.openstack.org/cinder/zed/configuration/block-storage/service-token.html#troubleshooting
+service_token_roles_required = true
www_authenticate_uri = {{ keystone_internal_url }}
auth_url = {{ keystone_internal_url }}
auth_type = password
@@ -110,7 +134,7 @@ password = {{ cinder_keystone_password }}
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
@@ -128,15 +152,24 @@ target_protocol = iscsi
{% endif %}
{% if cinder_backend_ceph | bool %}
-[{{ cinder_backend_ceph_name }}]
+{% if service_name == 'cinder-volume' %}
+{% for backend in cinder_ceph_backends %}
+[{{ backend.name }}]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
-volume_backend_name = {{ cinder_backend_ceph_name }}
-rbd_pool = {{ ceph_cinder_pool_name }}
-rbd_ceph_conf = /etc/ceph/ceph.conf
+volume_backend_name = {{ backend.name }}
+rbd_pool = {{ backend.pool }}
+rbd_ceph_conf = /etc/ceph/{{ backend.cluster }}.conf
rados_connect_timeout = 5
-rbd_user = {{ ceph_cinder_user }}
+rbd_user = {{ backend.user }}
+rbd_cluster_name = {{ backend.cluster }}
+rbd_keyring_conf = /etc/ceph/{{ backend.cluster }}.client.{{ backend.user }}.keyring
rbd_secret_uuid = {{ cinder_rbd_secret_uuid }}
report_discard_supported = True
+{% if backend.availability_zone is defined %}
+backend_availability_zone = {{ backend.availability_zone }}
+{% endif %}
+{% endfor %}
+{% endif %}
{% endif %}
{% if enable_cinder_backend_nfs | bool %}
@@ -149,19 +182,6 @@ nas_secure_file_permissions = False
nas_secure_file_operations = False
{% endif %}
-{% if enable_cinder_backend_hnas_nfs | bool %}
-[{{ cinder_backend_hnas_nfs_name }}]
-volume_driver = cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver
-nfs_shares_config = /home/cinder/nfs_shares
-volume_backend_name = {{ hnas_nfs_backend }}
-hnas_username = {{ hnas_nfs_username }}
-hnas_password = {{ hnas_nfs_password }}
-hnas_mgmt_ip0 = {{ hnas_nfs_mgmt_ip0 }}
-
-hnas_svc0_volume_type = {{ hnas_nfs_svc0_volume_type }}
-hnas_svc0_hdp = {{ hnas_nfs_svc0_hdp }}
-{% endif %}
-
{% if cinder_backend_vmwarevc_vmdk | bool %}
[{{ cinder_backend_vmwarevc_vmdk_name }}]
volume_driver = cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver
@@ -204,6 +224,23 @@ san_ip = {{ pure_san_ip }}
pure_api_token = {{ pure_api_token }}
{% endif %}
+{% if enable_cinder_backend_pure_nvme_tcp | bool %}
+[{{ cinder_backend_pure_nvme_tcp_name }}]
+volume_backend_name = {{ pure_nvme_tcp_backend }}
+volume_driver = cinder.volume.drivers.pure.PureNVMEDriver
+pure_nvme_transport = tcp
+san_ip = {{ pure_san_ip }}
+pure_api_token = {{ pure_api_token }}
+{% endif %}
+
+{% if enable_cinder_backend_pure_roce | bool %}
+[{{ cinder_backend_pure_roce_name }}]
+volume_backend_name = {{ pure_roce_backend }}
+volume_driver = cinder.volume.drivers.pure.PureNVMEDriver
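+# pure_nvme_transport is not set here; the Pure NVMe driver defaults to roce.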
+san_ip = {{ pure_san_ip }}
+pure_api_token = {{ pure_api_token }}
+{% endif %}
+
[privsep_entrypoint]
helper_command=sudo cinder-rootwrap /etc/cinder/rootwrap.conf privsep-helper --config-file /etc/cinder/cinder.conf
@@ -226,9 +263,10 @@ verify_ssl_path = {{ openstack_cacert }}
{% if cinder_coordination_backend == 'redis' %}
backend_url = {{ redis_connection_string }}
{% elif cinder_coordination_backend == 'etcd' %}
-# NOTE(yoctozepto): etcd-compatible tooz drivers do not support multiple endpoints here (verified in Stein, Train)
# NOTE(yoctozepto): we must use etcd3gw (aka etcd3+http) due to issues with alternative (etcd3) and eventlet (as used by cinder)
# see https://bugs.launchpad.net/kolla-ansible/+bug/1854932
# and https://review.opendev.org/466098 for details
-backend_url = etcd3+{{ etcd_protocol }}://{{ 'api' | kolla_address(groups['etcd'][0]) | put_address_in_context('url') }}:{{ etcd_client_port }}
+# NOTE(jan.gutter): etcd v3.4 removed the default `v3alpha` api_version. Until
+# tooz defaults to a newer version, we should explicitly specify `v3`
+backend_url = etcd3+{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ etcd_client_port }}?api_version=v3{% if openstack_cacert %}&ca_cert={{ openstack_cacert }}{% endif %}
{% endif %}
diff --git a/ansible/roles/cinder/templates/hostnqn.j2 b/ansible/roles/cinder/templates/hostnqn.j2
new file mode 100644
index 0000000000..6f10135974
--- /dev/null
+++ b/ansible/roles/cinder/templates/hostnqn.j2
@@ -0,0 +1 @@
+{{ hostnqn }}
diff --git a/ansible/roles/cloudkitty/defaults/main.yml b/ansible/roles/cloudkitty/defaults/main.yml
index df72fb115a..c462a0450c 100644
--- a/ansible/roles/cloudkitty/defaults/main.yml
+++ b/ansible/roles/cloudkitty/defaults/main.yml
@@ -14,11 +14,14 @@ cloudkitty_services:
mode: "http"
external: false
port: "{{ cloudkitty_api_port }}"
+ listen_port: "{{ cloudkitty_api_listen_port }}"
cloudkitty_api_external:
enabled: "{{ enable_cloudkitty }}"
mode: "http"
external: true
- port: "{{ cloudkitty_api_port }}"
+ external_fqdn: "{{ cloudkitty_external_fqdn }}"
+ port: "{{ cloudkitty_api_public_port }}"
+ listen_port: "{{ cloudkitty_api_listen_port }}"
cloudkitty-processor:
container_name: "cloudkitty_processor"
group: "cloudkitty-processor"
@@ -28,6 +31,12 @@ cloudkitty_services:
dimensions: "{{ cloudkitty_processor_dimensions }}"
healthcheck: "{{ cloudkitty_processor_healthcheck }}"
+####################
+# Config Validate
+####################
+cloudkitty_config_validation:
+ - generator: "/cloudkitty/etc/oslo-config-generator/cloudkitty.conf"
+ config: "/etc/cloudkitty/cloudkitty.conf"
####################
# Database
@@ -55,11 +64,11 @@ cloudkitty_database_shard:
####################
cloudkitty_tag: "{{ openstack_tag }}"
-cloudkitty_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/cloudkitty-api"
+cloudkitty_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}cloudkitty-api"
cloudkitty_api_tag: "{{ cloudkitty_tag }}"
cloudkitty_api_image_full: "{{ cloudkitty_api_image }}:{{ cloudkitty_api_tag }}"
-cloudkitty_processor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/cloudkitty-processor"
+cloudkitty_processor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}cloudkitty-processor"
cloudkitty_processor_tag: "{{ cloudkitty_tag }}"
cloudkitty_processor_image_full: "{{ cloudkitty_processor_image }}:{{ cloudkitty_processor_tag }}"
@@ -71,13 +80,13 @@ cloudkitty_api_default_volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/cloudkitty/cloudkitty:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/cloudkitty' if cloudkitty_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/cloudkitty:/dev-mode/cloudkitty' if cloudkitty_dev_mode | bool else '' }}"
cloudkitty_processor_default_volumes:
- "{{ node_config_directory }}/cloudkitty-processor/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/cloudkitty/cloudkitty:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/cloudkitty' if cloudkitty_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/cloudkitty:/dev-mode/cloudkitty' if cloudkitty_dev_mode | bool else '' }}"
cloudkitty_api_enable_healthchecks: "{{ enable_container_healthchecks }}"
cloudkitty_api_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
@@ -112,9 +121,6 @@ cloudkitty_api_extra_volumes: "{{ cloudkitty_extra_volumes }}"
####################
# OpenStack
####################
-cloudkitty_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ cloudkitty_api_port }}"
-cloudkitty_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ cloudkitty_api_port }}"
-
cloudkitty_logging_debug: "{{ openstack_logging_debug }}"
cloudkitty_keystone_user: "cloudkitty"
@@ -145,10 +151,10 @@ cloudkitty_custom_metrics_yaml_file: "metrics.yml"
# cloudkitty_influxdb_retention_policy: "autogen"
# Set to true to use SSL for InfluxDB connections.
-cloudkitty_influxdb_use_ssl: false
+cloudkitty_influxdb_use_ssl: "{{ kolla_enable_tls_internal }}"
# Path of the CA certificate to trust for HTTPS connections.
-# cloudkitty_influxdb_cafile: "{{ openstack_cacert }}"
+cloudkitty_influxdb_cafile: "{{ openstack_cacert }}"
# Set to true to authorize insecure HTTPS connections to InfluxDB.
# This means, HTTPS connections without validating the certificate used by InfluxDB
@@ -156,34 +162,41 @@ cloudkitty_influxdb_use_ssl: false
cloudkitty_influxdb_name: "cloudkitty"
-# Set the elasticsearch index name.
+# Set the elasticsearch/opensearch index name.
cloudkitty_elasticsearch_index_name: "cloudkitty"
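+# Default the OpenSearch settings to the legacy Elasticsearch values so that
+# existing overrides keep working after the migration to OpenSearch.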
+cloudkitty_opensearch_index_name: "{{ cloudkitty_elasticsearch_index_name }}"
-# Set the elasticsearch host URL.
-cloudkitty_elasticsearch_url: "{{ internal_protocol }}://{{ elasticsearch_address }}:{{ elasticsearch_port }}"
+# Set the elasticsearch/opensearch host URL.
+cloudkitty_elasticsearch_url: "{{ internal_protocol }}://{{ opensearch_address }}:{{ opensearch_port }}"
+cloudkitty_opensearch_url: "{{ cloudkitty_elasticsearch_url }}"
# Path of the CA certificate to trust for HTTPS connections.
-# cloudkitty_elasticsearch_cafile: "{{ openstack_cacert }}"
+cloudkitty_elasticsearch_cafile: "{{ openstack_cacert }}"
# Set to true to authorize insecure HTTPS connections to Elasticsearch.
# This means, HTTPS connections without validating the certificate used by elasticsearch
cloudkitty_elasticsearch_insecure_connections: false
+# Path of the CA certificate to trust for HTTPS connections.
+cloudkitty_opensearch_cafile: "{{ openstack_cacert }}"
+
+# Set to true to authorize insecure HTTPS connections to OpenSearch.
+# This means, HTTPS connections without validating the certificate used by
+# OpenSearch.
+cloudkitty_opensearch_insecure_connections: false
+
####################
# Collector
####################
-# Valid options are 'gnocchi', 'monasca' or 'prometheus'. The default value is
+# Valid options are 'gnocchi' or 'prometheus'. The default value is
# 'gnocchi', which matches the default in Cloudkitty.
cloudkitty_collector_backend: "gnocchi"
-# Set Monasca interface used for keystone URL discovery.
-cloudkitty_monasca_interface: "internal"
-
# Set prometheus collector URL.
cloudkitty_prometheus_url: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ prometheus_port }}/api/v1"
# Path of the CA certificate to trust for HTTPS connections.
-# cloudkitty_prometheus_cafile: "{{ openstack_cacert }}"
+cloudkitty_prometheus_cafile: "{{ openstack_cacert }}"
# Set to true to authorize insecure HTTPS connections to Prometheus.
# This means, HTTPS connections without validating the certificate used by prometheus.
@@ -192,7 +205,7 @@ cloudkitty_prometheus_insecure_connections: false
####################
# Fetcher
####################
-# Valid options are 'keystone', 'source', 'gnocchi', 'monasca' or 'prometheus'.
+# Valid options are 'keystone', 'source', 'gnocchi' or 'prometheus'.
# The default value is 'keystone', which matches the default in CloudKitty.
cloudkitty_fetcher_backend: "keystone"
diff --git a/ansible/roles/cloudkitty/handlers/main.yml b/ansible/roles/cloudkitty/handlers/main.yml
index e19a1e9356..e83cbac13d 100644
--- a/ansible/roles/cloudkitty/handlers/main.yml
+++ b/ansible/roles/cloudkitty/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "cloudkitty-api"
service: "{{ cloudkitty_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -12,15 +12,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart cloudkitty-processor container
vars:
service_name: "cloudkitty-processor"
service: "{{ cloudkitty_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -28,5 +26,3 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/cloudkitty/tasks/bootstrap.yml b/ansible/roles/cloudkitty/tasks/bootstrap.yml
index 41348dc49a..9e31dd5a47 100644
--- a/ansible/roles/cloudkitty/tasks/bootstrap.yml
+++ b/ansible/roles/cloudkitty/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating Cloudkitty database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -17,6 +18,7 @@
- name: Creating Cloudkitty database user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
@@ -36,12 +38,16 @@
- name: Creating Cloudkitty influxdb database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: influxdb_database
module_args:
hostname: "{{ influxdb_address }}"
port: "{{ influxdb_http_port }}"
ssl: "{{ cloudkitty_influxdb_use_ssl | bool }}"
database_name: "{{ cloudkitty_influxdb_name }}"
+ # The influxdb_database module and the InfluxDB 1.x Python client don't
+ # support specifying a CA certificate file.
+ validate_certs: False
run_once: True
delegate_to: "{{ groups['cloudkitty-api'][0] }}"
when: cloudkitty_storage_backend == 'influxdb'
@@ -49,18 +55,33 @@
- name: Checking if Cloudkitty elasticsearch index exists
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: uri
module_args:
url: "{{ cloudkitty_elasticsearch_url }}/{{ cloudkitty_elasticsearch_index_name }}"
status_code: 200, 404
run_once: true
delegate_to: "{{ groups['cloudkitty-api'][0] }}"
- register: cloudkitty_index
+ register: cloudkitty_index_elasticsearch
when: cloudkitty_storage_backend == 'elasticsearch'
+- name: Checking if Cloudkitty opensearch index exists
+ become: true
+ kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: uri
+ module_args:
+ url: "{{ cloudkitty_opensearch_url }}/{{ cloudkitty_opensearch_index_name }}"
+ status_code: 200, 404
+ run_once: true
+ delegate_to: "{{ groups['cloudkitty-api'][0] }}"
+ register: cloudkitty_index_opensearch
+ when: cloudkitty_storage_backend == 'opensearch'
+
- name: Creating Cloudkitty elasticsearch index
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: uri
module_args:
url: "{{ cloudkitty_elasticsearch_url }}/{{ cloudkitty_elasticsearch_index_name }}"
@@ -74,6 +95,25 @@
delegate_to: "{{ groups['cloudkitty-api'][0] }}"
when:
- cloudkitty_storage_backend == 'elasticsearch'
- - cloudkitty_index.get('status') != 200
+ - cloudkitty_index_elasticsearch.get('status') != 200
+
+- name: Creating Cloudkitty opensearch index
+ become: true
+ kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: uri
+ module_args:
+ url: "{{ cloudkitty_opensearch_url }}/{{ cloudkitty_opensearch_index_name }}"
+ method: PUT
+ status_code: 200
+ return_content: yes
+ body: |
+ {}
+ body_format: json
+ run_once: True
+ delegate_to: "{{ groups['cloudkitty-api'][0] }}"
+ when:
+ - cloudkitty_storage_backend == 'opensearch'
+ - cloudkitty_index_opensearch.get('status') != 200
- import_tasks: bootstrap_service.yml
diff --git a/ansible/roles/cloudkitty/tasks/bootstrap_service.yml b/ansible/roles/cloudkitty/tasks/bootstrap_service.yml
index ed2e566e2a..17546383c7 100644
--- a/ansible/roles/cloudkitty/tasks/bootstrap_service.yml
+++ b/ansible/roles/cloudkitty/tasks/bootstrap_service.yml
@@ -3,7 +3,7 @@
vars:
cloudkitty_api: "{{ cloudkitty_services['cloudkitty-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -14,7 +14,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_cloudkitty"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ cloudkitty_api.volumes | reject('equalto', '') | list }}"
run_once: True
delegate_to: "{{ groups[cloudkitty_api.group][0] }}"
diff --git a/ansible/roles/cloudkitty/tasks/check-containers.yml b/ansible/roles/cloudkitty/tasks/check-containers.yml
index bc777d4cc9..b7e2f7c29f 100644
--- a/ansible/roles/cloudkitty/tasks/check-containers.yml
+++ b/ansible/roles/cloudkitty/tasks/check-containers.yml
@@ -1,17 +1,3 @@
---
-- name: Check cloudkitty containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ cloudkitty_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/cloudkitty/tasks/config.yml b/ansible/roles/cloudkitty/tasks/config.yml
index 967b8cab36..7ef4809341 100644
--- a/ansible/roles/cloudkitty/tasks/config.yml
+++ b/ansible/roles/cloudkitty/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ cloudkitty_services }}"
+ with_dict: "{{ cloudkitty_services | select_services_enabled_and_mapped_to_host }}"
- name: Check if policies shall be overwritten
stat:
@@ -45,11 +42,7 @@
become: true
when:
- cloudkitty_custom_metrics_file.stat.exists
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ cloudkitty_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ cloudkitty_services | select_services_enabled_and_mapped_to_host }}"
- name: Are we using {{ cloudkitty_custom_metrics_yaml_file }}?
set_fact:
@@ -65,12 +58,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ cloudkitty_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ cloudkitty_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over cloudkitty.conf
vars:
@@ -85,12 +73,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/cloudkitty.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ cloudkitty_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ cloudkitty_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over wsgi-cloudkitty.conf
vars:
@@ -100,11 +83,7 @@
dest: "{{ node_config_directory }}/cloudkitty-api/wsgi-cloudkitty.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart cloudkitty-api container
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over existing policy file
template:
@@ -114,8 +93,4 @@
become: true
when:
- cloudkitty_policy_file is defined
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ cloudkitty_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ cloudkitty_services | select_services_enabled_and_mapped_to_host }}"
diff --git a/ansible/roles/cloudkitty/tasks/config_validate.yml b/ansible/roles/cloudkitty/tasks/config_validate.yml
new file mode 100644
index 0000000000..4806fd83a7
--- /dev/null
+++ b/ansible/roles/cloudkitty/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ cloudkitty_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ cloudkitty_config_validation }}"
diff --git a/ansible/roles/cloudkitty/tasks/precheck.yml b/ansible/roles/cloudkitty/tasks/precheck.yml
index 7002db1767..0892ba8e6e 100644
--- a/ansible/roles/cloudkitty/tasks/precheck.yml
+++ b/ansible/roles/cloudkitty/tasks/precheck.yml
@@ -8,8 +8,11 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- cloudkitty_api
+ check_mode: false
register: container_facts
- name: Checking free port for Cloudkitty API
diff --git a/ansible/roles/cloudkitty/templates/cloudkitty-api.json.j2 b/ansible/roles/cloudkitty/templates/cloudkitty-api.json.j2
index 29b67738c0..3ba9b72212 100644
--- a/ansible/roles/cloudkitty/templates/cloudkitty-api.json.j2
+++ b/ansible/roles/cloudkitty/templates/cloudkitty-api.json.j2
@@ -26,6 +26,12 @@
"dest": "/etc/cloudkitty/{{ cloudkitty_custom_metrics_yaml_file }}",
"owner": "cloudkitty",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/cloudkitty/templates/cloudkitty-processor.json.j2 b/ansible/roles/cloudkitty/templates/cloudkitty-processor.json.j2
index 4cd1041e08..5366b917bc 100644
--- a/ansible/roles/cloudkitty/templates/cloudkitty-processor.json.j2
+++ b/ansible/roles/cloudkitty/templates/cloudkitty-processor.json.j2
@@ -18,6 +18,12 @@
"dest": "/etc/cloudkitty/{{ cloudkitty_custom_metrics_yaml_file }}",
"owner": "cloudkitty",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/cloudkitty/templates/cloudkitty.conf.j2 b/ansible/roles/cloudkitty/templates/cloudkitty.conf.j2
index 539331acd5..b000e3e7b7 100644
--- a/ansible/roles/cloudkitty/templates/cloudkitty.conf.j2
+++ b/ansible/roles/cloudkitty/templates/cloudkitty.conf.j2
@@ -29,7 +29,7 @@ password = {{ cloudkitty_keystone_password }}
region_name = {{ openstack_region_name }}
cafile = {{ openstack_cacert }}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
@@ -44,11 +44,18 @@ lock_path = /var/lib/cloudkitty/tmp
policy_file = {{ cloudkitty_policy_file }}
{% endif %}
-{% if om_enable_rabbitmq_tls | bool %}
[oslo_messaging_rabbit]
+heartbeat_in_pthread = {{ service_name == 'cloudkitty-api' }}
+{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
[collect]
collector = {{ cloudkitty_collector_backend }}
@@ -62,15 +69,11 @@ auth_section = keystone_authtoken
region_name = {{ openstack_region_name }}
{% endif %}
-{% if cloudkitty_collector_backend == "monasca" %}
-[collector_monasca]
-monasca_service_name = monasca
-interface = {{ cloudkitty_monasca_interface }}
-{% endif %}
-
{% if cloudkitty_collector_backend == "prometheus" %}
[collector_prometheus]
prometheus_url = {{ cloudkitty_prometheus_url }}
+prometheus_user = admin
+prometheus_password = {{ prometheus_password }}
{% if cloudkitty_prometheus_cafile is defined %}
cafile = {{ cloudkitty_prometheus_cafile }}
@@ -86,6 +89,7 @@ backend = {{ cloudkitty_fetcher_backend }}
[fetcher_keystone]
keystone_version = 3
auth_section = keystone_authtoken
+cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
{% endif %}
@@ -94,6 +98,8 @@ region_name = {{ openstack_region_name }}
metric = openstack_identity_project_info
scope_attribute = id
prometheus_url = {{ cloudkitty_prometheus_url }}
+prometheus_user = admin
+prometheus_password = {{ prometheus_password }}
{% if cloudkitty_prometheus_cafile is defined %}
cafile = {{ cloudkitty_prometheus_cafile }}
@@ -148,8 +154,18 @@ host = {{ cloudkitty_elasticsearch_url }}
index_name = {{ cloudkitty_elasticsearch_index_name }}
insecure = {{ cloudkitty_elasticsearch_insecure_connections }}
-{% if cloudkitty_elasticsearch_cafile is defined %}
+{% if cloudkitty_elasticsearch_cafile | length > 0 %}
cafile = {{ cloudkitty_elasticsearch_cafile }}
{% endif %}
+{% endif %}
+{% if cloudkitty_storage_backend == 'opensearch' %}
+[storage_opensearch]
+host = {{ cloudkitty_opensearch_url }}
+index_name = {{ cloudkitty_opensearch_index_name }}
+insecure = {{ cloudkitty_opensearch_insecure_connections }}
+
+{% if cloudkitty_opensearch_cafile | length > 0 %}
+cafile = {{ cloudkitty_opensearch_cafile }}
+{% endif %}
{% endif %}
diff --git a/ansible/roles/collectd/defaults/main.yml b/ansible/roles/collectd/defaults/main.yml
index a0470b7f9d..a8c98fe573 100644
--- a/ansible/roles/collectd/defaults/main.yml
+++ b/ansible/roles/collectd/defaults/main.yml
@@ -12,7 +12,7 @@ collectd_services:
####################
# Docker
####################
-collectd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/collectd"
+collectd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}collectd"
collectd_tag: "{{ openstack_tag }}"
collectd_image_full: "{{ collectd_image }}:{{ collectd_tag }}"
diff --git a/ansible/roles/collectd/handlers/main.yml b/ansible/roles/collectd/handlers/main.yml
index b80f41c1bb..66970a1010 100644
--- a/ansible/roles/collectd/handlers/main.yml
+++ b/ansible/roles/collectd/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "collectd"
service: "{{ collectd_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -12,5 +12,3 @@
privileged: "{{ service.privileged | default(False) }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/collectd/tasks/check-containers.yml b/ansible/roles/collectd/tasks/check-containers.yml
index f49a677c87..b7e2f7c29f 100644
--- a/ansible/roles/collectd/tasks/check-containers.yml
+++ b/ansible/roles/collectd/tasks/check-containers.yml
@@ -1,17 +1,3 @@
---
-- name: Check collectd containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- privileged: "{{ item.value.privileged | default(False) }}"
- volumes: "{{ item.value.volumes }}"
- dimensions: "{{ item.value.dimensions }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ collectd_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/collectd/tasks/config.yml b/ansible/roles/collectd/tasks/config.yml
index 6b82fca9fe..1a261bed98 100644
--- a/ansible/roles/collectd/tasks/config.yml
+++ b/ansible/roles/collectd/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ collectd_services }}"
+ with_dict: "{{ collectd_services | select_services_enabled_and_mapped_to_host }}"
- name: Ensuring Plugin directory exist
file:
@@ -20,10 +17,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ collectd_services }}"
+ with_dict: "{{ collectd_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over config.json files for services
template:
@@ -31,12 +25,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ collectd_services }}"
- notify:
- - Restart collectd container
+ with_dict: "{{ collectd_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over collectd.conf for services
vars:
@@ -51,8 +40,4 @@
- "{{ node_custom_config }}/collectd/collectd.conf"
- "{{ node_custom_config }}/collectd.conf"
- "collectd.conf.j2"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart collectd container
+ when: service | service_enabled_and_mapped_to_host
diff --git a/ansible/roles/kafka/tasks/check.yml b/ansible/roles/collectd/tasks/config_validate.yml
similarity index 100%
rename from ansible/roles/kafka/tasks/check.yml
rename to ansible/roles/collectd/tasks/config_validate.yml
diff --git a/ansible/roles/common/defaults/main.yml b/ansible/roles/common/defaults/main.yml
index 72b51b25f7..084655fe52 100644
--- a/ansible/roles/common/defaults/main.yml
+++ b/ansible/roles/common/defaults/main.yml
@@ -17,24 +17,23 @@ common_services:
environment:
ANSIBLE_NOCOLOR: "1"
ANSIBLE_LIBRARY: "/usr/share/ansible"
+ REQUESTS_CA_BUNDLE: "{{ openstack_cacert }}"
privileged: True
- volumes: "{{ kolla_toolbox_default_volumes + kolla_toolbox_extra_volumes }}"
+ volumes: "{{ kolla_toolbox_default_volumes + kolla_toolbox_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
dimensions: "{{ kolla_toolbox_dimensions }}"
- # DUMMY_ENVIRONMENT is needed because empty environment is not supported
cron:
container_name: cron
group: cron
enabled: True
image: "{{ cron_image_full }}"
environment:
- DUMMY_ENVIRONMENT: kolla_useless_env
KOLLA_LOGROTATE_SCHEDULE: "{{ cron_logrotate_schedule }}"
volumes: "{{ cron_default_volumes + cron_extra_volumes }}"
dimensions: "{{ cron_dimensions }}"
-#######################
-# TLS and authenication
-#######################
+########################
+# TLS and authentication
+########################
fluentd_elasticsearch_path: ""
fluentd_elasticsearch_scheme: "{{ internal_protocol }}"
@@ -45,6 +44,15 @@ fluentd_elasticsearch_ssl_verify: "true"
fluentd_elasticsearch_cacert: "{{ openstack_cacert }}"
fluentd_elasticsearch_request_timeout: "60s"
+fluentd_opensearch_path: ""
+fluentd_opensearch_scheme: "{{ internal_protocol }}"
+fluentd_opensearch_user: ""
+fluentd_opensearch_password: ""
+fluentd_opensearch_ssl_version: "TLSv1_2"
+fluentd_opensearch_ssl_verify: "true"
+fluentd_opensearch_cacert: "{{ openstack_cacert }}"
+fluentd_opensearch_request_timeout: "60s"
+
####################
# Docker
####################
@@ -54,15 +62,15 @@ cron_dimensions: "{{ default_container_dimensions }}"
kolla_toolbox_dimensions: "{{ default_container_dimensions }}"
fluentd_dimensions: "{{ default_container_dimensions }}"
-kolla_toolbox_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/kolla-toolbox"
+kolla_toolbox_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}kolla-toolbox"
kolla_toolbox_tag: "{{ common_tag }}"
kolla_toolbox_image_full: "{{ kolla_toolbox_image }}:{{ kolla_toolbox_tag }}"
-cron_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/cron"
+cron_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}cron"
cron_tag: "{{ common_tag }}"
cron_image_full: "{{ cron_image }}:{{ cron_tag }}"
-fluentd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/fluentd"
+fluentd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}fluentd"
fluentd_tag: "{{ common_tag }}"
fluentd_image_full: "{{ fluentd_image }}:{{ fluentd_tag }}"
@@ -100,7 +108,7 @@ kolla_toolbox_default_volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "/dev/:/dev/"
- - "/run/:/run/:shared"
+ - "/run/:/run/{{ ':shared' if kolla_container_engine == 'docker' else '' }}" # see: https://github.com/containers/podman/issues/16305
- "kolla_logs:/var/log/kolla/"
cron_default_volumes:
- "{{ node_config_directory }}/cron/:{{ container_config_directory }}/:ro"
@@ -113,6 +121,7 @@ fluentd_default_volumes:
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- "fluentd_data:/var/lib/fluentd/data/"
+ - "/var/log/journal:/var/log/journal:ro"
kolla_toolbox_extra_volumes: "{{ default_extra_volumes }}"
cron_extra_volumes: "{{ default_extra_volumes }}"
fluentd_extra_volumes: "{{ default_extra_volumes }}"
@@ -127,6 +136,21 @@ cron_logrotate_schedule: "daily"
# Fluentd
####################
+# Enable the additional watch timer
+fluentd_enable_watch_timer: "false"
+
+# Set limits for queue size and chunk size
+# We need to ensure that the bulk_message_request_threshold is set below the
+# default maximum content length for the OpenSearch bulk API (100MB). By
+# default the bulk_message_request_threshold is unlimited, which can lead to
+# large payloads being sent and subsequently rejected by the OpenSearch API.
+fluentd_bulk_message_request_threshold: "20M"
+
+# The fluentd buffer chunk limit size is the maximum size of a single chunk in
+# the buffer. This should be set to a value that is less than the maximum size
+# of the bulk_message_request_threshold.
+fluentd_buffer_chunk_limit_size: "8M"
+
fluentd_input_openstack_services:
- name: aodh
enabled: "{{ enable_aodh | bool }}"
@@ -144,12 +168,8 @@ fluentd_input_openstack_services:
enabled: "{{ enable_cyborg | bool }}"
- name: designate
enabled: "{{ enable_designate | bool }}"
- - name: freezer
- enabled: "{{ enable_freezer | bool }}"
- name: glance
enabled: "{{ enable_glance | bool }}"
- - name: glance-tls-proxy
- enabled: "{{ enable_glance | bool }}"
- name: gnocchi
enabled: "{{ enable_gnocchi | bool }}"
- name: heat
@@ -172,24 +192,12 @@ fluentd_input_openstack_services:
enabled: "{{ enable_masakari | bool }}"
- name: mistral
enabled: "{{ enable_mistral | bool }}"
- - name: monasca
- enabled: "{{ enable_monasca | bool }}"
- - name: murano
- enabled: "{{ enable_murano | bool }}"
- name: neutron
enabled: "{{ enable_neutron | bool }}"
- - name: neutron-tls-proxy
- enabled: "{{ neutron_enable_tls_backend | bool }}"
- name: nova
enabled: "{{ enable_nova | bool }}"
- name: octavia
enabled: "{{ enable_octavia | bool }}"
- - name: sahara
- enabled: "{{ enable_sahara | bool }}"
- - name: senlin
- enabled: "{{ enable_senlin | bool }}"
- - name: solum
- enabled: "{{ enable_solum | bool }}"
- name: tacker
enabled: "{{ enable_tacker | bool }}"
- name: trove
diff --git a/ansible/roles/common/filter_plugins/filters.py b/ansible/roles/common/filter_plugins/kolla_common_filters.py
similarity index 100%
rename from ansible/roles/common/filter_plugins/filters.py
rename to ansible/roles/common/filter_plugins/kolla_common_filters.py
diff --git a/ansible/roles/common/handlers/main.yml b/ansible/roles/common/handlers/main.yml
index aad204f7fb..a1f8162d93 100644
--- a/ansible/roles/common/handlers/main.yml
+++ b/ansible/roles/common/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "fluentd"
service: "{{ common_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -12,15 +12,13 @@
volumes: "{{ service.volumes }}"
environment: "{{ service.environment }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
- name: Restart kolla-toolbox container
vars:
service_name: "kolla-toolbox"
service: "{{ common_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -29,8 +27,6 @@
volumes: "{{ service.volumes }}"
environment: "{{ service.environment }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
notify:
- Initializing toolbox container using normal user
@@ -44,7 +40,7 @@
service_name: "cron"
service: "{{ common_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -52,5 +48,3 @@
volumes: "{{ service.volumes }}"
environment: "{{ service.environment }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/common/tasks/bootstrap.yml b/ansible/roles/common/tasks/bootstrap.yml
index eb824bf652..fa853b1d1c 100644
--- a/ansible/roles/common/tasks/bootstrap.yml
+++ b/ansible/roles/common/tasks/bootstrap.yml
@@ -1,7 +1,7 @@
---
- name: Creating log volume
become: true
- kolla_docker:
+ kolla_container:
action: "create_volume"
common_options: "{{ docker_common_options }}"
name: "kolla_logs"
@@ -10,7 +10,7 @@
- name: Link kolla_logs volume to /var/log/kolla
become: true
file:
- src: "{{ docker_runtime_directory or '/var/lib/docker' }}/volumes/kolla_logs/_data"
+ src: "{{ container_engine_volumes_path }}/kolla_logs/_data"
path: /var/log/kolla
state: link
when: inventory_hostname in groups['kolla-logs']
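
Editor's note: `container_engine_volumes_path` abstracts the per-engine volume root that the removed line hard-coded for Docker. A sketch of how it is assumed to be derived elsewhere in the tree (the podman path is an assumption, not shown in this diff):

    container_engine_volumes_path: "{{ (docker_runtime_directory or '/var/lib/docker') ~ '/volumes' if kolla_container_engine == 'docker' else '/var/lib/containers/storage/volumes' }}"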
diff --git a/ansible/roles/common/tasks/check-containers.yml b/ansible/roles/common/tasks/check-containers.yml
index 45f110a01b..b7e2f7c29f 100644
--- a/ansible/roles/common/tasks/check-containers.yml
+++ b/ansible/roles/common/tasks/check-containers.yml
@@ -1,17 +1,3 @@
---
-- name: Check common containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes }}"
- dimensions: "{{ item.value.dimensions }}"
- privileged: "{{ item.value.privileged | default(False) }}"
- environment: "{{ item.value.environment }}"
- when:
- - item.value | service_enabled_and_mapped_to_host
- with_dict: "{{ common_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/common/tasks/config.yml b/ansible/roles/common/tasks/config.yml
index dd0790e25a..f4a116c4a2 100644
--- a/ansible/roles/common/tasks/config.yml
+++ b/ansible/roles/common/tasks/config.yml
@@ -27,16 +27,25 @@
when:
- kolla_copy_ca_into_containers | bool
+- name: Copying over /run subdirectories conf
+ become: true
+ template:
+ src: kolla-directories.conf.j2
+ dest: /etc/tmpfiles.d/kolla.conf
+ when: kolla_container_engine == 'podman'
+
+- name: Restart systemd-tmpfiles
+ become: true
+ command: systemd-tmpfiles --create
+ when: kolla_container_engine == 'podman'
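
Editor's note: on a podman host, the effect of these two tasks can be verified with standard systemd tooling; a quick check (paths illustrative, assuming /run/kolla is among the configured subdirectories):

    systemd-tmpfiles --cat-config | grep kolla   # show the merged tmpfiles rules
    ls -ld /run/kolla                            # confirm the directory was created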
+
- name: Copying over config.json files for services
template:
src: "{{ item.key }}.json.j2"
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when: item.value | service_enabled_and_mapped_to_host
- with_dict: "{{ common_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ common_services | select_services_enabled_and_mapped_to_host }}"
- name: Find custom fluentd input config files
find:
@@ -74,12 +83,12 @@
delegate_to: localhost
when: common_services.fluentd.enabled | bool
-- name: Copying over td-agent.conf
+- name: Copying over fluentd.conf
vars:
- log_direct_to_elasticsearch: >-
- {{ ( enable_elasticsearch | bool or
- ( elasticsearch_address != kolla_internal_fqdn )) and
- ( not enable_monasca | bool or not monasca_ingest_control_plane_logs | bool ) }}
+ log_direct_to_elasticsearch: "{{ elasticsearch_address is defined }}"
+ log_direct_to_opensearch: >-
+ {{ enable_opensearch | bool or
+ ( opensearch_address != kolla_internal_fqdn ) }}
# Inputs
fluentd_input_files: "{{ default_input_files_enabled | customise_fluentd(customised_input_files) }}"
default_input_files_enabled: "{{ default_input_files | selectattr('enabled') | map(attribute='name') | list }}"
@@ -96,16 +105,14 @@
enabled: true
- name: "conf/input/05-libvirt.conf.j2"
enabled: "{{ enable_nova | bool and enable_nova_libvirt_container | bool }}"
- - name: "conf/input/06-zookeeper.conf.j2"
- enabled: true
- - name: "conf/input/07-kafka.conf.j2"
- enabled: true
- name: "conf/input/08-prometheus.conf.j2"
enabled: "{{ enable_prometheus_fluentd_integration | bool }}"
- - name: "conf/input/09-monasca.conf.j2"
- enabled: true
- name: "conf/input/10-openvswitch.conf.j2"
enabled: true
+ - name: "conf/input/11-letsencrypt.conf.j2"
+ enabled: "{{ enable_letsencrypt | bool }}"
+ - name: "conf/input/12-systemd.conf.j2"
+ enabled: "{{ enable_fluentd_systemd | bool }}"
customised_input_files: "{{ find_custom_fluentd_inputs.files | map(attribute='path') | list }}"
# Filters
fluentd_filter_files: "{{ default_filter_files | customise_fluentd(customised_filter_files) }}"
@@ -128,18 +135,16 @@
enabled: true
- name: "conf/output/01-es.conf.j2"
enabled: "{{ log_direct_to_elasticsearch }}"
- - name: "conf/output/02-monasca.conf.j2"
- enabled: "{{ enable_monasca | bool and monasca_ingest_control_plane_logs | bool }}"
+ - name: "conf/output/03-opensearch.conf.j2"
+ enabled: "{{ log_direct_to_opensearch }}"
customised_output_files: "{{ find_custom_fluentd_outputs.files | map(attribute='path') | list }}"
template:
- src: "td-agent.conf.j2"
- dest: "{{ node_config_directory }}/fluentd/td-agent.conf"
+ src: "fluentd.conf.j2"
+ dest: "{{ node_config_directory }}/fluentd/fluentd.conf"
mode: "0660"
become: true
when:
- common_services.fluentd | service_enabled_and_mapped_to_host
- notify:
- - Restart fluentd container
- name: Copying over cron logrotate config file
vars:
@@ -159,10 +164,8 @@
- { name: "collectd", enabled: "{{ enable_collectd | bool }}" }
- { name: "cyborg", enabled: "{{ enable_cyborg | bool }}" }
- { name: "designate", enabled: "{{ enable_designate | bool }}" }
- - { name: "elasticsearch", enabled: "{{ enable_elasticsearch | bool }}" }
- { name: "etcd", enabled: "{{ enable_etcd | bool }}" }
- { name: "fluentd", enabled: "{{ enable_fluentd | bool }}" }
- - { name: "freezer", enabled: "{{ enable_freezer | bool }}" }
- { name: "glance", enabled: "{{ enable_glance | bool }}" }
- { name: "glance-tls-proxy", enabled: "{{ glance_enable_tls_backend | bool }}" }
- { name: "gnocchi", enabled: "{{ enable_gnocchi | bool }}" }
@@ -174,49 +177,43 @@
- { name: "influxdb", enabled: "{{ enable_influxdb | bool }}" }
- { name: "ironic", enabled: "{{ enable_ironic | bool }}" }
- { name: "ironic-inspector", enabled: "{{ enable_ironic | bool }}" }
- - { name: "kafka", enabled: "{{ enable_kafka | bool }}" }
- { name: "keystone", enabled: "{{ enable_keystone | bool }}" }
- - { name: "kibana", enabled: "{{ enable_kibana | bool }}" }
- { name: "kuryr", enabled: "{{ enable_kuryr | bool }}" }
- { name: "magnum", enabled: "{{ enable_magnum | bool }}" }
- { name: "manila", enabled: "{{ enable_manila | bool }}" }
- { name: "mariadb", enabled: "{{ enable_mariadb | bool }}" }
- { name: "masakari", enabled: "{{ enable_masakari | bool }}" }
- { name: "mistral", enabled: "{{ enable_mistral | bool }}" }
- - { name: "monasca", enabled: "{{ enable_monasca | bool }}" }
- - { name: "murano", enabled: "{{ enable_murano | bool }}" }
- { name: "neutron", enabled: "{{ enable_neutron | bool }}" }
- { name: "neutron-tls-proxy", enabled: "{{ neutron_enable_tls_backend | bool }}" }
- { name: "nova", enabled: "{{ enable_nova | bool }}" }
- { name: "nova-libvirt", enabled: "{{ enable_nova | bool and enable_nova_libvirt_container | bool }}" }
- { name: "octavia", enabled: "{{ enable_octavia | bool }}" }
+ - { name: "opensearch", enabled: "{{ enable_opensearch | bool or enable_opensearch_dashboards | bool }}" }
- { name: "openvswitch", enabled: "{{ enable_openvswitch | bool }}" }
- - { name: "outward-rabbitmq", enabled: "{{ enable_outward_rabbitmq | bool }}" }
- { name: "placement", enabled: "{{ enable_placement | bool }}" }
- { name: "prometheus", enabled: "{{ enable_prometheus | bool }}" }
+ - { name: "proxysql", enabled: "{{ enable_proxysql | bool }}" }
- { name: "rabbitmq", enabled: "{{ enable_rabbitmq | bool }}" }
- - { name: "sahara", enabled: "{{ enable_sahara | bool }}" }
- - { name: "senlin", enabled: "{{ enable_senlin | bool }}" }
- - { name: "skydive", enabled: "{{ enable_skydive | bool }}" }
- - { name: "solum", enabled: "{{ enable_solum | bool }}" }
- - { name: "storm", enabled: "{{ enable_storm | bool }}" }
+ - { name: "redis", enabled: "{{ enable_redis | bool }}" }
+ - { name: "skyline", enabled: "{{ enable_skyline | bool }}" }
- { name: "swift", enabled: "{{ enable_swift | bool }}" }
- { name: "tacker", enabled: "{{ enable_tacker | bool }}" }
- { name: "trove", enabled: "{{ enable_trove | bool }}" }
- { name: "venus", enabled: "{{ enable_venus | bool }}" }
- - { name: "vitrage", enabled: "{{ enable_vitrage | bool }}" }
- { name: "watcher", enabled: "{{ enable_watcher | bool }}" }
- - { name: "zookeeper", enabled: "{{ enable_zookeeper | bool }}" }
- { name: "zun", enabled: "{{ enable_zun | bool }}" }
template:
- src: "cron-logrotate-global.conf.j2"
+ src: "{{ item }}"
dest: "{{ node_config_directory }}/cron/logrotate.conf"
mode: "0660"
become: true
when:
- common_services.cron | service_enabled_and_mapped_to_host
- notify:
- - Restart cron container
+ with_first_found:
+ - "{{ node_custom_config }}/cron/{{ inventory_hostname }}/cron-logrotate-global.conf"
+ - "{{ node_custom_config }}/cron/cron-logrotate-global.conf"
+ - "cron-logrotate-global.conf.j2"
- name: Ensure RabbitMQ Erlang cookie exists
become: true
@@ -227,8 +224,6 @@
when:
- common_services['kolla-toolbox'] | service_enabled_and_mapped_to_host
- enable_rabbitmq | bool
- notify:
- - Restart kolla-toolbox container
- name: Ensuring config directories have correct owner and permission
become: true
@@ -239,9 +234,8 @@
mode: "0770"
ignore_errors: "{{ ansible_check_mode }}"
when:
- - item.value | service_enabled_and_mapped_to_host
- item.key != "kolla-toolbox"
- with_dict: "{{ common_services }}"
+ with_dict: "{{ common_services | select_services_enabled_and_mapped_to_host }}"
- name: Copy rabbitmq-env.conf to kolla toolbox
template:
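
Editor's note: because the tasks above glob for custom fluentd input/filter/output files, an operator drop-in is merged automatically into the rendered config. A minimal sketch (path and tag are illustrative; the lookup directory is assumed to be {{ node_custom_config }}/fluentd/input):

    <source>
      @type tail
      path /var/log/kolla/myapp/*.log
      pos_file /var/run/fluentd/myapp.pos
      tag kolla.myapp
    </source>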
diff --git a/ansible/roles/kibana/tasks/check.yml b/ansible/roles/common/tasks/config_validate.yml
similarity index 100%
rename from ansible/roles/kibana/tasks/check.yml
rename to ansible/roles/common/tasks/config_validate.yml
diff --git a/ansible/roles/common/templates/admin-openrc-system.sh.j2 b/ansible/roles/common/templates/admin-openrc-system.sh.j2
new file mode 100644
index 0000000000..6b4d95b969
--- /dev/null
+++ b/ansible/roles/common/templates/admin-openrc-system.sh.j2
@@ -0,0 +1,23 @@
+# {{ ansible_managed }}
+
+# Clear any old environment that may conflict.
+for key in $( set | awk '{FS="="} /^OS_/ {print $1}' ); do unset $key ; done
+export OS_USER_DOMAIN_NAME='Default'
+export OS_SYSTEM_SCOPE=all
+export OS_USERNAME='{{ keystone_admin_user }}'
+export OS_PASSWORD='{{ keystone_admin_password }}'
+export OS_AUTH_URL='{{ keystone_internal_url }}'
+export OS_INTERFACE='internal'
+export OS_ENDPOINT_TYPE='internalURL'
+{% if enable_manila | bool %}
+export OS_MANILA_ENDPOINT_TYPE='internalURL'
+{% endif %}
+{% if enable_mistral | bool %}
+export OS_MISTRAL_ENDPOINT_TYPE='internalURL'
+{% endif %}
+export OS_IDENTITY_API_VERSION='3'
+export OS_REGION_NAME='{{ openstack_region_name }}'
+export OS_AUTH_PLUGIN='password'
+{% if kolla_admin_openrc_cacert is not none and kolla_admin_openrc_cacert | length > 0 %}
+export OS_CACERT='{{ kolla_admin_openrc_cacert }}'
+{% endif %}
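
Editor's note: a usage sketch for the new system-scoped credentials file, assuming it is written out as /etc/kolla/admin-openrc-system.sh (the install location is not shown in this diff):

    . /etc/kolla/admin-openrc-system.sh
    openstack endpoint list   # runs with a system-scoped token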
diff --git a/ansible/roles/common/templates/admin-openrc.sh.j2 b/ansible/roles/common/templates/admin-openrc.sh.j2
index 2e74ade1be..367210639e 100644
--- a/ansible/roles/common/templates/admin-openrc.sh.j2
+++ b/ansible/roles/common/templates/admin-openrc.sh.j2
@@ -2,24 +2,24 @@
# Clear any old environment that may conflict.
for key in $( set | awk '{FS="="} /^OS_/ {print $1}' ); do unset $key ; done
-export OS_PROJECT_DOMAIN_NAME=Default
-export OS_USER_DOMAIN_NAME=Default
-export OS_PROJECT_NAME={{ keystone_admin_project }}
-export OS_TENANT_NAME={{ keystone_admin_project }}
-export OS_USERNAME={{ keystone_admin_user }}
-export OS_PASSWORD={{ keystone_admin_password }}
-export OS_AUTH_URL={{ keystone_internal_url }}
-export OS_INTERFACE=internal
-export OS_ENDPOINT_TYPE=internalURL
+export OS_PROJECT_DOMAIN_NAME='Default'
+export OS_USER_DOMAIN_NAME='Default'
+export OS_PROJECT_NAME='{{ keystone_admin_project }}'
+export OS_TENANT_NAME='{{ keystone_admin_project }}'
+export OS_USERNAME='{{ keystone_admin_user }}'
+export OS_PASSWORD='{{ keystone_admin_password }}'
+export OS_AUTH_URL='{{ keystone_internal_url }}'
+export OS_INTERFACE='internal'
+export OS_ENDPOINT_TYPE='internalURL'
{% if enable_manila | bool %}
-export OS_MANILA_ENDPOINT_TYPE=internalURL
+export OS_MANILA_ENDPOINT_TYPE='internalURL'
{% endif %}
{% if enable_mistral | bool %}
-export OS_MISTRAL_ENDPOINT_TYPE=internalURL
+export OS_MISTRAL_ENDPOINT_TYPE='internalURL'
{% endif %}
-export OS_IDENTITY_API_VERSION=3
-export OS_REGION_NAME={{ openstack_region_name }}
-export OS_AUTH_PLUGIN=password
+export OS_IDENTITY_API_VERSION='3'
+export OS_REGION_NAME='{{ openstack_region_name }}'
+export OS_AUTH_PLUGIN='password'
{% if kolla_admin_openrc_cacert is not none and kolla_admin_openrc_cacert | length > 0 %}
-export OS_CACERT={{ kolla_admin_openrc_cacert }}
+export OS_CACERT='{{ kolla_admin_openrc_cacert }}'
{% endif %}
diff --git a/ansible/roles/common/templates/clouds.yaml.j2 b/ansible/roles/common/templates/clouds.yaml.j2
index 574a603f9f..0485bedabf 100644
--- a/ansible/roles/common/templates/clouds.yaml.j2
+++ b/ansible/roles/common/templates/clouds.yaml.j2
@@ -10,6 +10,17 @@ clouds:
region_name: {{ openstack_region_name }}
{% if kolla_admin_openrc_cacert is not none and kolla_admin_openrc_cacert | length > 0 %}
cacert: {{ kolla_admin_openrc_cacert }}
+{% endif %}
+ kolla-admin-system:
+ auth:
+ auth_url: {{ keystone_public_url }}
+ user_domain_name: Default
+ system_scope: all
+ username: {{ keystone_admin_user }}
+ password: {{ keystone_admin_password }}
+ region_name: {{ openstack_region_name }}
+{% if kolla_admin_openrc_cacert is not none and kolla_admin_openrc_cacert | length > 0 %}
+ cacert: {{ kolla_admin_openrc_cacert }}
{% endif %}
kolla-admin-internal:
auth:
@@ -23,4 +34,16 @@ clouds:
region_name: {{ openstack_region_name }}
{% if kolla_admin_openrc_cacert is not none and kolla_admin_openrc_cacert | length > 0 %}
cacert: {{ kolla_admin_openrc_cacert }}
+{% endif %}
+ kolla-admin-system-internal:
+ auth:
+ auth_url: {{ keystone_internal_url }}
+ user_domain_name: Default
+ system_scope: all
+ username: {{ keystone_admin_user }}
+ password: {{ keystone_admin_password }}
+ interface: internal
+ region_name: {{ openstack_region_name }}
+{% if kolla_admin_openrc_cacert is not none and kolla_admin_openrc_cacert | length > 0 %}
+ cacert: {{ kolla_admin_openrc_cacert }}
{% endif %}
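
Editor's note: the new cloud entries can be exercised directly with the openstack client, assuming the file is installed somewhere os-client-config searches (e.g. /etc/openstack/clouds.yaml):

    export OS_CLOUD=kolla-admin-system-internal
    openstack endpoint list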
diff --git a/ansible/roles/common/templates/conf/filter/00-record_transformer.conf.j2 b/ansible/roles/common/templates/conf/filter/00-record_transformer.conf.j2
index 723a37dfc8..ffdf37c27b 100644
--- a/ansible/roles/common/templates/conf/filter/00-record_transformer.conf.j2
+++ b/ansible/roles/common/templates/conf/filter/00-record_transformer.conf.j2
@@ -33,8 +33,7 @@
# Rename internal Fluent message field to match other logs. This removes
# all other fields by default, including the original message field. This is
-# intented to avoid duplication of the log message and to prevent passing
-# invalid dimensions to Monasca, if it is enabled. Note that if this step
+# intended to avoid duplication of the log message. Note that if this step
# is moved to the format folder, then it will be applied after the second step
# below which will break the logic.
@@ -51,25 +50,3 @@
log_level ${tag_parts[1]}
</record>
</filter>
-
-{% if enable_monasca | bool and monasca_ingest_control_plane_logs | bool %}
-# Kolla configures Fluentd to extract timestamps from OpenStack service
-# logs, however these timestamps are not saved in the event and are not
-# forwarded to Monasca. Here we save the timestamp which has been
-# *parsed* by Fluentd to a field which is part of the event and *is*
-# therefore forwarded to Monasca. If no timestamp is parsed, then this
-# should stamp the event with the current time. Note that since Kolla
-# configures Fluentd to keep the time key, the original, *unparsed*
-# timestamp, if present, will also be forwarded to Monasca as part of the
-# event. However, because the logs which are collected by Fluentd use a
-# variety of time formats the format of this timestamp is irregular and
-# is therefore dropped in the Monasca log pipeline in favour of the
-# timestamp added here. In the future we could investigate getting the
-# Fluentd Monasca plugin to add this timestamp.
-<filter **>
- @type record_transformer
- <record>
- timestamp ${time}
- </record>
-</filter>
-{% endif %}
diff --git a/ansible/roles/common/templates/conf/filter/01-rewrite.conf.j2 b/ansible/roles/common/templates/conf/filter/01-rewrite.conf.j2
index 5a0f96ca8f..ee761a34f0 100644
--- a/ansible/roles/common/templates/conf/filter/01-rewrite.conf.j2
+++ b/ansible/roles/common/templates/conf/filter/01-rewrite.conf.j2
@@ -3,12 +3,12 @@
capitalize_regex_backreference yes
<rule>
key programname
- pattern ^(cinder-api-access|cloudkitty-api-access|gnocchi-api-access|horizon-access|keystone-apache-admin-access|keystone-apache-public-access|monasca-api-access|octavia-api-access|placement-api-access)$
+ pattern ^(cinder-api-access|cloudkitty-api-access|gnocchi-api-access|horizon-access|keystone-apache-admin-access|keystone-apache-public-access|octavia-api-access|placement-api-access|trove-api-access)$
tag apache_access
</rule>
<rule>
key programname
- pattern ^(aodh_wsgi_access|barbican_api_uwsgi_access|zun_api_wsgi_access|vitrage_wsgi_access)$
+ pattern ^(aodh_wsgi_access|barbican_api_uwsgi_access|zun_api_wsgi_access)$
tag wsgi_access
</rule>
@@ -16,11 +16,6 @@
pattern ^(nova-api|nova-compute|nova-compute-ironic|nova-conductor|nova-manage|nova-novncproxy|nova-scheduler|nova-placement-api|placement-api|privsep-helper)$
tag openstack_python
</rule>
-<rule>
- key programname
- pattern ^(sahara-api|sahara-engine)$
- tag openstack_python
-</rule>
<rule>
key programname
pattern ^(neutron-server|neutron-openvswitch-agent|neutron-ns-metadata-proxy|neutron-metadata-agent|neutron-l3-agent|neutron-dhcp-agent)$
@@ -96,26 +91,11 @@
pattern ^(trove-api|trove-conductor|trove-manage|trove-taskmanager)$
tag openstack_python
</rule>
-<rule>
- key programname
- pattern ^(murano-api|murano-engine)$
- tag openstack_python
-</rule>
-<rule>
- key programname
- pattern ^(senlin-api|senlin-conductor|senlin-engine|senlin-health-manager)$
- tag openstack_python
-</rule>
<rule>
key programname
pattern ^(watcher-api|watcher-applier|watcher-db-manage|watcher-decision-engine)$
tag openstack_python
</rule>
-<rule>
- key programname
- pattern ^(freezer-api|freezer-api_access|freezer-manage)$
- tag openstack_python
-</rule>
<rule>
key programname
pattern ^(octavia-api|octavia-health-manager|octavia-housekeeping|octavia-worker)$
@@ -146,11 +126,6 @@
pattern ^(tacker-server|tacker-conductor)$
tag openstack_python
</rule>
-<rule>
- key programname
- pattern ^(vitrage-ml|vitrage-notifier|vitrage-graph|vitrage-persistor)$
- tag openstack_python
-</rule>
<rule>
key programname
pattern ^(blazar-api|blazar-manager)$
@@ -158,17 +133,17 @@
<rule>
key programname
- pattern ^(monasca-api|monasca-notification|monasca-persister|agent-collector|agent-forwarder|agent-statsd)$
+ pattern ^(masakari-engine|masakari-api)$
tag openstack_python
</rule>
<rule>
key programname
- pattern ^(masakari-engine|masakari-api)$
+ pattern ^(venus-api|venus-manager)$
tag openstack_python
</rule>
<rule>
key programname
- pattern ^(venus-api|venus-manager)$
+ pattern ^(skyline)$
tag openstack_python
</rule>
diff --git a/ansible/roles/common/templates/conf/format/apache_access.conf.j2 b/ansible/roles/common/templates/conf/format/apache_access.conf.j2
index bef2c4eeb4..ba12c6695d 100644
--- a/ansible/roles/common/templates/conf/format/apache_access.conf.j2
+++ b/ansible/roles/common/templates/conf/format/apache_access.conf.j2
@@ -4,7 +4,7 @@
key_name Payload
<parse>
@type grok
- grok_pattern \[%{HTTPDATE:Timestamp}\] "(?:%{WORD:http_method} %{NOTSPACE:http_url}(?: HTTP/%{NUMBER:http_version})?|%{DATA:rawrequest})" %{NUMBER:http_status} (?:\d+|-)
+ grok_pattern \[%{HTTPDATE:Timestamp}\] "(?:%{WORD:http_method} %{NOTSPACE:http_url}(?: HTTP/%{NUMBER:http_version})?|%{DATA:rawrequest})" %{NUMBER:http_status} (?:%{NUMBER:http_bytes}|-) (?:%{NUMBER:http_response_time_us}|-) "%{DATA:referrer}" "%{DATA:agent}"
time_key Timestamp
time_format %d/%b/%Y:%H:%M:%S %z
keep_time_key true
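
Editor's note: for reference, a log line of the shape the extended grok pattern above now captures, including byte count, response time, referrer and agent (values illustrative):

    [12/Mar/2024:10:15:30 +0000] "GET /v3/auth/tokens HTTP/1.1" 201 1042 15324 "-" "python-keystoneclient"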
diff --git a/ansible/roles/common/templates/conf/input/00-global.conf.j2 b/ansible/roles/common/templates/conf/input/00-global.conf.j2
index af6a4fedb8..fbd84b7f0b 100644
--- a/ansible/roles/common/templates/conf/input/00-global.conf.j2
+++ b/ansible/roles/common/templates/conf/input/00-global.conf.j2
@@ -3,22 +3,20 @@
@type tail
path {% for service in fluentd_enabled_input_openstack_services %}/var/log/kolla/{{ service }}/*.log{% if not loop.last %},{% endif %}{% endfor %}
- exclude_path ["/var/log/kolla/monasca/agent*.log",
- "/var/log/kolla/monasca/monasca-api.log",
- "/var/log/kolla/neutron/dnsmasq.log",
+ exclude_path ["/var/log/kolla/neutron/dnsmasq.log",
"/var/log/kolla/ironic/dnsmasq.log",
"/var/log/kolla/*/*-access.log",
"/var/log/kolla/*/*-error.log",
"/var/log/kolla/*/*_access.log",
"/var/log/kolla/*/*_error.log"]
- pos_file /var/run/td-agent/kolla-openstack.pos
+ pos_file /var/run/fluentd/kolla-openstack.pos
tag kolla.*
ignore_repeated_permission_error true
- enable_watch_timer false
+ enable_watch_timer {{ fluentd_enable_watch_timer }}
<parse>
@type multiline
- format_firstline /^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3} \d+ \S+ \S+ \[(req-\S+ \S+ \S+ \S+ \S+ \S+|-)\]/
- format1 /^(?<Timestamp>\S+ \S+) (?<Pid>\d+) (?<log_level>\S+) (?<programname>\S+) (\[(req-(?<request_id>\S+) (?<user_id>\S+) (?<tenant_id>\S+) (?<domain_id>\S+) (?<user_domain>\S+) (?<project_domain>\S+)|-)\])? (?<Payload>.*)?$/
+ format_firstline /^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3} \d+ \S+ \S+ \[.*\]/
+ format1 /^(?<Timestamp>\S+ \S+) (?<Pid>\d+) (?<log_level>\S+) (?<programname>\S+) \[(\S+ req-)?((?<request_id>\S+) (?<global_request_id>\S+) (?<user_id>\S+) (?<project_id>\S+) (?<domain_id>\S+) (?<user_domain>\S+) (?<project_domain>\S+)|-)\] (?<Payload>.*)?$/
time_key Timestamp
keep_time_key true
time_format %F %T.%L
diff --git a/ansible/roles/common/templates/conf/input/02-mariadb.conf.j2 b/ansible/roles/common/templates/conf/input/02-mariadb.conf.j2
index f4ed045f81..f5df2b4557 100644
--- a/ansible/roles/common/templates/conf/input/02-mariadb.conf.j2
+++ b/ansible/roles/common/templates/conf/input/02-mariadb.conf.j2
@@ -3,9 +3,9 @@
@type tail
path /var/log/kolla/mariadb/mariadb.log
- pos_file /var/run/td-agent/mariadb.pos
+ pos_file /var/run/fluentd/mariadb.pos
tag infra.mariadb
- enable_watch_timer false
+ enable_watch_timer {{ fluentd_enable_watch_timer }}
<parse>
@type multiline
format_firstline /^(\d{4}-\d{2}-\d{2}|\d{6}) /
@@ -15,10 +15,10 @@
@type tail
path /var/log/kolla/mariadb/xinetd.log
- pos_file /var/run/td-agent/mariadb-xinetd.pos
+ pos_file /var/run/fluentd/mariadb-xinetd.pos
tag infra.mariadb-xinetd
ignore_repeated_permission_error true
- enable_watch_timer false
+ enable_watch_timer {{ fluentd_enable_watch_timer }}
<parse>
@type multiline
format_firstline /^\d{2}/\d{1,2}/\d{1,2}@\d{1,2}:\d{1,2}:\d{1,2}\: (START|EXIT)\: /
diff --git a/ansible/roles/common/templates/conf/input/03-rabbitmq.conf.j2 b/ansible/roles/common/templates/conf/input/03-rabbitmq.conf.j2
index e0e74b1fea..8cffc003df 100644
--- a/ansible/roles/common/templates/conf/input/03-rabbitmq.conf.j2
+++ b/ansible/roles/common/templates/conf/input/03-rabbitmq.conf.j2
@@ -1,12 +1,12 @@
<source>
@type tail
path /var/log/kolla/rabbitmq/rabbit@{{ ansible_facts.hostname }}.log
- pos_file /var/run/td-agent/rabbit.pos
+ pos_file /var/run/fluentd/rabbit.pos
tag infra.rabbit
- enable_watch_timer false
+ enable_watch_timer {{ fluentd_enable_watch_timer }}
<parse>
@type multiline
- format_firstline /^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}/
- format1 /^(?<Timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}) \[(?<log_level>\w+)\] (?<Payload>.*)/
+ format_firstline /^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}(\d+\+\d{2}:\d{2})?/
+ format1 /^(?<Timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3})(\d+\+\d{2}:\d{2})? \[(?<log_level>\w+)\] (?<Payload>.*)/
</parse>
</source>
diff --git a/ansible/roles/common/templates/conf/input/04-openstack-wsgi.conf.j2 b/ansible/roles/common/templates/conf/input/04-openstack-wsgi.conf.j2
index f94edf8a6a..20d408f123 100644
--- a/ansible/roles/common/templates/conf/input/04-openstack-wsgi.conf.j2
+++ b/ansible/roles/common/templates/conf/input/04-openstack-wsgi.conf.j2
@@ -2,9 +2,9 @@
@type tail
path /var/log/kolla/*/*-access.log,/var/log/kolla/*/*-error.log,/var/log/kolla/*/*_access.log,/var/log/kolla/*/*_error.log
- pos_file /var/run/td-agent/kolla-openstack-wsgi.pos
+ pos_file /var/run/fluentd/kolla-openstack-wsgi.pos
tag kolla.*
- enable_watch_timer false
+ enable_watch_timer {{ fluentd_enable_watch_timer }}
<parse>
@type regexp
expression /^(?<Payload>.*)$/
diff --git a/ansible/roles/common/templates/conf/input/05-libvirt.conf.j2 b/ansible/roles/common/templates/conf/input/05-libvirt.conf.j2
index 249e52b214..4765c96082 100644
--- a/ansible/roles/common/templates/conf/input/05-libvirt.conf.j2
+++ b/ansible/roles/common/templates/conf/input/05-libvirt.conf.j2
@@ -1,9 +1,9 @@
<source>
@type tail
path /var/log/kolla/libvirt/libvirtd.log
- pos_file /var/run/td-agent/libvirt.pos
+ pos_file /var/run/fluentd/libvirt.pos
tag infra.libvirt
- enable_watch_timer false
+ enable_watch_timer {{ fluentd_enable_watch_timer }}
<parse>
@type regexp
expression /^(?<Timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3}\+\d{4}): (?<Pid>\d+): (?<log_level>\S+) : (?<Payload>.*)?$/
diff --git a/ansible/roles/common/templates/conf/input/06-zookeeper.conf.j2 b/ansible/roles/common/templates/conf/input/06-zookeeper.conf.j2
deleted file mode 100644
index 3db0ea0a22..0000000000
--- a/ansible/roles/common/templates/conf/input/06-zookeeper.conf.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-<source>
- @type tail
- path /var/log/kolla/zookeeper/zookeeper.log
- pos_file /var/run/td-agent/zookeeper.pos
- tag infra.*
- <parse>
- @type multiline
- format_firstline /^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3} \S+ \S+ \S+ .*$/
- format1 /^(?<Timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}) \[(?<Thread>\S+)\] \S+ (?<log_level>\S+) (?<Payload>.*)$/
- time_key Timestamp
- </parse>
-</source>
diff --git a/ansible/roles/common/templates/conf/input/07-kafka.conf.j2 b/ansible/roles/common/templates/conf/input/07-kafka.conf.j2
deleted file mode 100644
index a80dd0a00c..0000000000
--- a/ansible/roles/common/templates/conf/input/07-kafka.conf.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-<source>
- @type tail
- path /var/log/kolla/kafka/controller.log, /var/log/kolla/kafka/server.log, /var/log/kolla/kafka/state-change.log
- pos_file /var/run/td-agent/kafka.pos
- tag infra.*
- <parse>
- @type multiline
- format_firstline /^\[\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}\] \S+ .*$/
- format1 /^\[(?<Timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})\] (?<log_level>\S+) (?<Payload>.*)$/
- time_key Timestamp
- </parse>
-</source>
diff --git a/ansible/roles/common/templates/conf/input/09-monasca.conf.j2 b/ansible/roles/common/templates/conf/input/09-monasca.conf.j2
deleted file mode 100644
index ede130cabb..0000000000
--- a/ansible/roles/common/templates/conf/input/09-monasca.conf.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-<source>
- @type tail
- path /var/log/kolla/monasca/agent*.log
- pos_file /var/run/td-agent/monasca-agent.pos
- tag kolla.*
- <parse>
- @type multiline
- format_firstline /^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} \S+ \| \S+ \| \S+ \| .*$/
- format1 /^(?<Timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} \S+) \| (?<log_level>\S+) \| (?<programname>\S+) \| (?<Payload>.*)$/
- time_key Timestamp
- </parse>
-</source>
diff --git a/ansible/roles/common/templates/conf/input/10-openvswitch.conf.j2 b/ansible/roles/common/templates/conf/input/10-openvswitch.conf.j2
index f08272bdf9..ffad69348a 100644
--- a/ansible/roles/common/templates/conf/input/10-openvswitch.conf.j2
+++ b/ansible/roles/common/templates/conf/input/10-openvswitch.conf.j2
@@ -1,9 +1,9 @@
<source>
@type tail
path /var/log/kolla/openvswitch/ovs-vswitchd.log
- pos_file /var/run/td-agent/openvswitch.pos
+ pos_file /var/run/fluentd/openvswitch.pos
tag infra.openvswitch
- enable_watch_timer false
+ enable_watch_timer {{ fluentd_enable_watch_timer }}
<parse>
@type multiline
format_firstline /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}/
@@ -16,9 +16,9 @@
<source>
@type tail
path /var/log/kolla/openvswitch/ovsdb-server.log
- pos_file /var/run/td-agent/openvswitchdb.pos
+ pos_file /var/run/fluentd/openvswitchdb.pos
tag infra.openvswitchdb
- enable_watch_timer false
+ enable_watch_timer {{ fluentd_enable_watch_timer }}
<parse>
@type multiline
format_firstline /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}/
diff --git a/ansible/roles/common/templates/conf/input/11-letsencrypt.conf.j2 b/ansible/roles/common/templates/conf/input/11-letsencrypt.conf.j2
new file mode 100644
index 0000000000..cfada0786a
--- /dev/null
+++ b/ansible/roles/common/templates/conf/input/11-letsencrypt.conf.j2
@@ -0,0 +1,15 @@
+<source>
+ @type tail
+ @log_level debug
+ path /var/log/kolla/letsencrypt/letsencrypt-lego.log
+ pos_file /var/run/fluentd/letsencrypt.pos
+ tag infra.letsencrypt.lego
+ enable_watch_timer {{ fluentd_enable_watch_timer }}
+ <parse>
+ @type multiline
+ format_firstline /^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2}/
+ format1 /^(?<Timestamp>\d{4}\/\d{2}\/\d{2} \d{2}:\d{2}:\d{2}) \[(?<log_level>\S+)\] (?<Payload>.*)/
+ time_key Timestamp
+ time_format %Y/%m/%d %H:%M:%S
+ </parse>
+</source>
diff --git a/ansible/roles/common/templates/conf/input/12-systemd.conf.j2 b/ansible/roles/common/templates/conf/input/12-systemd.conf.j2
new file mode 100644
index 0000000000..001cb6a9c9
--- /dev/null
+++ b/ansible/roles/common/templates/conf/input/12-systemd.conf.j2
@@ -0,0 +1,9 @@
+<source>
+ @type systemd
+ tag journal
+ path /var/log/journal
+ <entry>
+ fields_strip_underscores true
+ fields_lowercase true
+ </entry>
+</source>
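
Editor's note: this input uses the systemd input plugin (fluent-plugin-systemd) together with the read-only journal bind mount added to fluentd_default_volumes earlier in this change. It is gated by the flag referenced in the config tasks above, settable in globals.yml:

    enable_fluentd_systemd: "yes"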
diff --git a/ansible/roles/common/templates/conf/output/00-local.conf.j2 b/ansible/roles/common/templates/conf/output/00-local.conf.j2
index d336230e7c..62c7965bfa 100644
--- a/ansible/roles/common/templates/conf/output/00-local.conf.j2
+++ b/ansible/roles/common/templates/conf/output/00-local.conf.j2
@@ -18,7 +18,7 @@
<store>
@type elasticsearch
host {{ elasticsearch_address }}
- port {{ elasticsearch_port }}
+ port {{ elasticsearch_port | default('9200') }}
scheme {{ fluentd_elasticsearch_scheme }}
{% if fluentd_elasticsearch_path != '' %}
path {{ fluentd_elasticsearch_path }}
@@ -35,35 +35,51 @@
password {{ fluentd_elasticsearch_password }}
{% endif %}
logstash_format true
- logstash_prefix {{ kibana_log_prefix }}
+ logstash_prefix {{ opensearch_log_index_prefix }}
reconnect_on_error true
request_timeout {{ fluentd_elasticsearch_request_timeout }}
suppress_type_name true
+ bulk_message_request_threshold {{ fluentd_bulk_message_request_threshold }}
<buffer>
@type file
path /var/lib/fluentd/data/elasticsearch.buffer/{{ item.facility }}.*
flush_interval 15s
+ chunk_limit_size {{ fluentd_buffer_chunk_limit_size }}
</buffer>
</store>
-{% elif enable_monasca | bool and monasca_ingest_control_plane_logs | bool %}
+{% elif log_direct_to_opensearch %}
-<store>
- @type monasca
- keystone_url {{ keystone_internal_url }}
- monasca_api {{ monasca_log_api_internal_endpoint }}
- monasca_api_version v2.0
- username {{ monasca_agent_user }}
- password {{ monasca_agent_password }}
- domain_id default
- project_name {{ monasca_control_plane_project }}
- message_field_name Payload
- max_retry_wait 1800s
- disable_retry_limit true
- <buffer>
- @type file
- path /var/lib/fluentd/data/monasca.buffer/{{ item.facility }}.*
- chunk_limit_size 8m
- </buffer>
-</store>
+<store>
+ @type opensearch
+ host {{ opensearch_address }}
+ port {{ opensearch_port }}
+ scheme {{ fluentd_opensearch_scheme }}
+{% if fluentd_opensearch_path != '' %}
+ path {{ fluentd_opensearch_path }}
+{% endif %}
+{% if fluentd_opensearch_scheme == 'https' %}
+ ssl_version {{ fluentd_opensearch_ssl_version }}
+ ssl_verify {{ fluentd_opensearch_ssl_verify }}
+{% if fluentd_opensearch_cacert | length > 0 %}
+ ca_file {{ fluentd_opensearch_cacert }}
+{% endif %}
+{% endif %}
+{% if fluentd_opensearch_user != '' and fluentd_opensearch_password != '' %}
+ user {{ fluentd_opensearch_user }}
+ password {{ fluentd_opensearch_password }}
+{% endif %}
+ logstash_format true
+ logstash_prefix {{ opensearch_log_index_prefix }}
+ reconnect_on_error true
+ request_timeout {{ fluentd_opensearch_request_timeout }}
+ suppress_type_name true
+ bulk_message_request_threshold {{ fluentd_bulk_message_request_threshold }}
+ <buffer>
+ @type file
+ path /var/lib/fluentd/data/opensearch.buffer/{{ item.facility }}.*
+ flush_interval 15s
+ chunk_limit_size {{ fluentd_buffer_chunk_limit_size }}
+ </buffer>
+</store>
{% endif %}
{% endfor %}
diff --git a/ansible/roles/common/templates/conf/output/01-es.conf.j2 b/ansible/roles/common/templates/conf/output/01-es.conf.j2
index 3056547801..91d011391b 100644
--- a/ansible/roles/common/templates/conf/output/01-es.conf.j2
+++ b/ansible/roles/common/templates/conf/output/01-es.conf.j2
@@ -3,7 +3,7 @@
<store>
@type elasticsearch
host {{ elasticsearch_address }}
- port {{ elasticsearch_port }}
+ port {{ elasticsearch_port | default('9200') }}
scheme {{ fluentd_elasticsearch_scheme }}
{% if fluentd_elasticsearch_path != '' %}
path {{ fluentd_elasticsearch_path }}
@@ -20,14 +20,16 @@
password {{ fluentd_elasticsearch_password }}
{% endif %}
logstash_format true
- logstash_prefix {{ kibana_log_prefix }}
+ logstash_prefix {{ opensearch_log_index_prefix }}
reconnect_on_error true
request_timeout {{ fluentd_elasticsearch_request_timeout }}
suppress_type_name true
+ bulk_message_request_threshold {{ fluentd_bulk_message_request_threshold }}
<buffer>
@type file
path /var/lib/fluentd/data/elasticsearch.buffer/openstack.*
flush_interval 15s
+ chunk_limit_size {{ fluentd_buffer_chunk_limit_size }}
diff --git a/ansible/roles/common/templates/conf/output/02-monasca.conf.j2 b/ansible/roles/common/templates/conf/output/02-monasca.conf.j2
deleted file mode 100644
index b0aeeeff28..0000000000
--- a/ansible/roles/common/templates/conf/output/02-monasca.conf.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-<match **>
- @type copy
- <store>
- @type monasca
- keystone_url {{ keystone_internal_url }}
- monasca_api {{ monasca_log_api_internal_endpoint }}
- monasca_api_version v2.0
- username {{ monasca_agent_user }}
- password {{ monasca_agent_password }}
- domain_id default
- project_name {{ monasca_control_plane_project }}
- message_field_name Payload
- max_retry_wait 1800s
- disable_retry_limit true
- <buffer>
- @type file
- path /var/lib/fluentd/data/monasca.buffer/openstack.*
- chunk_limit_size 8m
- </buffer>
- </store>
-</match>
diff --git a/ansible/roles/common/templates/conf/output/03-opensearch.conf.j2 b/ansible/roles/common/templates/conf/output/03-opensearch.conf.j2
new file mode 100644
index 0000000000..6f4beb2d64
--- /dev/null
+++ b/ansible/roles/common/templates/conf/output/03-opensearch.conf.j2
@@ -0,0 +1,35 @@
+<match **>
+ @type copy
+ <store>
+ @type opensearch
+ host {{ opensearch_address }}
+ port {{ opensearch_port }}
+ scheme {{ fluentd_opensearch_scheme }}
+{% if fluentd_opensearch_path != '' %}
+ path {{ fluentd_opensearch_path }}
+{% endif %}
+{% if fluentd_opensearch_scheme == 'https' %}
+ ssl_version {{ fluentd_opensearch_ssl_version }}
+ ssl_verify {{ fluentd_opensearch_ssl_verify }}
+{% if fluentd_opensearch_cacert | length > 0 %}
+ ca_file {{ fluentd_opensearch_cacert }}
+{% endif %}
+{% endif %}
+{% if fluentd_opensearch_user != '' and fluentd_opensearch_password != '' %}
+ user {{ fluentd_opensearch_user }}
+ password {{ fluentd_opensearch_password }}
+{% endif %}
+ logstash_format true
+ logstash_prefix {{ opensearch_log_index_prefix }}
+ reconnect_on_error true
+ request_timeout {{ fluentd_opensearch_request_timeout }}
+ suppress_type_name true
+ bulk_message_request_threshold {{ fluentd_bulk_message_request_threshold }}
+ <buffer>
+ @type file
+ path /var/lib/fluentd/data/opensearch.buffer/openstack.*
+ flush_interval 15s
+ chunk_limit_size {{ fluentd_buffer_chunk_limit_size }}
+ </buffer>
+ </store>
+</match>
diff --git a/ansible/roles/common/templates/cron-logrotate-elasticsearch.conf.j2 b/ansible/roles/common/templates/cron-logrotate-elasticsearch.conf.j2
deleted file mode 100644
index cbdd2c780a..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-elasticsearch.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/elasticsearch/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-freezer.conf.j2 b/ansible/roles/common/templates/cron-logrotate-freezer.conf.j2
deleted file mode 100644
index fd5430fca2..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-freezer.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/freezer/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-kafka.conf.j2 b/ansible/roles/common/templates/cron-logrotate-kafka.conf.j2
deleted file mode 100644
index 79545c60ea..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-kafka.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/kafka/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-kibana.conf.j2 b/ansible/roles/common/templates/cron-logrotate-kibana.conf.j2
deleted file mode 100644
index e971d4dc5a..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-kibana.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/kibana/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-letsencrypt.conf.j2 b/ansible/roles/common/templates/cron-logrotate-letsencrypt.conf.j2
new file mode 100644
index 0000000000..fea08e0163
--- /dev/null
+++ b/ansible/roles/common/templates/cron-logrotate-letsencrypt.conf.j2
@@ -0,0 +1,3 @@
+"/var/log/kolla/letsencrypt/*.log"
+{
+}
diff --git a/ansible/roles/common/templates/cron-logrotate-monasca.conf.j2 b/ansible/roles/common/templates/cron-logrotate-monasca.conf.j2
deleted file mode 100644
index 6f0f5ea573..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-monasca.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/monasca/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-murano.conf.j2 b/ansible/roles/common/templates/cron-logrotate-murano.conf.j2
deleted file mode 100644
index ab33090192..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-murano.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/murano/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-opensearch.conf.j2 b/ansible/roles/common/templates/cron-logrotate-opensearch.conf.j2
new file mode 100644
index 0000000000..4f02e14a74
--- /dev/null
+++ b/ansible/roles/common/templates/cron-logrotate-opensearch.conf.j2
@@ -0,0 +1,3 @@
+"/var/log/kolla/opensearch/*.log"
+{
+}
diff --git a/ansible/roles/common/templates/cron-logrotate-outward-rabbitmq.conf.j2 b/ansible/roles/common/templates/cron-logrotate-outward-rabbitmq.conf.j2
deleted file mode 100644
index 34c1ac0b8d..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-outward-rabbitmq.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/outward-rabbitmq/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-proxysql.conf.j2 b/ansible/roles/common/templates/cron-logrotate-proxysql.conf.j2
new file mode 100644
index 0000000000..7055cce4ea
--- /dev/null
+++ b/ansible/roles/common/templates/cron-logrotate-proxysql.conf.j2
@@ -0,0 +1,3 @@
+"/var/log/kolla/proxysql/*.log"
+{
+}
diff --git a/ansible/roles/common/templates/cron-logrotate-redis.conf.j2 b/ansible/roles/common/templates/cron-logrotate-redis.conf.j2
new file mode 100644
index 0000000000..9fb4c5a500
--- /dev/null
+++ b/ansible/roles/common/templates/cron-logrotate-redis.conf.j2
@@ -0,0 +1,3 @@
+"/var/log/kolla/redis/*.log"
+{
+}
diff --git a/ansible/roles/common/templates/cron-logrotate-sahara.conf.j2 b/ansible/roles/common/templates/cron-logrotate-sahara.conf.j2
deleted file mode 100644
index 57a98d315c..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-sahara.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/sahara/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-senlin.conf.j2 b/ansible/roles/common/templates/cron-logrotate-senlin.conf.j2
deleted file mode 100644
index b4a61be8c0..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-senlin.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/senlin/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-skydive.conf.j2 b/ansible/roles/common/templates/cron-logrotate-skydive.conf.j2
deleted file mode 100644
index 022ea99cf9..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-skydive.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/skydive/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-skyline.conf.j2 b/ansible/roles/common/templates/cron-logrotate-skyline.conf.j2
new file mode 100644
index 0000000000..847870d9a6
--- /dev/null
+++ b/ansible/roles/common/templates/cron-logrotate-skyline.conf.j2
@@ -0,0 +1,3 @@
+"/var/log/kolla/skyline/*.log"
+{
+}
diff --git a/ansible/roles/common/templates/cron-logrotate-solum.conf.j2 b/ansible/roles/common/templates/cron-logrotate-solum.conf.j2
deleted file mode 100644
index c90bb37530..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-solum.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/solum/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-storm.conf.j2 b/ansible/roles/common/templates/cron-logrotate-storm.conf.j2
deleted file mode 100644
index a977476300..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-storm.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/storm/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-vitrage.conf.j2 b/ansible/roles/common/templates/cron-logrotate-vitrage.conf.j2
deleted file mode 100644
index 08409f080f..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-vitrage.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/vitrage/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-zookeeper.conf.j2 b/ansible/roles/common/templates/cron-logrotate-zookeeper.conf.j2
deleted file mode 100644
index b2d07766e2..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-zookeeper.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/zookeeper/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron.json.j2 b/ansible/roles/common/templates/cron.json.j2
index 14b0153670..3c712e8d44 100644
--- a/ansible/roles/common/templates/cron.json.j2
+++ b/ansible/roles/common/templates/cron.json.j2
@@ -7,6 +7,12 @@
"dest": "/etc/logrotate.conf",
"owner": "root",
"perm": "0600"
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/common/templates/fluentd.conf.j2 b/ansible/roles/common/templates/fluentd.conf.j2
new file mode 100644
index 0000000000..7a8e49c166
--- /dev/null
+++ b/ansible/roles/common/templates/fluentd.conf.j2
@@ -0,0 +1,41 @@
+#jinja2: trim_blocks: False
+{# Ansible restricts Jinja includes to the same directory or subdirectory of a
+ template. To support customised configuration outside of this path we use
+ the template lookup plugin. Jinja includes have a lower overhead, so we use
+ those where possible. #}
+
+# Inputs
+{%- for path in fluentd_input_files %}
+{%- if path.startswith('/') %}
+{{ lookup('template', path) }}
+{%- else %}
+{% include path %}
+{%- endif %}
+{%- endfor %}
+
+# Filters
+{%- for path in fluentd_filter_files %}
+{%- if path.startswith('/') %}
+{{ lookup('template', path) }}
+{%- else %}
+{% include path %}
+{%- endif %}
+{%- endfor %}
+
+# Formats
+{%- for path in fluentd_format_files %}
+{%- if path.startswith('/') %}
+{{ lookup('template', path) }}
+{%- else %}
+{% include path %}
+{%- endif %}
+{%- endfor %}
+
+# Outputs
+{%- for path in fluentd_output_files %}
+{%- if path.startswith('/') %}
+{{ lookup('template', path) }}
+{%- else %}
+{% include path %}
+{%- endif %}
+{%- endfor %}
diff --git a/ansible/roles/common/templates/fluentd.json.j2 b/ansible/roles/common/templates/fluentd.json.j2
index 712182c14a..906978f43d 100644
--- a/ansible/roles/common/templates/fluentd.json.j2
+++ b/ansible/roles/common/templates/fluentd.json.j2
@@ -1,34 +1,42 @@
-{% set fluentd_user = 'td-agent' %}
-{% set fluentd_dir = '/etc/td-agent' %}
-{% set fluentd_conf = 'td-agent.conf' %}
-{% set fluentd_cmd = '/usr/sbin/td-agent' %}
-
{
- "command": "{{ fluentd_cmd }} -o /var/log/kolla/fluentd/fluentd.log",
+ "command": "fluentd -c /etc/fluentd/fluentd.conf -o /var/log/kolla/fluentd/fluentd.log",
"config_files": [
{
- "source": "{{ container_config_directory }}/td-agent.conf",
- "dest": "{{ fluentd_dir }}/{{ fluentd_conf }}",
- "owner": "{{ fluentd_user }}",
+ "source": "{{ container_config_directory }}/fluentd.conf",
+ "dest": "/etc/fluentd/fluentd.conf",
+ "owner": "fluentd",
"perm": "0600"
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
"path": "/var/log/kolla/fluentd",
- "owner": "{{ fluentd_user }}:{{ fluentd_user }}",
+ "owner": "fluentd:fluentd",
"recurse": true
},
{% for facility in syslog_facilities | selectattr('enabled') %}
{
"path": "/var/log/kolla/{{ facility.logdir }}",
- "owner": "{{ fluentd_user }}:{{ fluentd_user }}",
+ "owner": "fluentd:fluentd",
"recurse": true
},
{% endfor %}
+{% if enable_fluentd_systemd | bool %}
+ {
+ "path": "/var/log/journal",
+ "owner": "fluentd:fluentd",
+ "recurse": true
+ },
+{% endif %}
{
"path": "/var/lib/fluentd/data",
- "owner": "{{ fluentd_user }}:{{ fluentd_user }}",
+ "owner": "fluentd:fluentd",
"recurse": true
}
]
diff --git a/ansible/roles/common/templates/kolla-directories.conf.j2 b/ansible/roles/common/templates/kolla-directories.conf.j2
new file mode 100644
index 0000000000..3831b21065
--- /dev/null
+++ b/ansible/roles/common/templates/kolla-directories.conf.j2
@@ -0,0 +1,3 @@
+{% for path in run_default_subdirectories %}
+d {{ path }} 0755 root root - -
+{% endfor %}
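
Editor's note: rendered, each entry becomes a systemd-tmpfiles "d" (create directory) rule. For example, if run_default_subdirectories contained /run/kolla (an assumed value), the resulting line in /etc/tmpfiles.d/kolla.conf would be:

    d /run/kolla 0755 root root - -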
diff --git a/ansible/roles/common/templates/kolla-toolbox.json.j2 b/ansible/roles/common/templates/kolla-toolbox.json.j2
index d08ea7e72f..fbfaad6411 100644
--- a/ansible/roles/common/templates/kolla-toolbox.json.j2
+++ b/ansible/roles/common/templates/kolla-toolbox.json.j2
@@ -1,5 +1,5 @@
{
- "command": "sleep infinity",
+ "command": "kolla_toolbox",
"config_files": [
{% if enable_rabbitmq | bool %}{
"source": "{{ container_config_directory }}/rabbitmq-erlang.cookie",
@@ -18,6 +18,12 @@
"dest": "/etc/rabbitmq/erl_inetrc",
"owner": "rabbitmq",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/common/templates/public-openrc-system.sh.j2 b/ansible/roles/common/templates/public-openrc-system.sh.j2
new file mode 100644
index 0000000000..6c9f52f33e
--- /dev/null
+++ b/ansible/roles/common/templates/public-openrc-system.sh.j2
@@ -0,0 +1,15 @@
+# {{ ansible_managed }}
+
+# Clear any old environment that may conflict.
+for key in $( set | awk '{FS="="} /^OS_/ {print $1}' ); do unset $key ; done
+export OS_USER_DOMAIN_NAME=Default
+export OS_SYSTEM_SCOPE=all
+export OS_USERNAME={{ keystone_admin_user }}
+export OS_PASSWORD={{ keystone_admin_password }}
+export OS_AUTH_URL={{ keystone_public_url }}
+export OS_IDENTITY_API_VERSION=3
+export OS_REGION_NAME={{ openstack_region_name }}
+export OS_AUTH_PLUGIN=password
+{% if kolla_admin_openrc_cacert is not none and kolla_admin_openrc_cacert | length > 0 %}
+export OS_CACERT={{ kolla_admin_openrc_cacert }}
+{% endif %}
diff --git a/ansible/roles/common/templates/public-openrc.sh.j2 b/ansible/roles/common/templates/public-openrc.sh.j2
new file mode 100644
index 0000000000..b268a8b976
--- /dev/null
+++ b/ansible/roles/common/templates/public-openrc.sh.j2
@@ -0,0 +1,17 @@
+# {{ ansible_managed }}
+
+# Clear any old environment that may conflict.
+for key in $( set | awk '{FS="="} /^OS_/ {print $1}' ); do unset $key ; done
+export OS_PROJECT_DOMAIN_NAME=Default
+export OS_USER_DOMAIN_NAME=Default
+export OS_PROJECT_NAME={{ keystone_admin_project }}
+export OS_TENANT_NAME={{ keystone_admin_project }}
+export OS_USERNAME={{ keystone_admin_user }}
+export OS_PASSWORD={{ keystone_admin_password }}
+export OS_AUTH_URL={{ keystone_public_url }}
+export OS_IDENTITY_API_VERSION=3
+export OS_REGION_NAME={{ openstack_region_name }}
+export OS_AUTH_PLUGIN=password
+{% if kolla_admin_openrc_cacert is not none and kolla_admin_openrc_cacert | length > 0 %}
+export OS_CACERT={{ kolla_admin_openrc_cacert }}
+{% endif %}
diff --git a/ansible/roles/common/templates/td-agent.conf.j2 b/ansible/roles/common/templates/td-agent.conf.j2
deleted file mode 100644
index c5c54cdf37..0000000000
--- a/ansible/roles/common/templates/td-agent.conf.j2
+++ /dev/null
@@ -1,45 +0,0 @@
-#jinja2: trim_blocks: False
-{# Ansible restricts Jinja includes to the same directory or subdirectory of a
- template. To support customised configuration outside of this path we use
- the template lookup plugin. Jinja includes have a lower overhead, so we use
- those where possible. #}
-
-# Inputs
-{%- for path in fluentd_input_files %}
-# Included from {{ path }}:
-{%- if path.startswith('/') %}
-{{ lookup('template', path) }}
-{%- else %}
-{% include path %}
-{%- endif %}
-{%- endfor %}
-
-# Filters
-{%- for path in fluentd_filter_files %}
-# Included from {{ path }}:
-{%- if path.startswith('/') %}
-{{ lookup('template', path) }}
-{%- else %}
-{% include path %}
-{%- endif %}
-{%- endfor %}
-
-# Formats
-{%- for path in fluentd_format_files %}
-# Included from {{ path }}:
-{%- if path.startswith('/') %}
-{{ lookup('template', path) }}
-{%- else %}
-{% include path %}
-{%- endif %}
-{%- endfor %}
-
-# Outputs
-{%- for path in fluentd_output_files %}
-# Included from {{ path }}:
-{%- if path.startswith('/') %}
-{{ lookup('template', path) }}
-{%- else %}
-{% include path %}
-{%- endif %}
-{%- endfor %}
diff --git a/ansible/roles/cyborg/defaults/main.yml b/ansible/roles/cyborg/defaults/main.yml
index eb92290b83..d4ed7bc681 100644
--- a/ansible/roles/cyborg/defaults/main.yml
+++ b/ansible/roles/cyborg/defaults/main.yml
@@ -8,6 +8,20 @@ cyborg_services:
volumes: "{{ cyborg_api_default_volumes + cyborg_api_extra_volumes }}"
dimensions: "{{ cyborg_api_dimensions }}"
healthcheck: "{{ cyborg_api_healthcheck }}"
+ haproxy:
+ cyborg_api:
+ enabled: "{{ enable_cyborg }}"
+ mode: "http"
+ external: false
+ port: "{{ cyborg_api_port }}"
+ listen_port: "{{ cyborg_api_listen_port }}"
+ cyborg_api_external:
+ enabled: "{{ enable_cyborg }}"
+ mode: "http"
+ external: true
+ external_fqdn: "{{ cyborg_external_fqdn }}"
+ port: "{{ cyborg_api_public_port }}"
+ listen_port: "{{ cyborg_api_listen_port }}"
cyborg-agent:
container_name: cyborg_agent
group: cyborg-agent
@@ -26,6 +40,13 @@ cyborg_services:
dimensions: "{{ cyborg_conductor_dimensions }}"
healthcheck: "{{ cyborg_conductor_healthcheck }}"
+####################
+# Config Validate
+####################
+cyborg_config_validation:
+ - generator: "/cyborg/tools/config/cyborg-config-generator.conf"
+ config: "/etc/cyborg/cyborg.conf"
+
####################
# Database
####################
@@ -52,15 +73,15 @@ cyborg_database_shard:
####################
cyborg_tag: "{{ openstack_tag }}"
-cyborg_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/cyborg-api"
+cyborg_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}cyborg-api"
cyborg_api_tag: "{{ cyborg_tag }}"
cyborg_api_image_full: "{{ cyborg_api_image }}:{{ cyborg_api_tag }}"
-cyborg_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/cyborg-agent"
+cyborg_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}cyborg-agent"
cyborg_agent_tag: "{{ cyborg_tag }}"
cyborg_agent_image_full: "{{ cyborg_agent_image }}:{{ cyborg_agent_tag }}"
-cyborg_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/cyborg-conductor"
+cyborg_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}cyborg-conductor"
cyborg_conductor_tag: "{{ cyborg_tag }}"
cyborg_conductor_image_full: "{{ cyborg_conductor_image }}:{{ cyborg_conductor_tag }}"
@@ -134,8 +155,8 @@ cyborg_conductor_extra_volumes: "{{ cyborg_extra_volumes }}"
####################
# OpenStack
####################
-cyborg_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ cyborg_api_port }}"
-cyborg_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ cyborg_api_port }}"
+cyborg_internal_endpoint: "{{ cyborg_internal_fqdn | kolla_url(internal_protocol, cyborg_api_port, '/v2') }}"
+cyborg_public_endpoint: "{{ cyborg_external_fqdn | kolla_url(public_protocol, cyborg_api_port, '/v2') }}"
cyborg_logging_debug: "{{ openstack_logging_debug }}"
@@ -158,8 +179,8 @@ cyborg_enabled_notification_topics: "{{ cyborg_notification_topics | selectattr(
####################
cyborg_ks_services:
- name: "cyborg"
- type: "cyborg"
- description: "OpenStack Cyborg Service"
+ type: "accelerator"
+ description: "Acceleration Service"
endpoints:
- {'interface': 'internal', 'url': '{{ cyborg_internal_endpoint }}'}
- {'interface': 'public', 'url': '{{ cyborg_public_endpoint }}'}
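
For context, the `haproxy` sub-dicts added above follow the key convention consumed by the shared `loadbalancer-config` role: one entry per frontend, with `external` selecting the internal or external VIP, `port` the frontend bind port, and `listen_port` the backend port. A sketch of the same shape for a hypothetical service (all names invented for illustration):

```yaml
example_api:
  enabled: "{{ enable_example }}"
  mode: "http"
  external: false
  port: "{{ example_api_port }}"
  listen_port: "{{ example_api_listen_port }}"
```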
diff --git a/ansible/roles/cyborg/handlers/main.yml b/ansible/roles/cyborg/handlers/main.yml
index 912378d01d..745578fe75 100644
--- a/ansible/roles/cyborg/handlers/main.yml
+++ b/ansible/roles/cyborg/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "cyborg-api"
service: "{{ cyborg_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -13,15 +13,13 @@
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart cyborg-conductor container
vars:
service_name: "cyborg-conductor"
service: "{{ cyborg_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -30,15 +28,13 @@
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart cyborg-agent container
vars:
service_name: "cyborg-agent"
service: "{{ cyborg_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -47,5 +43,3 @@
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/cyborg/tasks/bootstrap.yml b/ansible/roles/cyborg/tasks/bootstrap.yml
index 9739bed680..c31cf6c276 100644
--- a/ansible/roles/cyborg/tasks/bootstrap.yml
+++ b/ansible/roles/cyborg/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating cyborg database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -17,6 +18,7 @@
- name: Creating cyborg database user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
diff --git a/ansible/roles/cyborg/tasks/bootstrap_service.yml b/ansible/roles/cyborg/tasks/bootstrap_service.yml
index bbba4b8510..fbc9bf4f60 100644
--- a/ansible/roles/cyborg/tasks/bootstrap_service.yml
+++ b/ansible/roles/cyborg/tasks/bootstrap_service.yml
@@ -3,7 +3,7 @@
vars:
cyborg_api: "{{ cyborg_services['cyborg-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -14,7 +14,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_cyborg"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ cyborg_api.volumes }}"
run_once: True
delegate_to: "{{ groups[cyborg_api.group][0] }}"
diff --git a/ansible/roles/cyborg/tasks/check-containers.yml b/ansible/roles/cyborg/tasks/check-containers.yml
index f6245b4d0b..b7e2f7c29f 100644
--- a/ansible/roles/cyborg/tasks/check-containers.yml
+++ b/ansible/roles/cyborg/tasks/check-containers.yml
@@ -1,18 +1,3 @@
---
-- name: Check cyborg containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- privileged: "{{ item.value.privileged | default(False) }}"
- volumes: "{{ item.value.volumes }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ cyborg_services }}"
- notify:
- - Restart {{ item.key }} container
+- import_role:
+ name: service-check-containers
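
The per-role comparison loop removed above is assumed to move wholesale into the shared `service-check-containers` role, which resolves the role's service dict and notifies the matching restart handlers; this also explains why the `kolla_action != "config"` guards disappear from the handlers. A sketch of the assumed equivalent logic, not the role's literal source:

```yaml
# Hypothetical core of service-check-containers, shown with the cyborg
# service dict for concreteness (the shared role resolves it generically):
- name: Check containers
  become: true
  kolla_container:
    action: "compare_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ item.value.container_name }}"
    image: "{{ item.value.image }}"
    volumes: "{{ item.value.volumes | default(omit) }}"
    dimensions: "{{ item.value.dimensions }}"
    healthcheck: "{{ item.value.healthcheck | default(omit) }}"
  with_dict: "{{ cyborg_services | select_services_enabled_and_mapped_to_host }}"
  notify:
    - "Restart {{ item.key }} container"
```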
diff --git a/ansible/roles/cyborg/tasks/config.yml b/ansible/roles/cyborg/tasks/config.yml
index 61dcb68ec2..926cb3df4d 100644
--- a/ansible/roles/cyborg/tasks/config.yml
+++ b/ansible/roles/cyborg/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ cyborg_services }}"
+ with_dict: "{{ cyborg_services | select_services_enabled_and_mapped_to_host }}"
- name: Check if policies shall be overwritten
stat:
@@ -39,11 +36,7 @@
become: true
when:
- cyborg_policy_file is defined
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ cyborg_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ cyborg_services | select_services_enabled_and_mapped_to_host }}"
- include_tasks: copy-certs.yml
when:
@@ -55,12 +48,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ cyborg_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ cyborg_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over cyborg.conf
vars:
@@ -75,12 +63,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/cyborg.conf"
mode: "0660"
become: true
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ cyborg_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ cyborg_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over cyborg-api-paste.ini
vars:
@@ -92,8 +75,4 @@
dest: "{{ node_config_directory }}/cyborg-api/api-paste.ini"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups['cyborg-api']
- - service.enabled | bool
- notify:
- - Restart cyborg-api container
+ when: service | service_enabled_and_mapped_to_host
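
The two new filters are assumed to encapsulate exactly the conditions they replace: `select_services_enabled_and_mapped_to_host` keeps only dict entries whose service is enabled and whose group contains the current host, and `service_enabled_and_mapped_to_host` is the single-service equivalent. Expressed as the pre-change long-hand form for comparison (taken verbatim from the removed conditions above):

```yaml
with_dict: "{{ cyborg_services }}"
when:
  - item.value.enabled | bool
  - inventory_hostname in groups[item.value.group]
```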
diff --git a/ansible/roles/cyborg/tasks/config_validate.yml b/ansible/roles/cyborg/tasks/config_validate.yml
new file mode 100644
index 0000000000..fd9c072360
--- /dev/null
+++ b/ansible/roles/cyborg/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ cyborg_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ cyborg_config_validation }}"
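
Each `*_config_validation` entry pairs an oslo-config-generator file shipped in the image with the deployed configuration file. Presumably the shared role runs oslo.config's validator inside the service container; a hedged sketch of the equivalent manual check (the exact invocation used by service-config-validate is an assumption):

```yaml
- name: Validate cyborg.conf by hand (illustrative)
  command: >
    {{ kolla_container_engine }} exec cyborg_api oslo-config-validator
    --config-file /cyborg/tools/config/cyborg-config-generator.conf
    --input-file /etc/cyborg/cyborg.conf
```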
diff --git a/ansible/roles/cyborg/tasks/precheck.yml b/ansible/roles/cyborg/tasks/precheck.yml
index 9167122cd2..6211c5e240 100644
--- a/ansible/roles/cyborg/tasks/precheck.yml
+++ b/ansible/roles/cyborg/tasks/precheck.yml
@@ -8,8 +8,11 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- cyborg_api
+ check_mode: false
register: container_facts
- name: Checking free port for cyborg API
diff --git a/ansible/roles/cyborg/templates/cyborg-agent.json.j2 b/ansible/roles/cyborg/templates/cyborg-agent.json.j2
index f72fe7379a..285c61b41f 100644
--- a/ansible/roles/cyborg/templates/cyborg-agent.json.j2
+++ b/ansible/roles/cyborg/templates/cyborg-agent.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/cyborg/{{ cyborg_policy_file }}",
"owner": "cyborg",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/cyborg/templates/cyborg-api.json.j2 b/ansible/roles/cyborg/templates/cyborg-api.json.j2
index 922906e458..29a3f1d148 100644
--- a/ansible/roles/cyborg/templates/cyborg-api.json.j2
+++ b/ansible/roles/cyborg/templates/cyborg-api.json.j2
@@ -18,6 +18,12 @@
"dest": "/etc/cyborg/{{ cyborg_policy_file }}",
"owner": "cyborg",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/cyborg/templates/cyborg-conductor.json.j2 b/ansible/roles/cyborg/templates/cyborg-conductor.json.j2
index d4b673b81c..a50a4e6995 100644
--- a/ansible/roles/cyborg/templates/cyborg-conductor.json.j2
+++ b/ansible/roles/cyborg/templates/cyborg-conductor.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/cyborg/{{ cyborg_policy_file }}",
"owner": "cyborg",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/cyborg/templates/cyborg.conf.j2 b/ansible/roles/cyborg/templates/cyborg.conf.j2
index 22d95bc103..656094ac34 100644
--- a/ansible/roles/cyborg/templates/cyborg.conf.j2
+++ b/ansible/roles/cyborg/templates/cyborg.conf.j2
@@ -15,8 +15,8 @@ connection_recycle_time = {{ database_connection_recycle_time }}
max_pool_size = {{ database_max_pool_size }}
[keystone_authtoken]
-service_type = cyborg
-memcache_security_strategy = ENCRYPT
+service_type = accelerator
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcache_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
@@ -43,6 +43,18 @@ region_name = {{ openstack_region_name }}
cafile = {{ openstack_cacert }}
valid_interfaces = internal
+[nova]
+auth_url = {{ keystone_internal_url }}
+auth_type = password
+project_domain_id = {{ default_project_domain_id }}
+user_domain_id = {{ default_user_domain_id }}
+region_name = {{ openstack_region_name }}
+project_name = service
+username = {{ nova_keystone_user }}
+password = {{ nova_keystone_password }}
+endpoint_type = internal
+cafile = {{ openstack_cacert }}
+
{% if cyborg_policy_file is defined %}
[oslo_policy]
policy_file = {{ cyborg_policy_file }}
@@ -57,8 +69,15 @@ topics = {{ cyborg_enabled_notification_topics | map(attribute='name') | join(',
driver = noop
{% endif %}
-{% if om_enable_rabbitmq_tls | bool %}
[oslo_messaging_rabbit]
+heartbeat_in_pthread = false
+{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
diff --git a/ansible/roles/designate/defaults/main.yml b/ansible/roles/designate/defaults/main.yml
index 9619a1689f..be3d670006 100644
--- a/ansible/roles/designate/defaults/main.yml
+++ b/ansible/roles/designate/defaults/main.yml
@@ -19,7 +19,8 @@ designate_services:
enabled: "{{ enable_designate }}"
mode: "http"
external: true
- port: "{{ designate_api_port }}"
+ external_fqdn: "{{ designate_external_fqdn }}"
+ port: "{{ designate_api_public_port }}"
listen_port: "{{ designate_api_listen_port }}"
designate-backend-bind9:
container_name: designate_backend_bind9
@@ -70,6 +71,12 @@ designate_services:
dimensions: "{{ designate_sink_dimensions }}"
healthcheck: "{{ designate_sink_healthcheck }}"
+####################
+# Config Validate
+####################
+designate_config_validation:
+ - generator: "/designate/etc/designate/designate-config-generator.conf"
+ config: "/etc/designate/designate.conf"
####################
# Database
@@ -97,31 +104,31 @@ designate_database_shard:
####################
designate_tag: "{{ openstack_tag }}"
-designate_central_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/designate-central"
+designate_central_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}designate-central"
designate_central_tag: "{{ designate_tag }}"
designate_central_image_full: "{{ designate_central_image }}:{{ designate_central_tag }}"
-designate_producer_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/designate-producer"
+designate_producer_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}designate-producer"
designate_producer_tag: "{{ designate_tag }}"
designate_producer_image_full: "{{ designate_producer_image }}:{{ designate_producer_tag }}"
-designate_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/designate-api"
+designate_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}designate-api"
designate_api_tag: "{{ designate_tag }}"
designate_api_image_full: "{{ designate_api_image }}:{{ designate_api_tag }}"
-designate_backend_bind9_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/designate-backend-bind9"
+designate_backend_bind9_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}designate-backend-bind9"
designate_backend_bind9_tag: "{{ designate_tag }}"
designate_backend_bind9_image_full: "{{ designate_backend_bind9_image }}:{{ designate_backend_bind9_tag }}"
-designate_mdns_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/designate-mdns"
+designate_mdns_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}designate-mdns"
designate_mdns_tag: "{{ designate_tag }}"
designate_mdns_image_full: "{{ designate_mdns_image }}:{{ designate_mdns_tag }}"
-designate_sink_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/designate-sink"
+designate_sink_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}designate-sink"
designate_sink_tag: "{{ designate_tag }}"
designate_sink_image_full: "{{ designate_sink_image }}:{{ designate_sink_tag }}"
-designate_worker_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/designate-worker"
+designate_worker_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}designate-worker"
designate_worker_tag: "{{ designate_tag }}"
designate_worker_image_full: "{{ designate_worker_image }}:{{ designate_worker_tag }}"
@@ -229,7 +236,7 @@ designate_api_default_volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/designate/designate:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/designate' if designate_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/designate:/dev-mode/designate' if designate_dev_mode | bool else '' }}"
designate_backend_bind9_default_volumes:
- "{{ node_config_directory }}/designate-backend-bind9/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
@@ -241,31 +248,31 @@ designate_central_default_volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/designate/designate:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/designate' if designate_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/designate:/dev-mode/designate' if designate_dev_mode | bool else '' }}"
designate_mdns_default_volumes:
- "{{ node_config_directory }}/designate-mdns/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/designate/designate:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/designate' if designate_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/designate:/dev-mode/designate' if designate_dev_mode | bool else '' }}"
designate_producer_default_volumes:
- "{{ node_config_directory }}/designate-producer/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/designate/designate:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/designate' if designate_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/designate:/dev-mode/designate' if designate_dev_mode | bool else '' }}"
designate_worker_default_volumes:
- "{{ node_config_directory }}/designate-worker/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/designate/designate:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/designate' if designate_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/designate:/dev-mode/designate' if designate_dev_mode | bool else '' }}"
designate_sink_default_volumes:
- "{{ node_config_directory }}/designate-sink/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/designate/designate:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/designate' if designate_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/designate:/dev-mode/designate' if designate_dev_mode | bool else '' }}"
designate_extra_volumes: "{{ default_extra_volumes }}"
designate_api_extra_volumes: "{{ designate_extra_volumes }}"
@@ -306,6 +313,7 @@ designate_dnssec_validation: "yes"
designate_recursion: "no"
## Example for designate_forwarders_addresses: "10.199.200.1; 10.199.100.1"
designate_forwarders_addresses: ""
+designate_backend_bind9_cmdline_extras: "-U {{ designate_backend_bind9_workers }} -g"
####################
# Infoblox
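
Extracting the bind9 command line into `designate_backend_bind9_cmdline_extras` lets operators append or replace flags without overriding the whole config.json template. For example (the extra `-4` flag, forcing IPv4-only operation, is illustrative; see `named(8)` for valid options):

```yaml
# /etc/kolla/globals.yml -- illustrative override
designate_backend_bind9_cmdline_extras: "-U {{ designate_backend_bind9_workers }} -g -4"
```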
diff --git a/ansible/roles/designate/handlers/main.yml b/ansible/roles/designate/handlers/main.yml
index c7b7c320b5..622878fcb5 100644
--- a/ansible/roles/designate/handlers/main.yml
+++ b/ansible/roles/designate/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "designate-backend-bind9"
service: "{{ designate_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -12,15 +12,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart designate-api container
vars:
service_name: "designate-api"
service: "{{ designate_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -28,15 +26,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart designate-central container
vars:
service_name: "designate-central"
service: "{{ designate_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -44,15 +40,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart designate-producer container
vars:
service_name: "designate-producer"
service: "{{ designate_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -60,15 +54,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart designate-mdns container
vars:
service_name: "designate-mdns"
service: "{{ designate_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -76,15 +68,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart designate-worker container
vars:
service_name: "designate-worker"
service: "{{ designate_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -92,15 +82,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart designate-sink container
vars:
service_name: "designate-sink"
service: "{{ designate_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -108,5 +96,3 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/designate/tasks/backend_external.yml b/ansible/roles/designate/tasks/backend_external.yml
index 06cf3af959..75e3efe015 100644
--- a/ansible/roles/designate/tasks/backend_external.yml
+++ b/ansible/roles/designate/tasks/backend_external.yml
@@ -8,11 +8,7 @@
when:
- designate_backend_external == 'bind9'
- item.key in [ "designate-worker" ]
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ designate_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ designate_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over rndc.key (designate_backend_external)
template:
@@ -23,8 +19,4 @@
when:
- designate_backend_external == 'bind9'
- item.key in [ "designate-worker" ]
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ designate_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ designate_services | select_services_enabled_and_mapped_to_host }}"
diff --git a/ansible/roles/designate/tasks/bootstrap.yml b/ansible/roles/designate/tasks/bootstrap.yml
index 8173bb120e..7180dae368 100644
--- a/ansible/roles/designate/tasks/bootstrap.yml
+++ b/ansible/roles/designate/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating Designate databases
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -19,6 +20,7 @@
- name: Creating Designate databases user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
diff --git a/ansible/roles/designate/tasks/bootstrap_service.yml b/ansible/roles/designate/tasks/bootstrap_service.yml
index e3d13510bb..8452faacc1 100644
--- a/ansible/roles/designate/tasks/bootstrap_service.yml
+++ b/ansible/roles/designate/tasks/bootstrap_service.yml
@@ -3,7 +3,7 @@
vars:
designate_central: "{{ designate_services['designate-central'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -14,7 +14,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_designate"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ designate_central.volumes | reject('equalto', '') | list }}"
run_once: True
delegate_to: "{{ groups[designate_central.group][0] }}"
diff --git a/ansible/roles/designate/tasks/check-containers.yml b/ansible/roles/designate/tasks/check-containers.yml
index efe6cac74b..b7e2f7c29f 100644
--- a/ansible/roles/designate/tasks/check-containers.yml
+++ b/ansible/roles/designate/tasks/check-containers.yml
@@ -1,17 +1,3 @@
---
-- name: Check designate containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ designate_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/designate/tasks/config.yml b/ansible/roles/designate/tasks/config.yml
index 92a2d47fcf..2686368729 100644
--- a/ansible/roles/designate/tasks/config.yml
+++ b/ansible/roles/designate/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ designate_services }}"
+ with_dict: "{{ designate_services | select_services_enabled_and_mapped_to_host }}"
- name: Check if policies shall be overwritten
stat:
@@ -41,12 +38,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ designate_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ designate_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over designate.conf
vars:
@@ -61,12 +53,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/designate.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ designate_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ designate_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over pools.yaml
vars:
@@ -76,14 +63,10 @@
dest: "{{ node_config_directory }}/designate-worker/pools.yaml"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_first_found:
- "{{ node_custom_config }}/designate/pools.yaml"
- "{{ role_path }}/templates/pools.yaml.j2"
- notify:
- - Restart designate-worker container
- name: Copying over named.conf
vars:
@@ -95,15 +78,12 @@
become: true
when:
- designate_backend == 'bind9'
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ - service | service_enabled_and_mapped_to_host
with_first_found:
- "{{ node_custom_config }}/designate/designate-backend-bind9/{{ inventory_hostname }}/named.conf"
- "{{ node_custom_config }}/designate/designate-backend-bind9/named.conf"
- "{{ node_custom_config }}/designate/named.conf"
- "{{ role_path }}/templates/named.conf.j2"
- notify:
- - Restart designate-backend-bind9 container
- name: Copying over rndc.conf
template:
@@ -114,11 +94,7 @@
when:
- designate_backend == 'bind9' and designate_backend_external == 'no'
- item.key in [ "designate-backend-bind9", "designate-worker" ]
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ designate_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ designate_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over rndc.key
template:
@@ -129,11 +105,7 @@
when:
- designate_backend == 'bind9' and designate_backend_external == 'no'
- item.key in [ "designate-backend-bind9", "designate-worker" ]
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ designate_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ designate_services | select_services_enabled_and_mapped_to_host }}"
- include_tasks: backend_external.yml
when: designate_backend_external == 'bind9'
@@ -146,8 +118,4 @@
become: true
when:
- designate_policy_file is defined
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ designate_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ designate_services | select_services_enabled_and_mapped_to_host }}"
diff --git a/ansible/roles/designate/tasks/config_validate.yml b/ansible/roles/designate/tasks/config_validate.yml
new file mode 100644
index 0000000000..febfb66bf7
--- /dev/null
+++ b/ansible/roles/designate/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ designate_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ designate_config_validation }}"
diff --git a/ansible/roles/designate/tasks/precheck.yml b/ansible/roles/designate/tasks/precheck.yml
index ea2eb1b38b..80337a3a1a 100644
--- a/ansible/roles/designate/tasks/precheck.yml
+++ b/ansible/roles/designate/tasks/precheck.yml
@@ -8,10 +8,13 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- designate_api
- designate_backend_bind9
- designate_mdns
+ check_mode: false
register: container_facts
- name: Checking free port for designate API
diff --git a/ansible/roles/designate/tasks/update_pools.yml b/ansible/roles/designate/tasks/update_pools.yml
index 0ac62443a2..edc4e7e6d9 100644
--- a/ansible/roles/designate/tasks/update_pools.yml
+++ b/ansible/roles/designate/tasks/update_pools.yml
@@ -1,6 +1,6 @@
---
-- name: Update DNS pools
+- name: Non-destructive DNS pools update
become: true
- command: "{{ kolla_container_engine }} exec -t designate_worker designate-manage pool update --file /etc/designate/pools.yaml"
+ command: "{{ kolla_container_engine }} exec -t designate_worker designate-manage pool update"
run_once: True
delegate_to: "{{ groups['designate-worker'][0] }}"
diff --git a/ansible/roles/designate/templates/designate-api.json.j2 b/ansible/roles/designate/templates/designate-api.json.j2
index 73c1011096..b07b41daed 100644
--- a/ansible/roles/designate/templates/designate-api.json.j2
+++ b/ansible/roles/designate/templates/designate-api.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/designate/{{ designate_policy_file }}",
"owner": "designate",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/designate/templates/designate-backend-bind9.json.j2 b/ansible/roles/designate/templates/designate-backend-bind9.json.j2
index 49a786576c..bbfad04ae7 100644
--- a/ansible/roles/designate/templates/designate-backend-bind9.json.j2
+++ b/ansible/roles/designate/templates/designate-backend-bind9.json.j2
@@ -2,7 +2,7 @@
{% set bind_file = 'bind/named.conf' if kolla_base_distro in ['ubuntu', 'debian'] else 'named.conf' %}
{
- "command": "/usr/sbin/{{ bind_cmd }} -U {{ designate_backend_bind9_workers }} -g",
+ "command": "/usr/sbin/{{ bind_cmd }} {{ designate_backend_bind9_cmdline_extras }}",
"config_files": [
{
"source": "{{ container_config_directory }}/named.conf",
@@ -23,7 +23,13 @@
"owner": "root",
"perm": "0600",
"optional": true
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/designate/templates/designate-central.json.j2 b/ansible/roles/designate/templates/designate-central.json.j2
index 3605761b4d..3f4f8bd038 100644
--- a/ansible/roles/designate/templates/designate-central.json.j2
+++ b/ansible/roles/designate/templates/designate-central.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/designate/{{ designate_policy_file }}",
"owner": "designate",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/designate/templates/designate-mdns.json.j2 b/ansible/roles/designate/templates/designate-mdns.json.j2
index 6e83d5e611..28907c9a0c 100644
--- a/ansible/roles/designate/templates/designate-mdns.json.j2
+++ b/ansible/roles/designate/templates/designate-mdns.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/designate/{{ designate_policy_file }}",
"owner": "designate",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/designate/templates/designate-producer.json.j2 b/ansible/roles/designate/templates/designate-producer.json.j2
index 7e3c257031..e496a9d651 100644
--- a/ansible/roles/designate/templates/designate-producer.json.j2
+++ b/ansible/roles/designate/templates/designate-producer.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/designate/{{ designate_policy_file }}",
"owner": "designate",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/designate/templates/designate-sink.json.j2 b/ansible/roles/designate/templates/designate-sink.json.j2
index c8d0768c70..39273e00a9 100644
--- a/ansible/roles/designate/templates/designate-sink.json.j2
+++ b/ansible/roles/designate/templates/designate-sink.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/designate/{{ designate_policy_file }}",
"owner": "designate",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/designate/templates/designate-worker.json.j2 b/ansible/roles/designate/templates/designate-worker.json.j2
index c1d8f765d0..4b4ee2dcbf 100644
--- a/ansible/roles/designate/templates/designate-worker.json.j2
+++ b/ansible/roles/designate/templates/designate-worker.json.j2
@@ -33,7 +33,13 @@
"owner": "designate",
"perm": "0600",
"optional": true
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/designate/templates/designate.conf.j2 b/ansible/roles/designate/templates/designate.conf.j2
index 368a5280cc..2fbb768dfb 100644
--- a/ansible/roles/designate/templates/designate.conf.j2
+++ b/ansible/roles/designate/templates/designate.conf.j2
@@ -33,7 +33,7 @@ service_token_roles_required = True
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
@@ -49,9 +49,9 @@ workers = {{ designate_worker_workers }}
[service:producer]
workers = {{ designate_producer_workers }}
threads = 1000
-enabled_tasks = None
[network_api:neutron]
+ca_certificates_file = "{{ openstack_cacert }}"
endpoint_type = internalURL
[storage:sqlalchemy]
@@ -92,11 +92,18 @@ topics = {{ designate_enabled_notification_topics | map(attribute='name') | join
driver = noop
{% endif %}
-{% if om_enable_rabbitmq_tls | bool %}
[oslo_messaging_rabbit]
+heartbeat_in_pthread = false
+{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
[oslo_concurrency]
lock_path = /var/lib/designate/tmp
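
The reworked `[oslo_messaging_rabbit]` section is now always rendered: `heartbeat_in_pthread` is pinned to false unconditionally, while TLS, durable queues, and quorum queues are toggled independently. An illustrative globals.yml combination (treating HA classic queues and quorum queues as mutually exclusive is an operational assumption, not enforced by the template):

```yaml
om_enable_rabbitmq_tls: true
om_enable_rabbitmq_high_availability: false
om_enable_rabbitmq_quorum_queues: true
```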
diff --git a/ansible/roles/destroy/tasks/cleanup_containers.yml b/ansible/roles/destroy/tasks/cleanup_containers.yml
index ad7b8af275..5d9b02a9ff 100644
--- a/ansible/roles/destroy/tasks/cleanup_containers.yml
+++ b/ansible/roles/destroy/tasks/cleanup_containers.yml
@@ -1,3 +1,3 @@
---
- name: Destroying all Kolla containers and volumes
- script: ../tools/cleanup-containers
+ script: ../tools/cleanup-containers "{{ kolla_container_engine }}"
diff --git a/ansible/roles/destroy/tasks/cleanup_host.yml b/ansible/roles/destroy/tasks/cleanup_host.yml
index 30517dd272..d230a983a2 100644
--- a/ansible/roles/destroy/tasks/cleanup_host.yml
+++ b/ansible/roles/destroy/tasks/cleanup_host.yml
@@ -5,16 +5,16 @@
environment:
enable_haproxy: "{{ enable_haproxy }}"
enable_swift: "{{ enable_swift }}"
- elasticsearch_datadir_volume: "{{ elasticsearch_datadir_volume }}"
glance_file_datadir_volume: "{{ glance_file_datadir_volume }}"
nova_instance_datadir_volume: "{{ nova_instance_datadir_volume }}"
gnocchi_metric_datadir_volume: "{{ gnocchi_metric_datadir_volume }}"
influxdb_datadir_volume: "{{ influxdb_datadir_volume }}"
- kafka_datadir_volume: "{{ kafka_datadir_volume }}"
kolla_internal_vip_address: "{{ kolla_internal_vip_address }}"
kolla_external_vip_address: "{{ kolla_external_vip_address }}"
kolla_dev_repos_directory: "{{ kolla_dev_repos_directory }}"
+ opensearch_datadir_volume: "{{ opensearch_datadir_volume }}"
destroy_include_dev: "{{ destroy_include_dev }}"
+ kolla_ansible_inventories: "{{ ansible_inventory_sources | join(' ') }}"
- block:
- name: Disable octavia-interface service
diff --git a/ansible/roles/destroy/tasks/cleanup_images.yml b/ansible/roles/destroy/tasks/cleanup_images.yml
index 8484b7e9c9..1ed2eb6004 100644
--- a/ansible/roles/destroy/tasks/cleanup_images.yml
+++ b/ansible/roles/destroy/tasks/cleanup_images.yml
@@ -1,5 +1,5 @@
---
- name: Removing Kolla images
- script: ../tools/cleanup-images --all
+ script: ../tools/cleanup-images --all -e "{{ kolla_container_engine }}"
when:
- destroy_include_images | bool
diff --git a/ansible/roles/destroy/tasks/validate_docker_execute.yml b/ansible/roles/destroy/tasks/validate_docker_execute.yml
index 0e85832c65..ff17105a76 100644
--- a/ansible/roles/destroy/tasks/validate_docker_execute.yml
+++ b/ansible/roles/destroy/tasks/validate_docker_execute.yml
@@ -1,3 +1,5 @@
---
- name: Ensure the docker service is running
+ environment:
+ CONTAINER_ENGINE: "{{ kolla_container_engine }}"
script: ../tools/validate-docker-execute.sh
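
Passing the engine through the `CONTAINER_ENGINE` environment variable here, and as a script argument in the cleanup tasks above, keeps the shell tooling engine-agnostic. Illustrative selection in globals.yml (the supported values are assumed to be `docker` and `podman`):

```yaml
kolla_container_engine: "podman"
```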
diff --git a/ansible/roles/elasticsearch/defaults/main.yml b/ansible/roles/elasticsearch/defaults/main.yml
deleted file mode 100644
index 4f3aa334cc..0000000000
--- a/ansible/roles/elasticsearch/defaults/main.yml
+++ /dev/null
@@ -1,128 +0,0 @@
----
-elasticsearch_services:
- elasticsearch:
- container_name: elasticsearch
- group: elasticsearch
- enabled: true
- image: "{{ elasticsearch_image_full }}"
- environment:
- ES_JAVA_OPTS: "{{ es_java_opts }}"
- volumes: "{{ elasticsearch_default_volumes + elasticsearch_extra_volumes }}"
- dimensions: "{{ elasticsearch_dimensions }}"
- healthcheck: "{{ elasticsearch_healthcheck }}"
- haproxy:
- elasticsearch:
- enabled: "{{ enable_elasticsearch }}"
- mode: "http"
- external: false
- port: "{{ elasticsearch_port }}"
- frontend_http_extra:
- - "option dontlog-normal"
- elasticsearch-curator:
- container_name: elasticsearch_curator
- group: elasticsearch-curator
- enabled: "{{ enable_elasticsearch_curator }}"
- image: "{{ elasticsearch_curator_image_full }}"
- volumes: "{{ elasticsearch_curator_default_volumes + elasticsearch_curator_extra_volumes }}"
- dimensions: "{{ elasticsearch_curator_dimensions }}"
-
-
-####################
-# Elasticsearch
-####################
-
-# Register Elasticsearch internal endpoint in the Keystone service catalogue
-elasticsearch_enable_keystone_registration: False
-
-elasticsearch_cluster_name: "kolla_logging"
-es_heap_size: "1g"
-es_java_opts: "{% if es_heap_size %}-Xms{{ es_heap_size }} -Xmx{{ es_heap_size }}{% endif %} -Dlog4j2.formatMsgNoLookups=true"
-
-#######################
-# Elasticsearch Curator
-#######################
-
-# Helper variable used to define the default hour Curator runs to avoid
-# simultaneous runs in multinode deployments.
-elasticsearch_curator_instance_id: "{{ groups['elasticsearch-curator'].index(inventory_hostname) }}"
-
-# How frequently Curator runs.
-# For multinode deployments of Curator you should ensure each node has
-# a different schedule so that Curator does not run simultaneously on
-# multiple nodes. Use hostvars or parameterize like in the default
-# below.
-# The default depends on Curator's id as defined above which dictates
-# the daily hour the schedule runs (0, 1, etc.).
-elasticsearch_curator_cron_schedule: "0 {{ elasticsearch_curator_instance_id }} * * *"
-
-# When set to True, Curator will not modify Elasticsearch data, but
-# will print what it *would* do to the Curator log file. This is a
-# useful way of checking that Curator actions are working as expected.
-elasticsearch_curator_dry_run: false
-
-# Index prefix pattern. Any indices matching this regex will
-# be managed by Curator.
-elasticsearch_curator_index_pattern: "^{{ '(monasca|' + kibana_log_prefix + ')' if enable_monasca | bool else kibana_log_prefix }}-.*" # noqa jinja[spacing]
-
-# Duration after which an index is staged for deletion. This is
-# implemented by closing the index. Whilst in this state the index
-# contributes negligible load on the cluster and may be manually
-# re-opened if required.
-elasticsearch_curator_soft_retention_period_days: 30
-
-# Duration after which an index is permanently erased from the cluster.
-elasticsearch_curator_hard_retention_period_days: 60
-
-####################
-# Keystone
-####################
-elasticsearch_openstack_auth: "{{ openstack_auth }}"
-
-elasticsearch_ks_services:
- - name: "elasticsearch"
- type: "log-storage"
- description: "Elasticsearch"
- endpoints:
- - {'interface': 'internal', 'url': '{{ elasticsearch_internal_endpoint }}'}
-
-####################
-# Docker
-####################
-elasticsearch_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/elasticsearch"
-elasticsearch_tag: "{{ openstack_tag }}"
-elasticsearch_image_full: "{{ elasticsearch_image }}:{{ elasticsearch_tag }}"
-
-elasticsearch_curator_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/elasticsearch-curator"
-elasticsearch_curator_tag: "{{ openstack_tag }}"
-elasticsearch_curator_image_full: "{{ elasticsearch_curator_image }}:{{ elasticsearch_curator_tag }}"
-
-elasticsearch_dimensions: "{{ default_container_dimensions }}"
-elasticsearch_curator_dimensions: "{{ default_container_dimensions }}"
-
-elasticsearch_enable_healthchecks: "{{ enable_container_healthchecks }}"
-elasticsearch_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-elasticsearch_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-elasticsearch_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-elasticsearch_healthcheck_test: ["CMD-SHELL", "healthcheck_curl http://{{ api_interface_address | put_address_in_context('url') }}:{{ elasticsearch_port }}"]
-elasticsearch_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-elasticsearch_healthcheck:
- interval: "{{ elasticsearch_healthcheck_interval }}"
- retries: "{{ elasticsearch_healthcheck_retries }}"
- start_period: "{{ elasticsearch_healthcheck_start_period }}"
- test: "{% if elasticsearch_enable_healthchecks | bool %}{{ elasticsearch_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ elasticsearch_healthcheck_timeout }}"
-
-elasticsearch_default_volumes:
- - "{{ node_config_directory }}/elasticsearch/:{{ container_config_directory }}/"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "{{ elasticsearch_datadir_volume }}:/var/lib/elasticsearch/data"
- - "kolla_logs:/var/log/kolla/"
-elasticsearch_curator_default_volumes:
- - "{{ node_config_directory }}/elasticsearch-curator/:{{ container_config_directory }}/"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla"
-
-elasticsearch_extra_volumes: "{{ default_extra_volumes }}"
-elasticsearch_curator_extra_volumes: "{{ default_extra_volumes }}"
diff --git a/ansible/roles/elasticsearch/handlers/main.yml b/ansible/roles/elasticsearch/handlers/main.yml
deleted file mode 100644
index 641163c22c..0000000000
--- a/ansible/roles/elasticsearch/handlers/main.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-- name: Restart elasticsearch container
- vars:
- service_name: "elasticsearch"
- service: "{{ elasticsearch_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- environment: "{{ service.environment }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
-
-- name: Restart elasticsearch-curator container
- vars:
- service_name: "elasticsearch-curator"
- service: "{{ elasticsearch_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/elasticsearch/tasks/check-containers.yml b/ansible/roles/elasticsearch/tasks/check-containers.yml
deleted file mode 100644
index 8d4b89ca1b..0000000000
--- a/ansible/roles/elasticsearch/tasks/check-containers.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- name: Check elasticsearch containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- environment: "{{ item.value.environment | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ elasticsearch_services }}"
- notify:
- - "Restart {{ item.key }} container"
diff --git a/ansible/roles/elasticsearch/tasks/config.yml b/ansible/roles/elasticsearch/tasks/config.yml
deleted file mode 100644
index 630b15e55d..0000000000
--- a/ansible/roles/elasticsearch/tasks/config.yml
+++ /dev/null
@@ -1,78 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item.key }}"
- state: "directory"
- owner: "{{ config_owner_user }}"
- group: "{{ config_owner_group }}"
- mode: "0770"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ elasticsearch_services }}"
-
-- include_tasks: copy-certs.yml
- when:
- - kolla_copy_ca_into_containers | bool
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item.key }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ elasticsearch_services }}"
- notify:
- - Restart {{ item.key }} container
-
-- name: Copying over elasticsearch service config files
- merge_yaml:
- sources:
- - "{{ role_path }}/templates/{{ item.key }}.yml.j2"
- - "{{ node_custom_config }}/elasticsearch.yml"
- - "{{ node_custom_config }}/elasticsearch/{{ item.key }}.yml"
- - "{{ node_custom_config }}/elasticsearch/{{ inventory_hostname }}/{{ item.key }}.yml"
- dest: "{{ node_config_directory }}/{{ item.key }}/{{ item.key }}.yml"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ elasticsearch_services }}"
- notify:
- - Restart {{ item.key }} container
-
-- name: Copying over elasticsearch curator actions
- vars:
- service: "{{ elasticsearch_services['elasticsearch-curator'] }}"
- template:
- src: "{{ item }}"
- dest: "{{ node_config_directory }}/elasticsearch-curator/elasticsearch-curator-actions.yml"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[service['group']]
- - service.enabled | bool
- with_first_found:
- - "{{ node_custom_config }}/elasticsearch/elasticsearch-curator-actions.yml"
- - "{{ role_path }}/templates/elasticsearch-curator-actions.yml.j2"
- notify:
- - Restart elasticsearch-curator container
-
-- name: Copying over elasticsearch curator crontab
- vars:
- service: "{{ elasticsearch_services['elasticsearch-curator'] }}"
- template:
- src: "{{ role_path }}/templates/elasticsearch-curator.crontab.j2"
- dest: "{{ node_config_directory }}/elasticsearch-curator/elasticsearch-curator.crontab"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[service['group']]
- - service.enabled | bool
- notify:
- - Restart elasticsearch-curator container
diff --git a/ansible/roles/elasticsearch/tasks/copy-certs.yml b/ansible/roles/elasticsearch/tasks/copy-certs.yml
deleted file mode 100644
index 38cd3476f3..0000000000
--- a/ansible/roles/elasticsearch/tasks/copy-certs.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: "Copy certificates and keys for {{ project_name }}"
- import_role:
- role: service-cert-copy
- vars:
- project_services: "{{ elasticsearch_services }}"
diff --git a/ansible/roles/elasticsearch/tasks/deploy.yml b/ansible/roles/elasticsearch/tasks/deploy.yml
deleted file mode 100644
index dba49b3609..0000000000
--- a/ansible/roles/elasticsearch/tasks/deploy.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- import_tasks: config-host.yml
-
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-- include_tasks: register.yml
- when: elasticsearch_enable_keystone_registration | bool
-
-- name: Flush handlers
- meta: flush_handlers
diff --git a/ansible/roles/elasticsearch/tasks/loadbalancer.yml b/ansible/roles/elasticsearch/tasks/loadbalancer.yml
deleted file mode 100644
index e4a921891a..0000000000
--- a/ansible/roles/elasticsearch/tasks/loadbalancer.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: "Configure loadbalancer for {{ project_name }}"
- import_role:
- name: loadbalancer-config
- vars:
- project_services: "{{ elasticsearch_services }}"
- tags: always
diff --git a/ansible/roles/elasticsearch/tasks/precheck.yml b/ansible/roles/elasticsearch/tasks/precheck.yml
deleted file mode 100644
index 54e3f4b390..0000000000
--- a/ansible/roles/elasticsearch/tasks/precheck.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- import_role:
- name: service-precheck
- vars:
- service_precheck_services: "{{ elasticsearch_services }}"
- service_name: "{{ project_name }}"
-
-- name: Get container facts
- become: true
- kolla_container_facts:
- name:
- - elasticsearch
- register: container_facts
-
-- name: Checking free port for Elasticsearch
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ elasticsearch_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - container_facts['elasticsearch'] is not defined
- - inventory_hostname in groups['elasticsearch']
diff --git a/ansible/roles/elasticsearch/tasks/register.yml b/ansible/roles/elasticsearch/tasks/register.yml
deleted file mode 100644
index 5957f14727..0000000000
--- a/ansible/roles/elasticsearch/tasks/register.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- import_role:
- name: service-ks-register
- vars:
- service_ks_register_auth: "{{ elasticsearch_openstack_auth }}"
- service_ks_register_services: "{{ elasticsearch_ks_services }}"
- tags: always
diff --git a/ansible/roles/elasticsearch/tasks/stop.yml b/ansible/roles/elasticsearch/tasks/stop.yml
deleted file mode 100644
index fee24e492a..0000000000
--- a/ansible/roles/elasticsearch/tasks/stop.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- import_role:
- name: service-stop
- vars:
- project_services: "{{ elasticsearch_services }}"
- service_name: "{{ project_name }}"
diff --git a/ansible/roles/elasticsearch/tasks/upgrade.yml b/ansible/roles/elasticsearch/tasks/upgrade.yml
deleted file mode 100644
index 518f4bb2fd..0000000000
--- a/ansible/roles/elasticsearch/tasks/upgrade.yml
+++ /dev/null
@@ -1,63 +0,0 @@
----
-# The official procedure for upgrade elasticsearch:
-# https://www.elastic.co/guide/en/elasticsearch/reference/6.x/restart-upgrade.html
-- name: Disable shard allocation
- become: true
- vars:
- elasticsearch_shard_body: {"transient": {"cluster.routing.allocation.enable": "none"}}
- kolla_toolbox:
- module_name: uri
- module_args:
- url: "{{ elasticsearch_internal_endpoint }}/_cluster/settings"
- method: PUT
- status_code: 200
- return_content: yes
- body: "{{ elasticsearch_shard_body | to_json }}" # noqa jinja[invalid]
- body_format: json
- delegate_to: "{{ groups['elasticsearch'][0] }}"
- run_once: true
-
-- name: Perform a synced flush
- become: true
- kolla_toolbox:
- module_name: uri
- module_args:
- url: "{{ elasticsearch_internal_endpoint }}/_flush/synced"
- method: POST
- status_code: 200
- return_content: yes
- body_format: json
- delegate_to: "{{ groups['elasticsearch'][0] }}"
- run_once: true
- retries: 10
- delay: 5
- register: result
- until: ('status' in result) and result.status == 200
-
-# Stop all elasticsearch containers before applying configuration to ensure
-# handlers are triggered to restart them.
-- name: Stopping all elasticsearch containers
- vars:
- service_name: "elasticsearch"
- service: "{{ elasticsearch_services[service_name] }}"
- become: true
- kolla_docker:
- action: "stop_container"
- common_options: "{{ docker_common_options }}"
- name: "elasticsearch"
- image: "{{ service.image }}"
- environment: "{{ service.environment }}"
- volumes: "{{ service.volumes }}"
- when: inventory_hostname in groups[service.group]
-
-- import_tasks: config-host.yml
-
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-- include_tasks: register.yml
- when: elasticsearch_enable_keystone_registration | bool
-
-- name: Flush handlers
- meta: flush_handlers
diff --git a/ansible/roles/elasticsearch/templates/elasticsearch-curator-actions.yml.j2 b/ansible/roles/elasticsearch/templates/elasticsearch-curator-actions.yml.j2
deleted file mode 100644
index 3da7c5a722..0000000000
--- a/ansible/roles/elasticsearch/templates/elasticsearch-curator-actions.yml.j2
+++ /dev/null
@@ -1,35 +0,0 @@
-actions:
- 1:
- action: delete_indices
- description: >-
- Delete indicies
- options:
- ignore_empty_list: True
- continue_if_exception: True
- filters:
- - filtertype: pattern
- kind: prefix
- value: "{{ elasticsearch_curator_index_pattern }}"
- - filtertype: age
- source: name
- direction: older
- timestring: '%Y.%m.%d'
- unit: days
- unit_count: "{{ elasticsearch_curator_hard_retention_period_days }}"
- 2:
- action: close
- description: >-
- Closes indices
- options:
- ignore_empty_list: True
- continue_if_exception: True
- filters:
- - filtertype: pattern
- kind: prefix
- value: "{{ elasticsearch_curator_index_pattern }}"
- - filtertype: age
- source: name
- direction: older
- timestring: '%Y.%m.%d'
- unit: days
- unit_count: "{{ elasticsearch_curator_soft_retention_period_days }}"
diff --git a/ansible/roles/elasticsearch/templates/elasticsearch-curator.crontab.j2 b/ansible/roles/elasticsearch/templates/elasticsearch-curator.crontab.j2
deleted file mode 100644
index 6f8a3654a4..0000000000
--- a/ansible/roles/elasticsearch/templates/elasticsearch-curator.crontab.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-PATH=/usr/local/bin:/usr/bin:/bin
-
-{{ elasticsearch_curator_cron_schedule }} curator --config /etc/elasticsearch-curator/curator.yml {% if elasticsearch_curator_dry_run|bool %}--dry-run {% endif %}/etc/elasticsearch-curator/actions.yml
diff --git a/ansible/roles/elasticsearch/templates/elasticsearch-curator.json.j2 b/ansible/roles/elasticsearch/templates/elasticsearch-curator.json.j2
deleted file mode 100644
index 1412731855..0000000000
--- a/ansible/roles/elasticsearch/templates/elasticsearch-curator.json.j2
+++ /dev/null
@@ -1,32 +0,0 @@
-{% set cron_cmd = 'cron -f' if kolla_base_distro in ['ubuntu', 'debian'] else 'crond -s -n' %}
-{% set cron_path = '/var/spool/cron/crontabs/elasticsearch' if kolla_base_distro in ['ubuntu', 'debian'] else '/var/spool/cron/elasticsearch' %}
-{
- "command": "{{ cron_cmd }}",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/elasticsearch-curator.crontab",
- "dest": "{{ cron_path }}",
- "owner": "elasticsearch",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/elasticsearch-curator.yml",
- "dest": "/etc/elasticsearch-curator/curator.yml",
- "owner": "elasticsearch",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/elasticsearch-curator-actions.yml",
- "dest": "/etc/elasticsearch-curator/actions.yml",
- "owner": "elasticsearch",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/elasticsearch",
- "owner": "elasticsearch:elasticsearch",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/elasticsearch/templates/elasticsearch-curator.yml.j2 b/ansible/roles/elasticsearch/templates/elasticsearch-curator.yml.j2
deleted file mode 100644
index 544f554e8d..0000000000
--- a/ansible/roles/elasticsearch/templates/elasticsearch-curator.yml.j2
+++ /dev/null
@@ -1,8 +0,0 @@
-client:
- hosts: [{% for host in groups['elasticsearch'] %}"{{ 'api' | kolla_address(host) }}"{% if not loop.last %},{% endif %}{% endfor %}]
- port: {{ elasticsearch_port }}
- timeout: 30
-
-logging:
- loglevel: INFO
- logfile: /var/log/kolla/elasticsearch/elasticsearch-curator.log
diff --git a/ansible/roles/elasticsearch/templates/elasticsearch.json.j2 b/ansible/roles/elasticsearch/templates/elasticsearch.json.j2
deleted file mode 100644
index 317ae56583..0000000000
--- a/ansible/roles/elasticsearch/templates/elasticsearch.json.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "command": "/usr/share/elasticsearch/bin/elasticsearch",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/elasticsearch.yml",
- "dest": "/etc/elasticsearch/elasticsearch.yml",
- "owner": "elasticsearch",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/lib/elasticsearch",
- "owner": "elasticsearch:elasticsearch",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/elasticsearch",
- "owner": "elasticsearch:elasticsearch",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/elasticsearch/templates/elasticsearch.yml.j2 b/ansible/roles/elasticsearch/templates/elasticsearch.yml.j2
deleted file mode 100644
index 1f6f944218..0000000000
--- a/ansible/roles/elasticsearch/templates/elasticsearch.yml.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{% set num_nodes = groups['elasticsearch'] | length %}
-{% set minimum_master_nodes = (num_nodes / 2 + 1) | round(0, 'floor') | int if num_nodes > 2 else 1 %}
-{% set recover_after_nodes = (num_nodes * 2 / 3) | round(0, 'floor') | int if num_nodes > 1 else 1 %}
-node.name: "{{ 'api' | kolla_address | put_address_in_context('url') }}"
-network.host: "{{ 'api' | kolla_address | put_address_in_context('url') }}"
-
-cluster.name: "{{ elasticsearch_cluster_name }}"
-cluster.initial_master_nodes: [{% for host in groups['elasticsearch'] %}"{{ 'api' | kolla_address(host) }}"{% if not loop.last %},{% endif %}{% endfor %}]
-node.master: true
-node.data: true
-discovery.seed_hosts: [{% for host in groups['elasticsearch'] %}"{{ 'api' | kolla_address(host) | put_address_in_context('url') }}"{% if not loop.last %},{% endif %}{% endfor %}]
-
-discovery.zen.minimum_master_nodes: {{ minimum_master_nodes }}
-http.port: {{ elasticsearch_port }}
-gateway.expected_nodes: {{ num_nodes }}
-gateway.recover_after_time: "5m"
-gateway.recover_after_nodes: {{ recover_after_nodes }}
-path.data: "/var/lib/elasticsearch/data"
-path.logs: "/var/log/kolla/elasticsearch"
-indices.fielddata.cache.size: 40%
-action.auto_create_index: "true"
diff --git a/ansible/roles/elasticsearch/vars/main.yml b/ansible/roles/elasticsearch/vars/main.yml
deleted file mode 100644
index 6c47bd5f9b..0000000000
--- a/ansible/roles/elasticsearch/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-project_name: "elasticsearch"
diff --git a/ansible/roles/etcd/defaults/main.yml b/ansible/roles/etcd/defaults/main.yml
index 10510fc4fc..4cd94c4a3e 100644
--- a/ansible/roles/etcd/defaults/main.yml
+++ b/ansible/roles/etcd/defaults/main.yml
@@ -4,31 +4,60 @@ etcd_services:
container_name: etcd
group: etcd
enabled: true
- environment:
- ETCD_DATA_DIR: "/var/lib/etcd"
- ETCD_NAME: "{{ ansible_facts.hostname }}"
- ETCD_ADVERTISE_CLIENT_URLS: "{{ etcd_client_internal_endpoint }}"
- ETCD_LISTEN_CLIENT_URLS: "{{ etcd_client_internal_endpoint }}"
- ETCD_INITIAL_ADVERTISE_PEER_URLS: "{{ etcd_peer_internal_endpoint }}"
- ETCD_LISTEN_PEER_URLS: "{{ etcd_peer_internal_endpoint }}"
- ETCD_INITIAL_CLUSTER_TOKEN: "{{ etcd_cluster_token }}"
- ETCD_INITIAL_CLUSTER: "{% for host in groups['etcd'] %}{{ hostvars[host].ansible_facts.hostname }}={{ etcd_protocol }}://{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ etcd_peer_port }}{% if not loop.last %},{% endif %}{% endfor %}"
- ETCD_INITIAL_CLUSTER_STATE: "new"
- ETCD_OUT_FILE: "/var/log/kolla/etcd/etcd.log"
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- ETCD_CERT_FILE: "{% if etcd_enable_tls | bool %}/etc/etcd/certs/etcd-cert.pem{% endif %}"
- ETCD_KEY_FILE: "{% if etcd_enable_tls | bool %}/etc/etcd/certs/etcd-key.pem{% endif %}"
- ETCD_PEER_CERT_FILE: "{% if etcd_enable_tls | bool %}/etc/etcd/certs/etcd-cert.pem{% endif %}"
- ETCD_PEER_KEY_FILE: "{% if etcd_enable_tls | bool %}/etc/etcd/certs/etcd-key.pem{% endif %}"
+ environment: "{{ etcd_base_environment | combine(etcd_default_environment) }}"
image: "{{ etcd_image_full }}"
volumes: "{{ etcd_default_volumes + etcd_extra_volumes }}"
dimensions: "{{ etcd_dimensions }}"
+ haproxy:
+ etcd:
+ enabled: true
+ mode: "http"
+ external: false
+ port: "{{ etcd_client_port }}"
+ tls_backend: "{{ etcd_enable_tls | bool }}"
+####################
+# Environment
+####################
+etcd_base_environment:
+  # KOLLA_BOOTSTRAP_STATUS indicates whether the container should be
+  # recreated. Without it, the kolla_container task would not detect that
+  # the environment has changed when variables are removed.
+ KOLLA_BOOTSTRAP_STATUS: "undefined"
+ ETCDCTL_API: "3"
+ ETCDCTL_ENDPOINTS: "{{ etcd_client_internal_endpoint }}"
+ ETCDCTL_WRITE_OUT: "json"
+ ETCD_DATA_DIR: "/var/lib/etcd"
+ ETCD_NAME: "{{ ansible_facts.hostname }}"
+ ETCD_ADVERTISE_CLIENT_URLS: "{{ etcd_client_internal_endpoint }}"
+ ETCD_LISTEN_CLIENT_URLS: "{{ etcd_client_internal_endpoint }}"
+ ETCD_LISTEN_PEER_URLS: "{{ etcd_peer_internal_endpoint }}"
+ ETCD_LOGGER: "zap"
+ ETCD_LOG_OUTPUTS: "stderr,/var/log/kolla/etcd/etcd.log"
+ KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
+ ETCD_CERT_FILE: "{% if etcd_enable_tls | bool %}/etc/etcd/certs/etcd-cert.pem{% endif %}"
+ ETCD_KEY_FILE: "{% if etcd_enable_tls | bool %}/etc/etcd/certs/etcd-key.pem{% endif %}"
+ ETCD_PEER_CERT_FILE: "{% if etcd_enable_tls | bool %}/etc/etcd/certs/etcd-cert.pem{% endif %}"
+ ETCD_PEER_KEY_FILE: "{% if etcd_enable_tls | bool %}/etc/etcd/certs/etcd-key.pem{% endif %}"
+etcd_default_environment:
+ KOLLA_BOOTSTRAP_STATUS: "bootstrap completed"
+etcd_bootstrap_service_environment:
+ KOLLA_BOOTSTRAP_STATUS: "bootstrap service"
+ ETCD_INITIAL_CLUSTER_STATE: "existing"
+ ETCD_INITIAL_ADVERTISE_PEER_URLS: "{{ etcd_peer_internal_endpoint }}"
+ ETCD_INITIAL_CLUSTER_TOKEN: "{{ etcd_cluster_token }}"
+ ETCD_INITIAL_CLUSTER: "{% for host in groups['etcd_had_volume_True'] %}{{ hostvars[host].ansible_facts.hostname }}={{ etcd_protocol }}://{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ etcd_peer_port }},{% endfor %}{{ ansible_facts.hostname }}={{ etcd_protocol }}://{{ 'api' | kolla_address(inventory_hostname) | put_address_in_context('url') }}:{{ etcd_peer_port }}"
+etcd_bootstrap_cluster_environment:
+ KOLLA_BOOTSTRAP_STATUS: "bootstrap cluster"
+ ETCD_INITIAL_CLUSTER_STATE: "new"
+ ETCD_INITIAL_ADVERTISE_PEER_URLS: "{{ etcd_peer_internal_endpoint }}"
+ ETCD_INITIAL_CLUSTER_TOKEN: "{{ etcd_cluster_token }}"
+ ETCD_INITIAL_CLUSTER: "{% for host in groups['etcd'] %}{{ hostvars[host].ansible_facts.hostname }}={{ etcd_protocol }}://{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ etcd_peer_port }}{% if not loop.last %},{% endif %}{% endfor %}"
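+# A rough sketch of how the three phases compose the container environment
+# (illustrative only, matching the combine() calls used in the tasks):
+#   steady state:    etcd_base_environment | combine(etcd_default_environment)
+#                    -> KOLLA_BOOTSTRAP_STATUS: "bootstrap completed"
+#   joining member:  etcd_base_environment | combine(etcd_bootstrap_service_environment)
+#                    -> ETCD_INITIAL_CLUSTER_STATE: "existing"
+#   initial cluster: etcd_base_environment | combine(etcd_bootstrap_cluster_environment)
+#                    -> ETCD_INITIAL_CLUSTER_STATE: "new"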
####################
# Docker
####################
-etcd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/etcd"
+etcd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}etcd"
etcd_tag: "{{ openstack_tag }}"
etcd_image_full: "{{ etcd_image }}:{{ etcd_tag }}"
etcd_dimensions: "{{ default_container_dimensions }}"
@@ -44,5 +73,15 @@ etcd_extra_volumes: "{{ default_extra_volumes }}"
############
# Endpoints
############
-etcd_client_internal_endpoint: "{{ internal_protocol }}://{{ api_interface_address | put_address_in_context('url') }}:{{ etcd_client_port }}"
-etcd_peer_internal_endpoint: "{{ internal_protocol }}://{{ api_interface_address | put_address_in_context('url') }}:{{ etcd_peer_port }}"
+etcd_client_internal_endpoint: "{{ etcd_protocol }}://{{ api_interface_address | put_address_in_context('url') }}:{{ etcd_client_port }}"
+etcd_peer_internal_endpoint: "{{ etcd_protocol }}://{{ api_interface_address | put_address_in_context('url') }}:{{ etcd_peer_port }}"
+
+###################
+# Managing members
+###################
+etcd_remove_deleted_members: "no"
+
+###################
+# Copy certificates
+###################
+etcd_copy_certs: "{{ kolla_copy_ca_into_containers | bool or etcd_enable_tls | bool }}"
diff --git a/ansible/roles/etcd/handlers/main.yml b/ansible/roles/etcd/handlers/main.yml
index dd5a3d1f99..539883fd2b 100644
--- a/ansible/roles/etcd/handlers/main.yml
+++ b/ansible/roles/etcd/handlers/main.yml
@@ -1,16 +1,68 @@
---
-- name: Restart etcd container
- vars:
- service_name: "etcd"
- service: "{{ etcd_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- environment: "{{ service.environment }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
+- name: Bootstrap etcd on new cluster
+ include_tasks: 'bootstrap_cluster.yml'
when:
- kolla_action != "config"
+ listen:
+ - Bootstrap etcd cluster
+
+- name: Look up the cluster leader
+ include_tasks: 'lookup_leader.yml'
+ listen:
+ - Restart etcd container
+ - Bootstrap etcd services
+ - Bootstrap etcd cluster
+ - Check for deleted members
+
+- name: Bootstrap etcd on new services
+ include_tasks: 'bootstrap_services.yml'
+ when:
+ - groups.etcd_had_volume_False is defined
+ - inventory_hostname in groups.etcd_had_volume_False
+ - kolla_action != "config"
+ listen:
+ - Bootstrap etcd services
+
+# When upgrading an etcd cluster we have to restart the members one by one
+- name: Upgrade etcd non-leaders
+ include_tasks: 'restart_services.yml'
+ when:
+ - inventory_hostname not in (groups.etcd_is_leader_True | default([]))
+ - kolla_action == "upgrade"
+ listen:
+ - Restart etcd container
+ loop: "{{ groups.etcd }}"
+
+# When not upgrading, we can restart up to 25% of the services at a time
+# without losing quorum.
+- name: Rolling restart of etcd non-leaders
+ include_tasks: 'restart_services.yml'
+ when:
+ - inventory_hostname not in (groups.etcd_is_leader_True | default([]))
+ - groups.etcd.index(inventory_hostname) % 4 == item
+ - kolla_action != "upgrade"
+ listen:
+ - Restart etcd container
+ - Bootstrap etcd services
+ - Bootstrap etcd cluster
+ loop:
+ - 0
+ - 1
+ - 2
+ - 3
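+# Sketch of the bucketing above for a hypothetical five-node cluster
+# (indices 0-4 in groups.etcd): pass 0 restarts hosts 0 and 4, passes 1-3
+# restart hosts 1, 2 and 3 respectively, so at most ceil(n/4) non-leader
+# members are down at any one time and quorum is preserved.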
+
+- name: Restart etcd leader
+ include_tasks: 'restart_services.yml'
+ when:
+ - inventory_hostname in (groups.etcd_is_leader_True | default([]))
+ listen:
+ - Restart etcd container
+ - Bootstrap etcd services
+ - Bootstrap etcd cluster
+
+- name: Remove deleted members
+ include_tasks: 'remove_deleted_members.yml'
+ when:
+ - kolla_action != "config"
+ listen:
+ - Check for deleted members
diff --git a/ansible/roles/etcd/tasks/bootstrap.yml b/ansible/roles/etcd/tasks/bootstrap.yml
new file mode 100644
index 0000000000..eb0d00a20d
--- /dev/null
+++ b/ansible/roles/etcd/tasks/bootstrap.yml
@@ -0,0 +1,25 @@
+---
+- import_tasks: lookup_cluster.yml
+
+# NOTE(jan.gutter): The following two tasks set facts that aren't really used.
+# They exist solely to trigger the bootstrap handlers:
+# if no etcd data volumes exist, bootstrap a new initial cluster;
+# if some volumes exist, add the new nodes to an existing cluster.
+
+- name: Determine whether a new cluster needs bootstrapping
+ set_fact:
+ etcd_bootstrap_cluster: "{% for host in groups['etcd'] %}{{ hostvars[host].ansible_facts.hostname }}={{ etcd_protocol }}://{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ etcd_peer_port }}{% if not loop.last %},{% endif %}{% endfor %}"
+ when: not (etcd_cluster_exists | bool)
+ changed_when: not (etcd_cluster_exists | bool)
+ notify: Bootstrap etcd cluster
+
+- name: Determine whether new services need bootstrapping
+ set_fact:
+ etcd_bootstrap_services: "{% for host in groups['etcd_had_volume_False'] %}{{ hostvars[host].ansible_facts.hostname }}={{ etcd_protocol }}://{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ etcd_peer_port }}{% if not loop.last %},{% endif %}{% endfor %}"
+ when:
+ - etcd_cluster_exists | bool
+ - groups.etcd_had_volume_False is defined
+ changed_when:
+ - etcd_cluster_exists | bool
+ - groups.etcd_had_volume_False is defined
+ notify: Bootstrap etcd services
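+
+# Illustrative rendering of the cluster string built above, assuming a
+# hypothetical three-node inventory (ctl1-ctl3 on 192.0.2.0/24, plain http
+# and the default peer port 2380):
+#   etcd_bootstrap_cluster: "ctl1=http://192.0.2.1:2380,ctl2=http://192.0.2.2:2380,ctl3=http://192.0.2.3:2380"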
diff --git a/ansible/roles/etcd/tasks/bootstrap_cluster.yml b/ansible/roles/etcd/tasks/bootstrap_cluster.yml
new file mode 100644
index 0000000000..bf9bbfee9b
--- /dev/null
+++ b/ansible/roles/etcd/tasks/bootstrap_cluster.yml
@@ -0,0 +1,41 @@
+---
+- name: Bootstrapping etcd cluster
+ vars:
+ service_name: "etcd"
+ service: "{{ etcd_services[service_name] }}"
+ become: true
+ kolla_container:
+ action: "start_container"
+ common_options: "{{ docker_common_options }}"
+ environment: "{{ etcd_base_environment | combine(etcd_bootstrap_cluster_environment) }}"
+ image: "{{ service.image }}"
+ name: "{{ service.container_name }}"
+ volumes: "{{ service.volumes }}"
+ dimensions: "{{ service.dimensions }}"
+
+- name: Wait for etcd service port liveness
+ wait_for:
+ host: "{{ api_interface_address }}"
+ port: "{{ etcd_client_port }}"
+ connect_timeout: 1
+ timeout: 60
+ register: check_etcd_port
+ until: check_etcd_port is success
+ retries: 10
+ delay: 6
+
+- name: Wait for etcd endpoints to be healthy
+ become: true
+ vars:
+ service_name: "etcd"
+ service: "{{ etcd_services[service_name] }}"
+ command: >-
+ {{ kolla_container_engine }} exec {{ service.container_name }}
+ etcdctl endpoint health
+ changed_when: false
+ register: result
+ until:
+ - result is success
+ - ((result.stdout | from_json | first)['health'] | default(False) | bool)
+ retries: 10
+ delay: 6
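+
+# The health check above expects `etcdctl endpoint health` JSON along these
+# lines (abridged sample):
+#   [{"endpoint": "http://192.0.2.1:2379", "health": true, "took": "1.2ms"}]
+# so (result.stdout | from_json | first)['health'] turns truthy once the
+# endpoint reports healthy.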
diff --git a/ansible/roles/etcd/tasks/bootstrap_services.yml b/ansible/roles/etcd/tasks/bootstrap_services.yml
new file mode 100644
index 0000000000..3630a38d10
--- /dev/null
+++ b/ansible/roles/etcd/tasks/bootstrap_services.yml
@@ -0,0 +1,36 @@
+---
+- name: Add new member to etcd cluster
+ vars:
+ service_name: "etcd"
+ service: "{{ etcd_services[service_name] }}"
+ become: true
+ command: >-
+ {{ kolla_container_engine }} exec {{ service.container_name }}
+ etcdctl member add {{ ansible_facts.hostname }}
+ --peer-urls={{ etcd_protocol }}://{{ 'api' | kolla_address(inventory_hostname) | put_address_in_context('url') }}:{{ etcd_peer_port }}
+ delegate_to: "{{ etcd_cluster_leader | default(groups[service.group][0]) }}"
+
+- name: Bootstrapping etcd containers
+ vars:
+ service_name: "etcd"
+ service: "{{ etcd_services[service_name] }}"
+ become: true
+ kolla_container:
+ action: "start_container"
+ common_options: "{{ docker_common_options }}"
+ environment: "{{ etcd_base_environment | combine(etcd_bootstrap_service_environment) }}"
+ image: "{{ service.image }}"
+ name: "{{ service.container_name }}"
+ volumes: "{{ service.volumes }}"
+ dimensions: "{{ service.dimensions }}"
+
+- name: Wait for etcd service port liveness
+ wait_for:
+ host: "{{ api_interface_address }}"
+ port: "{{ etcd_client_port }}"
+ connect_timeout: 1
+ timeout: 60
+ register: check_etcd_client_port
+ until: check_etcd_client_port is success
+ retries: 10
+ delay: 6
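+
+# Sketch of the join flow for a hypothetical new host "ctl4": the leader runs
+#   etcdctl member add ctl4 --peer-urls=http://192.0.2.4:2380
+# and the new container then starts with ETCD_INITIAL_CLUSTER_STATE
+# "existing" from etcd_bootstrap_service_environment.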
diff --git a/ansible/roles/etcd/tasks/check-containers.yml b/ansible/roles/etcd/tasks/check-containers.yml
index aa39144d38..b7e2f7c29f 100644
--- a/ansible/roles/etcd/tasks/check-containers.yml
+++ b/ansible/roles/etcd/tasks/check-containers.yml
@@ -1,17 +1,3 @@
---
-- name: Check etcd containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes }}"
- dimensions: "{{ item.value.dimensions }}"
- environment: "{{ item.value.environment }}"
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ etcd_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/etcd/tasks/config.yml b/ansible/roles/etcd/tasks/config.yml
index e3fc555405..dee6939d8c 100644
--- a/ansible/roles/etcd/tasks/config.yml
+++ b/ansible/roles/etcd/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ etcd_services }}"
+ with_dict: "{{ etcd_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over config.json files for services
template:
@@ -18,13 +15,8 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ etcd_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ etcd_services | select_services_enabled_and_mapped_to_host }}"
- include_tasks: copy-certs.yml
when:
- - etcd_enable_tls | bool
+ - etcd_copy_certs
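+
+# The select_services_enabled_and_mapped_to_host filter used above keeps a
+# service only when (equivalent to the removed conditions):
+#   item.value.enabled | bool
+#   and inventory_hostname in groups[item.value.group]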
diff --git a/ansible/roles/monasca/tasks/check.yml b/ansible/roles/etcd/tasks/config_validate.yml
similarity index 100%
rename from ansible/roles/monasca/tasks/check.yml
rename to ansible/roles/etcd/tasks/config_validate.yml
diff --git a/ansible/roles/etcd/tasks/copy-certs.yml b/ansible/roles/etcd/tasks/copy-certs.yml
index 7601236f55..5530b6a8ee 100644
--- a/ansible/roles/etcd/tasks/copy-certs.yml
+++ b/ansible/roles/etcd/tasks/copy-certs.yml
@@ -1,50 +1,6 @@
---
-- name: "{{ project_name }} | Copying over extra CA certificates"
- become: true
- copy:
- src: "{{ kolla_certificates_dir }}/ca/"
- dest: "{{ node_config_directory }}/{{ item.key }}/ca-certificates"
- mode: "0644"
- when:
- - kolla_copy_ca_into_containers | bool
- with_dict: "{{ etcd_services | select_services_enabled_and_mapped_to_host }}"
- notify:
- - "Restart {{ item.key }} container"
-
-- name: "{{ project_name }} | Copying over etcd TLS certificate"
+- name: "Copy certificates and keys for {{ project_name }}"
+ import_role:
+ role: service-cert-copy
vars:
- certs:
- - "{{ kolla_certificates_dir }}/{{ inventory_hostname }}/{{ project_name }}-cert.pem"
- - "{{ kolla_certificates_dir }}/{{ inventory_hostname }}-cert.pem"
- - "{{ kolla_certificates_dir }}/{{ project_name }}-cert.pem"
- - "{{ kolla_tls_backend_cert }}"
- backend_tls_cert: "{{ lookup('first_found', certs) }}"
- copy:
- src: "{{ backend_tls_cert }}"
- dest: "{{ node_config_directory }}/{{ item.key }}/{{ project_name }}-cert.pem"
- mode: "0644"
- become: true
- with_dict: "{{ etcd_services | select_services_enabled_and_mapped_to_host }}"
- notify:
- - "Restart {{ item.key }} container"
- when:
- - etcd_enable_tls | bool
-
-- name: "{{ project_name }} | Copying over etcd TLS key"
- vars:
- keys:
- - "{{ kolla_certificates_dir }}/{{ inventory_hostname }}/{{ project_name }}-key.pem"
- - "{{ kolla_certificates_dir }}/{{ inventory_hostname }}-key.pem"
- - "{{ kolla_certificates_dir }}/{{ project_name }}-key.pem"
- - "{{ kolla_tls_backend_key }}"
- backend_tls_key: "{{ lookup('first_found', keys) }}"
- copy:
- src: "{{ backend_tls_key }}"
- dest: "{{ node_config_directory }}/{{ item.key }}/{{ project_name }}-key.pem"
- mode: "0600"
- become: true
- with_dict: "{{ etcd_services | select_services_enabled_and_mapped_to_host }}"
- notify:
- - "Restart {{ item.key }} container"
- when:
- - etcd_enable_tls | bool
+ project_services: "{{ etcd_services }}"
diff --git a/ansible/roles/etcd/tasks/deploy.yml b/ansible/roles/etcd/tasks/deploy.yml
index 49edff81e3..d0b36cb78b 100644
--- a/ansible/roles/etcd/tasks/deploy.yml
+++ b/ansible/roles/etcd/tasks/deploy.yml
@@ -3,5 +3,7 @@
- import_tasks: check-containers.yml
+- import_tasks: bootstrap.yml
+
- name: Flush handlers
meta: flush_handlers
diff --git a/ansible/roles/etcd/tasks/loadbalancer.yml b/ansible/roles/etcd/tasks/loadbalancer.yml
new file mode 100644
index 0000000000..07e1a3be44
--- /dev/null
+++ b/ansible/roles/etcd/tasks/loadbalancer.yml
@@ -0,0 +1,7 @@
+---
+- name: "Configure loadbalancer for {{ project_name }}"
+ import_role:
+ name: loadbalancer-config
+ vars:
+ project_services: "{{ etcd_services }}"
+ tags: always
diff --git a/ansible/roles/etcd/tasks/lookup_cluster.yml b/ansible/roles/etcd/tasks/lookup_cluster.yml
new file mode 100644
index 0000000000..bd95d573b0
--- /dev/null
+++ b/ansible/roles/etcd/tasks/lookup_cluster.yml
@@ -0,0 +1,26 @@
+---
+- name: Ensure etcd volume
+ become: true
+ kolla_container:
+ action: "create_volume"
+ common_options: "{{ docker_common_options }}"
+ name: "kolla_etcd"
+ register: etcd_volume
+
+# NOTE(jan.gutter): If the play is interrupted before bootstrapping completes,
+# we will incorrectly assume that an etcd cluster exists. This likely requires
+# manual intervention to unwedge: if a volume exists, we must assume there is
+# data on it.
+
+- name: Divide hosts by their etcd volume availability
+ group_by:
+ key: etcd_had_volume_{{ etcd_volume is not changed }}
+ changed_when: false
+
+- name: Establish whether the cluster already exists
+ set_fact:
+ etcd_cluster_exists: "{{ groups.etcd_had_volume_True is defined }}"
+ changed_when:
+ - etcd_remove_deleted_members | bool
+ - groups.etcd_had_volume_True is defined
+ notify: Check for deleted members
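+
+# Sketch: with two hosts that already had a kolla_etcd volume and one host
+# without (hypothetical hostnames), group_by yields
+#   etcd_had_volume_True:  [ctl1, ctl2]
+#   etcd_had_volume_False: [ctl3]
+# and etcd_cluster_exists becomes true because etcd_had_volume_True is defined.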
diff --git a/ansible/roles/etcd/tasks/lookup_leader.yml b/ansible/roles/etcd/tasks/lookup_leader.yml
new file mode 100644
index 0000000000..aebd851a14
--- /dev/null
+++ b/ansible/roles/etcd/tasks/lookup_leader.yml
@@ -0,0 +1,41 @@
+---
+# NOTE(jan.gutter): These tasks assume a cluster is running
+- name: Check for the etcd leader
+ vars:
+ service_name: "etcd"
+ service: "{{ etcd_services[service_name] }}"
+ become: true
+ # NOTE(jan.gutter): We need to set the ETCD environment vars here to
+ # handle an upgrade scenario from older etcd containers. These can be
+ # removed once the new workflow has been in place for a cycle or two.
+ command: >-
+ {{ kolla_container_engine }} exec
+ -e ETCDCTL_API=3
+ -e ETCDCTL_ENDPOINTS="{{ etcd_client_internal_endpoint }}"
+ -e ETCDCTL_WRITE_OUT="json"
+ {{ service.container_name }}
+ etcdctl endpoint status
+ changed_when: false
+ when:
+ - inventory_hostname in (groups.etcd_had_volume_True | default([]))
+ register: etcd_endpoint_status_result
+
+- name: Divide hosts by their etcd leader status
+ vars:
+ etcd_endpoint_status: >-
+ {{ etcd_endpoint_status_result.stdout | default('[]') | from_json }}
+ etcd_member_id: >-
+ {{ etcd_endpoint_status[0]['Status']['header']['member_id']
+ | default('') }}
+ etcd_leader_id: >-
+ {{ etcd_endpoint_status[0]['Status']['leader']
+ | default('none') }}
+ group_by:
+ key: etcd_is_leader_{{ etcd_member_id == etcd_leader_id }}
+ changed_when: false
+
+- name: Set the etcd cluster leader
+ set_fact:
+ etcd_cluster_leader: "{{ groups.etcd_is_leader_True | sort | first }}"
+ when: groups.etcd_is_leader_True is defined
+ changed_when: false
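+
+# The endpoint status JSON parsed above looks roughly like (abridged sample):
+#   [{"Endpoint": "192.0.2.1:2379",
+#     "Status": {"header": {"member_id": 1234567890}, "leader": 1234567890}}]
+# A host whose member_id equals the advertised leader id is grouped into
+# etcd_is_leader_True; the sorted first such host becomes etcd_cluster_leader.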
diff --git a/ansible/roles/etcd/tasks/precheck.yml b/ansible/roles/etcd/tasks/precheck.yml
index be45e3a356..131f4132b2 100644
--- a/ansible/roles/etcd/tasks/precheck.yml
+++ b/ansible/roles/etcd/tasks/precheck.yml
@@ -8,8 +8,11 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- etcd
+ check_mode: false
register: container_facts
- name: Checking free port for Etcd Peer
diff --git a/ansible/roles/etcd/tasks/remove_deleted_members.yml b/ansible/roles/etcd/tasks/remove_deleted_members.yml
new file mode 100644
index 0000000000..188498e5cb
--- /dev/null
+++ b/ansible/roles/etcd/tasks/remove_deleted_members.yml
@@ -0,0 +1,39 @@
+---
+- name: List the etcd members
+ vars:
+ service_name: "etcd"
+ service: "{{ etcd_services[service_name] }}"
+ become: true
+ command: >-
+ {{ kolla_container_engine }} exec {{ service.container_name }}
+ etcdctl member list
+ changed_when: false
+ run_once: true
+ delegate_to: "{{ etcd_cluster_leader | default(groups[service.group][0]) }}"
+ register: etcd_member_list_result
+
+- name: Remove deleted members from the etcd cluster
+ vars:
+ service_name: "etcd"
+ service: "{{ etcd_services[service_name] }}"
+ etcd_members_from_inventory: >-
+ {{ groups['etcd']
+ | map('extract', hostvars, 'ansible_facts')
+ | map(attribute='hostname')
+ | list }}
+ etcd_deleted_members: >-
+ {{ etcd_member_list_result.stdout | from_json
+ | json_query('members[].name')
+ | difference(etcd_members_from_inventory) }}
+ etcd_member_id: >-
+ {{ etcd_member_list_result.stdout | from_json
+ | json_query('members[].{key: name, value: ID}') | items2dict }}
+ become: true
+ command: >-
+ {{ kolla_container_engine }} exec {{ service.container_name }}
+ etcdctl member remove {{ '%x' % etcd_member_id[etcd_deleted_member] }}
+ run_once: true
+ delegate_to: "{{ etcd_cluster_leader | default(groups[service.group][0]) }}"
+ loop: "{{ etcd_deleted_members }}"
+ loop_control:
+ loop_var: etcd_deleted_member
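+
+# ID handling note: `etcdctl member list` (JSON output via ETCDCTL_WRITE_OUT)
+# reports member IDs as decimal integers, while `etcdctl member remove`
+# expects hex, hence the '%x' format above; e.g. a listed ID of 1234567890
+# is removed as 499602d2.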
diff --git a/ansible/roles/etcd/tasks/restart_services.yml b/ansible/roles/etcd/tasks/restart_services.yml
new file mode 100644
index 0000000000..622e1b467e
--- /dev/null
+++ b/ansible/roles/etcd/tasks/restart_services.yml
@@ -0,0 +1,41 @@
+---
+- name: Restart etcd container
+ vars:
+ service_name: "etcd"
+ service: "{{ etcd_services[service_name] }}"
+ become: true
+ kolla_container:
+ action: "recreate_or_restart_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ service.container_name }}"
+ image: "{{ service.image }}"
+ volumes: "{{ service.volumes }}"
+ dimensions: "{{ service.dimensions }}"
+ environment: "{{ service.environment }}"
+
+- name: Wait for etcd service port liveness
+ wait_for:
+ host: "{{ api_interface_address }}"
+ port: "{{ etcd_client_port }}"
+ connect_timeout: 1
+ timeout: 60
+ register: check_etcd_client_port
+ until: check_etcd_client_port is success
+ retries: 10
+ delay: 6
+
+- name: Wait for etcd endpoints to be healthy
+ become: true
+ vars:
+ service_name: "etcd"
+ service: "{{ etcd_services[service_name] }}"
+ command: >-
+ {{ kolla_container_engine }} exec {{ service.container_name }}
+ etcdctl endpoint health
+ changed_when: false
+ register: result
+ until:
+ - result is success
+ - ((result.stdout | from_json | first)['health'] | default(False) | bool)
+ retries: 10
+ delay: 6
diff --git a/ansible/roles/etcd/templates/etcd.json.j2 b/ansible/roles/etcd/templates/etcd.json.j2
index dfd66d2e19..81324af915 100644
--- a/ansible/roles/etcd/templates/etcd.json.j2
+++ b/ansible/roles/etcd/templates/etcd.json.j2
@@ -13,6 +13,12 @@
"dest": "/etc/etcd/certs/etcd-key.pem",
"owner": "etcd",
"perm": "0600"
+ }{% endif %}{% if etcd_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
]
}
diff --git a/ansible/roles/freezer/defaults/main.yml b/ansible/roles/freezer/defaults/main.yml
deleted file mode 100644
index 6d7ae1a66d..0000000000
--- a/ansible/roles/freezer/defaults/main.yml
+++ /dev/null
@@ -1,128 +0,0 @@
----
-freezer_services:
- freezer-api:
- container_name: freezer_api
- group: freezer-api
- enabled: true
- image: "{{ freezer_api_image_full }}"
- volumes: "{{ freezer_api_default_volumes + freezer_api_extra_volumes }}"
- dimensions: "{{ freezer_api_dimensions }}"
- haproxy:
- freezer_api:
- enabled: "{{ enable_freezer }}"
- mode: "http"
- external: false
- port: "{{ freezer_api_port }}"
- freezer_api_external:
- enabled: "{{ enable_freezer }}"
- mode: "http"
- external: true
- port: "{{ freezer_api_port }}"
- freezer-scheduler:
- container_name: freezer_scheduler
- group: freezer-scheduler
- enabled: true
- image: "{{ freezer_scheduler_image_full }}"
- volumes: "{{ freezer_scheduler_default_volumes + freezer_scheduler_extra_volumes }}"
- dimensions: "{{ freezer_scheduler_dimensions }}"
-
-####################
-## Database
-#####################
-freezer_database_backend: "mariadb"
-freezer_database_name: "freezer"
-freezer_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}freezer{% endif %}"
-freezer_database_address: "{{ database_address | put_address_in_context('url') }}:{{ database_port }}"
-freezer_elasticsearch_replicas: "1"
-freezer_es_protocol: "{{ internal_protocol }}"
-freezer_es_address: "{{ elasticsearch_address }}"
-freezer_es_port: "{{ elasticsearch_port }}"
-
-####################
-# Database sharding
-####################
-freezer_database_shard_root_user: "{% if enable_proxysql | bool %}root_shard_{{ freezer_database_shard_id }}{% else %}{{ database_user }}{% endif %}"
-freezer_database_shard_id: "{{ mariadb_default_database_shard_id | int }}"
-freezer_database_shard:
- users:
- - user: "{{ freezer_database_user }}"
- password: "{{ freezer_database_password }}"
- rules:
- - schema: "{{ freezer_database_name }}"
- shard_id: "{{ freezer_database_shard_id }}"
-
-
-####################
-# Docker
-####################
-freezer_tag: "{{ openstack_tag }}"
-
-freezer_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/freezer-api"
-freezer_api_tag: "{{ freezer_tag }}"
-freezer_api_image_full: "{{ freezer_api_image }}:{{ freezer_api_tag }}"
-
-freezer_scheduler_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/freezer-scheduler"
-freezer_scheduler_tag: "{{ freezer_tag }}"
-freezer_scheduler_image_full: "{{ freezer_scheduler_image }}:{{ freezer_scheduler_tag }}"
-
-freezer_api_dimensions: "{{ default_container_dimensions }}"
-freezer_scheduler_dimensions: "{{ default_container_dimensions }}"
-
-freezer_api_default_volumes:
- - "{{ node_config_directory }}/freezer-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "freezer:/var/lib/freezer/"
- - "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/freezer-api/freezer_api:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/freezer_api' if freezer_dev_mode | bool else '' }}"
-freezer_scheduler_default_volumes:
- - "{{ node_config_directory }}/freezer-scheduler/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "freezer:/var/lib/freezer/"
- - "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/freezer/freezer:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/freezer' if freezer_dev_mode | bool else '' }}"
-
-freezer_extra_volumes: "{{ default_extra_volumes }}"
-freezer_api_extra_volumes: "{{ freezer_extra_volumes }}"
-freezer_scheduler_extra_volumes: "{{ freezer_extra_volumes }}"
-
-####################
-# OpenStack
-####################
-freezer_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ freezer_api_port }}"
-freezer_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ freezer_api_port }}"
-
-freezer_logging_debug: "{{ openstack_logging_debug }}"
-
-freezer_keystone_user: "freezer"
-
-openstack_freezer_auth: "{{ openstack_auth }}"
-
-freezer_api_workers: "{{ openstack_service_workers }}"
-
-####################
-# Kolla
-####################
-freezer_git_repository: "{{ kolla_dev_repos_git }}/{{ project_name }}"
-freezer_api_git_repository: "{{ kolla_dev_repos_git }}/freezer-api"
-freezer_dev_repos_pull: "{{ kolla_dev_repos_pull }}"
-freezer_dev_mode: "{{ kolla_dev_mode }}"
-freezer_source_version: "{{ kolla_source_version }}"
-
-####################
-# Keystone
-####################
-freezer_ks_services:
- - name: "freezer"
- type: "backup"
- description: "Openstack Freezer Backup Service"
- endpoints:
- - {'interface': 'internal', 'url': '{{ freezer_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ freezer_public_endpoint }}'}
-
-freezer_ks_users:
- - project: "service"
- user: "{{ freezer_keystone_user }}"
- password: "{{ freezer_keystone_password }}"
- role: "admin"
diff --git a/ansible/roles/freezer/handlers/main.yml b/ansible/roles/freezer/handlers/main.yml
deleted file mode 100644
index 0ca41d0c93..0000000000
--- a/ansible/roles/freezer/handlers/main.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-- name: Restart freezer-api container
- vars:
- service_name: "freezer-api"
- service: "{{ freezer_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
-
-- name: Restart freezer-scheduler container
- vars:
- service_name: "freezer-scheduler"
- service: "{{ freezer_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/freezer/tasks/bootstrap.yml b/ansible/roles/freezer/tasks/bootstrap.yml
deleted file mode 100644
index 952e85d1c2..0000000000
--- a/ansible/roles/freezer/tasks/bootstrap.yml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-- name: Creating Freezer database
- kolla_toolbox:
- module_name: mysql_db
- module_args:
- login_host: "{{ database_address }}"
- login_port: "{{ database_port }}"
- login_user: "{{ freezer_database_shard_root_user }}"
- login_password: "{{ database_password }}"
- name: "{{ freezer_database_name }}"
- become: true
- run_once: True
- delegate_to: "{{ groups['freezer-api'][0] }}"
- when:
- - freezer_database_backend == 'mariadb'
- - not use_preconfigured_databases | bool
-
-- name: Creating Freezer database user and setting permissions
- kolla_toolbox:
- module_name: mysql_user
- module_args:
- login_host: "{{ database_address }}"
- login_port: "{{ database_port }}"
- login_user: "{{ freezer_database_shard_root_user }}"
- login_password: "{{ database_password }}"
- name: "{{ freezer_database_user }}"
- password: "{{ freezer_database_password }}"
- host: "%"
- priv: "{{ freezer_database_name }}.*:ALL"
- append_privs: "yes"
- become: true
- run_once: True
- delegate_to: "{{ groups['freezer-api'][0] }}"
- when:
- - freezer_database_backend == 'mariadb'
- - not use_preconfigured_databases | bool
-
-- import_tasks: bootstrap_service.yml
diff --git a/ansible/roles/freezer/tasks/bootstrap_service.yml b/ansible/roles/freezer/tasks/bootstrap_service.yml
deleted file mode 100644
index a70b29454e..0000000000
--- a/ansible/roles/freezer/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Running Freezer bootstrap container
- vars:
- freezer_api: "{{ freezer_services['freezer-api'] }}"
- become: true
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ freezer_api.image }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_freezer"
- restart_policy: no
- volumes: "{{ freezer_api.volumes | reject('equalto', '') | list }}"
- run_once: True
- delegate_to: "{{ groups[freezer_api.group][0] }}"
diff --git a/ansible/roles/freezer/tasks/check-containers.yml b/ansible/roles/freezer/tasks/check-containers.yml
deleted file mode 100644
index 1e5034dae7..0000000000
--- a/ansible/roles/freezer/tasks/check-containers.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: Check freezer containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ freezer_services }}"
- notify:
- - "Restart {{ item.key }} container"
diff --git a/ansible/roles/freezer/tasks/clone.yml b/ansible/roles/freezer/tasks/clone.yml
deleted file mode 100644
index faa337569b..0000000000
--- a/ansible/roles/freezer/tasks/clone.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: Cloning freezer source repository for development
- become: true
- git:
- repo: "{{ freezer_git_repository }}"
- dest: "{{ kolla_dev_repos_directory }}/{{ project_name }}"
- update: "{{ freezer_dev_repos_pull }}"
- version: "{{ freezer_source_version }}"
-
-- name: Cloning freezer-api source repository for development
- become: true
- git:
- repo: "{{ freezer_api_git_repository }}"
- dest: "{{ kolla_dev_repos_directory }}/freezer-api"
- update: "{{ freezer_dev_repos_pull }}"
- version: "{{ freezer_source_version }}"
diff --git a/ansible/roles/freezer/tasks/config.yml b/ansible/roles/freezer/tasks/config.yml
deleted file mode 100644
index b6f42441f9..0000000000
--- a/ansible/roles/freezer/tasks/config.yml
+++ /dev/null
@@ -1,97 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item.key }}"
- state: "directory"
- owner: "{{ config_owner_user }}"
- group: "{{ config_owner_group }}"
- mode: "0770"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ freezer_services }}"
-
-- name: Check if policies shall be overwritten
- stat:
- path: "{{ item }}"
- delegate_to: localhost
- run_once: True
- register: freezer_policy
- with_first_found:
- - files: "{{ supported_policy_format_list }}"
- paths:
- - "{{ node_custom_config }}/freezer/"
- skip: true
-
-- name: Set freezer policy file
- set_fact:
- freezer_policy_file: "{{ freezer_policy.results.0.stat.path | basename }}"
- freezer_policy_file_path: "{{ freezer_policy.results.0.stat.path }}"
- when:
- - freezer_policy.results
-
-- include_tasks: copy-certs.yml
- when:
- - kolla_copy_ca_into_containers | bool
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item.key }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ freezer_services }}"
- notify:
- - "Restart {{ item.key }} container"
-
-- name: Copying over wsgi-freezer-api.conf
- vars:
- service: "{{ freezer_services['freezer-api'] }}"
- template:
- src: "wsgi-freezer-api.conf.j2"
- dest: "{{ node_config_directory }}/freezer-api/wsgi-freezer-api.conf"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart freezer-api container
-
-- name: Copying over freezer confs
- vars:
- service_name: "{{ item.key }}"
- merge_configs:
- sources:
- - "{{ role_path }}/templates/freezer.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/freezer.conf"
- - "{{ node_custom_config }}/freezer/{{ item.key }}.conf"
- - "{{ node_custom_config }}/freezer/{{ inventory_hostname }}/{{ item.key }}.conf"
- dest: "{{ node_config_directory }}/{{ item.key }}/freezer.conf"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ freezer_services }}"
- notify:
- - "Restart {{ item.key }} container"
-
-- name: Copying over existing policy file
- template:
- src: "{{ freezer_policy_file_path }}"
- dest: "{{ node_config_directory }}/{{ item.key }}/{{ freezer_policy_file }}"
- mode: "0660"
- become: true
- when:
- - freezer_policy_file is defined
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ freezer_services }}"
- notify:
- - "Restart {{ item.key }} container"
diff --git a/ansible/roles/freezer/tasks/copy-certs.yml b/ansible/roles/freezer/tasks/copy-certs.yml
deleted file mode 100644
index 861d2ed118..0000000000
--- a/ansible/roles/freezer/tasks/copy-certs.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: "Copy certificates and keys for {{ project_name }}"
- import_role:
- role: service-cert-copy
- vars:
- project_services: "{{ freezer_services }}"
diff --git a/ansible/roles/freezer/tasks/deploy.yml b/ansible/roles/freezer/tasks/deploy.yml
deleted file mode 100644
index 107355641f..0000000000
--- a/ansible/roles/freezer/tasks/deploy.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- import_tasks: register.yml
-
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-- include_tasks: clone.yml
- when: freezer_dev_mode | bool
-
-- import_tasks: bootstrap.yml
-
-- name: Flush handlers
- meta: flush_handlers
diff --git a/ansible/roles/freezer/tasks/loadbalancer.yml b/ansible/roles/freezer/tasks/loadbalancer.yml
deleted file mode 100644
index e8bcef064b..0000000000
--- a/ansible/roles/freezer/tasks/loadbalancer.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: "Configure loadbalancer for {{ project_name }}"
- import_role:
- name: loadbalancer-config
- vars:
- project_services: "{{ freezer_services }}"
- tags: always
diff --git a/ansible/roles/freezer/tasks/precheck.yml b/ansible/roles/freezer/tasks/precheck.yml
deleted file mode 100644
index 555a53e5f8..0000000000
--- a/ansible/roles/freezer/tasks/precheck.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- import_role:
- name: service-precheck
- vars:
- service_precheck_services: "{{ freezer_services }}"
- service_name: "{{ project_name }}"
-
-- name: Get container facts
- become: true
- kolla_container_facts:
- name:
- - freezer_api
- register: container_facts
-
-- name: Checking free port for Freezer API
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ freezer_api_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups[freezer_services['freezer-api']['group']]
- - container_facts['freezer_api'] is not defined
diff --git a/ansible/roles/freezer/tasks/register.yml b/ansible/roles/freezer/tasks/register.yml
deleted file mode 100644
index da9e975ee1..0000000000
--- a/ansible/roles/freezer/tasks/register.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- import_role:
- name: service-ks-register
- vars:
- service_ks_register_auth: "{{ openstack_freezer_auth }}"
- service_ks_register_services: "{{ freezer_ks_services }}"
- service_ks_register_users: "{{ freezer_ks_users }}"
diff --git a/ansible/roles/freezer/tasks/stop.yml b/ansible/roles/freezer/tasks/stop.yml
deleted file mode 100644
index cb39b3d793..0000000000
--- a/ansible/roles/freezer/tasks/stop.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- import_role:
- name: service-stop
- vars:
- project_services: "{{ freezer_services }}"
- service_name: "{{ project_name }}"
diff --git a/ansible/roles/freezer/tasks/upgrade.yml b/ansible/roles/freezer/tasks/upgrade.yml
deleted file mode 100644
index 6ba9f99799..0000000000
--- a/ansible/roles/freezer/tasks/upgrade.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-- import_tasks: bootstrap_service.yml
-
-- name: Flush handlers
- meta: flush_handlers
diff --git a/ansible/roles/freezer/templates/freezer-api.json.j2 b/ansible/roles/freezer/templates/freezer-api.json.j2
deleted file mode 100644
index 1c25a9d1bc..0000000000
--- a/ansible/roles/freezer/templates/freezer-api.json.j2
+++ /dev/null
@@ -1,32 +0,0 @@
-{% set apache_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %}
-{% set apache_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %}
-{
- "command": "{{ apache_cmd }} -DFOREGROUND",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/freezer.conf",
- "dest": "/etc/freezer/freezer.conf",
- "owner": "freezer",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/wsgi-freezer-api.conf",
- "dest": "/etc/{{ apache_dir }}/wsgi-freezer.conf",
- "owner": "freezer",
- "perm": "0600"
- }{% if freezer_policy_file is defined %},
- {
- "source": "{{ container_config_directory }}/{{ freezer_policy_file }}",
- "dest": "/etc/freezer/{{ freezer_policy_file }}",
- "owner": "freezer",
- "perm": "0600"
- }{% endif %}
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/freezer",
- "owner": "freezer:freezer",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/freezer/templates/freezer-scheduler.json.j2 b/ansible/roles/freezer/templates/freezer-scheduler.json.j2
deleted file mode 100644
index f892d725cc..0000000000
--- a/ansible/roles/freezer/templates/freezer-scheduler.json.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-{
- "command": "freezer-scheduler --config-file /etc/freezer/freezer.conf start",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/freezer.conf",
- "dest": "/etc/freezer/freezer.conf",
- "owner": "freezer",
- "perm": "0600"
- }{% if freezer_policy_file is defined %},
- {
- "source": "{{ container_config_directory }}/{{ freezer_policy_file }}",
- "dest": "/etc/freezer/{{ freezer_policy_file }}",
- "owner": "freezer",
- "perm": "0600"
- }{% endif %}
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/freezer",
- "owner": "freezer:freezer",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/freezer/templates/freezer.conf.j2 b/ansible/roles/freezer/templates/freezer.conf.j2
deleted file mode 100644
index 3da301ceff..0000000000
--- a/ansible/roles/freezer/templates/freezer.conf.j2
+++ /dev/null
@@ -1,77 +0,0 @@
-[DEFAULT]
-debug = {{ freezer_logging_debug }}
-
-log_dir = /var/log/kolla/freezer
-
-{% if service_name == 'freezer-api' %}
-bind_host = {{ api_interface_address }}
-bind_port = {{ freezer_api_port }}
-{% endif %}
-
-{% if service_name == 'freezer-scheduler' %}
-client_id = {{ inventory_hostname }}
-jobs_dir = /etc/freezer/scheduler/conf.d
-
-os_username = {{ openstack_auth.username }}
-os_password = {{ openstack_auth.password }}
-os_auth_url = {{ openstack_auth.auth_url }}
-os_project_name = {{ keystone_admin_project }}
-# TODO: transition to system scoped token when freezer supports that
-# configuration option, os_project_domain_name should be removed.
-os_project_domain_name = {{ default_project_domain_name }}
-os_user_domain_name = {{ openstack_auth.user_domain_name }}
-{% endif %}
-
-{% if service_name == 'freezer-api' %}
-[keystone_authtoken]
-service_type = backup
-www_authenticate_uri = {{ keystone_internal_url }}
-auth_url = {{ keystone_internal_url }}
-auth_type = password
-project_domain_id = {{ default_project_domain_id }}
-user_domain_id = {{ default_user_domain_id }}
-project_name = service
-username = {{ freezer_keystone_user }}
-password = {{ freezer_keystone_password }}
-cafile = {{ openstack_cacert }}
-region_name = {{ openstack_region_name }}
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-{% if freezer_policy_file is defined %}
-[oslo_policy]
-policy_file = {{ freezer_policy_file }}
-{% endif %}
-
-[oslo_middleware]
-enable_proxy_headers_parsing = True
-
-[paste_deploy]
-config_file = /etc/freezer/freezer-paste.ini
-
-
-{% if freezer_database_backend == 'mariadb' %}
-[storage]
-backend = sqlachemy
-driver = sqlalchemy
-
-[database]
-connection = mysql+pymysql://{{ freezer_database_user }}:{{ freezer_database_password }}@{{ freezer_database_address }}/{{ freezer_database_name }}
-connection_recycle_time = {{ database_connection_recycle_time }}
-max_pool_size = {{ database_max_pool_size }}
-max_retries = -1
-{% endif %}
-
-{% if freezer_database_backend == 'elasticsearch' %}
-[storage]
-backend = elasticsearch
-driver = elasticsearch
-
-[elasticsearch]
-hosts = {{ freezer_es_protocol }}://{{ freezer_es_address | put_address_in_context('url') }}:{{ freezer_es_port }}
-number_of_replicas = {{ freezer_elasticsearch_replicas }}
-index = freezer
-{% endif %}
-{% endif %}
diff --git a/ansible/roles/freezer/templates/wsgi-freezer-api.conf.j2 b/ansible/roles/freezer/templates/wsgi-freezer-api.conf.j2
deleted file mode 100644
index 4cb1db2fa4..0000000000
--- a/ansible/roles/freezer/templates/wsgi-freezer-api.conf.j2
+++ /dev/null
@@ -1,38 +0,0 @@
-{% set freezer_log_dir = '/var/log/kolla/freezer' %}
-{% set python_path = '/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages' %}
-Listen {{ api_interface_address | put_address_in_context('url') }}:{{ freezer_api_port }}
-
-ServerSignature Off
-ServerTokens Prod
-TraceEnable off
-TimeOut {{ kolla_httpd_timeout }}
-KeepAliveTimeout {{ kolla_httpd_keep_alive }}
-
-ErrorLog "{{ freezer_log_dir }}/apache-error.log"
-
- CustomLog "{{ freezer_log_dir }}/apache-access.log" common
-
-
-{% if freezer_logging_debug | bool %}
-LogLevel info
-{% endif %}
-
-
- WSGIDaemonProcess freezer-api processes={{ freezer_api_workers }} threads=1 user=freezer display-name=freezer-api
- WSGIProcessGroup freezer-api
- WSGIApplicationGroup %{GLOBAL}
- WSGIScriptAlias / {{ python_path }}/freezer_api/cmd/wsgi.py
-
- ErrorLog {{ freezer_log_dir }}/freezer-api.log
- LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
- CustomLog {{ freezer_log_dir }}/freezer-api_access.log logformat
-
-
- Options Indexes FollowSymLinks MultiViews
- Require all granted
- AllowOverride None
- Order allow,deny
- allow from all
- LimitRequestBody 102400
-
-
diff --git a/ansible/roles/freezer/vars/main.yml b/ansible/roles/freezer/vars/main.yml
deleted file mode 100644
index 20fbc8f5f5..0000000000
--- a/ansible/roles/freezer/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-project_name: "freezer"
diff --git a/ansible/roles/glance/defaults/main.yml b/ansible/roles/glance/defaults/main.yml
index 5ca70d9c70..c791b58cf4 100644
--- a/ansible/roles/glance/defaults/main.yml
+++ b/ansible/roles/glance/defaults/main.yml
@@ -7,7 +7,7 @@ glance_services:
enabled: true
image: "{{ glance_api_image_full }}"
environment: "{{ glance_api_container_proxy }}"
- privileged: "{{ enable_cinder | bool and enable_cinder_backend_iscsi | bool }}"
+ privileged: "{{ enable_cinder | bool and (enable_cinder_backend_iscsi | bool or cinder_backend_ceph | bool) }}"
volumes: "{{ glance_api_default_volumes + glance_api_extra_volumes }}"
dimensions: "{{ glance_api_dimensions }}"
healthcheck: "{{ glance_api_healthcheck }}"
@@ -26,7 +26,8 @@ glance_services:
enabled: "{{ enable_glance | bool and not glance_enable_tls_backend | bool }}"
mode: "http"
external: true
- port: "{{ glance_api_port }}"
+ external_fqdn: "{{ glance_external_fqdn }}"
+ port: "{{ glance_api_public_port }}"
frontend_http_extra:
- "timeout client {{ haproxy_glance_api_client_timeout }}"
backend_http_extra:
@@ -57,7 +58,8 @@ glance_services:
enabled: "{{ enable_glance | bool and glance_enable_tls_backend | bool }}"
mode: "http"
external: true
- port: "{{ glance_api_port }}"
+ external_fqdn: "{{ glance_external_fqdn }}"
+ port: "{{ glance_api_public_port }}"
frontend_http_extra:
- "timeout client {{ haproxy_glance_api_client_timeout }}"
backend_http_extra:
@@ -65,6 +67,19 @@ glance_services:
custom_member_list: "{{ haproxy_tls_members.split(';') }}"
tls_backend: "yes"
+####################
+# Config Validate
+####################
+glance_config_validation:
+ - generator: "/glance/etc/oslo-config-generator/glance-api.conf"
+ config: "/etc/glance/glance-api.conf"
+ - generator: "/glance/etc/oslo-config-generator/glance-cache.conf"
+ config: "/etc/glance/glance-cache.conf"
+ - generator: "/glance/etc/oslo-config-generator/glance-manage.conf"
+ config: "/etc/glance/glance-manage.conf"
+ - generator: "/glance/etc/oslo-config-generator/glance-scrubber.conf"
+ config: "/etc/glance/glance-scrubber.conf"
+
####################
# HAProxy
####################
@@ -124,18 +139,26 @@ glance_database_shard:
haproxy_glance_api_client_timeout: "6h"
haproxy_glance_api_server_timeout: "6h"
+####################
+# Glance S3 Backend
+####################
+glance_backend_s3_url: "{{ s3_url }}"
+glance_backend_s3_bucket: "{{ s3_bucket }}"
+glance_backend_s3_access_key: "{{ s3_access_key }}"
+glance_backend_s3_secret_key: "{{ s3_secret_key }}"
####################
# Docker
####################
+haproxy_tag: "{{ openstack_tag }}"
glance_tag: "{{ openstack_tag }}"
-glance_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/glance-api"
+glance_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}glance-api"
glance_api_tag: "{{ glance_tag }}"
glance_api_image_full: "{{ glance_api_image }}:{{ glance_api_tag }}"
-glance_tls_proxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/haproxy"
-glance_tls_proxy_tag: "{{ glance_tag }}"
+glance_tls_proxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}haproxy"
+glance_tls_proxy_tag: "{{ haproxy_tag }}"
glance_tls_proxy_image_full: "{{ glance_tls_proxy_image }}:{{ glance_tls_proxy_tag }}"
glance_api_dimensions: "{{ default_container_dimensions }}"
@@ -172,7 +195,7 @@ glance_api_default_volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "{{ glance_file_datadir_volume }}:/var/lib/glance/"
- - "{{ kolla_dev_repos_directory ~ '/glance/glance:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/glance' if glance_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/glance:/dev-mode/glance' if glance_dev_mode | bool else '' }}"
- "kolla_logs:/var/log/kolla/"
# NOTE(yoctozepto): below to support Cinder iSCSI backends
- "{% if enable_cinder | bool and enable_cinder_backend_iscsi | bool %}iscsi_info:/etc/iscsi{% endif %}"
@@ -193,15 +216,15 @@ glance_api_container_proxy: "{{ container_proxy }}"
# Glance
####################
glance_backends:
+ - name: s3
+ type: s3
+ enabled: "{{ glance_backend_s3 | bool }}"
- name: file
type: file
enabled: "{{ glance_backend_file | bool }}"
- name: http
type: http
enabled: true
- - name: rbd
- type: rbd
- enabled: "{{ glance_backend_ceph | bool }}"
- name: vmware
type: vmware
enabled: "{{ glance_backend_vmware | bool }}"
@@ -212,7 +235,16 @@ glance_backends:
type: swift
enabled: "{{ glance_backend_swift | bool }}"
-glance_store_backends: "{{ glance_backends | selectattr('enabled', 'equalto', true) | list }}"
+glance_ceph_backends:
+ - name: "rbd"
+ type: "rbd"
+ cluster: "{{ ceph_cluster }}"
+ pool: "{{ ceph_glance_pool_name }}"
+ user: "{{ ceph_glance_user }}"
+ enabled: "{{ glance_backend_ceph | bool }}"
+
+glance_store_backends: "{{ glance_backends | selectattr('enabled', 'equalto', true) | list + glance_ceph_backends | selectattr('enabled', 'equalto', true) | list }}"
+glance_default_backend: "{% if glance_backend_vmware | bool %}vmware{% elif glance_backend_ceph | bool %}{{ glance_ceph_backends[0].name }}{% elif glance_backend_swift | bool %}swift{% elif glance_backend_s3 | bool %}s3{% else %}file{% endif %}"
####################
# OpenStack
@@ -251,8 +283,8 @@ syslog_server: "{{ api_interface_address }}"
syslog_glance_tls_proxy_facility: "local2"
glance_tls_proxy_max_connections: 40000
-glance_tls_proxy_processes: 1
-glance_tls_proxy_process_cpu_map: "no"
+glance_tls_proxy_threads: 1
+glance_tls_proxy_thread_cpu_map: "no"
glance_tls_proxy_defaults_max_connections: 10000
# Glance TLS proxy timeout values
@@ -266,3 +298,8 @@ glance_tls_proxy_check_timeout: "10s"
# Check http://www.haproxy.org/download/1.5/doc/configuration.txt for available options
glance_tls_proxy_defaults_balance: "roundrobin"
+
+###################
+# Copy certificates
+###################
+glance_copy_certs: "{{ kolla_copy_ca_into_containers | bool or glance_enable_tls_backend | bool }}"
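
The defaults above split Ceph into its own glance_ceph_backends list so that several clusters can serve Glance at once: glance_store_backends is now the concatenation of the enabled entries of both lists, and glance_default_backend prefers vmware, then the first Ceph backend, then swift, s3, and finally file. A minimal globals.yml sketch of a two-cluster override (the cluster, pool, and user names here are hypothetical):

    glance_backend_ceph: "yes"
    glance_ceph_backends:
      - name: "rbd"
        type: "rbd"
        cluster: "ceph"
        pool: "images"
        user: "glance"
        enabled: "{{ glance_backend_ceph | bool }}"
      - name: "rbd-backup"        # second, hypothetical cluster
        type: "rbd"
        cluster: "ceph2"
        pool: "images"
        user: "glance"
        enabled: "{{ glance_backend_ceph | bool }}"
    # glance_store_backends then contains both entries, and
    # glance_default_backend resolves to "rbd", the first list element.
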
diff --git a/ansible/roles/glance/handlers/main.yml b/ansible/roles/glance/handlers/main.yml
index 85d5e4e871..3adcf4fae8 100644
--- a/ansible/roles/glance/handlers/main.yml
+++ b/ansible/roles/glance/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "glance-api"
service: "{{ glance_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -14,15 +14,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart glance-tls-proxy container
vars:
service_name: "glance-tls-proxy"
service: "{{ glance_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -30,5 +28,3 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/glance/tasks/bootstrap.yml b/ansible/roles/glance/tasks/bootstrap.yml
index 8bcf5a6f40..9ba1afb862 100644
--- a/ansible/roles/glance/tasks/bootstrap.yml
+++ b/ansible/roles/glance/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating Glance database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -17,6 +18,7 @@
- name: Creating Glance database user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
diff --git a/ansible/roles/glance/tasks/bootstrap_service.yml b/ansible/roles/glance/tasks/bootstrap_service.yml
index 756d8cbf73..2645e647a2 100644
--- a/ansible/roles/glance/tasks/bootstrap_service.yml
+++ b/ansible/roles/glance/tasks/bootstrap_service.yml
@@ -2,6 +2,7 @@
- name: Enable log_bin_trust_function_creators function
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_variables
module_args:
login_host: "{{ database_address }}"
@@ -19,7 +20,7 @@
vars:
glance_api: "{{ glance_services['glance-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -30,7 +31,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_glance"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ glance_api.volumes | reject('equalto', '') | list }}"
run_once: True
delegate_to: "{{ glance_api_hosts[0] }}"
@@ -38,6 +39,7 @@
- name: Disable log_bin_trust_function_creators function
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_variables
module_args:
login_host: "{{ database_address }}"
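
Note the restart_policy change throughout these bootstrap tasks: an unquoted no is parsed by YAML as boolean false, and oneshot is kolla_container's explicit policy for bootstrap containers that run to completion and must not be restarted by the engine (the rationale is an assumption; the new value is taken from the diff). Condensed to the relevant fields:

    - kolla_container:
        action: "start_container"
        name: "bootstrap_glance"
        detach: False
        restart_policy: oneshot    # previously: restart_policy: no
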
diff --git a/ansible/roles/glance/tasks/check-containers.yml b/ansible/roles/glance/tasks/check-containers.yml
index abea314d84..b7e2f7c29f 100644
--- a/ansible/roles/glance/tasks/check-containers.yml
+++ b/ansible/roles/glance/tasks/check-containers.yml
@@ -1,19 +1,3 @@
---
-- name: Check glance containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- privileged: "{{ item.value.privileged | default(omit) }}"
- environment: "{{ item.value.environment | default(omit) }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - item.value.host_in_groups | bool
- - item.value.enabled | bool
- with_dict: "{{ glance_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
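
The per-role "Check glance containers" task is replaced by a shared role that is not part of this diff. Based on the fields the removed task consumed, its core presumably looks roughly like the sketch below (the variable lookup and field names are assumptions):

    # Hypothetical tasks/main.yml of service-check-containers
    - name: Check containers
      become: true
      kolla_container:
        action: "compare_container"
        common_options: "{{ docker_common_options }}"
        name: "{{ item.value.container_name }}"
        image: "{{ item.value.image }}"
        volumes: "{{ item.value.volumes | default(omit) }}"
        dimensions: "{{ item.value.dimensions }}"
        healthcheck: "{{ item.value.healthcheck | default(omit) }}"
      with_dict: "{{ lookup('vars', project_name ~ '_services') | select_services_enabled_and_mapped_to_host }}"
      notify: "Restart {{ item.key }} container"
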
diff --git a/ansible/roles/glance/tasks/config.yml b/ansible/roles/glance/tasks/config.yml
index 702501d7a9..70983f82ba 100644
--- a/ansible/roles/glance/tasks/config.yml
+++ b/ansible/roles/glance/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - item.value.host_in_groups | bool
- - item.value.enabled | bool
- with_dict: "{{ glance_services }}"
+ with_dict: "{{ glance_services | select_services_enabled_and_mapped_to_host }}"
- include_tasks: external_ceph.yml
when:
@@ -37,11 +34,11 @@
- include_tasks: copy-certs.yml
when:
- - kolla_copy_ca_into_containers | bool or glance_enable_tls_backend | bool
+ - glance_copy_certs
- name: Creating TLS backend PEM File
vars:
- glance_tls_proxy: "{{ glance_services['glance-tls-proxy'] }}"
+ service: "{{ glance_services['glance-tls-proxy'] }}"
assemble:
src: "{{ node_config_directory }}/glance-tls-proxy/"
dest: "{{ node_config_directory }}/glance-tls-proxy/glance-cert-and-key.pem"
@@ -49,9 +46,7 @@
regexp: "^glance-(cert|key)\\.pem$"
remote_src: true
become: true
- when:
- - glance_tls_proxy.enabled | bool
- - glance_tls_proxy.host_in_groups | bool
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over config.json files for services
template:
@@ -59,16 +54,11 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - item.value.enabled | bool
- - item.value.host_in_groups | bool
- with_dict: "{{ glance_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ glance_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over glance-api.conf
vars:
- glance_api: "{{ glance_services['glance-api'] }}"
+ service: "{{ glance_services['glance-api'] }}"
merge_configs:
sources:
- "{{ role_path }}/templates/glance-api.conf.j2"
@@ -79,15 +69,11 @@
dest: "{{ node_config_directory }}/glance-api/glance-api.conf"
mode: "0660"
become: true
- when:
- - glance_api.enabled | bool
- - glance_api.host_in_groups | bool
- notify:
- - Restart glance-api container
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over glance-cache.conf for glance_api
vars:
- glance_api: "{{ glance_services['glance-api'] }}"
+ service: "{{ glance_services['glance-api'] }}"
merge_configs:
sources:
- "{{ role_path }}/templates/glance-cache.conf.j2"
@@ -98,15 +84,12 @@
mode: "0660"
become: true
when:
- - glance_api.enabled | bool
- - glance_api.host_in_groups | bool
+ - service | service_enabled_and_mapped_to_host
- enable_glance_image_cache | bool
- notify:
- - Restart glance-api container
- name: Copying over glance-swift.conf for glance_api
vars:
- glance_api: "{{ glance_services['glance-api'] }}"
+ service: "{{ glance_services['glance-api'] }}"
merge_configs:
sources:
- "{{ role_path }}/templates/glance-swift.conf.j2"
@@ -117,45 +100,36 @@
mode: "0660"
become: true
when:
- - glance_api.enabled | bool
- - glance_api.host_in_groups | bool
+ - service | service_enabled_and_mapped_to_host
- glance_backend_swift | bool
- notify:
- - Restart glance-api container
- name: Copying over glance-image-import.conf
vars:
- glance_api: "{{ glance_services['glance-api'] }}"
+ service: "{{ glance_services['glance-api'] }}"
copy:
src: "{{ node_custom_config }}/glance/glance-image-import.conf"
dest: "{{ node_config_directory }}/glance-api/glance-image-import.conf"
mode: "0660"
become: true
when:
- - glance_api.enabled | bool
- - inventory_hostname in groups[glance_api.group]
+ - service | service_enabled_and_mapped_to_host
- glance_enable_interoperable_image_import | bool
- notify:
- - Restart glance-api container
- name: Copying over property-protections-rules.conf
vars:
- glance_api: "{{ glance_services['glance-api'] }}"
+ service: "{{ glance_services['glance-api'] }}"
copy:
src: "{{ node_custom_config }}/glance/property-protections-rules.conf"
dest: "{{ node_config_directory }}/glance-api/property-protections-rules.conf"
mode: "0660"
become: true
when:
- - glance_api.enabled | bool
- - inventory_hostname in groups[glance_api.group]
+ - service | service_enabled_and_mapped_to_host
- glance_enable_property_protection | bool
- notify:
- - Restart glance-api container
- name: Copying over existing policy file
vars:
- glance_api: "{{ glance_services['glance-api'] }}"
+ service: "{{ glance_services['glance-api'] }}"
template:
src: "{{ glance_policy_file_path }}"
dest: "{{ node_config_directory }}/glance-api/{{ glance_policy_file }}"
@@ -163,14 +137,11 @@
become: true
when:
- glance_policy_file is defined
- - glance_api.host_in_groups | bool
- - glance_api.enabled | bool
- notify:
- - Restart glance-api container
+ - service | service_enabled_and_mapped_to_host
- name: Copying over glance-haproxy-tls.cfg
vars:
- glance_tls_proxy: "{{ glance_services['glance-tls-proxy'] }}"
+ service: "{{ glance_services['glance-tls-proxy'] }}"
template:
src: "{{ item }}"
dest: "{{ node_config_directory }}/glance-tls-proxy/glance-tls-proxy.cfg"
@@ -180,8 +151,4 @@
- "{{ node_custom_config }}/glance/{{ inventory_hostname }}/glance-tls-proxy.cfg"
- "{{ node_custom_config }}/glance/glance-tls-proxy.cfg"
- "glance-tls-proxy.cfg.j2"
- when:
- - glance_tls_proxy.enabled | bool
- - glance_tls_proxy.host_in_groups | bool
- notify:
- - Restart glance-tls-proxy container
+ when: service | service_enabled_and_mapped_to_host
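
Throughout config.yml the repeated enabled/host_in_groups guards collapse into two custom filters. The filter plugins themselves are Python code not shown in this diff; their observable behaviour is equivalent to the conditions they replace:

    # service | service_enabled_and_mapped_to_host is truthy when:
    #   - service.enabled | bool
    #   - the current host belongs to the service's group
    #     (the old host_in_groups / inventory_hostname in groups[...] tests)
    #
    # select_services_enabled_and_mapped_to_host keeps only the dict items
    # satisfying both, so with_dict loops no longer need a when: clause.
    - name: Example (sketch) of the single-service form
      debug:
        msg: "glance-api is deployed on this host"
      when: glance_services['glance-api'] | service_enabled_and_mapped_to_host
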
diff --git a/ansible/roles/glance/tasks/config_validate.yml b/ansible/roles/glance/tasks/config_validate.yml
new file mode 100644
index 0000000000..f4f0a66381
--- /dev/null
+++ b/ansible/roles/glance/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ glance_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ glance_config_validation }}"
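
config_validate.yml feeds the generator/config pairs from glance_config_validation into a shared role, which presumably runs oslo-config-validator inside the service containers. A hedged sketch of the kind of task the role would execute (the exec wrapping is an assumption; the oslo-config-validator CLI and its flags are real):

    - name: Validate glance config files against their oslo.config generators
      become: true
      command: >
        {{ kolla_container_engine }} exec glance_api
        oslo-config-validator
        --config-file {{ item.generator }}
        --input-file {{ item.config }}
      with_items: "{{ glance_config_validation }}"
      changed_when: false
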
diff --git a/ansible/roles/glance/tasks/external_ceph.yml b/ansible/roles/glance/tasks/external_ceph.yml
index a37e896888..3daa656bda 100644
--- a/ansible/roles/glance/tasks/external_ceph.yml
+++ b/ansible/roles/glance/tasks/external_ceph.yml
@@ -1,23 +1,37 @@
---
-- name: Copy over ceph.conf for Glance
- template:
- src: "{{ node_custom_config }}/glance/ceph.conf"
- dest: "{{ node_config_directory }}/glance-api/ceph.conf"
+- name: Ensuring glance service ceph config subdir exists
+ vars:
+ service: "{{ glance_services['glance-api'] }}"
+ file:
+ path: "{{ node_config_directory }}/glance-api/ceph"
+ state: "directory"
+ owner: "{{ config_owner_user }}"
+ group: "{{ config_owner_group }}"
+ mode: "0770"
+ become: true
+ when: service | service_enabled_and_mapped_to_host
+
+- name: Copy over multiple ceph configs for Glance
+ merge_configs:
+ sources:
+ - "{{ node_custom_config }}/glance/{{ item.cluster }}.conf"
+ - "{{ node_custom_config }}/glance/glance-api/{{ item.cluster }}.conf"
+ dest: "{{ node_config_directory }}/glance-api/ceph/{{ item.cluster }}.conf"
mode: "0660"
become: true
when: inventory_hostname in groups['glance-api']
- notify:
- - Restart glance-api container
+ with_items: "{{ glance_ceph_backends }}"
-- name: Copy over ceph Glance keyring
+- name: Copy over ceph Glance keyrings
+ vars:
+ keyring: "{{ item.cluster }}.client.{{ item.user }}.keyring"
template:
- src: "{{ node_custom_config }}/glance/{{ ceph_glance_keyring }}"
- dest: "{{ node_config_directory }}/glance-api/{{ ceph_glance_keyring }}"
+ src: "{{ node_custom_config }}/glance/{{ keyring }}"
+ dest: "{{ node_config_directory }}/glance-api/ceph/{{ keyring }}"
mode: "0660"
become: true
+ with_items: "{{ glance_ceph_backends }}"
when: inventory_hostname in groups['glance-api']
- notify:
- - Restart glance-api container
- name: Ensuring config directory has correct owner and permission
file:
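
Each Ceph backend now drops its own cluster config and keyring into a ceph/ subdirectory, which glance-api.json.j2 later mounts wholesale at /etc/ceph. For the hypothetical ceph/ceph2 override sketched earlier, the generated tree on the deployment host would be roughly:

    # {{ node_config_directory }}/glance-api/ceph/
    #   ceph.conf
    #   ceph.client.glance.keyring
    #   ceph2.conf
    #   ceph2.client.glance.keyring
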
diff --git a/ansible/roles/glance/tasks/precheck.yml b/ansible/roles/glance/tasks/precheck.yml
index ceb7a5bd80..09cff1e9ea 100644
--- a/ansible/roles/glance/tasks/precheck.yml
+++ b/ansible/roles/glance/tasks/precheck.yml
@@ -8,10 +8,15 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name: "{{ glance_services.values() | map(attribute='container_name') | list }}"
+ check_mode: false
register: container_facts
- name: Checking free port for Glance API
+ vars:
+ service: "{{ glance_services['glance-api'] }}"
wait_for:
host: "{{ api_interface_address }}"
port: "{{ glance_api_listen_port }}"
@@ -19,6 +24,17 @@
timeout: 1
state: stopped
when:
- - glance_services['glance-api'].host_in_groups | bool
- - glance_services['glance-api'].enabled | bool
+ - service | service_enabled_and_mapped_to_host
- container_facts['glance_api'] is not defined
+
+- name: Check if S3 configurations are defined
+ assert:
+ that:
+ - vars[item] is defined
+ msg: "Glance S3 backend is enabled, either the {{ item }} or {{ item | replace('glance_backend_', '') }} variable must be defined."
+ with_items:
+ - glance_backend_s3_url
+ - glance_backend_s3_bucket
+ - glance_backend_s3_access_key
+ - glance_backend_s3_secret_key
+ when: glance_backend_s3 | bool
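
The assert only checks that the four S3 settings resolve; they can be supplied either via the glance_backend_s3_* names or through the shared s3_* variables that the role defaults fall back to. A minimal globals.yml sketch (the endpoint and credentials are placeholders):

    glance_backend_s3: "yes"
    s3_url: "http://s3.example.com:9000"
    s3_bucket: "glance"
    s3_access_key: "CHANGE_ME"
    s3_secret_key: "CHANGE_ME"
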
diff --git a/ansible/roles/glance/tasks/rolling_upgrade.yml b/ansible/roles/glance/tasks/rolling_upgrade.yml
index 2215616761..772ed0df3c 100644
--- a/ansible/roles/glance/tasks/rolling_upgrade.yml
+++ b/ansible/roles/glance/tasks/rolling_upgrade.yml
@@ -13,6 +13,7 @@
- name: Enable log_bin_trust_function_creators function
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_variables
module_args:
login_host: "{{ database_address }}"
@@ -30,7 +31,7 @@
vars:
glance_api: "{{ glance_services['glance-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -42,7 +43,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_glance"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ glance_api.volumes | reject('equalto', '') | list }}"
run_once: True
delegate_to: "{{ glance_api_hosts[0] }}"
@@ -51,7 +52,7 @@
vars:
glance_api: "{{ glance_services['glance-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -63,7 +64,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_glance"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ glance_api.volumes | reject('equalto', '') | list }}"
run_once: True
delegate_to: "{{ glance_api_hosts[0] }}"
@@ -83,7 +84,7 @@
vars:
glance_api: "{{ glance_services['glance-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -95,7 +96,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_glance"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ glance_api.volumes | reject('equalto', '') | list }}"
run_once: True
delegate_to: "{{ glance_api_hosts[0] }}"
@@ -103,6 +104,7 @@
- name: Disable log_bin_trust_function_creators function
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_variables
module_args:
login_host: "{{ database_address }}"
diff --git a/ansible/roles/glance/tasks/stop_service.yml b/ansible/roles/glance/tasks/stop_service.yml
index a98c460757..e45a49d2ae 100644
--- a/ansible/roles/glance/tasks/stop_service.yml
+++ b/ansible/roles/glance/tasks/stop_service.yml
@@ -1,12 +1,10 @@
---
- name: Stop glance service
vars:
- glance_api: "{{ glance_services['glance-api'] }}"
+ service: "{{ glance_services['glance-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
common_options: "{{ docker_common_options }}"
- name: "{{ glance_api.container_name }}"
- when:
- - glance_api.host_in_groups | bool
- - glance_api.enabled | bool
+ name: "{{ service.container_name }}"
+ when: service | service_enabled_and_mapped_to_host
diff --git a/ansible/roles/glance/templates/glance-api.conf.j2 b/ansible/roles/glance/templates/glance-api.conf.j2
index 7df76e18db..99631a4eb7 100644
--- a/ansible/roles/glance/templates/glance-api.conf.j2
+++ b/ansible/roles/glance/templates/glance-api.conf.j2
@@ -3,7 +3,8 @@ debug = {{ glance_logging_debug }}
# NOTE(elemoine) log_dir alone does not work for Glance
log_file = /var/log/kolla/glance/glance-api.log
-use_forwarded_for = true
+
+worker_self_reference_url = {{ 'https' if glance_enable_tls_backend | bool else 'http' }}://{{ api_interface_address | put_address_in_context('url') }}:{{ glance_api_port }}
{% if glance_enable_tls_backend | bool %}
bind_host = 127.0.0.1
@@ -49,7 +50,7 @@ password = {{ glance_keystone_password }}
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
@@ -57,15 +58,7 @@ memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_addres
flavor = {% if enable_glance_image_cache | bool %}keystone+cachemanagement{% else %}keystone{% endif %}
[glance_store]
-{% if glance_backend_vmware | bool %}
-default_backend = vmware
-{% elif glance_backend_ceph | bool %}
-default_backend = rbd
-{% elif glance_backend_swift | bool %}
-default_backend = swift
-{% else %}
-default_backend = file
-{% endif %}
+default_backend = "{{ glance_default_backend }}"
{% if glance_backend_file | bool %}
[file]
@@ -73,9 +66,12 @@ filesystem_store_datadir = /var/lib/glance/images/
{% endif %}
{% if glance_backend_ceph | bool %}
-[rbd]
-rbd_store_user = {{ ceph_glance_user }}
-rbd_store_pool = {{ ceph_glance_pool_name }}
+{% for backend in glance_ceph_backends %}
+[{{ backend.name }}]
+rbd_store_user = {{ backend.user }}
+rbd_store_pool = {{ backend.pool }}
+rbd_store_ceph_conf = /etc/ceph/{{ backend.cluster }}.conf
+{% endfor %}
{% endif %}
{% if glance_backend_swift | bool %}
@@ -90,6 +86,14 @@ swift_store_config_file = /etc/glance/glance-swift.conf
swift_store_auth_insecure = True
{% endif %}
+{% if glance_backend_s3 | bool %}
+[s3]
+s3_store_host = {{ glance_backend_s3_url }}
+s3_store_access_key = {{ glance_backend_s3_access_key }}
+s3_store_secret_key = {{ glance_backend_s3_secret_key }}
+s3_store_bucket = {{ glance_backend_s3_bucket }}
+{% endif %}
+
{% if glance_backend_vmware | bool %}
[vmware]
vmware_server_host = {{ vmware_vcenter_host_ip }}
@@ -120,11 +124,18 @@ topics = {{ glance_enabled_notification_topics | map(attribute='name') | join(',
driver = noop
{% endif %}
-{% if om_enable_rabbitmq_tls | bool %}
[oslo_messaging_rabbit]
+heartbeat_in_pthread = false
+{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
{% if glance_policy_file is defined %}
[oslo_policy]
@@ -145,3 +156,6 @@ auth_endpoint = {{ keystone_internal_url }}
barbican_endpoint_type = internal
verify_ssl_path = {{ openstack_cacert }}
{% endif %}
+
+[cors]
+allowed_origin = {{ horizon_public_endpoint }}
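
The [oslo_messaging_rabbit] section is now emitted unconditionally, pinning heartbeat_in_pthread off and deriving queue durability from globals. The two toggles map onto RabbitMQ features as sketched below; in practice one strategy is chosen, since quorum queues are durable by design:

    # globals.yml (sketch) - pick one queue strategy:
    om_enable_rabbitmq_high_availability: false   # durable (mirrored) classic queues
    om_enable_rabbitmq_quorum_queues: true        # RabbitMQ quorum queues
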
diff --git a/ansible/roles/glance/templates/glance-api.json.j2 b/ansible/roles/glance/templates/glance-api.json.j2
index 6774a6bc87..486d4d6686 100644
--- a/ansible/roles/glance/templates/glance-api.json.j2
+++ b/ansible/roles/glance/templates/glance-api.json.j2
@@ -14,14 +14,8 @@
"perm": "0600"
}{% endif %}{% if glance_backend_ceph | bool %},
{
- "source": "{{ container_config_directory }}/{{ ceph_glance_keyring }}",
- "dest": "/etc/ceph/{{ ceph_glance_keyring }}",
- "owner": "glance",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/ceph.conf",
- "dest": "/etc/ceph/ceph.conf",
+ "source": "{{ container_config_directory }}/ceph",
+ "dest": "/etc/ceph",
"owner": "glance",
"perm": "0600"
}{% endif %}{% if glance_backend_swift | bool %},
@@ -48,6 +42,12 @@
"dest": "/etc/glance/property-protections-rules.conf",
"owner": "glance",
"perm": "0600"
+ }{% endif %}{% if glance_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/glance/templates/glance-tls-proxy.cfg.j2 b/ansible/roles/glance/templates/glance-tls-proxy.cfg.j2
index 18e29e94d9..302938d934 100644
--- a/ansible/roles/glance/templates/glance-tls-proxy.cfg.j2
+++ b/ansible/roles/glance/templates/glance-tls-proxy.cfg.j2
@@ -6,15 +6,17 @@ global
daemon
log {{ syslog_server }}:{{ syslog_udp_port }} {{ syslog_glance_tls_proxy_facility }}
maxconn {{ glance_tls_proxy_max_connections }}
- nbproc {{ glance_tls_proxy_processes }}
- {% if (glance_tls_proxy_processes | int > 1) and (glance_tls_proxy_process_cpu_map | bool) %}
- {% for cpu_idx in range(0, glance_tls_proxy_processes) %}
- cpu-map {{ cpu_idx + 1 }} {{ cpu_idx }}
- {% endfor %}
+ nbthread {{ glance_tls_proxy_threads }}
+ {% if (glance_tls_proxy_threads | int > 1) and (glance_tls_proxy_thread_cpu_map | bool) %}
+ cpu-map auto:1/all 0-63
{% endif %}
- ssl-default-bind-ciphers DEFAULT:!MEDIUM:!3DES
- ssl-default-bind-options no-sslv3 no-tlsv10 no-tlsv11
+ {% if kolla_enable_tls_external | bool or kolla_enable_tls_internal | bool %}
+ {% for line in haproxy_ssl_settings.split('\n') %}
+ {{ line }}
+ {% endfor %}
tune.ssl.default-dh-param 4096
+ ca-base {{ haproxy_backend_cacert_dir }}
+ {% endif %}
defaults
log global
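
Rather than hard-coding ciphers, the TLS proxy now replays haproxy_ssl_settings line by line, keeping it in lockstep with the main HAProxy configuration. Since the template splits on newlines, the variable is a multi-line string; the old hard-coded values expressed that way would read (the actual default lives in the loadbalancer role):

    haproxy_ssl_settings: |-
      ssl-default-bind-ciphers DEFAULT:!MEDIUM:!3DES
      ssl-default-bind-options no-sslv3 no-tlsv10 no-tlsv11
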
diff --git a/ansible/roles/glance/templates/glance-tls-proxy.json.j2 b/ansible/roles/glance/templates/glance-tls-proxy.json.j2
index 27546f2d17..711f054f38 100644
--- a/ansible/roles/glance/templates/glance-tls-proxy.json.j2
+++ b/ansible/roles/glance/templates/glance-tls-proxy.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/glance/certs/glance-cert-and-key.pem",
"owner": "glance",
"perm": "0600"
- }
+ }{% if glance_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/gnocchi/defaults/main.yml b/ansible/roles/gnocchi/defaults/main.yml
index e15582f50c..c27147cf1d 100644
--- a/ansible/roles/gnocchi/defaults/main.yml
+++ b/ansible/roles/gnocchi/defaults/main.yml
@@ -13,12 +13,15 @@ gnocchi_services:
enabled: "{{ enable_gnocchi }}"
mode: "http"
external: false
- port: "{{ gnocchi_api_listen_port }}"
+ port: "{{ gnocchi_api_port }}"
+ listen_port: "{{ gnocchi_api_listen_port }}"
gnocchi_api_external:
enabled: "{{ enable_gnocchi }}"
mode: "http"
external: true
- port: "{{ gnocchi_api_listen_port }}"
+ external_fqdn: "{{ gnocchi_external_fqdn }}"
+ port: "{{ gnocchi_api_public_port }}"
+ listen_port: "{{ gnocchi_api_listen_port }}"
gnocchi-metricd:
container_name: gnocchi_metricd
group: gnocchi-metricd
@@ -36,6 +39,12 @@ gnocchi_services:
dimensions: "{{ gnocchi_statsd_dimensions }}"
healthcheck: "{{ gnocchi_statsd_healthcheck }}"
+####################
+# Config Validate
+####################
+gnocchi_config_validation:
+ - generator: "/gnocchi/gnocchi/gnocchi-config-generator.conf"
+ config: "/etc/gnocchi/gnocchi.conf"
####################
# Swift
@@ -70,15 +79,15 @@ gnocchi_database_shard:
####################
gnocchi_tag: "{{ openstack_tag }}"
-gnocchi_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/gnocchi-api"
+gnocchi_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}gnocchi-api"
gnocchi_api_tag: "{{ gnocchi_tag }}"
gnocchi_api_image_full: "{{ gnocchi_api_image }}:{{ gnocchi_api_tag }}"
-gnocchi_statsd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/gnocchi-statsd"
+gnocchi_statsd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}gnocchi-statsd"
gnocchi_statsd_tag: "{{ gnocchi_tag }}"
gnocchi_statsd_image_full: "{{ gnocchi_statsd_image }}:{{ gnocchi_statsd_tag }}"
-gnocchi_metricd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/gnocchi-metricd"
+gnocchi_metricd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}gnocchi-metricd"
gnocchi_metricd_tag: "{{ gnocchi_tag }}"
gnocchi_metricd_image_full: "{{ gnocchi_metricd_image }}:{{ gnocchi_metricd_tag }}"
@@ -154,9 +163,6 @@ gnocchi_statsd_extra_volumes: "{{ gnocchi_extra_volumes }}"
####################
# OpenStack
####################
-gnocchi_internal_endpoint: "{{ internal_protocol }}://{{ gnocchi_internal_fqdn | put_address_in_context('url') }}:{{ gnocchi_api_port }}"
-gnocchi_public_endpoint: "{{ public_protocol }}://{{ gnocchi_external_fqdn | put_address_in_context('url') }}:{{ gnocchi_api_port }}"
-
gnocchi_logging_debug: "{{ openstack_logging_debug }}"
gnocchi_metricd_workers: "{{ openstack_service_workers }}"
diff --git a/ansible/roles/gnocchi/handlers/main.yml b/ansible/roles/gnocchi/handlers/main.yml
index cc2d5762ae..e9839c5089 100644
--- a/ansible/roles/gnocchi/handlers/main.yml
+++ b/ansible/roles/gnocchi/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "gnocchi-api"
service: "{{ gnocchi_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -12,15 +12,13 @@
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart gnocchi-metricd container
vars:
service_name: "gnocchi-metricd"
service: "{{ gnocchi_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -28,15 +26,13 @@
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart gnocchi-statsd container
vars:
service_name: "gnocchi-statsd"
service: "{{ gnocchi_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -44,5 +40,3 @@
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/gnocchi/tasks/bootstrap.yml b/ansible/roles/gnocchi/tasks/bootstrap.yml
index e2cb62e373..2157d78826 100644
--- a/ansible/roles/gnocchi/tasks/bootstrap.yml
+++ b/ansible/roles/gnocchi/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating gnocchi database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -17,6 +18,7 @@
- name: Creating gnocchi database user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
diff --git a/ansible/roles/gnocchi/tasks/bootstrap_service.yml b/ansible/roles/gnocchi/tasks/bootstrap_service.yml
index 0992575a59..3f9e8090cc 100644
--- a/ansible/roles/gnocchi/tasks/bootstrap_service.yml
+++ b/ansible/roles/gnocchi/tasks/bootstrap_service.yml
@@ -3,7 +3,7 @@
vars:
gnocchi_api: "{{ gnocchi_services['gnocchi-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -14,7 +14,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_gnocchi"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ gnocchi_api.volumes }}"
run_once: True
delegate_to: "{{ groups[gnocchi_api.group][0] }}"
diff --git a/ansible/roles/gnocchi/tasks/check-containers.yml b/ansible/roles/gnocchi/tasks/check-containers.yml
index ed92fb1da6..b7e2f7c29f 100644
--- a/ansible/roles/gnocchi/tasks/check-containers.yml
+++ b/ansible/roles/gnocchi/tasks/check-containers.yml
@@ -1,17 +1,3 @@
---
-- name: Check gnocchi containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ gnocchi_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/gnocchi/tasks/config.yml b/ansible/roles/gnocchi/tasks/config.yml
index 98448789f3..85216ec50d 100644
--- a/ansible/roles/gnocchi/tasks/config.yml
+++ b/ansible/roles/gnocchi/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ gnocchi_services }}"
+ with_dict: "{{ gnocchi_services | select_services_enabled_and_mapped_to_host }}"
- include_tasks: external_ceph.yml
when:
@@ -45,12 +42,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ gnocchi_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ gnocchi_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over gnocchi.conf
vars:
@@ -65,12 +57,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/gnocchi.conf"
mode: "0660"
become: true
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ gnocchi_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ gnocchi_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over wsgi-gnocchi.conf
vars:
@@ -80,13 +67,9 @@
dest: "{{ node_config_directory }}/{{ item }}/wsgi-gnocchi.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups['gnocchi-api']
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_items:
- "gnocchi-api"
- notify:
- - Restart gnocchi-api container
- name: Copying over existing policy file
template:
@@ -96,8 +79,4 @@
become: true
when:
- gnocchi_policy_file is defined
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ gnocchi_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ gnocchi_services | select_services_enabled_and_mapped_to_host }}"
diff --git a/ansible/roles/gnocchi/tasks/config_validate.yml b/ansible/roles/gnocchi/tasks/config_validate.yml
new file mode 100644
index 0000000000..a8fa5f57ee
--- /dev/null
+++ b/ansible/roles/gnocchi/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ gnocchi_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ gnocchi_config_validation }}"
diff --git a/ansible/roles/gnocchi/tasks/external_ceph.yml b/ansible/roles/gnocchi/tasks/external_ceph.yml
index 1f36b2483c..c36fd98550 100644
--- a/ansible/roles/gnocchi/tasks/external_ceph.yml
+++ b/ansible/roles/gnocchi/tasks/external_ceph.yml
@@ -1,29 +1,31 @@
---
-- name: Copy over ceph.conf file
- template:
- src: "{{ node_custom_config }}/gnocchi/ceph.conf"
- dest: "{{ node_config_directory }}/{{ item.key }}/ceph.conf"
+- name: Ensuring gnocchi service ceph config subdir exists
+ file:
+ path: "{{ node_config_directory }}/{{ item.key }}/ceph"
+ state: "directory"
+ owner: "{{ config_owner_user }}"
+ group: "{{ config_owner_group }}"
+ mode: "0770"
+ become: true
+ with_dict: "{{ gnocchi_services | select_services_enabled_and_mapped_to_host }}"
+
+- name: Copy over ceph config for Gnocchi
+ merge_configs:
+ sources:
+ - "{{ node_custom_config }}/gnocchi/{{ ceph_cluster }}.conf"
+ - "{{ node_custom_config }}/gnocchi/{{ item.key }}/{{ ceph_cluster }}.conf"
+ dest: "{{ node_config_directory }}/{{ item.key }}/ceph/{{ ceph_cluster }}.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ gnocchi_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ gnocchi_services | select_services_enabled_and_mapped_to_host }}"
-- name: Copy over ceph gnocchi keyring
+- name: Copy over ceph Gnocchi keyrings
template:
- src: "{{ node_custom_config }}/gnocchi/{{ ceph_gnocchi_keyring }}"
- dest: "{{ node_config_directory }}/{{ item.key }}/{{ ceph_gnocchi_keyring }}"
+ src: "{{ node_custom_config }}/gnocchi/{{ ceph_cluster }}.client.{{ ceph_gnocchi_user }}.keyring"
+ dest: "{{ node_config_directory }}/{{ item.key }}/ceph/{{ ceph_cluster }}.client.{{ ceph_gnocchi_user }}.keyring"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ gnocchi_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ gnocchi_services | select_services_enabled_and_mapped_to_host }}"
- name: Ensuring config directory has correct owner and permission
become: true
@@ -32,7 +34,4 @@
recurse: yes
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ gnocchi_services }}"
+ with_dict: "{{ gnocchi_services | select_services_enabled_and_mapped_to_host }}"
diff --git a/ansible/roles/gnocchi/tasks/precheck.yml b/ansible/roles/gnocchi/tasks/precheck.yml
index 283f33c291..f0ecf557c4 100644
--- a/ansible/roles/gnocchi/tasks/precheck.yml
+++ b/ansible/roles/gnocchi/tasks/precheck.yml
@@ -8,8 +8,11 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- gnocchi_api
+ check_mode: false
register: container_facts
- name: Checking free port for Gnocchi API
diff --git a/ansible/roles/gnocchi/templates/gnocchi-api.json.j2 b/ansible/roles/gnocchi/templates/gnocchi-api.json.j2
index 53b27b353a..de8ed12900 100644
--- a/ansible/roles/gnocchi/templates/gnocchi-api.json.j2
+++ b/ansible/roles/gnocchi/templates/gnocchi-api.json.j2
@@ -22,18 +22,17 @@
"perm": "0600"
}{% endif %}{% if gnocchi_backend_storage == 'ceph' %},
{
- "source": "{{ container_config_directory }}/ceph.conf",
- "dest": "/etc/ceph/ceph.conf",
+ "source": "{{ container_config_directory }}/ceph",
+ "dest": "/etc/ceph",
"owner": "gnocchi",
"perm": "0600"
- },
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
{
- "source": "{{ container_config_directory }}/{{ ceph_gnocchi_keyring }}",
- "dest": "/etc/ceph/{{ ceph_gnocchi_keyring }}",
- "owner": "gnocchi",
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
"perm": "0600"
}{% endif %}
-
],
"permissions": [
{
diff --git a/ansible/roles/gnocchi/templates/gnocchi-metricd.json.j2 b/ansible/roles/gnocchi/templates/gnocchi-metricd.json.j2
index aa43975270..f9b4c6a53f 100644
--- a/ansible/roles/gnocchi/templates/gnocchi-metricd.json.j2
+++ b/ansible/roles/gnocchi/templates/gnocchi-metricd.json.j2
@@ -14,15 +14,15 @@
"perm": "0600"
}{% endif %}{% if gnocchi_backend_storage == 'ceph' %},
{
- "source": "{{ container_config_directory }}/ceph.conf",
- "dest": "/etc/ceph/ceph.conf",
+ "source": "{{ container_config_directory }}/ceph",
+ "dest": "/etc/ceph",
"owner": "gnocchi",
"perm": "0600"
- },
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
{
- "source": "{{ container_config_directory }}/{{ ceph_gnocchi_keyring }}",
- "dest": "/etc/ceph/{{ ceph_gnocchi_keyring }}",
- "owner": "gnocchi",
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
"perm": "0600"
}{% endif %}
],
diff --git a/ansible/roles/gnocchi/templates/gnocchi-statsd.json.j2 b/ansible/roles/gnocchi/templates/gnocchi-statsd.json.j2
index 027e146860..0024b5405d 100644
--- a/ansible/roles/gnocchi/templates/gnocchi-statsd.json.j2
+++ b/ansible/roles/gnocchi/templates/gnocchi-statsd.json.j2
@@ -14,15 +14,15 @@
"perm": "0600"
}{% endif %}{% if gnocchi_backend_storage == 'ceph' %},
{
- "source": "{{ container_config_directory }}/ceph.conf",
- "dest": "/etc/ceph/ceph.conf",
+ "source": "{{ container_config_directory }}/ceph",
+ "dest": "/etc/ceph",
"owner": "gnocchi",
"perm": "0600"
- },
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
{
- "source": "{{ container_config_directory }}/{{ ceph_gnocchi_keyring }}",
- "dest": "/etc/ceph/{{ ceph_gnocchi_keyring }}",
- "owner": "gnocchi",
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
"perm": "0600"
}{% endif %}
],
diff --git a/ansible/roles/gnocchi/templates/gnocchi.conf.j2 b/ansible/roles/gnocchi/templates/gnocchi.conf.j2
index 0d347e4b68..a23a1170fb 100644
--- a/ansible/roles/gnocchi/templates/gnocchi.conf.j2
+++ b/ansible/roles/gnocchi/templates/gnocchi.conf.j2
@@ -56,7 +56,7 @@ auth_type = password
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
@@ -70,6 +70,9 @@ policy_file = {{ gnocchi_policy_file }}
driver = redis
redis_url = {{ redis_connection_string }}
{% endif %}
+{% if gnocchi_backend_storage == 'swift' %}
+swift_cacert = {{ openstack_cacert }}
+{% endif %}
[storage]
{% if gnocchi_backend_storage == 'file' %}
@@ -79,11 +82,12 @@ file_basepath = /var/lib/gnocchi
driver = ceph
ceph_pool = {{ ceph_gnocchi_pool_name }}
ceph_username = {{ ceph_gnocchi_user }}
-ceph_keyring = /etc/ceph/{{ ceph_gnocchi_keyring }}
-ceph_conffile = /etc/ceph/ceph.conf
+ceph_keyring = /etc/ceph/{{ ceph_cluster }}.client.{{ ceph_gnocchi_user }}.keyring
+ceph_conffile = /etc/ceph/{{ ceph_cluster }}.conf
{% elif gnocchi_backend_storage == 'swift' %}
driver = swift
swift_authurl = {{ keystone_internal_url }}
+swift_cacert = {{ openstack_cacert }}
swift_auth_version = 3
swift_user = service:{{ swift_keystone_user }}
swift_key = {{ swift_keystone_password }}
diff --git a/ansible/roles/grafana/defaults/main.yml b/ansible/roles/grafana/defaults/main.yml
index 18248c7f00..cce9562fe2 100644
--- a/ansible/roles/grafana/defaults/main.yml
+++ b/ansible/roles/grafana/defaults/main.yml
@@ -13,11 +13,14 @@ grafana_services:
mode: "http"
external: false
port: "{{ grafana_server_port }}"
+ listen_port: "{{ grafana_server_listen_port }}"
grafana_server_external:
enabled: "{{ enable_grafana_external | bool }}"
mode: "http"
external: true
- port: "{{ grafana_server_port }}"
+ external_fqdn: "{{ grafana_external_fqdn }}"
+ port: "{{ grafana_server_public_port }}"
+ listen_port: "{{ grafana_server_listen_port }}"
####################
# Database
@@ -54,31 +57,24 @@ grafana_data_sources:
url: "{{ influxdb_internal_endpoint }}"
access: "proxy"
basicAuth: false
- elasticsearch:
- enabled: "{{ enable_elasticsearch | bool }}"
+ opensearch:
+ enabled: "{{ enable_opensearch | bool }}"
data:
- name: "elasticsearch"
- type: "elasticsearch"
+ name: "opensearch"
+ type: "grafana-opensearch-datasource"
access: "proxy"
- url: "{{ elasticsearch_internal_endpoint }}"
- database: "flog-*"
+ url: "{{ opensearch_internal_endpoint }}"
jsonData:
- esVersion: 5
+ flavor: "OpenSearch"
+ database: "{{ opensearch_log_index_prefix }}-*"
+ version: "2.11.1"
timeField: "@timestamp"
- monasca:
- enabled: "{{ enable_monasca | bool }}"
- data:
- name: "Monasca"
- type: "monasca-datasource"
- access: "proxy"
- url: "{{ monasca_api_internal_base_endpoint }}"
- jsonData:
- keystoneAuth: True
+ logLevelField: "log_level"
##########
# Grafana
##########
-grafana_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/grafana"
+grafana_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}grafana"
grafana_tag: "{{ openstack_tag }}"
grafana_image_full: "{{ grafana_image }}:{{ grafana_tag }}"
grafana_admin_username: "admin"
@@ -88,13 +84,15 @@ grafana_default_volumes:
- "{{ node_config_directory }}/grafana/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "grafana:/var/lib/grafana/"
- "kolla_logs:/var/log/kolla/"
grafana_extra_volumes: "{{ default_extra_volumes }}"
grafana_start_first_node_delay: 10
grafana_start_first_node_retries: 12
+# TODO(dawudm): make this True in the D release
+grafana_remove_old_volume: false
+
############
# Prometheus
############
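
With Elasticsearch and Monasca gone, log search moves to a single OpenSearch datasource backed by the grafana-opensearch-datasource plugin. Operators needing different plugin settings can override the whole dict; a sketch mirroring the defaults above:

    grafana_data_sources:
      opensearch:
        enabled: "{{ enable_opensearch | bool }}"
        data:
          name: "opensearch"
          type: "grafana-opensearch-datasource"
          access: "proxy"
          url: "{{ opensearch_internal_endpoint }}"
          jsonData:
            flavor: "OpenSearch"
            database: "{{ opensearch_log_index_prefix }}-*"
            version: "2.11.1"
            timeField: "@timestamp"
            logLevelField: "log_level"
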
diff --git a/ansible/roles/grafana/handlers/main.yml b/ansible/roles/grafana/handlers/main.yml
index 0f13e4a187..7096d367d5 100644
--- a/ansible/roles/grafana/handlers/main.yml
+++ b/ansible/roles/grafana/handlers/main.yml
@@ -5,7 +5,7 @@
service_name: "grafana"
service: "{{ grafana_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -13,7 +13,6 @@
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
when:
- - kolla_action != "config"
- inventory_hostname == groups[service.group] | first
- name: Waiting for grafana to start on first node
@@ -23,6 +22,7 @@
service: "{{ grafana_services[service_name] }}"
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: uri
module_args:
url: "http://{{ api_interface_address | put_address_in_context('url') }}:{{ grafana_server_port }}/login"
@@ -32,7 +32,6 @@
retries: "{{ grafana_start_first_node_retries }}"
delay: "{{ grafana_start_first_node_delay }}"
when:
- - kolla_action != "config"
- inventory_hostname == groups[service.group] | first
- name: Restart remaining grafana containers
@@ -41,7 +40,7 @@
service_name: "grafana"
service: "{{ grafana_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -49,5 +48,4 @@
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
when:
- - kolla_action != "config"
- inventory_hostname != groups[service.group] | first
diff --git a/ansible/roles/grafana/tasks/bootstrap.yml b/ansible/roles/grafana/tasks/bootstrap.yml
index 5c4e56c400..8c8df3d4a7 100644
--- a/ansible/roles/grafana/tasks/bootstrap.yml
+++ b/ansible/roles/grafana/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating grafana database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -17,6 +18,7 @@
- name: Creating grafana database user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
diff --git a/ansible/roles/grafana/tasks/check-containers.yml b/ansible/roles/grafana/tasks/check-containers.yml
index bb2f46ae75..b7e2f7c29f 100644
--- a/ansible/roles/grafana/tasks/check-containers.yml
+++ b/ansible/roles/grafana/tasks/check-containers.yml
@@ -1,16 +1,3 @@
---
-- name: Check grafana containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes }}"
- dimensions: "{{ item.value.dimensions }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ grafana_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/grafana/tasks/config.yml b/ansible/roles/grafana/tasks/config.yml
index cc12d874a8..0e08edbcf1 100644
--- a/ansible/roles/grafana/tasks/config.yml
+++ b/ansible/roles/grafana/tasks/config.yml
@@ -7,14 +7,14 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ grafana_services }}"
+ with_dict: "{{ grafana_services | select_services_enabled_and_mapped_to_host }}"
- name: Check if extra configuration file exists
find:
path: "{{ node_custom_config }}/grafana/"
+ excludes:
+ - "prometheus.yaml"
+ - "provisioning.yaml"
delegate_to: localhost
changed_when: False
run_once: True
@@ -30,12 +30,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ grafana_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ grafana_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over grafana.ini
vars:
@@ -48,12 +43,7 @@
dest: "{{ node_config_directory }}/grafana/grafana.ini"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ grafana_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ grafana_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over extra configuration file
become: true
@@ -65,25 +55,6 @@
- item is defined
with_items:
- "{{ check_extra_conf_grafana.files }}"
- notify:
- - Restart grafana container
-
-- name: Check if custom grafana home dashboard exists
- stat:
- path: "{{ node_custom_config }}/grafana/grafana_home_dashboard.json"
- delegate_to: localhost
- register: grafana_custom_dashboard_file
- run_once: True
-
-- name: Copying over grafana home dashboard if exists
- become: true
- template:
- src: "{{ node_custom_config }}/grafana/grafana_home_dashboard.json"
- dest: "{{ node_config_directory }}/grafana/grafana_home_dashboard.json"
- mode: "0660"
- when: grafana_custom_dashboard_file.stat.exists
- notify:
- - Restart grafana container
- name: Configuring Prometheus as data source for Grafana
vars:
@@ -94,15 +65,12 @@
mode: "0660"
become: true
when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ - service | service_enabled_and_mapped_to_host
- enable_prometheus | bool
with_first_found:
- "{{ node_custom_config }}/grafana/{{ inventory_hostname }}/prometheus.yaml"
- "{{ node_custom_config }}/grafana/prometheus.yaml"
- "prometheus.yaml.j2"
- notify:
- - Restart grafana container
- name: Configuring dashboards provisioning
vars:
@@ -112,15 +80,11 @@
dest: "{{ node_config_directory }}/grafana/provisioning.yaml"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_first_found:
- "{{ node_custom_config }}/grafana/{{ inventory_hostname }}/provisioning.yaml"
- "{{ node_custom_config }}/grafana/provisioning.yaml"
- "{{ role_path }}/templates/provisioning.yaml.j2"
- notify:
- - Restart grafana container
- name: Check if the folder for custom grafana dashboards exists
stat:
@@ -136,9 +100,7 @@
file:
state: absent
path: "{{ node_config_directory }}/grafana/dashboards/"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over custom dashboards
vars:
@@ -151,7 +113,4 @@
when:
- grafana_custom_dashboards_folder.stat.exists
- grafana_custom_dashboards_folder.stat.isdir
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart grafana container
+ - service | service_enabled_and_mapped_to_host
diff --git a/ansible/roles/murano/tasks/check.yml b/ansible/roles/grafana/tasks/config_validate.yml
similarity index 100%
rename from ansible/roles/murano/tasks/check.yml
rename to ansible/roles/grafana/tasks/config_validate.yml
diff --git a/ansible/roles/grafana/tasks/post_config.yml b/ansible/roles/grafana/tasks/post_config.yml
index 9a085b10ea..97a9bc818d 100644
--- a/ansible/roles/grafana/tasks/post_config.yml
+++ b/ansible/roles/grafana/tasks/post_config.yml
@@ -2,6 +2,7 @@
- name: Wait for grafana application ready
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: uri
module_args:
url: "{{ grafana_internal_endpoint }}/login"
@@ -12,9 +13,17 @@
delay: 2
run_once: true
+- name: Remove old grafana docker volume
+ become: true
+ kolla_container:
+ action: "remove_volume"
+ name: grafana
+ when: grafana_remove_old_volume | bool
+
- name: Enable grafana datasources
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: uri
module_args:
url: "{{ grafana_internal_endpoint }}/api/datasources"
@@ -36,6 +45,7 @@
- name: Disable Getting Started panel
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: uri
module_args:
url: "{{ grafana_internal_endpoint }}/api/user/helpflags/1"
@@ -47,4 +57,4 @@
register: grafana_response
changed_when: grafana_response.status == 200
run_once: true
- when: grafana_custom_dashboard_file.stat.exists
+ when: "'grafana_home_dashboard.json' in check_extra_conf_grafana.files | map(attribute='path') | map('basename')"
diff --git a/ansible/roles/grafana/tasks/precheck.yml b/ansible/roles/grafana/tasks/precheck.yml
index aaedcbc1c1..a95701c490 100644
--- a/ansible/roles/grafana/tasks/precheck.yml
+++ b/ansible/roles/grafana/tasks/precheck.yml
@@ -8,8 +8,11 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- grafana
+ check_mode: false
register: container_facts
- name: Checking free port for Grafana server
diff --git a/ansible/roles/grafana/tasks/upgrade.yml b/ansible/roles/grafana/tasks/upgrade.yml
index ce6e87e99a..88e2196ddb 100644
--- a/ansible/roles/grafana/tasks/upgrade.yml
+++ b/ansible/roles/grafana/tasks/upgrade.yml
@@ -4,7 +4,7 @@
service_name: "grafana"
service: "{{ grafana_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "compare_image"
common_options: "{{ docker_common_options }}"
name: "{{ project_name }}"
@@ -21,7 +21,7 @@
# is upgraded first, we stop all the other ones.
- name: Stopping all Grafana instances but the first node
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "{{ project_name }}"
diff --git a/ansible/roles/grafana/templates/grafana.json.j2 b/ansible/roles/grafana/templates/grafana.json.j2
index fcc3cc34bb..8b47e25e7c 100644
--- a/ansible/roles/grafana/templates/grafana.json.j2
+++ b/ansible/roles/grafana/templates/grafana.json.j2
@@ -7,21 +7,22 @@
"owner": "grafana",
"perm": "0600"
},
-{% if check_extra_conf_grafana is defined %}{% if check_extra_conf_grafana.matched > 0 %}{% for plugin in check_extra_conf_grafana.files %}
+{% if check_extra_conf_grafana is defined %}
+{% for extra_conf in check_extra_conf_grafana.files | map(attribute='path') | map('basename') %}
{
- "source": "{{ container_config_directory }}/{{ plugin.path | basename }}",
- "dest": "/etc/grafana/{{ plugin.path | basename }}",
- "owner": "grafana",
- "perm": "0600"
- },
-{% endfor %}{% endif %}{% endif %}
- {
- "source": "{{ container_config_directory }}/grafana_home_dashboard.json",
+ "source": "{{ container_config_directory }}/{{ extra_conf }}",
+{% if extra_conf == 'grafana_home_dashboard.json' %}
"dest": "/usr/share/grafana/public/dashboards/home.json",
"owner": "root",
- "perm": "0644",
- "optional": true
+ "perm": "0644"
+{% else %}
+ "dest": "/etc/grafana/{{ extra_conf }}",
+ "owner": "grafana",
+ "perm": "0600"
+{% endif %}
},
+{% endfor %}
+{% endif %}
{
"source": "{{ container_config_directory }}/prometheus.yaml",
"dest": "/etc/grafana/provisioning/datasources/prometheus.yaml",
@@ -41,7 +42,13 @@
"owner": "grafana",
"perm": "0755",
"optional": true
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/grafana/templates/prometheus.yaml.j2 b/ansible/roles/grafana/templates/prometheus.yaml.j2
index a0b1a4ae19..5615638fb5 100644
--- a/ansible/roles/grafana/templates/prometheus.yaml.j2
+++ b/ansible/roles/grafana/templates/prometheus.yaml.j2
@@ -4,6 +4,10 @@ datasources:
- name: Prometheus
type: prometheus
access: proxy
+ basicAuth: true
+ basicAuthUser: "{{ prometheus_grafana_user }}"
+ secureJsonData:
+ basicAuthPassword: "{{ prometheus_grafana_password }}"
orgId: 1
url: {{ grafana_prometheus_url }}
version: 1
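
Grafana now authenticates to Prometheus with HTTP basic auth, so the two credentials must exist. The user name presumably comes from the prometheus role defaults and the password from passwords.yml (the exact locations are an assumption; the variable names are taken from the template):

    # passwords.yml (sketch)
    prometheus_grafana_password: "CHANGE_ME"
    # role defaults (assumed): prometheus_grafana_user: "grafana"
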
diff --git a/ansible/roles/hacluster/defaults/main.yml b/ansible/roles/hacluster/defaults/main.yml
index 24867af318..2619eccf94 100644
--- a/ansible/roles/hacluster/defaults/main.yml
+++ b/ansible/roles/hacluster/defaults/main.yml
@@ -42,15 +42,15 @@ hacluster_services:
####################
hacluster_tag: "{{ openstack_tag }}"
-hacluster_corosync_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/hacluster-corosync"
+hacluster_corosync_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}hacluster-corosync"
hacluster_corosync_tag: "{{ openstack_tag }}"
hacluster_corosync_image_full: "{{ hacluster_corosync_image }}:{{ hacluster_corosync_tag }}"
-hacluster_pacemaker_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/hacluster-pacemaker"
+hacluster_pacemaker_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}hacluster-pacemaker"
hacluster_pacemaker_tag: "{{ openstack_tag }}"
hacluster_pacemaker_image_full: "{{ hacluster_pacemaker_image }}:{{ hacluster_pacemaker_tag }}"
-hacluster_pacemaker_remote_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/hacluster-pacemaker-remote"
+hacluster_pacemaker_remote_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}hacluster-pacemaker-remote"
hacluster_pacemaker_remote_tag: "{{ openstack_tag }}"
hacluster_pacemaker_remote_image_full: "{{ hacluster_pacemaker_remote_image }}:{{ hacluster_pacemaker_remote_tag }}"
diff --git a/ansible/roles/hacluster/handlers/main.yml b/ansible/roles/hacluster/handlers/main.yml
index 8f9d62a396..a9042dbb55 100644
--- a/ansible/roles/hacluster/handlers/main.yml
+++ b/ansible/roles/hacluster/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "hacluster-corosync"
service: "{{ hacluster_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -13,15 +13,13 @@
ipc_mode: "{{ service.ipc_mode }}"
cap_add: "{{ service.cap_add }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
- name: Restart hacluster-pacemaker container
vars:
service_name: "hacluster-pacemaker"
service: "{{ hacluster_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -30,15 +28,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
ipc_mode: "{{ service.ipc_mode }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
- name: Restart hacluster-pacemaker-remote container
vars:
service_name: "hacluster-pacemaker-remote"
service: "{{ hacluster_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -46,5 +42,3 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
ipc_mode: "{{ service.ipc_mode }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/hacluster/tasks/bootstrap_service.yml b/ansible/roles/hacluster/tasks/bootstrap_service.yml
index 8d503f6740..40f0507dde 100644
--- a/ansible/roles/hacluster/tasks/bootstrap_service.yml
+++ b/ansible/roles/hacluster/tasks/bootstrap_service.yml
@@ -10,23 +10,21 @@
- name: Ensure remote node is added
vars:
pacemaker_service: "{{ hacluster_services['hacluster-pacemaker'] }}"
- pacemaker_remote_service: "{{ hacluster_services['hacluster-pacemaker-remote'] }}"
+ service: "{{ hacluster_services['hacluster-pacemaker-remote'] }}"
shell: >
{{ kolla_container_engine }} exec {{ pacemaker_service.container_name }}
cibadmin --modify --scope resources -X '
    <resources>
-      <primitive class="ocf" id="{{ ansible_facts.hostname }}" provider="pacemaker" type="remote">
-        <instance_attributes id="{{ ansible_facts.hostname }}-instance_attributes">
-          <nvpair id="{{ ansible_facts.hostname }}-instance_attributes-server" name="server" value="{{ 'api' | kolla_address }}"/>
+      <primitive class="ocf" id="{{ ansible_facts.nodename }}" provider="pacemaker" type="remote">
+        <instance_attributes id="{{ ansible_facts.nodename }}-instance_attributes">
+          <nvpair id="{{ ansible_facts.nodename }}-instance_attributes-server" name="server" value="{{ 'api' | kolla_address }}"/>
         </instance_attributes>
         <operations>
-          <op id="{{ ansible_facts.hostname }}-monitor-interval-60s" interval="60s" name="monitor"/>
+          <op id="{{ ansible_facts.nodename }}-monitor-interval-60s" interval="60s" name="monitor"/>
         </operations>
       </primitive>
     </resources>
'
become: true
delegate_to: "{{ groups[pacemaker_service.group][0] }}"
- when:
- - inventory_hostname in groups[pacemaker_remote_service.group]
- - pacemaker_remote_service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
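
The `service_enabled_and_mapped_to_host` filter used here (and throughout this patch) is shorthand for the pair of conditions it replaces; a minimal sketch of the equivalence:

# Before: explicit conditions
when:
  - inventory_hostname in groups[service.group]
  - service.enabled | bool
# After: equivalent filter form (expects the service dict)
when: service | service_enabled_and_mapped_to_host
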
diff --git a/ansible/roles/hacluster/tasks/check-containers.yml b/ansible/roles/hacluster/tasks/check-containers.yml
index 6db67a9504..b7e2f7c29f 100644
--- a/ansible/roles/hacluster/tasks/check-containers.yml
+++ b/ansible/roles/hacluster/tasks/check-containers.yml
@@ -1,25 +1,3 @@
---
-- name: Check hacluster containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image | default(omit) }}"
- volumes: "{{ service.volumes | default(omit) }}"
- dimensions: "{{ service.dimensions | default(omit) }}"
- volumes_from: "{{ service.volumes_from | default(omit) }}"
- privileged: "{{ service.privileged | default(omit) }}"
- cap_add: "{{ service.cap_add | default(omit) }}"
- environment: "{{ service.environment | default(omit) }}"
- ipc_mode: "{{ service.ipc_mode | default(omit) }}"
- pid_mode: "{{ service.pid_mode | default(omit) }}"
- security_opt: "{{ service.security_opt | default(omit) }}"
- labels: "{{ service.labels | default(omit) }}"
- command: "{{ service.command | default(omit) }}"
- vars:
- service_name: "{{ item.key }}"
- service: "{{ item.value }}"
- with_dict: "{{ hacluster_services | select_services_enabled_and_mapped_to_host }}"
- notify:
- - "Restart {{ service_name }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/hacluster/tasks/config.yml b/ansible/roles/hacluster/tasks/config.yml
index 1380e66185..584167dfe6 100644
--- a/ansible/roles/hacluster/tasks/config.yml
+++ b/ansible/roles/hacluster/tasks/config.yml
@@ -7,10 +7,7 @@
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ hacluster_services }}"
+ with_dict: "{{ hacluster_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over config.json files for services
become: true
@@ -19,12 +16,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
register: config_jsons
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ hacluster_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ hacluster_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over corosync.conf into hacluster-corosync
vars:
@@ -34,15 +26,11 @@
dest: "{{ node_config_directory }}/hacluster-corosync/corosync.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_first_found:
- "{{ node_custom_config }}/hacluster-corosync/{{ inventory_hostname }}/corosync.conf"
- "{{ node_custom_config }}/hacluster-corosync/corosync.conf"
- "hacluster_corosync.conf.j2"
- notify:
- - Restart hacluster-corosync container
- name: Copying over Corosync authkey file
vars:
@@ -52,14 +40,10 @@
dest: "{{ node_config_directory }}/hacluster-corosync/authkey"
mode: "0600"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_first_found:
- "{{ node_custom_config }}/hacluster-corosync/{{ inventory_hostname }}/authkey"
- "{{ node_custom_config }}/hacluster-corosync/authkey"
- notify:
- - Restart hacluster-corosync container
- name: Copying over Pacemaker authkey file
vars:
@@ -69,14 +53,10 @@
dest: "{{ node_config_directory }}/hacluster-pacemaker/authkey"
mode: "0600"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_first_found:
- "{{ node_custom_config }}/hacluster-pacemaker/{{ inventory_hostname }}/authkey"
- "{{ node_custom_config }}/hacluster-pacemaker/authkey"
- notify:
- - Restart hacluster-pacemaker container
- name: Copying over Pacemaker authkey file into hacluster-pacemaker-remote
vars:
@@ -86,11 +66,7 @@
dest: "{{ node_config_directory }}/hacluster-pacemaker-remote/authkey"
mode: "0600"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_first_found:
- "{{ node_custom_config }}/hacluster-pacemaker/{{ inventory_hostname }}/authkey"
- "{{ node_custom_config }}/hacluster-pacemaker/authkey"
- notify:
- - Restart hacluster-pacemaker-remote container
diff --git a/ansible/roles/ovn/tasks/check.yml b/ansible/roles/hacluster/tasks/config_validate.yml
similarity index 100%
rename from ansible/roles/ovn/tasks/check.yml
rename to ansible/roles/hacluster/tasks/config_validate.yml
diff --git a/ansible/roles/hacluster/tasks/precheck.yml b/ansible/roles/hacluster/tasks/precheck.yml
index 012b8f1fb6..80391d046d 100644
--- a/ansible/roles/hacluster/tasks/precheck.yml
+++ b/ansible/roles/hacluster/tasks/precheck.yml
@@ -2,8 +2,11 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- hacluster_pacemaker_remote
+ check_mode: false
register: container_facts
# NOTE(yoctozepto): Corosync runs over UDP so one cannot use wait_for to check
diff --git a/ansible/roles/hacluster/templates/hacluster_corosync.conf.j2 b/ansible/roles/hacluster/templates/hacluster_corosync.conf.j2
index 6547272a27..734eb08a14 100644
--- a/ansible/roles/hacluster/templates/hacluster_corosync.conf.j2
+++ b/ansible/roles/hacluster/templates/hacluster_corosync.conf.j2
@@ -13,7 +13,7 @@ nodelist {
{% for host in groups['hacluster'] | sort %}
node {
ring0_addr: {{ 'api' | kolla_address(host) }}
- name: {{ hostvars[host].ansible_facts.hostname }}
+ name: {{ hostvars[host].ansible_facts.nodename }}
nodeid: {{ loop.index }}
}
{% endfor %}
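
The switch from `hostname` to `nodename` matters on hosts whose node name is fully qualified; a sketch of the difference between the two Ansible facts:

# Illustrative values for a host whose uname -n is fully qualified:
# ansible_facts.hostname -> "node1"              (short host name)
# ansible_facts.nodename -> "node1.example.com"  (as reported by uname -n)
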
diff --git a/ansible/roles/haproxy-config/defaults/main.yml b/ansible/roles/haproxy-config/defaults/main.yml
index d6456bd2da..ea72d59906 100644
--- a/ansible/roles/haproxy-config/defaults/main.yml
+++ b/ansible/roles/haproxy-config/defaults/main.yml
@@ -5,6 +5,7 @@ haproxy_service_template: "haproxy_single_service_split.cfg.j2"
haproxy_frontend_http_extra:
- "option httplog"
- "option forwardfor"
+haproxy_frontend_redirect_extra: []
haproxy_frontend_tcp_extra:
- "option tcplog"
haproxy_backend_http_extra: []
diff --git a/ansible/roles/haproxy-config/tasks/main.yml b/ansible/roles/haproxy-config/tasks/main.yml
index ee1b2ef376..1e3f293adb 100644
--- a/ansible/roles/haproxy-config/tasks/main.yml
+++ b/ansible/roles/haproxy-config/tasks/main.yml
@@ -22,10 +22,41 @@
notify:
- Restart haproxy container
+- name: "Add configuration for {{ project_name }} when using single external frontend"
+ vars:
+ service: "{{ item.value }}"
+ blockinfile:
+ create: yes
+ path: "{{ node_config_directory }}/haproxy/external-frontend-map"
+ insertafter: EOF
+ marker: "# {mark} {{ item.key }}"
+ mode: "0660"
+ block: |
+ {%- set haproxy = service.haproxy | default({}) %}
+ {%- for haproxy_name, haproxy_service in haproxy.items() %}
+ {% set external = haproxy_service.external | default(false) | bool %}
+ {% set enabled = haproxy_service.enabled | default(false) | bool %}
+ {% set with_frontend = haproxy_service.with_frontend | default(true) | bool %}
+ {% set mode = haproxy_service.mode | default('http') %}
+ {%- if external and with_frontend and enabled and mode == 'http' %}
+ {{ haproxy_service.external_fqdn }} {{ haproxy_name }}_back
+ {% endif -%}
+ {%- endfor -%}
+ become: true
+ with_dict: "{{ project_services }}"
+ when:
+ - haproxy_single_external_frontend | bool
+ - service.enabled | bool
+ - service.haproxy is defined
+ - enable_haproxy | bool
+ notify:
+ - Restart haproxy container
+
- name: "Configuring firewall for {{ project_name }}"
firewalld:
- offline: "yes"
- permanent: "yes"
+ immediate: true
+ offline: true
+ permanent: true
port: "{{ item.value.port }}/tcp"
state: "enabled"
zone: "{{ external_api_firewalld_zone }}"
@@ -36,6 +67,5 @@
- item.value.port is defined
- item.value.external | default('false') | bool
- enable_external_api_firewalld | bool
+ - kolla_action != "config"
with_dict: "{{ project_services | extract_haproxy_services }}"
- notify:
- - "Reload firewalld"
diff --git a/ansible/roles/haproxy-config/templates/haproxy_single_service_split.cfg.j2 b/ansible/roles/haproxy-config/templates/haproxy_single_service_split.cfg.j2
index 6e5b71bf72..124c04dc92 100644
--- a/ansible/roles/haproxy-config/templates/haproxy_single_service_split.cfg.j2
+++ b/ansible/roles/haproxy-config/templates/haproxy_single_service_split.cfg.j2
@@ -1,6 +1,8 @@
#jinja2: lstrip_blocks: True
-{%- set external_tls_bind_info = 'ssl crt /etc/haproxy/haproxy.pem' if kolla_enable_tls_external|bool else '' %}
-{%- set internal_tls_bind_info = 'ssl crt /etc/haproxy/haproxy-internal.pem' if kolla_enable_tls_internal|bool else '' %}
+{%- set external_tls_bind_info = 'ssl crt /etc/haproxy/certificates/haproxy.pem' if kolla_enable_tls_external|bool else '' %}
+{%- set external_tls_bind_info = "%s %s" % (external_tls_bind_info, haproxy_http2_protocol) if kolla_enable_tls_external|bool and haproxy_enable_http2|bool else external_tls_bind_info %}
+{%- set internal_tls_bind_info = 'ssl crt /etc/haproxy/certificates/haproxy-internal.pem' if kolla_enable_tls_internal|bool else '' %}
+{%- set internal_tls_bind_info = "%s %s" % (internal_tls_bind_info, haproxy_http2_protocol) if kolla_enable_tls_internal|bool and haproxy_enable_http2|bool else internal_tls_bind_info %}
{%- macro userlist_macro(service_name, auth_user, auth_pass) %}
userlist {{ service_name }}-user
@@ -8,7 +10,7 @@ userlist {{ service_name }}-user
{% endmacro %}
{%- macro frontend_macro(service_name, service_port, service_mode, external,
- frontend_http_extra, frontend_tcp_extra) %}
+ frontend_http_extra, frontend_redirect_extra, frontend_tcp_extra) %}
frontend {{ service_name }}_front
{% if service_mode == 'redirect' %}
mode http
@@ -16,6 +18,9 @@ frontend {{ service_name }}_front
mode {{ service_mode }}
{% endif %}
{% if service_mode == 'http' %}
+ {% if external|bool %}
+ http-request deny if { path -i -m beg /server-status }
+ {% endif %}
{# Delete any pre-populated XFP header #}
http-request del-header X-Forwarded-Proto
{% for http_option in frontend_http_extra %}
@@ -45,7 +50,10 @@ frontend {{ service_name }}_front
{{ "bind %s:%s %s"|e|format(vip_address, service_port, tls_option)|trim() }}
{# Redirect mode sets a redirect scheme instead of a backend #}
{% if service_mode == 'redirect' %}
- redirect scheme https code 301 if !{ ssl_fc }
+ redirect scheme https code 301 if !{ ssl_fc } !{ path_reg ^/.well-known/acme-challenge/.+ }
+ {% for redirect_option in frontend_redirect_extra %}
+ {{ redirect_option }}
+ {% endfor %}
{% else %}
default_backend {{ service_name }}_back
{% endif %}
@@ -128,6 +136,7 @@ backend {{ service_name }}_back
{% set frontend_tcp_extra = haproxy_service.frontend_tcp_extra|default([]) + haproxy_frontend_tcp_extra %}
{% set backend_tcp_extra = haproxy_service.backend_tcp_extra|default([]) %}
{% set frontend_http_extra = haproxy_service.frontend_http_extra|default([]) + haproxy_frontend_http_extra %}
+ {% set frontend_redirect_extra = haproxy_service.frontend_redirect_extra|default([]) + haproxy_frontend_redirect_extra %}
{% set backend_http_extra = haproxy_service.backend_http_extra|default([]) %}
{% set tls_backend = haproxy_service.tls_backend|default(false) %}
{# Allow for basic auth #}
@@ -137,8 +146,10 @@ backend {{ service_name }}_back
{{ userlist_macro(haproxy_name, auth_user, auth_pass) }}
{% endif %}
{% if with_frontend %}
+ {% if not (external|bool and haproxy_single_external_frontend|bool and mode == 'http') %}
{{ frontend_macro(haproxy_name, haproxy_service.port, mode, external,
- frontend_http_extra, frontend_tcp_extra) }}
+ frontend_http_extra, frontend_redirect_extra, frontend_tcp_extra) }}
+ {% endif %}
{% endif %}
{# Redirect (to https) is a special case, as it does not include a backend #}
{% if with_backend and mode != 'redirect' %}
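
The new `frontend_redirect_extra` hook lets extra rules be appended to the HTTPS-redirect frontend. Mirroring the usage added for Horizon later in this patch, an operator could set, for example:

haproxy_frontend_redirect_extra:
  - "use_backend acme_client_back if { path_reg ^/.well-known/acme-challenge/.+ }"
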
diff --git a/ansible/roles/heat/defaults/main.yml b/ansible/roles/heat/defaults/main.yml
index 37a4622f62..b465f010fd 100644
--- a/ansible/roles/heat/defaults/main.yml
+++ b/ansible/roles/heat/defaults/main.yml
@@ -20,7 +20,8 @@ heat_services:
enabled: "{{ enable_heat }}"
mode: "http"
external: true
- port: "{{ heat_api_port }}"
+ external_fqdn: "{{ heat_external_fqdn }}"
+ port: "{{ heat_api_public_port }}"
listen_port: "{{ heat_api_listen_port }}"
tls_backend: "{{ heat_enable_tls_backend }}"
heat-api-cfn:
@@ -43,7 +44,8 @@ heat_services:
enabled: "{{ enable_heat }}"
mode: "http"
external: true
- port: "{{ heat_api_cfn_port }}"
+ external_fqdn: "{{ heat_cfn_external_fqdn }}"
+ port: "{{ heat_api_cfn_public_port }}"
listen_port: "{{ heat_api_cfn_listen_port }}"
tls_backend: "{{ heat_enable_tls_backend }}"
heat-engine:
@@ -55,6 +57,13 @@ heat_services:
dimensions: "{{ heat_engine_dimensions }}"
healthcheck: "{{ heat_engine_healthcheck }}"
+####################
+# Config Validate
+####################
+heat_config_validation:
+ - generator: "/heat/config-generator.conf"
+ config: "/etc/heat/heat.conf"
+
####################
# Database
####################
@@ -81,15 +90,15 @@ heat_database_shard:
####################
heat_tag: "{{ openstack_tag }}"
-heat_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/heat-api"
+heat_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}heat-api"
heat_api_tag: "{{ heat_tag }}"
heat_api_image_full: "{{ heat_api_image }}:{{ heat_api_tag }}"
-heat_api_cfn_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/heat-api-cfn"
+heat_api_cfn_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}heat-api-cfn"
heat_api_cfn_tag: "{{ heat_tag }}"
heat_api_cfn_image_full: "{{ heat_api_cfn_image }}:{{ heat_api_cfn_tag }}"
-heat_engine_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/heat-engine"
+heat_engine_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}heat-engine"
heat_engine_tag: "{{ heat_tag }}"
heat_engine_image_full: "{{ heat_engine_image }}:{{ heat_engine_tag }}"
@@ -140,19 +149,19 @@ heat_api_default_volumes:
- "{{ node_config_directory }}/heat-api/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "{{ kolla_dev_repos_directory ~ '/heat/heat:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/heat' if heat_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/heat:/dev-mode/heat' if heat_dev_mode | bool else '' }}"
- "kolla_logs:/var/log/kolla/"
heat_api_cfn_default_volumes:
- "{{ node_config_directory }}/heat-api-cfn/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "{{ kolla_dev_repos_directory ~ '/heat/heat:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/heat' if heat_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/heat:/dev-mode/heat' if heat_dev_mode | bool else '' }}"
- "kolla_logs:/var/log/kolla/"
heat_engine_default_volumes:
- "{{ node_config_directory }}/heat-engine/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "{{ kolla_dev_repos_directory ~ '/heat/heat:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/heat' if heat_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/heat:/dev-mode/heat' if heat_dev_mode | bool else '' }}"
- "kolla_logs:/var/log/kolla/"
heat_extra_volumes: "{{ default_extra_volumes }}"
@@ -163,12 +172,10 @@ heat_engine_extra_volumes: "{{ heat_extra_volumes }}"
####################
# OpenStack
####################
-heat_internal_endpoint: "{{ internal_protocol }}://{{ heat_internal_fqdn | put_address_in_context('url') }}:{{ heat_api_port }}/v1/%(tenant_id)s"
-heat_public_endpoint: "{{ public_protocol }}://{{ heat_external_fqdn | put_address_in_context('url') }}:{{ heat_api_port }}/v1/%(tenant_id)s"
-
-heat_cfn_public_base_endpoint: "{{ public_protocol }}://{{ heat_cfn_external_fqdn | put_address_in_context('url') }}:{{ heat_api_cfn_port }}"
+heat_internal_endpoint: "{{ heat_internal_base_endpoint }}/v1/%(tenant_id)s"
+heat_public_endpoint: "{{ heat_public_base_endpoint }}/v1/%(tenant_id)s"
-heat_cfn_internal_endpoint: "{{ internal_protocol }}://{{ heat_cfn_internal_fqdn | put_address_in_context('url') }}:{{ heat_api_cfn_port }}/v1"
+heat_cfn_internal_endpoint: "{{ heat_cfn_internal_base_endpoint }}/v1"
heat_cfn_public_endpoint: "{{ heat_cfn_public_base_endpoint }}/v1"
heat_logging_debug: "{{ openstack_logging_debug }}"
@@ -228,7 +235,7 @@ heat_ks_roles:
- "{{ heat_stack_user_role }}"
heat_ks_user_roles:
- - project: "{{ keystone_admin_project }}"
+ - project: "{{ openstack_auth.project_name }}"
user: "{{ openstack_auth.username }}"
role: "{{ heat_stack_owner_role }}"
@@ -236,3 +243,5 @@ heat_ks_user_roles:
# TLS
####################
heat_enable_tls_backend: "{{ kolla_enable_tls_backend }}"
+
+heat_copy_certs: "{{ kolla_copy_ca_into_containers | bool or heat_enable_tls_backend | bool }}"
diff --git a/ansible/roles/heat/handlers/main.yml b/ansible/roles/heat/handlers/main.yml
index e5569dc9ba..6997f5a010 100644
--- a/ansible/roles/heat/handlers/main.yml
+++ b/ansible/roles/heat/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "heat-api"
service: "{{ heat_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -12,15 +12,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart heat-api-cfn container
vars:
service_name: "heat-api-cfn"
service: "{{ heat_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -28,15 +26,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart heat-engine container
vars:
service_name: "heat-engine"
service: "{{ heat_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -44,5 +40,3 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/heat/tasks/bootstrap.yml b/ansible/roles/heat/tasks/bootstrap.yml
index 50d8b27138..8f893ce69d 100644
--- a/ansible/roles/heat/tasks/bootstrap.yml
+++ b/ansible/roles/heat/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating Heat database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -17,6 +18,7 @@
- name: Creating Heat database user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
diff --git a/ansible/roles/heat/tasks/bootstrap_service.yml b/ansible/roles/heat/tasks/bootstrap_service.yml
index 4aa7ea9132..872c456f13 100644
--- a/ansible/roles/heat/tasks/bootstrap_service.yml
+++ b/ansible/roles/heat/tasks/bootstrap_service.yml
@@ -3,7 +3,7 @@
vars:
heat_api: "{{ heat_services['heat-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -15,8 +15,8 @@
OS_INTERFACE: "internal"
OS_USERNAME: "{{ openstack_auth.username }}"
OS_PASSWORD: "{{ openstack_auth.password }}"
+ OS_PROJECT_NAME: "{{ openstack_auth.project_name }}"
OS_USER_DOMAIN_NAME: "{{ openstack_auth.user_domain_name }}"
- OS_SYSTEM_SCOPE: "{{ openstack_auth.system_scope }}"
OS_REGION_NAME: "{{ openstack_region_name }}"
OS_CACERT: "{{ openstack_cacert | default(omit) }}"
HEAT_DOMAIN_ADMIN_PASSWORD: "{{ heat_domain_admin_password }}"
@@ -24,7 +24,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_heat"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ heat_api.volumes | reject('equalto', '') | list }}"
run_once: True
delegate_to: "{{ groups[heat_api.group][0] }}"
diff --git a/ansible/roles/heat/tasks/check-containers.yml b/ansible/roles/heat/tasks/check-containers.yml
index 4ed5471a5c..b7e2f7c29f 100644
--- a/ansible/roles/heat/tasks/check-containers.yml
+++ b/ansible/roles/heat/tasks/check-containers.yml
@@ -1,17 +1,3 @@
---
-- name: Check heat containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ heat_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/heat/tasks/config.yml b/ansible/roles/heat/tasks/config.yml
index 37e6539154..f8244b552a 100644
--- a/ansible/roles/heat/tasks/config.yml
+++ b/ansible/roles/heat/tasks/config.yml
@@ -7,10 +7,7 @@
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ heat_services }}"
+ with_dict: "{{ heat_services | select_services_enabled_and_mapped_to_host }}"
- name: Check if policies shall be overwritten
stat:
@@ -33,7 +30,7 @@
- include_tasks: copy-certs.yml
when:
- - kolla_copy_ca_into_containers | bool or heat_enable_tls_backend | bool
+ - heat_copy_certs
- name: Copying over config.json files for services
become: true
@@ -41,12 +38,7 @@
src: "{{ item.key }}.json.j2"
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ heat_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ heat_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over heat.conf
become: true
@@ -61,12 +53,7 @@
- "{{ node_custom_config }}/heat/{{ inventory_hostname }}/heat.conf"
dest: "{{ node_config_directory }}/{{ item.key }}/heat.conf"
mode: "0660"
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ heat_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ heat_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over existing policy file
become: true
@@ -76,11 +63,7 @@
mode: "0660"
when:
- heat_policy_file is defined
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ heat_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ heat_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over heat-api wsgi config
vars:
@@ -90,11 +73,7 @@
dest: "{{ node_config_directory }}/heat-api/wsgi-heat-api.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service['group']]
- - service.enabled | bool
- notify:
- - Restart heat-api container
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over heat-api-cfn wsgi config
vars:
@@ -104,8 +83,4 @@
dest: "{{ node_config_directory }}/heat-api-cfn/wsgi-heat-api-cfn.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service['group']]
- - service.enabled | bool
- notify:
- - Restart heat-api-cfn container
+ when: service | service_enabled_and_mapped_to_host
diff --git a/ansible/roles/heat/tasks/config_validate.yml b/ansible/roles/heat/tasks/config_validate.yml
new file mode 100644
index 0000000000..e1931df598
--- /dev/null
+++ b/ansible/roles/heat/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ heat_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ heat_config_validation }}"
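
`heat_config_validation` pairs an oslo.config generator file with the config file it validates; presumably the shared service-config-validate role feeds each pair to oslo-config-validator, along these lines (a sketch; the task wrapping is assumed, not taken from this patch):

# Sketch of what service-config-validate does per entry
- name: Validate heat.conf against its generator config
  command: >
    oslo-config-validator
    --config-file /heat/config-generator.conf
    --input-file /etc/heat/heat.conf
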
diff --git a/ansible/roles/heat/tasks/precheck.yml b/ansible/roles/heat/tasks/precheck.yml
index a4e6d093ae..d10ba4ae7c 100644
--- a/ansible/roles/heat/tasks/precheck.yml
+++ b/ansible/roles/heat/tasks/precheck.yml
@@ -8,6 +8,8 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- heat_api
- heat_api_cfn
diff --git a/ansible/roles/heat/templates/heat-api-cfn.json.j2 b/ansible/roles/heat/templates/heat-api-cfn.json.j2
index 3d7e483f52..dc75a092af 100644
--- a/ansible/roles/heat/templates/heat-api-cfn.json.j2
+++ b/ansible/roles/heat/templates/heat-api-cfn.json.j2
@@ -32,7 +32,13 @@
"owner": "heat",
"perm": "0600"
}
- {% endif %}
+ {% endif %}{% if heat_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/heat/templates/heat-api.json.j2 b/ansible/roles/heat/templates/heat-api.json.j2
index f339f3383b..08f7bd8a97 100644
--- a/ansible/roles/heat/templates/heat-api.json.j2
+++ b/ansible/roles/heat/templates/heat-api.json.j2
@@ -32,7 +32,13 @@
"owner": "heat",
"perm": "0600"
}
- {% endif %}
+ {% endif %}{% if heat_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/heat/templates/heat-engine.json.j2 b/ansible/roles/heat/templates/heat-engine.json.j2
index abad7cb725..afab1fc656 100644
--- a/ansible/roles/heat/templates/heat-engine.json.j2
+++ b/ansible/roles/heat/templates/heat-engine.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/heat/{{ heat_policy_file }}",
"owner": "heat",
"perm": "0600"
+ }{% endif %}{% if heat_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/heat/templates/heat.conf.j2 b/ansible/roles/heat/templates/heat.conf.j2
index 54c9c6e689..5b8a1fbb0c 100644
--- a/ansible/roles/heat/templates/heat.conf.j2
+++ b/ansible/roles/heat/templates/heat.conf.j2
@@ -41,7 +41,7 @@ password = {{ heat_keystone_password }}
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
@@ -53,7 +53,6 @@ memcache_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address
[trustee]
-auth_uri = {{ keystone_internal_url }}
auth_url = {{ keystone_internal_url }}
auth_type = password
user_domain_id = {{ default_user_domain_id }}
@@ -72,11 +71,18 @@ topics = {{ heat_enabled_notification_topics | map(attribute='name') | join(',')
driver = noop
{% endif %}
-{% if om_enable_rabbitmq_tls | bool %}
[oslo_messaging_rabbit]
+heartbeat_in_pthread = {{ service_name == 'heat-api' or service_name == 'heat-api-cfn' }}
+{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
{% if heat_policy_file is defined %}
[oslo_policy]
@@ -97,3 +103,6 @@ trace_sqlalchemy = true
hmac_keys = {{ osprofiler_secret }}
connection_string = {{ osprofiler_backend_connection_string }}
{% endif %}
+
+[volumes]
+backups_enabled = {{ enable_cinder_backup | bool }}
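
Under the new messaging toggles, a deployment with quorum queues enabled would render an [oslo_messaging_rabbit] section roughly like this (a sketch assuming om_enable_rabbitmq_quorum_queues is true, TLS and classic HA are off, and service_name is heat-api):

[oslo_messaging_rabbit]
heartbeat_in_pthread = True
rabbit_quorum_queue = true
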
diff --git a/ansible/roles/horizon/defaults/main.yml b/ansible/roles/horizon/defaults/main.yml
index 8ee5f1fac3..0caf95ef5c 100644
--- a/ansible/roles/horizon/defaults/main.yml
+++ b/ansible/roles/horizon/defaults/main.yml
@@ -9,23 +9,17 @@ horizon_services:
ENABLE_BLAZAR: "{{ 'yes' if enable_horizon_blazar | bool else 'no' }}"
ENABLE_CLOUDKITTY: "{{ 'yes' if enable_horizon_cloudkitty | bool else 'no' }}"
ENABLE_DESIGNATE: "{{ 'yes' if enable_horizon_designate | bool else 'no' }}"
- ENABLE_FREEZER: "{{ 'yes' if enable_horizon_freezer | bool else 'no' }}"
+ ENABLE_FWAAS: "{{ 'yes' if enable_horizon_fwaas | bool else 'no' }}"
ENABLE_HEAT: "{{ 'yes' if enable_horizon_heat | bool else 'no' }}"
ENABLE_IRONIC: "{{ 'yes' if enable_horizon_ironic | bool else 'no' }}"
ENABLE_MAGNUM: "{{ 'yes' if enable_horizon_magnum | bool else 'no' }}"
ENABLE_MANILA: "{{ 'yes' if enable_horizon_manila | bool else 'no' }}"
ENABLE_MASAKARI: "{{ 'yes' if enable_horizon_masakari | bool else 'no' }}"
ENABLE_MISTRAL: "{{ 'yes' if enable_horizon_mistral | bool else 'no' }}"
- ENABLE_MONASCA: "{{ 'yes' if enable_horizon_monasca | bool else 'no' }}"
- ENABLE_MURANO: "{{ 'yes' if enable_horizon_murano | bool else 'no' }}"
ENABLE_NEUTRON_VPNAAS: "{{ 'yes' if enable_horizon_neutron_vpnaas | bool else 'no' }}"
ENABLE_OCTAVIA: "{{ 'yes' if enable_horizon_octavia | bool else 'no' }}"
- ENABLE_SAHARA: "{{ 'yes' if enable_horizon_sahara | bool else 'no' }}"
- ENABLE_SENLIN: "{{ 'yes' if enable_horizon_senlin | bool else 'no' }}"
- ENABLE_SOLUM: "{{ 'yes' if enable_horizon_solum | bool else 'no' }}"
ENABLE_TACKER: "{{ 'yes' if enable_horizon_tacker | bool else 'no' }}"
ENABLE_TROVE: "{{ 'yes' if enable_horizon_trove | bool else 'no' }}"
- ENABLE_VITRAGE: "{{ 'yes' if enable_horizon_vitrage | bool else 'no' }}"
ENABLE_WATCHER: "{{ 'yes' if enable_horizon_watcher | bool else 'no' }}"
ENABLE_ZUN: "{{ 'yes' if enable_horizon_zun | bool else 'no' }}"
FORCE_GENERATE: "{{ 'yes' if horizon_dev_mode | bool else 'no' }}"
@@ -50,10 +44,13 @@ horizon_services:
external: false
port: "{{ horizon_port }}"
listen_port: "{{ horizon_listen_port }}"
+ frontend_redirect_extra:
+ - "use_backend acme_client_back if { path_reg ^/.well-known/acme-challenge/.+ }"
horizon_external:
enabled: "{{ enable_horizon }}"
mode: "http"
external: true
+ external_fqdn: "{{ horizon_external_fqdn }}"
port: "{% if kolla_enable_tls_external | bool %}{{ horizon_tls_port }}{% else %}{{ horizon_port }}{% endif %}"
listen_port: "{{ horizon_listen_port }}"
frontend_http_extra:
@@ -65,8 +62,11 @@ horizon_services:
enabled: "{{ enable_horizon | bool and kolla_enable_tls_external | bool }}"
mode: "redirect"
external: true
+ external_fqdn: "{{ horizon_external_fqdn }}"
port: "{{ horizon_port }}"
listen_port: "{{ horizon_listen_port }}"
+ frontend_redirect_extra:
+ - "use_backend acme_client_back if { path_reg ^/.well-known/acme-challenge/.+ }"
acme_client:
enabled: "{{ enable_horizon }}"
with_frontend: false
@@ -101,7 +101,7 @@ horizon_database_shard:
####################
# Docker
####################
-horizon_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/horizon"
+horizon_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}horizon"
horizon_tag: "{{ openstack_tag }}"
horizon_image_full: "{{ horizon_image }}:{{ horizon_tag }}"
@@ -122,13 +122,10 @@ horizon_healthcheck:
horizon_default_volumes:
- "{{ node_config_directory }}/horizon/:{{ container_config_directory }}/:ro"
- - "{{ kolla_dev_repos_directory ~ '/horizon/horizon:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/horizon' if horizon_dev_mode | bool else '' }}"
- - "{{ kolla_dev_repos_directory ~ '/horizon/openstack_dashboard:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/openstack_dashboard' if horizon_dev_mode | bool else '' }}"
- - "{{ kolla_dev_repos_directory ~ '/murano-dashboard/muranodashboard:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/muranodashboard' if horizon_murano_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/horizon:/dev-mode/horizon' if horizon_dev_mode | bool else '' }}"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "/tmp:/tmp"
horizon_extra_volumes: "{{ default_extra_volumes }}"
@@ -150,16 +147,18 @@ horizon_wsgi_threads: 1
# Kolla
####################
horizon_git_repository: "{{ kolla_dev_repos_git }}/{{ project_name }}"
-murano_dashboard_git_repository: "{{ kolla_dev_repos_git }}/murano-dashboard"
horizon_dev_repos_pull: "{{ kolla_dev_repos_pull }}"
horizon_dev_mode: "{{ kolla_dev_mode }}"
-horizon_murano_dev_mode: "{{ kolla_dev_mode }}"
horizon_source_version: "{{ kolla_source_version }}"
-horizon_murano_source_version: "{{ kolla_source_version }}"
# This variable was created for administrators to define which one of the Keystone's URLs should be configured in Horizon.
# In some cases, such as when using OIDC, horizon will need to be configured with Keystone's public URL.
# Therefore, instead of overriding the whole "horizon_keystone_url", this change allows an easier integration because
# the Keystone public URL is already defined with variable "keystone_public_url".
horizon_use_keystone_public_url: False
+
+###################
+# Copy certificates
+###################
+horizon_copy_certs: "{{ kolla_copy_ca_into_containers | bool or horizon_enable_tls_backend | bool }}"
diff --git a/ansible/roles/horizon/handlers/main.yml b/ansible/roles/horizon/handlers/main.yml
index 51b41dc4c2..51a688aa4f 100644
--- a/ansible/roles/horizon/handlers/main.yml
+++ b/ansible/roles/horizon/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "horizon"
service: "{{ horizon_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -13,5 +13,3 @@
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/horizon/tasks/bootstrap.yml b/ansible/roles/horizon/tasks/bootstrap.yml
index ecac253e2c..2ec7e6a780 100644
--- a/ansible/roles/horizon/tasks/bootstrap.yml
+++ b/ansible/roles/horizon/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating Horizon database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -17,6 +18,7 @@
- name: Creating Horizon database user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
diff --git a/ansible/roles/horizon/tasks/bootstrap_service.yml b/ansible/roles/horizon/tasks/bootstrap_service.yml
index 213d71fab1..74454a9911 100644
--- a/ansible/roles/horizon/tasks/bootstrap_service.yml
+++ b/ansible/roles/horizon/tasks/bootstrap_service.yml
@@ -3,7 +3,7 @@
vars:
horizon: "{{ horizon_services['horizon'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -14,7 +14,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_horizon"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ horizon.volumes }}"
run_once: True
delegate_to: "{{ groups[horizon.group][0] }}"
diff --git a/ansible/roles/horizon/tasks/check-containers.yml b/ansible/roles/horizon/tasks/check-containers.yml
index 8bec8a6dd3..b7e2f7c29f 100644
--- a/ansible/roles/horizon/tasks/check-containers.yml
+++ b/ansible/roles/horizon/tasks/check-containers.yml
@@ -1,20 +1,3 @@
---
-- name: Deploy horizon container
- vars:
- horizon: "{{ horizon_services['horizon'] }}"
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ horizon.container_name }}"
- image: "{{ horizon.image }}"
- environment: "{{ horizon.environment }}"
- volumes: "{{ horizon.volumes }}"
- dimensions: "{{ horizon.dimensions }}"
- healthcheck: "{{ horizon.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[horizon.group]
- - horizon.enabled | bool
- with_dict: "{{ horizon_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/horizon/tasks/clone.yml b/ansible/roles/horizon/tasks/clone.yml
index 6e03a95296..19ded83ee4 100644
--- a/ansible/roles/horizon/tasks/clone.yml
+++ b/ansible/roles/horizon/tasks/clone.yml
@@ -6,13 +6,3 @@
dest: "{{ kolla_dev_repos_directory }}/{{ project_name }}"
update: "{{ horizon_dev_repos_pull }}"
version: "{{ horizon_source_version }}"
-
-- name: Cloning murano-dashboard source repositories for development
- become: true
- git:
- repo: "{{ murano_dashboard_git_repository }}"
- dest: "{{ kolla_dev_repos_directory }}/murano-dashboard"
- update: "{{ horizon_dev_repos_pull }}"
- version: "{{ horizon_murano_source_version }}"
- when:
- - horizon_murano_dev_mode | bool
diff --git a/ansible/roles/horizon/tasks/config.yml b/ansible/roles/horizon/tasks/config.yml
index f302c6b59b..9183c7fc7d 100644
--- a/ansible/roles/horizon/tasks/config.yml
+++ b/ansible/roles/horizon/tasks/config.yml
@@ -7,10 +7,7 @@
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ horizon_services }}"
+ with_dict: "{{ horizon_services | select_services_enabled_and_mapped_to_host }}"
- name: Set empty custom policy
set_fact:
@@ -25,7 +22,6 @@
- { name: "cinder", enabled: "{{ enable_cinder_horizon_policy_file }}" }
- { name: "cloudkitty", enabled: "{{ enable_horizon_cloudkitty }}" }
- { name: "designate", enabled: "{{ enable_horizon_designate }}" }
- - { name: "freezer", enabled: "{{ enable_horizon_freezer }}" }
- { name: "glance", enabled: "{{ enable_glance_horizon_policy_file }}" }
- { name: "heat", enabled: "{{ enable_heat_horizon_policy_file }}" }
- { name: "ironic", enabled: "{{ enable_horizon_ironic }}" }
@@ -34,14 +30,9 @@
- { name: "manila", enabled: "{{ enable_horizon_manila }}" }
- { name: "masakari", enabled: "{{ enable_horizon_masakari }}" }
- { name: "mistral", enabled: "{{ enable_horizon_mistral }}" }
- - { name: "monasca", enabled: "{{ enable_horizon_monasca }}" }
- - { name: "murano", enabled: "{{ enable_horizon_murano }}" }
- { name: "neutron", enabled: "{{ enable_neutron_horizon_policy_file }}" }
- { name: "nova", enabled: "{{ enable_nova_horizon_policy_file }}" }
- { name: "octavia", enabled: "{{ enable_horizon_octavia }}" }
- - { name: "sahara", enabled: "{{ enable_horizon_sahara }}" }
- - { name: "senlin", enabled: "{{ enable_horizon_senlin }}" }
- - { name: "solum", enabled: "{{ enable_horizon_solum }}" }
- { name: "tacker", enabled: "{{ enable_horizon_tacker }}" }
- { name: "trove", enabled: "{{ enable_horizon_trove }}" }
- { name: "watcher", enabled: "{{ enable_horizon_watcher }}" }
@@ -49,21 +40,17 @@
- name: Copying over config.json files for services
become: true
vars:
- horizon: "{{ horizon_services['horizon'] }}"
+ service: "{{ horizon_services['horizon'] }}"
template:
src: "horizon.json.j2"
dest: "{{ node_config_directory }}/horizon/config.json"
mode: "0660"
- when:
- - horizon.enabled | bool
- - inventory_hostname in groups[horizon.group]
- notify:
- - Restart horizon container
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over horizon.conf
become: true
vars:
- horizon: "{{ horizon_services['horizon'] }}"
+ service: "{{ horizon_services['horizon'] }}"
template:
src: "{{ item }}"
dest: "{{ node_config_directory }}/horizon/horizon.conf"
@@ -72,79 +59,60 @@
- "{{ node_custom_config }}/horizon/{{ inventory_hostname }}/horizon.conf"
- "{{ node_custom_config }}/horizon/horizon.conf"
- "horizon.conf.j2"
- when:
- - horizon.enabled | bool
- - inventory_hostname in groups[horizon.group]
- notify:
- - Restart horizon container
+ when: service | service_enabled_and_mapped_to_host
-- name: Copying over local_settings
+- name: Copying over kolla-settings.py
become: true
vars:
- horizon: "{{ horizon_services['horizon'] }}"
+ service: "{{ horizon_services['horizon'] }}"
template:
src: "{{ item }}"
- dest: "{{ node_config_directory }}/horizon/local_settings"
+ dest: "{{ node_config_directory }}/horizon/_9998-kolla-settings.py"
mode: "0660"
with_first_found:
- - "{{ node_custom_config }}/horizon/{{ inventory_hostname }}/local_settings"
- - "{{ node_custom_config }}/horizon/local_settings"
- - "local_settings.j2"
- when:
- - horizon.enabled | bool
- - inventory_hostname in groups[horizon.group]
- notify:
- - Restart horizon container
+ - "{{ node_custom_config }}/horizon/{{ inventory_hostname }}/_9998-kolla-settings.py"
+ - "{{ node_custom_config }}/horizon/_9998-kolla-settings.py"
+ - "_9998-kolla-settings.py.j2"
+ when: service | service_enabled_and_mapped_to_host
-- name: Copying over custom_local_settings
+- name: Copying over custom-settings.py
become: true
vars:
- horizon: "{{ horizon_services['horizon'] }}"
+ service: "{{ horizon_services['horizon'] }}"
template:
src: "{{ item }}"
- dest: "{{ node_config_directory }}/horizon/custom_local_settings"
+ dest: "{{ node_config_directory }}/horizon/_9999-custom-settings.py"
mode: "0660"
with_first_found:
- - "{{ node_custom_config }}/horizon/{{ inventory_hostname }}/custom_local_settings"
- - "{{ node_custom_config }}/horizon/custom_local_settings"
- - "custom_local_settings.j2"
- when:
- - horizon.enabled | bool
- - inventory_hostname in groups[horizon.group]
- notify:
- - Restart horizon container
+ - "{{ node_custom_config }}/horizon/{{ inventory_hostname }}/_9999-custom-settings.py"
+ - "{{ node_custom_config }}/horizon/_9999-custom-settings.py"
+ - "_9999-custom-settings.py.j2"
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over existing policy file
become: true
vars:
- horizon: "{{ horizon_services['horizon'] }}"
+ service: "{{ horizon_services['horizon'] }}"
template:
src: "{{ item }}"
dest: "{{ node_config_directory }}/horizon/{{ item | basename }}"
mode: "0660"
- when:
- - horizon.enabled | bool
- - inventory_hostname in groups[horizon.group]
+ when: service | service_enabled_and_mapped_to_host
with_items: "{{ custom_policy }}"
- notify:
- - Restart horizon container
- name: Copying over custom themes
become: true
vars:
- horizon: "{{ horizon_services['horizon'] }}"
+ service: "{{ horizon_services['horizon'] }}"
copy:
src: "{{ node_custom_config }}/horizon/themes/{{ item.name }}"
dest: "{{ node_config_directory }}/horizon/themes/"
mode: 0660
when:
- - horizon.enabled | bool
- - inventory_hostname in groups[horizon.group]
+ - service | service_enabled_and_mapped_to_host
- horizon_custom_themes | length > 0
with_items: "{{ horizon_custom_themes }}"
- notify:
- - Restart horizon container
- include_tasks: copy-certs.yml
when:
- - kolla_copy_ca_into_containers | bool or horizon_enable_tls_backend | bool
+ - horizon_copy_certs
diff --git a/ansible/roles/sahara/tasks/check.yml b/ansible/roles/horizon/tasks/config_validate.yml
similarity index 100%
rename from ansible/roles/sahara/tasks/check.yml
rename to ansible/roles/horizon/tasks/config_validate.yml
diff --git a/ansible/roles/horizon/tasks/policy_item.yml b/ansible/roles/horizon/tasks/policy_item.yml
index 7350dc7554..708f60e6ef 100644
--- a/ansible/roles/horizon/tasks/policy_item.yml
+++ b/ansible/roles/horizon/tasks/policy_item.yml
@@ -20,6 +20,6 @@
- name: Update custom policy file name
set_fact:
- custom_policy: "{{ custom_policy }} + [ '{{ overwritten_files.results.0.stat.path }}' ]"
+ custom_policy: "{{ custom_policy + [overwritten_files.results.0.stat.path] }}"
when:
- overwritten_files.results
diff --git a/ansible/roles/horizon/tasks/precheck.yml b/ansible/roles/horizon/tasks/precheck.yml
index 7e7490a0a4..08f0c21636 100644
--- a/ansible/roles/horizon/tasks/precheck.yml
+++ b/ansible/roles/horizon/tasks/precheck.yml
@@ -8,6 +8,8 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- horizon
register: container_facts
@@ -24,3 +26,25 @@
when:
- container_facts['horizon'] is not defined
- inventory_hostname in groups[horizon.group]
+
+# TODO(mgoddard): Remove in the 2025.1 E release.
+- name: Check for old local_settings file
+ assert:
+ that:
+ - "'{{ node_custom_config }}/horizon/{{ inventory_hostname }}/local_settings' is not exists"
+ - "'{{ node_custom_config }}/horizon/local_settings' is not exists"
+ fail_msg: >-
+ Horizon configuration must now be provided using
+ /etc/kolla/config/horizon/_9998-kolla-settings.py rather than
+ /etc/kolla/config/horizon/local_settings.
+
+# TODO(mgoddard): Remove in the 2025.1 E release.
+- name: Check for old custom_local_settings file
+ assert:
+ that:
+ - "'{{ node_custom_config }}/horizon/{{ inventory_hostname }}/custom_local_settings' is not exists"
+ - "'{{ node_custom_config }}/horizon/custom_local_settings' is not exists"
+ fail_msg: >-
+ Custom horizon configuration must now be provided using
+ /etc/kolla/config/horizon/_9999-custom-settings.py rather than
+ /etc/kolla/config/horizon/custom_local_settings.
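
Operators with existing overrides can satisfy these prechecks by renaming their files; a hypothetical migration, assuming the default node_custom_config of /etc/kolla/config:

# mv /etc/kolla/config/horizon/local_settings \
#    /etc/kolla/config/horizon/_9998-kolla-settings.py
# mv /etc/kolla/config/horizon/custom_local_settings \
#    /etc/kolla/config/horizon/_9999-custom-settings.py
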
diff --git a/ansible/roles/horizon/templates/_9998-kolla-settings.py.j2 b/ansible/roles/horizon/templates/_9998-kolla-settings.py.j2
new file mode 100644
index 0000000000..a15d49004d
--- /dev/null
+++ b/ansible/roles/horizon/templates/_9998-kolla-settings.py.j2
@@ -0,0 +1,381 @@
+DEBUG = {{ horizon_logging_debug }}
+TEMPLATE_DEBUG = DEBUG
+COMPRESS_OFFLINE = True
+WEBROOT = '/'
+ALLOWED_HOSTS = ['*']
+
+{% if horizon_backend_database | bool %}
+SESSION_ENGINE = 'django.contrib.sessions.backends.db'
+DATABASES = {
+ 'default': {
+ 'ENGINE': 'django.db.backends.mysql',
+ 'NAME': '{{ horizon_database_name }}',
+ 'USER': '{{ horizon_database_user }}',
+ 'PASSWORD': '{{ horizon_database_password }}',
+ 'HOST': '{{ database_address }}',
+ 'PORT': '{{ database_port }}'
+ }
+}
+{% elif groups['memcached'] | length > 0 and not horizon_backend_database | bool %}
+SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
+CACHES['default']['LOCATION'] = [{% for host in groups['memcached'] %}'{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ memcached_port }}'{% if not loop.last %},{% endif %}{% endfor %}]
+{% endif %}
+
+{% if kolla_enable_tls_external | bool or kolla_enable_tls_internal | bool %}
+SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
+CSRF_COOKIE_SECURE = True
+SESSION_COOKIE_SECURE = True
+{% endif %}
+
+OPENSTACK_API_VERSIONS = {
+ "identity": 3,
+}
+
+OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = {{ horizon_keystone_multidomain | bool }}
+OPENSTACK_KEYSTONE_DOMAIN_DROPDOWN = {{ 'True' if horizon_keystone_domain_choices|length > 1 else 'False' }}
+OPENSTACK_KEYSTONE_DOMAIN_CHOICES = (
+{% for key, value in horizon_keystone_domain_choices.items() %}
+ ('{{ key }}', '{{ value }}'),
+{% endfor %}
+)
+
+LOCAL_PATH = '/tmp'
+SECRET_KEY='{{ horizon_secret_key }}'
+
+EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
+
+{% if multiple_regions_names|length > 1 %}
+AVAILABLE_REGIONS = [
+{% for region_name in multiple_regions_names %}
+ ('{{ keystone_internal_url }}', '{{ region_name }}'),
+{% endfor %}
+]
+{% endif %}
+
+OPENSTACK_HOST = "{{ kolla_internal_fqdn }}"
+# TODO(fprzewozn): URL /v3 suffix is required until Horizon bug #2073639 is resolved
+OPENSTACK_KEYSTONE_URL = "{{ horizon_keystone_url }}/v3"
+OPENSTACK_KEYSTONE_DEFAULT_ROLE = "{{ keystone_default_user_role }}"
+
+{% if enable_keystone_federation | bool %}
+WEBSSO_ENABLED = True
+WEBSSO_KEYSTONE_URL = "{{ keystone_public_url }}/v3"
+WEBSSO_CHOICES = (
+ ("credentials", _("Keystone Credentials")),
+ {% for idp in keystone_identity_providers %}
+ ("{{ idp.name }}_{{ idp.protocol }}", "{{ idp.public_name }}"),
+ {% endfor %}
+)
+WEBSSO_IDP_MAPPING = {
+{% for idp in keystone_identity_providers %}
+ "{{ idp.name }}_{{ idp.protocol }}": ("{{ idp.name }}", "{{ idp.protocol }}"),
+{% endfor %}
+}
+{% endif %}
+
+{% if openstack_cacert == "" %}
+{% else %}
+OPENSTACK_SSL_CACERT = '{{ openstack_cacert }}'
+{% endif %}
+
+OPENSTACK_KEYSTONE_BACKEND = {
+ 'name': 'native',
+ 'can_edit_user': True,
+ 'can_edit_group': True,
+ 'can_edit_project': True,
+ 'can_edit_domain': True,
+ 'can_edit_role': True,
+}
+
+OPENSTACK_HYPERVISOR_FEATURES = {
+ 'can_set_mount_point': False,
+ 'can_set_password': False,
+ 'requires_keypair': False,
+ 'enable_quotas': True
+}
+
+OPENSTACK_CINDER_FEATURES = {
+ 'enable_backup': {{ 'True' if enable_cinder_backup | bool else 'False' }},
+}
+
+OPENSTACK_NEUTRON_NETWORK = {
+ 'enable_router': True,
+ 'enable_quotas': True,
+ 'enable_ipv6': True,
+ 'enable_distributed_router': False,
+ 'enable_ha_router': False,
+ 'enable_lb': True,
+ 'enable_firewall': True,
+ 'enable_vpn': True,
+ 'enable_fip_topology_check': True,
+ 'supported_vnic_types': ['*'],
+}
+
+OPENSTACK_HEAT_STACK = {
+ 'enable_user_pass': True,
+}
+
+
+IMAGE_CUSTOM_PROPERTY_TITLES = {
+ "architecture": _("Architecture"),
+ "kernel_id": _("Kernel ID"),
+ "ramdisk_id": _("Ramdisk ID"),
+ "image_state": _("Euca2ools state"),
+ "project_id": _("Project ID"),
+ "image_type": _("Image Type"),
+}
+
+IMAGE_RESERVED_CUSTOM_PROPERTIES = []
+HORIZON_IMAGES_UPLOAD_MODE = 'direct'
+OPENSTACK_ENDPOINT_TYPE = "internalURL"
+API_RESULT_LIMIT = 1000
+API_RESULT_PAGE_SIZE = 20
+SWIFT_FILE_TRANSFER_CHUNK_SIZE = 512 * 1024
+DROPDOWN_MAX_ITEMS = 30
+TIME_ZONE = "UTC"
+POLICY_FILES_PATH = '/etc/openstack-dashboard'
+
+{% if horizon_custom_themes | length > 0 %}
+AVAILABLE_THEMES = [
+ ('default', 'Default', 'themes/default'),
+ ('material', 'Material', 'themes/material'),
+{% for theme in horizon_custom_themes %}
+ ('{{ theme.name|e }}', '{{ theme.label|e }}', '/etc/openstack-dashboard/themes/{{ theme.name|e }}'),
+{% endfor %}
+]
+{% endif %}
+
+LOGGING = {
+ 'version': 1,
+ 'disable_existing_loggers': False,
+ 'formatters': {
+ 'operation': {
+ # The format of "%(message)s" is defined by
+ # OPERATION_LOG_OPTIONS['format']
+ 'format': '%(asctime)s %(message)s'
+ },
+ },
+ 'handlers': {
+ 'null': {
+ 'level': 'DEBUG',
+ 'class': 'logging.NullHandler',
+ },
+ 'console': {
+ # Set the level to "DEBUG" for verbose output logging.
+ 'level': 'INFO',
+ 'class': 'logging.StreamHandler',
+ },
+ 'operation': {
+ 'level': 'INFO',
+ 'class': 'logging.StreamHandler',
+ 'formatter': 'operation',
+ },
+ },
+ 'loggers': {
+ # Logging from django.db.backends is VERY verbose, send to null
+ # by default.
+ 'django.db.backends': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ 'requests': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ 'horizon': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'horizon.operation_log': {
+ 'handlers': ['operation'],
+ 'level': 'INFO',
+ 'propagate': False,
+ },
+ 'openstack_dashboard': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'novaclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'cinderclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'keystoneclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'glanceclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'neutronclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'heatclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'ceilometerclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'swiftclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'openstack_auth': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'nose.plugins.manager': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'django': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'iso8601': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ 'scss': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ },
+}
+
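+# 'direction' should not be specified for all_tcp/udp/icmp; it is specified
+# in the form.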
+SECURITY_GROUP_RULES = {
+ 'all_tcp': {
+ 'name': _('All TCP'),
+ 'ip_protocol': 'tcp',
+ 'from_port': '1',
+ 'to_port': '65535',
+ },
+ 'all_udp': {
+ 'name': _('All UDP'),
+ 'ip_protocol': 'udp',
+ 'from_port': '1',
+ 'to_port': '65535',
+ },
+ 'all_icmp': {
+ 'name': _('All ICMP'),
+ 'ip_protocol': 'icmp',
+ 'from_port': '-1',
+ 'to_port': '-1',
+ },
+ 'ssh': {
+ 'name': 'SSH',
+ 'ip_protocol': 'tcp',
+ 'from_port': '22',
+ 'to_port': '22',
+ },
+ 'smtp': {
+ 'name': 'SMTP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '25',
+ 'to_port': '25',
+ },
+ 'dns': {
+ 'name': 'DNS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '53',
+ 'to_port': '53',
+ },
+ 'http': {
+ 'name': 'HTTP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '80',
+ 'to_port': '80',
+ },
+ 'pop3': {
+ 'name': 'POP3',
+ 'ip_protocol': 'tcp',
+ 'from_port': '110',
+ 'to_port': '110',
+ },
+ 'imap': {
+ 'name': 'IMAP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '143',
+ 'to_port': '143',
+ },
+ 'ldap': {
+ 'name': 'LDAP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '389',
+ 'to_port': '389',
+ },
+ 'https': {
+ 'name': 'HTTPS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '443',
+ 'to_port': '443',
+ },
+ 'smtps': {
+ 'name': 'SMTPS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '465',
+ 'to_port': '465',
+ },
+ 'imaps': {
+ 'name': 'IMAPS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '993',
+ 'to_port': '993',
+ },
+ 'pop3s': {
+ 'name': 'POP3S',
+ 'ip_protocol': 'tcp',
+ 'from_port': '995',
+ 'to_port': '995',
+ },
+ 'ms_sql': {
+ 'name': 'MS SQL',
+ 'ip_protocol': 'tcp',
+ 'from_port': '1433',
+ 'to_port': '1433',
+ },
+ 'mysql': {
+ 'name': 'MYSQL',
+ 'ip_protocol': 'tcp',
+ 'from_port': '3306',
+ 'to_port': '3306',
+ },
+ 'rdp': {
+ 'name': 'RDP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '3389',
+ 'to_port': '3389',
+ },
+}
+
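+# Settings that must be made available to the AngularJS client side;
+# removing entries risks breaking built-in horizon features.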
+REST_API_REQUIRED_SETTINGS = [
+ 'CREATE_IMAGE_DEFAULTS',
+ 'DEFAULT_BOOT_SOURCE',
+ 'ENFORCE_PASSWORD_CHECK',
+ 'LAUNCH_INSTANCE_DEFAULTS',
+ 'OPENSTACK_HYPERVISOR_FEATURES',
+ 'OPENSTACK_IMAGE_FORMATS',
+ 'OPENSTACK_KEYSTONE_BACKEND',
+ 'OPENSTACK_KEYSTONE_DEFAULT_DOMAIN',
+]
+
diff --git a/ansible/roles/horizon/templates/custom_local_settings.j2 b/ansible/roles/horizon/templates/_9999-custom-settings.py.j2
similarity index 100%
rename from ansible/roles/horizon/templates/custom_local_settings.j2
rename to ansible/roles/horizon/templates/_9999-custom-settings.py.j2
diff --git a/ansible/roles/horizon/templates/horizon.conf.j2 b/ansible/roles/horizon/templates/horizon.conf.j2
index 3d7aa08ec4..e5f851aeba 100644
--- a/ansible/roles/horizon/templates/horizon.conf.j2
+++ b/ansible/roles/horizon/templates/horizon.conf.j2
@@ -15,7 +15,8 @@ TraceEnable off
LogLevel warn
- ErrorLog /var/log/kolla/horizon/horizon.log
+ ErrorLogFormat "%{cu}t %M"
+ ErrorLog /var/log/kolla/horizon/horizon-error.log
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
CustomLog /var/log/kolla/horizon/horizon-access.log logformat
@@ -30,6 +31,10 @@ TraceEnable off
Require all granted
+
+ Require local
+
+
Alias /static {{ python_path }}/static
SetHandler None
@@ -40,6 +45,9 @@ TraceEnable off
SSLCertificateFile /etc/horizon/certs/horizon-cert.pem
SSLCertificateKeyFile /etc/horizon/certs/horizon-key.pem
{% endif %}
+{% if horizon_httpd_limitrequestbody is defined %}
+ LimitRequestBody {{ horizon_httpd_limitrequestbody }}
+{% endif %}
diff --git a/ansible/roles/horizon/templates/horizon.json.j2 b/ansible/roles/horizon/templates/horizon.json.j2
index 64fe58152a..1a0786196b 100644
--- a/ansible/roles/horizon/templates/horizon.json.j2
+++ b/ansible/roles/horizon/templates/horizon.json.j2
@@ -19,14 +19,14 @@
},
{% endfor %}
{
- "source": "{{ container_config_directory }}/local_settings",
- "dest": "/etc/openstack-dashboard/local_settings",
+ "source": "{{ container_config_directory }}/_9998-kolla-settings.py",
+ "dest": "/etc/openstack-dashboard/local_settings.d/_9998-kolla-settings.py",
"owner": "horizon",
"perm": "0600"
},
{
- "source": "{{ container_config_directory }}/custom_local_settings",
- "dest": "/etc/openstack-dashboard/custom_local_settings",
+ "source": "{{ container_config_directory }}/_9999-custom-settings.py",
+ "dest": "/etc/openstack-dashboard/local_settings.d/_9999-custom-settings.py",
"owner": "horizon",
"perm": "0600"
}{% if horizon_enable_tls_backend | bool %},
@@ -48,5 +48,12 @@
"owner": "horizon",
"perm": "0600"
}{% endif %}
+ {% if horizon_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/horizon/templates/local_settings.j2 b/ansible/roles/horizon/templates/local_settings.j2
deleted file mode 100644
index 8ca6b74484..0000000000
--- a/ansible/roles/horizon/templates/local_settings.j2
+++ /dev/null
@@ -1,869 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import os
-
-from django.utils.translation import ugettext_lazy as _
-
-from openstack_dashboard import exceptions
-from openstack_dashboard.settings import HORIZON_CONFIG
-
-DEBUG = {{ horizon_logging_debug }}
-TEMPLATE_DEBUG = DEBUG
-
-COMPRESS_OFFLINE = True
-
-# WEBROOT is the location relative to the Webserver root and
-# should end with a slash.
-WEBROOT = '/'
-#LOGIN_URL = WEBROOT + 'auth/login/'
-#LOGOUT_URL = WEBROOT + 'auth/logout/'
-#
-# LOGIN_REDIRECT_URL can be used as an alternative for
-# HORIZON_CONFIG.user_home, if user_home is not set.
-# Do not set it to '/home/', as this will cause a circular redirect loop
-#LOGIN_REDIRECT_URL = WEBROOT
-
-# If horizon is running in production (DEBUG is False), set this
-# with the list of host/domain names that the application can serve.
-# For more information see:
-# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
-ALLOWED_HOSTS = ['*']
-
-{% if horizon_backend_database | bool %}
-SESSION_ENGINE = 'django.contrib.sessions.backends.db'
-DATABASES = {
- 'default': {
- 'ENGINE': 'django.db.backends.mysql',
- 'NAME': '{{ horizon_database_name }}',
- 'USER': '{{ horizon_database_user }}',
- 'PASSWORD': '{{ horizon_database_password }}',
- 'HOST': '{{ database_address }}',
- 'PORT': '{{ database_port }}'
- }
-}
-{% endif %}
-
-# Set SSL proxy settings:
-# Pass this header from the proxy after terminating the SSL,
-# and don't forget to strip it from the client's request.
-# For more information see:
-# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
-#SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
-
-# If Horizon is being served through SSL, then uncomment the following two
-# settings to better secure the cookies from security exploits
-#CSRF_COOKIE_SECURE = True
-#SESSION_COOKIE_SECURE = True
-
-{% if kolla_enable_tls_external | bool or kolla_enable_tls_internal | bool %}
-SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
-CSRF_COOKIE_SECURE = True
-SESSION_COOKIE_SECURE = True
-{% endif %}
-
-# The absolute path to the directory where message files are collected.
-# The message file must have a .json file extension. When the user logs in to
-# horizon, the collected message files are processed and displayed to the user.
-#MESSAGES_PATH=None
-
-# Overrides for OpenStack API versions. Use this setting to force the
-# OpenStack dashboard to use a specific API version for a given service API.
-# Versions specified here should be integers or floats, not strings.
-# NOTE: The version should be formatted as it appears in the URL for the
-# service API. For example, the identity service APIs have inconsistent
-# use of the decimal point, so valid options would be 2.0 or 3.
-# Minimum compute version to get the instance locked status is 2.9.
-#OPENSTACK_API_VERSIONS = {
-# "data-processing": 1.1,
-# "identity": 3,
-# "volume": 2,
-# "compute": 2,
-#}
-
-OPENSTACK_API_VERSIONS = {
- "identity": 3,
-}
-
-# Set this to True if running on a multi-domain model. When this is enabled, it
-# will require the user to enter the Domain name in addition to the username
-# for login.
-OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = {{ horizon_keystone_multidomain | bool }}
-
-# Set this to True if you want available domains displayed as a dropdown menu
-# on the login screen. It is strongly advised NOT to enable this for public
-# clouds, as advertising enabled domains to unauthenticated customers
-# irresponsibly exposes private information. This should only be used for
-# private clouds where the dashboard sits behind a corporate firewall.
-OPENSTACK_KEYSTONE_DOMAIN_DROPDOWN = {{ 'True' if horizon_keystone_domain_choices|length > 1 else 'False' }}
-
-# If OPENSTACK_KEYSTONE_DOMAIN_DROPDOWN is enabled, this option can be used to
-# set the available domains to choose from. This is a list of pairs whose first
-# value is the domain name and the second is the display name.
-OPENSTACK_KEYSTONE_DOMAIN_CHOICES = (
-{% for key, value in horizon_keystone_domain_choices.items() %}
- ('{{ key }}', '{{ value }}'),
-{% endfor %}
-)
-
-# Overrides the default domain used when running on single-domain model
-# with Keystone V3. All entities will be created in the default domain.
-# NOTE: This value must be the ID of the default domain, NOT the name.
-# Also, you will most likely have a value in the keystone policy file like this
-# "cloud_admin": "rule:admin_required and domain_id:"
-# This value must match the domain id specified there.
-#OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'default'
-
-# Set this to True to enable panels that provide the ability for users to
-# manage Identity Providers (IdPs) and establish a set of rules to map
-# federation protocol attributes to Identity API attributes.
-# This extension requires v3.0+ of the Identity API.
-#OPENSTACK_KEYSTONE_FEDERATION_MANAGEMENT = False
-
-# Set Console type:
-# valid options are "AUTO"(default), "VNC", "SPICE", "RDP", "SERIAL" or None
-# Set to None explicitly if you want to deactivate the console.
-#CONSOLE_TYPE = "AUTO"
-
-# If provided, a "Report Bug" link will be displayed in the site header
-# which links to the value of this setting (ideally a URL containing
-# information on how to report issues).
-#HORIZON_CONFIG["bug_url"] = "http://bug-report.example.com"
-
-# Show backdrop element outside the modal, do not close the modal
-# after clicking on backdrop.
-#HORIZON_CONFIG["modal_backdrop"] = "static"
-
-# Specify a regular expression to validate user passwords.
-#HORIZON_CONFIG["password_validator"] = {
-# "regex": '.*',
-# "help_text": _("Your password does not meet the requirements."),
-#}
-
-# Disable simplified floating IP address management for deployments with
-# multiple floating IP pools or complex network requirements.
-#HORIZON_CONFIG["simple_ip_management"] = False
-
-# Turn off browser autocompletion for forms including the login form and
-# the database creation workflow if so desired.
-#HORIZON_CONFIG["password_autocomplete"] = "off"
-
-# Setting this to True will disable the reveal button for password fields,
-# including on the login form.
-#HORIZON_CONFIG["disable_password_reveal"] = False
-
-LOCAL_PATH = '/tmp'
-
-# Set custom secret key:
-# You can either set it to a specific value or you can let horizon generate a
-# default secret key that is unique on this machine, i.e. regardless of the
-# number of Python WSGI workers (if used behind Apache+mod_wsgi). However,
-# there may be situations where you would want to set this explicitly, e.g.
-# when multiple dashboard instances are distributed on different machines
-# (usually behind a load-balancer). Either you have to make sure that a session
-# gets all requests routed to the same dashboard instance or you set the same
-# SECRET_KEY for all of them.
-SECRET_KEY='{{ horizon_secret_key }}'
-
-# We recommend you use memcached for development; otherwise after every reload
-# of the django development server, you will have to log in again. To use
-# memcached set CACHES to something like
-#CACHES = {
-# 'default': {
-# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
-# 'LOCATION': '127.0.0.1:11211',
-# },
-#}
-
-{% if groups['memcached'] | length > 0 and horizon_backend_database | bool == False %}
-SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
-CACHES = {
- 'default': {
- 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
- 'LOCATION': [{% for host in groups['memcached'] %}'{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}'{% if not loop.last %},{% endif %}{% endfor %}]
- }
-}
-{% endif %}
-
-# Send email to the console by default
-EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
-# Or send them to /dev/null
-#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
-
-# Configure these for your outgoing email host
-#EMAIL_HOST = 'smtp.my-company.com'
-#EMAIL_PORT = 25
-#EMAIL_HOST_USER = 'djangomail'
-#EMAIL_HOST_PASSWORD = 'top-secret!'
-
-{% if multiple_regions_names|length > 1 %}
-# For multiple regions uncomment this configuration, and add (endpoint, title).
-AVAILABLE_REGIONS = [
-{% for region_name in multiple_regions_names %}
- ('{{ keystone_internal_url }}', '{{ region_name }}'),
-{% endfor %}
-]
-{% endif %}
-
-OPENSTACK_HOST = "{{ kolla_internal_fqdn }}"
-
-OPENSTACK_KEYSTONE_URL = "{{ horizon_keystone_url }}"
-OPENSTACK_KEYSTONE_DEFAULT_ROLE = "{{ keystone_default_user_role }}"
-
-{% if enable_keystone_federation | bool %}
-# Enables keystone web single-sign-on if set to True.
-WEBSSO_ENABLED = True
-
-# Determines which authentication choice to show as default.
-#WEBSSO_INITIAL_CHOICE = "credentials"
-
-# The list of authentication mechanisms which include keystone
-# federation protocols and identity provider/federation protocol
-# mapping keys (WEBSSO_IDP_MAPPING). Current supported protocol
-# IDs are 'saml2' and 'oidc' which represent SAML 2.0, OpenID
-# Connect respectively.
-# Do not remove the mandatory credentials mechanism.
-# Note: The last two tuples are sample mapping keys to an identity provider
-# and federation protocol combination (WEBSSO_IDP_MAPPING).
-WEBSSO_KEYSTONE_URL = "{{ keystone_public_url }}"
-WEBSSO_CHOICES = (
- ("credentials", _("Keystone Credentials")),
- {% for idp in keystone_identity_providers %}
- ("{{ idp.name }}_{{ idp.protocol }}", "{{ idp.public_name }}"),
- {% endfor %}
-)
-
-# A dictionary of specific identity provider and federation protocol
-# combinations. From the selected authentication mechanism, the value
-# will be looked up as keys in the dictionary. If a match is found,
-# it will redirect the user to an identity provider and federation protocol
-# specific WebSSO endpoint in keystone, otherwise it will use the value
-# as the protocol_id when redirecting to the WebSSO by protocol endpoint.
-# NOTE: The value is expected to be a tuple formatted as: (, ).
-WEBSSO_IDP_MAPPING = {
-{% for idp in keystone_identity_providers %}
- "{{ idp.name }}_{{ idp.protocol }}": ("{{ idp.name }}", "{{ idp.protocol }}"),
-{% endfor %}
-}
-{% endif %}
-
-# Disable SSL certificate checks (useful for self-signed certificates):
-#OPENSTACK_SSL_NO_VERIFY = True
-
-# The CA certificate to use to verify SSL connections
-{% if openstack_cacert == "" %}
-#OPENSTACK_SSL_CACERT = '/path/to/cacert.pem'
-{% else %}
-OPENSTACK_SSL_CACERT = '{{ openstack_cacert }}'
-{% endif %}
-
-# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
-# capabilities of the auth backend for Keystone.
-# If Keystone has been configured to use LDAP as the auth backend then set
-# can_edit_user to False and name to 'ldap'.
-#
-# TODO(tres): Remove these once Keystone has an API to identify auth backend.
-OPENSTACK_KEYSTONE_BACKEND = {
- 'name': 'native',
- 'can_edit_user': True,
- 'can_edit_group': True,
- 'can_edit_project': True,
- 'can_edit_domain': True,
- 'can_edit_role': True,
-}
-
-# Setting this to True, will add a new "Retrieve Password" action on instance,
-# allowing Admin session password retrieval/decryption.
-#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False
-
-# The Launch Instance user experience has been significantly enhanced.
-# You can choose whether to enable the new launch instance experience,
-# the legacy experience, or both. The legacy experience will be removed
-# in a future release, but is available as a temporary backup setting to ensure
-# compatibility with existing deployments. Further development will not be
-# done on the legacy experience. Please report any problems with the new
-# experience via the Launchpad tracking system.
-#
-# Toggle LAUNCH_INSTANCE_LEGACY_ENABLED and LAUNCH_INSTANCE_NG_ENABLED to
-# determine the experience to enable. Set them both to true to enable
-# both.
-#LAUNCH_INSTANCE_LEGACY_ENABLED = True
-#LAUNCH_INSTANCE_NG_ENABLED = False
-
-# A dictionary of settings which can be used to provide the default values for
-# properties found in the Launch Instance modal.
-#LAUNCH_INSTANCE_DEFAULTS = {
-# 'config_drive': False,
-# 'enable_scheduler_hints': True
-#}
-
-# The Xen Hypervisor has the ability to set the mount point for volumes
-# attached to instances (other Hypervisors currently do not). Setting
-# can_set_mount_point to True will add the option to set the mount point
-# from the UI.
-OPENSTACK_HYPERVISOR_FEATURES = {
- 'can_set_mount_point': False,
- 'can_set_password': False,
- 'requires_keypair': False,
- 'enable_quotas': True
-}
-
-# The OPENSTACK_CINDER_FEATURES settings can be used to enable optional
-# services provided by cinder that are not exposed by its extension API.
-OPENSTACK_CINDER_FEATURES = {
- 'enable_backup': {{ 'True' if enable_cinder_backup | bool else 'False' }},
-}
-
-# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional
-# services provided by neutron. Options currently available are load
-# balancer service, security groups, quotas, VPN service.
-OPENSTACK_NEUTRON_NETWORK = {
- 'enable_router': True,
- 'enable_quotas': True,
- 'enable_ipv6': True,
- 'enable_distributed_router': False,
- 'enable_ha_router': False,
- 'enable_lb': True,
- 'enable_firewall': True,
- 'enable_vpn': True,
- 'enable_fip_topology_check': True,
-
- # Default dns servers you would like to use when a subnet is
- # created. This is only a default, users can still choose a different
- # list of dns servers when creating a new subnet.
- # The entries below are examples only, and are not appropriate for
- # real deployments
- # 'default_dns_nameservers': ["8.8.8.8", "8.8.4.4", "208.67.222.222"],
-
- # The profile_support option is used to detect if an external router can be
- # configured via the dashboard. When using specific plugins the
- # profile_support can be turned on if needed.
- 'profile_support': None,
- #'profile_support': 'cisco',
-
- # Set which provider network types are supported. Only the network types
- # in this list will be available to choose from when creating a network.
- # Network types include local, flat, vlan, gre, vxlan and geneve.
- # 'supported_provider_types': ['*'],
-
- # You can configure available segmentation ID range per network type
- # in your deployment.
- # 'segmentation_id_range': {
- # 'vlan': [1024, 2048],
- # 'vxlan': [4094, 65536],
- # },
-
- # You can define additional provider network types here.
- # 'extra_provider_types': {
- # 'awesome_type': {
- # 'display_name': 'Awesome New Type',
- # 'require_physical_network': False,
- # 'require_segmentation_id': True,
- # }
- # },
-
- # Set which VNIC types are supported for port binding. Only the VNIC
- # types in this list will be available to choose from when creating a
- # port.
- # VNIC types include 'normal', 'macvtap' and 'direct'.
- # Set to empty list or None to disable VNIC type selection.
- 'supported_vnic_types': ['*'],
-}
-
-# The OPENSTACK_HEAT_STACK settings can be used to disable the password
-# field required when launching the stack.
-OPENSTACK_HEAT_STACK = {
- 'enable_user_pass': True,
-}
-
-# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
-# in the OpenStack Dashboard related to the Image service, such as the list
-# of supported image formats.
-#OPENSTACK_IMAGE_BACKEND = {
-# 'image_formats': [
-# ('', _('Select format')),
-# ('aki', _('AKI - Amazon Kernel Image')),
-# ('ami', _('AMI - Amazon Machine Image')),
-# ('ari', _('ARI - Amazon Ramdisk Image')),
-# ('docker', _('Docker')),
-# ('iso', _('ISO - Optical Disk Image')),
-# ('ova', _('OVA - Open Virtual Appliance')),
-# ('qcow2', _('QCOW2 - QEMU Emulator')),
-# ('raw', _('Raw')),
-# ('vdi', _('VDI - Virtual Disk Image')),
-# ('vhd', _('VHD - Virtual Hard Disk')),
-# ('vmdk', _('VMDK - Virtual Machine Disk')),
-# ],
-#}
-
-# The IMAGE_CUSTOM_PROPERTY_TITLES setting is used to customize the titles for
-# image custom property attributes that appear on image detail pages.
-IMAGE_CUSTOM_PROPERTY_TITLES = {
- "architecture": _("Architecture"),
- "kernel_id": _("Kernel ID"),
- "ramdisk_id": _("Ramdisk ID"),
- "image_state": _("Euca2ools state"),
- "project_id": _("Project ID"),
- "image_type": _("Image Type"),
-}
-
-# The IMAGE_RESERVED_CUSTOM_PROPERTIES setting is used to specify which image
-# custom properties should not be displayed in the Image Custom Properties
-# table.
-IMAGE_RESERVED_CUSTOM_PROPERTIES = []
-
-# Set to 'legacy' or 'direct' to allow users to upload images to glance via
-# Horizon server. When enabled, a file form field will appear on the create
-# image form. If set to 'off', there will be no file form field on the create
-# image form. See documentation for deployment considerations.
-#HORIZON_IMAGES_UPLOAD_MODE = 'legacy'
-
-# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
-# in the Keystone service catalog. Use this setting when Horizon is running
-# external to the OpenStack environment. The default is 'publicURL'.
-OPENSTACK_ENDPOINT_TYPE = "internalURL"
-
-# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the
-# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints
-# in the Keystone service catalog. Use this setting when Horizon is running
-# external to the OpenStack environment. The default is None. This
-# value should differ from OPENSTACK_ENDPOINT_TYPE if used.
-#SECONDARY_ENDPOINT_TYPE = None
-
-# The number of objects (Swift containers/objects or images) to display
-# on a single page before providing a paging element (a "more" link)
-# to paginate results.
-API_RESULT_LIMIT = 1000
-API_RESULT_PAGE_SIZE = 20
-
-# The size of chunk in bytes for downloading objects from Swift
-SWIFT_FILE_TRANSFER_CHUNK_SIZE = 512 * 1024
-
-# Specify a maximum number of items to display in a dropdown.
-DROPDOWN_MAX_ITEMS = 30
-
-# The timezone of the server. This should correspond with the timezone
-# of your entire OpenStack installation, and hopefully be in UTC.
-TIME_ZONE = "UTC"
-
-# When launching an instance, the menu of available flavors is
-# sorted by RAM usage, ascending. If you would like a different sort order,
-# you can provide another flavor attribute as sorting key. Alternatively, you
-# can provide a custom callback method to use for sorting. You can also provide
-# a flag for reverse sort. For more info, see
-# http://docs.python.org/2/library/functions.html#sorted
-#CREATE_INSTANCE_FLAVOR_SORT = {
-# 'key': 'name',
-# # or
-# 'key': my_awesome_callback_method,
-# 'reverse': False,
-#}
-
-# Set this to True to display an 'Admin Password' field on the Change Password
-# form to verify that it is indeed the logged-in admin who wants to change
-# the password.
-#ENFORCE_PASSWORD_CHECK = False
-
-# Modules that provide /auth routes that can be used to handle different types
-# of user authentication. Add auth plugins that require extra route handling to
-# this list.
-#AUTHENTICATION_URLS = [
-# 'openstack_auth.urls',
-#]
-
-# The Horizon Policy Enforcement engine uses these values to load per service
-# policy rule files. The content of these files should match the files the
-# OpenStack services are using to determine role based access control in the
-# target installation.
-
-# Path to directory containing policy.json files
-POLICY_FILES_PATH = '/etc/openstack-dashboard'
-
-# Map of local copy of service policy files.
-# Please ensure that your identity policy file matches the one being used on
-# your keystone servers. There is an alternate policy file that may be used
-# in the Keystone v3 multi-domain case, policy.v3cloudsample.json.
-# This file is not included in the Horizon repository by default but can be
-# found at
-# http://opendev.org/openstack/keystone/tree/etc/ \
-# policy.v3cloudsample.json
-# Having matching policy files on the Horizon and Keystone servers is essential
-# for normal operation. This holds true for all services and their policy files.
-#POLICY_FILES = {
-# 'identity': 'keystone_policy.json',
-# 'compute': 'nova_policy.json',
-# 'volume': 'cinder_policy.json',
-# 'image': 'glance_policy.json',
-# 'orchestration': 'heat_policy.json',
-# 'network': 'neutron_policy.json',
-# 'telemetry': 'ceilometer_policy.json',
-#}
-
-# TODO: (david-lyle) remove when plugins support adding settings.
-# Note: Only used when trove-dashboard plugin is configured to be used by
-# Horizon.
-# Trove user and database extension support. By default support for
-# creating users and databases on database instances is turned on.
-# To disable these extensions set the permission here to something
-# unusable such as ["!"].
-#TROVE_ADD_USER_PERMS = []
-#TROVE_ADD_DATABASE_PERMS = []
-
-# Change this setting to the appropriate list of tuples containing
-# a key, label and static directory containing two files:
-# _variables.scss and _styles.scss
-#AVAILABLE_THEMES = [
-# ('default', 'Default', 'themes/default'),
-# ('material', 'Material', 'themes/material'),
-#]
-{% if horizon_custom_themes | length > 0 %}
-AVAILABLE_THEMES = [
- ('default', 'Default', 'themes/default'),
- ('material', 'Material', 'themes/material'),
-{% for theme in horizon_custom_themes %}
- ('{{ theme.name|e }}', '{{ theme.label|e }}', '/etc/openstack-dashboard/themes/{{ theme.name|e }}'),
-{% endfor %}
-]
-{% endif %}
-
-LOGGING = {
- 'version': 1,
- # When set to True this will disable all logging except
- # for loggers specified in this configuration dictionary. Note that
- # if nothing is specified here and disable_existing_loggers is True,
- # django.db.backends will still log unless it is disabled explicitly.
- 'disable_existing_loggers': False,
- 'formatters': {
- 'operation': {
- # The format of "%(message)s" is defined by
- # OPERATION_LOG_OPTIONS['format']
- 'format': '%(asctime)s %(message)s'
- },
- },
- 'handlers': {
- 'null': {
- 'level': 'DEBUG',
- 'class': 'logging.NullHandler',
- },
- 'console': {
- # Set the level to "DEBUG" for verbose output logging.
- 'level': 'INFO',
- 'class': 'logging.StreamHandler',
- },
- 'operation': {
- 'level': 'INFO',
- 'class': 'logging.StreamHandler',
- 'formatter': 'operation',
- },
- },
- 'loggers': {
- # Logging from django.db.backends is VERY verbose, send to null
- # by default.
- 'django.db.backends': {
- 'handlers': ['null'],
- 'propagate': False,
- },
- 'requests': {
- 'handlers': ['null'],
- 'propagate': False,
- },
- 'horizon': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'horizon.operation_log': {
- 'handlers': ['operation'],
- 'level': 'INFO',
- 'propagate': False,
- },
- 'openstack_dashboard': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'novaclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'cinderclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'keystoneclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'glanceclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'neutronclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'heatclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'ceilometerclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'swiftclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'openstack_auth': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'nose.plugins.manager': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'django': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'iso8601': {
- 'handlers': ['null'],
- 'propagate': False,
- },
- 'scss': {
- 'handlers': ['null'],
- 'propagate': False,
- },
- },
-}
-
-# 'direction' should not be specified for all_tcp/udp/icmp.
-# It is specified in the form.
-SECURITY_GROUP_RULES = {
- 'all_tcp': {
- 'name': _('All TCP'),
- 'ip_protocol': 'tcp',
- 'from_port': '1',
- 'to_port': '65535',
- },
- 'all_udp': {
- 'name': _('All UDP'),
- 'ip_protocol': 'udp',
- 'from_port': '1',
- 'to_port': '65535',
- },
- 'all_icmp': {
- 'name': _('All ICMP'),
- 'ip_protocol': 'icmp',
- 'from_port': '-1',
- 'to_port': '-1',
- },
- 'ssh': {
- 'name': 'SSH',
- 'ip_protocol': 'tcp',
- 'from_port': '22',
- 'to_port': '22',
- },
- 'smtp': {
- 'name': 'SMTP',
- 'ip_protocol': 'tcp',
- 'from_port': '25',
- 'to_port': '25',
- },
- 'dns': {
- 'name': 'DNS',
- 'ip_protocol': 'tcp',
- 'from_port': '53',
- 'to_port': '53',
- },
- 'http': {
- 'name': 'HTTP',
- 'ip_protocol': 'tcp',
- 'from_port': '80',
- 'to_port': '80',
- },
- 'pop3': {
- 'name': 'POP3',
- 'ip_protocol': 'tcp',
- 'from_port': '110',
- 'to_port': '110',
- },
- 'imap': {
- 'name': 'IMAP',
- 'ip_protocol': 'tcp',
- 'from_port': '143',
- 'to_port': '143',
- },
- 'ldap': {
- 'name': 'LDAP',
- 'ip_protocol': 'tcp',
- 'from_port': '389',
- 'to_port': '389',
- },
- 'https': {
- 'name': 'HTTPS',
- 'ip_protocol': 'tcp',
- 'from_port': '443',
- 'to_port': '443',
- },
- 'smtps': {
- 'name': 'SMTPS',
- 'ip_protocol': 'tcp',
- 'from_port': '465',
- 'to_port': '465',
- },
- 'imaps': {
- 'name': 'IMAPS',
- 'ip_protocol': 'tcp',
- 'from_port': '993',
- 'to_port': '993',
- },
- 'pop3s': {
- 'name': 'POP3S',
- 'ip_protocol': 'tcp',
- 'from_port': '995',
- 'to_port': '995',
- },
- 'ms_sql': {
- 'name': 'MS SQL',
- 'ip_protocol': 'tcp',
- 'from_port': '1433',
- 'to_port': '1433',
- },
- 'mysql': {
- 'name': 'MYSQL',
- 'ip_protocol': 'tcp',
- 'from_port': '3306',
- 'to_port': '3306',
- },
- 'rdp': {
- 'name': 'RDP',
- 'ip_protocol': 'tcp',
- 'from_port': '3389',
- 'to_port': '3389',
- },
-}
-
-# Deprecation Notice:
-#
-# The setting FLAVOR_EXTRA_KEYS has been deprecated.
-# Please load extra spec metadata into the Glance Metadata Definition Catalog.
-#
-# The sample quota definitions can be found in:
-# /etc/metadefs/compute-quota.json
-#
-# The metadata definition catalog supports CLI and API:
-# $glance --os-image-api-version 2 help md-namespace-import
-# $glance-manage db_load_metadefs
-#
-# See Metadata Definitions on: https://docs.openstack.org/glance/latest/
-
-# TODO: (david-lyle) remove when plugins support settings natively
-# Note: This is only used when the Sahara plugin is configured and enabled
-# for use in Horizon.
-# Indicate to the Sahara data processing service whether or not
-# automatic floating IP allocation is in effect. If it is not
-# in effect, the user will be prompted to choose a floating IP
-# pool for use in their cluster. False by default. You would want
-# to set this to True if you were running Nova Networking with
-# auto_assign_floating_ip = True.
-#SAHARA_AUTO_IP_ALLOCATION_ENABLED = False
-
-# The hash algorithm to use for authentication tokens. This must
-# match the hash algorithm that the identity server and the
-# auth_token middleware are using. Allowed values are the
-# algorithms supported by Python's hashlib library.
-#OPENSTACK_TOKEN_HASH_ALGORITHM = 'md5'
-
-# AngularJS requires some settings to be made available to
-# the client side. Some settings are required by in-tree / built-in horizon
-# features. These settings must be added to REST_API_REQUIRED_SETTINGS in the
-# form of ['SETTING_1','SETTING_2'], etc.
-#
-# You may remove settings from this list for security purposes, but do so at
-# the risk of breaking a built-in horizon feature. These settings are required
-# for horizon to function properly. Only remove them if you know what you
-# are doing. These settings may in the future be moved to be defined within
-# the enabled panel configuration.
-# You should not add settings to this list for out of tree extensions.
-# See: https://wiki.openstack.org/wiki/Horizon/RESTAPI
-REST_API_REQUIRED_SETTINGS = [
- 'CREATE_IMAGE_DEFAULTS',
- 'DEFAULT_BOOT_SOURCE',
- 'ENFORCE_PASSWORD_CHECK',
- 'LAUNCH_INSTANCE_DEFAULTS',
- 'OPENSTACK_HYPERVISOR_FEATURES',
- 'OPENSTACK_IMAGE_FORMATS',
- 'OPENSTACK_KEYSTONE_BACKEND',
- 'OPENSTACK_KEYSTONE_DEFAULT_DOMAIN',
-]
-
-# Additional settings can be made available to the client side for
-# extensibility by specifying them in REST_API_ADDITIONAL_SETTINGS
-# !! Please use extreme caution as the settings are transferred via HTTP/S
-# and are not encrypted on the browser. This is an experimental API and
-# may be deprecated in the future without notice.
-#REST_API_ADDITIONAL_SETTINGS = []
-
-# DISALLOW_IFRAME_EMBED can be used to prevent Horizon from being embedded
-# within an iframe. Legacy browsers are still vulnerable to a Cross-Frame
-# Scripting (XFS) vulnerability, so this option allows extra security hardening
-# where iframes are not used in deployment. Default setting is True.
-# For more information see:
-# http://tinyurl.com/anticlickjack
-#DISALLOW_IFRAME_EMBED = True
-
-# A help URL can be made available to the client. To provide one, set the
-# following attribute to the URL of your choice.
-#HORIZON_CONFIG["help_url"] = "http://openstack.mycompany.org"
-
-# Settings for OperationLogMiddleware
-# OPERATION_LOG_ENABLED is a flag that enables logging of operations
-# performed in Horizon.
-# mask_fields is the list of fields to mask in the log output.
-# target_methods is the list of HTTP methods to log.
-# format defines the log contents.
-#OPERATION_LOG_ENABLED = False
-#OPERATION_LOG_OPTIONS = {
-# 'mask_fields': ['password'],
-# 'target_methods': ['POST'],
-# 'format': ("[%(domain_name)s] [%(domain_id)s] [%(project_name)s]"
-# " [%(project_id)s] [%(user_name)s] [%(user_id)s] [%(request_scheme)s]"
-# " [%(referer_url)s] [%(request_url)s] [%(message)s] [%(method)s]"
-# " [%(http_status)s] [%(param)s]"),
-#}
-
-# The default date range in the Overview panel meters - either minus N
-# days (if the value is integer N), or from the beginning of the current month
-# until today (if set to None). This setting should be used to limit the amount
-# of data fetched by default when rendering the Overview panel.
-#OVERVIEW_DAYS_RANGE = 1
-
-# To require that admin users provide search criteria before any data is
-# loaded into the admin views, set the following attribute to True.
-#ADMIN_FILTER_DATA_FIRST=False
-
-{% if enable_murano | bool and enable_barbican | bool %}
-KEY_MANAGER = {
- 'auth_url': '{{ keystone_internal_url }}',
- 'username': '{{ murano_keystone_user }}',
- 'user_domain_name': '{{ default_project_domain_name }}',
- 'password': '{{ murano_keystone_password }}',
- 'project_name': 'service',
- 'project_domain_name': '{{ default_project_domain_name }}'
-}
-{% endif %}
-
-# Allow operators to overwrite variables (LP: #1769970).
-from .custom_local_settings import *
diff --git a/ansible/roles/influxdb/defaults/main.yml b/ansible/roles/influxdb/defaults/main.yml
index 1f392317e1..c384e87cf0 100644
--- a/ansible/roles/influxdb/defaults/main.yml
+++ b/ansible/roles/influxdb/defaults/main.yml
@@ -26,7 +26,7 @@ influxdb_enable_tsi: True
####################
# Docker
####################
-influxdb_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/influxdb"
+influxdb_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}influxdb"
influxdb_tag: "{{ openstack_tag }}"
influxdb_image_full: "{{ influxdb_image }}:{{ influxdb_tag }}"
influxdb_dimensions: "{{ default_container_dimensions }}"
diff --git a/ansible/roles/influxdb/handlers/main.yml b/ansible/roles/influxdb/handlers/main.yml
index 493df20750..c395bc951b 100644
--- a/ansible/roles/influxdb/handlers/main.yml
+++ b/ansible/roles/influxdb/handlers/main.yml
@@ -4,12 +4,10 @@
service_name: "influxdb"
service: "{{ influxdb_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/influxdb/tasks/check-containers.yml b/ansible/roles/influxdb/tasks/check-containers.yml
index 8807355df8..b7e2f7c29f 100644
--- a/ansible/roles/influxdb/tasks/check-containers.yml
+++ b/ansible/roles/influxdb/tasks/check-containers.yml
@@ -1,16 +1,3 @@
---
-- name: Check influxdb containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes }}"
- dimensions: "{{ item.value.dimensions }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ influxdb_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/influxdb/tasks/config.yml b/ansible/roles/influxdb/tasks/config.yml
index 841e7aeb7a..315bdd3b78 100644
--- a/ansible/roles/influxdb/tasks/config.yml
+++ b/ansible/roles/influxdb/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ influxdb_services }}"
+ with_dict: "{{ influxdb_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over config.json files
template:
@@ -18,12 +15,7 @@
dest: "{{ node_config_directory }}/influxdb/config.json"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ influxdb_services }}"
- notify:
- - Restart influxdb container
+ with_dict: "{{ influxdb_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over influxdb config file
vars:
@@ -33,12 +25,8 @@
dest: "{{ node_config_directory }}/influxdb/influxdb.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_first_found:
- "{{ node_custom_config }}/influxdb/{{ inventory_hostname }}/influxdb.conf"
- "{{ node_custom_config }}/influxdb.conf"
- "influxdb.conf.j2"
- notify:
- - Restart influxdb container
diff --git a/ansible/roles/senlin/tasks/check.yml b/ansible/roles/influxdb/tasks/config_validate.yml
similarity index 100%
rename from ansible/roles/senlin/tasks/check.yml
rename to ansible/roles/influxdb/tasks/config_validate.yml
diff --git a/ansible/roles/influxdb/tasks/precheck.yml b/ansible/roles/influxdb/tasks/precheck.yml
index 7728a845a9..4629ccc764 100644
--- a/ansible/roles/influxdb/tasks/precheck.yml
+++ b/ansible/roles/influxdb/tasks/precheck.yml
@@ -8,8 +8,11 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- influxdb
+ check_mode: false
register: container_facts
- name: Checking free port for Influxdb Http
diff --git a/ansible/roles/ironic/defaults/main.yml b/ansible/roles/ironic/defaults/main.yml
index ad735938bd..8f5d7bc9f2 100644
--- a/ansible/roles/ironic/defaults/main.yml
+++ b/ansible/roles/ironic/defaults/main.yml
@@ -20,7 +20,8 @@ ironic_services:
enabled: "{{ enable_ironic }}"
mode: "http"
external: true
- port: "{{ ironic_api_port }}"
+ external_fqdn: "{{ ironic_external_fqdn }}"
+ port: "{{ ironic_api_public_port }}"
listen_port: "{{ ironic_api_listen_port }}"
tls_backend: "{{ ironic_enable_tls_backend }}"
ironic-conductor:
@@ -29,7 +30,7 @@ ironic_services:
enabled: true
image: "{{ ironic_conductor_image_full }}"
privileged: True
- volumes: "{{ ironic_conductor_default_volumes + ironic_conductor_extra_volumes }}"
+ volumes: "{{ ironic_conductor_default_volumes + ironic_conductor_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
dimensions: "{{ ironic_conductor_dimensions }}"
healthcheck: "{{ ironic_conductor_healthcheck }}"
ironic-inspector:
@@ -52,7 +53,8 @@ ironic_services:
enabled: "{{ enable_ironic }}"
mode: "http"
external: true
- port: "{{ ironic_inspector_port }}"
+ external_fqdn: "{{ ironic_inspector_external_fqdn }}"
+ port: "{{ ironic_inspector_public_port }}"
listen_port: "{{ ironic_inspector_listen_port }}"
ironic-tftp:
container_name: ironic_tftp
@@ -77,13 +79,27 @@ ironic_services:
ironic-dnsmasq:
container_name: ironic_dnsmasq
group: ironic-inspector
- enabled: true
+ enabled: "{{ enable_ironic_dnsmasq }}"
cap_add:
- NET_ADMIN
+ - NET_RAW
image: "{{ ironic_dnsmasq_image_full }}"
volumes: "{{ ironic_dnsmasq_default_volumes + ironic_dnsmasq_extra_volumes }}"
dimensions: "{{ ironic_dnsmasq_dimensions }}"
+ ironic-prometheus-exporter:
+ container_name: ironic_prometheus_exporter
+ group: ironic-conductor
+ enabled: "{{ enable_ironic_prometheus_exporter }}"
+ image: "{{ ironic_prometheus_exporter_image_full }}"
+ volumes: "{{ ironic_prometheus_exporter_default_volumes + ironic_prometheus_exporter_extra_volumes }}"
+ dimensions: "{{ ironic_prometheus_exporter_dimensions }}"
+####################
+# Config Validate
+####################
+ironic_config_validation:
+ - generator: "/ironic/tools/config/ironic-config-generator.conf"
+ config: "/etc/ironic/ironic.conf"
####################
# Database
@@ -100,6 +116,7 @@ ironic_inspector_database_address: "{{ database_address | put_address_in_context
# Database sharding
####################
ironic_database_shard_root_user: "{% if enable_proxysql | bool %}root_shard_{{ ironic_database_shard_id }}{% else %}{{ database_user }}{% endif %}"
+ironic_database_shard_id: "{{ mariadb_default_database_shard_id | int }}"
ironic_database_shard:
users:
- user: "{{ ironic_database_user }}"
@@ -118,32 +135,37 @@ ironic_database_shard:
####################
ironic_tag: "{{ openstack_tag }}"
-ironic_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/ironic-api"
+ironic_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}ironic-api"
ironic_api_tag: "{{ ironic_tag }}"
ironic_api_image_full: "{{ ironic_api_image }}:{{ ironic_api_tag }}"
-ironic_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/ironic-conductor"
+ironic_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}ironic-conductor"
ironic_conductor_tag: "{{ ironic_tag }}"
ironic_conductor_image_full: "{{ ironic_conductor_image }}:{{ ironic_conductor_tag }}"
-ironic_pxe_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/ironic-pxe"
+ironic_pxe_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}ironic-pxe"
ironic_pxe_tag: "{{ ironic_tag }}"
ironic_pxe_image_full: "{{ ironic_pxe_image }}:{{ ironic_pxe_tag }}"
-ironic_inspector_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/ironic-inspector"
+ironic_inspector_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}ironic-inspector"
ironic_inspector_tag: "{{ ironic_tag }}"
ironic_inspector_image_full: "{{ ironic_inspector_image }}:{{ ironic_inspector_tag }}"
-ironic_dnsmasq_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/dnsmasq"
+ironic_dnsmasq_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}dnsmasq"
ironic_dnsmasq_tag: "{{ ironic_tag }}"
ironic_dnsmasq_image_full: "{{ ironic_dnsmasq_image }}:{{ ironic_dnsmasq_tag }}"
+ironic_prometheus_exporter_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}ironic-prometheus-exporter"
+ironic_prometheus_exporter_tag: "{{ ironic_tag }}"
+ironic_prometheus_exporter_image_full: "{{ ironic_prometheus_exporter_image }}:{{ ironic_prometheus_exporter_tag }}"
+
ironic_api_dimensions: "{{ default_container_dimensions }}"
ironic_conductor_dimensions: "{{ default_container_dimensions }}"
ironic_tftp_dimensions: "{{ default_container_dimensions }}"
ironic_http_dimensions: "{{ default_container_dimensions }}"
ironic_inspector_dimensions: "{{ default_container_dimensions }}"
ironic_dnsmasq_dimensions: "{{ default_container_dimensions }}"
+ironic_prometheus_exporter_dimensions: "{{ default_container_dimensions }}"
ironic_api_enable_healthchecks: "{{ enable_container_healthchecks }}"
ironic_api_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
@@ -202,7 +224,7 @@ ironic_api_default_volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla"
- - "{{ kolla_dev_repos_directory ~ '/ironic/ironic:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/ironic' if ironic_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/ironic:/dev-mode/ironic' if ironic_dev_mode | bool else '' }}"
ironic_conductor_default_volumes:
- "{{ node_config_directory }}/ironic-conductor/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
@@ -210,10 +232,11 @@ ironic_conductor_default_volumes:
- "/lib/modules:/lib/modules:ro"
- "/sys:/sys"
- "/dev:/dev"
- - "/run:/run:shared"
+ - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
- "kolla_logs:/var/log/kolla"
- "ironic:/var/lib/ironic"
- - "{{ kolla_dev_repos_directory ~ '/ironic/ironic:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/ironic' if ironic_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/ironic:/dev-mode/ironic' if ironic_dev_mode | bool else '' }}"
+ - "{{ 'ironic_prometheus_exporter_data:/var/lib/ironic/metrics' if enable_ironic_prometheus_exporter | bool else '' }}"
ironic_tftp_default_volumes:
- "{{ node_config_directory }}/ironic-tftp/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
@@ -232,13 +255,19 @@ ironic_inspector_default_volumes:
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla"
- "ironic_inspector_dhcp_hosts:/var/lib/ironic-inspector/dhcp-hostsdir"
- - "{{ kolla_dev_repos_directory ~ '/ironic-inspector/ironic_inspector:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/ironic_inspector' if ironic_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/ironic-inspector:/dev-mode/ironic-inspector' if ironic_inspector_dev_mode | bool else '' }}"
ironic_dnsmasq_default_volumes:
- "{{ node_config_directory }}/ironic-dnsmasq/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla"
- "ironic_inspector_dhcp_hosts:/etc/dnsmasq/dhcp-hostsdir:ro"
+ironic_prometheus_exporter_default_volumes:
+ - "{{ node_config_directory }}/ironic-prometheus-exporter/:{{ container_config_directory }}/:ro"
+ - "/etc/localtime:/etc/localtime:ro"
+ - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
+ - "kolla_logs:/var/log/kolla"
+ - "ironic_prometheus_exporter_data:/var/lib/ironic/metrics"
ironic_extra_volumes: "{{ default_extra_volumes }}"
ironic_api_extra_volumes: "{{ ironic_extra_volumes }}"
@@ -247,15 +276,13 @@ ironic_tftp_extra_volumes: "{{ ironic_extra_volumes }}"
ironic_http_extra_volumes: "{{ ironic_extra_volumes }}"
ironic_inspector_extra_volumes: "{{ ironic_extra_volumes }}"
ironic_dnsmasq_extra_volumes: "{{ ironic_extra_volumes }}"
+ironic_prometheus_exporter_extra_volumes: "{{ ironic_extra_volumes }}"
####################
# OpenStack
####################
ironic_inspector_keystone_user: "ironic-inspector"
-ironic_inspector_internal_endpoint: "{{ internal_protocol }}://{{ ironic_inspector_internal_fqdn | put_address_in_context('url') }}:{{ ironic_inspector_port }}"
-ironic_inspector_public_endpoint: "{{ public_protocol }}://{{ ironic_inspector_external_fqdn | put_address_in_context('url') }}:{{ ironic_inspector_port }}"
-
ironic_logging_debug: "{{ openstack_logging_debug }}"
openstack_ironic_auth: "{{ openstack_auth }}"
@@ -276,10 +303,14 @@ ironic_dnsmasq_uefi_ipxe_boot_file: "snponly.efi"
ironic_cleaning_network:
ironic_console_serial_speed: "115200n8"
ironic_http_url: "http://{{ ironic_http_interface_address | put_address_in_context('url') }}:{{ ironic_http_port }}"
+ironic_tftp_listen_address: "{{ ironic_tftp_interface_address }}"
ironic_enable_rolling_upgrade: "yes"
ironic_upgrade_skip_wait_check: false
ironic_inspector_kernel_cmdline_extras: []
ironic_inspector_pxe_filter: "{% if enable_neutron | bool %}dnsmasq{% else %}noop{% endif %}"
+ironic_prometheus_exporter_data_dir: "/var/lib/ironic-prometheus-exporter/data"
+ironic_prometheus_exporter_sensor_data_interval: 30
+ironic_prometheus_exporter_sensor_data_undeployed_nodes: "true"
####################
@@ -289,7 +320,10 @@ ironic_inspector_git_repository: "{{ kolla_dev_repos_git }}/ironic-inspector"
ironic_git_repository: "{{ kolla_dev_repos_git }}/{{ project_name }}"
ironic_dev_repos_pull: "{{ kolla_dev_repos_pull }}"
ironic_dev_mode: "{{ kolla_dev_mode }}"
+ironic_inspector_dev_mode: "{{ ironic_dev_mode }}"
ironic_source_version: "{{ kolla_source_version }}"
+ironic_inspector_source_version: "{{ ironic_source_version }}"
+ironic_agent_files_directory: "{{ node_custom_config }}"
####################
@@ -329,7 +363,20 @@ ironic_ks_users:
password: "{{ ironic_inspector_keystone_password }}"
role: "admin"
+ironic_ks_user_roles:
+ - project: "service"
+ user: "{{ ironic_keystone_user }}"
+ role: "service"
+ - project: "service"
+ user: "{{ ironic_inspector_keystone_user }}"
+ role: "service"
+ - system: "all"
+ user: "{{ ironic_inspector_keystone_user }}"
+ role: "service"
+
####################
# TLS
####################
ironic_enable_tls_backend: "{{ kolla_enable_tls_backend }}"
+
+ironic_copy_certs: "{{ kolla_copy_ca_into_containers | bool or ironic_enable_tls_backend | bool }}"
diff --git a/ansible/roles/ironic/handlers/main.yml b/ansible/roles/ironic/handlers/main.yml
index 95cf4e7648..8fd1a5394d 100644
--- a/ansible/roles/ironic/handlers/main.yml
+++ b/ansible/roles/ironic/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "ironic-conductor"
service: "{{ ironic_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -13,15 +13,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart ironic-api container
vars:
service_name: "ironic-api"
service: "{{ ironic_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -29,15 +27,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart ironic-inspector container
vars:
service_name: "ironic-inspector"
service: "{{ ironic_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -46,15 +42,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart ironic-tftp container
vars:
service_name: "ironic-tftp"
service: "{{ ironic_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -62,15 +56,13 @@
environment: "{{ service.environment }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
- name: Restart ironic-http container
vars:
service_name: "ironic-http"
service: "{{ ironic_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -78,15 +70,13 @@
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart ironic-dnsmasq container
vars:
service_name: "ironic-dnsmasq"
service: "{{ ironic_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -94,5 +84,16 @@
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
cap_add: "{{ service.cap_add }}"
- when:
- - kolla_action != "config"
+
+- name: Restart ironic-prometheus-exporter container
+ vars:
+ service_name: "ironic-prometheus-exporter"
+ service: "{{ ironic_services[service_name] }}"
+ become: true
+ kolla_container:
+ action: "recreate_or_restart_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ service.container_name }}"
+ image: "{{ service.image }}"
+ volumes: "{{ service.volumes }}"
+ dimensions: "{{ service.dimensions }}"
diff --git a/ansible/roles/ironic/tasks/bootstrap.yml b/ansible/roles/ironic/tasks/bootstrap.yml
index e1b9c164ae..1de6550ea1 100644
--- a/ansible/roles/ironic/tasks/bootstrap.yml
+++ b/ansible/roles/ironic/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating Ironic database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -23,6 +24,7 @@
- name: Creating Ironic database user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
diff --git a/ansible/roles/ironic/tasks/bootstrap_service.yml b/ansible/roles/ironic/tasks/bootstrap_service.yml
index 318f418cf9..bb8a4e5e6e 100644
--- a/ansible/roles/ironic/tasks/bootstrap_service.yml
+++ b/ansible/roles/ironic/tasks/bootstrap_service.yml
@@ -9,7 +9,7 @@
KOLLA_UPGRADE:
KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -18,7 +18,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_ironic"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ ironic_api.volumes | reject('equalto', '') | list }}"
run_once: True
delegate_to: "{{ groups[ironic_api.group][0] }}"
@@ -28,7 +28,7 @@
vars:
ironic_inspector: "{{ ironic_services['ironic-inspector'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -39,7 +39,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_ironic_inspector"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ ironic_inspector.volumes | reject('equalto', '') | list }}"
run_once: True
delegate_to: "{{ groups[ironic_inspector.group][0] }}"
@@ -47,9 +47,9 @@
- name: Running ironic-tftp bootstrap container
vars:
- ironic_tftp: "{{ ironic_services['ironic-tftp'] }}"
+ service: "{{ ironic_services['ironic-tftp'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -58,10 +58,10 @@
HTTPBOOT_PATH: /var/lib/ironic/httpboot
KOLLA_BOOTSTRAP:
KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ ironic_tftp.image }}"
+ image: "{{ service.image }}"
labels:
BOOTSTRAP:
name: "bootstrap_ironic_tftp"
- restart_policy: no
- volumes: "{{ ironic_tftp.volumes }}"
- when: inventory_hostname in groups[ironic_tftp.group]
+ restart_policy: oneshot
+ volumes: "{{ service.volumes }}"
+ when: service | service_enabled_and_mapped_to_host
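
Reviewer note on the new filters used above: `service | service_enabled_and_mapped_to_host` and its dict-level companion `select_services_enabled_and_mapped_to_host` replace the repeated two-condition `when` blocks throughout these roles. A rough YAML sketch of the condition the per-service filter encapsulates (a paraphrase of the assumed semantics, not the filter_plugins source):

    # Equivalent inline form that the filter replaces (assumed semantics):
    when:
      - service.enabled | bool
      - inventory_hostname in groups[service.group]
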
diff --git a/ansible/roles/ironic/tasks/check-containers.yml b/ansible/roles/ironic/tasks/check-containers.yml
index b7e5fe123a..b7e2f7c29f 100644
--- a/ansible/roles/ironic/tasks/check-containers.yml
+++ b/ansible/roles/ironic/tasks/check-containers.yml
@@ -1,20 +1,3 @@
---
-- name: Check ironic containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- privileged: "{{ item.value.privileged | default(False) }}"
- cap_add: "{{ item.value.cap_add | default([]) }}"
- environment: "{{ item.value.environment | default(omit) }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ ironic_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/ironic/tasks/clone.yml b/ansible/roles/ironic/tasks/clone.yml
index 0dc10862f1..51ca6a221b 100644
--- a/ansible/roles/ironic/tasks/clone.yml
+++ b/ansible/roles/ironic/tasks/clone.yml
@@ -13,4 +13,5 @@
repo: "{{ ironic_inspector_git_repository }}"
dest: "{{ kolla_dev_repos_directory }}/ironic-inspector"
update: "{{ ironic_dev_repos_pull }}"
- version: "{{ ironic_source_version }}"
+ version: "{{ ironic_inspector_source_version }}"
+ when: ironic_inspector_dev_mode | bool
diff --git a/ansible/roles/ironic/tasks/config.yml b/ansible/roles/ironic/tasks/config.yml
index ee9ef5a082..682676a57e 100644
--- a/ansible/roles/ironic/tasks/config.yml
+++ b/ansible/roles/ironic/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ ironic_services }}"
+ with_dict: "{{ ironic_services | select_services_enabled_and_mapped_to_host }}"
- name: Check if Ironic policies shall be overwritten
stat:
@@ -50,9 +47,34 @@
when:
- ironic_inspector_policy.results
+- name: Check if Ironic Inspector known_devices.yaml shall be overwritten
+ stat:
+ path: "{{ node_custom_config }}/ironic-inspector/known_devices.yaml"
+ delegate_to: localhost
+ run_once: True
+ register: ironic_inspector_known_devices
+
+- name: Set known_devices file path
+ set_fact:
+ ironic_inspector_known_devices_file_path: "{{ ironic_inspector_known_devices.stat.path }}"
+ when:
+ - ironic_inspector_known_devices.stat.exists
+
+- name: Copying over known_devices.yaml
+ vars:
+ service: "{{ ironic_services['ironic-inspector'] }}"
+ template:
+ src: "{{ ironic_inspector_known_devices_file_path }}"
+ dest: "{{ node_config_directory }}/ironic-inspector/known_devices.yaml"
+ mode: "0660"
+ become: true
+ when:
+ - ironic_inspector_known_devices_file_path is defined
+ - service | service_enabled_and_mapped_to_host
+
- include_tasks: copy-certs.yml
when:
- - kolla_copy_ca_into_containers | bool or ironic_enable_tls_backend | bool
+ - ironic_copy_certs
- name: Copying over config.json files for services
template:
@@ -60,12 +82,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ ironic_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ ironic_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over ironic.conf
vars:
@@ -81,12 +98,8 @@
mode: "0660"
become: true
when:
- - item.key in [ "ironic-api", "ironic-conductor" ]
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ ironic_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ - item.key in [ "ironic-api", "ironic-conductor", "ironic-prometheus-exporter" ]
+ with_dict: "{{ ironic_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over inspector.conf
vars:
@@ -101,11 +114,7 @@
dest: "{{ node_config_directory }}/ironic-inspector/inspector.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart ironic-inspector container
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over dnsmasq.conf
vars:
@@ -115,15 +124,11 @@
dest: "{{ node_config_directory }}/ironic-dnsmasq/dnsmasq.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_first_found:
- "{{ node_custom_config }}/ironic/ironic-dnsmasq.conf"
- "{{ node_custom_config }}/ironic/{{ inventory_hostname }}/ironic-dnsmasq.conf"
- "ironic-dnsmasq.conf.j2"
- notify:
- - Restart ironic-dnsmasq container
- name: Copying pxelinux.cfg default
vars:
@@ -140,37 +145,14 @@
when:
# Only required when Ironic inspector is in use.
- groups['ironic-inspector'] | length > 0
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- - not enable_ironic_pxe_uefi | bool
+ - service | service_enabled_and_mapped_to_host
- not ironic_dnsmasq_serve_ipxe | bool
- notify:
- - Restart ironic-tftp container
-
-- name: Copying ironic_pxe_uefi.cfg default
- vars:
- service: "{{ ironic_services['ironic-tftp'] }}"
- template:
- src: "{{ item }}"
- dest: "{{ node_config_directory }}/ironic-tftp/default"
- mode: "0660"
- become: true
- with_first_found:
- - "{{ node_custom_config }}/ironic/ironic_pxe_uefi.default"
- - "{{ node_custom_config }}/ironic/{{ inventory_hostname }}/ironic_pxe_uefi.default"
- - "ironic_pxe_uefi.default.j2"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- - enable_ironic_pxe_uefi | bool
- notify:
- - Restart ironic-tftp container
- name: Copying ironic-agent kernel and initramfs (PXE)
vars:
service: "{{ ironic_services['ironic-tftp'] }}"
copy:
- src: "{{ node_custom_config }}/ironic/{{ item }}"
+ src: "{{ ironic_agent_files_directory }}/ironic/{{ item }}"
dest: "{{ node_config_directory }}/ironic-tftp/{{ item }}"
mode: "0660"
become: true
@@ -180,18 +162,14 @@
when:
# Only required when Ironic inspector is in use.
- groups['ironic-inspector'] | length > 0
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- - not enable_ironic_pxe_uefi | bool
+ - service | service_enabled_and_mapped_to_host
- not ironic_dnsmasq_serve_ipxe | bool
- notify:
- - Restart ironic-tftp container
- name: Copying ironic-agent kernel and initramfs (iPXE)
vars:
service: "{{ ironic_services['ironic-http'] }}"
copy:
- src: "{{ node_custom_config }}/ironic/{{ item }}"
+ src: "{{ ironic_agent_files_directory }}/ironic/{{ item }}"
dest: "{{ node_config_directory }}/ironic-http/{{ item }}"
mode: "0660"
become: true
@@ -201,10 +179,7 @@
when:
# Only required when Ironic inspector is in use.
- groups['ironic-inspector'] | length > 0
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart ironic-http container
+ - service | service_enabled_and_mapped_to_host
- name: Copying inspector.ipxe
vars:
@@ -221,10 +196,7 @@
when:
# Only required when Ironic inspector is in use.
- groups['ironic-inspector'] | length > 0
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart ironic-http container
+ - service | service_enabled_and_mapped_to_host
- name: Copying ironic-http-httpd.conf
vars:
@@ -237,11 +209,21 @@
with_first_found:
- "{{ node_custom_config }}/ironic/ironic-http-httpd.conf"
- "ironic-http-httpd.conf.j2"
- when:
- - service.enabled | bool
- - inventory_hostname in groups[service.group]
- notify:
- - Restart ironic-http container
+ when: service | service_enabled_and_mapped_to_host
+
+- name: Copying over ironic-prometheus-exporter-wsgi.conf
+ vars:
+ service: "{{ ironic_services['ironic-prometheus-exporter'] }}"
+ template:
+ src: "{{ item }}"
+ dest: "{{ node_config_directory }}/ironic-prometheus-exporter/ironic-prometheus-exporter-wsgi.conf"
+ mode: "0660"
+ become: true
+ with_first_found:
+ - "{{ node_config_directory }}/ironic/{{ inventory_hostname }}/ironic-prometheus-exporter-wsgi.conf"
+ - "{{ node_config_directory }}/ironic/ironic-prometheus-exporter-wsgi.conf"
+ - "ironic-prometheus-exporter-wsgi.conf.j2"
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over existing Ironic policy file
vars:
@@ -256,11 +238,7 @@
when:
- ironic_policy_file is defined
- item.key in services_require_policy_json
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ ironic_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ ironic_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over existing Ironic Inspector policy file
vars:
@@ -274,20 +252,14 @@
when:
- ironic_inspector_policy_file is defined
- item.key in services_require_inspector_policy_json
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ ironic_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ ironic_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over ironic-api-wsgi.conf
+ vars:
+ service: "{{ ironic_services['ironic-api'] }}"
template:
src: "ironic-api-wsgi.conf.j2"
dest: "{{ node_config_directory }}/ironic-api/ironic-api-wsgi.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups["ironic-api"]
- - ironic_services["ironic-api"].enabled | bool
- notify:
- - "Restart ironic-api container"
+ when: service | service_enabled_and_mapped_to_host
diff --git a/ansible/roles/ironic/tasks/config_validate.yml b/ansible/roles/ironic/tasks/config_validate.yml
new file mode 100644
index 0000000000..7b1806f346
--- /dev/null
+++ b/ansible/roles/ironic/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ ironic_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ ironic_config_validation }}"
diff --git a/ansible/roles/ironic/tasks/precheck.yml b/ansible/roles/ironic/tasks/precheck.yml
index 005b16f753..03d275d2d3 100644
--- a/ansible/roles/ironic/tasks/precheck.yml
+++ b/ansible/roles/ironic/tasks/precheck.yml
@@ -8,12 +8,13 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- ironic_api
- ironic_inspector
- # TODO(yoctozepto): Remove ironic_ipxe entry in Zed.
- - ironic_ipxe
- ironic_http
+ check_mode: false
register: container_facts
- name: Checking free port for Ironic API
@@ -46,14 +47,24 @@
timeout: 1
state: stopped
when:
- # TODO(yoctozepto): Remove ironic_ipxe entry in Zed.
- - container_facts['ironic_ipxe'] is not defined
- container_facts['ironic_http'] is not defined
- inventory_hostname in groups['ironic-http']
+- name: Checking free port for Ironic Prometheus Exporter
+ wait_for:
+ host: "{{ api_interface_address }}"
+ port: "{{ ironic_prometheus_exporter_port }}"
+ connect_timeout: 1
+ timeout: 1
+ state: stopped
+ when:
+ - enable_ironic_prometheus_exporter | bool
+ - container_facts['ironic_prometheus_exporter'] is not defined
+ - inventory_hostname in groups['ironic-conductor']
+
- name: Checking ironic-agent files exist for Ironic Inspector
stat:
- path: "{{ node_custom_config }}/ironic/{{ item }}"
+ path: "{{ ironic_agent_files_directory }}/ironic/{{ item }}"
delegate_to: localhost
run_once: True
register: result
@@ -63,7 +74,6 @@
- groups['ironic-inspector'] | length > 0
- (not ironic_dnsmasq_serve_ipxe | bool and inventory_hostname in groups['ironic-tftp']) or
(ironic_dnsmasq_serve_ipxe | bool and inventory_hostname in groups['ironic-http'])
- - not enable_ironic_pxe_uefi | bool
with_items:
- "ironic-agent.kernel"
- "ironic-agent.initramfs"
@@ -74,4 +84,6 @@
ironic_dnsmasq_dhcp_ranges must be a list
connection: local
run_once: True
- when: not ironic_dnsmasq_dhcp_ranges is sequence
+ when:
+ - enable_ironic_dnsmasq | bool
+ - not ironic_dnsmasq_dhcp_ranges is sequence
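
A note on the precheck pattern used above: `wait_for` with `state: stopped` succeeds only if connecting to the port fails, i.e. the port is free, and the container-facts guard keeps it from firing when the service's own container already holds the port. In isolation (port value is a placeholder, not the real exporter default):

    - name: Checking a port is free
      wait_for:
        host: "{{ api_interface_address }}"
        port: 9999            # placeholder value for illustration
        connect_timeout: 1
        timeout: 1
        state: stopped        # assert nothing is listening yet
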
diff --git a/ansible/roles/ironic/tasks/register.yml b/ansible/roles/ironic/tasks/register.yml
index 5d19d89b99..c101c8d731 100644
--- a/ansible/roles/ironic/tasks/register.yml
+++ b/ansible/roles/ironic/tasks/register.yml
@@ -5,3 +5,4 @@
service_ks_register_auth: "{{ openstack_ironic_auth }}"
service_ks_register_services: "{{ ironic_ks_services }}"
service_ks_register_users: "{{ ironic_ks_users }}"
+ service_ks_register_user_roles: "{{ ironic_ks_user_roles }}"
diff --git a/ansible/roles/ironic/tasks/rolling_upgrade.yml b/ansible/roles/ironic/tasks/rolling_upgrade.yml
index ad39fba27a..d3577e4122 100644
--- a/ansible/roles/ironic/tasks/rolling_upgrade.yml
+++ b/ansible/roles/ironic/tasks/rolling_upgrade.yml
@@ -31,7 +31,7 @@
vars:
ironic_api: "{{ ironic_services['ironic-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -42,7 +42,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_ironic"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ ironic_api.volumes }}"
run_once: True
delegate_to: "{{ groups[ironic_api.group][0] }}"
diff --git a/ansible/roles/ironic/tasks/upgrade.yml b/ansible/roles/ironic/tasks/upgrade.yml
index a88d565ae4..8d8094b323 100644
--- a/ansible/roles/ironic/tasks/upgrade.yml
+++ b/ansible/roles/ironic/tasks/upgrade.yml
@@ -9,7 +9,7 @@
--os-password {{ openstack_auth.password }}
--os-identity-api-version 3
--os-user-domain-name {{ openstack_auth.user_domain_name }}
- --os-system-scope {{ openstack_auth.system_scope }}
+ --os-system-scope "all"
--os-region-name {{ openstack_region_name }}
{% if openstack_cacert != '' %}--os-cacert {{ openstack_cacert }}{% endif %}
baremetal node list --format json --column "Provisioning State"
@@ -27,33 +27,15 @@
run_once: true
when: not ironic_upgrade_skip_wait_check | bool
-# TODO(yoctozepto): Remove this task in Zed.
-- name: Remove old Ironic containers
- become: true
- kolla_docker:
- action: "stop_and_remove_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item }}"
- with_items:
- # NOTE(yoctozepto): Removing conductor to avoid it
- # thinking that the tftp and http servers are available.
- - ironic_conductor
- - ironic_pxe
- - ironic_ipxe
-
- include_tasks: rolling_upgrade.yml
when: ironic_enable_rolling_upgrade | bool
- include_tasks: legacy_upgrade.yml
when: not ironic_enable_rolling_upgrade | bool
-# TODO(yoctozepto): Remove this task in Zed.
-- name: Remove old Ironic volumes
- become: true
- kolla_docker:
- action: "remove_volume"
- common_options: "{{ docker_common_options }}"
- name: "{{ item }}"
- with_items:
- - ironic_pxe
- - ironic_ipxe
+# TODO(bbezak): Remove this task in the Dalmatian cycle.
+- import_role:
+ name: service-ks-register
+ vars:
+ service_ks_register_auth: "{{ openstack_ironic_auth }}"
+ service_ks_register_user_roles: "{{ ironic_ks_user_roles }}"
diff --git a/ansible/roles/ironic/templates/ironic-conductor.json.j2 b/ansible/roles/ironic/templates/ironic-conductor.json.j2
index f5a0477992..06ff789dff 100644
--- a/ansible/roles/ironic/templates/ironic-conductor.json.j2
+++ b/ansible/roles/ironic/templates/ironic-conductor.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/ironic/{{ ironic_policy_file }}",
"owner": "ironic",
"perm": "0600"
+ }{% endif %}{% if ironic_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
@@ -24,6 +30,11 @@
"path": "/var/lib/ironic",
"owner": "ironic:ironic",
"recurse": true
- }
+ }{% if enable_ironic_prometheus_exporter | bool %},
+ {
+ "path": "/var/lib/ironic/metrics",
+ "owner": "ironic:ironic",
+ "recurse": true
+ }{% endif %}
]
}
diff --git a/ansible/roles/ironic/templates/ironic-dnsmasq.conf.j2 b/ansible/roles/ironic/templates/ironic-dnsmasq.conf.j2
index f7eb73eb63..aa55625e93 100644
--- a/ansible/roles/ironic/templates/ironic-dnsmasq.conf.j2
+++ b/ansible/roles/ironic/templates/ironic-dnsmasq.conf.j2
@@ -9,9 +9,8 @@ bind-interfaces
{% set tag = item.tag | default('range_' ~ loop.index) %}
{% set lease_time = item.lease_time | default(ironic_dnsmasq_dhcp_default_lease_time) %}
dhcp-range=set:{{ tag }},{{ item.range }},{{ lease_time }}
-{% if item.routers is defined %}
-dhcp-option=tag:{{ tag }},option:router,{{ item.routers }}
-{% endif %}
+{% if item.routers is defined %}dhcp-option=tag:{{ tag }},option:router,{{ item.routers }}{% endif %}
+{% if item.ntp_server is defined %}dhcp-option=tag:{{ tag }},option:ntp-server,{{ item.ntp_server }}{% endif %}
{% endfor %}
{% if api_address_family == 'ipv6' %}
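
To illustrate the new `ntp_server` option alongside the existing `routers` one, a hypothetical `ironic_dnsmasq_dhcp_ranges` entry (addresses from the documentation range):

    ironic_dnsmasq_dhcp_ranges:
      - range: "192.0.2.10,192.0.2.100"
        routers: "192.0.2.1"
        ntp_server: "192.0.2.2"
        lease_time: "12h"

which this template renders as (the tag defaults to range_1):

    dhcp-range=set:range_1,192.0.2.10,192.0.2.100,12h
    dhcp-option=tag:range_1,option:router,192.0.2.1
    dhcp-option=tag:range_1,option:ntp-server,192.0.2.2
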
diff --git a/ansible/roles/ironic/templates/ironic-dnsmasq.json.j2 b/ansible/roles/ironic/templates/ironic-dnsmasq.json.j2
index baab505285..dcca1843d3 100644
--- a/ansible/roles/ironic/templates/ironic-dnsmasq.json.j2
+++ b/ansible/roles/ironic/templates/ironic-dnsmasq.json.j2
@@ -6,6 +6,12 @@
"dest": "/etc/dnsmasq.conf",
"owner": "root",
"perm": "0600"
- }
+ }{% if ironic_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/ironic/templates/ironic-http.json.j2 b/ansible/roles/ironic/templates/ironic-http.json.j2
index 8f1b7d08d5..670d45f1d3 100644
--- a/ansible/roles/ironic/templates/ironic-http.json.j2
+++ b/ansible/roles/ironic/templates/ironic-http.json.j2
@@ -28,6 +28,12 @@
"dest": "/etc/{{ apache_conf_dir }}/httpboot.conf",
"owner": "root",
"perm": "0644"
- }
+ }{% if ironic_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/ironic/templates/ironic-inspector.conf.j2 b/ansible/roles/ironic/templates/ironic-inspector.conf.j2
index 7675784efa..7c93c975cd 100644
--- a/ansible/roles/ironic/templates/ironic-inspector.conf.j2
+++ b/ansible/roles/ironic/templates/ironic-inspector.conf.j2
@@ -12,24 +12,30 @@ transport_url = {{ rpc_transport_url }}
[oslo_messaging_notifications]
transport_url = {{ notify_transport_url }}
-{% if om_enable_rabbitmq_tls | bool %}
[oslo_messaging_rabbit]
+heartbeat_in_pthread = false
+{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
[ironic]
{% if ironic_enable_keystone_integration | bool %}
auth_url = {{ keystone_internal_url }}
auth_type = password
-project_domain_id = {{ default_project_domain_id }}
user_domain_id = {{ default_user_domain_id }}
-project_name = service
username = {{ ironic_inspector_keystone_user }}
password = {{ ironic_inspector_keystone_password }}
-os_endpoint_type = internalURL
+valid_interfaces = internal
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
+system_scope = all
{% else %}
auth_type = none
endpoint_override = {{ ironic_internal_endpoint }}
@@ -49,7 +55,7 @@ password = {{ ironic_inspector_keystone_password }}
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
{% endif %}
@@ -79,9 +85,15 @@ dnsmasq_interface = {{ ironic_dnsmasq_interface }}
{% if ironic_coordination_backend == 'redis' %}
backend_url = {{ redis_connection_string }}
{% elif ironic_coordination_backend == 'etcd' %}
-# NOTE(yoctozepto): etcd-compatible tooz drivers do not support multiple endpoints here (verified in Stein, Train)
# NOTE(yoctozepto): we must use etcd3gw (aka etcd3+http) due to issues with alternative (etcd3) and eventlet (as used by cinder)
# see https://bugs.launchpad.net/kolla-ansible/+bug/1854932
# and https://review.opendev.org/466098 for details
-backend_url = etcd3+{{ etcd_protocol }}://{{ 'api' | kolla_address(groups['etcd'][0]) | put_address_in_context('url') }}:{{ etcd_client_port }}
+# NOTE(jan.gutter): etcd v3.4 removed the default `v3alpha` api_version. Until
+# tooz defaults to a newer version, we should explicitly specify `v3`
+backend_url = etcd3+{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ etcd_client_port }}?api_version=v3{% if openstack_cacert %}&ca_cert={{ openstack_cacert }}{% endif %}
+{% endif %}
+
+{% if ironic_inspector_known_devices_file_path is defined %}
+[accelerators]
+known_devices = /etc/ironic-inspector/known_devices.yaml
{% endif %}
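
The referenced known_devices.yaml follows ironic-inspector's accelerators file format; a minimal example with made-up device values (treat the fields as illustrative of the upstream format, not as tested content):

    pci_devices:
      - vendor_id: "10de"       # illustrative vendor ID (NVIDIA)
        device_id: "1eb8"       # illustrative device ID (Tesla T4)
        type: GPU
        device_info: "NVIDIA Corporation Tesla T4"
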
diff --git a/ansible/roles/ironic/templates/ironic-inspector.json.j2 b/ansible/roles/ironic/templates/ironic-inspector.json.j2
index 6047e14c3d..3282698f42 100644
--- a/ansible/roles/ironic/templates/ironic-inspector.json.j2
+++ b/ansible/roles/ironic/templates/ironic-inspector.json.j2
@@ -12,6 +12,18 @@
"dest": "/etc/ironic-inspector/{{ ironic_inspector_policy_file }}",
"owner": "ironic-inspector",
"perm": "0600"
+ }{% endif %}{% if ironic_inspector_known_devices_file_path is defined %},
+ {
+ "source": "{{ container_config_directory }}/known_devices.yaml",
+ "dest": "/etc/ironic-inspector/known_devices.yaml",
+ "owner": "ironic-inspector",
+ }{% endif %}{% if ironic_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
]
}
diff --git a/ansible/roles/ironic/templates/ironic-prometheus-exporter-wsgi.conf.j2 b/ansible/roles/ironic/templates/ironic-prometheus-exporter-wsgi.conf.j2
new file mode 100644
index 0000000000..ace7fd9a1d
--- /dev/null
+++ b/ansible/roles/ironic/templates/ironic-prometheus-exporter-wsgi.conf.j2
@@ -0,0 +1,38 @@
+{% set ironic_log_dir = '/var/log/kolla/ironic' %}
+{% set python_path = '/var/lib/kolla/venv/lib/python' + distro_python_version + '/site-packages' %}
+Listen {{ api_interface_address | put_address_in_context('url') }}:{{ ironic_prometheus_exporter_port }}
+
+ServerSignature Off
+ServerTokens Prod
+TraceEnable off
+
+
+<Directory "{{ python_path }}">
+    AllowOverride None
+    Options None
+    Require all granted
+</Directory>
+
+
+ErrorLog "{{ ironic_log_dir }}/apache-error.log"
+<IfModule log_config_module>
+    CustomLog "{{ ironic_log_dir }}/apache-access.log" common
+</IfModule>
+
+{% if ironic_logging_debug | bool %}
+LogLevel info
+{% endif %}
+
+<VirtualHost *:{{ ironic_prometheus_exporter_port }}>
+    ErrorLog "{{ ironic_log_dir }}/ironic-prometheus-exporter-wsgi-error.log"
+    LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
+    CustomLog "{{ ironic_log_dir }}/ironic-prometheus-exporter-wsgi-access.log" logformat
+
+    WSGIDaemonProcess ironic-prometheus-exporter processes={{ openstack_service_workers }} threads=1 user=ironic display-name=%{GROUP} python-path={{ python_path }}
+    WSGIProcessGroup ironic-prometheus-exporter
+    WSGIScriptAlias / {{ python_path }}/ironic_prometheus_exporter/app/wsgi.py
+    WSGIApplicationGroup %{GLOBAL}
+    <Location "/">
+        Require all granted
+    </Location>
+</VirtualHost>
diff --git a/ansible/roles/ironic/templates/ironic-prometheus-exporter.json.j2 b/ansible/roles/ironic/templates/ironic-prometheus-exporter.json.j2
new file mode 100644
index 0000000000..5099c23240
--- /dev/null
+++ b/ansible/roles/ironic/templates/ironic-prometheus-exporter.json.j2
@@ -0,0 +1,37 @@
+{% set ironic_prometheus_exporter_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %}
+{% set ironic_prometheus_exporter_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %}
+{
+ "command": "/usr/sbin/{{ ironic_prometheus_exporter_cmd }} -DFOREGROUND",
+ "config_files": [
+ {
+ "source": "{{ container_config_directory }}/ironic-prometheus-exporter-wsgi.conf",
+ "dest": "/etc/{{ ironic_prometheus_exporter_dir }}/ironic-prometheus-exporter-wsgi.conf",
+ "owner": "ironic",
+ "perm": "0600"
+ },
+ {
+ "source": "{{ container_config_directory }}/ironic.conf",
+ "dest": "/etc/ironic/ironic.conf",
+ "owner": "ironic",
+ "perm": "0600"
+ }{% if ironic_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
+ ],
+ "permissions": [
+ {
+ "path": "/var/log/kolla/ironic",
+ "owner": "ironic:ironic",
+ "recurse": true
+ },
+ {
+ "path": "/var/lib/ironic/metrics",
+ "owner": "ironic:ironic",
+ "recurse": true
+ }
+ ]
+}
diff --git a/ansible/roles/ironic/templates/ironic-tftp.json.j2 b/ansible/roles/ironic/templates/ironic-tftp.json.j2
index 28b394e0cf..8526aea56b 100644
--- a/ansible/roles/ironic/templates/ironic-tftp.json.j2
+++ b/ansible/roles/ironic/templates/ironic-tftp.json.j2
@@ -1,20 +1,10 @@
-{% if enable_ironic_pxe_uefi | bool %}
- {% if kolla_base_distro in ['debian', 'ubuntu'] %}
- {% set pxe_dir = '/var/lib/ironic/tftpboot/grub' %}
- {% elif kolla_base_distro in ['centos', 'rocky'] %}
- {% set pxe_dir = '/var/lib/ironic/tftpboot/EFI/{{ kolla_base_distro }}' %}
- {% endif %}
-{% else %}
- {% set pxe_dir = '/var/lib/ironic/tftpboot/pxelinux.cfg' %}
-{% endif %}
-
-{% set pxe_cfg = 'grub.cfg' if enable_ironic_pxe_uefi | bool else 'default' %}
+{% set pxe_dir = '/var/lib/ironic/tftpboot/pxelinux.cfg' %}
+{% set pxe_cfg = 'default' %}
{
- "command": "/usr/sbin/in.tftpd --verbose --foreground --user nobody --address 0.0.0.0:69 --map-file /map-file /var/lib/ironic/tftpboot",
+ "command": "/usr/sbin/in.tftpd --verbose --foreground --user nobody --address {{ ironic_tftp_listen_address }}:69 --map-file /map-file /var/lib/ironic/tftpboot",
"config_files": [
{% if not ironic_dnsmasq_serve_ipxe | bool and groups['ironic-inspector'] | length > 0 %}
-{% if not enable_ironic_pxe_uefi | bool %}
{
"source": "{{ container_config_directory }}/ironic-agent.kernel",
"dest": "/var/lib/ironic/tftpboot/ironic-agent.kernel",
@@ -27,7 +17,6 @@
"owner": "root",
"perm": "0644"
},
-{% endif %}
{
"source": "{{ container_config_directory }}/default",
"dest": "{{ pxe_dir }}/{{ pxe_cfg }}",
diff --git a/ansible/roles/ironic/templates/ironic.conf.j2 b/ansible/roles/ironic/templates/ironic.conf.j2
index 6bf0cd18f2..66e0774c1a 100644
--- a/ansible/roles/ironic/templates/ironic.conf.j2
+++ b/ansible/roles/ironic/templates/ironic.conf.j2
@@ -18,20 +18,37 @@ my_ip = {{ api_interface_address }}
notification_level = info
{% endif %}
+rbac_service_role_elevated_access = True
+
[oslo_messaging_notifications]
transport_url = {{ notify_transport_url }}
+{% if ironic_enabled_notification_topics or enable_ironic_prometheus_exporter | bool %}
{% if ironic_enabled_notification_topics %}
driver = messagingv2
topics = {{ ironic_enabled_notification_topics | map(attribute='name') | join(',') }}
+{% endif %}
+{% if enable_ironic_prometheus_exporter | bool %}
+driver = prometheus_exporter
+{% endif %}
{% else %}
driver = noop
{% endif %}
+{% if enable_ironic_prometheus_exporter | bool %}
+location = /var/lib/ironic/metrics
+{% endif %}
-{% if om_enable_rabbitmq_tls | bool %}
[oslo_messaging_rabbit]
+heartbeat_in_pthread = {{ service_name == 'ironic-api' }}
+{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
{% if ironic_policy_file is defined %}
[oslo_policy]
@@ -41,6 +58,11 @@ policy_file = {{ ironic_policy_file }}
{% if service_name == 'ironic-conductor' %}
[conductor]
automated_clean=false
+{% if enable_ironic_prometheus_exporter | bool %}
+send_sensor_data = true
+send_sensor_data_for_undeployed_nodes = {{ ironic_prometheus_exporter_sensor_data_undeployed_nodes }}
+send_sensor_data_interval = {{ ironic_prometheus_exporter_sensor_data_interval }}
+{% endif %}
{% endif %}
[database]
@@ -64,7 +86,7 @@ region_name = {{ openstack_region_name }}
valid_interfaces = internal
cafile = {{ openstack_cacert }}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
{% endif %}
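
For clarity, with `enable_ironic_prometheus_exporter` true and no extra notification topics configured, the notification block above renders roughly as (transport URL elided):

    [oslo_messaging_notifications]
    transport_url = rabbit://...
    driver = prometheus_exporter
    location = /var/lib/ironic/metrics

The conductor then writes sensor data under /var/lib/ironic/metrics, which the new WSGI exporter serves to Prometheus.
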
diff --git a/ansible/roles/iscsi/defaults/main.yml b/ansible/roles/iscsi/defaults/main.yml
index ceee34eff5..8067c794ab 100644
--- a/ansible/roles/iscsi/defaults/main.yml
+++ b/ansible/roles/iscsi/defaults/main.yml
@@ -7,7 +7,7 @@ iscsi_services:
image: "{{ iscsid_image_full }}"
ipc_mode: "host"
privileged: True
- volumes: "{{ iscsid_default_volumes + iscsid_extra_volumes }}"
+ volumes: "{{ iscsid_default_volumes + iscsid_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
dimensions: "{{ iscsid_dimensions }}"
tgtd:
container_name: tgtd
@@ -16,20 +16,21 @@ iscsi_services:
image: "{{ tgtd_image_full }}"
ipc_mode: "host"
privileged: True
- volumes: "{{ tgtd_default_volumes + tgtd_extra_volumes }}"
+ volumes: "{{ tgtd_default_volumes + tgtd_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
dimensions: "{{ tgtd_dimensions }}"
+tgtd_interface_address: "{{ api_interface_address }}"
####################
# Docker
####################
iscsi_tag: "{{ openstack_tag }}"
-iscsid_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/iscsid"
+iscsid_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}iscsid"
iscsid_tag: "{{ iscsi_tag }}"
iscsid_image_full: "{{ iscsid_image }}:{{ iscsid_tag }}"
-tgtd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/tgtd"
+tgtd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}tgtd"
tgtd_tag: "{{ iscsi_tag }}"
tgtd_image_full: "{{ tgtd_image }}:{{ tgtd_tag }}"
@@ -42,7 +43,7 @@ iscsid_default_volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "/dev/:/dev/"
- - "/run/:/run/:shared"
+ - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
- "/lib/modules:/lib/modules:ro"
- "/sys/kernel/config:/configfs"
- "iscsi_info:/etc/iscsi"
@@ -52,7 +53,7 @@ tgtd_default_volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "/dev/:/dev/"
- - "/run/:/run/:shared"
+ - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
- "/lib/modules:/lib/modules:ro"
- "/sys/kernel/config:/configfs"
iscsid_extra_volumes: "{{ default_extra_volumes }}"
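
The `lookup('vars', 'run_default_volumes_' + kolla_container_engine)` pattern selects an engine-specific volume list by name: with `kolla_container_engine: podman` it resolves `run_default_volumes_podman`. Conceptually (values below are placeholders, not the actual group_vars defaults):

    run_default_volumes_docker: []
    run_default_volumes_podman:
      - "/run/netns:/run/netns:shared"   # hypothetical entry
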
diff --git a/ansible/roles/iscsi/handlers/main.yml b/ansible/roles/iscsi/handlers/main.yml
index 5efd8cc82e..2b03fdf20b 100644
--- a/ansible/roles/iscsi/handlers/main.yml
+++ b/ansible/roles/iscsi/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "iscsid"
service: "{{ iscsi_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -13,15 +13,13 @@
privileged: "{{ service.privileged }}"
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
- name: Restart tgtd container
vars:
service_name: "tgtd"
service: "{{ iscsi_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -30,5 +28,3 @@
privileged: "{{ service.privileged }}"
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/iscsi/tasks/check-containers.yml b/ansible/roles/iscsi/tasks/check-containers.yml
index ed85f5062f..b7e2f7c29f 100644
--- a/ansible/roles/iscsi/tasks/check-containers.yml
+++ b/ansible/roles/iscsi/tasks/check-containers.yml
@@ -1,18 +1,3 @@
---
-- name: Check iscsi containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- ipc_mode: "{{ item.value.ipc_mode }}"
- privileged: "{{ item.value.privileged | default(False) }}"
- volumes: "{{ item.value.volumes }}"
- dimensions: "{{ item.value.dimensions }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ iscsi_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/iscsi/tasks/config.yml b/ansible/roles/iscsi/tasks/config.yml
index 1c9da9dfc0..a47ddfbaf6 100644
--- a/ansible/roles/iscsi/tasks/config.yml
+++ b/ansible/roles/iscsi/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ iscsi_services }}"
+ with_dict: "{{ iscsi_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over config.json files for services
template:
@@ -18,9 +15,4 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ iscsi_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ iscsi_services | select_services_enabled_and_mapped_to_host }}"
diff --git a/ansible/roles/skydive/tasks/check.yml b/ansible/roles/iscsi/tasks/config_validate.yml
similarity index 100%
rename from ansible/roles/skydive/tasks/check.yml
rename to ansible/roles/iscsi/tasks/config_validate.yml
diff --git a/ansible/roles/iscsi/tasks/precheck.yml b/ansible/roles/iscsi/tasks/precheck.yml
index 858ce7dbd2..bd72b1da43 100644
--- a/ansible/roles/iscsi/tasks/precheck.yml
+++ b/ansible/roles/iscsi/tasks/precheck.yml
@@ -8,13 +8,16 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- iscsid
+ check_mode: false
register: container_facts
- name: Checking free port for iscsi
wait_for:
- host: "{{ api_interface_address }}"
+ host: "{{ tgtd_interface_address }}"
port: "{{ iscsi_port }}"
connect_timeout: 1
timeout: 1
@@ -25,9 +28,9 @@
- iscsi_services.iscsid.enabled | bool
- name: Check supported platforms for tgtd
- fail:
- msg:
- The SCSI target daemon tgtd is not supported on CentOS/RHEL 8 and later
+ assert:
+ that: ansible_facts.os_family != 'RedHat'
+ fail_msg: >
+ The SCSI target daemon tgtd is not supported on CentOS/RHEL
when:
- - ansible_facts.os_family == 'RedHat'
- enable_tgtd | bool
diff --git a/ansible/roles/iscsi/templates/tgtd.json.j2 b/ansible/roles/iscsi/templates/tgtd.json.j2
index 0501c52fbd..cb9032bffe 100644
--- a/ansible/roles/iscsi/templates/tgtd.json.j2
+++ b/ansible/roles/iscsi/templates/tgtd.json.j2
@@ -1,4 +1,4 @@
{
- "command": "tgtd -d 1 -f --iscsi portal={{ api_interface_address | put_address_in_context('url') }}:{{ iscsi_port }}",
+ "command": "tgtd -d 1 -f --iscsi portal={{ tgtd_interface_address | put_address_in_context('url') }}:{{ iscsi_port }}",
"config_files": []
}
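
Since `tgtd_interface_address` now defaults to `api_interface_address` but is a standalone variable, an operator can pin the iSCSI portal to a dedicated storage address in globals.yml, e.g. (documentation-range address, illustrative only):

    tgtd_interface_address: "192.0.2.50"
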
diff --git a/ansible/roles/kafka/defaults/main.yml b/ansible/roles/kafka/defaults/main.yml
deleted file mode 100644
index 5a2195bbb3..0000000000
--- a/ansible/roles/kafka/defaults/main.yml
+++ /dev/null
@@ -1,51 +0,0 @@
----
-kafka_services:
- kafka:
- container_name: kafka
- group: kafka
- enabled: true
- image: "{{ kafka_image_full }}"
- environment:
- LOG_DIR: "{{ kafka_log_dir }}"
- KAFKA_HEAP_OPTS: "{{ kafka_heap_opts }}"
- volumes: "{{ kafka_default_volumes + kafka_extra_volumes }}"
- dimensions: "{{ kafka_dimensions }}"
- healthcheck: "{{ kafka_healthcheck }}"
-
-
-####################
-# Kafka
-####################
-kafka_cluster_name: "kolla_kafka"
-kafka_log_dir: "/var/log/kolla/kafka"
-kafka_heap_opts: "-Xmx1G -Xms1G"
-kafka_zookeeper: "{% for host in groups['zookeeper'] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ zookeeper_client_port }}{% if not loop.last %},{% endif %}{% endfor %}"
-
-####################
-# Docker
-####################
-kafka_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/kafka"
-kafka_tag: "{{ openstack_tag }}"
-kafka_image_full: "{{ kafka_image }}:{{ kafka_tag }}"
-kafka_dimensions: "{{ default_container_dimensions }}"
-
-kafka_enable_healthchecks: "{{ enable_container_healthchecks }}"
-kafka_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-kafka_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-kafka_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-kafka_healthcheck_test: ["CMD-SHELL", "healthcheck_listen java {{ kafka_port }}"]
-kafka_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-kafka_healthcheck:
- interval: "{{ kafka_healthcheck_interval }}"
- retries: "{{ kafka_healthcheck_retries }}"
- start_period: "{{ kafka_healthcheck_start_period }}"
- test: "{% if kafka_enable_healthchecks | bool %}{{ kafka_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ kafka_healthcheck_timeout }}"
-
-kafka_default_volumes:
- - "{{ node_config_directory }}/kafka/:{{ container_config_directory }}/"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "{{ kafka_datadir_volume }}:/var/lib/kafka/data"
- - "kolla_logs:/var/log/kolla/"
-kafka_extra_volumes: "{{ default_extra_volumes }}"
diff --git a/ansible/roles/kafka/handlers/main.yml b/ansible/roles/kafka/handlers/main.yml
deleted file mode 100644
index 132de1e048..0000000000
--- a/ansible/roles/kafka/handlers/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- name: Restart kafka container
- vars:
- service_name: "kafka"
- service: "{{ kafka_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- environment: "{{ service.environment }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/kafka/tasks/check-containers.yml b/ansible/roles/kafka/tasks/check-containers.yml
deleted file mode 100644
index f383d0813f..0000000000
--- a/ansible/roles/kafka/tasks/check-containers.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- name: Check kafka containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes }}"
- environment: "{{ item.value.environment }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ kafka_services }}"
- notify:
- - "Restart {{ item.key }} container"
diff --git a/ansible/roles/kafka/tasks/config.yml b/ansible/roles/kafka/tasks/config.yml
deleted file mode 100644
index 5338a89e98..0000000000
--- a/ansible/roles/kafka/tasks/config.yml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item.key }}"
- state: "directory"
- owner: "{{ config_owner_user }}"
- group: "{{ config_owner_group }}"
- mode: "0770"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ kafka_services }}"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item.key }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ kafka_services }}"
- notify:
- - Restart kafka container
-
-- name: Copying over kafka config
- merge_configs:
- sources:
- - "{{ role_path }}/templates/kafka.server.properties.j2"
- - "{{ node_custom_config }}/kafka.server.properties"
- - "{{ node_custom_config }}/{{ item.key }}/{{ inventory_hostname }}/kafka.server.properties"
- dest: "{{ node_config_directory }}/{{ item.key }}/kafka.server.properties"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ kafka_services }}"
- notify:
- - Restart kafka container
diff --git a/ansible/roles/kafka/tasks/precheck.yml b/ansible/roles/kafka/tasks/precheck.yml
deleted file mode 100644
index a6f76ca84b..0000000000
--- a/ansible/roles/kafka/tasks/precheck.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- import_role:
- name: service-precheck
- vars:
- service_precheck_services: "{{ kafka_services }}"
- service_name: "{{ project_name }}"
-
-- name: Get container facts
- become: true
- kolla_container_facts:
- name:
- - kafka
- register: container_facts
-
-- name: Checking free port for Kafka
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ kafka_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - container_facts['kafka'] is not defined
- - inventory_hostname in groups['kafka']
diff --git a/ansible/roles/kafka/tasks/stop.yml b/ansible/roles/kafka/tasks/stop.yml
deleted file mode 100644
index 2ede1b9cb2..0000000000
--- a/ansible/roles/kafka/tasks/stop.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- import_role:
- name: service-stop
- vars:
- project_services: "{{ kafka_services }}"
- service_name: "{{ project_name }}"
diff --git a/ansible/roles/kafka/templates/kafka.json.j2 b/ansible/roles/kafka/templates/kafka.json.j2
deleted file mode 100644
index 7f83d55595..0000000000
--- a/ansible/roles/kafka/templates/kafka.json.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "command": "/opt/kafka/bin/kafka-server-start.sh /etc/kafka/kafka.server.properties",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/kafka.server.properties",
- "dest": "/etc/kafka/kafka.server.properties",
- "owner": "kafka",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/lib/kafka",
- "owner": "kafka:kafka",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/kafka",
- "owner": "kafka:kafka",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/kafka/templates/kafka.server.properties.j2 b/ansible/roles/kafka/templates/kafka.server.properties.j2
deleted file mode 100644
index 944a7088a5..0000000000
--- a/ansible/roles/kafka/templates/kafka.server.properties.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-listeners=PLAINTEXT://{{ api_interface_address | put_address_in_context('url') }}:{{ kafka_port }}
-controlled.shutdown.enable=true
-auto.leader.rebalance.enable=true
-num.network.threads=3
-num.io.threads=8
-socket.send.buffer.bytes=102400
-socket.receive.buffer.bytes=102400
-socket.request.max.bytes=104857600
-log.dirs=/var/lib/kafka/data
-min.insync.replicas={{ kafka_broker_count if kafka_broker_count|int < 3 else 2 }}
-default.replication.factor={{ kafka_broker_count if kafka_broker_count|int < 3 else 3 }}
-num.partitions=30
-num.recovery.threads.per.data.dir=1
-offsets.topic.replication.factor={{ kafka_broker_count if kafka_broker_count|int < 3 else 3 }}
-transaction.state.log.replication.factor={{ kafka_broker_count if kafka_broker_count|int < 3 else 3 }}
-transaction.state.log.min.isr={{ kafka_broker_count if kafka_broker_count|int < 3 else 2 }}
-log.retention.hours=168
-log.segment.bytes=1073741824
-log.retention.check.interval.ms=300000
-zookeeper.connect={{ kafka_zookeeper }}
-zookeeper.connection.timeout.ms=6000
-{% if enable_monasca | bool %}
-log.message.format.version=0.9.0.0
-connections.max.idle.ms=31540000000
-{% endif %}
diff --git a/ansible/roles/kafka/vars/main.yml b/ansible/roles/kafka/vars/main.yml
deleted file mode 100644
index be887f0f73..0000000000
--- a/ansible/roles/kafka/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-project_name: "kafka"
diff --git a/ansible/roles/keystone/defaults/main.yml b/ansible/roles/keystone/defaults/main.yml
index d0710e308d..c3266e1773 100644
--- a/ansible/roles/keystone/defaults/main.yml
+++ b/ansible/roles/keystone/defaults/main.yml
@@ -14,26 +14,20 @@ keystone_services:
mode: "http"
external: false
tls_backend: "{{ keystone_enable_tls_backend }}"
- port: "{{ keystone_public_port }}"
- listen_port: "{{ keystone_public_listen_port }}"
- backend_http_extra: "{{ ['balance source'] if enable_keystone_federation | bool else [] }}"
+ port: "{{ keystone_internal_port }}"
+ listen_port: "{{ keystone_internal_listen_port }}"
+ backend_http_extra:
+ - "balance {{ 'source' if enable_keystone_federation | bool else 'roundrobin' }}"
keystone_external:
enabled: "{{ enable_keystone }}"
mode: "http"
external: true
+ external_fqdn: "{{ keystone_external_fqdn }}"
tls_backend: "{{ keystone_enable_tls_backend }}"
port: "{{ keystone_public_port }}"
listen_port: "{{ keystone_public_listen_port }}"
- backend_http_extra: "{{ ['balance source'] if enable_keystone_federation | bool else [] }}"
- # NOTE(yoctozepto): Admin port settings are kept only for upgrade compatibility.
- # TODO(yoctozepto): Remove after Zed.
- keystone_admin:
- enabled: "{{ enable_keystone and kolla_action == 'upgrade' }}"
- mode: "http"
- external: false
- tls_backend: "{{ keystone_enable_tls_backend }}"
- port: "{{ keystone_admin_port }}"
- listen_port: "{{ keystone_admin_listen_port }}"
+ backend_http_extra:
+ - "balance {{ 'source' if enable_keystone_federation | bool else 'roundrobin' }}"
keystone-ssh:
container_name: "keystone_ssh"
group: "keystone"
@@ -61,6 +55,13 @@ keystone_services:
dimensions: "{{ keystone_fernet_dimensions }}"
healthcheck: "{{ keystone_fernet_healthcheck }}"
+####################
+# Config Validate
+####################
+keystone_config_validation:
+ - generator: "/keystone/config-generator/keystone.conf"
+ config: "/etc/keystone/keystone.conf"
+
####################
# Database
####################
@@ -94,15 +95,15 @@ keystone_groupname: "keystone"
####################
keystone_tag: "{{ openstack_tag }}"
-keystone_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/keystone"
+keystone_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}keystone"
keystone_service_tag: "{{ keystone_tag }}"
keystone_image_full: "{{ keystone_image }}:{{ keystone_service_tag }}"
-keystone_fernet_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/keystone-fernet"
+keystone_fernet_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}keystone-fernet"
keystone_fernet_tag: "{{ keystone_tag }}"
keystone_fernet_image_full: "{{ keystone_fernet_image }}:{{ keystone_fernet_tag }}"
-keystone_ssh_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/keystone-ssh"
+keystone_ssh_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}keystone-ssh"
keystone_ssh_tag: "{{ keystone_tag }}"
keystone_ssh_image_full: "{{ keystone_ssh_image }}:{{ keystone_ssh_tag }}"
@@ -153,7 +154,7 @@ keystone_default_volumes:
- "{{ node_config_directory }}/keystone/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "{{ kolla_dev_repos_directory ~ '/keystone/keystone:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/keystone' if keystone_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/keystone:/dev-mode/keystone' if keystone_dev_mode | bool else '' }}"
- "kolla_logs:/var/log/kolla/"
- "keystone_fernet_tokens:/etc/keystone/fernet-keys"
@@ -199,15 +200,11 @@ keystone_service_endpoints:
- {'interface': 'internal', 'url': '{{ keystone_internal_url }}'}
- {'interface': 'public', 'url': '{{ keystone_public_url }}'}
-# TODO(yoctozepto): Remove admin_endpoint leftovers in Antelope (2023.1).
-keystone_service_admin_endpoint: {'interface': 'admin', 'url': '{{ keystone_internal_url }}'}
-keystone_create_admin_endpoint: false
-
keystone_ks_services:
- name: "keystone"
type: "identity"
description: "Openstack Identity Service"
- endpoints: "{{ keystone_service_endpoints + ([keystone_service_admin_endpoint] if kolla_action == 'upgrade' or keystone_create_admin_endpoint | bool else []) }}"
+ endpoints: "{{ keystone_service_endpoints }}"
####################
# TLS
@@ -226,15 +223,31 @@ keystone_host_federation_oidc_metadata_folder: "{{ node_config_directory }}/keys
keystone_host_federation_oidc_idp_certificate_folder: "{{ node_config_directory }}/keystone/federation/oidc/cert"
keystone_host_federation_oidc_attribute_mappings_folder: "{{ node_config_directory }}/keystone/federation/oidc/attribute_maps"
keystone_federation_oidc_jwks_uri: ""
+keystone_federation_oidc_additional_options: {}
# These variables are used to define multiple trusted Horizon dashboards.
# keystone_trusted_dashboards: ['', '', '']
-keystone_trusted_dashboards: "{{ ['%s://%s/auth/websso/' % (public_protocol, kolla_external_fqdn), '%s/auth/websso/' % (horizon_public_endpoint)] if enable_horizon | bool else [] }}"
+horizon_trusted_dashboards: "{{ ['%s://%s/auth/websso/' % (public_protocol, kolla_external_fqdn), '%s/auth/websso/' % (horizon_public_endpoint)] if enable_horizon | bool else [] }}"
+skyline_trusted_dashboards: "{{ ['%s/api/openstack/skyline/api/v1/websso' % (skyline_console_public_endpoint)] if enable_skyline | bool else [] }}"
+keystone_trusted_dashboards: "{{ horizon_trusted_dashboards + skyline_trusted_dashboards }}"
keystone_enable_federation_openid: "{{ enable_keystone_federation | bool and keystone_identity_providers | selectattr('protocol', 'equalto', 'openid') | list | count > 0 }}"
keystone_should_remove_attribute_mappings: False
keystone_should_remove_identity_providers: False
keystone_federation_oidc_response_type: "id_token"
+# Can be set to any supported header, according to
+# https://github.com/OpenIDC/mod_auth_openidc/blob/ea3af872dcdbb4634a7e541c5e8c7326dafbb090/auth_openidc.conf
+# e.g. "X-Forwarded-Proto", "X-Forwarded-Port" etc.
+keystone_federation_oidc_forwarded_headers: ""
+keystone_federation_oidc_claim_delimiter: ";"
keystone_federation_oidc_scopes: "openid email profile"
# OIDC caching
keystone_oidc_enable_memcached: "{{ enable_memcached }}"
+
+# Database
+keystone_database_enable_tls_internal: "{{ database_enable_tls_internal | bool }}"
+
+###################
+# Copy certificates
+###################
+keystone_copy_certs: "{{ kolla_copy_ca_into_containers | bool or keystone_enable_tls_backend | bool }}"
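
With both Horizon and Skyline enabled, the combined `keystone_trusted_dashboards` list renders to something like the following (FQDNs and port are illustrative):

    keystone_trusted_dashboards:
      - "https://cloud.example.com/auth/websso/"                               # public_protocol + kolla_external_fqdn
      - "https://horizon.example.com/auth/websso/"                             # horizon_public_endpoint
      - "https://skyline.example.com:9999/api/openstack/skyline/api/v1/websso" # skyline_console_public_endpoint
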
diff --git a/ansible/roles/keystone/handlers/main.yml b/ansible/roles/keystone/handlers/main.yml
index 570833d2d3..c149877918 100644
--- a/ansible/roles/keystone/handlers/main.yml
+++ b/ansible/roles/keystone/handlers/main.yml
@@ -7,7 +7,7 @@
service_name: "keystone"
service: "{{ keystone_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -18,7 +18,7 @@
labels:
KOLLA_UPGRADE:
name: "init_upgrade_database"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
run_once: True
@@ -31,7 +31,7 @@
service_name: "keystone-ssh"
service: "{{ keystone_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -39,15 +39,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart keystone-fernet container
vars:
service_name: "keystone-fernet"
service: "{{ keystone_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -55,15 +53,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart keystone container
vars:
service_name: "keystone"
service: "{{ keystone_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -71,15 +67,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Finish keystone database upgrade
vars:
service_name: "keystone"
service: "{{ keystone_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -90,7 +84,7 @@
labels:
KOLLA_UPGRADE:
name: "finish_upgrade_database"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
run_once: True
diff --git a/ansible/roles/keystone/tasks/bootstrap.yml b/ansible/roles/keystone/tasks/bootstrap.yml
index 15f19d2b5b..e31041394f 100644
--- a/ansible/roles/keystone/tasks/bootstrap.yml
+++ b/ansible/roles/keystone/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating keystone database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -17,6 +18,7 @@
- name: Creating Keystone database user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
diff --git a/ansible/roles/keystone/tasks/bootstrap_service.yml b/ansible/roles/keystone/tasks/bootstrap_service.yml
index be77d44669..daefcfaebc 100644
--- a/ansible/roles/keystone/tasks/bootstrap_service.yml
+++ b/ansible/roles/keystone/tasks/bootstrap_service.yml
@@ -2,6 +2,8 @@
- name: Checking for any running keystone_fernet containers
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- keystone_fernet
register: container_facts
@@ -33,7 +35,7 @@
vars:
keystone: "{{ keystone_services.keystone }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -44,7 +46,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_keystone"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ keystone.volumes | reject('equalto', '') | list }}"
run_once: True
@@ -52,7 +54,7 @@
vars:
keystone_fernet: "{{ keystone_services['keystone-fernet'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -66,7 +68,7 @@
keystone-manage --config-file /etc/keystone/keystone.conf
fernet_setup --keystone-user {{ keystone_username }} --keystone-group {{ keystone_groupname }}'
name: "bootstrap_keystone_fernet"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ keystone_fernet.volumes | reject('equalto', '') | list }}"
run_once: True
delegate_to: "{{ groups['keystone'][0] }}"
diff --git a/ansible/roles/keystone/tasks/check-containers.yml b/ansible/roles/keystone/tasks/check-containers.yml
index 03f4ffffb7..b7e2f7c29f 100644
--- a/ansible/roles/keystone/tasks/check-containers.yml
+++ b/ansible/roles/keystone/tasks/check-containers.yml
@@ -1,17 +1,3 @@
---
-- name: Check keystone containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ keystone_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
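
# NOTE: The per-role compare_container loop is replaced by a shared
# service-check-containers role. Presumably the shared role derives the
# service dict from the calling role's project_name and performs the same
# compare-and-notify cycle; a sketch of the assumed contract:
#
#     # Assumed behaviour of the shared role (not shown in this patch):
#     # for each enabled service in "<project_name>_services" mapped to
#     # this host, compare the running container against the desired image,
#     # volumes, dimensions and healthcheck, and notify the matching
#     # "Restart <name> container" handler on drift.
#     - import_role:
#         name: service-check-containers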
diff --git a/ansible/roles/keystone/tasks/config-federation-oidc.yml b/ansible/roles/keystone/tasks/config-federation-oidc.yml
index 81384931d0..51cd41d4e1 100644
--- a/ansible/roles/keystone/tasks/config-federation-oidc.yml
+++ b/ansible/roles/keystone/tasks/config-federation-oidc.yml
@@ -28,11 +28,11 @@
when:
- inventory_hostname in groups[keystone.group]
-- name: Copying OpenID Identity Providers metadata
+- name: Templating OpenID Identity Providers metadata
vars:
keystone: "{{ keystone_services['keystone'] }}"
become: true
- copy:
+ template:
src: "{{ item.metadata_folder }}/"
dest: "{{ keystone_host_federation_oidc_metadata_folder }}"
mode: "0660"
@@ -55,11 +55,11 @@
- item.certificate_file is defined
- inventory_hostname in groups[keystone.group]
-- name: Copying OpenStack Identity Providers attribute mappings
+- name: Templating OpenStack Identity Providers attribute mappings
vars:
keystone: "{{ keystone_services['keystone'] }}"
become: true
- copy:
+ template:
src: "{{ item.file }}"
dest: "{{ keystone_host_federation_oidc_attribute_mappings_folder }}/{{ item.file | basename }}"
mode: "0660"
diff --git a/ansible/roles/keystone/tasks/config.yml b/ansible/roles/keystone/tasks/config.yml
index 53c1d9cfc3..4b2a2a63b5 100644
--- a/ansible/roles/keystone/tasks/config.yml
+++ b/ansible/roles/keystone/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ keystone_services }}"
+ with_dict: "{{ keystone_services | select_services_enabled_and_mapped_to_host }}"
- name: Check if policies shall be overwritten
stat:
@@ -40,7 +37,7 @@
- include_tasks: copy-certs.yml
when:
- - kolla_copy_ca_into_containers | bool or keystone_enable_tls_backend | bool
+ - keystone_copy_certs
- name: Copying over config.json files for services
template:
@@ -48,12 +45,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- with_dict: "{{ keystone_services }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ keystone_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over keystone.conf
vars:
@@ -68,39 +60,30 @@
dest: "{{ node_config_directory }}/{{ item.key }}/keystone.conf"
mode: "0660"
become: true
- with_dict: "{{ keystone_services }}"
+ with_dict: "{{ keystone_services | select_services_enabled_and_mapped_to_host }}"
when:
- - inventory_hostname in groups[item.value.group]
- item.key in [ "keystone", "keystone-fernet" ]
- - item.value.enabled | bool
- notify:
- - Restart {{ item.key }} container
- name: Copying keystone-startup script for keystone
vars:
- keystone: "{{ keystone_services['keystone'] }}"
+ service: "{{ keystone_services['keystone'] }}"
template:
src: "keystone-startup.sh.j2"
dest: "{{ node_config_directory }}/keystone/keystone-startup.sh"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[keystone.group]
- - keystone.enabled | bool
- notify:
- - Restart keystone container
+ when: service | service_enabled_and_mapped_to_host
- name: Create Keystone domain-specific config directory
vars:
- keystone: "{{ keystone_services.keystone }}"
+ service: "{{ keystone_services['keystone'] }}"
file:
dest: "{{ node_config_directory }}/keystone/domains/"
state: "directory"
mode: "0770"
become: true
when:
- - inventory_hostname in groups[keystone.group]
- - keystone.enabled | bool
+ - service | service_enabled_and_mapped_to_host
- keystone_domain_directory.stat.exists
- name: Get file list in custom domains folder
@@ -114,7 +97,7 @@
- name: Copying Keystone Domain specific settings
vars:
- keystone: "{{ keystone_services.keystone }}"
+ service: "{{ keystone_services['keystone'] }}"
template:
src: "{{ item.path }}"
dest: "{{ node_config_directory }}/keystone/domains/"
@@ -122,12 +105,9 @@
become: true
register: keystone_domains
when:
- - inventory_hostname in groups[keystone.group]
- - keystone.enabled | bool
+ - service | service_enabled_and_mapped_to_host
- keystone_domain_directory.stat.exists
with_items: "{{ keystone_domains.files | default([]) }}"
- notify:
- - Restart keystone container
- name: Copying over existing policy file
template:
@@ -136,13 +116,9 @@
mode: "0660"
become: true
when:
- - inventory_hostname in groups[item.value.group]
- item.key in [ "keystone", "keystone-fernet" ]
- - item.value.enabled | bool
- keystone_policy_file is defined
- with_dict: "{{ keystone_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ keystone_services | select_services_enabled_and_mapped_to_host }}"
- include_tasks: config-federation-oidc.yml
when:
@@ -150,47 +126,39 @@
- name: Copying over wsgi-keystone.conf
vars:
- keystone: "{{ keystone_services.keystone }}"
+ service: "{{ keystone_services['keystone'] }}"
template:
src: "{{ item }}"
dest: "{{ node_config_directory }}/keystone/wsgi-keystone.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[keystone.group]
- - keystone.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_first_found:
- "{{ node_custom_config }}/keystone/{{ inventory_hostname }}/wsgi-keystone.conf"
- "{{ node_custom_config }}/keystone/wsgi-keystone.conf"
- "wsgi-keystone.conf.j2"
- notify:
- - Restart keystone container
- name: Checking whether keystone-paste.ini file exists
vars:
- keystone: "{{ keystone_services.keystone }}"
+ service: "{{ keystone_services['keystone'] }}"
stat:
path: "{{ node_custom_config }}/keystone/keystone-paste.ini"
delegate_to: localhost
run_once: True
register: check_keystone_paste_ini
- when:
- - keystone.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over keystone-paste.ini
vars:
- keystone: "{{ keystone_services.keystone }}"
+ service: "{{ keystone_services['keystone'] }}"
template:
src: "{{ node_custom_config }}/keystone/keystone-paste.ini"
dest: "{{ node_config_directory }}/keystone/keystone-paste.ini"
mode: "0660"
become: true
when:
- - inventory_hostname in groups[keystone.group]
- - keystone.enabled | bool
+ - service | service_enabled_and_mapped_to_host
- check_keystone_paste_ini.stat.exists
- notify:
- - Restart keystone container
- name: Generate the required cron jobs for the node
command: >
@@ -199,7 +167,9 @@
-i {{ groups['keystone'].index(inventory_hostname) }}
-n {{ (groups['keystone'] | length) }}
changed_when: false
+ check_mode: false
register: cron_jobs_json
+ connection: local
delegate_to: localhost
- name: Set fact with the generated cron jobs for building the crontab later
@@ -209,7 +179,7 @@
- name: Copying files for keystone-fernet
vars:
- keystone_fernet: "{{ keystone_services['keystone-fernet'] }}"
+ service: "{{ keystone_services['keystone-fernet'] }}"
template:
src: "{{ item.src }}"
dest: "{{ node_config_directory }}/keystone-fernet/{{ item.dest }}"
@@ -224,15 +194,11 @@
- { src: "fernet-healthcheck.sh.j2", dest: "fernet-healthcheck.sh" }
- { src: "id_rsa", dest: "id_rsa" }
- { src: "ssh_config.j2", dest: "ssh_config" }
- when:
- - inventory_hostname in groups[keystone_fernet.group]
- - keystone_fernet.enabled | bool
- notify:
- - Restart keystone-fernet container
+ when: service | service_enabled_and_mapped_to_host
- name: Copying files for keystone-ssh
vars:
- keystone_ssh: "{{ keystone_services['keystone-ssh'] }}"
+ service: "{{ keystone_services['keystone-ssh'] }}"
template:
src: "{{ item.src }}"
dest: "{{ node_config_directory }}/keystone-ssh/{{ item.dest }}"
@@ -241,8 +207,4 @@
with_items:
- { src: "sshd_config.j2", dest: "sshd_config" }
- { src: "id_rsa.pub", dest: "id_rsa.pub" }
- when:
- - inventory_hostname in groups[keystone_ssh.group]
- - keystone_ssh.enabled | bool
- notify:
- - Restart keystone-ssh container
+ when: service | service_enabled_and_mapped_to_host
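
# NOTE: Throughout config.yml the repeated two-line guard is folded into a
# pair of custom filters. The before/after shape, reconstructed from the
# hunks above:
#
#     # Before: every task carried the same conditions.
#     with_dict: "{{ keystone_services }}"
#     when:
#       - inventory_hostname in groups[item.value.group]
#       - item.value.enabled | bool
#
#     # After: filter the dict once, or test a single service directly.
#     with_dict: "{{ keystone_services | select_services_enabled_and_mapped_to_host }}"
#     when: service | service_enabled_and_mapped_to_host
#
# The dropped `notify` entries rely on the shared check-containers role to
# detect drift and restart, rather than notifying on template change.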
diff --git a/ansible/roles/keystone/tasks/config_validate.yml b/ansible/roles/keystone/tasks/config_validate.yml
new file mode 100644
index 0000000000..400463d208
--- /dev/null
+++ b/ansible/roles/keystone/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ keystone_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ keystone_config_validation }}"
diff --git a/ansible/roles/keystone/tasks/precheck.yml b/ansible/roles/keystone/tasks/precheck.yml
index 7801f2175f..3a84f86770 100644
--- a/ansible/roles/keystone/tasks/precheck.yml
+++ b/ansible/roles/keystone/tasks/precheck.yml
@@ -8,9 +8,12 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- keystone
- keystone_ssh
+ check_mode: false
register: container_facts
- name: Checking free port for Keystone Public
diff --git a/ansible/roles/keystone/tasks/register.yml b/ansible/roles/keystone/tasks/register.yml
index 5666b87395..733c3f903d 100644
--- a/ansible/roles/keystone/tasks/register.yml
+++ b/ansible/roles/keystone/tasks/register.yml
@@ -3,7 +3,7 @@
become: true
command: >
{{ kolla_container_engine }} exec keystone kolla_keystone_bootstrap
- {{ openstack_auth.username }} {{ openstack_auth.password }} {{ keystone_admin_project }}
+ {{ openstack_auth.username }} {{ openstack_auth.password }} {{ openstack_auth.project_name }}
admin {{ keystone_internal_url }} {{ keystone_public_url }} {{ item }}
register: keystone_bootstrap
changed_when: (keystone_bootstrap.stdout | from_json).changed
@@ -21,7 +21,8 @@
- name: Creating default user role
become: true
kolla_toolbox:
- module_name: "os_keystone_role"
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: openstack.cloud.identity_role
module_args:
name: "{{ keystone_default_user_role }}"
auth: "{{ openstack_keystone_auth }}"
diff --git a/ansible/roles/keystone/tasks/register_identity_providers.yml b/ansible/roles/keystone/tasks/register_identity_providers.yml
index 45d8540585..c5e9c166d5 100644
--- a/ansible/roles/keystone/tasks/register_identity_providers.yml
+++ b/ansible/roles/keystone/tasks/register_identity_providers.yml
@@ -7,12 +7,13 @@
--os-username={{ openstack_auth.username }}
--os-identity-api-version=3
--os-interface={{ openstack_interface }}
- --os-system-scope={{ openstack_auth.system_scope }}
+ --os-system-scope="all"
--os-user-domain-name={{ openstack_auth.user_domain_name }}
--os-region-name={{ openstack_region_name }}
{% if openstack_cacert != '' %}--os-cacert={{ openstack_cacert }} {% endif %}
mapping list -c ID --format value
run_once: True
+ changed_when: False
become: True
register: existing_mappings_register
@@ -28,9 +29,9 @@
--os-username={{ openstack_auth.username }}
--os-identity-api-version=3
--os-interface={{ openstack_interface }}
- --os-system-scope={{ openstack_auth.system_scope }}
+ --os-system-scope="all"
--os-user-domain-name={{ openstack_auth.user_domain_name }}
- --os-system-scope={{ openstack_auth.system_scope }}
+ --os-system-scope="all"
--os-region-name={{ openstack_region_name }}
{% if openstack_cacert != '' %}--os-cacert={{ openstack_cacert }} {% endif %}
mapping delete {{ item }}
@@ -44,6 +45,7 @@
- name: Create unexisting domains
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: "os_keystone_domain"
module_args:
name: "{{ item.openstack_domain }}"
@@ -63,7 +65,7 @@
--os-username={{ openstack_auth.username }}
--os-identity-api-version=3
--os-interface {{ openstack_interface }}
- --os-system-scope={{ openstack_auth.system_scope }}
+ --os-system-scope="all"
--os-user-domain-name={{ openstack_auth.user_domain_name }}
--os-region-name={{ openstack_region_name }}
{% if openstack_cacert != '' %}--os-cacert={{ openstack_cacert }} {% endif %}
@@ -84,7 +86,7 @@
--os-username={{ openstack_auth.username }}
--os-identity-api-version=3
--os-interface={{ openstack_interface }}
- --os-system-scope={{ openstack_auth.system_scope }}
+ --os-system-scope="all"
--os-user-domain-name={{ openstack_auth.user_domain_name }}
--os-region-name={{ openstack_region_name }}
{% if openstack_cacert != '' %}--os-cacert={{ openstack_cacert }} {% endif %}
@@ -105,12 +107,13 @@
--os-username={{ openstack_auth.username }}
--os-identity-api-version=3
--os-interface={{ openstack_interface }}
- --os-system-scope={{ openstack_auth.system_scope }}
+ --os-system-scope="all"
--os-user-domain-name={{ openstack_auth.user_domain_name }}
--os-region-name={{ openstack_region_name }}
{% if openstack_cacert != '' %}--os-cacert={{ openstack_cacert }} {% endif %}
identity provider list -c ID --format value
run_once: True
+ changed_when: False
register: existing_idps_register
- name: Register existing idps
@@ -126,7 +129,7 @@
--os-username={{ openstack_auth.username }}
--os-identity-api-version=3
--os-interface={{ openstack_interface }}
- --os-system-scope={{ openstack_auth.system_scope }}
+ --os-system-scope="all"
--os-user-domain-name={{ openstack_auth.user_domain_name }}
--os-region-name={{ openstack_region_name }}
{% if openstack_cacert != '' %}--os-cacert={{ openstack_cacert }}{% endif %}
@@ -146,7 +149,7 @@
--os-username={{ openstack_auth.username }}
--os-identity-api-version=3
--os-interface={{ openstack_interface }}
- --os-system-scope={{ openstack_auth.system_scope }}
+ --os-system-scope="all"
--os-user-domain-name={{ openstack_auth.user_domain_name }}
--os-region-name={{ openstack_region_name }}
{% if openstack_cacert != '' %}--os-cacert={{ openstack_cacert }}{% endif %}
@@ -169,7 +172,7 @@
--os-username={{ openstack_auth.username }}
--os-identity-api-version=3
--os-interface {{ openstack_interface }}
- --os-system-scope {{ openstack_auth.system_scope }}
+ --os-system-scope "all"
--os-user-domain-name {{ openstack_auth.user_domain_name }}
--os-region-name {{ openstack_region_name }}
{% if openstack_cacert != '' %}--os-cacert {{ openstack_cacert }} {% endif %}
@@ -191,7 +194,7 @@
--os-username={{ openstack_auth.username }}
--os-identity-api-version=3
--os-interface={{ openstack_interface }}
- --os-system-scope={{ openstack_auth.system_scope }}
+ --os-system-scope="all"
--os-user-domain-name={{ openstack_auth.user_domain_name }}
--os-region-name={{ openstack_region_name }}
{% if openstack_cacert != '' %}--os-cacert={{ openstack_cacert }}{% endif %}
@@ -213,7 +216,7 @@
--os-username={{ openstack_auth.username }}
--os-identity-api-version=3
--os-interface={{ openstack_interface }}
- --os-system-scope={{ openstack_auth.system_scope }}
+ --os-system-scope="all"
--os-user-domain-name={{ openstack_auth.user_domain_name }}
--os-region-name={{ openstack_region_name }}
{% if openstack_cacert != '' %}--os-cacert={{ openstack_cacert }}{% endif %}
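
# NOTE: Two patterns recur in this file: the system scope is pinned to the
# literal "all" instead of `openstack_auth.system_scope`, and the read-only
# `mapping list` / `identity provider list` commands gain
# `changed_when: False` so they stop reporting spurious changes. A sketch of
# the list-task pattern, with the long option list elided:
#
#     # Read-only CLI call: never report "changed" (auth options elided).
#     - name: List existing identity providers
#       command: openstack identity provider list -c ID --format value
#       changed_when: false
#       run_once: true
#       register: existing_idps_register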
diff --git a/ansible/roles/keystone/tasks/upgrade.yml b/ansible/roles/keystone/tasks/upgrade.yml
index cd387b787a..bbe2ca9338 100644
--- a/ansible/roles/keystone/tasks/upgrade.yml
+++ b/ansible/roles/keystone/tasks/upgrade.yml
@@ -6,6 +6,7 @@
- name: Enable log_bin_trust_function_creators function
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_variables
module_args:
login_host: "{{ database_address }}"
@@ -35,6 +36,7 @@
- name: Disable log_bin_trust_function_creators function
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_variables
module_args:
login_host: "{{ database_address }}"
@@ -46,14 +48,3 @@
run_once: True
when:
- not use_preconfigured_databases | bool
-
-# TODO(yoctozepto): Remove after Zed (in AA).
-# This is needed to update the admin endpoint as the port has
-# changed in the same release (Zed), i.e., the admin endpoint uses the
-# same port as the other ones (public, internal).
-- import_role:
- name: service-ks-register
- vars:
- service_ks_register_auth: "{{ openstack_keystone_auth }}"
- service_ks_register_services: "{{ keystone_ks_services }}"
- run_once: True
diff --git a/ansible/roles/keystone/templates/keystone-fernet.json.j2 b/ansible/roles/keystone/templates/keystone-fernet.json.j2
index 208e0dd922..695cad20a7 100644
--- a/ansible/roles/keystone/templates/keystone-fernet.json.j2
+++ b/ansible/roles/keystone/templates/keystone-fernet.json.j2
@@ -55,6 +55,12 @@
"dest": "/usr/bin/fernet-healthcheck.sh",
"owner": "root",
"perm": "0755"
+ }{% endif %}{% if keystone_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/keystone/templates/keystone-ssh.json.j2 b/ansible/roles/keystone/templates/keystone-ssh.json.j2
index d2b5edb415..96870348d4 100644
--- a/ansible/roles/keystone/templates/keystone-ssh.json.j2
+++ b/ansible/roles/keystone/templates/keystone-ssh.json.j2
@@ -12,7 +12,13 @@
"dest": "/var/lib/keystone/.ssh/authorized_keys",
"owner": "keystone",
"perm": "0600"
- }
+ }{% if keystone_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/keystone/templates/keystone.conf.j2 b/ansible/roles/keystone/templates/keystone.conf.j2
index 92f317ab32..614a9eab08 100644
--- a/ansible/roles/keystone/templates/keystone.conf.j2
+++ b/ansible/roles/keystone/templates/keystone.conf.j2
@@ -16,7 +16,7 @@ policy_file = {{ keystone_policy_file }}
{% endif %}
[database]
-connection = mysql+pymysql://{{ keystone_database_user }}:{{ keystone_database_password }}@{{ keystone_database_address }}/{{ keystone_database_name }}
+connection = mysql+pymysql://{{ keystone_database_user }}:{{ keystone_database_password }}@{{ keystone_database_address }}/{{ keystone_database_name }}{{ '?ssl_ca=' ~ openstack_cacert if keystone_database_enable_tls_internal | bool }}
connection_recycle_time = {{ database_connection_recycle_time }}
max_pool_size = {{ database_max_pool_size }}
max_retries = -1
@@ -59,11 +59,18 @@ topics = {{ keystone_enabled_notification_topics | map(attribute='name') | join(
driver = noop
{% endif %}
-{% if om_enable_rabbitmq_tls | bool %}
[oslo_messaging_rabbit]
+heartbeat_in_pthread = {{ service_name == 'keystone' }}
+{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
{% if enable_osprofiler | bool %}
[profiler]
@@ -78,7 +85,7 @@ connection_string = {{ osprofiler_backend_connection_string }}
allowed_origin = {{ grafana_public_endpoint }}
{% endif %}
-{% if enable_keystone_federation %}
+{% if enable_keystone_federation | bool %}
[federation]
{% for dashboard in keystone_trusted_dashboards %}
trusted_dashboard = {{ dashboard }}
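
# NOTE: The template now always renders [oslo_messaging_rabbit] (so
# `heartbeat_in_pthread` is set unconditionally) and adds durable and quorum
# queue toggles. A hedged globals.yml example; typically only one of the two
# queue modes is enabled, since quorum queues are durable by design:
#
#     # Operator overrides (globals.yml) driving the new template branches:
#     om_enable_rabbitmq_quorum_queues: true
#     om_enable_rabbitmq_high_availability: false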
diff --git a/ansible/roles/keystone/templates/keystone.json.j2 b/ansible/roles/keystone/templates/keystone.json.j2
index d4973a9ecf..05c435773d 100644
--- a/ansible/roles/keystone/templates/keystone.json.j2
+++ b/ansible/roles/keystone/templates/keystone.json.j2
@@ -67,8 +67,13 @@
"owner": "{{ apache_user }}:{{ apache_user }}",
"perm": "0600",
"merge": true
- }
- {% endif %}
+ }{% endif %}{% if keystone_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
@@ -78,7 +83,7 @@
{
"path": "/var/log/kolla/keystone/keystone.log",
"owner": "keystone:keystone"
- },{% if keystone_enable_federation_openid %}
+ },{% if keystone_enable_federation_openid | bool %}
{
"path": "{{ keystone_container_federation_oidc_metadata_folder }}",
"owner": "{{ apache_user }}:{{ apache_user }}",
diff --git a/ansible/roles/keystone/templates/wsgi-keystone.conf.j2 b/ansible/roles/keystone/templates/wsgi-keystone.conf.j2
index a78266bd77..8275b8b917 100644
--- a/ansible/roles/keystone/templates/wsgi-keystone.conf.j2
+++ b/ansible/roles/keystone/templates/wsgi-keystone.conf.j2
@@ -8,11 +8,6 @@ LoadModule ssl_module /usr/lib/apache2/modules/mod_ssl.so
{% endif %}
{% endif %}
Listen {{ api_interface_address | put_address_in_context('url') }}:{{ keystone_public_listen_port }}
-{% if kolla_action == 'upgrade' %}
-# NOTE(yoctozepto): Admin port settings are kept only for upgrade compatibility.
-# TODO(yoctozepto): Remove after Zed.
-Listen {{ api_interface_address | put_address_in_context('url') }}:{{ keystone_admin_listen_port }}
-{% endif %}
ServerSignature Off
ServerTokens Prod
@@ -62,9 +57,10 @@ LogLevel info
SSLCertificateKeyFile /etc/keystone/certs/keystone-key.pem
{% endif -%}
-{% if keystone_enable_federation_openid %}
+{% if keystone_enable_federation_openid | bool %}
+ OIDCXForwardedHeaders "{{ keystone_federation_oidc_forwarded_headers }}"
OIDCClaimPrefix "OIDC-"
- OIDCClaimDelimiter ";"
+ OIDCClaimDelimiter "{{ keystone_federation_oidc_claim_delimiter }}"
OIDCResponseType "{{ keystone_federation_oidc_response_type }}"
OIDCScope "{{ keystone_federation_oidc_scopes }}"
OIDCMetadataDir {{ keystone_container_federation_oidc_metadata_folder }}
@@ -80,6 +76,9 @@ LogLevel info
OIDCCacheType memcache
OIDCMemCacheServers "{% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %} {% endif %}{% endfor %}"
{% endif %}
+{% for key, value in keystone_federation_oidc_additional_options.items() %}
+ {{ key }} {{ value }}
+{% endfor %}
Require valid-user
@@ -116,26 +115,3 @@ LogLevel info
{% endif %}
-{% if kolla_action == 'upgrade' %}
-# NOTE(yoctozepto): Admin port settings are kept only for upgrade compatibility.
-# TODO(yoctozepto): Remove after Zed.
-<VirtualHost {{ api_interface_address | put_address_in_context('url') }}:{{ keystone_admin_listen_port }}>
-    WSGIDaemonProcess keystone-admin processes={{ keystone_api_workers }} threads=1 user=keystone group=keystone display-name=keystone-admin
-    WSGIProcessGroup keystone-admin
-    WSGIScriptAlias / {{ binary_path }}/keystone-wsgi-admin
-    WSGIApplicationGroup %{GLOBAL}
-    WSGIPassAuthorization On
-    <IfVersion >= 2.4>
-      ErrorLogFormat "%{cu}t %M"
-    </IfVersion>
-    ErrorLog "{{ keystone_log_dir }}/keystone-apache-admin-error.log"
-    LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
-    CustomLog "{{ keystone_log_dir }}/keystone-apache-admin-access.log" logformat
-
-{% if keystone_enable_tls_backend | bool %}
-    SSLEngine on
-    SSLCertificateFile /etc/keystone/certs/keystone-cert.pem
-    SSLCertificateKeyFile /etc/keystone/certs/keystone-key.pem
-{% endif %}
-</VirtualHost>
-{% endif %}
diff --git a/ansible/roles/kibana/defaults/main.yml b/ansible/roles/kibana/defaults/main.yml
deleted file mode 100644
index 9f4935dca0..0000000000
--- a/ansible/roles/kibana/defaults/main.yml
+++ /dev/null
@@ -1,63 +0,0 @@
----
-kibana_services:
- kibana:
- container_name: "kibana"
- image: "{{ kibana_image_full }}"
- enabled: true
- group: "kibana"
- volumes: "{{ kibana_default_volumes + kibana_extra_volumes }}"
- dimensions: "{{ kibana_dimensions }}"
- healthcheck: "{{ kibana_healthcheck }}"
- haproxy:
- kibana:
- enabled: "{{ enable_kibana }}"
- mode: "http"
- external: false
- port: "{{ kibana_server_port }}"
- auth_user: "{{ kibana_user }}"
- auth_pass: "{{ kibana_password }}"
- kibana_external:
- enabled: "{{ enable_kibana_external | bool }}"
- mode: "http"
- external: true
- port: "{{ kibana_server_port }}"
- auth_user: "{{ kibana_user }}"
- auth_pass: "{{ kibana_password }}"
-
-
-####################
-# Kibana
-####################
-kibana_default_app_id: "discover"
-kibana_elasticsearch_request_timeout: 300000
-kibana_elasticsearch_shard_timeout: 0
-kibana_elasticsearch_ssl_verify: true
-
-
-####################
-# Docker
-####################
-kibana_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/kibana"
-kibana_tag: "{{ openstack_tag }}"
-kibana_image_full: "{{ kibana_image }}:{{ kibana_tag }}"
-kibana_dimensions: "{{ default_container_dimensions }}"
-
-kibana_enable_healthchecks: "{{ enable_container_healthchecks }}"
-kibana_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-kibana_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-kibana_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-kibana_healthcheck_test: ["CMD-SHELL", "healthcheck_curl http://{{ api_interface_address | put_address_in_context('url') }}:{{ kibana_server_port }}"]
-kibana_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-kibana_healthcheck:
- interval: "{{ kibana_healthcheck_interval }}"
- retries: "{{ kibana_healthcheck_retries }}"
- start_period: "{{ kibana_healthcheck_start_period }}"
- test: "{% if kibana_enable_healthchecks | bool %}{{ kibana_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ kibana_healthcheck_timeout }}"
-
-kibana_default_volumes:
- - "{{ node_config_directory }}/kibana/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla/"
-kibana_extra_volumes: "{{ default_extra_volumes }}"
diff --git a/ansible/roles/kibana/files/kibana-6-index.json b/ansible/roles/kibana/files/kibana-6-index.json
deleted file mode 100644
index 08e61bb0d1..0000000000
--- a/ansible/roles/kibana/files/kibana-6-index.json
+++ /dev/null
@@ -1,264 +0,0 @@
-{
- "settings" : {
- "number_of_shards" : 1,
- "index.mapper.dynamic": false
- },
- "mappings" : {
- "doc": {
- "properties": {
- "type": {
- "type": "keyword"
- },
- "updated_at": {
- "type": "date"
- },
- "config": {
- "properties": {
- "buildNum": {
- "type": "keyword"
- }
- }
- },
- "index-pattern": {
- "properties": {
- "fieldFormatMap": {
- "type": "text"
- },
- "fields": {
- "type": "text"
- },
- "intervalName": {
- "type": "keyword"
- },
- "notExpandable": {
- "type": "boolean"
- },
- "sourceFilters": {
- "type": "text"
- },
- "timeFieldName": {
- "type": "keyword"
- },
- "title": {
- "type": "text"
- }
- }
- },
- "visualization": {
- "properties": {
- "description": {
- "type": "text"
- },
- "kibanaSavedObjectMeta": {
- "properties": {
- "searchSourceJSON": {
- "type": "text"
- }
- }
- },
- "savedSearchId": {
- "type": "keyword"
- },
- "title": {
- "type": "text"
- },
- "uiStateJSON": {
- "type": "text"
- },
- "version": {
- "type": "integer"
- },
- "visState": {
- "type": "text"
- }
- }
- },
- "search": {
- "properties": {
- "columns": {
- "type": "keyword"
- },
- "description": {
- "type": "text"
- },
- "hits": {
- "type": "integer"
- },
- "kibanaSavedObjectMeta": {
- "properties": {
- "searchSourceJSON": {
- "type": "text"
- }
- }
- },
- "sort": {
- "type": "keyword"
- },
- "title": {
- "type": "text"
- },
- "version": {
- "type": "integer"
- }
- }
- },
- "dashboard": {
- "properties": {
- "description": {
- "type": "text"
- },
- "hits": {
- "type": "integer"
- },
- "kibanaSavedObjectMeta": {
- "properties": {
- "searchSourceJSON": {
- "type": "text"
- }
- }
- },
- "optionsJSON": {
- "type": "text"
- },
- "panelsJSON": {
- "type": "text"
- },
- "refreshInterval": {
- "properties": {
- "display": {
- "type": "keyword"
- },
- "pause": {
- "type": "boolean"
- },
- "section": {
- "type": "integer"
- },
- "value": {
- "type": "integer"
- }
- }
- },
- "timeFrom": {
- "type": "keyword"
- },
- "timeRestore": {
- "type": "boolean"
- },
- "timeTo": {
- "type": "keyword"
- },
- "title": {
- "type": "text"
- },
- "uiStateJSON": {
- "type": "text"
- },
- "version": {
- "type": "integer"
- }
- }
- },
- "url": {
- "properties": {
- "accessCount": {
- "type": "long"
- },
- "accessDate": {
- "type": "date"
- },
- "createDate": {
- "type": "date"
- },
- "url": {
- "type": "text",
- "fields": {
- "keyword": {
- "type": "keyword",
- "ignore_above": 2048
- }
- }
- }
- }
- },
- "server": {
- "properties": {
- "uuid": {
- "type": "keyword"
- }
- }
- },
- "timelion-sheet": {
- "properties": {
- "description": {
- "type": "text"
- },
- "hits": {
- "type": "integer"
- },
- "kibanaSavedObjectMeta": {
- "properties": {
- "searchSourceJSON": {
- "type": "text"
- }
- }
- },
- "timelion_chart_height": {
- "type": "integer"
- },
- "timelion_columns": {
- "type": "integer"
- },
- "timelion_interval": {
- "type": "keyword"
- },
- "timelion_other_interval": {
- "type": "keyword"
- },
- "timelion_rows": {
- "type": "integer"
- },
- "timelion_sheet": {
- "type": "text"
- },
- "title": {
- "type": "text"
- },
- "version": {
- "type": "integer"
- }
- }
- },
- "graph-workspace": {
- "properties": {
- "description": {
- "type": "text"
- },
- "kibanaSavedObjectMeta": {
- "properties": {
- "searchSourceJSON": {
- "type": "text"
- }
- }
- },
- "numLinks": {
- "type": "integer"
- },
- "numVertices": {
- "type": "integer"
- },
- "title": {
- "type": "text"
- },
- "version": {
- "type": "integer"
- },
- "wsState": {
- "type": "text"
- }
- }
- }
- }
- }
- }
-}
diff --git a/ansible/roles/kibana/handlers/main.yml b/ansible/roles/kibana/handlers/main.yml
deleted file mode 100644
index 28b5c9aef7..0000000000
--- a/ansible/roles/kibana/handlers/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: Restart kibana container
- vars:
- service_name: "kibana"
- service: "{{ kibana_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/kibana/tasks/check-containers.yml b/ansible/roles/kibana/tasks/check-containers.yml
deleted file mode 100644
index 87bafdbb72..0000000000
--- a/ansible/roles/kibana/tasks/check-containers.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- name: Check kibana containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ kibana_services }}"
- notify:
- - "Restart {{ item.key }} container"
diff --git a/ansible/roles/kibana/tasks/config.yml b/ansible/roles/kibana/tasks/config.yml
deleted file mode 100644
index 6623f356cb..0000000000
--- a/ansible/roles/kibana/tasks/config.yml
+++ /dev/null
@@ -1,48 +0,0 @@
----
-- name: Ensuring kibana config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item.key }}"
- state: "directory"
- owner: "{{ config_owner_user }}"
- group: "{{ config_owner_group }}"
- mode: "0770"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ kibana_services }}"
-
-- include_tasks: copy-certs.yml
- when:
- - kolla_copy_ca_into_containers | bool
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item.key }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ kibana_services }}"
- notify:
- - Restart kibana container
-
-- name: Copying over kibana configuration file
- vars:
- kibana: "{{ kibana_services.kibana }}"
- template:
- src: "{{ item }}"
- dest: "{{ node_config_directory }}/kibana/kibana.yml"
- mode: "0660"
- become: true
- with_first_found:
- - "{{ node_custom_config }}/kibana/{{ inventory_hostname }}/kibana.yml"
- - "{{ node_custom_config }}/kibana/kibana.yml"
- - "kibana.yml.j2"
- when:
- - inventory_hostname in groups[kibana.group]
- - kibana.enabled | bool
- notify:
- - Restart kibana container
diff --git a/ansible/roles/kibana/tasks/copy-certs.yml b/ansible/roles/kibana/tasks/copy-certs.yml
deleted file mode 100644
index ab73c673a9..0000000000
--- a/ansible/roles/kibana/tasks/copy-certs.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: "Copy certificates and keys for {{ project_name }}"
- import_role:
- role: service-cert-copy
- vars:
- project_services: "{{ kibana_services }}"
diff --git a/ansible/roles/kibana/tasks/deploy.yml b/ansible/roles/kibana/tasks/deploy.yml
deleted file mode 100644
index 49edff81e3..0000000000
--- a/ansible/roles/kibana/tasks/deploy.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-- name: Flush handlers
- meta: flush_handlers
diff --git a/ansible/roles/kibana/tasks/loadbalancer.yml b/ansible/roles/kibana/tasks/loadbalancer.yml
deleted file mode 100644
index 608ef559d0..0000000000
--- a/ansible/roles/kibana/tasks/loadbalancer.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: "Configure loadbalancer for {{ project_name }}"
- import_role:
- name: loadbalancer-config
- vars:
- project_services: "{{ kibana_services }}"
- tags: always
diff --git a/ansible/roles/kibana/tasks/precheck.yml b/ansible/roles/kibana/tasks/precheck.yml
deleted file mode 100644
index e5db1096d9..0000000000
--- a/ansible/roles/kibana/tasks/precheck.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- import_role:
- name: service-precheck
- vars:
- service_precheck_services: "{{ kibana_services }}"
- service_name: "{{ project_name }}"
-
-- name: Get container facts
- become: true
- kolla_container_facts:
- name:
- - kibana
- register: container_facts
-
-- name: Checking free port for Kibana Server
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ kibana_server_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - container_facts['kibana'] is not defined
- - inventory_hostname in groups['kibana']
diff --git a/ansible/roles/kibana/tasks/stop.yml b/ansible/roles/kibana/tasks/stop.yml
deleted file mode 100644
index 878fb7e678..0000000000
--- a/ansible/roles/kibana/tasks/stop.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- import_role:
- name: service-stop
- vars:
- project_services: "{{ kibana_services }}"
- service_name: "{{ project_name }}"
diff --git a/ansible/roles/kibana/tasks/upgrade.yml b/ansible/roles/kibana/tasks/upgrade.yml
deleted file mode 100644
index 49edff81e3..0000000000
--- a/ansible/roles/kibana/tasks/upgrade.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-- name: Flush handlers
- meta: flush_handlers
diff --git a/ansible/roles/kibana/templates/kibana.json.j2 b/ansible/roles/kibana/templates/kibana.json.j2
deleted file mode 100644
index 2ceb5493cc..0000000000
--- a/ansible/roles/kibana/templates/kibana.json.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "command": "/usr/share/kibana/bin/kibana --config /etc/kibana/kibana.yml",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/kibana.yml",
- "dest": "/etc/kibana/kibana.yml",
- "owner": "kibana",
- "perm": "0640"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/kibana",
- "owner": "kibana:kibana",
- "recurse": true
- },
- {
- "path": "/usr/share/kibana/optimize/bundles",
- "owner": "kibana:kibana",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/kibana/templates/kibana.yml.j2 b/ansible/roles/kibana/templates/kibana.yml.j2
deleted file mode 100644
index ef032a1db1..0000000000
--- a/ansible/roles/kibana/templates/kibana.yml.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-kibana.defaultAppId: "{{ kibana_default_app_id }}"
-logging.dest: /var/log/kolla/kibana/kibana.log
-server.port: {{ kibana_server_port }}
-server.host: "{{ api_interface_address }}"
-elasticsearch.hosts: "{{ elasticsearch_internal_endpoint }}"
-elasticsearch.requestTimeout: {{ kibana_elasticsearch_request_timeout }}
-elasticsearch.shardTimeout: {{ kibana_elasticsearch_shard_timeout }}
-elasticsearch.ssl.verificationMode: "{{ 'full' if kibana_elasticsearch_ssl_verify | bool else 'none' }}"
-telemetry.enabled: false
-{% if openstack_cacert | length > 0 %}
-elasticsearch.ssl.certificateAuthorities: {{ openstack_cacert }}
-{% endif %}
diff --git a/ansible/roles/kibana/vars/main.yml b/ansible/roles/kibana/vars/main.yml
deleted file mode 100644
index 6ba4733485..0000000000
--- a/ansible/roles/kibana/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-project_name: "kibana"
diff --git a/ansible/roles/kuryr/defaults/main.yml b/ansible/roles/kuryr/defaults/main.yml
index 92c60fc5bd..1476eed9ae 100644
--- a/ansible/roles/kuryr/defaults/main.yml
+++ b/ansible/roles/kuryr/defaults/main.yml
@@ -16,15 +16,21 @@ kuryr_services:
privileged: True
cap_add:
- NET_ADMIN
- volumes: "{{ kuryr_default_volumes + kuryr_extra_volumes }}"
+ volumes: "{{ kuryr_default_volumes + kuryr_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
dimensions: "{{ kuryr_dimensions }}"
healthcheck: "{{ kuryr_healthcheck }}"
+####################
+# Config Validate
+####################
+kuryr_config_validation:
+ - generator: "/kuryr/etc/kuryr-config-generator.conf"
+ config: "/etc/kuryr/kuryr.conf"
####################
# Docker
####################
-kuryr_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/kuryr-libnetwork"
+kuryr_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}kuryr-libnetwork"
kuryr_tag: "{{ openstack_tag }}"
kuryr_image_full: "{{ kuryr_image }}:{{ kuryr_tag }}"
@@ -46,10 +52,10 @@ kuryr_default_volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "/lib/modules:/lib/modules:ro"
- - "/run:/run:shared"
+ - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
- "/usr/lib/docker:/usr/lib/docker"
- - "{{ kolla_dev_repos_directory ~ '/kuryr/kuryr:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/kuryr' if kuryr_dev_mode | bool else '' }}"
- - "{{ kolla_dev_repos_directory ~ '/kuryr-libnetwork/kuryr_libnetwork:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/kuryr_libnetwork' if kuryr_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/kuryr:/dev-mode/kuryr' if kuryr_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/kuryr-libnetwork:/dev-mode/kuryr-libnetwork' if kuryr_dev_mode | bool else '' }}"
- "kolla_logs:/var/log/kolla/"
kuryr_extra_volumes: "{{ default_extra_volumes }}"
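
# NOTE: The volume list now appends an engine-specific extra list resolved
# by name, and the dev-mode bind mounts move to a generic /dev-mode prefix.
# The lookup expects one variable per supported engine; a sketch with
# illustrative values (not shown in this patch):
#
#     # Assumed group_vars entries resolved by
#     # lookup('vars', 'run_default_volumes_' + kolla_container_engine):
#     run_default_volumes_docker: []
#     run_default_volumes_podman:
#       - "/run/netns:/run/netns:shared"   # illustrative value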
diff --git a/ansible/roles/kuryr/handlers/main.yml b/ansible/roles/kuryr/handlers/main.yml
index 3b7dbac15d..aa4cad094e 100644
--- a/ansible/roles/kuryr/handlers/main.yml
+++ b/ansible/roles/kuryr/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "kuryr"
service: "{{ kuryr_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -14,5 +14,3 @@
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/kuryr/tasks/check-containers.yml b/ansible/roles/kuryr/tasks/check-containers.yml
index 7a7d5a7e95..b7e2f7c29f 100644
--- a/ansible/roles/kuryr/tasks/check-containers.yml
+++ b/ansible/roles/kuryr/tasks/check-containers.yml
@@ -1,19 +1,3 @@
---
-- name: Check kuryr containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- privileged: "{{ item.value.privileged | default(False) }}"
- cap_add: "{{ item.value.cap_add }}"
- volumes: "{{ item.value.volumes }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ kuryr_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/kuryr/tasks/config.yml b/ansible/roles/kuryr/tasks/config.yml
index c05971366a..357e5e8420 100644
--- a/ansible/roles/kuryr/tasks/config.yml
+++ b/ansible/roles/kuryr/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ kuryr_services }}"
+ with_dict: "{{ kuryr_services | select_services_enabled_and_mapped_to_host }}"
- name: Check if policies shall be overwritten
stat:
@@ -41,12 +38,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ kuryr_services }}"
- notify:
- - Restart kuryr container
+ with_dict: "{{ kuryr_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over kuryr.conf
vars:
@@ -61,12 +53,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/kuryr.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ kuryr_services }}"
- notify:
- - Restart kuryr container
+ with_dict: "{{ kuryr_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over kuryr.spec
vars:
@@ -76,13 +63,9 @@
dest: "{{ node_config_directory }}/{{ item }}/kuryr.spec"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_items:
- "kuryr"
- notify:
- - Restart kuryr container
- name: Copying over existing policy file
template:
@@ -92,8 +75,4 @@
become: true
when:
- kuryr_policy_file is defined
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ kuryr_services }}"
- notify:
- - Restart kuryr container
+ with_dict: "{{ kuryr_services | select_services_enabled_and_mapped_to_host }}"
diff --git a/ansible/roles/kuryr/tasks/config_validate.yml b/ansible/roles/kuryr/tasks/config_validate.yml
new file mode 100644
index 0000000000..9ed33e0f20
--- /dev/null
+++ b/ansible/roles/kuryr/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ kuryr_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ kuryr_config_validation }}"
diff --git a/ansible/roles/kuryr/tasks/precheck.yml b/ansible/roles/kuryr/tasks/precheck.yml
index 4597bb1c4a..31acfd6c08 100644
--- a/ansible/roles/kuryr/tasks/precheck.yml
+++ b/ansible/roles/kuryr/tasks/precheck.yml
@@ -8,8 +8,11 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- kuryr
+ check_mode: false
register: container_facts
- name: Checking free port for Kuryr
diff --git a/ansible/roles/kuryr/templates/kuryr.conf.j2 b/ansible/roles/kuryr/templates/kuryr.conf.j2
index c399740c07..2db4ae9579 100644
--- a/ansible/roles/kuryr/templates/kuryr.conf.j2
+++ b/ansible/roles/kuryr/templates/kuryr.conf.j2
@@ -3,7 +3,7 @@ kuryr_uri = {{ kuryr_internal_endpoint }}
debug = {{ kuryr_logging_debug }}
log_dir = /var/log/kolla/kuryr
-capability_scope = global
+capability_scope = local
bindir = /var/lib/kolla/venv/libexec/kuryr
[binding]
diff --git a/ansible/roles/kuryr/templates/kuryr.json.j2 b/ansible/roles/kuryr/templates/kuryr.json.j2
index bff4724a64..4ba23100ff 100644
--- a/ansible/roles/kuryr/templates/kuryr.json.j2
+++ b/ansible/roles/kuryr/templates/kuryr.json.j2
@@ -18,6 +18,12 @@
"dest": "/etc/kuryr/{{ kuryr_policy_file }}",
"owner": "kuryr",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/letsencrypt/defaults/main.yml b/ansible/roles/letsencrypt/defaults/main.yml
new file mode 100644
index 0000000000..e0287add43
--- /dev/null
+++ b/ansible/roles/letsencrypt/defaults/main.yml
@@ -0,0 +1,61 @@
+---
+letsencrypt_services:
+ letsencrypt-lego:
+ container_name: letsencrypt_lego
+ group: letsencrypt-lego
+ enabled: true
+ image: "{{ letsencrypt_lego_image_full }}"
+ volumes: "{{ letsencrypt_lego_default_volumes + letsencrypt_lego_extra_volumes }}"
+ dimensions: "{{ letsencrypt_lego_dimensions }}"
+ letsencrypt-webserver:
+ container_name: letsencrypt_webserver
+ group: letsencrypt-webserver
+ enabled: true
+ image: "{{ letsencrypt_webserver_image_full }}"
+ volumes: "{{ letsencrypt_webserver_default_volumes + letsencrypt_webserver_extra_volumes }}"
+ dimensions: "{{ letsencrypt_webserver_dimensions }}"
+
+
+##############
+# LetsEncrypt
+##############
+letsencrypt_tag: "{{ openstack_tag }}"
+letsencrypt_logging_debug: "{{ openstack_logging_debug }}"
+
+letsencrypt_lego_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}letsencrypt-lego"
+letsencrypt_lego_tag: "{{ letsencrypt_tag }}"
+letsencrypt_lego_image_full: "{{ letsencrypt_lego_image }}:{{ letsencrypt_lego_tag }}"
+
+letsencrypt_webserver_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}letsencrypt-webserver"
+letsencrypt_webserver_tag: "{{ letsencrypt_tag }}"
+letsencrypt_webserver_image_full: "{{ letsencrypt_webserver_image }}:{{ letsencrypt_webserver_tag }}"
+
+letsencrypt_lego_dimensions: "{{ default_container_dimensions }}"
+letsencrypt_webserver_dimensions: "{{ default_container_dimensions }}"
+
+letsencrypt_lego_default_volumes:
+ - "{{ node_config_directory }}/letsencrypt-lego/:{{ container_config_directory }}/:ro"
+ - "/etc/localtime:/etc/localtime:ro"
+ - "letsencrypt:/etc/letsencrypt"
+ - "kolla_logs:/var/log/kolla/"
+letsencrypt_lego_extra_volumes: "{{ default_extra_volumes }}"
+
+letsencrypt_webserver_default_volumes:
+ - "{{ node_config_directory }}/letsencrypt-webserver/:{{ container_config_directory }}/:ro"
+ - "/etc/localtime:/etc/localtime:ro"
+ - "letsencrypt:/etc/letsencrypt"
+ - "kolla_logs:/var/log/kolla/"
+letsencrypt_webserver_extra_volumes: "{{ default_extra_volumes }}"
+
+# attempt to renew Let's Encrypt certificate every 4 hours
+letsencrypt_cron_renew_schedule: "0 */4 * * *"
+# The email used for certificate registration and recovery contact. Required.
+letsencrypt_email: ""
+letsencrypt_cert_valid_days: "30"
+
+letsencrypt_external_fqdns:
+ - "{{ kolla_external_fqdn }}"
+letsencrypt_internal_fqdns:
+ - "{{ kolla_internal_fqdn }}"
+
+letsencrypt_external_account_binding: "no"
diff --git a/ansible/roles/letsencrypt/handlers/main.yml b/ansible/roles/letsencrypt/handlers/main.yml
new file mode 100644
index 0000000000..b07b0b6d30
--- /dev/null
+++ b/ansible/roles/letsencrypt/handlers/main.yml
@@ -0,0 +1,30 @@
+---
+- name: Restart letsencrypt-webserver container
+ vars:
+ service_name: "letsencrypt-webserver"
+ service: "{{ letsencrypt_services[service_name] }}"
+ become: true
+ kolla_container:
+ action: "recreate_or_restart_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ service.container_name }}"
+ image: "{{ service.image }}"
+ volumes: "{{ service.volumes }}"
+ dimensions: "{{ service.dimensions }}"
+ healthcheck: "{{ service.healthcheck | default(omit) }}"
+ environment: "{{ service.environment | default(omit) }}"
+
+- name: Restart letsencrypt-lego container
+ vars:
+ service_name: "letsencrypt-lego"
+ service: "{{ letsencrypt_services[service_name] }}"
+ become: true
+ kolla_container:
+ action: "recreate_or_restart_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ service.container_name }}"
+ image: "{{ service.image }}"
+ volumes: "{{ service.volumes }}"
+ dimensions: "{{ service.dimensions }}"
+ healthcheck: "{{ service.healthcheck | default(omit) }}"
+ environment: "{{ service.environment | default(omit) }}"
diff --git a/ansible/roles/letsencrypt/tasks/check-containers.yml b/ansible/roles/letsencrypt/tasks/check-containers.yml
new file mode 100644
index 0000000000..b7e2f7c29f
--- /dev/null
+++ b/ansible/roles/letsencrypt/tasks/check-containers.yml
@@ -0,0 +1,3 @@
+---
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/letsencrypt/tasks/config.yml b/ansible/roles/letsencrypt/tasks/config.yml
new file mode 100644
index 0000000000..e369fc8b34
--- /dev/null
+++ b/ansible/roles/letsencrypt/tasks/config.yml
@@ -0,0 +1,50 @@
+---
+- name: Ensuring config directories exist
+ file:
+ path: "{{ node_config_directory }}/{{ item.key }}"
+ state: "directory"
+ owner: "{{ config_owner_user }}"
+ group: "{{ config_owner_group }}"
+ mode: "0770"
+ become: true
+ with_dict: "{{ letsencrypt_services | select_services_enabled_and_mapped_to_host }}"
+
+- name: Copying over config.json files for services
+ template:
+ src: "{{ item.key }}.json.j2"
+ dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
+ mode: "0660"
+ become: true
+ with_dict: "{{ letsencrypt_services | select_services_enabled_and_mapped_to_host }}"
+
+- name: Copying over letsencrypt-webserver.conf
+ vars:
+ service: "{{ letsencrypt_services['letsencrypt-webserver'] }}"
+ become: true
+ template:
+ src: "{{ item }}"
+ dest: "{{ node_config_directory }}/letsencrypt-webserver/letsencrypt-webserver.conf"
+ mode: "0660"
+ with_first_found:
+ - "{{ node_custom_config }}/letsencrypt/{{ inventory_hostname }}/letsencrypt-webserver.conf"
+ - "{{ node_custom_config }}/letsencrypt/letsencrypt-webserver.conf"
+ - "letsencrypt-webserver.conf.j2"
+ when: service | service_enabled_and_mapped_to_host
+
+- name: Copying files for letsencrypt-lego
+ vars:
+ service: "{{ letsencrypt_services['letsencrypt-lego'] }}"
+ template:
+ src: "{{ item.src }}"
+ dest: "{{ node_config_directory }}/letsencrypt-lego/{{ item.dest }}"
+ mode: "0660"
+ become: true
+ with_items:
+ - { src: "crontab.j2", dest: "crontab" }
+ - { src: "id_rsa.j2", dest: "id_rsa" }
+ - { src: "letsencrypt-lego-run.sh.j2", dest: "letsencrypt-lego-run.sh" }
+ when: service | service_enabled_and_mapped_to_host
+
+- include_tasks: copy-certs.yml
+ when:
+ - kolla_copy_ca_into_containers | bool
diff --git a/ansible/roles/solum/tasks/check.yml b/ansible/roles/letsencrypt/tasks/config_validate.yml
similarity index 100%
rename from ansible/roles/solum/tasks/check.yml
rename to ansible/roles/letsencrypt/tasks/config_validate.yml
diff --git a/ansible/roles/letsencrypt/tasks/copy-certs.yml b/ansible/roles/letsencrypt/tasks/copy-certs.yml
new file mode 100644
index 0000000000..567b23612e
--- /dev/null
+++ b/ansible/roles/letsencrypt/tasks/copy-certs.yml
@@ -0,0 +1,6 @@
+---
+- name: "Copy certificates and keys for {{ project_name }}"
+ import_role:
+ role: service-cert-copy
+ vars:
+ project_services: "{{ letsencrypt_services }}"
diff --git a/ansible/roles/elasticsearch/tasks/deploy-containers.yml b/ansible/roles/letsencrypt/tasks/deploy-containers.yml
similarity index 100%
rename from ansible/roles/elasticsearch/tasks/deploy-containers.yml
rename to ansible/roles/letsencrypt/tasks/deploy-containers.yml
diff --git a/ansible/roles/kafka/tasks/deploy.yml b/ansible/roles/letsencrypt/tasks/deploy.yml
similarity index 100%
rename from ansible/roles/kafka/tasks/deploy.yml
rename to ansible/roles/letsencrypt/tasks/deploy.yml
diff --git a/ansible/roles/letsencrypt/tasks/loadbalancer.yml b/ansible/roles/letsencrypt/tasks/loadbalancer.yml
new file mode 100644
index 0000000000..a9a2a5c4bc
--- /dev/null
+++ b/ansible/roles/letsencrypt/tasks/loadbalancer.yml
@@ -0,0 +1,7 @@
+---
+- name: "Configure loadbalancer for {{ project_name }}"
+ import_role:
+ name: loadbalancer-config
+ vars:
+ project_services: "{{ letsencrypt_services }}"
+ tags: always
diff --git a/ansible/roles/elasticsearch/tasks/main.yml b/ansible/roles/letsencrypt/tasks/main.yml
similarity index 100%
rename from ansible/roles/elasticsearch/tasks/main.yml
rename to ansible/roles/letsencrypt/tasks/main.yml
diff --git a/ansible/roles/letsencrypt/tasks/precheck.yml b/ansible/roles/letsencrypt/tasks/precheck.yml
new file mode 100644
index 0000000000..f232457106
--- /dev/null
+++ b/ansible/roles/letsencrypt/tasks/precheck.yml
@@ -0,0 +1,44 @@
+---
+- name: Get container facts
+ become: true
+ kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
+ name:
+ - letsencrypt_webserver
+ register: container_facts
+
+- name: Checking free port for LetsEncrypt server
+ vars:
+ service: "{{ letsencrypt_services['letsencrypt-webserver'] }}"
+ wait_for:
+ host: "{{ api_interface_address }}"
+ port: "{{ letsencrypt_webserver_port }}"
+ connect_timeout: 1
+ timeout: 1
+ state: stopped
+ when:
+ - container_facts['letsencrypt_webserver'] is not defined
+ - service | service_enabled_and_mapped_to_host
+
+- name: Validating letsencrypt email variable
+ run_once: true
+ vars:
+ replace: "valid"
+ assert:
+ that: letsencrypt_email | regex_replace('.*@.*$', replace) == "valid"
+ fail_msg: "Letsencrypt contact email value didn't pass validation."
+ when:
+ - enable_letsencrypt | bool
+ - kolla_enable_tls_external | bool
+
+- name: Validating letsencrypt EAB variables
+ run_once: true
+ assert:
+ that:
+ - letsencrypt_eab_key_id != ""
+ - letsencrypt_eab_hmac != ""
+ fail_msg: "Both letsencrypt_eab_key_id and letsencrypt_eab_hmac must be set when External account binding is turned on."
+ when:
+ - enable_letsencrypt | bool
+ - letsencrypt_external_account_binding | bool
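
# NOTE: The prechecks validate the contact email by substituting a sentinel
# over anything matching `.*@.*$`, and require both EAB values whenever
# external account binding is enabled. An illustrative globals.yml fragment
# that satisfies both asserts:
#
#     letsencrypt_email: "ops@example.com"              # must contain an "@"
#     letsencrypt_external_account_binding: "yes"
#     letsencrypt_eab_key_id: "example-key-id"          # placeholder
#     letsencrypt_eab_hmac: "example-base64-hmac"       # placeholder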
diff --git a/ansible/roles/elasticsearch/tasks/pull.yml b/ansible/roles/letsencrypt/tasks/pull.yml
similarity index 100%
rename from ansible/roles/elasticsearch/tasks/pull.yml
rename to ansible/roles/letsencrypt/tasks/pull.yml
diff --git a/ansible/roles/elasticsearch/tasks/reconfigure.yml b/ansible/roles/letsencrypt/tasks/reconfigure.yml
similarity index 100%
rename from ansible/roles/elasticsearch/tasks/reconfigure.yml
rename to ansible/roles/letsencrypt/tasks/reconfigure.yml
diff --git a/ansible/roles/letsencrypt/tasks/stop.yml b/ansible/roles/letsencrypt/tasks/stop.yml
new file mode 100644
index 0000000000..9fbda55f16
--- /dev/null
+++ b/ansible/roles/letsencrypt/tasks/stop.yml
@@ -0,0 +1,6 @@
+---
+- import_role:
+ role: service-stop
+ vars:
+ project_services: "{{ letsencrypt_services }}"
+ service_name: "{{ project_name }}"
diff --git a/ansible/roles/storm/tasks/upgrade.yml b/ansible/roles/letsencrypt/tasks/upgrade.yml
similarity index 100%
rename from ansible/roles/storm/tasks/upgrade.yml
rename to ansible/roles/letsencrypt/tasks/upgrade.yml
diff --git a/ansible/roles/letsencrypt/templates/crontab.j2 b/ansible/roles/letsencrypt/templates/crontab.j2
new file mode 100644
index 0000000000..521d19b9b7
--- /dev/null
+++ b/ansible/roles/letsencrypt/templates/crontab.j2
@@ -0,0 +1,10 @@
+PATH=/usr/local/bin:/usr/bin:/bin
+
+{% if 'external' in letsencrypt_managed_certs and kolla_external_fqdn != kolla_external_vip_address %}
+# External Certificates
+{{ letsencrypt_cron_renew_schedule }} /usr/bin/letsencrypt-certificates --external --fqdns {% for fqdn in letsencrypt_external_fqdns %}{{ fqdn }}{% if not loop.last %},{% endif %}{% endfor %} --days {{ letsencrypt_cert_valid_days }} --port {{ letsencrypt_webserver_port }} --mail {{ letsencrypt_email }} --acme {{ letsencrypt_external_cert_server }} --vips {% if not kolla_same_external_internal_vip %}{{ kolla_external_vip_address }},{% endif %}{{ kolla_internal_vip_address }} --haproxies-ssh {% for host in groups['loadbalancer'] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ haproxy_ssh_port }}{% if not loop.last %},{% endif %}{% endfor %} 2>&1 | tee -a /var/log/kolla/letsencrypt/letsencrypt-lego.log
+{% endif %}
+{% if 'internal' in letsencrypt_managed_certs and kolla_internal_fqdn != kolla_internal_vip_address %}
+# Internal Certificates
+{{ letsencrypt_cron_renew_schedule }} /usr/bin/letsencrypt-certificates --internal --fqdns {% for fqdn in letsencrypt_internal_fqdns %}{{ fqdn }}{% if not loop.last %},{% endif %}{% endfor %} --days {{ letsencrypt_cert_valid_days }} --port {{ letsencrypt_webserver_port }} --mail {{ letsencrypt_email }} --acme {{ letsencrypt_internal_cert_server }} --vips {% if not kolla_same_external_internal_vip %}{{ kolla_external_vip_address }},{% endif %}{{ kolla_internal_vip_address }} --haproxies-ssh {% for host in groups['loadbalancer'] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ haproxy_ssh_port }}{% if not loop.last %},{% endif %}{% endfor %} 2>&1 | tee -a /var/log/kolla/letsencrypt/letsencrypt-lego.log
+{% endif %}
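Each rendered cron line flattens the FQDN list into a comma-separated `--fqdns` argument and the loadbalancer group into a comma-separated `--haproxies-ssh` list, exactly as the `loop.last` guards above do. A small Python sketch of that argument assembly (hostnames and the SSH port are hypothetical stand-ins for the Ansible variables):

```python
# Hypothetical inputs standing in for letsencrypt_external_fqdns and the
# loadbalancer group with haproxy_ssh_port.
external_fqdns = ["cloud.example.org", "api.example.org"]
haproxy_hosts = [("10.0.0.1", 2985), ("10.0.0.2", 2985)]

fqdns_arg = ",".join(external_fqdns)                      # --fqdns value
ssh_arg = ",".join(f"{h}:{p}" for h, p in haproxy_hosts)  # --haproxies-ssh value

print(f"--fqdns {fqdns_arg} --haproxies-ssh {ssh_arg}")
# --fqdns cloud.example.org,api.example.org --haproxies-ssh 10.0.0.1:2985,10.0.0.2:2985
```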
diff --git a/ansible/roles/letsencrypt/templates/id_rsa.j2 b/ansible/roles/letsencrypt/templates/id_rsa.j2
new file mode 100644
index 0000000000..9e42a132a3
--- /dev/null
+++ b/ansible/roles/letsencrypt/templates/id_rsa.j2
@@ -0,0 +1 @@
+{{ haproxy_ssh_key.private_key }}
diff --git a/ansible/roles/letsencrypt/templates/letsencrypt-lego-run.sh.j2 b/ansible/roles/letsencrypt/templates/letsencrypt-lego-run.sh.j2
new file mode 100644
index 0000000000..743f390946
--- /dev/null
+++ b/ansible/roles/letsencrypt/templates/letsencrypt-lego-run.sh.j2
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+{% set cron_cmd = 'cron -f' if kolla_base_distro in ['ubuntu', 'debian'] else 'crond -s -n' %}
+
+{% if 'external' in letsencrypt_managed_certs and kolla_external_fqdn != kolla_external_vip_address %}
+# External Certificates
+/usr/bin/letsencrypt-certificates --external --fqdns {% for fqdn in letsencrypt_external_fqdns %}{{ fqdn }}{% if not loop.last %},{% endif %}{% endfor %} --days {{ letsencrypt_cert_valid_days }} --port {{ letsencrypt_webserver_port }} --mail {{ letsencrypt_email }} --acme {{ letsencrypt_external_cert_server }} --vips {% if not kolla_same_external_internal_vip %}{{ kolla_external_vip_address }},{% endif %}{{ kolla_internal_vip_address }} --haproxies-ssh {% for host in groups['loadbalancer'] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ haproxy_ssh_port }}{% if not loop.last %},{% endif %}{% endfor %}{% if letsencrypt_external_account_binding | bool %} --eab --hmac {{ letsencrypt_eab_hmac }} --kid {{ letsencrypt_eab_key_id }}{% endif %} 2>&1 | tee -a /var/log/kolla/letsencrypt/letsencrypt-lego.log
+{% endif %}
+{% if 'internal' in letsencrypt_managed_certs and kolla_internal_fqdn != kolla_internal_vip_address %}
+# Internal Certificates
+/usr/bin/letsencrypt-certificates --internal --fqdns {% for fqdn in letsencrypt_internal_fqdns %}{{ fqdn }}{% if not loop.last %},{% endif %}{% endfor %} --days {{ letsencrypt_cert_valid_days }} --port {{ letsencrypt_webserver_port }} --mail {{ letsencrypt_email }} --acme {{ letsencrypt_internal_cert_server }} --vips {% if not kolla_same_external_internal_vip %}{{ kolla_external_vip_address }},{% endif %}{{ kolla_internal_vip_address }} --haproxies-ssh {% for host in groups['loadbalancer'] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ haproxy_ssh_port }}{% if not loop.last %},{% endif %}{% endfor %}{% if letsencrypt_external_account_binding | bool %} --eab --hmac {{ letsencrypt_eab_hmac }} --kid {{ letsencrypt_eab_key_id }}{% endif %} 2>&1 | tee -a /var/log/kolla/letsencrypt/letsencrypt-lego.log
+{% endif %}
+
+{{ cron_cmd }}
diff --git a/ansible/roles/letsencrypt/templates/letsencrypt-lego.json.j2 b/ansible/roles/letsencrypt/templates/letsencrypt-lego.json.j2
new file mode 100644
index 0000000000..0791f49c98
--- /dev/null
+++ b/ansible/roles/letsencrypt/templates/letsencrypt-lego.json.j2
@@ -0,0 +1,32 @@
+{% set cron_cmd = 'cron -f' if kolla_base_distro in ['ubuntu', 'debian'] else 'crond -s -n' %}
+{% set cron_path = '/var/spool/cron/crontabs/root' if kolla_base_distro in ['ubuntu', 'debian'] else '/var/spool/cron/root' %}
+{
+ "command": "/usr/local/bin/letsencrypt-lego-run.sh",
+ "config_files": [
+ {
+ "source": "{{ container_config_directory }}/letsencrypt-lego-run.sh",
+ "dest": "/usr/local/bin/letsencrypt-lego-run.sh",
+ "owner": "root",
+ "perm": "0700"
+ },
+ {
+ "source": "{{ container_config_directory }}/crontab",
+ "dest": "{{ cron_path }}",
+ "owner": "root",
+ "perm": "0600"
+ },
+ {
+ "source": "{{ container_config_directory }}/id_rsa",
+ "dest": "/var/lib/letsencrypt/.ssh/id_rsa",
+ "owner": "letsencrypt",
+ "perm": "0600"
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
+ ]
+}
+
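The `config_files` entries follow kolla's usual container-start contract: each `source` staged under the bind-mounted config directory is copied to `dest` and given the listed owner and permissions before the `command` runs. A rough Python sketch of that copy step, as an illustration of the contract only - not kolla's actual kolla_set_configs implementation:

```python
import json
import shutil
import subprocess

def apply_config(config_path: str) -> None:
    with open(config_path) as f:
        config = json.load(f)
    for entry in config["config_files"]:
        # Copy the staged file into place, then apply ownership and mode.
        shutil.copy(entry["source"], entry["dest"])
        subprocess.run(["chown", f"{entry['owner']}:{entry['owner']}",
                        entry["dest"]], check=True)
        subprocess.run(["chmod", entry["perm"], entry["dest"]], check=True)
```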
diff --git a/ansible/roles/letsencrypt/templates/letsencrypt-webserver.conf.j2 b/ansible/roles/letsencrypt/templates/letsencrypt-webserver.conf.j2
new file mode 100644
index 0000000000..f2555caad9
--- /dev/null
+++ b/ansible/roles/letsencrypt/templates/letsencrypt-webserver.conf.j2
@@ -0,0 +1,19 @@
+Listen {{ api_interface_address }}:{{ letsencrypt_webserver_port }}
+
+ServerSignature Off
+ServerTokens Prod
+TraceEnable off
+KeepAliveTimeout 60
+
+<VirtualHost *:{{ letsencrypt_webserver_port }}>
+    DocumentRoot /etc/letsencrypt/http-01
+    ErrorLog "/var/log/kolla/letsencrypt/letsencrypt-webserver-error.log"
+    CustomLog "/var/log/kolla/letsencrypt/letsencrypt-webserver-access.log" common
+
+    <Directory /etc/letsencrypt/http-01>
+        Options None
+        AllowOverride None
+        Require all granted
+    </Directory>
+</VirtualHost>
+
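This vhost exists only to answer HTTP-01 challenges: lego writes each token under the webroot, so a file at /etc/letsencrypt/http-01/.well-known/acme-challenge/<token> is served back at the well-known URL for the ACME server to fetch. A sketch of checking that a token is reachable (host, port and token name are hypothetical; the port stands in for letsencrypt_webserver_port):

```python
import urllib.request

HOST = "10.0.0.1"          # hypothetical api_interface_address
PORT = 8081                # hypothetical letsencrypt_webserver_port
TOKEN = "ExampleTokenName" # file name chosen by the ACME server

url = f"http://{HOST}:{PORT}/.well-known/acme-challenge/{TOKEN}"
with urllib.request.urlopen(url, timeout=5) as resp:
    print(resp.status, resp.read().decode())
```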
diff --git a/ansible/roles/letsencrypt/templates/letsencrypt-webserver.json.j2 b/ansible/roles/letsencrypt/templates/letsencrypt-webserver.json.j2
new file mode 100644
index 0000000000..195c374425
--- /dev/null
+++ b/ansible/roles/letsencrypt/templates/letsencrypt-webserver.json.j2
@@ -0,0 +1,20 @@
+{% set letsencrypt_apache_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %}
+{% set apache_binary = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %}
+
+{
+ "command": "/usr/sbin/{{ apache_binary }} -DFOREGROUND",
+ "config_files": [
+ {
+ "source": "{{ container_config_directory }}/letsencrypt-webserver.conf",
+ "dest": "/etc/{{ letsencrypt_apache_dir }}/letsencrypt-webserver.conf",
+ "owner": "letsencrypt",
+ "perm": "0600"
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
+ ]
+}
diff --git a/ansible/roles/letsencrypt/vars/main.yml b/ansible/roles/letsencrypt/vars/main.yml
new file mode 100644
index 0000000000..66b02925f9
--- /dev/null
+++ b/ansible/roles/letsencrypt/vars/main.yml
@@ -0,0 +1,2 @@
+---
+project_name: "letsencrypt"
diff --git a/ansible/roles/loadbalancer/defaults/main.yml b/ansible/roles/loadbalancer/defaults/main.yml
index 7a7a1f5746..651e93810d 100644
--- a/ansible/roles/loadbalancer/defaults/main.yml
+++ b/ansible/roles/loadbalancer/defaults/main.yml
@@ -26,23 +26,35 @@ loadbalancer_services:
privileged: True
volumes: "{{ keepalived_default_volumes + keepalived_extra_volumes }}"
dimensions: "{{ keepalived_dimensions }}"
+ haproxy-ssh:
+ container_name: "haproxy_ssh"
+ group: loadbalancer
+ enabled: "{{ enable_letsencrypt | bool }}"
+ image: "{{ haproxy_ssh_image_full }}"
+ volumes: "{{ haproxy_ssh_default_volumes }}"
+ dimensions: "{{ haproxy_ssh_dimensions }}"
+ healthcheck: "{{ haproxy_ssh_healthcheck }}"
####################
# Docker
####################
-keepalived_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/keepalived"
+keepalived_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}keepalived"
keepalived_tag: "{{ openstack_tag }}"
keepalived_image_full: "{{ keepalived_image }}:{{ keepalived_tag }}"
-haproxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/haproxy"
+haproxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}haproxy"
haproxy_tag: "{{ openstack_tag }}"
haproxy_image_full: "{{ haproxy_image }}:{{ haproxy_tag }}"
-proxysql_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/proxysql"
+proxysql_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}proxysql"
proxysql_tag: "{{ openstack_tag }}"
proxysql_image_full: "{{ proxysql_image }}:{{ proxysql_tag }}"
+haproxy_ssh_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}haproxy-ssh"
+haproxy_ssh_tag: "{{ haproxy_tag }}"
+haproxy_ssh_image_full: "{{ haproxy_ssh_image }}:{{ haproxy_ssh_tag }}"
+
syslog_server: "{{ api_interface_address }}"
syslog_haproxy_facility: "local1"
@@ -51,14 +63,15 @@ keepalived_traffic_mode: "multicast"
# Extended global configuration, optimization options.
haproxy_max_connections: 40000
-haproxy_processes: 1
-haproxy_process_cpu_map: "no"
+haproxy_threads: 1
+haproxy_thread_cpu_map: "no"
# Matches the mariadb 10000 max connections limit
haproxy_defaults_max_connections: 10000
haproxy_dimensions: "{{ default_container_dimensions }}"
proxysql_dimensions: "{{ default_container_dimensions }}"
keepalived_dimensions: "{{ default_container_dimensions }}"
+haproxy_ssh_dimensions: "{{ default_container_dimensions }}"
haproxy_enable_healthchecks: "{{ enable_container_healthchecks }}"
haproxy_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
@@ -86,11 +99,27 @@ proxysql_healthcheck:
test: "{% if proxysql_enable_healthchecks | bool %}{{ proxysql_healthcheck_test }}{% else %}NONE{% endif %}"
timeout: "{{ proxysql_healthcheck_timeout }}"
+haproxy_ssh_enable_healthchecks: "{{ enable_container_healthchecks }}"
+haproxy_ssh_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
+haproxy_ssh_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
+haproxy_ssh_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
+haproxy_ssh_healthcheck_test: ["CMD-SHELL", "healthcheck_listen sshd {{ haproxy_ssh_port }}"]
+haproxy_ssh_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
+haproxy_ssh_healthcheck:
+ interval: "{{ haproxy_ssh_healthcheck_interval }}"
+ retries: "{{ haproxy_ssh_healthcheck_retries }}"
+ start_period: "{{ haproxy_ssh_healthcheck_start_period }}"
+ test: "{% if haproxy_ssh_enable_healthchecks | bool %}{{ haproxy_ssh_healthcheck_test }}{% else %}NONE{% endif %}"
+ timeout: "{{ haproxy_ssh_healthcheck_timeout }}"
+
+
haproxy_default_volumes:
- "{{ node_config_directory }}/haproxy/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "haproxy_socket:/var/lib/kolla/haproxy/"
+ - "letsencrypt_certificates:/etc/haproxy/certificates"
+
proxysql_default_volumes:
- "{{ node_config_directory }}/proxysql/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
@@ -105,6 +134,13 @@ keepalived_default_volumes:
- "/lib/modules:/lib/modules:ro"
- "{{ 'haproxy_socket:/var/lib/kolla/haproxy/' if enable_haproxy | bool else '' }}"
- "{{ 'proxysql_socket:/var/lib/kolla/proxysql/' if enable_proxysql | bool else '' }}"
+haproxy_ssh_default_volumes:
+ - "{{ node_config_directory }}/haproxy-ssh/:{{ container_config_directory }}/:ro"
+ - "/etc/localtime:/etc/localtime:ro"
+ - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
+ - "haproxy_socket:/var/lib/kolla/haproxy/"
+ - "{{ 'letsencrypt:/etc/letsencrypt' if enable_letsencrypt | bool else omit }}"
+ - "{{ 'letsencrypt_certificates:/etc/haproxy/certificates' if enable_letsencrypt | bool else omit }}"
haproxy_extra_volumes: "{{ default_extra_volumes }}"
proxysql_extra_volumes: "{{ default_extra_volumes }}"
@@ -126,6 +162,9 @@ proxysql_backend_max_replication_lag: "0"
proxysql_admin_user: "kolla-admin"
proxysql_stats_user: "kolla-stats"
+# Proxysql prometheus exporter
+proxysql_prometheus_exporter_memory_metrics_interval: "60"
+
# Default timeout values
haproxy_http_request_timeout: "10s"
haproxy_http_keep_alive_timeout: "10s"
@@ -143,6 +182,27 @@ haproxy_defaults_balance: "roundrobin"
haproxy_host_ipv4_tcp_retries2: "KOLLA_UNSET"
# HAProxy socket admin permissions enable
-haproxy_socket_level_admin: "no"
-
+haproxy_socket_level_admin: "{{ enable_letsencrypt | bool }}"
kolla_externally_managed_cert: False
+
+# Allow disabling the keepalived tracking script (e.g. for single-node
+# environments where it proves problematic in some cases)
+keepalived_track_script_enabled: True
+
+# Default backend for single external frontend (for missing mappings)
+haproxy_external_single_frontend_default_backend: "horizon_external_back"
+
+haproxy_external_single_frontend_public_port: "443"
+
+haproxy_external_single_frontend_options:
+ - option httplog
+ - option forwardfor
+ - "timeout client {{ haproxy_glance_api_client_timeout }}"
+
+haproxy_glance_api_client_timeout: "6h"
+loadbalancer_copy_certs: "{{ kolla_copy_ca_into_containers | bool or kolla_enable_tls_external | bool or kolla_enable_tls_internal | bool or kolla_enable_tls_backend | bool or database_enable_tls_internal | bool or database_enable_tls_backend | bool }}"
+
+################
+# ProxySQL
+################
+mariadb_monitor_read_only_interval: ""
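The haproxy-ssh healthcheck above calls the `healthcheck_listen` helper shipped in kolla images, which roughly verifies that the named process is listening on the given port. A socket-level approximation in Python (an approximation only - the real helper inspects listening sockets rather than connecting):

```python
import socket
import sys

def port_is_accepting(host: str, port: int, timeout: float = 1.0) -> bool:
    # Rough stand-in for "is something listening on this port?"
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False

if __name__ == "__main__":
    host, port = sys.argv[1], int(sys.argv[2])
    sys.exit(0 if port_is_accepting(host, port) else 1)
```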
diff --git a/ansible/roles/loadbalancer/handlers/main.yml b/ansible/roles/loadbalancer/handlers/main.yml
index 6aeb61e235..4c8688c1b8 100644
--- a/ansible/roles/loadbalancer/handlers/main.yml
+++ b/ansible/roles/loadbalancer/handlers/main.yml
@@ -1,10 +1,4 @@
---
-- name: Reload firewalld
- become: True
- service:
- name: "firewalld"
- state: reloaded
-
# NOTE(yoctozepto): this handler dance is to ensure we delay restarting master
# keepalived and haproxy which control VIP address until we have working backups.
# This could be improved by checking if backup keepalived do not report FAULT state.
@@ -41,7 +35,7 @@
- name: Stop backup keepalived container
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
# NOTE(yoctozepto): backup node might not have keepalived yet - ignore
ignore_missing: true
@@ -59,7 +53,7 @@
- name: Stop backup haproxy container
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
# NOTE(kevko): backup node might not have haproxy yet - ignore
ignore_missing: true
@@ -75,7 +69,7 @@
- name: Stop backup proxysql container
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
# NOTE(kevko): backup node might not have proxysql yet - ignore
ignore_missing: true
@@ -98,7 +92,7 @@
service_name: "haproxy"
service: "{{ loadbalancer_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -128,7 +122,7 @@
service_name: "proxysql"
service: "{{ loadbalancer_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -162,7 +156,7 @@
service_name: "keepalived"
service: "{{ loadbalancer_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -184,7 +178,7 @@
# to the VIP address.
- name: Stop master haproxy container
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
# NOTE(yoctozepto): master node might not have haproxy yet - ignore
ignore_missing: true
@@ -200,7 +194,7 @@
- name: Stop master proxysql container
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "proxysql"
@@ -215,7 +209,7 @@
- name: Stop master keepalived container
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "keepalived"
@@ -232,7 +226,7 @@
service_name: "haproxy"
service: "{{ loadbalancer_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -262,7 +256,7 @@
service_name: "proxysql"
service: "{{ loadbalancer_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -292,7 +286,7 @@
service_name: "keepalived"
service: "{{ loadbalancer_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -333,3 +327,19 @@
- service.enabled | bool
listen:
- Wait for virtual IP to appear
+
+- name: Restart haproxy-ssh container
+ vars:
+ service_name: "haproxy-ssh"
+ service: "{{ loadbalancer_services[service_name] }}"
+ become: true
+ kolla_container:
+ action: "recreate_or_restart_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ service.container_name }}"
+ image: "{{ service.image }}"
+ volumes: "{{ service.volumes | reject('equalto', '') | list }}"
+ dimensions: "{{ service.dimensions }}"
+ healthcheck: "{{ service.healthcheck | default(omit) }}"
+ when:
+ - kolla_action != "config"
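The NOTE(yoctozepto) at the top of these handlers explains the ordering they encode: backup keepalived/haproxy/proxysql instances are stopped and recreated first, the play waits for the VIP to reappear, and only then is the master restarted, so the VIP is never left unserved longer than one failover. A purely schematic Python sketch of that sequencing (hypothetical helper names; the real work is done by the handlers above):

```python
def rolling_restart(hosts, is_master, restart, wait_for_vip):
    backups = [h for h in hosts if not is_master(h)]
    masters = [h for h in hosts if is_master(h)]
    for host in backups:   # 1. restart backups while the master holds the VIP
        restart(host)
    wait_for_vip()         # 2. wait until a restarted backup can take over
    for host in masters:   # 3. only now stop and restart the master
        restart(host)
```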
diff --git a/ansible/roles/loadbalancer/tasks/check-containers.yml b/ansible/roles/loadbalancer/tasks/check-containers.yml
index d2c4035417..b7e2f7c29f 100644
--- a/ansible/roles/loadbalancer/tasks/check-containers.yml
+++ b/ansible/roles/loadbalancer/tasks/check-containers.yml
@@ -1,18 +1,3 @@
---
-- name: Check loadbalancer containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- privileged: "{{ item.value.privileged | default(False) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ loadbalancer_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/loadbalancer/tasks/config-host.yml b/ansible/roles/loadbalancer/tasks/config-host.yml
index 46b262c7a4..6c8cc05878 100644
--- a/ansible/roles/loadbalancer/tasks/config-host.yml
+++ b/ansible/roles/loadbalancer/tasks/config-host.yml
@@ -1,22 +1,21 @@
---
+
+- name: Check IPv6 support
+ command: /usr/sbin/sysctl -n net.ipv6.conf.all.disable_ipv6
+ register: ipv6_disabled
+ changed_when: false
+
- name: Setting sysctl values
+ include_role:
+ name: sysctl
vars:
- should_set: "{{ item.value != 'KOLLA_UNSET' }}"
- sysctl:
- name: "{{ item.name }}"
- state: "{{ should_set | ternary('present', 'absent') }}"
- value: "{{ should_set | ternary(item.value, omit) }}"
- sysctl_set: "{{ should_set }}"
- sysctl_file: "{{ kolla_sysctl_conf_path }}"
- become: true
- with_items:
- - { name: "net.ipv4.ip_nonlocal_bind", value: 1 }
- - { name: "net.ipv6.ip_nonlocal_bind", value: 1 }
- - { name: "net.ipv4.tcp_retries2", value: "{{ haproxy_host_ipv4_tcp_retries2 }}" }
- - { name: "net.unix.max_dgram_qlen", value: 128 }
+ settings:
+ - { name: "net.ipv6.ip_nonlocal_bind", value: 1 }
+ - { name: "net.ipv4.ip_nonlocal_bind", value: 1 }
+ - { name: "net.ipv4.tcp_retries2", value: "{{ haproxy_host_ipv4_tcp_retries2 }}" }
+ - { name: "net.unix.max_dgram_qlen", value: 128 }
when:
- set_sysctl | bool
- - item.value != 'KOLLA_SKIP'
- name: Load and persist keepalived module
import_role:
diff --git a/ansible/roles/loadbalancer/tasks/config.yml b/ansible/roles/loadbalancer/tasks/config.yml
index 2425324208..a685e2d5be 100644
--- a/ansible/roles/loadbalancer/tasks/config.yml
+++ b/ansible/roles/loadbalancer/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ loadbalancer_services }}"
+ with_dict: "{{ loadbalancer_services | select_services_enabled_and_mapped_to_host }}"
- name: Ensuring haproxy service config subdir exists
vars:
@@ -22,9 +19,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
- name: Ensuring proxysql service config subdirectories exist
vars:
@@ -39,9 +34,7 @@
with_items:
- "users"
- "rules"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
- name: Ensuring keepalived checks subdir exists
vars:
@@ -53,9 +46,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
- name: Remove mariadb.cfg if proxysql enabled
vars:
@@ -65,11 +56,8 @@
state: absent
become: true
when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ - service | service_enabled_and_mapped_to_host
- loadbalancer_services.proxysql.enabled | bool
- notify:
- - Restart haproxy container
- name: Removing checks for services which are disabled
vars:
@@ -80,13 +68,12 @@
become: true
with_dict: "{{ loadbalancer_services }}"
when:
- - inventory_hostname in groups[service.group]
+ - keepalived_track_script_enabled | bool
- item.key != 'keepalived'
+ - item.key != 'haproxy-ssh'
- not item.value.enabled | bool
or not inventory_hostname in groups[item.value.group]
- - service.enabled | bool
- notify:
- - Restart keepalived container
+ - service | service_enabled_and_mapped_to_host
- name: Copying checks for services which are enabled
vars:
@@ -98,13 +85,12 @@
become: true
with_dict: "{{ loadbalancer_services }}"
when:
- - inventory_hostname in groups[service.group]
+ - keepalived_track_script_enabled | bool
- inventory_hostname in groups[item.value.group]
- item.key != 'keepalived'
+ - item.key != 'haproxy-ssh'
- item.value.enabled | bool
- - service.enabled | bool
- notify:
- - Restart keepalived container
+ - service | service_enabled_and_mapped_to_host
- name: Copying over config.json files for services
template:
@@ -112,12 +98,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ loadbalancer_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ loadbalancer_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over haproxy.cfg
vars:
@@ -127,15 +108,11 @@
dest: "{{ node_config_directory }}/haproxy/haproxy.cfg"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_first_found:
- "{{ node_custom_config }}/haproxy/{{ inventory_hostname }}/haproxy_main.cfg"
- "{{ node_custom_config }}/haproxy/haproxy_main.cfg"
- "haproxy/haproxy_main.cfg.j2"
- notify:
- - Restart haproxy container
- name: Copying over proxysql config
vars:
@@ -145,15 +122,23 @@
dest: "{{ node_config_directory }}/proxysql/proxysql.yaml"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_first_found:
- "{{ node_custom_config }}/proxysql/{{ inventory_hostname }}/proxysql.yaml"
- "{{ node_custom_config }}/proxysql/proxysql.yaml"
- "proxysql/proxysql.yaml.j2"
- notify:
- - Restart proxysql container
+
+- name: Copying over haproxy single external frontend config
+ vars:
+ service: "{{ loadbalancer_services['haproxy'] }}"
+ template:
+ src: "haproxy/haproxy_external_frontend.cfg.j2"
+ dest: "{{ node_config_directory }}/haproxy/services.d/external-frontend.cfg"
+ mode: "0660"
+ become: true
+ when:
+ - service | service_enabled_and_mapped_to_host
+ - haproxy_single_external_frontend | bool
- name: Copying over custom haproxy services configuration
vars:
@@ -163,13 +148,9 @@
dest: "{{ node_config_directory }}/haproxy/services.d/"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_fileglob:
- "{{ node_custom_config }}/haproxy/services.d/*.cfg"
- notify:
- - Restart haproxy container
- name: Copying over keepalived.conf
vars:
@@ -179,55 +160,15 @@
dest: "{{ node_config_directory }}/keepalived/keepalived.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_first_found:
- "{{ node_custom_config }}/keepalived/{{ inventory_hostname }}/keepalived.conf"
- "{{ node_custom_config }}/keepalived/keepalived.conf"
- "keepalived/keepalived.conf.j2"
- notify:
- - Restart keepalived container
-
-- name: Copying over haproxy.pem
- vars:
- service: "{{ loadbalancer_services['haproxy'] }}"
- copy:
- src: "{{ kolla_external_fqdn_cert }}"
- dest: "{{ node_config_directory }}/haproxy/{{ item }}"
- mode: "0660"
- become: true
- when:
- - kolla_enable_tls_external | bool
- - not kolla_externally_managed_cert | bool
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- with_items:
- - "haproxy.pem"
- notify:
- - Restart haproxy container
-
-- name: Copying over haproxy-internal.pem
- vars:
- service: "{{ loadbalancer_services['haproxy'] }}"
- copy:
- src: "{{ kolla_internal_fqdn_cert }}"
- dest: "{{ node_config_directory }}/haproxy/{{ item }}"
- mode: "0660"
- become: true
- when:
- - kolla_enable_tls_internal | bool
- - not kolla_externally_managed_cert | bool
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- with_items:
- - "haproxy-internal.pem"
- notify:
- - Restart haproxy container
- include_tasks: copy-certs.yml
when:
- - kolla_copy_ca_into_containers | bool
+ - loadbalancer_copy_certs
- name: Copying over haproxy start script
vars:
@@ -237,15 +178,11 @@
dest: "{{ node_config_directory }}/haproxy/haproxy_run.sh"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_first_found:
- "{{ node_custom_config }}/haproxy/{{ inventory_hostname }}/haproxy_run.sh"
- "{{ node_custom_config }}/haproxy/haproxy_run.sh"
- "haproxy/haproxy_run.sh.j2"
- notify:
- - Restart haproxy container
- name: Copying over proxysql start script
vars:
@@ -255,12 +192,21 @@
dest: "{{ node_config_directory }}/proxysql/proxysql_run.sh"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_first_found:
- "{{ node_custom_config }}/proxysql/{{ inventory_hostname }}/proxysql_run.sh"
- "{{ node_custom_config }}/proxysql/proxysql_run.sh"
- "proxysql/proxysql_run.sh.j2"
- notify:
- - Restart proxysql container
+
+- name: Copying files for haproxy-ssh
+ vars:
+ service: "{{ loadbalancer_services['haproxy-ssh'] }}"
+ template:
+ src: "{{ item.src }}"
+ dest: "{{ node_config_directory }}/haproxy-ssh/{{ item.dest }}"
+ mode: "0600"
+ become: true
+ with_items:
+ - { src: "haproxy-ssh/sshd_config.j2", dest: "sshd_config" }
+ - { src: "haproxy-ssh/id_rsa.pub", dest: "id_rsa.pub" }
+ when: service | service_enabled_and_mapped_to_host
diff --git a/ansible/roles/loadbalancer/tasks/config_validate.yml b/ansible/roles/loadbalancer/tasks/config_validate.yml
new file mode 100644
index 0000000000..cfb336919c
--- /dev/null
+++ b/ansible/roles/loadbalancer/tasks/config_validate.yml
@@ -0,0 +1,27 @@
+---
+- name: Validating haproxy config files
+ vars:
+ service: "{{ loadbalancer_services['haproxy'] }}"
+ shell: >-
+ {{ kolla_container_engine }} exec -i haproxy haproxy
+ -c -f /etc/haproxy/haproxy.cfg -f /etc/haproxy/services.d/
+ register: haproxy_config_validation_result
+ check_mode: false
+ become: true
+ when:
+ - inventory_hostname in groups[service.group]
+ - service.enabled | bool
+
+- name: Assert haproxy config is valid
+ vars:
+ service: "{{ loadbalancer_services['haproxy'] }}"
+ assert:
+ that:
+ - haproxy_config_validation_result.rc == 0
+ fail_msg: >-
+ haproxy config is invalid or
+ validation could not be performed.
+ success_msg: "haproxy config is valid"
+ when:
+ - inventory_hostname in groups[service.group]
+ - service.enabled | bool
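The same validation can be run by hand; a sketch wrapping the exact command from the task above, assuming docker as the container engine:

```python
import subprocess

# Mirrors the task: ask the running haproxy container to parse its own
# config; returncode == 0 means the configuration is valid.
result = subprocess.run(
    ["docker", "exec", "-i", "haproxy", "haproxy",
     "-c", "-f", "/etc/haproxy/haproxy.cfg", "-f", "/etc/haproxy/services.d/"],
    capture_output=True, text=True,
)
print("valid" if result.returncode == 0 else f"invalid:\n{result.stderr}")
```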
diff --git a/ansible/roles/loadbalancer/tasks/copy-certs.yml b/ansible/roles/loadbalancer/tasks/copy-certs.yml
index 7e26c26482..1b1ec5a6b9 100644
--- a/ansible/roles/loadbalancer/tasks/copy-certs.yml
+++ b/ansible/roles/loadbalancer/tasks/copy-certs.yml
@@ -1,6 +1,63 @@
---
+- name: Copying over haproxy.pem
+ vars:
+ service: "{{ loadbalancer_services['haproxy'] }}"
+ copy:
+ src: "{{ kolla_external_fqdn_cert }}"
+ dest: "{{ node_config_directory }}/haproxy/{{ item }}"
+ mode: "0660"
+ become: true
+ when:
+ - letsencrypt_managed_certs == 'internal' or letsencrypt_managed_certs == ''
+ - kolla_enable_tls_external | bool
+ - not kolla_externally_managed_cert | bool
+ - service | service_enabled_and_mapped_to_host
+ with_items:
+ - "haproxy.pem"
+
+- name: Copying over haproxy-internal.pem
+ vars:
+ service: "{{ loadbalancer_services['haproxy'] }}"
+ copy:
+ src: "{{ kolla_internal_fqdn_cert }}"
+ dest: "{{ node_config_directory }}/haproxy/{{ item }}"
+ mode: "0660"
+ become: true
+ when:
+ - letsencrypt_managed_certs == 'external' or letsencrypt_managed_certs == ''
+ - kolla_enable_tls_internal | bool
+ - not kolla_externally_managed_cert | bool
+ - service | service_enabled_and_mapped_to_host
+ with_items:
+ - "haproxy-internal.pem"
+
+- name: Copying over proxysql-cert.pem
+ vars:
+ service: "{{ loadbalancer_services['proxysql'] }}"
+ copy:
+ src: "{{ kolla_certificates_dir }}/proxysql-cert.pem"
+ dest: "{{ node_config_directory }}/proxysql/proxysql-cert.pem"
+ mode: "0660"
+ become: true
+ when:
+ - database_enable_tls_internal | bool
+ - service | service_enabled_and_mapped_to_host
+
+- name: Copying over proxysql-key.pem
+ vars:
+ service: "{{ loadbalancer_services['proxysql'] }}"
+ copy:
+ src: "{{ kolla_certificates_dir }}/proxysql-key.pem"
+ dest: "{{ node_config_directory }}/proxysql/proxysql-key.pem"
+ mode: "0660"
+ become: true
+ when:
+ - database_enable_tls_internal | bool
+ - service | service_enabled_and_mapped_to_host
+
- name: "Copy certificates and keys for {{ project_name }}"
import_role:
role: service-cert-copy
vars:
project_services: "{{ loadbalancer_services }}"
+ project_name: mariadb
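The letsencrypt_managed_certs gating above reads: copy the operator-provided external cert only when LetsEncrypt is not managing it (managed set to 'internal' or unset), and symmetrically for the internal cert. A small Python sketch of the predicate, with names mirroring the playbook variables:

```python
def should_copy_external(letsencrypt_managed_certs: str,
                         kolla_enable_tls_external: bool,
                         kolla_externally_managed_cert: bool) -> bool:
    # haproxy.pem is copied from the operator only when LetsEncrypt does
    # not manage the external certificate itself.
    return (letsencrypt_managed_certs in ("internal", "")
            and kolla_enable_tls_external
            and not kolla_externally_managed_cert)

assert should_copy_external("", True, False)
assert not should_copy_external("external", True, False)
```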
diff --git a/ansible/roles/loadbalancer/tasks/precheck.yml b/ansible/roles/loadbalancer/tasks/precheck.yml
index 77639ce1eb..b499c086bb 100644
--- a/ansible/roles/loadbalancer/tasks/precheck.yml
+++ b/ansible/roles/loadbalancer/tasks/precheck.yml
@@ -8,16 +8,20 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- haproxy
- proxysql
- keepalived
+ check_mode: false
register: container_facts
- name: Group hosts by whether they are running keepalived
group_by:
key: "keepalived_running_{{ container_facts['keepalived'] is defined }}"
changed_when: false
+ check_mode: false
when:
- enable_keepalived | bool
- inventory_hostname in groups['loadbalancer']
@@ -26,6 +30,7 @@
group_by:
key: "haproxy_running_{{ container_facts['haproxy'] is defined }}"
changed_when: false
+ check_mode: false
when:
- enable_haproxy | bool
- inventory_hostname in groups['loadbalancer']
@@ -34,6 +39,7 @@
group_by:
key: "proxysql_running_{{ container_facts['proxysql'] is defined }}"
changed_when: false
+ check_mode: false
when:
- enable_proxysql | bool
- inventory_hostname in groups['loadbalancer']
@@ -49,57 +55,57 @@
haproxy_vip_prechecks: "{{ all_hosts_in_batch and groups['haproxy_running_True'] is not defined }}"
proxysql_vip_prechecks: "{{ all_hosts_in_batch and groups['proxysql_running_True'] is not defined }}"
-- name: Checking if external haproxy certificate exists
- run_once: true
- stat:
- path: "{{ kolla_external_fqdn_cert }}"
- delegate_to: localhost
- register: haproxy_cert_file
- changed_when: false
- when:
- - not kolla_externally_managed_cert | bool
- - kolla_enable_tls_external | bool
+- block:
+ - name: Checking if external haproxy certificate exists
+ run_once: true
+ stat:
+ path: "{{ kolla_external_fqdn_cert }}"
+ delegate_to: localhost
+ register: haproxy_cert_file
+ changed_when: false
-- name: Fail if external haproxy certificate is absent
- run_once: true
- fail:
- msg: "External haproxy certificate file is not found. It is configured via 'kolla_external_fqdn_cert'"
+ - name: Assert that external haproxy certificate exists
+ run_once: true
+ assert:
+ that: haproxy_cert_file.stat.exists
+ fail_msg: "External haproxy certificate file is not found. It is configured via 'kolla_external_fqdn_cert'"
when:
- not kolla_externally_managed_cert | bool
+ - letsencrypt_managed_certs == 'internal' or letsencrypt_managed_certs == ''
- kolla_enable_tls_external | bool
- - not haproxy_cert_file.stat.exists
-
-- name: Checking if internal haproxy certificate exists
- run_once: true
- stat:
- path: "{{ kolla_internal_fqdn_cert }}"
- delegate_to: localhost
- register: haproxy_internal_cert_file
- changed_when: false
- when:
- - not kolla_externally_managed_cert | bool
- - kolla_enable_tls_internal | bool
-- name: Fail if internal haproxy certificate is absent
- run_once: true
- fail:
- msg: "Internal haproxy certificate file is not found. It is configured via 'kolla_internal_fqdn_cert'"
+- block:
+ - name: Checking if internal haproxy certificate exists
+ run_once: true
+ stat:
+ path: "{{ kolla_internal_fqdn_cert }}"
+ delegate_to: localhost
+ register: haproxy_internal_cert_file
+ changed_when: false
+
+ - name: Assert that internal haproxy certificate exists
+ run_once: true
+ assert:
+ that: haproxy_internal_cert_file.stat.exists
+ fail_msg: "Internal haproxy certificate file is not found. It is configured via 'kolla_internal_fqdn_cert'"
when:
- not kolla_externally_managed_cert | bool
+ - letsencrypt_managed_certs == 'external' or letsencrypt_managed_certs == ''
- kolla_enable_tls_internal | bool
- - not haproxy_internal_cert_file.stat.exists
- name: Checking the kolla_external_vip_interface is present
- fail: "msg='Please check the kolla_external_vip_interface property - interface {{ kolla_external_vip_interface }} not found'"
+ assert:
+ that: kolla_external_vip_interface in ansible_facts.interfaces
+ fail_msg: "Please check the kolla_external_vip_interface property - interface {{ kolla_external_vip_interface }} not found"
when:
- haproxy_enable_external_vip | bool
- - kolla_external_vip_interface not in ansible_facts.interfaces
- name: Checking the kolla_external_vip_interface is active
- fail: "msg='Please check the kolla_external_vip_interface settings - interface {{ kolla_external_vip_interface }} is not active'"
+ assert:
+ that: hostvars[inventory_hostname].ansible_facts[kolla_external_vip_interface | replace('-', '_')]['active']
+ fail_msg: "Please check the kolla_external_vip_interface settings - interface {{ kolla_external_vip_interface }} is not active"
when:
- haproxy_enable_external_vip | bool
- - not hostvars[inventory_hostname].ansible_facts[kolla_external_vip_interface]['active']
# NOTE(hrw): let assume that each supported host OS has ping with ipv4/v6 support
- name: Checking if kolla_internal_vip_address and kolla_external_vip_address are not pingable from any node
@@ -107,6 +113,7 @@
register: ping_output
changed_when: false
failed_when: ping_output.rc != 1
+ check_mode: false
with_items:
- "{{ kolla_internal_vip_address }}"
- "{{ kolla_external_vip_address }}"
@@ -177,6 +184,33 @@
- inventory_hostname in groups['loadbalancer']
- api_interface_address != kolla_internal_vip_address
+- name: Checking free port for ProxySQL prometheus exporter (api interface)
+ wait_for:
+ host: "{{ api_interface_address }}"
+ port: "{{ proxysql_prometheus_exporter_port }}"
+ connect_timeout: 1
+ timeout: 1
+ state: stopped
+ when:
+ - enable_proxysql | bool
+ - enable_prometheus_proxysql_exporter | bool
+ - container_facts['proxysql'] is not defined
+ - inventory_hostname in groups['loadbalancer']
+
+- name: Checking free port for ProxySQL prometheus exporter (vip interface)
+ wait_for:
+ host: "{{ kolla_internal_vip_address }}"
+ port: "{{ proxysql_prometheus_exporter_port }}"
+ connect_timeout: 1
+ timeout: 1
+ state: stopped
+ when:
+ - enable_proxysql | bool
+ - enable_prometheus_proxysql_exporter | bool
+ - proxysql_vip_prechecks
+ - inventory_hostname in groups['loadbalancer']
+ - api_interface_address != kolla_internal_vip_address
+
# FIXME(yoctozepto): this req seems arbitrary, they need not be, just routable is fine
- name: Checking if kolla_internal_vip_address is in the same network as api_interface on all nodes
become: true
@@ -186,6 +220,7 @@
failed_when: >-
( ip_addr_output is failed or
kolla_internal_vip_address | ipaddr(ip_addr_output.stdout.split()[3]) is none)
+ check_mode: false
when:
- enable_haproxy | bool
- enable_keepalived | bool
@@ -197,6 +232,7 @@
shell: echo "show stat" | {{ kolla_container_engine }} exec -i haproxy socat unix-connect:/var/lib/kolla/haproxy/haproxy.sock stdio # noqa risky-shell-pipe
register: haproxy_stat_shell
changed_when: false
+ check_mode: false
when: container_facts['haproxy'] is defined
- name: Setting haproxy stat fact
@@ -308,19 +344,6 @@
- haproxy_stat.find('designate_api') == -1
- haproxy_vip_prechecks
-- name: Checking free port for Elasticsearch HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ elasticsearch_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - enable_elasticsearch | bool
- - inventory_hostname in groups['loadbalancer']
- - haproxy_stat.find('elasticsearch') == -1
- - haproxy_vip_prechecks
-
- name: Checking free port for Glance API HAProxy
wait_for:
host: "{{ kolla_internal_vip_address }}"
@@ -347,19 +370,6 @@
- haproxy_stat.find('gnocchi_api') == -1
- haproxy_vip_prechecks
-- name: Checking free port for Freezer API HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ freezer_api_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - enable_freezer | bool
- - haproxy_stat.find('freezer_api') == -1
- - inventory_hostname in groups['loadbalancer']
- - haproxy_vip_prechecks
-
- name: Checking free port for Grafana server HAProxy
wait_for:
host: "{{ kolla_internal_vip_address }}"
@@ -465,19 +475,6 @@
- haproxy_stat.find('keystone_external') == -1
- haproxy_vip_prechecks
-- name: Checking free port for Kibana HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ kibana_server_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - enable_kibana | bool
- - inventory_hostname in groups['loadbalancer']
- - haproxy_stat.find('kibana') == -1
- - haproxy_vip_prechecks
-
- name: Checking free port for Magnum API HAProxy
wait_for:
host: "{{ kolla_internal_vip_address }}"
@@ -543,75 +540,6 @@
- haproxy_stat.find('mistral_api') == -1
- haproxy_vip_prechecks
-- name: Checking free port for Monasca API internal HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ monasca_api_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - enable_monasca | bool
- - inventory_hostname in groups['loadbalancer']
- - haproxy_stat.find('monasca_api') == -1
- - haproxy_vip_prechecks
-
-- name: Checking free port for Monasca API public HAProxy
- wait_for:
- host: "{{ kolla_external_vip_address }}"
- port: "{{ monasca_api_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - haproxy_enable_external_vip | bool
- - enable_monasca | bool
- - inventory_hostname in groups['loadbalancer']
- - haproxy_stat.find('monasca_api_external') == -1
- - haproxy_vip_prechecks
-
-- name: Checking free port for Monasca Log API internal HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ monasca_log_api_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - enable_monasca | bool
- - inventory_hostname in groups['loadbalancer']
- - haproxy_stat.find('monasca_log_api') == -1
- - haproxy_vip_prechecks
- - monasca_log_api_port != monasca_api_port
-
-- name: Checking free port for Monasca Log API public HAProxy
- wait_for:
- host: "{{ kolla_external_vip_address }}"
- port: "{{ monasca_log_api_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - haproxy_enable_external_vip | bool
- - enable_monasca | bool
- - inventory_hostname in groups['loadbalancer']
- - haproxy_stat.find('monasca_log_api_external') == -1
- - haproxy_vip_prechecks
- - monasca_log_api_port != monasca_api_port
-
-- name: Checking free port for Murano API HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ murano_api_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - enable_murano | bool
- - inventory_hostname in groups['loadbalancer']
- - haproxy_stat.find('murano_api') == -1
- - haproxy_vip_prechecks
-
- name: Checking free port for Neutron Server HAProxy
wait_for:
host: "{{ kolla_internal_vip_address }}"
@@ -719,81 +647,43 @@
- haproxy_stat.find('octavia_api') == -1
- haproxy_vip_prechecks
-- name: Checking free port for RabbitMQ Management HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ rabbitmq_management_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - enable_rabbitmq | bool
- - inventory_hostname in groups['loadbalancer']
- - haproxy_stat.find('rabbitmq_management') == -1
- - haproxy_vip_prechecks
-
-- name: Checking free port for outward RabbitMQ Management HAProxy
+- name: Checking free port for OpenSearch HAProxy
wait_for:
host: "{{ kolla_internal_vip_address }}"
- port: "{{ outward_rabbitmq_management_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - enable_outward_rabbitmq | bool
- - inventory_hostname in groups['loadbalancer']
- - haproxy_stat.find('outward_rabbitmq_management') == -1
- - haproxy_vip_prechecks
-
-- name: Checking free port for Sahara API HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ sahara_api_port }}"
+ port: "{{ opensearch_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- - enable_sahara | bool
+ - enable_opensearch | bool
- inventory_hostname in groups['loadbalancer']
- - haproxy_stat.find('sahara_api') == -1
+ - haproxy_stat.find('opensearch') == -1
- haproxy_vip_prechecks
-- name: Checking free port for Senlin API HAProxy
+- name: Checking free port for OpenSearch Dashboards HAProxy
wait_for:
host: "{{ kolla_internal_vip_address }}"
- port: "{{ senlin_api_port }}"
+ port: "{{ opensearch_dashboards_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- - enable_senlin | bool
+ - enable_opensearch_dashboards | bool
- inventory_hostname in groups['loadbalancer']
- - haproxy_stat.find('senlin_api') == -1
+ - haproxy_stat.find('opensearch_dashboards') == -1
- haproxy_vip_prechecks
-- name: Checking free port for Solum Application Deployment HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ solum_application_deployment_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - enable_solum | bool
- - inventory_hostname in groups['loadbalancer']
- - haproxy_stat.find('solum_application_deployment') == -1
- - haproxy_vip_prechecks
-
-- name: Checking free port for Solum Image Builder HAProxy
+- name: Checking free port for RabbitMQ Management HAProxy
wait_for:
host: "{{ kolla_internal_vip_address }}"
- port: "{{ solum_image_builder_port }}"
+ port: "{{ rabbitmq_management_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- - enable_solum | bool
+ - enable_rabbitmq | bool
- inventory_hostname in groups['loadbalancer']
- - haproxy_stat.find('solum_image_builder') == -1
+ - haproxy_stat.find('rabbitmq_management') == -1
- haproxy_vip_prechecks
- name: Checking free port for Swift Proxy Server HAProxy
@@ -861,18 +751,6 @@
- haproxy_stat.find('zun_api') == -1
- haproxy_vip_prechecks
-- name: Checking free port for Vitrage API HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ vitrage_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - enable_vitrage | bool
- - inventory_hostname in groups['loadbalancer']
- - haproxy_stat.find('vitrage_api') == -1
- - haproxy_vip_prechecks
-
- name: Firewalld checks
block:
- name: Check if firewalld is running # noqa command-instead-of-module
@@ -882,6 +760,7 @@
register: firewalld_is_active
changed_when: false
failed_when: false
+ check_mode: false
- name: Fail if firewalld is not running
fail:
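All of the "Checking free port" tasks in this file share one pattern: `wait_for` with `state: stopped` and a one-second timeout succeeds only if nothing accepts connections on the given address and port. An equivalent check in Python (address and port are hypothetical):

```python
import socket

def port_is_free(host: str, port: int, timeout: float = 1.0) -> bool:
    # Equivalent of wait_for ... state: stopped - succeed only when nothing
    # accepts TCP connections on host:port.
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return False  # something answered, the port is taken
    except OSError:
        return True

if not port_is_free("192.0.2.10", 443):  # hypothetical VIP and port
    raise SystemExit("port already in use - is another service bound to the VIP?")
```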
diff --git a/ansible/roles/loadbalancer/tasks/upgrade.yml b/ansible/roles/loadbalancer/tasks/upgrade.yml
index 5b10a7e111..57fa9c5813 100644
--- a/ansible/roles/loadbalancer/tasks/upgrade.yml
+++ b/ansible/roles/loadbalancer/tasks/upgrade.yml
@@ -1,2 +1,21 @@
---
+# TODO(dawudm) We should replace these two tasks with a task to check the port
+# is free in precheck.yml in the D release
+- name: Stop and remove containers for haproxy exporter containers
+ become: true
+ kolla_container:
+ action: "stop_and_remove_container"
+ common_options: "{{ docker_common_options }}"
+ name: "prometheus_haproxy_exporter"
+ when:
+ - inventory_hostname in groups['loadbalancer']
+
+- name: Removing config for haproxy exporter
+ file:
+ path: "{{ node_config_directory }}/prometheus-haproxy-exporter"
+ state: "absent"
+ become: true
+ when:
+ - inventory_hostname in groups['loadbalancer']
+
- import_tasks: deploy.yml
diff --git a/ansible/roles/loadbalancer/templates/haproxy-ssh/haproxy-ssh.json.j2 b/ansible/roles/loadbalancer/templates/haproxy-ssh/haproxy-ssh.json.j2
new file mode 100644
index 0000000000..bf5f18de0c
--- /dev/null
+++ b/ansible/roles/loadbalancer/templates/haproxy-ssh/haproxy-ssh.json.j2
@@ -0,0 +1,23 @@
+{
+ "command": "/usr/sbin/sshd -D",
+ "config_files": [
+ {
+ "source": "{{ container_config_directory }}/sshd_config",
+ "dest": "/etc/ssh/sshd_config",
+ "owner": "root",
+ "perm": "0600"
+ },
+ {
+ "source": "{{ container_config_directory }}/id_rsa.pub",
+ "dest": "/var/lib/haproxy/.ssh/authorized_keys",
+ "owner": "haproxy",
+ "perm": "0600"
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
+ ]
+}
diff --git a/ansible/roles/loadbalancer/templates/haproxy-ssh/id_rsa.pub b/ansible/roles/loadbalancer/templates/haproxy-ssh/id_rsa.pub
new file mode 100644
index 0000000000..e7b2ce1c99
--- /dev/null
+++ b/ansible/roles/loadbalancer/templates/haproxy-ssh/id_rsa.pub
@@ -0,0 +1 @@
+{{ haproxy_ssh_key.public_key }}
diff --git a/ansible/roles/loadbalancer/templates/haproxy-ssh/sshd_config.j2 b/ansible/roles/loadbalancer/templates/haproxy-ssh/sshd_config.j2
new file mode 100644
index 0000000000..287fd195a9
--- /dev/null
+++ b/ansible/roles/loadbalancer/templates/haproxy-ssh/sshd_config.j2
@@ -0,0 +1,5 @@
+Port {{ haproxy_ssh_port }}
+ListenAddress {{ api_interface_address }}
+
+SyslogFacility AUTHPRIV
+UsePAM yes
diff --git a/ansible/roles/loadbalancer/templates/haproxy/haproxy.json.j2 b/ansible/roles/loadbalancer/templates/haproxy/haproxy.json.j2
index a51a8ed7ab..c6a3bd5037 100644
--- a/ansible/roles/loadbalancer/templates/haproxy/haproxy.json.j2
+++ b/ansible/roles/loadbalancer/templates/haproxy/haproxy.json.j2
@@ -18,20 +18,34 @@
"dest": "/etc/haproxy/services.d",
"owner": "root",
"perm": "0700"
- },
+ }{% if kolla_enable_tls_external | bool %},
{
- "source": "{{ container_config_directory }}/haproxy.pem",
- "dest": "/etc/haproxy/haproxy.pem",
+ "source": "{{ container_config_directory }}/external-frontend-map",
+ "dest": "/etc/haproxy/external-frontend-map",
"owner": "root",
"perm": "0600",
+ "optional": {{ (not haproxy_single_external_frontend | bool) | string | lower }}
+    }{% endif %}{% if kolla_enable_tls_external | bool and (letsencrypt_managed_certs == 'internal' or letsencrypt_managed_certs == '') %},
+ {
+ "source": "{{ container_config_directory }}/haproxy.pem",
+ "dest": "/etc/haproxy/certificates/haproxy.pem",
+ "owner": "haproxy",
+ "perm": "0600",
"optional": {{ (not kolla_enable_tls_external | bool) | string | lower }}
- },
+    }{% endif %}{% if kolla_enable_tls_internal | bool and (letsencrypt_managed_certs == 'external' or letsencrypt_managed_certs == '') %},
{
"source": "{{ container_config_directory }}/haproxy-internal.pem",
- "dest": "/etc/haproxy/haproxy-internal.pem",
- "owner": "root",
+ "dest": "/etc/haproxy/certificates/haproxy-internal.pem",
+ "owner": "haproxy",
"perm": "0600",
"optional": {{ (not kolla_enable_tls_internal | bool) | string | lower }}
}
+ {% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/loadbalancer/templates/haproxy/haproxy_external_frontend.cfg.j2 b/ansible/roles/loadbalancer/templates/haproxy/haproxy_external_frontend.cfg.j2
new file mode 100644
index 0000000000..87922259e2
--- /dev/null
+++ b/ansible/roles/loadbalancer/templates/haproxy/haproxy_external_frontend.cfg.j2
@@ -0,0 +1,11 @@
+{%- set external_tls_bind_info = 'ssl crt /etc/haproxy/certificates/haproxy.pem' if kolla_enable_tls_external|bool else '' %}
+
+frontend external_frontend
+ mode http
+ http-request del-header X-Forwarded-Proto
+{% for http_option in haproxy_external_single_frontend_options %}
+ {{ http_option }}
+{% endfor %}
+ http-request set-header X-Forwarded-Proto https if { ssl_fc }
+ bind {{ kolla_external_vip_address }}:{{ haproxy_external_single_frontend_public_port }} {{ external_tls_bind_info }}
+ use_backend %[req.hdr(host),lower,map_dom(/etc/haproxy/external-frontend-map,{{ haproxy_external_single_frontend_default_backend }})]
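The single external frontend picks a backend by looking the request's Host header up in /etc/haproxy/external-frontend-map; `map_dom` matches on domain boundaries and `use_backend` falls back to the configured default when nothing matches. A rough Python model of that lookup (the map contents here are hypothetical):

```python
# Hypothetical map file contents: "<domain> <backend>" per line.
frontend_map = {
    "horizon.example.org": "horizon_external_back",
    "object.example.org": "swift_external_back",
}
DEFAULT_BACKEND = "horizon_external_back"

def pick_backend(host_header: str) -> str:
    # Rough model of map_dom: try the full host, then parent domains.
    host = host_header.lower().split(":")[0]
    parts = host.split(".")
    for i in range(len(parts)):
        candidate = ".".join(parts[i:])
        if candidate in frontend_map:
            return frontend_map[candidate]
    return DEFAULT_BACKEND

assert pick_backend("object.example.org") == "swift_external_back"
assert pick_backend("unknown.example.org") == DEFAULT_BACKEND
```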
diff --git a/ansible/roles/loadbalancer/templates/haproxy/haproxy_main.cfg.j2 b/ansible/roles/loadbalancer/templates/haproxy/haproxy_main.cfg.j2
index 5e4ad2c673..4b41674850 100644
--- a/ansible/roles/loadbalancer/templates/haproxy/haproxy_main.cfg.j2
+++ b/ansible/roles/loadbalancer/templates/haproxy/haproxy_main.cfg.j2
@@ -6,17 +6,16 @@ global
daemon
log {{ syslog_server }}:{{ syslog_udp_port }} {{ syslog_haproxy_facility }}
maxconn {{ haproxy_max_connections }}
- nbproc {{ haproxy_processes }}
- {% if (haproxy_processes | int > 1) and (haproxy_process_cpu_map | bool) %}
- {% for cpu_idx in range(0, haproxy_processes) %}
- cpu-map {{ cpu_idx + 1 }} {{ cpu_idx }}
- {% endfor %}
+ nbthread {{ haproxy_threads }}
+ {% if (haproxy_threads | int > 1) and (haproxy_thread_cpu_map | bool) %}
+ cpu-map auto:1/all 0-63
{% endif %}
stats socket /var/lib/kolla/haproxy/haproxy.sock group kolla mode 660{% if haproxy_socket_level_admin | bool %} level admin{% endif %}
{% if kolla_enable_tls_external | bool or kolla_enable_tls_internal | bool %}
- ssl-default-bind-ciphers DEFAULT:!MEDIUM:!3DES
- ssl-default-bind-options no-sslv3 no-tlsv10 no-tlsv11
+ {% for line in haproxy_ssl_settings.split('\n') %}
+ {{ line }}
+ {% endfor %}
tune.ssl.default-dh-param 4096
ca-base {{ haproxy_backend_cacert_dir }}
{% endif %}
@@ -44,6 +43,13 @@ listen stats
stats realm Haproxy\ Stats
stats auth {{ haproxy_user }}:{{ haproxy_password }}
+{% if enable_prometheus_haproxy_exporter | bool %}
+listen metrics
+ bind {{ api_interface_address }}:{{ prometheus_haproxy_exporter_port }}
+ mode http
+ http-request use-service prometheus-exporter if { path /metrics }
+{% endif %}
+
frontend status
bind {{ api_interface_address }}:{{ haproxy_monitor_port }}
{% if api_interface_address != kolla_internal_vip_address %}
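The new `listen metrics` block uses HAProxy's built-in Prometheus exporter (`http-request use-service prometheus-exporter`), which is why upgrade.yml above removes the separate prometheus_haproxy_exporter container. A sketch of scraping the endpoint (address and port are hypothetical stand-ins for api_interface_address and prometheus_haproxy_exporter_port):

```python
import urllib.request

URL = "http://10.0.0.1:9101/metrics"  # hypothetical address and port

with urllib.request.urlopen(URL, timeout=5) as resp:
    for line in resp.read().decode().splitlines()[:5]:
        print(line)  # e.g. haproxy_process_* gauge lines
```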
diff --git a/ansible/roles/loadbalancer/templates/haproxy/haproxy_run.sh.j2 b/ansible/roles/loadbalancer/templates/haproxy/haproxy_run.sh.j2
index 91cf78f4a9..7d3492c08a 100644
--- a/ansible/roles/loadbalancer/templates/haproxy/haproxy_run.sh.j2
+++ b/ansible/roles/loadbalancer/templates/haproxy/haproxy_run.sh.j2
@@ -1,9 +1,38 @@
#!/bin/bash -x
-# We need to run haproxy with one `-f` for each service, because including an
-# entire config directory was not a feature until version 1.7 of HAProxy.
-# So, append "-f $cfg" to the haproxy command for each service file.
-# This will run haproxy_cmd *exactly once*.
-find /etc/haproxy/services.d/ -mindepth 1 -print0 | \
- xargs -0 -Icfg echo -f cfg | \
- xargs /usr/sbin/haproxy -W -db -p /run/haproxy.pid -f /etc/haproxy/haproxy.cfg
+{% if kolla_enable_tls_internal | bool or kolla_enable_tls_external | bool %}
+{% if kolla_enable_tls_external | bool %}
+if [ ! -e "/etc/haproxy/certificates/haproxy.pem" ]; then
+ # Generate temporary self-signed cert
+ # This means external tls is enabled but the certificate was not copied
+ # to the container - so letsencrypt is enabled
+ #
+    # Let's generate a certificate to make haproxy happy; lego will
+    # replace it shortly
+ ssl_tmp_dir=$(mktemp -d)
+ openssl req -x509 -newkey rsa:2048 -sha256 -days 1 -nodes -keyout ${ssl_tmp_dir}/haproxy$$.key -out ${ssl_tmp_dir}/haproxy$$.crt -subj "/CN={{ kolla_external_fqdn }}"
+    cat ${ssl_tmp_dir}/haproxy$$.crt ${ssl_tmp_dir}/haproxy$$.key > /etc/haproxy/certificates/haproxy.pem
+ rm -rf ${ssl_tmp_dir}
+ chown haproxy:haproxy /etc/haproxy/certificates/haproxy.pem
+ chmod 0660 /etc/haproxy/certificates/haproxy.pem
+fi
+{% endif %}
+{% if kolla_enable_tls_internal | bool %}
+if [ ! -e "/etc/haproxy/certificates/haproxy-internal.pem" ]; then
+ # Generate temporary self-signed cert
+    # This means internal tls is enabled but the certificate was not copied
+    # to the container - so letsencrypt is enabled
+    #
+    # Let's generate a certificate to make haproxy happy; lego will
+    # replace it shortly
+ ssl_tmp_dir=$(mktemp -d)
+ openssl req -x509 -newkey rsa:2048 -sha256 -days 1 -nodes -keyout ${ssl_tmp_dir}/haproxy-internal$$.key -out ${ssl_tmp_dir}/haproxy-internal$$.crt -subj "/CN={{ kolla_internal_fqdn }}"
+    cat ${ssl_tmp_dir}/haproxy-internal$$.crt ${ssl_tmp_dir}/haproxy-internal$$.key > /etc/haproxy/certificates/haproxy-internal.pem
+ rm -rf ${ssl_tmp_dir}
+ chown haproxy:haproxy /etc/haproxy/certificates/haproxy-internal.pem
+ chmod 0660 /etc/haproxy/certificates/haproxy-internal.pem
+fi
+{% endif %}
+{% endif %}
+
+exec /usr/sbin/haproxy -W -db -p /run/haproxy.pid -f /etc/haproxy/haproxy.cfg -f /etc/haproxy/services.d/
diff --git a/ansible/roles/loadbalancer/templates/keepalived/keepalived.conf.j2 b/ansible/roles/loadbalancer/templates/keepalived/keepalived.conf.j2
index e6e48370c1..6ef7d6a824 100644
--- a/ansible/roles/loadbalancer/templates/keepalived/keepalived.conf.j2
+++ b/ansible/roles/loadbalancer/templates/keepalived/keepalived.conf.j2
@@ -1,9 +1,11 @@
+{% if keepalived_track_script_enabled | bool %}
vrrp_script check_alive {
script "/check_alive.sh"
interval 2
fall 2
rise 10
}
+{% endif %}
vrrp_instance kolla_internal_vip_{{ keepalived_virtual_router_id }} {
state BACKUP
@@ -40,7 +42,9 @@ vrrp_instance kolla_internal_vip_{{ keepalived_virtual_router_id }} {
auth_type PASS
auth_pass {{ keepalived_password }}
}
+{% if keepalived_track_script_enabled | bool %}
track_script {
check_alive
}
+{% endif %}
}
diff --git a/ansible/roles/loadbalancer/templates/keepalived/keepalived.json.j2 b/ansible/roles/loadbalancer/templates/keepalived/keepalived.json.j2
index eaadb5e175..e2c7c89da0 100644
--- a/ansible/roles/loadbalancer/templates/keepalived/keepalived.json.j2
+++ b/ansible/roles/loadbalancer/templates/keepalived/keepalived.json.j2
@@ -12,6 +12,12 @@
"dest": "/checks",
"owner": "root",
"perm": "0770"
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/loadbalancer/templates/proxysql/proxysql.json.j2 b/ansible/roles/loadbalancer/templates/proxysql/proxysql.json.j2
index 047692b25d..ae1e90856a 100644
--- a/ansible/roles/loadbalancer/templates/proxysql/proxysql.json.j2
+++ b/ansible/roles/loadbalancer/templates/proxysql/proxysql.json.j2
@@ -24,6 +24,50 @@
"dest": "/etc/proxysql/rules",
"owner": "proxysql",
"perm": "0700"
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
+ {% if database_enable_tls_backend | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates/root.crt",
+ "dest": "/etc/proxysql/certs/root.crt",
+ "owner": "proxysql",
+ "perm": "0600"
+ },
+ {
+ "source": "{{ container_config_directory }}/mariadb-cert.pem",
+ "dest": "/etc/proxysql/certs/mariadb-cert.pem",
+ "owner": "proxysql",
+ "perm": "0600"
+ },
+ {
+ "source": "{{ container_config_directory }}/mariadb-key.pem",
+ "dest": "/etc/proxysql/certs/mariadb-key.pem",
+ "owner": "proxysql",
+ "perm": "0600"
+ }{% endif %}
+ {% if database_enable_tls_internal | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates/root.crt",
+ "dest": "/var/lib/proxysql/proxysql-ca.pem",
+ "owner": "proxysql",
+ "perm": "0600"
+ },
+ {
+ "source": "{{ container_config_directory }}/proxysql-cert.pem",
+ "dest": "/var/lib/proxysql/proxysql-cert.pem",
+ "owner": "proxysql",
+ "perm": "0600"
+ },
+ {
+ "source": "{{ container_config_directory }}/proxysql-key.pem",
+ "dest": "/var/lib/proxysql/proxysql-key.pem",
+ "owner": "proxysql",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/loadbalancer/templates/proxysql/proxysql.yaml.j2 b/ansible/roles/loadbalancer/templates/proxysql/proxysql.yaml.j2
index f5c6f38a31..cd3403f35f 100644
--- a/ansible/roles/loadbalancer/templates/proxysql/proxysql.yaml.j2
+++ b/ansible/roles/loadbalancer/templates/proxysql/proxysql.yaml.j2
@@ -10,15 +10,41 @@ errorlog: "/var/log/kolla/proxysql/proxysql.log"
admin_variables:
admin_credentials: "{{ proxysql_admin_user }}:{{ proxysql_admin_password }}"
- mysql_ifaces: "{{ api_interface_address }}:{{ proxysql_admin_port }};{{ kolla_internal_vip_address }}:{{ proxysql_admin_port }};/var/lib/kolla/proxysql/admin.sock"
+ mysql_ifaces: "{{ api_interface_address | put_address_in_context('url') }}:{{ proxysql_admin_port }};{{ kolla_internal_vip_address | put_address_in_context('url') }}:{{ proxysql_admin_port }};/var/lib/kolla/proxysql/admin.sock"
stats_credentials: "{{ proxysql_stats_user }}:{{ proxysql_stats_password }}"
+ restapi_enabled: "{{ enable_prometheus_proxysql_exporter | bool }}"
+ restapi_port: "{{ proxysql_prometheus_exporter_port }}"
+ prometheus_memory_metrics_interval: "{{ proxysql_prometheus_exporter_memory_metrics_interval }}"
mysql_variables:
threads: {{ proxysql_workers }}
max_connections: {{ proxysql_max_connections }}
- interfaces: "{{ kolla_internal_vip_address }}:{{ database_port }}"
+ interfaces: "{{ kolla_internal_vip_address | put_address_in_context('url') }}:{{ database_port }}"
+ connect_retries_delay: "{{ mariadb_connect_retries_delay }}"
+ connect_retries_on_failure: "{{ mariadb_connect_retries_on_failure }}"
+ shun_on_failures: "{{ mariadb_shun_on_failures }}"
monitor_username: "{{ mariadb_monitor_user }}"
monitor_password: "{{ mariadb_monitor_password }}"
+ monitor_connect_interval: "{{ mariadb_monitor_connect_interval }}"
+ monitor_galera_healthcheck_interval: "{{ mariadb_monitor_galera_healthcheck_interval }}"
+ monitor_galera_healthcheck_timeout: "{{ mariadb_monitor_galera_healthcheck_timeout }}"
+ monitor_galera_healthcheck_max_timeout_count: "{{ mariadb_monitor_galera_healthcheck_max_timeout_count }}"
+ monitor_ping_interval: "{{ mariadb_monitor_ping_interval }}"
+ monitor_ping_timeout: "{{ mariadb_monitor_ping_timeout }}"
+ monitor_ping_max_failures: "{{ mariadb_monitor_ping_max_failures }}"
+{% if mariadb_monitor_read_only_interval | length > 0 %}
+ monitor_read_only_interval: {{ mariadb_monitor_read_only_interval }}
+{% endif %}
+ monitor_connect_timeout: 6000
+ connect_timeout_client: 100000
+ connect_timeout_server: 30000
+ connect_timeout_server_max: 100000
+{% if database_enable_tls_backend | bool %}
+ ssl_p2s_ca: "/etc/proxysql/certs/root.crt"
+ ssl_p2s_cert: "/etc/proxysql/certs/mariadb-cert.pem"
+ ssl_p2s_key: "/etc/proxysql/certs/mariadb-key.pem"
+ have_ssl: true
+{% endif %}
mysql_servers:
{% for shard_id, shard in mariadb_shards_info.shards.items() %}
@@ -30,12 +56,15 @@ mysql_servers:
{% set WEIGHT = 10 %}
{% endif %}
- address: "{{ 'api' | kolla_address(host) }}"
- port : {{ database_port }}
+ port : {{ mariadb_port }}
hostgroup : {{ WRITER_GROUP }}
max_connections: {{ proxysql_backend_max_connections }}
max_replication_lag: {{ proxysql_backend_max_replication_lag }}
weight : {{ WEIGHT }}
comment : "Writer {{ host }}"
+{% if database_enable_tls_backend | bool %}
+ use_ssl: 1
+{% endif %}
{% endfor %}
{% endfor %}
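
The put_address_in_context('url') filter matters on IPv6 deployments: host:port strings are ambiguous without brackets around a bare IPv6 address. A sketch of the intended effect (behaviour inferred from the filter's name and its use here; 6032 stands in for proxysql_admin_port):

    # "10.0.0.10" -> "10.0.0.10"     IPv4 passes through unchanged
    # "fd00::10"  -> "[fd00::10]"    IPv6 gains URL-style brackets
    mysql_ifaces: "[fd00::10]:6032;[fd00::1]:6032;/var/lib/kolla/proxysql/admin.sock"
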
diff --git a/ansible/roles/loadbalancer/templates/proxysql/proxysql_run.sh.j2 b/ansible/roles/loadbalancer/templates/proxysql/proxysql_run.sh.j2
index cbb8739d15..c4e18235d6 100644
--- a/ansible/roles/loadbalancer/templates/proxysql/proxysql_run.sh.j2
+++ b/ansible/roles/loadbalancer/templates/proxysql/proxysql_run.sh.j2
@@ -4,4 +4,5 @@ PROXYSQL_LOG_FILE="/var/log/kolla/proxysql/proxysql.log"
proxysql \
--idle-threads \
+ --initial \
--no-version-check -f -c /etc/proxysql.cnf >> ${PROXYSQL_LOG_FILE} 2>&1
diff --git a/ansible/roles/magnum/defaults/main.yml b/ansible/roles/magnum/defaults/main.yml
index 1a051095d8..9f9e1cf3ef 100644
--- a/ansible/roles/magnum/defaults/main.yml
+++ b/ansible/roles/magnum/defaults/main.yml
@@ -16,11 +16,14 @@ magnum_services:
mode: "http"
external: false
port: "{{ magnum_api_port }}"
+ listen_port: "{{ magnum_api_listen_port }}"
magnum_api_external:
enabled: "{{ enable_magnum }}"
mode: "http"
external: true
- port: "{{ magnum_api_port }}"
+ external_fqdn: "{{ magnum_external_fqdn }}"
+ port: "{{ magnum_api_public_port }}"
+ listen_port: "{{ magnum_api_listen_port }}"
magnum-conductor:
container_name: magnum_conductor
group: magnum-conductor
@@ -31,6 +34,12 @@ magnum_services:
dimensions: "{{ magnum_conductor_dimensions }}"
healthcheck: "{{ magnum_conductor_healthcheck }}"
+####################
+# Config Validate
+####################
+magnum_config_validation:
+ - generator: "/magnum/etc/magnum/magnum-config-generator.conf"
+ config: "/etc/magnum/magnum.conf"
####################
# Database
@@ -67,11 +76,11 @@ default_docker_volume_type: ""
####################
magnum_tag: "{{ openstack_tag }}"
-magnum_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/magnum-api"
+magnum_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}magnum-api"
magnum_api_tag: "{{ magnum_tag }}"
magnum_api_image_full: "{{ magnum_api_image }}:{{ magnum_api_tag }}"
-magnum_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/magnum-conductor"
+magnum_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}magnum-conductor"
magnum_conductor_tag: "{{ magnum_tag }}"
magnum_conductor_image_full: "{{ magnum_conductor_image }}:{{ magnum_conductor_tag }}"
@@ -108,14 +117,14 @@ magnum_api_default_volumes:
- "{{ node_config_directory }}/magnum-api/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "{{ kolla_dev_repos_directory ~ '/magnum/magnum:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/magnum' if magnum_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/magnum:/dev-mode/magnum' if magnum_dev_mode | bool else '' }}"
- "kolla_logs:/var/log/kolla/"
magnum_conductor_default_volumes:
- "{{ node_config_directory }}/magnum-conductor/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "magnum:/var/lib/magnum/"
- - "{{ kolla_dev_repos_directory ~ '/magnum/magnum:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/magnum' if magnum_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/magnum:/dev-mode/magnum' if magnum_dev_mode | bool else '' }}"
- "kolla_logs:/var/log/kolla/"
magnum_extra_volumes: "{{ default_extra_volumes }}"
@@ -127,8 +136,8 @@ magnum_conductor_container_proxy: "{{ container_proxy }}"
####################
# OpenStack
####################
-magnum_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ magnum_api_port }}/v1"
-magnum_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ magnum_api_port }}/v1"
+magnum_internal_endpoint: "{{ magnum_internal_base_endpoint }}/v1"
+magnum_public_endpoint: "{{ magnum_public_base_endpoint }}/v1"
magnum_logging_debug: "{{ openstack_logging_debug }}"
diff --git a/ansible/roles/magnum/handlers/main.yml b/ansible/roles/magnum/handlers/main.yml
index 049877a584..df6b7333e9 100644
--- a/ansible/roles/magnum/handlers/main.yml
+++ b/ansible/roles/magnum/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "magnum-api"
service: "{{ magnum_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -13,15 +13,13 @@
environment: "{{ service.environment }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart magnum-conductor container
vars:
service_name: "magnum-conductor"
service: "{{ magnum_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -30,5 +28,3 @@
environment: "{{ service.environment }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/magnum/tasks/bootstrap.yml b/ansible/roles/magnum/tasks/bootstrap.yml
index bf0afa219a..54b4afef83 100644
--- a/ansible/roles/magnum/tasks/bootstrap.yml
+++ b/ansible/roles/magnum/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating Magnum database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -17,6 +18,7 @@
- name: Creating Magnum database user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
diff --git a/ansible/roles/magnum/tasks/bootstrap_service.yml b/ansible/roles/magnum/tasks/bootstrap_service.yml
index fe79cbeef2..9ebf3aefc4 100644
--- a/ansible/roles/magnum/tasks/bootstrap_service.yml
+++ b/ansible/roles/magnum/tasks/bootstrap_service.yml
@@ -3,7 +3,7 @@
vars:
magnum_api: "{{ magnum_services['magnum-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -14,7 +14,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_magnum"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ magnum_api.volumes | reject('equalto', '') | list }}"
run_once: True
delegate_to: "{{ groups[magnum_api.group][0] }}"
diff --git a/ansible/roles/magnum/tasks/check-containers.yml b/ansible/roles/magnum/tasks/check-containers.yml
index 96882bd034..b7e2f7c29f 100644
--- a/ansible/roles/magnum/tasks/check-containers.yml
+++ b/ansible/roles/magnum/tasks/check-containers.yml
@@ -1,18 +1,3 @@
---
-- name: Check magnum containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- environment: "{{ item.value.environment }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ magnum_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/magnum/tasks/config.yml b/ansible/roles/magnum/tasks/config.yml
index c299631058..1ffc759ee4 100644
--- a/ansible/roles/magnum/tasks/config.yml
+++ b/ansible/roles/magnum/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ magnum_services }}"
+ with_dict: "{{ magnum_services | select_services_enabled_and_mapped_to_host }}"
- name: Check if policies shall be overwritten
stat:
@@ -31,6 +28,29 @@
when:
- magnum_policy.results
+- name: Check if kubeconfig file is supplied
+ stat:
+ path: "{{ node_custom_config }}/magnum/kubeconfig"
+ delegate_to: localhost
+ run_once: True
+ register: magnum_kubeconfig_file
+
+- name: Copying over kubeconfig file
+ template:
+ src: "{{ node_custom_config }}/magnum/kubeconfig"
+ dest: "{{ node_config_directory }}/{{ item.key }}/kubeconfig"
+ mode: "0660"
+ become: true
+ when:
+ - magnum_kubeconfig_file.stat.exists
+ with_dict: "{{ magnum_services | select_services_enabled_and_mapped_to_host }}"
+
+- name: Set magnum kubeconfig file path
+ set_fact:
+ magnum_kubeconfig_file_path: "{{ magnum_kubeconfig_file.stat.path }}"
+ when:
+ - magnum_kubeconfig_file.stat.exists
+
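
The stat/copy pair above makes the Cluster API drivers opt-in: magnum only receives a kubeconfig (and, per the magnum.conf.j2 change further down, keeps the k8s_cluster_api_* drivers enabled) when the operator supplies one. With the usual node_custom_config default of /etc/kolla/config, that means placing the management-cluster credentials at (path illustrative):

    # assumes node_custom_config = /etc/kolla/config
    cp admin.kubeconfig /etc/kolla/config/magnum/kubeconfig
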
- include_tasks: copy-certs.yml
when:
- kolla_copy_ca_into_containers | bool
@@ -41,12 +61,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ magnum_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ magnum_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over magnum.conf
vars:
@@ -61,12 +76,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/magnum.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ magnum_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ magnum_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over existing policy file
template:
@@ -76,8 +86,4 @@
become: true
when:
- magnum_policy_file is defined
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ magnum_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ magnum_services | select_services_enabled_and_mapped_to_host }}"
diff --git a/ansible/roles/magnum/tasks/config_validate.yml b/ansible/roles/magnum/tasks/config_validate.yml
new file mode 100644
index 0000000000..fae91de74e
--- /dev/null
+++ b/ansible/roles/magnum/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ magnum_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ magnum_config_validation }}"
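
Each new config_validate.yml is a thin wrapper handing the role's service map and its oslo-config-generator metadata to the shared service-config-validate role. Operators would drive it through the matching kolla-ansible action, along the lines of (invocation assumed from the feature name):

    kolla-ansible -i inventory validate-config
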
diff --git a/ansible/roles/magnum/tasks/precheck.yml b/ansible/roles/magnum/tasks/precheck.yml
index de612b0dad..d9e83fb00f 100644
--- a/ansible/roles/magnum/tasks/precheck.yml
+++ b/ansible/roles/magnum/tasks/precheck.yml
@@ -8,8 +8,11 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- magnum_api
+ check_mode: false
register: container_facts
- name: Checking free port for Magnum API
diff --git a/ansible/roles/magnum/tasks/register.yml b/ansible/roles/magnum/tasks/register.yml
index a258d3a1d9..a8b5165bed 100644
--- a/ansible/roles/magnum/tasks/register.yml
+++ b/ansible/roles/magnum/tasks/register.yml
@@ -9,7 +9,8 @@
- name: Creating Magnum trustee domain
become: true
kolla_toolbox:
- module_name: "os_keystone_domain"
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: "openstack.cloud.identity_domain"
module_args:
name: "{{ magnum_trustee_domain }}"
description: "Owns users and projects created by magnum"
@@ -23,11 +24,13 @@
- name: Creating Magnum trustee user
become: true
kolla_toolbox:
- module_name: "os_user"
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: "openstack.cloud.identity_user"
module_args:
name: "{{ magnum_trustee_domain_admin }}"
domain: "{{ magnum_trustee_domain }}"
password: "{{ magnum_keystone_password }}"
+ update_password: "{{ 'always' if update_keystone_service_user_passwords | bool else 'on_create' }}"
auth: "{{ openstack_magnum_auth }}"
endpoint_type: "{{ openstack_interface }}"
cacert: "{{ openstack_cacert }}"
@@ -37,9 +40,10 @@
- name: Creating Magnum trustee user role
become: true
kolla_toolbox:
- module_name: "os_user_role"
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: "openstack.cloud.role_assignment"
module_args:
- domain: "{{ trustee_domain.id }}"
+ domain: "{{ trustee_domain.domain.id }}"
user: "{{ magnum_trustee_domain_admin }}"
role: "admin"
auth: "{{ openstack_magnum_auth }}"
diff --git a/ansible/roles/magnum/templates/magnum-api.json.j2 b/ansible/roles/magnum/templates/magnum-api.json.j2
index 9737ad8072..671c122051 100644
--- a/ansible/roles/magnum/templates/magnum-api.json.j2
+++ b/ansible/roles/magnum/templates/magnum-api.json.j2
@@ -6,12 +6,25 @@
"dest": "/etc/magnum/magnum.conf",
"owner": "magnum",
"perm": "0600"
- }{% if magnum_policy_file is defined %},
+ }{% if magnum_kubeconfig_file_path is defined %},
+ {
+ "source": "{{ container_config_directory }}/kubeconfig",
+ "dest": "/var/lib/magnum/.kube/config",
+ "owner": "magnum",
+ "perm": "0600"
+ }{% endif %}
+ {% if magnum_policy_file is defined %},
{
"source": "{{ container_config_directory }}/{{ magnum_policy_file }}",
"dest": "/etc/magnum/{{ magnum_policy_file }}",
"owner": "magnum",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/magnum/templates/magnum-conductor.json.j2 b/ansible/roles/magnum/templates/magnum-conductor.json.j2
index f77b1609d1..8d077396eb 100644
--- a/ansible/roles/magnum/templates/magnum-conductor.json.j2
+++ b/ansible/roles/magnum/templates/magnum-conductor.json.j2
@@ -6,12 +6,25 @@
"dest": "/etc/magnum/magnum.conf",
"owner": "magnum",
"perm": "0600"
- }{% if magnum_policy_file is defined %},
+ }{% if magnum_kubeconfig_file_path is defined %},
+ {
+ "source": "{{ container_config_directory }}/kubeconfig",
+ "dest": "/var/lib/magnum/.kube/config",
+ "owner": "magnum",
+ "perm": "0600"
+ }{% endif %}
+ {% if magnum_policy_file is defined %},
{
"source": "{{ container_config_directory }}/{{ magnum_policy_file }}",
"dest": "/etc/magnum/{{ magnum_policy_file }}",
"owner": "magnum",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/magnum/templates/magnum.conf.j2 b/ansible/roles/magnum/templates/magnum.conf.j2
index e0a2e0a9cd..37d8d21380 100644
--- a/ansible/roles/magnum/templates/magnum.conf.j2
+++ b/ansible/roles/magnum/templates/magnum.conf.j2
@@ -92,7 +92,7 @@ password = {{ magnum_keystone_password }}
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
@@ -126,11 +126,18 @@ topics = {{ magnum_enabled_notification_topics | map(attribute='name') | join(',
driver = noop
{% endif %}
-{% if om_enable_rabbitmq_tls | bool %}
[oslo_messaging_rabbit]
+heartbeat_in_pthread = false
+{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
{% if magnum_policy_file is defined %}
[oslo_policy]
@@ -144,3 +151,8 @@ trace_sqlalchemy = true
hmac_keys = {{ osprofiler_secret }}
connection_string = {{ osprofiler_backend_connection_string }}
{% endif %}
+
+{% if magnum_kubeconfig_file_path is not defined %}
+[drivers]
+disabled_drivers = k8s_cluster_api_flatcar,k8s_cluster_api_rockylinux,k8s_cluster_api_ubuntu,k8s_cluster_api_ubuntu_focal
+{% endif %}
diff --git a/ansible/roles/manila/defaults/main.yml b/ansible/roles/manila/defaults/main.yml
index 895290a329..3ad48f651a 100644
--- a/ansible/roles/manila/defaults/main.yml
+++ b/ansible/roles/manila/defaults/main.yml
@@ -14,11 +14,14 @@ manila_services:
mode: "http"
external: false
port: "{{ manila_api_port }}"
+ listen_port: "{{ manila_api_listen_port }}"
manila_api_external:
enabled: "{{ enable_manila }}"
mode: "http"
external: true
- port: "{{ manila_api_port }}"
+ external_fqdn: "{{ manila_external_fqdn }}"
+ port: "{{ manila_api_public_port }}"
+ listen_port: "{{ manila_api_listen_port }}"
manila-scheduler:
container_name: "manila_scheduler"
group: "manila-scheduler"
@@ -33,7 +36,7 @@ manila_services:
image: "{{ manila_share_image_full }}"
enabled: True
privileged: True
- volumes: "{{ manila_share_default_volumes + manila_share_extra_volumes }}"
+ volumes: "{{ manila_share_default_volumes + manila_share_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
dimensions: "{{ manila_share_dimensions }}"
healthcheck: "{{ manila_share_healthcheck }}"
manila-data:
@@ -42,10 +45,16 @@ manila_services:
image: "{{ manila_data_image_full }}"
enabled: True
privileged: True
- volumes: "{{ manila_data_default_volumes + manila_data_extra_volumes }}"
+ volumes: "{{ manila_data_default_volumes + manila_data_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
dimensions: "{{ manila_data_dimensions }}"
healthcheck: "{{ manila_data_healthcheck }}"
+####################
+# Config Validate
+####################
+manila_config_validation:
+ - generator: "/manila/etc/oslo-config-generator/manila.conf"
+ config: "/etc/manila/manila.conf"
#####################
## Database
@@ -73,19 +82,19 @@ manila_database_shard:
#####################
manila_tag: "{{ openstack_tag }}"
-manila_share_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/manila-share"
+manila_share_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}manila-share"
manila_share_tag: "{{ manila_tag }}"
manila_share_image_full: "{{ manila_share_image }}:{{ manila_share_tag }}"
-manila_scheduler_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/manila-scheduler"
+manila_scheduler_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}manila-scheduler"
manila_scheduler_tag: "{{ manila_tag }}"
manila_scheduler_image_full: "{{ manila_scheduler_image }}:{{ manila_scheduler_tag }}"
-manila_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/manila-api"
+manila_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}manila-api"
manila_api_tag: "{{ manila_tag }}"
manila_api_image_full: "{{ manila_api_image }}:{{ manila_api_tag }}"
-manila_data_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/manila-data"
+manila_data_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}manila-data"
manila_data_tag: "{{ manila_tag }}"
manila_data_image_full: "{{ manila_data_image }}:{{ manila_data_tag }}"
@@ -150,30 +159,30 @@ manila_share_default_volumes:
- "{{ node_config_directory }}/manila-share/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "/run/:/run/:shared"
+ - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
- "kolla_logs:/var/log/kolla/"
- "/lib/modules:/lib/modules:ro"
- - "{{ kolla_dev_repos_directory ~ '/manila/manila:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/manila' if manila_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/manila:/dev-mode/manila' if manila_dev_mode | bool else '' }}"
manila_scheduler_default_volumes:
- "{{ node_config_directory }}/manila-scheduler/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/manila/manila:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/manila' if manila_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/manila:/dev-mode/manila' if manila_dev_mode | bool else '' }}"
manila_api_default_volumes:
- "{{ node_config_directory }}/manila-api/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/manila/manila:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/manila' if manila_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/manila:/dev-mode/manila' if manila_dev_mode | bool else '' }}"
manila_data_default_volumes:
- "{{ node_config_directory }}/manila-data/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "/dev/:/dev/"
- - "/run/:/run/:shared"
+ - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/manila/manila:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/manila' if manila_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/manila:/dev-mode/manila' if manila_dev_mode | bool else '' }}"
manila_extra_volumes: "{{ default_extra_volumes }}"
manila_share_extra_volumes: "{{ manila_extra_volumes }}"
@@ -184,9 +193,6 @@ manila_data_extra_volumes: "{{ manila_extra_volumes }}"
#####################
## OpenStack
#####################
-manila_internal_base_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ manila_api_port }}"
-manila_public_base_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ manila_api_port }}"
-
manila_internal_endpoint: "{{ manila_internal_base_endpoint }}/v1/%(tenant_id)s"
manila_public_endpoint: "{{ manila_public_base_endpoint }}/v1/%(tenant_id)s"
@@ -219,24 +225,30 @@ manila_backends:
protocols:
- "NFS"
- "CIFS"
+ - name: "glusterfsnfs1"
+ driver: "glusterfsnfs"
+ enabled: "{{ enable_manila_backend_glusterfs_nfs | bool }}"
+ protocols:
+ - "NFS"
+
+manila_ceph_backends:
- name: "cephfsnative1"
+ share_name: "CEPHFS1"
driver: "cephfsnative"
+ cluster: "{{ ceph_cluster }}"
enabled: "{{ enable_manila_backend_cephfs_native | bool }}"
protocols:
- "CEPHFS"
- name: "cephfsnfs1"
+ share_name: "CEPHFSNFS1"
driver: "cephfsnfs"
+ cluster: "{{ ceph_cluster }}"
enabled: "{{ enable_manila_backend_cephfs_nfs | bool }}"
protocols:
- "NFS"
- "CIFS"
- - name: "glusterfsnfs1"
- driver: "glusterfsnfs"
- enabled: "{{ enable_manila_backend_glusterfs_nfs | bool }}"
- protocols:
- - "NFS"
-manila_enabled_backends: "{{ manila_backends | selectattr('enabled', 'equalto', true) | list }}"
+manila_enabled_backends: "{{ manila_backends | selectattr('enabled', 'equalto', true) | list + manila_ceph_backends | selectattr('enabled', 'equalto', true) | list }}"
####################
diff --git a/ansible/roles/manila/handlers/main.yml b/ansible/roles/manila/handlers/main.yml
index d7ef447b1a..8609bba899 100644
--- a/ansible/roles/manila/handlers/main.yml
+++ b/ansible/roles/manila/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "manila-api"
service: "{{ manila_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -12,15 +12,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart manila-data container
vars:
service_name: "manila-data"
service: "{{ manila_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -29,15 +27,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart manila-scheduler container
vars:
service_name: "manila-scheduler"
service: "{{ manila_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -45,15 +41,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart manila-share container
vars:
service_name: "manila-share"
service: "{{ manila_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -63,5 +57,3 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/manila/tasks/bootstrap.yml b/ansible/roles/manila/tasks/bootstrap.yml
index 061c27dedd..5c399321b7 100644
--- a/ansible/roles/manila/tasks/bootstrap.yml
+++ b/ansible/roles/manila/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating Manila database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -17,6 +18,7 @@
- name: Creating Manila database user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
diff --git a/ansible/roles/manila/tasks/bootstrap_service.yml b/ansible/roles/manila/tasks/bootstrap_service.yml
index 13786be068..f3e72da72d 100644
--- a/ansible/roles/manila/tasks/bootstrap_service.yml
+++ b/ansible/roles/manila/tasks/bootstrap_service.yml
@@ -3,7 +3,7 @@
vars:
manila_api: "{{ manila_services['manila-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -14,7 +14,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_manila"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ manila_api.volumes | reject('equalto', '') | list }}"
run_once: True
delegate_to: "{{ groups[manila_api.group][0] }}"
diff --git a/ansible/roles/manila/tasks/check-containers.yml b/ansible/roles/manila/tasks/check-containers.yml
index 18fc052f8a..b7e2f7c29f 100644
--- a/ansible/roles/manila/tasks/check-containers.yml
+++ b/ansible/roles/manila/tasks/check-containers.yml
@@ -1,18 +1,3 @@
---
-- name: Check manila containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- privileged: "{{ item.value.privileged | default(False) }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ manila_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/manila/tasks/config.yml b/ansible/roles/manila/tasks/config.yml
index bf2929c804..6a88e6ec4f 100644
--- a/ansible/roles/manila/tasks/config.yml
+++ b/ansible/roles/manila/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ manila_services }}"
+ with_dict: "{{ manila_services | select_services_enabled_and_mapped_to_host }}"
- include_tasks: external_ceph.yml
when:
@@ -46,12 +43,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ manila_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ manila_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over manila.conf
vars:
@@ -68,11 +60,7 @@
become: true
when:
- item.key in [ "manila-api", "manila-data", "manila-scheduler" ]
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ manila_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ manila_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over manila-share.conf
vars:
@@ -90,13 +78,9 @@
dest: "{{ node_config_directory }}/{{ item }}/manila.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_items:
- "manila-share"
- notify:
- - Restart manila-share container
- name: Copying over existing policy file
template:
@@ -105,8 +89,4 @@
mode: "0660"
when:
- manila_policy_file is defined
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ manila_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ manila_services | select_services_enabled_and_mapped_to_host }}"
diff --git a/ansible/roles/manila/tasks/config_validate.yml b/ansible/roles/manila/tasks/config_validate.yml
new file mode 100644
index 0000000000..6f14e38f6b
--- /dev/null
+++ b/ansible/roles/manila/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ manila_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ manila_config_validation }}"
diff --git a/ansible/roles/manila/tasks/external_ceph.yml b/ansible/roles/manila/tasks/external_ceph.yml
index 7eedc5d3fa..3b70b0ad4f 100644
--- a/ansible/roles/manila/tasks/external_ceph.yml
+++ b/ansible/roles/manila/tasks/external_ceph.yml
@@ -1,25 +1,39 @@
---
-- name: Copying over ceph.conf for manila
- template:
- src: "{{ node_custom_config }}/manila/ceph.conf"
- dest: "{{ node_config_directory }}/manila-share/ceph.conf"
+- name: Ensuring manila service ceph config subdir exists
+ vars:
+ service: "{{ manila_services['manila-share'] }}"
+ file:
+ path: "{{ node_config_directory }}/manila-share/ceph"
+ state: "directory"
+ owner: "{{ config_owner_user }}"
+ group: "{{ config_owner_group }}"
+ mode: "0770"
+ become: true
+ when: service | service_enabled_and_mapped_to_host
+
+- name: Copy over multiple Ceph configs for Manila
+ merge_configs:
+ sources:
+ - "{{ node_custom_config }}/manila/{{ item.cluster }}.conf"
+ - "{{ node_custom_config }}/manila/manila-share/{{ item.cluster }}.conf"
+ dest: "{{ node_config_directory }}/manila-share/ceph/{{ item.cluster }}.conf"
mode: "0660"
become: true
when:
- inventory_hostname in groups['manila-share']
- notify:
- - Restart manila-share container
+ - item.enabled | bool
+ with_items: "{{ manila_ceph_backends }}"
-- name: Copy over Ceph keyring files for manila
+- name: Copy over Ceph keyrings for Manila
template:
- src: "{{ node_custom_config }}/manila/{{ ceph_manila_keyring }}"
- dest: "{{ node_config_directory }}/manila-share/{{ ceph_manila_keyring }}"
- mode: "0600"
+ src: "{{ node_custom_config }}/manila/{{ item.cluster }}.client.{{ ceph_manila_user }}.keyring"
+ dest: "{{ node_config_directory }}/manila-share/ceph/{{ item.cluster }}.client.{{ ceph_manila_user }}.keyring"
+ mode: "0660"
become: true
+ with_items: "{{ manila_ceph_backends }}"
when:
- inventory_hostname in groups['manila-share']
- notify:
- - Restart manila-share container
+ - item.enabled | bool
- name: Ensuring config directory has correct owner and permission
become: true
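
With the per-cluster layout, each enabled Ceph backend now looks for a <cluster>.conf plus a matching client keyring under the Manila custom config tree. Assuming the defaults ceph_cluster=ceph, ceph_manila_user=manila and node_custom_config=/etc/kolla/config, the operator-supplied files would be:

    /etc/kolla/config/manila/ceph.conf
    /etc/kolla/config/manila/manila-share/ceph.conf    # optional per-service override
    /etc/kolla/config/manila/ceph.client.manila.keyring
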
diff --git a/ansible/roles/manila/tasks/precheck.yml b/ansible/roles/manila/tasks/precheck.yml
index 8c88562dcf..630b288628 100644
--- a/ansible/roles/manila/tasks/precheck.yml
+++ b/ansible/roles/manila/tasks/precheck.yml
@@ -8,8 +8,11 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- manila_api
+ check_mode: false
register: container_facts
- name: Checking free port for Manila API
diff --git a/ansible/roles/manila/templates/manila-api.json.j2 b/ansible/roles/manila/templates/manila-api.json.j2
index e1d6c8af8b..cd189484c8 100644
--- a/ansible/roles/manila/templates/manila-api.json.j2
+++ b/ansible/roles/manila/templates/manila-api.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/manila/{{ manila_policy_file }}",
"owner": "manila",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/manila/templates/manila-data.json.j2 b/ansible/roles/manila/templates/manila-data.json.j2
index 7c1f82316c..6dbd175ef1 100644
--- a/ansible/roles/manila/templates/manila-data.json.j2
+++ b/ansible/roles/manila/templates/manila-data.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/manila/{{ manila_policy_file }}",
"owner": "manila",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/manila/templates/manila-scheduler.json.j2 b/ansible/roles/manila/templates/manila-scheduler.json.j2
index 2d6987af64..4f7c7b5af8 100644
--- a/ansible/roles/manila/templates/manila-scheduler.json.j2
+++ b/ansible/roles/manila/templates/manila-scheduler.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/manila/{{ manila_policy_file }}",
"owner": "manila",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/manila/templates/manila-share.conf.j2 b/ansible/roles/manila/templates/manila-share.conf.j2
index 82cecff683..98cb78b4e6 100644
--- a/ansible/roles/manila/templates/manila-share.conf.j2
+++ b/ansible/roles/manila/templates/manila-share.conf.j2
@@ -30,7 +30,7 @@ username = {{ cinder_keystone_user }}
password = {{ cinder_keystone_password }}
cafile = {{ openstack_cacert }}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
@@ -47,7 +47,7 @@ username = {{ nova_keystone_user }}
password = {{ nova_keystone_password }}
cafile = {{ openstack_cacert }}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
@@ -65,7 +65,7 @@ username = {{ neutron_keystone_user }}
password = {{ neutron_keystone_password }}
cafile = {{ openstack_cacert }}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
@@ -106,33 +106,41 @@ hitachi_hnas_file_system_name = {{ hnas_file_system_name }}
{% endif %}
{% if enable_manila_backend_cephfs_native | bool %}
-[cephfsnative1]
+{% for backend in manila_ceph_backends %}
+{% if backend.driver == 'cephfsnative' %}
+[{{ backend.name }}]
driver_handles_share_servers = False
-share_backend_name = CEPHFS1
+share_backend_name = {{ backend.share_name }}
share_driver = manila.share.drivers.cephfs.driver.CephFSDriver
-cephfs_conf_path = /etc/ceph/ceph.conf
+cephfs_conf_path = /etc/ceph/{{ backend.cluster }}.conf
cephfs_auth_id = {{ ceph_manila_user }}
-cephfs_cluster_name = ceph
+cephfs_cluster_name = {{ backend.cluster }}
{% if manila_cephfs_filesystem_name | length %}
cephfs_filesystem_name = {{ manila_cephfs_filesystem_name }}
{% endif %}
{% endif %}
+{% endfor %}
+{% endif %}
{% if enable_manila_backend_cephfs_nfs | bool %}
-[cephfsnfs1]
+{% for backend in manila_ceph_backends %}
+{% if backend.driver == 'cephfsnfs' %}
+[{{ backend.name }}]
driver_handles_share_servers = False
-share_backend_name = CEPHFSNFS1
+share_backend_name = {{ backend.share_name }}
share_driver = manila.share.drivers.cephfs.driver.CephFSDriver
cephfs_protocol_helper_type = NFS
-cephfs_conf_path = /etc/ceph/ceph.conf
+cephfs_conf_path = /etc/ceph/{{ backend.cluster }}.conf
cephfs_auth_id = {{ ceph_manila_user }}
-cephfs_cluster_name = ceph
+cephfs_cluster_name = {{ backend.cluster }}
{% if manila_cephfs_filesystem_name | length %}
cephfs_filesystem_name = {{ manila_cephfs_filesystem_name }}
{% endif %}
cephfs_ganesha_server_is_remote= False
cephfs_ganesha_server_ip = {{ api_interface_address }}
{% endif %}
+{% endfor %}
+{% endif %}
{% if enable_manila_backend_glusterfs_nfs | bool %}
[glusterfsnfs1]
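
For reference, with a single default cluster the cephfsnative loop above renders a section equivalent to the static one it replaces (values assume ceph_cluster=ceph and ceph_manila_user=manila):

    [cephfsnative1]
    driver_handles_share_servers = False
    share_backend_name = CEPHFS1
    share_driver = manila.share.drivers.cephfs.driver.CephFSDriver
    cephfs_conf_path = /etc/ceph/ceph.conf
    cephfs_auth_id = manila
    cephfs_cluster_name = ceph
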
diff --git a/ansible/roles/manila/templates/manila-share.json.j2 b/ansible/roles/manila/templates/manila-share.json.j2
index 50e0456a08..b304665fd5 100644
--- a/ansible/roles/manila/templates/manila-share.json.j2
+++ b/ansible/roles/manila/templates/manila-share.json.j2
@@ -8,14 +8,8 @@
"perm": "0600"
}{% if enable_manila_backend_cephfs_native | bool or enable_manila_backend_cephfs_nfs | bool %},
{
- "source": "{{ container_config_directory }}/ceph.conf",
- "dest": "/etc/ceph/ceph.conf",
- "owner": "manila",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/{{ ceph_manila_keyring }}",
- "dest": "/etc/ceph/{{ ceph_manila_keyring }}",
+ "source": "{{ container_config_directory }}/ceph",
+ "dest": "/etc/ceph",
"owner": "manila",
"perm": "0600"
}{% endif %}{% if manila_policy_file is defined %},
@@ -24,6 +18,12 @@
"dest": "/etc/manila/{{ manila_policy_file }}",
"owner": "manila",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/manila/templates/manila.conf.j2 b/ansible/roles/manila/templates/manila.conf.j2
index 0151d2b5e4..1cd53601bf 100644
--- a/ansible/roles/manila/templates/manila.conf.j2
+++ b/ansible/roles/manila/templates/manila.conf.j2
@@ -13,7 +13,7 @@ osapi_share_workers = {{ manila_api_workers }}
rootwrap_config = /etc/manila/rootwrap.conf
api_paste_config = /etc/manila/api-paste.ini
-enabled_share_protocols = "{{ manila_backends|selectattr('enabled', 'equalto', true)|sum(attribute='protocols', start=[]) | unique | join(',') }}"
+enabled_share_protocols = "{{ manila_enabled_backends | sum(attribute='protocols', start=[]) | unique | join(',') }}"
auth_strategy = keystone
@@ -43,7 +43,7 @@ password = {{ manila_keystone_password }}
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
@@ -56,11 +56,18 @@ topics = {{ manila_enabled_notification_topics | map(attribute='name') | join(',
driver = noop
{% endif %}
-{% if om_enable_rabbitmq_tls | bool %}
[oslo_messaging_rabbit]
+heartbeat_in_pthread = false
+{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
[oslo_middleware]
enable_proxy_headers_parsing = True
diff --git a/ansible/roles/mariadb/defaults/main.yml b/ansible/roles/mariadb/defaults/main.yml
index 5e8c7d9a1b..0ebb5c3989 100644
--- a/ansible/roles/mariadb/defaults/main.yml
+++ b/ansible/roles/mariadb/defaults/main.yml
@@ -67,11 +67,11 @@ external_haproxy_members: "{% for host in mariadb_default_database_shard_hosts %
####################
# Docker
####################
-mariadb_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/mariadb-server"
+mariadb_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}mariadb-server"
mariadb_tag: "{{ openstack_tag }}"
mariadb_image_full: "{{ mariadb_image }}:{{ mariadb_tag }}"
-mariadb_clustercheck_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/mariadb-clustercheck"
+mariadb_clustercheck_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}mariadb-clustercheck"
mariadb_clustercheck_tag: "{{ mariadb_tag }}"
mariadb_clustercheck_image_full: "{{ mariadb_clustercheck_image }}:{{ mariadb_clustercheck_tag }}"
@@ -82,7 +82,7 @@ mariadb_default_volumes:
- "{{ node_config_directory }}/mariadb/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "mariadb:/var/lib/mysql"
+ - "{{ mariadb_datadir_volume }}:/var/lib/mysql"
- "kolla_logs:/var/log/kolla/"
mariadb_clustercheck_default_volumes:
- "{{ node_config_directory }}/mariadb-clustercheck/:{{ container_config_directory }}/:ro"
@@ -120,12 +120,8 @@ mariadb_wsrep_extra_provider_options: []
####################
# Backups
####################
-mariabackup_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/mariadb-server"
-mariabackup_tag: "{{ mariadb_tag }}"
-mariabackup_image_full: "{{ mariabackup_image }}:{{ mariabackup_tag }}"
-
mariadb_backup_host: "{{ groups[mariadb_shard_group][0] }}"
-mariadb_backup_database_schema: "PERCONA_SCHEMA"
+mariadb_backup_database_schema: "mysql"
mariadb_backup_database_user: "{% if mariadb_loadbalancer == 'haproxy' %}backup{% else %}{{ mariadb_shard_backup_user_prefix }}{{ mariadb_shard_id | string }}{% endif %}"
mariadb_backup_type: "full"
mariadb_backup_possible: "{{ mariadb_loadbalancer != 'haproxy' or inventory_hostname in mariadb_default_database_shard_hosts }}"
@@ -133,7 +129,7 @@ mariadb_backup_possible: "{{ mariadb_loadbalancer != 'haproxy' or inventory_host
####################
# Clustercheck
####################
-enable_mariadb_clustercheck: "{{ enable_haproxy }}"
+enable_mariadb_clustercheck: "{{ 'True' if mariadb_loadbalancer == 'haproxy' else 'False' }}"
####################
# Sharding
diff --git a/ansible/roles/mariadb/handlers/main.yml b/ansible/roles/mariadb/handlers/main.yml
index 1c76fc48ff..500f489c63 100644
--- a/ansible/roles/mariadb/handlers/main.yml
+++ b/ansible/roles/mariadb/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "mariadb"
service: "{{ mariadb_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
environment:
@@ -14,7 +14,7 @@
labels:
BOOTSTRAP:
name: "{{ service.container_name }}"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
listen: Bootstrap MariaDB cluster
@@ -37,7 +37,7 @@
become: true
command: >-
{{ kolla_container_engine }} exec {{ mariadb_service.container_name }}
- mysql -uroot -p{{ database_password }}
+ mariadb -uroot -p{{ database_password }}
--silent --skip-column-names
-e 'SHOW STATUS LIKE "wsrep_local_state_comment"'
changed_when: false
@@ -48,45 +48,34 @@
no_log: true
listen: Bootstrap MariaDB cluster
+- name: Ensure MariaDB is running normally on bootstrap host
+ group_by:
+ key: mariadb_bootstrap_restart
+ listen: Bootstrap MariaDB cluster
+
- name: Restart MariaDB on existing cluster members
- include_tasks: 'restart_services.yml'
+ group_by:
+ key: mariadb_restart
when:
- groups[mariadb_shard_group + '_port_alive_True'] is defined
- inventory_hostname in groups[mariadb_shard_group + '_port_alive_True']
- - groups[mariadb_shard_group + '_port_alive_True'].index(inventory_hostname) % 4 == item
- - kolla_action != "config"
- listen: restart mariadb
- loop:
- - 0
- - 1
- - 2
- - 3
+ listen: Restart mariadb container
- name: Start MariaDB on new nodes
- include_tasks: 'restart_services.yml'
+ group_by:
+ key: mariadb_start
when:
- bootstrap_host is not defined or bootstrap_host != inventory_hostname
- groups[mariadb_shard_group + '_port_alive_False'] is defined
- inventory_hostname in groups[mariadb_shard_group + '_port_alive_False']
- - groups[mariadb_shard_group + '_port_alive_False'].index(inventory_hostname) % 4 == item
- - kolla_action != "config"
- listen: restart mariadb
- loop:
- - 0
- - 1
- - 2
- - 3
-
-- name: Ensure MariaDB is running normally on bootstrap host
- include_tasks: 'restart_services.yml'
- listen: Bootstrap MariaDB cluster
+ listen: Restart mariadb container
- name: Restart mariadb-clustercheck container
vars:
service_name: "mariadb-clustercheck"
service: "{{ mariadb_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
image: "{{ service.image }}"
@@ -94,7 +83,5 @@
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
environment: "{{ service.environment }}"
- listen:
- - restart mariadb-clustercheck
when:
- - kolla_action != "config"
+ - service | service_enabled_and_mapped_to_host
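
Replacing the modulo-4 include loop with group_by moves the batching out of the handlers: they now only sort hosts into the mariadb_bootstrap_restart, mariadb_restart and mariadb_start groups, leaving a later play to walk those groups at its own pace. A minimal sketch of such a consumer play (structure assumed; the actual playbook wiring lives outside this hunk):

    - name: Restart MariaDB serially on existing cluster members
      hosts: mariadb_restart
      serial: "25%"
      gather_facts: false
      tasks:
        - name: Apply the role's restart tasks
          include_role:
            name: mariadb
            tasks_from: restart_services
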
diff --git a/ansible/roles/mariadb/tasks/backup.yml b/ansible/roles/mariadb/tasks/backup.yml
index c57f673fc7..1c25f9bac0 100644
--- a/ansible/roles/mariadb/tasks/backup.yml
+++ b/ansible/roles/mariadb/tasks/backup.yml
@@ -1,14 +1,29 @@
---
+- name: Get MariaDB container facts
+ become: true
+ kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
+ name:
+ - "{{ mariadb_services.mariadb.container_name }}"
+ check_mode: false
+ register: container_facts
+
- name: Taking {{ mariadb_backup_type }} database backup via Mariabackup
+ vars:
+ cmd: "{{ 'kolla_mariadb_backup.sh' if mariadb_backup_target == 'active' else 'kolla_mariadb_backup_replica.sh' }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
- command: "bash -c 'sudo -E kolla_set_configs && /usr/local/bin/kolla_mariadb_backup.sh'"
+ command: "bash -c 'sudo -E kolla_set_configs && /usr/local/bin/{{ cmd }}'"
common_options: "{{ docker_common_options }}"
detach: False
- image: "{{ mariabackup_image_full }}"
+ # NOTE(mgoddard): Try to use the same image as the MariaDB server container
+ # to avoid compatibility issues. See
+ # https://bugs.launchpad.net/kolla-ansible/+bug/2058644.
+ image: "{{ container_facts.mariadb.Image | default(mariadb_services.mariadb.image) }}"
name: "mariabackup"
- restart_policy: no
+ restart_policy: oneshot
remove_on_exit: True
environment:
BACKUP_TYPE: "{{ mariadb_backup_type }}"
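
Reusing the running server's exact image for the one-shot mariabackup container - with the configured image as a fallback - avoids version skew between mariabackup and the datadir it reads. Typical invocations (flags assumed from the variables used in this hunk):

    kolla-ansible -i inventory mariadb_backup
    kolla-ansible -i inventory mariadb_backup -e mariadb_backup_type=incremental
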
diff --git a/ansible/roles/mariadb/tasks/bootstrap_cluster.yml b/ansible/roles/mariadb/tasks/bootstrap_cluster.yml
index e9efe4391d..9a412c3579 100644
--- a/ansible/roles/mariadb/tasks/bootstrap_cluster.yml
+++ b/ansible/roles/mariadb/tasks/bootstrap_cluster.yml
@@ -4,7 +4,7 @@
service_name: "mariadb"
service: "{{ mariadb_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -17,7 +17,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_mariadb"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ service.volumes }}"
notify:
- Bootstrap MariaDB cluster
diff --git a/ansible/roles/mariadb/tasks/check-containers.yml b/ansible/roles/mariadb/tasks/check-containers.yml
index e7a65d60eb..b7e2f7c29f 100644
--- a/ansible/roles/mariadb/tasks/check-containers.yml
+++ b/ansible/roles/mariadb/tasks/check-containers.yml
@@ -1,18 +1,3 @@
---
-- name: Check mariadb containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes }}"
- dimensions: "{{ item.value.dimensions }}"
- environment: "{{ item.value.environment | default(omit) }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ mariadb_services }}"
- notify:
- - "restart {{ item.key }}"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/mariadb/tasks/check.yml b/ansible/roles/mariadb/tasks/check.yml
index db5c254697..1697a9012b 100644
--- a/ansible/roles/mariadb/tasks/check.yml
+++ b/ansible/roles/mariadb/tasks/check.yml
@@ -6,7 +6,7 @@
become: true
command: >
{{ kolla_container_engine }} exec {{ mariadb_service.container_name }}
- mysql -h {{ database_address }} -P {{ database_port }}
+ mariadb -h {{ database_address }} -P {{ database_port }}
-u {{ mariadb_shard_database_user }} -p{{ database_password }} -e 'show databases;'
register: result
until: result is success
diff --git a/ansible/roles/mariadb/tasks/config.yml b/ansible/roles/mariadb/tasks/config.yml
index fedb9b4ed2..87ef65f68d 100644
--- a/ansible/roles/mariadb/tasks/config.yml
+++ b/ansible/roles/mariadb/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ mariadb_services }}"
+ with_dict: "{{ mariadb_services | select_services_enabled_and_mapped_to_host }}"
- name: Ensuring database backup config directory exists
file:
@@ -45,12 +42,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ mariadb_services }}"
- notify:
- - "restart {{ item.key }}"
+ with_dict: "{{ mariadb_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over config.json files for mariabackup
vars:
@@ -76,8 +68,6 @@
dest: "{{ node_config_directory }}/{{ service_name }}/galera.cnf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - restart mariadb
+ when: service | service_enabled_and_mapped_to_host
+
+- include_tasks: copy-certs.yml
diff --git a/ansible/roles/storm/tasks/check.yml b/ansible/roles/mariadb/tasks/config_validate.yml
similarity index 100%
rename from ansible/roles/storm/tasks/check.yml
rename to ansible/roles/mariadb/tasks/config_validate.yml
diff --git a/ansible/roles/mariadb/tasks/copy-certs.yml b/ansible/roles/mariadb/tasks/copy-certs.yml
new file mode 100644
index 0000000000..e1f6cd766a
--- /dev/null
+++ b/ansible/roles/mariadb/tasks/copy-certs.yml
@@ -0,0 +1,7 @@
+---
+- name: "Copy certificates and keys for {{ project_name }}"
+ import_role:
+ role: service-cert-copy
+ vars:
+ project_services: "{{ mariadb_services }}"
+ when: database_enable_tls_backend | bool
diff --git a/ansible/roles/mariadb/tasks/deploy.yml b/ansible/roles/mariadb/tasks/deploy.yml
index 348f61b84c..e1f3ad8557 100644
--- a/ansible/roles/mariadb/tasks/deploy.yml
+++ b/ansible/roles/mariadb/tasks/deploy.yml
@@ -4,10 +4,3 @@
- import_tasks: check-containers.yml
- import_tasks: bootstrap.yml
-
-- name: Flush handlers
- meta: flush_handlers
-
-- import_tasks: register.yml
-
-- import_tasks: check.yml
diff --git a/ansible/roles/mariadb/tasks/loadbalancer.yml b/ansible/roles/mariadb/tasks/loadbalancer.yml
index 8ea7c2d7d0..30fee0fdcd 100644
--- a/ansible/roles/mariadb/tasks/loadbalancer.yml
+++ b/ansible/roles/mariadb/tasks/loadbalancer.yml
@@ -21,6 +21,7 @@
host: "{{ mariadb_shards_info.shards[shard_id].hosts[0] }}"
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ host }}"
@@ -30,7 +31,7 @@
name: "{{ mariadb_monitor_user }}"
password: "{% if enable_proxysql | bool %}{{ mariadb_monitor_password }}{% endif %}"
host: "%"
- priv: "*.*:USAGE"
+ priv: "*.*:USAGE,REPLICATION CLIENT"
tags: always
with_dict: "{{ mariadb_shards_info.shards }}"
loop_control:
diff --git a/ansible/roles/mariadb/tasks/lookup_cluster.yml b/ansible/roles/mariadb/tasks/lookup_cluster.yml
index 39172abc4c..b1ed909610 100644
--- a/ansible/roles/mariadb/tasks/lookup_cluster.yml
+++ b/ansible/roles/mariadb/tasks/lookup_cluster.yml
@@ -1,7 +1,7 @@
---
- name: Create MariaDB volume
become: true
- kolla_docker:
+ kolla_container:
action: "create_volume"
common_options: "{{ docker_common_options }}"
name: "mariadb"
@@ -46,10 +46,11 @@
become: true
command: >-
{{ kolla_container_engine }} exec {{ mariadb_service.container_name }}
- mysql -uroot -p{{ database_password }}
+ mariadb -uroot -p{{ database_password }}
--silent --skip-column-names
-e 'SHOW STATUS LIKE "wsrep_local_state_comment"'
changed_when: false
+ check_mode: false
register: check_mariadb_sync_status
no_log: true
diff --git a/ansible/roles/mariadb/tasks/post-deploy.yml b/ansible/roles/mariadb/tasks/post-deploy.yml
new file mode 100644
index 0000000000..c487e00be4
--- /dev/null
+++ b/ansible/roles/mariadb/tasks/post-deploy.yml
@@ -0,0 +1,4 @@
+---
+- import_tasks: register.yml
+
+- import_tasks: check.yml
diff --git a/ansible/roles/mariadb/tasks/post-upgrade.yml b/ansible/roles/mariadb/tasks/post-upgrade.yml
new file mode 100644
index 0000000000..ac878bef6b
--- /dev/null
+++ b/ansible/roles/mariadb/tasks/post-upgrade.yml
@@ -0,0 +1,24 @@
+---
+- name: Run upgrade in MariaDB container
+ vars:
+ service_name: "mariadb"
+ service: "{{ mariadb_services[service_name] }}"
+ become: true
+ kolla_container:
+ action: "start_container"
+ common_options: "{{ docker_common_options }}"
+ detach: False
+ dimensions: "{{ service.dimensions }}"
+ environment:
+ KOLLA_UPGRADE:
+ KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
+ DB_HOST: "{{ api_interface_address }}"
+ DB_PORT: "{{ mariadb_port }}"
+ DB_ROOT_PASSWORD: "{{ database_password }}"
+ image: "{{ service.image }}"
+ labels:
+ UPGRADE:
+ name: "upgrade_mariadb"
+ restart_policy: oneshot
+ volumes: "{{ service.volumes }}"
+ no_log: true
diff --git a/ansible/roles/mariadb/tasks/precheck.yml b/ansible/roles/mariadb/tasks/precheck.yml
index 5c84496067..d6337e7de2 100644
--- a/ansible/roles/mariadb/tasks/precheck.yml
+++ b/ansible/roles/mariadb/tasks/precheck.yml
@@ -8,14 +8,17 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- mariadb
+ check_mode: false
register: container_facts
- name: Checking free port for MariaDB
wait_for:
host: "{{ api_interface_address }}"
- port: "{{ database_port }}"
+ port: "{{ mariadb_port }}"
connect_timeout: 1
timeout: 1
state: stopped
diff --git a/ansible/roles/mariadb/tasks/recover_cluster.yml b/ansible/roles/mariadb/tasks/recover_cluster.yml
index a2a47a4087..5046e9b01e 100644
--- a/ansible/roles/mariadb/tasks/recover_cluster.yml
+++ b/ansible/roles/mariadb/tasks/recover_cluster.yml
@@ -9,6 +9,7 @@
path: "{{ item }}"
state: absent
delegate_to: localhost
+ connection: local
changed_when: false
check_mode: no
run_once: true
@@ -17,16 +18,17 @@
- block:
- name: Stop MariaDB containers
become: true
- kolla_docker:
+ kolla_container:
name: "{{ mariadb_service.container_name }}"
action: "stop_container"
+ common_options: "{{ docker_common_options }}"
ignore_missing: true
# Run wsrep recovery with detach=false to block until completion. Use a
# different container name to avoid the mariadb container being removed.
- name: Run MariaDB wsrep recovery
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: false
@@ -37,18 +39,18 @@
labels:
BOOTSTRAP:
name: mariadb_wsrep_recovery
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ mariadb_service.volumes }}"
- name: Copying MariaDB log file to /tmp
become: true
- command: "cp {{ docker_runtime_directory or '/var/lib/docker' }}/volumes/kolla_logs/_data/mariadb/mariadb.log /tmp/mariadb_tmp.log"
+ command: "cp {{ container_engine_volumes_path }}/kolla_logs/_data/mariadb/mariadb.log /tmp/mariadb_tmp.log"
# Look for sequence number in logs. Format is:
# WSREP: Recovered position: <UUID>:<seqno>.
- name: Get MariaDB wsrep recovery seqno
become: true
- shell: tail -n 200 /tmp/mariadb_tmp.log | awk -F" " '$0~/Recovered position/{print $NF;exit;}' | awk -F":" '{print $2}'
+ shell: awk -F" " '/Recovered position/{seqno=$NF} END{split(seqno, a, ":"); print a[2]}' /tmp/mariadb_tmp.log
register: wsrep_recovery_seqno
- name: Removing MariaDB log file from /tmp
@@ -82,10 +84,12 @@
dest: "{{ mariadb_recover_tmp_file_path }}"
mode: 0644
delegate_to: localhost
+ connection: local
changed_when: false
when: seqno_compare.results | map(attribute='stdout') | join('') == ""
- name: Registering mariadb_recover_inventory_name from temp file
+ connection: local
set_fact:
mariadb_recover_inventory_name: "{{ lookup('file', mariadb_recover_tmp_file_path) }}"
when:
@@ -99,7 +103,7 @@
become: true
lineinfile:
create: yes
- dest: "{{ docker_runtime_directory or '/var/lib/docker' }}/volumes/mariadb/_data/grastate.dat"
+ dest: "{{ container_engine_volumes_path }}/mariadb/_data/grastate.dat"
regexp: 'safe_to_bootstrap:(.*)$'
line: 'safe_to_bootstrap: 1'
state: present
@@ -109,7 +113,7 @@
- name: Starting first MariaDB container
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
environment:
@@ -119,7 +123,7 @@
labels:
BOOTSTRAP:
name: "{{ mariadb_service.container_name }}"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ mariadb_service.volumes }}"
when:
- bootstrap_host is defined
@@ -142,7 +146,7 @@
- name: Set first MariaDB container as primary
become: true
- shell: "{{ kolla_container_engine }} exec {{ mariadb_service.container_name }} mysql -uroot -p{{ database_password }} -e \"SET GLOBAL wsrep_provider_options='pc.bootstrap=yes';\""
+ shell: "{{ kolla_container_engine }} exec {{ mariadb_service.container_name }} mariadb -uroot -p{{ database_password }} -e \"SET GLOBAL wsrep_provider_options='pc.bootstrap=yes';\""
no_log: True
when:
- bootstrap_host is defined
@@ -152,7 +156,7 @@
become: true
command: >-
{{ kolla_container_engine }} exec {{ mariadb_service.container_name }}
- mysql -uroot -p{{ database_password }}
+ mariadb -uroot -p{{ database_password }}
--silent --skip-column-names
-e 'SHOW STATUS LIKE "wsrep_evs_state"'
changed_when: false
@@ -167,7 +171,7 @@
- name: Restart slave MariaDB container(s)
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
name: "{{ item.value.container_name }}"
@@ -176,7 +180,7 @@
dimensions: "{{ item.value.dimensions }}"
environment: "{{ item.value.environment | default({}) }}"
healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- with_dict: "{{ mariadb_services }}"
+ with_dict: "{{ mariadb_services | select_services_enabled_and_mapped_to_host }}"
when:
- bootstrap_host is defined
- bootstrap_host != inventory_hostname
@@ -198,7 +202,7 @@
- name: Restart master MariaDB container(s)
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ item.value.container_name }}"
@@ -207,7 +211,7 @@
dimensions: "{{ item.value.dimensions }}"
environment: "{{ item.value.environment | default({}) }}"
healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- with_dict: "{{ mariadb_services }}"
+ with_dict: "{{ mariadb_services | select_services_enabled_and_mapped_to_host }}"
when:
- bootstrap_host is defined
- bootstrap_host == inventory_hostname
diff --git a/ansible/roles/mariadb/tasks/register.yml b/ansible/roles/mariadb/tasks/register.yml
index 504061f3db..71b4107e20 100644
--- a/ansible/roles/mariadb/tasks/register.yml
+++ b/ansible/roles/mariadb/tasks/register.yml
@@ -2,6 +2,7 @@
- name: Creating shard root mysql user
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ api_interface_address }}"
@@ -18,6 +19,7 @@
- name: Creating mysql monitor user
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ api_interface_address }}"
@@ -27,27 +29,14 @@
name: "{{ mariadb_monitor_user }}"
password: "{% if enable_proxysql | bool %}{{ mariadb_monitor_password }}{% endif %}"
host: "%"
- priv: "*.*:USAGE"
+ priv: "*.*:USAGE,REPLICATION CLIENT"
when:
- inventory_hostname == groups[mariadb_shard_group][0]
-- name: Creating the Mariabackup database
- become: true
- kolla_toolbox:
- module_name: mysql_db
- module_args:
- login_host: "{{ api_interface_address }}"
- login_port: "{{ mariadb_port }}"
- login_user: "{{ mariadb_shard_database_user }}"
- login_password: "{{ database_password }}"
- name: "{{ mariadb_backup_database_schema }}"
- when:
- - enable_mariabackup | bool
- - inventory_hostname == mariadb_backup_host
-
- name: Creating database backup user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ api_interface_address }}"
@@ -57,7 +46,7 @@
name: "{{ mariadb_backup_database_user }}"
password: "{{ mariadb_backup_database_password }}"
host: "%"
- priv: "*.*:CREATE TABLESPACE,RELOAD,PROCESS,SUPER,LOCK TABLES,BINLOG MONITOR"
+ priv: "*.*:CREATE TABLESPACE,RELOAD,PROCESS,SUPER,LOCK TABLES,BINLOG MONITOR/{{ mariadb_backup_database_schema }}.mariadb_backup_history:CREATE"
append_privs: True
when:
- enable_mariabackup | bool
@@ -66,6 +55,7 @@
- name: Granting permissions on Mariabackup database to backup user
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ api_interface_address }}"
diff --git a/ansible/roles/mariadb/tasks/restart_services.yml b/ansible/roles/mariadb/tasks/restart_services.yml
index b80c909f3e..2e82923929 100644
--- a/ansible/roles/mariadb/tasks/restart_services.yml
+++ b/ansible/roles/mariadb/tasks/restart_services.yml
@@ -4,7 +4,7 @@
service_name: "mariadb"
service: "{{ mariadb_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -23,6 +23,7 @@
timeout: 60
search_regex: "MariaDB"
register: check_mariadb_port
+ check_mode: false
until: check_mariadb_port is success
retries: 10
delay: 6
@@ -31,7 +32,8 @@
become: true
command: >-
{{ kolla_container_engine }} exec {{ mariadb_service.container_name }}
- mysql -uroot -p{{ database_password }}
+ mariadb -uroot -p{{ database_password }}
+ -h {{ api_interface_address }} -P {{ mariadb_port }}
--silent --skip-column-names
-e 'SHOW STATUS LIKE "wsrep_local_state_comment"'
changed_when: false
@@ -40,6 +42,7 @@
retries: 10
delay: 6
no_log: true
+ check_mode: false
when:
# NOTE(yoctozepto): we don't want to wait for new nodes to fully sync
# with an existing cluster as this could take time
diff --git a/ansible/roles/mariadb/tasks/upgrade.yml b/ansible/roles/mariadb/tasks/upgrade.yml
index 967d57bdac..5b10a7e111 100644
--- a/ansible/roles/mariadb/tasks/upgrade.yml
+++ b/ansible/roles/mariadb/tasks/upgrade.yml
@@ -1,26 +1,2 @@
---
- import_tasks: deploy.yml
-
-- name: Run upgrade in MariaDB container
- vars:
- service_name: "mariadb"
- service: "{{ mariadb_services[service_name] }}"
- become: true
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- dimensions: "{{ service.dimensions }}"
- environment:
- KOLLA_UPGRADE:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- DB_HOST: "{{ api_interface_address }}"
- DB_PORT: "{{ mariadb_port }}"
- DB_ROOT_PASSWORD: "{{ database_password }}"
- image: "{{ service.image }}"
- labels:
- UPGRADE:
- name: "upgrade_mariadb"
- restart_policy: no
- volumes: "{{ service.volumes }}"
- no_log: true
diff --git a/ansible/roles/mariadb/templates/galera.cnf.j2 b/ansible/roles/mariadb/templates/galera.cnf.j2
index ddf6a360f5..c7e5916fd5 100644
--- a/ansible/roles/mariadb/templates/galera.cnf.j2
+++ b/ansible/roles/mariadb/templates/galera.cnf.j2
@@ -11,7 +11,11 @@ default-character-set=utf8
basedir=/usr
bind-address={{ api_interface_address }}
port={{ mariadb_port }}
-
+{% if database_enable_tls_backend | bool %}
+ssl_ca=/etc/mariadb/certs/root.crt
+ssl_cert=/etc/mariadb/certs/mariadb-cert.pem
+ssl_key=/etc/mariadb/certs/mariadb-key.pem
+{% endif %}
log_error=/var/log/kolla/mariadb/mariadb.log
log_bin=mysql-bin
diff --git a/ansible/roles/mariadb/templates/mariadb-clustercheck.json.j2 b/ansible/roles/mariadb/templates/mariadb-clustercheck.json.j2
index 7e273d8887..aad07bff6a 100644
--- a/ansible/roles/mariadb/templates/mariadb-clustercheck.json.j2
+++ b/ansible/roles/mariadb/templates/mariadb-clustercheck.json.j2
@@ -1,5 +1,5 @@
{
- "command": "socat {% if network_address_family == 'ipv6' %}-6{% endif %} -d -lf/var/log/kolla/mariadb/mariadb-clustercheck.log tcp-l:{{ mariadb_clustercheck_port }},fork,reuseaddr,bind={{ api_interface_address }} EXEC:clustercheck",
+ "command": "socat_wrapper {% if network_address_family == 'ipv6' %}-6{% endif %} -d -lf/var/log/kolla/mariadb/mariadb-clustercheck.log tcp-l:{{ mariadb_clustercheck_port }},fork,reuseaddr,bind={{ api_interface_address }} EXEC:clustercheck",
"config_files": [],
"permissions": [
{
diff --git a/ansible/roles/mariadb/templates/mariadb.json.j2 b/ansible/roles/mariadb/templates/mariadb.json.j2
index ac1b5bf27d..7910d69293 100644
--- a/ansible/roles/mariadb/templates/mariadb.json.j2
+++ b/ansible/roles/mariadb/templates/mariadb.json.j2
@@ -1,6 +1,6 @@
{% set mysql_dir = 'mysql' if kolla_base_distro in ['ubuntu', 'debian'] else '' %}
{
- "command": "/usr/bin/mysqld_safe",
+ "command": "/usr/bin/mariadbd-safe",
"config_files": [
{
"source": "{{ container_config_directory }}/galera.cnf",
@@ -8,6 +8,25 @@
"owner": "mysql",
"perm": "0600"
}
+ {% if database_enable_tls_backend | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates/root.crt",
+ "dest": "/etc/mariadb/certs/root.crt",
+ "owner": "mysql",
+ "perm": "0600"
+ },
+ {
+ "source": "{{ container_config_directory }}/mariadb-cert.pem",
+ "dest": "/etc/mariadb/certs/mariadb-cert.pem",
+ "owner": "mysql",
+ "perm": "0600"
+ },
+ {
+ "source": "{{ container_config_directory }}/mariadb-key.pem",
+ "dest": "/etc/mariadb/certs/mariadb-key.pem",
+ "owner": "mysql",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/masakari/defaults/main.yml b/ansible/roles/masakari/defaults/main.yml
index d9bc87a6fe..8925f6b848 100644
--- a/ansible/roles/masakari/defaults/main.yml
+++ b/ansible/roles/masakari/defaults/main.yml
@@ -13,11 +13,14 @@ masakari_services:
mode: "http"
external: false
port: "{{ masakari_api_port }}"
+ listen_port: "{{ masakari_api_listen_port }}"
masakari_api_external:
enabled: "{{ enable_masakari }}"
mode: "http"
external: true
- port: "{{ masakari_api_port }}"
+ external_fqdn: "{{ masakari_external_fqdn }}"
+ port: "{{ masakari_api_public_port }}"
+ listen_port: "{{ masakari_api_listen_port }}"
masakari-engine:
container_name: masakari_engine
group: masakari-engine
@@ -42,6 +45,12 @@ masakari_services:
volumes: "{{ masakari_hostmonitor_default_volumes + masakari_hostmonitor_extra_volumes }}"
dimensions: "{{ masakari_hostmonitor_dimensions }}"
+####################
+# Config Validate
+####################
+masakari_config_validation:
+ - generator: "/masakari/etc/masakari/masakari-config-generator.conf"
+ config: "/etc/masakari/masakari.conf"
####################
# Database
@@ -69,15 +78,15 @@ masakari_database_shard:
####################
masakari_tag: "{{ openstack_tag }}"
-masakari_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/masakari-api"
+masakari_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}masakari-api"
masakari_api_tag: "{{ masakari_tag }}"
masakari_api_image_full: "{{ masakari_api_image }}:{{ masakari_api_tag }}"
-masakari_engine_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/masakari-engine"
+masakari_engine_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}masakari-engine"
masakari_engine_tag: "{{ masakari_tag }}"
masakari_engine_image_full: "{{ masakari_engine_image }}:{{ masakari_engine_tag }}"
-masakari_monitors_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/masakari-monitors"
+masakari_monitors_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}masakari-monitors"
masakari_monitors_tag: "{{ masakari_tag }}"
masakari_monitors_image_full: "{{ masakari_monitors_image }}:{{ masakari_monitors_tag }}"
@@ -99,34 +108,31 @@ masakari_api_default_volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/masakari/masakari:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/masakari' if masakari_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/masakari:/dev-mode/masakari' if masakari_dev_mode | bool else '' }}"
masakari_engine_default_volumes:
- "{{ node_config_directory }}/masakari-engine/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/masakari/masakari:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/masakari' if masakari_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/masakari:/dev-mode/masakari' if masakari_dev_mode | bool else '' }}"
- "masakari:/var/lib/masakari/"
masakari_instancemonitor_default_volumes:
- "{{ node_config_directory }}/masakari-instancemonitor/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/masakari-monitors/masakarimonitors:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/masakarimonitors' if masakari_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/masakari-monitors:/dev-mode/masakari-monitors' if masakari_dev_mode | bool else '' }}"
masakari_hostmonitor_default_volumes:
- "{{ node_config_directory }}/masakari-hostmonitor/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/masakari-monitors/masakarimonitors:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/masakarimonitors' if masakari_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/masakari-monitors:/dev-mode/masakari-monitors' if masakari_dev_mode | bool else '' }}"
####################
# OpenStack
####################
-masakari_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ masakari_api_port }}"
-masakari_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ masakari_api_port }}"
-
masakari_logging_debug: "{{ openstack_logging_debug }}"
masakari_keystone_user: "masakari"
diff --git a/ansible/roles/masakari/handlers/main.yml b/ansible/roles/masakari/handlers/main.yml
index b78bc4b933..948e35c588 100644
--- a/ansible/roles/masakari/handlers/main.yml
+++ b/ansible/roles/masakari/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "masakari-api"
service: "{{ masakari_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -12,15 +12,13 @@
privileged: "{{ service.privileged | default(False) }}"
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
- name: Restart masakari-engine container
vars:
service_name: "masakari-engine"
service: "{{ masakari_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -28,15 +26,13 @@
privileged: "{{ service.privileged | default(False) }}"
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
- name: Restart masakari-instancemonitor container
vars:
service_name: "masakari-instancemonitor"
service: "{{ masakari_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -44,15 +40,13 @@
privileged: "{{ service.privileged | default(True) }}"
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
- name: Restart masakari-hostmonitor container
vars:
service_name: "masakari-hostmonitor"
service: "{{ masakari_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -60,5 +54,3 @@
ipc_mode: "{{ service.ipc_mode }}"
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/masakari/tasks/bootstrap.yml b/ansible/roles/masakari/tasks/bootstrap.yml
index 518b4bf3cc..9e74a4e99f 100644
--- a/ansible/roles/masakari/tasks/bootstrap.yml
+++ b/ansible/roles/masakari/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating Masakari database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -17,6 +18,7 @@
- name: Creating Masakari database user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
diff --git a/ansible/roles/masakari/tasks/bootstrap_service.yml b/ansible/roles/masakari/tasks/bootstrap_service.yml
index c5782dc184..62f02128cf 100644
--- a/ansible/roles/masakari/tasks/bootstrap_service.yml
+++ b/ansible/roles/masakari/tasks/bootstrap_service.yml
@@ -3,7 +3,7 @@
vars:
masakari_api: "{{ masakari_services['masakari-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -14,7 +14,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_masakari"
- restart_policy: "no"
+ restart_policy: "oneshot"
volumes: "{{ masakari_api.volumes | reject('equalto', '') | list }}"
run_once: True
delegate_to: "{{ groups[masakari_api.group][0] }}"
diff --git a/ansible/roles/masakari/tasks/check-containers.yml b/ansible/roles/masakari/tasks/check-containers.yml
index 027f2d3318..b7e2f7c29f 100644
--- a/ansible/roles/masakari/tasks/check-containers.yml
+++ b/ansible/roles/masakari/tasks/check-containers.yml
@@ -1,18 +1,3 @@
---
-- name: Check masakari containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- ipc_mode: "{{ item.value.ipc_mode | default(omit) }}"
- privileged: "{{ item.value.privileged | default(False) }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ masakari_services }}"
- notify:
- - Restart {{ item.key }} container
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/masakari/tasks/config.yml b/ansible/roles/masakari/tasks/config.yml
index e3a10d788b..5263dac56b 100644
--- a/ansible/roles/masakari/tasks/config.yml
+++ b/ansible/roles/masakari/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ masakari_services }}"
+ with_dict: "{{ masakari_services | select_services_enabled_and_mapped_to_host }}"
- name: Check if policies shall be overwritten
stat:
@@ -34,16 +31,12 @@
- name: Copying over existing policy file
template:
src: "{{ masakari_policy_file_path }}"
- dest: "{{ node_config_directory }}/{{ item }}/{{ masakari_policy_file }}"
+ dest: "{{ node_config_directory }}/{{ item.key }}/{{ masakari_policy_file }}"
mode: "0660"
become: true
when:
- masakari_policy_file is defined
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ masakari_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ masakari_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over config.json files for services
template:
@@ -51,12 +44,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ masakari_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ masakari_services | select_services_enabled_and_mapped_to_host }}"
- include_tasks: copy-certs.yml
when:
@@ -76,14 +64,10 @@
dest: "{{ node_config_directory }}/{{ service_name }}/masakari.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_items:
- masakari-api
- masakari-engine
- notify:
- - Restart {{ service_name }} container
- name: Copying over masakari-monitors.conf
vars:
@@ -99,14 +83,10 @@
dest: "{{ node_config_directory }}/{{ service_name }}/masakari-monitors.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_items:
- masakari-instancemonitor
- masakari-hostmonitor
- notify:
- - Restart {{ service_name }} container
- name: Copying over wsgi-masakari file for services
vars:
@@ -116,11 +96,7 @@
dest: "{{ node_config_directory }}/masakari-api/wsgi-masakari.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart masakari-api container
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over masakari-api-paste.ini
vars:
@@ -132,11 +108,7 @@
dest: "{{ node_config_directory }}/masakari-api/masakari-api-paste.ini"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart masakari-api container
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over libvirt SASL configuration
become: true
@@ -149,9 +121,6 @@
mode: "0660"
when:
- libvirt_enable_sasl | bool
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ - service | service_enabled_and_mapped_to_host
with_items:
- { src: "auth.conf.j2", dest: "auth.conf", service: "masakari-instancemonitor" }
- notify:
- - Restart {{ service_name }} container
diff --git a/ansible/roles/masakari/tasks/config_validate.yml b/ansible/roles/masakari/tasks/config_validate.yml
new file mode 100644
index 0000000000..de3be4b384
--- /dev/null
+++ b/ansible/roles/masakari/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ masakari_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ masakari_config_validation }}"
diff --git a/ansible/roles/masakari/tasks/precheck.yml b/ansible/roles/masakari/tasks/precheck.yml
index 07b99314c0..71d9cca21a 100644
--- a/ansible/roles/masakari/tasks/precheck.yml
+++ b/ansible/roles/masakari/tasks/precheck.yml
@@ -8,8 +8,11 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- masakari_api
+ check_mode: false
register: container_facts
- name: Checking free port for Masakari API
diff --git a/ansible/roles/masakari/templates/masakari-api.json.j2 b/ansible/roles/masakari/templates/masakari-api.json.j2
index 51805120f0..f911d38cba 100644
--- a/ansible/roles/masakari/templates/masakari-api.json.j2
+++ b/ansible/roles/masakari/templates/masakari-api.json.j2
@@ -26,6 +26,12 @@
"dest": "/etc/masakari/{{ masakari_policy_file }}",
"owner": "masakari",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/masakari/templates/masakari-engine.json.j2 b/ansible/roles/masakari/templates/masakari-engine.json.j2
index acee59ab57..681b3bf9a7 100644
--- a/ansible/roles/masakari/templates/masakari-engine.json.j2
+++ b/ansible/roles/masakari/templates/masakari-engine.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/masakari/{{ masakari_policy_file }}",
"owner": "masakari",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/masakari/templates/masakari-hostmonitor.json.j2 b/ansible/roles/masakari/templates/masakari-hostmonitor.json.j2
index 15dff56a4a..f2c6015b47 100644
--- a/ansible/roles/masakari/templates/masakari-hostmonitor.json.j2
+++ b/ansible/roles/masakari/templates/masakari-hostmonitor.json.j2
@@ -6,7 +6,13 @@
"dest": "/etc/masakari-monitors/masakari-monitors.conf",
"owner": "masakari",
"perm": "0600"
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/masakari/templates/masakari-instancemonitor.json.j2 b/ansible/roles/masakari/templates/masakari-instancemonitor.json.j2
index 2197bc0a0e..d4be66f59d 100644
--- a/ansible/roles/masakari/templates/masakari-instancemonitor.json.j2
+++ b/ansible/roles/masakari/templates/masakari-instancemonitor.json.j2
@@ -12,6 +12,12 @@
"dest": "/var/lib/masakari/.config/libvirt/auth.conf",
"owner": "masakari",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/masakari/templates/masakari.conf.j2 b/ansible/roles/masakari/templates/masakari.conf.j2
index 6575690348..10c4ac072c 100644
--- a/ansible/roles/masakari/templates/masakari.conf.j2
+++ b/ansible/roles/masakari/templates/masakari.conf.j2
@@ -36,7 +36,7 @@ region_name = {{ openstack_region_name }}
cafile = {{ openstack_cacert }}
{% if enable_memcached | bool %}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
{% endif %}
@@ -50,11 +50,18 @@ topics = notifications
driver = noop
{% endif %}
-{% if om_enable_rabbitmq_tls | bool %}
[oslo_messaging_rabbit]
+heartbeat_in_pthread = {{ service_name == 'masakari-api' }}
+{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
[oslo_middleware]
enable_proxy_headers_parsing = True
@@ -66,3 +73,14 @@ policy_file = {{ masakari_policy_file }}
[taskflow]
connection = mysql+pymysql://{{ masakari_database_user }}:{{ masakari_database_password }}@{{ masakari_database_address }}/{{ masakari_database_name }}
+
+{% if service_name == 'masakari-api' %}
+[coordination]
+{% if masakari_coordination_backend == 'redis' %}
+backend_url = {{ redis_connection_string }}
+{% elif masakari_coordination_backend == 'etcd' %}
+# NOTE(jan.gutter): etcd v3.4 removed the default `v3alpha` api_version. Until
+# tooz defaults to a newer version, we should explicitly specify `v3`
+backend_url = etcd3+{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ etcd_client_port }}?api_version=v3{% if openstack_cacert %}&ca_cert={{ openstack_cacert }}{% endif %}
+{% endif %}
+{% endif %}
diff --git a/ansible/roles/memcached/defaults/main.yml b/ansible/roles/memcached/defaults/main.yml
index fe0674dfbf..67cd1a33cc 100644
--- a/ansible/roles/memcached/defaults/main.yml
+++ b/ansible/roles/memcached/defaults/main.yml
@@ -24,7 +24,7 @@ memcached_services:
####################
# Docker
####################
-memcached_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/memcached"
+memcached_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}memcached"
memcached_tag: "{{ openstack_tag }}"
memcached_image_full: "{{ memcached_image }}:{{ memcached_tag }}"
memcached_dimensions: "{{ default_container_dimensions }}"
diff --git a/ansible/roles/memcached/handlers/main.yml b/ansible/roles/memcached/handlers/main.yml
index 4bdfe525e0..cd4ad34bf2 100644
--- a/ansible/roles/memcached/handlers/main.yml
+++ b/ansible/roles/memcached/handlers/main.yml
@@ -3,7 +3,7 @@
vars:
service: "{{ memcached_services.memcached }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
image: "{{ service.image }}"
@@ -11,5 +11,3 @@
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/memcached/tasks/check-containers.yml b/ansible/roles/memcached/tasks/check-containers.yml
index d74e92991c..b7e2f7c29f 100644
--- a/ansible/roles/memcached/tasks/check-containers.yml
+++ b/ansible/roles/memcached/tasks/check-containers.yml
@@ -1,17 +1,3 @@
---
-- name: Check memcached container
- vars:
- service: "{{ memcached_services.memcached }}"
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify: Restart memcached container
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/memcached/tasks/config.yml b/ansible/roles/memcached/tasks/config.yml
index 20edb14162..74be41b958 100644
--- a/ansible/roles/memcached/tasks/config.yml
+++ b/ansible/roles/memcached/tasks/config.yml
@@ -20,7 +20,4 @@
become: true
with_items:
- "memcached"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify: Restart memcached container
+ when: service | service_enabled_and_mapped_to_host
diff --git a/ansible/roles/vitrage/tasks/check.yml b/ansible/roles/memcached/tasks/config_validate.yml
similarity index 100%
rename from ansible/roles/vitrage/tasks/check.yml
rename to ansible/roles/memcached/tasks/config_validate.yml
diff --git a/ansible/roles/memcached/tasks/precheck.yml b/ansible/roles/memcached/tasks/precheck.yml
index ac8945c014..c5ab94ce02 100644
--- a/ansible/roles/memcached/tasks/precheck.yml
+++ b/ansible/roles/memcached/tasks/precheck.yml
@@ -8,8 +8,11 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- memcached
+ check_mode: false
register: container_facts
- name: Checking free port for Memcached
diff --git a/ansible/roles/mistral/defaults/main.yml b/ansible/roles/mistral/defaults/main.yml
index b00acd8da3..beb9a85fe0 100644
--- a/ansible/roles/mistral/defaults/main.yml
+++ b/ansible/roles/mistral/defaults/main.yml
@@ -14,11 +14,14 @@ mistral_services:
mode: "http"
external: false
port: "{{ mistral_api_port }}"
+ listen_port: "{{ mistral_api_listen_port }}"
mistral_api_external:
enabled: "{{ enable_mistral }}"
mode: "http"
external: true
- port: "{{ mistral_api_port }}"
+ external_fqdn: "{{ mistral_external_fqdn }}"
+ port: "{{ mistral_api_public_port }}"
+ listen_port: "{{ mistral_api_listen_port }}"
mistral-engine:
container_name: mistral_engine
group: mistral-engine
@@ -44,6 +47,12 @@ mistral_services:
dimensions: "{{ mistral_executor_dimensions }}"
healthcheck: "{{ mistral_executor_healthcheck }}"
+####################
+# Config Validate
+####################
+mistral_config_validation:
+ - generator: "/mistral/tools/config/config-generator.mistral.conf"
+ config: "/etc/mistral/mistral.conf"
####################
# Database
@@ -71,19 +80,19 @@ mistral_database_shard:
####################
mistral_tag: "{{ openstack_tag }}"
-mistral_engine_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/mistral-engine"
+mistral_engine_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}mistral-engine"
mistral_engine_tag: "{{ mistral_tag }}"
mistral_engine_image_full: "{{ mistral_engine_image }}:{{ mistral_engine_tag }}"
-mistral_event_engine_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/mistral-event-engine"
+mistral_event_engine_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}mistral-event-engine"
mistral_event_engine_tag: "{{ mistral_tag }}"
mistral_event_engine_image_full: "{{ mistral_event_engine_image }}:{{ mistral_event_engine_tag }}"
-mistral_executor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/mistral-executor"
+mistral_executor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}mistral-executor"
mistral_executor_tag: "{{ mistral_tag }}"
mistral_executor_image_full: "{{ mistral_executor_image }}:{{ mistral_executor_tag }}"
-mistral_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/mistral-api"
+mistral_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}mistral-api"
mistral_api_tag: "{{ mistral_tag }}"
mistral_api_image_full: "{{ mistral_api_image }}:{{ mistral_api_tag }}"
@@ -149,25 +158,25 @@ mistral_engine_default_volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/mistral/mistral:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/mistral' if mistral_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/mistral:/dev-mode/mistral' if mistral_dev_mode | bool else '' }}"
mistral_event_engine_default_volumes:
- "{{ node_config_directory }}/mistral-event-engine/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/mistral/mistral:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/mistral' if mistral_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/mistral:/dev-mode/mistral' if mistral_dev_mode | bool else '' }}"
mistral_executor_default_volumes:
- "{{ node_config_directory }}/mistral-executor/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/mistral/mistral:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/mistral' if mistral_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/mistral:/dev-mode/mistral' if mistral_dev_mode | bool else '' }}"
mistral_api_default_volumes:
- "{{ node_config_directory }}/mistral-api/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/mistral/mistral:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/mistral' if mistral_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/mistral:/dev-mode/mistral' if mistral_dev_mode | bool else '' }}"
mistral_extra_volumes: "{{ default_extra_volumes }}"
mistral_engine_extra_volumes: "{{ mistral_extra_volumes }}"
@@ -178,10 +187,8 @@ mistral_api_extra_volumes: "{{ mistral_extra_volumes }}"
####################
# OpenStack
####################
-mistral_internal_base_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ mistral_api_port }}"
-
mistral_internal_endpoint: "{{ mistral_internal_base_endpoint }}/v2"
-mistral_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ mistral_api_port }}/v2"
+mistral_public_endpoint: "{{ mistral_public_base_endpoint }}/v2"
mistral_logging_debug: "{{ openstack_logging_debug }}"
diff --git a/ansible/roles/mistral/handlers/main.yml b/ansible/roles/mistral/handlers/main.yml
index 60a3852c5d..58f67e0732 100644
--- a/ansible/roles/mistral/handlers/main.yml
+++ b/ansible/roles/mistral/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "mistral-api"
service: "{{ mistral_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -12,15 +12,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart mistral-engine container
vars:
service_name: "mistral-engine"
service: "{{ mistral_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -28,15 +26,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart mistral-event-engine container
vars:
service_name: "mistral-event-engine"
service: "{{ mistral_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -44,15 +40,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
- name: Restart mistral-executor container
vars:
service_name: "mistral-executor"
service: "{{ mistral_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -60,5 +54,3 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/mistral/tasks/bootstrap.yml b/ansible/roles/mistral/tasks/bootstrap.yml
index d553572ff5..da3d5bc736 100644
--- a/ansible/roles/mistral/tasks/bootstrap.yml
+++ b/ansible/roles/mistral/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating Mistral database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -17,6 +18,7 @@
- name: Creating Mistral database user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
diff --git a/ansible/roles/mistral/tasks/bootstrap_service.yml b/ansible/roles/mistral/tasks/bootstrap_service.yml
index f37ae7fca0..3fb1f2bb23 100644
--- a/ansible/roles/mistral/tasks/bootstrap_service.yml
+++ b/ansible/roles/mistral/tasks/bootstrap_service.yml
@@ -3,7 +3,7 @@
vars:
mistral_api: "{{ mistral_services['mistral-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -14,7 +14,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_mistral"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ mistral_api.volumes | reject('equalto', '') | list }}"
run_once: True
delegate_to: "{{ groups[mistral_api.group][0] }}"
diff --git a/ansible/roles/mistral/tasks/check-containers.yml b/ansible/roles/mistral/tasks/check-containers.yml
index adf9eb8cd2..b7e2f7c29f 100644
--- a/ansible/roles/mistral/tasks/check-containers.yml
+++ b/ansible/roles/mistral/tasks/check-containers.yml
@@ -1,17 +1,3 @@
---
-- name: Check mistral containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ mistral_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/mistral/tasks/config.yml b/ansible/roles/mistral/tasks/config.yml
index 3f30bb675a..66a89fad3f 100644
--- a/ansible/roles/mistral/tasks/config.yml
+++ b/ansible/roles/mistral/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ mistral_services }}"
+ with_dict: "{{ mistral_services | select_services_enabled_and_mapped_to_host }}"
- name: Check if policies shall be overwritten
stat:
@@ -41,12 +38,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ mistral_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ mistral_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over mistral.conf
vars:
@@ -61,12 +53,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/mistral.conf"
mode: "0660"
become: true
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ mistral_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ mistral_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over existing policy file
template:
@@ -76,11 +63,7 @@
become: true
when:
- mistral_policy_file is defined
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ mistral_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ mistral_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over event_definitions.yaml
become: true
@@ -90,12 +73,8 @@
src: "{{ item }}"
dest: "{{ node_config_directory }}/mistral-event-engine/event_definitions.yaml"
mode: "0660"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_first_found:
- "{{ node_custom_config }}/mistral/{{ inventory_hostname }}/event_definitions.yaml"
- "{{ node_custom_config }}/mistral/event_definitions.yaml"
- "{{ role_path }}/templates/event_definitions.yaml.j2"
- notify:
- - "Restart mistral-event-engine container"
diff --git a/ansible/roles/mistral/tasks/config_validate.yml b/ansible/roles/mistral/tasks/config_validate.yml
new file mode 100644
index 0000000000..91fad6d18f
--- /dev/null
+++ b/ansible/roles/mistral/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ mistral_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ mistral_config_validation }}"
diff --git a/ansible/roles/mistral/tasks/precheck.yml b/ansible/roles/mistral/tasks/precheck.yml
index c6c00efafc..307b4376eb 100644
--- a/ansible/roles/mistral/tasks/precheck.yml
+++ b/ansible/roles/mistral/tasks/precheck.yml
@@ -8,8 +8,11 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- mistral_api
+ check_mode: false
register: container_facts
- name: Checking free port for Mistral API
diff --git a/ansible/roles/mistral/templates/mistral-api.json.j2 b/ansible/roles/mistral/templates/mistral-api.json.j2
index 34f2406d5d..c92e8c47cb 100644
--- a/ansible/roles/mistral/templates/mistral-api.json.j2
+++ b/ansible/roles/mistral/templates/mistral-api.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/mistral/{{ mistral_policy_file }}",
"owner": "mistral",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/mistral/templates/mistral-engine.json.j2 b/ansible/roles/mistral/templates/mistral-engine.json.j2
index 358b8e15e0..bab45348fc 100644
--- a/ansible/roles/mistral/templates/mistral-engine.json.j2
+++ b/ansible/roles/mistral/templates/mistral-engine.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/mistral/{{ mistral_policy_file }}",
"owner": "mistral",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/mistral/templates/mistral-event-engine.json.j2 b/ansible/roles/mistral/templates/mistral-event-engine.json.j2
index 6d45c2966f..67e8dc516c 100644
--- a/ansible/roles/mistral/templates/mistral-event-engine.json.j2
+++ b/ansible/roles/mistral/templates/mistral-event-engine.json.j2
@@ -18,6 +18,12 @@
"dest": "/etc/mistral/{{ mistral_policy_file }}",
"owner": "mistral",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/mistral/templates/mistral-executor.json.j2 b/ansible/roles/mistral/templates/mistral-executor.json.j2
index 53c792ed32..e409bdab6d 100644
--- a/ansible/roles/mistral/templates/mistral-executor.json.j2
+++ b/ansible/roles/mistral/templates/mistral-executor.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/mistral/{{ mistral_policy_file }}",
"owner": "mistral",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/mistral/templates/mistral.conf.j2 b/ansible/roles/mistral/templates/mistral.conf.j2
index 45d39e38e6..84f86d7ffc 100644
--- a/ansible/roles/mistral/templates/mistral.conf.j2
+++ b/ansible/roles/mistral/templates/mistral.conf.j2
@@ -51,7 +51,7 @@ password = {{ mistral_keystone_password }}
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
@@ -72,11 +72,18 @@ topics = {{ mistral_enabled_notification_topics | map(attribute='name') | join('
driver = noop
{% endif %}
-{% if om_enable_rabbitmq_tls | bool %}
[oslo_messaging_rabbit]
+heartbeat_in_pthread = false
+{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
{% if mistral_policy_file is defined %}
[oslo_policy]
diff --git a/ansible/roles/monasca/defaults/main.yml b/ansible/roles/monasca/defaults/main.yml
deleted file mode 100644
index 76a6133e6f..0000000000
--- a/ansible/roles/monasca/defaults/main.yml
+++ /dev/null
@@ -1,367 +0,0 @@
----
-monasca_services:
- monasca-api:
- container_name: monasca_api
- group: monasca-api
- enabled: true
- image: "{{ monasca_api_image_full }}"
- volumes: "{{ monasca_api_default_volumes + monasca_api_extra_volumes }}"
- dimensions: "{{ monasca_api_dimensions }}"
- haproxy:
- monasca_api:
- enabled: "{{ enable_monasca }}"
- mode: "http"
- external: false
- port: "{{ monasca_api_port }}"
- monasca_api_external:
- enabled: "{{ enable_monasca }}"
- mode: "http"
- external: true
- port: "{{ monasca_api_port }}"
- monasca-log-persister:
- container_name: monasca_log_persister
- group: monasca-log-persister
- enabled: true
- image: "{{ monasca_logstash_image_full }}"
- volumes: "{{ monasca_log_persister_default_volumes + monasca_log_persister_extra_volumes }}"
- dimensions: "{{ monasca_log_persister_dimensions }}"
- # TODO(dszumski): We can remove log-metrics and all other references to it after
- # the Xena release. This is used for cleaning up the service.
- monasca-log-metrics:
- container_name: monasca_log_metrics
- group: monasca-log-metrics
- enabled: false
- image: "{{ monasca_logstash_image_full }}"
- volumes: "{{ monasca_log_metrics_default_volumes + monasca_log_metrics_extra_volumes }}"
- dimensions: "{{ monasca_log_metrics_dimensions }}"
- monasca-thresh:
- container_name: monasca_thresh
- group: monasca-thresh
- enabled: "{{ monasca_enable_alerting_pipeline | bool }}"
- image: "{{ monasca_thresh_image_full }}"
- volumes: "{{ monasca_thresh_default_volumes + monasca_thresh_extra_volumes }}"
- dimensions: "{{ monasca_thresh_dimensions }}"
- state: "exited"
- monasca-notification:
- container_name: monasca_notification
- group: monasca-notification
- enabled: "{{ monasca_enable_alerting_pipeline | bool }}"
- image: "{{ monasca_notification_image_full }}"
- volumes: "{{ monasca_notification_default_volumes + monasca_notification_extra_volumes }}"
- dimensions: "{{ monasca_notification_dimensions }}"
- monasca-persister:
- container_name: monasca_persister
- group: monasca-persister
- enabled: true
- image: "{{ monasca_persister_image_full }}"
- volumes: "{{ monasca_persister_default_volumes + monasca_persister_extra_volumes }}"
- dimensions: "{{ monasca_persister_dimensions }}"
- monasca-agent-collector:
- container_name: monasca_agent_collector
- group: monasca-agent-collector
- enabled: true
- image: "{{ monasca_agent_image_full }}"
- pid_mode: "host"
- volumes: "{{ monasca_agent_collector_default_volumes + monasca_agent_collector_extra_volumes }}"
- dimensions: "{{ monasca_agent_dimensions }}"
- monasca-agent-statsd:
- container_name: monasca_agent_statsd
- group: monasca-agent-statsd
- enabled: true
- image: "{{ monasca_agent_image_full }}"
- volumes: "{{ monasca_agent_statsd_default_volumes + monasca_agent_statsd_extra_volumes }}"
- dimensions: "{{ monasca_agent_dimensions }}"
- monasca-agent-forwarder:
- container_name: monasca_agent_forwarder
- group: monasca-agent-forwarder
- enabled: true
- image: "{{ monasca_agent_image_full }}"
- volumes: "{{ monasca_agent_forwarder_default_volumes + monasca_agent_forwarder_extra_volumes }}"
- dimensions: "{{ monasca_agent_dimensions }}"
-
-####################
-# Databases
-####################
-monasca_database_name: "monasca"
-monasca_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}monasca{% endif %}"
-monasca_database_address: "{{ database_address }}"
-monasca_database_port: "{{ database_port }}"
-
-monasca_influxdb_name: "monasca"
-monasca_influxdb_address: "{{ influxdb_address }}"
-monasca_influxdb_http_port: "{{ influxdb_http_port }}"
-monasca_influxdb_retention_policy:
- name: 'monasca_metrics'
- duration: "1w"
- replication_count: 1
-
-####################
-# Database sharding
-####################
-monasca_database_shard_root_user: "{% if enable_proxysql | bool %}root_shard_{{ monasca_database_shard_id }}{% else %}{{ database_user }}{% endif %}"
-monasca_database_shard:
- users:
- - user: "{{ monasca_database_user }}"
- password: "{{ monasca_database_password }}"
- rules:
- - schema: "{{ monasca_database_name }}"
- shard_id: "{{ monasca_database_shard_id }}"
- - schema: "{{ monasca_grafana_database_name }}"
- shard_id: "{{ monasca_database_shard_id }}"
-
-
-####################
-# Monasca
-####################
-
-monasca_kafka_servers: "{% for host in groups['kafka'] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ kafka_port }}{% if not loop.last %},{% endif %}{% endfor %}"
-monasca_zookeeper_servers: "{% for host in groups['zookeeper'] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ zookeeper_client_port }}{% if not loop.last %},{% endif %}{% endfor %}"
-monasca_memcached_servers: "{% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}"
-monasca_elasticsearch_servers: "{% for host in groups['elasticsearch'] %}'{{ internal_protocol }}://{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ elasticsearch_port }}'{% if not loop.last %},{% endif %}{% endfor %}"
-monasca_storm_nimbus_servers: "{% for host in groups['storm-nimbus'] %}'{{ 'api' | kolla_address(host) }}'{% if not loop.last %},{% endif %}{% endfor %}"
-# NOTE(dszumski): Only one NTP server is currently supported by the Monasca Agent plugin
-monasca_ntp_server: "0.pool.ntp.org"
-
-# The default number of Kafka topic partitions. This effectively limits
-# the maximum number of workers per topic, counted over all nodes in the
-# Monasca deployment. For example, if you have a 3 node Monasca
-# deployment, you will by default have 3 instances of Monasca Persister,
-# with each instance having 2 workers by default for the metrics topic.
-# In this case, each worker on the metrics topic will be assigned 5
-# partitions of the metrics topic. If you increase the worker or instance
-# count, you may need to increase the partition count to ensure that all
-# workers can get a share of the work.
-monasca_default_topic_partitions: 30
-
-# The default number of topic replicas. Generally you should not change
-# this.
-monasca_default_topic_replication_factor: "{{ kafka_broker_count if kafka_broker_count | int < 3 else 3 }}"
-
-# Kafka topic names used by Monasca services
-monasca_metrics_topic: "metrics"
-monasca_raw_logs_topic: "logs"
-monasca_transformed_logs_topic: "transformed-logs"
-monasca_events_topic: "events"
-monasca_alarm_state_transitions_topic: "alarm-state-transitions"
-monasca_alarm_notifications_topic: "alarm-notifications"
-monasca_alarm_notifications_retry_topic: "retry-notifications"
-monasca_periodic_notifications_topic: "60-seconds-notifications"
-
-# Kafka topic configuration. Most users will not need to modify these
-# settings; however, for deployments where resources are tightly
-# constrained, or very large deployments where there are many parallel
-# workers, it is worth considering changing them. Note that if you do
-# change these settings, then you will need to manually remove each
-# topic from the Kafka deployment for the change to take effect when
-# the Monasca service is reconfigured.
-monasca_all_topics:
- - name: "{{ monasca_metrics_topic }}"
- partitions: "{{ monasca_default_topic_partitions }}"
- replication_factor: "{{ monasca_default_topic_replication_factor }}"
- enabled: True
- - name: "{{ monasca_raw_logs_topic }}"
- partitions: "{{ monasca_default_topic_partitions }}"
- replication_factor: "{{ monasca_default_topic_replication_factor }}"
- enabled: True
- - name: "{{ monasca_transformed_logs_topic }}"
- partitions: "{{ monasca_default_topic_partitions }}"
- replication_factor: "{{ monasca_default_topic_replication_factor }}"
- enabled: False
- - name: "{{ monasca_events_topic }}"
- partitions: "{{ monasca_default_topic_partitions }}"
- replication_factor: "{{ monasca_default_topic_replication_factor }}"
- enabled: "{{ monasca_enable_alerting_pipeline | bool }}"
- - name: "{{ monasca_alarm_state_transitions_topic }}"
- partitions: "{{ monasca_default_topic_partitions }}"
- replication_factor: "{{ monasca_default_topic_replication_factor }}"
- enabled: "{{ monasca_enable_alerting_pipeline | bool }}"
- - name: "{{ monasca_alarm_notifications_topic }}"
- partitions: "{{ monasca_default_topic_partitions }}"
- replication_factor: "{{ monasca_default_topic_replication_factor }}"
- enabled: "{{ monasca_enable_alerting_pipeline | bool }}"
- - name: "{{ monasca_alarm_notifications_retry_topic }}"
- partitions: "{{ monasca_default_topic_partitions }}"
- replication_factor: "{{ monasca_default_topic_replication_factor }}"
- enabled: "{{ monasca_enable_alerting_pipeline | bool }}"
- - name: "{{ monasca_periodic_notifications_topic }}"
- partitions: "{{ monasca_default_topic_partitions }}"
- replication_factor: "{{ monasca_default_topic_replication_factor }}"
- enabled: "{{ monasca_enable_alerting_pipeline | bool }}"
-
-# NOTE(dszumski): Due to the way monasca-notification is currently
-# implemented, it is not recommended to change this period.
-monasca_periodic_notifications_period: 60
-
-# Agent settings
-monasca_agent_max_buffer_size: 1000
-monasca_agent_backlog_send_rate: 1000
-monasca_agent_max_batch_size: 1000
-monasca_agent_check_frequency: 30
-
-# Processing pipeline threads. In a large-scale deployment you will likely
-# want to tune these with finer precision. For example, if you have a very
-# high log throughput, the log metrics service consumer may require a
-# higher thread count than the producer. You will also want to ensure that
-# the total number of threads across all instances of a service does not
-# exceed the Kafka topic partition count.
-monasca_log_pipeline_threads: 2
-monasca_metric_pipeline_threads: 2
-
-####################
-# Docker
-####################
-
-# NOTE(dszumski): Binary support for Monasca images is not yet available in Kolla
-monasca_tag: "{{ openstack_tag }}"
-
-monasca_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/monasca-agent"
-monasca_agent_tag: "{{ monasca_tag }}"
-monasca_agent_image_full: "{{ monasca_agent_image }}:{{ monasca_agent_tag }}"
-
-monasca_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/monasca-api"
-monasca_api_tag: "{{ monasca_tag }}"
-monasca_api_image_full: "{{ monasca_api_image }}:{{ monasca_api_tag }}"
-
-monasca_logstash_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/logstash"
-monasca_logstash_tag: "{{ monasca_tag }}"
-monasca_logstash_image_full: "{{ monasca_logstash_image }}:{{ monasca_logstash_tag }}"
-
-monasca_thresh_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/monasca-thresh"
-monasca_thresh_tag: "{{ monasca_tag }}"
-monasca_thresh_image_full: "{{ monasca_thresh_image }}:{{ monasca_thresh_tag }}"
-
-monasca_notification_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/monasca-notification"
-monasca_notification_tag: "{{ monasca_tag }}"
-monasca_notification_image_full: "{{ monasca_notification_image }}:{{ monasca_notification_tag }}"
-
-monasca_persister_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/monasca-persister"
-monasca_persister_tag: "{{ monasca_tag }}"
-monasca_persister_image_full: "{{ monasca_persister_image }}:{{ monasca_persister_tag }}"
-
-monasca_agent_dimensions: "{{ default_container_dimensions }}"
-monasca_api_dimensions: "{{ default_container_dimensions }}"
-monasca_log_api_dimensions: "{{ default_container_dimensions }}"
-monasca_log_persister_dimensions: "{{ default_container_dimensions }}"
-monasca_log_metrics_dimensions: "{{ default_container_dimensions }}"
-monasca_thresh_dimensions: "{{ default_container_dimensions }}"
-monasca_notification_dimensions: "{{ default_container_dimensions }}"
-monasca_persister_dimensions: "{{ default_container_dimensions }}"
-
-monasca_agent_collector_default_volumes:
- - "{{ node_config_directory }}/monasca-agent-collector/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla"
- - "/sys:/sys:ro"
- - "/dev/disk/:/dev/disk:ro"
-monasca_agent_statsd_default_volumes:
- - "{{ node_config_directory }}/monasca-agent-statsd/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla"
-monasca_agent_forwarder_default_volumes:
- - "{{ node_config_directory }}/monasca-agent-forwarder/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla"
-monasca_api_default_volumes:
- - "{{ node_config_directory }}/monasca-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla"
-monasca_log_persister_default_volumes:
- - "{{ node_config_directory }}/monasca-log-persister/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla"
- - "monasca_log_persister_data:/var/lib/logstash"
-monasca_log_metrics_default_volumes:
- - "{{ node_config_directory }}/monasca-log-metrics/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla"
- - "monasca_log_metrics_data:/var/lib/logstash"
-monasca_thresh_default_volumes:
- - "{{ node_config_directory }}/monasca-thresh/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "monasca_thresh:/var/lib/monasca-thresh/"
- - "kolla_logs:/var/log/kolla"
-monasca_notification_default_volumes:
- - "{{ node_config_directory }}/monasca-notification/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla"
-monasca_persister_default_volumes:
- - "{{ node_config_directory }}/monasca-persister/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla"
-
-monasca_extra_volumes: "{{ default_extra_volumes }}"
-monasca_agent_collector_extra_volumes: "{{ monasca_extra_volumes }}"
-monasca_agent_statsd_extra_volumes: "{{ monasca_extra_volumes }}"
-monasca_agent_forwarder_extra_volumes: "{{ monasca_extra_volumes }}"
-monasca_api_extra_volumes: "{{ monasca_extra_volumes }}"
-monasca_log_persister_extra_volumes: "{{ monasca_extra_volumes }}"
-monasca_log_metrics_extra_volumes: "{{ monasca_extra_volumes }}"
-monasca_thresh_extra_volumes: "{{ monasca_extra_volumes }}"
-monasca_notification_extra_volumes: "{{ monasca_extra_volumes }}"
-monasca_persister_extra_volumes: "{{ monasca_extra_volumes }}"
-
-####################
-# OpenStack
-####################
-monasca_openstack_auth: "{{ openstack_auth }}"
-
-monasca_keystone_user: "monasca"
-monasca_default_authorized_roles:
- - admin
-monasca_read_only_authorized_roles:
- - monasca-read-only-user
-# NOTE(dszumski): The first role in this list is assigned to the monasca-agent
-# user for monitoring the OpenStack deployment.
-monasca_agent_authorized_roles:
- - agent
-monasca_delegate_authorized_roles:
- - admin
-
-monasca_api_internal_endpoint: "{{ monasca_api_internal_base_endpoint }}/v2.0"
-monasca_api_public_endpoint: "{{ monasca_api_public_base_endpoint }}/v2.0"
-
-monasca_logging_debug: "{{ openstack_logging_debug }}"
-
-monasca_api_workers: "{{ openstack_service_workers }}"
-
-####################
-# Keystone
-####################
-monasca_ks_services:
- - name: "monasca-api"
- type: "monitoring"
- description: "Monasca monitoring as a service"
- endpoints:
- - {'interface': 'internal', 'url': '{{ monasca_api_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ monasca_api_public_endpoint }}'}
- - name: "monasca-log-api"
- type: "logging"
- description: "Monasca logging as a service"
- endpoints:
- - {'interface': 'internal', 'url': '{{ monasca_log_api_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ monasca_log_api_public_endpoint }}'}
-
-monasca_ks_users:
- - project: "service"
- user: "{{ monasca_keystone_user }}"
- password: "{{ monasca_keystone_password }}"
- role: "admin"
- - project: "{{ monasca_control_plane_project }}"
- user: "{{ monasca_agent_user }}"
- password: "{{ monasca_agent_password }}"
- role: "{{ monasca_agent_authorized_roles | first }}"
-
-monasca_ks_roles:
- - "{{ monasca_default_authorized_roles }}"
- - "{{ monasca_agent_authorized_roles }}"
- - "{{ monasca_read_only_authorized_roles }}"
- - "{{ monasca_delegate_authorized_roles }}"
diff --git a/ansible/roles/monasca/handlers/main.yml b/ansible/roles/monasca/handlers/main.yml
deleted file mode 100644
index 41d67af5a5..0000000000
--- a/ansible/roles/monasca/handlers/main.yml
+++ /dev/null
@@ -1,148 +0,0 @@
----
-- name: Restart monasca-api container
- vars:
- service_name: "monasca-api"
- service: "{{ monasca_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
-
-- name: Restart monasca-log-persister container
- vars:
- service_name: "monasca-log-persister"
- service: "{{ monasca_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
-
-- name: Restart monasca-thresh container
- vars:
- service: "{{ monasca_services['monasca-thresh'] }}"
- become: true
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- detach: False
- remove_on_exit: false
- restart_policy: no
- environment:
- KOLLA_BOOTSTRAP:
- run_once: True
- delegate_to: "{{ groups[service.group] | first }}"
- when:
- - kolla_action != "config"
-
-- name: Resubmitting monasca-thresh topology
- vars:
- service: "{{ monasca_services['monasca-thresh'] }}"
- become: true
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- name: "resubmit_{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- detach: False
- restart_policy: no
- environment:
- KOLLA_BOOTSTRAP:
- TOPOLOGY_REPLACE:
- run_once: True
- delegate_to: "{{ groups[service.group] | first }}"
- when:
- - kolla_action != "config"
-
-- name: Restart monasca-notification container
- vars:
- service_name: "monasca-notification"
- service: "{{ monasca_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
-
-- name: Restart monasca-persister container
- vars:
- service_name: "monasca-persister"
- service: "{{ monasca_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
-
-- name: Restart monasca-agent-collector container
- vars:
- service_name: "monasca-agent-collector"
- service: "{{ monasca_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- pid_mode: "{{ service.pid_mode }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
-
-- name: Restart monasca-agent-forwarder container
- vars:
- service_name: "monasca-agent-forwarder"
- service: "{{ monasca_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
-
-- name: Restart monasca-agent-statsd container
- vars:
- service_name: "monasca-agent-statsd"
- service: "{{ monasca_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/monasca/tasks/bootstrap.yml b/ansible/roles/monasca/tasks/bootstrap.yml
deleted file mode 100644
index 402c5b0201..0000000000
--- a/ansible/roles/monasca/tasks/bootstrap.yml
+++ /dev/null
@@ -1,106 +0,0 @@
----
-- name: Creating monasca databases
- become: true
- kolla_toolbox:
- module_name: mysql_db
- module_args:
- login_host: "{{ monasca_database_address }}"
- login_port: "{{ monasca_database_port }}"
- login_user: "{{ monasca_database_shard_root_user }}"
- login_password: "{{ database_password }}"
- name: "{{ item }}"
- run_once: True
- delegate_to: "{{ groups['monasca-api'][0] }}"
- with_items:
- - "{{ monasca_database_name }}"
- when:
- - not use_preconfigured_databases | bool
-
-- name: Creating monasca database user and setting permissions
- become: true
- kolla_toolbox:
- module_name: mysql_user
- module_args:
- login_host: "{{ monasca_database_address }}"
- login_port: "{{ monasca_database_port }}"
- login_user: "{{ monasca_database_shard_root_user }}"
- login_password: "{{ database_password }}"
- name: "{{ monasca_database_user }}"
- password: "{{ monasca_database_password }}"
- host: "%"
- priv: "{{ monasca_database_name }}.*:ALL"
- append_privs: "yes"
- run_once: True
- delegate_to: "{{ groups['monasca-api'][0] }}"
- when:
- - not use_preconfigured_databases | bool
-
-- import_tasks: bootstrap_service.yml
-
-# NOTE(dszumski): Monasca is not yet compatible with InfluxDB > 1.1.10, which means
-# that the official Ansible modules for managing InfluxDB don't work [1].
-# We therefore fall back to manual commands to register the database
-# and set a default retention policy.
-# [1] https://github.com/influxdata/influxdb-python#influxdb-pre-v110-users
-- name: List influxdb databases
- become: true
- command: "{{ kolla_container_engine }} exec influxdb influx -host {{ monasca_influxdb_address }} -port {{ monasca_influxdb_http_port }} -execute 'show databases'"
- run_once: True
- delegate_to: "{{ groups['influxdb'][0] }}"
- register: monasca_influxdb_database
- changed_when: False
-
-- name: Creating monasca influxdb database
- become: true
- command: >
- {{ kolla_container_engine }} exec influxdb influx -host {{ monasca_influxdb_address }} -port {{ monasca_influxdb_http_port }} -execute
- 'CREATE DATABASE {{ monasca_influxdb_name }} WITH DURATION {{ monasca_influxdb_retention_policy.duration }}
- REPLICATION {{ monasca_influxdb_retention_policy.replication_count }} NAME {{ monasca_influxdb_retention_policy.name }}'
- run_once: True
- delegate_to: "{{ groups['influxdb'][0] }}"
- when: monasca_influxdb_name not in monasca_influxdb_database.stdout_lines
-
-# NOTE(dszumski): Although we can take advantage of automatic topic
-# creation in Kafka, creating the topics manually allows unique settings
-# to be used per topic, rather than the defaults. It also avoids an edge
-# case where services on multiple nodes may race to create topics, and
-# paves the way for enabling things like compression on a per-topic basis.
-- name: List monasca kafka topics
- become: true
- command: >
- {{ kolla_container_engine }} exec kafka /opt/kafka/bin/kafka-topics.sh
- --zookeeper localhost
- --list
- register: kafka_topics
- run_once: True
- delegate_to: "{{ groups['kafka'][0] }}"
-
-- name: Create monasca kafka topics if they don't exist
- become: true
- command: >
- {{ kolla_container_engine }} exec kafka /opt/kafka/bin/kafka-topics.sh
- --create
- --topic {{ item.name }}
- --partitions {{ item.partitions }}
- --replication-factor {{ item.replication_factor }}
- --zookeeper localhost
- run_once: True
- delegate_to: "{{ groups['kafka'][0] }}"
- when:
- - item.name not in kafka_topics.stdout_lines
- - item.enabled | bool
- with_items: "{{ monasca_all_topics }}"
-
-- name: Remove monasca kafka topics for disabled services
- become: true
- command: >
- {{ kolla_container_engine }} exec kafka /opt/kafka/bin/kafka-topics.sh
- --delete
- --topic "{{ item.name }}"
- --zookeeper localhost
- run_once: True
- delegate_to: "{{ groups['kafka'][0] }}"
- when:
- - item.name in kafka_topics.stdout_lines
- - not item.enabled | bool
- with_items: "{{ monasca_all_topics }}"
diff --git a/ansible/roles/monasca/tasks/bootstrap_service.yml b/ansible/roles/monasca/tasks/bootstrap_service.yml
deleted file mode 100644
index f1826c5a0b..0000000000
--- a/ansible/roles/monasca/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Running monasca bootstrap container
- vars:
- monasca_api: "{{ monasca_services['monasca-api'] }}"
- become: true
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ monasca_api.image }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_monasca"
- restart_policy: no
- volumes: "{{ monasca_api.volumes }}"
- run_once: True
- delegate_to: "{{ groups[monasca_api.group][0] }}"
diff --git a/ansible/roles/monasca/tasks/check-containers.yml b/ansible/roles/monasca/tasks/check-containers.yml
deleted file mode 100644
index 74a3a6af43..0000000000
--- a/ansible/roles/monasca/tasks/check-containers.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- name: Check monasca containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- pid_mode: "{{ item.value.pid_mode | default('') }}"
- volumes: "{{ item.value.volumes }}"
- dimensions: "{{ item.value.dimensions }}"
- state: "{{ item.value.state | default('running') }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ monasca_services }}"
- notify:
- - "Restart {{ item.key }} container"
diff --git a/ansible/roles/monasca/tasks/cleanup.yml b/ansible/roles/monasca/tasks/cleanup.yml
deleted file mode 100644
index dd23dbc153..0000000000
--- a/ansible/roles/monasca/tasks/cleanup.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-- name: Stop and remove containers for disabled monasca services
- become: true
- kolla_docker:
- action: "stop_and_remove_container"
- name: "{{ item.value.container_name }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - not item.value.enabled | bool
- with_dict: "{{ monasca_services }}"
-
-- name: Removing config for any disabled services
- file:
- path: "{{ node_config_directory }}/{{ item.key }}"
- state: "absent"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - not item.value.enabled | bool
- with_dict: "{{ monasca_services }}"
-
-# NOTE(dszumski): Docker volume removal is currently a manual procedure
diff --git a/ansible/roles/monasca/tasks/config.yml b/ansible/roles/monasca/tasks/config.yml
deleted file mode 100644
index 7f6f729af0..0000000000
--- a/ansible/roles/monasca/tasks/config.yml
+++ /dev/null
@@ -1,331 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item.key }}"
- state: "directory"
- owner: "{{ config_owner_user }}"
- group: "{{ config_owner_group }}"
- mode: "0770"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ monasca_services }}"
-
-- include_tasks: copy-certs.yml
- when:
- - kolla_copy_ca_into_containers | bool
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item.key }}/{{ item.key }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ monasca_services }}"
- notify:
- - "Restart {{ item.key }} container"
-
-- name: Copying over monasca-agent-collector config
- vars:
- service: "{{ monasca_services['monasca-agent-collector'] }}"
- merge_yaml:
- sources:
- - "{{ role_path }}/templates/monasca-agent-collector/{{ item }}.j2"
- - "{{ node_custom_config }}/monasca/{{ item }}"
- - "{{ node_custom_config }}/monasca/{{ inventory_hostname }}/{{ item }}"
- dest: "{{ node_config_directory }}/monasca-agent-collector/{{ item }}"
- mode: "0660"
- become: true
- with_items:
- - agent-collector.yml
- when:
- - inventory_hostname in groups[service['group']]
- - service.enabled | bool
- notify:
- - Restart monasca-agent-collector container
-
-- name: Ensuring monasca-agent collector plugin config directory exists
- vars:
- service: "{{ monasca_services['monasca-agent-collector'] }}"
- file:
- path: "{{ node_config_directory }}/monasca-agent-collector/plugins"
- state: "directory"
- owner: "{{ config_owner_user }}"
- group: "{{ config_owner_group }}"
- mode: "0770"
- become: true
- when:
- - inventory_hostname in groups[service['group']]
- - service.enabled | bool
-
-- name: Find monasca-agent-collector plugin configuration files
- find:
- paths:
- - "{{ role_path }}/templates/monasca-agent-collector/plugins/"
- - "{{ node_custom_config }}/monasca/agent_plugins/"
- - "{{ node_custom_config }}/monasca/{{ inventory_hostname }}/agent_plugins/"
- patterns: '*.yaml'
- delegate_to: localhost
- register: agent_plugins
-
-- name: Copying over monasca-agent-collector plugins
- vars:
- service: "{{ monasca_services['monasca-agent-collector'] }}"
- template:
- src: "{{ item.path }}"
- dest: "{{ node_config_directory }}/monasca-agent-collector/plugins/{{ item.path | basename }}"
- mode: "0660"
- become: true
- with_items:
- "{{ agent_plugins.files }}"
- when:
- - inventory_hostname in groups[service['group']]
- - service.enabled | bool
- notify:
- - Restart monasca-agent-collector container
-
-- name: Copying over monasca-agent-forwarder config
- vars:
- service: "{{ monasca_services['monasca-agent-forwarder'] }}"
- merge_yaml:
- sources:
- - "{{ role_path }}/templates/monasca-agent-forwarder/{{ item }}.j2"
- - "{{ node_custom_config }}/monasca/{{ item }}"
- - "{{ node_custom_config }}/monasca/{{ inventory_hostname }}/{{ item }}"
- dest: "{{ node_config_directory }}/monasca-agent-forwarder/{{ item }}"
- mode: "0660"
- become: true
- with_items:
- - agent-forwarder.yml
- when:
- - inventory_hostname in groups[service['group']]
- - service.enabled | bool
- notify:
- - Restart monasca-agent-forwarder container
-
-- name: Copying over monasca-agent-statsd config
- vars:
- service: "{{ monasca_services['monasca-agent-statsd'] }}"
- merge_yaml:
- sources:
- - "{{ role_path }}/templates/monasca-agent-statsd/{{ item }}.j2"
- - "{{ node_custom_config }}/monasca/{{ item }}"
- - "{{ node_custom_config }}/monasca/{{ inventory_hostname }}/{{ item }}"
- dest: "{{ node_config_directory }}/monasca-agent-statsd/{{ item }}"
- mode: "0660"
- become: true
- with_items:
- - agent-statsd.yml
- when:
- - inventory_hostname in groups[service['group']]
- - service.enabled | bool
- notify:
- - Restart monasca-agent-statsd container
-
-- name: Copying over monasca-api config
- vars:
- service: "{{ monasca_services['monasca-api'] }}"
- merge_configs:
- sources:
- - "{{ role_path }}/templates/monasca-api/{{ item }}.j2"
- - "{{ node_custom_config }}/monasca/{{ item }}"
- - "{{ node_custom_config }}/monasca/{{ inventory_hostname }}/{{ item }}"
- dest: "{{ node_config_directory }}/monasca-api/{{ item }}"
- mode: "0660"
- become: true
- with_items:
- - api.conf
- - api-config.ini
- when:
- - inventory_hostname in groups[service['group']]
- - service.enabled | bool
- notify:
- - Restart monasca-api container
-
-- name: Copying over monasca-api wsgi config
- vars:
- service: "{{ monasca_services['monasca-api'] }}"
- template:
- src: "{{ role_path }}/templates/monasca-api/wsgi-api.conf.j2"
- dest: "{{ node_config_directory }}/monasca-api/wsgi-api.conf"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[service['group']]
- - service.enabled | bool
- notify:
- - Restart monasca-api container
-
-- name: Ensuring logstash patterns folder exists
- vars:
- service: "{{ monasca_services['monasca-log-persister'] }}"
- file:
- path: "{{ node_config_directory }}/monasca-log-persister/logstash_patterns"
- state: "directory"
- mode: "0770"
- become: true
- when:
- - inventory_hostname in groups[service['group']]
- - service.enabled | bool
-
-- name: Find custom logstash patterns
- find:
- path: "{{ node_custom_config }}/monasca/logstash_patterns"
- pattern: "*"
- delegate_to: localhost
- run_once: True
- register: monasca_custom_logstash_patterns
-
-- name: Copying over custom logstash patterns
- vars:
- service: "{{ monasca_services['monasca-log-persister'] }}"
- template:
- src: "{{ item.path }}"
- dest: "{{ node_config_directory }}/monasca-log-persister/logstash_patterns/{{ item.path | basename }}"
- mode: "0660"
- with_items: "{{ monasca_custom_logstash_patterns.files }}"
- become: true
- when:
- - inventory_hostname in groups[service['group']]
- - service.enabled | bool
- notify:
- - Restart monasca-log-persister container
-
-- name: Copying over monasca-log-persister config
- vars:
- service: "{{ monasca_services['monasca-log-persister'] }}"
- template:
- src: "{{ item }}"
- dest: "{{ node_config_directory }}/monasca-log-persister/log-persister.conf"
- mode: "0660"
- become: true
- with_first_found:
- - "{{ node_custom_config }}/monasca/{{ inventory_hostname }}/log-persister.conf"
- - "{{ node_custom_config }}/monasca/log-persister.conf"
- - "{{ role_path }}/templates/monasca-log-persister/log-persister.conf.j2"
- when:
- - inventory_hostname in groups[service['group']]
- - service.enabled | bool
- notify:
- - Restart monasca-log-persister container
-
-- name: Copying over monasca-log-persister elasticsearch template
- vars:
- service: "{{ monasca_services['monasca-log-persister'] }}"
- template:
- src: "{{ item }}"
- dest: "{{ node_config_directory }}/monasca-log-persister/elasticsearch-template.json"
- mode: "0660"
- become: true
- with_first_found:
- - "{{ node_custom_config }}/monasca/{{ inventory_hostname }}/elasticsearch-template.json"
- - "{{ node_custom_config }}/monasca/elasticsearch-template.json"
- - "{{ role_path }}/templates/monasca-log-persister/elasticsearch-template.json"
- when:
- - inventory_hostname in groups[service['group']]
- - service.enabled | bool
- notify:
- - Restart monasca-log-persister container
-
-- name: Copying over monasca-thresh config
- vars:
- service: "{{ monasca_services['monasca-thresh'] }}"
- # NOTE(dszumski): We can't use merge_yaml since it replaces empty values
- # with `null`. This breaks the thresholder config file parsing (which should
- # probably be more robust).
- template:
- src: "{{ item }}"
- dest: "{{ node_config_directory }}/monasca-thresh/thresh-config.yml"
- mode: "0660"
- become: true
- with_first_found:
- - "{{ node_custom_config }}/monasca/{{ inventory_hostname }}/thresh-config.yml"
- - "{{ node_custom_config }}/monasca/thresh-config.yml"
- - "{{ role_path }}/templates/monasca-thresh/thresh-config.yml.j2"
- when:
- - inventory_hostname in groups[service['group']]
- - service.enabled | bool
- notify:
- - Resubmitting monasca-thresh topology
-
-- name: Copying over monasca-thresh storm config
- vars:
- service: "{{ monasca_services['monasca-thresh'] }}"
- template:
- src: "{{ item }}"
- dest: "{{ node_config_directory }}/monasca-thresh/storm.yml"
- mode: "0660"
- become: true
- with_first_found:
- - "{{ node_custom_config }}/monasca/{{ inventory_hostname }}/storm.yml"
- - "{{ node_custom_config }}/monasca/storm.yml"
- - "{{ role_path }}/templates/monasca-thresh/storm.yml.j2"
- when:
- - inventory_hostname in groups[service['group']]
- - service.enabled | bool
- notify:
- - Resubmitting monasca-thresh topology
-
-- name: Copying over monasca-notification config
- vars:
- service: "{{ monasca_services['monasca-notification'] }}"
- merge_configs:
- sources:
- - "{{ role_path }}/templates/monasca-notification/{{ item }}.j2"
- - "{{ node_custom_config }}/monasca/{{ item }}"
- - "{{ node_custom_config }}/monasca/{{ inventory_hostname }}/{{ item }}"
- dest: "{{ node_config_directory }}/monasca-notification/{{ item }}"
- mode: "0660"
- become: true
- with_items:
- - notification.conf
- when:
- - inventory_hostname in groups[service['group']]
- - service.enabled | bool
- notify:
- - Restart monasca-notification container
-
-- name: Check for monasca-notification templates
- stat:
- path: "{{ node_custom_config }}/monasca/notification_templates"
- delegate_to: localhost
- run_once: True
- register: notification_templates
-
-- name: Copying over monasca-notification templates
- vars:
- service: "{{ monasca_services['monasca-notification'] }}"
- copy:
- src: "{{ node_custom_config }}/monasca/notification_templates"
- dest: "{{ node_config_directory }}/monasca-notification/"
- mode: "0660"
- become: true
- when:
- - notification_templates.stat.exists and notification_templates.stat.isdir
- - inventory_hostname in groups[service['group']]
- - service.enabled | bool
- notify:
- - Restart monasca-notification container
-
-- name: Copying over monasca-persister config
- vars:
- service: "{{ monasca_services['monasca-persister'] }}"
- merge_configs:
- sources:
- - "{{ role_path }}/templates/monasca-persister/{{ item }}.j2"
- - "{{ node_custom_config }}/monasca/{{ item }}"
- - "{{ node_custom_config }}/monasca/{{ inventory_hostname }}/{{ item }}"
- dest: "{{ node_config_directory }}/monasca-persister/{{ item }}"
- mode: "0660"
- become: true
- with_items:
- - persister.conf
- when:
- - inventory_hostname in groups[service['group']]
- - service.enabled | bool
- notify:
- - Restart monasca-persister container
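(The merge_configs/merge_yaml source lists above are ordered from lowest to highest precedence — values from later files override earlier ones. For persister.conf, for example, the lookup order is:

    {{ role_path }}/templates/monasca-persister/persister.conf.j2             role default, lowest
    {{ node_custom_config }}/monasca/persister.conf                           site-wide override
    {{ node_custom_config }}/monasca/{{ inventory_hostname }}/persister.conf  per-host override, highest)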
diff --git a/ansible/roles/monasca/tasks/copy-certs.yml b/ansible/roles/monasca/tasks/copy-certs.yml
deleted file mode 100644
index ccb4ee44c1..0000000000
--- a/ansible/roles/monasca/tasks/copy-certs.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: "Copy certificates and keys for {{ project_name }}"
- import_role:
- role: service-cert-copy
- vars:
- project_services: "{{ monasca_services }}"
diff --git a/ansible/roles/monasca/tasks/deploy.yml b/ansible/roles/monasca/tasks/deploy.yml
deleted file mode 100644
index b2afb693ea..0000000000
--- a/ansible/roles/monasca/tasks/deploy.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- import_tasks: register.yml
-
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-- import_tasks: bootstrap.yml
-
-- name: Flush handlers
- meta: flush_handlers
-
-- import_tasks: check.yml
diff --git a/ansible/roles/monasca/tasks/loadbalancer.yml b/ansible/roles/monasca/tasks/loadbalancer.yml
deleted file mode 100644
index 8eac15fe30..0000000000
--- a/ansible/roles/monasca/tasks/loadbalancer.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: "Configure loadbalancer for {{ project_name }}"
- import_role:
- name: loadbalancer-config
- vars:
- project_services: "{{ monasca_services }}"
- tags: always
diff --git a/ansible/roles/monasca/tasks/precheck.yml b/ansible/roles/monasca/tasks/precheck.yml
deleted file mode 100644
index 1e3cc9cd59..0000000000
--- a/ansible/roles/monasca/tasks/precheck.yml
+++ /dev/null
@@ -1,45 +0,0 @@
----
-- import_role:
- name: service-precheck
- vars:
- service_precheck_services: "{{ monasca_services }}"
- service_name: "{{ project_name }}"
-
-- name: Get container facts
- become: true
- kolla_container_facts:
- name: "{{ monasca_services.values() | map(attribute='container_name') | list }}"
- register: container_facts
-
-- name: Checking free port for monasca-api
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ monasca_api_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups[monasca_services['monasca-api']['group']]
- - container_facts['monasca_api'] is not defined
-
-- name: Checking free port for monasca-agent-forwarder
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ monasca_agent_forwarder_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups[monasca_services['monasca-agent-forwarder']['group']]
- - container_facts['monasca_agent_forwarder'] is not defined
-
-- name: Checking free port for monasca-agent-statsd
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ monasca_agent_statsd_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups[monasca_services['monasca-agent-statsd']['group']]
- - container_facts['monasca_agent_statsd'] is not defined
diff --git a/ansible/roles/monasca/tasks/register.yml b/ansible/roles/monasca/tasks/register.yml
deleted file mode 100644
index ddc05f3254..0000000000
--- a/ansible/roles/monasca/tasks/register.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- import_role:
- name: service-ks-register
- vars:
- service_ks_register_auth: "{{ monasca_openstack_auth }}"
- service_ks_register_services: "{{ monasca_ks_services }}"
- service_ks_register_users: "{{ monasca_ks_users }}"
- service_ks_register_roles: "{{ monasca_ks_roles }}"
diff --git a/ansible/roles/monasca/tasks/stop.yml b/ansible/roles/monasca/tasks/stop.yml
deleted file mode 100644
index 362d507665..0000000000
--- a/ansible/roles/monasca/tasks/stop.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- import_role:
- name: service-stop
- vars:
- project_services: "{{ monasca_services }}"
- service_name: "{{ project_name }}"
diff --git a/ansible/roles/monasca/tasks/upgrade.yml b/ansible/roles/monasca/tasks/upgrade.yml
deleted file mode 100644
index 9db8c9dddb..0000000000
--- a/ansible/roles/monasca/tasks/upgrade.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-- import_tasks: config.yml
-
-- import_tasks: cleanup.yml
-
-- import_tasks: check-containers.yml
-
-- import_tasks: register.yml
-
-- import_tasks: bootstrap_service.yml
-
-# NOTE(sshambar): We don't want pre-upgrade monasca-thresh instances
-# running in local mode after an upgrade, so stop them.
-# The first node will be replaced with the submission container in the
-# handlers below.
-- name: Stopping all monasca-thresh instances but the first node
- become: true
- kolla_docker:
- action: "stop_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ monasca_services['monasca-thresh']['container_name'] }}"
- when:
- - inventory_hostname in groups['monasca-thresh']
- - inventory_hostname != groups['monasca-thresh'] | first
-
-- name: Flush handlers
- meta: flush_handlers
diff --git a/ansible/roles/monasca/templates/monasca-agent-collector/agent-collector.yml.j2 b/ansible/roles/monasca/templates/monasca-agent-collector/agent-collector.yml.j2
deleted file mode 100644
index c5245f3601..0000000000
--- a/ansible/roles/monasca/templates/monasca-agent-collector/agent-collector.yml.j2
+++ /dev/null
@@ -1,9 +0,0 @@
-Main:
- hostname: {{ ansible_facts.hostname }}
- check_freq: {{ monasca_agent_check_frequency }}
- forwarder_url: http://127.0.0.1:{{ monasca_agent_forwarder_port }}
-
-Logging:
- log_level: {{ 'DEBUG' if monasca_logging_debug else 'INFO' }}
- collector_log_file: /var/log/kolla/monasca/agent-collector.log
- enable_logrotate: False
diff --git a/ansible/roles/monasca/templates/monasca-agent-collector/monasca-agent-collector.json.j2 b/ansible/roles/monasca/templates/monasca-agent-collector/monasca-agent-collector.json.j2
deleted file mode 100644
index d33ee62e02..0000000000
--- a/ansible/roles/monasca/templates/monasca-agent-collector/monasca-agent-collector.json.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-{
- "command": "monasca-collector foreground --config-file /etc/monasca/agent-collector.yml",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/agent-collector.yml",
- "dest": "/etc/monasca/agent-collector.yml",
- "owner": "monasca",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/plugins/*.yaml",
- "dest": "/etc/monasca/conf.d/",
- "owner": "monasca",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/monasca",
- "owner": "monasca:kolla",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/monasca/templates/monasca-agent-collector/plugins/cpu.yaml b/ansible/roles/monasca/templates/monasca-agent-collector/plugins/cpu.yaml
deleted file mode 100644
index 47495c222d..0000000000
--- a/ansible/roles/monasca/templates/monasca-agent-collector/plugins/cpu.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-init_config: null
-instances:
- - built_by: System
- name: cpu_stats
diff --git a/ansible/roles/monasca/templates/monasca-agent-collector/plugins/disk.yaml b/ansible/roles/monasca/templates/monasca-agent-collector/plugins/disk.yaml
deleted file mode 100644
index b8a591c652..0000000000
--- a/ansible/roles/monasca/templates/monasca-agent-collector/plugins/disk.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-init_config: null
-instances:
- - built_by: System
- device_blacklist_re: .*freezer_backup_snap.*
- ignore_filesystem_types: iso9660,tmpfs
- name: disk_stats
diff --git a/ansible/roles/monasca/templates/monasca-agent-collector/plugins/load.yaml b/ansible/roles/monasca/templates/monasca-agent-collector/plugins/load.yaml
deleted file mode 100644
index b2a3eeccf1..0000000000
--- a/ansible/roles/monasca/templates/monasca-agent-collector/plugins/load.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-init_config: null
-instances:
- - built_by: System
- name: load_stats
diff --git a/ansible/roles/monasca/templates/monasca-agent-collector/plugins/memory.yaml b/ansible/roles/monasca/templates/monasca-agent-collector/plugins/memory.yaml
deleted file mode 100644
index b7bd0afa43..0000000000
--- a/ansible/roles/monasca/templates/monasca-agent-collector/plugins/memory.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-init_config: null
-instances:
- - built_by: System
- name: memory_stats
diff --git a/ansible/roles/monasca/templates/monasca-agent-collector/plugins/network.yaml b/ansible/roles/monasca/templates/monasca-agent-collector/plugins/network.yaml
deleted file mode 100644
index fd22be75de..0000000000
--- a/ansible/roles/monasca/templates/monasca-agent-collector/plugins/network.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-init_config: null
-instances:
- - built_by: System
- excluded_interface_re: lo.*|vnet.*|tun.*|ovs.*|br.*|tap.*|qbr.*|qvb.*|qvo.*
- name: network_stats
diff --git a/ansible/roles/monasca/templates/monasca-agent-collector/plugins/ntp.yaml b/ansible/roles/monasca/templates/monasca-agent-collector/plugins/ntp.yaml
deleted file mode 100644
index 15f5f43cee..0000000000
--- a/ansible/roles/monasca/templates/monasca-agent-collector/plugins/ntp.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-init_config: null
-instances:
- - built_by: Ntp
- host: "{{ monasca_ntp_server }}"
diff --git a/ansible/roles/monasca/templates/monasca-agent-forwarder/agent-forwarder.yml.j2 b/ansible/roles/monasca/templates/monasca-agent-forwarder/agent-forwarder.yml.j2
deleted file mode 100644
index 5a55dbf792..0000000000
--- a/ansible/roles/monasca/templates/monasca-agent-forwarder/agent-forwarder.yml.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-Api:
- service_type: monitoring
- endpoint_type: internal
- region_name: {{ openstack_region_name }}
- username: {{ monasca_agent_user }}
- password: {{ monasca_agent_password }}
- keystone_url: {{ keystone_internal_url }}
- user_domain_name: Default
- project_name: {{ monasca_control_plane_project }}
- project_domain_id: {{ default_project_domain_id }}
- project_domain_name: {{ default_project_domain_name }}
- insecure: False
- ca_file: /var/lib/kolla/venv/lib/python{{ distro_python_version }}/site-packages/certifi/cacert.pem
- max_measurement_buffer_size: {{ monasca_agent_max_buffer_size }}
- backlog_send_rate: {{ monasca_agent_backlog_send_rate }}
- max_batch_size: {{ monasca_agent_max_batch_size }}
-
-Main:
- hostname: {{ ansible_facts.hostname }}
- non_local_traffic: True
- listen_port: {{ monasca_agent_forwarder_port }}
-
-Logging:
- log_level: {{ 'DEBUG' if monasca_logging_debug else 'INFO' }}
- forwarder_log_file: /var/log/kolla/monasca/agent-forwarder.log
- enable_logrotate: False
diff --git a/ansible/roles/monasca/templates/monasca-agent-forwarder/monasca-agent-forwarder.json.j2 b/ansible/roles/monasca/templates/monasca-agent-forwarder/monasca-agent-forwarder.json.j2
deleted file mode 100644
index 59da8c9bc9..0000000000
--- a/ansible/roles/monasca/templates/monasca-agent-forwarder/monasca-agent-forwarder.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "monasca-forwarder --config-file=/etc/monasca/agent-forwarder.yml",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/agent-forwarder.yml",
- "dest": "/etc/monasca/agent-forwarder.yml",
- "owner": "monasca",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/monasca",
- "owner": "monasca:kolla",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/monasca/templates/monasca-agent-statsd/agent-statsd.yml.j2 b/ansible/roles/monasca/templates/monasca-agent-statsd/agent-statsd.yml.j2
deleted file mode 100644
index 36fa7d9fd2..0000000000
--- a/ansible/roles/monasca/templates/monasca-agent-statsd/agent-statsd.yml.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-Main:
- hostname: {{ ansible_facts.hostname }}
- forwarder_url: http://127.0.0.1:{{ monasca_agent_forwarder_port }}
-
-Statsd:
- monasca_statsd_port: {{ monasca_agent_statsd_port }}
- non_local_traffic: True
-
-Logging:
- log_level: {{ 'DEBUG' if monasca_logging_debug else 'INFO' }}
- statsd_log_file: /var/log/kolla/monasca/agent-statsd.log
- enable_logrotate: False
diff --git a/ansible/roles/monasca/templates/monasca-agent-statsd/monasca-agent-statsd.json.j2 b/ansible/roles/monasca/templates/monasca-agent-statsd/monasca-agent-statsd.json.j2
deleted file mode 100644
index fe93ba8f1d..0000000000
--- a/ansible/roles/monasca/templates/monasca-agent-statsd/monasca-agent-statsd.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "monasca-statsd --config-file /etc/monasca/agent-statsd.yml",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/agent-statsd.yml",
- "dest": "/etc/monasca/agent-statsd.yml",
- "owner": "monasca",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/monasca",
- "owner": "monasca:kolla",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/monasca/templates/monasca-api/api-config.ini.j2 b/ansible/roles/monasca/templates/monasca-api/api-config.ini.j2
deleted file mode 100644
index 6ac702a4cd..0000000000
--- a/ansible/roles/monasca/templates/monasca-api/api-config.ini.j2
+++ /dev/null
@@ -1,14 +0,0 @@
-[DEFAULT]
-name = monasca_api
-
-[pipeline:main]
-pipeline = request_id auth api
-
-[app:api]
-paste.app_factory = monasca_api.api.server:launch
-
-[filter:auth]
-paste.filter_factory = monasca_api.healthcheck.keystone_protocol:filter_factory
-
-[filter:request_id]
-paste.filter_factory = oslo_middleware.request_id:RequestId.factory
diff --git a/ansible/roles/monasca/templates/monasca-api/api.conf.j2 b/ansible/roles/monasca/templates/monasca-api/api.conf.j2
deleted file mode 100644
index c9bc03a412..0000000000
--- a/ansible/roles/monasca/templates/monasca-api/api.conf.j2
+++ /dev/null
@@ -1,70 +0,0 @@
-[DEFAULT]
-log_file = monasca-api.log
-log_dir = /var/log/kolla/monasca
-debug = {{ monasca_logging_debug }}
-region = {{ openstack_region_name }}
-enable_logs_api = True
-
-[database]
-database = {{ monasca_database_name }}
-connection = mysql+pymysql://{{ monasca_database_user }}:{{ monasca_database_password }}@{{ monasca_database_address | put_address_in_context('url') }}:{{ monasca_database_port }}/{{ monasca_database_name }}
-connection_recycle_time = {{ database_connection_recycle_time }}
-max_pool_size = {{ database_max_pool_size }}
-
-[influxdb]
-database_name = {{ monasca_influxdb_name }}
-ip_address = {{ monasca_influxdb_address }}
-port = {{ monasca_influxdb_http_port }}
-
-[kafka]
-metrics_topic = {{ monasca_metrics_topic }}
-logs_topics = {{ monasca_raw_logs_topic }}
-uri = {{ monasca_kafka_servers }}
-
-[messaging]
-driver = monasca_api.common.messaging.kafka_publisher:KafkaPublisher
-
-[security]
-default_authorized_roles = {{ monasca_default_authorized_roles|join(', ') }}
-agent_authorized_roles = {{ monasca_agent_authorized_roles|join(', ') }}
-read_only_authorized_roles = {{ monasca_read_only_authorized_roles|join(', ') }}
-delegate_authorized_roles = {{ monasca_delegate_authorized_roles|join(', ') }}
-
-[keystone_authtoken]
-service_type = logging-monitoring
-www_authenticate_uri = {{ keystone_internal_url }}
-auth_url = {{ keystone_internal_url }}
-auth_type = password
-project_domain_id = {{ default_project_domain_id }}
-user_domain_id = {{ default_user_domain_id }}
-project_name = service
-username = {{ monasca_keystone_user }}
-password = {{ monasca_keystone_password }}
-service_token_roles_required = True
-cafile = {{ openstack_cacert }}
-region_name = {{ openstack_region_name }}
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {{ monasca_memcached_servers }}
-
-[dispatcher]
-versions = monasca_api.v2.reference.versions:Versions
-version_2_0 = monasca_api.v2.reference.version_2_0:Version2
-metrics = monasca_api.v2.reference.metrics:Metrics
-metrics_measurements = monasca_api.v2.reference.metrics:MetricsMeasurements
-metrics_statistics = monasca_api.v2.reference.metrics:MetricsStatistics
-metrics_names = monasca_api.v2.reference.metrics:MetricsNames
-alarm_definitions = monasca_api.v2.reference.alarm_definitions:AlarmDefinitions
-alarms = monasca_api.v2.reference.alarms:Alarms
-alarms_count = monasca_api.v2.reference.alarms:AlarmsCount
-alarms_state_history = monasca_api.v2.reference.alarms:AlarmsStateHistory
-notification_methods = monasca_api.v2.reference.notifications:Notifications
-dimension_values = monasca_api.v2.reference.metrics:DimensionValues
-dimension_names = monasca_api.v2.reference.metrics:DimensionNames
-notification_method_types = monasca_api.v2.reference.notificationstype:NotificationsType
-healthchecks = monasca_api.healthchecks:HealthChecks
-
-[log_publisher]
-# Increase the maximum payload size to slightly above the default Fluentd chunk size (8MB)
-max_log_size = 10485760
diff --git a/ansible/roles/monasca/templates/monasca-api/monasca-api.json.j2 b/ansible/roles/monasca/templates/monasca-api/monasca-api.json.j2
deleted file mode 100644
index 657a92de95..0000000000
--- a/ansible/roles/monasca/templates/monasca-api/monasca-api.json.j2
+++ /dev/null
@@ -1,32 +0,0 @@
-{% set monasca_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %}
-{% set wsgi_conf_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %}
-{
- "command": "/usr/sbin/{{ monasca_cmd }} -DFOREGROUND",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/api.conf",
- "dest": "/etc/monasca/monasca-api.conf",
- "owner": "monasca",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/api-config.ini",
- "dest": "/etc/monasca/api-config.ini",
- "owner": "monasca",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/wsgi-api.conf",
- "dest": "/etc/{{ wsgi_conf_dir }}/wsgi-config.conf",
- "owner": "monasca",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/monasca",
- "owner": "monasca:kolla",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/monasca/templates/monasca-api/wsgi-api.conf.j2 b/ansible/roles/monasca/templates/monasca-api/wsgi-api.conf.j2
deleted file mode 100644
index 3848610f45..0000000000
--- a/ansible/roles/monasca/templates/monasca-api/wsgi-api.conf.j2
+++ /dev/null
@@ -1,35 +0,0 @@
-{% set monasca_log_dir = '/var/log/kolla/monasca' %}
-{% set wsgi_path = '/monasca-api/monasca_api/api' %}
-
-Listen {{ api_interface_address | put_address_in_context('url') }}:{{ monasca_api_port }}
-
-TraceEnable off
-TimeOut {{ kolla_httpd_timeout }}
-KeepAliveTimeout {{ kolla_httpd_keep_alive }}
-
-ErrorLog "{{ monasca_log_dir }}/apache-api-error.log"
-<IfModule log_config_module>
-    CustomLog "{{ monasca_log_dir }}/apache-api-access.log" common
-</IfModule>
-
-{% if monasca_logging_debug | bool %}
-LogLevel info
-{% endif %}
-
-<VirtualHost *:{{ monasca_api_port }}>
-
-    ErrorLog "{{ monasca_log_dir }}/monasca-api-error.log"
-    LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
-    CustomLog "{{ monasca_log_dir }}/monasca-api-access.log" logformat
-    WSGIApplicationGroup %{GLOBAL}
-    WSGIDaemonProcess monasca-api group=monasca processes={{ monasca_api_workers }} threads=1 user=monasca
-    WSGIProcessGroup monasca-api
-    WSGIScriptAlias / {{ wsgi_path }}/wsgi.py
-    WSGIPassAuthorization On
-    SetEnv no-gzip 1
-
-    <Directory {{ wsgi_path }}>
-        Require all granted
-    </Directory>
-
-</VirtualHost>
diff --git a/ansible/roles/monasca/templates/monasca-log-persister/elasticsearch-template.json b/ansible/roles/monasca/templates/monasca-log-persister/elasticsearch-template.json
deleted file mode 100644
index 15ea285d30..0000000000
--- a/ansible/roles/monasca/templates/monasca-log-persister/elasticsearch-template.json
+++ /dev/null
@@ -1,56 +0,0 @@
-{
- "aliases": {},
- "mappings": {
- "log": {
- "_all": {
- "enabled": true,
- "omit_norms": true
- },
- "dynamic_templates": [
- {
- "message_field": {
- "mapping": {
- "fielddata": {
- "format": "disabled"
- },
- "index": true,
- "omit_norms": true,
- "type": "text"
- },
- "match": "message",
- "match_mapping_type": "string"
- }
- },
- {
- "other_fields": {
- "mapping": {
- "index": true,
- "type": "keyword"
- },
- "match": "*",
- "match_mapping_type": "string"
- }
- }
- ],
- "properties": {
- "@timestamp": {
- "type": "date"
- },
- "@version": {
- "index": true,
- "type": "keyword"
- },
- "creation_time": {
- "type": "date"
- }
- }
- }
- },
- "order": 0,
- "settings": {
- "index": {
- "refresh_interval": "5s"
- }
- },
- "template": "monasca-*"
-}
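Elasticsearch applies the removed template to any index whose name matches the wildcard pattern "monasca-*", which covers the per-tenant daily indices written by the log-persister pipeline in the next file. The matching is plain globbing, illustrated here with Python's fnmatch and a hypothetical tenant id:

    import fnmatch

    print(fnmatch.fnmatch("monasca-acme-2023.11.14", "monasca-*"))  # True
    print(fnmatch.fnmatch("logstash-2023.11.14", "monasca-*"))      # False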
diff --git a/ansible/roles/monasca/templates/monasca-log-persister/log-persister.conf.j2 b/ansible/roles/monasca/templates/monasca-log-persister/log-persister.conf.j2
deleted file mode 100644
index c4e6994776..0000000000
--- a/ansible/roles/monasca/templates/monasca-log-persister/log-persister.conf.j2
+++ /dev/null
@@ -1,47 +0,0 @@
-# Persist logs to Elasticsearch.
-
-input {
- kafka {
- bootstrap_servers => "{{ monasca_kafka_servers }}"
- topics => ["{{ monasca_raw_logs_topic }}"]
- group_id => "log_persister"
- consumer_threads => "{{ monasca_log_pipeline_threads }}"
- codec => json
- }
-}
-
-filter {
- # Update the timestamp of the event based on the time in the message.
- date {
- match => [ "[log][dimensions][timestamp]", "yyyy-MM-dd HH:mm:ss Z", "ISO8601"]
- remove_field => [ "[log][dimensions][timestamp]", "[log][dimensions][Timestamp]" ]
- }
-
- # Monasca Log API adds a timestamp when it processes a log entry. This
- # timestamp needs to be converted from seconds since the epoch for
- # Elasticsearch to parse it correctly. Here we make that conversion.
- date {
- match => ["creation_time", "UNIX"]
- target => "creation_time"
- }
-
-  # OpenStack log levels are uppercase, and syslog log levels are lowercase.
-  # Furthermore, syslog has more log levels than OpenStack. To avoid
-  # mapping syslog log levels to OpenStack log levels, we standardise
-  # on the syslog style here.
- if [log][dimensions][log_level] {
- mutate {
- lowercase => [ "[log][dimensions][log_level]" ]
- }
- }
-}
-
-output {
- elasticsearch {
- index => "monasca-%{[meta][tenantId]}-%{+YYYY.MM.dd}"
- hosts => [{{ monasca_elasticsearch_servers }}]
- document_type => "log"
- template_name => "monasca"
- template => "/etc/logstash/elasticsearch-template.json"
- }
-}
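The second date filter above performs the epoch-seconds conversion the comment describes. A minimal Python equivalent (the input value is hypothetical):

    from datetime import datetime, timezone

    creation_time = 1700000000  # epoch seconds, as set by the Monasca Log API
    print(datetime.fromtimestamp(creation_time, tz=timezone.utc).isoformat())
    # 2023-11-14T22:13:20+00:00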
diff --git a/ansible/roles/monasca/templates/monasca-log-persister/monasca-log-persister.json.j2 b/ansible/roles/monasca/templates/monasca-log-persister/monasca-log-persister.json.j2
deleted file mode 100644
index 365d293dde..0000000000
--- a/ansible/roles/monasca/templates/monasca-log-persister/monasca-log-persister.json.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-{
- "command": "/usr/share/logstash/bin/logstash --path.settings /etc/logstash/ --log.format json --path.logs /var/log/kolla/logstash/monasca-log-persister -f /etc/logstash/conf.d/log-persister.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/log-persister.conf",
- "dest": "/etc/logstash/conf.d/log-persister.conf",
- "owner": "logstash",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/elasticsearch-template.json",
- "dest": "/etc/logstash/elasticsearch-template.json",
- "owner": "logstash",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/logstash",
- "owner": "logstash:kolla",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/monasca/templates/monasca-notification/monasca-notification.json.j2 b/ansible/roles/monasca/templates/monasca-notification/monasca-notification.json.j2
deleted file mode 100644
index 0d00122056..0000000000
--- a/ansible/roles/monasca/templates/monasca-notification/monasca-notification.json.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-{
- "command": "monasca-notification --config-file /etc/monasca/notification.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/notification.conf",
- "dest": "/etc/monasca/notification.conf",
- "owner": "monasca",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/notification_templates/*",
- "dest": "/etc/monasca/",
- "owner": "monasca",
- "perm": "0600",
- "optional": true
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/monasca",
- "owner": "monasca:kolla",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/monasca/templates/monasca-notification/notification.conf.j2 b/ansible/roles/monasca/templates/monasca-notification/notification.conf.j2
deleted file mode 100644
index ce934eea12..0000000000
--- a/ansible/roles/monasca/templates/monasca-notification/notification.conf.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-[DEFAULT]
-log_file = monasca-notification.log
-log_dir = /var/log/kolla/monasca
-debug = {{ monasca_logging_debug }}
-
-[kafka]
-url = {{ monasca_kafka_servers }}
-alarm_topic = {{ monasca_alarm_state_transitions_topic }}
-notification_topic = {{ monasca_alarm_notifications_topic }}
-notification_retry_topic = {{ monasca_alarm_notifications_retry_topic }}
-periodic = {{ monasca_periodic_notifications_period }}:{{ monasca_periodic_notifications_topic }}
-
-[mysql]
-host = {{ monasca_database_address }}
-port = {{ monasca_database_port }}
-user = {{ monasca_database_user }}
-passwd = {{ monasca_database_password }}
-db = {{ monasca_database_name }}
-
-[statsd]
-port = {{ monasca_agent_statsd_port }}
-
-[zookeeper]
-url = {{ monasca_zookeeper_servers }}
diff --git a/ansible/roles/monasca/templates/monasca-persister/monasca-persister.json.j2 b/ansible/roles/monasca/templates/monasca-persister/monasca-persister.json.j2
deleted file mode 100644
index 41f9979d97..0000000000
--- a/ansible/roles/monasca/templates/monasca-persister/monasca-persister.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "monasca-persister --config-file /etc/monasca/persister.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/persister.conf",
- "dest": "/etc/monasca/persister.conf",
- "owner": "monasca",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/monasca",
- "owner": "monasca:kolla",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/monasca/templates/monasca-persister/persister.conf.j2 b/ansible/roles/monasca/templates/monasca-persister/persister.conf.j2
deleted file mode 100644
index f0d1a602da..0000000000
--- a/ansible/roles/monasca/templates/monasca-persister/persister.conf.j2
+++ /dev/null
@@ -1,27 +0,0 @@
-[DEFAULT]
-log_file = monasca-persister.log
-log_dir = /var/log/kolla/monasca
-debug = {{ monasca_logging_debug }}
-
-[influxdb]
-database_name = {{ monasca_influxdb_name }}
-# FIXME(dszumski): This doesn't work with a FQDN so use the VIP directly
-ip_address = {{ kolla_internal_vip_address }}
-port = {{ monasca_influxdb_http_port }}
-
-[kafka_alarm_history]
-{% if not monasca_enable_alerting_pipeline | bool %}
-enabled = False
-{% else %}
-uri = {{ monasca_kafka_servers }}
-topic = {{ monasca_alarm_state_transitions_topic }}
-num_processors = 1
-{% endif %}
-
-[kafka_metrics]
-uri = {{ monasca_kafka_servers }}
-topic = {{ monasca_metrics_topic }}
-num_processors = {{ monasca_metric_pipeline_threads }}
-
-[zookeeper]
-uri = {{ monasca_zookeeper_servers }}
diff --git a/ansible/roles/monasca/templates/monasca-thresh/monasca-thresh.json.j2 b/ansible/roles/monasca/templates/monasca-thresh/monasca-thresh.json.j2
deleted file mode 100644
index b7e28e2fd8..0000000000
--- a/ansible/roles/monasca/templates/monasca-thresh/monasca-thresh.json.j2
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- "command": "/opt/storm/bin/storm jar /monasca-thresh-source/monasca-thresh-*/thresh/target/monasca-thresh-*-SNAPSHOT-shaded.jar -Djava.io.tmpdir=/var/lib/monasca-thresh/data monasca.thresh.ThresholdingEngine /etc/monasca/thresh-config.yml monasca-thresh",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/thresh-config.yml",
- "dest": "/etc/monasca/thresh-config.yml",
- "owner": "monasca",
- "perm": "0600"
- },
- {
- "source": "/var/lib/kolla/config_files/storm.yml",
- "dest": "/opt/storm/conf/storm.yaml",
- "owner": "monasca",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/monasca",
- "owner": "monasca:kolla",
- "recurse": true
- },
- {
- "path": "/var/lib/monasca-thresh",
- "owner": "monasca:kolla",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/monasca/templates/monasca-thresh/storm.yml.j2 b/ansible/roles/monasca/templates/monasca-thresh/storm.yml.j2
deleted file mode 100644
index 70aa8fd217..0000000000
--- a/ansible/roles/monasca/templates/monasca-thresh/storm.yml.j2
+++ /dev/null
@@ -1 +0,0 @@
-nimbus.seeds: [{{ monasca_storm_nimbus_servers }}]
diff --git a/ansible/roles/monasca/templates/monasca-thresh/thresh-config.yml.j2 b/ansible/roles/monasca/templates/monasca-thresh/thresh-config.yml.j2
deleted file mode 100644
index cacad8f828..0000000000
--- a/ansible/roles/monasca/templates/monasca-thresh/thresh-config.yml.j2
+++ /dev/null
@@ -1,170 +0,0 @@
-#
-# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
-# Copyright 2017 Fujitsu LIMITED
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-metricSpoutThreads: 2
-metricSpoutTasks: 2
-
-statsdConfig:
- host: 127.0.0.1
- port: {{ monasca_agent_statsd_port }}
- debugmetrics: {{ monasca_logging_debug }}
- dimensions: !!map
- service : monitoring
- component : storm
- whitelist: !!seq
- - aggregation-bolt.execute-count.filtering-bolt_alarm-creation-stream
- - aggregation-bolt.execute-count.filtering-bolt_default
- - aggregation-bolt.execute-count.system_tick
- - filtering-bolt.execute-count.event-bolt_metric-alarm-events
- - filtering-bolt.execute-count.metrics-spout_default
- - thresholding-bolt.execute-count.aggregation-bolt_default
- - thresholding-bolt.execute-count.event-bolt_alarm-definition-events
- - system.memory_heap.committedBytes
- - system.memory_nonHeap.committedBytes
- - system.newWorkerEvent
- - system.startTimeSecs
- - system.GC_ConcurrentMarkSweep.timeMs
- metricmap: !!map
- aggregation-bolt.execute-count.filtering-bolt_alarm-creation-stream :
- monasca.threshold.aggregation-bolt.execute-count.filtering-bolt_alarm-creation-stream
- aggregation-bolt.execute-count.filtering-bolt_default :
- monasca.threshold.aggregation-bolt.execute-count.filtering-bolt_default
- aggregation-bolt.execute-count.system_tick :
- monasca.threshold.aggregation-bolt.execute-count.system_tick
- filtering-bolt.execute-count.event-bolt_metric-alarm-events :
- monasca.threshold.filtering-bolt.execute-count.event-bolt_metric-alarm-events
- filtering-bolt.execute-count.metrics-spout_default :
- monasca.threshold.filtering-bolt.execute-count.metrics-spout_default
- thresholding-bolt.execute-count.aggregation-bolt_default :
- monasca.threshold.thresholding-bolt.execute-count.aggregation-bolt_default
- thresholding-bolt.execute-count.event-bolt_alarm-definition-events :
- monasca.threshold.thresholding-bolt.execute-count.event-bolt_alarm-definition-events
- system.memory_heap.committedBytes :
- monasca.threshold.system.memory_heap.committedBytes
- system.memory_nonHeap.committedBytes :
- monasca.threshold.system.memory_nonHeap.committedBytes
- system.newWorkerEvent :
- monasca.threshold.system.newWorkerEvent
- system.startTimeSecs :
- monasca.threshold.system.startTimeSecs
- system.GC_ConcurrentMarkSweep.timeMs :
- monasca.threshold.system.GC_ConcurrentMarkSweep.timeMs
-
-
-metricSpoutConfig:
- kafkaConsumerConfiguration:
- # See http://kafka.apache.org/documentation.html#api for semantics and defaults.
- topic: "{{ monasca_metrics_topic }}"
- numThreads: 1
- groupId: "thresh-metric"
- zookeeperConnect: "{{ monasca_zookeeper_servers }}"
- consumerId: 1
- socketTimeoutMs: 30000
- socketReceiveBufferBytes: 65536
- fetchMessageMaxBytes: 1048576
- autoCommitEnable: true
- autoCommitIntervalMs: 60000
- queuedMaxMessageChunks: 10
- rebalanceMaxRetries: 4
- fetchMinBytes: 1
- fetchWaitMaxMs: 100
- rebalanceBackoffMs: 2000
- refreshLeaderBackoffMs: 200
- autoOffsetReset: largest
- consumerTimeoutMs: -1
- clientId: 1
- zookeeperSessionTimeoutMs: 60000
- zookeeperConnectionTimeoutMs: 60000
- zookeeperSyncTimeMs: 2000
-
-
-eventSpoutConfig:
- kafkaConsumerConfiguration:
- # See http://kafka.apache.org/documentation.html#api for semantics and defaults.
- topic: "{{ monasca_events_topic }}"
- numThreads: 1
- groupId: "thresh-event"
- zookeeperConnect: "{{ monasca_zookeeper_servers }}"
- consumerId: 1
- socketTimeoutMs: 30000
- socketReceiveBufferBytes: 65536
- fetchMessageMaxBytes: 1048576
- autoCommitEnable: true
- autoCommitIntervalMs: 60000
- queuedMaxMessageChunks: 10
- rebalanceMaxRetries: 4
- fetchMinBytes: 1
- fetchWaitMaxMs: 100
- rebalanceBackoffMs: 2000
- refreshLeaderBackoffMs: 200
- autoOffsetReset: largest
- consumerTimeoutMs: -1
- clientId: 1
- zookeeperSessionTimeoutMs: 60000
- zookeeperConnectionTimeoutMs: 60000
- zookeeperSyncTimeMs: 2000
-
-
-kafkaProducerConfig:
- # See http://kafka.apache.org/documentation.html#api for semantics and defaults.
- topic: "{{ monasca_alarm_state_transitions_topic }}"
- metadataBrokerList: "{{ monasca_kafka_servers }}"
- serializerClass: kafka.serializer.StringEncoder
- partitionerClass:
- requestRequiredAcks: 1
- requestTimeoutMs: 10000
- producerType: sync
- keySerializerClass:
- compressionCodec: none
- compressedTopics:
- messageSendMaxRetries: 3
- retryBackoffMs: 100
- topicMetadataRefreshIntervalMs: 600000
- queueBufferingMaxMs: 5000
- queueBufferingMaxMessages: 10000
- queueEnqueueTimeoutMs: -1
- batchNumMessages: 200
- sendBufferBytes: 102400
- clientId: Threshold_Engine
-
-
-sporadicMetricNamespaces:
- - foo
-
-database:
- driverClass: org.drizzle.jdbc.DrizzleDriver
- url: "jdbc:drizzle://{{ monasca_database_address | put_address_in_context('url') }}:{{ monasca_database_port }}/{{ monasca_database_name }}"
- user: "{{ monasca_database_user }}"
- password: "{{ monasca_database_password }}"
- properties:
- ssl: false
- # the maximum amount of time to wait on an empty pool before throwing an exception
- maxWaitForConnection: 1s
- # the SQL query to run when validating a connection's liveness TODO FIXME
- validationQuery: "/* MyService Health Check */ SELECT 1"
- # the minimum number of connections to keep open
- minSize: 8
- # the maximum number of connections to keep open
- maxSize: 41
- hibernateSupport: false
- # hibernate provider class
- providerClass: com.zaxxer.hikari.hibernate.HikariConnectionProvider
- databaseName: "{{ monasca_database_name }}"
- serverName: "{{ monasca_database_address }}"
- portNumber: "{{ monasca_database_port }}"
- # hibernate auto configuration parameter
- autoConfig: validate
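The JDBC URL in the database section above relies on the put_address_in_context('url') filter used throughout these templates. A sketch of its apparent behaviour, inferred from usage here rather than from kolla-ansible's source: IPv6 literals get bracketed so that an appended ':port' suffix stays parseable.

    import ipaddress

    def put_address_in_context(address, context="url"):
        # Assumption: bracket IPv6 literals; pass hostnames/IPv4 through.
        try:
            if isinstance(ipaddress.ip_address(address), ipaddress.IPv6Address):
                return f"[{address}]"
        except ValueError:
            pass  # not an IP literal, e.g. a hostname
        return address

    print(put_address_in_context("fd00::1"))   # [fd00::1]
    print(put_address_in_context("db.local"))  # db.local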
diff --git a/ansible/roles/monasca/vars/main.yml b/ansible/roles/monasca/vars/main.yml
deleted file mode 100644
index 73bec68f49..0000000000
--- a/ansible/roles/monasca/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-project_name: "monasca"
diff --git a/ansible/roles/multipathd/defaults/main.yml b/ansible/roles/multipathd/defaults/main.yml
index 0d098d9deb..ec683876ca 100644
--- a/ansible/roles/multipathd/defaults/main.yml
+++ b/ansible/roles/multipathd/defaults/main.yml
@@ -7,13 +7,13 @@ multipathd_services:
ipc_mode: "host"
privileged: True
image: "{{ multipathd_image_full }}"
- volumes: "{{ multipathd_default_volumes + multipathd_extra_volumes }}"
+ volumes: "{{ multipathd_default_volumes + multipathd_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
####################
# Docker
####################
-multipathd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/multipathd"
+multipathd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}multipathd"
multipathd_tag: "{{ openstack_tag }}"
multipathd_image_full: "{{ multipathd_image }}:{{ multipathd_tag }}"
@@ -23,7 +23,7 @@ multipathd_default_volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "/dev/:/dev/"
- - "/run/:/run/:shared"
+ - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
- "/lib/modules:/lib/modules:ro"
- "/sys/kernel/config:/configfs"
multipathd_extra_volumes: "{{ default_extra_volumes }}"
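The new volumes expression above uses lookup('vars', ...) for name-based indirection: the configured container engine selects which engine-specific run volumes are appended. A Python sketch of the same dispatch (variable names follow the diff; the values are hypothetical):

    role_vars = {
        "run_default_volumes_docker": [],
        "run_default_volumes_podman": ["/run/podman:/run/podman"],
    }
    kolla_container_engine = "podman"
    volumes = ["/dev/:/dev/"] + role_vars["run_default_volumes_" + kolla_container_engine]
    print(volumes)  # ['/dev/:/dev/', '/run/podman:/run/podman']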
diff --git a/ansible/roles/multipathd/handlers/main.yml b/ansible/roles/multipathd/handlers/main.yml
index 91983d4486..8ec93c9651 100644
--- a/ansible/roles/multipathd/handlers/main.yml
+++ b/ansible/roles/multipathd/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "multipathd"
service: "{{ multipathd_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -12,5 +12,3 @@
ipc_mode: "{{ service.ipc_mode }}"
privileged: "{{ service.privileged }}"
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/multipathd/tasks/check-containers.yml b/ansible/roles/multipathd/tasks/check-containers.yml
index e2ebb98cbd..b7e2f7c29f 100644
--- a/ansible/roles/multipathd/tasks/check-containers.yml
+++ b/ansible/roles/multipathd/tasks/check-containers.yml
@@ -1,17 +1,3 @@
---
-- name: Check multipathd containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- ipc_mode: "{{ item.value.ipc_mode }}"
- privileged: "{{ item.value.privileged | default(False) }}"
- volumes: "{{ item.value.volumes }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ multipathd_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/multipathd/tasks/config.yml b/ansible/roles/multipathd/tasks/config.yml
index f9281e9f8e..7d4289e3e7 100644
--- a/ansible/roles/multipathd/tasks/config.yml
+++ b/ansible/roles/multipathd/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ multipathd_services }}"
+ with_dict: "{{ multipathd_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over config.json files for services
template:
@@ -18,12 +15,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ multipathd_services }}"
- notify:
- - Restart multipathd container
+ with_dict: "{{ multipathd_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over multipath.conf
vars:
@@ -37,8 +29,4 @@
- "{{ node_custom_config }}/multipath/{{ inventory_hostname }}/multipath.conf"
- "{{ node_custom_config }}/multipath.conf"
- "multipath.conf.j2"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart multipathd container
+ when: service | service_enabled_and_mapped_to_host
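The two filters introduced in this file collapse the repeated 'inventory_hostname in groups[...]' and 'enabled | bool' conditions. A sketch of the logic they encapsulate, inferred from how the diff uses them (the real filter plugins resolve the host context themselves):

    def service_enabled_and_mapped_to_host(service, inventory_hostname, groups):
        return bool(service.get("enabled")) and \
            inventory_hostname in groups.get(service["group"], [])

    def select_services_enabled_and_mapped_to_host(services, inventory_hostname, groups):
        return {name: svc for name, svc in services.items()
                if service_enabled_and_mapped_to_host(svc, inventory_hostname, groups)}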
diff --git a/ansible/roles/zookeeper/tasks/check.yml b/ansible/roles/multipathd/tasks/config_validate.yml
similarity index 100%
rename from ansible/roles/zookeeper/tasks/check.yml
rename to ansible/roles/multipathd/tasks/config_validate.yml
diff --git a/ansible/roles/multipathd/templates/multipath.conf.j2 b/ansible/roles/multipathd/templates/multipath.conf.j2
index 5aa10573e7..478eef0230 100644
--- a/ansible/roles/multipathd/templates/multipath.conf.j2
+++ b/ansible/roles/multipathd/templates/multipath.conf.j2
@@ -1,6 +1,7 @@
defaults {
user_friendly_names no
find_multipaths yes
+ skip_kpartx yes
}
blacklist {
diff --git a/ansible/roles/murano/defaults/main.yml b/ansible/roles/murano/defaults/main.yml
deleted file mode 100644
index e97ce75dbb..0000000000
--- a/ansible/roles/murano/defaults/main.yml
+++ /dev/null
@@ -1,131 +0,0 @@
----
-murano_services:
- murano-api:
- container_name: murano_api
- group: murano-api
- enabled: true
- image: "{{ murano_api_image_full }}"
- volumes: "{{ murano_api_default_volumes + murano_api_extra_volumes }}"
- dimensions: "{{ murano_api_dimensions }}"
- haproxy:
- murano_api:
- enabled: "{{ enable_murano }}"
- mode: "http"
- external: false
- port: "{{ murano_api_port }}"
- murano_api_external:
- enabled: "{{ enable_murano }}"
- mode: "http"
- external: true
- port: "{{ murano_api_port }}"
- murano-engine:
- container_name: murano_engine
- group: murano-engine
- enabled: true
- image: "{{ murano_engine_image_full }}"
- volumes: "{{ murano_engine_default_volumes + murano_engine_extra_volumes }}"
- dimensions: "{{ murano_engine_dimensions }}"
-
-
-####################
-# Database
-####################
-murano_database_name: "murano"
-murano_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}murano{% endif %}"
-murano_database_address: "{{ database_address | put_address_in_context('url') }}:{{ database_port }}"
-
-####################
-# Database sharding
-####################
-murano_database_shard_root_user: "{% if enable_proxysql | bool %}root_shard_{{ murano_database_shard_id }}{% else %}{{ database_user }}{% endif %}"
-murano_database_shard_id: "{{ mariadb_default_database_shard_id | int }}"
-murano_database_shard:
- users:
- - user: "{{ murano_database_user }}"
- password: "{{ murano_database_password }}"
- rules:
- - schema: "{{ murano_database_name }}"
- shard_id: "{{ murano_database_shard_id }}"
-
-
-####################
-# Docker
-####################
-murano_tag: "{{ openstack_tag }}"
-
-murano_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/murano-api"
-murano_api_tag: "{{ murano_tag }}"
-murano_api_image_full: "{{ murano_api_image }}:{{ murano_api_tag }}"
-
-murano_engine_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/murano-engine"
-murano_engine_tag: "{{ murano_tag }}"
-murano_engine_image_full: "{{ murano_engine_image }}:{{ murano_engine_tag }}"
-
-murano_api_dimensions: "{{ default_container_dimensions }}"
-murano_engine_dimensions: "{{ default_container_dimensions }}"
-
-murano_api_default_volumes:
- - "{{ node_config_directory }}/murano-api/:{{ container_config_directory }}/:ro"
- - "{{ kolla_dev_repos_directory ~ '/murano/murano:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/murano' if murano_dev_mode | bool else '' }}"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla/"
-murano_engine_default_volumes:
- - "{{ node_config_directory }}/murano-engine/:{{ container_config_directory }}/:ro"
- - "{{ kolla_dev_repos_directory ~ '/murano/murano:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/murano' if murano_dev_mode | bool else '' }}"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla/"
-
-murano_extra_volumes: "{{ default_extra_volumes }}"
-murano_api_extra_volumes: "{{ murano_extra_volumes }}"
-murano_engine_extra_volumes: "{{ murano_extra_volumes }}"
-
-####################
-# OpenStack
-####################
-murano_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ murano_api_port }}"
-murano_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ murano_api_port }}"
-
-murano_logging_debug: "{{ openstack_logging_debug }}"
-
-openstack_murano_auth: "{{ openstack_auth }}"
-
-murano_agent_timeout: 3600
-
-murano_engine_workers: "{{ openstack_service_workers }}"
-murano_api_workers: "{{ openstack_service_workers }}"
-
-####################
-# Kolla
-####################
-murano_git_repository: "{{ kolla_dev_repos_git }}/{{ project_name }}"
-murano_dev_repos_pull: "{{ kolla_dev_repos_pull }}"
-murano_dev_mode: "{{ kolla_dev_mode }}"
-murano_source_version: "{{ kolla_source_version }}"
-
-####################
-# Notifications
-####################
-murano_notification_topics:
- - name: notifications
- enabled: "{{ enable_ceilometer | bool }}"
-
-murano_enabled_notification_topics: "{{ murano_notification_topics | selectattr('enabled', 'equalto', true) | list }}"
-
-####################
-# Keystone
-####################
-murano_ks_services:
- - name: "murano"
- type: "application-catalog"
-    description: "OpenStack Application Catalogue"
- endpoints:
- - {'interface': 'internal', 'url': '{{ murano_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ murano_public_endpoint }}'}
-
-murano_ks_users:
- - project: "service"
- user: "{{ murano_keystone_user }}"
- password: "{{ murano_keystone_password }}"
- role: "admin"
diff --git a/ansible/roles/murano/handlers/main.yml b/ansible/roles/murano/handlers/main.yml
deleted file mode 100644
index c9eb955f45..0000000000
--- a/ansible/roles/murano/handlers/main.yml
+++ /dev/null
@@ -1,31 +0,0 @@
----
-- name: Restart murano-api container
- vars:
- service_name: "murano-api"
- service: "{{ murano_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
-
-- name: Restart murano-engine container
- vars:
- service_name: "murano-engine"
- service: "{{ murano_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- privileged: "{{ service.privileged | default(False) }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/murano/tasks/bootstrap.yml b/ansible/roles/murano/tasks/bootstrap.yml
deleted file mode 100644
index 00ce24f37e..0000000000
--- a/ansible/roles/murano/tasks/bootstrap.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-- name: Creating Murano database
- become: true
- kolla_toolbox:
- module_name: mysql_db
- module_args:
- login_host: "{{ database_address }}"
- login_port: "{{ database_port }}"
- login_user: "{{ murano_database_shard_root_user }}"
- login_password: "{{ database_password }}"
- name: "{{ murano_database_name }}"
- run_once: True
- delegate_to: "{{ groups['murano-api'][0] }}"
- when:
- - not use_preconfigured_databases | bool
-
-- name: Creating Murano database user and setting permissions
- become: true
- kolla_toolbox:
- module_name: mysql_user
- module_args:
- login_host: "{{ database_address }}"
- login_port: "{{ database_port }}"
- login_user: "{{ murano_database_shard_root_user }}"
- login_password: "{{ database_password }}"
- name: "{{ murano_database_user }}"
- password: "{{ murano_database_password }}"
- host: "%"
- priv: "{{ murano_database_name }}.*:ALL"
- append_privs: "yes"
- run_once: True
- delegate_to: "{{ groups['murano-api'][0] }}"
- when:
- - not use_preconfigured_databases | bool
-
-- import_tasks: bootstrap_service.yml
diff --git a/ansible/roles/murano/tasks/bootstrap_service.yml b/ansible/roles/murano/tasks/bootstrap_service.yml
deleted file mode 100644
index c1acf18ee2..0000000000
--- a/ansible/roles/murano/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Running Murano bootstrap container
- become: true
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ murano_api_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_murano"
- restart_policy: no
- volumes:
- - "{{ node_config_directory }}/murano-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- run_once: True
- delegate_to: "{{ groups['murano-api'][0] }}"
diff --git a/ansible/roles/murano/tasks/check-containers.yml b/ansible/roles/murano/tasks/check-containers.yml
deleted file mode 100644
index cabcf1262d..0000000000
--- a/ansible/roles/murano/tasks/check-containers.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: Check murano containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes }}"
- dimensions: "{{ item.value.dimensions }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ murano_services }}"
- notify:
- - "Restart {{ item.key }} container"
diff --git a/ansible/roles/murano/tasks/clone.yml b/ansible/roles/murano/tasks/clone.yml
deleted file mode 100644
index d284a1b13a..0000000000
--- a/ansible/roles/murano/tasks/clone.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Cloning source repositories for development
- become: true
- git:
- repo: "{{ murano_git_repository }}"
- dest: "{{ kolla_dev_repos_directory }}/{{ project_name }}"
- update: "{{ murano_dev_repos_pull }}"
- version: "{{ murano_source_version }}"
diff --git a/ansible/roles/murano/tasks/config.yml b/ansible/roles/murano/tasks/config.yml
deleted file mode 100644
index 8f26465784..0000000000
--- a/ansible/roles/murano/tasks/config.yml
+++ /dev/null
@@ -1,82 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item.key }}"
- state: "directory"
- owner: "{{ config_owner_user }}"
- group: "{{ config_owner_group }}"
- mode: "0770"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ murano_services }}"
-
-- name: Check if policies shall be overwritten
- stat:
- path: "{{ item }}"
- delegate_to: localhost
- run_once: True
- register: murano_policy
- with_first_found:
- - files: "{{ supported_policy_format_list }}"
- paths:
- - "{{ node_custom_config }}/murano/"
- skip: true
-
-- name: Set murano policy file
- set_fact:
- murano_policy_file: "{{ murano_policy.results.0.stat.path | basename }}"
- murano_policy_file_path: "{{ murano_policy.results.0.stat.path }}"
- when:
- - murano_policy.results
-
-- include_tasks: copy-certs.yml
- when:
- - kolla_copy_ca_into_containers | bool
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item.key }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ murano_services }}"
- notify:
- - "Restart {{ item.key }} container"
-
-- name: Copying over murano.conf
- vars:
- service_name: "{{ item.key }}"
- merge_configs:
- sources:
- - "{{ role_path }}/templates/murano.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/murano.conf"
- - "{{ node_custom_config }}/murano/{{ item.key }}.conf"
- - "{{ node_custom_config }}/murano/{{ inventory_hostname }}/murano.conf"
- dest: "{{ node_config_directory }}/{{ item.key }}/murano.conf"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ murano_services }}"
- notify:
- - "Restart {{ item.key }} container"
-
-- name: Copying over existing policy file
- template:
- src: "{{ murano_policy_file_path }}"
- dest: "{{ node_config_directory }}/{{ item.key }}/{{ murano_policy_file }}"
- mode: "0660"
- when:
- - murano_policy_file is defined
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ murano_services }}"
- notify:
- - "Restart {{ item.key }} container"
diff --git a/ansible/roles/murano/tasks/copy-certs.yml b/ansible/roles/murano/tasks/copy-certs.yml
deleted file mode 100644
index f3dcc6dd6b..0000000000
--- a/ansible/roles/murano/tasks/copy-certs.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: "Copy certificates and keys for {{ project_name }}"
- import_role:
- role: service-cert-copy
- vars:
- project_services: "{{ murano_services }}"
diff --git a/ansible/roles/murano/tasks/deploy-containers.yml b/ansible/roles/murano/tasks/deploy-containers.yml
deleted file mode 100644
index eb24ab5c7a..0000000000
--- a/ansible/roles/murano/tasks/deploy-containers.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- import_tasks: check-containers.yml
diff --git a/ansible/roles/murano/tasks/deploy.yml b/ansible/roles/murano/tasks/deploy.yml
deleted file mode 100644
index a039004dd7..0000000000
--- a/ansible/roles/murano/tasks/deploy.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- import_tasks: register.yml
-
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-- include_tasks: clone.yml
- when:
- - murano_dev_mode | bool
-
-- import_tasks: bootstrap.yml
-
-- name: Flush handlers
- meta: flush_handlers
-
-- import_tasks: import_library_packages.yml
diff --git a/ansible/roles/murano/tasks/import_library_packages.yml b/ansible/roles/murano/tasks/import_library_packages.yml
deleted file mode 100644
index ba8ce43e3b..0000000000
--- a/ansible/roles/murano/tasks/import_library_packages.yml
+++ /dev/null
@@ -1,61 +0,0 @@
----
-- name: Waiting for Murano API service to be ready on first node
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ murano_api_port }}"
- connect_timeout: 1
- timeout: 60
- run_once: True
- register: check_murano_port
- until: check_murano_port is success
- retries: 10
- delay: 6
- delegate_to: "{{ groups['murano-api'][0] }}"
-
-- name: Checking if Murano core and applications library packages exist
- become: true
- command: >
- {{ kolla_container_engine }} exec murano_api murano
- --os-username {{ openstack_auth.username }}
- --os-password {{ openstack_auth.password }}
- --os-system-scope {{ openstack_auth.system_scope }}
- {% if openstack_cacert != '' %}--os-cacert {{ openstack_cacert }}{% endif %}
- --os-auth-url {{ openstack_auth.auth_url }}
- --murano-url {{ murano_internal_endpoint }}
- package-list
- register: status
- changed_when: False
- run_once: True
- delegate_to: "{{ groups['murano-api'][0] }}"
-
-- name: Importing Murano core library package
- become: true
- command: >
- {{ kolla_container_engine }} exec murano_api murano
- --os-username {{ openstack_auth.username }}
- --os-password {{ openstack_auth.password }}
- --os-system-scope {{ openstack_auth.system_scope }}
- {% if openstack_cacert != '' %}--os-cacert {{ openstack_cacert }}{% endif %}
- --os-auth-url {{ openstack_auth.auth_url }}
- --murano-url {{ murano_internal_endpoint }}
- package-import --exists-action u --is-public /io.murano.zip
- run_once: True
- delegate_to: "{{ groups['murano-api'][0] }}"
- when:
- - status.stdout.find("io.murano") == -1 or kolla_action == "upgrade"
-
-- name: Importing Murano applications library package
- become: true
- command: >
- {{ kolla_container_engine }} exec murano_api murano
- --os-username {{ openstack_auth.username }}
- --os-password {{ openstack_auth.password }}
- --os-system-scope {{ openstack_auth.system_scope }}
- {% if openstack_cacert != '' %}--os-cacert {{ openstack_cacert }}{% endif %}
- --os-auth-url {{ openstack_auth.auth_url }}
- --murano-url {{ murano_internal_endpoint }}
- package-import --exists-action u --is-public /io.murano.applications.zip
- run_once: True
- delegate_to: "{{ groups['murano-api'][0] }}"
- when:
- - status.stdout.find("io.murano.applications") == -1 or kolla_action == "upgrade"
diff --git a/ansible/roles/murano/tasks/loadbalancer.yml b/ansible/roles/murano/tasks/loadbalancer.yml
deleted file mode 100644
index 215d32c461..0000000000
--- a/ansible/roles/murano/tasks/loadbalancer.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: "Configure loadbalancer for {{ project_name }}"
- import_role:
- name: loadbalancer-config
- vars:
- project_services: "{{ murano_services }}"
- tags: always
diff --git a/ansible/roles/murano/tasks/main.yml b/ansible/roles/murano/tasks/main.yml
deleted file mode 100644
index bc5d1e6257..0000000000
--- a/ansible/roles/murano/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include_tasks: "{{ kolla_action }}.yml"
diff --git a/ansible/roles/murano/tasks/precheck.yml b/ansible/roles/murano/tasks/precheck.yml
deleted file mode 100644
index cedd21ff7f..0000000000
--- a/ansible/roles/murano/tasks/precheck.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- import_role:
- name: service-precheck
- vars:
- service_precheck_services: "{{ murano_services }}"
- service_name: "{{ project_name }}"
-
-- name: Get container facts
- become: true
- kolla_container_facts:
- name:
- - murano_api
- register: container_facts
-
-- name: Checking free port for Murano API
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ murano_api_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - container_facts['murano_api'] is not defined
- - inventory_hostname in groups['murano-api']
diff --git a/ansible/roles/murano/tasks/pull.yml b/ansible/roles/murano/tasks/pull.yml
deleted file mode 100644
index 53f9c5fda1..0000000000
--- a/ansible/roles/murano/tasks/pull.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- import_role:
- role: service-images-pull
diff --git a/ansible/roles/murano/tasks/register.yml b/ansible/roles/murano/tasks/register.yml
deleted file mode 100644
index 2b10a818a0..0000000000
--- a/ansible/roles/murano/tasks/register.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- import_role:
- name: service-ks-register
- vars:
- service_ks_register_auth: "{{ openstack_murano_auth }}"
- service_ks_register_services: "{{ murano_ks_services }}"
- service_ks_register_users: "{{ murano_ks_users }}"
diff --git a/ansible/roles/murano/tasks/stop.yml b/ansible/roles/murano/tasks/stop.yml
deleted file mode 100644
index 8e110ef997..0000000000
--- a/ansible/roles/murano/tasks/stop.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- import_role:
- name: service-stop
- vars:
- project_services: "{{ murano_services }}"
- service_name: "{{ project_name }}"
diff --git a/ansible/roles/murano/tasks/upgrade.yml b/ansible/roles/murano/tasks/upgrade.yml
deleted file mode 100644
index 7d9c287b04..0000000000
--- a/ansible/roles/murano/tasks/upgrade.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-- import_tasks: bootstrap_service.yml
-
-- name: Flush handlers
- meta: flush_handlers
-
-- import_tasks: import_library_packages.yml
diff --git a/ansible/roles/murano/templates/murano-api.json.j2 b/ansible/roles/murano/templates/murano-api.json.j2
deleted file mode 100644
index 6b30be2fc7..0000000000
--- a/ansible/roles/murano/templates/murano-api.json.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-{
- "command": "murano-api --config-file /etc/murano/murano.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/murano.conf",
- "dest": "/etc/murano/murano.conf",
- "owner": "murano",
- "perm": "0600"
- }{% if murano_policy_file is defined %},
- {
- "source": "{{ container_config_directory }}/{{ murano_policy_file }}",
- "dest": "/etc/murano/{{ murano_policy_file }}",
- "owner": "murano",
- "perm": "0600"
- }{% endif %}
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/murano",
- "owner": "murano:murano",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/murano/templates/murano-engine.json.j2 b/ansible/roles/murano/templates/murano-engine.json.j2
deleted file mode 100644
index a42329e002..0000000000
--- a/ansible/roles/murano/templates/murano-engine.json.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-{
- "command": "murano-engine --config-file /etc/murano/murano.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/murano.conf",
- "dest": "/etc/murano/murano.conf",
- "owner": "murano",
- "perm": "0600"
- }{% if murano_policy_file is defined %},
- {
- "source": "{{ container_config_directory }}/{{ murano_policy_file }}",
- "dest": "/etc/murano/{{ murano_policy_file }}",
- "owner": "murano",
- "perm": "0600"
- }{% endif %}
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/murano",
- "owner": "murano:murano",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/murano/templates/murano.conf.j2 b/ansible/roles/murano/templates/murano.conf.j2
deleted file mode 100644
index fa4797e776..0000000000
--- a/ansible/roles/murano/templates/murano.conf.j2
+++ /dev/null
@@ -1,112 +0,0 @@
-[DEFAULT]
-debug = {{ murano_logging_debug }}
-
-log_dir = /var/log/kolla/murano
-
-{% if service_name == 'murano-api' %}
-bind_host = {{ api_interface_address }}
-bind_port = {{ murano_api_port }}
-{% endif %}
-
-transport_url = {{ rpc_transport_url }}
-
-[engine]
-engine_workers = {{ murano_engine_workers }}
-agent_timeout = {{ murano_agent_timeout }}
-
-[database]
-connection = mysql+pymysql://{{ murano_database_user }}:{{ murano_database_password }}@{{ murano_database_address }}/{{ murano_database_name }}
-connection_recycle_time = {{ database_connection_recycle_time }}
-max_pool_size = {{ database_max_pool_size }}
-max_retries = -1
-
-[keystone_authtoken]
-service_type = application-catalog
-www_authenticate_uri = {{ keystone_internal_url }}
-auth_url = {{ keystone_internal_url }}
-auth_type = password
-project_domain_id = {{ default_project_domain_id }}
-user_domain_id = {{ default_user_domain_id }}
-project_name = service
-username = {{ murano_keystone_user }}
-password = {{ murano_keystone_password }}
-cafile = {{ openstack_cacert }}
-region_name = {{ openstack_region_name }}
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[murano_auth]
-auth_uri = {{ keystone_internal_url }}
-auth_url = {{ keystone_internal_url }}
-auth_type = password
-project_domain_name = {{ default_project_domain_name }}
-user_domain_name = {{ default_user_domain_name }}
-project_name = service
-username = {{ murano_keystone_user }}
-password = {{ murano_keystone_password }}
-cafile = {{ openstack_cacert }}
-
-[murano]
-url = {{ murano_internal_endpoint }}
-api_workers = {{ murano_api_workers }}
-
-[oslo_messaging_notifications]
-transport_url = {{ notify_transport_url }}
-{% if murano_enabled_notification_topics %}
-driver = messagingv2
-topics = {{ murano_enabled_notification_topics | map(attribute='name') | join(',') }}
-{% else %}
-driver = noop
-{% endif %}
-
-{% if om_enable_rabbitmq_tls | bool %}
-[oslo_messaging_rabbit]
-ssl = true
-ssl_ca_file = {{ om_rabbitmq_cacert }}
-{% endif %}
-
-[oslo_middleware]
-enable_proxy_headers_parsing = True
-
-{% if murano_policy_file is defined %}
-[oslo_policy]
-policy_file = {{ murano_policy_file }}
-{% endif %}
-
-{% if service_name == 'murano-engine' %}
-[rabbitmq]
-host = {{ kolla_external_fqdn }}
-port = {{ outward_rabbitmq_port }}
-login = {{ murano_agent_rabbitmq_user }}
-password = {{ murano_agent_rabbitmq_password }}
-virtual_host = {{ murano_agent_rabbitmq_vhost }}
-
-{% if enable_barbican | bool %}
-[key_manager]
-auth_type = keystone_password
-auth_url = {{ keystone_internal_url }}
-username = {{ murano_keystone_user }}
-password = {{ murano_keystone_password }}
-user_domain_name = {{ default_project_domain_name }}
-cafile = {{ openstack_cacert }}
-region_name = {{ openstack_region_name }}
-{% endif %}
-{% endif %}
-
-[neutron]
-endpoint_type = internalURL
-cafile = {{ openstack_cacert }}
-
-[heat]
-endpoint_type = internalURL
-cafile = {{ openstack_cacert }}
-
-[glance]
-endpoint_type = internalURL
-cafile = {{ openstack_cacert }}
-
-[mistral]
-endpoint_type = internalURL
-cafile = {{ openstack_cacert }}
diff --git a/ansible/roles/murano/vars/main.yml b/ansible/roles/murano/vars/main.yml
deleted file mode 100644
index b9c934a3d2..0000000000
--- a/ansible/roles/murano/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-project_name: "murano"
diff --git a/ansible/roles/neutron/defaults/main.yml b/ansible/roles/neutron/defaults/main.yml
index 6960112025..2501ce3a42 100644
--- a/ansible/roles/neutron/defaults/main.yml
+++ b/ansible/roles/neutron/defaults/main.yml
@@ -20,7 +20,8 @@ neutron_services:
enabled: "{{ enable_neutron | bool and not neutron_enable_tls_backend | bool }}"
mode: "http"
external: true
- port: "{{ neutron_server_port }}"
+ external_fqdn: "{{ neutron_external_fqdn }}"
+ port: "{{ neutron_server_public_port }}"
listen_port: "{{ neutron_server_listen_port }}"
neutron-openvswitch-agent:
container_name: "neutron_openvswitch_agent"
@@ -194,9 +195,46 @@ neutron_services:
enabled: "{{ enable_neutron | bool and neutron_enable_tls_backend | bool }}"
mode: "http"
external: true
+ external_fqdn: "{{ neutron_external_fqdn }}"
port: "{{ neutron_server_port }}"
listen_port: "{{ neutron_server_listen_port }}"
tls_backend: "yes"
+ neutron-ovn-agent:
+ container_name: neutron_ovn_agent
+ group: neutron-ovn-agent
+ host_in_groups: "{{ inventory_hostname in groups['neutron-ovn-agent'] }}"
+ enabled: "{{ neutron_enable_ovn_agent | bool }}"
+ image: "{{ neutron_ovn_agent_image_full }}"
+ volumes: "{{ neutron_ovn_agent_default_volumes + neutron_ovn_agent_extra_volumes }}"
+ dimensions: "{{ neutron_ovn_agent_dimensions }}"
+ healthcheck: "{{ neutron_ovn_agent_healthcheck }}"
+
+####################
+# Config Validate
+####################
+neutron_config_validation:
+ - generator: "/neutron/etc/oslo-config-generator/neutron.conf"
+ config: "/etc/neutron/neutron.conf"
+ - generator: "/neutron/etc/oslo-config-generator/neutron.conf"
+ config: "/etc/neutron/neutron_vpnaas.conf"
+ - generator: "/neutron/etc/oslo-config-generator/ml2_conf.ini"
+ config: "/etc/neutron/plugins/ml2/ml2_conf.ini"
+ - generator: "/neutron/etc/oslo-config-generator/openvswitch_agent.ini"
+ config: "/etc/neutron/plugins/ml2/openvswitch_agent.ini"
+ - generator: "/neutron/etc/oslo-config-generator/metering_agent.ini"
+ config: "/etc/neutron/metering_agent.ini"
+ - generator: "/neutron/etc/oslo-config-generator/neutron_ovn_metadata_agent.ini"
+ config: "/etc/neutron/neutron_ovn_metadata_agent.ini"
+ - generator: "/neutron/etc/oslo-config-generator/metadata_agent.ini"
+ config: "/etc/neutron/metadata_agent.ini"
+ - generator: "/neutron/etc/oslo-config-generator/sriov_agent.ini"
+ config: "/etc/neutron/plugins/ml2/sriov_agent.ini"
+ - generator: "/neutron/etc/oslo-config-generator/l3_agent.ini"
+ config: "/etc/neutron/l3_agent.ini"
+ - generator: "/neutron/etc/oslo-config-generator/dhcp_agent.ini"
+ config: "/etc/neutron/dhcp_agent.ini"
+ - generator: "/neutron/etc/oslo-config-generator/linuxbridge_agent.ini"
+ config: "/etc/neutron/plugins/ml2/linuxbridge_agent.ini"
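+# Each generator/config pair above feeds config validation: conceptually, a
+# rendered file is checked against the options the service declares, along
+# the lines of (invocation sketched, not the exact task):
+#   oslo-config-validator \
+#     --config-file /neutron/etc/oslo-config-generator/neutron.conf \
+#     --input-file /etc/neutron/neutron.conf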
####################
# Database
@@ -222,68 +260,73 @@ neutron_database_shard:
####################
# Docker
####################
+haproxy_tag: "{{ openstack_tag }}"
neutron_tag: "{{ openstack_tag }}"
-neutron_dhcp_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/neutron-dhcp-agent"
+neutron_dhcp_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}neutron-dhcp-agent"
neutron_dhcp_agent_tag: "{{ neutron_tag }}"
neutron_dhcp_agent_image_full: "{{ neutron_dhcp_agent_image }}:{{ neutron_dhcp_agent_tag }}"
-neutron_l3_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/neutron-l3-agent"
+neutron_l3_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}neutron-l3-agent"
neutron_l3_agent_tag: "{{ neutron_tag }}"
neutron_l3_agent_image_full: "{{ neutron_l3_agent_image }}:{{ neutron_l3_agent_tag }}"
-neutron_sriov_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/neutron-sriov-agent"
+neutron_sriov_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}neutron-sriov-agent"
neutron_sriov_agent_tag: "{{ neutron_tag }}"
neutron_sriov_agent_image_full: "{{ neutron_sriov_agent_image }}:{{ neutron_sriov_agent_tag }}"
-neutron_mlnx_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/neutron-mlnx-agent"
+neutron_mlnx_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}neutron-mlnx-agent"
neutron_mlnx_agent_tag: "{{ neutron_tag }}"
neutron_mlnx_agent_image_full: "{{ neutron_mlnx_agent_image }}:{{ neutron_mlnx_agent_tag }}"
-neutron_eswitchd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/neutron-mlnx-agent"
+neutron_eswitchd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}neutron-mlnx-agent"
neutron_eswitchd_tag: "{{ neutron_mlnx_agent_tag }}"
neutron_eswitchd_image_full: "{{ neutron_eswitchd_image }}:{{ neutron_eswitchd_tag }}"
-neutron_linuxbridge_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/neutron-linuxbridge-agent"
+neutron_linuxbridge_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}neutron-linuxbridge-agent"
neutron_linuxbridge_agent_tag: "{{ neutron_tag }}"
neutron_linuxbridge_agent_image_full: "{{ neutron_linuxbridge_agent_image }}:{{ neutron_linuxbridge_agent_tag }}"
-neutron_metadata_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/neutron-metadata-agent"
+neutron_metadata_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}neutron-metadata-agent"
neutron_metadata_agent_tag: "{{ neutron_tag }}"
neutron_metadata_agent_image_full: "{{ neutron_metadata_agent_image }}:{{ neutron_metadata_agent_tag }}"
-neutron_ovn_metadata_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/neutron-metadata-agent"
+neutron_ovn_metadata_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}neutron-metadata-agent"
neutron_ovn_metadata_agent_tag: "{{ neutron_tag }}"
neutron_ovn_metadata_agent_image_full: "{{ neutron_ovn_metadata_agent_image }}:{{ neutron_ovn_metadata_agent_tag }}"
-neutron_openvswitch_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/neutron-openvswitch-agent"
+neutron_openvswitch_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}neutron-openvswitch-agent"
neutron_openvswitch_agent_tag: "{{ neutron_tag }}"
neutron_openvswitch_agent_image_full: "{{ neutron_openvswitch_agent_image }}:{{ neutron_openvswitch_agent_tag }}"
-neutron_server_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/neutron-server"
+neutron_server_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}neutron-server"
neutron_server_tag: "{{ neutron_tag }}"
neutron_server_image_full: "{{ neutron_server_image }}:{{ neutron_server_tag }}"
-neutron_bgp_dragent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/neutron-bgp-dragent"
+neutron_bgp_dragent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}neutron-bgp-dragent"
neutron_bgp_dragent_tag: "{{ neutron_tag }}"
neutron_bgp_dragent_image_full: "{{ neutron_bgp_dragent_image }}:{{ neutron_bgp_dragent_tag }}"
-neutron_infoblox_ipam_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/neutron-infoblox-ipam-agent"
+neutron_infoblox_ipam_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}neutron-infoblox-ipam-agent"
neutron_infoblox_ipam_agent_tag: "{{ neutron_tag }}"
neutron_infoblox_ipam_agent_image_full: "{{ neutron_infoblox_ipam_agent_image }}:{{ neutron_infoblox_ipam_agent_tag }}"
-neutron_metering_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/neutron-metering-agent"
+neutron_metering_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}neutron-metering-agent"
neutron_metering_agent_tag: "{{ neutron_tag }}"
neutron_metering_agent_image_full: "{{ neutron_metering_agent_image }}:{{ neutron_metering_agent_tag }}"
-ironic_neutron_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/ironic-neutron-agent"
+ironic_neutron_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}ironic-neutron-agent"
ironic_neutron_agent_tag: "{{ neutron_tag }}"
ironic_neutron_agent_image_full: "{{ ironic_neutron_agent_image }}:{{ ironic_neutron_agent_tag }}"
-neutron_tls_proxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/haproxy"
-neutron_tls_proxy_tag: "{{ neutron_tag }}"
+neutron_tls_proxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}haproxy"
+neutron_tls_proxy_tag: "{{ haproxy_tag }}"
neutron_tls_proxy_image_full: "{{ neutron_tls_proxy_image }}:{{ neutron_tls_proxy_tag }}"
+neutron_ovn_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}neutron-ovn-agent"
+neutron_ovn_agent_tag: "{{ neutron_tag }}"
+neutron_ovn_agent_image_full: "{{ neutron_ovn_agent_image }}:{{ neutron_ovn_agent_tag }}"
+
neutron_agent_dimensions: "{{ default_container_dimensions }}"
neutron_dhcp_agent_dimensions: "{{ neutron_agent_dimensions }}"
@@ -301,6 +344,7 @@ neutron_infoblox_ipam_agent_dimensions: "{{ default_container_dimensions }}"
neutron_metering_agent_dimensions: "{{ neutron_agent_dimensions }}"
ironic_neutron_agent_dimensions: "{{ default_container_dimensions }}"
neutron_tls_proxy_dimensions: "{{ default_container_dimensions }}"
+neutron_ovn_agent_dimensions: "{{ neutron_agent_dimensions }}"
neutron_dhcp_agent_enable_healthchecks: "{{ enable_container_healthchecks }}"
neutron_dhcp_agent_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
@@ -384,7 +428,7 @@ neutron_ovn_metadata_agent_enable_healthchecks: "{{ enable_container_healthcheck
neutron_ovn_metadata_agent_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
neutron_ovn_metadata_agent_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
neutron_ovn_metadata_agent_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-neutron_ovn_metadata_agent_healthcheck_test: ["CMD-SHELL", "healthcheck_port python {{ ovn_sb_db_port }}"]
+neutron_ovn_metadata_agent_healthcheck_test: ["CMD-SHELL", "healthcheck_port neutron-ovn-metadata-agent {{ ovsdb_port }}"]
neutron_ovn_metadata_agent_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
neutron_ovn_metadata_agent_healthcheck:
interval: "{{ neutron_ovn_metadata_agent_healthcheck_interval }}"
@@ -432,6 +476,19 @@ neutron_sriov_agent_healthcheck:
test: "{% if neutron_sriov_agent_enable_healthchecks | bool %}{{ neutron_sriov_agent_healthcheck_test }}{% else %}NONE{% endif %}"
timeout: "{{ neutron_sriov_agent_healthcheck_timeout }}"
+neutron_ovn_agent_enable_healthchecks: "{{ enable_container_healthchecks }}"
+neutron_ovn_agent_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
+neutron_ovn_agent_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
+neutron_ovn_agent_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
+neutron_ovn_agent_healthcheck_test: ["CMD-SHELL", "healthcheck_port neutron-ovn-agent {{ ovsdb_port }}"]
+neutron_ovn_agent_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
+neutron_ovn_agent_healthcheck:
+ interval: "{{ neutron_ovn_agent_healthcheck_interval }}"
+ retries: "{{ neutron_ovn_agent_healthcheck_retries }}"
+ start_period: "{{ neutron_ovn_agent_healthcheck_start_period }}"
+ test: "{% if neutron_ovn_agent_enable_healthchecks | bool %}{{ neutron_ovn_agent_healthcheck_test }}{% else %}NONE{% endif %}"
+ timeout: "{{ neutron_ovn_agent_healthcheck_timeout }}"
+
ironic_neutron_agent_enable_healthchecks: "{{ enable_container_healthchecks }}"
ironic_neutron_agent_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
ironic_neutron_agent_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
@@ -452,7 +509,7 @@ neutron_dhcp_agent_default_volumes:
- "neutron_metadata_socket:/var/lib/neutron/kolla/"
- "/run/netns:/run/netns:shared"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/neutron/neutron:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/neutron' if neutron_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}"
neutron_l3_agent_default_volumes:
- "{{ node_config_directory }}/neutron-l3-agent/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
@@ -461,40 +518,40 @@ neutron_l3_agent_default_volumes:
- "neutron_metadata_socket:/var/lib/neutron/kolla/"
- "/run/netns:/run/netns:shared"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/neutron/neutron:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/neutron' if neutron_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}"
neutron_sriov_agent_default_volumes:
- "{{ node_config_directory }}/neutron-sriov-agent/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/neutron/neutron:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/neutron' if neutron_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}"
neutron_mlnx_agent_default_volumes:
- "{{ node_config_directory }}/neutron-mlnx-agent/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/neutron/neutron:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/neutron' if neutron_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}"
neutron_eswitchd_default_volumes:
- "{{ node_config_directory }}/neutron-eswitchd/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "/run/libvirt:/run/libvirt:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/neutron/neutron:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/neutron' if neutron_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}"
neutron_linuxbridge_agent_default_volumes:
- "{{ node_config_directory }}/neutron-linuxbridge-agent/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "/lib/modules:/lib/modules:ro"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/neutron/neutron:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/neutron' if neutron_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}"
neutron_metadata_agent_default_volumes:
- "{{ node_config_directory }}/neutron-metadata-agent/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "neutron_metadata_socket:/var/lib/neutron/kolla/"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/neutron/neutron:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/neutron' if neutron_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}"
neutron_ovn_metadata_agent_default_volumes:
- "{{ node_config_directory }}/neutron-ovn-metadata-agent/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
@@ -502,7 +559,7 @@ neutron_ovn_metadata_agent_default_volumes:
- "/run/openvswitch:/run/openvswitch:shared"
- "/run/netns:/run/netns:shared"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/neutron/neutron:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/neutron' if neutron_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}"
neutron_openvswitch_agent_default_volumes:
- "{{ node_config_directory }}/neutron-openvswitch-agent/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
@@ -510,13 +567,13 @@ neutron_openvswitch_agent_default_volumes:
- "/lib/modules:/lib/modules:ro"
- "/run/openvswitch:/run/openvswitch:shared"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/neutron/neutron:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/neutron' if neutron_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}"
neutron_server_default_volumes:
- "{{ node_config_directory }}/neutron-server/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/neutron/neutron:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/neutron' if neutron_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}"
neutron_bgp_dragent_default_volumes:
- "{{ node_config_directory }}/neutron-bgp-dragent/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
@@ -532,7 +589,7 @@ neutron_metering_agent_default_volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/neutron/neutron:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/neutron' if neutron_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/neutron:/dev-mode/neutron' if neutron_dev_mode | bool else '' }}"
ironic_neutron_agent_default_volumes:
- "{{ node_config_directory }}/ironic-neutron-agent/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
@@ -543,6 +600,11 @@ neutron_tls_proxy_default_volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
+neutron_ovn_agent_default_volumes:
+ - "{{ node_config_directory }}/neutron-ovn-agent/:{{ container_config_directory }}/:ro"
+ - "/etc/localtime:/etc/localtime:ro"
+ - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
+ - "kolla_logs:/var/log/kolla/"
neutron_extra_volumes: "{{ default_extra_volumes }}"
neutron_dhcp_agent_extra_volumes: "{{ neutron_extra_volumes }}"
@@ -560,6 +622,7 @@ neutron_infoblox_ipam_agent_extra_volumes: "{{ neutron_extra_volumes }}"
neutron_metering_agent_extra_volumes: "{{ neutron_extra_volumes }}"
ironic_neutron_agent_extra_volumes: "{{ neutron_extra_volumes }}"
neutron_tls_proxy_extra_volumes: "{{ neutron_extra_volumes }}"
+neutron_ovn_agent_extra_volumes: "{{ neutron_extra_volumes }}"
####################
# OpenStack
@@ -567,6 +630,10 @@ neutron_tls_proxy_extra_volumes: "{{ neutron_extra_volumes }}"
dhcp_agents_per_network: 2
max_l3_agents_per_router: 3
+# Adds a delay (in seconds) to the serial neutron_l3_agent container restart
+# process, allowing routers to fail over without loss of connectivity.
+neutron_l3_agent_failover_delay: 0
+
ovsdb_timeout: 10
neutron_logging_debug: "{{ openstack_logging_debug }}"
@@ -589,6 +656,8 @@ neutron_metadata_workers: "{{ openstack_service_workers }}"
# Subprojects
####################
neutron_subprojects:
+ - name: "neutron-fwaas"
+ enabled: "{{ enable_neutron_fwaas | bool }}"
- name: "networking-sfc"
enabled: "{{ enable_neutron_sfc | bool }}"
- name: "neutron-dynamic-routing"
@@ -597,6 +666,8 @@ neutron_subprojects:
enabled: "{{ enable_neutron_vpnaas | bool }}"
- name: "vmware-nsx"
enabled: "{{ neutron_plugin_agent in ['vmware_dvs', 'vmware_nsxv', 'vmware_nsxv3', 'vmware_nsxp'] }}"
+ - name: "tap-as-a-service"
+ enabled: "{{ enable_neutron_taas | bool }}"
####################
# Mechanism drivers
@@ -628,7 +699,7 @@ extension_drivers:
- name: "port_security"
enabled: true
- name: "subnet_dns_publish_fixed_ip"
- enabled: "{{ enable_designate | bool }}"
+ enabled: "{{ neutron_dns_integration | bool }}"
- name: "sfc"
enabled: "{{ enable_neutron_sfc | bool }}"
@@ -645,10 +716,19 @@ neutron_bootstrap_services: "{{ neutron_subprojects | selectattr('enabled') | ma
neutron_enable_rolling_upgrade: "yes"
neutron_rolling_upgrade_services: "{{ neutron_subprojects | selectattr('enabled') | map(attribute='name') | list }}"
+####################
+# Neutron modules
+####################
+neutron_modules_default:
+ - name: 'ip6_tables'
+neutron_modules_extra: []
+
####################
# Service Plugins
####################
service_plugins:
+ - name: "firewall_v2"
+ enabled: "{{ enable_neutron_fwaas | bool }}"
- name: "flow_classifier"
enabled: "{{ enable_neutron_sfc | bool }}"
- name: "metering"
@@ -673,6 +753,8 @@ service_plugins:
enabled: "{{ neutron_plugin_agent == 'ovn' }}"
- name: "log"
enabled: "{{ enable_neutron_packet_logging | bool }}"
+ - name: "taas"
+ enabled: "{{ enable_neutron_taas | bool }}"
neutron_service_plugins: "{{ service_plugins | selectattr('enabled', 'equalto', true) | list }}"
@@ -684,8 +766,6 @@ neutron_notification_topics:
enabled: "{{ enable_ceilometer | bool or enable_neutron_infoblox_ipam_agent | bool }}"
- name: "{{ designate_notifications_topic_name }}"
enabled: "{{ designate_enable_notifications_sink | bool }}"
- - name: vitrage_notifications
- enabled: "{{ enable_vitrage | bool }}"
neutron_enabled_notification_topics: "{{ neutron_notification_topics | selectattr('enabled', 'equalto', true) | list }}"
@@ -701,10 +781,14 @@ agent_extensions:
enabled: "{{ enable_neutron_sriov | bool }}"
- name: "log"
enabled: "{{ enable_neutron_packet_logging | bool }}"
+ - name: "taas"
+ enabled: "{{ enable_neutron_taas | bool }}"
neutron_agent_extensions: "{{ agent_extensions | selectattr('enabled', 'equalto', true) | list }}"
l3_agent_extensions:
+ - name: "fwaas_v2"
+ enabled: "{{ enable_neutron_fwaas | bool }}"
- name: "vpnaas"
enabled: "{{ enable_neutron_vpnaas | bool }}"
- name: "port_forwarding"
@@ -820,6 +904,11 @@ neutron_ks_users:
password: "{{ neutron_keystone_password }}"
role: "admin"
+neutron_ks_user_roles:
+ - project: "service"
+ user: "{{ neutron_keystone_user }}"
+ role: "service"
+
####################
# SRIOV
####################
@@ -832,8 +921,8 @@ syslog_server: "{{ api_interface_address }}"
syslog_neutron_tls_proxy_facility: "local4"
neutron_tls_proxy_max_connections: 40000
-neutron_tls_proxy_processes: 1
-neutron_tls_proxy_process_cpu_map: "no"
+neutron_tls_proxy_threads: 1
+neutron_tls_proxy_thread_cpu_map: "no"
neutron_tls_proxy_defaults_max_connections: 10000
neutron_tls_proxy_http_request_timeout: "10s"
neutron_tls_proxy_http_keep_alive_timeout: "10s"
@@ -844,3 +933,15 @@ neutron_tls_proxy_server_timeout: "1m"
neutron_tls_proxy_check_timeout: "10s"
# Check http://www.haproxy.org/download/1.5/doc/configuration.txt for available options
neutron_tls_proxy_defaults_balance: "roundrobin"
+
+####################
+# DNS
+####################
+neutron_dns_integration: "{{ enable_designate | bool }}"
+# When overridden by the user, this value must end with a dot.
+neutron_dns_domain: "openstacklocal"
+
+###################
+# Copy certificates
+###################
+neutron_copy_certs: "{{ kolla_copy_ca_into_containers | bool or neutron_enable_tls_backend | bool }}"
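
The new defaults above are intended to be overridden from globals.yml. A minimal sketch of such an override, with purely illustrative values (br_netfilter is only an example module; note the trailing dot that the DNS precheck later in this patch enforces):

    # globals.yml (illustrative values only)
    neutron_l3_agent_failover_delay: 30        # seconds between serial L3 agent restarts
    neutron_modules_extra:
      - name: 'br_netfilter'                   # loaded in addition to neutron_modules_default
    neutron_dns_integration: true
    neutron_dns_domain: "example.internal."    # must end with '.' and differ from 'openstacklocal'
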
diff --git a/ansible/roles/neutron/handlers/main.yml b/ansible/roles/neutron/handlers/main.yml
index 20981506a2..ffaf8950ed 100644
--- a/ansible/roles/neutron/handlers/main.yml
+++ b/ansible/roles/neutron/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "neutron-server"
service: "{{ neutron_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -13,15 +13,13 @@
dimensions: "{{ service.dimensions }}"
privileged: "{{ service.privileged | default(False) }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart neutron-openvswitch-agent container
vars:
service_name: "neutron-openvswitch-agent"
service: "{{ neutron_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -31,15 +29,13 @@
dimensions: "{{ service.dimensions }}"
privileged: "{{ service.privileged | default(False) }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart fake neutron-openvswitch-agent container
vars:
service_name: "neutron-openvswitch-agent"
service: "{{ neutron_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -49,15 +45,13 @@
privileged: "{{ service.privileged | default(False) }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
with_sequence: "start=1 end={{ num_nova_fake_per_node }}"
- when:
- - kolla_action != "config"
- name: Restart neutron-linuxbridge-agent container
vars:
service_name: "neutron-linuxbridge-agent"
service: "{{ neutron_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -67,15 +61,13 @@
dimensions: "{{ service.dimensions }}"
privileged: "{{ service.privileged | default(False) }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart neutron-dhcp-agent container
vars:
service_name: "neutron-dhcp-agent"
service: "{{ neutron_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -84,15 +76,56 @@
dimensions: "{{ service.dimensions }}"
privileged: "{{ service.privileged | default(False) }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
+
+- name: Get container facts
+ become: true
+ kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
+ name:
+ - neutron_l3_agent
+ register: container_facts
+ listen: Restart neutron-l3-agent container
+
+- name: Group hosts
+ group_by:
+ key: neutron_l3_agent_running_{{ container_facts['neutron_l3_agent'] is defined }}
+ listen: Restart neutron-l3-agent container
+
+- name: Start stopped neutron-l3-agent container
+ vars:
+ service_name: "neutron-l3-agent"
+ service: "{{ neutron_services[service_name] }}"
+ become: true
+ kolla_container:
+ action: "recreate_or_restart_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ service.container_name }}"
+ image: "{{ service.image }}"
+ environment: "{{ service.environment }}"
+ volumes: "{{ service.volumes | reject('equalto', '') | list }}"
+ dimensions: "{{ service.dimensions }}"
+ privileged: "{{ service.privileged | default(False) }}"
+ healthcheck: "{{ service.healthcheck | default(omit) }}"
+ when:
+ - groups['neutron_l3_agent_running_False'] is defined
+ - inventory_hostname in groups['neutron_l3_agent_running_False']
+ listen: Restart neutron-l3-agent container
+
+- name: Wait if any containers are starting
+ wait_for:
+ timeout: "{{ neutron_l3_agent_failover_delay }}"
when:
- - kolla_action != "config"
+ - groups['neutron_l3_agent_running_False'] is defined
+ - groups['neutron_l3_agent_running_True'] is defined
+ listen: Restart neutron-l3-agent container
-- name: Restart neutron-l3-agent container
+- name: Restart running neutron-l3-agent container
vars:
service_name: "neutron-l3-agent"
service: "{{ neutron_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -103,14 +136,18 @@
privileged: "{{ service.privileged | default(False) }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
when:
- - kolla_action != "config"
+ - inventory_hostname == item
+ loop: "{{ groups['neutron_l3_agent_running_True'] | default([]) }}"
+ loop_control:
+ pause: "{{ neutron_l3_agent_failover_delay }}"
+ listen: Restart neutron-l3-agent container
- name: Restart neutron-sriov-agent container
vars:
service_name: "neutron-sriov-agent"
service: "{{ neutron_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -119,15 +156,13 @@
dimensions: "{{ service.dimensions }}"
privileged: "{{ service.privileged | default(False) }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart neutron-mlnx-agent container
vars:
service_name: "neutron-mlnx-agent"
service: "{{ neutron_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -135,15 +170,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
privileged: "{{ service.privileged | default(False) }}"
- when:
- - kolla_action != "config"
- name: Restart neutron-eswitchd container
vars:
service_name: "neutron-eswitchd"
service: "{{ neutron_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -151,15 +184,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
privileged: "{{ service.privileged | default(False) }}"
- when:
- - kolla_action != "config"
- name: Restart neutron-tls-proxy container
vars:
service_name: "neutron-tls-proxy"
service: "{{ neutron_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -167,15 +198,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart neutron-metadata-agent container
vars:
service_name: "neutron-metadata-agent"
service: "{{ neutron_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -184,15 +213,13 @@
dimensions: "{{ service.dimensions }}"
privileged: "{{ service.privileged | default(False) }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart neutron-ovn-metadata-agent container
vars:
service_name: "neutron-ovn-metadata-agent"
service: "{{ neutron_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -201,15 +228,13 @@
dimensions: "{{ service.dimensions }}"
privileged: "{{ service.privileged | default(False) }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart neutron-bgp-dragent container
vars:
service_name: "neutron-bgp-dragent"
service: "{{ neutron_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -218,15 +243,13 @@
dimensions: "{{ service.dimensions }}"
privileged: "{{ service.privileged | default(False) }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart neutron-infoblox-ipam-agent container
vars:
service_name: "neutron-infoblox-ipam-agent"
service: "{{ neutron_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -235,15 +258,13 @@
dimensions: "{{ service.dimensions }}"
privileged: "{{ service.privileged | default(False) }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart neutron-metering-agent container
vars:
service_name: "neutron-metering-agent"
service: "{{ neutron_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -252,15 +273,28 @@
dimensions: "{{ service.dimensions }}"
privileged: "{{ service.privileged | default(False) }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart ironic-neutron-agent container
vars:
service_name: "ironic-neutron-agent"
service: "{{ neutron_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
+ action: "recreate_or_restart_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ service.container_name }}"
+ image: "{{ service.image }}"
+ volumes: "{{ service.volumes }}"
+ dimensions: "{{ service.dimensions }}"
+ privileged: "{{ service.privileged | default(False) }}"
+ healthcheck: "{{ service.healthcheck | default(omit) }}"
+
+- name: Restart neutron-ovn-agent container
+ vars:
+ service_name: "neutron-ovn-agent"
+ service: "{{ neutron_services[service_name] }}"
+ become: true
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -269,5 +303,3 @@
dimensions: "{{ service.dimensions }}"
privileged: "{{ service.privileged | default(False) }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
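
The reworked neutron-l3-agent handler chain above performs a rolling restart: container facts split hosts into those where the agent is already running and those where it is not, stopped agents are started immediately, and running agents are then restarted one host at a time with neutron_l3_agent_failover_delay seconds between hosts so routers can fail over. A self-contained sketch of the same group_by-plus-paused-loop pattern, using the hypothetical variables agent_is_running and failover_delay (not part of this change):

    - hosts: network
      gather_facts: false
      tasks:
        - name: Split hosts by whether the agent container is already running
          group_by:
            key: "agent_running_{{ agent_is_running | default(false) }}"

        - name: Restart running agents serially, pausing between hosts
          debug:
            msg: "Restarting agent on {{ item }}"
          when: inventory_hostname == item
          loop: "{{ groups['agent_running_True'] | default([]) }}"
          loop_control:
            pause: "{{ failover_delay | default(0) }}"
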
diff --git a/ansible/roles/neutron/tasks/bootstrap.yml b/ansible/roles/neutron/tasks/bootstrap.yml
index 7dc876bcb1..75c150817a 100644
--- a/ansible/roles/neutron/tasks/bootstrap.yml
+++ b/ansible/roles/neutron/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating Neutron database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -17,6 +18,7 @@
- name: Creating Neutron database user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
diff --git a/ansible/roles/neutron/tasks/bootstrap_service.yml b/ansible/roles/neutron/tasks/bootstrap_service.yml
index 6c212bd690..094f4ca968 100644
--- a/ansible/roles/neutron/tasks/bootstrap_service.yml
+++ b/ansible/roles/neutron/tasks/bootstrap_service.yml
@@ -3,7 +3,7 @@
vars:
neutron_server: "{{ neutron_services['neutron-server'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -15,7 +15,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_neutron"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ neutron_server.volumes }}"
run_once: True
delegate_to: "{{ groups[neutron_server.group][0] }}"
diff --git a/ansible/roles/neutron/tasks/check-containers.yml b/ansible/roles/neutron/tasks/check-containers.yml
index 19917220a3..b7e2f7c29f 100644
--- a/ansible/roles/neutron/tasks/check-containers.yml
+++ b/ansible/roles/neutron/tasks/check-containers.yml
@@ -1,19 +1,3 @@
---
-- name: Check neutron containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- privileged: "{{ item.value.privileged | default(False) }}"
- volumes: "{{ item.value.volumes }}"
- dimensions: "{{ item.value.dimensions }}"
- environment: "{{ item.value.environment | default(omit) }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - item.value.enabled | bool
- - item.value.host_in_groups | bool
- with_dict: "{{ neutron_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/neutron/tasks/config-host.yml b/ansible/roles/neutron/tasks/config-host.yml
index b0b1c6fc38..a33919b123 100644
--- a/ansible/roles/neutron/tasks/config-host.yml
+++ b/ansible/roles/neutron/tasks/config-host.yml
@@ -1,10 +1,9 @@
---
-- name: Load and persist ip6_tables module
+- name: Load and persist kernel modules
include_role:
name: module-load
vars:
- modules:
- - {'name': ip6_tables}
+ modules: "{{ neutron_modules_default + neutron_modules_extra }}"
when: >-
neutron_services |
select_services_enabled_and_mapped_to_host |
@@ -13,25 +12,23 @@
list |
length > 0
+- name: Check IPv6 support
+ command: /usr/sbin/sysctl -n net.ipv6.conf.all.disable_ipv6
+ register: ipv6_disabled
+ changed_when: false
+
- name: Setting sysctl values
- become: true
+ include_role:
+ name: sysctl
vars:
- neutron_l3_agent: "{{ neutron_services['neutron-l3-agent'] }}"
- should_set: "{{ item.value != 'KOLLA_UNSET' }}"
- sysctl:
- name: "{{ item.name }}"
- state: "{{ should_set | ternary('present', 'absent') }}"
- value: "{{ should_set | ternary(item.value, omit) }}"
- sysctl_set: "{{ should_set }}"
- sysctl_file: "{{ kolla_sysctl_conf_path }}"
- with_items:
- - { name: "net.ipv4.neigh.default.gc_thresh1", value: "{{ neutron_l3_agent_host_ipv4_neigh_gc_thresh1 }}"}
- - { name: "net.ipv4.neigh.default.gc_thresh2", value: "{{ neutron_l3_agent_host_ipv4_neigh_gc_thresh2 }}"}
- - { name: "net.ipv4.neigh.default.gc_thresh3", value: "{{ neutron_l3_agent_host_ipv4_neigh_gc_thresh3 }}"}
- - { name: "net.ipv6.neigh.default.gc_thresh1", value: "{{ neutron_l3_agent_host_ipv6_neigh_gc_thresh1 }}"}
- - { name: "net.ipv6.neigh.default.gc_thresh2", value: "{{ neutron_l3_agent_host_ipv6_neigh_gc_thresh2 }}"}
- - { name: "net.ipv6.neigh.default.gc_thresh3", value: "{{ neutron_l3_agent_host_ipv6_neigh_gc_thresh3 }}"}
+ service: "{{ neutron_services['neutron-l3-agent'] }}"
+ settings:
+ - { name: "net.ipv4.neigh.default.gc_thresh1", value: "{{ neutron_l3_agent_host_ipv4_neigh_gc_thresh1 }}"}
+ - { name: "net.ipv4.neigh.default.gc_thresh2", value: "{{ neutron_l3_agent_host_ipv4_neigh_gc_thresh2 }}"}
+ - { name: "net.ipv4.neigh.default.gc_thresh3", value: "{{ neutron_l3_agent_host_ipv4_neigh_gc_thresh3 }}"}
+ - { name: "net.ipv6.neigh.default.gc_thresh1", value: "{{ neutron_l3_agent_host_ipv6_neigh_gc_thresh1 }}"}
+ - { name: "net.ipv6.neigh.default.gc_thresh2", value: "{{ neutron_l3_agent_host_ipv6_neigh_gc_thresh2 }}"}
+ - { name: "net.ipv6.neigh.default.gc_thresh3", value: "{{ neutron_l3_agent_host_ipv6_neigh_gc_thresh3 }}"}
when:
- set_sysctl | bool
- - item.value != 'KOLLA_SKIP'
- - (neutron_l3_agent.enabled | bool and neutron_l3_agent.host_in_groups | bool)
+ - service | service_enabled_and_mapped_to_host
diff --git a/ansible/roles/neutron/tasks/config-neutron-fake.yml b/ansible/roles/neutron/tasks/config-neutron-fake.yml
index d392990169..ea26ba7c5d 100644
--- a/ansible/roles/neutron/tasks/config-neutron-fake.yml
+++ b/ansible/roles/neutron/tasks/config-neutron-fake.yml
@@ -64,7 +64,7 @@
vars:
neutron_openvswitch_agent: "{{ neutron_services['neutron-openvswitch-agent'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "compare_container"
common_options: "{{ docker_common_options }}"
name: "{{ neutron_openvswitch_agent.container_name }}"
diff --git a/ansible/roles/neutron/tasks/config.yml b/ansible/roles/neutron/tasks/config.yml
index 07de73cf18..f5f307a376 100644
--- a/ansible/roles/neutron/tasks/config.yml
+++ b/ansible/roles/neutron/tasks/config.yml
@@ -7,10 +7,7 @@
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
- when:
- - item.value.enabled | bool
- - item.value.host_in_groups | bool
- with_dict: "{{ neutron_services }}"
+ with_dict: "{{ neutron_services | select_services_enabled_and_mapped_to_host }}"
- name: Check if extra ml2 plugins exists
find:
@@ -22,11 +19,11 @@
- include_tasks: copy-certs.yml
when:
- - kolla_copy_ca_into_containers | bool or neutron_enable_tls_backend | bool
+ - neutron_copy_certs
- name: Creating TLS backend PEM File
vars:
- neutron_tls_proxy: "{{ neutron_services['neutron-tls-proxy'] }}"
+ service: "{{ neutron_services['neutron-tls-proxy'] }}"
assemble:
src: "{{ node_config_directory }}/neutron-tls-proxy/"
dest: "{{ node_config_directory }}/neutron-tls-proxy/neutron-cert-and-key.pem"
@@ -34,9 +31,7 @@
regexp: "^neutron-(cert|key)\\.pem$"
remote_src: true
become: true
- when:
- - neutron_tls_proxy.enabled | bool
- - neutron_tls_proxy.host_in_groups | bool
+ when: service | service_enabled_and_mapped_to_host
- name: Check if policies shall be overwritten
stat:
@@ -65,11 +60,7 @@
become: true
when:
- neutron_policy_file is defined
- - item.value.enabled | bool
- - item.value.host_in_groups | bool
- with_dict: "{{ neutron_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ neutron_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over config.json files for services
become: true
@@ -77,12 +68,7 @@
src: "{{ item.key }}.json.j2"
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
- when:
- - item.value.enabled | bool
- - item.value.host_in_groups | bool
- with_dict: "{{ neutron_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ neutron_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over neutron.conf
become: true
@@ -103,6 +89,7 @@
- "neutron-sriov-agent"
- "neutron-mlnx-agent"
- "neutron-eswitchd"
+ - "neutron-ovn-agent"
merge_configs:
sources:
- "{{ role_path }}/templates/neutron.conf.j2"
@@ -113,12 +100,8 @@
dest: "{{ node_config_directory }}/{{ item.key }}/neutron.conf"
mode: "0660"
when:
- - item.value.enabled | bool
- - item.value.host_in_groups | bool
- item.key in services_need_neutron_conf
- with_dict: "{{ neutron_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ neutron_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over neutron_vpnaas.conf
become: true
@@ -135,24 +118,18 @@
dest: "{{ node_config_directory }}/{{ item.key }}/neutron_vpnaas.conf"
mode: "0660"
when:
- - item.value.enabled | bool
- - item.value.host_in_groups | bool
- item.key in services_need_neutron_vpnaas_conf
- with_dict: "{{ neutron_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ neutron_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over ssh key
become: true
vars:
- neutron_server: "{{ neutron_services['neutron-server'] }}"
+ service: "{{ neutron_services['neutron-server'] }}"
template:
src: "id_rsa"
dest: "{{ node_config_directory }}/neutron-server/id_rsa"
mode: 0600
- when:
- - neutron_server.enabled | bool
- - neutron_server.host_in_groups | bool
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over ml2_conf.ini
become: true
@@ -170,16 +147,13 @@
mode: "0660"
when:
- item.key in services_need_ml2_conf_ini
- - item.value.enabled | bool
- - item.value.host_in_groups | bool
- with_dict: "{{ neutron_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ neutron_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over linuxbridge_agent.ini
become: true
vars:
service_name: "neutron-linuxbridge-agent"
+ service: "{{ neutron_services[service_name] }}"
merge_configs:
sources:
- "{{ role_path }}/templates/linuxbridge_agent.ini.j2"
@@ -187,16 +161,13 @@
- "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/linuxbridge_agent.ini"
dest: "{{ node_config_directory }}/{{ service_name }}/linuxbridge_agent.ini"
mode: "0660"
- when:
- - neutron_services[service_name].enabled | bool
- - neutron_services[service_name].host_in_groups | bool
- notify:
- - "Restart {{ service_name }} container"
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over openvswitch_agent.ini
become: true
vars:
service_name: "neutron-openvswitch-agent"
+ service: "{{ neutron_services[service_name] }}"
merge_configs:
sources:
- "{{ role_path }}/templates/openvswitch_agent.ini.j2"
@@ -204,17 +175,13 @@
- "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/openvswitch_agent.ini"
dest: "{{ node_config_directory }}/{{ service_name }}/openvswitch_agent.ini"
mode: "0660"
- when:
- - neutron_services[service_name].enabled | bool
- - neutron_services[service_name].host_in_groups | bool
- notify:
- - "Restart {{ service_name }} container"
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over sriov_agent.ini
become: true
vars:
service_name: "neutron-sriov-agent"
- neutron_sriov_agent: "{{ neutron_services[service_name] }}"
+ service: "{{ neutron_services[service_name] }}"
merge_configs:
sources:
- "{{ role_path }}/templates/sriov_agent.ini.j2"
@@ -222,17 +189,13 @@
- "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/sriov_agent.ini"
dest: "{{ node_config_directory }}/{{ service_name }}/sriov_agent.ini"
mode: "0660"
- when:
- - neutron_sriov_agent.enabled | bool
- - neutron_sriov_agent.host_in_groups | bool
- notify:
- - "Restart {{ service_name }} container"
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over mlnx_agent.ini
become: true
vars:
service_name: "neutron-mlnx-agent"
- neutron_mlnx_agent: "{{ neutron_services[service_name] }}"
+ service: "{{ neutron_services[service_name] }}"
merge_configs:
sources:
- "{{ role_path }}/templates/mlnx_agent.ini.j2"
@@ -240,17 +203,13 @@
- "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/mlnx_agent.ini"
dest: "{{ node_config_directory }}/{{ service_name }}/mlnx_agent.ini"
mode: "0660"
- when:
- - neutron_mlnx_agent.enabled | bool
- - neutron_mlnx_agent.host_in_groups | bool
- notify:
- - "Restart {{ service_name }} container"
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over eswitchd.conf
become: true
vars:
service_name: "neutron-eswitchd"
- neutron_eswitchd: "{{ neutron_services[service_name] }}"
+ service: "{{ neutron_services[service_name] }}"
merge_configs:
sources:
- "{{ role_path }}/templates/eswitchd.conf.j2"
@@ -258,17 +217,13 @@
- "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/eswitchd.conf"
dest: "{{ node_config_directory }}/{{ service_name }}/eswitchd.conf"
mode: "0660"
- when:
- - neutron_eswitchd.enabled | bool
- - neutron_eswitchd.host_in_groups | bool
- notify:
- - "Restart {{ service_name }} container"
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over dhcp_agent.ini
become: true
vars:
service_name: "neutron-dhcp-agent"
- neutron_dhcp_agent: "{{ neutron_services[service_name] }}"
+ service: "{{ neutron_services[service_name] }}"
merge_configs:
sources:
- "{{ role_path }}/templates/dhcp_agent.ini.j2"
@@ -276,17 +231,13 @@
- "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/dhcp_agent.ini"
dest: "{{ node_config_directory }}/{{ service_name }}/dhcp_agent.ini"
mode: "0660"
- when:
- - neutron_dhcp_agent.enabled | bool
- - neutron_dhcp_agent.host_in_groups | bool
- notify:
- - "Restart {{ service_name }} container"
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over dnsmasq.conf
become: true
vars:
service_name: "neutron-dhcp-agent"
- neutron_dhcp_agent: "{{ neutron_services[service_name] }}"
+ service: "{{ neutron_services[service_name] }}"
template:
src: "{{ item }}"
dest: "{{ node_config_directory }}/{{ service_name }}/dnsmasq.conf"
@@ -295,11 +246,7 @@
- "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/dnsmasq.conf"
- "{{ node_custom_config }}/neutron/dnsmasq.conf"
- "dnsmasq.conf.j2"
- when:
- - neutron_dhcp_agent.enabled | bool
- - neutron_dhcp_agent.host_in_groups | bool
- notify:
- - "Restart {{ service_name }} container"
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over l3_agent.ini
become: true
@@ -316,102 +263,109 @@
mode: "0660"
when:
- item.key in services_need_l3_agent_ini
- - item.value.enabled | bool
- - item.value.host_in_groups | bool
- with_dict: "{{ neutron_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ neutron_services | select_services_enabled_and_mapped_to_host }}"
+
+- name: Copying over fwaas_driver.ini
+ become: true
+ vars:
+ service_name: "{{ item.key }}"
+ services_need_fwaas_driver_ini:
+ - "neutron-server"
+ - "neutron-l3-agent"
+ merge_configs:
+ sources:
+ - "{{ role_path }}/templates/fwaas_driver.ini.j2"
+ - "{{ node_custom_config }}/neutron/fwaas_driver.ini"
+ dest: "{{ node_config_directory }}/{{ service_name }}/fwaas_driver.ini"
+ mode: "0660"
+ when:
+ - enable_neutron_fwaas | bool
+ - item.key in services_need_fwaas_driver_ini
+ with_dict: "{{ neutron_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over metadata_agent.ini
become: true
vars:
service_name: "neutron-metadata-agent"
- neutron_metadata_agent: "{{ neutron_services[service_name] }}"
+ service: "{{ neutron_services[service_name] }}"
merge_configs:
sources:
- "{{ role_path }}/templates/metadata_agent.ini.j2"
- "{{ node_custom_config }}/neutron/metadata_agent.ini"
dest: "{{ node_config_directory }}/{{ service_name }}/metadata_agent.ini"
mode: "0660"
- when:
- - neutron_metadata_agent.enabled | bool
- - neutron_metadata_agent.host_in_groups | bool
- notify:
- - "Restart {{ service_name }} container"
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over neutron_ovn_metadata_agent.ini
become: true
vars:
service_name: "neutron-ovn-metadata-agent"
- neutron_ovn_metadata_agent: "{{ neutron_services[service_name] }}"
+ service: "{{ neutron_services[service_name] }}"
merge_configs:
sources:
- "{{ role_path }}/templates/neutron_ovn_metadata_agent.ini.j2"
- "{{ node_custom_config }}/neutron/neutron_ovn_metadata_agent.ini"
dest: "{{ node_config_directory }}/{{ service_name }}/neutron_ovn_metadata_agent.ini"
mode: "0660"
- when:
- - neutron_ovn_metadata_agent.enabled | bool
- - neutron_ovn_metadata_agent.host_in_groups | bool
- notify:
- - "Restart {{ service_name }} container"
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over metering_agent.ini
become: true
vars:
service_name: "neutron-metering-agent"
- neutron_metering_agent: "{{ neutron_services[service_name] }}"
+ service: "{{ neutron_services[service_name] }}"
merge_configs:
sources:
- "{{ role_path }}/templates/metering_agent.ini.j2"
- "{{ node_custom_config }}/neutron/metering_agent.ini"
dest: "{{ node_config_directory }}/{{ service_name }}/metering_agent.ini"
mode: "0660"
- when:
- - neutron_metering_agent.enabled | bool
- - neutron_metering_agent.host_in_groups | bool
- notify:
- - "Restart {{ service_name }} container"
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over ironic_neutron_agent.ini
become: true
vars:
service_name: "ironic-neutron-agent"
- ironic_neutron_agent: "{{ neutron_services[service_name] }}"
+ service: "{{ neutron_services[service_name] }}"
merge_configs:
sources:
- "{{ role_path }}/templates/ironic_neutron_agent.ini.j2"
- "{{ node_custom_config }}/neutron/ironic_neutron_agent.ini"
dest: "{{ node_config_directory }}/{{ service_name }}/ironic_neutron_agent.ini"
mode: "0660"
- when:
- - ironic_neutron_agent.enabled | bool
- - ironic_neutron_agent.host_in_groups | bool
- notify:
- - "Restart {{ service_name }} container"
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over bgp_dragent.ini
become: true
vars:
service_name: "neutron-bgp-dragent"
- neutron_bgp_dragent: "{{ neutron_services[service_name] }}"
+ service: "{{ neutron_services[service_name] }}"
merge_configs:
sources:
- "{{ role_path }}/templates/bgp_dragent.ini.j2"
- "{{ node_custom_config }}/neutron/bgp_dragent.ini"
dest: "{{ node_config_directory }}/{{ service_name }}/bgp_dragent.ini"
mode: "0660"
- when:
- - neutron_bgp_dragent.enabled | bool
- - neutron_bgp_dragent.host_in_groups | bool
- notify:
- - "Restart {{ service_name }} container"
+ when: service | service_enabled_and_mapped_to_host
+
+- name: Copying over ovn_agent.ini
+ become: true
+ vars:
+ service_name: "neutron-ovn-agent"
+ service: "{{ neutron_services[service_name] }}"
+ merge_configs:
+ sources:
+ - "{{ role_path }}/templates/ovn_agent.ini.j2"
+ - "{{ node_custom_config }}/neutron/ovn_agent.ini"
+ dest: "{{ node_config_directory }}/{{ service_name }}/ovn_agent.ini"
+ mode: "0660"
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over nsx.ini
become: true
vars:
service_name: "neutron-server"
- neutron_server: "{{ neutron_services[service_name] }}"
+ service: "{{ neutron_services[service_name] }}"
merge_configs:
sources:
- "{{ role_path }}/templates/nsx.ini.j2"
@@ -420,11 +374,8 @@
dest: "{{ node_config_directory }}/{{ service_name }}/nsx.ini"
mode: "0660"
when:
- - neutron_server.enabled | bool
- - neutron_server.host_in_groups | bool
+ - service | service_enabled_and_mapped_to_host
- neutron_plugin_agent in ['vmware_nsxv', 'vmware_nsxv3', 'vmware_nsxp', 'vmware_dvs']
- notify:
- - "Restart {{ service_name }} container"
- name: Copy neutron-l3-agent-wrapper script
become: true
@@ -435,11 +386,7 @@
src: neutron-l3-agent-wrapper.sh.j2
dest: "{{ node_config_directory }}/{{ service_name }}/neutron-l3-agent-wrapper.sh"
mode: "0770"
- when:
- - service.enabled | bool
- - service.host_in_groups | bool
- notify:
- - "Restart {{ service_name }} container"
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over extra ml2 plugins
become: true
@@ -461,12 +408,10 @@
with_nested:
- "{{ neutron_services | dictsort }}"
- "{{ check_extra_ml2_plugins.files }}"
- notify:
- - "Restart {{ item.0 }} container"
- name: Copying over neutron-tls-proxy.cfg
vars:
- neutron_tls_proxy: "{{ neutron_services['neutron-tls-proxy'] }}"
+ service: "{{ neutron_services['neutron-tls-proxy'] }}"
template:
src: "{{ item }}"
dest: "{{ node_config_directory }}/neutron-tls-proxy/neutron-tls-proxy.cfg"
@@ -476,8 +421,23 @@
- "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/neutron-tls-proxy.cfg"
- "{{ node_custom_config }}/neutron/neutron-tls-proxy.cfg"
- "neutron-tls-proxy.cfg.j2"
+ when: service | service_enabled_and_mapped_to_host
+
+- name: Copying over neutron_taas.conf
+ become: true
+ vars:
+ service_name: "{{ item.key }}"
+ services_need_neutron_taas_conf:
+ - "neutron-server"
+ - "neutron-openvswitch-agent"
+ merge_configs:
+ sources:
+ - "{{ role_path }}/templates/neutron_taas.conf.j2"
+ - "{{ node_custom_config }}/neutron/neutron_taas.conf"
+ - "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/neutron_taas.conf"
+ dest: "{{ node_config_directory }}/{{ item.key }}/neutron_taas.conf"
+ mode: "0660"
when:
- - neutron_tls_proxy.enabled | bool
- - neutron_tls_proxy.host_in_groups | bool
- notify:
- - Restart neutron-tls-proxy container
+ - enable_neutron_taas | bool
+ - item.key in services_need_neutron_taas_conf
+ with_dict: "{{ neutron_services | select_services_enabled_and_mapped_to_host }}"
diff --git a/ansible/roles/neutron/tasks/config_validate.yml b/ansible/roles/neutron/tasks/config_validate.yml
new file mode 100644
index 0000000000..5459c7b014
--- /dev/null
+++ b/ansible/roles/neutron/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ neutron_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ neutron_config_validation }}"
diff --git a/ansible/roles/neutron/tasks/deploy.yml b/ansible/roles/neutron/tasks/deploy.yml
index 49d79e20d8..68a1f941d7 100644
--- a/ansible/roles/neutron/tasks/deploy.yml
+++ b/ansible/roles/neutron/tasks/deploy.yml
@@ -1,4 +1,6 @@
---
+- import_tasks: neutron_plugin_agent_check.yml
+
- import_tasks: register.yml
- include_tasks: clone.yml
diff --git a/ansible/roles/neutron/tasks/neutron_plugin_agent_check.yml b/ansible/roles/neutron/tasks/neutron_plugin_agent_check.yml
new file mode 100644
index 0000000000..8fa64093fc
--- /dev/null
+++ b/ansible/roles/neutron/tasks/neutron_plugin_agent_check.yml
@@ -0,0 +1,36 @@
+---
+- name: Get container facts
+ become: true
+ kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
+ name:
+ - neutron_openvswitch_agent
+ - ovn_controller
+ check_mode: false
+ register: container_facts
+
+- name: Get container volume facts
+ become: true
+ kolla_container_volume_facts:
+ container_engine: "{{ kolla_container_engine }}"
+ name:
+ - ovn_nb_db
+ - ovn_sb_db
+ check_mode: false
+ register: container_volume_facts
+
+- name: Check for ML2/OVN presence
+ assert:
+ that: neutron_plugin_agent == 'ovn'
+ fail_msg: "ML2/OVN agent detected, neutron_plugin_agent is not set to 'ovn', Kolla-Ansible does not support this migration operation."
+ when: (container_facts['ovn_controller'] is defined) or (container_volume_facts['ovn_nb_db'] is defined) or (container_volume_facts['ovn_sb_db'] is defined)
+
+- name: Check for ML2/OVS presence
+ assert:
+ that:
+ - neutron_plugin_agent == 'openvswitch'
+ - container_volume_facts['ovn_nb_db'] is not defined
+ - container_volume_facts['ovn_sb_db'] is not defined
+ fail_msg: "ML2/OVS agent detected, neutron_plugin_agent is not set to 'openvswitch', Kolla-Ansible does not support this migration operation."
+ when: container_facts['neutron_openvswitch_agent'] is defined
diff --git a/ansible/roles/neutron/tasks/precheck.yml b/ansible/roles/neutron/tasks/precheck.yml
index e58ef40530..74f1eabc2b 100644
--- a/ansible/roles/neutron/tasks/precheck.yml
+++ b/ansible/roles/neutron/tasks/precheck.yml
@@ -8,8 +8,11 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- neutron_server
+ check_mode: false
register: container_facts
- name: Checking free port for Neutron Server
@@ -24,31 +27,45 @@
- inventory_hostname in groups['neutron-server']
- name: Checking number of network agents
- fail:
- msg: "Number of network agents are less than two when enabling agent ha"
+ assert:
+ that:
+ - groups['neutron-dhcp-agent'] | length > 1
+ - groups['neutron-l3-agent'] | length > 1
+ fail_msg: "Number of network agents are less than two when enabling agent ha"
changed_when: false
run_once: True
when:
- enable_neutron_agent_ha | bool
- - groups['neutron-dhcp-agent'] | length < 2
- or groups['neutron-l3-agent'] | length < 2
- name: Checking tenant network types
+ assert:
+ that: item in type_drivers
+ fail_msg: "Tenant network type '{{ item }}' is not in type drivers [{{ neutron_type_drivers }}]"
+ with_items: "{{ tenant_network_types }}"
vars:
type_drivers: "{{ neutron_type_drivers.replace(' ', '').split(',') | reject('equalto', '') | list }}"
tenant_network_types: "{{ neutron_tenant_network_types.replace(' ', '').split(',') | reject('equalto', '') | list }}"
- fail:
- msg: "Tenant network type '{{ item }}' is not in type drivers [{{ neutron_type_drivers }}]"
- changed_when: false
- when: item not in type_drivers
run_once: true
- with_items: "{{ tenant_network_types }}"
- name: Checking whether Ironic enabled
- fail:
- msg: "Ironic must be enabled when using networking-baremetal/ironic-neutron-agent"
- changed_when: false
+ assert:
+ that: enable_ironic | bool
+ fail_msg: "Ironic must be enabled when using networking-baremetal/ironic-neutron-agent"
run_once: True
when:
- enable_ironic_neutron_agent | bool
- - not (enable_ironic | bool)
+
+- name: Checking if neutron's dns domain has proper value
+ assert:
+ that:
+ - neutron_dns_domain != None
+ - neutron_dns_domain | length != 0
+ - neutron_dns_domain[-1] == '.'
+ - neutron_dns_domain != "openstacklocal"
+ fail_msg: "The neutron_dns_domain value has to be non-empty and must end with a period '.'"
+ changed_when: false
+ run_once: True
+ when:
+ - neutron_dns_integration | bool
+
+- import_tasks: neutron_plugin_agent_check.yml
diff --git a/ansible/roles/neutron/tasks/register.yml b/ansible/roles/neutron/tasks/register.yml
index 1dc2e4dbc0..17a6ca2cb9 100644
--- a/ansible/roles/neutron/tasks/register.yml
+++ b/ansible/roles/neutron/tasks/register.yml
@@ -5,3 +5,4 @@
service_ks_register_auth: "{{ openstack_neutron_auth }}"
service_ks_register_services: "{{ neutron_ks_services }}"
service_ks_register_users: "{{ neutron_ks_users }}"
+ service_ks_register_user_roles: "{{ neutron_ks_user_roles }}"
diff --git a/ansible/roles/neutron/tasks/rolling_upgrade.yml b/ansible/roles/neutron/tasks/rolling_upgrade.yml
index 20c523bbde..d0a98f94ad 100644
--- a/ansible/roles/neutron/tasks/rolling_upgrade.yml
+++ b/ansible/roles/neutron/tasks/rolling_upgrade.yml
@@ -14,7 +14,7 @@
vars:
neutron_server: "{{ neutron_services['neutron-server'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -27,7 +27,7 @@
labels:
UPGRADE:
name: "bootstrap_neutron"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ neutron_server.volumes }}"
run_once: True
delegate_to: "{{ groups['neutron-server'][0] }}"
@@ -45,19 +45,19 @@
- name: Stopping all neutron-server for contract db
vars:
- neutron_server: "{{ neutron_services['neutron-server'] }}"
- first_neutron_server_host: "{{ groups[neutron_server.group][0] }}"
+ service: "{{ neutron_services['neutron-server'] }}"
+ first_neutron_server_host: "{{ groups[service.group][0] }}"
results_of_check_pending_contract_scripts: "{{ hostvars[first_neutron_server_host]['neutron_check_contract_db_stdout'] }}"
# NOTE(hrw): no idea
filter_rc: "results[?rc!=`0`]"
is_stop_neutron_server: "{{ results_of_check_pending_contract_scripts | json_query(filter_rc) }}"
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
common_options: "{{ docker_common_options }}"
- name: "{{ neutron_server.container_name }}"
+ name: "{{ service.container_name }}"
when:
- - neutron_server.host_in_groups | bool
+ - service.host_in_groups | bool
- is_stop_neutron_server | length > 0
notify:
- "Restart neutron-server container"
@@ -66,7 +66,7 @@
vars:
neutron_server: "{{ neutron_services['neutron-server'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -79,7 +79,7 @@
labels:
UPGRADE:
name: "bootstrap_neutron"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ neutron_server.volumes }}"
run_once: True
delegate_to: "{{ groups['neutron-server'][0] }}"
diff --git a/ansible/roles/neutron/tasks/upgrade.yml b/ansible/roles/neutron/tasks/upgrade.yml
index 9e9355b7da..ccc5b7cd7c 100644
--- a/ansible/roles/neutron/tasks/upgrade.yml
+++ b/ansible/roles/neutron/tasks/upgrade.yml
@@ -1,6 +1,15 @@
---
+- import_tasks: neutron_plugin_agent_check.yml
+
- include_tasks: rolling_upgrade.yml
when: neutron_enable_rolling_upgrade | bool
- include_tasks: legacy_upgrade.yml
when: not neutron_enable_rolling_upgrade | bool
+
+# TODO(mnasiadka): Remove this task in the E cycle.
+- import_role:
+ name: service-ks-register
+ vars:
+ service_ks_register_auth: "{{ openstack_neutron_auth }}"
+ service_ks_register_user_roles: "{{ neutron_ks_user_roles }}"
diff --git a/ansible/roles/neutron/templates/fwaas_driver.ini.j2 b/ansible/roles/neutron/templates/fwaas_driver.ini.j2
new file mode 100644
index 0000000000..b0df0858fb
--- /dev/null
+++ b/ansible/roles/neutron/templates/fwaas_driver.ini.j2
@@ -0,0 +1,11 @@
+[fwaas]
+enabled = True
+{% if neutron_plugin_agent == 'vmware_nsxv' %}
+driver = vmware_nsxv_edge
+{% else %}
+agent_version = v2
+driver = iptables_v2
+
+[service_providers]
+service_provider = FIREWALL_V2:fwaas_db:neutron_fwaas.services.firewall.service_drivers.agents.agents.FirewallAgentDriver:default
+{% endif %}
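
For reference, on the default branch (any neutron_plugin_agent other than 'vmware_nsxv') the new template above renders to:

    [fwaas]
    enabled = True
    agent_version = v2
    driver = iptables_v2

    [service_providers]
    service_provider = FIREWALL_V2:fwaas_db:neutron_fwaas.services.firewall.service_drivers.agents.agents.FirewallAgentDriver:default
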
diff --git a/ansible/roles/neutron/templates/ironic-neutron-agent.json.j2 b/ansible/roles/neutron/templates/ironic-neutron-agent.json.j2
index 33eca34527..f60136490c 100644
--- a/ansible/roles/neutron/templates/ironic-neutron-agent.json.j2
+++ b/ansible/roles/neutron/templates/ironic-neutron-agent.json.j2
@@ -12,7 +12,13 @@
"dest": "/etc/neutron/plugins/ml2/ironic_neutron_agent.ini",
"owner": "neutron",
"perm": "0600"
- }
+ }{% if neutron_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/neutron/templates/linuxbridge_agent.ini.j2 b/ansible/roles/neutron/templates/linuxbridge_agent.ini.j2
index 1dbaae0ede..5b0ae990b8 100644
--- a/ansible/roles/neutron/templates/linuxbridge_agent.ini.j2
+++ b/ansible/roles/neutron/templates/linuxbridge_agent.ini.j2
@@ -5,7 +5,8 @@ extensions = {{ neutron_agent_extensions|map(attribute='name')|join(',') }}
[linux_bridge]
{% if inventory_hostname in groups["network"] or (inventory_hostname in groups["compute"] and computes_need_external_bridge | bool ) %}
-physical_interface_mappings = {% for interface in neutron_external_interface.split(',') %}physnet{{ loop.index0 + 1 }}:{{ interface }}{% if not loop.last %},{% endif %}{% endfor %}
+{# Format: physnet1:br1,physnet2:br2 #}
+physical_interface_mappings = {{ neutron_physical_networks.split(',') | zip(neutron_external_interface.split(',')) | map('join', ':') | join(',') }}
{% endif %}
[securitygroup]
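
As a worked example of the new mapping expression: with the illustrative values neutron_physical_networks = "physnet1,physnet2" and neutron_external_interface = "eth1,eth2", zip pairs the two lists element-wise, map('join', ':') joins each pair, and the outer join yields the documented physnet1:br1,physnet2:br2 format:

    {# ['physnet1','physnet2'] | zip(['eth1','eth2']) -> [['physnet1','eth1'], ['physnet2','eth2']] #}
    {# ... | map('join', ':')                         -> ['physnet1:eth1', 'physnet2:eth2']         #}
    physical_interface_mappings = physnet1:eth1,physnet2:eth2
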
diff --git a/ansible/roles/neutron/templates/metadata_agent.ini.j2 b/ansible/roles/neutron/templates/metadata_agent.ini.j2
index cdee21fcbb..c66cb4dd22 100644
--- a/ansible/roles/neutron/templates/metadata_agent.ini.j2
+++ b/ansible/roles/neutron/templates/metadata_agent.ini.j2
@@ -5,3 +5,4 @@ nova_metadata_host = {{ nova_internal_fqdn }}
nova_metadata_port = {{ nova_metadata_port }}
metadata_proxy_shared_secret = {{ metadata_secret }}
nova_metadata_protocol = {{ internal_protocol }}
+metadata_workers = {{ neutron_metadata_workers }}
diff --git a/ansible/roles/neutron/templates/ml2_conf.ini.j2 b/ansible/roles/neutron/templates/ml2_conf.ini.j2
index e55423e33c..0e34477691 100644
--- a/ansible/roles/neutron/templates/ml2_conf.ini.j2
+++ b/ansible/roles/neutron/templates/ml2_conf.ini.j2
@@ -15,7 +15,7 @@ extension_drivers = {{ neutron_extension_drivers | map(attribute='name') | join(
[ml2_type_vlan]
{% if enable_ironic | bool %}
-network_vlan_ranges = physnet1
+network_vlan_ranges = {{ neutron_physical_networks }}
{% else %}
network_vlan_ranges =
{% endif %}
@@ -24,7 +24,7 @@ network_vlan_ranges =
{% if enable_ironic | bool %}
flat_networks = *
{% else %}
-flat_networks = {% for interface in neutron_external_interface.split(',') %}physnet{{ loop.index0 + 1 }}{% if not loop.last %},{% endif %}{% endfor %}
+flat_networks = {{ neutron_physical_networks }}
{% endif %}
[ml2_type_vxlan]
@@ -40,4 +40,5 @@ ovn_nb_connection = {{ ovn_nb_connection }}
ovn_sb_connection = {{ ovn_sb_connection }}
ovn_metadata_enabled = True
enable_distributed_floating_ip = {{ neutron_ovn_distributed_fip | bool }}
+ovn_emit_need_to_frag = True
{% endif %}
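
Both `network_vlan_ranges` and `flat_networks` now come from the same `neutron_physical_networks` variable instead of being re-derived per template. Its default is defined elsewhere in the change and is presumably one `physnetN` name per external interface; a hedged sketch of that presumed default:

```python
# Hedged sketch of the presumed neutron_physical_networks default:
# one physnetN per comma-separated external interface.
neutron_external_interface = "eth1,eth2,eth3"   # example value

neutron_physical_networks = ",".join(
    f"physnet{i + 1}"
    for i in range(len(neutron_external_interface.split(","))))
print(neutron_physical_networks)  # physnet1,physnet2,physnet3
```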
diff --git a/ansible/roles/neutron/templates/neutron-bgp-dragent.json.j2 b/ansible/roles/neutron/templates/neutron-bgp-dragent.json.j2
index cfce2042d2..1bae040c70 100644
--- a/ansible/roles/neutron/templates/neutron-bgp-dragent.json.j2
+++ b/ansible/roles/neutron/templates/neutron-bgp-dragent.json.j2
@@ -18,6 +18,12 @@
"dest": "/etc/neutron/{{ neutron_policy_file }}",
"owner": "neutron",
"perm": "0600"
+ }{% endif %}{% if neutron_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/neutron/templates/neutron-dhcp-agent.json.j2 b/ansible/roles/neutron/templates/neutron-dhcp-agent.json.j2
index f913957a10..bcf8ecd456 100644
--- a/ansible/roles/neutron/templates/neutron-dhcp-agent.json.j2
+++ b/ansible/roles/neutron/templates/neutron-dhcp-agent.json.j2
@@ -24,6 +24,12 @@
"dest": "/etc/neutron/{{ neutron_policy_file }}",
"owner": "neutron",
"perm": "0600"
+ }{% endif %}{% if neutron_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/neutron/templates/neutron-eswitchd.json.j2 b/ansible/roles/neutron/templates/neutron-eswitchd.json.j2
index 721f4d7660..bb911e05b1 100644
--- a/ansible/roles/neutron/templates/neutron-eswitchd.json.j2
+++ b/ansible/roles/neutron/templates/neutron-eswitchd.json.j2
@@ -12,7 +12,13 @@
"dest": "/etc/neutron/plugins/ml2/eswitchd.conf",
"owner": "neutron",
"perm": "0600"
- }
+ }{% if neutron_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/neutron/templates/neutron-infoblox-ipam-agent.json.j2 b/ansible/roles/neutron/templates/neutron-infoblox-ipam-agent.json.j2
index 24ef182f8c..d91d1b21c2 100644
--- a/ansible/roles/neutron/templates/neutron-infoblox-ipam-agent.json.j2
+++ b/ansible/roles/neutron/templates/neutron-infoblox-ipam-agent.json.j2
@@ -12,7 +12,13 @@
"dest": "/etc/neutron/plugins/ml2/ml2_conf.ini",
"owner": "neutron",
"perm": "0600"
- }
+ }{% if neutron_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/neutron/templates/neutron-l3-agent-wrapper.sh.j2 b/ansible/roles/neutron/templates/neutron-l3-agent-wrapper.sh.j2
index 6960ae4bb4..028988e93d 100644
--- a/ansible/roles/neutron/templates/neutron-l3-agent-wrapper.sh.j2
+++ b/ansible/roles/neutron/templates/neutron-l3-agent-wrapper.sh.j2
@@ -7,10 +7,14 @@ set -o errexit
neutron-netns-cleanup \
--config-file /etc/neutron/neutron.conf \
--config-file /etc/neutron/l3_agent.ini \
+{% if enable_neutron_fwaas | bool %}
+ --config-file /etc/neutron/fwaas_driver.ini \
+{% endif %}
--force --agent-type l3
{% endif %}
neutron-l3-agent \
--config-file /etc/neutron/neutron.conf \
--config-file /etc/neutron/neutron_vpnaas.conf \
- --config-file /etc/neutron/l3_agent.ini \
+ --config-file /etc/neutron/l3_agent.ini{% if enable_neutron_fwaas | bool %} \
+ --config-file /etc/neutron/fwaas_driver.ini{% endif %}
diff --git a/ansible/roles/neutron/templates/neutron-l3-agent.json.j2 b/ansible/roles/neutron/templates/neutron-l3-agent.json.j2
index 5c1d79c330..8e8d77da5d 100644
--- a/ansible/roles/neutron/templates/neutron-l3-agent.json.j2
+++ b/ansible/roles/neutron/templates/neutron-l3-agent.json.j2
@@ -18,7 +18,13 @@
"dest": "/etc/neutron/neutron_vpnaas.conf",
"owner": "neutron",
"perm": "0600"
- },
+ }{% if enable_neutron_fwaas | bool %},
+ {
+ "source": "{{ container_config_directory }}/fwaas_driver.ini",
+ "dest": "/etc/neutron/fwaas_driver.ini",
+ "owner": "neutron",
+ "perm": "0600"
+ }{% endif %},
{
"source": "{{ container_config_directory }}/l3_agent.ini",
"dest": "/etc/neutron/l3_agent.ini",
@@ -30,6 +36,12 @@
"dest": "/etc/neutron/{{ neutron_policy_file }}",
"owner": "neutron",
"perm": "0600"
+ }{% endif %}{% if neutron_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/neutron/templates/neutron-linuxbridge-agent.json.j2 b/ansible/roles/neutron/templates/neutron-linuxbridge-agent.json.j2
index 2ea1dff2a5..e89ee94512 100644
--- a/ansible/roles/neutron/templates/neutron-linuxbridge-agent.json.j2
+++ b/ansible/roles/neutron/templates/neutron-linuxbridge-agent.json.j2
@@ -26,7 +26,13 @@
"dest": "/etc/neutron/plugins/ml2/linuxbridge_agent.ini",
"owner": "neutron",
"perm": "0600"
- }
+ }{% if neutron_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/neutron/templates/neutron-metadata-agent.json.j2 b/ansible/roles/neutron/templates/neutron-metadata-agent.json.j2
index 8d96067228..29d781f732 100644
--- a/ansible/roles/neutron/templates/neutron-metadata-agent.json.j2
+++ b/ansible/roles/neutron/templates/neutron-metadata-agent.json.j2
@@ -18,6 +18,12 @@
"dest": "/etc/neutron/{{ neutron_policy_file }}",
"owner": "neutron",
"perm": "0600"
+ }{% endif %}{% if neutron_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/neutron/templates/neutron-metering-agent.json.j2 b/ansible/roles/neutron/templates/neutron-metering-agent.json.j2
index 6a1d6cef81..1929bbc5d3 100644
--- a/ansible/roles/neutron/templates/neutron-metering-agent.json.j2
+++ b/ansible/roles/neutron/templates/neutron-metering-agent.json.j2
@@ -18,6 +18,12 @@
"dest": "/etc/neutron/{{ neutron_policy_file }}",
"owner": "neutron",
"perm": "0600"
+ }{% endif %}{% if neutron_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/neutron/templates/neutron-mlnx-agent.json.j2 b/ansible/roles/neutron/templates/neutron-mlnx-agent.json.j2
index 812bbd0192..98a99c9f21 100644
--- a/ansible/roles/neutron/templates/neutron-mlnx-agent.json.j2
+++ b/ansible/roles/neutron/templates/neutron-mlnx-agent.json.j2
@@ -12,7 +12,13 @@
"dest": "/etc/neutron/plugins/mlnx/mlnx_agent.ini",
"owner": "neutron",
"perm": "0600"
- }
+ }{% if neutron_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/neutron/templates/neutron-openvswitch-agent-xenapi.json.j2 b/ansible/roles/neutron/templates/neutron-openvswitch-agent-xenapi.json.j2
index 66e969c8ae..06044c858a 100644
--- a/ansible/roles/neutron/templates/neutron-openvswitch-agent-xenapi.json.j2
+++ b/ansible/roles/neutron/templates/neutron-openvswitch-agent-xenapi.json.j2
@@ -18,6 +18,12 @@
"dest": "/etc/neutron/{{ neutron_policy_file }}",
"owner": "neutron",
"perm": "0600"
+ }{% endif %}{% if neutron_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/neutron/templates/neutron-openvswitch-agent.json.j2 b/ansible/roles/neutron/templates/neutron-openvswitch-agent.json.j2
index 2cca76036c..aa18920601 100644
--- a/ansible/roles/neutron/templates/neutron-openvswitch-agent.json.j2
+++ b/ansible/roles/neutron/templates/neutron-openvswitch-agent.json.j2
@@ -7,6 +7,14 @@
"owner": "neutron",
"perm": "0600"
},
+{% if enable_neutron_taas | bool %}
+ {
+ "source": "{{ container_config_directory }}/neutron_taas.conf",
+ "dest": "/etc/neutron/neutron_taas.conf",
+ "owner": "neutron",
+ "perm": "0600"
+ },
+{% endif %}
{% if check_extra_ml2_plugins is defined and check_extra_ml2_plugins.matched > 0 %}{% for plugin in check_extra_ml2_plugins.files %}
{
"source": "{{ container_config_directory }}/{{ plugin.path | basename }}",
@@ -26,7 +34,13 @@
"dest": "/etc/neutron/plugins/ml2/openvswitch_agent.ini",
"owner": "neutron",
"perm": "0600"
- }
+ }{% if neutron_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/neutron/templates/neutron-ovn-agent.json.j2 b/ansible/roles/neutron/templates/neutron-ovn-agent.json.j2
new file mode 100644
index 0000000000..9f49f86dd1
--- /dev/null
+++ b/ansible/roles/neutron/templates/neutron-ovn-agent.json.j2
@@ -0,0 +1,41 @@
+{
+ "command": "neutron-ovn-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ovn_agent.ini",
+ "config_files": [
+ {
+ "source": "{{ container_config_directory }}/neutron.conf",
+ "dest": "/etc/neutron/neutron.conf",
+ "owner": "neutron",
+ "perm": "0600"
+ },
+ {
+ "source": "{{ container_config_directory }}/ovn_agent.ini",
+ "dest": "/etc/neutron/plugins/ml2/ovn_agent.ini",
+ "owner": "neutron",
+ "perm": "0600"
+ }
+ {% if neutron_policy_file is defined %},{
+ "source": "{{ container_config_directory }}/{{ neutron_policy_file }}",
+ "dest": "/etc/neutron/{{ neutron_policy_file }}",
+ "owner": "neutron",
+ "perm": "0600"
+ }{% endif %}{% if neutron_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
+ ],
+ "permissions": [
+ {
+ "path": "/var/log/kolla/neutron",
+ "owner": "neutron:neutron",
+ "recurse": true
+ },
+ {
+ "path": "/var/lib/neutron/kolla",
+ "owner": "neutron:neutron",
+ "recurse": true
+ }
+ ]
+}
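
Like the other kolla config.json files touched here, this new file drives kolla_set_configs inside the container. A simplified sketch of what that amounts to, under the assumption that only copy, mode, and the `permissions` entries matter (ownership and recursion handling are omitted):

```python
# Minimal sketch of what kolla_set_configs does with a config.json like
# the one above: copy each source into place with the given mode.
import json
import shutil
from pathlib import Path

def apply_config(config_path: str) -> None:
    config = json.loads(Path(config_path).read_text())
    for entry in config.get("config_files", []):
        dest = Path(entry["dest"])
        dest.parent.mkdir(parents=True, exist_ok=True)
        shutil.copy(entry["source"], dest)
        dest.chmod(int(entry["perm"], 8))  # e.g. "0600" -> 0o600
```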
diff --git a/ansible/roles/neutron/templates/neutron-ovn-metadata-agent.json.j2 b/ansible/roles/neutron/templates/neutron-ovn-metadata-agent.json.j2
index 6c3850b5c5..7116dc185c 100644
--- a/ansible/roles/neutron/templates/neutron-ovn-metadata-agent.json.j2
+++ b/ansible/roles/neutron/templates/neutron-ovn-metadata-agent.json.j2
@@ -18,6 +18,12 @@
"dest": "/etc/neutron/{{ neutron_policy_file }}",
"owner": "neutron",
"perm": "0600"
+ }{% endif %}{% if neutron_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/neutron/templates/neutron-server.json.j2 b/ansible/roles/neutron/templates/neutron-server.json.j2
index f1e93a72c3..f6af1ebbbd 100644
--- a/ansible/roles/neutron/templates/neutron-server.json.j2
+++ b/ansible/roles/neutron/templates/neutron-server.json.j2
@@ -1,5 +1,5 @@
{
- "command": "neutron-server --config-file /etc/neutron/neutron.conf {% if neutron_plugin_agent in ['openvswitch', 'linuxbridge', 'ovn'] %} --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /etc/neutron/neutron_vpnaas.conf {% elif neutron_plugin_agent in ['vmware_nsxv', 'vmware_nsxv3', 'vmware_nsxp', 'vmware_dvs'] %} --config-file /etc/neutron/plugins/vmware/nsx.ini {% endif %}",
+ "command": "neutron-server --config-file /etc/neutron/neutron.conf {% if neutron_plugin_agent in ['openvswitch', 'linuxbridge', 'ovn'] %} --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /etc/neutron/neutron_vpnaas.conf {% elif neutron_plugin_agent in ['vmware_nsxv', 'vmware_nsxv3', 'vmware_nsxp', 'vmware_dvs'] %} --config-file /etc/neutron/plugins/vmware/nsx.ini {% endif %}{% if enable_neutron_fwaas | bool %}--config-file /etc/neutron/fwaas_driver.ini{% endif %}",
"config_files": [
{
"source": "{{ container_config_directory }}/neutron.conf",
@@ -7,12 +7,28 @@
"owner": "neutron",
"perm": "0600"
},
+{% if enable_neutron_fwaas | bool %}
+ {
+ "source": "{{ container_config_directory }}/fwaas_driver.ini",
+ "dest": "/etc/neutron/fwaas_driver.ini",
+ "owner": "neutron",
+ "perm": "0600"
+ },
+{% endif %}
{
"source": "{{ container_config_directory }}/neutron_vpnaas.conf",
"dest": "/etc/neutron/neutron_vpnaas.conf",
"owner": "neutron",
"perm": "0600"
},
+{% if enable_neutron_taas | bool %}
+ {
+ "source": "{{ container_config_directory }}/neutron_taas.conf",
+ "dest": "/etc/neutron/neutron_taas.conf",
+ "owner": "neutron",
+ "perm": "0600"
+ },
+{% endif %}
{% if neutron_policy_file is defined %}{
"source": "{{ container_config_directory }}/{{ neutron_policy_file }}",
"dest": "/etc/neutron/{{ neutron_policy_file }}",
@@ -44,7 +60,13 @@
"dest": "/var/lib/neutron/.ssh/id_rsa",
"owner": "neutron",
"perm": "0600"
- }
+ }{% if neutron_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/neutron/templates/neutron-sriov-agent.json.j2 b/ansible/roles/neutron/templates/neutron-sriov-agent.json.j2
index 83abe58df3..3baf244f19 100644
--- a/ansible/roles/neutron/templates/neutron-sriov-agent.json.j2
+++ b/ansible/roles/neutron/templates/neutron-sriov-agent.json.j2
@@ -18,6 +18,12 @@
"dest": "/etc/neutron/{{ neutron_policy_file }}",
"owner": "neutron",
"perm": "0600"
+ }{% endif %}{% if neutron_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/neutron/templates/neutron-tls-proxy.cfg.j2 b/ansible/roles/neutron/templates/neutron-tls-proxy.cfg.j2
index cd0a1358ed..9304ac34ce 100644
--- a/ansible/roles/neutron/templates/neutron-tls-proxy.cfg.j2
+++ b/ansible/roles/neutron/templates/neutron-tls-proxy.cfg.j2
@@ -6,15 +6,17 @@ global
daemon
log {{ syslog_server }}:{{ syslog_udp_port }} {{ syslog_neutron_tls_proxy_facility }}
maxconn {{ neutron_tls_proxy_max_connections }}
- nbproc {{ neutron_tls_proxy_processes }}
- {% if (neutron_tls_proxy_processes | int > 1) and (neutron_tls_proxy_process_cpu_map | bool) %}
- {% for cpu_idx in range(0, neutron_tls_proxy_processes) %}
- cpu-map {{ cpu_idx + 1 }} {{ cpu_idx }}
- {% endfor %}
+ nbthread {{ neutron_tls_proxy_threads }}
+ {% if (neutron_tls_proxy_threads | int > 1) and (neutron_tls_proxy_thread_cpu_map | bool) %}
+ cpu-map auto:1/all 0-63
{% endif %}
- ssl-default-bind-ciphers DEFAULT:!MEDIUM:!3DES
- ssl-default-bind-options no-sslv3 no-tlsv10 no-tlsv11
+ {% if kolla_enable_tls_external | bool or kolla_enable_tls_internal | bool %}
+ {% for line in haproxy_ssl_settings.split('\n') %}
+ {{ line }}
+ {% endfor %}
tune.ssl.default-dh-param 4096
+ ca-base {{ haproxy_backend_cacert_dir }}
+ {% endif %}
defaults
log global
diff --git a/ansible/roles/neutron/templates/neutron-tls-proxy.json.j2 b/ansible/roles/neutron/templates/neutron-tls-proxy.json.j2
index 0a45cc2f8f..fe66141266 100644
--- a/ansible/roles/neutron/templates/neutron-tls-proxy.json.j2
+++ b/ansible/roles/neutron/templates/neutron-tls-proxy.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/neutron/certs/neutron-cert-and-key.pem",
"owner": "neutron",
"perm": "0600"
- }
+ }{% if neutron_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/neutron/templates/neutron.conf.j2 b/ansible/roles/neutron/templates/neutron.conf.j2
index 62bd1711cb..68e040ba11 100644
--- a/ansible/roles/neutron/templates/neutron.conf.j2
+++ b/ansible/roles/neutron/templates/neutron.conf.j2
@@ -18,14 +18,10 @@ bind_port = {{ neutron_server_listen_port }}
api_paste_config = /etc/neutron/api-paste.ini
api_workers = {{ neutron_api_workers }}
-metadata_workers = {{ neutron_metadata_workers }}
rpc_workers = {{ openstack_service_rpc_workers }}
rpc_state_report_workers = {{ openstack_service_rpc_workers }}
-# NOTE(SamYaple): We must specify this value here rather than the metadata conf
-# because it is used by the l3 and dhcp agents. The reason the path has 'kolla'
-# in it is because we are sharing this socket in a volume which is it's own dir
-metadata_proxy_socket = /var/lib/neutron/kolla/metadata_proxy
+state_path = /var/lib/neutron/kolla
{% if neutron_plugin_agent == "openvswitch" or (neutron_plugin_agent == "ovn" and neutron_ovn_dhcp_agent | bool) %}
interface_driver = openvswitch
@@ -38,8 +34,6 @@ ovs_integration_bridge = br-int-{{ item }}
host = {{ ansible_facts.hostname }}_{{ item }}
{% endif %}
-allow_overlapping_ips = true
-
{% if neutron_plugin_agent == 'vmware_nsxv' %}
core_plugin = vmware_nsx.plugin.NsxVPlugin
{% elif neutron_plugin_agent == 'vmware_nsxv3' %}
@@ -67,10 +61,8 @@ transport_url = {{ rpc_transport_url }}
router_distributed = True
{% endif %}
-{% if enable_designate | bool %}
-{% if designate_ns_record is string %}
-dns_domain = {{ designate_ns_record }}.
-{% endif %}
+dns_domain = {{ neutron_dns_domain }}
+{% if enable_designate | bool and neutron_dns_integration | bool %}
external_dns_driver = designate
{% if neutron_plugin_agent == 'vmware_nsxv' %}
nsx_extension_drivers = vmware_nsxv_dns
@@ -125,7 +117,7 @@ password = {{ neutron_keystone_password }}
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
@@ -139,11 +131,18 @@ topics = {{ neutron_enabled_notification_topics | map(attribute='name') | join('
driver = noop
{% endif %}
-{% if om_enable_rabbitmq_tls | bool %}
[oslo_messaging_rabbit]
+heartbeat_in_pthread = false
+{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
{% if neutron_policy_file is defined %}
[oslo_policy]
@@ -192,7 +191,6 @@ password = {{ placement_keystone_password }}
user_domain_name = {{ default_user_domain_name }}
project_name = service
project_domain_name = {{ default_project_domain_name }}
-os_region_name = {{ openstack_region_name }}
endpoint_type = internal
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
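
The `[oslo_messaging_rabbit]` section is now emitted unconditionally, with TLS, durable queues, and quorum queues toggled independently. A sketch of how the three flags shape the rendered section (flag values and the CA path are illustrative):

```python
# Sketch of how the three RabbitMQ toggles shape the rendered
# [oslo_messaging_rabbit] section.
def render_rabbit_section(tls: bool, ha: bool, quorum: bool,
                          cacert: str = "/etc/ssl/rabbitmq-ca.pem") -> str:
    lines = ["[oslo_messaging_rabbit]", "heartbeat_in_pthread = false"]
    if tls:
        lines += ["ssl = true", f"ssl_ca_file = {cacert}"]
    if ha:
        lines.append("amqp_durable_queues = true")
    if quorum:
        lines.append("rabbit_quorum_queue = true")
    return "\n".join(lines)

print(render_rabbit_section(tls=True, ha=False, quorum=True))
```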
diff --git a/ansible/roles/neutron/templates/neutron_taas.conf.j2 b/ansible/roles/neutron/templates/neutron_taas.conf.j2
new file mode 100644
index 0000000000..5031871834
--- /dev/null
+++ b/ansible/roles/neutron/templates/neutron_taas.conf.j2
@@ -0,0 +1,6 @@
+[service_providers]
+service_provider = TAAS:TAAS:neutron_taas.services.taas.service_drivers.taas_rpc.TaasRpcDriver:default
+
+[taas]
+driver = neutron_taas.services.taas.drivers.linux.ovs_taas.OvsTaasDriver
+enabled = True
diff --git a/ansible/roles/neutron/templates/openvswitch_agent.ini.j2 b/ansible/roles/neutron/templates/openvswitch_agent.ini.j2
index 88834e2dea..8ac25af7e1 100644
--- a/ansible/roles/neutron/templates/openvswitch_agent.ini.j2
+++ b/ansible/roles/neutron/templates/openvswitch_agent.ini.j2
@@ -15,7 +15,8 @@ firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewal
[ovs]
{% if inventory_hostname in groups["network"] or (inventory_hostname in groups["compute"] and computes_need_external_bridge | bool ) %}
-bridge_mappings = {% for bridge in neutron_bridge_name.split(',') %}physnet{{ loop.index0 + 1 }}:{{ bridge }}{% if not loop.last %},{% endif %}{% endfor %}
+{# Format: physnet1:br1,physnet2:br2 #}
+bridge_mappings = {{ neutron_physical_networks.split(',') | zip(neutron_bridge_name.split(',')) | map('join', ':') | join(',') }}
{% endif %}
datapath_type = {{ ovs_datapath }}
ovsdb_connection = tcp:127.0.0.1:{{ ovsdb_port }}
diff --git a/ansible/roles/neutron/templates/ovn_agent.ini.j2 b/ansible/roles/neutron/templates/ovn_agent.ini.j2
new file mode 100644
index 0000000000..bf5761353e
--- /dev/null
+++ b/ansible/roles/neutron/templates/ovn_agent.ini.j2
@@ -0,0 +1,7 @@
+[ovn]
+ovn_nb_connection = {{ ovn_nb_connection }}
+ovn_sb_connection = {{ ovn_sb_connection }}
+
+[ovs]
+ovsdb_connection = tcp:127.0.0.1:{{ ovsdb_port }}
+ovsdb_timeout = {{ ovsdb_timeout }}
diff --git a/ansible/roles/neutron/templates/sriov_agent.ini.j2 b/ansible/roles/neutron/templates/sriov_agent.ini.j2
index 683b2e8031..fb8ba15077 100644
--- a/ansible/roles/neutron/templates/sriov_agent.ini.j2
+++ b/ansible/roles/neutron/templates/sriov_agent.ini.j2
@@ -1,6 +1,11 @@
+{% if enable_neutron_qos | bool %}
+[agent]
+extensions = qos
+{% endif %}
+
[sriov_nic]
# 'physical_device_mappings' is a comma separated list
-# Maps a physical network to network inferface used for SRIOV
+# Maps a physical network to network interface used for SRIOV
# This template should be modified for specific environments
# See Official OpenStack SRIOV documentation for all available options
physical_device_mappings = {{ neutron_sriov_physnets }}
diff --git a/ansible/roles/nova-cell/defaults/main.yml b/ansible/roles/nova-cell/defaults/main.yml
index a976c769e4..b3f445d0d8 100644
--- a/ansible/roles/nova-cell/defaults/main.yml
+++ b/ansible/roles/nova-cell/defaults/main.yml
@@ -1,4 +1,6 @@
---
+project_name: "nova"
+
nova_cell_services:
nova-libvirt:
container_name: nova_libvirt
@@ -8,7 +10,7 @@ nova_cell_services:
pid_mode: "host"
cgroupns_mode: "host"
privileged: True
- volumes: "{{ nova_libvirt_default_volumes + nova_libvirt_extra_volumes }}"
+ volumes: "{{ nova_libvirt_default_volumes + nova_libvirt_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
dimensions: "{{ nova_libvirt_dimensions }}"
healthcheck: "{{ nova_libvirt_healthcheck }}"
nova-ssh:
@@ -59,7 +61,7 @@ nova_cell_services:
privileged: True
enabled: "{{ not enable_nova_fake | bool }}"
ipc_mode: "host"
- volumes: "{{ nova_compute_default_volumes + nova_compute_extra_volumes }}"
+ volumes: "{{ nova_compute_default_volumes + nova_compute_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
dimensions: "{{ nova_compute_dimensions }}"
healthcheck: "{{ nova_compute_healthcheck }}"
nova-compute-ironic:
@@ -71,6 +73,13 @@ nova_cell_services:
dimensions: "{{ nova_compute_ironic_dimensions }}"
healthcheck: "{{ nova_compute_ironic_healthcheck }}"
+####################
+# Config Validate
+####################
+nova_cell_config_validation:
+ - generator: "/nova/etc/nova/nova-config-generator.conf"
+ config: "/etc/nova/nova.conf"
+
####################
# Ceph options
####################
@@ -78,6 +87,15 @@ nova_cell_services:
# qemu (1, 6, 0) or later. Set to "" to disable.
nova_hw_disk_discard: "unmap"
+nova_cell_ceph_backend:
+ cluster: "{{ ceph_cluster }}"
+ vms:
+ user: "{{ ceph_nova_user }}"
+ pool: "{{ ceph_nova_pool_name }}"
+ volumes:
+ user: "{{ ceph_cinder_user }}"
+ pool: "{{ ceph_cinder_pool_name }}"
+
####################
# Cells Options
####################
@@ -216,36 +234,36 @@ nova_cell_notify_rabbitmq_users:
####################
nova_tag: "{{ openstack_tag }}"
-nova_libvirt_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/nova-libvirt"
+nova_libvirt_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}nova-libvirt"
nova_libvirt_tag: "{{ nova_tag }}"
nova_libvirt_image_full: "{{ nova_libvirt_image }}:{{ nova_libvirt_tag }}"
nova_libvirt_cpu_mode: "{{ 'host-passthrough' if ansible_facts.architecture == 'aarch64' else '' }}"
-nova_ssh_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/nova-ssh"
+nova_ssh_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}nova-ssh"
nova_ssh_tag: "{{ nova_tag }}"
nova_ssh_image_full: "{{ nova_ssh_image }}:{{ nova_ssh_tag }}"
-nova_novncproxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/nova-novncproxy"
+nova_novncproxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}nova-novncproxy"
nova_novncproxy_tag: "{{ nova_tag }}"
nova_novncproxy_image_full: "{{ nova_novncproxy_image }}:{{ nova_novncproxy_tag }}"
-nova_spicehtml5proxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/nova-spicehtml5proxy"
+nova_spicehtml5proxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}nova-spicehtml5proxy"
nova_spicehtml5proxy_tag: "{{ nova_tag }}"
nova_spicehtml5proxy_image_full: "{{ nova_spicehtml5proxy_image }}:{{ nova_spicehtml5proxy_tag }}"
-nova_serialproxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/nova-serialproxy"
+nova_serialproxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}nova-serialproxy"
nova_serialproxy_tag: "{{ nova_tag }}"
nova_serialproxy_image_full: "{{ nova_serialproxy_image }}:{{ nova_serialproxy_tag }}"
-nova_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/nova-conductor"
+nova_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}nova-conductor"
nova_conductor_tag: "{{ nova_tag }}"
nova_conductor_image_full: "{{ nova_conductor_image }}:{{ nova_conductor_tag }}"
-nova_compute_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/nova-compute"
+nova_compute_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}nova-compute"
nova_compute_tag: "{{ nova_tag }}"
nova_compute_image_full: "{{ nova_compute_image }}:{{ nova_compute_tag }}"
-nova_compute_ironic_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/nova-compute-ironic"
+nova_compute_ironic_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}nova-compute-ironic"
nova_compute_ironic_tag: "{{ nova_tag }}"
nova_compute_ironic_image_full: "{{ nova_compute_ironic_image }}:{{ nova_compute_ironic_tag }}"
@@ -364,16 +382,16 @@ nova_libvirt_default_volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "/lib/modules:/lib/modules:ro"
- - "/run/:/run/:shared"
+ - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
- "/dev:/dev"
+ - "{{ 'devpts:/dev/pts' if kolla_container_engine == 'podman' else '' }}"
- "/sys/fs/cgroup:/sys/fs/cgroup"
- "kolla_logs:/var/log/kolla/"
- "libvirtd:/var/lib/libvirt"
- "{{ nova_instance_datadir_volume }}:/var/lib/nova/"
- "{% if enable_shared_var_lib_nova_mnt | bool %}/var/lib/nova/mnt:/var/lib/nova/mnt:shared{% endif %}"
- "nova_libvirt_qemu:/etc/libvirt/qemu"
- - "nova_libvirt_secrets:/etc/libvirt/secrets"
- - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/nova:/dev-mode/nova' if nova_dev_mode | bool else '' }}"
nova_ssh_default_volumes:
- "{{ node_config_directory }}/nova-ssh/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
@@ -381,57 +399,58 @@ nova_ssh_default_volumes:
- "kolla_logs:/var/log/kolla"
- "{{ nova_instance_datadir_volume }}:/var/lib/nova"
- "{% if enable_shared_var_lib_nova_mnt | bool %}/var/lib/nova/mnt:/var/lib/nova/mnt:shared{% endif %}"
- - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/nova:/dev-mode/nova' if nova_dev_mode | bool else '' }}"
nova_novncproxy_default_volumes:
- "{{ node_config_directory }}/nova-novncproxy/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/nova:/dev-mode/nova' if nova_dev_mode | bool else '' }}"
nova_spicehtml5proxy_default_volumes:
- "{{ node_config_directory }}/nova-spicehtml5proxy/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/nova:/dev-mode/nova' if nova_dev_mode | bool else '' }}"
nova_serialproxy_default_volumes:
- "{{ node_config_directory }}/nova-serialproxy/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/nova:/dev-mode/nova' if nova_dev_mode | bool else '' }}"
nova_conductor_default_volumes:
- "{{ node_config_directory }}/nova-conductor/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/nova:/dev-mode/nova' if nova_dev_mode | bool else '' }}"
nova_compute_default_volumes:
- "{{ node_config_directory }}/nova-compute/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "/lib/modules:/lib/modules:ro"
- - "/run:/run:shared"
+ - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
- "/dev:/dev"
- "kolla_logs:/var/log/kolla/"
- "{% if enable_iscsid | bool %}iscsi_info:/etc/iscsi{% endif %}"
- "{{ nova_libvirt_volume }}:/var/lib/libvirt"
- "{{ nova_instance_datadir_volume }}:/var/lib/nova/"
- "{% if enable_shared_var_lib_nova_mnt | bool %}/var/lib/nova/mnt:/var/lib/nova/mnt:shared{% endif %}"
- - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/nova:/dev-mode/nova' if nova_dev_mode | bool else '' }}"
nova_compute_ironic_default_volumes:
- "{{ node_config_directory }}/nova-compute-ironic/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/nova:/dev-mode/nova' if nova_dev_mode | bool else '' }}"
# Used by bootstrapping containers.
nova_cell_bootstrap_default_volumes:
- "{{ node_config_directory }}/nova-cell-bootstrap/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/nova:/dev-mode/nova' if nova_dev_mode | bool else '' }}"
+ - "{{ 'nova-cell:/var/lib/script/' if kolla_container_engine == 'podman' else '' }}"
nova_extra_volumes: "{{ default_extra_volumes }}"
nova_libvirt_extra_volumes: "{{ nova_extra_volumes }}"
@@ -444,6 +463,7 @@ nova_compute_extra_volumes: "{{ nova_extra_volumes }}"
nova_compute_ironic_extra_volumes: "{{ nova_extra_volumes }}"
# Used by bootstrapping containers.
nova_cell_bootstrap_extra_volumes: "{{ nova_extra_volumes }}"
+nova_cell_get_settings_volumes: "{{ nova_cell_bootstrap_default_volumes + nova_cell_bootstrap_extra_volumes }}"
nova_libvirt_volume: "{{ 'libvirtd' if enable_nova_libvirt_container | bool else '/var/lib/libvirt' }}"
@@ -516,8 +536,6 @@ nova_notification_topics:
enabled: "{{ enable_ceilometer | bool or enable_neutron_infoblox_ipam_agent | bool }}"
- name: "{{ designate_notifications_topic_name }}"
enabled: "{{ designate_enable_notifications_sink | bool }}"
- - name: vitrage_notifications
- enabled: "{{ enable_vitrage | bool }}"
nova_enabled_notification_topics: "{{ nova_notification_topics | selectattr('enabled', 'equalto', true) | list }}"
@@ -541,7 +559,7 @@ libvirt_tls: false
# also means the deployer is responsible for restarting the nova_compute and
# nova_libvirt containers when the key changes, as we can't know when to do that
libvirt_tls_manage_certs: true
-# When using tls we are verfiying the hostname we are connected to matches the
+# When using tls we are verifying the hostname we are connected to matches the
# libvirt cert we are presented with. As such we can't use IPs here, but keep the
# ability for people to override the hostname to use.
migration_hostname: "{{ ansible_facts.nodename }}"
@@ -568,7 +586,7 @@ nova_dev_mode: "{{ kolla_dev_mode }}"
nova_source_version: "{{ kolla_source_version }}"
###################################
-# Enable Shared Bind Propogation
+# Enable Shared Bind Propagation
###################################
enable_shared_var_lib_nova_mnt: "{{ enable_cinder_backend_nfs | bool or enable_cinder_backend_quobyte | bool }}"
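
A recurring pattern in these volume lists: conditional entries render to empty strings and are dropped later with `reject('equalto', '')`, while engine-specific extras are appended via `lookup('vars', 'run_default_volumes_' + kolla_container_engine)`. A plain-Python sketch of the mechanism, with assumed per-engine values:

```python
# Sketch of the volume-list pattern above; values are illustrative.
kolla_container_engine = "podman"              # or "docker"
run_default_volumes = {                        # assumed per-engine extras
    "docker": [],
    "podman": ["/run/netns:/run/netns:shared"],
}

volumes = [
    "/lib/modules:/lib/modules:ro",
    "/run:/run" + (":shared" if kolla_container_engine == "docker" else ""),
    "devpts:/dev/pts" if kolla_container_engine == "podman" else "",
] + run_default_volumes[kolla_container_engine]

volumes = [v for v in volumes if v != ""]      # reject('equalto', '')
print(volumes)
```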
diff --git a/ansible/roles/nova-cell/filter_plugins/filters.py b/ansible/roles/nova-cell/filter_plugins/filters.py
deleted file mode 100644
index 48c9ce8f3f..0000000000
--- a/ansible/roles/nova-cell/filter_plugins/filters.py
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2019 StackHPC Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from kolla_ansible import nova_filters as filters
-
-
-class FilterModule(object):
- """Nova cell filters."""
-
- def filters(self):
- return filters.get_filters()
diff --git a/ansible/roles/nova-cell/filter_plugins/kolla_nova_cell_filters.py b/ansible/roles/nova-cell/filter_plugins/kolla_nova_cell_filters.py
new file mode 100644
index 0000000000..6e3b38dab7
--- /dev/null
+++ b/ansible/roles/nova-cell/filter_plugins/kolla_nova_cell_filters.py
@@ -0,0 +1,22 @@
+# Copyright (c) 2019 StackHPC Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from kolla_ansible import nova_filters as filters
+
+
+class FilterModule(object):
+ """Nova cell filters."""
+
+ def filters(self):
+ return filters.get_filters()
diff --git a/ansible/roles/nova-cell/handlers/main.yml b/ansible/roles/nova-cell/handlers/main.yml
index 132cc04669..17658ef2ed 100644
--- a/ansible/roles/nova-cell/handlers/main.yml
+++ b/ansible/roles/nova-cell/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "nova-conductor"
service: "{{ nova_cell_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -14,7 +14,6 @@
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
when:
- - kolla_action != "config"
- kolla_action != "upgrade" or not nova_safety_upgrade | bool
- name: Restart nova-novncproxy container
@@ -22,7 +21,7 @@
service_name: "nova-novncproxy"
service: "{{ nova_cell_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -32,7 +31,6 @@
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
when:
- - kolla_action != "config"
- kolla_action != "upgrade" or not nova_safety_upgrade | bool
- name: Restart nova-spicehtml5proxy container
@@ -40,7 +38,7 @@
service_name: "nova-spicehtml5proxy"
service: "{{ nova_cell_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -50,7 +48,6 @@
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
when:
- - kolla_action != "config"
- kolla_action != "upgrade" or not nova_safety_upgrade | bool
- name: Restart nova-serialproxy container
@@ -58,7 +55,7 @@
service_name: "nova-serialproxy"
service: "{{ nova_cell_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -68,7 +65,6 @@
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
when:
- - kolla_action != "config"
- kolla_action != "upgrade" or not nova_safety_upgrade | bool
- name: Restart nova-ssh container
@@ -76,7 +72,7 @@
service_name: "nova-ssh"
service: "{{ nova_cell_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -86,16 +82,14 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart nova-libvirt container
vars:
service_name: "nova-libvirt"
service: "{{ nova_cell_services[service_name] }}"
- nova_libvirt_notify: "{{ ['Create libvirt SASL user'] if libvirt_enable_sasl | bool else [] }}"
+ nova_libvirt_notify: "{{ ['Checking libvirt container is ready', 'Create libvirt SASL user'] if libvirt_enable_sasl | bool else [] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -111,10 +105,20 @@
# guests running, nova_libvirt will raise an error even though it is removed.
retries: 5
until: restart_nova_libvirt is success
- when:
- - kolla_action != "config"
notify: "{{ nova_libvirt_notify }}"
+  # Need to wait for the kolla_set_configs script to overwrite the SASL config file.
+- name: Checking libvirt container is ready
+ become: true
+ shell:
+ cmd: >
+ set -o pipefail &&
+ {{ kolla_container_engine }} exec -i nova_libvirt ls /run/libvirtd.pid
+ executable: /bin/bash
+ register: libvirt_container_ready
+ until: libvirt_container_ready is succeeded
+ retries: 10
+
# The SASL user needs to exist in order for nova-compute to start successfully.
- name: Create libvirt SASL user
become: true
@@ -133,13 +137,15 @@
service:
name: libvirtd
state: reloaded
+ when:
+ - kolla_action != "config"
- name: Restart nova-compute container
vars:
service_name: "nova-compute"
service: "{{ nova_cell_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -150,15 +156,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart nova-compute-ironic container
vars:
service_name: "nova-compute-ironic"
service: "{{ nova_cell_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -167,14 +171,12 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
# nova-compute-fake is special. It will start multiple containers,
# so all variables are put here rather than in the defaults/main.yml file.
- name: Restart nova-compute-fake containers
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "nova_compute_fake_{{ item }}"
@@ -184,11 +186,14 @@
- "{{ node_config_directory }}/nova-compute-fake-{{ item }}/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "/lib/modules:/lib/modules:ro"
- - "/run:/run:shared"
+ - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
+ - "/run/netns:/run/netns:shared"
+ - "/run/lock/nova:/run/lock/nova:shared"
+ - "/run/libvirt:/run/libvirt:shared"
+ - "/run/nova:/run/nova:shared"
+ - "/run/openvswitch:/run/openvswitch:shared"
- "kolla_logs:/var/log/kolla/"
with_sequence: start=1 end={{ num_nova_fake_per_node }}
- when:
- - kolla_action != "config"
# NOTE(mgoddard): After upgrading nova-compute, services will have an RPC
# version cap in place. We need to restart all services that communicate with
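
The new readiness handler polls for libvirtd's pid file inside the container before the SASL user is created, since kolla_set_configs must first overwrite the SASL config. A rough Python equivalent of that polling loop; the engine name, retry count, and delay are assumptions:

```python
# Rough equivalent of the "Checking libvirt container is ready" handler.
import subprocess
import time

def wait_for_libvirt(engine: str = "podman", retries: int = 10,
                     delay: float = 5.0) -> None:
    for attempt in range(1, retries + 1):
        result = subprocess.run(
            [engine, "exec", "-i", "nova_libvirt",
             "ls", "/run/libvirtd.pid"],
            capture_output=True,
        )
        if result.returncode == 0:
            return
        time.sleep(delay)
    raise RuntimeError("nova_libvirt did not become ready in time")
```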
diff --git a/ansible/roles/nova-cell/tasks/bootstrap.yml b/ansible/roles/nova-cell/tasks/bootstrap.yml
index 06855f6e17..f7fb139109 100644
--- a/ansible/roles/nova-cell/tasks/bootstrap.yml
+++ b/ansible/roles/nova-cell/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating Nova cell database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ nova_cell_database_address }}"
@@ -16,6 +17,7 @@
- name: Creating Nova cell database user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ nova_cell_database_address }}"
diff --git a/ansible/roles/nova-cell/tasks/bootstrap_service.yml b/ansible/roles/nova-cell/tasks/bootstrap_service.yml
index 46b95d66be..e4a4d2b2c7 100644
--- a/ansible/roles/nova-cell/tasks/bootstrap_service.yml
+++ b/ansible/roles/nova-cell/tasks/bootstrap_service.yml
@@ -3,7 +3,7 @@
become: true
vars:
nova_conductor: "{{ nova_cell_services['nova-conductor'] }}"
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -14,7 +14,7 @@
labels:
BOOTSTRAP:
name: "nova_cell_bootstrap"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ nova_cell_bootstrap_default_volumes + nova_cell_bootstrap_extra_volumes }}"
register: bootstrap_result
changed_when: bootstrap_result.stdout | default("") | length > 0
diff --git a/ansible/roles/nova-cell/tasks/cell_proxy_loadbalancer.yml b/ansible/roles/nova-cell/tasks/cell_proxy_loadbalancer.yml
index bcf95227b9..e57839e800 100644
--- a/ansible/roles/nova-cell/tasks/cell_proxy_loadbalancer.yml
+++ b/ansible/roles/nova-cell/tasks/cell_proxy_loadbalancer.yml
@@ -4,7 +4,7 @@
name: loadbalancer-config
vars:
project_services: "{{ cell_proxy_project_services | namespace_haproxy_for_cell(cell_name) }}"
- # Default is necessary because this play may not be targetting the hosts in
+ # Default is necessary because this play may not be targeting the hosts in
# the cell_proxy_group group, and therefore they would not have role
# defaults defined. If we put this variable in group_vars, then it cannot
# be overridden by the inventory.
diff --git a/ansible/roles/nova-cell/tasks/check-containers.yml b/ansible/roles/nova-cell/tasks/check-containers.yml
index 1213765a3c..b7e2f7c29f 100644
--- a/ansible/roles/nova-cell/tasks/check-containers.yml
+++ b/ansible/roles/nova-cell/tasks/check-containers.yml
@@ -1,22 +1,3 @@
---
-- name: Check nova-cell containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- environment: "{{ item.value.environment | default(omit) }}"
- pid_mode: "{{ item.value.pid_mode | default('') }}"
- cgroupns_mode: "{{ item.value.cgroupns_mode | default(omit) }}"
- ipc_mode: "{{ item.value.ipc_mode | default(omit) }}"
- privileged: "{{ item.value.privileged | default(False) }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ nova_cell_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/nova-cell/tasks/config-libvirt-tls.yml b/ansible/roles/nova-cell/tasks/config-libvirt-tls.yml
index d8a1d94421..6a832e9d66 100644
--- a/ansible/roles/nova-cell/tasks/config-libvirt-tls.yml
+++ b/ansible/roles/nova-cell/tasks/config-libvirt-tls.yml
@@ -1,46 +1,27 @@
---
-- name: Copying over libvirt TLS keys (nova-libvirt)
+- name: Copying over libvirt TLS keys to services
become: true
vars:
- service: "{{ nova_cell_services['nova-libvirt'] }}"
- service_name: nova-libvirt
+ services:
+ - "nova-compute"
+ - "nova-libvirt"
+ key_files:
+ - cacert.pem
+ - clientcert.pem
+ - clientkey.pem
+ - servercert.pem
+ - serverkey.pem
+ service_name: "{{ item[0] }}"
+ filename: "{{ item[1] }}"
paths:
- - "{{ node_custom_config }}/nova/nova-libvirt/{{ inventory_hostname }}/{{ item }}"
- - "{{ node_custom_config }}/nova/nova-libvirt/{{ item }}"
+ - "{{ node_custom_config }}/nova/nova-libvirt/{{ inventory_hostname }}/{{ filename }}"
+ - "{{ node_custom_config }}/nova/nova-libvirt/{{ filename }}"
+ service: "{{ nova_cell_services[service_name] }}"
copy:
src: "{{ lookup('first_found', paths) }}"
- dest: "{{ node_config_directory }}/{{ service_name }}/{{ item }}"
+ dest: "{{ node_config_directory }}/{{ service_name }}/{{ filename }}"
mode: "0600"
when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- with_items:
- - cacert.pem
- - servercert.pem
- - serverkey.pem
- - clientcert.pem
- - clientkey.pem
- notify:
- - Restart {{ service_name }} container
-
-- name: Copying over libvirt TLS keys (nova-compute)
- become: true
- vars:
- service: "{{ nova_cell_services['nova-compute'] }}"
- service_name: nova-compute
- paths:
- - "{{ node_custom_config }}/nova/nova-libvirt/{{ inventory_hostname }}/{{ item }}"
- - "{{ node_custom_config }}/nova/nova-libvirt/{{ item }}"
- copy:
- src: "{{ lookup('first_found', paths) }}"
- dest: "{{ node_config_directory }}/{{ service_name }}/{{ item }}"
- mode: "0600"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- with_items:
- - cacert.pem
- - clientcert.pem
- - clientkey.pem
- notify:
- - Restart {{ service_name }} container
+ - service | service_enabled_and_mapped_to_host
+ - not (service_name == 'nova-compute' and (filename == 'servercert.pem' or filename == 'serverkey.pem'))
+ loop: "{{ services | product(key_files) | list }}"
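
The two copy tasks collapse into one loop over the cartesian product of services and key files, with the server certificate and key skipped for nova-compute. The same pairs in plain Python (`itertools.product` mirrors the Jinja `product` filter):

```python
from itertools import product

services = ["nova-compute", "nova-libvirt"]
key_files = ["cacert.pem", "clientcert.pem", "clientkey.pem",
             "servercert.pem", "serverkey.pem"]

for service, filename in product(services, key_files):
    if service == "nova-compute" and filename in ("servercert.pem",
                                                  "serverkey.pem"):
        continue  # compute only needs the CA and client material
    print(f"copy {filename} -> {service}")
```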
diff --git a/ansible/roles/nova-cell/tasks/config-nova-fake.yml b/ansible/roles/nova-cell/tasks/config-nova-fake.yml
index 17c6381e15..8959dfa149 100644
--- a/ansible/roles/nova-cell/tasks/config-nova-fake.yml
+++ b/ansible/roles/nova-cell/tasks/config-nova-fake.yml
@@ -47,7 +47,7 @@
- name: Check nova-compute-fake containers
become: true
- kolla_docker:
+ kolla_container:
action: "compare_container"
common_options: "{{ docker_common_options }}"
name: "nova_compute_fake_{{ item }}"
@@ -57,7 +57,12 @@
- "{{ node_config_directory }}/nova-compute-fake-{{ item }}/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "/lib/modules:/lib/modules:ro"
- - "/run:/run:shared"
+ - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
+ - "/run/netns:/run/netns:shared"
+ - "/run/lock/nova:/run/lock/nova:shared"
+ - "/run/libvirt:/run/libvirt:shared"
+ - "/run/nova:/run/nova:shared"
+ - "/run/openvswitch:/run/openvswitch:shared"
- "kolla_logs:/var/log/kolla/"
with_sequence: start=1 end={{ num_nova_fake_per_node }}
when:
diff --git a/ansible/roles/nova-cell/tasks/config.yml b/ansible/roles/nova-cell/tasks/config.yml
index ed7b495dbb..dd49e56de0 100644
--- a/ansible/roles/nova-cell/tasks/config.yml
+++ b/ansible/roles/nova-cell/tasks/config.yml
@@ -7,10 +7,7 @@
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ nova_cell_services }}"
+ with_dict: "{{ nova_cell_services | select_services_enabled_and_mapped_to_host }}"
- include_tasks: copy-certs.yml
when:
@@ -40,18 +37,26 @@
when:
- nova_policy.results
+- name: Check for vendordata file
+ stat:
+ path: "{{ node_custom_config }}/nova/vendordata.json"
+ delegate_to: localhost
+ run_once: True
+ register: vendordata_file
+
+- name: Set vendordata file path
+ set_fact:
+ vendordata_file_path: "{{ vendordata_file.stat.path }}"
+ when:
+ - vendordata_file.stat.exists
+
- name: Copying over config.json files for services
become: true
template:
src: "{{ item.key }}.json.j2"
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ nova_cell_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ nova_cell_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over nova.conf
become: true
@@ -64,15 +69,12 @@
- "{{ node_custom_config }}/nova.conf"
- "{{ node_custom_config }}/nova/{{ item.key }}.conf"
- "{{ node_custom_config }}/nova/{{ inventory_hostname }}/nova.conf"
+ - "{{ node_custom_config }}/nova/{{ inventory_hostname }}/{{ item.key }}.conf"
dest: "{{ node_config_directory }}/{{ item.key }}/nova.conf"
mode: "0660"
when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- item.key in nova_cell_services_require_nova_conf
- with_dict: "{{ nova_cell_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ nova_cell_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over Nova compute provider config
become: true
@@ -83,11 +85,8 @@
dest: "{{ node_config_directory }}/nova-compute/provider_config.yaml"
mode: "0660"
when:
- - inventory_hostname in groups[service.group]
+ - service | service_enabled_and_mapped_to_host
- nova_cell_compute_provider_config is defined
- - service.enabled | bool
- notify:
- - Restart nova-compute container
- name: Copying over libvirt configuration
become: true
@@ -97,14 +96,10 @@
src: "{{ item.src }}"
dest: "{{ node_config_directory }}/nova-libvirt/{{ item.dest }}"
mode: "0660"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_items:
- { src: "qemu.conf.j2", dest: "qemu.conf" }
- { src: "libvirtd.conf.j2", dest: "libvirtd.conf" }
- notify:
- - Restart nova-libvirt container
- name: Copying over libvirt TLS keys
include_tasks: config-libvirt-tls.yml
@@ -124,14 +119,11 @@
mode: "0660"
when:
- libvirt_enable_sasl | bool
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ - service | service_enabled_and_mapped_to_host
with_items:
- { src: "auth.conf.j2", dest: "auth.conf", service: "nova-compute" }
- { src: "auth.conf.j2", dest: "auth.conf", service: "nova-libvirt" }
- { src: "sasl.conf.j2", dest: "sasl.conf", service: "nova-libvirt" }
- notify:
- - Restart {{ service_name }} container
- name: Copying files for nova-ssh
become: true
@@ -141,16 +133,12 @@
src: "{{ item.src }}"
dest: "{{ node_config_directory }}/nova-ssh/{{ item.dest }}"
mode: "0660"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_items:
- { src: "sshd_config.j2", dest: "sshd_config" }
- { src: "id_rsa", dest: "id_rsa" }
- { src: "id_rsa.pub", dest: "id_rsa.pub" }
- { src: "ssh_config.j2", dest: "ssh_config" }
- notify:
- - Restart nova-ssh container
- name: Copying VMware vCenter CA file
vars:
@@ -162,10 +150,7 @@
when:
- nova_compute_virt_type == "vmware"
- not vmware_vcenter_insecure | bool
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart nova-compute container
+ - service | service_enabled_and_mapped_to_host
- name: Copying 'release' file for nova_compute
vars:
@@ -180,11 +165,18 @@
- "{{ node_custom_config }}/nova_compute/release"
- "{{ node_custom_config }}/nova/release"
skip: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart nova-compute container
+ when: service | service_enabled_and_mapped_to_host
+
+- name: Generating 'hostnqn' file for nova_compute
+ vars:
+ hostnqn: "nqn.2014-08.org.nvmexpress:uuid:{{ ansible_facts.hostname | to_uuid }}"
+ service: "{{ nova_cell_services['nova-compute'] }}"
+ template:
+ src: "templates/hostnqn.j2"
+ dest: "{{ node_config_directory }}/nova-compute/hostnqn"
+ mode: "0660"
+ become: true
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over existing policy file
become: true
@@ -193,10 +185,21 @@
dest: "{{ node_config_directory }}/{{ item.key }}/{{ nova_policy_file }}"
mode: "0660"
when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- nova_policy_file is defined
- item.key in nova_cell_services_require_policy_json
- with_dict: "{{ nova_cell_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ nova_cell_services | select_services_enabled_and_mapped_to_host }}"
+
+- name: Copying over vendordata file to containers
+ vars:
+ service: "{{ nova_cell_services[item] }}"
+ copy:
+ src: "{{ vendordata_file_path }}"
+ dest: "{{ node_config_directory }}/{{ item }}/vendordata.json"
+ mode: "0660"
+ become: True
+ when:
+ - vendordata_file_path is defined
+ - service | service_enabled_and_mapped_to_host
+ with_items:
+ - nova-compute
+ - nova-compute-ironic
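
The generated `hostnqn` embeds a UUID derived from the hostname via Ansible's `to_uuid` filter, which is a UUIDv5 under a fixed Ansible namespace. A sketch of the resulting value; the namespace constant is quoted from memory of Ansible's source and should be verified:

```python
# Sketch of the hostnqn value generated above. The namespace UUID is an
# assumption based on Ansible's to_uuid implementation.
import uuid

UUID_NAMESPACE_ANSIBLE = uuid.UUID("361E6D51-FAEC-444A-9079-341386DA8E2E")

def hostnqn(hostname: str) -> str:
    return "nqn.2014-08.org.nvmexpress:uuid:" + str(
        uuid.uuid5(UUID_NAMESPACE_ANSIBLE, hostname))

print(hostnqn("compute01"))
```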
diff --git a/ansible/roles/nova-cell/tasks/config_validate.yml b/ansible/roles/nova-cell/tasks/config_validate.yml
new file mode 100644
index 0000000000..70ed89f91a
--- /dev/null
+++ b/ansible/roles/nova-cell/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ nova_cell_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ nova_cell_config_validation }}"
diff --git a/ansible/roles/nova-cell/tasks/create_cells.yml b/ansible/roles/nova-cell/tasks/create_cells.yml
index a7b901eb01..ac29e61efc 100644
--- a/ansible/roles/nova-cell/tasks/create_cells.yml
+++ b/ansible/roles/nova-cell/tasks/create_cells.yml
@@ -7,7 +7,7 @@
vars:
nova_conductor: "{{ nova_cell_services['nova-conductor'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
command: bash -c 'sudo -E kolla_set_configs && nova-manage cell_v2 create_cell{% if nova_cell_name %} --name {{ nova_cell_name }}{% endif %}'
common_options: "{{ docker_common_options }}"
@@ -16,7 +16,7 @@
labels:
BOOTSTRAP:
name: "create_cell_nova"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ nova_cell_bootstrap_default_volumes + nova_cell_bootstrap_extra_volumes }}"
register: nova_cell_create
changed_when:
@@ -33,7 +33,7 @@
nova_cell_database_url: "mysql+pymysql://{{ nova_cell_database_user }}:{{ nova_cell_database_password }}@{{ nova_cell_database_address | put_address_in_context('url') }}:{{ nova_cell_database_port }}/{{ nova_cell_database_name }}" # noqa 204
nova_conductor: "{{ nova_cell_services['nova-conductor'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
command: "bash -c 'sudo -E kolla_set_configs && nova-manage cell_v2 update_cell --cell_uuid {{ nova_cell_settings.cell_uuid }}'"
common_options: "{{ docker_common_options }}"
@@ -42,7 +42,7 @@
labels:
BOOTSTRAP:
name: "create_cell_nova"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ nova_cell_bootstrap_default_volumes + nova_cell_bootstrap_extra_volumes }}"
register: nova_cell_updated
changed_when:
diff --git a/ansible/roles/nova-cell/tasks/deploy.yml b/ansible/roles/nova-cell/tasks/deploy.yml
index beecc7d448..adb73caab2 100644
--- a/ansible/roles/nova-cell/tasks/deploy.yml
+++ b/ansible/roles/nova-cell/tasks/deploy.yml
@@ -35,3 +35,5 @@
# and there is a cell conductor in the inventory to delegate to.
- all_computes_in_batch | length > 0
- groups[nova_cell_conductor_group] | length > 0
+
+- import_tasks: post-config.yml
diff --git a/ansible/roles/nova-cell/tasks/external_ceph.yml b/ansible/roles/nova-cell/tasks/external_ceph.yml
index 07324e99c1..4bf94c44d4 100644
--- a/ansible/roles/nova-cell/tasks/external_ceph.yml
+++ b/ansible/roles/nova-cell/tasks/external_ceph.yml
@@ -1,9 +1,13 @@
---
- name: Check nova keyring file
+ vars:
+ keyring: "{{ nova_cell_ceph_backend['cluster'] }}.client.{{ nova_cell_ceph_backend['vms']['user'] }}.keyring"
+ paths:
+ - "{{ node_custom_config }}/nova/{{ inventory_hostname }}/{{ keyring }}"
+ - "{{ node_custom_config }}/nova/{{ keyring }}"
stat:
- path: "{{ node_custom_config }}/nova/{{ ceph_nova_keyring }}"
+ path: "{{ lookup('first_found', paths) }}"
delegate_to: localhost
- run_once: True
register: nova_cephx_keyring_file
failed_when: not nova_cephx_keyring_file.stat.exists
when:
@@ -11,10 +15,14 @@
- external_ceph_cephx_enabled | bool
- name: Check cinder keyring file
+ vars:
+ keyring: "{{ nova_cell_ceph_backend['cluster'] }}.client.{{ nova_cell_ceph_backend['volumes']['user'] }}.keyring"
+ paths:
+ - "{{ node_custom_config }}/nova/{{ inventory_hostname }}/{{ keyring }}"
+ - "{{ node_custom_config }}/nova/{{ keyring }}"
stat:
- path: "{{ node_custom_config }}/nova/{{ ceph_cinder_keyring }}"
+ path: "{{ lookup('first_found', paths) }}"
delegate_to: localhost
- run_once: True
register: cinder_cephx_keyring_file
failed_when: not cinder_cephx_keyring_file.stat.exists
when:
@@ -26,7 +34,6 @@
nova_cephx_raw_key:
"{{ lookup('template', nova_cephx_keyring_file.stat.path) | regex_search('key\\s*=.*$', multiline=True) | regex_replace('key\\s*=\\s*(.*)\\s*', '\\1') }}"
changed_when: false
- run_once: True
when:
- nova_backend == "rbd"
- external_ceph_cephx_enabled | bool
@@ -34,9 +41,8 @@
- name: Extract cinder key from file
set_fact:
cinder_cephx_raw_key:
- "{{ lookup('file', cinder_cephx_keyring_file.stat.path) | regex_search('key\\s*=.*$', multiline=True) | regex_replace('key\\s*=\\s*(.*)\\s*', '\\1') }}"
+ "{{ lookup('template', cinder_cephx_keyring_file.stat.path) | regex_search('key\\s*=.*$', multiline=True) | regex_replace('key\\s*=\\s*(.*)\\s*', '\\1') }}"
changed_when: false
- run_once: True
when:
- cinder_backend_ceph | bool
- external_ceph_cephx_enabled | bool
@@ -55,8 +61,6 @@
- inventory_hostname in groups[nova_cell_compute_group]
- nova_backend == "rbd"
- external_ceph_cephx_enabled | bool
- notify:
- - Restart {{ item }} container
- name: Copy over ceph cinder keyring file
template:
@@ -72,14 +76,15 @@
- inventory_hostname in groups[nova_cell_compute_group]
- nova_backend == "rbd"
- external_ceph_cephx_enabled | bool
- notify:
- - Restart {{ item }} container
- name: Copy over ceph.conf
vars:
service: "{{ nova_cell_services[item] }}"
+ paths:
+ - "{{ node_custom_config }}/nova/{{ inventory_hostname }}/{{ nova_cell_ceph_backend['cluster'] }}.conf"
+ - "{{ node_custom_config }}/nova/{{ nova_cell_ceph_backend['cluster'] }}.conf"
template:
- src: "{{ node_custom_config }}/nova/ceph.conf"
+ src: "{{ lookup('first_found', paths) }}"
dest: "{{ node_config_directory }}/{{ item }}/"
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
@@ -89,14 +94,15 @@
- nova-compute
- nova-libvirt
when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ - service | service_enabled_and_mapped_to_host
- nova_backend == "rbd"
- notify:
- - Restart {{ item }} container
- block:
- name: Ensure /etc/ceph directory exists (host libvirt)
+ vars:
+ paths:
+ - "{{ node_custom_config }}/nova/{{ inventory_hostname }}/{{ nova_cell_ceph_backend['cluster'] }}.conf"
+ - "{{ node_custom_config }}/nova/{{ nova_cell_ceph_backend['cluster'] }}.conf"
file:
path: "/etc/ceph/"
state: "directory"
@@ -106,9 +112,13 @@
become: true
- name: Copy over ceph.conf (host libvirt)
+ vars:
+ paths:
+ - "{{ node_custom_config }}/nova/{{ inventory_hostname }}/{{ nova_cell_ceph_backend['cluster'] }}.conf"
+ - "{{ node_custom_config }}/nova/{{ nova_cell_ceph_backend['cluster'] }}.conf"
template:
- src: "{{ node_custom_config }}/nova/ceph.conf"
- dest: "/etc/ceph/ceph.conf"
+ src: "{{ lookup('first_found', paths) }}"
+ dest: "/etc/ceph/{{ nova_cell_ceph_backend['cluster'] }}.conf"
owner: "root"
group: "root"
mode: "0644"
@@ -129,8 +139,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[service.group]
+ when: service | service_enabled_and_mapped_to_host
- name: Pushing nova secret xml for libvirt
vars:
@@ -142,15 +151,15 @@
group: "{{ config_owner_group }}"
mode: "0600"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - item.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_items:
- uuid: "{{ rbd_secret_uuid }}"
- name: "client.nova secret"
+ name: "ceph-ephemeral-nova"
+ desc: "Ceph Client Secret for Ephemeral Storage (Nova)"
enabled: "{{ nova_backend == 'rbd' }}"
- uuid: "{{ cinder_rbd_secret_uuid }}"
- name: "client.cinder secret"
+ name: "ceph-persistent-cinder"
+ desc: "Ceph Client Secret for Persistent Storage (Cinder)"
enabled: "{{ cinder_backend_ceph }}"
notify: "{{ libvirt_restart_handlers }}"
@@ -165,7 +174,7 @@
mode: "0600"
become: true
when:
- - inventory_hostname in groups[service.group]
+ - service | service_enabled_and_mapped_to_host
- item.enabled | bool
- external_ceph_cephx_enabled | bool
with_items:
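
The keyring and ceph.conf lookups above now resolve a host-specific override before the shared default. A small sketch of the `first_found` order, assuming `node_custom_config` points at the usual operator config tree:

    import os

    def find_ceph_config_file(node_custom_config, inventory_hostname, filename):
        """Mirror the first_found lookup: prefer a per-host override under
        nova/<hostname>/, then fall back to the shared nova/ copy."""
        candidates = [
            os.path.join(node_custom_config, 'nova', inventory_hostname, filename),
            os.path.join(node_custom_config, 'nova', filename),
        ]
        for path in candidates:
            if os.path.isfile(path):
                return path
        raise FileNotFoundError(filename)

    # e.g. find_ceph_config_file('/etc/kolla/config', 'compute01',
    #                            'ceph.client.nova.keyring')
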
diff --git a/ansible/roles/nova-cell/tasks/get_cell_settings.yml b/ansible/roles/nova-cell/tasks/get_cell_settings.yml
index 120b515ca1..d98f30cdbb 100644
--- a/ansible/roles/nova-cell/tasks/get_cell_settings.yml
+++ b/ansible/roles/nova-cell/tasks/get_cell_settings.yml
@@ -3,7 +3,7 @@
vars:
nova_conductor: "{{ nova_cell_services['nova-conductor'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
command: bash -c 'sudo -E kolla_set_configs && nova-manage cell_v2 list_cells --verbose'
common_options: "{{ docker_common_options }}"
@@ -12,12 +12,13 @@
labels:
BOOTSTRAP:
name: "nova_list_cells"
- restart_policy: no
- volumes: "{{ nova_cell_bootstrap_default_volumes + nova_cell_bootstrap_extra_volumes }}"
+ restart_policy: oneshot
+ volumes: "{{ nova_cell_get_settings_volumes }}"
register: existing_cells_list
changed_when: false
failed_when:
- existing_cells_list.rc != 0
+ check_mode: false
- name: Extract current cell settings from list
vars:
diff --git a/ansible/roles/nova-cell/tasks/libvirt-cleanup.yml b/ansible/roles/nova-cell/tasks/libvirt-cleanup.yml
index 1e9c24cde2..2a61585e81 100644
--- a/ansible/roles/nova-cell/tasks/libvirt-cleanup.yml
+++ b/ansible/roles/nova-cell/tasks/libvirt-cleanup.yml
@@ -9,6 +9,8 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- nova_libvirt
register: container_facts
@@ -35,14 +37,16 @@
- name: Stop and remove nova_libvirt container
become: true
- kolla_docker:
+ kolla_container:
+ common_options: "{{ docker_common_options }}"
action: "stop_and_remove_container"
name: nova_libvirt
when: container_facts['nova_libvirt'] is defined
- name: Remove nova_libvirt Docker volumes
become: true
- kolla_docker:
+ kolla_container:
+ common_options: "{{ docker_common_options }}"
action: "remove_volume"
name: "{{ item }}"
loop:
diff --git a/ansible/roles/nova-cell/tasks/loadbalancer.yml b/ansible/roles/nova-cell/tasks/loadbalancer.yml
index 354fb7003a..16b6e53cbe 100644
--- a/ansible/roles/nova-cell/tasks/loadbalancer.yml
+++ b/ansible/roles/nova-cell/tasks/loadbalancer.yml
@@ -20,7 +20,7 @@
- import_tasks: proxy_loadbalancer.yml
vars:
- # Default is necessary because this play may not be targetting the hosts in
+ # Default is necessary because this play may not be targeting the hosts in
# the nova-novncproxy group, and therefore they would not have role
# defaults defined. If we put these variables in group_vars, then they
# cannot be overridden by the inventory.
@@ -48,7 +48,8 @@
enabled: "{{ hostvars[groups[cell_proxy_group][0]]['nova_console'] == 'novnc' }}"
mode: "http"
external: true
- port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_novncproxy_port'] }}"
+ external_fqdn: "{{ hostvars[groups[cell_proxy_group][0]]['nova_novncproxy_fqdn'] }}"
+ port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_novncproxy_public_port'] }}"
listen_port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_novncproxy_listen_port'] }}"
backend_http_extra:
- "timeout tunnel 1h"
@@ -56,7 +57,7 @@
- import_tasks: proxy_loadbalancer.yml
vars:
- # Default is necessary because this play may not be targetting the hosts in
+ # Default is necessary because this play may not be targeting the hosts in
# the nova-spicehtml5proxy group, and therefore they would not have role
# defaults defined. If we put these variables in group_vars, then they
# cannot be overridden by the inventory.
@@ -84,7 +85,8 @@
enabled: "{{ hostvars[groups[cell_proxy_group][0]]['nova_console'] == 'spice' }}"
mode: "http"
external: true
- port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_spicehtml5proxy_port'] }}"
+ external_fqdn: "{{ hostvars[groups[cell_proxy_group][0]]['nova_spicehtml5proxy_fqdn'] }}"
+ port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_spicehtml5proxy_public_port'] }}"
listen_port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_spicehtml5proxy_listen_port'] }}"
backend_http_extra:
- "timeout tunnel {{ haproxy_nova_spicehtml5_proxy_tunnel_timeout }}"
@@ -92,7 +94,7 @@
- import_tasks: proxy_loadbalancer.yml
vars:
- # Default is necessary because this play may not be targetting the hosts in
+ # Default is necessary because this play may not be targeting the hosts in
# the nova-serialproxy group, and therefore they would not have role
# defaults defined. If we put these variables in group_vars, then they
# cannot be overridden by the inventory.
@@ -120,7 +122,8 @@
enabled: "{{ hostvars[groups[cell_proxy_group][0]]['enable_nova_serialconsole_proxy'] | bool }}"
mode: "http"
external: true
- port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_serialproxy_port'] }}"
+ external_fqdn: "{{ hostvars[groups[cell_proxy_group][0]]['nova_serialproxy_fqdn'] }}"
+ port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_serialproxy_public_port'] }}"
listen_port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_serialproxy_listen_port'] }}"
backend_http_extra:
- "timeout tunnel {{ haproxy_nova_serialconsole_proxy_tunnel_timeout }}"
diff --git a/ansible/roles/nova-cell/tasks/online_data_migrations.yml b/ansible/roles/nova-cell/tasks/online_data_migrations.yml
index a9a3260edf..9240c14e0d 100644
--- a/ansible/roles/nova-cell/tasks/online_data_migrations.yml
+++ b/ansible/roles/nova-cell/tasks/online_data_migrations.yml
@@ -3,7 +3,7 @@
vars:
nova_conductor: "{{ nova_cell_services['nova-conductor'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -14,6 +14,6 @@
labels:
BOOTSTRAP:
name: "nova_cell_online_data_migrations"
- restart_policy: "no"
+ restart_policy: "oneshot"
volumes: "{{ nova_cell_bootstrap_default_volumes + nova_cell_bootstrap_extra_volumes }}"
when: inventory_hostname == groups[nova_cell_conductor_group][0]
diff --git a/ansible/roles/nova-cell/tasks/post-config.yml b/ansible/roles/nova-cell/tasks/post-config.yml
new file mode 100644
index 0000000000..804b5da142
--- /dev/null
+++ b/ansible/roles/nova-cell/tasks/post-config.yml
@@ -0,0 +1,8 @@
+---
+# TODO(kevko) Remove in E cycle
+- name: Remove old nova_libvirt_secrets container volume
+ become: true
+ kolla_container:
+ container_engine: "{{ kolla_container_engine }}"
+ action: "remove_volume"
+ name: nova_libvirt_secrets
diff --git a/ansible/roles/nova-cell/tasks/precheck.yml b/ansible/roles/nova-cell/tasks/precheck.yml
index e4aef860a5..410debff00 100644
--- a/ansible/roles/nova-cell/tasks/precheck.yml
+++ b/ansible/roles/nova-cell/tasks/precheck.yml
@@ -8,12 +8,15 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- nova_libvirt
- nova_novncproxy
- nova_serialproxy
- nova_spicehtml5proxy
- nova_ssh
+ check_mode: false
register: container_facts
- name: Checking available compute nodes in inventory
@@ -28,7 +31,7 @@
- name: Checking free port for Nova NoVNC Proxy
vars:
- nova_novncproxy: "{{ nova_cell_services['nova-novncproxy'] }}"
+ service: "{{ nova_cell_services['nova-novncproxy'] }}"
wait_for:
host: "{{ api_interface_address }}"
port: "{{ nova_novncproxy_listen_port }}"
@@ -37,12 +40,11 @@
state: stopped
when:
- container_facts['nova_novncproxy'] is not defined
- - nova_novncproxy.enabled | bool
- - inventory_hostname in groups[nova_novncproxy.group]
+ - service | service_enabled_and_mapped_to_host
- name: Checking free port for Nova Serial Proxy
vars:
- nova_serialproxy: "{{ nova_cell_services['nova-serialproxy'] }}"
+ service: "{{ nova_cell_services['nova-serialproxy'] }}"
wait_for:
host: "{{ api_interface_address }}"
port: "{{ nova_serialproxy_listen_port }}"
@@ -51,12 +53,11 @@
state: stopped
when:
- container_facts['nova_serialproxy'] is not defined
- - nova_serialproxy.enabled | bool
- - inventory_hostname in groups[nova_serialproxy.group]
+ - service | service_enabled_and_mapped_to_host
- name: Checking free port for Nova Spice HTML5 Proxy
vars:
- nova_spicehtml5proxy: "{{ nova_cell_services['nova-spicehtml5proxy'] }}"
+ service: "{{ nova_cell_services['nova-spicehtml5proxy'] }}"
wait_for:
host: "{{ api_interface_address }}"
port: "{{ nova_spicehtml5proxy_listen_port }}"
@@ -65,12 +66,11 @@
state: stopped
when:
- container_facts['nova_spicehtml5proxy'] is not defined
- - nova_spicehtml5proxy.enabled | bool
- - inventory_hostname in groups[nova_spicehtml5proxy.group]
+ - service | service_enabled_and_mapped_to_host
- name: Checking free port for Nova SSH (API interface)
vars:
- nova_ssh: "{{ nova_cell_services['nova-ssh'] }}"
+ service: "{{ nova_cell_services['nova-ssh'] }}"
wait_for:
host: "{{ api_interface_address }}"
port: "{{ nova_ssh_port }}"
@@ -79,12 +79,11 @@
state: stopped
when:
- container_facts['nova_ssh'] is not defined
- - nova_ssh.enabled | bool
- - inventory_hostname in groups[nova_ssh.group]
+ - service | service_enabled_and_mapped_to_host
- name: Checking free port for Nova SSH (migration interface)
vars:
- nova_ssh: "{{ nova_cell_services['nova-ssh'] }}"
+ service: "{{ nova_cell_services['nova-ssh'] }}"
wait_for:
host: "{{ migration_interface_address }}"
port: "{{ nova_ssh_port }}"
@@ -94,12 +93,11 @@
when:
- migration_interface_address != api_interface_address
- container_facts['nova_ssh'] is not defined
- - nova_ssh.enabled | bool
- - inventory_hostname in groups[nova_ssh.group]
+ - service | service_enabled_and_mapped_to_host
- name: Checking free port for Nova Libvirt
vars:
- nova_libvirt: "{{ nova_cell_services['nova-libvirt'] }}"
+ service: "{{ nova_cell_services['nova-libvirt'] }}"
wait_for:
host: "{{ api_interface_address }}"
port: "{{ nova_libvirt_port }}"
@@ -108,19 +106,17 @@
state: stopped
when:
- container_facts['nova_libvirt'] is not defined
- - nova_libvirt.enabled | bool
- - inventory_hostname in groups[nova_libvirt.group]
+ - service | service_enabled_and_mapped_to_host
- name: Checking that host libvirt is not running
vars:
- nova_libvirt: "{{ nova_cell_services['nova-libvirt'] }}"
+ service: "{{ nova_cell_services['nova-libvirt'] }}"
stat: path=/var/run/libvirt/libvirt-sock
register: result
failed_when: result.stat.exists
when:
- container_facts['nova_libvirt'] is not defined
- - nova_libvirt.enabled | bool
- - inventory_hostname in groups[nova_libvirt.group]
+ - service | service_enabled_and_mapped_to_host
- name: Checking that nova_libvirt container is not running
vars:
diff --git a/ansible/roles/nova-cell/tasks/reload.yml b/ansible/roles/nova-cell/tasks/reload.yml
index 1644cebef4..5724effdf2 100644
--- a/ansible/roles/nova-cell/tasks/reload.yml
+++ b/ansible/roles/nova-cell/tasks/reload.yml
@@ -7,7 +7,7 @@
# Speaking to the nova team, this seems to be an issue in oslo.service,
# with a fix proposed here: https://review.openstack.org/#/c/641907.
# This issue also seems to affect the proxy services, which exit non-zero in
-# reponse to a SIGHUP, so restart those too.
+# response to a SIGHUP, so restart those too.
# The issue actually affects all nova services, since they remain with RPC
# version pinned to the previous release:
# https://bugs.launchpad.net/kolla-ansible/+bug/1833069.
@@ -22,7 +22,7 @@
vars:
service: "{{ nova_cell_services[item] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -36,6 +36,5 @@
healthcheck: "{{ service.healthcheck | default(omit) }}"
when:
- kolla_action == 'upgrade'
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ - service | service_enabled_and_mapped_to_host
with_items: "{{ nova_cell_services_require_nova_conf }}"
diff --git a/ansible/roles/nova-cell/tasks/rolling_upgrade.yml b/ansible/roles/nova-cell/tasks/rolling_upgrade.yml
index b1637ad8a2..928e8763ab 100644
--- a/ansible/roles/nova-cell/tasks/rolling_upgrade.yml
+++ b/ansible/roles/nova-cell/tasks/rolling_upgrade.yml
@@ -14,3 +14,5 @@
- name: Flush handlers
meta: flush_handlers
+
+- import_tasks: post-config.yml
diff --git a/ansible/roles/nova-cell/tasks/upgrade.yml b/ansible/roles/nova-cell/tasks/upgrade.yml
index 405ab8968a..b9a8c3556b 100644
--- a/ansible/roles/nova-cell/tasks/upgrade.yml
+++ b/ansible/roles/nova-cell/tasks/upgrade.yml
@@ -1,16 +1,14 @@
---
- name: Stopping nova cell services
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "{{ item.value.container_name }}"
- with_dict: "{{ nova_cell_services }}"
+ with_dict: "{{ nova_cell_services | select_services_enabled_and_mapped_to_host }}"
when:
- "'nova-compute' not in item.key"
- item.key in nova_cell_services_require_nova_conf
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- nova_safety_upgrade | bool
- import_tasks: rolling_upgrade.yml
diff --git a/ansible/roles/nova-cell/tasks/wait_discover_computes.yml b/ansible/roles/nova-cell/tasks/wait_discover_computes.yml
index 1729eed390..1603af5dea 100644
--- a/ansible/roles/nova-cell/tasks/wait_discover_computes.yml
+++ b/ansible/roles/nova-cell/tasks/wait_discover_computes.yml
@@ -11,17 +11,19 @@
{{ kolla_container_engine }} exec kolla_toolbox openstack
--os-interface {{ openstack_interface }}
--os-auth-url {{ openstack_auth.auth_url }}
+ --os-project-domain-name {{ openstack_auth.domain_name }}
+ --os-project-name {{ openstack_auth.project_name }}
--os-username {{ openstack_auth.username }}
--os-password {{ openstack_auth.password }}
--os-identity-api-version 3
--os-user-domain-name {{ openstack_auth.user_domain_name }}
- --os-system-scope {{ openstack_auth.system_scope }}
--os-region-name {{ openstack_region_name }}
{% if openstack_cacert != '' %}--os-cacert {{ openstack_cacert }}{% endif %}
compute service list --format json --column Host --service nova-compute
register: nova_compute_services
changed_when: false
failed_when: false
+ check_mode: false
retries: 20
delay: 10
until:
@@ -77,7 +79,7 @@
# configure for [DEFAULT] host in nova.conf.
ironic_compute_service_hosts: >-
{{ ironic_computes_in_batch |
- map('extract', hostvars, ['ansible_facts', 'hostname']) |
+ map('extract', hostvars) | json_query('[].nova_compute_ironic_custom_host || [].ansible_facts.hostname') |
map('regex_replace', '^(.*)$', '\1-ironic') |
list }}
expected_compute_service_hosts: "{{ virt_compute_service_hosts + ironic_compute_service_hosts }}"
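
For reference, the per-host fallback that the matching nova.conf change later in this diff applies (`nova_compute_ironic_custom_host | default(ansible_facts.hostname)`), written out in plain Python as a sketch of the intended result of the json_query expression above:

    def expected_ironic_service_hosts(ironic_computes, hostvars):
        """Build the expected nova-compute-ironic service host names:
        prefer nova_compute_ironic_custom_host per host, fall back to the
        discovered hostname, then append '-ironic'."""
        hosts = []
        for compute in ironic_computes:
            facts = hostvars[compute]
            name = (facts.get('nova_compute_ironic_custom_host')
                    or facts['ansible_facts']['hostname'])
            hosts.append(name + '-ironic')
        return hosts
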
diff --git a/ansible/roles/nova-cell/templates/hostnqn.j2 b/ansible/roles/nova-cell/templates/hostnqn.j2
new file mode 100644
index 0000000000..6f10135974
--- /dev/null
+++ b/ansible/roles/nova-cell/templates/hostnqn.j2
@@ -0,0 +1 @@
+{{ hostnqn }}
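
The rendered value is deterministic: Ansible's `to_uuid` filter is a version-5 UUID of its input under Ansible's fixed namespace, so the same host always gets the same NQN across runs. A quick sketch (the namespace constant is an assumption worth verifying against the installed Ansible release):

    import uuid

    # Namespace used by Ansible's to_uuid filter (assumed).
    ANSIBLE_UUID_NAMESPACE = uuid.UUID('361e6d51-faec-444a-9079-341386da8e2e')

    def hostnqn(hostname: str) -> str:
        """Reproduce templates/hostnqn.j2 for a given hostname."""
        return ('nqn.2014-08.org.nvmexpress:uuid:'
                + str(uuid.uuid5(ANSIBLE_UUID_NAMESPACE, hostname)))

    print(hostnqn('compute01'))
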
diff --git a/ansible/roles/nova-cell/templates/nova-compute-ironic.json.j2 b/ansible/roles/nova-cell/templates/nova-compute-ironic.json.j2
index d41811fc70..2385e402de 100644
--- a/ansible/roles/nova-cell/templates/nova-compute-ironic.json.j2
+++ b/ansible/roles/nova-cell/templates/nova-compute-ironic.json.j2
@@ -12,6 +12,18 @@
"dest": "/etc/nova/{{ nova_policy_file }}",
"owner": "nova",
"perm": "0600"
+ }{% endif %}{% if vendordata_file_path is defined %},
+ {
+ "source": "{{ container_config_directory }}/vendordata.json",
+ "dest": "/etc/nova/vendordata.json",
+ "owner": "nova",
+ "perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/nova-cell/templates/nova-compute.json.j2 b/ansible/roles/nova-cell/templates/nova-compute.json.j2
index 959c5fed69..dc33c3b81d 100644
--- a/ansible/roles/nova-cell/templates/nova-compute.json.j2
+++ b/ansible/roles/nova-cell/templates/nova-compute.json.j2
@@ -14,14 +14,14 @@
"perm": "0600"
}{% endif %}{% if nova_backend == "rbd" %},
{
- "source": "{{ container_config_directory }}/{{ ceph_nova_keyring }}",
- "dest": "/etc/ceph/{{ ceph_nova_keyring }}",
+ "source": "{{ container_config_directory }}/{{ nova_cell_ceph_backend['cluster'] }}.client.{{ nova_cell_ceph_backend['vms']['user'] }}.keyring",
+ "dest": "/etc/ceph/{{ nova_cell_ceph_backend['cluster'] }}.client.{{ nova_cell_ceph_backend['vms']['user'] }}.keyring",
"owner": "nova",
"perm": "0600"
},
{
- "source": "{{ container_config_directory }}/ceph.conf",
- "dest": "/etc/ceph/ceph.conf",
+ "source": "{{ container_config_directory }}/{{ nova_cell_ceph_backend['cluster'] }}.conf",
+ "dest": "/etc/ceph/{{ nova_cell_ceph_backend['cluster'] }}.conf",
"owner": "nova",
"perm": "0600"
}{% endif %}{% if nova_compute_virt_type == "vmware" and not vmware_vcenter_insecure | bool %},
@@ -55,6 +55,12 @@
"owner": "nova",
"perm": "0600",
"optional": true
+ },
+ {
+ "source": "{{ container_config_directory }}/hostnqn",
+ "dest": "/etc/nvme/hostnqn",
+ "owner": "root",
+ "perm": "0644"
}{% if nova_compute_virt_type in ['kvm', 'qemu'] and libvirt_enable_sasl | bool %},
{
"source": "{{ container_config_directory }}/auth.conf",
@@ -67,6 +73,18 @@
"dest": "/etc/nova/provider_config/provider_config.yaml",
"owner": "nova",
"perm": "0600"
+ }{% endif %}{% if vendordata_file_path is defined %},
+ {
+ "source": "{{ container_config_directory }}/vendordata.json",
+ "dest": "/etc/nova/vendordata.json",
+ "owner": "nova",
+ "perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/nova-cell/templates/nova-conductor.json.j2 b/ansible/roles/nova-cell/templates/nova-conductor.json.j2
index 6a7328713d..92925888f5 100644
--- a/ansible/roles/nova-cell/templates/nova-conductor.json.j2
+++ b/ansible/roles/nova-cell/templates/nova-conductor.json.j2
@@ -6,7 +6,13 @@
"dest": "/etc/nova/nova.conf",
"owner": "nova",
"perm": "0600"
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/nova-cell/templates/nova-libvirt.json.j2 b/ansible/roles/nova-cell/templates/nova-libvirt.json.j2
index d2ddc9e6f9..e81413b358 100644
--- a/ansible/roles/nova-cell/templates/nova-libvirt.json.j2
+++ b/ansible/roles/nova-cell/templates/nova-libvirt.json.j2
@@ -51,8 +51,8 @@
"merge": true
}{% endif %}{% if nova_backend == "rbd" %},
{
- "source": "{{ container_config_directory }}/ceph.conf",
- "dest": "/etc/ceph/ceph.conf",
+ "source": "{{ container_config_directory }}/{{ nova_cell_ceph_backend['cluster'] }}.conf",
+ "dest": "/etc/ceph/{{ nova_cell_ceph_backend['cluster'] }}.conf",
"owner": "nova",
"perm": "0600"
}{% endif %}{% if libvirt_enable_sasl | bool %},
@@ -67,6 +67,12 @@
"dest": "/root/.config/libvirt/auth.conf",
"owner": "root",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
]
}
diff --git a/ansible/roles/nova-cell/templates/nova-novncproxy.json.j2 b/ansible/roles/nova-cell/templates/nova-novncproxy.json.j2
index d34efb3d69..8ff5b39e64 100644
--- a/ansible/roles/nova-cell/templates/nova-novncproxy.json.j2
+++ b/ansible/roles/nova-cell/templates/nova-novncproxy.json.j2
@@ -6,7 +6,13 @@
"dest": "/etc/nova/nova.conf",
"owner": "nova",
"perm": "0600"
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/nova-cell/templates/nova-serialproxy.json.j2 b/ansible/roles/nova-cell/templates/nova-serialproxy.json.j2
index 3aac725913..867f9615fb 100644
--- a/ansible/roles/nova-cell/templates/nova-serialproxy.json.j2
+++ b/ansible/roles/nova-cell/templates/nova-serialproxy.json.j2
@@ -6,7 +6,13 @@
"dest": "/etc/nova/nova.conf",
"owner": "nova",
"perm": "0600"
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/nova-cell/templates/nova-spicehtml5proxy.json.j2 b/ansible/roles/nova-cell/templates/nova-spicehtml5proxy.json.j2
index e12354bf43..5585803fea 100644
--- a/ansible/roles/nova-cell/templates/nova-spicehtml5proxy.json.j2
+++ b/ansible/roles/nova-cell/templates/nova-spicehtml5proxy.json.j2
@@ -6,7 +6,13 @@
"dest": "/etc/nova/nova.conf",
"owner": "nova",
"perm": "0600"
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/nova-cell/templates/nova-ssh.json.j2 b/ansible/roles/nova-cell/templates/nova-ssh.json.j2
index f31f6d95e0..be378238c9 100644
--- a/ansible/roles/nova-cell/templates/nova-ssh.json.j2
+++ b/ansible/roles/nova-cell/templates/nova-ssh.json.j2
@@ -24,6 +24,12 @@
"dest": "/var/lib/nova/.ssh/authorized_keys",
"owner": "nova",
"perm": "0600"
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/nova-cell/templates/nova.conf.d/libvirt.conf.j2 b/ansible/roles/nova-cell/templates/nova.conf.d/libvirt.conf.j2
index 69d8f9363b..7c23ff8306 100644
--- a/ansible/roles/nova-cell/templates/nova.conf.d/libvirt.conf.j2
+++ b/ansible/roles/nova-cell/templates/nova.conf.d/libvirt.conf.j2
@@ -9,9 +9,9 @@ live_migration_inbound_addr = "{{ migration_interface_address }}"
{% endif %}
{% if nova_backend == "rbd" %}
images_type = rbd
-images_rbd_pool = {{ ceph_nova_pool_name }}
-images_rbd_ceph_conf = /etc/ceph/ceph.conf
-rbd_user = {{ ceph_nova_user }}
+images_rbd_pool = {{ nova_cell_ceph_backend['vms']['pool'] }}
+images_rbd_ceph_conf = /etc/ceph/{{ nova_cell_ceph_backend['cluster'] }}.conf
+rbd_user = {{ nova_cell_ceph_backend['vms']['user'] }}
disk_cachemodes="network=writeback"
{% if nova_hw_disk_discard != '' %}
hw_disk_discard = {{ nova_hw_disk_discard }}
@@ -27,3 +27,6 @@ cpu_mode = {{ nova_libvirt_cpu_mode }}
{% if enable_multipathd | bool %}
volume_use_multipath = True
{% endif %}
+num_pcie_ports = 16
+[workarounds]
+skip_cpu_compare_on_dest = True
diff --git a/ansible/roles/nova-cell/templates/nova.conf.j2 b/ansible/roles/nova-cell/templates/nova.conf.j2
index 114480def4..47f3d1e21e 100644
--- a/ansible/roles/nova-cell/templates/nova.conf.j2
+++ b/ansible/roles/nova-cell/templates/nova.conf.j2
@@ -9,7 +9,7 @@ state_path = /var/lib/nova
allow_resize_to_same_host = true
{% if service_name == "nova-compute-ironic" %}
-host={{ ansible_facts.hostname }}-ironic
+host={{ nova_compute_ironic_custom_host | default(ansible_facts.hostname) }}-ironic
log_file = /var/log/kolla/nova/nova-compute-ironic.log
compute_driver = ironic.IronicDriver
ram_allocation_ratio = 1.0
@@ -26,7 +26,7 @@ compute_driver = libvirt.LibvirtDriver
# Though my_ip is not used directly, lots of other variables use $my_ip
my_ip = {{ api_interface_address }}
-{% if enable_ceilometer | bool or enable_designate | bool %}
+{% if enable_ceilometer | bool %}
instance_usage_audit = True
instance_usage_audit_period = hour
{% if enable_watcher | bool %}
@@ -36,6 +36,11 @@ compute_monitors=nova.compute.monitors.cpu.virt_driver
transport_url = {{ nova_cell_rpc_transport_url }}
+{% if vendordata_file_path is defined and (service_name == 'nova-compute' or service_name == 'nova-compute-ironic') %}
+[api]
+vendordata_jsonfile_path = /etc/nova/vendordata.json
+{% endif %}
+
[conductor]
workers = {{ nova_cell_conductor_workers }}
@@ -49,7 +54,7 @@ novncproxy_port = {{ nova_novncproxy_listen_port }}
server_listen = {{ api_interface_address }}
server_proxyclient_address = {{ api_interface_address }}
{% if inventory_hostname in groups[nova_cell_compute_group] %}
-novncproxy_base_url = {{ public_protocol }}://{{ nova_novncproxy_fqdn | put_address_in_context('url') }}:{{ nova_novncproxy_port }}/vnc_lite.html
+novncproxy_base_url = {{ nova_novncproxy_fqdn | kolla_url(public_protocol, nova_novncproxy_public_port, '/vnc_lite.html') }}
{% endif %}
{% endif %}
{% elif nova_console == 'spice' %}
@@ -61,7 +66,7 @@ enabled = true
server_listen = {{ api_interface_address }}
server_proxyclient_address = {{ api_interface_address }}
{% if inventory_hostname in groups[nova_cell_compute_group] %}
-html5proxy_base_url = {{ public_protocol }}://{{ nova_spicehtml5proxy_fqdn | put_address_in_context('url') }}:{{ nova_spicehtml5proxy_port }}/spice_auto.html
+html5proxy_base_url = {{ nova_spicehtml5proxy_fqdn | kolla_url(public_protocol, nova_spicehtml5proxy_public_port, '/spice_auto.html') }}
{% endif %}
html5proxy_host = {{ api_interface_address }}
html5proxy_port = {{ nova_spicehtml5proxy_listen_port }}
@@ -74,7 +79,7 @@ enabled = false
{% if enable_nova_serialconsole_proxy | bool %}
[serial_console]
enabled = true
-base_url = {{ nova_serialproxy_protocol }}://{{ nova_serialproxy_fqdn | put_address_in_context('url') }}:{{ nova_serialproxy_port }}/
+base_url = {{ nova_serialproxy_fqdn | kolla_url(nova_serialproxy_protocol, nova_serialproxy_public_port) }}/
serialproxy_host = {{ api_interface_address }}
serialproxy_port = {{ nova_serialproxy_listen_port }}
proxyclient_address = {{ api_interface_address }}
@@ -184,11 +189,18 @@ topics = {{ nova_enabled_notification_topics | map(attribute='name') | join(',')
driver = noop
{% endif %}
-{% if om_enable_rabbitmq_tls | bool %}
[oslo_messaging_rabbit]
+heartbeat_in_pthread = false
+{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
{% if service_name in nova_cell_services_require_policy_json and nova_policy_file is defined %}
[oslo_policy]
@@ -201,6 +213,11 @@ helper_command=sudo nova-rootwrap /etc/nova/rootwrap.conf privsep-helper --confi
[guestfs]
debug = {{ nova_logging_debug }}
+{% if service_name == 'nova-conductor' %}
+[scheduler]
+max_attempts = 10
+{% endif %}
+
[placement]
auth_type = password
auth_url = {{ keystone_internal_url }}
@@ -249,3 +266,16 @@ track_instance_changes = false
[pci]
passthrough_whitelist = {{ nova_pci_passthrough_whitelist | to_json }}
{% endif %}
+
+[service_user]
+send_service_user_token = true
+auth_url = {{ keystone_internal_url }}
+auth_type = password
+project_domain_id = {{ default_project_domain_id }}
+user_domain_id = {{ default_user_domain_id }}
+project_name = service
+username = {{ nova_keystone_user }}
+password = {{ nova_keystone_password }}
+cafile = {{ openstack_cacert }}
+region_name = {{ openstack_region_name }}
+valid_interfaces = internal
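
The console base_url changes above swap manual URL assembly for the `kolla_url` filter. A rough approximation of its behaviour as used here; the bracketing of IPv6 literals follows the old `put_address_in_context('url')` form, while the default-port elision is an assumption:

    import ipaddress

    def kolla_url(fqdn, protocol, port, path=''):
        """Approximate the kolla_url filter: bracket IPv6 literals, omit
        the port when it is the protocol default (assumed), append path."""
        try:
            if isinstance(ipaddress.ip_address(fqdn), ipaddress.IPv6Address):
                fqdn = '[%s]' % fqdn
        except ValueError:
            pass  # plain hostname, not an address literal
        if (protocol, int(port)) in (('http', 80), ('https', 443)):
            return '%s://%s%s' % (protocol, fqdn, path)
        return '%s://%s:%s%s' % (protocol, fqdn, port, path)

    # e.g. kolla_url('nova.example.com', 'https', 6080, '/vnc_lite.html')
    #      -> 'https://nova.example.com:6080/vnc_lite.html'
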
diff --git a/ansible/roles/nova-cell/templates/secret.xml.j2 b/ansible/roles/nova-cell/templates/secret.xml.j2
index 9f63543a24..e6d4a9594d 100644
--- a/ansible/roles/nova-cell/templates/secret.xml.j2
+++ b/ansible/roles/nova-cell/templates/secret.xml.j2
@@ -1,5 +1,6 @@
 <secret ephemeral='no' private='no'>
   <uuid>{{ item.uuid }}</uuid>
+  <description>{{ item.desc }}</description>
   <usage type='ceph'>
     <name>{{ item.name }}</name>
   </usage>
PLACEHOLDER
diff --git a/ansible/roles/nova-cell/templates/sshd_config.j2 b/ansible/roles/nova-cell/templates/sshd_config.j2
index d51db02d0d..0f607bc0e6 100644
--- a/ansible/roles/nova-cell/templates/sshd_config.j2
+++ b/ansible/roles/nova-cell/templates/sshd_config.j2
@@ -6,3 +6,8 @@ ListenAddress {{ migration_interface_address }}
SyslogFacility AUTHPRIV
UsePAM yes
+{% if kolla_base_distro in ["centos", "rocky"] %}
+Subsystem sftp /usr/libexec/openssh/sftp-server
+{% elif kolla_base_distro in ["debian", "ubuntu"] %}
+Subsystem sftp /usr/lib/openssh/sftp-server
+{% endif %}
diff --git a/ansible/roles/nova/defaults/main.yml b/ansible/roles/nova/defaults/main.yml
index b6a23e2f9d..cbe554b878 100644
--- a/ansible/roles/nova/defaults/main.yml
+++ b/ansible/roles/nova/defaults/main.yml
@@ -21,9 +21,19 @@ nova_services:
enabled: "{{ enable_nova }}"
mode: "http"
external: true
- port: "{{ nova_api_port }}"
+ external_fqdn: "{{ nova_external_fqdn }}"
+ port: "{{ nova_api_public_port }}"
listen_port: "{{ nova_api_listen_port }}"
tls_backend: "{{ nova_enable_tls_backend }}"
+ nova-metadata:
+ container_name: "nova_metadata"
+ group: "nova-metadata"
+ image: "{{ nova_api_image_full }}"
+ enabled: True
+ volumes: "{{ nova_metadata_default_volumes + nova_metadata_extra_volumes }}"
+ dimensions: "{{ nova_metadata_dimensions }}"
+ healthcheck: "{{ nova_metadata_healthcheck }}"
+ haproxy:
nova_metadata:
enabled: "{{ enable_nova }}"
mode: "http"
@@ -35,6 +45,7 @@ nova_services:
enabled: "{{ nova_enable_external_metadata }}"
mode: "http"
external: true
+ external_fqdn: "{{ nova_metadata_external_fqdn }}"
port: "{{ nova_metadata_port }}"
listen_port: "{{ nova_metadata_listen_port }}"
tls_backend: "{{ nova_enable_tls_backend }}"
@@ -55,6 +66,13 @@ nova_services:
dimensions: "{{ nova_super_conductor_dimensions }}"
healthcheck: "{{ nova_super_conductor_healthcheck }}"
+####################
+# Config Validate
+####################
+nova_config_validation:
+ - generator: "/nova/etc/nova/nova-config-generator.conf"
+ config: "/etc/nova/nova.conf"
+
####################
# Database
####################
@@ -96,19 +114,20 @@ nova_database_shard:
####################
nova_tag: "{{ openstack_tag }}"
-nova_super_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/nova-conductor"
+nova_super_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}nova-conductor"
nova_super_conductor_tag: "{{ nova_tag }}"
nova_super_conductor_image_full: "{{ nova_super_conductor_image }}:{{ nova_super_conductor_tag }}"
-nova_scheduler_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/nova-scheduler"
+nova_scheduler_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}nova-scheduler"
nova_scheduler_tag: "{{ nova_tag }}"
nova_scheduler_image_full: "{{ nova_scheduler_image }}:{{ nova_scheduler_tag }}"
-nova_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/nova-api"
+nova_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}nova-api"
nova_api_tag: "{{ nova_tag }}"
nova_api_image_full: "{{ nova_api_image }}:{{ nova_api_tag }}"
nova_api_dimensions: "{{ default_container_dimensions }}"
+nova_metadata_dimensions: "{{ default_container_dimensions }}"
nova_scheduler_dimensions: "{{ default_container_dimensions }}"
nova_super_conductor_dimensions: "{{ default_container_dimensions }}"
@@ -125,6 +144,19 @@ nova_api_healthcheck:
test: "{% if nova_api_enable_healthchecks | bool %}{{ nova_api_healthcheck_test }}{% else %}NONE{% endif %}"
timeout: "{{ nova_api_healthcheck_timeout }}"
+nova_metadata_enable_healthchecks: "{{ enable_container_healthchecks }}"
+nova_metadata_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
+nova_metadata_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
+nova_metadata_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
+nova_metadata_healthcheck_test: ["CMD-SHELL", "healthcheck_curl {{ 'https' if nova_enable_tls_backend | bool else 'http' }}://{{ api_interface_address | put_address_in_context('url') }}:{{ nova_metadata_listen_port }} "]
+nova_metadata_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
+nova_metadata_healthcheck:
+ interval: "{{ nova_metadata_healthcheck_interval }}"
+ retries: "{{ nova_metadata_healthcheck_retries }}"
+ start_period: "{{ nova_metadata_healthcheck_start_period }}"
+ test: "{% if nova_metadata_enable_healthchecks | bool %}{{ nova_metadata_healthcheck_test }}{% else %}NONE{% endif %}"
+ timeout: "{{ nova_metadata_healthcheck_timeout }}"
+
nova_scheduler_enable_healthchecks: "{{ enable_container_healthchecks }}"
nova_scheduler_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
nova_scheduler_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
@@ -157,29 +189,36 @@ nova_api_default_volumes:
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "/lib/modules:/lib/modules:ro"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/nova:/dev-mode/nova' if nova_dev_mode | bool else '' }}"
+nova_metadata_default_volumes:
+ - "{{ node_config_directory }}/nova-metadata/:{{ container_config_directory }}/:ro"
+ - "/etc/localtime:/etc/localtime:ro"
+ - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
+ - "kolla_logs:/var/log/kolla/"
+ - "{{ kolla_dev_repos_directory ~ '/nova:/dev-mode/nova' if nova_dev_mode | bool else '' }}"
nova_scheduler_default_volumes:
- "{{ node_config_directory }}/nova-scheduler/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/nova:/dev-mode/nova' if nova_dev_mode | bool else '' }}"
nova_super_conductor_default_volumes:
- "{{ node_config_directory }}/nova-super-conductor/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/nova:/dev-mode/nova' if nova_dev_mode | bool else '' }}"
# Used by bootstrapping containers.
nova_api_bootstrap_default_volumes:
- "{{ node_config_directory }}/nova-api-bootstrap/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/nova:/dev-mode/nova' if nova_dev_mode | bool else '' }}"
nova_extra_volumes: "{{ default_extra_volumes }}"
nova_api_extra_volumes: "{{ nova_extra_volumes }}"
+nova_metadata_extra_volumes: "{{ nova_extra_volumes }}"
nova_scheduler_extra_volumes: "{{ nova_extra_volumes }}"
nova_super_conductor_extra_volumes: "{{ nova_extra_volumes }}"
# Used by bootstrapping containers.
@@ -189,9 +228,6 @@ nova_api_bootstrap_extra_volumes: "{{ nova_extra_volumes }}"
####################
# OpenStack
####################
-nova_internal_base_endpoint: "{{ internal_protocol }}://{{ nova_internal_fqdn | put_address_in_context('url') }}:{{ nova_api_port }}"
-nova_public_base_endpoint: "{{ public_protocol }}://{{ nova_external_fqdn | put_address_in_context('url') }}:{{ nova_api_port }}"
-
nova_legacy_internal_endpoint: "{{ nova_internal_base_endpoint }}/v2/%(tenant_id)s"
nova_legacy_public_endpoint: "{{ nova_public_base_endpoint }}/v2/%(tenant_id)s"
@@ -238,6 +274,11 @@ nova_ks_users:
password: "{{ nova_keystone_password }}"
role: "admin"
+nova_ks_user_roles:
+ - project: "service"
+ user: "{{ nova_keystone_user }}"
+ role: "service"
+
####################
# Notification
####################
@@ -246,8 +287,6 @@ nova_notification_topics:
enabled: "{{ enable_ceilometer | bool or enable_neutron_infoblox_ipam_agent | bool }}"
- name: "{{ designate_notifications_topic_name }}"
enabled: "{{ designate_enable_notifications_sink | bool }}"
- - name: vitrage_notifications
- enabled: "{{ enable_vitrage | bool }}"
nova_enabled_notification_topics: "{{ nova_notification_topics | selectattr('enabled', 'equalto', true) | list }}"
@@ -263,3 +302,5 @@ nova_source_version: "{{ kolla_source_version }}"
# TLS
####################
nova_enable_tls_backend: "{{ kolla_enable_tls_backend }}"
+
+nova_copy_certs: "{{ kolla_copy_ca_into_containers | bool or nova_enable_tls_backend | bool }}"
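
The image variables above all follow the same assembly rule, now including `docker_image_name_prefix`. Spelled out:

    def image_full(docker_registry, docker_namespace, docker_image_name_prefix,
                   name, tag):
        """Compose a full image reference the way the defaults above do."""
        registry = docker_registry + '/' if docker_registry else ''
        return '%s%s/%s%s:%s' % (registry, docker_namespace,
                                 docker_image_name_prefix, name, tag)

    # e.g. image_full('quay.io', 'openstack.kolla', '', 'nova-api', '2023.1')
    #      -> 'quay.io/openstack.kolla/nova-api:2023.1'
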
diff --git a/ansible/roles/nova/handlers/main.yml b/ansible/roles/nova/handlers/main.yml
index 0cdd184c50..917113c2d5 100644
--- a/ansible/roles/nova/handlers/main.yml
+++ b/ansible/roles/nova/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "nova-super-conductor"
service: "{{ nova_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -14,7 +14,6 @@
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
when:
- - kolla_action != "config"
- kolla_action != "upgrade" or not nova_safety_upgrade | bool
- name: Restart nova-scheduler container
@@ -22,7 +21,7 @@
service_name: "nova-scheduler"
service: "{{ nova_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -32,7 +31,6 @@
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
when:
- - kolla_action != "config"
- kolla_action != "upgrade" or not nova_safety_upgrade | bool
- name: Restart nova-api container
@@ -40,7 +38,24 @@
service_name: "nova-api"
service: "{{ nova_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
+ action: "recreate_or_restart_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ service.container_name }}"
+ image: "{{ service.image }}"
+ privileged: "{{ service.privileged | default(False) }}"
+ volumes: "{{ service.volumes | reject('equalto', '') | list }}"
+ dimensions: "{{ service.dimensions }}"
+ healthcheck: "{{ service.healthcheck | default(omit) }}"
+ when:
+ - kolla_action != "upgrade" or not nova_safety_upgrade | bool
+
+- name: Restart nova-metadata container
+ vars:
+ service_name: "nova-metadata"
+ service: "{{ nova_services[service_name] }}"
+ become: true
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -50,5 +65,4 @@
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
when:
- - kolla_action != "config"
- kolla_action != "upgrade" or not nova_safety_upgrade | bool
diff --git a/ansible/roles/nova/tasks/bootstrap.yml b/ansible/roles/nova/tasks/bootstrap.yml
index a1375720bb..57295ef7ec 100644
--- a/ansible/roles/nova/tasks/bootstrap.yml
+++ b/ansible/roles/nova/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating Nova databases
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -19,6 +20,7 @@
- name: Creating Nova databases user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
diff --git a/ansible/roles/nova/tasks/bootstrap_service.yml b/ansible/roles/nova/tasks/bootstrap_service.yml
index 0d18e1b289..0719ee96ef 100644
--- a/ansible/roles/nova/tasks/bootstrap_service.yml
+++ b/ansible/roles/nova/tasks/bootstrap_service.yml
@@ -1,11 +1,9 @@
---
-# TODO(mgoddard): We could use nova-manage db sync --local_cell, otherwise we
-# sync cell0 twice. Should not be harmful without though.
- name: Running Nova API bootstrap container
become: true
vars:
nova_api: "{{ nova_services['nova-api'] }}"
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -16,7 +14,7 @@
labels:
BOOTSTRAP:
name: "nova_api_bootstrap"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ nova_api_bootstrap_default_volumes + nova_api_bootstrap_extra_volumes }}"
register: bootstrap_result
changed_when: bootstrap_result.stdout | default("") | length > 0
diff --git a/ansible/roles/nova/tasks/check-containers.yml b/ansible/roles/nova/tasks/check-containers.yml
index 480531574a..b7e2f7c29f 100644
--- a/ansible/roles/nova/tasks/check-containers.yml
+++ b/ansible/roles/nova/tasks/check-containers.yml
@@ -1,21 +1,3 @@
---
-- name: Check nova containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- environment: "{{ item.value.environment | default(omit) }}"
- pid_mode: "{{ item.value.pid_mode | default('') }}"
- ipc_mode: "{{ item.value.ipc_mode | default(omit) }}"
- privileged: "{{ item.value.privileged | default(False) }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ nova_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/nova/tasks/config.yml b/ansible/roles/nova/tasks/config.yml
index 48094c3f96..4904d496ca 100644
--- a/ansible/roles/nova/tasks/config.yml
+++ b/ansible/roles/nova/tasks/config.yml
@@ -7,10 +7,7 @@
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ nova_services }}"
+ with_dict: "{{ nova_services | select_services_enabled_and_mapped_to_host }}"
- name: Check if policies shall be overwritten
stat:
@@ -46,7 +43,7 @@
- include_tasks: copy-certs.yml
when:
- - kolla_copy_ca_into_containers | bool or nova_enable_tls_backend | bool
+ - nova_copy_certs
- name: Copying over config.json files for services
become: true
@@ -54,12 +51,7 @@
src: "{{ item.key }}.json.j2"
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ nova_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ nova_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over nova.conf
become: true
@@ -74,12 +66,7 @@
- "{{ node_custom_config }}/nova/{{ inventory_hostname }}/nova.conf"
dest: "{{ node_config_directory }}/{{ item.key }}/nova.conf"
mode: "0660"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ nova_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ nova_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over existing policy file
become: true
@@ -88,25 +75,29 @@
dest: "{{ node_config_directory }}/{{ item.key }}/{{ nova_policy_file }}"
mode: "0660"
when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- nova_policy_file is defined
- item.key in nova_services_require_policy_json
- with_dict: "{{ nova_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ nova_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over nova-api-wsgi.conf
+ vars:
+ service: "{{ nova_services['nova-api'] }}"
template:
src: "nova-api-wsgi.conf.j2"
dest: "{{ node_config_directory }}/nova-api/nova-api-wsgi.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups["nova-api"]
- - nova_services["nova-api"].enabled | bool
- notify:
- - "Restart nova-api container"
+ when: service | service_enabled_and_mapped_to_host
+
+- name: Copying over nova-metadata-wsgi.conf
+ vars:
+ service: "{{ nova_services['nova-metadata'] }}"
+ template:
+ src: "nova-metadata-wsgi.conf.j2"
+ dest: "{{ node_config_directory }}/nova-metadata/nova-metadata-wsgi.conf"
+ mode: "0660"
+ become: true
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over vendordata file
vars:
@@ -118,7 +109,4 @@
become: True
when:
- vendordata_file_path is defined
- - inventory_hostname in groups[service['group']]
- - service.enabled | bool
- notify:
- - "Restart nova-api container"
+ - service | service_enabled_and_mapped_to_host
diff --git a/ansible/roles/nova/tasks/config_validate.yml b/ansible/roles/nova/tasks/config_validate.yml
new file mode 100644
index 0000000000..8f011d3ecb
--- /dev/null
+++ b/ansible/roles/nova/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ nova_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ nova_config_validation }}"
diff --git a/ansible/roles/nova/tasks/map_cell0.yml b/ansible/roles/nova/tasks/map_cell0.yml
index 04da40bd3f..d77553f18e 100644
--- a/ansible/roles/nova/tasks/map_cell0.yml
+++ b/ansible/roles/nova/tasks/map_cell0.yml
@@ -1,24 +1,65 @@
---
-- name: Create cell0 mappings
- vars:
- nova_api: "{{ nova_services['nova-api'] }}"
- nova_cell0_connection: "mysql+pymysql://{{ nova_cell0_database_user }}:{{ nova_cell0_database_password }}@{{ nova_cell0_database_address }}/{{ nova_cell0_database_name }}" # noqa 204
- become: true
- kolla_docker:
- action: "start_container"
- command: bash -c 'sudo -E kolla_set_configs && nova-manage cell_v2 map_cell0 --database_connection {{ nova_cell0_connection }}'
- common_options: "{{ docker_common_options }}"
- detach: False
- image: "{{ nova_api.image }}"
- labels:
- BOOTSTRAP:
- name: "nova_api_map_cell0"
- restart_policy: no
- volumes: "{{ nova_api_bootstrap_default_volumes + nova_api_bootstrap_extra_volumes }}"
- register: map_cell0
- changed_when:
- - map_cell0 is success
- - '"Cell0 is already setup" not in map_cell0.stdout'
+- block:
+ - name: Create cell0 mappings
+ vars:
+ nova_api: "{{ nova_services['nova-api'] }}"
+ nova_cell0_connection: "mysql+pymysql://{{ nova_cell0_database_user }}:{{ nova_cell0_database_password }}@{{ nova_cell0_database_address }}/{{ nova_cell0_database_name }}" # noqa 204
+ become: true
+ kolla_container:
+ action: "start_container"
+ command: bash -c 'sudo -E kolla_set_configs && nova-manage cell_v2 map_cell0 --database_connection {{ nova_cell0_connection }}'
+ common_options: "{{ docker_common_options }}"
+ detach: False
+ image: "{{ nova_api.image }}"
+ labels:
+ BOOTSTRAP:
+ name: "nova_api_map_cell0"
+ restart_policy: oneshot
+ volumes: "{{ nova_api_bootstrap_default_volumes + nova_api_bootstrap_extra_volumes }}"
+ register: map_cell0
+ changed_when:
+ - map_cell0 is success
+ - '"Cell0 is already setup" not in map_cell0.stdout'
+
+ - import_role:
+ name: nova-cell
+ tasks_from: get_cell_settings.yml
+ vars:
+ nova_cell_name: "cell0"
+ nova_api: "{{ nova_services['nova-api'] }}"
+ nova_cell_get_settings_volumes: "{{ nova_api_bootstrap_default_volumes + nova_api_bootstrap_extra_volumes }}"
+
+ - name: Update cell0 mappings
+ vars:
+ nova_cell0_uuid: "00000000-0000-0000-0000-000000000000"
+ nova_cell0_transport_url: "none:/"
+ nova_cell0_connection: "mysql+pymysql://{{ nova_cell0_database_user }}:{{ nova_cell0_database_password }}@{{ nova_cell0_database_address }}/{{ nova_cell0_database_name }}" # noqa 204
+ nova_api: "{{ nova_services['nova-api'] }}"
+ become: true
+ kolla_container:
+ action: "start_container"
+ command: >
+ bash -c 'sudo -E kolla_set_configs &&
+ nova-manage cell_v2 update_cell
+ --cell_uuid {{ nova_cell0_uuid }}
+ --database_connection {{ nova_cell0_connection }}
+ --transport-url {{ nova_cell0_transport_url }}'
+ common_options: "{{ docker_common_options }}"
+ detach: False
+ image: "{{ nova_api.image }}"
+ labels:
+ BOOTSTRAP:
+ name: "nova_api_map_cell0"
+ restart_policy: oneshot
+ volumes: "{{ nova_api_bootstrap_default_volumes + nova_api_bootstrap_extra_volumes }}"
+ register: nova_cell0_updated
+ changed_when:
+ - nova_cell0_updated is success
+ failed_when:
+ - nova_cell0_updated.rc != 0
+ when:
+ - nova_cell_settings | length > 0
+ - nova_cell_settings.cell_database != nova_cell0_connection
run_once: True
delegate_to: "{{ groups[nova_api.group][0] }}"
diff --git a/ansible/roles/nova/tasks/online_data_migrations.yml b/ansible/roles/nova/tasks/online_data_migrations.yml
index 2ab13de078..cd9f69bafe 100644
--- a/ansible/roles/nova/tasks/online_data_migrations.yml
+++ b/ansible/roles/nova/tasks/online_data_migrations.yml
@@ -3,7 +3,7 @@
vars:
nova_api: "{{ nova_services['nova-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -14,7 +14,7 @@
labels:
BOOTSTRAP:
name: "nova_api_online_data_migrations"
- restart_policy: "no"
+ restart_policy: "oneshot"
volumes: "{{ nova_api_bootstrap_default_volumes + nova_api_bootstrap_extra_volumes }}"
run_once: true
delegate_to: "{{ groups[nova_api.group][0] }}"
diff --git a/ansible/roles/nova/tasks/precheck.yml b/ansible/roles/nova/tasks/precheck.yml
index dd52dfdf05..ee68197c37 100644
--- a/ansible/roles/nova/tasks/precheck.yml
+++ b/ansible/roles/nova/tasks/precheck.yml
@@ -8,13 +8,16 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- nova_api
+ check_mode: false
register: container_facts
- name: Checking free port for Nova API
vars:
- nova_api: "{{ nova_services['nova-api'] }}"
+ service: "{{ nova_services['nova-api'] }}"
wait_for:
host: "{{ api_interface_address }}"
port: "{{ nova_api_listen_port }}"
@@ -23,12 +26,11 @@
state: stopped
when:
- container_facts['nova_api'] is not defined
- - inventory_hostname in groups[nova_api.group]
- - nova_api.enabled | bool
+ - service | service_enabled_and_mapped_to_host
- name: Checking free port for Nova Metadata
vars:
- nova_api: "{{ nova_services['nova-api'] }}"
+ service: "{{ nova_services['nova-api'] }}"
wait_for:
host: "{{ api_interface_address }}"
port: "{{ nova_metadata_listen_port }}"
@@ -37,5 +39,4 @@
state: stopped
when:
- container_facts['nova_api'] is not defined
- - inventory_hostname in groups[nova_api.group]
- - nova_api.enabled | bool
+ - service | service_enabled_and_mapped_to_host
diff --git a/ansible/roles/nova/tasks/register.yml b/ansible/roles/nova/tasks/register.yml
index a9c7cfaf61..c902fec305 100644
--- a/ansible/roles/nova/tasks/register.yml
+++ b/ansible/roles/nova/tasks/register.yml
@@ -5,3 +5,4 @@
service_ks_register_auth: "{{ openstack_nova_auth }}"
service_ks_register_services: "{{ nova_ks_services }}"
service_ks_register_users: "{{ nova_ks_users }}"
+ service_ks_register_user_roles: "{{ nova_ks_user_roles }}"
diff --git a/ansible/roles/nova/tasks/reload_api.yml b/ansible/roles/nova/tasks/reload_api.yml
index 858f595769..10111dcdff 100644
--- a/ansible/roles/nova/tasks/reload_api.yml
+++ b/ansible/roles/nova/tasks/reload_api.yml
@@ -6,7 +6,7 @@
vars:
service: "{{ nova_services[item] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -17,8 +17,7 @@
healthcheck: "{{ service.healthcheck | default(omit) }}"
when:
- kolla_action == 'upgrade'
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ - service | service_enabled_and_mapped_to_host
with_items:
- nova-scheduler
- nova-api
diff --git a/ansible/roles/nova/tasks/reload_super_conductor.yml b/ansible/roles/nova/tasks/reload_super_conductor.yml
index edc3a165b4..58fe18aacd 100644
--- a/ansible/roles/nova/tasks/reload_super_conductor.yml
+++ b/ansible/roles/nova/tasks/reload_super_conductor.yml
@@ -6,7 +6,7 @@
vars:
service: "{{ nova_services['nova-super-conductor'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -17,5 +17,4 @@
healthcheck: "{{ service.healthcheck | default(omit) }}"
when:
- kolla_action == 'upgrade'
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ - service | service_enabled_and_mapped_to_host
diff --git a/ansible/roles/nova/tasks/rolling_upgrade.yml b/ansible/roles/nova/tasks/rolling_upgrade.yml
deleted file mode 100644
index 1a2c675437..0000000000
--- a/ansible/roles/nova/tasks/rolling_upgrade.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# Create new set of configs on nodes
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-# TODO(donghm): Flush_handlers to restart nova services
-# should be run in serial nodes to decrease downtime if
-# the previous task did not run. Update when the
-# Ansible strategy module for rolling upgrade is finished.
-
-- name: Flush handlers
- meta: flush_handlers
-
-# NOTE(dszumski): The Nova upgrade is not finished here and
-# continues in subsequent tasks.
diff --git a/ansible/roles/nova/tasks/upgrade.yml b/ansible/roles/nova/tasks/upgrade.yml
index d6f3f70c42..787bc7773a 100644
--- a/ansible/roles/nova/tasks/upgrade.yml
+++ b/ansible/roles/nova/tasks/upgrade.yml
@@ -1,10 +1,36 @@
---
-- name: Check nova upgrade status
+# TODO(bbezak): Remove this task in the Dalmatian cycle.
+- import_role:
+ name: service-ks-register
+ vars:
+ service_ks_register_auth: "{{ openstack_nova_auth }}"
+ service_ks_register_user_roles: "{{ nova_ks_user_roles }}"
+
+# Create new set of configs on nodes
+- import_tasks: config.yml
+
+- name: Run Nova upgrade checks
become: true
- command: "{{ kolla_container_engine }} exec -t nova_api nova-status upgrade check"
+ vars:
+ nova_api: "{{ nova_services['nova-api'] }}"
+ kolla_container:
+ action: "start_container"
+ common_options: "{{ docker_common_options }}"
+ detach: False
+ environment:
+ KOLLA_UPGRADE_CHECK:
+ KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
+ image: "{{ nova_api.image }}"
+ labels:
+ UPGRADE:
+ name: "nova_upgrade_checks"
+ restart_policy: oneshot
+ volumes: "{{ nova_api_default_volumes + nova_api_extra_volumes }}"
+ run_once: True
register: nova_upgrade_check_stdout
- when: inventory_hostname == groups['nova-api'][0]
+ delegate_to: "{{ groups['nova-api'][0] }}"
failed_when: false
+ check_mode: false
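+  # Assumption: KOLLA_UPGRADE_CHECK makes the image's start script run
+  # "nova-status upgrade check" and exit, replacing the former
+  # "docker exec ... nova-status upgrade check" invocation removed above.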
- name: Upgrade status check result
fail:
@@ -17,14 +43,23 @@
- name: Stopping top level nova services
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "{{ item.value.container_name }}"
- with_dict: "{{ nova_services }}"
+ with_dict: "{{ nova_services | select_services_enabled_and_mapped_to_host }}"
when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- nova_safety_upgrade | bool
-- import_tasks: rolling_upgrade.yml
+- import_tasks: check-containers.yml
+
+# TODO(donghm): Flush_handlers to restart nova services
+# should be run in serial nodes to decrease downtime if
+# the previous task did not run. Update when the
+# Ansible strategy module for rolling upgrade is finished.
+
+- name: Flush handlers
+ meta: flush_handlers
+
+# NOTE(dszumski): The Nova upgrade is not finished here and
+# continues in subsequent tasks.
diff --git a/ansible/roles/nova/templates/nova-api-wsgi.conf.j2 b/ansible/roles/nova/templates/nova-api-wsgi.conf.j2
index e84c194960..7acd59eb8c 100644
--- a/ansible/roles/nova/templates/nova-api-wsgi.conf.j2
+++ b/ansible/roles/nova/templates/nova-api-wsgi.conf.j2
@@ -8,7 +8,6 @@ LoadModule ssl_module /usr/lib/apache2/modules/mod_ssl.so
{% endif %}
{% endif %}
Listen {{ api_interface_address | put_address_in_context('url') }}:{{ nova_api_listen_port }}
-Listen {{ api_interface_address | put_address_in_context('url') }}:{{ nova_metadata_listen_port }}
ServerSignature Off
ServerTokens Prod
@@ -17,7 +16,7 @@ TimeOut {{ kolla_httpd_timeout }}
KeepAliveTimeout {{ kolla_httpd_keep_alive }}
-
+
Options None
Require all granted
@@ -50,22 +49,3 @@ LogLevel info
SSLCertificateKeyFile /etc/nova/certs/nova-key.pem
{% endif %}
-
-<VirtualHost *:{{ nova_metadata_listen_port }}>
-    WSGIDaemonProcess nova-metadata processes={{ nova_metadata_api_workers }} threads=1 user=nova group=nova display-name=nova-metadata-api
-    WSGIProcessGroup nova-metadata
-    WSGIScriptAlias / {{ wsgi_directory }}/nova-metadata-wsgi
-    WSGIApplicationGroup %{GLOBAL}
-    WSGIPassAuthorization On
-    <IfVersion >= 2.4>
-      ErrorLogFormat "%{cu}t %M"
-    </IfVersion>
-    ErrorLog "{{ nova_log_dir }}/nova-metadata-error.log"
-    LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
-    CustomLog "{{ nova_log_dir }}/nova-metadata-access.log" logformat
-{% if nova_enable_tls_backend | bool %}
-    SSLEngine on
-    SSLCertificateFile /etc/nova/certs/nova-cert.pem
-    SSLCertificateKeyFile /etc/nova/certs/nova-key.pem
-{% endif %}
-</VirtualHost>
diff --git a/ansible/roles/nova/templates/nova-api.json.j2 b/ansible/roles/nova/templates/nova-api.json.j2
index 8a3bffa801..444e44ec66 100644
--- a/ansible/roles/nova/templates/nova-api.json.j2
+++ b/ansible/roles/nova/templates/nova-api.json.j2
@@ -38,6 +38,12 @@
"dest": "/etc/nova/vendordata.json",
"owner": "nova",
"perm": "0600"
+ }{% endif %}{% if nova_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/nova/templates/nova-metadata-wsgi.conf.j2 b/ansible/roles/nova/templates/nova-metadata-wsgi.conf.j2
new file mode 100644
index 0000000000..58ab62302f
--- /dev/null
+++ b/ansible/roles/nova/templates/nova-metadata-wsgi.conf.j2
@@ -0,0 +1,51 @@
+{% set nova_log_dir = '/var/log/kolla/nova' %}
+{% set wsgi_directory = '/var/lib/kolla/venv/bin' %}
+{% if nova_enable_tls_backend | bool %}
+{% if kolla_base_distro in ['centos', 'rocky'] %}
+LoadModule ssl_module /usr/lib64/httpd/modules/mod_ssl.so
+{% else %}
+LoadModule ssl_module /usr/lib/apache2/modules/mod_ssl.so
+{% endif %}
+{% endif %}
+Listen {{ api_interface_address | put_address_in_context('url') }}:{{ nova_metadata_listen_port }}
+
+ServerSignature Off
+ServerTokens Prod
+TraceEnable off
+TimeOut {{ kolla_httpd_timeout }}
+KeepAliveTimeout {{ kolla_httpd_keep_alive }}
+
+<Directory "{{ wsgi_directory }}">
+    Options None
+    Require all granted
+</Directory>
+
+ErrorLog "{{ nova_log_dir }}/apache-error.log"
+<IfModule log_config_module>
+    CustomLog "{{ nova_log_dir }}/apache-access.log" common
+</IfModule>
+
+{% if nova_logging_debug | bool %}
+LogLevel info
+{% endif %}
+
+<VirtualHost *:{{ nova_metadata_listen_port }}>
+    WSGIDaemonProcess nova-metadata processes={{ nova_metadata_api_workers }} threads=1 user=nova group=nova display-name=nova-metadata-api
+    WSGIProcessGroup nova-metadata
+    WSGIScriptAlias / {{ wsgi_directory }}/nova-metadata-wsgi
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog "{{ nova_log_dir }}/nova-metadata-error.log"
+    LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
+    CustomLog "{{ nova_log_dir }}/nova-metadata-access.log" logformat
+{% if nova_enable_tls_backend | bool %}
+    SSLEngine on
+    SSLCertificateFile /etc/nova/certs/nova-cert.pem
+    SSLCertificateKeyFile /etc/nova/certs/nova-key.pem
+{% endif %}
+</VirtualHost>
diff --git a/ansible/roles/nova/templates/nova-metadata.json.j2 b/ansible/roles/nova/templates/nova-metadata.json.j2
new file mode 100644
index 0000000000..0de4d593c3
--- /dev/null
+++ b/ansible/roles/nova/templates/nova-metadata.json.j2
@@ -0,0 +1,50 @@
+{% set apache_binary = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %}
+{% set apache_conf_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %}
+{
+ "command": "/usr/sbin/{{ apache_binary }} -DFOREGROUND",
+ "config_files": [
+ {
+ "source": "{{ container_config_directory }}/nova.conf",
+ "dest": "/etc/nova/nova.conf",
+ "owner": "nova",
+ "perm": "0600"
+ },
+ {
+ "source": "{{ container_config_directory }}/nova-metadata-wsgi.conf",
+ "dest": "/etc/{{ apache_conf_dir }}/nova-metadata-wsgi.conf",
+ "owner": "nova",
+ "perm": "0600"
+ }{% if nova_policy_file is defined %},
+ {
+ "source": "{{ container_config_directory }}/{{ nova_policy_file }}",
+ "dest": "/etc/nova/{{ nova_policy_file }}",
+ "owner": "nova",
+ "perm": "0600"
+ }{% endif %}{% if nova_enable_tls_backend | bool %},
+ {
+ "source": "{{ container_config_directory }}/nova-cert.pem",
+ "dest": "/etc/nova/certs/nova-cert.pem",
+ "owner": "nova",
+ "perm": "0600"
+ },
+ {
+ "source": "{{ container_config_directory }}/nova-key.pem",
+ "dest": "/etc/nova/certs/nova-key.pem",
+ "owner": "nova",
+ "perm": "0600"
+ }{% endif %}{% if vendordata_file_path is defined %},
+ {
+ "source": "{{ container_config_directory }}/vendordata.json",
+ "dest": "/etc/nova/vendordata.json",
+ "owner": "nova",
+ "perm": "0600"
+ }{% endif %}
+ ],
+ "permissions": [
+ {
+ "path": "/var/log/kolla/nova",
+ "owner": "nova:nova",
+ "recurse": true
+ }
+ ]
+}
diff --git a/ansible/roles/nova/templates/nova-scheduler.json.j2 b/ansible/roles/nova/templates/nova-scheduler.json.j2
index 36638987a0..9159bec31f 100644
--- a/ansible/roles/nova/templates/nova-scheduler.json.j2
+++ b/ansible/roles/nova/templates/nova-scheduler.json.j2
@@ -6,7 +6,13 @@
"dest": "/etc/nova/nova.conf",
"owner": "nova",
"perm": "0600"
- }
+ }{% if nova_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/nova/templates/nova-super-conductor.json.j2 b/ansible/roles/nova/templates/nova-super-conductor.json.j2
index 6a7328713d..1f633f7599 100644
--- a/ansible/roles/nova/templates/nova-super-conductor.json.j2
+++ b/ansible/roles/nova/templates/nova-super-conductor.json.j2
@@ -6,7 +6,13 @@
"dest": "/etc/nova/nova.conf",
"owner": "nova",
"perm": "0600"
- }
+ }{% if nova_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/nova/templates/nova.conf.j2 b/ansible/roles/nova/templates/nova.conf.j2
index 50f169ab08..31c554fa40 100644
--- a/ansible/roles/nova/templates/nova.conf.j2
+++ b/ansible/roles/nova/templates/nova.conf.j2
@@ -6,6 +6,8 @@ debug = {{ nova_logging_debug }}
log_file = /var/log/kolla/nova/nova-super-conductor.log
{% elif service_name == "nova-api" %}
log_file = /var/log/kolla/nova/nova-api.log
+{% elif service_name == "nova-metadata" %}
+log_file = /var/log/kolla/nova/nova-metadata.log
{% else %}
log_dir = /var/log/kolla/nova
{% endif %}
@@ -16,7 +18,7 @@ allow_resize_to_same_host = true
# Though my_ip is not used directly, lots of other variables use $my_ip
my_ip = {{ api_interface_address }}
-{% if enable_ceilometer | bool or enable_designate | bool %}
+{% if enable_ceilometer | bool %}
instance_usage_audit = True
instance_usage_audit_period = hour
{% endif %}
@@ -27,10 +29,10 @@ transport_url = {{ rpc_transport_url }}
{% if enable_blazar | bool %}
available_filters = nova.scheduler.filters.all_filters
available_filters = blazarnova.scheduler.filters.blazar_filter.BlazarFilter
-enabled_filters = AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,BlazarFilter
+enabled_filters = ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,BlazarFilter
{% endif %}
{% if enable_nova_fake | bool %}
-enabled_filters = AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter
+enabled_filters = ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter
{% endif %}
{% if enable_cells | bool %}
# When in superconductor mode, nova-compute can't send instance
@@ -122,7 +124,7 @@ password = {{ nova_keystone_password }}
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
@@ -138,11 +140,18 @@ topics = {{ nova_enabled_notification_topics | map(attribute='name') | join(',')
driver = noop
{% endif %}
-{% if om_enable_rabbitmq_tls | bool %}
[oslo_messaging_rabbit]
+heartbeat_in_pthread = {{ service_name == 'nova-api' }}
+{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
{% if service_name in nova_services_require_policy_json and nova_policy_file is defined %}
[oslo_policy]
@@ -200,3 +209,16 @@ auth_endpoint = {{ keystone_internal_url }}
barbican_endpoint_type = internal
verify_ssl_path = {{ openstack_cacert }}
{% endif %}
+
+[service_user]
+send_service_user_token = true
+auth_url = {{ keystone_internal_url }}
+auth_type = password
+project_domain_id = {{ default_project_domain_id }}
+user_domain_id = {{ default_user_domain_id }}
+project_name = service
+username = {{ nova_keystone_user }}
+password = {{ nova_keystone_password }}
+cafile = {{ openstack_cacert }}
+region_name = {{ openstack_region_name }}
+valid_interfaces = internal
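+{# The service token lets backends such as Cinder and Glance accept
+   nova-initiated requests even when the original user token expires
+   mid-operation (Jinja comment, not rendered into nova.conf). #}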
diff --git a/ansible/roles/octavia-certificates/defaults/main.yml b/ansible/roles/octavia-certificates/defaults/main.yml
index 67fe9085af..2061dbe438 100644
--- a/ansible/roles/octavia-certificates/defaults/main.yml
+++ b/ansible/roles/octavia-certificates/defaults/main.yml
@@ -43,3 +43,6 @@ octavia_certs_client_req_organizational_unit: "{{ octavia_certs_organizational_u
# NOTE(yoctozepto): This should ideally be per controller, i.e. controller
# generates its key&CSR and this CA signs it.
octavia_certs_client_req_common_name: client.example.org
+
+# Used with command `kolla-ansible octavia-certificates --check-expiry <days>`.
+octavia_certs_check_expiry: "no"
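+# For example, to fail if any certificate expires within the next 90 days:
+#   kolla-ansible octavia-certificates --check-expiry 90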
diff --git a/ansible/roles/octavia-certificates/tasks/check_expiry.yml b/ansible/roles/octavia-certificates/tasks/check_expiry.yml
new file mode 100644
index 0000000000..66ed8e4b0c
--- /dev/null
+++ b/ansible/roles/octavia-certificates/tasks/check_expiry.yml
@@ -0,0 +1,24 @@
+---
+- name: Gather information on certificates
+ community.crypto.x509_certificate_info:
+ path: "{{ node_custom_config }}/octavia/{{ item }}"
+ valid_at:
+ point_1: "+{{ octavia_certs_expiry_limit | int }}d"
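+      # community.crypto evaluates each valid_at entry; point_1 becomes true
+      # when the certificate is still valid that many days from now.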
+ register: cert_info
+ delegate_to: localhost
+ with_items:
+ - "server_ca.cert.pem"
+ - "client_ca.cert.pem"
+ - "client.cert-and-key.pem"
+
+- name: Check whether certificates are valid within {{ octavia_certs_expiry_limit }} days
+ assert:
+ that:
+ - item.valid_at.point_1
+ fail_msg: "{{ item.item }} will expire within {{ octavia_certs_expiry_limit }} days, on {{ item.not_after }}"
+ success_msg: "{{ item.item }} will not expire within {{ octavia_certs_expiry_limit }} days. It expires on {{ item.not_after }}"
+ quiet: True
+ loop: "{{ cert_info.results }}"
+ loop_control:
+ label: "{{ item.item }}"
+ delegate_to: localhost
diff --git a/ansible/roles/octavia-certificates/tasks/main.yml b/ansible/roles/octavia-certificates/tasks/main.yml
index ed58ec436c..9ba737b2bd 100644
--- a/ansible/roles/octavia-certificates/tasks/main.yml
+++ b/ansible/roles/octavia-certificates/tasks/main.yml
@@ -7,38 +7,45 @@
# Kolla Ansible prepares and controls the Client CA certificate and key.
# Client CA is used to generate certificates for Octavia controllers.
-- name: Ensure server_ca and client_ca directories exist
- file:
- path: "{{ octavia_certs_work_dir }}/{{ item }}"
- state: "directory"
- mode: 0770
- loop:
- - server_ca
- - client_ca
-
-- name: Copy openssl.cnf
- copy:
- src: "{{ octavia_certs_openssl_cnf_path }}"
- dest: "{{ octavia_certs_work_dir }}/openssl.cnf"
-
-- import_tasks: server_ca.yml
-
-- import_tasks: client_ca.yml
-
-- import_tasks: client_cert.yml
-
-- name: Ensure {{ node_custom_config }}/octavia directory exists
- file:
- path: "{{ node_custom_config }}/octavia"
- state: "directory"
- mode: 0770
-
-- name: Copy the to-be-deployed keys and certs to {{ node_custom_config }}/octavia
- copy:
- src: "{{ octavia_certs_work_dir }}/{{ item.src }}"
- dest: "{{ node_custom_config }}/octavia/{{ item.dest }}"
- with_items:
- - { src: "server_ca/server_ca.cert.pem", dest: "server_ca.cert.pem" }
- - { src: "server_ca/server_ca.key.pem", dest: "server_ca.key.pem" }
- - { src: "client_ca/client_ca.cert.pem", dest: "client_ca.cert.pem" }
- - { src: "client_ca/client.cert-and-key.pem", dest: "client.cert-and-key.pem" }
+- name: Check if any certificates are going to expire
+ include_tasks: check_expiry.yml
+ when: octavia_certs_check_expiry | bool
+
+- block:
+ - name: Ensure server_ca and client_ca directories exist
+ file:
+ path: "{{ octavia_certs_work_dir }}/{{ item }}"
+ state: "directory"
+ mode: 0770
+ loop:
+ - server_ca
+ - client_ca
+
+ - name: Copy openssl.cnf
+ copy:
+ src: "{{ octavia_certs_openssl_cnf_path }}"
+ dest: "{{ octavia_certs_work_dir }}/openssl.cnf"
+
+ - import_tasks: server_ca.yml
+
+ - import_tasks: client_ca.yml
+
+ - import_tasks: client_cert.yml
+
+ - name: Ensure {{ node_custom_config }}/octavia directory exists
+ file:
+ path: "{{ node_custom_config }}/octavia"
+ state: "directory"
+ mode: 0770
+
+ - name: Copy the to-be-deployed keys and certs to {{ node_custom_config }}/octavia
+ copy:
+ src: "{{ octavia_certs_work_dir }}/{{ item.src }}"
+ dest: "{{ node_custom_config }}/octavia/{{ item.dest }}"
+ with_items:
+ - { src: "server_ca/server_ca.cert.pem", dest: "server_ca.cert.pem" }
+ - { src: "server_ca/server_ca.key.pem", dest: "server_ca.key.pem" }
+ - { src: "client_ca/client_ca.cert.pem", dest: "client_ca.cert.pem" }
+ - { src: "client_ca/client.cert-and-key.pem", dest: "client.cert-and-key.pem" }
+
+ when: not octavia_certs_check_expiry | bool
diff --git a/ansible/roles/octavia/defaults/main.yml b/ansible/roles/octavia/defaults/main.yml
index ae428d4c78..38f0e1c3e5 100644
--- a/ansible/roles/octavia/defaults/main.yml
+++ b/ansible/roles/octavia/defaults/main.yml
@@ -20,7 +20,8 @@ octavia_services:
enabled: "{{ enable_octavia }}"
mode: "http"
external: true
- port: "{{ octavia_api_port }}"
+ external_fqdn: "{{ octavia_external_fqdn }}"
+ port: "{{ octavia_api_public_port }}"
listen_port: "{{ octavia_api_listen_port }}"
tls_backend: "{{ octavia_enable_tls_backend }}"
octavia-driver-agent:
@@ -62,6 +63,12 @@ octavia_required_roles:
- load-balancer_admin
- load-balancer_quota_admin
+####################
+# Config Validate
+####################
+octavia_config_validation:
+ - generator: "/octavia/etc/config/octavia-config-generator.conf"
+ config: "/etc/octavia/octavia.conf"
####################
# Database
@@ -70,18 +77,28 @@ octavia_database_name: "octavia"
octavia_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}octavia{% endif %}"
octavia_database_address: "{{ database_address | put_address_in_context('url') }}:{{ database_port }}"
+octavia_persistence_database_name: "octavia_persistence"
+octavia_persistence_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}octavia_persistence{% endif %}"
+octavia_persistence_database_address: "{{ octavia_database_address }}"
+
####################
# Database sharding
####################
octavia_database_shard_root_user: "{% if enable_proxysql | bool %}root_shard_{{ octavia_database_shard_id }}{% else %}{{ database_user }}{% endif %}"
octavia_database_shard_id: "{{ mariadb_default_database_shard_id | int }}"
+octavia_persistence_database_shard_root_user: "{% if enable_proxysql | bool %}root_shard_{{ octavia_persistence_database_shard_id }}{% else %}{{ database_user }}{% endif %}"
+octavia_persistence_database_shard_id: "{{ octavia_database_shard_id | int }}"
octavia_database_shard:
users:
- user: "{{ octavia_database_user }}"
password: "{{ octavia_database_password }}"
+ - user: "{{ octavia_persistence_database_user }}"
+ password: "{{ octavia_persistence_database_password }}"
rules:
- schema: "{{ octavia_database_name }}"
shard_id: "{{ octavia_database_shard_id }}"
+ - schema: "{{ octavia_persistence_database_name }}"
+ shard_id: "{{ octavia_persistence_database_shard_id }}"
####################
@@ -89,23 +106,23 @@ octavia_database_shard:
####################
octavia_tag: "{{ openstack_tag }}"
-octavia_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/octavia-api"
+octavia_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}octavia-api"
octavia_api_tag: "{{ octavia_tag }}"
octavia_api_image_full: "{{ octavia_api_image }}:{{ octavia_api_tag }}"
-octavia_driver_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/octavia-driver-agent"
+octavia_driver_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}octavia-driver-agent"
octavia_driver_agent_tag: "{{ octavia_tag }}"
octavia_driver_agent_image_full: "{{ octavia_driver_agent_image }}:{{ octavia_driver_agent_tag }}"
-octavia_health_manager_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/octavia-health-manager"
+octavia_health_manager_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}octavia-health-manager"
octavia_health_manager_tag: "{{ octavia_tag }}"
octavia_health_manager_image_full: "{{ octavia_health_manager_image }}:{{ octavia_health_manager_tag }}"
-octavia_housekeeping_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/octavia-housekeeping"
+octavia_housekeeping_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}octavia-housekeeping"
octavia_housekeeping_tag: "{{ octavia_tag }}"
octavia_housekeeping_image_full: "{{ octavia_housekeeping_image }}:{{ octavia_housekeeping_tag }}"
-octavia_worker_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/octavia-worker"
+octavia_worker_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}octavia-worker"
octavia_worker_tag: "{{ octavia_tag }}"
octavia_worker_image_full: "{{ octavia_worker_image }}:{{ octavia_worker_tag }}"
@@ -172,33 +189,33 @@ octavia_api_default_volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/octavia/octavia:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/octavia' if octavia_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/octavia:/dev-mode/octavia' if octavia_dev_mode | bool else '' }}"
- "octavia_driver_agent:/var/run/octavia/"
octavia_health_manager_default_volumes:
- "{{ node_config_directory }}/octavia-health-manager/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/octavia/octavia:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/octavia' if octavia_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/octavia:/dev-mode/octavia' if octavia_dev_mode | bool else '' }}"
octavia_driver_agent_default_volumes:
- "{{ node_config_directory }}/octavia-driver-agent/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/octavia/octavia:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/octavia' if octavia_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/octavia:/dev-mode/octavia' if octavia_dev_mode | bool else '' }}"
- "octavia_driver_agent:/var/run/octavia/"
octavia_housekeeping_default_volumes:
- "{{ node_config_directory }}/octavia-housekeeping/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/octavia/octavia:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/octavia' if octavia_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/octavia:/dev-mode/octavia' if octavia_dev_mode | bool else '' }}"
octavia_worker_default_volumes:
- "{{ node_config_directory }}/octavia-worker/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/octavia/octavia:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/octavia' if octavia_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/octavia:/dev-mode/octavia' if octavia_dev_mode | bool else '' }}"
octavia_extra_volumes: "{{ default_extra_volumes }}"
octavia_api_extra_volumes: "{{ octavia_extra_volumes }}"
@@ -299,7 +316,7 @@ octavia_amp_security_groups:
name: "lb-mgmt-sec-grp"
enabled: true
rules:
- - protocol: icmp
+ - protocol: "{{ 'ipv6-icmp' if octavia_network_address_family == 'ipv6' else 'icmp' }}"
- protocol: tcp
src_port: 22
dst_port: 22
@@ -347,6 +364,10 @@ octavia_amp_network:
# Octavia management network subnet CIDR.
octavia_amp_network_cidr: 10.1.0.0/24
+octavia_amp_router:
+ name: lb-mgmt-router
+ subnet: "{{ octavia_amp_network['subnet']['name'] }}"
+
# Octavia provider drivers
octavia_provider_drivers: "amphora:Amphora provider{% if neutron_plugin_agent == 'ovn' %}, ovn:OVN provider{% endif %}"
octavia_provider_agents: "amphora_agent{% if neutron_plugin_agent == 'ovn' %}, ovn{% endif %}"
@@ -355,3 +376,5 @@ octavia_provider_agents: "amphora_agent{% if neutron_plugin_agent == 'ovn' %}, o
# TLS
####################
octavia_enable_tls_backend: "{{ kolla_enable_tls_backend }}"
+
+octavia_copy_certs: "{{ kolla_copy_ca_into_containers | bool or octavia_enable_tls_backend | bool }}"
diff --git a/ansible/roles/octavia/handlers/main.yml b/ansible/roles/octavia/handlers/main.yml
index 139538ea91..ff38af7ff5 100644
--- a/ansible/roles/octavia/handlers/main.yml
+++ b/ansible/roles/octavia/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "octavia-api"
service: "{{ octavia_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -12,30 +12,26 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart octavia-driver-agent container
vars:
service_name: "octavia-driver-agent"
service: "{{ octavia_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
- name: Restart octavia-health-manager container
vars:
service_name: "octavia-health-manager"
service: "{{ octavia_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -43,15 +39,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart octavia-housekeeping container
vars:
service_name: "octavia-housekeeping"
service: "{{ octavia_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -59,15 +53,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart octavia-worker container
vars:
service_name: "octavia-worker"
service: "{{ octavia_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -75,5 +67,3 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/octavia/tasks/bootstrap.yml b/ansible/roles/octavia/tasks/bootstrap.yml
index 9fb146b02e..1d6667b951 100644
--- a/ansible/roles/octavia/tasks/bootstrap.yml
+++ b/ansible/roles/octavia/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating Octavia database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -14,9 +15,26 @@
when:
- not use_preconfigured_databases | bool
+- name: Creating Octavia persistence database
+ become: true
+ kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: mysql_db
+ module_args:
+ login_host: "{{ database_address }}"
+ login_port: "{{ database_port }}"
+ login_user: "{{ octavia_persistence_database_shard_root_user }}"
+ login_password: "{{ database_password }}"
+ name: "{{ octavia_persistence_database_name }}"
+ run_once: True
+ delegate_to: "{{ groups['octavia-api'][0] }}"
+ when:
+ - not use_preconfigured_databases | bool
+
- name: Creating Octavia database user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
@@ -33,4 +51,24 @@
when:
- not use_preconfigured_databases | bool
+- name: Creating Octavia persistence database user and setting permissions
+ become: true
+ kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: mysql_user
+ module_args:
+ login_host: "{{ database_address }}"
+ login_port: "{{ database_port }}"
+ login_user: "{{ octavia_persistence_database_shard_root_user }}"
+ login_password: "{{ database_password }}"
+ name: "{{ octavia_persistence_database_user }}"
+ password: "{{ octavia_persistence_database_password }}"
+ host: "%"
+ priv: "{{ octavia_persistence_database_name }}.*:ALL"
+ append_privs: "yes"
+ run_once: True
+ delegate_to: "{{ groups['octavia-api'][0] }}"
+ when:
+ - not use_preconfigured_databases | bool
+
- import_tasks: bootstrap_service.yml
diff --git a/ansible/roles/octavia/tasks/bootstrap_service.yml b/ansible/roles/octavia/tasks/bootstrap_service.yml
index ce5ce74635..c1c7b95010 100644
--- a/ansible/roles/octavia/tasks/bootstrap_service.yml
+++ b/ansible/roles/octavia/tasks/bootstrap_service.yml
@@ -3,7 +3,7 @@
vars:
octavia_api: "{{ octavia_services['octavia-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -14,7 +14,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_octavia"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ octavia_api.volumes | reject('equalto', '') | list }}"
run_once: True
delegate_to: "{{ groups[octavia_api.group][0] }}"
diff --git a/ansible/roles/octavia/tasks/check-containers.yml b/ansible/roles/octavia/tasks/check-containers.yml
index 1c3a518e34..b7e2f7c29f 100644
--- a/ansible/roles/octavia/tasks/check-containers.yml
+++ b/ansible/roles/octavia/tasks/check-containers.yml
@@ -1,17 +1,3 @@
---
-- name: Check octavia containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ octavia_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/octavia/tasks/config.yml b/ansible/roles/octavia/tasks/config.yml
index ee0b1d9ea9..c187280393 100644
--- a/ansible/roles/octavia/tasks/config.yml
+++ b/ansible/roles/octavia/tasks/config.yml
@@ -1,4 +1,7 @@
---
+- include_tasks: get_resources_info.yml
+ when: octavia_auto_configure | bool
+
- name: Ensuring config directories exist
file:
path: "{{ node_config_directory }}/{{ item.key }}"
@@ -7,10 +10,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ octavia_services }}"
+ with_dict: "{{ octavia_services | select_services_enabled_and_mapped_to_host }}"
- name: Check if policies shall be overwritten
stat:
@@ -39,15 +39,11 @@
become: true
when:
- octavia_policy_file is defined
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ octavia_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ octavia_services | select_services_enabled_and_mapped_to_host }}"
- include_tasks: copy-certs.yml
when:
- - kolla_copy_ca_into_containers | bool or octavia_enable_tls_backend | bool
+ - octavia_copy_certs
- name: Copying over config.json files for services
template:
@@ -55,12 +51,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ octavia_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ octavia_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over octavia-wsgi.conf
vars:
@@ -74,11 +65,7 @@
- "{{ node_custom_config }}/octavia/{{ inventory_hostname }}/octavia-wsgi.conf"
- "{{ node_custom_config }}/octavia/octavia-wsgi.conf"
- "octavia-wsgi.conf.j2"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart octavia-api container
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over octavia.conf
vars:
@@ -93,12 +80,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/octavia.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ octavia_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ octavia_services | select_services_enabled_and_mapped_to_host }}"
- block:
@@ -122,12 +104,8 @@
dest: "{{ node_config_directory }}/octavia-worker/{{ item }}"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_items: "{{ octavia_amphora_keys }}"
- notify:
- - Restart octavia-worker container
- name: Copying certificate files for octavia-housekeeping
vars:
@@ -137,12 +115,8 @@
dest: "{{ node_config_directory }}/octavia-housekeeping/{{ item }}"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_items: "{{ octavia_amphora_keys }}"
- notify:
- - Restart octavia-housekeeping container
- name: Copying certificate files for octavia-health-manager
vars:
@@ -152,12 +126,8 @@
dest: "{{ node_config_directory }}/octavia-health-manager/{{ item }}"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_items: "{{ octavia_amphora_keys }}"
- notify:
- - Restart octavia-health-manager container
when: "'amphora' in octavia_provider_drivers"
vars:
diff --git a/ansible/roles/octavia/tasks/config_validate.yml b/ansible/roles/octavia/tasks/config_validate.yml
new file mode 100644
index 0000000000..e9069eb7ec
--- /dev/null
+++ b/ansible/roles/octavia/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ octavia_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ octavia_config_validation }}"
diff --git a/ansible/roles/octavia/tasks/get_resources_info.yml b/ansible/roles/octavia/tasks/get_resources_info.yml
index d5e13f01e3..ce6841d544 100644
--- a/ansible/roles/octavia/tasks/get_resources_info.yml
+++ b/ansible/roles/octavia/tasks/get_resources_info.yml
@@ -2,7 +2,8 @@
- name: Get amphora flavor info
become: true
kolla_toolbox:
- module_name: os_flavor_info
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: openstack.cloud.compute_flavor_info
module_args:
auth: "{{ octavia_user_auth }}"
cacert: "{{ openstack_cacert }}"
@@ -10,13 +11,15 @@
region_name: "{{ openstack_region_name }}"
name: "{{ octavia_amp_flavor.name }}"
run_once: True
+ check_mode: false
delegate_to: "{{ groups['octavia-api'][0] }}"
register: flavor_results
- name: Get {{ octavia_service_auth_project }} project id
become: True
kolla_toolbox:
- module_name: os_project_info
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: openstack.cloud.project_info
module_args:
auth: "{{ octavia_user_auth }}"
cacert: "{{ openstack_cacert }}"
@@ -25,6 +28,7 @@
name: "{{ octavia_service_auth_project }}"
run_once: True
delegate_to: "{{ groups['octavia-api'][0] }}"
+ check_mode: false
register: project_info
# NOTE(wuchunyang): ansible doesn't have a module to query security groups
@@ -32,7 +36,8 @@
- name: Get security groups for octavia
become: true
kolla_toolbox:
- module_name: os_security_group
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: openstack.cloud.security_group
module_args:
auth: "{{ octavia_user_auth }}"
cacert: "{{ openstack_cacert }}"
@@ -45,12 +50,14 @@
label: "{{ item.name }}"
run_once: True
delegate_to: "{{ groups['octavia-api'][0] }}"
+ check_mode: false
register: sec_grp_info
- name: Get loadbalancer management network
become: true
kolla_toolbox:
- module_name: os_networks_info
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: openstack.cloud.networks_info
module_args:
auth: "{{ octavia_user_auth }}"
cacert: "{{ openstack_cacert }}"
@@ -60,8 +67,9 @@
register: network_results
run_once: True
delegate_to: "{{ groups['octavia-api'][0] }}"
+ check_mode: false
- name: Set octavia resources facts
set_fact:
- network_info: "{{ network_results.openstack_networks.0 }}"
- amphora_flavor_info: "{{ flavor_results.openstack_flavors.0 }}"
+ network_info: "{{ network_results.networks | first }}"
+ amphora_flavor_info: "{{ flavor_results.flavors | first }}"
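+  # The openstack.cloud modules return plain "networks" / "flavors" lists;
+  # the legacy os_* modules used "openstack_networks" / "openstack_flavors".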
diff --git a/ansible/roles/octavia/tasks/hm-interface.yml b/ansible/roles/octavia/tasks/hm-interface.yml
index 4ebf07aaf3..d1900cc2bb 100644
--- a/ansible/roles/octavia/tasks/hm-interface.yml
+++ b/ansible/roles/octavia/tasks/hm-interface.yml
@@ -2,7 +2,8 @@
- name: Create ports for Octavia health-manager nodes
become: true
kolla_toolbox:
- module_name: os_port
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: openstack.cloud.port
module_args:
auth: "{{ octavia_user_auth }}"
cacert: "{{ openstack_cacert }}"
@@ -21,7 +22,7 @@
- name: Update Octavia health manager port host_id
become: True
vars:
- port_id: "{{ port_info.id }}"
+ port_id: "{{ port_info.port.id }}"
command: >
{{ kolla_container_engine }} exec kolla_toolbox openstack
--os-interface {{ openstack_interface }}
@@ -40,7 +41,7 @@
- name: Add Octavia port to openvswitch br-int
vars:
port_mac: "{{ port_info.port.mac_address }}"
- port_id: "{{ port_info.id }}"
+ port_id: "{{ port_info.port.id }}"
become: True
command: >
{{ kolla_container_engine }} exec openvswitch_vswitchd ovs-vsctl --may-exist \
diff --git a/ansible/roles/octavia/tasks/precheck.yml b/ansible/roles/octavia/tasks/precheck.yml
index 5b2b094499..be672853e7 100644
--- a/ansible/roles/octavia/tasks/precheck.yml
+++ b/ansible/roles/octavia/tasks/precheck.yml
@@ -8,9 +8,12 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- octavia_api
- octavia_health_manager
+ check_mode: false
register: container_facts
- name: Checking free port for Octavia API
@@ -61,3 +64,11 @@
- octavia_auto_configure | bool
- octavia_network_type == "tenant"
- neutron_plugin_agent != 'openvswitch'
+
+- name: Checking whether Redis is enabled for octavia jobboard
+ assert:
+ that: enable_redis | bool
+ fail_msg: "Redis must be enabled when using octavia jobboard"
+ run_once: True
+ when:
+ - enable_octavia_jobboard | bool
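+  # The jobboard persists taskflow jobs in Redis (see the [task_flow] section
+  # of octavia.conf.j2), hence the hard requirement.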
diff --git a/ansible/roles/octavia/tasks/prepare.yml b/ansible/roles/octavia/tasks/prepare.yml
index b3815f0867..b75bcd548b 100644
--- a/ansible/roles/octavia/tasks/prepare.yml
+++ b/ansible/roles/octavia/tasks/prepare.yml
@@ -2,7 +2,8 @@
- name: Create amphora flavor
become: true
kolla_toolbox:
- module_name: os_nova_flavor
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: openstack.cloud.compute_flavor
module_args:
auth: "{{ octavia_user_auth }}"
cacert: "{{ openstack_cacert }}"
@@ -25,7 +26,8 @@
- name: Create nova keypair for amphora
become: True
kolla_toolbox:
- module_name: os_keypair
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: openstack.cloud.keypair
module_args:
auth: "{{ octavia_user_auth }}"
cacert: "{{ openstack_cacert }}"
@@ -40,7 +42,8 @@
- name: Get {{ octavia_service_auth_project }} project id
become: True
kolla_toolbox:
- module_name: os_project_info
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: openstack.cloud.project_info
module_args:
auth: "{{ octavia_user_auth }}"
cacert: "{{ openstack_cacert }}"
@@ -54,7 +57,8 @@
- name: Create security groups for octavia
become: true
kolla_toolbox:
- module_name: os_security_group
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: openstack.cloud.security_group
module_args:
auth: "{{ octavia_user_auth }}"
cacert: "{{ openstack_cacert }}"
@@ -73,7 +77,8 @@
- name: Add rules for security groups
become: true
kolla_toolbox:
- module_name: os_security_group_rule
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: openstack.cloud.security_group_rule
module_args:
auth: "{{ octavia_user_auth }}"
cacert: "{{ openstack_cacert }}"
@@ -83,6 +88,7 @@
protocol: "{{ item.1.protocol }}"
port_range_min: "{{ item.1.src_port | default(omit) }}"
port_range_max: "{{ item.1.dst_port | default(omit) }}"
+ ethertype: "IPv{{ octavia_network_address_family[-1] }}"
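+      # octavia_network_address_family is "ipv4" or "ipv6"; its last character
+      # selects the matching ethertype (IPv4 / IPv6).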
with_subelements:
- "{{ octavia_amp_security_groups }}"
- rules
@@ -93,7 +99,8 @@
- name: Create loadbalancer management network
become: true
kolla_toolbox:
- module_name: os_network
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: openstack.cloud.network
module_args:
auth: "{{ octavia_user_auth }}"
cacert: "{{ openstack_cacert }}"
@@ -114,7 +121,8 @@
- name: Create loadbalancer management subnet
become: true
kolla_toolbox:
- module_name: os_subnet
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: openstack.cloud.subnet
module_args:
auth: "{{ octavia_user_auth }}"
cacert: "{{ openstack_cacert }}"
@@ -134,3 +142,20 @@
ipv6_ra_mode: "{{ octavia_amp_network['subnet']['ipv6_ra_mode'] | default(omit) }}"
run_once: True
delegate_to: "{{ groups['octavia-api'][0] }}"
+
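+# Assumption: an IPv6 management subnet needs a Neutron router to emit Router
+# Advertisements, without which amphorae would not learn routes on lb-mgmt-net.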
+- name: Create loadbalancer management router for IPv6
+ become: true
+ kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: openstack.cloud.router
+ module_args:
+ auth: "{{ octavia_user_auth }}"
+ cacert: "{{ openstack_cacert }}"
+ endpoint_type: "{{ openstack_interface }}"
+ region_name: "{{ openstack_region_name }}"
+ state: present
+ name: "{{ octavia_amp_router['name'] }}"
+ interfaces: "{{ octavia_amp_router['subnet'] }}"
+ run_once: True
+ when: octavia_network_address_family == "ipv6"
+ delegate_to: "{{ groups['octavia-api'][0] }}"
diff --git a/ansible/roles/octavia/tasks/register.yml b/ansible/roles/octavia/tasks/register.yml
index 079a59a022..7bf995e765 100644
--- a/ansible/roles/octavia/tasks/register.yml
+++ b/ansible/roles/octavia/tasks/register.yml
@@ -9,7 +9,8 @@
- name: Adding octavia related roles
become: true
kolla_toolbox:
- module_name: "os_keystone_role"
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: openstack.cloud.identity_role
module_args:
name: "{{ item }}"
auth: "{{ openstack_octavia_auth }}"
diff --git a/ansible/roles/octavia/tasks/upgrade.yml b/ansible/roles/octavia/tasks/upgrade.yml
index becb086045..6ba9f99799 100644
--- a/ansible/roles/octavia/tasks/upgrade.yml
+++ b/ansible/roles/octavia/tasks/upgrade.yml
@@ -1,7 +1,4 @@
---
-- include_tasks: get_resources_info.yml
- when: octavia_auto_configure | bool
-
- import_tasks: config.yml
- import_tasks: check-containers.yml
diff --git a/ansible/roles/octavia/templates/octavia-api.json.j2 b/ansible/roles/octavia/templates/octavia-api.json.j2
index cb470987e9..92fd67b227 100644
--- a/ansible/roles/octavia/templates/octavia-api.json.j2
+++ b/ansible/roles/octavia/templates/octavia-api.json.j2
@@ -32,8 +32,14 @@
"dest": "/etc/octavia/certs/octavia-key.pem",
"owner": "octavia",
"perm": "0600"
- }
- {% endif %}],
+ }{% endif %}{% if octavia_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
+ ],
"permissions": [
{
"path": "/var/log/kolla/octavia",
diff --git a/ansible/roles/octavia/templates/octavia-driver-agent.json.j2 b/ansible/roles/octavia/templates/octavia-driver-agent.json.j2
index cde7b33607..7f024d9f65 100644
--- a/ansible/roles/octavia/templates/octavia-driver-agent.json.j2
+++ b/ansible/roles/octavia/templates/octavia-driver-agent.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/octavia/{{ octavia_policy_file }}",
"owner": "octavia",
"perm": "0600"
+ }{% endif %}{% if octavia_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/octavia/templates/octavia-health-manager.json.j2 b/ansible/roles/octavia/templates/octavia-health-manager.json.j2
index 9c4696e7d0..59d4c3784b 100644
--- a/ansible/roles/octavia/templates/octavia-health-manager.json.j2
+++ b/ansible/roles/octavia/templates/octavia-health-manager.json.j2
@@ -30,6 +30,12 @@
"dest": "/etc/octavia/certs/server_ca.key.pem",
"owner": "octavia",
"perm": "0600"
+ }{% endif %}{% if octavia_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
]
}
diff --git a/ansible/roles/octavia/templates/octavia-housekeeping.json.j2 b/ansible/roles/octavia/templates/octavia-housekeeping.json.j2
index 6631bf8466..290d128bfc 100644
--- a/ansible/roles/octavia/templates/octavia-housekeeping.json.j2
+++ b/ansible/roles/octavia/templates/octavia-housekeeping.json.j2
@@ -30,6 +30,12 @@
"dest": "/etc/octavia/certs/server_ca.key.pem",
"owner": "octavia",
"perm": "0600"
+ }{% endif %}{% if octavia_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
]
}
diff --git a/ansible/roles/octavia/templates/octavia-interface.service.j2 b/ansible/roles/octavia/templates/octavia-interface.service.j2
index 7f04d9fb42..532cdc72e5 100644
--- a/ansible/roles/octavia/templates/octavia-interface.service.j2
+++ b/ansible/roles/octavia/templates/octavia-interface.service.j2
@@ -7,6 +7,10 @@ After=docker.service
Type=oneshot
User=root
Group=root
+Restart=on-failure
+{% if octavia_interface_wait_timeout is defined %}
+TimeoutStartSec={{ octavia_interface_wait_timeout }}
+{% endif %}
RemainAfterExit=true
ExecStartPre=/sbin/ip link set dev {{ octavia_network_interface }} address {{ port_info.port.mac_address }}
ExecStart=/sbin/dhclient -v {{ octavia_network_interface }} -cf /etc/dhcp/octavia-dhclient.conf
diff --git a/ansible/roles/octavia/templates/octavia-openrc.sh.j2 b/ansible/roles/octavia/templates/octavia-openrc.sh.j2
index f0f38e0c89..9256e80a3c 100644
--- a/ansible/roles/octavia/templates/octavia-openrc.sh.j2
+++ b/ansible/roles/octavia/templates/octavia-openrc.sh.j2
@@ -1,10 +1,10 @@
# Clear any old environment that may conflict.
for key in $( set | awk '{FS="="} /^OS_/ {print $1}' ); do unset $key ; done
-export OS_PROJECT_DOMAIN_NAME=Default
-export OS_USER_DOMAIN_NAME=Default
-export OS_PROJECT_NAME={{ octavia_service_auth_project }}
-export OS_USERNAME={{ octavia_keystone_user }}
-export OS_PASSWORD={{ octavia_keystone_password }}
-export OS_AUTH_URL={{ keystone_internal_url }}
-export OS_INTERFACE=internal
-export OS_ENDPOINT_TYPE=internalURL
+export OS_PROJECT_DOMAIN_NAME='Default'
+export OS_USER_DOMAIN_NAME='Default'
+export OS_PROJECT_NAME='{{ octavia_service_auth_project }}'
+export OS_USERNAME='{{ octavia_keystone_user }}'
+export OS_PASSWORD='{{ octavia_keystone_password }}'
+export OS_AUTH_URL='{{ keystone_internal_url }}'
+export OS_INTERFACE='internal'
+export OS_ENDPOINT_TYPE='internalURL'
diff --git a/ansible/roles/octavia/templates/octavia-worker.json.j2 b/ansible/roles/octavia/templates/octavia-worker.json.j2
index 9aa32872d5..7b1df642fe 100644
--- a/ansible/roles/octavia/templates/octavia-worker.json.j2
+++ b/ansible/roles/octavia/templates/octavia-worker.json.j2
@@ -30,6 +30,12 @@
"dest": "/etc/octavia/certs/server_ca.key.pem",
"owner": "octavia",
"perm": "0600"
+ }{% endif %}{% if octavia_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
]
}
diff --git a/ansible/roles/octavia/templates/octavia.conf.j2 b/ansible/roles/octavia/templates/octavia.conf.j2
index 5a23a0f563..ca59acd35f 100644
--- a/ansible/roles/octavia/templates/octavia.conf.j2
+++ b/ansible/roles/octavia/templates/octavia.conf.j2
@@ -53,7 +53,7 @@ project_name = {{ octavia_service_auth_project }}
project_domain_name = {{ default_project_domain_name }}
cafile = {{ openstack_cacert }}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
@@ -70,7 +70,7 @@ password = {{ octavia_keystone_password }}
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
@@ -101,9 +101,9 @@ amp_secgroup_list = {{ octavia_amp_secgroup_list }}
amp_flavor_id = {{ octavia_amp_flavor_id }}
{% endif %}
{% else %}
-amp_image_owner_id = {{ project_info.openstack_projects.0.id }}
+amp_image_owner_id = {{ project_info.projects.0.id }}
amp_boot_network_list = {{ network_info.id }}
-amp_secgroup_list = {{ (sec_grp_info.results | selectattr('item.name', 'equalto', octavia_amp_security_groups['mgmt-sec-grp'].name) | list).0.secgroup.id }}
+amp_secgroup_list = {{ (sec_grp_info.results | selectattr('item.name', 'equalto', octavia_amp_security_groups['mgmt-sec-grp'].name) | list).0.security_group.id }}
amp_flavor_id = {{ amphora_flavor_info.id }}
{% endif %}
@@ -122,11 +122,18 @@ rpc_thread_pool_size = 2
[oslo_messaging_notifications]
transport_url = {{ notify_transport_url }}
-{% if om_enable_rabbitmq_tls | bool %}
[oslo_messaging_rabbit]
+heartbeat_in_pthread = {{ service_name == 'octavia-api' }}
+{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
{% if octavia_policy_file is defined %}
[oslo_policy]
@@ -147,3 +154,12 @@ ca_certificates_file = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
endpoint_type = internal
ca_certificates_file = {{ openstack_cacert }}
+{% if enable_octavia_jobboard | bool %}
+
+[task_flow]
+persistence_connection = mysql+pymysql://{{ octavia_persistence_database_user }}:{{ octavia_persistence_database_password }}@{{ octavia_persistence_database_address }}/{{ octavia_persistence_database_name }}
+jobboard_enabled = true
+jobboard_backend_password = "{{ redis_master_password }}"
+jobboard_backend_port = "{{ redis_port }}"
+jobboard_backend_hosts = {% for host in groups['redis'] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}{% if not loop.last %},{% endif %}{% endfor %}
+{% endif %}
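The new `[task_flow]` block persists Octavia's amphora tasks in a Redis-backed jobboard, with the backend hosts drawn from the `redis` inventory group. A minimal, hedged sketch of the deployer-side switches this template consumes (variable names as used above; `enable_redis` is assumed to be the existing toggle that populates that group):

```yaml
# Hedged globals.yml sketch; not part of the patch, values illustrative.
enable_octavia_jobboard: "yes"
enable_redis: "yes"
```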
diff --git a/ansible/roles/opensearch/defaults/main.yml b/ansible/roles/opensearch/defaults/main.yml
new file mode 100644
index 0000000000..5d419e0b49
--- /dev/null
+++ b/ansible/roles/opensearch/defaults/main.yml
@@ -0,0 +1,181 @@
+---
+opensearch_services:
+ opensearch:
+ container_name: opensearch
+ group: opensearch
+ enabled: true
+ image: "{{ opensearch_image_full }}"
+ environment:
+ OPENSEARCH_JAVA_OPTS: "{{ opensearch_java_opts }}"
+ volumes: "{{ opensearch_default_volumes + opensearch_extra_volumes }}"
+ dimensions: "{{ opensearch_dimensions }}"
+ healthcheck: "{{ opensearch_healthcheck }}"
+ haproxy:
+ opensearch:
+ enabled: "{{ enable_opensearch }}"
+ mode: "http"
+ external: false
+ port: "{{ opensearch_port }}"
+ frontend_http_extra:
+ - "option dontlog-normal"
+ opensearch-dashboards:
+ container_name: opensearch_dashboards
+ group: opensearch-dashboards
+ enabled: "{{ enable_opensearch_dashboards }}"
+ environment:
+ OPENSEARCH_DASHBOARDS_SECURITY_PLUGIN: "False"
+ image: "{{ opensearch_dashboards_image_full }}"
+ volumes: "{{ opensearch_dashboards_default_volumes + opensearch_dashboards_extra_volumes }}"
+ dimensions: "{{ opensearch_dashboards_dimensions }}"
+ healthcheck: "{{ opensearch_dashboards_healthcheck }}"
+ haproxy:
+ opensearch-dashboards:
+ enabled: "{{ enable_opensearch_dashboards }}"
+ mode: "http"
+ external: false
+ port: "{{ opensearch_dashboards_port }}"
+ auth_user: "{{ opensearch_dashboards_user }}"
+ auth_pass: "{{ opensearch_dashboards_password }}"
+ opensearch_dashboards_external:
+ enabled: "{{ enable_opensearch_dashboards_external | bool }}"
+ mode: "http"
+ external: true
+ external_fqdn: "{{ opensearch_dashboards_external_fqdn }}"
+ port: "{{ opensearch_dashboards_port_external }}"
+ listen_port: "{{ opensearch_dashboards_listen_port }}"
+ auth_user: "{{ opensearch_dashboards_user }}"
+ auth_pass: "{{ opensearch_dashboards_password }}"
+
+
+####################
+# OpenSearch
+####################
+
+# Register OpenSearch internal endpoint in the Keystone service catalogue
+opensearch_enable_keystone_registration: False
+
+opensearch_cluster_name: "kolla_logging"
+opensearch_heap_size: "1g"
+opensearch_java_opts: "{% if opensearch_heap_size %}-Xms{{ opensearch_heap_size }} -Xmx{{ opensearch_heap_size }}{% endif %} -Dlog4j2.formatMsgNoLookups=true"
+
+opensearch_apply_log_retention_policy: true
+
+# Duration after which an index is staged for deletion. This is implemented
+# by closing the index. Whilst in this state the index contributes negligible
+# load on the cluster and may be manually re-opened if required.
+# NOTE: We carry over legacy settings from Elasticsearch Curator if they
+# are set. This may be removed in a later release.
+opensearch_soft_retention_period_days: "{{ elasticsearch_curator_soft_retention_period_days | default(30) }}"
+
+# Duration after which an index is permanently erased from the cluster.
+opensearch_hard_retention_period_days: "{{ elasticsearch_curator_hard_retention_period_days | default(60) }}"
+
+opensearch_retention_policy: |
+ policy:
+ description: Retention policy for OpenStack logs
+ error_notification:
+ default_state: open
+ states:
+ - name: open
+ actions: []
+ transitions:
+ - state_name: close
+ conditions:
+ min_index_age: "{{ opensearch_soft_retention_period_days }}d"
+ - name: close
+ actions:
+ - retry:
+ count: 3
+ backoff: exponential
+ delay: 1m
+ close: {}
+ transitions:
+ - state_name: delete
+ conditions:
+ min_index_age: "{{ opensearch_hard_retention_period_days }}d"
+ - name: delete
+ actions:
+ - retry:
+ count: 3
+ backoff: exponential
+ delay: 1m
+ delete: {}
+ transitions: []
+ ism_template:
+ - index_patterns:
+ - "{{ opensearch_log_index_prefix }}-*"
+ priority: 1
+
+####################
+# Keystone
+####################
+opensearch_openstack_auth: "{{ openstack_auth }}"
+
+opensearch_ks_services:
+ - name: "opensearch"
+ type: "log-storage"
+    description: "OpenSearch"
+ endpoints:
+ - {'interface': 'internal', 'url': '{{ opensearch_internal_endpoint }}'}
+
+#######################
+# OpenSearch Dashboards
+#######################
+opensearch_dashboards_default_app_id: "discover"
+opensearch_dashboards_opensearch_request_timeout: 300000
+opensearch_dashboards_opensearch_shard_timeout: 0
+opensearch_dashboards_opensearch_ssl_verify: true
+
+####################
+# Docker
+####################
+opensearch_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}opensearch"
+opensearch_tag: "{{ openstack_tag }}"
+opensearch_image_full: "{{ opensearch_image }}:{{ opensearch_tag }}"
+
+opensearch_dashboards_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}opensearch-dashboards"
+opensearch_dashboards_tag: "{{ opensearch_tag }}"
+opensearch_dashboards_image_full: "{{ opensearch_dashboards_image }}:{{ opensearch_dashboards_tag }}"
+
+opensearch_dimensions: "{{ default_container_dimensions }}"
+opensearch_dashboards_dimensions: "{{ default_container_dimensions }}"
+
+opensearch_enable_healthchecks: "{{ enable_container_healthchecks }}"
+opensearch_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
+opensearch_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
+opensearch_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
+opensearch_healthcheck_test: ["CMD-SHELL", "healthcheck_curl http://{{ api_interface_address | put_address_in_context('url') }}:{{ opensearch_port }}"]
+opensearch_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
+opensearch_healthcheck:
+ interval: "{{ opensearch_healthcheck_interval }}"
+ retries: "{{ opensearch_healthcheck_retries }}"
+ start_period: "{{ opensearch_healthcheck_start_period }}"
+ test: "{% if opensearch_enable_healthchecks | bool %}{{ opensearch_healthcheck_test }}{% else %}NONE{% endif %}"
+ timeout: "{{ opensearch_healthcheck_timeout }}"
+opensearch_dashboards_enable_healthchecks: "{{ enable_container_healthchecks }}"
+opensearch_dashboards_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
+opensearch_dashboards_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
+opensearch_dashboards_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
+opensearch_dashboards_healthcheck_test: ["CMD-SHELL", "healthcheck_curl http://{{ api_interface_address | put_address_in_context('url') }}:{{ opensearch_dashboards_port }}"]
+opensearch_dashboards_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
+opensearch_dashboards_healthcheck:
+ interval: "{{ opensearch_dashboards_healthcheck_interval }}"
+ retries: "{{ opensearch_dashboards_healthcheck_retries }}"
+ start_period: "{{ opensearch_dashboards_healthcheck_start_period }}"
+ test: "{% if opensearch_dashboards_enable_healthchecks | bool %}{{ opensearch_dashboards_healthcheck_test }}{% else %}NONE{% endif %}"
+ timeout: "{{ opensearch_dashboards_healthcheck_timeout }}"
+
+opensearch_default_volumes:
+ - "{{ node_config_directory }}/opensearch/:{{ container_config_directory }}/"
+ - "/etc/localtime:/etc/localtime:ro"
+ - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
+ - "{{ opensearch_datadir_volume }}:/var/lib/opensearch/data"
+ - "kolla_logs:/var/log/kolla/"
+opensearch_dashboards_default_volumes:
+ - "{{ node_config_directory }}/opensearch-dashboards/:{{ container_config_directory }}/:ro"
+ - "/etc/localtime:/etc/localtime:ro"
+ - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
+ - "kolla_logs:/var/log/kolla/"
+
+opensearch_extra_volumes: "{{ default_extra_volumes }}"
+opensearch_dashboards_extra_volumes: "{{ default_extra_volumes }}"
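The soft/hard retention defaults above fall back to any legacy Elasticsearch Curator values before using 30/60 days. Overriding them per deployment is a one-line change per variable; a sketch with illustrative values:

```yaml
# Sketch: shorter log retention in globals.yml (values illustrative).
opensearch_soft_retention_period_days: 14
opensearch_hard_retention_period_days: 30
```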
diff --git a/ansible/roles/opensearch/handlers/main.yml b/ansible/roles/opensearch/handlers/main.yml
new file mode 100644
index 0000000000..9791d1b82e
--- /dev/null
+++ b/ansible/roles/opensearch/handlers/main.yml
@@ -0,0 +1,73 @@
+---
+- name: Disable shard allocation
+ become: true
+ vars:
+ opensearch_shard_body: {"transient": {"cluster.routing.allocation.enable": "none"}}
+ kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: uri
+ module_args:
+ url: "{{ opensearch_internal_endpoint }}/_cluster/settings"
+ method: PUT
+ status_code: 200
+ return_content: yes
+ body: "{{ opensearch_shard_body | to_json }}" # noqa jinja[invalid]
+ body_format: json
+ ca_path: "{{ openstack_cacert }}"
+ delegate_to: "{{ groups['opensearch'][0] }}"
+ run_once: true
+ listen: "Restart opensearch container"
+ when:
+ - kolla_action == "upgrade"
+
+- name: Perform a flush
+ become: true
+ kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: uri
+ module_args:
+ url: "{{ opensearch_internal_endpoint }}/_flush"
+ method: POST
+ status_code: 200
+ return_content: yes
+ body_format: json
+ ca_path: "{{ openstack_cacert }}"
+ delegate_to: "{{ groups['opensearch'][0] }}"
+ run_once: true
+ retries: 10
+ delay: 5
+ register: result
+ until: ('status' in result) and result.status == 200
+ listen: "Restart opensearch container"
+ when:
+ - kolla_action == "upgrade"
+
+- name: Restart opensearch container
+ vars:
+ service_name: "opensearch"
+ service: "{{ opensearch_services[service_name] }}"
+ become: true
+ kolla_container:
+ action: "recreate_or_restart_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ service.container_name }}"
+ image: "{{ service.image }}"
+ environment: "{{ service.environment }}"
+ volumes: "{{ service.volumes }}"
+ dimensions: "{{ service.dimensions }}"
+ healthcheck: "{{ service.healthcheck | default(omit) }}"
+
+- name: Restart opensearch-dashboards container
+ vars:
+ service_name: "opensearch-dashboards"
+ service: "{{ opensearch_services[service_name] }}"
+ become: true
+ kolla_container:
+ action: "recreate_or_restart_container"
+ common_options: "{{ docker_common_options }}"
+ dimensions: "{{ service.dimensions }}"
+ environment: "{{ service.environment | default(omit) }}"
+ image: "{{ service.image }}"
+ name: "{{ service.container_name }}"
+ volumes: "{{ service.volumes }}"
+ healthcheck: "{{ service.healthcheck | default(omit) }}"
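The upgrade-time handlers above disable shard allocation and flush before the restart, but this file contains no matching re-enable step. A hypothetical operator-side counterpart, mirroring the disable call (an assumption, not part of the patch; setting the transient value to null resets it to the default):

```yaml
# Hypothetical counterpart to "Disable shard allocation"; not in the patch.
- name: Re-enable shard allocation
  become: true
  vars:
    opensearch_shard_body: {"transient": {"cluster.routing.allocation.enable": null}}
  kolla_toolbox:
    container_engine: "{{ kolla_container_engine }}"
    module_name: uri
    module_args:
      url: "{{ opensearch_internal_endpoint }}/_cluster/settings"
      method: PUT
      status_code: 200
      body: "{{ opensearch_shard_body | to_json }}"
      body_format: json
      ca_path: "{{ openstack_cacert }}"
  run_once: true
```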
diff --git a/ansible/roles/opensearch/tasks/check-containers.yml b/ansible/roles/opensearch/tasks/check-containers.yml
new file mode 100644
index 0000000000..b7e2f7c29f
--- /dev/null
+++ b/ansible/roles/opensearch/tasks/check-containers.yml
@@ -0,0 +1,3 @@
+---
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/opensearch/tasks/check.yml b/ansible/roles/opensearch/tasks/check.yml
new file mode 100644
index 0000000000..ed97d539c0
--- /dev/null
+++ b/ansible/roles/opensearch/tasks/check.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible/roles/elasticsearch/tasks/config-host.yml b/ansible/roles/opensearch/tasks/config-host.yml
similarity index 90%
rename from ansible/roles/elasticsearch/tasks/config-host.yml
rename to ansible/roles/opensearch/tasks/config-host.yml
index cf9a8f9969..8e092b487b 100644
--- a/ansible/roles/elasticsearch/tasks/config-host.yml
+++ b/ansible/roles/opensearch/tasks/config-host.yml
@@ -14,4 +14,4 @@
when:
- set_sysctl | bool
- item.value != 'KOLLA_SKIP'
- - inventory_hostname in groups['elasticsearch']
+ - inventory_hostname in groups['opensearch']
diff --git a/ansible/roles/opensearch/tasks/config.yml b/ansible/roles/opensearch/tasks/config.yml
new file mode 100644
index 0000000000..719377724c
--- /dev/null
+++ b/ansible/roles/opensearch/tasks/config.yml
@@ -0,0 +1,49 @@
+---
+- name: Ensuring config directories exist
+ file:
+ path: "{{ node_config_directory }}/{{ item.key }}"
+ state: "directory"
+ owner: "{{ config_owner_user }}"
+ group: "{{ config_owner_group }}"
+ mode: "0770"
+ become: true
+ with_dict: "{{ opensearch_services | select_services_enabled_and_mapped_to_host }}"
+
+- include_tasks: copy-certs.yml
+ when:
+ - kolla_copy_ca_into_containers | bool
+
+- name: Copying over config.json files for services
+ template:
+ src: "{{ item.key }}.json.j2"
+ dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
+ mode: "0660"
+ become: true
+ with_dict: "{{ opensearch_services | select_services_enabled_and_mapped_to_host }}"
+
+- name: Copying over opensearch service config file
+ vars:
+ service: "{{ opensearch_services['opensearch'] }}"
+ merge_yaml:
+ sources:
+ - "{{ role_path }}/templates/opensearch.yml.j2"
+ - "{{ node_custom_config }}/opensearch.yml"
+ - "{{ node_custom_config }}/opensearch/opensearch.yml"
+ - "{{ node_custom_config }}/opensearch/{{ inventory_hostname }}/opensearch.yml"
+ dest: "{{ node_config_directory }}/opensearch/opensearch.yml"
+ mode: "0660"
+ become: true
+ when: service | service_enabled_and_mapped_to_host
+
+- name: Copying over opensearch-dashboards config file
+ vars:
+ service: "{{ opensearch_services['opensearch-dashboards'] }}"
+ merge_yaml:
+ sources:
+ - "{{ role_path }}/templates/opensearch_dashboards.yml.j2"
+ - "{{ node_custom_config }}/opensearch/opensearch_dashboards.yml"
+ - "{{ node_custom_config }}/opensearch/{{ inventory_hostname }}/opensearch_dashboards.yml"
+ dest: "{{ node_config_directory }}/opensearch-dashboards/opensearch_dashboards.yml"
+ mode: "0660"
+ become: true
+ when: service | service_enabled_and_mapped_to_host
diff --git a/ansible/roles/opensearch/tasks/config_validate.yml b/ansible/roles/opensearch/tasks/config_validate.yml
new file mode 100644
index 0000000000..ed97d539c0
--- /dev/null
+++ b/ansible/roles/opensearch/tasks/config_validate.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible/roles/opensearch/tasks/copy-certs.yml b/ansible/roles/opensearch/tasks/copy-certs.yml
new file mode 100644
index 0000000000..554ac38618
--- /dev/null
+++ b/ansible/roles/opensearch/tasks/copy-certs.yml
@@ -0,0 +1,6 @@
+---
+- name: "Copy certificates and keys for {{ project_name }}"
+ import_role:
+ role: service-cert-copy
+ vars:
+ project_services: "{{ opensearch_services }}"
diff --git a/ansible/roles/freezer/tasks/deploy-containers.yml b/ansible/roles/opensearch/tasks/deploy-containers.yml
similarity index 100%
rename from ansible/roles/freezer/tasks/deploy-containers.yml
rename to ansible/roles/opensearch/tasks/deploy-containers.yml
diff --git a/ansible/roles/opensearch/tasks/deploy.yml b/ansible/roles/opensearch/tasks/deploy.yml
new file mode 100644
index 0000000000..a0ebfaf7d7
--- /dev/null
+++ b/ansible/roles/opensearch/tasks/deploy.yml
@@ -0,0 +1,15 @@
+---
+- import_tasks: config-host.yml
+
+- import_tasks: config.yml
+
+- import_tasks: check-containers.yml
+
+- include_tasks: register.yml
+ when: opensearch_enable_keystone_registration | bool
+
+- name: Flush handlers
+ meta: flush_handlers
+
+- include_tasks: post-config.yml
+ when: opensearch_apply_log_retention_policy | bool
diff --git a/ansible/roles/opensearch/tasks/loadbalancer.yml b/ansible/roles/opensearch/tasks/loadbalancer.yml
new file mode 100644
index 0000000000..33033984c8
--- /dev/null
+++ b/ansible/roles/opensearch/tasks/loadbalancer.yml
@@ -0,0 +1,7 @@
+---
+- name: "Configure loadbalancer for {{ project_name }}"
+ import_role:
+ name: loadbalancer-config
+ vars:
+ project_services: "{{ opensearch_services }}"
+ tags: always
diff --git a/ansible/roles/freezer/tasks/main.yml b/ansible/roles/opensearch/tasks/main.yml
similarity index 100%
rename from ansible/roles/freezer/tasks/main.yml
rename to ansible/roles/opensearch/tasks/main.yml
diff --git a/ansible/roles/opensearch/tasks/post-config.yml b/ansible/roles/opensearch/tasks/post-config.yml
new file mode 100644
index 0000000000..5fd2f590ba
--- /dev/null
+++ b/ansible/roles/opensearch/tasks/post-config.yml
@@ -0,0 +1,69 @@
+---
+- name: Wait for OpenSearch to become ready
+ become: true
+ kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: uri
+ module_args:
+ url: "{{ opensearch_internal_endpoint }}/_cluster/stats"
+ status_code: 200
+ ca_path: "{{ openstack_cacert }}"
+ register: result
+ until: result.get('status') == 200
+ retries: 30
+ delay: 2
+ run_once: true
+
+- name: Check if a log retention policy exists
+ become: true
+ kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: uri
+ module_args:
+ url: "{{ opensearch_internal_endpoint }}/_plugins/_ism/policies/retention"
+ method: GET
+ status_code: 200, 404
+ return_content: yes
+ ca_path: "{{ openstack_cacert }}"
+ register: opensearch_retention_policy_check
+ delegate_to: "{{ groups['opensearch'][0] }}"
+ run_once: true
+
+- name: Create new log retention policy
+ become: true
+ kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: uri
+ module_args:
+ url: "{{ opensearch_internal_endpoint }}/_plugins/_ism/policies/retention"
+ method: PUT
+ status_code: 201
+ return_content: yes
+ body: "{{ opensearch_retention_policy | from_yaml | to_json }}"
+ body_format: json
+ ca_path: "{{ openstack_cacert }}"
+ register: opensearch_retention_policy_create
+ delegate_to: "{{ groups['opensearch'][0] }}"
+ run_once: true
+ changed_when: opensearch_retention_policy_create.status == 201
+ when: opensearch_retention_policy_check.status == 404
+
+- name: Apply retention policy to existing indices
+ become: true
+ vars:
+ opensearch_set_policy_body: {"policy_id": "retention"}
+ kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: uri
+ module_args:
+ url: "{{ opensearch_internal_endpoint }}/_plugins/_ism/add/{{ opensearch_log_index_prefix }}-*"
+ method: POST
+ status_code: 200
+ return_content: yes
+ body: "{{ opensearch_set_policy_body | to_json }}"
+ body_format: json
+ ca_path: "{{ openstack_cacert }}"
+ delegate_to: "{{ groups['opensearch'][0] }}"
+ run_once: true
+ changed_when: opensearch_retention_policy_create.status == 201
+ when: opensearch_retention_policy_check.status == 404
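Once the `retention` policy exists and has been attached, the ISM explain API reports per-index policy state. A hedged read-only check in the same style as the tasks above (not part of the patch):

```yaml
# Hedged verification sketch using the ISM explain API.
- name: Show ISM status for log indices
  become: true
  kolla_toolbox:
    container_engine: "{{ kolla_container_engine }}"
    module_name: uri
    module_args:
      url: "{{ opensearch_internal_endpoint }}/_plugins/_ism/explain/{{ opensearch_log_index_prefix }}-*"
      method: GET
      status_code: 200
      return_content: yes
      ca_path: "{{ openstack_cacert }}"
  register: ism_explain
  run_once: true
```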
diff --git a/ansible/roles/opensearch/tasks/precheck.yml b/ansible/roles/opensearch/tasks/precheck.yml
new file mode 100644
index 0000000000..47e2dff163
--- /dev/null
+++ b/ansible/roles/opensearch/tasks/precheck.yml
@@ -0,0 +1,27 @@
+---
+- import_role:
+ name: service-precheck
+ vars:
+ service_precheck_services: "{{ opensearch_services }}"
+ service_name: "{{ project_name }}"
+
+- name: Get container facts
+ become: true
+ kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
+ name:
+ - opensearch
+ check_mode: false
+ register: container_facts
+
+- name: Checking free port for OpenSearch
+ wait_for:
+ host: "{{ api_interface_address }}"
+ port: "{{ opensearch_port }}"
+ connect_timeout: 1
+ timeout: 1
+ state: stopped
+ when:
+ - container_facts['opensearch'] is not defined
+ - inventory_hostname in groups['opensearch']
diff --git a/ansible/roles/freezer/tasks/pull.yml b/ansible/roles/opensearch/tasks/pull.yml
similarity index 100%
rename from ansible/roles/freezer/tasks/pull.yml
rename to ansible/roles/opensearch/tasks/pull.yml
diff --git a/ansible/roles/freezer/tasks/reconfigure.yml b/ansible/roles/opensearch/tasks/reconfigure.yml
similarity index 100%
rename from ansible/roles/freezer/tasks/reconfigure.yml
rename to ansible/roles/opensearch/tasks/reconfigure.yml
diff --git a/ansible/roles/opensearch/tasks/register.yml b/ansible/roles/opensearch/tasks/register.yml
new file mode 100644
index 0000000000..1fcb8be39b
--- /dev/null
+++ b/ansible/roles/opensearch/tasks/register.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-ks-register
+ vars:
+ service_ks_register_auth: "{{ opensearch_openstack_auth }}"
+ service_ks_register_services: "{{ opensearch_ks_services }}"
+ tags: always
diff --git a/ansible/roles/opensearch/tasks/stop.yml b/ansible/roles/opensearch/tasks/stop.yml
new file mode 100644
index 0000000000..8a9d328909
--- /dev/null
+++ b/ansible/roles/opensearch/tasks/stop.yml
@@ -0,0 +1,6 @@
+---
+- import_role:
+ name: service-stop
+ vars:
+ project_services: "{{ opensearch_services }}"
+ service_name: "{{ project_name }}"
diff --git a/ansible/roles/opensearch/tasks/upgrade.yml b/ansible/roles/opensearch/tasks/upgrade.yml
new file mode 100644
index 0000000000..cb376892a9
--- /dev/null
+++ b/ansible/roles/opensearch/tasks/upgrade.yml
@@ -0,0 +1,16 @@
+---
+- import_tasks: config-host.yml
+
+- import_tasks: config.yml
+
+- import_tasks: check-containers.yml
+
+- include_tasks: register.yml
+ when:
+ - opensearch_enable_keystone_registration | bool
+
+- name: Flush handlers
+ meta: flush_handlers
+
+- include_tasks: post-config.yml
+ when: opensearch_apply_log_retention_policy | bool
diff --git a/ansible/roles/opensearch/templates/opensearch-dashboards.json.j2 b/ansible/roles/opensearch/templates/opensearch-dashboards.json.j2
new file mode 100644
index 0000000000..6cb96cb4a1
--- /dev/null
+++ b/ansible/roles/opensearch/templates/opensearch-dashboards.json.j2
@@ -0,0 +1,34 @@
+{
+ "command": "/usr/share/opensearch-dashboards/bin/opensearch-dashboards --config /etc/opensearch-dashboards/opensearch_dashboards.yml",
+ "config_files": [
+ {
+ "source": "{{ container_config_directory }}/opensearch_dashboards.yml",
+ "dest": "/etc/opensearch-dashboards/opensearch_dashboards.yml",
+ "owner": "opensearch-dashboards",
+ "perm": "0640"
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
+ ],
+ "permissions": [
+ {
+ "path": "/var/log/kolla/opensearch-dashboards",
+ "owner": "opensearch-dashboards:opensearch-dashboards",
+ "recurse": true
+ },
+ {
+ "path": "/usr/share/opensearch-dashboards/optimize/bundles",
+ "owner": "opensearch-dashboards:opensearch-dashboards",
+ "recurse": true
+ },
+ {
+ "path": "/var/lib/opensearch-dashboards",
+ "owner": "opensearch-dashboards:opensearch-dashboards",
+ "recurse": true
+ }
+ ]
+}
diff --git a/ansible/roles/opensearch/templates/opensearch.json.j2 b/ansible/roles/opensearch/templates/opensearch.json.j2
new file mode 100644
index 0000000000..25bb7b7703
--- /dev/null
+++ b/ansible/roles/opensearch/templates/opensearch.json.j2
@@ -0,0 +1,29 @@
+{
+ "command": "/usr/share/opensearch/bin/opensearch",
+ "config_files": [
+ {
+ "source": "{{ container_config_directory }}/opensearch.yml",
+ "dest": "/etc/opensearch/opensearch.yml",
+ "owner": "opensearch",
+ "perm": "0600"
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
+ ],
+ "permissions": [
+ {
+ "path": "/var/lib/opensearch",
+ "owner": "opensearch:opensearch",
+ "recurse": true
+ },
+ {
+ "path": "/var/log/kolla/opensearch",
+ "owner": "opensearch:opensearch",
+ "recurse": true
+ }
+ ]
+}
diff --git a/ansible/roles/opensearch/templates/opensearch.yml.j2 b/ansible/roles/opensearch/templates/opensearch.yml.j2
new file mode 100644
index 0000000000..ed4c632190
--- /dev/null
+++ b/ansible/roles/opensearch/templates/opensearch.yml.j2
@@ -0,0 +1,21 @@
+{% set num_nodes = groups['opensearch'] | length %}
+{% set recover_after_nodes = (num_nodes * 2 / 3) | round(0, 'floor') | int if num_nodes > 1 else 1 %}
+plugins.security.disabled: "true"
+
+node.name: "{{ 'api' | kolla_address | put_address_in_context('url') }}"
+network.host: "{{ 'api' | kolla_address | put_address_in_context('url') }}"
+
+cluster.name: "{{ opensearch_cluster_name }}"
+cluster.initial_master_nodes: [{% for host in groups['opensearch'] %}"{{ 'api' | kolla_address(host) }}"{% if not loop.last %},{% endif %}{% endfor %}]
+node.master: true
+node.data: true
+discovery.seed_hosts: [{% for host in groups['opensearch'] %}"{{ 'api' | kolla_address(host) | put_address_in_context('url') }}"{% if not loop.last %},{% endif %}{% endfor %}]
+
+http.port: {{ opensearch_port }}
+gateway.expected_nodes: {{ num_nodes }}
+gateway.recover_after_time: "5m"
+gateway.recover_after_nodes: {{ recover_after_nodes }}
+path.data: "/var/lib/opensearch/data"
+path.logs: "/var/log/kolla/opensearch"
+indices.fielddata.cache.size: 40%
+action.auto_create_index: "true"
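The `recover_after_nodes` expression floors two thirds of the cluster size, so recovery waits for a rough quorum rather than for every node. Worked values under assumed cluster sizes:

```yaml
# Worked examples for (num_nodes * 2 / 3) | round(0, 'floor') | int
# num_nodes: 1 -> 1     (special-cased by the ternary above)
# num_nodes: 3 -> 2.0   -> 2
# num_nodes: 5 -> 3.33  -> 3
```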
diff --git a/ansible/roles/opensearch/templates/opensearch_dashboards.yml.j2 b/ansible/roles/opensearch/templates/opensearch_dashboards.yml.j2
new file mode 100644
index 0000000000..ca2f04886e
--- /dev/null
+++ b/ansible/roles/opensearch/templates/opensearch_dashboards.yml.j2
@@ -0,0 +1,12 @@
+opensearchDashboards.defaultAppId: "{{ opensearch_dashboards_default_app_id }}"
+logging.dest: /var/log/kolla/opensearch-dashboards/opensearch-dashboards.log
+server.port: {{ opensearch_dashboards_port }}
+server.host: "{{ api_interface_address }}"
+opensearch.hosts: "{{ opensearch_internal_endpoint }}"
+opensearch.requestTimeout: {{ opensearch_dashboards_opensearch_request_timeout }}
+opensearch.shardTimeout: {{ opensearch_dashboards_opensearch_shard_timeout }}
+opensearch.ssl.verificationMode: "{{ 'full' if opensearch_dashboards_opensearch_ssl_verify | bool else 'none' }}"
+data.search.usageTelemetry.enabled: false
+{% if openstack_cacert | length > 0 %}
+opensearch.ssl.certificateAuthorities: {{ openstack_cacert }}
+{% endif %}
diff --git a/ansible/roles/opensearch/vars/main.yml b/ansible/roles/opensearch/vars/main.yml
new file mode 100644
index 0000000000..b3253f7773
--- /dev/null
+++ b/ansible/roles/opensearch/vars/main.yml
@@ -0,0 +1,2 @@
+---
+project_name: "opensearch"
diff --git a/ansible/roles/openvswitch/defaults/main.yml b/ansible/roles/openvswitch/defaults/main.yml
index ba765f0499..997081b3e1 100644
--- a/ansible/roles/openvswitch/defaults/main.yml
+++ b/ansible/roles/openvswitch/defaults/main.yml
@@ -39,11 +39,11 @@ openvswitch_services:
####################
openvswitch_tag: "{{ openstack_tag }}"
-openvswitch_db_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/openvswitch-db-server"
+openvswitch_db_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}openvswitch-db-server"
openvswitch_db_tag: "{{ openvswitch_tag }}"
openvswitch_db_image_full: "{{ openvswitch_db_image }}:{{ openvswitch_db_tag }}"
-openvswitch_vswitchd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/openvswitch-vswitchd"
+openvswitch_vswitchd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}openvswitch-vswitchd"
openvswitch_vswitchd_tag: "{{ openvswitch_tag }}"
openvswitch_vswitchd_image_full: "{{ openvswitch_vswitchd_image }}:{{ openvswitch_vswitchd_tag }}"
@@ -96,9 +96,12 @@ openvswitch_extra_volumes: "{{ default_extra_volumes }}"
openvswitch_db_extra_volumes: "{{ openvswitch_extra_volumes }}"
openvswitch_vswitchd_extra_volumes: "{{ openvswitch_extra_volumes }}"
+openvswitch_ovs_vsctl_wrapper_enabled: false
+
#############
# OpenvSwitch
#############
openvswitch_system_id: "{{ ansible_facts.hostname }}"
+openvswitch_hostname: "{{ ansible_facts.fqdn }}"
openvswitch_hw_offload: "no"
diff --git a/ansible/roles/openvswitch/handlers/main.yml b/ansible/roles/openvswitch/handlers/main.yml
index a97ea7d59d..7cad1a7d4b 100644
--- a/ansible/roles/openvswitch/handlers/main.yml
+++ b/ansible/roles/openvswitch/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "openvswitch-db-server"
service: "{{ openvswitch_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -12,8 +12,6 @@
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck }}"
- when:
- - kolla_action != "config"
notify:
- Waiting for openvswitch_db service to be ready
@@ -31,7 +29,7 @@
service_name: "openvswitch-vswitchd"
service: "{{ openvswitch_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -40,5 +38,3 @@
privileged: "{{ service.privileged | default(False) }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/openvswitch/tasks/check-containers.yml b/ansible/roles/openvswitch/tasks/check-containers.yml
index 81f05f012e..b7e2f7c29f 100644
--- a/ansible/roles/openvswitch/tasks/check-containers.yml
+++ b/ansible/roles/openvswitch/tasks/check-containers.yml
@@ -1,18 +1,3 @@
---
-- name: Check openvswitch containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- privileged: "{{ item.value.privileged | default(False) }}"
- volumes: "{{ item.value.volumes }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck }}"
- when:
- - item.value.enabled | bool
- - item.value.host_in_groups | bool
- with_dict: "{{ openvswitch_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/openvswitch/tasks/config-host.yml b/ansible/roles/openvswitch/tasks/config-host.yml
index 18dae6a073..dfd4f8cff4 100644
--- a/ansible/roles/openvswitch/tasks/config-host.yml
+++ b/ansible/roles/openvswitch/tasks/config-host.yml
@@ -5,3 +5,16 @@
vars:
modules:
- {'name': openvswitch}
+
+# NOTE(m.hiner): Podman considers a non-existent mount directory
+# an error, so it has to be created beforehand.
+# See: https://github.com/containers/podman/issues/14781
+- name: Create /run/openvswitch directory on host
+ become: True
+ file:
+ path: /run/openvswitch
+ state: directory
+ mode: "0770"
+ owner: "{{ config_owner_user }}"
+ group: "{{ config_owner_group }}"
+ when: kolla_container_engine == 'podman'
diff --git a/ansible/roles/openvswitch/tasks/config.yml b/ansible/roles/openvswitch/tasks/config.yml
index 4089234fe5..43ec133f0f 100644
--- a/ansible/roles/openvswitch/tasks/config.yml
+++ b/ansible/roles/openvswitch/tasks/config.yml
@@ -7,10 +7,7 @@
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
- when:
- - item.value.enabled | bool
- - item.value.host_in_groups | bool
- with_dict: "{{ openvswitch_services }}"
+ with_dict: "{{ openvswitch_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over config.json files for services
become: true
@@ -18,37 +15,16 @@
src: "{{ item.key }}.json.j2"
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
- when:
- - item.value.enabled | bool
- - item.value.host_in_groups | bool
- with_dict: "{{ openvswitch_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ openvswitch_services | select_services_enabled_and_mapped_to_host }}"
-- name: Copying over start-ovs file for openvswitch-vswitchd
- become: true
+- name: Copying over ovs-vsctl wrapper
vars:
service: "{{ openvswitch_services['openvswitch-vswitchd'] }}"
template:
- src: "{{ role_path }}/templates/start-ovs.j2"
- dest: "{{ node_config_directory }}/openvswitch-vswitchd/start-ovs"
- mode: "0770"
- when:
- - service.host_in_groups | bool
- - service.enabled | bool
- notify:
- - "Restart openvswitch-vswitchd container"
-
-- name: Copying over start-ovsdb-server files for openvswitch-db-server
+ src: "ovs-vsctl.j2"
+ dest: "/usr/bin/ovs-vsctl"
+ mode: "0755"
become: true
- vars:
- service: "{{ openvswitch_services['openvswitch-db-server'] }}"
- template:
- src: "{{ role_path }}/templates/start-ovsdb-server.j2"
- dest: "{{ node_config_directory }}/openvswitch-db-server/start-ovsdb-server"
- mode: "0770"
when:
- - service.host_in_groups | bool
- - service.enabled | bool
- notify:
- - "Restart openvswitch-db-server container"
+ - service | service_enabled_and_mapped_to_host
+ - openvswitch_ovs_vsctl_wrapper_enabled | bool
diff --git a/ansible/roles/openvswitch/tasks/config_validate.yml b/ansible/roles/openvswitch/tasks/config_validate.yml
new file mode 100644
index 0000000000..ed97d539c0
--- /dev/null
+++ b/ansible/roles/openvswitch/tasks/config_validate.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible/roles/openvswitch/tasks/post-config.yml b/ansible/roles/openvswitch/tasks/post-config.yml
index 7d71387e3b..274183f7b1 100644
--- a/ansible/roles/openvswitch/tasks/post-config.yml
+++ b/ansible/roles/openvswitch/tasks/post-config.yml
@@ -1,8 +1,9 @@
---
# NOTE(mnasiadka): external_ids:system-id uniquely identifies a physical system, used by OVN and other controllers
-- name: Set system-id and hw-offload
+- name: Set system-id, hostname and hw-offload
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
user: root
module_name: openvswitch_db
module_args:
@@ -14,16 +15,39 @@
state: "{{ item.state | default('present') }}"
loop:
- { col: "external_ids", name: "system-id", value: "{{ openvswitch_system_id }}" }
+ - { col: "external_ids", name: "hostname", value: "{{ openvswitch_hostname }}" }
- { col: "other_config", name: "hw-offload", value: true, state: "{{ 'present' if openvswitch_hw_offload | bool else 'absent' }}" }
+ when:
+ - openvswitch_services['openvswitch-vswitchd'].host_in_groups | bool
+ notify:
+ - "Restart openvswitch-vswitchd container"
- name: Ensuring OVS bridge is properly setup
become: true
- command: "{{ kolla_container_engine }} exec openvswitch_db /usr/local/bin/kolla_ensure_openvswitch_configured {{ item.0 }} {{ item.1 }}"
- register: status
- changed_when: status.stdout.find('changed') != -1
+ kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
+ user: root
+ module_name: openvswitch_bridge
+ module_args:
+ bridge: "{{ item }}"
+ fail_mode: standalone
+ loop: "{{ neutron_bridge_name.split(',') }}"
when:
- inventory_hostname in groups["network"]
or (inventory_hostname in groups["compute"] and computes_need_external_bridge | bool )
+
+- name: Ensuring OVS ports are properly setup
+ become: true
+ kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
+ user: root
+ module_name: openvswitch_port
+ module_args:
+ bridge: "{{ item.0 }}"
+ port: "{{ item.1 }}"
with_together:
- "{{ neutron_bridge_name.split(',') }}"
- "{{ neutron_external_interface.split(',') }}"
+ when:
+ - inventory_hostname in groups["network"]
+ or (inventory_hostname in groups["compute"] and computes_need_external_bridge | bool )
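The two tasks above zip comma-separated bridge and interface lists into bridge/port pairs. Under assumed inputs the resulting OVS state looks like this:

```yaml
# Illustrative inputs (assumed):
#   neutron_bridge_name: "br-ex,br-ex2"
#   neutron_external_interface: "eth1,eth2"
# Resulting state: bridge br-ex carries port eth1, bridge br-ex2 carries
# port eth2, and both bridges are created with fail_mode: standalone.
```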
diff --git a/ansible/roles/openvswitch/tasks/precheck.yml b/ansible/roles/openvswitch/tasks/precheck.yml
index ab87dd981b..f2b152e970 100644
--- a/ansible/roles/openvswitch/tasks/precheck.yml
+++ b/ansible/roles/openvswitch/tasks/precheck.yml
@@ -8,13 +8,16 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- openvswitch_db
+ check_mode: false
register: container_facts
- name: Checking free port for OVSDB
vars:
- openvswitch_db: "{{ openvswitch_services['openvswitch-db-server'] }}"
+ service: "{{ openvswitch_services['openvswitch-db-server'] }}"
wait_for:
host: "{{ api_interface_address }}"
port: "{{ ovsdb_port }}"
@@ -23,5 +26,4 @@
state: stopped
when:
- container_facts['openvswitch_db'] is not defined
- - inventory_hostname in groups[openvswitch_db.group]
- - openvswitch_db.enabled | bool
+ - service | service_enabled_and_mapped_to_host
diff --git a/ansible/roles/openvswitch/templates/openvswitch-db-server.json.j2 b/ansible/roles/openvswitch/templates/openvswitch-db-server.json.j2
index 7e113fe6b3..a683979a47 100644
--- a/ansible/roles/openvswitch/templates/openvswitch-db-server.json.j2
+++ b/ansible/roles/openvswitch/templates/openvswitch-db-server.json.j2
@@ -1,11 +1,4 @@
{
- "command": "start-ovsdb-server 127.0.0.1",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/start-ovsdb-server",
- "dest": "/usr/local/bin/start-ovsdb-server",
- "owner": "root",
- "perm": "0655"
- }
- ]
+ "command": "ovs_wrapper ovsdb-server /var/lib/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/run/openvswitch/db.sock --remote=ptcp:{{ ovsdb_port }}:127.0.0.1 --remote=db:Open_vSwitch,Open_vSwitch,manager_options --log-file=/var/log/kolla/openvswitch/ovsdb-server.log --pidfile",
+ "config_files": []
}
diff --git a/ansible/roles/openvswitch/templates/openvswitch-vswitchd.json.j2 b/ansible/roles/openvswitch/templates/openvswitch-vswitchd.json.j2
index 0c75c355f6..1d5f221c0d 100644
--- a/ansible/roles/openvswitch/templates/openvswitch-vswitchd.json.j2
+++ b/ansible/roles/openvswitch/templates/openvswitch-vswitchd.json.j2
@@ -1,11 +1,4 @@
{
- "command": "start-ovs",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/start-ovs",
- "dest": "/usr/local/bin/start-ovs",
- "owner": "root",
- "perm": "0655"
- }
- ]
+ "command": "ovs_wrapper ovs-vswitchd unix:/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --log-file=/var/log/kolla/openvswitch/ovs-vswitchd.log --pidfile",
+ "config_files": []
}
diff --git a/ansible/roles/openvswitch/templates/ovs-vsctl.j2 b/ansible/roles/openvswitch/templates/ovs-vsctl.j2
new file mode 100644
index 0000000000..d68a51360b
--- /dev/null
+++ b/ansible/roles/openvswitch/templates/ovs-vsctl.j2
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+exec {{ kolla_container_engine }} exec openvswitch_vswitchd ovs-vsctl "$@"
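When `openvswitch_ovs_vsctl_wrapper_enabled` is set, this template is installed as `/usr/bin/ovs-vsctl` on the host and simply execs into the `openvswitch_vswitchd` container, so host-side tooling keeps working. An illustrative ad-hoc check through the wrapper:

```yaml
# Illustrative task; equivalent to running
# "<engine> exec openvswitch_vswitchd ovs-vsctl show" directly.
- name: Show OVS configuration through the host wrapper
  become: true
  command: ovs-vsctl show
  changed_when: false
```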
diff --git a/ansible/roles/openvswitch/templates/start-ovs.j2 b/ansible/roles/openvswitch/templates/start-ovs.j2
deleted file mode 100644
index 4064ed4130..0000000000
--- a/ansible/roles/openvswitch/templates/start-ovs.j2
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/usr/bin/env bash
-/usr/sbin/ovs-vswitchd unix:/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --log-file=/var/log/kolla/openvswitch/ovs-vswitchd.log --pidfile
diff --git a/ansible/roles/openvswitch/templates/start-ovsdb-server.j2 b/ansible/roles/openvswitch/templates/start-ovsdb-server.j2
deleted file mode 100644
index b76dc40e2b..0000000000
--- a/ansible/roles/openvswitch/templates/start-ovsdb-server.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-# NOTE(mnasiadka): ensure existing ovsdb doesn't need to be upgraded
-
-if ([ -f /var/lib/openvswitch/conf.db ] && [ `ovsdb-tool needs-conversion /var/lib/openvswitch/conf.db` == "yes" ]); then
- /usr/bin/ovsdb-tool convert /var/lib/openvswitch/conf.db
-fi
-
-ovsdb_ip=$1
-
-/usr/sbin/ovsdb-server /var/lib/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/run/openvswitch/db.sock --remote=ptcp:{{ ovsdb_port }}:$ovsdb_ip --remote=db:Open_vSwitch,Open_vSwitch,manager_options --log-file=/var/log/kolla/openvswitch/ovsdb-server.log --pidfile
diff --git a/ansible/roles/ovn-controller/defaults/main.yml b/ansible/roles/ovn-controller/defaults/main.yml
new file mode 100644
index 0000000000..c888c8c3b6
--- /dev/null
+++ b/ansible/roles/ovn-controller/defaults/main.yml
@@ -0,0 +1,40 @@
+---
+ovn_controller_services:
+ ovn-controller:
+ container_name: ovn_controller
+ group: ovn-controller
+ enabled: true
+ image: "{{ ovn_controller_image_full }}"
+ volumes: "{{ ovn_controller_default_volumes + ovn_controller_extra_volumes }}"
+ dimensions: "{{ ovn_controller_dimensions }}"
+
+####################
+# Docker
+####################
+ovn_tag: "{{ openstack_tag }}"
+
+ovn_controller_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}ovn-controller"
+ovn_controller_tag: "{{ ovn_tag }}"
+ovn_controller_image_full: "{{ ovn_controller_image }}:{{ ovn_controller_tag }}"
+
+ovn_controller_dimensions: "{{ default_container_dimensions }}"
+
+ovn_controller_default_volumes:
+ - "{{ node_config_directory }}/ovn-controller/:{{ container_config_directory }}/:ro"
+ - "/run/openvswitch:/run/openvswitch:shared"
+ - "/etc/localtime:/etc/localtime:ro"
+ - "kolla_logs:/var/log/kolla/"
+
+ovn_controller_extra_volumes: "{{ default_extra_volumes }}"
+
+#####
+# OVN
+#####
+# Base MAC for ovn-chassis-mac-mappings generation
+ovn_base_mac: "52:54:00"
+# Configure the OVN remote probe interval in ms
+ovn_remote_probe_interval: "60000"
+# Configure the OVN openflow probe interval in s
+ovn_openflow_probe_interval: "60"
+# Configure OVN monitor-all (boolean)
+ovn_monitor_all: "false"
diff --git a/ansible/roles/ovn-controller/handlers/main.yml b/ansible/roles/ovn-controller/handlers/main.yml
new file mode 100644
index 0000000000..81c69df5f9
--- /dev/null
+++ b/ansible/roles/ovn-controller/handlers/main.yml
@@ -0,0 +1,13 @@
+---
+- name: Restart ovn-controller container
+ vars:
+ service_name: "ovn-controller"
+ service: "{{ ovn_controller_services[service_name] }}"
+ become: true
+ kolla_container:
+ action: "recreate_or_restart_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ service.container_name }}"
+ image: "{{ service.image }}"
+ volumes: "{{ service.volumes | reject('equalto', '') | list }}"
+ dimensions: "{{ service.dimensions }}"
diff --git a/ansible/roles/ovn-controller/tasks/check-containers.yml b/ansible/roles/ovn-controller/tasks/check-containers.yml
new file mode 100644
index 0000000000..b7e2f7c29f
--- /dev/null
+++ b/ansible/roles/ovn-controller/tasks/check-containers.yml
@@ -0,0 +1,3 @@
+---
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/ovn-controller/tasks/config.yml b/ansible/roles/ovn-controller/tasks/config.yml
new file mode 100644
index 0000000000..0e430eec00
--- /dev/null
+++ b/ansible/roles/ovn-controller/tasks/config.yml
@@ -0,0 +1,18 @@
+---
+- name: Ensuring config directories exist
+ file:
+ path: "{{ node_config_directory }}/{{ item.key }}"
+ state: "directory"
+ owner: "{{ config_owner_user }}"
+ group: "{{ config_owner_group }}"
+ mode: "0770"
+ become: true
+ with_dict: "{{ ovn_controller_services | select_services_enabled_and_mapped_to_host }}"
+
+- name: Copying over config.json files for services
+ template:
+ src: "{{ item.key }}.json.j2"
+ dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
+ mode: "0660"
+ become: true
+ with_dict: "{{ ovn_controller_services | select_services_enabled_and_mapped_to_host }}"
diff --git a/ansible/roles/ovn-controller/tasks/config_validate.yml b/ansible/roles/ovn-controller/tasks/config_validate.yml
new file mode 100644
index 0000000000..ed97d539c0
--- /dev/null
+++ b/ansible/roles/ovn-controller/tasks/config_validate.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible/roles/kafka/tasks/deploy-containers.yml b/ansible/roles/ovn-controller/tasks/deploy-containers.yml
similarity index 100%
rename from ansible/roles/kafka/tasks/deploy-containers.yml
rename to ansible/roles/ovn-controller/tasks/deploy-containers.yml
diff --git a/ansible/roles/ovn-controller/tasks/deploy.yml b/ansible/roles/ovn-controller/tasks/deploy.yml
new file mode 100644
index 0000000000..52482569d5
--- /dev/null
+++ b/ansible/roles/ovn-controller/tasks/deploy.yml
@@ -0,0 +1,9 @@
+---
+- import_tasks: config.yml
+
+- import_tasks: check-containers.yml
+
+- import_tasks: setup-ovs.yml
+
+- name: Flush handlers
+ meta: flush_handlers
diff --git a/ansible/roles/kafka/tasks/main.yml b/ansible/roles/ovn-controller/tasks/main.yml
similarity index 100%
rename from ansible/roles/kafka/tasks/main.yml
rename to ansible/roles/ovn-controller/tasks/main.yml
diff --git a/ansible/roles/ovn-controller/tasks/precheck.yml b/ansible/roles/ovn-controller/tasks/precheck.yml
new file mode 100644
index 0000000000..ed97d539c0
--- /dev/null
+++ b/ansible/roles/ovn-controller/tasks/precheck.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible/roles/kafka/tasks/pull.yml b/ansible/roles/ovn-controller/tasks/pull.yml
similarity index 100%
rename from ansible/roles/kafka/tasks/pull.yml
rename to ansible/roles/ovn-controller/tasks/pull.yml
diff --git a/ansible/roles/kafka/tasks/reconfigure.yml b/ansible/roles/ovn-controller/tasks/reconfigure.yml
similarity index 100%
rename from ansible/roles/kafka/tasks/reconfigure.yml
rename to ansible/roles/ovn-controller/tasks/reconfigure.yml
diff --git a/ansible/roles/ovn-controller/tasks/setup-ovs.yml b/ansible/roles/ovn-controller/tasks/setup-ovs.yml
new file mode 100644
index 0000000000..0d9ab0e30c
--- /dev/null
+++ b/ansible/roles/ovn-controller/tasks/setup-ovs.yml
@@ -0,0 +1,42 @@
+---
+- name: Create br-int bridge on OpenvSwitch
+ become: true
+ kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
+ user: root
+ module_name: openvswitch_bridge
+ module_args:
+ bridge: br-int
+ state: present
+ fail_mode: secure
+
+- name: Configure OVN in OVSDB
+ vars:
+ # Format: physnet1:br1,physnet2:br2
+ ovn_mappings: "{{ neutron_physical_networks.split(',') | zip(neutron_bridge_name.split(',')) | map('join', ':') | join(',') }}"
+ # Format: physnet1:00:11:22:33:44:55,physnet2:00:11:22:33:44:56
+ ovn_macs: "{% for physnet, bridge in neutron_physical_networks.split(',') | zip(neutron_bridge_name.split(',')) %}{{ physnet }}:{{ ovn_base_mac | random_mac(seed=inventory_hostname + bridge) }}{% if not loop.last %},{% endif %}{% endfor %}"
+ ovn_cms_opts: "{{ 'enable-chassis-as-gw' if inventory_hostname in groups['ovn-controller-network'] else '' }}{{ ',availability-zones=' + neutron_ovn_availability_zones | join(',') if inventory_hostname in groups['ovn-controller-network'] and neutron_ovn_availability_zones }}"
+ become: true
+ kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
+ user: root
+ module_name: openvswitch_db
+ module_args:
+ table: Open_vSwitch
+ record: .
+ col: external_ids
+ key: "{{ item.name }}"
+ value: "{{ item.value if item.state | default('present') == 'present' else omit }}"
+ state: "{{ item.state | default('present') }}"
+ loop:
+ - { name: ovn-encap-ip, value: "{{ tunnel_interface_address }}" }
+ - { name: ovn-encap-type, value: geneve }
+ - { name: ovn-remote, value: "{{ ovn_sb_connection }}" }
+ - { name: ovn-remote-probe-interval, value: "{{ ovn_remote_probe_interval }}" }
+ - { name: ovn-openflow-probe-interval, value: "{{ ovn_openflow_probe_interval }}" }
+ - { name: ovn-monitor-all, value: "{{ ovn_monitor_all | bool }}" }
+ - { name: ovn-bridge-mappings, value: "{{ ovn_mappings }}", state: "{{ 'present' if (inventory_hostname in groups['ovn-controller-network'] or computes_need_external_bridge | bool) else 'absent' }}" }
+ - { name: ovn-chassis-mac-mappings, value: "{{ ovn_macs }}", state: "{{ 'present' if inventory_hostname in groups['ovn-controller-compute'] else 'absent' }}" }
+ - { name: ovn-cms-options, value: "{{ ovn_cms_opts }}", state: "{{ 'present' if ovn_cms_opts != '' else 'absent' }}" }
+ when: inventory_hostname in groups.get('ovn-controller', [])
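The `ovn_mappings` and `ovn_macs` expressions pair each physical network with its bridge and derive a stable per-host chassis MAC from `ovn_base_mac`, seeded on hostname plus bridge. An illustrative rendering under assumed inputs:

```yaml
# Assumed inputs:
#   neutron_physical_networks: "physnet1,physnet2"
#   neutron_bridge_name: "br-ex1,br-ex2"
# Illustrative rendered values:
#   ovn_mappings: "physnet1:br-ex1,physnet2:br-ex2"
#   ovn_macs: "physnet1:52:54:00:xx:xx:xx,physnet2:52:54:00:yy:yy:yy"
```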
diff --git a/ansible/roles/ovn-controller/tasks/stop.yml b/ansible/roles/ovn-controller/tasks/stop.yml
new file mode 100644
index 0000000000..25f85ac67d
--- /dev/null
+++ b/ansible/roles/ovn-controller/tasks/stop.yml
@@ -0,0 +1,6 @@
+---
+- import_role:
+ role: service-stop
+ vars:
+ project_services: "{{ ovn_controller_services }}"
+ service_name: "{{ project_name }}"
diff --git a/ansible/roles/kibana/tasks/reconfigure.yml b/ansible/roles/ovn-controller/tasks/upgrade.yml
similarity index 100%
rename from ansible/roles/kibana/tasks/reconfigure.yml
rename to ansible/roles/ovn-controller/tasks/upgrade.yml
diff --git a/ansible/roles/ovn/templates/ovn-controller.json.j2 b/ansible/roles/ovn-controller/templates/ovn-controller.json.j2
similarity index 100%
rename from ansible/roles/ovn/templates/ovn-controller.json.j2
rename to ansible/roles/ovn-controller/templates/ovn-controller.json.j2
diff --git a/ansible/roles/ovn-controller/vars/main.yml b/ansible/roles/ovn-controller/vars/main.yml
new file mode 100644
index 0000000000..fbb32f1197
--- /dev/null
+++ b/ansible/roles/ovn-controller/vars/main.yml
@@ -0,0 +1,6 @@
+---
+project_name: "ovn"
+
+# NOTE(mnasiadka): we need this for the ovn-controller role
+# because this role's vars prefix does not match "{{ project_name }}"
+kolla_role_name: "ovn_controller"
diff --git a/ansible/roles/ovn-db/defaults/main.yml b/ansible/roles/ovn-db/defaults/main.yml
new file mode 100644
index 0000000000..ec03a59191
--- /dev/null
+++ b/ansible/roles/ovn-db/defaults/main.yml
@@ -0,0 +1,98 @@
+---
+ovn_db_services:
+ ovn-northd:
+ container_name: ovn_northd
+ group: ovn-northd
+ enabled: true
+ image: "{{ ovn_northd_image_full }}"
+ volumes: "{{ ovn_northd_default_volumes + ovn_northd_extra_volumes }}"
+ dimensions: "{{ ovn_northd_dimensions }}"
+ ovn-nb-db:
+ container_name: ovn_nb_db
+ group: ovn-nb-db
+ enabled: true
+ image: "{{ ovn_nb_db_image_full }}"
+ volumes: "{{ ovn_nb_db_default_volumes + ovn_nb_db_extra_volumes }}"
+ dimensions: "{{ ovn_nb_db_dimensions }}"
+ ovn-sb-db:
+ container_name: ovn_sb_db
+ group: ovn-sb-db
+ enabled: true
+ image: "{{ ovn_sb_db_image_full }}"
+ volumes: "{{ ovn_sb_db_default_volumes + ovn_sb_db_extra_volumes }}"
+ dimensions: "{{ ovn_sb_db_dimensions }}"
+
+
+####################
+# Docker
+####################
+ovn_tag: "{{ openstack_tag }}"
+
+ovn_northd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}ovn-northd"
+ovn_northd_tag: "{{ ovn_tag }}"
+ovn_northd_image_full: "{{ ovn_northd_image }}:{{ ovn_northd_tag }}"
+
+ovn_nb_db_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}ovn-nb-db-server"
+ovn_nb_db_tag: "{{ ovn_tag }}"
+ovn_nb_db_image_full: "{{ ovn_nb_db_image }}:{{ ovn_nb_db_tag }}"
+
+ovn_sb_db_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}ovn-sb-db-server"
+ovn_sb_db_tag: "{{ ovn_tag }}"
+ovn_sb_db_image_full: "{{ ovn_sb_db_image }}:{{ ovn_sb_db_tag }}"
+
+ovn_northd_dimensions: "{{ default_container_dimensions }}"
+ovn_nb_db_dimensions: "{{ default_container_dimensions }}"
+ovn_sb_db_dimensions: "{{ default_container_dimensions }}"
+
+ovn_northd_default_volumes:
+ - "{{ node_config_directory }}/ovn-northd/:{{ container_config_directory }}/:ro"
+ - "/etc/localtime:/etc/localtime:ro"
+ - "kolla_logs:/var/log/kolla/"
+ovn_nb_db_default_volumes:
+ - "{{ node_config_directory }}/ovn-nb-db/:{{ container_config_directory }}/:ro"
+ - "/etc/localtime:/etc/localtime:ro"
+ - "ovn_nb_db:/var/lib/openvswitch/ovn-nb/"
+ - "kolla_logs:/var/log/kolla/"
+ovn_sb_db_default_volumes:
+ - "{{ node_config_directory }}/ovn-sb-db/:{{ container_config_directory }}/:ro"
+ - "/etc/localtime:/etc/localtime:ro"
+ - "ovn_sb_db:/var/lib/openvswitch/ovn-sb/"
+ - "kolla_logs:/var/log/kolla/"
+
+ovn_db_extra_volumes: "{{ default_extra_volumes }}"
+ovn_northd_extra_volumes: "{{ ovn_db_extra_volumes }}"
+ovn_nb_db_extra_volumes: "{{ ovn_db_extra_volumes }}"
+ovn_sb_db_extra_volumes: "{{ ovn_db_extra_volumes }}"
+
+#####
+# OVN
+#####
+# Configure the OVN remote probe interval in ms
+ovn_remote_probe_interval: "60000"
+# Configure the OVN openflow probe interval in s
+ovn_openflow_probe_interval: "60"
+# Configure OVN DB inactivity probe time in ms
+ovn_db_inactivity_probe: "60000"
+ovn_sb_db_inactivity_probe: "{{ ovn_db_inactivity_probe }}"
+ovn_nb_db_inactivity_probe: "{{ ovn_db_inactivity_probe }}"
+# OVN startup commands
+ovn_nb_command: >-
+ /usr/share/ovn/scripts/ovn-ctl run_nb_ovsdb
+ --db-nb-addr={{ api_interface_address | put_address_in_context('url') }}
+ --db-nb-cluster-local-addr={{ api_interface_address | put_address_in_context('url') }}
+ {{ ovn_nb_db_bootstrap_args | default('') }}
+ --db-nb-sock=/run/ovn/ovnnb_db.sock
+ --db-nb-pidfile=/run/ovn/ovnnb_db.pid
+ --db-nb-file=/var/lib/openvswitch/ovn-nb/ovnnb.db
+ --ovn-nb-logfile=/var/log/kolla/openvswitch/ovn-nb-db.log
+ovn_sb_command: >-
+ /usr/share/ovn/scripts/ovn-ctl run_sb_ovsdb
+ --db-sb-addr={{ api_interface_address | put_address_in_context('url') }}
+ --db-sb-cluster-local-addr={{ api_interface_address | put_address_in_context('url') }}
+ {{ ovn_sb_db_bootstrap_args | default('') }}
+ --db-sb-sock=/run/ovn/ovnsb_db.sock
+ --db-sb-pidfile=/run/ovn/ovnsb_db.pid
+ --db-sb-file=/var/lib/openvswitch/ovn-sb/ovnsb.db
+ --ovn-sb-logfile=/var/log/kolla/openvswitch/ovn-sb-db.log
+# Workaround: pause after restarting containers to allow for leader election.
+ovn_leader_election_pause: 5
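The folded `ovn_nb_command`/`ovn_sb_command` strings become the container commands for the clustered OVSDB servers. `ovn_nb_db_bootstrap_args` stays empty on the first node; on joining nodes, bootstrap-initial.yml (below) sets it to a `--db-nb-cluster-remote-addr` pointing at the leader, which is what makes the new server join the existing Raft cluster. An illustrative rendering for an assumed joining node:

```yaml
# Illustrative rendered command on a joining NB node (addresses assumed):
#   /usr/share/ovn/scripts/ovn-ctl run_nb_ovsdb
#     --db-nb-addr=192.0.2.11
#     --db-nb-cluster-local-addr=192.0.2.11
#     --db-nb-cluster-remote-addr=192.0.2.10
#     --db-nb-sock=/run/ovn/ovnnb_db.sock ...
```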
diff --git a/ansible/roles/ovn-db/handlers/main.yml b/ansible/roles/ovn-db/handlers/main.yml
new file mode 100644
index 0000000000..745e0c8032
--- /dev/null
+++ b/ansible/roles/ovn-db/handlers/main.yml
@@ -0,0 +1,39 @@
+---
+- name: Restart ovn-nb-db container
+ vars:
+ service_name: "ovn-nb-db"
+ service: "{{ ovn_db_services[service_name] }}"
+ become: true
+ kolla_container:
+ action: "recreate_or_restart_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ service.container_name }}"
+ image: "{{ service.image }}"
+ volumes: "{{ service.volumes | reject('equalto', '') | list }}"
+ dimensions: "{{ service.dimensions }}"
+
+- name: Restart ovn-sb-db container
+ vars:
+ service_name: "ovn-sb-db"
+ service: "{{ ovn_db_services[service_name] }}"
+ become: true
+ kolla_container:
+ action: "recreate_or_restart_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ service.container_name }}"
+ image: "{{ service.image }}"
+ volumes: "{{ service.volumes | reject('equalto', '') | list }}"
+ dimensions: "{{ service.dimensions }}"
+
+- name: Restart ovn-northd container
+ vars:
+ service_name: "ovn-northd"
+ service: "{{ ovn_db_services[service_name] }}"
+ become: true
+ kolla_container:
+ action: "recreate_or_restart_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ service.container_name }}"
+ image: "{{ service.image }}"
+ volumes: "{{ service.volumes | reject('equalto', '') | list }}"
+ dimensions: "{{ service.dimensions }}"
diff --git a/ansible/roles/ovn-db/tasks/bootstrap-db.yml b/ansible/roles/ovn-db/tasks/bootstrap-db.yml
new file mode 100644
index 0000000000..deb4e0d5bb
--- /dev/null
+++ b/ansible/roles/ovn-db/tasks/bootstrap-db.yml
@@ -0,0 +1,58 @@
+---
+# NOTE(mgoddard): After the OVN DB leader restarts, there is a period before a
+# new leader has been elected during which the old leader is still returned in
+# the cluster status. This can result in a failure to apply the connection
+# settings if a different leader is elected. Wait a few seconds for the leader
+# election to complete.
+- name: Wait for leader election
+ pause:
+ seconds: "{{ ovn_leader_election_pause }}"
+ when: ovn_nb_db_cluster_exists | default(false) or ovn_sb_db_cluster_exists | default(false)
+
+- name: Get OVN_Northbound cluster leader
+ become: true
+ command: "{{ kolla_container_engine }} exec ovn_nb_db ovs-appctl -t /var/run/ovn/ovnnb_db.ctl cluster/status OVN_Northbound"
+ changed_when: False
+ register: ovn_nb_cluster_status
+
+- name: Configure OVN NB connection settings
+ vars:
+ search_string: "Role: leader"
+ become: true
+ command: "{{ kolla_container_engine }} exec ovn_nb_db ovn-nbctl --inactivity-probe={{ ovn_nb_db_inactivity_probe }} set-connection ptcp:{{ ovn_nb_db_port }}:0.0.0.0"
+ when: ovn_nb_cluster_status is search(search_string)
+
+- name: Get OVN_Southbound cluster leader
+ become: true
+ command: "{{ kolla_container_engine }} exec ovn_sb_db ovs-appctl -t /var/run/ovn/ovnsb_db.ctl cluster/status OVN_Southbound"
+ changed_when: False
+ register: ovn_sb_cluster_status
+
+- name: Configure OVN SB connection settings
+ vars:
+ search_string: "Role: leader"
+ become: true
+ command: "{{ kolla_container_engine }} exec ovn_sb_db ovn-sbctl --inactivity-probe={{ ovn_sb_db_inactivity_probe }} set-connection ptcp:{{ ovn_sb_db_port }}:0.0.0.0"
+ when: ovn_sb_cluster_status is search(search_string)
+
+- name: Wait for ovn-nb-db
+ wait_for:
+ host: "{{ api_interface_address }}"
+ port: "{{ ovn_nb_db_port }}"
+ connect_timeout: 1
+ timeout: 60
+ register: check_ovn_nb_db_port
+ until: check_ovn_nb_db_port is success
+ retries: 10
+ delay: 6
+
+- name: Wait for ovn-sb-db
+ wait_for:
+ host: "{{ api_interface_address }}"
+ port: "{{ ovn_sb_db_port }}"
+ connect_timeout: 1
+ timeout: 60
+ register: check_ovn_sb_db_port
+ until: check_ovn_sb_db_port is success
+ retries: 10
+ delay: 6
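The leader checks above only apply `set-connection` on the node whose `cluster/status` output contains `Role: leader`. An assumed output fragment that the `search_string` matches, plus a hedged read-back of the configured listener (not part of the patch):

```yaml
# Assumed cluster/status fragment (illustrative):
#   Role: leader
#   Servers:
#     a1b2 (a1b2 at tcp:192.0.2.10:6643) (self)
#     c3d4 (c3d4 at tcp:192.0.2.11:6643)
- name: Show configured OVN NB connection
  become: true
  command: "{{ kolla_container_engine }} exec ovn_nb_db ovn-nbctl get-connection"
  changed_when: false
```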
diff --git a/ansible/roles/ovn-db/tasks/bootstrap-initial.yml b/ansible/roles/ovn-db/tasks/bootstrap-initial.yml
new file mode 100644
index 0000000000..693e2c1ddf
--- /dev/null
+++ b/ansible/roles/ovn-db/tasks/bootstrap-initial.yml
@@ -0,0 +1,83 @@
+---
+- name: Bootstrap new cluster
+ block:
+
+ - name: Set bootstrap args fact for NB (new cluster)
+ set_fact:
+ ovn_nb_db_bootstrap_args: "{% if groups['ovn-nb-db'] | length > 1 and inventory_hostname != groups['ovn-nb-db'][0] %} --db-nb-cluster-remote-addr={{ 'api' | kolla_address(groups['ovn-nb-db'][0]) | put_address_in_context('url') }} {% endif %}"
+ when: groups['ovn-nb-db_leader'] is not defined and groups['ovn-nb-db_follower'] is not defined
+
+ - name: Set bootstrap args fact for SB (new cluster)
+ set_fact:
+ ovn_sb_db_bootstrap_args: "{% if groups['ovn-sb-db'] | length > 1 and inventory_hostname != groups['ovn-sb-db'][0] %} --db-sb-cluster-remote-addr={{ 'api' | kolla_address(groups['ovn-sb-db'][0]) | put_address_in_context('url') }} {% endif %}"
+ when: groups['ovn-sb-db_leader'] is not defined and groups['ovn-sb-db_follower'] is not defined
+
+ - name: Check NB cluster status
+ command: >
+ {{ kolla_container_engine }} exec ovn_nb_db ovs-appctl -t /var/run/ovn/ovnnb_db.ctl
+ cluster/status OVN_Northbound
+ become: true
+ changed_when: false
+ register: ovn_nb_db_cluster_status
+ when: groups['ovn-nb-db_leader'] is defined and inventory_hostname in groups.get('ovn-nb-db_had_volume_False', '')
+ delegate_to: "{{ groups['ovn-nb-db_leader'][0] }}"
+
+ - name: Check SB cluster status
+ command: >
+ {{ kolla_container_engine }} exec ovn_sb_db ovs-appctl -t /var/run/ovn/ovnsb_db.ctl
+ cluster/status OVN_Southbound
+ become: true
+ changed_when: false
+ register: ovn_sb_db_cluster_status
+ when: groups['ovn-sb-db_leader'] is defined and inventory_hostname in groups.get('ovn-sb-db_had_volume_False', '')
+ delegate_to: "{{ groups['ovn-sb-db_leader'][0] }}"
+
+ - name: Remove an old node with the same ip address as the new node in NB DB
+ vars:
+ ovn_nb_old_node: "{{ ovn_nb_db_cluster_status | regex_search('\\((\\w{4}) at tcp:' + api_interface_address + ':6643\\)', '\\1') | first }}"
+ become: true
+ command: >
+ {{ kolla_container_engine }} exec ovn_nb_db ovs-appctl -t /var/run/ovn/ovnnb_db.ctl
+ cluster/kick OVN_Northbound {{ ovn_nb_old_node }}
+ when:
+ - ovn_nb_db_cluster_status.stdout is defined
+ - (ovn_nb_db_cluster_status.stdout is search('at tcp:' + api_interface_address)) and inventory_hostname in groups.get('ovn-nb-db_had_volume_False', '')
+ delegate_to: "{{ groups['ovn-nb-db_leader'][0] }}"
+
+ - name: Remove an old node with the same ip address as the new node in SB DB
+ vars:
+ ovn_sb_old_node: "{{ ovn_sb_db_cluster_status | regex_search('\\((\\w{4}) at tcp:' + api_interface_address + ':6644\\)', '\\1') | first }}"
+ become: true
+ command: >
+ {{ kolla_container_engine }} exec ovn_sb_db ovs-appctl -t /var/run/ovn/ovnsb_db.ctl
+ cluster/kick OVN_Southbound {{ ovn_sb_old_node }}
+ when:
+ - ovn_sb_db_cluster_status.stdout is defined
+ - (ovn_sb_db_cluster_status.stdout is search('at tcp:' + api_interface_address)) and inventory_hostname in groups.get('ovn-sb-db_had_volume_False', '')
+ delegate_to: "{{ groups['ovn-sb-db_leader'][0] }}"
+
+ - name: Set bootstrap args fact for NB (new member)
+ set_fact:
+ ovn_nb_db_bootstrap_args: "--db-nb-cluster-remote-addr={{ 'api' | kolla_address(groups.get('ovn-nb-db_leader', groups['ovn-nb-db'])[0] | default()) | put_address_in_context('url') }}"
+ when: inventory_hostname in groups.get('ovn-nb-db_had_volume_False', '') and groups['ovn-nb-db_leader'] is defined
+
+ - name: Set bootstrap args fact for SB (new member)
+ set_fact:
+ ovn_sb_db_bootstrap_args: "--db-sb-cluster-remote-addr={{ 'api' | kolla_address(groups.get('ovn-sb-db_leader', groups['ovn-sb-db'])[0] | default()) | put_address_in_context('url') }}"
+ when: inventory_hostname in groups.get('ovn-sb-db_had_volume_False', '') and groups['ovn-sb-db_leader'] is defined
+
+ - import_tasks: config.yml
+
+ - import_tasks: check-containers.yml
+
+ - name: Flush handlers
+ meta: flush_handlers
+
+ - import_tasks: bootstrap-db.yml
+
+ - name: Unset bootstrap args fact
+ set_fact:
+ ovn_nb_db_bootstrap_args:
+ ovn_sb_db_bootstrap_args:
+
+ any_errors_fatal: true
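
Editor's note: the cluster/kick tasks rely on regex_search with a capture group to pull the four-character Raft server ID out of the leader's cluster/status output. A self-contained sketch with an invented status line and address:

    - name: Extract the server ID to kick (sketch)
      vars:
        example_address: "192.0.2.10"
        example_status: "85f9 (85f9 at tcp:192.0.2.10:6643)"
        example_id: "{{ example_status | regex_search('\\((\\w{4}) at tcp:' + example_address + ':6643\\)', '\\1') | first }}"
      debug:
        msg: "would run cluster/kick OVN_Northbound {{ example_id }}"  # -> 85f9
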
diff --git a/ansible/roles/ovn-db/tasks/check-containers.yml b/ansible/roles/ovn-db/tasks/check-containers.yml
new file mode 100644
index 0000000000..b7e2f7c29f
--- /dev/null
+++ b/ansible/roles/ovn-db/tasks/check-containers.yml
@@ -0,0 +1,3 @@
+---
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/ovn-db/tasks/config.yml b/ansible/roles/ovn-db/tasks/config.yml
new file mode 100644
index 0000000000..162923f103
--- /dev/null
+++ b/ansible/roles/ovn-db/tasks/config.yml
@@ -0,0 +1,18 @@
+---
+- name: Ensuring config directories exist
+ file:
+ path: "{{ node_config_directory }}/{{ item.key }}"
+ state: "directory"
+ owner: "{{ config_owner_user }}"
+ group: "{{ config_owner_group }}"
+ mode: "0770"
+ become: true
+ with_dict: "{{ ovn_db_services | select_services_enabled_and_mapped_to_host }}"
+
+- name: Copying over config.json files for services
+ template:
+ src: "{{ item.key }}.json.j2"
+ dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
+ mode: "0660"
+ become: true
+ with_dict: "{{ ovn_db_services | select_services_enabled_and_mapped_to_host }}"
diff --git a/ansible/roles/ovn-db/tasks/config_validate.yml b/ansible/roles/ovn-db/tasks/config_validate.yml
new file mode 100644
index 0000000000..ed97d539c0
--- /dev/null
+++ b/ansible/roles/ovn-db/tasks/config_validate.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible/roles/kibana/tasks/deploy-containers.yml b/ansible/roles/ovn-db/tasks/deploy-containers.yml
similarity index 100%
rename from ansible/roles/kibana/tasks/deploy-containers.yml
rename to ansible/roles/ovn-db/tasks/deploy-containers.yml
diff --git a/ansible/roles/ovn-db/tasks/deploy.yml b/ansible/roles/ovn-db/tasks/deploy.yml
new file mode 100644
index 0000000000..d92bb7b614
--- /dev/null
+++ b/ansible/roles/ovn-db/tasks/deploy.yml
@@ -0,0 +1,22 @@
+---
+- include_tasks: lookup_cluster.yml
+ when:
+ - inventory_hostname in groups['ovn-nb-db'] or
+ inventory_hostname in groups['ovn-sb-db']
+
+- include_tasks: bootstrap-initial.yml
+ when:
+ - inventory_hostname in groups.get('ovn-nb-db_had_volume_False', '') or
+ inventory_hostname in groups.get('ovn-sb-db_had_volume_False', '')
+
+- import_tasks: config.yml
+
+- import_tasks: check-containers.yml
+
+- name: Flush handlers
+ meta: flush_handlers
+
+- import_tasks: bootstrap-db.yml
+ when:
+ - inventory_hostname in groups['ovn-nb-db']
+ - inventory_hostname in groups['ovn-sb-db']
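
Editor's note: bootstrap-initial.yml is included only for hosts in the _had_volume_False groups, which exist only if the group_by in lookup_cluster.yml created them; the groups.get(..., '') fallback keeps the membership test safe when they do not. A minimal sketch of the idiom:

    - name: Safe membership test against a possibly-absent group (sketch)
      debug:
        # Evaluates to false rather than failing when the group_by never
        # created the group on this run (the default is an empty string).
        msg: "{{ inventory_hostname in groups.get('ovn-nb-db_had_volume_False', '') }}"
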
diff --git a/ansible/roles/ovn-db/tasks/lookup_cluster.yml b/ansible/roles/ovn-db/tasks/lookup_cluster.yml
new file mode 100644
index 0000000000..d97d358c77
--- /dev/null
+++ b/ansible/roles/ovn-db/tasks/lookup_cluster.yml
@@ -0,0 +1,133 @@
+---
+- name: Checking for any existing OVN DB container volumes
+ become: true
+ kolla_container_volume_facts:
+ container_engine: "{{ kolla_container_engine }}"
+ name:
+ - ovn_nb_db
+ - ovn_sb_db
+ register: ovn_db_container_volume_facts
+
+- name: Divide hosts by their OVN NB volume availability
+ group_by:
+ key: "ovn-nb-db_had_volume_{{ ovn_db_container_volume_facts['ovn_nb_db'] is defined }}"
+ changed_when: false
+
+- name: Divide hosts by their OVN SB volume availability
+ group_by:
+ key: "ovn-sb-db_had_volume_{{ ovn_db_container_volume_facts['ovn_sb_db'] is defined }}"
+ changed_when: false
+
+- name: Establish whether the OVN NB cluster already exists
+ set_fact:
+ ovn_nb_db_cluster_exists: "{{ groups['ovn-nb-db' + '_had_volume_True'] is defined }}"
+
+- name: Establish whether the OVN SB cluster already exists
+ set_fact:
+ ovn_sb_db_cluster_exists: "{{ groups['ovn-sb-db' + '_had_volume_True'] is defined }}"
+
+- name: OVN NB checks
+ block:
+
+ - name: Check if running on all OVN NB DB hosts
+ fail:
+ msg: >
+      Some hosts need database bootstrapping, but not all OVN NB DB hosts
+      ({{ groups['ovn-nb-db'] | join(', ') }}) are in the target list.
+      Missing hosts: {{ groups['ovn-nb-db'] | difference(ansible_play_batch) | list | join(', ') }}.
+ Stopping as it may be unsafe to proceed. Please run without --limit
+ or --serial to bootstrap these hosts.
+ when:
+ - ovn_nb_db_cluster_exists
+ - groups['ovn-nb-db'] | difference(ansible_play_batch) | list | length > 0
+
+ - name: Check OVN NB service port liveness
+ wait_for:
+ host: "{{ api_interface_address }}"
+ port: "{{ ovn_nb_db_port }}"
+ connect_timeout: 1
+ timeout: 10
+ register: check_ovn_nb_db_port_liveness
+ ignore_errors: yes
+
+ - name: Divide hosts by their OVN NB service port liveness
+ group_by:
+ key: "ovn-nb-db_port_alive_{{ check_ovn_nb_db_port_liveness is success }}"
+ changed_when: false
+
+ - name: Get OVN NB database information
+ command: >
+ {{ kolla_container_engine }} exec ovn_nb_db ovsdb-client query unix:/run/ovn/ovnnb_db.sock
+ "[\"_Server\",{\"table\":\"Database\",\"where\":[[\"name\",\"==\", \"OVN_Northbound\"]],\"op\":\"select\"}]"
+ become: true
+ when: check_ovn_nb_db_port_liveness is success
+ changed_when: false
+ register: ovn_nb_db_info
+
+ - name: Divide hosts by their OVN NB leader/follower role
+ group_by:
+ key: "ovn-nb-db_{{ 'leader' if (ovn_nb_db_info.stdout | from_json).0.rows.0.leader else 'follower' }}"
+ when: check_ovn_nb_db_port_liveness is success
+ changed_when: false
+
+ - name: Fail on existing OVN NB cluster with no leader
+ fail:
+ msg: OVN NB cluster exists but there is no leader - please check cluster status
+ when:
+ - groups['ovn-nb-db_leader'] is not defined and groups['ovn-nb-db_follower'] is defined
+
+ any_errors_fatal: true
+ when: inventory_hostname in groups.get('ovn-nb-db_had_volume_True', '')
+
+- name: OVN SB checks
+ block:
+
+ - name: Check if running on all OVN SB DB hosts
+ fail:
+ msg: >
+      Some hosts need database bootstrapping, but not all OVN SB DB hosts
+      ({{ groups['ovn-sb-db'] | join(', ') }}) are in the target list.
+      Missing hosts: {{ groups['ovn-sb-db'] | difference(ansible_play_batch) | list | join(', ') }}.
+ Stopping as it may be unsafe to proceed. Please run without --limit
+ or --serial to bootstrap these hosts.
+ when:
+ - ovn_sb_db_cluster_exists
+ - groups['ovn-sb-db'] | difference(ansible_play_batch) | list | length > 0
+
+ - name: Check OVN SB service port liveness
+ wait_for:
+ host: "{{ api_interface_address }}"
+ port: "{{ ovn_sb_db_port }}"
+ connect_timeout: 1
+ timeout: 10
+ register: check_ovn_sb_db_port_liveness
+ ignore_errors: yes
+
+ - name: Divide hosts by their OVN SB service port liveness
+ group_by:
+ key: "ovn-sb-db_port_alive_{{ check_ovn_sb_db_port_liveness is success }}"
+ changed_when: false
+
+ - name: Get OVN SB database information
+ command: >
+ {{ kolla_container_engine }} exec ovn_sb_db ovsdb-client query unix:/run/ovn/ovnsb_db.sock
+ "[\"_Server\",{\"table\":\"Database\",\"where\":[[\"name\",\"==\", \"OVN_Southbound\"]],\"op\":\"select\"}]"
+ become: true
+ when: check_ovn_sb_db_port_liveness is success
+ changed_when: false
+ register: ovn_sb_db_info
+
+ - name: Divide hosts by their OVN SB leader/follower role
+ group_by:
+ key: "ovn-sb-db_{{ 'leader' if (ovn_sb_db_info.stdout | from_json).0.rows.0.leader else 'follower' }}"
+ when: check_ovn_sb_db_port_liveness is success
+ changed_when: false
+
+ - name: Fail on existing OVN SB cluster with no leader
+ fail:
+ msg: OVN SB cluster exists but there is no leader - please check cluster status.
+ when:
+ - groups['ovn-sb-db_leader'] is not defined and groups['ovn-sb-db_follower'] is defined
+
+ any_errors_fatal: true
+ when: inventory_hostname in groups.get('ovn-sb-db_had_volume_True', '')
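
Editor's note: the leader/follower group_by above parses the JSON reply of ovsdb-client query against the _Server database. A sketch against an abbreviated, invented reply of the same shape:

    - name: Derive the role group suffix from a sample reply (sketch)
      vars:
        example_reply: '[{"rows": [{"name": "OVN_Northbound", "leader": true}]}]'
      debug:
        msg: "ovn-nb-db_{{ 'leader' if (example_reply | from_json).0.rows.0.leader else 'follower' }}"
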
diff --git a/ansible/roles/kibana/tasks/main.yml b/ansible/roles/ovn-db/tasks/main.yml
similarity index 100%
rename from ansible/roles/kibana/tasks/main.yml
rename to ansible/roles/ovn-db/tasks/main.yml
diff --git a/ansible/roles/ovn-db/tasks/precheck.yml b/ansible/roles/ovn-db/tasks/precheck.yml
new file mode 100644
index 0000000000..e06baddc56
--- /dev/null
+++ b/ansible/roles/ovn-db/tasks/precheck.yml
@@ -0,0 +1,33 @@
+---
+- name: Get container facts
+ become: true
+ kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
+ name:
+ - ovn_nb_db
+ - ovn_sb_db
+ check_mode: false
+ register: container_facts
+
+- name: Checking free port for OVN northbound db
+ wait_for:
+ host: "{{ api_interface_address }}"
+ port: "{{ ovn_nb_db_port }}"
+ connect_timeout: 1
+ timeout: 1
+ state: stopped
+ when:
+ - container_facts['ovn_nb_db'] is not defined
+ - inventory_hostname in groups['ovn-nb-db']
+
+- name: Checking free port for OVN southbound db
+ wait_for:
+ host: "{{ api_interface_address }}"
+ port: "{{ ovn_sb_db_port }}"
+ connect_timeout: 1
+ timeout: 1
+ state: stopped
+ when:
+ - container_facts['ovn_sb_db'] is not defined
+ - inventory_hostname in groups['ovn-sb-db']
diff --git a/ansible/roles/kibana/tasks/pull.yml b/ansible/roles/ovn-db/tasks/pull.yml
similarity index 100%
rename from ansible/roles/kibana/tasks/pull.yml
rename to ansible/roles/ovn-db/tasks/pull.yml
diff --git a/ansible/roles/monasca/tasks/reconfigure.yml b/ansible/roles/ovn-db/tasks/reconfigure.yml
similarity index 100%
rename from ansible/roles/monasca/tasks/reconfigure.yml
rename to ansible/roles/ovn-db/tasks/reconfigure.yml
diff --git a/ansible/roles/ovn-db/tasks/stop.yml b/ansible/roles/ovn-db/tasks/stop.yml
new file mode 100644
index 0000000000..eadbf02a80
--- /dev/null
+++ b/ansible/roles/ovn-db/tasks/stop.yml
@@ -0,0 +1,6 @@
+---
+- import_role:
+ role: service-stop
+ vars:
+ project_services: "{{ ovn_db_services }}"
+ service_name: "{{ project_name }}"
diff --git a/ansible/roles/murano/tasks/reconfigure.yml b/ansible/roles/ovn-db/tasks/upgrade.yml
similarity index 100%
rename from ansible/roles/murano/tasks/reconfigure.yml
rename to ansible/roles/ovn-db/tasks/upgrade.yml
diff --git a/ansible/roles/ovn-db/templates/ovn-nb-db.json.j2 b/ansible/roles/ovn-db/templates/ovn-nb-db.json.j2
new file mode 100644
index 0000000000..d3ff684b66
--- /dev/null
+++ b/ansible/roles/ovn-db/templates/ovn-nb-db.json.j2
@@ -0,0 +1,10 @@
+{
+ "command": "{{ ovn_nb_command }}",
+ "permissions": [
+ {
+ "path": "/var/log/kolla/openvswitch",
+ "owner": "root:root",
+ "recurse": true
+ }
+ ]
+}
diff --git a/ansible/roles/ovn/templates/ovn-northd.json.j2 b/ansible/roles/ovn-db/templates/ovn-northd.json.j2
similarity index 100%
rename from ansible/roles/ovn/templates/ovn-northd.json.j2
rename to ansible/roles/ovn-db/templates/ovn-northd.json.j2
diff --git a/ansible/roles/ovn-db/templates/ovn-sb-db.json.j2 b/ansible/roles/ovn-db/templates/ovn-sb-db.json.j2
new file mode 100644
index 0000000000..4139f58c3f
--- /dev/null
+++ b/ansible/roles/ovn-db/templates/ovn-sb-db.json.j2
@@ -0,0 +1,10 @@
+{
+ "command": "{{ ovn_sb_command }}",
+ "permissions": [
+ {
+ "path": "/var/log/kolla/openvswitch",
+ "owner": "root:root",
+ "recurse": true
+ }
+ ]
+}
diff --git a/ansible/roles/ovn-db/vars/main.yml b/ansible/roles/ovn-db/vars/main.yml
new file mode 100644
index 0000000000..485229e938
--- /dev/null
+++ b/ansible/roles/ovn-db/vars/main.yml
@@ -0,0 +1,6 @@
+---
+project_name: "ovn"
+
+# NOTE(mnasiadka): we need this for the ovn-db role because this role's
+# vars prefix does not match "{{ project_name }}"
+kolla_role_name: "ovn_db"
diff --git a/ansible/roles/ovn/defaults/main.yml b/ansible/roles/ovn/defaults/main.yml
deleted file mode 100644
index 394cb744b2..0000000000
--- a/ansible/roles/ovn/defaults/main.yml
+++ /dev/null
@@ -1,93 +0,0 @@
----
-ovn_services:
- ovn-controller:
- container_name: ovn_controller
- group: ovn-controller
- enabled: true
- image: "{{ ovn_controller_image_full }}"
- volumes: "{{ ovn_controller_default_volumes + ovn_controller_extra_volumes }}"
- dimensions: "{{ ovn_controller_dimensions }}"
- ovn-northd:
- container_name: ovn_northd
- group: ovn-northd
- enabled: true
- image: "{{ ovn_northd_image_full }}"
- volumes: "{{ ovn_northd_default_volumes + ovn_northd_extra_volumes }}"
- dimensions: "{{ ovn_northd_dimensions }}"
- ovn-nb-db:
- container_name: ovn_nb_db
- group: ovn-nb-db
- enabled: true
- image: "{{ ovn_nb_db_image_full }}"
- volumes: "{{ ovn_nb_db_default_volumes + ovn_nb_db_extra_volumes }}"
- dimensions: "{{ ovn_nb_db_dimensions }}"
- ovn-sb-db:
- container_name: ovn_sb_db
- group: ovn-sb-db
- enabled: true
- image: "{{ ovn_sb_db_image_full }}"
- volumes: "{{ ovn_sb_db_default_volumes + ovn_sb_db_extra_volumes }}"
- dimensions: "{{ ovn_sb_db_dimensions }}"
-
-
-####################
-# Docker
-####################
-ovn_tag: "{{ openstack_tag }}"
-
-ovn_controller_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/ovn-controller"
-ovn_controller_tag: "{{ ovn_tag }}"
-ovn_controller_image_full: "{{ ovn_controller_image }}:{{ ovn_controller_tag }}"
-
-ovn_northd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/ovn-northd"
-ovn_northd_tag: "{{ ovn_tag }}"
-ovn_northd_image_full: "{{ ovn_northd_image }}:{{ ovn_northd_tag }}"
-
-ovn_nb_db_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/ovn-nb-db-server"
-ovn_nb_db_tag: "{{ ovn_tag }}"
-ovn_nb_db_image_full: "{{ ovn_nb_db_image }}:{{ ovn_nb_db_tag }}"
-
-ovn_sb_db_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/ovn-sb-db-server"
-ovn_sb_db_tag: "{{ ovn_tag }}"
-ovn_sb_db_image_full: "{{ ovn_sb_db_image }}:{{ ovn_sb_db_tag }}"
-
-ovn_controller_dimensions: "{{ default_container_dimensions }}"
-ovn_northd_dimensions: "{{ default_container_dimensions }}"
-ovn_nb_db_dimensions: "{{ default_container_dimensions }}"
-ovn_sb_db_dimensions: "{{ default_container_dimensions }}"
-
-ovn_controller_default_volumes:
- - "{{ node_config_directory }}/ovn-controller/:{{ container_config_directory }}/:ro"
- - "/run/openvswitch:/run/openvswitch:shared"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
-ovn_northd_default_volumes:
- - "{{ node_config_directory }}/ovn-northd/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
-ovn_nb_db_default_volumes:
- - "{{ node_config_directory }}/ovn-nb-db/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "ovn_nb_db:/var/lib/openvswitch/ovn-nb/"
- - "kolla_logs:/var/log/kolla/"
-ovn_sb_db_default_volumes:
- - "{{ node_config_directory }}/ovn-sb-db/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "ovn_sb_db:/var/lib/openvswitch/ovn-sb/"
- - "kolla_logs:/var/log/kolla/"
-
-ovn_extra_volumes: "{{ default_extra_volumes }}"
-ovn_controller_extra_volumes: "{{ ovn_extra_volumes }}"
-ovn_northd_extra_volumes: "{{ ovn_extra_volumes }}"
-ovn_nb_db_extra_volumes: "{{ ovn_extra_volumes }}"
-ovn_sb_db_extra_volumes: "{{ ovn_extra_volumes }}"
-
-#####
-# OVN
-#####
-# Base MAC for ovn-chassis-mac-mappings generation
-ovn_base_mac: "52:54:00"
-# Configure OVN remote probe interval time in ms
-ovn_remote_probe_interval: "60000"
-# Configure OVN openflow interval in s
-ovn_openflow_probe_interval: "60"
diff --git a/ansible/roles/ovn/handlers/main.yml b/ansible/roles/ovn/handlers/main.yml
deleted file mode 100644
index 94977013f9..0000000000
--- a/ansible/roles/ovn/handlers/main.yml
+++ /dev/null
@@ -1,88 +0,0 @@
----
-- name: Restart ovn-nb-db container
- vars:
- service_name: "ovn-nb-db"
- service: "{{ ovn_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
-
-- name: Restart ovn-sb-db container
- vars:
- service_name: "ovn-sb-db"
- service: "{{ ovn_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
-
-- name: Wait for ovn-nb-db
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ ovn_nb_db_port }}"
- connect_timeout: 1
- timeout: 60
- register: check_ovn_nb_db_port
- until: check_ovn_nb_db_port is success
- retries: 10
- delay: 6
- listen: "Restart ovn-nb-db container"
- when:
- - kolla_action != "config"
-
-- name: Wait for ovn-sb-db
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ ovn_sb_db_port }}"
- connect_timeout: 1
- timeout: 60
- register: check_ovn_sb_db_port
- until: check_ovn_sb_db_port is success
- retries: 10
- delay: 6
- listen: "Restart ovn-sb-db container"
- when:
- - kolla_action != "config"
-
-- name: Restart ovn-northd container
- vars:
- service_name: "ovn-northd"
- service: "{{ ovn_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
-
-- name: Restart ovn-controller container
- vars:
- service_name: "ovn-controller"
- service: "{{ ovn_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/ovn/tasks/bootstrap.yml b/ansible/roles/ovn/tasks/bootstrap.yml
deleted file mode 100644
index 6da56297b5..0000000000
--- a/ansible/roles/ovn/tasks/bootstrap.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- name: Create br-int bridge on OpenvSwitch
- become: true
- kolla_toolbox:
- user: root
- module_name: openvswitch_bridge
- module_args:
- bridge: br-int
- state: present
- fail_mode: secure
-
-- name: Configure OVN in OVSDB
- vars:
- ovn_mappings: "{% for bridge in neutron_bridge_name.split(',') %}physnet{{ loop.index0 + 1 }}:{{ bridge }}{% if not loop.last %},{% endif %}{% endfor %}"
- ovn_macs: "{% for bridge in neutron_bridge_name.split(',') %}physnet{{ loop.index0 + 1 }}:{{ ovn_base_mac | random_mac(seed=inventory_hostname + bridge) }}{% if not loop.last %},{% endif %}{% endfor %}"
- ovn_cms_opts: "{{ 'enable-chassis-as-gw' if inventory_hostname in groups['ovn-controller-network'] else '' }}"
- become: true
- kolla_toolbox:
- user: root
- module_name: openvswitch_db
- module_args:
- table: Open_vSwitch
- record: .
- col: external_ids
- key: "{{ item.name }}"
- value: "{{ item.value if item.state | default('present') == 'present' else omit }}"
- state: "{{ item.state | default('present') }}"
- loop:
- - { name: ovn-encap-ip, value: "{{ tunnel_interface_address }}" }
- - { name: ovn-encap-type, value: geneve }
- - { name: ovn-remote, value: "{{ ovn_sb_connection }}" }
- - { name: ovn-remote-probe-interval, value: "{{ ovn_remote_probe_interval }}" }
- - { name: ovn-openflow-probe-interval, value: "{{ ovn_openflow_probe_interval }}" }
- - { name: ovn-bridge-mappings, value: "{{ ovn_mappings }}", state: "{{ 'present' if (inventory_hostname in groups['ovn-controller-network'] or computes_need_external_bridge | bool) else 'absent' }}" }
- - { name: ovn-chassis-mac-mappings, value: "{{ ovn_macs }}", state: "{{ 'present' if inventory_hostname in groups['ovn-controller-compute'] else 'absent' }}" }
- - { name: ovn-cms-options, value: "{{ ovn_cms_opts }}", state: "{{ 'present' if ovn_cms_opts != '' else 'absent' }}" }
- when: inventory_hostname in groups.get('ovn-controller', [])
diff --git a/ansible/roles/ovn/tasks/check-containers.yml b/ansible/roles/ovn/tasks/check-containers.yml
deleted file mode 100644
index c08cec2850..0000000000
--- a/ansible/roles/ovn/tasks/check-containers.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: Check ovn containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ ovn_services }}"
- notify:
- - Restart {{ item.key }} container
diff --git a/ansible/roles/ovn/tasks/config.yml b/ansible/roles/ovn/tasks/config.yml
deleted file mode 100644
index 4731d581d0..0000000000
--- a/ansible/roles/ovn/tasks/config.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item.key }}"
- state: "directory"
- owner: "{{ config_owner_user }}"
- group: "{{ config_owner_group }}"
- mode: "0770"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ ovn_services }}"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item.key }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ ovn_services }}"
- notify:
- - Restart {{ item.key }} container
diff --git a/ansible/roles/ovn/tasks/deploy-containers.yml b/ansible/roles/ovn/tasks/deploy-containers.yml
deleted file mode 100644
index eb24ab5c7a..0000000000
--- a/ansible/roles/ovn/tasks/deploy-containers.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- import_tasks: check-containers.yml
diff --git a/ansible/roles/ovn/tasks/deploy.yml b/ansible/roles/ovn/tasks/deploy.yml
deleted file mode 100644
index 3356de912e..0000000000
--- a/ansible/roles/ovn/tasks/deploy.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-- import_tasks: bootstrap.yml
- when: inventory_hostname in groups['ovn-controller']
-
-- name: Flush handlers
- meta: flush_handlers
diff --git a/ansible/roles/ovn/tasks/main.yml b/ansible/roles/ovn/tasks/main.yml
deleted file mode 100644
index bc5d1e6257..0000000000
--- a/ansible/roles/ovn/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include_tasks: "{{ kolla_action }}.yml"
diff --git a/ansible/roles/ovn/tasks/precheck.yml b/ansible/roles/ovn/tasks/precheck.yml
deleted file mode 100644
index 77bf882816..0000000000
--- a/ansible/roles/ovn/tasks/precheck.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-- name: Get container facts
- become: true
- kolla_container_facts:
- name:
- - ovn_nb_db
- - ovn_sb_db
- register: container_facts
-
-- name: Checking free port for OVN northbound db
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ ovn_nb_db_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - container_facts['ovn_nb_db'] is not defined
- - inventory_hostname in groups['ovn-nb-db']
-
-- name: Checking free port for OVN southbound db
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ ovn_sb_db_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - container_facts['ovn_sb_db'] is not defined
- - inventory_hostname in groups['ovn-sb-db']
diff --git a/ansible/roles/ovn/tasks/pull.yml b/ansible/roles/ovn/tasks/pull.yml
deleted file mode 100644
index 53f9c5fda1..0000000000
--- a/ansible/roles/ovn/tasks/pull.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- import_role:
- role: service-images-pull
diff --git a/ansible/roles/ovn/tasks/reconfigure.yml b/ansible/roles/ovn/tasks/reconfigure.yml
deleted file mode 100644
index 5b10a7e111..0000000000
--- a/ansible/roles/ovn/tasks/reconfigure.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- import_tasks: deploy.yml
diff --git a/ansible/roles/ovn/tasks/stop.yml b/ansible/roles/ovn/tasks/stop.yml
deleted file mode 100644
index 885576f7de..0000000000
--- a/ansible/roles/ovn/tasks/stop.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- import_role:
- role: service-stop
- vars:
- project_services: "{{ ovn_services }}"
- service_name: "{{ project_name }}"
diff --git a/ansible/roles/ovn/tasks/upgrade.yml b/ansible/roles/ovn/tasks/upgrade.yml
deleted file mode 100644
index 49edff81e3..0000000000
--- a/ansible/roles/ovn/tasks/upgrade.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-- name: Flush handlers
- meta: flush_handlers
diff --git a/ansible/roles/ovn/templates/ovn-nb-db.json.j2 b/ansible/roles/ovn/templates/ovn-nb-db.json.j2
deleted file mode 100644
index bc10ebd5c8..0000000000
--- a/ansible/roles/ovn/templates/ovn-nb-db.json.j2
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "command": "/usr/share/ovn/scripts/ovn-ctl run_nb_ovsdb --db-nb-create-insecure-remote=yes --db-nb-addr={{ api_interface_address | put_address_in_context('url') }} --db-nb-cluster-local-addr={{ api_interface_address | put_address_in_context('url') }} {% if groups['ovn-nb-db'] | length > 1 and inventory_hostname != groups['ovn-nb-db'][0] %} --db-nb-cluster-remote-addr={{ 'api' | kolla_address(groups['ovn-nb-db'][0]) | put_address_in_context('url') }} {% endif %} --db-nb-sock=/run/ovn/ovnnb_db.sock --db-nb-pid=/run/ovn/ovnnb_db.pid --db-nb-file=/var/lib/openvswitch/ovn-nb/ovnnb.db --ovn-nb-logfile=/var/log/kolla/openvswitch/ovn-nb-db.log",
- "permissions": [
- {
- "path": "/var/log/kolla/openvswitch",
- "owner": "root:root",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/ovn/templates/ovn-sb-db.json.j2 b/ansible/roles/ovn/templates/ovn-sb-db.json.j2
deleted file mode 100644
index 8d3d746394..0000000000
--- a/ansible/roles/ovn/templates/ovn-sb-db.json.j2
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "command": "/usr/share/ovn/scripts/ovn-ctl run_sb_ovsdb --db-sb-create-insecure-remote=yes --db-sb-addr={{ api_interface_address | put_address_in_context('url') }} --db-sb-cluster-local-addr={{ api_interface_address | put_address_in_context('url') }} {% if groups['ovn-sb-db'] | length > 1 and inventory_hostname != groups['ovn-sb-db'][0] %} --db-sb-cluster-remote-addr={{ 'api' | kolla_address(groups['ovn-sb-db'][0]) | put_address_in_context('url') }} {% endif %} --db-sb-sock=/run/ovn/ovnsb_db.sock --db-sb-pid=/run/ovn/ovnsb_db.pid --db-sb-file=/var/lib/openvswitch/ovn-sb/ovnsb.db --ovn-sb-logfile=/var/log/kolla/openvswitch/ovn-sb-db.log",
- "permissions": [
- {
- "path": "/var/log/kolla/openvswitch",
- "owner": "root:root",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/ovn/vars/main.yml b/ansible/roles/ovn/vars/main.yml
deleted file mode 100644
index c1f916170b..0000000000
--- a/ansible/roles/ovn/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-project_name: "ovn"
diff --git a/ansible/roles/ovs-dpdk/defaults/main.yml b/ansible/roles/ovs-dpdk/defaults/main.yml
index 2b052f27dd..b85b058c87 100644
--- a/ansible/roles/ovs-dpdk/defaults/main.yml
+++ b/ansible/roles/ovs-dpdk/defaults/main.yml
@@ -37,8 +37,10 @@ ovsdpdk_services:
####################
# OVS
####################
-ovs_bridge_mappings: "{% for bridge in neutron_bridge_name.split(',') %}physnet{{ loop.index0 + 1 }}:{{ bridge }}{% if not loop.last %},{% endif %}{% endfor %}"
-ovs_port_mappings: "{% for bridge in neutron_bridge_name.split(',') %} {{ neutron_external_interface.split(',')[loop.index0] }}:{{ bridge }}{% if not loop.last %},{% endif %}{% endfor %}"
+# Format: physnet1:br1,physnet2:br2
+ovs_bridge_mappings: "{{ neutron_physical_networks.split(',') | zip(neutron_bridge_name.split(',')) | map('join', ':') | join(',') }}"
+# Format: eth1:br1,eth2:br2
+ovs_port_mappings: "{{ neutron_external_interface.split(',') | zip(neutron_bridge_name.split(',')) | map('join', ':') | join(',') }}"
tunnel_interface_network: "{{ hostvars[inventory_hostname].ansible_facts[dpdk_tunnel_interface]['ipv4']['network'] }}/{{ hostvars[inventory_hostname].ansible_facts[dpdk_tunnel_interface]['ipv4']['netmask'] }}"
tunnel_interface_cidr: "{{ dpdk_tunnel_interface_address }}/{{ tunnel_interface_network | ipaddr('prefix') }}"
ovs_cidr_mappings: "{% if neutron_bridge_name.split(',') | length != 1 %} {neutron_bridge_name.split(',')[0]}:{{ tunnel_interface_cidr }} {% else %} {{ neutron_bridge_name }}:{{ tunnel_interface_cidr }} {% endif %}"
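
Editor's note: the new zip-based expressions pair the n-th physical network with the n-th bridge. A worked example with sample values (physnet1,physnet2 / br1,br2 are assumptions):

    - name: Evaluate the bridge mapping expression on sample input (sketch)
      debug:
        # ['physnet1','physnet2'] | zip(['br1','br2']) -> pairs;
        # map('join', ':') -> ['physnet1:br1', 'physnet2:br2'];
        # join(',') -> "physnet1:br1,physnet2:br2"
        msg: "{{ 'physnet1,physnet2'.split(',') | zip('br1,br2'.split(',')) | map('join', ':') | join(',') }}"
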
@@ -48,7 +50,7 @@ ovs_hugepage_mountpoint: /dev/hugepages
# ovs <2.7 required dpdk phyical port names to be index
# in pci address order as dpdkX where X is the index
-# ovs>=2.7 allows arbitray names but the pci address
+# ovs>=2.7 allows arbitrary names but the pci address
# must be set in a new dpdkdev-opt field
# valid values are indexed or named.
ovs_physical_port_policy: named
@@ -58,11 +60,11 @@ ovs_physical_port_policy: named
####################
ovsdpdk_tag: "{{ openstack_tag }}"
-ovsdpdk_db_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/ovsdpdk-db"
+ovsdpdk_db_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}ovsdpdk-db"
ovsdpdk_db_tag: "{{ ovsdpdk_tag }}"
ovsdpdk_db_image_full: "{{ ovsdpdk_db_image }}:{{ ovsdpdk_db_tag }}"
-ovsdpdk_vswitchd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/ovsdpdk-vswitchd"
+ovsdpdk_vswitchd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}ovsdpdk-vswitchd"
ovsdpdk_vswitchd_tag: "{{ ovsdpdk_tag }}"
ovsdpdk_vswitchd_image_full: "{{ ovsdpdk_vswitchd_image }}:{{ ovsdpdk_vswitchd_tag }}"
ovsdpdk_db_dimensions: "{{ default_container_dimensions }}"
diff --git a/ansible/roles/ovs-dpdk/files/ovs-dpdkctl.sh b/ansible/roles/ovs-dpdk/files/ovs-dpdkctl.sh
index ea8cd6e897..14223915e5 100755
--- a/ansible/roles/ovs-dpdk/files/ovs-dpdkctl.sh
+++ b/ansible/roles/ovs-dpdk/files/ovs-dpdkctl.sh
@@ -47,9 +47,9 @@ function generate_pciwhitelist {
for nic in $(list_dpdk_nics); do
address="$(get_value $nic address)"
if [ "$_Whitelist" == '' ]; then
- _Whitelist="-w $address"
+ _Whitelist="-a $address"
else
- _Whitelist="$_Whitelist -w $address"
+ _Whitelist="$_Whitelist -a $address"
fi
done
echo $_Whitelist
@@ -386,8 +386,8 @@ function usage {
ovs-dpdkctl.sh: A tool to configure ovs with dpdk.
- This tool automate the process of binding host insterfacesto a dpdk
- compaible driver (uio_pci_generic | vfio-pci) at boot.
-- This tool automate bootstraping ovs so that it can use the
+ compatible driver (uio_pci_generic | vfio-pci) at boot.
+- This tool automates bootstrapping ovs so that it can use the
dpdk accelerated netdev datapath.
commands:
@@ -403,14 +403,14 @@ commands:
- removes ovs-dpdkctl configuration file.
- bind_nics:
- iterates over all dpdk interfaces defined in ovs-dpdkctl config
- and binds the interface to the target driver specifed in the config
+ and binds the interface to the target driver specified in the config
if current driver does not equal target.
- unbind_nics:
- iterates over all dpdk interfaces defined in ovs-dpdkctl config
and restores the interface to its original non dpdk driver.
- init:
- - defines dpdk specific configuration paramater in the ovsdb.
- - creates bridges as spcified by ovs bridge_mappings in
+    - defines dpdk specific configuration parameters in the ovsdb.
+ - creates bridges as specified by ovs bridge_mappings in
ovs-dpdkctl config.
- creates dpdk ports as defined by ovs port_mappings in
ovs-dpdkctl config.
@@ -418,10 +418,10 @@ commands:
- prints this message
options:
- - debuging:
- - To enable debuging export OVS_DPDK_CTL_DEBUG=True
+ - debugging:
+ - To enable debugging export OVS_DPDK_CTL_DEBUG=True
- install:
- - The varibles described below can be defined to customise
+ - The variables described below can be defined to customise
installation of ovs-dpdkctl.
= ovs-dpdkctl.sh install
- bridge_mappings:
@@ -462,7 +462,7 @@ options:
- Example: ovs_mem_channels=2
- Default: "4"
- ovs_socket_mem:
- - A comma separated list of hugepage memory, specifed in MBs per numa node,
+ - A comma separated list of hugepage memory, specified in MBs per numa node,
allocated to the ovs-vswitchd to use for the dpdk dataplane.
- For best performance memory should be allocated evenly across all numa node
that will run a pmd.
@@ -482,7 +482,7 @@ options:
- The pci_whitelist allows multiple dpdk primary process to
utilise different pci devices without resulting in a conflict
of ownership.
- - Example: pci_whitelist="-w -w "
+ - Example: pci_whitelist="-a -a "
- Default: auto generated form port_mappings.
EOF
diff --git a/ansible/roles/ovs-dpdk/handlers/main.yml b/ansible/roles/ovs-dpdk/handlers/main.yml
index 1bd5761e55..7f014efbc5 100644
--- a/ansible/roles/ovs-dpdk/handlers/main.yml
+++ b/ansible/roles/ovs-dpdk/handlers/main.yml
@@ -4,22 +4,20 @@
service_name: "ovsdpdk-db"
service: "{{ ovsdpdk_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
notify:
- Waiting the ovs db service to be ready
- Ensuring ovsdpdk bridges are properly setup indexed
- Restart ovsdpdk-vswitchd container
- Ensuring ovsdpdk bridges are properly setup named
- wait for dpdk tunnel ip
- - ovs-dpdk gather facts
+ - OVS-DPDK gather facts
- name: Waiting the ovs db service to be ready
vars:
@@ -56,7 +54,7 @@
service_name: "ovsdpdk-vswitchd"
service: "{{ ovsdpdk_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -64,8 +62,6 @@
volumes: "{{ service.volumes }}"
privileged: "{{ service.privileged | default(True) }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
- name: Ensuring ovsdpdk bridges are properly setup named
vars:
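
Editor's note: the ovsdpdk-db restart handler chains into further handlers via notify; Ansible runs notified handlers in the order they are defined, so the wait and bridge-setup steps follow the restart. A minimal, self-contained sketch with invented names:

    - name: Restart example container
      debug:
        msg: restarting
      notify:
        - Wait for example service

    - name: Wait for example service  # runs after the restart handler fires
      debug:
        msg: waiting
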
diff --git a/ansible/roles/ovs-dpdk/tasks/check-containers.yml b/ansible/roles/ovs-dpdk/tasks/check-containers.yml
index 0e4f17887b..b7e2f7c29f 100644
--- a/ansible/roles/ovs-dpdk/tasks/check-containers.yml
+++ b/ansible/roles/ovs-dpdk/tasks/check-containers.yml
@@ -1,17 +1,3 @@
---
-- name: Check ovs containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- privileged: "{{ item.value.privileged | default(False) }}"
- volumes: "{{ item.value.volumes }}"
- dimensions: "{{ item.value.dimensions }}"
- when:
- - item.value.enabled | bool
- - item.value.host_in_groups | bool
- with_dict: "{{ ovsdpdk_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/ovs-dpdk/tasks/config.yml b/ansible/roles/ovs-dpdk/tasks/config.yml
index f318b3abc4..2c563d75eb 100644
--- a/ansible/roles/ovs-dpdk/tasks/config.yml
+++ b/ansible/roles/ovs-dpdk/tasks/config.yml
@@ -7,10 +7,7 @@
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
- when:
- - item.value.enabled | bool
- - item.value.host_in_groups | bool
- with_dict: "{{ ovsdpdk_services }}"
+ with_dict: "{{ ovsdpdk_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over config.json files for services
become: true
@@ -18,12 +15,7 @@
src: "{{ item.key }}.json.j2"
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
- when:
- - item.value.enabled | bool
- - item.value.host_in_groups | bool
- with_dict: "{{ ovsdpdk_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ ovsdpdk_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying ovs-dpdkctl tool
become: true
@@ -45,7 +37,7 @@
hugepage_mountpoint: "{{ ovs_hugepage_mountpoint }}"
ovs_physical_port_policy: "{{ ovs_physical_port_policy }}"
-- name: Binds the interface to the target driver specifed in the config
+- name: Binds the interface to the target driver specified in the config
become: True
command: "{{ node_config_directory }}/ovsdpdk-db/ovs-dpdkctl.sh bind_nics"
environment:
diff --git a/ansible/roles/ovs-dpdk/tasks/config_validate.yml b/ansible/roles/ovs-dpdk/tasks/config_validate.yml
new file mode 100644
index 0000000000..ed97d539c0
--- /dev/null
+++ b/ansible/roles/ovs-dpdk/tasks/config_validate.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible/roles/ovs-dpdk/vars/main.yml b/ansible/roles/ovs-dpdk/vars/main.yml
index fe77ecdd14..3fffe60c46 100644
--- a/ansible/roles/ovs-dpdk/vars/main.yml
+++ b/ansible/roles/ovs-dpdk/vars/main.yml
@@ -1,2 +1,4 @@
---
project_name: "ovs"
+
+kolla_role_name: "ovsdpdk"
diff --git a/ansible/roles/placement/defaults/main.yml b/ansible/roles/placement/defaults/main.yml
index a287454e62..d189408e88 100644
--- a/ansible/roles/placement/defaults/main.yml
+++ b/ansible/roles/placement/defaults/main.yml
@@ -20,10 +20,18 @@ placement_services:
enabled: "{{ enable_placement }}"
mode: "http"
external: true
- port: "{{ placement_api_port }}"
+ external_fqdn: "{{ placement_external_fqdn }}"
+ port: "{{ placement_api_public_port }}"
listen_port: "{{ placement_api_listen_port }}"
tls_backend: "{{ placement_enable_tls_backend }}"
+####################
+# Config Validate
+####################
+placement_config_validation:
+ - generator: "/placement/etc/placement/config-generator.conf"
+ config: "/etc/placement/placement.conf"
+
####################
# Database
####################
@@ -50,7 +58,7 @@ placement_database_shard:
####################
placement_tag: "{{ openstack_tag }}"
-placement_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/placement-api"
+placement_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}placement-api"
placement_api_tag: "{{ placement_tag }}"
placement_api_image_full: "{{ placement_api_image }}:{{ placement_api_tag }}"
@@ -74,16 +82,13 @@ placement_api_default_volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/placement/placement:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/placement' if placement_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/placement:/dev-mode/placement' if placement_dev_mode | bool else '' }}"
placement_api_extra_volumes: "{{ default_extra_volumes }}"
####################
# OpenStack
####################
-placement_internal_endpoint: "{{ internal_protocol }}://{{ placement_internal_fqdn | put_address_in_context('url') }}:{{ placement_api_port }}"
-placement_public_endpoint: "{{ public_protocol }}://{{ placement_external_fqdn | put_address_in_context('url') }}:{{ placement_api_port }}"
-
placement_logging_debug: "{{ openstack_logging_debug }}"
openstack_placement_auth: "{{ openstack_auth }}"
@@ -137,3 +142,5 @@ placement_ks_users:
# TLS
####################
placement_enable_tls_backend: "{{ kolla_enable_tls_backend }}"
+
+placement_copy_certs: "{{ kolla_copy_ca_into_containers | bool or placement_enable_tls_backend | bool }}"
diff --git a/ansible/roles/placement/handlers/main.yml b/ansible/roles/placement/handlers/main.yml
index a3d0ad103f..f8ae2e037a 100644
--- a/ansible/roles/placement/handlers/main.yml
+++ b/ansible/roles/placement/handlers/main.yml
@@ -4,7 +4,7 @@
vars:
service_name: "placement-api"
service: "{{ placement_services[service_name] }}"
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -12,5 +12,3 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/placement/tasks/bootstrap.yml b/ansible/roles/placement/tasks/bootstrap.yml
index 62b4317a92..43a7872725 100644
--- a/ansible/roles/placement/tasks/bootstrap.yml
+++ b/ansible/roles/placement/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating placement databases
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -17,6 +18,7 @@
- name: Creating placement databases user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
diff --git a/ansible/roles/placement/tasks/bootstrap_service.yml b/ansible/roles/placement/tasks/bootstrap_service.yml
index 6669701b8c..9a9ded8e0f 100644
--- a/ansible/roles/placement/tasks/bootstrap_service.yml
+++ b/ansible/roles/placement/tasks/bootstrap_service.yml
@@ -3,7 +3,7 @@
vars:
placement_api: "{{ placement_services['placement-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -14,7 +14,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_placement"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ placement_api.volumes | reject('equalto', '') | list }}"
run_once: True
delegate_to: "{{ groups[placement_api.group][0] }}"
diff --git a/ansible/roles/placement/tasks/check-containers.yml b/ansible/roles/placement/tasks/check-containers.yml
index be5e92f9de..b7e2f7c29f 100644
--- a/ansible/roles/placement/tasks/check-containers.yml
+++ b/ansible/roles/placement/tasks/check-containers.yml
@@ -1,21 +1,3 @@
---
-- name: Check placement containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- environment: "{{ item.value.environment | default(omit) }}"
- pid_mode: "{{ item.value.pid_mode | default('') }}"
- ipc_mode: "{{ item.value.ipc_mode | default(omit) }}"
- privileged: "{{ item.value.privileged | default(False) }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ placement_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/placement/tasks/config.yml b/ansible/roles/placement/tasks/config.yml
index 86ad2dc82a..00bf54ea00 100644
--- a/ansible/roles/placement/tasks/config.yml
+++ b/ansible/roles/placement/tasks/config.yml
@@ -7,10 +7,7 @@
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ placement_services }}"
+ with_dict: "{{ placement_services | select_services_enabled_and_mapped_to_host }}"
- name: Check if policies shall be overwritten
stat:
@@ -33,7 +30,7 @@
- include_tasks: copy-certs.yml
when:
- - kolla_copy_ca_into_containers | bool or placement_enable_tls_backend | bool
+ - placement_copy_certs
- name: Copying over config.json files for services
become: true
@@ -41,12 +38,7 @@
src: "{{ item.key }}.json.j2"
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ placement_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ placement_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over placement.conf
become: true
@@ -61,12 +53,7 @@
- "{{ node_custom_config }}/placement/{{ inventory_hostname }}/placement.conf"
dest: "{{ node_config_directory }}/{{ item.key }}/placement.conf"
mode: "0660"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ placement_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ placement_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over placement-api wsgi configuration
become: true
@@ -76,15 +63,11 @@
src: "{{ item }}"
dest: "{{ node_config_directory }}/placement-api/placement-api-wsgi.conf"
mode: "0660"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_first_found:
- "{{ node_custom_config }}/placement/{{ inventory_hostname }}/placement-api-wsgi.conf"
- "{{ node_custom_config }}/placement/placement-api-wsgi.conf"
- "placement-api-wsgi.conf.j2"
- notify:
- - Restart placement-api container
- name: Copying over migrate-db.rc.j2 configuration
become: true
@@ -94,11 +77,7 @@
src: "migrate-db.rc.j2"
dest: "{{ node_config_directory }}/placement-api/migrate-db.rc"
mode: "0660"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart placement-api container
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over existing policy file
become: true
@@ -107,9 +86,5 @@
dest: "{{ node_config_directory }}/{{ item.key }}/{{ placement_policy_file }}"
mode: "0660"
when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- placement_policy_file is defined
- with_dict: "{{ placement_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ placement_services | select_services_enabled_and_mapped_to_host }}"
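
Editor's note: select_services_enabled_and_mapped_to_host condenses the per-task conditions this patch removes throughout. Assuming the filter keeps only services that are enabled and whose group contains the current host, a pre-filter equivalent looks like the old code did:

    - name: Ensuring config directories exist (pre-filter equivalent, sketch)
      file:
        path: "{{ node_config_directory }}/{{ item.key }}"
        state: directory
      when:
        - inventory_hostname in groups[item.value.group]
        - item.value.enabled | bool
      with_dict: "{{ placement_services }}"
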
diff --git a/ansible/roles/placement/tasks/config_validate.yml b/ansible/roles/placement/tasks/config_validate.yml
new file mode 100644
index 0000000000..9411eb44a4
--- /dev/null
+++ b/ansible/roles/placement/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ placement_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ placement_config_validation }}"
diff --git a/ansible/roles/placement/tasks/precheck.yml b/ansible/roles/placement/tasks/precheck.yml
index 97381b93f8..02ff052fb4 100644
--- a/ansible/roles/placement/tasks/precheck.yml
+++ b/ansible/roles/placement/tasks/precheck.yml
@@ -8,13 +8,16 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- placement_api
+ check_mode: false
register: container_facts
- name: Checking free port for Placement API
vars:
- placement_api: "{{ placement_services['placement-api'] }}"
+ service: "{{ placement_services['placement-api'] }}"
wait_for:
host: "{{ api_interface_address }}"
port: "{{ placement_api_listen_port }}"
@@ -23,5 +26,4 @@
state: stopped
when:
- container_facts['placement_api'] is not defined
- - inventory_hostname in groups[placement_api.group]
- - placement_api.enabled | bool
+ - service | service_enabled_and_mapped_to_host
diff --git a/ansible/roles/placement/tasks/upgrade.yml b/ansible/roles/placement/tasks/upgrade.yml
index 7e98e01270..8853cd9f13 100644
--- a/ansible/roles/placement/tasks/upgrade.yml
+++ b/ansible/roles/placement/tasks/upgrade.yml
@@ -17,7 +17,7 @@
vars:
placement_api: "{{ placement_services['placement-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -28,7 +28,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_placement"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ placement_api.volumes }}"
run_once: True
delegate_to: "{{ groups[placement_api.group][0] }}"
diff --git a/ansible/roles/placement/templates/placement-api.json.j2 b/ansible/roles/placement/templates/placement-api.json.j2
index e489cec5af..d2f91f731a 100644
--- a/ansible/roles/placement/templates/placement-api.json.j2
+++ b/ansible/roles/placement/templates/placement-api.json.j2
@@ -38,6 +38,12 @@
"dest": "/etc/placement/certs/placement-key.pem",
"owner": "placement",
"perm": "0600"
+ }{% endif %}{% if placement_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/placement/templates/placement.conf.j2 b/ansible/roles/placement/templates/placement.conf.j2
index 429fd157ea..ba13cc1450 100644
--- a/ansible/roles/placement/templates/placement.conf.j2
+++ b/ansible/roles/placement/templates/placement.conf.j2
@@ -37,7 +37,7 @@ password = {{ placement_keystone_password }}
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
diff --git a/ansible/roles/prechecks/tasks/host_os_checks.yml b/ansible/roles/prechecks/tasks/host_os_checks.yml
index 12dc80e5dc..6a1ae3fa11 100644
--- a/ansible/roles/prechecks/tasks/host_os_checks.yml
+++ b/ansible/roles/prechecks/tasks/host_os_checks.yml
@@ -1,28 +1,29 @@
---
- name: Checking host OS distribution
- fail:
- msg: >-
+ assert:
+ that: ansible_facts.distribution in host_os_distributions
+ fail_msg: >-
Host OS distribution {{ ansible_facts.distribution }} is not supported.
Supported distributions are: {{ host_os_distributions.keys() | join(', ') }}
- when: ansible_facts.distribution not in host_os_distributions
- name: Checking host OS release or version
- fail:
- msg: >-
+ assert:
+ that:
+ - ansible_facts.distribution_release in host_os_distributions[ansible_facts.distribution] or
+ ansible_facts.distribution_version in host_os_distributions[ansible_facts.distribution] or
+ ansible_facts.distribution_major_version in host_os_distributions[ansible_facts.distribution]
+ fail_msg: >-
{{ ansible_facts.distribution }} release {{ ansible_facts.distribution_release }}
version {{ ansible_facts.distribution_version }} is not supported.
Supported releases are:
{{ host_os_distributions[ansible_facts.distribution] | join(', ') }}
- when:
- - ansible_facts.distribution_release not in host_os_distributions[ansible_facts.distribution]
- - ansible_facts.distribution_version not in host_os_distributions[ansible_facts.distribution]
- - ansible_facts.distribution_major_version not in host_os_distributions[ansible_facts.distribution]
- name: Checking if CentOS is Stream
become: true
command: grep -q Stream /etc/os-release
register: stream_status
changed_when: false
+ check_mode: false
when:
- ansible_facts.distribution == 'CentOS'
diff --git a/ansible/roles/prechecks/tasks/inventory_checks.yml b/ansible/roles/prechecks/tasks/inventory_checks.yml
index f8688f966f..29d917f5c5 100644
--- a/ansible/roles/prechecks/tasks/inventory_checks.yml
+++ b/ansible/roles/prechecks/tasks/inventory_checks.yml
@@ -1,11 +1,12 @@
---
-- name: Fail if group loadbalancer not exists or it is empty
- fail:
- msg: >-
+- name: Checking loadbalancer group
+ assert:
+ that:
+ - groups['loadbalancer'] is defined
+ - groups['loadbalancer'] | length > 0
+ fail_msg: >-
Inventory's group loadbalancer does not exist or it is empty.
Please update inventory, as haproxy group was renamed
to loadbalancer in the Xena release.
when:
- enable_loadbalancer | bool
- - groups['loadbalancer'] is not defined or
- groups['loadbalancer'] | length < 1
diff --git a/ansible/roles/prechecks/tasks/package_checks.yml b/ansible/roles/prechecks/tasks/package_checks.yml
index c86b40aef4..20def920f3 100644
--- a/ansible/roles/prechecks/tasks/package_checks.yml
+++ b/ansible/roles/prechecks/tasks/package_checks.yml
@@ -3,18 +3,30 @@
command: "{{ ansible_facts.python.executable }} -c \"import docker; print(docker.__version__)\""
register: result
changed_when: false
- when: inventory_hostname in groups['baremetal']
+ check_mode: false
+ when:
+ - inventory_hostname in groups['baremetal']
+ - kolla_container_engine == 'docker'
failed_when: result is failed or result.stdout is version(docker_py_version_min, '<')
+- name: Checking dbus-python package
+ command: "{{ ansible_facts.python.executable }} -c \"import dbus\""
+ register: dbus_present
+ changed_when: false
+ when: inventory_hostname in groups['baremetal']
+ failed_when: dbus_present is failed
+
# NOTE(osmanlicilegi): ansible_version.full includes patch number that's useless
# to check. as ansible_version does not provide major.minor in dict, we need to
# set it as variable.
- name: Checking Ansible version
- vars:
- ansible_version_host: "{{ ansible_version.major }}.{{ ansible_version.minor }}"
- fail:
- msg: >-
+ assert:
+ that:
+ - ansible_version_host is version(ansible_version_min, '>=')
+ - ansible_version_host is version(ansible_version_max, '<=')
+ fail_msg: >-
Ansible version should be between {{ ansible_version_min }} and {{ ansible_version_max }}.
Current version is {{ ansible_version.full }} which is not supported.
+ vars:
+ ansible_version_host: "{{ ansible_version.major }}.{{ ansible_version.minor }}"
run_once: true
- when: ansible_version_host is version(ansible_version_min, '<') or ansible_version_host is version(ansible_version_max, '>')
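The version test used above compares dotted version strings component-wise, and since ansible_version_host is built from major.minor only, the patch level never affects the result. For example (values illustrative), with the bounds bumped to 2.16/2.17 later in this change:

    # controller runs ansible-core 2.16.5, so ansible_version_host == "2.16"
    # "2.16" is version('2.16', '>=')  ->  true
    # "2.16" is version('2.17', '<=')  ->  true
    # "2.18" is version('2.17', '<=')  ->  false, so the assert fails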
diff --git a/ansible/roles/prechecks/tasks/port_checks.yml b/ansible/roles/prechecks/tasks/port_checks.yml
index ea3e7d72f7..576baa6d2d 100644
--- a/ansible/roles/prechecks/tasks/port_checks.yml
+++ b/ansible/roles/prechecks/tasks/port_checks.yml
@@ -5,7 +5,7 @@
- name: Checking the api_interface is active
fail: "msg='Please check the api_interface settings - interface {{ api_interface }} is not active'"
- when: not hostvars[inventory_hostname].ansible_facts[api_interface]['active']
+ when: not hostvars[inventory_hostname].ansible_facts[api_interface | replace('-', '_')]['active']
# kolla_address handles relevant address check
- name: Checking the api_interface ip address configuration
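The replace('-', '_') above accounts for Ansible's fact-key munging: per-interface facts are stored under keys in which dashes are converted to underscores. A short sketch of the behaviour, with an illustrative interface name:

    # With api_interface: "br-ex", per-interface facts live under
    # ansible_facts['br_ex']; ansible_facts['br-ex'] does not exist.
    - name: Show interface state (illustrative)
      vars:
        api_interface: br-ex
      debug:
        msg: "{{ ansible_facts[api_interface | replace('-', '_')]['active'] }}"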
diff --git a/ansible/roles/prechecks/tasks/service_checks.yml b/ansible/roles/prechecks/tasks/service_checks.yml
index cbab1f6aab..9cdc3fc3a4 100644
--- a/ansible/roles/prechecks/tasks/service_checks.yml
+++ b/ansible/roles/prechecks/tasks/service_checks.yml
@@ -1,10 +1,20 @@
---
+- name: Checking if system uses systemd
+ become: true
+ assert:
+ that:
+ - "ansible_facts.service_mgr == 'systemd'"
+ when: inventory_hostname in groups['baremetal']
+
- name: Checking Docker version
become: true
command: "{{ kolla_container_engine }} --version"
register: result
changed_when: false
- when: inventory_hostname in groups['baremetal']
+ check_mode: false
+ when:
+ - kolla_container_engine == 'docker'
+ - inventory_hostname in groups['baremetal']
failed_when: result is failed
or result.stdout | regex_replace('.*\\b(\\d+\\.\\d+\\.\\d+)\\b.*', '\\1') is version(docker_version_min, '<')
@@ -19,12 +29,14 @@
register: result
changed_when: false
failed_when: result.stdout | regex_replace('(.*ssh_key.*)', '') is search(":")
+ check_mode: false
- name: Check if nscd is running
command: pgrep nscd
ignore_errors: yes
failed_when: false
changed_when: false
+ check_mode: false
register: nscd_status
- name: Fail if nscd is running
@@ -51,3 +63,10 @@
msg: "We are sorry but enable_ceph is no longer supported. Please use external ceph support."
when:
- (enable_ceph | default()) | bool
+
+- name: Validate RabbitMQ variables
+ run_once: true
+ fail:
+ msg: |
+ Please enable exactly one of om_enable_rabbitmq_high_availability and om_enable_rabbitmq_quorum_queues
+ when: (om_enable_rabbitmq_high_availability | bool) == (om_enable_rabbitmq_quorum_queues | bool)
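Because the task fails whenever the two booleans are equal, it rejects both the both-enabled and the neither-enabled case, so exactly one of the two messaging options must be chosen. An illustrative globals.yml snippet that satisfies the check:

    om_enable_rabbitmq_quorum_queues: true
    om_enable_rabbitmq_high_availability: false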
diff --git a/ansible/roles/prechecks/tasks/timesync_checks.yml b/ansible/roles/prechecks/tasks/timesync_checks.yml
index d676a69785..c6a9bd2a07 100644
--- a/ansible/roles/prechecks/tasks/timesync_checks.yml
+++ b/ansible/roles/prechecks/tasks/timesync_checks.yml
@@ -14,6 +14,7 @@
register: systemctl_is_active
changed_when: false
failed_when: false
+ check_mode: false
- name: Fail if a host NTP daemon is not running
fail:
@@ -31,6 +32,7 @@
command: timedatectl status
register: timedatectl_status
changed_when: false
+ check_mode: false
- name: Fail if the clock is not synchronized
fail:
diff --git a/ansible/roles/prechecks/tasks/user_checks.yml b/ansible/roles/prechecks/tasks/user_checks.yml
index 82f273e57e..94fd5b4668 100644
--- a/ansible/roles/prechecks/tasks/user_checks.yml
+++ b/ansible/roles/prechecks/tasks/user_checks.yml
@@ -17,3 +17,4 @@
register: result
failed_when: result is failed
changed_when: False
+ check_mode: false
diff --git a/ansible/roles/prechecks/vars/main.yml b/ansible/roles/prechecks/vars/main.yml
index f67ab08b87..7e253b9cfa 100644
--- a/ansible/roles/prechecks/vars/main.yml
+++ b/ansible/roles/prechecks/vars/main.yml
@@ -1,8 +1,8 @@
---
docker_version_min: '18.09'
docker_py_version_min: '3.4.1'
-ansible_version_min: '2.12'
-ansible_version_max: '2.13'
+ansible_version_min: '2.16'
+ansible_version_max: '2.17'
# Top level keys should match ansible_facts.distribution.
# These map to lists of supported releases (ansible_facts.distribution_release) or
@@ -12,11 +12,9 @@ host_os_distributions:
CentOS:
- "9"
Debian:
- - "bullseye"
- openEuler:
- - "22.03"
+ - "bookworm"
Rocky:
- "9"
Ubuntu:
- - "focal"
- "jammy"
+ - "noble"
diff --git a/ansible/roles/prometheus/defaults/main.yml b/ansible/roles/prometheus/defaults/main.yml
index 10745bd99c..71168fb326 100644
--- a/ansible/roles/prometheus/defaults/main.yml
+++ b/ansible/roles/prometheus/defaults/main.yml
@@ -14,6 +14,14 @@ prometheus_services:
external: false
port: "{{ prometheus_port }}"
active_passive: "{{ prometheus_active_passive | bool }}"
+ prometheus_server_external:
+ enabled: "{{ enable_prometheus_server_external | bool }}"
+ mode: "http"
+ external: true
+ external_fqdn: "{{ prometheus_external_fqdn }}"
+ port: "{{ prometheus_public_port }}"
+ listen_port: "{{ prometheus_listen_port }}"
+ active_passive: "{{ prometheus_active_passive | bool }}"
prometheus-node-exporter:
container_name: prometheus_node_exporter
group: prometheus-node-exporter
@@ -29,13 +37,6 @@ prometheus_services:
image: "{{ prometheus_mysqld_exporter_image_full }}"
volumes: "{{ prometheus_mysqld_exporter_default_volumes + prometheus_mysqld_exporter_extra_volumes }}"
dimensions: "{{ prometheus_mysqld_exporter_dimensions }}"
- prometheus-haproxy-exporter:
- container_name: prometheus_haproxy_exporter
- group: prometheus-haproxy-exporter
- enabled: "{{ enable_prometheus_haproxy_exporter | bool }}"
- image: "{{ prometheus_haproxy_exporter_image_full }}"
- volumes: "{{ prometheus_haproxy_exporter_default_volumes + prometheus_haproxy_exporter_extra_volumes }}"
- dimensions: "{{ prometheus_haproxy_exporter_dimensions }}"
prometheus-memcached-exporter:
container_name: prometheus_memcached_exporter
group: prometheus-memcached-exporter
@@ -70,7 +71,9 @@ prometheus_services:
enabled: "{{ enable_prometheus_alertmanager_external | bool }}"
mode: "http"
external: true
- port: "{{ prometheus_alertmanager_port }}"
+ external_fqdn: "{{ prometheus_alertmanager_external_fqdn }}"
+ port: "{{ prometheus_alertmanager_public_port }}"
+ listen_port: "{{ prometheus_alertmanager_listen_port }}"
auth_user: "{{ prometheus_alertmanager_user }}"
auth_pass: "{{ prometheus_alertmanager_password }}"
active_passive: "{{ prometheus_alertmanager_active_passive | bool }}"
@@ -89,11 +92,15 @@ prometheus_services:
mode: "http"
external: false
port: "{{ prometheus_openstack_exporter_port }}"
+ backend_http_extra:
+ - "timeout server {{ prometheus_openstack_exporter_timeout }}"
prometheus_openstack_exporter_external:
enabled: "{{ enable_prometheus_openstack_exporter_external | bool }}"
mode: "http"
external: true
port: "{{ prometheus_openstack_exporter_port }}"
+ backend_http_extra:
+ - "timeout server {{ prometheus_openstack_exporter_timeout }}"
prometheus-elasticsearch-exporter:
container_name: prometheus_elasticsearch_exporter
group: prometheus-elasticsearch-exporter
@@ -115,14 +122,6 @@ prometheus_services:
image: "{{ prometheus_libvirt_exporter_image_full }}"
volumes: "{{ prometheus_libvirt_exporter_default_volumes + prometheus_libvirt_exporter_extra_volumes }}"
dimensions: "{{ prometheus_libvirt_exporter_dimensions }}"
- prometheus-msteams:
- container_name: "prometheus_msteams"
- group: "prometheus-msteams"
- enabled: "{{ enable_prometheus_msteams | bool }}"
- environment: "{{ prometheus_msteams_container_proxy }}"
- image: "{{ prometheus_msteams_image_full }}"
- volumes: "{{ prometheus_msteams_default_volumes + prometheus_msteams_extra_volumes }}"
- dimensions: "{{ prometheus_msteams_dimensions }}"
####################
# Prometheus Server
@@ -130,6 +129,29 @@ prometheus_services:
prometheus_external_labels:
# :
+####################
+# Server
+####################
+enable_prometheus_server_external: false
+
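With the new prometheus_server_external entry in prometheus_services, publishing the server on the external VIP is a single switch; this assumes prometheus_external_fqdn falls back to the external VIP FQDN, as the other *_external_fqdn variables do. Illustrative globals.yml snippet:

    enable_prometheus_server_external: true
    # optional, if a dedicated name is preferred over the default:
    prometheus_external_fqdn: metrics.cloud.example.com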
+####################
+# Basic Auth
+####################
+prometheus_basic_auth_users: "{{ prometheus_basic_auth_users_default + prometheus_basic_auth_users_extra }}"
+
+prometheus_basic_auth_users_default:
+ - username: admin
+ password: "{{ prometheus_password }}"
+ enabled: true
+ - username: "{{ prometheus_grafana_user }}"
+ password: "{{ prometheus_grafana_password }}"
+ enabled: "{{ enable_grafana }}"
+ - username: "{{ prometheus_skyline_user }}"
+ password: "{{ prometheus_skyline_password }}"
+ enabled: "{{ enable_skyline }}"
+
+prometheus_basic_auth_users_extra: []
+
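Operators can append credentials without replacing the default list. An illustrative snippet with a hypothetical read-only user (the password variable is an assumption, not a stock kolla-ansible secret):

    prometheus_basic_auth_users_extra:
      - username: metrics-reader
        password: "{{ metrics_reader_password }}"
        enabled: true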
####################
# Database
####################
@@ -141,6 +163,11 @@ prometheus_mysql_exporter_database_user: "{% if use_preconfigured_databases | bo
prometheus_active_passive: true
prometheus_alertmanager_active_passive: true
+####################
+# Node Exporter
+####################
+prometheus_node_exporter_targets_extra: []
+
####################
# Blackbox
####################
@@ -148,68 +175,211 @@ prometheus_alertmanager_active_passive: true
# A list of endpoints to monitor. Each target is in the format:
# 'service_name:blackbox_exporter_module:endpoint' for example:
#
-# prometheus_blackbox_exporter_targets:
-# - 'glance:os_endpoint:{{ external_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ glance_api_port}}'
+# prometheus_blackbox_exporter_endpoints_custom:
+# - 'custom_service:http_2xx:{{ public_protocol }}://{{ external_fqdn | put_address_in_context('url') }}:{{ custom_service_port }}'
#
+# For a list of default endpoints see
+# prometheus_blackbox_exporter_endpoints_default.
# For a list of modules see the blackbox exporter config.
-prometheus_blackbox_exporter_endpoints: []
+prometheus_blackbox_exporter_endpoints: "{{ prometheus_blackbox_exporter_endpoints_default | selectattr('enabled', 'true') | map(attribute='endpoints') | flatten | union(prometheus_blackbox_exporter_endpoints_custom) | unique | select | list }}"
+
+prometheus_blackbox_exporter_endpoints_default:
+ # OpenStack endpoints
+ - endpoints:
+ - "aodh:os_endpoint:{{ aodh_public_endpoint }}"
+ - "{{ ('aodh_internal:os_endpoint:' + aodh_internal_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_aodh | bool }}"
+ - endpoints:
+ - "barbican:os_endpoint:{{ barbican_public_endpoint }}"
+ - "{{ ('barbican_internal:os_endpoint:' + barbican_internal_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_barbican | bool }}"
+ - endpoints:
+ - "blazar:os_endpoint:{{ blazar_public_base_endpoint }}"
+ - "{{ ('blazar_internal:os_endpoint:' + blazar_internal_base_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_blazar | bool }}"
+ - endpoints:
+ - "ceph_rgw:http_2xx:{{ ceph_rgw_public_base_endpoint }}"
+ - "{{ ('ceph_rgw_internal:http_2xx:' + ceph_rgw_internal_base_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_ceph_rgw | bool }}"
+ - endpoints:
+ - "cinder:os_endpoint:{{ cinder_public_base_endpoint }}"
+ - "{{ ('cinder_internal:os_endpoint:' + cinder_internal_base_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_cinder | bool }}"
+ - endpoints:
+ - "cloudkitty:os_endpoint:{{ cloudkitty_public_endpoint }}"
+ - "{{ ('cloudkitty_internal:os_endpoint:' + cloudkitty_internal_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_cloudkitty | bool }}"
+ - endpoints:
+ - "designate:os_endpoint:{{ designate_public_endpoint }}"
+ - "{{ ('designate_internal:os_endpoint:' + designate_internal_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_designate | bool }}"
+ - endpoints:
+ - "glance:os_endpoint:{{ glance_public_endpoint }}"
+ - "{{ ('glance_internal:os_endpoint:' + glance_internal_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_glance | bool }}"
+ - endpoints:
+ - "gnocchi:os_endpoint:{{ gnocchi_public_endpoint }}"
+ - "{{ ('gnocchi_internal:os_endpoint:' + gnocchi_internal_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_gnocchi | bool }}"
+ - endpoints:
+ - "heat:os_endpoint:{{ heat_public_base_endpoint }}"
+ - "{{ ('heat_internal:os_endpoint:' + heat_internal_base_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ - "heat_cfn:os_endpoint:{{ heat_cfn_public_base_endpoint }}"
+ - "{{ ('heat_cfn_internal:os_endpoint:' + heat_cfn_internal_base_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_heat | bool }}"
+ - endpoints:
+ - "horizon:http_2xx:{{ horizon_public_endpoint }}"
+ - "{{ ('horizon_internal:http_2xx:' + horizon_internal_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_horizon | bool }}"
+ - endpoints:
+ - "ironic:os_endpoint:{{ ironic_public_endpoint }}"
+ - "{{ ('ironic_internal:os_endpoint:' + ironic_internal_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ - "ironic_inspector:os_endpoint:{{ ironic_inspector_public_endpoint }}"
+ - "{{ ('ironic_inspector_internal:os_endpoint:' + ironic_inspector_internal_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_ironic | bool }}"
+ - endpoints:
+ - "keystone:os_endpoint:{{ keystone_public_url }}"
+ - "{{ ('keystone_internal:os_endpoint:' + keystone_internal_url) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_keystone | bool }}"
+ - endpoints:
+ - "magnum:os_endpoint:{{ magnum_public_base_endpoint }}"
+ - "{{ ('magnum_internal:os_endpoint:' + magnum_internal_base_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_magnum | bool }}"
+ - endpoints:
+ - "manila:os_endpoint:{{ manila_public_base_endpoint }}"
+ - "{{ ('manila_internal:os_endpoint:' + manila_internal_base_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_manila | bool }}"
+ - endpoints:
+ - "masakari:os_endpoint:{{ masakari_public_endpoint }}"
+ - "{{ ('masakari_internal:os_endpoint:' + masakari_internal_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_masakari | bool }}"
+ - endpoints:
+ - "mistral:os_endpoint:{{ mistral_public_base_endpoint }}"
+ - "{{ ('mistral_internal:os_endpoint:' + mistral_internal_base_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_mistral | bool }}"
+ - endpoints:
+ - "neutron:os_endpoint:{{ neutron_public_endpoint }}"
+ - "{{ ('neutron_internal:os_endpoint:' + neutron_internal_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_neutron | bool }}"
+ - endpoints:
+ - "nova:os_endpoint:{{ nova_public_base_endpoint }}"
+ - "{{ ('nova_internal:os_endpoint:' + nova_internal_base_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_nova | bool }}"
+ - endpoints:
+ - "octavia:os_endpoint:{{ octavia_public_endpoint }}"
+ - "{{ ('octavia_internal:os_endpoint:' + octavia_internal_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_octavia | bool }}"
+ - endpoints:
+ - "placement:os_endpoint:{{ placement_public_endpoint }}"
+ - "{{ ('placement_internal:os_endpoint:' + placement_internal_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_placement | bool }}"
+ - endpoints:
+ - "skyline_apiserver:os_endpoint:{{ skyline_apiserver_public_endpoint }}"
+ - "{{ ('skyline_apiserver_internal:os_endpoint:' + skyline_apiserver_internal_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ - "skyline_console:os_endpoint:{{ skyline_console_public_endpoint }}"
+ - "{{ ('skyline_console_internal:os_endpoint:' + skyline_console_internal_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_skyline | bool }}"
+ - endpoints:
+ - "swift:os_endpoint:{{ swift_public_base_endpoint }}"
+ - "{{ ('swift_internal:os_endpoint:' + swift_internal_base_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_swift | bool }}"
+ - endpoints:
+ - "tacker:os_endpoint:{{ tacker_public_endpoint }}"
+ - "{{ ('tacker_internal:os_endpoint:' + tacker_internal_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_tacker | bool }}"
+ - endpoints:
+ - "trove:os_endpoint:{{ trove_public_base_endpoint }}"
+ - "{{ ('trove_internal:os_endpoint:' + trove_internal_base_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_trove | bool }}"
+ - endpoints:
+ - "venus:os_endpoint:{{ venus_public_endpoint }}"
+ - "{{ ('venus_internal:os_endpoint:' + venus_internal_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_venus | bool }}"
+ - endpoints:
+ - "watcher:os_endpoint:{{ watcher_public_endpoint }}"
+ - "{{ ('watcher_internal:os_endpoint:' + watcher_internal_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_watcher | bool }}"
+ - endpoints:
+ - "zun:os_endpoint:{{ zun_public_base_endpoint }}"
+ - "{{ ('zun_internal:os_endpoint:' + zun_internal_base_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_zun | bool }}"
+ # Additional service endpoints
+ - endpoints: "{% set etcd_endpoints = [] %}{% for host in groups.get('etcd', []) %}{{ etcd_endpoints.append('etcd_' + host + ':http_2xx:' + hostvars[host]['etcd_protocol'] + '://' + ('api' | kolla_address(host) | put_address_in_context('url')) + ':' + hostvars[host]['etcd_client_port'] + '/metrics')}}{% endfor %}{{ etcd_endpoints }}"
+ enabled: "{{ enable_etcd | bool }}"
+ - endpoints:
+ - "grafana:http_2xx:{{ grafana_public_endpoint }}"
+ - "{{ ('grafana_internal:http_2xx:' + grafana_internal_endpoint) if not kolla_same_external_internal_vip | bool }}"
+ enabled: "{{ enable_grafana | bool }}"
+ - endpoints:
+ - "opensearch:http_2xx:{{ opensearch_internal_endpoint }}"
+ enabled: "{{ enable_opensearch | bool }}"
+ - endpoints:
+ - "opensearch_dashboards:http_2xx_opensearch_dashboards:{{ opensearch_dashboards_internal_endpoint }}/api/status"
+ enabled: "{{ enable_opensearch_dashboards | bool }}"
+ - endpoints:
+ - "opensearch_dashboards_external:http_2xx_opensearch_dashboards:{{ opensearch_dashboards_external_endpoint }}/api/status"
+ enabled: "{{ enable_opensearch_dashboards_external | bool }}"
+ - endpoints:
+ - "prometheus:http_2xx_prometheus:{{ prometheus_public_endpoint if enable_prometheus_server_external else prometheus_internal_endpoint }}/-/healthy"
+ enabled: "{{ enable_prometheus | bool }}"
+ - endpoints:
+ - "prometheus_alertmanager:http_2xx_alertmanager:{{ prometheus_alertmanager_public_endpoint if enable_prometheus_alertmanager_external else prometheus_alertmanager_internal_endpoint }}"
+ enabled: "{{ enable_prometheus_alertmanager | bool }}"
+ - endpoints: "{% set rabbitmq_endpoints = [] %}{% for host in groups.get('rabbitmq', []) %}{{ rabbitmq_endpoints.append('rabbitmq_' + host + (':tls_connect:' if rabbitmq_enable_tls else ':tcp_connect:') + ('api' | kolla_address(host) | put_address_in_context('url')) + ':' + hostvars[host]['rabbitmq_port'] ) }}{% endfor %}{{ rabbitmq_endpoints }}"
+ enabled: "{{ enable_rabbitmq | bool }}"
+ - endpoints: "{% set redis_endpoints = [] %}{% for host in groups.get('redis', []) %}{{ redis_endpoints.append('redis_' + host + ':tcp_connect:' + ('api' | kolla_address(host) | put_address_in_context('url')) + ':' + hostvars[host]['redis_port']) }}{% endfor %}{{ redis_endpoints }}"
+ enabled: "{{ enable_redis | bool }}"
+
+prometheus_blackbox_exporter_endpoints_custom: []
+
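Note how the default list is assembled: when kolla_same_external_internal_vip is true, each inline "... if not kolla_same_external_internal_vip | bool" expression has no else branch and renders as an empty string, and the trailing "| select | list" in prometheus_blackbox_exporter_endpoints drops those empties. Extra targets go through the custom list, e.g. (hypothetical service):

    prometheus_blackbox_exporter_endpoints_custom:
      - "myapp:http_2xx:https://myapp.example.com:8080"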
####################
# Docker
####################
prometheus_tag: "{{ openstack_tag }}"
-prometheus_server_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/prometheus-v2-server"
+prometheus_server_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}prometheus-v2-server"
prometheus_server_tag: "{{ prometheus_tag }}"
prometheus_server_image_full: "{{ prometheus_server_image }}:{{ prometheus_server_tag }}"
-prometheus_haproxy_exporter_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/prometheus-haproxy-exporter"
-prometheus_haproxy_exporter_tag: "{{ prometheus_tag }}"
-prometheus_haproxy_exporter_image_full: "{{ prometheus_haproxy_exporter_image }}:{{ prometheus_haproxy_exporter_tag }}"
-
-prometheus_mysqld_exporter_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/prometheus-mysqld-exporter"
+prometheus_mysqld_exporter_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}prometheus-mysqld-exporter"
prometheus_mysqld_exporter_tag: "{{ prometheus_tag }}"
prometheus_mysqld_exporter_image_full: "{{ prometheus_mysqld_exporter_image }}:{{ prometheus_mysqld_exporter_tag }}"
-prometheus_node_exporter_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/prometheus-node-exporter"
+prometheus_node_exporter_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}prometheus-node-exporter"
prometheus_node_exporter_tag: "{{ prometheus_tag }}"
prometheus_node_exporter_image_full: "{{ prometheus_node_exporter_image }}:{{ prometheus_node_exporter_tag }}"
-prometheus_memcached_exporter_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/prometheus-memcached-exporter"
+prometheus_memcached_exporter_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}prometheus-memcached-exporter"
prometheus_memcached_exporter_tag: "{{ prometheus_tag }}"
prometheus_memcached_exporter_image_full: "{{ prometheus_memcached_exporter_image }}:{{ prometheus_memcached_exporter_tag }}"
-prometheus_cadvisor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/prometheus-cadvisor"
+prometheus_cadvisor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}prometheus-cadvisor"
prometheus_cadvisor_tag: "{{ prometheus_tag }}"
prometheus_cadvisor_image_full: "{{ prometheus_cadvisor_image }}:{{ prometheus_cadvisor_tag }}"
-prometheus_alertmanager_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/prometheus-alertmanager"
+prometheus_alertmanager_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}prometheus-alertmanager"
prometheus_alertmanager_tag: "{{ prometheus_tag }}"
prometheus_alertmanager_image_full: "{{ prometheus_alertmanager_image }}:{{ prometheus_alertmanager_tag }}"
# Prometheus openstack_exporter
-prometheus_openstack_exporter_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/prometheus-openstack-exporter"
+prometheus_openstack_exporter_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}prometheus-openstack-exporter"
prometheus_openstack_exporter_tag: "{{ prometheus_tag }}"
prometheus_openstack_exporter_image_full: "{{ prometheus_openstack_exporter_image }}:{{ prometheus_openstack_exporter_tag }}"
-prometheus_elasticsearch_exporter_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/prometheus-elasticsearch-exporter"
+prometheus_elasticsearch_exporter_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}prometheus-elasticsearch-exporter"
prometheus_elasticsearch_exporter_tag: "{{ prometheus_tag }}"
prometheus_elasticsearch_exporter_image_full: "{{ prometheus_elasticsearch_exporter_image }}:{{ prometheus_elasticsearch_exporter_tag }}"
-prometheus_blackbox_exporter_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/prometheus-blackbox-exporter"
+prometheus_blackbox_exporter_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}prometheus-blackbox-exporter"
prometheus_blackbox_exporter_tag: "{{ prometheus_tag }}"
prometheus_blackbox_exporter_image_full: "{{ prometheus_blackbox_exporter_image }}:{{ prometheus_blackbox_exporter_tag }}"
-prometheus_libvirt_exporter_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/prometheus-libvirt-exporter"
+prometheus_libvirt_exporter_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}prometheus-libvirt-exporter"
prometheus_libvirt_exporter_tag: "{{ prometheus_tag }}"
prometheus_libvirt_exporter_image_full: "{{ prometheus_libvirt_exporter_image }}:{{ prometheus_libvirt_exporter_tag }}"
-prometheus_msteams_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/prometheus-msteams"
-prometheus_msteams_tag: "{{ prometheus_tag }}"
-prometheus_msteams_image_full: "{{ prometheus_msteams_image }}:{{ prometheus_msteams_tag }}"
-
prometheus_server_dimensions: "{{ default_container_dimensions }}"
-prometheus_haproxy_exporter_dimensions: "{{ default_container_dimensions }}"
prometheus_mysqld_exporter_dimensions: "{{ default_container_dimensions }}"
prometheus_node_exporter_dimensions: "{{ default_container_dimensions }}"
prometheus_memcached_exporter_dimensions: "{{ default_container_dimensions }}"
@@ -219,7 +389,6 @@ prometheus_openstack_exporter_dimensions: "{{ default_container_dimensions }}"
prometheus_elasticsearch_exporter_dimensions: "{{ default_container_dimensions }}"
prometheus_blackbox_exporter_dimensions: "{{ default_container_dimensions }}"
prometheus_libvirt_exporter_dimensions: "{{ default_container_dimensions }}"
-prometheus_msteams_dimensions: "{{ default_container_dimensions }}"
prometheus_server_default_volumes:
- "{{ node_config_directory }}/prometheus-server/:{{ container_config_directory }}/:ro"
@@ -227,12 +396,6 @@ prometheus_server_default_volumes:
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "prometheus_v2:/var/lib/prometheus"
- "kolla_logs:/var/log/kolla/"
-prometheus_haproxy_exporter_default_volumes:
- - "{{ node_config_directory }}/prometheus-haproxy-exporter/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla/"
- - "haproxy_socket:/var/lib/kolla/haproxy"
prometheus_mysqld_exporter_default_volumes:
- "{{ node_config_directory }}/prometheus-mysqld-exporter/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
@@ -285,15 +448,9 @@ prometheus_libvirt_exporter_default_volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "/run/libvirt:/run/libvirt:ro"
-prometheus_msteams_default_volumes:
- - "{{ node_config_directory }}/prometheus-msteams/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla/"
prometheus_extra_volumes: "{{ default_extra_volumes }}"
prometheus_server_extra_volumes: "{{ prometheus_extra_volumes }}"
-prometheus_haproxy_exporter_extra_volumes: "{{ prometheus_extra_volumes }}"
prometheus_mysqld_exporter_extra_volumes: "{{ prometheus_extra_volumes }}"
prometheus_node_exporter_extra_volumes: "{{ prometheus_extra_volumes }}"
prometheus_memcached_exporter_extra_volumes: "{{ prometheus_extra_volumes }}"
@@ -303,9 +460,6 @@ prometheus_openstack_exporter_extra_volumes: "{{ prometheus_extra_volumes }}"
prometheus_elasticsearch_exporter_extra_volumes: "{{ prometheus_extra_volumes }}"
prometheus_blackbox_exporter_extra_volumes: "{{ prometheus_extra_volumes }}"
prometheus_libvirt_exporter_extra_volumes: "{{ prometheus_extra_volumes }}"
-prometheus_msteams_extra_volumes: "{{ prometheus_extra_volumes }}"
-
-prometheus_msteams_container_proxy: "{{ container_proxy }}"
prometheus_openstack_exporter_disabled_volume: "{{ '--disable-service.volume' if not enable_cinder | bool else '' }}"
prometheus_openstack_exporter_disabled_dns: "{{ '--disable-service.dns' if not enable_designate | bool else '' }}"
@@ -313,10 +467,15 @@ prometheus_openstack_exporter_disabled_object: "{{ '--disable-service.object-sto
prometheus_openstack_exporter_disabled_lb: "{{ '--disable-service.load-balancer --disable-metric=neutron-loadbalancers --disable-metric=neutron-loadbalancers_not_active' if not enable_octavia | bool else '' }}"
prometheus_openstack_exporter_disabled_items: "{{ [prometheus_openstack_exporter_disabled_volume, prometheus_openstack_exporter_disabled_dns, prometheus_openstack_exporter_disabled_object, prometheus_openstack_exporter_disabled_lb | trim] | join(' ') | trim }}"
+prometheus_server_command: >-
+ /opt/prometheus/prometheus --web.config.file=/etc/prometheus/web.yml --config.file /etc/prometheus/prometheus.yml
+ --web.listen-address {{ api_interface_address | put_address_in_context('url') }}:{{ prometheus_port }}
+ --web.external-url={{ prometheus_public_endpoint if enable_prometheus_server_external else prometheus_internal_endpoint }}
+ --storage.tsdb.path /var/lib/prometheus{% if prometheus_cmdline_extras %} {{ prometheus_cmdline_extras }}{% endif %}
+
prometheus_blackbox_exporter_cmdline_extras: ""
-prometheus_cadvisor_cmdline_extras: "--docker_only --store_container_labels=false --disable_metrics=percpu,referenced_memory,cpu_topology,resctrl,udp,advtcp,sched,hugetlb,memory_numa,tcp,process"
+prometheus_cadvisor_cmdline_extras: "--docker_only --store_container_labels=false --disable_metrics=percpu,referenced_memory,cpu_topology,resctrl,udp,advtcp,sched,hugetlb,memory_numa,tcp,process --housekeeping_interval={{ prometheus_scrape_interval }}"
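Aligning cAdvisor's housekeeping interval with the Prometheus scrape interval avoids collecting container stats more often than they are scraped. Assuming prometheus_scrape_interval is the existing group_vars default, an operator could relax both at once with an illustrative override:

    prometheus_scrape_interval: 120s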
prometheus_elasticsearch_exporter_cmdline_extras: ""
-prometheus_haproxy_exporter_cmdline_extras: ""
prometheus_memcached_exporter_cmdline_extras: ""
prometheus_mysqld_exporter_cmdline_extras: ""
prometheus_node_exporter_cmdline_extras: ""
diff --git a/ansible/roles/prometheus/handlers/main.yml b/ansible/roles/prometheus/handlers/main.yml
index ca6b319f86..7243222c54 100644
--- a/ansible/roles/prometheus/handlers/main.yml
+++ b/ansible/roles/prometheus/handlers/main.yml
@@ -4,22 +4,20 @@
service_name: "prometheus-server"
service: "{{ prometheus_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
- name: Restart prometheus-node-exporter container
vars:
service_name: "prometheus-node-exporter"
service: "{{ prometheus_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -27,90 +25,65 @@
pid_mode: "{{ service.pid_mode | default(omit) }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
- name: Restart prometheus-mysqld-exporter container
vars:
service_name: "prometheus-mysqld-exporter"
service: "{{ prometheus_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
-
-- name: Restart prometheus-haproxy-exporter container
- vars:
- service_name: "prometheus-haproxy-exporter"
- service: "{{ prometheus_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
- name: Restart prometheus-memcached-exporter container
vars:
service_name: "prometheus-memcached-exporter"
service: "{{ prometheus_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
- name: Restart prometheus-cadvisor container
vars:
service_name: "prometheus-cadvisor"
service: "{{ prometheus_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
- name: Restart prometheus-alertmanager container
vars:
service_name: "prometheus-alertmanager"
service: "{{ prometheus_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
- name: Restart prometheus-openstack-exporter container
vars:
service_name: "prometheus-openstack-exporter"
service: "{{ prometheus_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -118,66 +91,42 @@
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
environment: "{{ service.environment | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart prometheus-elasticsearch-exporter container
vars:
service_name: "prometheus-elasticsearch-exporter"
service: "{{ prometheus_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
- name: Restart prometheus-blackbox-exporter container
vars:
service_name: "prometheus-blackbox-exporter"
service: "{{ prometheus_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
- name: Restart prometheus-libvirt-exporter container
vars:
service_name: "prometheus-libvirt-exporter"
service: "{{ prometheus_services[service_name] }}"
become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
-
-- name: Restart prometheus-msteams container
- vars:
- service_name: "prometheus-msteams"
- service: "{{ prometheus_services[service_name] }}"
- become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
- environment: "{{ service.environment }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/prometheus/tasks/bootstrap.yml b/ansible/roles/prometheus/tasks/bootstrap.yml
index 8086169337..3a488437bb 100644
--- a/ansible/roles/prometheus/tasks/bootstrap.yml
+++ b/ansible/roles/prometheus/tasks/bootstrap.yml
@@ -6,6 +6,7 @@
shard_root_user: "{% if mariadb_loadbalancer == 'haproxy' %}{{ database_user }}{% else %}{{ mariadb_shard_root_user_prefix }}{{ shard_id | string }}{% endif %}"
shard_host: "{{ mariadb_shards_info.shards[shard_id].hosts[0] }}"
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
diff --git a/ansible/roles/prometheus/tasks/check-containers.yml b/ansible/roles/prometheus/tasks/check-containers.yml
index e38d38e4dd..b7e2f7c29f 100644
--- a/ansible/roles/prometheus/tasks/check-containers.yml
+++ b/ansible/roles/prometheus/tasks/check-containers.yml
@@ -1,18 +1,3 @@
---
-- name: Check prometheus containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- pid_mode: "{{ item.value.pid_mode | default('') }}"
- volumes: "{{ item.value.volumes }}"
- dimensions: "{{ item.value.dimensions }}"
- environment: "{{ item.value.environment | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ prometheus_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/prometheus/tasks/config.yml b/ansible/roles/prometheus/tasks/config.yml
index f55f6b5baf..5b9f7a1b79 100644
--- a/ansible/roles/prometheus/tasks/config.yml
+++ b/ansible/roles/prometheus/tasks/config.yml
@@ -7,10 +7,7 @@
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ prometheus_services }}"
+ with_dict: "{{ prometheus_services | select_services_enabled_and_mapped_to_host }}"
- include_tasks: copy-certs.yml
when:
@@ -22,12 +19,7 @@
src: "{{ item.key }}.json.j2"
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ prometheus_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ prometheus_services | select_services_enabled_and_mapped_to_host }}"
- name: Find custom prometheus alert rules files
find:
@@ -48,12 +40,10 @@
dest: "{{ node_config_directory }}/prometheus-server/{{ item.path | basename }}"
mode: "0660"
when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool and enable_prometheus_alertmanager | bool
+ - service | service_enabled_and_mapped_to_host
+ - enable_prometheus_alertmanager | bool
- prometheus_alert_rules is defined and prometheus_alert_rules.files | length > 0
with_items: "{{ prometheus_alert_rules.files }}"
- notify:
- - Restart prometheus-server container
- name: Find prometheus common config overrides
find:
@@ -87,15 +77,25 @@
dest: "{{ node_config_directory }}/prometheus-server/prometheus.yml"
mode: "0660"
extend_lists: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_first_found:
- "{{ node_custom_config }}/prometheus/{{ inventory_hostname }}/prometheus.yml"
- "{{ node_custom_config }}/prometheus/prometheus.yml"
- "{{ role_path }}/templates/prometheus.yml.j2"
- notify:
- - Restart prometheus-server container
+
+- name: Copying over prometheus web config file
+ become: true
+ vars:
+ service: "{{ prometheus_services['prometheus-server'] }}"
+ template:
+ src: "{{ item }}"
+ dest: "{{ node_config_directory }}/prometheus-server/web.yml"
+ mode: "0600"
+ when: service | service_enabled_and_mapped_to_host
+ with_first_found:
+ - "{{ node_custom_config }}/prometheus/{{ inventory_hostname }}/web.yml"
+ - "{{ node_custom_config }}/prometheus/web.yml"
+ - "{{ role_path }}/templates/prometheus-web.yml.j2"
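web.yml is the standard Prometheus web configuration file that --web.config.file in prometheus_server_command points at; serving basic auth is its main job here. A minimal sketch of the rendered result, assuming the bundled template maps prometheus_basic_auth_users to bcrypt entries (hash value illustrative):

    basic_auth_users:
      admin: $2b$12$hNf2lSsxfm0.i4a.1kVpSOVyBCfIB51VRjgBUyv6kdnyTlgWj81Ay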
- name: Copying over prometheus alertmanager config file
become: true
@@ -105,15 +105,11 @@
src: "{{ item }}"
dest: "{{ node_config_directory }}/prometheus-alertmanager/prometheus-alertmanager.yml"
mode: "0660"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_first_found:
- "{{ node_custom_config }}/prometheus/{{ inventory_hostname }}/prometheus-alertmanager.yml"
- "{{ node_custom_config }}/prometheus/prometheus-alertmanager.yml"
- "{{ role_path }}/templates/prometheus-alertmanager.yml.j2"
- notify:
- - Restart prometheus-alertmanager container
- name: Find custom Alertmanager alert notification templates
find:
@@ -134,12 +130,9 @@
dest: "{{ node_config_directory }}/prometheus-alertmanager/{{ item.path | basename }}"
mode: 0660
when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ - service | service_enabled_and_mapped_to_host
- alertmanager_notification_templates is defined and alertmanager_notification_templates.files | length > 0
with_items: "{{ alertmanager_notification_templates.files }}"
- notify:
- - Restart prometheus-alertmanager container
- name: Copying over my.cnf for mysqld_exporter
become: true
@@ -152,11 +145,7 @@
- "{{ role_path }}/templates/my.cnf.j2"
dest: "{{ node_config_directory }}/prometheus-mysqld-exporter/my.cnf"
mode: "0660"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart prometheus-mysqld-exporter container
+ when: service | service_enabled_and_mapped_to_host
- name: Copying cloud config file for openstack exporter
become: true
@@ -166,15 +155,11 @@
src: "{{ item }}"
dest: "{{ node_config_directory }}/prometheus-openstack-exporter/clouds.yml"
mode: "0660"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_first_found:
- "{{ node_custom_config }}/prometheus-openstack-exporter/{{ inventory_hostname }}/clouds.yml"
- "{{ node_custom_config }}/prometheus-openstack-exporter/clouds.yml"
- "{{ role_path }}/templates/clouds.yml.j2"
- notify:
- - Restart prometheus-openstack-exporter container
- name: Copying config file for blackbox exporter
become: true
@@ -184,15 +169,11 @@
src: "{{ item }}"
dest: "{{ node_config_directory }}/prometheus-blackbox-exporter/prometheus-blackbox-exporter.yml"
mode: "0660"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_first_found:
- "{{ node_custom_config }}/prometheus/{{ inventory_hostname }}/prometheus-blackbox-exporter.yml"
- "{{ node_custom_config }}/prometheus/prometheus-blackbox-exporter.yml"
- "{{ role_path }}/templates/prometheus-blackbox-exporter.yml.j2"
- notify:
- - Restart prometheus-blackbox-exporter container
- block:
- name: Find extra prometheus server config files
@@ -229,45 +210,7 @@
dest: "{{ node_config_directory }}/prometheus-server/{{ relpath }}"
mode: "0660"
with_items: "{{ prometheus_config_extras_result.files | default([]) | map(attribute='path') | list }}"
- notify:
- - Restart prometheus-server container
vars:
base: "{{ node_custom_config }}/prometheus/"
service: "{{ prometheus_services['prometheus-server'] }}"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
-
-- name: Copying over prometheus msteams config file
- vars:
- service: "{{ prometheus_services['prometheus-msteams'] }}"
- template:
- src: "{{ item }}"
- dest: "{{ node_config_directory }}/prometheus-msteams/msteams.yml"
- become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- with_first_found:
- - "{{ node_custom_config }}/prometheus/{{ inventory_hostname }}/prometheus-msteams.yml"
- - "{{ node_custom_config }}/prometheus/prometheus-msteams.yml"
- - "{{ role_path }}/templates/prometheus-msteams.yml.j2"
- notify:
- - Restart prometheus-msteams container
-
-- name: Copying over prometheus msteams template file
- vars:
- service: "{{ prometheus_services['prometheus-msteams'] }}"
- copy:
- src: "{{ item }}"
- dest: "{{ node_config_directory }}/prometheus-msteams/msteams.tmpl"
- become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- with_first_found:
- - "{{ node_custom_config }}/prometheus/{{ inventory_hostname }}/prometheus-msteams.tmpl"
- - "{{ node_custom_config }}/prometheus/prometheus-msteams.tmpl"
- - "{{ role_path }}/templates/prometheus-msteams.tmpl"
- notify:
- - Restart prometheus-msteams container
+ when: service | service_enabled_and_mapped_to_host
diff --git a/ansible/roles/prometheus/tasks/config_validate.yml b/ansible/roles/prometheus/tasks/config_validate.yml
new file mode 100644
index 0000000000..700dba68f5
--- /dev/null
+++ b/ansible/roles/prometheus/tasks/config_validate.yml
@@ -0,0 +1,28 @@
+---
+- name: Validating prometheus config files
+ vars:
+ service: "{{ prometheus_services['prometheus-server'] }}"
+ shell: >-
+ {{ kolla_container_engine }} exec -i {{ service.container_name }} bash -c
+ "/opt/prometheus/promtool check config /etc/prometheus/prometheus.yml;
+ /opt/prometheus/promtool check web-config /etc/prometheus/web.yml"
+ register: "prometheus_config_validation_results"
+ check_mode: false
+ become: true
+ when:
+ - inventory_hostname in groups[service.group]
+ - service.enabled | bool
+
+- name: Assert prometheus config files are valid
+ vars:
+ service: "{{ prometheus_services['prometheus-server'] }}"
+ assert:
+ that: "prometheus_config_validation_results.rc == 0"
+ fail_msg: >-
+ {{ service.container_name }} config files are invalid, the output was:
+ {{ prometheus_config_validation_results.stdout }}
+ success_msg: >-
+ {{ service.container_name }} config files are valid
+ when:
+ - inventory_hostname in groups[service.group]
+ - service.enabled | bool
diff --git a/ansible/roles/prometheus/tasks/precheck.yml b/ansible/roles/prometheus/tasks/precheck.yml
index cfd2aefe6b..8b48dc138f 100644
--- a/ansible/roles/prometheus/tasks/precheck.yml
+++ b/ansible/roles/prometheus/tasks/precheck.yml
@@ -8,10 +8,11 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- prometheus_server
- prometheus_node_exporter
- - prometheus_haproxy_exporter
- prometheus_memcached_exporter
- prometheus_mysqld_exporter
- prometheus_cadvisor
@@ -20,9 +21,39 @@
- prometheus_elasticsearch_exporter
- prometheus_blackbox_exporter
- prometheus_libvirt_exporter
- - prometheus_msteams
+ check_mode: false
register: container_facts
+- name: Check that prometheus_bcrypt_salt is correctly set
+ assert:
+ that:
+ - prometheus_bcrypt_salt is defined
+ - prometheus_bcrypt_salt is string
+ - prometheus_bcrypt_salt | length == 22
+
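bcrypt salts are exactly 22 characters drawn from the alphabet [./A-Za-z0-9], which is what the length assertion encodes. An illustrative passwords.yml entry (the value is a placeholder, not a real secret):

    prometheus_bcrypt_salt: "c2FsdHNhbHRzYWx0c2FsdA"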
+- name: Check that prometheus_password is correctly set
+ assert:
+ that:
+ - prometheus_password is defined
+ - prometheus_password is string
+ - prometheus_password | length > 0
+
+- name: Check that prometheus_grafana_password is correctly set
+ assert:
+ that:
+ - prometheus_grafana_password is defined
+ - prometheus_grafana_password is string
+ - prometheus_grafana_password | length > 0
+ when: enable_grafana | bool
+
+- name: Check that prometheus_skyline_password is correctly set
+ assert:
+ that:
+ - prometheus_skyline_password is defined
+ - prometheus_skyline_password is string
+ - prometheus_skyline_password | length > 0
+ when: enable_skyline | bool
+
- name: Checking free port for Prometheus server
wait_for:
host: "{{ 'api' | kolla_address }}"
@@ -59,18 +90,6 @@
- inventory_hostname in groups['prometheus-mysqld-exporter']
- enable_prometheus_mysqld_exporter | bool
-- name: Checking free port for Prometheus haproxy_exporter
- wait_for:
- host: "{{ 'api' | kolla_address }}"
- port: "{{ prometheus_haproxy_exporter_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - container_facts['prometheus_haproxy_exporter'] is not defined
- - inventory_hostname in groups['prometheus-haproxy-exporter']
- - enable_prometheus_haproxy_exporter | bool
-
- name: Checking free port for Prometheus memcached_exporter
wait_for:
host: "{{ 'api' | kolla_address }}"
@@ -165,17 +184,3 @@
- enable_prometheus_libvirt_exporter | bool
with_items:
- "{{ prometheus_libvirt_exporter_port }}"
-
-- name: Checking free ports for Prometheus msteams
- wait_for:
- host: "{{ 'api' | kolla_address }}"
- port: "{{ item }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - container_facts['prometheus_msteams'] is not defined
- - inventory_hostname in groups['prometheus-msteams']
- - enable_prometheus_msteams | bool
- with_items:
- - "{{ prometheus_msteams_port }}"
diff --git a/ansible/roles/prometheus/templates/prometheus-alertmanager.json.j2 b/ansible/roles/prometheus/templates/prometheus-alertmanager.json.j2
index 06ab4132d0..93c2767dbc 100644
--- a/ansible/roles/prometheus/templates/prometheus-alertmanager.json.j2
+++ b/ansible/roles/prometheus/templates/prometheus-alertmanager.json.j2
@@ -13,7 +13,13 @@
"optional": true,
"owner": "prometheus",
"perm": "0600"
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/prometheus/templates/prometheus-alertmanager.yml.j2 b/ansible/roles/prometheus/templates/prometheus-alertmanager.yml.j2
index 9e2dbd016e..6968791d24 100644
--- a/ansible/roles/prometheus/templates/prometheus-alertmanager.yml.j2
+++ b/ansible/roles/prometheus/templates/prometheus-alertmanager.yml.j2
@@ -6,26 +6,7 @@ route:
group_wait: 10s
group_interval: 5m
repeat_interval: 3h
-{% if enable_prometheus_msteams | bool %}
- routes:
- - receiver: 'prometheus-msteams'
-{% endif %}
receivers:
- name: default-receiver
-{% if enable_vitrage | bool and enable_vitrage_prometheus_datasource | bool %}
- webhook_configs:
- - send_resolved: true
- url: '{{ vitrage_public_endpoint }}/v1/event'
- http_config:
- basic_auth:
- username: '{{ keystone_admin_user }}'
- password: '{{ keystone_admin_password }}'
-{% endif %}
-{% if enable_prometheus_msteams | bool %}
- - name: 'prometheus-msteams'
- webhook_configs:
- - send_resolved: true
- url: 'http://localhost:{{ prometheus_msteams_port }}/alertmanager'
-{% endif %}
templates:
- '/etc/prometheus/*.tmpl'
diff --git a/ansible/roles/prometheus/templates/prometheus-blackbox-exporter.json.j2 b/ansible/roles/prometheus/templates/prometheus-blackbox-exporter.json.j2
index 68c30fb0d5..924b49550c 100644
--- a/ansible/roles/prometheus/templates/prometheus-blackbox-exporter.json.j2
+++ b/ansible/roles/prometheus/templates/prometheus-blackbox-exporter.json.j2
@@ -6,7 +6,13 @@
"dest": "/etc/prometheus/blackbox.yml",
"owner": "prometheus",
"perm": "0600"
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/prometheus/templates/prometheus-blackbox-exporter.yml.j2 b/ansible/roles/prometheus/templates/prometheus-blackbox-exporter.yml.j2
index bc2f513e46..acbd1b42fb 100644
--- a/ansible/roles/prometheus/templates/prometheus-blackbox-exporter.yml.j2
+++ b/ansible/roles/prometheus/templates/prometheus-blackbox-exporter.yml.j2
@@ -26,3 +26,24 @@ modules:
- expect: "^SSH-2.0-"
icmp:
prober: icmp
+ http_2xx_opensearch_dashboards:
+ prober: http
+ timeout: 5s
+ http:
+ basic_auth:
+ username: {{ opensearch_dashboards_user }}
+ password: {{ opensearch_dashboards_password }}
+ http_2xx_prometheus:
+ prober: http
+ timeout: 5s
+ http:
+ basic_auth:
+ username: admin
+ password: {{ prometheus_password }}
+ http_2xx_alertmanager:
+ prober: http
+ timeout: 5s
+ http:
+ basic_auth:
+ username: {{ prometheus_alertmanager_user }}
+ password: {{ prometheus_alertmanager_password }}
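Each module above is selected by the middle field of a blackbox target string, so probes against the now password-protected endpoints authenticate correctly. An illustrative target using the new Alertmanager module:

    - "myalerts:http_2xx_alertmanager:https://alerts.example.com:9093"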
diff --git a/ansible/roles/prometheus/templates/prometheus-cadvisor.json.j2 b/ansible/roles/prometheus/templates/prometheus-cadvisor.json.j2
index b517b1b1d7..27160a8155 100644
--- a/ansible/roles/prometheus/templates/prometheus-cadvisor.json.j2
+++ b/ansible/roles/prometheus/templates/prometheus-cadvisor.json.j2
@@ -6,6 +6,12 @@
"path": "/var/log/kolla/prometheus",
"owner": "prometheus:prometheus",
"recurse": true
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/prometheus/templates/prometheus-elasticsearch-exporter.json.j2 b/ansible/roles/prometheus/templates/prometheus-elasticsearch-exporter.json.j2
index fe881babb7..46c25c1267 100644
--- a/ansible/roles/prometheus/templates/prometheus-elasticsearch-exporter.json.j2
+++ b/ansible/roles/prometheus/templates/prometheus-elasticsearch-exporter.json.j2
@@ -1,11 +1,17 @@
{
- "command": "/opt/elasticsearch_exporter/elasticsearch_exporter --es.uri http://{{ api_interface_address | put_address_in_context('url') }}:{{ elasticsearch_port }} --web.listen-address {{ api_interface_address | put_address_in_context('url') }}:{{ prometheus_elasticsearch_exporter_port }}{% if prometheus_elasticsearch_exporter_cmdline_extras %} {{ prometheus_elasticsearch_exporter_cmdline_extras }}{% endif %}",
+ "command": "/opt/elasticsearch_exporter/elasticsearch_exporter --es.uri http://{{ api_interface_address | put_address_in_context('url') }}:{{ opensearch_port }} --web.listen-address {{ api_interface_address | put_address_in_context('url') }}:{{ prometheus_elasticsearch_exporter_port }}{% if prometheus_elasticsearch_exporter_cmdline_extras %} {{ prometheus_elasticsearch_exporter_cmdline_extras }}{% endif %}",
"config_files": [],
"permissions": [
{
"path": "/var/log/kolla/prometheus",
"owner": "prometheus:kolla",
"recurse": true
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/prometheus/templates/prometheus-haproxy-exporter.json.j2 b/ansible/roles/prometheus/templates/prometheus-haproxy-exporter.json.j2
deleted file mode 100644
index c6d43fa5b8..0000000000
--- a/ansible/roles/prometheus/templates/prometheus-haproxy-exporter.json.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "command": "/opt/haproxy_exporter/haproxy_exporter --haproxy.scrape-uri unix:/var/lib/kolla/haproxy/haproxy.sock --web.listen-address {{ api_interface_address | put_address_in_context('url') }}:{{ prometheus_haproxy_exporter_port }}{% if prometheus_haproxy_exporter_cmdline_extras %} {{ prometheus_haproxy_exporter_cmdline_extras }}{% endif %}",
- "config_files": [],
- "permissions": [
- {
- "path": "/var/log/kolla/prometheus",
- "owner": "prometheus:kolla",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/prometheus/templates/prometheus-libvirt-exporter.json.j2 b/ansible/roles/prometheus/templates/prometheus-libvirt-exporter.json.j2
index 518358d8b1..ca67b6a422 100644
--- a/ansible/roles/prometheus/templates/prometheus-libvirt-exporter.json.j2
+++ b/ansible/roles/prometheus/templates/prometheus-libvirt-exporter.json.j2
@@ -1,4 +1,12 @@
{
"command": "/opt/libvirt-exporter --web.listen-address={{ api_interface_address }}:{{ prometheus_libvirt_exporter_port }}",
- "config_files": []
+ "config_files": [
+ {% if kolla_copy_ca_into_containers | bool %}
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
+ ]
}
diff --git a/ansible/roles/prometheus/templates/prometheus-memcached-exporter.json.j2 b/ansible/roles/prometheus/templates/prometheus-memcached-exporter.json.j2
index 0974a629f8..1a02e3fb12 100644
--- a/ansible/roles/prometheus/templates/prometheus-memcached-exporter.json.j2
+++ b/ansible/roles/prometheus/templates/prometheus-memcached-exporter.json.j2
@@ -6,6 +6,12 @@
"path": "/var/log/kolla/prometheus",
"owner": "prometheus:kolla",
"recurse": true
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/prometheus/templates/prometheus-msteams.json.j2 b/ansible/roles/prometheus/templates/prometheus-msteams.json.j2
deleted file mode 100644
index 758be92a72..0000000000
--- a/ansible/roles/prometheus/templates/prometheus-msteams.json.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-{
- "command": "/opt/prometheus-msteams -http-addr localhost:{{ prometheus_msteams_port }} -config-file /etc/msteams/msteams.yml -template-file /etc/msteams/msteams.tmpl",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/msteams.yml",
- "dest": "/etc/msteams/msteams.yml",
- "owner": "prometheus",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/msteams.tmpl",
- "dest": "/etc/msteams/msteams.tmpl",
- "owner": "prometheus",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/prometheus",
- "owner": "prometheus:kolla",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/prometheus/templates/prometheus-msteams.yml.j2 b/ansible/roles/prometheus/templates/prometheus-msteams.yml.j2
deleted file mode 100644
index 49ad1d12dc..0000000000
--- a/ansible/roles/prometheus/templates/prometheus-msteams.yml.j2
+++ /dev/null
@@ -1,2 +0,0 @@
-connectors:
- - alertmanager: "{{ prometheus_msteams_webhook_url }}"
diff --git a/ansible/roles/prometheus/templates/prometheus-mysqld-exporter.json.j2 b/ansible/roles/prometheus/templates/prometheus-mysqld-exporter.json.j2
index 7d9ada68b9..0ab96dd6af 100644
--- a/ansible/roles/prometheus/templates/prometheus-mysqld-exporter.json.j2
+++ b/ansible/roles/prometheus/templates/prometheus-mysqld-exporter.json.j2
@@ -6,7 +6,13 @@
"dest": "/etc/prometheus/my.cnf",
"owner": "prometheus",
"perm": "0600"
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/prometheus/templates/prometheus-node-exporter.json.j2 b/ansible/roles/prometheus/templates/prometheus-node-exporter.json.j2
index b874469556..3835c93ab4 100644
--- a/ansible/roles/prometheus/templates/prometheus-node-exporter.json.j2
+++ b/ansible/roles/prometheus/templates/prometheus-node-exporter.json.j2
@@ -6,6 +6,12 @@
"path": "/var/log/kolla/prometheus",
"owner": "prometheus:kolla",
"recurse": true
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/prometheus/templates/prometheus-openstack-exporter.json.j2 b/ansible/roles/prometheus/templates/prometheus-openstack-exporter.json.j2
index a405934e4e..48bec881d1 100644
--- a/ansible/roles/prometheus/templates/prometheus-openstack-exporter.json.j2
+++ b/ansible/roles/prometheus/templates/prometheus-openstack-exporter.json.j2
@@ -6,7 +6,13 @@
"dest": "/etc/openstack/clouds.yml",
"owner": "prometheus",
"perm": "0600"
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/prometheus/templates/prometheus-server.json.j2 b/ansible/roles/prometheus/templates/prometheus-server.json.j2
index d57469ff2c..4f875bf3bb 100644
--- a/ansible/roles/prometheus/templates/prometheus-server.json.j2
+++ b/ansible/roles/prometheus/templates/prometheus-server.json.j2
@@ -1,5 +1,5 @@
{
- "command": "/opt/prometheus/prometheus --config.file /etc/prometheus/prometheus.yml --web.listen-address {{ api_interface_address | put_address_in_context('url') }}:{{ prometheus_port }} --web.external-url={{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ prometheus_port }} --storage.tsdb.path /var/lib/prometheus{% if prometheus_cmdline_extras %} {{ prometheus_cmdline_extras }}{% endif %}",
+ "command": "{{ prometheus_server_command }}",
"config_files": [
{
"source": "{{ container_config_directory }}/prometheus.yml",
@@ -7,21 +7,32 @@
"owner": "prometheus",
"perm": "0600"
},
+ {
+ "source": "{{ container_config_directory }}/web.yml",
+ "dest": "/etc/prometheus/web.yml",
+ "owner": "prometheus",
+ "perm": "0600"
+ },
{
"source": "{{ container_config_directory }}/extras/*",
"dest": "/etc/prometheus/extras/",
"preserve_properties": true,
"optional": true
}
-{% if enable_prometheus_alertmanager | bool %}
- ,{
+ {% if enable_prometheus_alertmanager | bool %},
+ {
"source": "{{ container_config_directory }}/*.rules",
"dest": "/etc/prometheus/",
"optional": true,
"owner": "prometheus",
"perm": "0600"
- }
-{% endif %}
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/prometheus/templates/prometheus-web.yml.j2 b/ansible/roles/prometheus/templates/prometheus-web.yml.j2
new file mode 100644
index 0000000000..67e0554285
--- /dev/null
+++ b/ansible/roles/prometheus/templates/prometheus-web.yml.j2
@@ -0,0 +1,4 @@
+basic_auth_users:
+{% for user in prometheus_basic_auth_users | selectattr('enabled') | list %}
+ {{ user.username }}: {{ user.password | password_hash('bcrypt', salt=prometheus_bcrypt_salt) }}
+{% endfor %}
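This new template generates Prometheus's web configuration for HTTP basic authentication, hashing each enabled user's password with bcrypt. As a sketch, with a single enabled user and an illustrative hash, the rendered web.yml would look like:

    basic_auth_users:
      admin: $2b$12$hNf2lSsxfm0.i4a.1kVpSOVyBCfIB51VRjgBUyv6kdnyTlgWj81Ay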
diff --git a/ansible/roles/prometheus/templates/prometheus.yml.j2 b/ansible/roles/prometheus/templates/prometheus.yml.j2
index 85483cc918..99d1298a61 100644
--- a/ansible/roles/prometheus/templates/prometheus.yml.j2
+++ b/ansible/roles/prometheus/templates/prometheus.yml.j2
@@ -18,78 +18,121 @@ rule_files:
scrape_configs:
- job_name: prometheus
+ basic_auth:
+ username: admin
+ password: "{{ prometheus_password }}"
static_configs:
- - targets:
{% for host in groups['prometheus'] %}
+ - targets:
- '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ prometheus_port }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
{% endfor %}
{% if enable_prometheus_node_exporter | bool %}
- job_name: node
static_configs:
- - targets:
{% for host in groups['prometheus-node-exporter'] %}
+ - targets:
- '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['prometheus_node_exporter_port'] }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
+{% endfor %}
+{% for target in prometheus_node_exporter_targets_extra %}
+ - targets:
+ - '{{ target.target }}'
+{% if target.labels | default({}, true) %}
+ labels: {{ target.labels | to_json }}
+{% endif %}
{% endfor %}
{% endif %}
{% if enable_prometheus_mysqld_exporter | bool %}
- job_name: mysqld
static_configs:
- - targets:
{% for host in groups['prometheus-mysqld-exporter'] %}
+ - targets:
- '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['prometheus_mysqld_exporter_port'] }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
{% endfor %}
{% endif %}
{% if enable_prometheus_haproxy_exporter | bool %}
- job_name: haproxy
static_configs:
+{% for host in groups['loadbalancer'] %}
- targets:
-{% for host in groups['prometheus-haproxy-exporter'] %}
- - '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['prometheus_haproxy_exporter_port'] }}'
+ - '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ prometheus_haproxy_exporter_port }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
{% endfor %}
{% endif %}
{% if enable_prometheus_rabbitmq_exporter | bool %}
- job_name: rabbitmq
static_configs:
- - targets:
{% for host in groups['rabbitmq'] %}
+ - targets:
- '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['prometheus_rabbitmq_exporter_port'] }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
{% endfor %}
{% endif %}
{% if enable_prometheus_memcached_exporter | bool %}
- job_name: memcached
static_configs:
- - targets:
{% for host in groups['prometheus-memcached-exporter'] %}
+ - targets:
- '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['prometheus_memcached_exporter_port'] }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
{% endfor %}
{% endif %}
{% if enable_prometheus_cadvisor | bool %}
- job_name: cadvisor
static_configs:
- - targets:
{% for host in groups["prometheus-cadvisor"] %}
+ - targets:
- '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['prometheus_cadvisor_port'] }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
{% endfor %}
{% endif %}
{% if enable_prometheus_fluentd_integration | bool %}
- job_name: fluentd
static_configs:
- - targets:
{% for host in groups['fluentd'] %}
+ - targets:
- '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['prometheus_fluentd_integration_port'] }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
{% endfor %}
{% endif %}
{% if enable_prometheus_ceph_mgr_exporter | bool %}
- job_name: ceph_mgr_exporter
honor_labels: true
+ scrape_interval: {{ prometheus_ceph_exporter_interval }}
static_configs:
- targets:
{% for exporter in prometheus_ceph_mgr_exporter_endpoints %}
@@ -107,16 +150,20 @@ scrape_configs:
honor_labels: true
static_configs:
- targets:
- - '{{ kolla_internal_vip_address | put_address_in_context('url') }}:{{ prometheus_openstack_exporter_port }}'
+ - '{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ prometheus_openstack_exporter_port }}'
{% endif %}
{% if enable_prometheus_elasticsearch_exporter | bool %}
- job_name: elasticsearch_exporter
scrape_interval: {{ prometheus_elasticsearch_exporter_interval }}
static_configs:
- - targets:
{% for host in groups["prometheus-elasticsearch-exporter"] %}
+ - targets:
- '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['prometheus_elasticsearch_exporter_port'] }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
{% endfor %}
{% endif %}
@@ -155,34 +202,78 @@ scrape_configs:
scrape_interval: {{ prometheus_libvirt_exporter_interval }}
honor_labels: true
static_configs:
- - targets:
{% for host in groups["prometheus-libvirt-exporter"] %}
+ - targets:
- '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['prometheus_libvirt_exporter_port'] }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
{% endfor %}
{% endif %}
{% if enable_prometheus_etcd_integration | bool %}
- job_name: etcd
+{% if etcd_enable_tls | bool %}
+ scheme: https
+{% endif %}
static_configs:
- - targets:
{% for host in groups["etcd"] %}
+ - targets:
- '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['prometheus_etcd_integration_port'] }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
+{% endfor %}
+{% endif %}
+
+{% if enable_ironic_prometheus_exporter | bool %}
+ - job_name: ironic_prometheus_exporter
+ static_configs:
+{% for host in groups['ironic-conductor'] %}
+ - targets: ["{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['ironic_prometheus_exporter_port'] }}"]
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
{% endfor %}
{% endif %}
{% if enable_prometheus_alertmanager | bool %}
- job_name: alertmanager
static_configs:
- - targets:
{% for host in groups['prometheus-alertmanager'] %}
+ - targets:
- '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['prometheus_alertmanager_port'] }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
{% endfor %}
+{% if enable_prometheus_proxysql_exporter | bool %}
+ - job_name: proxysql
+ static_configs:
+{% for host in groups["loadbalancer"] %}
+ - targets:
+ - '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['proxysql_prometheus_exporter_port'] }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
+{% endfor %}
+{% endif %}
+
alerting:
alertmanagers:
- - static_configs:
- - targets:
+ - static_configs:
{% for host in groups["prometheus-alertmanager"] %}
+ - targets:
- '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['prometheus_alertmanager_port'] }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
{% endfor %}
{% endif %}
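Throughout this template, each scrape job now emits its targets one host at a time so that an optional per-host labels block can follow. For a host with prometheus_instance_label set to "controller0" (address and port illustrative), the rendered node job would come out roughly as:

    - job_name: node
      static_configs:
        - targets:
            - '192.0.2.10:9100'
          labels:
            instance: "controller0"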
diff --git a/ansible/roles/prune-images/tasks/prune_images.yml b/ansible/roles/prune-images/tasks/prune_images.yml
index 154e5e5f04..c1cea204fd 100644
--- a/ansible/roles/prune-images/tasks/prune_images.yml
+++ b/ansible/roles/prune-images/tasks/prune_images.yml
@@ -6,3 +6,11 @@
images_filters:
label: kolla_version
timeout: "{{ docker_image_prune_timeout }}"
+ when: kolla_container_engine == 'docker'
+
+# NOTE(m.hiner): Podman does not (yet?) have an equivalent of docker_prune
+# and the generic podman_image module does not support label filters
+- name: Pruning Kolla images
+ become: true
+ command: podman image prune --force --filter 'label=kolla_version'
+ when: kolla_container_engine == 'podman'

diff --git a/ansible/roles/prune-images/tasks/validate_docker_execute.yml b/ansible/roles/prune-images/tasks/validate_docker_execute.yml
index 0e85832c65..ff17105a76 100644
--- a/ansible/roles/prune-images/tasks/validate_docker_execute.yml
+++ b/ansible/roles/prune-images/tasks/validate_docker_execute.yml
@@ -1,3 +1,5 @@
---
- name: Ensure the docker service is running
+ environment:
+ CONTAINER_ENGINE: "{{ kolla_container_engine }}"
script: ../tools/validate-docker-execute.sh
diff --git a/ansible/roles/rabbitmq/defaults/main.yml b/ansible/roles/rabbitmq/defaults/main.yml
index 867e6cd050..17f25a62eb 100644
--- a/ansible/roles/rabbitmq/defaults/main.yml
+++ b/ansible/roles/rabbitmq/defaults/main.yml
@@ -23,33 +23,11 @@ rabbitmq_services:
mode: "http"
port: "{{ rabbitmq_management_port }}"
host_group: "rabbitmq"
- rabbitmq_outward_management:
- enabled: "{{ enable_outward_rabbitmq }}"
- mode: "http"
- port: "{{ outward_rabbitmq_management_port }}"
- host_group: "outward-rabbitmq"
- rabbitmq_outward_external:
- enabled: "{{ enable_outward_rabbitmq }}"
- mode: "tcp"
- external: true
- port: "{{ outward_rabbitmq_port }}"
- host_group: "outward-rabbitmq"
- frontend_tcp_extra:
- - "timeout client {{ haproxy_outward_rabbitmq_client_timeout }}"
- backend_tcp_extra:
- - "timeout server {{ haproxy_outward_rabbitmq_server_timeout }}"
-
-####################
-# HAProxy
-####################
-haproxy_outward_rabbitmq_client_timeout: "1h"
-haproxy_outward_rabbitmq_server_timeout: "1h"
-
####################
# Docker
####################
-rabbitmq_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/rabbitmq"
+rabbitmq_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}rabbitmq"
rabbitmq_tag: "{{ openstack_tag }}"
rabbitmq_image_full: "{{ rabbitmq_image }}:{{ rabbitmq_tag }}"
rabbitmq_dimensions: "{{ default_container_dimensions }}"
@@ -71,7 +49,7 @@ rabbitmq_default_volumes:
- "{{ node_config_directory }}/{{ project_name }}/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "{{ project_name }}:/var/lib/rabbitmq/"
+ - "{{ rabbitmq_datadir_volume }}:/var/lib/rabbitmq/"
- "kolla_logs:/var/log/kolla/"
rabbitmq_extra_volumes: "{{ default_extra_volumes }}"
@@ -84,6 +62,28 @@ rabbitmq_server_additional_erl_args: "+S 2:2 +sbwt none +sbwtdcpu none +sbwtdio
rabbitmq_tls_options: {}
# To avoid split-brain
rabbitmq_cluster_partition_handling: "pause_minority"
+# For consistency use "when-synced"; for availability use "always".
+# The RabbitMQ default for HA queues is "when-synced".
+# For more details, see:
+# https://www.rabbitmq.com/ha.html#promoting-unsynchronised-mirrors
+rabbitmq_ha_promote_on_shutdown: "always"
+# The number of rabbitmq replicas should follow this advice:
+# https://www.rabbitmq.com/ha.html#replication-factor
+# This means that with three RabbitMQ nodes, two replicas of all
+# queues and exchanges are requested.
+# Note: this assumes an odd number of RabbitMQ nodes.
+# If no replica count is specified, queues are replicated across all
+# nodes using "ha-mode":"all". Otherwise,
+# "ha-mode":"exactly","ha-params":{{ rabbitmq_ha_replica_count | int }} is used.
+rabbitmq_server_count: "{{ groups[role_rabbitmq_groups] | length }}"
+rabbitmq_ha_replica_count: "{{ (rabbitmq_server_count | int // 2 + 1) }}"
+# If no TTL is specified, messages will not expire.
+# Set the max message TTL to 10 minutes (longer than the 1 minute RPC
+# timeout) so that queues with no consumers do not grow unbounded.
+rabbitmq_message_ttl_ms: 600000
+# If no queue expiry is specified, queues will not expire
+rabbitmq_queue_expiry_ms: 3600000
+rabbitmq_extra_config: {}
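Worked through for a three-node cluster: rabbitmq_server_count evaluates to 3, so the default replica count is 3 // 2 + 1 = 2; a five-node cluster would get 3. In variable form:

    rabbitmq_server_count: 3        # length of the rabbitmq group
    rabbitmq_ha_replica_count: 2    # 3 // 2 + 1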
####################
# Plugins
@@ -96,3 +96,13 @@ rabbitmq_plugins:
enabled: "{{ rabbitmq_enable_prometheus_plugin | bool }}"
rabbitmq_enabled_plugins: "{{ rabbitmq_plugins | selectattr('enabled', 'equalto', true) | list }}"
+
+kolla_externally_managed_cert: False
+
+rabbitmq_version_suffix: ""
+
+####################
+# TLS
+####################
+rabbitmq_enable_tls_backend: "{{ rabbitmq_enable_tls }}"
+rabbitmq_copy_certs: "{{ kolla_copy_ca_into_containers | bool or rabbitmq_enable_tls | bool }}"
diff --git a/ansible/roles/rabbitmq/handlers/main.yml b/ansible/roles/rabbitmq/handlers/main.yml
index cd5e39eb57..ea7652fa27 100644
--- a/ansible/roles/rabbitmq/handlers/main.yml
+++ b/ansible/roles/rabbitmq/handlers/main.yml
@@ -1,26 +1,4 @@
---
-# NOTE(mgoddard): These tasks perform a 'full stop upgrade', which is necessary when moving between
-# major releases. In future kolla-ansible releases we may be able to change this to a rolling
-# restart. For info on this process see https://www.rabbitmq.com/upgrade.html
-
-- name: Restart first rabbitmq container
- vars:
- service_name: "rabbitmq"
- service: "{{ rabbitmq_services[service_name] }}"
- include_tasks: 'restart_services.yml'
- when:
- - kolla_action != "config"
- - inventory_hostname == groups[service.group] | first
- listen: Restart rabbitmq container
-
-- name: Restart remaining rabbitmq containers
- vars:
- service_name: "rabbitmq"
- service: "{{ rabbitmq_services[service_name] }}"
- include_tasks: 'restart_services.yml'
- when:
- - kolla_action != "config"
- - inventory_hostname == item
- - inventory_hostname != groups[service.group] | first
- loop: "{{ groups[service.group] }}"
- listen: Restart rabbitmq container
+- name: Restart rabbitmq container
+ group_by:
+ key: "{{ project_name }}_restart"
diff --git a/ansible/roles/rabbitmq/tasks/bootstrap.yml b/ansible/roles/rabbitmq/tasks/bootstrap.yml
index 13e0933970..a63e2073c9 100644
--- a/ansible/roles/rabbitmq/tasks/bootstrap.yml
+++ b/ansible/roles/rabbitmq/tasks/bootstrap.yml
@@ -1,7 +1,7 @@
---
- name: Creating rabbitmq volume
become: true
- kolla_docker:
+ kolla_container:
action: "create_volume"
common_options: "{{ docker_common_options }}"
name: "{{ project_name }}"
@@ -12,7 +12,7 @@
service_name: "rabbitmq"
service: "{{ rabbitmq_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -21,6 +21,6 @@
labels:
BOOTSTRAP:
name: "{{ project_name }}_bootstrap"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ service.volumes }}"
when: rabbitmq_volume is changed
diff --git a/ansible/roles/rabbitmq/tasks/check-containers.yml b/ansible/roles/rabbitmq/tasks/check-containers.yml
index 72cbb2ba6b..b7e2f7c29f 100644
--- a/ansible/roles/rabbitmq/tasks/check-containers.yml
+++ b/ansible/roles/rabbitmq/tasks/check-containers.yml
@@ -1,18 +1,3 @@
---
-- name: Check rabbitmq containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes }}"
- environment: "{{ item.value.environment }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ rabbitmq_services }}"
- notify:
- - Restart rabbitmq container
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/rabbitmq/tasks/config.yml b/ansible/roles/rabbitmq/tasks/config.yml
index b8fed458c6..c066d82cdf 100644
--- a/ansible/roles/rabbitmq/tasks/config.yml
+++ b/ansible/roles/rabbitmq/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ rabbitmq_services }}"
+ with_dict: "{{ rabbitmq_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over config.json files for services
template:
@@ -18,12 +15,7 @@
dest: "{{ node_config_directory }}/{{ project_name }}/config.json"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ rabbitmq_services }}"
- notify:
- - Restart rabbitmq container
+ with_dict: "{{ rabbitmq_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over rabbitmq-env.conf
become: true
@@ -37,11 +29,7 @@
- "{{ node_custom_config }}/rabbitmq/{{ inventory_hostname }}/rabbitmq-env.conf"
- "{{ node_custom_config }}/rabbitmq/rabbitmq-env.conf"
- "rabbitmq-env.conf.j2"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart rabbitmq container
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over rabbitmq.conf
become: true
@@ -55,11 +43,7 @@
- "{{ node_custom_config }}/rabbitmq/{{ inventory_hostname }}/rabbitmq.conf"
- "{{ node_custom_config }}/rabbitmq/rabbitmq.conf"
- "rabbitmq.conf.j2"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart rabbitmq container
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over erl_inetrc
become: true
@@ -73,11 +57,7 @@
- "{{ node_custom_config }}/rabbitmq/{{ inventory_hostname }}/erl_inetrc"
- "{{ node_custom_config }}/rabbitmq/erl_inetrc"
- "erl_inetrc.j2"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart rabbitmq container
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over advanced.config
become: true
@@ -91,11 +71,7 @@
- "{{ node_custom_config }}/rabbitmq/{{ inventory_hostname }}/advanced.config"
- "{{ node_custom_config }}/rabbitmq/advanced.config"
- "advanced.config.j2"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart rabbitmq container
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over definitions.json
become: true
@@ -109,11 +85,7 @@
- "{{ node_custom_config }}/rabbitmq/{{ inventory_hostname }}/definitions.json"
- "{{ node_custom_config }}/rabbitmq/definitions.json"
- "definitions.json.j2"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart rabbitmq container
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over enabled_plugins
become: true
@@ -127,11 +99,8 @@
- "{{ node_custom_config }}/rabbitmq/{{ inventory_hostname }}/enabled_plugins"
- "{{ node_custom_config }}/rabbitmq/enabled_plugins"
- "enabled_plugins.j2"
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart rabbitmq container
+ when: service | service_enabled_and_mapped_to_host
- include_tasks: copy-certs.yml
- when: rabbitmq_enable_tls | bool
+ when:
+ - rabbitmq_copy_certs
diff --git a/ansible/roles/rabbitmq/tasks/config_validate.yml b/ansible/roles/rabbitmq/tasks/config_validate.yml
new file mode 100644
index 0000000000..ed97d539c0
--- /dev/null
+++ b/ansible/roles/rabbitmq/tasks/config_validate.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible/roles/rabbitmq/tasks/copy-certs.yml b/ansible/roles/rabbitmq/tasks/copy-certs.yml
index f3c84a49ab..09b8dfbfe6 100644
--- a/ansible/roles/rabbitmq/tasks/copy-certs.yml
+++ b/ansible/roles/rabbitmq/tasks/copy-certs.yml
@@ -1,52 +1,6 @@
---
-- name: Copying over extra CA certificates
- become: true
+- name: "Copy certificates and keys for {{ project_name }}"
+ import_role:
+ role: service-cert-copy
vars:
- service: "{{ rabbitmq_services['rabbitmq'] }}"
- copy:
- src: "{{ kolla_certificates_dir }}/ca/"
- dest: "{{ node_config_directory }}/{{ project_name }}/ca-certificates"
- mode: "0644"
- when:
- - kolla_copy_ca_into_containers | bool
- - service | service_enabled_and_mapped_to_host
- notify:
- - Restart rabbitmq container
-
-- name: Copying over TLS certificate
- become: true
- vars:
- service: "{{ rabbitmq_services['rabbitmq'] }}"
- copy:
- src: "{{ item }}"
- dest: "{{ node_config_directory }}/{{ project_name }}/{{ project_name }}-cert.pem"
- mode: "0644"
- with_first_found:
- - files:
- - "{{ kolla_certificates_dir }}/{{ inventory_hostname }}/{{ project_name }}-cert.pem"
- - "{{ kolla_certificates_dir }}/{{ inventory_hostname }}-cert.pem"
- - "{{ kolla_certificates_dir }}/{{ project_name }}-cert.pem"
- skip: true
- when:
- - service | service_enabled_and_mapped_to_host
- notify:
- - Restart rabbitmq container
-
-- name: Copying over TLS key
- become: true
- vars:
- service: "{{ rabbitmq_services['rabbitmq'] }}"
- copy:
- src: "{{ item }}"
- dest: "{{ node_config_directory }}/{{ project_name }}/{{ project_name }}-key.pem"
- mode: "0600"
- with_first_found:
- - files:
- - "{{ kolla_certificates_dir }}/{{ inventory_hostname }}/{{ project_name }}-key.pem"
- - "{{ kolla_certificates_dir }}/{{ inventory_hostname }}-key.pem"
- - "{{ kolla_certificates_dir }}/{{ project_name }}-key.pem"
- skip: true
- when:
- - service | service_enabled_and_mapped_to_host
- notify:
- - Restart rabbitmq container
+ project_services: "{{ rabbitmq_services }}"
diff --git a/ansible/roles/rabbitmq/tasks/deploy.yml b/ansible/roles/rabbitmq/tasks/deploy.yml
index d0b36cb78b..5686a515c2 100644
--- a/ansible/roles/rabbitmq/tasks/deploy.yml
+++ b/ansible/roles/rabbitmq/tasks/deploy.yml
@@ -1,9 +1,12 @@
---
+- import_tasks: version-check.yml
+
+- include_tasks: remove-ha-all-policy.yml
+ when:
+ - not om_enable_rabbitmq_high_availability | bool
+
- import_tasks: config.yml
- import_tasks: check-containers.yml
- import_tasks: bootstrap.yml
-
-- name: Flush handlers
- meta: flush_handlers
diff --git a/ansible/roles/rabbitmq/tasks/feature-flags.yml b/ansible/roles/rabbitmq/tasks/feature-flags.yml
new file mode 100644
index 0000000000..01a27ed920
--- /dev/null
+++ b/ansible/roles/rabbitmq/tasks/feature-flags.yml
@@ -0,0 +1,5 @@
+---
+- name: Enable all stable feature flags
+ command: "{{ kolla_container_engine }} exec rabbitmq rabbitmqctl enable_feature_flag all"
+ become: true
+ changed_when: false
diff --git a/ansible/roles/rabbitmq/tasks/post-deploy.yml b/ansible/roles/rabbitmq/tasks/post-deploy.yml
new file mode 100644
index 0000000000..9c04d85c26
--- /dev/null
+++ b/ansible/roles/rabbitmq/tasks/post-deploy.yml
@@ -0,0 +1,2 @@
+---
+- import_tasks: feature-flags.yml
diff --git a/ansible/roles/rabbitmq/tasks/precheck.yml b/ansible/roles/rabbitmq/tasks/precheck.yml
index 19e50de418..1837c1c1fe 100644
--- a/ansible/roles/rabbitmq/tasks/precheck.yml
+++ b/ansible/roles/rabbitmq/tasks/precheck.yml
@@ -8,9 +8,11 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- rabbitmq
- - outward_rabbitmq
+ check_mode: false
register: container_facts
- name: Checking free port for RabbitMQ
@@ -62,6 +64,7 @@
nss_database: "{{ 'ahostsv4' if api_address_family == 'ipv4' else 'ahostsv6' }}"
command: "getent {{ nss_database }} {{ hostvars[item].ansible_facts.hostname }}"
changed_when: false
+ check_mode: false
register: rabbitmq_hostnames
with_items: "{{ groups['rabbitmq'] }}"
@@ -75,119 +78,82 @@
- not item.1 is match('^'+('api' | kolla_address(item.0.item))+'\\b')
- name: Check if TLS certificate exists for RabbitMQ
+ assert:
+ that: cert | length > 0
+ fail_msg: No TLS certificate provided for RabbitMQ.
vars:
cert: "{{ query('first_found', paths, errors='ignore') }}"
paths:
- "{{ kolla_certificates_dir }}/{{ inventory_hostname }}/rabbitmq-cert.pem"
- "{{ kolla_certificates_dir }}/{{ inventory_hostname }}-cert.pem"
- "{{ kolla_certificates_dir }}/rabbitmq-cert.pem"
- fail:
- msg: No TLS certificate provided for RabbitMQ.
when:
+ - not kolla_externally_managed_cert | bool
- rabbitmq_enable_tls | bool
- - cert | length == 0
- name: Check if TLS key exists for RabbitMQ
+ assert:
+ that: key | length > 0
+ fail_msg: No TLS key provided for RabbitMQ.
vars:
key: "{{ query('first_found', paths, errors='ignore') }}"
paths:
- "{{ kolla_certificates_dir }}/{{ inventory_hostname }}/rabbitmq-key.pem"
- "{{ kolla_certificates_dir }}/{{ inventory_hostname }}-key.pem"
- "{{ kolla_certificates_dir }}/rabbitmq-key.pem"
- fail:
- msg: No TLS key provided for RabbitMQ.
when:
+ - not kolla_externally_managed_cert | bool
- rabbitmq_enable_tls | bool
- - key | length == 0
-- name: Checking free port for outward RabbitMQ
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ outward_rabbitmq_port }}"
- connect_timeout: 1
- state: stopped
+- block:
+ - name: List RabbitMQ policies
+ become: true
+ command: "{{ kolla_container_engine }} exec rabbitmq rabbitmqctl list_policies --silent"
+ register: rabbitmq_policies
+ changed_when: false
+ check_mode: false
+
+ - name: Check if RabbitMQ HA needs to be configured
+ assert:
+ that: "'ha-all' in rabbitmq_policies.stdout"
+ fail_msg: >
+ om_enable_rabbitmq_high_availability is True but no mirroring policy has been found.
+ Currently the procedure to migrate from transient non-mirrored queues to durable mirrored queues is manual.
+ Please follow the process described here: https://docs.openstack.org/kolla-ansible/latest/reference/message-queues/rabbitmq.html#high-availability.
+ Note that this process may take several hours on larger systems, and may cause a degradation in performance at large scale.
+ If you do not wish to enable this feature, set om_enable_rabbitmq_high_availability to False.
+
+ run_once: true
when:
- - enable_outward_rabbitmq | bool
- - inventory_hostname in groups['outward-rabbitmq']
- - container_facts['outward_rabbitmq'] is not defined
-
-- name: Checking free port for outward RabbitMQ Management
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ outward_rabbitmq_management_port }}"
- connect_timeout: 1
- state: stopped
+ - container_facts['rabbitmq'] is defined
+ - om_enable_rabbitmq_high_availability | bool
+ tags: rabbitmq-ha-precheck
+
+- block:
+ - name: List RabbitMQ queues
+ become: true
+ command: "{{ kolla_container_engine }} exec rabbitmq rabbitmqctl list_queues --silent name type --formatter json"
+ register: rabbitmq_queues
+ changed_when: false
+ check_mode: false
+
+ - name: Check if RabbitMQ quorum queues need to be configured
+ assert:
+ that: "{{ item.type == 'quorum' }}"
+ fail_msg: >
+ om_enable_rabbitmq_quorum_queues is True but {{ item.name }} is a non-quorum queue.
+ Currently the procedure to migrate to quorum queues is manual.
+ Please follow the process described here: https://docs.openstack.org/kolla-ansible/latest/reference/message-queues/rabbitmq.html#high-availability.
+ Note that this process may take several hours on larger systems, and may cause a degradation in performance at large scale.
+ If you do not wish to enable this feature, set om_enable_rabbitmq_quorum_queues to False.
+ loop: "{{ (rabbitmq_queues.stdout | from_json) if rabbitmq_queues is not skipped else [] }}"
+ loop_control:
+ label: "{{ item.name }}"
+ # TODO(mattcress): remove the skipping of reply_ and _fanout_ queues once https://review.opendev.org/c/openstack/oslo.messaging/+/888479 is merged.
+ when: not (item.name is search('reply_') or item.name is search('_fanout_'))
+
+ run_once: true
when:
- - enable_outward_rabbitmq | bool
- - inventory_hostname in groups['outward-rabbitmq']
- - container_facts['outward_rabbitmq'] is not defined
-
-- name: Checking free port for outward RabbitMQ Cluster
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ outward_rabbitmq_cluster_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - enable_outward_rabbitmq | bool
- - inventory_hostname in groups['outward-rabbitmq']
- - container_facts['outward_rabbitmq'] is not defined
-
-- name: Checking free port for outward RabbitMQ EPMD
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ outward_rabbitmq_epmd_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - enable_outward_rabbitmq | bool
- - inventory_hostname in groups['outward-rabbitmq']
- - container_facts['outward_rabbitmq'] is not defined
-
-- name: Check if all outward rabbit hostnames are resolvable
- vars:
- nss_database: "{{ 'ahostsv4' if api_address_family == 'ipv4' else 'ahostsv6' }}"
- command: "getent {{ nss_database }} {{ hostvars[item].ansible_facts.hostname }}"
- changed_when: false
- register: outward_rabbitmq_hostnames
- with_items: "{{ groups['outward-rabbitmq'] }}"
- when:
- - enable_outward_rabbitmq | bool
-
-- name: Check if each rabbit hostname resolves uniquely to the proper IP address
- fail:
- msg: Hostname has to resolve uniquely to the IP address of api_interface
- with_subelements:
- - "{{ outward_rabbitmq_hostnames.results }}"
- - stdout_lines
- when:
- - enable_outward_rabbitmq | bool
- - not item.1 is match('^'+('api' | kolla_address(item.0.item))+'\\b')
-
-- name: Check if TLS certificate exists for outward RabbitMQ
- vars:
- cert: "{{ query('first_found', paths, errors='ignore') }}"
- paths:
- - "{{ kolla_certificates_dir }}/{{ inventory_hostname }}/outward_rabbitmq-cert.pem"
- - "{{ kolla_certificates_dir }}/{{ inventory_hostname }}-cert.pem"
- - "{{ kolla_certificates_dir }}/outward_rabbitmq-cert.pem"
- fail:
- msg: No TLS certificate provided for outward RabbitMQ.
- when:
- - enable_outward_rabbitmq | bool
- - rabbitmq_enable_tls | bool
- - cert | length == 0
-
-- name: Check if TLS key exists for outward RabbitMQ
- vars:
- key: "{{ query('first_found', paths, errors='ignore') }}"
- paths:
- - "{{ kolla_certificates_dir }}/{{ inventory_hostname }}/outward_rabbitmq-key.pem"
- - "{{ kolla_certificates_dir }}/{{ inventory_hostname }}-key.pem"
- - "{{ kolla_certificates_dir }}/outward_rabbitmq-key.pem"
- fail:
- msg: No TLS key provided for outward RabbitMQ.
- when:
- - enable_outward_rabbitmq | bool
- - rabbitmq_enable_tls | bool
- - key | length == 0
+ - container_facts['rabbitmq'] is defined
+ - om_enable_rabbitmq_quorum_queues | bool
+ tags: rabbitmq-ha-precheck
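For reference, rabbitmqctl list_queues --silent name type --formatter json emits a JSON list of name/type pairs, roughly of this shape (queue names illustrative):

    [
      {"name": "notifications.info", "type": "classic"},
      {"name": "versioned_notifications.info", "type": "quorum"}
    ]

The assertion above iterates over this list and fails on the first non-quorum queue that is not a reply_ or _fanout_ queue.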
diff --git a/ansible/roles/rabbitmq/tasks/remove-ha-all-policy.yml b/ansible/roles/rabbitmq/tasks/remove-ha-all-policy.yml
new file mode 100644
index 0000000000..574069cb77
--- /dev/null
+++ b/ansible/roles/rabbitmq/tasks/remove-ha-all-policy.yml
@@ -0,0 +1,30 @@
+---
+- block:
+ - name: Get container facts
+ become: true
+ kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
+ name:
+ - "{{ service.container_name }}"
+ register: container_facts
+
+ - block:
+ - name: List RabbitMQ policies
+ become: true
+ command: "{{ kolla_container_engine }} exec {{ service.container_name }} rabbitmqctl list_policies --silent"
+ register: rabbitmq_policies
+ changed_when: false
+
+ - name: Remove ha-all policy from RabbitMQ
+ become: true
+ command: "{{ kolla_container_engine }} exec {{ service.container_name }} rabbitmqctl clear_policy ha-all"
+ when:
+ - "'ha-all' in rabbitmq_policies.stdout"
+ when: container_facts[service.container_name] is defined
+
+ delegate_to: "{{ groups[role_rabbitmq_groups] | first }}"
+ run_once: true
+ vars:
+ service_name: "rabbitmq"
+ service: "{{ rabbitmq_services[service_name] }}"
diff --git a/ansible/roles/rabbitmq/tasks/reset-state.yml b/ansible/roles/rabbitmq/tasks/reset-state.yml
new file mode 100644
index 0000000000..ac9984467f
--- /dev/null
+++ b/ansible/roles/rabbitmq/tasks/reset-state.yml
@@ -0,0 +1,16 @@
+---
+- name: Stop the RabbitMQ application
+ become: true
+ command: "{{ kolla_container_engine }} exec rabbitmq rabbitmqctl stop_app"
+
+- name: Reset the state of RabbitMQ
+ become: true
+ command: "{{ kolla_container_engine }} exec rabbitmq rabbitmqctl force_reset"
+
+- name: Start the RabbitMQ application
+ become: true
+ command: "{{ kolla_container_engine }} exec rabbitmq rabbitmqctl start_app"
+
+- name: Wait for all RabbitMQ nodes to join the cluster
+ become: true
+ command: "{{ kolla_container_engine }} exec rabbitmq rabbitmqctl await_online_nodes {{ groups['rabbitmq'] | length }}"
diff --git a/ansible/roles/rabbitmq/tasks/restart_services.yml b/ansible/roles/rabbitmq/tasks/restart_services.yml
index d9e2a1a6ab..289015e1c6 100644
--- a/ansible/roles/rabbitmq/tasks/restart_services.yml
+++ b/ansible/roles/rabbitmq/tasks/restart_services.yml
@@ -1,10 +1,28 @@
---
+- name: Get info on RabbitMQ container
+ become: True
+ kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
+ name: "rabbitmq"
+ register: container_info
+
+- name: Put RabbitMQ node into maintenance mode
+ kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: "community.rabbitmq.rabbitmq_upgrade"
+ module_args:
+ action: "drain"
+ user: root
+ become: true
+ when: container_info._containers | length > 0
+
- name: Restart rabbitmq container
vars:
service_name: "rabbitmq"
service: "{{ rabbitmq_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -19,4 +37,4 @@
service_name: "rabbitmq"
service: "{{ rabbitmq_services[service_name] }}"
become: true
- command: "{{ kolla_container_engine }} exec {{ service.container_name }} rabbitmqctl wait {{ rabbitmq_pid_file }}"
+ command: "{{ kolla_container_engine }} exec {{ service.container_name }} rabbitmqctl wait --timeout 60 {{ rabbitmq_pid_file }}"
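The drain action above puts the node into RabbitMQ maintenance mode, transferring queue leadership and closing client connections before the container is recreated; maintenance mode is lifted when the restarted node rejoins the cluster. Should an explicit revive ever be needed, a sketch using the same module (hypothetical task, not part of this change) would be:

    - name: Take RabbitMQ node out of maintenance mode  # illustrative only
      kolla_toolbox:
        container_engine: "{{ kolla_container_engine }}"
        module_name: "community.rabbitmq.rabbitmq_upgrade"
        module_args:
          action: "revive"
        user: root
      become: true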
diff --git a/ansible/roles/rabbitmq/tasks/upgrade.yml b/ansible/roles/rabbitmq/tasks/upgrade.yml
index d51da2f33f..55bda0f48a 100644
--- a/ansible/roles/rabbitmq/tasks/upgrade.yml
+++ b/ansible/roles/rabbitmq/tasks/upgrade.yml
@@ -1,44 +1,12 @@
---
-# NOTE(pbourke): These tasks perform a 'full stop upgrade', which is necessary when moving between
-# major releases. In future kolla-ansible releases we may be able to change this to a rolling
-# restart. For info on this process see https://www.rabbitmq.com/upgrade.html
-- name: Checking if rabbitmq container needs upgrading
- vars:
- service_name: "rabbitmq"
- service: "{{ rabbitmq_services[service_name] }}"
- become: true
- kolla_docker:
- action: "compare_image"
- common_options: "{{ docker_common_options }}"
- name: "{{ project_name }}"
- image: "{{ rabbitmq_image_full }}"
- environment: "{{ service.environment }}"
- when: inventory_hostname in groups[role_rabbitmq_groups]
- register: rabbitmq_differs
+- import_tasks: version-check.yml
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-- name: Stopping all rabbitmq instances but the first node
- become: true
- kolla_docker:
- action: "stop_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ project_name }}"
+- include_tasks: remove-ha-all-policy.yml
when:
- - inventory_hostname != groups[role_rabbitmq_groups] | first
- - rabbitmq_differs['result']
+ - not om_enable_rabbitmq_high_availability | bool
-- name: Stopping rabbitmq on the first node
- become: true
- kolla_docker:
- action: "stop_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ project_name }}"
- when:
- - inventory_hostname == groups[role_rabbitmq_groups] | first
- - rabbitmq_differs['result']
+- import_tasks: config.yml
-- name: Flush handlers
- meta: flush_handlers
+- import_tasks: feature-flags.yml
+
+- import_tasks: check-containers.yml
diff --git a/ansible/roles/rabbitmq/tasks/version-check.yml b/ansible/roles/rabbitmq/tasks/version-check.yml
new file mode 100644
index 0000000000..26111f8064
--- /dev/null
+++ b/ansible/roles/rabbitmq/tasks/version-check.yml
@@ -0,0 +1,82 @@
+---
+- block:
+ - name: Get container facts
+ become: true
+ kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
+ name:
+ - "{{ service.container_name }}"
+ register: container_facts
+
+ - block:
+ - name: Get current RabbitMQ version
+ become: true
+ command: "{{ kolla_container_engine }} exec {{ service.container_name }} rabbitmqctl --version"
+ register: rabbitmq_version_current
+ changed_when: false
+
+ - name: Get new RabbitMQ version
+ become: true
+ vars:
+ rabbitmq_container: "{{ rabbitmq_services['rabbitmq'] }}"
+ kolla_container:
+ action: "start_container"
+ command: "rabbitmqctl --version"
+ common_options: "{{ docker_common_options }}"
+ container_engine: "{{ kolla_container_engine }}"
+ detach: false
+ environment:
+ KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
+ image: "{{ rabbitmq_container.image }}"
+ name: "rabbitmq_version_check"
+ restart_policy: oneshot
+ volumes: "{{ rabbitmq_default_volumes + rabbitmq_extra_volumes }}"
+ register: rabbitmq_version_new
+ failed_when: false
+ check_mode: false
+
+ # As an example, when the new RabbitMQ version is 3.13.6:
+ # new_major_version = 3
+ # new_minor_version = 13
+ # new_version = 3.13
+ # And if the current RabbitMQ version is 3.11.28:
+ # upgrade_version = 3.12
+ - name: Check if running RabbitMQ is at most one version behind
+ vars:
+ current_version_major: "{{ rabbitmq_version_current.stdout | regex_search('^[0-9]+') }}"
+ current_version_minor: "{{ rabbitmq_version_current.stdout | regex_search('(?<=.)[^.].') }}"
+ current_version: "{{ rabbitmq_version_current.stdout | regex_replace('.[^.]+$', '') }}"
+ new_version_major: "{{ rabbitmq_version_new.stdout | regex_search('^[0-9]+') }}"
+ new_version_minor: "{{ rabbitmq_version_new.stdout | regex_search('(?<=.)[^.].') }}"
+ new_version: "{{ rabbitmq_version_new.stdout | regex_replace('.[^.]+$', '') }}"
+ # Note: this assumes 3.13 will be the last release before 4.0.
+ upgrade_version: "{{ '4.0' if current_version == '3.13' else current_version_major + '.' + (current_version_minor | int + 1) | string }}"
+ assert:
+ that: (current_version_major == new_version_major and
+ new_version_minor | int - current_version_minor | int <= 1) or
+ (new_version | float == 4.0 and current_version | float == 3.13)
+ fail_msg: >
+ Looks like you're trying to run a skip-release upgrade!
+ RabbitMQ must be at most one version behind the target release version ({{ rabbitmq_version_new.stdout | trim }}) to run this upgrade.
+ You are running {{ rabbitmq_version_current.stdout }}.
+ Please first upgrade to {{ upgrade_version }} with the command ``kolla-ansible rabbitmq-upgrade {{ upgrade_version }}``.
+ See these docs for more details: https://docs.openstack.org/kolla-ansible/latest/reference/message-queues/rabbitmq.html#slurp
+
+ - name: Catch when RabbitMQ is being downgraded
+ assert:
+ that: rabbitmq_version_current.stdout is version(rabbitmq_version_new.stdout | trim, 'le', version_type='semver')
+ fail_msg: >
+ Looks like you're about to downgrade RabbitMQ from version {{ rabbitmq_version_current.stdout }} to version {{ rabbitmq_version_new.stdout | trim }}.
+ If you're absolutely certain you want to do this, please skip the tag `rabbitmq-version-check`.
+ Otherwise, see these docs for how to pin the version of RabbitMQ:
+ https://docs.openstack.org/kolla-ansible/latest/reference/message-queues/rabbitmq.html#rabbitmq-versions
+
+ when: container_facts[service.container_name] is defined
+
+ delegate_to: "{{ groups[role_rabbitmq_groups] | first }}"
+ run_once: true
+ tags: rabbitmq-version-check
+ vars:
+ service_name: "rabbitmq"
+ service: "{{ rabbitmq_services[service_name] }}"
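Reading the assertion concretely (versions illustrative):

    3.11.28 -> 3.12.x : allowed (same major, minor delta of 1)
    3.11.28 -> 3.13.x : blocked; upgrade to 3.12 first
    3.13.x  -> 4.0.x  : allowed (special-cased major bump)

Operators who deliberately need to bypass the check can skip the rabbitmq-version-check tag, for example with kolla-ansible upgrade --skip-tags rabbitmq-version-check.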
diff --git a/ansible/roles/rabbitmq/templates/definitions.json.j2 b/ansible/roles/rabbitmq/templates/definitions.json.j2
index bcb2edcd08..9fcffbb35e 100644
--- a/ansible/roles/rabbitmq/templates/definitions.json.j2
+++ b/ansible/roles/rabbitmq/templates/definitions.json.j2
@@ -1,20 +1,20 @@
{
"vhosts": [
- {"name": "/"}{% if project_name == 'outward_rabbitmq' %},
- {"name": "{{ murano_agent_rabbitmq_vhost }}"}
- {% endif %}
+ {"name": "/"}
],
"users": [
{"name": "{{ role_rabbitmq_user }}", "password": "{{ role_rabbitmq_password }}", "tags": "administrator"}{% if role_rabbitmq_monitoring_user is defined and role_rabbitmq_monitoring_user %},
- {"name": "{{ role_rabbitmq_monitoring_user }}", "password": "{{ role_rabbitmq_monitoring_password }}", "tags": "monitoring"}{% endif %}{% if project_name == 'outward_rabbitmq' %},
- {"name": "{{ murano_agent_rabbitmq_user }}", "password": "{{ murano_agent_rabbitmq_password }}", "tags": "management"}
- {% endif %}
+ {"name": "{{ role_rabbitmq_monitoring_user }}", "password": "{{ role_rabbitmq_monitoring_password }}", "tags": "monitoring"}{% endif %}
],
"permissions": [
{"user": "{{ role_rabbitmq_user }}", "vhost": "/", "configure": ".*", "write": ".*", "read": ".*"}{% if role_rabbitmq_monitoring_user is defined and role_rabbitmq_monitoring_user %},
- {"user": "{{ role_rabbitmq_monitoring_user }}", "vhost": "/", "configure": "^$", "write": "^$", "read": ".*"}{% endif %}{% if project_name == 'outward_rabbitmq' %},
- {"user": "{{ murano_agent_rabbitmq_user }}", "vhost": "{{ murano_agent_rabbitmq_vhost }}", "configure": ".*", "write": ".*", "read": ".*"}
- {% endif %}
+ {"user": "{{ role_rabbitmq_monitoring_user }}", "vhost": "/", "configure": "^$", "write": "^$", "read": ".*"}{% endif %}
],
+{% if om_enable_rabbitmq_high_availability | bool %}
+ "policies":[
+ {"vhost": "/", "name": "ha-all", "pattern": "^(?!(amq\\.)|(.*_fanout_)|(reply_)).*", "apply-to": "all", "definition": {"ha-mode":{% if rabbitmq_ha_replica_count is not none %}"exactly","ha-params":{{ rabbitmq_ha_replica_count | int }}{% else %}"all"{% endif %}{% if rabbitmq_ha_promote_on_shutdown is not none %},"ha-promote-on-shutdown":"{{ rabbitmq_ha_promote_on_shutdown }}"{% endif %}{% if rabbitmq_message_ttl_ms is not none %},"message-ttl":{{ rabbitmq_message_ttl_ms | int }}{% endif %}{% if rabbitmq_queue_expiry_ms is not none %},"expires":{{ rabbitmq_queue_expiry_ms | int }}{% endif %}}, "priority":0}
+ ]
+{% else %}
"policies":[]
+{% endif %}
}
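With the defaults defined earlier (replica count 2 on a three-node cluster, promote-on-shutdown "always", the 10-minute message TTL and 1-hour queue expiry), the rendered policy block would come out roughly as:

    "policies":[
      {"vhost": "/", "name": "ha-all", "pattern": "^(?!(amq\\.)|(.*_fanout_)|(reply_)).*", "apply-to": "all", "definition": {"ha-mode":"exactly","ha-params":2,"ha-promote-on-shutdown":"always","message-ttl":600000,"expires":3600000}, "priority":0}
    ]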
diff --git a/ansible/roles/rabbitmq/templates/rabbitmq.conf.j2 b/ansible/roles/rabbitmq/templates/rabbitmq.conf.j2
index eef0db9083..e61031334f 100644
--- a/ansible/roles/rabbitmq/templates/rabbitmq.conf.j2
+++ b/ansible/roles/rabbitmq/templates/rabbitmq.conf.j2
@@ -11,6 +11,9 @@ cluster_partition_handling = {{ rabbitmq_cluster_partition_handling }}
management.listener.ip = {{ api_interface_address }}
management.listener.port = {{ role_rabbitmq_management_port }}
management.load_definitions = /etc/rabbitmq/definitions.json
+{% for key, value in rabbitmq_extra_config.items() %}
+{{ key }} = {{ value }}
+{% endfor %}
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config
{% for host in groups[role_rabbitmq_groups] %}
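The new rabbitmq_extra_config hook renders arbitrary key/value pairs into rabbitmq.conf. For example, setting the following (the key is illustrative, though consumer_timeout is a real RabbitMQ setting):

    rabbitmq_extra_config:
      consumer_timeout: 7200000

would render as the line consumer_timeout = 7200000.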
diff --git a/ansible/roles/rabbitmq/templates/rabbitmq.json.j2 b/ansible/roles/rabbitmq/templates/rabbitmq.json.j2
index 150b1355d1..b06526eee4 100644
--- a/ansible/roles/rabbitmq/templates/rabbitmq.json.j2
+++ b/ansible/roles/rabbitmq/templates/rabbitmq.json.j2
@@ -48,6 +48,12 @@
"dest": "/etc/rabbitmq/certs/{{ project_name }}-key.pem",
"owner": "rabbitmq",
"perm": "0600"
+ }{% endif %}{% if rabbitmq_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/redis/defaults/main.yml b/ansible/roles/redis/defaults/main.yml
index ea6e603806..e8b7aa388f 100644
--- a/ansible/roles/redis/defaults/main.yml
+++ b/ansible/roles/redis/defaults/main.yml
@@ -23,11 +23,11 @@ redis_services:
####################
# Docker
####################
-redis_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/redis"
+redis_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}redis"
redis_tag: "{{ openstack_tag }}"
redis_image_full: "{{ redis_image }}:{{ redis_tag }}"
-redis_sentinel_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/redis-sentinel"
+redis_sentinel_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}redis-sentinel"
redis_sentinel_tag: "{{ openstack_tag }}"
redis_sentinel_image_full: "{{ redis_sentinel_image }}:{{ redis_tag }}"
redis_dimensions: "{{ default_container_dimensions }}"
diff --git a/ansible/roles/redis/handlers/main.yml b/ansible/roles/redis/handlers/main.yml
index b0a5de770a..d1bd614771 100644
--- a/ansible/roles/redis/handlers/main.yml
+++ b/ansible/roles/redis/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "redis"
service: "{{ redis_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -12,15 +12,13 @@
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart redis-sentinel container
vars:
service_name: "redis-sentinel"
service: "{{ redis_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -29,5 +27,3 @@
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/redis/tasks/check-containers.yml b/ansible/roles/redis/tasks/check-containers.yml
index 0d32104ee7..b7e2f7c29f 100644
--- a/ansible/roles/redis/tasks/check-containers.yml
+++ b/ansible/roles/redis/tasks/check-containers.yml
@@ -1,18 +1,3 @@
---
-- name: Check redis containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- environment: "{{ item.environment | default(omit) }}"
- volumes: "{{ item.value.volumes }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ redis_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/redis/tasks/config.yml b/ansible/roles/redis/tasks/config.yml
index e6cfc003d6..9f133933da 100644
--- a/ansible/roles/redis/tasks/config.yml
+++ b/ansible/roles/redis/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ redis_services }}"
+ with_dict: "{{ redis_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over default config.json files
template:
@@ -18,12 +15,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ redis_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ redis_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over redis config files
template:
@@ -31,9 +23,4 @@
dest: "{{ node_config_directory }}/{{ item.key }}/redis.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ redis_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ redis_services | select_services_enabled_and_mapped_to_host }}"
diff --git a/ansible/roles/redis/tasks/config_validate.yml b/ansible/roles/redis/tasks/config_validate.yml
new file mode 100644
index 0000000000..ed97d539c0
--- /dev/null
+++ b/ansible/roles/redis/tasks/config_validate.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible/roles/redis/tasks/precheck.yml b/ansible/roles/redis/tasks/precheck.yml
index 9cfe2ff3b4..a9225a0807 100644
--- a/ansible/roles/redis/tasks/precheck.yml
+++ b/ansible/roles/redis/tasks/precheck.yml
@@ -8,13 +8,16 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- redis
+ check_mode: false
register: container_facts
- name: Checking free port for Redis
vars:
- redis: "{{ redis_services['redis'] }}"
+ service: "{{ redis_services['redis'] }}"
wait_for:
host: "{{ api_interface_address }}"
port: "{{ redis_port }}"
@@ -23,5 +26,4 @@
state: stopped
when:
- container_facts['redis'] is not defined
- - inventory_hostname in groups[redis.group]
- - redis.enabled | bool
+ - service | service_enabled_and_mapped_to_host
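The service_enabled_and_mapped_to_host filter used here condenses the two conditions the old precheck spelled out; conceptually it is equivalent to:

    when:
      - service.enabled | bool
      - inventory_hostname in groups[service.group]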
diff --git a/ansible/roles/sahara/defaults/main.yml b/ansible/roles/sahara/defaults/main.yml
deleted file mode 100644
index 7d746c23b1..0000000000
--- a/ansible/roles/sahara/defaults/main.yml
+++ /dev/null
@@ -1,162 +0,0 @@
----
-sahara_services:
- sahara-api:
- container_name: sahara_api
- group: sahara-api
- enabled: true
- image: "{{ sahara_api_image_full }}"
- volumes: "{{ sahara_api_default_volumes + sahara_api_extra_volumes }}"
- dimensions: "{{ sahara_api_dimensions }}"
- healthcheck: "{{ sahara_api_healthcheck }}"
- haproxy:
- sahara_api:
- enabled: "{{ enable_sahara }}"
- mode: "http"
- external: false
- port: "{{ sahara_api_port }}"
- sahara_api_external:
- enabled: "{{ enable_sahara }}"
- mode: "http"
- external: true
- port: "{{ sahara_api_port }}"
- sahara-engine:
- container_name: sahara_engine
- group: sahara-engine
- enabled: true
- image: "{{ sahara_engine_image_full }}"
- privileged: True
- volumes: "{{ sahara_engine_default_volumes + sahara_engine_extra_volumes }}"
- dimensions: "{{ sahara_engine_dimensions }}"
- healthcheck: "{{ sahara_engine_healthcheck }}"
-
-
-####################
-# Database
-####################
-sahara_database_name: "sahara"
-sahara_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}sahara{% endif %}"
-sahara_database_address: "{{ database_address | put_address_in_context('url') }}:{{ database_port }}"
-
-####################
-# Database sharding
-####################
-sahara_database_shard_root_user: "{% if enable_proxysql | bool %}root_shard_{{ sahara_database_shard_id }}{% else %}{{ database_user }}{% endif %}"
-sahara_database_shard_id: "{{ mariadb_default_database_shard_id | int }}"
-sahara_database_shard:
- users:
- - user: "{{ sahara_database_user }}"
- password: "{{ sahara_database_password }}"
- rules:
- - schema: "{{ sahara_database_name }}"
- shard_id: "{{ sahara_database_shard_id }}"
-
-
-####################
-# Docker
-####################
-sahara_tag: "{{ openstack_tag }}"
-
-sahara_engine_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/sahara-engine"
-sahara_engine_tag: "{{ sahara_tag }}"
-sahara_engine_image_full: "{{ sahara_engine_image }}:{{ sahara_engine_tag }}"
-
-sahara_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/sahara-api"
-sahara_api_tag: "{{ sahara_tag }}"
-sahara_api_image_full: "{{ sahara_api_image }}:{{ sahara_api_tag }}"
-
-sahara_api_dimensions: "{{ default_container_dimensions }}"
-sahara_engine_dimensions: "{{ default_container_dimensions }}"
-
-sahara_api_enable_healthchecks: "{{ enable_container_healthchecks }}"
-sahara_api_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-sahara_api_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-sahara_api_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-sahara_api_healthcheck_test: ["CMD-SHELL", "healthcheck_curl http://{{ api_interface_address | put_address_in_context('url') }}:{{ sahara_api_port }}"]
-sahara_api_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-sahara_api_healthcheck:
- interval: "{{ sahara_api_healthcheck_interval }}"
- retries: "{{ sahara_api_healthcheck_retries }}"
- start_period: "{{ sahara_api_healthcheck_start_period }}"
- test: "{% if sahara_api_enable_healthchecks | bool %}{{ sahara_api_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ sahara_api_healthcheck_timeout }}"
-
-sahara_engine_enable_healthchecks: "{{ enable_container_healthchecks }}"
-sahara_engine_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-sahara_engine_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-sahara_engine_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-sahara_engine_healthcheck_test: ["CMD-SHELL", "healthcheck_port sahara_engine {{ om_rpc_port }}"]
-sahara_engine_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-sahara_engine_healthcheck:
- interval: "{{ sahara_engine_healthcheck_interval }}"
- retries: "{{ sahara_engine_healthcheck_retries }}"
- start_period: "{{ sahara_engine_healthcheck_start_period }}"
- test: "{% if sahara_engine_enable_healthchecks | bool %}{{ sahara_engine_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ sahara_engine_healthcheck_timeout }}"
-
-sahara_api_default_volumes:
- - "{{ node_config_directory }}/sahara-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "sahara:/var/lib/sahara/"
- - "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/sahara/sahara:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/sahara' if sahara_dev_mode | bool else '' }}"
-sahara_engine_default_volumes:
- - "{{ node_config_directory }}/sahara-engine/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "sahara:/var/lib/sahara/"
- - "kolla_logs:/var/log/kolla/"
- - "/run:/run:shared"
- - "{{ kolla_dev_repos_directory ~ '/sahara/sahara:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/sahara' if sahara_dev_mode | bool else '' }}"
-
-sahara_extra_volumes: "{{ default_extra_volumes }}"
-sahara_api_extra_volumes: "{{ sahara_extra_volumes }}"
-sahara_engine_extra_volumes: "{{ sahara_extra_volumes }}"
-
-####################
-# OpenStack
-####################
-sahara_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ sahara_api_port }}"
-sahara_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ sahara_api_port }}"
-
-sahara_logging_debug: "{{ openstack_logging_debug }}"
-
-sahara_keystone_user: "sahara"
-
-openstack_sahara_auth: "{{ openstack_auth }}"
-
-sahara_api_workers: "{{ openstack_service_workers }}"
-
-####################
-## Kolla
-#####################
-sahara_git_repository: "{{ kolla_dev_repos_git }}/{{ project_name }}"
-sahara_dev_repos_pull: "{{ kolla_dev_repos_pull }}"
-sahara_dev_mode: "{{ kolla_dev_mode }}"
-sahara_source_version: "{{ kolla_source_version }}"
-
-####################
-# Notifications
-####################
-sahara_notification_topics:
- - name: notifications
- enabled: "{{ enable_ceilometer | bool }}"
-
-sahara_enabled_notification_topics: "{{ sahara_notification_topics | selectattr('enabled', 'equalto', true) | list }}"
-
-####################
-# Keystone
-####################
-sahara_ks_services:
- - name: "sahara"
- type: "data-processing"
- description: "Sahara Data Processing"
- endpoints:
- - {'interface': 'internal', 'url': '{{ sahara_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ sahara_public_endpoint }}'}
-
-sahara_ks_users:
- - project: "service"
- user: "{{ sahara_keystone_user }}"
- password: "{{ sahara_keystone_password }}"
- role: "admin"
diff --git a/ansible/roles/sahara/handlers/main.yml b/ansible/roles/sahara/handlers/main.yml
deleted file mode 100644
index d96468297e..0000000000
--- a/ansible/roles/sahara/handlers/main.yml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-- name: Restart sahara-api container
- vars:
- service_name: "sahara-api"
- service: "{{ sahara_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
-
-- name: Restart sahara-engine container
- vars:
- service_name: "sahara-engine"
- service: "{{ sahara_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- privileged: "{{ service.privileged | default(False) }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/sahara/tasks/bootstrap.yml b/ansible/roles/sahara/tasks/bootstrap.yml
deleted file mode 100644
index 0ab7ce99e4..0000000000
--- a/ansible/roles/sahara/tasks/bootstrap.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-- name: Creating sahara database
- become: true
- kolla_toolbox:
- module_name: mysql_db
- module_args:
- login_host: "{{ database_address }}"
- login_port: "{{ database_port }}"
- login_user: "{{ sahara_database_shard_root_user }}"
- login_password: "{{ database_password }}"
- name: "{{ sahara_database_name }}"
- run_once: True
- delegate_to: "{{ groups['sahara-api'][0] }}"
- when:
- - not use_preconfigured_databases | bool
-
-- name: Creating sahara database user and setting permissions
- become: true
- kolla_toolbox:
- module_name: mysql_user
- module_args:
- login_host: "{{ database_address }}"
- login_port: "{{ database_port }}"
- login_user: "{{ sahara_database_shard_root_user }}"
- login_password: "{{ database_password }}"
- name: "{{ sahara_database_user }}"
- password: "{{ sahara_database_password }}"
- host: "%"
- priv: "{{ sahara_database_name }}.*:ALL"
- append_privs: "yes"
- run_once: True
- delegate_to: "{{ groups['sahara-api'][0] }}"
- when:
- - not use_preconfigured_databases | bool
-
-- import_tasks: bootstrap_service.yml
diff --git a/ansible/roles/sahara/tasks/bootstrap_service.yml b/ansible/roles/sahara/tasks/bootstrap_service.yml
deleted file mode 100644
index 6f78f9d867..0000000000
--- a/ansible/roles/sahara/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Running Sahara bootstrap container
- vars:
- sahara_api: "{{ sahara_services['sahara-api'] }}"
- become: true
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ sahara_api.image }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_sahara"
- restart_policy: no
- volumes: "{{ sahara_api.volumes | reject('equalto', '') | list }}"
- run_once: True
- delegate_to: "{{ groups[sahara_api.group][0] }}"
diff --git a/ansible/roles/sahara/tasks/check-containers.yml b/ansible/roles/sahara/tasks/check-containers.yml
deleted file mode 100644
index c224d54773..0000000000
--- a/ansible/roles/sahara/tasks/check-containers.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- name: Check sahara containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- privileged: "{{ item.value.privileged | default(False) }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ sahara_services }}"
- notify:
- - "Restart {{ item.key }} container"
diff --git a/ansible/roles/sahara/tasks/clone.yml b/ansible/roles/sahara/tasks/clone.yml
deleted file mode 100644
index 6385f6c236..0000000000
--- a/ansible/roles/sahara/tasks/clone.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Cloning sahara source repository for development
- become: true
- git:
- repo: "{{ sahara_git_repository }}"
- dest: "{{ kolla_dev_repos_directory }}/{{ project_name }}"
- update: "{{ sahara_dev_repos_pull }}"
- version: "{{ sahara_source_version }}"
diff --git a/ansible/roles/sahara/tasks/config.yml b/ansible/roles/sahara/tasks/config.yml
deleted file mode 100644
index bed6647e8b..0000000000
--- a/ansible/roles/sahara/tasks/config.yml
+++ /dev/null
@@ -1,83 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item.key }}"
- state: "directory"
- owner: "{{ config_owner_user }}"
- group: "{{ config_owner_group }}"
- mode: "0770"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ sahara_services }}"
-
-- name: Check if policies shall be overwritten
- stat:
- path: "{{ item }}"
- delegate_to: localhost
- run_once: True
- register: sahara_policy
- with_first_found:
- - files: "{{ supported_policy_format_list }}"
- paths:
- - "{{ node_custom_config }}/sahara/"
- skip: true
-
-- name: Set sahara policy file
- set_fact:
- sahara_policy_file: "{{ sahara_policy.results.0.stat.path | basename }}"
- sahara_policy_file_path: "{{ sahara_policy.results.0.stat.path }}"
- when:
- - sahara_policy.results
-
-- include_tasks: copy-certs.yml
- when:
- - kolla_copy_ca_into_containers | bool
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item.key }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ sahara_services }}"
- notify:
- - Restart {{ item.key }} container
-
-- name: Copying over sahara.conf
- vars:
- service_name: "{{ item.key }}"
- merge_configs:
- sources:
- - "{{ role_path }}/templates/sahara.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/sahara.conf"
- - "{{ node_custom_config }}/sahara/{{ item.key }}.conf"
- - "{{ node_custom_config }}/sahara/{{ inventory_hostname }}/sahara.conf"
- dest: "{{ node_config_directory }}/{{ item.key }}/sahara.conf"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ sahara_services }}"
- notify:
- - Restart {{ item.key }} container
-
-- name: Copying over existing policy file
- template:
- src: "{{ sahara_policy_file_path }}"
- dest: "{{ node_config_directory }}/{{ item.key }}/{{ sahara_policy_file }}"
- mode: "0660"
- become: true
- when:
- - sahara_policy_file is defined
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ sahara_services }}"
- notify:
- - Restart {{ item.key }} container
diff --git a/ansible/roles/sahara/tasks/copy-certs.yml b/ansible/roles/sahara/tasks/copy-certs.yml
deleted file mode 100644
index e407b87540..0000000000
--- a/ansible/roles/sahara/tasks/copy-certs.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: "Copy certificates and keys for {{ project_name }}"
- import_role:
- role: service-cert-copy
- vars:
- project_services: "{{ sahara_services }}"
diff --git a/ansible/roles/sahara/tasks/deploy-containers.yml b/ansible/roles/sahara/tasks/deploy-containers.yml
deleted file mode 100644
index eb24ab5c7a..0000000000
--- a/ansible/roles/sahara/tasks/deploy-containers.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- import_tasks: check-containers.yml
diff --git a/ansible/roles/sahara/tasks/deploy.yml b/ansible/roles/sahara/tasks/deploy.yml
deleted file mode 100644
index 968cdfe078..0000000000
--- a/ansible/roles/sahara/tasks/deploy.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- import_tasks: register.yml
-
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-- include_tasks: clone.yml
- when: sahara_dev_mode | bool
-
-- import_tasks: bootstrap.yml
-
-- name: Flush handlers
- meta: flush_handlers
diff --git a/ansible/roles/sahara/tasks/loadbalancer.yml b/ansible/roles/sahara/tasks/loadbalancer.yml
deleted file mode 100644
index 178ba1cafa..0000000000
--- a/ansible/roles/sahara/tasks/loadbalancer.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: "Configure loadbalancer for {{ project_name }}"
- import_role:
- name: loadbalancer-config
- vars:
- project_services: "{{ sahara_services }}"
- tags: always
diff --git a/ansible/roles/sahara/tasks/main.yml b/ansible/roles/sahara/tasks/main.yml
deleted file mode 100644
index bc5d1e6257..0000000000
--- a/ansible/roles/sahara/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include_tasks: "{{ kolla_action }}.yml"
diff --git a/ansible/roles/sahara/tasks/precheck.yml b/ansible/roles/sahara/tasks/precheck.yml
deleted file mode 100644
index 2df581c1fb..0000000000
--- a/ansible/roles/sahara/tasks/precheck.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- import_role:
- name: service-precheck
- vars:
- service_precheck_services: "{{ sahara_services }}"
- service_name: "{{ project_name }}"
-
-- name: Get container facts
- become: true
- kolla_container_facts:
- name:
- - sahara_api
- register: container_facts
-
-- name: Checking free port for Sahara API
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ sahara_api_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - container_facts['sahara_api'] is not defined
- - inventory_hostname in groups['sahara-api']
diff --git a/ansible/roles/sahara/tasks/pull.yml b/ansible/roles/sahara/tasks/pull.yml
deleted file mode 100644
index 53f9c5fda1..0000000000
--- a/ansible/roles/sahara/tasks/pull.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- import_role:
- role: service-images-pull
diff --git a/ansible/roles/sahara/tasks/reconfigure.yml b/ansible/roles/sahara/tasks/reconfigure.yml
deleted file mode 100644
index 5b10a7e111..0000000000
--- a/ansible/roles/sahara/tasks/reconfigure.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- import_tasks: deploy.yml
diff --git a/ansible/roles/sahara/tasks/register.yml b/ansible/roles/sahara/tasks/register.yml
deleted file mode 100644
index 17c71fae28..0000000000
--- a/ansible/roles/sahara/tasks/register.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- import_role:
- name: service-ks-register
- vars:
- service_ks_register_auth: "{{ openstack_sahara_auth }}"
- service_ks_register_services: "{{ sahara_ks_services }}"
- service_ks_register_users: "{{ sahara_ks_users }}"
diff --git a/ansible/roles/sahara/tasks/stop.yml b/ansible/roles/sahara/tasks/stop.yml
deleted file mode 100644
index 28c1c535b1..0000000000
--- a/ansible/roles/sahara/tasks/stop.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- import_role:
- name: service-stop
- vars:
- project_services: "{{ sahara_services }}"
- service_name: "{{ project_name }}"
diff --git a/ansible/roles/sahara/tasks/upgrade.yml b/ansible/roles/sahara/tasks/upgrade.yml
deleted file mode 100644
index 6ba9f99799..0000000000
--- a/ansible/roles/sahara/tasks/upgrade.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-- import_tasks: bootstrap_service.yml
-
-- name: Flush handlers
- meta: flush_handlers
diff --git a/ansible/roles/sahara/templates/sahara-api.json.j2 b/ansible/roles/sahara/templates/sahara-api.json.j2
deleted file mode 100644
index 4c0378fda3..0000000000
--- a/ansible/roles/sahara/templates/sahara-api.json.j2
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- "command": "sahara-api --config-file /etc/sahara/sahara.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/sahara.conf",
- "dest": "/etc/sahara/sahara.conf",
- "owner": "sahara",
- "perm": "0600"
- }{% if sahara_policy_file is defined %},
- {
- "source": "{{ container_config_directory }}/{{ sahara_policy_file }}",
- "dest": "/etc/sahara/{{ sahara_policy_file }}",
- "owner": "sahara",
- "perm": "0600"
- }{% endif %}
- ],
- "permissions": [
- {
- "path": "/var/lib/sahara",
- "owner": "sahara:sahara",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/sahara",
- "owner": "sahara:sahara",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/sahara/templates/sahara-engine.json.j2 b/ansible/roles/sahara/templates/sahara-engine.json.j2
deleted file mode 100644
index 11f32fa6a9..0000000000
--- a/ansible/roles/sahara/templates/sahara-engine.json.j2
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- "command": "sahara-engine --config-file /etc/sahara/sahara.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/sahara.conf",
- "dest": "/etc/sahara/sahara.conf",
- "owner": "sahara",
- "perm": "0600"
- }{% if sahara_policy_file is defined %},
- {
- "source": "{{ container_config_directory }}/{{ sahara_policy_file }}",
- "dest": "/etc/sahara/{{ sahara_policy_file }}",
- "owner": "sahara",
- "perm": "0600"
- }{% endif %}
- ],
- "permissions": [
- {
- "path": "/var/lib/sahara",
- "owner": "sahara:sahara",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/sahara",
- "owner": "sahara:sahara",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/sahara/templates/sahara.conf.j2 b/ansible/roles/sahara/templates/sahara.conf.j2
deleted file mode 100644
index 2fc1edd5ec..0000000000
--- a/ansible/roles/sahara/templates/sahara.conf.j2
+++ /dev/null
@@ -1,65 +0,0 @@
-[DEFAULT]
-debug = {{ sahara_logging_debug }}
-log_dir = /var/log/kolla/sahara
-port = {{ sahara_api_port }}
-host = {{ api_interface_address }}
-transport_url = {{ rpc_transport_url }}
-
-api_workers = {{ sahara_api_workers }}
-use_floating_ips = False
-use_namespaces = True
-use_rootwrap = True
-
-[database]
-connection = mysql+pymysql://{{ sahara_database_user }}:{{ sahara_database_password }}@{{ sahara_database_address }}/{{ sahara_database_name }}
-connection_recycle_time = {{ database_connection_recycle_time }}
-max_pool_size = {{ database_max_pool_size }}
-
-[keystone_authtoken]
-service_type = data-processing
-auth_url = {{ keystone_internal_url }}
-auth_type = password
-user_domain_name = {{ default_project_domain_name }}
-project_name = service
-project_domain_name = {{ default_project_domain_name }}
-username = {{ sahara_keystone_user }}
-password = {{ sahara_keystone_password }}
-cafile = {{ openstack_cacert }}
-region_name = {{ openstack_region_name }}
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[oslo_messaging_notifications]
-transport_url = {{ notify_transport_url }}
-{% if sahara_enabled_notification_topics %}
-driver = messagingv2
-topics = {{ sahara_enabled_notification_topics | map(attribute='name') | join(',') }}
-{% else %}
-driver = noop
-{% endif %}
-
-{% if om_enable_rabbitmq_tls | bool %}
-[oslo_messaging_rabbit]
-ssl = true
-ssl_ca_file = {{ om_rabbitmq_cacert }}
-{% endif %}
-
-{% if sahara_policy_file is defined %}
-[oslo_policy]
-policy_file = {{ sahara_policy_file }}
-{% endif %}
-
-
-[profiler]
-enabled = False
-
-[trustee]
-project_domain_name = {{ default_project_domain_name }}
-project_name = service
-user_domain_name = {{ default_user_domain_name }}
-username = {{ sahara_keystone_user }}
-password = {{ sahara_keystone_password }}
-auth_url = {{ keystone_internal_url }}
-cafile = {{ openstack_cacert }}
diff --git a/ansible/roles/sahara/vars/main.yml b/ansible/roles/sahara/vars/main.yml
deleted file mode 100644
index 8a8430fe65..0000000000
--- a/ansible/roles/sahara/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-project_name: "sahara"
diff --git a/ansible/roles/senlin/defaults/main.yml b/ansible/roles/senlin/defaults/main.yml
deleted file mode 100644
index ded678fb34..0000000000
--- a/ansible/roles/senlin/defaults/main.yml
+++ /dev/null
@@ -1,229 +0,0 @@
----
-senlin_services:
- senlin-api:
- container_name: senlin_api
- group: senlin-api
- enabled: true
- image: "{{ senlin_api_image_full }}"
- volumes: "{{ senlin_api_default_volumes + senlin_api_extra_volumes }}"
- dimensions: "{{ senlin_api_dimensions }}"
- healthcheck: "{{ senlin_api_healthcheck }}"
- haproxy:
- senlin_api:
- enabled: "{{ enable_senlin }}"
- mode: "http"
- external: false
- port: "{{ senlin_api_port }}"
- listen_port: "{{ senlin_api_listen_port }}"
- senlin_api_external:
- enabled: "{{ enable_senlin }}"
- mode: "http"
- external: true
- port: "{{ senlin_api_port }}"
- listen_port: "{{ senlin_api_listen_port }}"
- senlin-conductor:
- container_name: senlin_conductor
- group: senlin-conductor
- enabled: true
- image: "{{ senlin_conductor_image_full }}"
- volumes: "{{ senlin_conductor_default_volumes + senlin_conductor_extra_volumes }}"
- dimensions: "{{ senlin_conductor_dimensions }}"
- healthcheck: "{{ senlin_conductor_healthcheck }}"
- senlin-engine:
- container_name: senlin_engine
- group: senlin-engine
- enabled: true
- image: "{{ senlin_engine_image_full }}"
- volumes: "{{ senlin_engine_default_volumes + senlin_engine_extra_volumes }}"
- dimensions: "{{ senlin_engine_dimensions }}"
- healthcheck: "{{ senlin_engine_healthcheck }}"
- senlin-health-manager:
- container_name: senlin_health_manager
- group: senlin-health-manager
- enabled: true
- image: "{{ senlin_health_manager_image_full }}"
- volumes: "{{ senlin_health_manager_default_volumes + senlin_health_manager_extra_volumes }}"
- dimensions: "{{ senlin_health_manager_dimensions }}"
- healthcheck: "{{ senlin_health_manager_healthcheck }}"
-
-####################
-# Database
-####################
-senlin_database_name: "senlin"
-senlin_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}senlin{% endif %}"
-senlin_database_address: "{{ database_address | put_address_in_context('url') }}:{{ database_port }}"
-
-####################
-# Database sharding
-####################
-senlin_database_shard_root_user: "{% if enable_proxysql | bool %}root_shard_{{ senlin_database_shard_id }}{% else %}{{ database_user }}{% endif %}"
-senlin_database_shard_id: "{{ mariadb_default_database_shard_id | int }}"
-senlin_database_shard:
- users:
- - user: "{{ senlin_database_user }}"
- password: "{{ senlin_database_password }}"
- rules:
- - schema: "{{ senlin_database_name }}"
- shard_id: "{{ senlin_database_shard_id }}"
-
-
-####################
-# Docker
-####################
-senlin_tag: "{{ openstack_tag }}"
-
-senlin_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/senlin-conductor"
-senlin_conductor_tag: "{{ senlin_tag }}"
-senlin_conductor_image_full: "{{ senlin_conductor_image }}:{{ senlin_conductor_tag }}"
-
-senlin_engine_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/senlin-engine"
-senlin_engine_tag: "{{ senlin_tag }}"
-senlin_engine_image_full: "{{ senlin_engine_image }}:{{ senlin_engine_tag }}"
-
-senlin_health_manager_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/senlin-health-manager"
-senlin_health_manager_tag: "{{ senlin_tag }}"
-senlin_health_manager_image_full: "{{ senlin_health_manager_image }}:{{ senlin_health_manager_tag }}"
-
-senlin_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/senlin-api"
-senlin_api_tag: "{{ senlin_tag }}"
-senlin_api_image_full: "{{ senlin_api_image }}:{{ senlin_api_tag }}"
-
-senlin_api_dimensions: "{{ default_container_dimensions }}"
-senlin_conductor_dimensions: "{{ default_container_dimensions }}"
-senlin_engine_dimensions: "{{ default_container_dimensions }}"
-senlin_health_manager_dimensions: "{{ default_container_dimensions }}"
-
-senlin_api_enable_healthchecks: "{{ enable_container_healthchecks }}"
-senlin_api_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-senlin_api_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-senlin_api_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-senlin_api_healthcheck_test: ["CMD-SHELL", "healthcheck_curl http://{{ api_interface_address | put_address_in_context('url') }}:{{ senlin_api_listen_port }}"]
-senlin_api_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-senlin_api_healthcheck:
- interval: "{{ senlin_api_healthcheck_interval }}"
- retries: "{{ senlin_api_healthcheck_retries }}"
- start_period: "{{ senlin_api_healthcheck_start_period }}"
- test: "{% if senlin_api_enable_healthchecks | bool %}{{ senlin_api_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ senlin_api_healthcheck_timeout }}"
-
-senlin_conductor_enable_healthchecks: "{{ enable_container_healthchecks }}"
-senlin_conductor_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-senlin_conductor_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-senlin_conductor_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-senlin_conductor_healthcheck_test: ["CMD-SHELL", "healthcheck_port senlin-conductor {{ om_rpc_port }}"]
-senlin_conductor_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-senlin_conductor_healthcheck:
- interval: "{{ senlin_conductor_healthcheck_interval }}"
- retries: "{{ senlin_conductor_healthcheck_retries }}"
- start_period: "{{ senlin_conductor_healthcheck_start_period }}"
- test: "{% if senlin_conductor_enable_healthchecks | bool %}{{ senlin_conductor_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ senlin_conductor_healthcheck_timeout }}"
-
-senlin_engine_enable_healthchecks: "{{ enable_container_healthchecks }}"
-senlin_engine_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-senlin_engine_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-senlin_engine_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-senlin_engine_healthcheck_test: ["CMD-SHELL", "healthcheck_port senlin-engine {{ om_rpc_port }}"]
-senlin_engine_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-senlin_engine_healthcheck:
- interval: "{{ senlin_engine_healthcheck_interval }}"
- retries: "{{ senlin_engine_healthcheck_retries }}"
- start_period: "{{ senlin_engine_healthcheck_start_period }}"
- test: "{% if senlin_engine_enable_healthchecks | bool %}{{ senlin_engine_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ senlin_engine_healthcheck_timeout }}"
-
-senlin_health_manager_enable_healthchecks: "{{ enable_container_healthchecks }}"
-senlin_health_manager_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-senlin_health_manager_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-senlin_health_manager_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-senlin_health_manager_healthcheck_test: ["CMD-SHELL", "healthcheck_port senlin-health-manager {{ om_rpc_port }}"]
-senlin_health_manager_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-senlin_health_manager_healthcheck:
- interval: "{{ senlin_health_manager_healthcheck_interval }}"
- retries: "{{ senlin_health_manager_healthcheck_retries }}"
- start_period: "{{ senlin_health_manager_healthcheck_start_period }}"
- test: "{% if senlin_health_manager_enable_healthchecks | bool %}{{ senlin_health_manager_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ senlin_health_manager_healthcheck_timeout }}"
-
-senlin_api_default_volumes:
- - "{{ node_config_directory }}/senlin-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/senlin/senlin:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/senlin' if senlin_dev_mode | bool else '' }}"
-senlin_conductor_default_volumes:
- - "{{ node_config_directory }}/senlin-conductor/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/senlin/senlin:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/senlin' if senlin_dev_mode | bool else '' }}"
-senlin_engine_default_volumes:
- - "{{ node_config_directory }}/senlin-engine/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/senlin/senlin:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/senlin' if senlin_dev_mode | bool else '' }}"
-senlin_health_manager_default_volumes:
- - "{{ node_config_directory }}/senlin-health-manager/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/senlin/senlin:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/senlin' if senlin_dev_mode | bool else '' }}"
-
-senlin_extra_volumes: "{{ default_extra_volumes }}"
-senlin_api_extra_volumes: "{{ senlin_extra_volumes }}"
-senlin_conductor_extra_volumes: "{{ senlin_extra_volumes }}"
-senlin_engine_extra_volumes: "{{ senlin_extra_volumes }}"
-senlin_health_manager_extra_volumes: "{{ senlin_extra_volumes }}"
-
-####################
-# OpenStack
-####################
-senlin_internal_endpoint: "{{ internal_protocol }}://{{ senlin_internal_fqdn | put_address_in_context('url') }}:{{ senlin_api_port }}"
-senlin_public_endpoint: "{{ public_protocol }}://{{ senlin_external_fqdn | put_address_in_context('url') }}:{{ senlin_api_port }}"
-
-senlin_logging_debug: "{{ openstack_logging_debug }}"
-
-senlin_keystone_user: "senlin"
-
-openstack_senlin_auth: "{{ openstack_auth }}"
-
-senlin_api_workers: "{{ openstack_service_workers }}"
-senlin_conductor_workers: "{{ openstack_service_workers }}"
-senlin_engine_workers: "{{ openstack_service_workers }}"
-senlin_health_manager_workers: "{{ openstack_service_workers }}"
-
-####################
-# Kolla
-####################
-senlin_git_repository: "{{ kolla_dev_repos_git }}/{{ project_name }}"
-senlin_dev_repos_pull: "{{ kolla_dev_repos_pull }}"
-senlin_dev_mode: "{{ kolla_dev_mode }}"
-senlin_source_version: "{{ kolla_source_version }}"
-
-
-####################
-# Notifications
-####################
-senlin_notification_topics:
- - name: notifications
- enabled: "{{ enable_ceilometer | bool }}"
-
-senlin_enabled_notification_topics: "{{ senlin_notification_topics | selectattr('enabled', 'equalto', true) | list }}"
-
-####################
-# Keystone
-####################
-senlin_ks_services:
- - name: "senlin"
- type: "clustering"
- description: "Senlin Clustering Service"
- endpoints:
- - {'interface': 'internal', 'url': '{{ senlin_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ senlin_public_endpoint }}'}
-
-senlin_ks_users:
- - project: "service"
- user: "{{ senlin_keystone_user }}"
- password: "{{ senlin_keystone_password }}"
- role: "admin"
diff --git a/ansible/roles/senlin/handlers/main.yml b/ansible/roles/senlin/handlers/main.yml
deleted file mode 100644
index bdf06bf743..0000000000
--- a/ansible/roles/senlin/handlers/main.yml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-- name: Restart senlin-api container
- vars:
- service_name: "senlin-api"
- service: "{{ senlin_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
-
-- name: Restart senlin-conductor container
- vars:
- service_name: "senlin-conductor"
- service: "{{ senlin_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
-
-- name: Restart senlin-engine container
- vars:
- service_name: "senlin-engine"
- service: "{{ senlin_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
-
-- name: Restart senlin-health-manager container
- vars:
- service_name: "senlin-health-manager"
- service: "{{ senlin_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/senlin/tasks/bootstrap.yml b/ansible/roles/senlin/tasks/bootstrap.yml
deleted file mode 100644
index 8ff87b92a4..0000000000
--- a/ansible/roles/senlin/tasks/bootstrap.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-- name: Creating Senlin database
- become: true
- kolla_toolbox:
- module_name: mysql_db
- module_args:
- login_host: "{{ database_address }}"
- login_port: "{{ database_port }}"
- login_user: "{{ senlin_database_shard_root_user }}"
- login_password: "{{ database_password }}"
- name: "{{ senlin_database_name }}"
- run_once: True
- delegate_to: "{{ groups['senlin-api'][0] }}"
- when:
- - not use_preconfigured_databases | bool
-
-- name: Creating Senlin database user and setting permissions
- become: true
- kolla_toolbox:
- module_name: mysql_user
- module_args:
- login_host: "{{ database_address }}"
- login_port: "{{ database_port }}"
- login_user: "{{ senlin_database_shard_root_user }}"
- login_password: "{{ database_password }}"
- name: "{{ senlin_database_user }}"
- password: "{{ senlin_database_password }}"
- host: "%"
- priv: "{{ senlin_database_name }}.*:ALL"
- append_privs: "yes"
- run_once: True
- delegate_to: "{{ groups['senlin-api'][0] }}"
- when:
- - not use_preconfigured_databases | bool
-
-- import_tasks: bootstrap_service.yml
diff --git a/ansible/roles/senlin/tasks/bootstrap_service.yml b/ansible/roles/senlin/tasks/bootstrap_service.yml
deleted file mode 100644
index 318201f5a7..0000000000
--- a/ansible/roles/senlin/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Running Senlin bootstrap container
- vars:
- senlin_api: "{{ senlin_services['senlin-api'] }}"
- become: true
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ senlin_api.image }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_senlin"
- restart_policy: no
- volumes: "{{ senlin_api.volumes | reject('equalto', '') | list }}"
- run_once: True
- delegate_to: "{{ groups[senlin_api.group][0] }}"
diff --git a/ansible/roles/senlin/tasks/check-containers.yml b/ansible/roles/senlin/tasks/check-containers.yml
deleted file mode 100644
index 0fcaa54a2e..0000000000
--- a/ansible/roles/senlin/tasks/check-containers.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- name: Check senlin containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ senlin_services }}"
- notify:
- - "Restart {{ item.key }} container"
diff --git a/ansible/roles/senlin/tasks/clone.yml b/ansible/roles/senlin/tasks/clone.yml
deleted file mode 100644
index 95709ce358..0000000000
--- a/ansible/roles/senlin/tasks/clone.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Cloning senlin source repository for development
- become: true
- git:
- repo: "{{ senlin_git_repository }}"
- dest: "{{ kolla_dev_repos_directory }}/{{ project_name }}"
- update: "{{ senlin_dev_repos_pull }}"
- version: "{{ senlin_source_version }}"
diff --git a/ansible/roles/senlin/tasks/config.yml b/ansible/roles/senlin/tasks/config.yml
deleted file mode 100644
index 34e0ea3389..0000000000
--- a/ansible/roles/senlin/tasks/config.yml
+++ /dev/null
@@ -1,83 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item.key }}"
- state: "directory"
- owner: "{{ config_owner_user }}"
- group: "{{ config_owner_group }}"
- mode: "0770"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ senlin_services }}"
-
-- name: Check if policies shall be overwritten
- stat:
- path: "{{ item }}"
- delegate_to: localhost
- run_once: True
- register: senlin_policy
- with_first_found:
- - files: "{{ supported_policy_format_list }}"
- paths:
- - "{{ node_custom_config }}/senlin/"
- skip: true
-
-- name: Set senlin policy file
- set_fact:
- senlin_policy_file: "{{ senlin_policy.results.0.stat.path | basename }}"
- senlin_policy_file_path: "{{ senlin_policy.results.0.stat.path }}"
- when:
- - senlin_policy.results
-
-- include_tasks: copy-certs.yml
- when:
- - kolla_copy_ca_into_containers | bool
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item.key }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
- mode: "0660"
- become: true
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ senlin_services }}"
- notify:
- - Restart {{ item.key }} container
-
-- name: Copying over senlin.conf
- vars:
- service_name: "{{ item.key }}"
- merge_configs:
- sources:
- - "{{ role_path }}/templates/senlin.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/senlin.conf"
- - "{{ node_custom_config }}/senlin/{{ item.key }}.conf"
- - "{{ node_custom_config }}/senlin/{{ inventory_hostname }}/senlin.conf"
- dest: "{{ node_config_directory }}/{{ item.key }}/senlin.conf"
- mode: "0660"
- become: true
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ senlin_services }}"
- notify:
- - Restart {{ item.key }} container
-
-- name: Copying over existing policy file
- template:
- src: "{{ senlin_policy_file_path }}"
- dest: "{{ node_config_directory }}/{{ item.key }}/{{ senlin_policy_file }}"
- mode: "0660"
- become: true
- when:
- - senlin_policy_file is defined
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ senlin_services }}"
- notify:
- - Restart {{ item.key }} container
diff --git a/ansible/roles/senlin/tasks/copy-certs.yml b/ansible/roles/senlin/tasks/copy-certs.yml
deleted file mode 100644
index 0614aac758..0000000000
--- a/ansible/roles/senlin/tasks/copy-certs.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: "Copy certificates and keys for {{ project_name }}"
- import_role:
- role: service-cert-copy
- vars:
- project_services: "{{ senlin_services }}"
diff --git a/ansible/roles/senlin/tasks/deploy-containers.yml b/ansible/roles/senlin/tasks/deploy-containers.yml
deleted file mode 100644
index eb24ab5c7a..0000000000
--- a/ansible/roles/senlin/tasks/deploy-containers.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- import_tasks: check-containers.yml
diff --git a/ansible/roles/senlin/tasks/deploy.yml b/ansible/roles/senlin/tasks/deploy.yml
deleted file mode 100644
index 3b9ca1ece1..0000000000
--- a/ansible/roles/senlin/tasks/deploy.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- import_tasks: register.yml
-
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-- include_tasks: clone.yml
- when: senlin_dev_mode | bool
-
-- import_tasks: bootstrap.yml
-
-- name: Flush handlers
- meta: flush_handlers
diff --git a/ansible/roles/senlin/tasks/loadbalancer.yml b/ansible/roles/senlin/tasks/loadbalancer.yml
deleted file mode 100644
index ea92b24be6..0000000000
--- a/ansible/roles/senlin/tasks/loadbalancer.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: "Configure loadbalancer for {{ project_name }}"
- import_role:
- name: loadbalancer-config
- vars:
- project_services: "{{ senlin_services }}"
- tags: always
diff --git a/ansible/roles/senlin/tasks/main.yml b/ansible/roles/senlin/tasks/main.yml
deleted file mode 100644
index bc5d1e6257..0000000000
--- a/ansible/roles/senlin/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include_tasks: "{{ kolla_action }}.yml"
diff --git a/ansible/roles/senlin/tasks/precheck.yml b/ansible/roles/senlin/tasks/precheck.yml
deleted file mode 100644
index a5b351022e..0000000000
--- a/ansible/roles/senlin/tasks/precheck.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- import_role:
- name: service-precheck
- vars:
- service_precheck_services: "{{ senlin_services }}"
- service_name: "{{ project_name }}"
-
-- name: Get container facts
- become: true
- kolla_container_facts:
- name:
- - senlin_api
- register: container_facts
-
-- name: Checking free port for Senlin API
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ senlin_api_listen_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - container_facts['senlin_api'] is not defined
- - inventory_hostname in groups['senlin-api']
diff --git a/ansible/roles/senlin/tasks/pull.yml b/ansible/roles/senlin/tasks/pull.yml
deleted file mode 100644
index 53f9c5fda1..0000000000
--- a/ansible/roles/senlin/tasks/pull.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- import_role:
- role: service-images-pull
diff --git a/ansible/roles/senlin/tasks/reconfigure.yml b/ansible/roles/senlin/tasks/reconfigure.yml
deleted file mode 100644
index 5b10a7e111..0000000000
--- a/ansible/roles/senlin/tasks/reconfigure.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- import_tasks: deploy.yml
diff --git a/ansible/roles/senlin/tasks/register.yml b/ansible/roles/senlin/tasks/register.yml
deleted file mode 100644
index 8a2d48d20d..0000000000
--- a/ansible/roles/senlin/tasks/register.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- import_role:
- name: service-ks-register
- vars:
- service_ks_register_auth: "{{ openstack_senlin_auth }}"
- service_ks_register_services: "{{ senlin_ks_services }}"
- service_ks_register_users: "{{ senlin_ks_users }}"
diff --git a/ansible/roles/senlin/tasks/stop.yml b/ansible/roles/senlin/tasks/stop.yml
deleted file mode 100644
index 93b8215b61..0000000000
--- a/ansible/roles/senlin/tasks/stop.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- import_role:
- name: service-stop
- vars:
- project_services: "{{ senlin_services }}"
- service_name: "{{ project_name }}"
diff --git a/ansible/roles/senlin/tasks/upgrade.yml b/ansible/roles/senlin/tasks/upgrade.yml
deleted file mode 100644
index 6ba9f99799..0000000000
--- a/ansible/roles/senlin/tasks/upgrade.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-- import_tasks: bootstrap_service.yml
-
-- name: Flush handlers
- meta: flush_handlers
diff --git a/ansible/roles/senlin/templates/senlin-api.json.j2 b/ansible/roles/senlin/templates/senlin-api.json.j2
deleted file mode 100644
index 91f0e297d2..0000000000
--- a/ansible/roles/senlin/templates/senlin-api.json.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-{
- "command": "senlin-api --config-file /etc/senlin/senlin.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/senlin.conf",
- "dest": "/etc/senlin/senlin.conf",
- "owner": "senlin",
- "perm": "0600"
- }{% if senlin_policy_file is defined %},
- {
- "source": "{{ container_config_directory }}/{{ senlin_policy_file }}",
- "dest": "/etc/senlin/{{ senlin_policy_file }}",
- "owner": "senlin",
- "perm": "0600"
- }{% endif %}
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/senlin",
- "owner": "senlin:senlin",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/senlin/templates/senlin-conductor.json.j2 b/ansible/roles/senlin/templates/senlin-conductor.json.j2
deleted file mode 100644
index 1a612d8efc..0000000000
--- a/ansible/roles/senlin/templates/senlin-conductor.json.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-{
- "command": "senlin-conductor --config-file /etc/senlin/senlin.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/senlin.conf",
- "dest": "/etc/senlin/senlin.conf",
- "owner": "senlin",
- "perm": "0600"
- }{% if senlin_policy_file is defined %},
- {
- "source": "{{ container_config_directory }}/{{ senlin_policy_file }}",
- "dest": "/etc/senlin/{{ senlin_policy_file }}",
- "owner": "senlin",
- "perm": "0600"
- }{% endif %}
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/senlin",
- "owner": "senlin:senlin",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/senlin/templates/senlin-engine.json.j2 b/ansible/roles/senlin/templates/senlin-engine.json.j2
deleted file mode 100644
index 3e3c579743..0000000000
--- a/ansible/roles/senlin/templates/senlin-engine.json.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-{
- "command": "senlin-engine --config-file /etc/senlin/senlin.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/senlin.conf",
- "dest": "/etc/senlin/senlin.conf",
- "owner": "senlin",
- "perm": "0600"
- }{% if senlin_policy_file is defined %},
- {
- "source": "{{ container_config_directory }}/{{ senlin_policy_file }}",
- "dest": "/etc/senlin/{{ senlin_policy_file }}",
- "owner": "senlin",
- "perm": "0600"
- }{% endif %}
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/senlin",
- "owner": "senlin:senlin",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/senlin/templates/senlin-health-manager.json.j2 b/ansible/roles/senlin/templates/senlin-health-manager.json.j2
deleted file mode 100644
index 68196e1170..0000000000
--- a/ansible/roles/senlin/templates/senlin-health-manager.json.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-{
- "command": "senlin-health-manager --config-file /etc/senlin/senlin.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/senlin.conf",
- "dest": "/etc/senlin/senlin.conf",
- "owner": "senlin",
- "perm": "0600"
- }{% if senlin_policy_file is defined %},
- {
- "source": "{{ container_config_directory }}/{{ senlin_policy_file }}",
- "dest": "/etc/senlin/{{ senlin_policy_file }}",
- "owner": "senlin",
- "perm": "0600"
- }{% endif %}
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/senlin",
- "owner": "senlin:senlin",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/senlin/templates/senlin.conf.j2 b/ansible/roles/senlin/templates/senlin.conf.j2
deleted file mode 100644
index 1a4259d09c..0000000000
--- a/ansible/roles/senlin/templates/senlin.conf.j2
+++ /dev/null
@@ -1,88 +0,0 @@
-[DEFAULT]
-debug = {{ senlin_logging_debug }}
-
-log_dir = /var/log/kolla/senlin
-
-transport_url = {{ rpc_transport_url }}
-
-{% if service_name == 'senlin-api' %}
-[senlin_api]
-bind_host = {{ api_interface_address }}
-bind_port = {{ senlin_api_listen_port }}
-workers = {{ senlin_api_workers }}
-{% endif %}
-
-[authentication]
-auth_url = {{ keystone_internal_url }}
-service_username = {{ senlin_keystone_user }}
-service_password = {{ senlin_keystone_password }}
-service_project_name = service
-service_user_domain = default
-service_project_domain = default
-
-{% if service_name == 'senlin-conductor' %}
-[conductor]
-workers = {{ senlin_conductor_workers }}
-{% endif %}
-
-[database]
-connection = mysql+pymysql://{{ senlin_database_user }}:{{ senlin_database_password }}@{{ senlin_database_address }}/{{ senlin_database_name }}
-connection_recycle_time = {{ database_connection_recycle_time }}
-max_pool_size = {{ database_max_pool_size }}
-max_retries = -1
-
-{% if service_name == 'senlin-engine' %}
-[engine]
-workers = {{ senlin_engine_workers }}
-{% endif %}
-
-{% if service_name == 'senlin-health-manager' %}
-[health_manager]
-workers = {{ senlin_health_manager_workers }}
-{% endif %}
-
-[keystone_authtoken]
-service_type = clustering
-www_authenticate_uri = {{ keystone_internal_url }}
-auth_url = {{ keystone_internal_url }}
-auth_type = password
-project_domain_id = {{ default_project_domain_id }}
-user_domain_id = {{ default_user_domain_id }}
-project_name = service
-username = {{ senlin_keystone_user }}
-password = {{ senlin_keystone_password }}
-service_token_roles_required = False
-cafile = {{ openstack_cacert }}
-region_name = {{ openstack_region_name }}
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[oslo_messaging_notifications]
-transport_url = {{ notify_transport_url }}
-{% if senlin_enabled_notification_topics %}
-driver = messagingv2
-topics = {{ senlin_enabled_notification_topics | map(attribute='name') | join(',') }}
-{% else %}
-driver = noop
-{% endif %}
-
-{% if om_enable_rabbitmq_tls | bool %}
-[oslo_messaging_rabbit]
-ssl = true
-ssl_ca_file = {{ om_rabbitmq_cacert }}
-{% endif %}
-
-{% if senlin_policy_file is defined %}
-[oslo_policy]
-policy_file = {{ senlin_policy_file }}
-{% endif %}
-
-{% if enable_osprofiler | bool %}
-[profiler]
-enabled = true
-trace_sqlalchemy = true
-hmac_keys = {{ osprofiler_secret }}
-connection_string = {{ osprofiler_backend_connection_string }}
-{% endif %}
diff --git a/ansible/roles/senlin/vars/main.yml b/ansible/roles/senlin/vars/main.yml
deleted file mode 100644
index 03c18bdb3d..0000000000
--- a/ansible/roles/senlin/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-project_name: "senlin"
diff --git a/ansible/roles/service-cert-copy/defaults/main.yml b/ansible/roles/service-cert-copy/defaults/main.yml
index 0238b3c8f4..24b97c760a 100644
--- a/ansible/roles/service-cert-copy/defaults/main.yml
+++ b/ansible/roles/service-cert-copy/defaults/main.yml
@@ -1,3 +1,4 @@
---
kolla_externally_managed_cert: False
+kolla_copy_backend_tls_files: "{{ lookup('vars', (kolla_role_name | default(project_name)) + '_enable_tls_backend', default=false) }}"
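The new default resolves a per-role opt-in flag by variable name, so a role enables copying of backend TLS files simply by defining `<role>_enable_tls_backend`. A sketch of how the lookup resolves, using a hypothetical role named `glance` for illustration:

    # With kolla_role_name/project_name == "glance", the lookup reads
    # the variable "glance_enable_tls_backend" and falls back to false
    # when it is undefined.
    glance_enable_tls_backend: "yes"   # hypothetical role default
    # => kolla_copy_backend_tls_files evaluates to true for that role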
diff --git a/ansible/roles/service-cert-copy/tasks/main.yml b/ansible/roles/service-cert-copy/tasks/main.yml
index 2e4b8be089..4b540302bd 100644
--- a/ansible/roles/service-cert-copy/tasks/main.yml
+++ b/ansible/roles/service-cert-copy/tasks/main.yml
@@ -8,8 +8,6 @@
when:
- kolla_copy_ca_into_containers | bool
with_dict: "{{ project_services | select_services_enabled_and_mapped_to_host }}"
- notify:
- - "Restart {{ item.key }} container"
- name: "{{ project_name }} | Copying over backend internal TLS certificate"
vars:
@@ -25,13 +23,8 @@
mode: "0644"
become: true
when:
- - item.value.haproxy is defined
- - item.value.haproxy.values() | selectattr('enabled', 'defined') | map(attribute='enabled') | map('bool') | select | list | length > 0
- - item.value.haproxy.values() | selectattr('tls_backend', 'defined') | map(attribute='tls_backend') | map('bool') | select | list | length > 0
- - not kolla_externally_managed_cert | bool
+ - kolla_copy_backend_tls_files | bool
with_dict: "{{ project_services | select_services_enabled_and_mapped_to_host }}"
- notify:
- - "Restart {{ item.key }} container"
- name: "{{ project_name }} | Copying over backend internal TLS key"
vars:
@@ -47,10 +40,5 @@
mode: "0600"
become: true
when:
- - item.value.haproxy is defined
- - item.value.haproxy.values() | selectattr('enabled', 'defined') | map(attribute='enabled') | map('bool') | select | list | length > 0
- - item.value.haproxy.values() | selectattr('tls_backend', 'defined') | map(attribute='tls_backend') | map('bool') | select | list | length > 0
- - not kolla_externally_managed_cert | bool
+ - kolla_copy_backend_tls_files | bool
with_dict: "{{ project_services | select_services_enabled_and_mapped_to_host }}"
- notify:
- - "Restart {{ item.key }} container"
diff --git a/ansible/roles/service-check-containers/tasks/main.yml b/ansible/roles/service-check-containers/tasks/main.yml
new file mode 100644
index 0000000000..41b990cb83
--- /dev/null
+++ b/ansible/roles/service-check-containers/tasks/main.yml
@@ -0,0 +1,37 @@
+---
+# NOTE(r-krcek): The list of arguments should follow the argument_spec
+# of the kolla_container module.
+- name: "{{ kolla_role_name | default(project_name) }} | Check containers"
+ become: true
+ vars:
+ service: "{{ item.value }}"
+ kolla_container:
+ action: "compare_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ service.container_name }}"
+ image: "{{ service.image | default(omit) }}"
+ volumes: "{{ service.volumes | default(omit) }}"
+ dimensions: "{{ service.dimensions | default(omit) }}"
+ tmpfs: "{{ service.tmpfs | default(omit) }}"
+ volumes_from: "{{ service.volumes_from | default(omit) }}"
+ privileged: "{{ service.privileged | default(omit) }}"
+ cap_add: "{{ service.cap_add | default(omit) }}"
+ environment: "{{ service.environment | default(omit) }}"
+ healthcheck: "{{ service.healthcheck | default(omit) }}"
+ ipc_mode: "{{ service.ipc_mode | default(omit) }}"
+ pid_mode: "{{ service.pid_mode | default(omit) }}"
+ security_opt: "{{ service.security_opt | default(omit) }}"
+ labels: "{{ service.labels | default(omit) }}"
+ command: "{{ service.command | default(omit) }}"
+ cgroupns_mode: "{{ service.cgroupns_mode | default(omit) }}"
+ with_dict: "{{ lookup('vars', (kolla_role_name | default(project_name)) + '_services') | select_services_enabled_and_mapped_to_host }}"
+ register: container_check
+
+# NOTE(yoctozepto): This must be a separate task because the whole result
+# is not visible in the previous task, and Ansible has a quirk regarding
+# notifiers. For details see https://github.com/ansible/ansible/issues/22579
+- name: "{{ kolla_role_name | default(project_name) }} | Notify handlers to restart containers"
+ debug:
+ msg: Notifying handlers
+ changed_when: container_check is changed
+ notify: "{{ container_check.results | select('changed') | map(attribute='item.key') | map('regex_replace', '^(.*)$', 'Restart \\1 container') | list }}"
diff --git a/ansible/roles/service-config-validate/defaults/main.yml b/ansible/roles/service-config-validate/defaults/main.yml
new file mode 100644
index 0000000000..933689f8bc
--- /dev/null
+++ b/ansible/roles/service-config-validate/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+# Common role for validating service configuration files.
+
+service_config_validate_output_dir: "/var/log/kolla/config-validate"
+# Dict of services whose configuration should be validated.
+service_config_validate_services: {}
diff --git a/ansible/roles/service-config-validate/tasks/main.yml b/ansible/roles/service-config-validate/tasks/main.yml
new file mode 100644
index 0000000000..ae2cbe2ea3
--- /dev/null
+++ b/ansible/roles/service-config-validate/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+- name: "{{ project_name }} | Validate configurations for each service"
+ vars:
+ service_name: "{{ outer_item.key }}"
+ service: "{{ outer_item.value }}"
+ output_dir: "{{ service_config_validate_output_dir }}/{{ inventory_hostname }}/{{ project_name }}/{{ service_name }}"
+ include_tasks: validate.yml
+ loop: "{{ query('dict', service_config_validate_services | select_services_enabled_and_mapped_to_host) }}"
+ loop_control:
+ label: "{{ service_name }}"
+ loop_var: outer_item
+ when:
+ - service_config_validation is defined
diff --git a/ansible/roles/service-config-validate/tasks/validate.yml b/ansible/roles/service-config-validate/tasks/validate.yml
new file mode 100644
index 0000000000..025f55b8bd
--- /dev/null
+++ b/ansible/roles/service-config-validate/tasks/validate.yml
@@ -0,0 +1,52 @@
+---
+- name: "{{ project_name }} : {{ service.container_name }} | Get info on container"
+ become: true
+ kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
+ name:
+ - "{{ service.container_name }}"
+ register: container_info
+
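+# Each service_config_validation entry is expected to provide a 'config'
+# path (the file to validate) and a 'generator' path (the validator config).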
+- name: "{{ project_name }} : {{ service.container_name }} | Validate configurations"
+ become: true
+ command: >
+ {{ kolla_container_engine }} exec {{ service.container_name }}
+ bash -c "[[ -f {{ inner_item['config'] }} ]] && oslo-config-validator --config-file {{ inner_item['generator'] }} --input-file {{ inner_item['config'] }}"
+ when:
+ - container_info._containers | length > 0
+ register: result
+ failed_when: result.rc not in [0, 1] # rc 1 is expected when errors are found in the config file, or when the config file doesn't exist
+ with_items: "{{ service_config_validation }}"
+ loop_control:
+ label: "{{ inner_item['config'] | basename }}"
+ loop_var: inner_item
+ changed_when: false
+
+- name: "{{ project_name }} : {{ service.container_name }} | Ensure log directory exists"
+ become: true
+ file:
+ path: "{{ output_dir }}"
+ state: directory
+ when:
+ - result.results | map(attribute='rc', default=0) | select('equalto', 1) | list | length > 0
+ - result.results | map(attribute='stderr', default="") | select('ne', "") | list | length > 0
+ delegate_to: localhost
+
+- name: "{{ project_name }} : {{ service.container_name }} | Log configuration errors"
+ become: true
+ copy:
+ content: "{{ inner_item.stderr }}"
+ dest: "{{ output_dir }}/{{ inner_item.inner_item.config | basename }}.err"
+ when:
+ - container_info._containers | length > 0
+ - inner_item.rc is defined
+ - inner_item.rc == 1
+ - inner_item.stderr != ""
+ loop: "{{ result.results }}"
+ loop_control:
+ label: "{{ inner_item.inner_item.config | basename }}"
+ loop_var: inner_item
+ delegate_to: localhost
diff --git a/ansible/roles/service-images-pull/tasks/main.yml b/ansible/roles/service-images-pull/tasks/main.yml
index cb526bfb31..fbb9e8b8bc 100644
--- a/ansible/roles/service-images-pull/tasks/main.yml
+++ b/ansible/roles/service-images-pull/tasks/main.yml
@@ -3,7 +3,7 @@
vars:
service: "{{ item.value }}"
become: true
- kolla_docker:
+ kolla_container:
action: "pull_image"
common_options: "{{ docker_common_options }}"
image: "{{ service.image }}"
@@ -14,3 +14,5 @@
with_dict: "{{ lookup('vars', (kolla_role_name | default(project_name)) + '_services') | select_services_enabled_and_mapped_to_host }}"
loop_control:
label: "{{ item.key }}"
+ tags:
+ - service-images-pull
diff --git a/ansible/roles/service-ks-register/tasks/main.yml b/ansible/roles/service-ks-register/tasks/main.yml
index e96bf2b5ca..8e54baa0da 100644
--- a/ansible/roles/service-ks-register/tasks/main.yml
+++ b/ansible/roles/service-ks-register/tasks/main.yml
@@ -2,7 +2,8 @@
- block:
- name: "{{ project_name }} | Creating services"
kolla_toolbox:
- module_name: "os_keystone_service"
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: openstack.cloud.catalog_service
module_args:
name: "{{ item.name }}"
service_type: "{{ item.type }}"
@@ -13,9 +14,7 @@
cacert: "{{ service_ks_cacert }}"
loop: "{{ service_ks_register_services }}"
loop_control:
- label:
- name: "{{ item.name }}"
- service_type: "{{ item.type }}"
+ label: "{{ item.name }} ({{ item.type }})"
register: service_ks_register_result
until: service_ks_register_result is success
retries: "{{ service_ks_register_retries }}"
@@ -24,7 +23,8 @@
- name: "{{ project_name }} | Creating endpoints"
kolla_toolbox:
- module_name: "os_keystone_endpoint"
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: openstack.cloud.endpoint
module_args:
service: "{{ item.0.name }}"
url: "{{ item.1.url }}"
@@ -38,10 +38,7 @@
- "{{ service_ks_register_services }}"
- endpoints
loop_control:
- label:
- service: "{{ item.0.name }}"
- url: "{{ item.1.url }}"
- interface: "{{ item.1.interface }}"
+ label: "{{ item.0.name }} -> {{ item.1.url }} -> {{ item.1.interface }}"
register: service_ks_register_result
until: service_ks_register_result is success
retries: "{{ service_ks_register_retries }}"
@@ -50,7 +47,8 @@
- name: "{{ project_name }} | Creating projects"
kolla_toolbox:
- module_name: "os_project"
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: openstack.cloud.project
module_args:
name: "{{ item }}"
domain: "{{ service_ks_register_domain }}"
@@ -66,11 +64,13 @@
- name: "{{ project_name }} | Creating users"
kolla_toolbox:
- module_name: "os_user"
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: openstack.cloud.identity_user
module_args:
default_project: "{{ item.project }}"
name: "{{ item.user }}"
password: "{{ item.password }}"
+ update_password: "{{ 'always' if update_keystone_service_user_passwords | bool else 'on_create' }}"
domain: "{{ service_ks_register_domain }}"
region_name: "{{ service_ks_register_region_name }}"
auth: "{{ service_ks_register_auth }}"
@@ -78,9 +78,7 @@
cacert: "{{ service_ks_cacert }}"
with_items: "{{ service_ks_register_users }}"
loop_control:
- label:
- user: "{{ item.user }}"
- project: "{{ item.project }}"
+ label: "{{ item.user }} -> {{ item.project }}"
register: service_ks_register_result
until: service_ks_register_result is success
retries: "{{ service_ks_register_retries }}"
@@ -88,7 +86,8 @@
- name: "{{ project_name }} | Creating roles"
kolla_toolbox:
- module_name: "os_keystone_role"
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: openstack.cloud.identity_role
module_args:
name: "{{ item }}"
region_name: "{{ service_ks_register_region_name }}"
@@ -103,22 +102,21 @@
- name: "{{ project_name }} | Granting user roles"
kolla_toolbox:
- module_name: "os_user_role"
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: openstack.cloud.role_assignment
module_args:
user: "{{ item.user }}"
role: "{{ item.role }}"
- project: "{{ item.project }}"
- domain: "{{ service_ks_register_domain }}"
+ project: "{{ item.project | default(omit) }}"
+ domain: "{{ item.domain | default(omit) }}"
+ system: "{{ item.system | default(omit) }}"
region_name: "{{ service_ks_register_region_name }}"
auth: "{{ service_ks_register_auth }}"
interface: "{{ service_ks_register_interface }}"
cacert: "{{ service_ks_cacert }}"
with_items: "{{ service_ks_register_users + service_ks_register_user_roles }}"
loop_control:
- label:
- user: "{{ item.user }}"
- role: "{{ item.role }}"
- project: "{{ item.project }}"
+ label: "{{ item.user }} -> {{ item.project | default(item.domain) | default(item.system) }} -> {{ item.role }}"
register: service_ks_register_result
until: service_ks_register_result is success
retries: "{{ service_ks_register_retries }}"
diff --git a/ansible/roles/service-rabbitmq/tasks/main.yml b/ansible/roles/service-rabbitmq/tasks/main.yml
index 6475d89fcc..bc1cb2c7d0 100644
--- a/ansible/roles/service-rabbitmq/tasks/main.yml
+++ b/ansible/roles/service-rabbitmq/tasks/main.yml
@@ -2,6 +2,7 @@
- block:
- name: "{{ project_name }} | Ensure RabbitMQ vhosts exist"
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: rabbitmq_vhost
module_args:
name: "{{ item }}"
@@ -14,11 +15,12 @@
- name: "{{ project_name }} | Ensure RabbitMQ users exist"
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: rabbitmq_user
module_args:
user: "{{ item.user }}"
password: "{{ item.password }}"
- node: "rabbit@{{ ansible_facts.hostname }}"
+ node: "rabbit@{{ hostvars[service_rabbitmq_delegate_host]['ansible_facts']['hostname'] }}"
update_password: always
vhost: "{{ item.vhost }}"
configure_priv: ".*"
@@ -28,9 +30,7 @@
user: rabbitmq
loop: "{{ service_rabbitmq_users }}"
loop_control:
- label:
- user: "{{ item.user }}"
- vhost: "{{ item.vhost }}"
+ label: "{{ item.user }} -> {{ item.vhost }}"
register: service_rabbitmq_result
until: service_rabbitmq_result is success
retries: "{{ service_rabbitmq_retries }}"
diff --git a/ansible/roles/service-stop/tasks/main.yml b/ansible/roles/service-stop/tasks/main.yml
index c8e5604c6f..54265ed411 100644
--- a/ansible/roles/service-stop/tasks/main.yml
+++ b/ansible/roles/service-stop/tasks/main.yml
@@ -3,7 +3,7 @@
vars:
service: "{{ item.value }}"
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
diff --git a/ansible/roles/service-uwsgi-config/defaults/main.yml b/ansible/roles/service-uwsgi-config/defaults/main.yml
new file mode 100644
index 0000000000..6e5225c926
--- /dev/null
+++ b/ansible/roles/service-uwsgi-config/defaults/main.yml
@@ -0,0 +1,12 @@
+---
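+# NOTE: uwsgi.ini.j2 also consumes variables that have no default here:
+# service_uwsgi_config_http_port, service_uwsgi_config_module (or
+# service_uwsgi_config_wsgi_file) and, when TLS is enabled,
+# service_uwsgi_config_tls_cert and service_uwsgi_config_tls_key.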
+service_uwsgi_config_host: "{{ api_interface_address | put_address_in_context('url') }}"
+service_uwsgi_config_file: "{{ node_config_directory }}/{{ service_name }}/{{ service_name }}-uwsgi.ini"
+service_uwsgi_config_log_dir: "{{ ansible_parent_role_names | first }}"
+service_uwsgi_config_log_file: "{{ service_name }}-uwsgi.log"
+service_uwsgi_config_tls_backend: false
+service_uwsgi_config_worker_timeout: 80
+service_uwsgi_config_workers: "{{ openstack_service_workers }}"
diff --git a/ansible/roles/service-uwsgi-config/tasks/main.yml b/ansible/roles/service-uwsgi-config/tasks/main.yml
new file mode 100644
index 0000000000..80512589c2
--- /dev/null
+++ b/ansible/roles/service-uwsgi-config/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+- name: "Copying over {{ service_name }} uWSGI config"
+ template:
+ src: "uwsgi.ini.j2"
+ dest: "{{ service_uwsgi_config_file }}"
+ mode: "0660"
+ become: true
+ notify:
+ - Restart {{ service_name }} container
diff --git a/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2 b/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2
new file mode 100644
index 0000000000..aafe3d79a7
--- /dev/null
+++ b/ansible/roles/service-uwsgi-config/templates/uwsgi.ini.j2
@@ -0,0 +1,28 @@
+[uwsgi]
+add-header = Connection: close
+buffer-size = 65535
+die-on-term = true
+enable-threads = true
+exit-on-reload = false
+hook-master-start = unix_signal:15 gracefully_kill_them_all
+{% if service_uwsgi_config_tls_backend | bool %}
+https = {{ service_uwsgi_config_host }}:{{ service_uwsgi_config_http_port }},{{ service_uwsgi_config_tls_cert }},{{ service_uwsgi_config_tls_key }}
+{% else %}
+http = {{ service_uwsgi_config_host }}:{{ service_uwsgi_config_http_port }}
+{% endif %}
+lazy-apps = true
+logto2 = /var/log/kolla/{{ service_uwsgi_config_log_dir }}/{{ service_uwsgi_config_log_file }}
+master = true
+{% if service_uwsgi_config_module is defined %}
+module = {{ service_uwsgi_config_module }}
+{% elif service_uwsgi_config_wsgi_file is defined %}
+wsgi-file = {{ service_uwsgi_config_wsgi_file }}
+{% endif %}
+plugins-dir = {{ '/usr/lib/uwsgi/plugins' if kolla_base_distro in ['ubuntu', 'debian'] else '/usr/lib64/uwsgi' }}
+plugins = python3
+processes = {{ service_uwsgi_config_workers }}
+thunder-lock = true
+{% if service_uwsgi_config_uid is defined %}
+uid = {{ service_uwsgi_config_uid }}
+{% endif %}
+worker-reload-mercy = {{ service_uwsgi_config_worker_timeout }}
diff --git a/ansible/roles/skydive/defaults/main.yml b/ansible/roles/skydive/defaults/main.yml
deleted file mode 100644
index 6c4364e82c..0000000000
--- a/ansible/roles/skydive/defaults/main.yml
+++ /dev/null
@@ -1,89 +0,0 @@
----
-skydive_services:
- skydive-analyzer:
- container_name: skydive_analyzer
- group: skydive-analyzer
- enabled: true
- image: "{{ skydive_analyzer_image_full }}"
- volumes: "{{ skydive_analyzer_default_volumes + skydive_analyzer_extra_volumes }}"
- dimensions: "{{ skydive_analyzer_dimensions }}"
- healthcheck: "{{ skydive_analyzer_healthcheck }}"
- haproxy:
- skydive_server:
- enabled: "{{ enable_skydive }}"
- mode: "http"
- external: false
- port: "{{ skydive_analyzer_port }}"
- skydive_server_external:
- enabled: "{{ enable_skydive }}"
- mode: "http"
- external: true
- port: "{{ skydive_analyzer_port }}"
- skydive-agent:
- container_name: skydive_agent
- group: skydive-agent
- enabled: true
- image: "{{ skydive_agent_image_full }}"
- privileged: True
- volumes: "{{ skydive_agent_default_volumes + skydive_agent_extra_volumes }}"
- dimensions: "{{ skydive_agent_dimensions }}"
- healthcheck: "{{ skydive_agent_healthcheck }}"
-
-####################
-# Docker
-####################
-skydive_tag: "{{ openstack_tag }}"
-
-skydive_analyzer_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/skydive-analyzer"
-skydive_analyzer_tag: "{{ skydive_tag }}"
-skydive_analyzer_image_full: "{{ skydive_analyzer_image }}:{{ skydive_analyzer_tag }}"
-
-skydive_admin_tenant_name: "{{ keystone_admin_project }}"
-skydive_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/skydive-agent"
-skydive_agent_tag: "{{ skydive_tag }}"
-skydive_agent_image_full: "{{ skydive_agent_image }}:{{ skydive_agent_tag }}"
-skydive_analyzer_dimensions: "{{ default_container_dimensions }}"
-skydive_agent_dimensions: "{{ default_container_dimensions }}"
-
-skydive_analyzer_enable_healthchecks: "{{ enable_container_healthchecks }}"
-skydive_analyzer_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-skydive_analyzer_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-skydive_analyzer_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-skydive_analyzer_healthcheck_test: ["CMD-SHELL", "healthcheck_listen skydive {{ skydive_analyzer_port }}"]
-skydive_analyzer_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-skydive_analyzer_healthcheck:
- interval: "{{ skydive_analyzer_healthcheck_interval }}"
- retries: "{{ skydive_analyzer_healthcheck_retries }}"
- start_period: "{{ skydive_analyzer_healthcheck_start_period }}"
- test: "{% if skydive_analyzer_enable_healthchecks | bool %}{{ skydive_analyzer_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ skydive_analyzer_healthcheck_timeout }}"
-
-skydive_agent_enable_healthchecks: "{{ enable_container_healthchecks }}"
-skydive_agent_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-skydive_agent_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-skydive_agent_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-skydive_agent_healthcheck_test: ["CMD-SHELL", "healthcheck_listen skydive {{ skydive_agents_port }}"]
-skydive_agent_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-skydive_agent_healthcheck:
- interval: "{{ skydive_agent_healthcheck_interval }}"
- retries: "{{ skydive_agent_healthcheck_retries }}"
- start_period: "{{ skydive_agent_healthcheck_start_period }}"
- test: "{% if skydive_agent_enable_healthchecks | bool %}{{ skydive_agent_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ skydive_agent_healthcheck_timeout }}"
-
-skydive_analyzer_default_volumes:
- - "{{ node_config_directory }}/skydive-analyzer/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla/"
-skydive_agent_default_volumes:
- - "{{ node_config_directory }}/skydive-agent/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "/var/run/openvswitch:/var/run/openvswitch:ro"
- - "/var/run/netns:/host/run:shared"
- - "kolla_logs:/var/log/kolla/"
-
-skydive_extra_volumes: "{{ default_extra_volumes }}"
-skydive_analyzer_extra_volumes: "{{ skydive_extra_volumes }}"
-skydive_agent_extra_volumes: "{{ skydive_extra_volumes }}"
diff --git a/ansible/roles/skydive/handlers/main.yml b/ansible/roles/skydive/handlers/main.yml
deleted file mode 100644
index 9788ee780b..0000000000
--- a/ansible/roles/skydive/handlers/main.yml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-- name: Restart skydive-analyzer container
- vars:
- service_name: "skydive-analyzer"
- service: "{{ skydive_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
-
-- name: Restart skydive-agent container
- vars:
- service_name: "skydive-agent"
- service: "{{ skydive_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- privileged: "{{ service.privileged }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/skydive/tasks/check-containers.yml b/ansible/roles/skydive/tasks/check-containers.yml
deleted file mode 100644
index c63f6655f3..0000000000
--- a/ansible/roles/skydive/tasks/check-containers.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- name: Check skydive containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- privileged: "{{ item.value.privileged | default(False) }}"
- volumes: "{{ item.value.volumes }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ skydive_services }}"
- notify:
- - "Restart {{ item.key }} container"
diff --git a/ansible/roles/skydive/tasks/config.yml b/ansible/roles/skydive/tasks/config.yml
deleted file mode 100644
index c58404f8b2..0000000000
--- a/ansible/roles/skydive/tasks/config.yml
+++ /dev/null
@@ -1,47 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item.key }}"
- state: "directory"
- owner: "{{ config_owner_user }}"
- group: "{{ config_owner_group }}"
- mode: "0770"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ skydive_services }}"
-
-- include_tasks: copy-certs.yml
- when:
- - kolla_copy_ca_into_containers | bool
-
-- name: Copying over default config.json files
- template:
- src: "{{ item.key }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ skydive_services }}"
- notify:
- - Restart {{ item.key }} container
-
-- name: Copying over skydive config file
- merge_yaml:
- sources:
- - "{{ role_path }}/templates/{{ item.key }}.conf.j2"
- - "{{ node_custom_config }}/skydive.conf"
- - "{{ node_custom_config }}/skydive/{{ item.key }}.conf"
- - "{{ node_custom_config }}/skydive/{{ inventory_hostname }}/{{ item.key }}.conf"
- dest: "{{ node_config_directory }}/{{ item.key }}/skydive.conf"
- mode: "0660"
- become: true
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ skydive_services }}"
- notify:
- - Restart {{ item.key }} container
diff --git a/ansible/roles/skydive/tasks/copy-certs.yml b/ansible/roles/skydive/tasks/copy-certs.yml
deleted file mode 100644
index 99a2333cac..0000000000
--- a/ansible/roles/skydive/tasks/copy-certs.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: "Copy certificates and keys for {{ project_name }}"
- import_role:
- role: service-cert-copy
- vars:
- project_services: "{{ skydive_services }}"
diff --git a/ansible/roles/skydive/tasks/deploy-containers.yml b/ansible/roles/skydive/tasks/deploy-containers.yml
deleted file mode 100644
index eb24ab5c7a..0000000000
--- a/ansible/roles/skydive/tasks/deploy-containers.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- import_tasks: check-containers.yml
diff --git a/ansible/roles/skydive/tasks/deploy.yml b/ansible/roles/skydive/tasks/deploy.yml
deleted file mode 100644
index 49edff81e3..0000000000
--- a/ansible/roles/skydive/tasks/deploy.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-- name: Flush handlers
- meta: flush_handlers
diff --git a/ansible/roles/skydive/tasks/loadbalancer.yml b/ansible/roles/skydive/tasks/loadbalancer.yml
deleted file mode 100644
index 0c51ccfdbc..0000000000
--- a/ansible/roles/skydive/tasks/loadbalancer.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: "Configure loadbalancer for {{ project_name }}"
- import_role:
- name: loadbalancer-config
- vars:
- project_services: "{{ skydive_services }}"
- tags: always
diff --git a/ansible/roles/skydive/tasks/main.yml b/ansible/roles/skydive/tasks/main.yml
deleted file mode 100644
index bc5d1e6257..0000000000
--- a/ansible/roles/skydive/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include_tasks: "{{ kolla_action }}.yml"
diff --git a/ansible/roles/skydive/tasks/precheck.yml b/ansible/roles/skydive/tasks/precheck.yml
deleted file mode 100644
index 711d1b855d..0000000000
--- a/ansible/roles/skydive/tasks/precheck.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- import_role:
- name: service-precheck
- vars:
- service_precheck_services: "{{ skydive_services }}"
- service_name: "{{ project_name }}"
-
-- name: Get container facts
- become: true
- kolla_container_facts:
- name:
- - skydive_analyzer
- - skydive_agent
- register: container_facts
-
-- name: Checking free port for Skydive Analyzer
- vars:
- skydive_analyzer: "{{ skydive_services['skydive-analyzer'] }}"
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ skydive_analyzer_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - container_facts['skydive_analyzer'] is not defined
- - inventory_hostname in groups[skydive_analyzer.group]
- - skydive_analyzer.enabled | bool
-
-- name: Checking free port for Skydive Agent
- vars:
- skydive_agent: "{{ skydive_services['skydive-agent'] }}"
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ skydive_agents_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - container_facts['skydive_agent'] is not defined
- - inventory_hostname in groups[skydive_agent.group]
- - skydive_agent.enabled | bool
diff --git a/ansible/roles/skydive/tasks/pull.yml b/ansible/roles/skydive/tasks/pull.yml
deleted file mode 100644
index 53f9c5fda1..0000000000
--- a/ansible/roles/skydive/tasks/pull.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- import_role:
- role: service-images-pull
diff --git a/ansible/roles/skydive/tasks/reconfigure.yml b/ansible/roles/skydive/tasks/reconfigure.yml
deleted file mode 100644
index 5b10a7e111..0000000000
--- a/ansible/roles/skydive/tasks/reconfigure.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- import_tasks: deploy.yml
diff --git a/ansible/roles/skydive/tasks/stop.yml b/ansible/roles/skydive/tasks/stop.yml
deleted file mode 100644
index 93397a2de5..0000000000
--- a/ansible/roles/skydive/tasks/stop.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- import_role:
- name: service-stop
- vars:
- project_services: "{{ skydive_services }}"
- service_name: "{{ project_name }}"
diff --git a/ansible/roles/skydive/tasks/upgrade.yml b/ansible/roles/skydive/tasks/upgrade.yml
deleted file mode 100644
index 49edff81e3..0000000000
--- a/ansible/roles/skydive/tasks/upgrade.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-- name: Flush handlers
- meta: flush_handlers
diff --git a/ansible/roles/skydive/templates/skydive-agent.conf.j2 b/ansible/roles/skydive/templates/skydive-agent.conf.j2
deleted file mode 100644
index 69fda2b272..0000000000
--- a/ansible/roles/skydive/templates/skydive-agent.conf.j2
+++ /dev/null
@@ -1,70 +0,0 @@
-### Skydive agent config file
-
-auth:
- analyzer_username: {{ openstack_auth['username'] }}
- analyzer_password: {{ openstack_auth['password'] }}
-
-logging:
- level: INFO
- backends:
- - file
- file:
- path: /var/log/kolla/skydive/skydive-agent.log
-
-etcd:
- servers:
-{% if enable_etcd | bool %}
-{% for host in groups['etcd'] %}
- - {{ etcd_protocol }}://{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ etcd_client_port }}
-{% endfor %}
-{% else %}
-{% for host in groups['skydive-analyzer'] %}
- - {{ etcd_protocol }}://{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ etcd_client_port }}
-{% endfor %}
-{% endif %}
-
-analyzers:
-{% for host in groups['skydive-analyzer'] %}
- - {{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ skydive_analyzer_port }}
-{% endfor %}
-
-agent:
- listen: {{ 'api' | kolla_address | put_address_in_context('url') }}:{{ skydive_agents_port }}
- flow:
- probes:
- - gopacket
-{% if neutron_plugin_agent in ['openvswitch'] %}
- - ovssflow
-{% endif %}
- topology:
- probes:
- - netlink
- - netns
- - neutron
-{% if neutron_plugin_agent in ['openvswitch'] %}
- - ovsdb
-{% endif %}
-
-### TODO migrate from tenant_name to system_scope when supported in skydive
- neutron:
- auth_url: {{ keystone_internal_url }}
- username: {{ openstack_auth['username'] }}
- password: {{ openstack_auth['password'] }}
- tenant_name: {{ skydive_admin_tenant_name }}
- region_name: {{ openstack_region_name }}
- domain_name: Default
- endpoint_type: internal
-
-netns:
- run_path: /host/run
-
-flow:
- expire: 600
- update: 60
-
-{% if neutron_plugin_agent in ['openvswitch'] %}
-ovs:
- ovsdb: tcp://127.0.0.1:{{ ovsdb_port }}
- oflow:
- enable: true
-{% endif %}
diff --git a/ansible/roles/skydive/templates/skydive-agent.json.j2 b/ansible/roles/skydive/templates/skydive-agent.json.j2
deleted file mode 100644
index 2ba3bbd5ad..0000000000
--- a/ansible/roles/skydive/templates/skydive-agent.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "skydive agent --conf /etc/skydive/skydive.conf --listen={{ api_interface_address | put_address_in_context('url') }}:{{ skydive_agents_port }}",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/skydive.conf",
- "dest": "/etc/skydive/skydive.conf",
- "owner": "skydive",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/skydive",
- "owner": "skydive:skydive",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/skydive/templates/skydive-analyzer.conf.j2 b/ansible/roles/skydive/templates/skydive-analyzer.conf.j2
deleted file mode 100644
index 86fe018fd9..0000000000
--- a/ansible/roles/skydive/templates/skydive-analyzer.conf.j2
+++ /dev/null
@@ -1,71 +0,0 @@
-### Skydive analyzer config file
-
-### TODO migrate from tenant_name to system_scope when supported in skydive
-auth:
- keystone:
- type: keystone
- auth_url: {{ keystone_internal_url }}
- region_name: {{ openstack_region_name }}
- tenant_name: {{ skydive_admin_tenant_name }}
- domain_name: Default
-
-logging:
- level: INFO
- backends:
- - file
- file:
- path: /var/log/kolla/skydive/skydive-analyzer.log
-
-analyzers:
-{% for host in groups['skydive-analyzer'] %}
- - {{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ skydive_analyzer_port }}
-{% endfor %}
-
-etcd:
- client_timeout: 100
-{% if enable_etcd | bool %}
- embedded: false
- servers:
-{% for host in groups['etcd'] %}
- - {{ etcd_protocol }}://{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ etcd_client_port }}
-{% endfor %}
-{% else %}
- embedded: true
- servers:
-{% for host in groups['skydive-analyzer'] %}
- - {{ etcd_protocol }}://{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ etcd_client_port }}
-{% endfor %}
- listen: {{ api_interface_address | put_address_in_context('url') }}:{{ etcd_client_port }}
-{% endif %}
-
-analyzer:
- auth:
- api:
- backend: keystone
- listen: {{ api_interface_address | put_address_in_context('url') }}:{{ skydive_analyzer_port }}
- storage:
- backend: elasticsearch
-{% if groups['skydive-agent'] | length > 1 %}
- topology:
- fabric:
-{% for interface in [network_interface, neutron_external_interface]|unique %}
-{% set interfaces_loop = loop %}
-{% for host in groups['skydive-agent'] %}
- - TOR{{ interfaces_loop.index }}[Name=tor{{ interfaces_loop.index }}] -> TOR{{ interfaces_loop.index }}_PORT{{ loop.index }}[Name=port{{ loop.index }}, MTU=1500]
- - TOR{{ interfaces_loop.index }}_PORT{{ loop.index }} -> *[Type=host,Name={{ hostvars[host].ansible_facts.hostname }}]/{{ interface }}
-{% endfor %}
-{% endfor %}
-{% endif %}
-
-storage:
- elasticsearch:
- host: {{ elasticsearch_address | put_address_in_context('url') }}:{{ elasticsearch_port }}
- maxconns: 10
- retry: 60
-
-graph:
- backend: elasticsearch
-
-flow:
- expire: 600
- update: 60
diff --git a/ansible/roles/skydive/templates/skydive-analyzer.json.j2 b/ansible/roles/skydive/templates/skydive-analyzer.json.j2
deleted file mode 100644
index ce8b525ec4..0000000000
--- a/ansible/roles/skydive/templates/skydive-analyzer.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "skydive analyzer --conf /etc/skydive/skydive.conf --listen={{ api_interface_address | put_address_in_context('url') }}:{{ skydive_analyzer_port }}",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/skydive.conf",
- "dest": "/etc/skydive/skydive.conf",
- "owner": "skydive",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/skydive",
- "owner": "skydive:skydive",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/skydive/vars/main.yml b/ansible/roles/skydive/vars/main.yml
deleted file mode 100644
index 56c3d1521a..0000000000
--- a/ansible/roles/skydive/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-project_name: "skydive"
diff --git a/ansible/roles/skyline/defaults/main.yml b/ansible/roles/skyline/defaults/main.yml
new file mode 100644
index 0000000000..9cd77327cb
--- /dev/null
+++ b/ansible/roles/skyline/defaults/main.yml
@@ -0,0 +1,206 @@
+---
+skyline_services:
+ skyline-apiserver:
+ container_name: skyline_apiserver
+ group: skyline-apiserver
+ enabled: true
+ image: "{{ skyline_apiserver_image_full }}"
+ volumes: "{{ skyline_apiserver_default_volumes + skyline_apiserver_extra_volumes }}"
+ dimensions: "{{ skyline_apiserver_dimensions }}"
+ healthcheck: "{{ skyline_apiserver_healthcheck }}"
+ haproxy:
+ skyline_apiserver:
+ enabled: "{{ enable_skyline }}"
+ mode: "http"
+ external: false
+ port: "{{ skyline_apiserver_port }}"
+ listen_port: "{{ skyline_apiserver_listen_port }}"
+ tls_backend: "{{ skyline_enable_tls_backend }}"
+ skyline_apiserver_external:
+ enabled: "{{ enable_skyline }}"
+ mode: "http"
+ external: true
+ external_fqdn: "{{ skyline_apiserver_external_fqdn }}"
+ port: "{{ skyline_apiserver_port }}"
+ listen_port: "{{ skyline_apiserver_listen_port }}"
+ tls_backend: "{{ skyline_enable_tls_backend }}"
+ skyline-console:
+ container_name: skyline_console
+ group: skyline-console
+ enabled: true
+ image: "{{ skyline_console_image_full }}"
+ volumes: "{{ skyline_console_default_volumes + skyline_console_extra_volumes }}"
+ dimensions: "{{ skyline_console_dimensions }}"
+ healthcheck: "{{ skyline_console_healthcheck }}"
+ haproxy:
+ skyline_console:
+ enabled: "{{ enable_skyline }}"
+ mode: "http"
+ external: false
+ port: "{{ skyline_console_port }}"
+ listen_port: "{{ skyline_console_listen_port }}"
+ tls_backend: "{{ skyline_enable_tls_backend }}"
+ skyline_console_external:
+ enabled: "{{ enable_skyline }}"
+ mode: "http"
+ external: true
+ external_fqdn: "{{ skyline_console_external_fqdn }}"
+ port: "{{ skyline_console_port }}"
+ listen_port: "{{ skyline_console_listen_port }}"
+ tls_backend: "{{ skyline_enable_tls_backend }}"
+
+####################
+# Database
+####################
+skyline_database_name: "skyline"
+skyline_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}skyline{% endif %}"
+skyline_database_address: "{{ database_address | put_address_in_context('url') }}:{{ database_port }}"
+
+####################
+# Database sharding
+####################
+skyline_database_shard_root_user: "{% if enable_proxysql | bool %}root_shard_{{ skyline_database_shard_id }}{% else %}{{ database_user }}{% endif %}"
+skyline_database_shard_id: "{{ mariadb_default_database_shard_id | int }}"
+skyline_database_shard:
+ users:
+ - user: "{{ skyline_database_user }}"
+ password: "{{ skyline_database_password }}"
+ rules:
+ - schema: "{{ skyline_database_name }}"
+ shard_id: "{{ skyline_database_shard_id }}"
+
+####################
+# Docker
+####################
+skyline_tag: "{{ openstack_tag }}"
+
+skyline_apiserver_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}skyline-apiserver"
+skyline_apiserver_tag: "{{ skyline_tag }}"
+skyline_apiserver_image_full: "{{ skyline_apiserver_image }}:{{ skyline_apiserver_tag }}"
+
+skyline_console_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}skyline-console"
+skyline_console_tag: "{{ skyline_tag }}"
+skyline_console_image_full: "{{ skyline_console_image }}:{{ skyline_console_tag }}"
+
+skyline_apiserver_dimensions: "{{ default_container_dimensions }}"
+skyline_console_dimensions: "{{ default_container_dimensions }}"
+
+skyline_apiserver_enable_healthchecks: "{{ enable_container_healthchecks }}"
+skyline_apiserver_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
+skyline_apiserver_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
+skyline_apiserver_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
+skyline_apiserver_healthcheck_test: ["CMD-SHELL", "healthcheck_curl {{ 'https' if skyline_enable_tls_backend | bool else 'http' }}://{{ api_interface_address | put_address_in_context('url') }}:{{ skyline_apiserver_listen_port }}/docs"]
+skyline_apiserver_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
+skyline_apiserver_healthcheck:
+ interval: "{{ skyline_apiserver_healthcheck_interval }}"
+ retries: "{{ skyline_apiserver_healthcheck_retries }}"
+ start_period: "{{ skyline_apiserver_healthcheck_start_period }}"
+ test: "{% if skyline_apiserver_enable_healthchecks | bool %}{{ skyline_apiserver_healthcheck_test }}{% else %}NONE{% endif %}"
+ timeout: "{{ skyline_apiserver_healthcheck_timeout }}"
+
+skyline_console_enable_healthchecks: "{{ enable_container_healthchecks }}"
+skyline_console_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
+skyline_console_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
+skyline_console_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
+skyline_console_healthcheck_test: ["CMD-SHELL", "healthcheck_curl {{ 'https' if skyline_enable_tls_backend | bool else 'http' }}://{{ api_interface_address | put_address_in_context('url') }}:{{ skyline_console_listen_port }}/docs"]
+skyline_console_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
+skyline_console_healthcheck:
+ interval: "{{ skyline_console_healthcheck_interval }}"
+ retries: "{{ skyline_console_healthcheck_retries }}"
+ start_period: "{{ skyline_console_healthcheck_start_period }}"
+ test: "{% if skyline_console_enable_healthchecks | bool %}{{ skyline_console_healthcheck_test }}{% else %}NONE{% endif %}"
+ timeout: "{{ skyline_console_healthcheck_timeout }}"
+
+skyline_apiserver_default_volumes:
+ - "{{ node_config_directory }}/skyline-apiserver/:{{ container_config_directory }}/:ro"
+ - "/etc/localtime:/etc/localtime:ro"
+ - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
+ - "kolla_logs:/var/log/kolla/"
+
+skyline_console_default_volumes:
+ - "{{ node_config_directory }}/skyline-console/:{{ container_config_directory }}/:ro"
+ - "/etc/localtime:/etc/localtime:ro"
+ - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
+ - "kolla_logs:/var/log/kolla/"
+
+skyline_extra_volumes: "{{ default_extra_volumes }}"
+skyline_apiserver_extra_volumes: "{{ skyline_extra_volumes }}"
+skyline_console_extra_volumes: "{{ skyline_extra_volumes }}"
+
+####################
+# OpenStack
+####################
+skyline_apiserver_internal_base_endpoint: "{{ skyline_apiserver_internal_fqdn | kolla_url(internal_protocol, skyline_apiserver_port) }}"
+skyline_apiserver_public_base_endpoint: "{{ skyline_apiserver_external_fqdn | kolla_url(public_protocol, skyline_apiserver_public_port) }}"
+
+skyline_logging_debug: "{{ openstack_logging_debug }}"
+
+openstack_skyline_auth: "{{ openstack_auth }}"
+
+####################
+# Skyline
+####################
+log_dir: /var/log/kolla/skyline
+skyline_access_token_expire_seconds: 3600
+skyline_access_token_renew_seconds: 1800
+skyline_backend_cors_origins: []
+skyline_nginx_prefix: /api/openstack
+# If skyline_base_domains_ignore is set to true, domains such as
+# heat_user_domain are not displayed on the Skyline login page.
+skyline_base_domains_ignore: true
+skyline_system_admin_roles:
+ - admin
+skyline_system_reader_roles:
+ - system_reader
+skyline_keystone_url: "{{ keystone_internal_url }}/v3/"
+skyline_session_name: session
+skyline_reclaim_instance_interval: 604800
+
+skyline_gunicorn_debug_level: "{% if openstack_logging_debug | bool %}DEBUG{% else %}INFO{% endif %}"
+skyline_gunicorn_timeout: 300
+skyline_gunicorn_keepalive: 5
+skyline_gunicorn_workers: "{{ openstack_service_workers }}"
+
+skyline_ssl_certfile: "{{ '/etc/skyline/certs/skyline-cert.pem' if skyline_enable_tls_backend | bool else '' }}"
+skyline_ssl_keyfile: "{{ '/etc/skyline/certs/skyline-key.pem' if skyline_enable_tls_backend | bool else '' }}"
+
+####################
+# Keystone
+####################
+skyline_keystone_user: skyline
+skyline_ks_services:
+ - name: "skyline"
+ type: "panel"
+ description: "OpenStack Dashboard Service"
+ endpoints:
+ - {'interface': 'internal', 'url': '{{ skyline_apiserver_internal_base_endpoint }}'}
+ - {'interface': 'public', 'url': '{{ skyline_apiserver_public_base_endpoint }}'}
+
+skyline_ks_users:
+ - project: "service"
+ user: "{{ skyline_keystone_user }}"
+ password: "{{ skyline_keystone_password }}"
+ role: "admin"
+
+####################
+# SSO
+####################
+skyline_enable_sso: "no"
+
+####################
+# TLS
+####################
+skyline_enable_tls_backend: "{{ kolla_enable_tls_backend }}"
+
+skyline_copy_certs: "{{ kolla_copy_ca_into_containers | bool or skyline_enable_tls_backend | bool }}"
+
+####################
+# Custom logos: files and folders will be copied to the static folder
+####################
+skyline_custom_logos: []
+
+####################
+# External Swift: URL of an external Swift service, e.g. when running standalone Ceph
+####################
+skyline_external_swift: "no"
+skyline_external_swift_url: ""
diff --git a/ansible/roles/skyline/handlers/main.yml b/ansible/roles/skyline/handlers/main.yml
new file mode 100644
index 0000000000..19f9d652a3
--- /dev/null
+++ b/ansible/roles/skyline/handlers/main.yml
@@ -0,0 +1,28 @@
+---
+- name: Restart skyline-apiserver container
+ vars:
+ service_name: "skyline-apiserver"
+ service: "{{ skyline_services[service_name] }}"
+ become: true
+ kolla_container:
+ action: "recreate_or_restart_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ service.container_name }}"
+ image: "{{ service.image }}"
+ volumes: "{{ service.volumes | reject('equalto', '') | list }}"
+ dimensions: "{{ service.dimensions }}"
+ healthcheck: "{{ service.healthcheck | default(omit) }}"
+
+- name: Restart skyline-console container
+ vars:
+ service_name: "skyline-console"
+ service: "{{ skyline_services[service_name] }}"
+ become: true
+ kolla_container:
+ action: "recreate_or_restart_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ service.container_name }}"
+ image: "{{ service.image }}"
+ volumes: "{{ service.volumes | reject('equalto', '') | list }}"
+ dimensions: "{{ service.dimensions }}"
+ healthcheck: "{{ service.healthcheck | default(omit) }}"
diff --git a/ansible/roles/skyline/tasks/bootstrap.yml b/ansible/roles/skyline/tasks/bootstrap.yml
new file mode 100644
index 0000000000..38b7d84448
--- /dev/null
+++ b/ansible/roles/skyline/tasks/bootstrap.yml
@@ -0,0 +1,40 @@
+---
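+# Database setup runs once via kolla_toolbox and is delegated to the first
+# skyline-apiserver host.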
+- name: Creating Skyline database
+ become: true
+ kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: mysql_db
+ module_args:
+ login_host: "{{ database_address }}"
+ login_port: "{{ database_port }}"
+ login_user: "{{ skyline_database_shard_root_user }}"
+ login_password: "{{ database_password }}"
+ name: "{{ skyline_database_name }}"
+ run_once: true
+ delegate_to: "{{ groups['skyline-apiserver'][0] }}"
+ when:
+ - not use_preconfigured_databases | bool
+
+- name: Creating Skyline database user and setting permissions
+ become: true
+ kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: mysql_user
+ module_args:
+ login_host: "{{ database_address }}"
+ login_port: "{{ database_port }}"
+ login_user: "{{ skyline_database_shard_root_user }}"
+ login_password: "{{ database_password }}"
+ name: "{{ skyline_database_user }}"
+ password: "{{ skyline_database_password }}"
+ host: "%"
+ priv: "{{ skyline_database_name }}.*:ALL"
+ append_privs: "yes"
+ run_once: true
+ delegate_to: "{{ groups['skyline-apiserver'][0] }}"
+ when:
+ - not use_preconfigured_databases | bool
+
+- import_tasks: bootstrap_service.yml
diff --git a/ansible/roles/skyline/tasks/bootstrap_service.yml b/ansible/roles/skyline/tasks/bootstrap_service.yml
new file mode 100644
index 0000000000..61831b2c33
--- /dev/null
+++ b/ansible/roles/skyline/tasks/bootstrap_service.yml
@@ -0,0 +1,20 @@
+---
+- name: Running Skyline bootstrap container
+ vars:
+ skyline_apiserver: "{{ skyline_services['skyline-apiserver'] }}"
+ become: true
+ kolla_container:
+ action: "start_container"
+ common_options: "{{ docker_common_options }}"
+ detach: false
+ environment:
+ KOLLA_BOOTSTRAP:
+ KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
+ image: "{{ skyline_apiserver.image }}"
+ labels:
+ BOOTSTRAP:
+ name: "bootstrap_skyline"
+ restart_policy: oneshot
+ volumes: "{{ skyline_apiserver.volumes | reject('equalto', '') | list }}"
+ run_once: true
+ delegate_to: "{{ groups[skyline_apiserver.group][0] }}"
diff --git a/ansible/roles/skyline/tasks/check-containers.yml b/ansible/roles/skyline/tasks/check-containers.yml
new file mode 100644
index 0000000000..b7e2f7c29f
--- /dev/null
+++ b/ansible/roles/skyline/tasks/check-containers.yml
@@ -0,0 +1,3 @@
+---
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/skyline/tasks/check.yml b/ansible/roles/skyline/tasks/check.yml
new file mode 100644
index 0000000000..ed97d539c0
--- /dev/null
+++ b/ansible/roles/skyline/tasks/check.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible/roles/skyline/tasks/config.yml b/ansible/roles/skyline/tasks/config.yml
new file mode 100644
index 0000000000..2a8921e7e4
--- /dev/null
+++ b/ansible/roles/skyline/tasks/config.yml
@@ -0,0 +1,73 @@
+---
+- name: Ensuring config directories exist
+ file:
+ path: "{{ node_config_directory }}/{{ item.key }}"
+ state: "directory"
+ owner: "{{ config_owner_user }}"
+ group: "{{ config_owner_group }}"
+ mode: "0770"
+ become: true
+ with_dict: "{{ skyline_services | select_services_enabled_and_mapped_to_host }}"
+
+- include_tasks: copy-certs.yml
+ when:
+ - skyline_copy_certs | bool
+
+- name: Copying over skyline.yaml files for services
+ merge_yaml:
+ sources:
+ - "{{ role_path }}/templates/skyline.yaml.j2"
+ - "{{ node_custom_config }}/skyline/skyline.yaml"
+ dest: "{{ node_config_directory }}/{{ item.key }}/skyline.yaml"
+ mode: "0660"
+ become: true
+ with_dict: "{{ skyline_services | select_services_enabled_and_mapped_to_host }}"
+
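+# The gunicorn.py and nginx.conf tasks below use with_first_found so that
+# operator files under node_custom_config take precedence over role templates.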
+- name: Copying over gunicorn.py files for services
+ vars:
+ service: "{{ skyline_services['skyline-apiserver'] }}"
+ template:
+ src: "{{ item }}"
+ dest: "{{ node_config_directory }}/skyline-apiserver/gunicorn.py"
+ mode: "0660"
+ become: true
+ when: service | service_enabled_and_mapped_to_host
+ with_first_found:
+ - "{{ node_custom_config }}/skyline/gunicorn.py"
+ - "gunicorn.py.j2"
+
+- name: Copying over nginx.conf files for services
+ vars:
+ service: "{{ skyline_services['skyline-console'] }}"
+ template:
+ src: "{{ item }}"
+ dest: "{{ node_config_directory }}/skyline-console/nginx.conf"
+ mode: "0660"
+ become: true
+ when: service | service_enabled_and_mapped_to_host
+ with_first_found:
+ - "{{ node_custom_config }}/skyline/nginx.conf"
+ - "nginx.conf.j2"
+
+- name: Copying over config.json files for services
+ template:
+ src: "{{ item.key }}.json.j2"
+ dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
+ mode: "0660"
+ become: true
+ with_dict: "{{ skyline_services | select_services_enabled_and_mapped_to_host }}"
+
+- name: Copying over custom logos
+ become: true
+ vars:
+ service: "{{ skyline_services['skyline-console'] }}"
+ copy:
+ src: "{{ node_custom_config }}/skyline/logos/{{ item }}"
+ dest: "{{ node_config_directory }}/skyline-console/logos/"
+ mode: "0660"
+ when:
+ - service | service_enabled_and_mapped_to_host
+ - skyline_custom_logos | length > 0
+ with_items: "{{ skyline_custom_logos }}"
diff --git a/ansible/roles/skyline/tasks/config_validate.yml b/ansible/roles/skyline/tasks/config_validate.yml
new file mode 100644
index 0000000000..ed97d539c0
--- /dev/null
+++ b/ansible/roles/skyline/tasks/config_validate.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible/roles/skyline/tasks/copy-certs.yml b/ansible/roles/skyline/tasks/copy-certs.yml
new file mode 100644
index 0000000000..3f39794746
--- /dev/null
+++ b/ansible/roles/skyline/tasks/copy-certs.yml
@@ -0,0 +1,6 @@
+---
+- name: "Copy certificates and keys for {{ project_name }}"
+ import_role:
+ role: service-cert-copy
+ vars:
+ project_services: "{{ skyline_services }}"
diff --git a/ansible/roles/monasca/tasks/deploy-containers.yml b/ansible/roles/skyline/tasks/deploy-containers.yml
similarity index 100%
rename from ansible/roles/monasca/tasks/deploy-containers.yml
rename to ansible/roles/skyline/tasks/deploy-containers.yml
diff --git a/ansible/roles/skyline/tasks/deploy.yml b/ansible/roles/skyline/tasks/deploy.yml
new file mode 100644
index 0000000000..d793a349da
--- /dev/null
+++ b/ansible/roles/skyline/tasks/deploy.yml
@@ -0,0 +1,11 @@
+---
+- import_tasks: register.yml
+
+- import_tasks: config.yml
+
+- import_tasks: check-containers.yml
+
+- import_tasks: bootstrap.yml
+
+- name: Flush handlers
+ meta: flush_handlers
diff --git a/ansible/roles/skyline/tasks/loadbalancer.yml b/ansible/roles/skyline/tasks/loadbalancer.yml
new file mode 100644
index 0000000000..82cf637f3b
--- /dev/null
+++ b/ansible/roles/skyline/tasks/loadbalancer.yml
@@ -0,0 +1,7 @@
+---
+- name: "Configure loadbalancer for {{ project_name }}"
+ import_role:
+ name: loadbalancer-config
+ vars:
+ project_services: "{{ skyline_services }}"
+ tags: always
diff --git a/ansible/roles/monasca/tasks/main.yml b/ansible/roles/skyline/tasks/main.yml
similarity index 100%
rename from ansible/roles/monasca/tasks/main.yml
rename to ansible/roles/skyline/tasks/main.yml
diff --git a/ansible/roles/skyline/tasks/precheck.yml b/ansible/roles/skyline/tasks/precheck.yml
new file mode 100644
index 0000000000..0a081b2042
--- /dev/null
+++ b/ansible/roles/skyline/tasks/precheck.yml
@@ -0,0 +1,40 @@
+---
+- import_role:
+ name: service-precheck
+ vars:
+ service_precheck_services: "{{ skyline_services }}"
+ service_name: "{{ project_name }}"
+
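+# The port checks below run only when the container does not exist yet;
+# otherwise the port is expected to be bound by that container.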
+- name: Get container facts
+ become: true
+ kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
+ name:
+ - skyline_apiserver
+ - skyline_console
+ register: container_facts
+
+- name: Checking free port for Skyline APIServer
+ wait_for:
+ host: "{{ api_interface_address }}"
+ port: "{{ skyline_apiserver_listen_port }}"
+ connect_timeout: 1
+ timeout: 1
+ state: stopped
+ when:
+ - container_facts['skyline_apiserver'] is not defined
+ - inventory_hostname in groups['skyline-apiserver']
+
+- name: Checking free port for Skyline Console
+ wait_for:
+ host: "{{ api_interface_address }}"
+ port: "{{ skyline_console_listen_port }}"
+ connect_timeout: 1
+ timeout: 1
+ state: stopped
+ when:
+ - container_facts['skyline_console'] is not defined
+ - inventory_hostname in groups['skyline-console']
diff --git a/ansible/roles/monasca/tasks/pull.yml b/ansible/roles/skyline/tasks/pull.yml
similarity index 100%
rename from ansible/roles/monasca/tasks/pull.yml
rename to ansible/roles/skyline/tasks/pull.yml
diff --git a/ansible/roles/skyline/tasks/reconfigure.yml b/ansible/roles/skyline/tasks/reconfigure.yml
new file mode 100644
index 0000000000..f670a5b78d
--- /dev/null
+++ b/ansible/roles/skyline/tasks/reconfigure.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: deploy.yml
diff --git a/ansible/roles/skyline/tasks/register.yml b/ansible/roles/skyline/tasks/register.yml
new file mode 100644
index 0000000000..8cd7530f9d
--- /dev/null
+++ b/ansible/roles/skyline/tasks/register.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-ks-register
+ vars:
+ service_ks_register_auth: "{{ openstack_skyline_auth }}"
+ service_ks_register_services: "{{ skyline_ks_services }}"
+ service_ks_register_users: "{{ skyline_ks_users }}"
diff --git a/ansible/roles/skyline/tasks/stop.yml b/ansible/roles/skyline/tasks/stop.yml
new file mode 100644
index 0000000000..965ab35b97
--- /dev/null
+++ b/ansible/roles/skyline/tasks/stop.yml
@@ -0,0 +1,6 @@
+---
+- import_role:
+ name: service-stop
+ vars:
+ project_services: "{{ skyline_services }}"
+ service_name: "{{ project_name }}"
diff --git a/ansible/roles/kafka/tasks/upgrade.yml b/ansible/roles/skyline/tasks/upgrade.yml
similarity index 100%
rename from ansible/roles/kafka/tasks/upgrade.yml
rename to ansible/roles/skyline/tasks/upgrade.yml
diff --git a/ansible/roles/skyline/templates/gunicorn.py.j2 b/ansible/roles/skyline/templates/gunicorn.py.j2
new file mode 100644
index 0000000000..790989932e
--- /dev/null
+++ b/ansible/roles/skyline/templates/gunicorn.py.j2
@@ -0,0 +1,70 @@
+# Copyright 2022 99cloud
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+bind = "{{ api_interface_address }}:{{ skyline_apiserver_port }}"
+workers = {{ skyline_gunicorn_workers }}
+worker_class = "uvicorn.workers.UvicornWorker"
+timeout = {{ skyline_gunicorn_timeout }}
+keepalive = {{ skyline_gunicorn_keepalive }}
+reuse_port = True
+proc_name = "{{ project_name }}"
+{% if skyline_ssl_certfile and skyline_ssl_keyfile %}
+keyfile = "{{ skyline_ssl_keyfile }}"
+certfile = "{{ skyline_ssl_certfile }}"
+{% endif %}
+
+logconfig_dict = {
+ "version": 1,
+ "disable_existing_loggers": False,
+ "root": {"level": "{{ skyline_gunicorn_debug_level }}", "handlers": ["console"]},
+ "loggers": {
+ "gunicorn.error": {
+ "level": "{{ skyline_gunicorn_debug_level }}",
+ "handlers": ["error_file"],
+ "propagate": 0,
+ "qualname": "gunicorn_error",
+ },
+ "gunicorn.access": {
+ "level": "{{ skyline_gunicorn_debug_level }}",
+ "handlers": ["access_file"],
+ "propagate": 0,
+ "qualname": "access",
+ },
+ },
+ "handlers": {
+ "error_file": {
+ "class": "logging.handlers.RotatingFileHandler",
+ "formatter": "generic",
+ "filename": "{{ log_dir }}/skyline-error.log",
+ },
+ "access_file": {
+ "class": "logging.handlers.RotatingFileHandler",
+ "formatter": "generic",
+ "filename": "{{ log_dir }}/skyline-access.log",
+ },
+ "console": {
+ "class": "logging.StreamHandler",
+ "level": "{{ skyline_gunicorn_debug_level }}",
+ "formatter": "generic",
+ },
+ },
+ "formatters": {
+ "generic": {
+ "format": "%(asctime)s.%(msecs)03d %(process)d %(levelname)s [-] %(message)s",
+ "datefmt": "[%Y-%m-%d %H:%M:%S %z]",
+ "class": "logging.Formatter",
+ }
+ },
+}
diff --git a/ansible/roles/skyline/templates/nginx.conf.j2 b/ansible/roles/skyline/templates/nginx.conf.j2
new file mode 100644
index 0000000000..5af5b7b8b7
--- /dev/null
+++ b/ansible/roles/skyline/templates/nginx.conf.j2
@@ -0,0 +1,323 @@
+daemon off;
+worker_processes auto;
+pid /run/nginx.pid;
+include /etc/nginx/modules-enabled/*.conf;
+
+events {
+ worker_connections 1024;
+ multi_accept on;
+}
+
+http {
+
+ ##
+ # Basic Settings
+ ##
+ sendfile on;
+ tcp_nopush on;
+ tcp_nodelay on;
+ client_max_body_size 0;
+ types_hash_max_size 2048;
+ proxy_request_buffering off;
+ server_tokens off;
+
+ # server_names_hash_bucket_size 64;
+ # server_name_in_redirect off;
+
+ include /etc/nginx/mime.types;
+ default_type application/octet-stream;
+ {% if skyline_ssl_certfile and skyline_ssl_keyfile %}
+ ##
+ # SSL Settings
+ ##
+ ssl_protocols TLSv1.2 TLSv1.3;
+ ssl_prefer_server_ciphers on;
+
+ # Certificate and key are provided via skyline_ssl_certfile and
+ # skyline_ssl_keyfile; do not use self-signed certificates in production.
+ ssl_certificate {{ skyline_ssl_certfile }};
+ ssl_certificate_key {{ skyline_ssl_keyfile }};
+ {% endif %}
+ ##
+ # Logging Settings
+ ##
+ log_format main '$remote_addr - $remote_user [$time_local] "$request_time" '
+ '"$upstream_response_time" "$request" '
+ '$status $body_bytes_sent "$http_referer" '
+ '"$http_user_agent" "$http_x_forwarded_for"';
+ access_log {{ log_dir | default('/var/log/skyline') }}/skyline-nginx-access.log main;
+ error_log {{ log_dir | default('/var/log/skyline') }}/skyline-nginx-error.log;
+
+ ##
+ # Gzip Settings
+ ##
+ gzip on;
+ gzip_static on;
+ gzip_disable "msie6";
+
+ gzip_vary on;
+ gzip_proxied any;
+ gzip_comp_level 6;
+ gzip_buffers 16 8k;
+ # gzip_http_version 1.1;
+ gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
+
+ ##
+ # Virtual Host Configs
+ ##
+ server {
+ listen {{ api_interface_address | put_address_in_context('url') }}:{{ skyline_console_listen_port }}{% if skyline_ssl_certfile and skyline_ssl_keyfile %} ssl http2{% endif %} default_server;
+
+ root /var/lib/kolla/venv/lib/python{{ distro_python_version }}/site-packages/skyline_console/static;
+
+ # Default file served for directory requests
+ index index.html;
+
+ server_name _;
+
+ error_page 497 https://$http_host$request_uri;
+
+ location / {
+ # First attempt to serve request as file, then
+ # as directory, then fall back to displaying a 404.
+ try_files $uri $uri/ /index.html;
+ expires 1d;
+ add_header Cache-Control "public";
+ }
+
+ # Service: skyline
+ location {{ skyline_nginx_prefix }}/skyline/ {
+ proxy_pass {{ internal_protocol }}://{{ skyline_apiserver_internal_fqdn | put_address_in_context('url') }}:{{ skyline_apiserver_port }}/;
+ proxy_redirect {{ internal_protocol }}://{{ skyline_apiserver_internal_fqdn | put_address_in_context('url') }}:{{ skyline_apiserver_port }}/ {{ skyline_nginx_prefix }}/skyline/;
+ proxy_buffering off;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-Host $host;
+ proxy_set_header Host $http_host;
+ }
+
+ {% if enable_keystone | bool %}# Region: {{ openstack_region_name }}, Service: keystone
+ location {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/keystone {
+ proxy_pass {{ internal_protocol }}://{{ keystone_internal_fqdn | put_address_in_context('url') }}:{{ keystone_internal_port }}/;
+ proxy_redirect {{ internal_protocol }}://{{ keystone_internal_fqdn | put_address_in_context('url') }}:{{ keystone_internal_port }}/ {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/keystone/;
+ proxy_buffering off;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-Host $host;
+ proxy_set_header Host $http_host;
+ }
+ {% endif %}
+
+ {% if enable_glance | bool %}# Region: {{ openstack_region_name }}, Service: glance
+ location {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/glance {
+ proxy_pass {{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ glance_api_port }}/;
+ proxy_redirect {{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ glance_api_port }}/ {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/glance/;
+ proxy_buffering off;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-Host $host;
+ proxy_set_header Host $http_host;
+ }
+ {% endif %}
+
+ {% if enable_neutron | bool %}# Region: {{ openstack_region_name }}, Service: neutron
+ location {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/neutron {
+ proxy_pass {{ internal_protocol }}://{{ neutron_internal_fqdn | put_address_in_context('url') }}:{{ neutron_server_port }}/;
+ proxy_redirect {{ internal_protocol }}://{{ neutron_internal_fqdn | put_address_in_context('url') }}:{{ neutron_server_port }}/ {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/neutron/;
+ proxy_buffering off;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-Host $host;
+ proxy_set_header Host $http_host;
+ }
+ {% endif %}
+
+ {% if enable_nova | bool %}# Region: {{ openstack_region_name }}, Service: nova
+ location {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/nova {
+ proxy_pass {{ internal_protocol }}://{{ nova_internal_fqdn | put_address_in_context('url') }}:{{ nova_api_port }}/;
+ proxy_redirect {{ internal_protocol }}://{{ nova_internal_fqdn | put_address_in_context('url') }}:{{ nova_api_port }}/ {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/nova/;
+ proxy_buffering off;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-Host $host;
+ proxy_set_header Host $http_host;
+ }
+ {% endif %}
+
+ {% if enable_placement | bool %}# Region: {{ openstack_region_name }}, Service: placement
+ location {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/placement {
+ proxy_pass {{ internal_protocol }}://{{ placement_internal_fqdn | put_address_in_context('url') }}:{{ placement_api_port }}/;
+ proxy_redirect {{ internal_protocol }}://{{ placement_internal_fqdn | put_address_in_context('url') }}:{{ placement_api_port }}/ {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/placement/;
+ proxy_buffering off;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-Host $host;
+ proxy_set_header Host $http_host;
+ }
+ {% endif %}
+
+ {% if enable_cinder | bool %}# Region: {{ openstack_region_name }}, Service: cinder
+ location {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/cinder {
+ proxy_pass {{ internal_protocol }}://{{ cinder_internal_fqdn | put_address_in_context('url') }}:{{ cinder_api_port }}/;
+ proxy_redirect {{ internal_protocol }}://{{ cinder_internal_fqdn | put_address_in_context('url') }}:{{ cinder_api_port }}/ {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/cinder/;
+ proxy_buffering off;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-Host $host;
+ proxy_set_header Host $http_host;
+ }
+ {% endif %}
+
+ {% if enable_heat | bool %}# Region: {{ openstack_region_name }}, Service: heat
+ location {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/heat {
+ proxy_pass {{ internal_protocol }}://{{ heat_internal_fqdn | put_address_in_context('url') }}:{{ heat_api_port }}/;
+ proxy_redirect {{ internal_protocol }}://{{ heat_internal_fqdn | put_address_in_context('url') }}:{{ heat_api_port }}/ {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/heat/;
+ proxy_buffering off;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-Host $host;
+ proxy_set_header Host $http_host;
+ }
+ {% endif %}
+
+ {% if enable_octavia | bool %}# Region: {{ openstack_region_name }}, Service: octavia
+ location {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/octavia {
+ proxy_pass {{ internal_protocol }}://{{ octavia_internal_fqdn | put_address_in_context('url') }}:{{ octavia_api_port }}/;
+ proxy_redirect {{ internal_protocol }}://{{ octavia_internal_fqdn | put_address_in_context('url') }}:{{ octavia_api_port }}/ {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/octavia/;
+ proxy_buffering off;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-Host $host;
+ proxy_set_header Host $http_host;
+ }
+ {% endif %}
+
+ {% if enable_manila | bool %}# Region: {{ openstack_region_name }}, Service: manilav2
+ location {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/manilav2 {
+ proxy_pass {{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ manila_api_port }}/;
+ proxy_redirect {{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ manila_api_port }}/ {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/manilav2/;
+ proxy_buffering off;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-Host $host;
+ proxy_set_header Host $http_host;
+ }
+ {% endif %}
+
+ {% if enable_ironic | bool %}# Region: {{ openstack_region_name }}, Service: ironic
+ location {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/ironic {
+ proxy_pass {{ internal_protocol }}://{{ ironic_internal_fqdn | put_address_in_context('url') }}:{{ ironic_api_port }}/;
+ proxy_redirect {{ internal_protocol }}://{{ ironic_internal_fqdn | put_address_in_context('url') }}:{{ ironic_api_port }}/ {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/ironic/;
+ proxy_buffering off;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-Host $host;
+ proxy_set_header Host $http_host;
+ }
+ {% endif %}
+
+ {% if enable_zun | bool %}# Region: {{ openstack_region_name }}, Service: zun
+ location {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/zun {
+ proxy_pass {{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ zun_api_port }}/;
+ proxy_redirect {{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ zun_api_port }}/ {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/zun/;
+ proxy_buffering off;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-Host $host;
+ proxy_set_header Host $http_host;
+ }
+ {% endif %}
+
+ {% if enable_magnum | bool %}# Region: {{ openstack_region_name }}, Service: magnum
+ location {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/magnum {
+ proxy_pass {{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ magnum_api_port }}/;
+ proxy_redirect {{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ magnum_api_port }}/ {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/magnum/;
+ proxy_buffering off;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-Host $host;
+ proxy_set_header Host $http_host;
+ }
+ {% endif %}
+
+ {% if enable_trove | bool %}# Region: {{ openstack_region_name }}, Service: trove
+ location {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/trove {
+ proxy_pass {{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ trove_api_port }}/;
+ proxy_redirect {{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ trove_api_port }}/ {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/trove/;
+ proxy_buffering off;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-Host $host;
+ proxy_set_header Host $http_host;
+ }
+ {% endif %}
+
+ {% if enable_barbican | bool %}# Region: {{ openstack_region_name }}, Service: barbican
+ location {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/barbican {
+ proxy_pass {{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ barbican_api_port }}/;
+ proxy_redirect {{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ barbican_api_port }}/ {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/barbican/;
+ proxy_buffering off;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-Host $host;
+ proxy_set_header Host $http_host;
+ }
+ {% endif %}
+
+ {% if enable_designate | bool %}# Region: {{ openstack_region_name }}, Service: designate
+ location {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/designate {
+ proxy_pass {{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ designate_api_port }}/;
+ proxy_redirect {{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ designate_api_port }}/ {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/designate/;
+ proxy_buffering off;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-Host $host;
+ proxy_set_header Host $http_host;
+ }
+ {% endif %}
+
+ {% if enable_masakari | bool %}# Region: {{ openstack_region_name }}, Service: masakari
+ location {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/masakari {
+ proxy_pass {{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ masakari_api_port }}/;
+ proxy_redirect {{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ masakari_api_port }}/ {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/masakari/;
+ proxy_buffering off;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-Host $host;
+ proxy_set_header Host $http_host;
+ }
+ {% endif %}
+
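+ {# Object store: prefer native Swift, then Ceph RadosGW, then an external Swift endpoint. #}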
+ {% if enable_swift | bool %}# Region: {{ openstack_region_name }}, Service: swift
+ location {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/swift {
+ proxy_pass {{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ swift_proxy_server_port }}/;
+ proxy_redirect {{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ swift_proxy_server_port }}/ {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/swift/;
+ proxy_buffering off;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-Host $host;
+ proxy_set_header Host $http_host;
+ }
+ {% elif enable_ceph_rgw | bool %}# Region: {{ openstack_region_name }}, Service: ceph_rgw
+ location {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/swift {
+ proxy_pass {{ internal_protocol }}://{{ ceph_rgw_internal_fqdn }}:{{ ceph_rgw_port }}/{{ 'swift' if not ceph_rgw_swift_compatibility | bool }};
+ proxy_redirect {{ internal_protocol }}://{{ ceph_rgw_internal_fqdn }}:{{ ceph_rgw_port }}/{{ 'swift' if not ceph_rgw_swift_compatibility | bool }} {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/swift/;
+ proxy_buffering off;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-Host $host;
+ proxy_set_header Host $http_host;
+ }
+ {% elif skyline_external_swift | bool %}# Region: {{ openstack_region_name }}, Service: external swift
+ location {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/swift {
+ proxy_pass {{ skyline_external_swift_url }};
+ proxy_redirect {{ skyline_external_swift_url }} {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/swift/;
+ proxy_buffering off;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-Host $host;
+ proxy_set_header Host $http_host;
+ }
+ {% endif %}
+ }
+}
diff --git a/ansible/roles/skyline/templates/skyline-apiserver.json.j2 b/ansible/roles/skyline/templates/skyline-apiserver.json.j2
new file mode 100644
index 0000000000..95bdc61df8
--- /dev/null
+++ b/ansible/roles/skyline/templates/skyline-apiserver.json.j2
@@ -0,0 +1,42 @@
+{
+ "command": "gunicorn -c /etc/skyline/gunicorn.py skyline_apiserver.main:app",
+ "config_files": [
+ {
+ "source": "{{ container_config_directory }}/skyline.yaml",
+ "dest": "/etc/skyline/skyline.yaml",
+ "owner": "skyline",
+ "perm": "0600"
+ },
+ {
+ "source": "{{ container_config_directory }}/gunicorn.py",
+ "dest": "/etc/skyline/gunicorn.py",
+ "owner": "skyline",
+ "perm": "0600"
+ }{% if skyline_enable_tls_backend | bool %},
+ {
+ "source": "{{ container_config_directory }}/skyline-cert.pem",
+ "dest": "/etc/skyline/certs/skyline-cert.pem",
+ "owner": "skyline",
+ "perm": "0600"
+ },
+ {
+ "source": "{{ container_config_directory }}/skyline-key.pem",
+ "dest": "/etc/skyline/certs/skyline-key.pem",
+ "owner": "skyline",
+ "perm": "0600"
+ }{% endif %}{% if skyline_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
+ ],
+ "permissions": [
+ {
+ "path": "/var/log/kolla/skyline",
+ "owner": "skyline:skyline",
+ "recurse": true
+ }
+ ]
+}
diff --git a/ansible/roles/skyline/templates/skyline-console.json.j2 b/ansible/roles/skyline/templates/skyline-console.json.j2
new file mode 100644
index 0000000000..44955558c8
--- /dev/null
+++ b/ansible/roles/skyline/templates/skyline-console.json.j2
@@ -0,0 +1,49 @@
+{
+ "command": "nginx",
+ "config_files": [
+ {
+ "source": "{{ container_config_directory }}/skyline.yaml",
+ "dest": "/etc/skyline/skyline.yaml",
+ "owner": "skyline",
+ "perm": "0600"
+ },
+ {
+ "source": "{{ container_config_directory }}/nginx.conf",
+ "dest": "/etc/nginx/nginx.conf",
+ "owner": "skyline",
+ "perm": "0600"
+ }{% if skyline_custom_logos | length > 0 %},
+ {
+ "source": "{{ container_config_directory }}/logos",
+ "dest": "/var/lib/kolla/venv/lib/python{{ distro_python_version }}/site-packages/skyline_console/static",
+ "owner": "root",
+ "perm": "0644",
+ "merge": true
+ }{% endif %}{% if skyline_enable_tls_backend | bool %},
+ {
+ "source": "{{ container_config_directory }}/skyline-cert.pem",
+ "dest": "/etc/skyline/certs/skyline-cert.pem",
+ "owner": "skyline",
+ "perm": "0600"
+ },
+ {
+ "source": "{{ container_config_directory }}/skyline-key.pem",
+ "dest": "/etc/skyline/certs/skyline-key.pem",
+ "owner": "skyline",
+ "perm": "0600"
+ }{% endif %}{% if skyline_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
+ ],
+ "permissions": [
+ {
+ "path": "/var/log/kolla/skyline",
+ "owner": "skyline:skyline",
+ "recurse": true
+ }
+ ]
+}
diff --git a/ansible/roles/skyline/templates/skyline.yaml.j2 b/ansible/roles/skyline/templates/skyline.yaml.j2
new file mode 100644
index 0000000000..d529506450
--- /dev/null
+++ b/ansible/roles/skyline/templates/skyline.yaml.j2
@@ -0,0 +1,110 @@
+default:
+ access_token_expire: {{ skyline_access_token_expire_seconds }}
+ access_token_renew: {{ skyline_access_token_renew_seconds }}
+ cors_allow_origins: {{ skyline_backend_cors_origins }}
+ database_url: mysql://{{ skyline_database_user }}:{{ skyline_database_password }}@{{ skyline_database_address }}/{{ skyline_database_name }}
+ debug: {{ skyline_logging_debug }}
+ log_dir: {{ log_dir }}
+{% if enable_prometheus | bool %}
+ prometheus_basic_auth_password: "{{ prometheus_skyline_password }}"
+ prometheus_basic_auth_user: "{{ prometheus_skyline_user }}"
+ prometheus_enable_basic_auth: true
+ prometheus_endpoint: "{{ prometheus_internal_endpoint }}"
+{% endif %}
+ secret_key: {{ skyline_secret_key }}
+ session_name: {{ skyline_session_name }}
+openstack:
+{% if skyline_base_domains_ignore | bool %}
+ base_domains:
+{% if enable_heat | bool %}
+ - heat_user_domain
+{% endif %}
+{% if enable_magnum | bool %}
+ - magnum
+{% endif %}
+{% endif %}
+ default_region: {{ openstack_region_name }}
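+ # Advertise optional features to the console only when the corresponding
+ # Neutron extension is enabled in this deployment.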
+ extension_mapping:
+{% if enable_neutron_port_forwarding | bool %}
+ floating-ip-port-forwarding: neutron_port_forwarding
+{% endif %}
+{% if enable_neutron_qos | bool %}
+ qos: neutron_qos
+{% endif %}
+{% if enable_neutron_vpnaas | bool %}
+ vpnaas: neutron_vpn
+{% endif %}
+ keystone_url: {{ skyline_keystone_url }}
+ nginx_prefix: {{ skyline_nginx_prefix }}
+ reclaim_instance_interval: {{ skyline_reclaim_instance_interval }}
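+ # Map Keystone catalog service types to the backing services deployed here.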
+ service_mapping:
+{% if enable_ironic | bool %}
+ baremetal: ironic
+{% endif %}
+{% if enable_nova | bool %}
+ compute: nova
+{% endif %}
+{% if enable_zun | bool %}
+ container: zun
+{% endif %}
+{% if enable_magnum | bool %}
+ container-infra: magnum
+{% endif %}
+{% if enable_trove | bool %}
+ database: trove
+{% endif %}
+{% if enable_designate | bool %}
+ dns: designate
+{% endif %}
+{% if enable_keystone | bool %}
+ identity: keystone
+{% endif %}
+{% if enable_glance | bool %}
+ image: glance
+{% endif %}
+{% if enable_masakari | bool %}
+ instance-ha: masakari
+{% endif %}
+{% if enable_barbican | bool %}
+ key-manager: barbican
+{% endif %}
+{% if enable_octavia | bool %}
+ load-balancer: octavia
+{% endif %}
+{% if enable_neutron | bool %}
+ network: neutron
+{% endif %}
+{% if enable_swift | bool or enable_ceph_rgw | bool or skyline_external_swift | bool %}
+ object-store: swift
+{% endif %}
+{% if enable_heat | bool %}
+ orchestration: heat
+{% endif %}
+{% if enable_placement | bool %}
+ placement: placement
+{% endif %}
+{% if enable_manila | bool %}
+ sharev2: manilav2
+{% endif %}
+{% if enable_cinder | bool %}
+ volumev3: cinder
+{% endif %}
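+ # Single sign-on (OpenID Connect via Keystone federation), when enabled.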
+ sso_enabled: {{ skyline_enable_sso | bool }}
+{% if skyline_enable_sso | bool %}
+ sso_protocols:
+ - openid
+ sso_region: {{ openstack_region_name }}
+{% endif %}
+ system_admin_roles:
+{% for skyline_system_admin_role in skyline_system_admin_roles %}
+ - {{ skyline_system_admin_role }}
+{% endfor %}
+ system_project: service
+ system_project_domain: {{ default_project_domain_name }}
+ system_reader_roles:
+{% for skyline_system_reader_role in skyline_system_reader_roles %}
+ - {{ skyline_system_reader_role }}
+{% endfor %}
+ system_user_domain: {{ default_user_domain_name }}
+ system_user_name: skyline
+ system_user_password: {{ skyline_keystone_password }}
diff --git a/ansible/roles/skyline/vars/main.yml b/ansible/roles/skyline/vars/main.yml
new file mode 100644
index 0000000000..1969b04758
--- /dev/null
+++ b/ansible/roles/skyline/vars/main.yml
@@ -0,0 +1,2 @@
+---
+project_name: "skyline"
diff --git a/ansible/roles/solum/defaults/main.yml b/ansible/roles/solum/defaults/main.yml
deleted file mode 100644
index 766dea8ec4..0000000000
--- a/ansible/roles/solum/defaults/main.yml
+++ /dev/null
@@ -1,237 +0,0 @@
----
-solum_services:
- solum-api:
- container_name: solum_api
- group: solum-api
- enabled: true
- image: "{{ solum_api_image_full }}"
- volumes: "{{ solum_api_default_volumes + solum_api_extra_volumes }}"
- dimensions: "{{ solum_api_dimensions }}"
- healthcheck: "{{ solum_api_healthcheck }}"
- solum-worker:
- container_name: solum_worker
- group: solum-worker
- enabled: true
- image: "{{ solum_worker_image_full }}"
- volumes: "{{ solum_worker_default_volumes + solum_worker_extra_volumes }}"
- dimensions: "{{ solum_worker_dimensions }}"
- healthcheck: "{{ solum_worker_healthcheck }}"
- solum-deployer:
- container_name: solum_deployer
- group: solum-deployer
- enabled: true
- image: "{{ solum_deployer_image_full }}"
- volumes: "{{ solum_deployer_default_volumes + solum_deployer_extra_volumes }}"
- dimensions: "{{ solum_deployer_dimensions }}"
- healthcheck: "{{ solum_deployer_healthcheck }}"
- haproxy:
- solum_application_deployment:
- enabled: "{{ enable_solum }}"
- mode: "http"
- external: false
- port: "{{ solum_application_deployment_port }}"
- host_group: "solum-application-deployment"
- solum_application_deployment_external:
- enabled: "{{ enable_solum }}"
- mode: "http"
- external: true
- port: "{{ solum_application_deployment_port }}"
- host_group: "solum-application-deployment"
- solum_image_builder:
- enabled: "{{ enable_solum }}"
- mode: "http"
- external: false
- port: "{{ solum_image_builder_port }}"
- host_group: "solum-image-builder"
- solum_image_builder_external:
- enabled: "{{ enable_solum }}"
- mode: "http"
- external: true
- port: "{{ solum_image_builder_port }}"
- host_group: "solum-image-builder"
- solum-conductor:
- container_name: solum_conductor
- group: solum-conductor
- enabled: true
- image: "{{ solum_conductor_image_full }}"
- volumes: "{{ solum_conductor_default_volumes + solum_conductor_extra_volumes }}"
- dimensions: "{{ solum_conductor_dimensions }}"
- healthcheck: "{{ solum_conductor_healthcheck }}"
-
-####################
-# Database
-####################
-solum_database_name: "solum"
-solum_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}solum{% endif %}"
-solum_database_address: "{{ database_address | put_address_in_context('url') }}:{{ database_port }}"
-
-####################
-# Database sharding
-####################
-solum_database_shard_root_user: "{% if enable_proxysql | bool %}root_shard_{{ solum_database_shard_id }}{% else %}{{ database_user }}{% endif %}"
-solum_database_shard_id: "{{ mariadb_default_database_shard_id | int }}"
-solum_database_shard:
- users:
- - user: "{{ solum_database_user }}"
- password: "{{ solum_database_password }}"
- rules:
- - schema: "{{ solum_database_name }}"
- shard_id: "{{ solum_database_shard_id }}"
-
-
-####################
-# Docker
-####################
-solum_tag: "{{ openstack_tag }}"
-
-solum_worker_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/solum-worker"
-solum_worker_tag: "{{ solum_tag }}"
-solum_worker_image_full: "{{ solum_worker_image }}:{{ solum_worker_tag }}"
-
-solum_deployer_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/solum-deployer"
-solum_deployer_tag: "{{ solum_tag }}"
-solum_deployer_image_full: "{{ solum_deployer_image }}:{{ solum_deployer_tag }}"
-
-solum_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/solum-conductor"
-solum_conductor_tag: "{{ solum_tag }}"
-solum_conductor_image_full: "{{ solum_conductor_image }}:{{ solum_conductor_tag }}"
-
-solum_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/solum-api"
-solum_api_tag: "{{ solum_tag }}"
-solum_api_image_full: "{{ solum_api_image }}:{{ solum_api_tag }}"
-
-solum_api_dimensions: "{{ default_container_dimensions }}"
-solum_worker_dimensions: "{{ default_container_dimensions }}"
-solum_deployer_dimensions: "{{ default_container_dimensions }}"
-solum_conductor_dimensions: "{{ default_container_dimensions }}"
-
-solum_api_enable_healthchecks: "{{ enable_container_healthchecks }}"
-solum_api_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-solum_api_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-solum_api_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-solum_api_healthcheck_test: ["CMD-SHELL", "healthcheck_curl http://{{ api_interface_address | put_address_in_context('url') }}:{{ solum_application_deployment_port }}"]
-solum_api_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-solum_api_healthcheck:
- interval: "{{ solum_api_healthcheck_interval }}"
- retries: "{{ solum_api_healthcheck_retries }}"
- start_period: "{{ solum_api_healthcheck_start_period }}"
- test: "{% if solum_api_enable_healthchecks | bool %}{{ solum_api_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ solum_api_healthcheck_timeout }}"
-
-solum_worker_enable_healthchecks: "{{ enable_container_healthchecks }}"
-solum_worker_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-solum_worker_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-solum_worker_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-solum_worker_healthcheck_test: ["CMD-SHELL", "healthcheck_port solum-worker {{ om_rpc_port }}"]
-solum_worker_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-solum_worker_healthcheck:
- interval: "{{ solum_worker_healthcheck_interval }}"
- retries: "{{ solum_worker_healthcheck_retries }}"
- start_period: "{{ solum_worker_healthcheck_start_period }}"
- test: "{% if solum_worker_enable_healthchecks | bool %}{{ solum_worker_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ solum_worker_healthcheck_timeout }}"
-
-solum_deployer_enable_healthchecks: "{{ enable_container_healthchecks }}"
-solum_deployer_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-solum_deployer_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-solum_deployer_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-solum_deployer_healthcheck_test: ["CMD-SHELL", "healthcheck_port solum-deployer {{ om_rpc_port }}"]
-solum_deployer_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-solum_deployer_healthcheck:
- interval: "{{ solum_deployer_healthcheck_interval }}"
- retries: "{{ solum_deployer_healthcheck_retries }}"
- start_period: "{{ solum_deployer_healthcheck_start_period }}"
- test: "{% if solum_deployer_enable_healthchecks | bool %}{{ solum_deployer_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ solum_deployer_healthcheck_timeout }}"
-
-solum_conductor_enable_healthchecks: "{{ enable_container_healthchecks }}"
-solum_conductor_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-solum_conductor_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-solum_conductor_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-solum_conductor_healthcheck_test: ["CMD-SHELL", "healthcheck_port solum-conductor {{ om_rpc_port }}"]
-solum_conductor_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-solum_conductor_healthcheck:
- interval: "{{ solum_conductor_healthcheck_interval }}"
- retries: "{{ solum_conductor_healthcheck_retries }}"
- start_period: "{{ solum_conductor_healthcheck_start_period }}"
- test: "{% if solum_conductor_enable_healthchecks | bool %}{{ solum_conductor_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ solum_conductor_healthcheck_timeout }}"
-
-solum_api_default_volumes:
- - "{{ node_config_directory }}/solum-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/solum/solum:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/solum' if solum_dev_mode | bool else '' }}"
-solum_worker_default_volumes:
- - "{{ node_config_directory }}/solum-worker/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/solum/solum:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/solum' if solum_dev_mode | bool else '' }}"
-solum_deployer_default_volumes:
- - "{{ node_config_directory }}/solum-deployer/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/solum/solum:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/solum' if solum_dev_mode | bool else '' }}"
-solum_conductor_default_volumes:
- - "{{ node_config_directory }}/solum-conductor/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/solum/solum:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/solum' if solum_dev_mode | bool else '' }}"
-
-solum_extra_volumes: "{{ default_extra_volumes }}"
-solum_api_extra_volumes: "{{ solum_extra_volumes }}"
-solum_worker_extra_volumes: "{{ solum_extra_volumes }}"
-solum_deployer_extra_volumes: "{{ solum_extra_volumes }}"
-solum_conductor_extra_volumes: "{{ solum_extra_volumes }}"
-
-####################
-# OpenStack
-####################
-solum_image_builder_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ solum_image_builder_port }}"
-solum_image_builder_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ solum_image_builder_port }}"
-
-solum_application_deployment_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ solum_application_deployment_port }}"
-solum_application_deployment_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ solum_application_deployment_port }}"
-
-solum_logging_debug: "{{ openstack_logging_debug }}"
-
-solum_keystone_user: "solum"
-
-openstack_solum_auth: "{{ openstack_auth }}"
-
-solum_api_workers: "{{ openstack_service_workers }}"
-
-####################
-# Kolla
-####################
-solum_git_repository: "{{ kolla_dev_repos_git }}/{{ project_name }}"
-solum_dev_repos_pull: "{{ kolla_dev_repos_pull }}"
-solum_dev_mode: "{{ kolla_dev_mode }}"
-solum_source_version: "{{ kolla_source_version }}"
-
-####################
-# Keystone
-####################
-solum_ks_services:
- - name: "solum_image_builder"
- type: "image_builder"
- description: "Openstack Solum Image Builder"
- endpoints:
- - {'interface': 'internal', 'url': '{{ solum_image_builder_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ solum_image_builder_public_endpoint }}'}
- - name: "solum_application_deployment"
- type: "application_deployment"
- description: "Openstack Solum Application Deployment"
- endpoints:
- - {'interface': 'internal', 'url': '{{ solum_application_deployment_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ solum_application_deployment_public_endpoint }}'}
-
-solum_ks_users:
- - project: "service"
- user: "{{ solum_keystone_user }}"
- password: "{{ solum_keystone_password }}"
- role: "admin"
diff --git a/ansible/roles/solum/handlers/main.yml b/ansible/roles/solum/handlers/main.yml
deleted file mode 100644
index c60c27d227..0000000000
--- a/ansible/roles/solum/handlers/main.yml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-- name: Restart solum-api container
- vars:
- service_name: "solum-api"
- service: "{{ solum_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
-
-- name: Restart solum-worker container
- vars:
- service_name: "solum-worker"
- service: "{{ solum_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
-
-- name: Restart solum-deployer container
- vars:
- service_name: "solum-deployer"
- service: "{{ solum_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
-
-- name: Restart solum-conductor container
- vars:
- service_name: "solum-conductor"
- service: "{{ solum_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/solum/tasks/bootstrap.yml b/ansible/roles/solum/tasks/bootstrap.yml
deleted file mode 100644
index cf38cbb42f..0000000000
--- a/ansible/roles/solum/tasks/bootstrap.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-- name: Creating Solum database
- become: true
- kolla_toolbox:
- module_name: mysql_db
- module_args:
- login_host: "{{ database_address }}"
- login_port: "{{ database_port }}"
- login_user: "{{ solum_database_shard_root_user }}"
- login_password: "{{ database_password }}"
- name: "{{ solum_database_name }}"
- run_once: True
- delegate_to: "{{ groups['solum-api'][0] }}"
- when:
- - not use_preconfigured_databases | bool
-
-- name: Creating Solum database user and setting permissions
- become: true
- kolla_toolbox:
- module_name: mysql_user
- module_args:
- login_host: "{{ database_address }}"
- login_port: "{{ database_port }}"
- login_user: "{{ solum_database_shard_root_user }}"
- login_password: "{{ database_password }}"
- name: "{{ solum_database_user }}"
- password: "{{ solum_database_password }}"
- host: "%"
- priv: "{{ solum_database_name }}.*:ALL"
- append_privs: "yes"
- run_once: True
- delegate_to: "{{ groups['solum-api'][0] }}"
- when:
- - not use_preconfigured_databases | bool
-
-- import_tasks: bootstrap_service.yml
diff --git a/ansible/roles/solum/tasks/bootstrap_service.yml b/ansible/roles/solum/tasks/bootstrap_service.yml
deleted file mode 100644
index 47eadb47e4..0000000000
--- a/ansible/roles/solum/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Running Solum bootstrap container
- vars:
- solum_api: "{{ solum_services['solum-api'] }}"
- become: true
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ solum_api.image }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_solum"
- restart_policy: no
- volumes: "{{ solum_api.volumes | reject('equalto', '') | list }}"
- run_once: True
- delegate_to: "{{ groups[solum_api.group][0] }}"
diff --git a/ansible/roles/solum/tasks/check-containers.yml b/ansible/roles/solum/tasks/check-containers.yml
deleted file mode 100644
index 42772b6462..0000000000
--- a/ansible/roles/solum/tasks/check-containers.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- name: Check solum containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ solum_services }}"
- notify:
- - "Restart {{ item.key }} container"
diff --git a/ansible/roles/solum/tasks/clone.yml b/ansible/roles/solum/tasks/clone.yml
deleted file mode 100644
index 58c9835d55..0000000000
--- a/ansible/roles/solum/tasks/clone.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Cloning solum source repository for development
- become: true
- git:
- repo: "{{ solum_git_repository }}"
- dest: "{{ kolla_dev_repos_directory }}/{{ project_name }}"
- update: "{{ solum_dev_repos_pull }}"
- version: "{{ solum_source_version }}"
diff --git a/ansible/roles/solum/tasks/config.yml b/ansible/roles/solum/tasks/config.yml
deleted file mode 100644
index 9b3ff8072b..0000000000
--- a/ansible/roles/solum/tasks/config.yml
+++ /dev/null
@@ -1,50 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item.key }}"
- state: "directory"
- owner: "{{ config_owner_user }}"
- group: "{{ config_owner_group }}"
- mode: "0770"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ solum_services }}"
-
-- include_tasks: copy-certs.yml
- when:
- - kolla_copy_ca_into_containers | bool
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item.key }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ solum_services }}"
- notify:
- - Restart {{ item.key }} container
-
-- name: Copying over solum.conf
- vars:
- service_name: "{{ item.key }}"
- merge_configs:
- sources:
- - "{{ role_path }}/templates/solum.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/solum.conf"
- - "{{ node_custom_config }}/solum/{{ item.key }}.conf"
- - "{{ node_custom_config }}/solum/{{ inventory_hostname }}/solum.conf"
- dest: "{{ node_config_directory }}/{{ item.key }}/solum.conf"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ solum_services }}"
- notify:
- - Restart {{ item.key }} container
diff --git a/ansible/roles/solum/tasks/copy-certs.yml b/ansible/roles/solum/tasks/copy-certs.yml
deleted file mode 100644
index ff86842c5c..0000000000
--- a/ansible/roles/solum/tasks/copy-certs.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: "Copy certificates and keys for {{ project_name }}"
- import_role:
- role: service-cert-copy
- vars:
- project_services: "{{ solum_services }}"
diff --git a/ansible/roles/solum/tasks/deploy-containers.yml b/ansible/roles/solum/tasks/deploy-containers.yml
deleted file mode 100644
index eb24ab5c7a..0000000000
--- a/ansible/roles/solum/tasks/deploy-containers.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- import_tasks: check-containers.yml
diff --git a/ansible/roles/solum/tasks/deploy.yml b/ansible/roles/solum/tasks/deploy.yml
deleted file mode 100644
index 3d2bb2f61d..0000000000
--- a/ansible/roles/solum/tasks/deploy.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- import_tasks: register.yml
-
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-- include_tasks: clone.yml
- when: solum_dev_mode | bool
-
-- import_tasks: bootstrap.yml
-
-- name: Flush handlers
- meta: flush_handlers
diff --git a/ansible/roles/solum/tasks/loadbalancer.yml b/ansible/roles/solum/tasks/loadbalancer.yml
deleted file mode 100644
index 8881542192..0000000000
--- a/ansible/roles/solum/tasks/loadbalancer.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: "Configure loadbalancer for {{ project_name }}"
- import_role:
- name: loadbalancer-config
- vars:
- project_services: "{{ solum_services }}"
- tags: always
diff --git a/ansible/roles/solum/tasks/main.yml b/ansible/roles/solum/tasks/main.yml
deleted file mode 100644
index bc5d1e6257..0000000000
--- a/ansible/roles/solum/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include_tasks: "{{ kolla_action }}.yml"
diff --git a/ansible/roles/solum/tasks/precheck.yml b/ansible/roles/solum/tasks/precheck.yml
deleted file mode 100644
index 372ee0d071..0000000000
--- a/ansible/roles/solum/tasks/precheck.yml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-- import_role:
- name: service-precheck
- vars:
- service_precheck_services: "{{ solum_services }}"
- service_name: "{{ project_name }}"
-
-- name: Get container facts
- become: true
- kolla_container_facts:
- name:
- - solum_api
- register: container_facts
-
-- name: Checking free port for Solum Application Deployment
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ solum_application_deployment_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - container_facts['solum_api'] is not defined
- - inventory_hostname in groups['solum-api']
-
-- name: Checking free port for Solum Image Builder
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ solum_image_builder_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - container_facts['solum_api'] is not defined
- - inventory_hostname in groups['solum-api']
diff --git a/ansible/roles/solum/tasks/pull.yml b/ansible/roles/solum/tasks/pull.yml
deleted file mode 100644
index 53f9c5fda1..0000000000
--- a/ansible/roles/solum/tasks/pull.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- import_role:
- role: service-images-pull
diff --git a/ansible/roles/solum/tasks/reconfigure.yml b/ansible/roles/solum/tasks/reconfigure.yml
deleted file mode 100644
index 5b10a7e111..0000000000
--- a/ansible/roles/solum/tasks/reconfigure.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- import_tasks: deploy.yml
diff --git a/ansible/roles/solum/tasks/register.yml b/ansible/roles/solum/tasks/register.yml
deleted file mode 100644
index fb320049a4..0000000000
--- a/ansible/roles/solum/tasks/register.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- import_role:
- name: service-ks-register
- vars:
- service_ks_register_auth: "{{ openstack_solum_auth }}"
- service_ks_register_services: "{{ solum_ks_services }}"
- service_ks_register_users: "{{ solum_ks_users }}"
diff --git a/ansible/roles/solum/tasks/stop.yml b/ansible/roles/solum/tasks/stop.yml
deleted file mode 100644
index 5018e56e6c..0000000000
--- a/ansible/roles/solum/tasks/stop.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- import_role:
- name: service-stop
- vars:
- project_services: "{{ solum_services }}"
- service_name: "{{ project_name }}"
diff --git a/ansible/roles/solum/tasks/upgrade.yml b/ansible/roles/solum/tasks/upgrade.yml
deleted file mode 100644
index 6ba9f99799..0000000000
--- a/ansible/roles/solum/tasks/upgrade.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-- import_tasks: bootstrap_service.yml
-
-- name: Flush handlers
- meta: flush_handlers
diff --git a/ansible/roles/solum/templates/solum-api.json.j2 b/ansible/roles/solum/templates/solum-api.json.j2
deleted file mode 100644
index 866dcd8132..0000000000
--- a/ansible/roles/solum/templates/solum-api.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "solum-api --config-file /etc/solum/solum.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/solum.conf",
- "dest": "/etc/solum/solum.conf",
- "owner": "solum",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/solum",
- "owner": "solum:solum",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/solum/templates/solum-conductor.json.j2 b/ansible/roles/solum/templates/solum-conductor.json.j2
deleted file mode 100644
index 0106030a28..0000000000
--- a/ansible/roles/solum/templates/solum-conductor.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "solum-conductor --config-file /etc/solum/solum.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/solum.conf",
- "dest": "/etc/solum/solum.conf",
- "owner": "solum",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/solum",
- "owner": "solum:solum",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/solum/templates/solum-deployer.json.j2 b/ansible/roles/solum/templates/solum-deployer.json.j2
deleted file mode 100644
index 228f42bdad..0000000000
--- a/ansible/roles/solum/templates/solum-deployer.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "solum-deployer --config-file /etc/solum/solum.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/solum.conf",
- "dest": "/etc/solum/solum.conf",
- "owner": "solum",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/solum",
- "owner": "solum:solum",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/solum/templates/solum-worker.json.j2 b/ansible/roles/solum/templates/solum-worker.json.j2
deleted file mode 100644
index 2a883df8dd..0000000000
--- a/ansible/roles/solum/templates/solum-worker.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "solum-worker --config-file /etc/solum/solum.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/solum.conf",
- "dest": "/etc/solum/solum.conf",
- "owner": "solum",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/solum",
- "owner": "solum:solum",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/solum/templates/solum.conf.j2 b/ansible/roles/solum/templates/solum.conf.j2
deleted file mode 100644
index a62dc6fde7..0000000000
--- a/ansible/roles/solum/templates/solum.conf.j2
+++ /dev/null
@@ -1,74 +0,0 @@
-[DEFAULT]
-debug = {{ solum_logging_debug }}
-log_dir = /var/log/kolla/solum
-transport_url = {{ rpc_transport_url }}
-
-{% if service_name == 'solum-api' %}
-bind_host = {{ api_interface_address }}
-bind_port = {{ solum_application_deployment_port }}
-{% endif %}
-
-[api]
-image_format = vm
-port = {{ solum_application_deployment_port }}
-workers = {{ solum_api_workers }}
-{% if service_name == 'solum-api' %}
-host = {{ api_interface_address }}
-{% endif %}
-
-[conductor]
-topic = solum-conductor
-
-[deployer]
-growth_factor = 1.1
-wait_interval = 1
-max_attempts = 2000
-handler = heat
-topic = solum-deployer
-
-[worker]
-proj_dir = /solum
-handler = shell
-topic = solum-worker
-task_log_dir = /var/log/kolla/solum/worker
-image_storage = glance
-docker_build_timeout = 1800
-lp_operator_tenant_name = service
-lp_operator_password = {{ solum_keystone_password }}
-lp_operator_user = {{ solum_keystone_user }}
-
-[builder]
-port = {{ solum_image_builder_public_endpoint }}
-host = {{ ansible_facts.hostname }}_{{ item }}
-
-[database]
-connection = mysql+pymysql://{{ solum_database_user }}:{{ solum_database_password }}@{{ solum_database_address }}/{{ solum_database_name }}
-connection_recycle_time = {{ database_connection_recycle_time }}
-max_pool_size = {{ database_max_pool_size }}
-max_retries = -1
-
-[keystone_authtoken]
-service_type = application_deployment
-www_authenticate_uri = {{ keystone_internal_url }}
-auth_url = {{ keystone_internal_url }}
-auth_type = password
-project_domain_id = {{ default_project_domain_id }}
-user_domain_id = {{ default_user_domain_id }}
-project_name = service
-username = {{ solum_keystone_user }}
-password = {{ solum_keystone_password }}
-cafile = {{ openstack_cacert }}
-region_name = {{ openstack_region_name }}
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[oslo_messaging_notifications]
-transport_url = {{ notify_transport_url }}
-
-{% if om_enable_rabbitmq_tls | bool %}
-[oslo_messaging_rabbit]
-ssl = true
-ssl_ca_file = {{ om_rabbitmq_cacert }}
-{% endif %}
diff --git a/ansible/roles/solum/vars/main.yml b/ansible/roles/solum/vars/main.yml
deleted file mode 100644
index 40bf810d2e..0000000000
--- a/ansible/roles/solum/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-project_name: "solum"
diff --git a/ansible/roles/storm/defaults/main.yml b/ansible/roles/storm/defaults/main.yml
deleted file mode 100644
index 4d83a780d4..0000000000
--- a/ansible/roles/storm/defaults/main.yml
+++ /dev/null
@@ -1,84 +0,0 @@
----
-storm_services:
- storm-worker:
- container_name: storm_worker
- group: storm-worker
- enabled: "{{ enable_storm | bool }}"
- image: "{{ storm_image_full }}"
- environment:
- STORM_LOG_DIR: /var/log/kolla/storm
- STORM_LOG4J_PROP: "{{ storm_log_settings }}"
- volumes: "{{ storm_worker_default_volumes + storm_nimbus_extra_volumes }}"
- dimensions: "{{ storm_worker_dimensions }}"
- healthcheck: "{{ storm_worker_healthcheck }}"
- storm-nimbus:
- container_name: storm_nimbus
- group: storm-nimbus
- enabled: "{{ enable_storm | bool }}"
- image: "{{ storm_image_full }}"
- environment:
- STORM_LOG_DIR: /var/log/kolla/storm
- STORM_LOG4J_PROP: "{{ storm_log_settings }}"
- volumes: "{{ storm_nimbus_default_volumes + storm_nimbus_extra_volumes }}"
- dimensions: "{{ storm_nimbus_dimensions }}"
- healthcheck: "{{ storm_nimbus_healthcheck }}"
-
-
-####################
-# Storm
-####################
-storm_log_settings: 'INFO,ROLLINGFILE'
-storm_nimbus_servers: "{% for host in groups['storm-nimbus'] %}'{{ 'api' | kolla_address(host) }}'{% if not loop.last %},{% endif %}{% endfor %}"
-
-####################
-# Docker
-####################
-storm_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/storm"
-storm_tag: "{{ openstack_tag }}"
-storm_image_full: "{{ storm_image }}:{{ storm_tag }}"
-
-storm_worker_dimensions: "{{ default_container_dimensions }}"
-storm_nimbus_dimensions: "{{ default_container_dimensions }}"
-
-storm_worker_enable_healthchecks: "{{ enable_container_healthchecks }}"
-storm_worker_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-storm_worker_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-storm_worker_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-storm_worker_healthcheck_test: ["CMD-SHELL", "healthcheck_port java {{ zookeeper_client_port }}"]
-storm_worker_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-storm_worker_healthcheck:
- interval: "{{ storm_worker_healthcheck_interval }}"
- retries: "{{ storm_worker_healthcheck_retries }}"
- start_period: "{{ storm_worker_healthcheck_start_period }}"
- test: "{% if storm_worker_enable_healthchecks | bool %}{{ storm_worker_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ storm_worker_healthcheck_timeout }}"
-
-storm_nimbus_enable_healthchecks: "{{ enable_container_healthchecks }}"
-storm_nimbus_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-storm_nimbus_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-storm_nimbus_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-storm_nimbus_healthcheck_test: ["CMD-SHELL", "healthcheck_listen java {{ storm_nimbus_thrift_port }}"]
-storm_nimbus_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-storm_nimbus_healthcheck:
- interval: "{{ storm_nimbus_healthcheck_interval }}"
- retries: "{{ storm_nimbus_healthcheck_retries }}"
- start_period: "{{ storm_nimbus_healthcheck_start_period }}"
- test: "{% if storm_nimbus_enable_healthchecks | bool %}{{ storm_nimbus_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ storm_nimbus_healthcheck_timeout }}"
-
-storm_worker_default_volumes:
- - "{{ node_config_directory }}/storm-worker/:{{ container_config_directory }}/"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "storm:/var/lib/storm/data"
- - "kolla_logs:/var/log/kolla/"
-storm_nimbus_default_volumes:
- - "{{ node_config_directory }}/storm-nimbus/:{{ container_config_directory }}/"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "storm:/var/lib/storm/data"
- - "kolla_logs:/var/log/kolla/"
-
-storm_extra_volumes: "{{ default_extra_volumes }}"
-storm_worker_extra_volumes: "{{ storm_extra_volumes }}"
-storm_nimbus_extra_volumes: "{{ storm_extra_volumes }}"
diff --git a/ansible/roles/storm/handlers/main.yml b/ansible/roles/storm/handlers/main.yml
deleted file mode 100644
index 3766bef9da..0000000000
--- a/ansible/roles/storm/handlers/main.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-- name: Restart storm-worker container
- vars:
- service_name: "storm-worker"
- service: "{{ storm_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- environment: "{{ service.environment }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
-
-- name: Restart storm-nimbus container
- vars:
- service_name: "storm-nimbus"
- service: "{{ storm_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- environment: "{{ service.environment }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/storm/tasks/check-containers.yml b/ansible/roles/storm/tasks/check-containers.yml
deleted file mode 100644
index 8d32f26aaf..0000000000
--- a/ansible/roles/storm/tasks/check-containers.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- name: Check storm containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes }}"
- environment: "{{ item.value.environment }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ storm_services }}"
- notify:
- - "Restart {{ item.key }} container"
diff --git a/ansible/roles/storm/tasks/cleanup.yml b/ansible/roles/storm/tasks/cleanup.yml
deleted file mode 100644
index 4d0a5a81c8..0000000000
--- a/ansible/roles/storm/tasks/cleanup.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-- name: Stop and remove containers for Storm services
- become: true
- kolla_docker:
- action: "stop_and_remove_container"
- name: "{{ item.value.container_name }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - not item.value.enabled | bool
- with_dict: "{{ storm_services }}"
-
-- name: Removing config for any disabled services
- file:
- path: "{{ node_config_directory }}/{{ item.key }}"
- state: "absent"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - not item.value.enabled | bool
- with_dict: "{{ storm_services }}"
-
-# NOTE(dszumski): Docker volume removal is currently a manual procedure
diff --git a/ansible/roles/storm/tasks/config.yml b/ansible/roles/storm/tasks/config.yml
deleted file mode 100644
index 39301da7ad..0000000000
--- a/ansible/roles/storm/tasks/config.yml
+++ /dev/null
@@ -1,62 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item.key }}"
- state: "directory"
- owner: "{{ config_owner_user }}"
- group: "{{ config_owner_group }}"
- mode: "0770"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ storm_services }}"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item.key }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ storm_services }}"
- notify:
- - "Restart {{ item.key }} container"
-
-- name: Copying over storm worker config
- vars:
- service: "{{ storm_services['storm-worker'] }}"
- template:
- src: "{{ item }}"
- dest: "{{ node_config_directory }}/storm-worker/storm.yml"
- mode: "0660"
- become: true
- with_first_found:
- - "{{ node_custom_config }}/storm/{{ inventory_hostname }}/storm.yml"
- - "{{ node_custom_config }}/storm.yml"
- - "storm.yml.j2"
- when:
- - inventory_hostname in groups[service['group']]
- - service.enabled | bool
- notify:
- - Restart storm-worker container
-
-- name: Copying over storm nimbus config
- vars:
- service: "{{ storm_services['storm-nimbus'] }}"
- template:
- src: "{{ item }}"
- dest: "{{ node_config_directory }}/storm-nimbus/storm.yml"
- mode: "0660"
- become: true
- with_first_found:
- - "{{ node_custom_config }}/storm/{{ inventory_hostname }}/storm.yml"
- - "{{ node_custom_config }}/storm.yml"
- - "storm.yml.j2"
- when:
- - inventory_hostname in groups[service['group']]
- - service.enabled | bool
- notify:
- - Restart storm-nimbus container
diff --git a/ansible/roles/storm/tasks/deploy-containers.yml b/ansible/roles/storm/tasks/deploy-containers.yml
deleted file mode 100644
index eb24ab5c7a..0000000000
--- a/ansible/roles/storm/tasks/deploy-containers.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- import_tasks: check-containers.yml
diff --git a/ansible/roles/storm/tasks/deploy.yml b/ansible/roles/storm/tasks/deploy.yml
deleted file mode 100644
index 49edff81e3..0000000000
--- a/ansible/roles/storm/tasks/deploy.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-- name: Flush handlers
- meta: flush_handlers
diff --git a/ansible/roles/storm/tasks/main.yml b/ansible/roles/storm/tasks/main.yml
deleted file mode 100644
index bc5d1e6257..0000000000
--- a/ansible/roles/storm/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include_tasks: "{{ kolla_action }}.yml"
diff --git a/ansible/roles/storm/tasks/precheck.yml b/ansible/roles/storm/tasks/precheck.yml
deleted file mode 100644
index cb3ac75bdf..0000000000
--- a/ansible/roles/storm/tasks/precheck.yml
+++ /dev/null
@@ -1,48 +0,0 @@
----
-- import_role:
- name: service-precheck
- vars:
- service_precheck_services: "{{ storm_services }}"
- service_name: "{{ project_name }}"
-
-- name: Get container facts
- become: true
- kolla_container_facts:
- name:
- - storm_worker
- - storm_nimbus
- register: container_facts
-
-- name: Checking storm nimbus thrift port is available
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ storm_nimbus_thrift_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - container_facts['storm_nimbus'] is not defined
- - inventory_hostname in groups['storm-nimbus']
-
-- name: Checking storm supervisor thrift port is available
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ storm_supervisor_thrift_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - container_facts['storm_worker'] is not defined
- - inventory_hostname in groups['storm-worker']
-
-- name: Checking storm worker ports are available
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ item }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- with_sequence: "start={{ storm_worker_port_range.start | int }} end={{ storm_worker_port_range.end | int }}"
- when:
- - container_facts['storm_worker'] is not defined
- - inventory_hostname in groups['storm-worker']
diff --git a/ansible/roles/storm/tasks/pull.yml b/ansible/roles/storm/tasks/pull.yml
deleted file mode 100644
index 53f9c5fda1..0000000000
--- a/ansible/roles/storm/tasks/pull.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- import_role:
- role: service-images-pull
diff --git a/ansible/roles/storm/tasks/reconfigure.yml b/ansible/roles/storm/tasks/reconfigure.yml
deleted file mode 100644
index 5b10a7e111..0000000000
--- a/ansible/roles/storm/tasks/reconfigure.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- import_tasks: deploy.yml
diff --git a/ansible/roles/storm/tasks/stop.yml b/ansible/roles/storm/tasks/stop.yml
deleted file mode 100644
index eba1e5b2aa..0000000000
--- a/ansible/roles/storm/tasks/stop.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- import_role:
- name: service-stop
- vars:
- project_services: "{{ storm_services }}"
- service_name: "{{ project_name }}"
diff --git a/ansible/roles/storm/templates/storm-nimbus.json.j2 b/ansible/roles/storm/templates/storm-nimbus.json.j2
deleted file mode 100644
index 5ceaeafd2c..0000000000
--- a/ansible/roles/storm/templates/storm-nimbus.json.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "command": "/opt/storm/bin/storm nimbus",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/storm.yml",
- "dest": "/opt/storm/conf/storm.yaml",
- "owner": "storm",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/lib/storm",
- "owner": "storm:storm",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/storm",
- "owner": "storm:storm",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/storm/templates/storm-worker.json.j2 b/ansible/roles/storm/templates/storm-worker.json.j2
deleted file mode 100644
index 564b76ab9f..0000000000
--- a/ansible/roles/storm/templates/storm-worker.json.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "command": "/opt/storm/bin/storm supervisor",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/storm.yml",
- "dest": "/opt/storm/conf/storm.yaml",
- "owner": "storm",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/lib/storm",
- "owner": "storm:storm",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/storm",
- "owner": "storm:storm",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/storm/templates/storm.yml.j2 b/ansible/roles/storm/templates/storm.yml.j2
deleted file mode 100644
index 44158341bc..0000000000
--- a/ansible/roles/storm/templates/storm.yml.j2
+++ /dev/null
@@ -1,14 +0,0 @@
-storm.local.dir: "/var/lib/storm/data"
-storm.log.dir: "/var/log/kolla/storm"
-nimbus.seeds: [{{ storm_nimbus_servers }}]
-storm.zookeeper.port: {{ zookeeper_client_port }}
-storm.zookeeper.servers:
-{% for host in groups['zookeeper'] %}
- - "{{ 'api' | kolla_address(host) }}"
-{% endfor %}
-supervisor.slots.ports:
-{% for port in range(storm_worker_port_range.start|int, storm_worker_port_range.end|int + 1) %}
- - {{ port }}
-{% endfor %}
-supervisor.thrift.port: {{ storm_supervisor_thrift_port }}
-nimbus.thrift.port: {{ storm_nimbus_thrift_port }}
diff --git a/ansible/roles/storm/vars/main.yml b/ansible/roles/storm/vars/main.yml
deleted file mode 100644
index ccb548beb3..0000000000
--- a/ansible/roles/storm/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-project_name: "storm"
diff --git a/ansible/roles/swift/defaults/main.yml b/ansible/roles/swift/defaults/main.yml
index 4469d7cd68..f1c65f2fa3 100644
--- a/ansible/roles/swift/defaults/main.yml
+++ b/ansible/roles/swift/defaults/main.yml
@@ -13,6 +13,7 @@ swift_services:
enabled: "{{ enable_swift }}"
mode: "http"
external: true
+ external_fqdn: "{{ swift_external_fqdn }}"
port: "{{ swift_proxy_server_listen_port }}"
####################
@@ -20,27 +21,27 @@ swift_services:
####################
swift_tag: "{{ openstack_tag }}"
-swift_proxy_server_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/swift-proxy-server"
+swift_proxy_server_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}swift-proxy-server"
swift_proxy_server_tag: "{{ swift_tag }}"
swift_proxy_server_image_full: "{{ swift_proxy_server_image }}:{{ swift_proxy_server_tag }}"
-swift_account_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/swift-account"
+swift_account_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}swift-account"
swift_account_tag: "{{ swift_tag }}"
swift_account_image_full: "{{ swift_account_image }}:{{ swift_account_tag }}"
-swift_container_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/swift-container"
+swift_container_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}swift-container"
swift_container_tag: "{{ swift_tag }}"
swift_container_image_full: "{{ swift_container_image }}:{{ swift_container_tag }}"
-swift_object_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/swift-object"
+swift_object_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}swift-object"
swift_object_tag: "{{ swift_tag }}"
swift_object_image_full: "{{ swift_object_image }}:{{ swift_object_tag }}"
-swift_object_expirer_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/swift-object-expirer"
+swift_object_expirer_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}swift-object-expirer"
swift_object_expirer_tag: "{{ swift_tag }}"
swift_object_expirer_image_full: "{{ swift_object_expirer_image }}:{{ swift_object_expirer_tag }}"
-swift_rsyncd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/swift-rsyncd"
+swift_rsyncd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}swift-rsyncd"
swift_rsyncd_tag: "{{ swift_tag }}"
swift_rsyncd_image_full: "{{ swift_rsyncd_image }}:{{ swift_rsyncd_tag }}"
@@ -49,6 +50,9 @@ swift_log_level: "{{ 'DEBUG' if openstack_logging_debug | bool else 'INFO' }}"
####################
# OpenStack
####################
+swift_internal_endpoint: "{{ swift_internal_base_endpoint }}/v1/AUTH_%(tenant_id)s"
+swift_public_endpoint: "{{ swift_public_base_endpoint }}/v1/AUTH_%(tenant_id)s"
+
swift_logging_debug: "{{ openstack_logging_debug }}"
swift_keystone_user: "swift"
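
The new swift_internal_endpoint/swift_public_endpoint values append the Keystone endpoint-substitution token %(tenant_id)s to shared base endpoints; Keystone expands that token per project when rendering the catalog, so Jinja must leave it untouched. A minimal sketch of how the base endpoint is presumably composed, mirroring the pattern used for other services in this patch (the port variable name here is an assumption, not shown in this hunk):

    # hedged sketch; swift_proxy_server_port is assumed from group_vars
    swift_internal_base_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ swift_proxy_server_port }}"
    swift_internal_endpoint: "{{ swift_internal_base_endpoint }}/v1/AUTH_%(tenant_id)s"
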
diff --git a/ansible/roles/swift/tasks/config_validate.yml b/ansible/roles/swift/tasks/config_validate.yml
new file mode 100644
index 0000000000..ed97d539c0
--- /dev/null
+++ b/ansible/roles/swift/tasks/config_validate.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible/roles/swift/tasks/legacy_upgrade.yml b/ansible/roles/swift/tasks/legacy_upgrade.yml
index c894fabf89..f3d1fcb01a 100644
--- a/ansible/roles/swift/tasks/legacy_upgrade.yml
+++ b/ansible/roles/swift/tasks/legacy_upgrade.yml
@@ -6,7 +6,8 @@
# containers or we get a conflict when attempting to start the new ones.
- name: "Cleaning out old Swift containers"
become: true
- kolla_docker:
+ kolla_container:
+ common_options: "{{ docker_common_options }}"
name: "{{ item }}"
action: "stop_container"
with_items:
diff --git a/ansible/roles/swift/tasks/precheck.yml b/ansible/roles/swift/tasks/precheck.yml
index 2e00c5fd4e..423915afd9 100644
--- a/ansible/roles/swift/tasks/precheck.yml
+++ b/ansible/roles/swift/tasks/precheck.yml
@@ -8,11 +8,14 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- swift_account_server
- swift_container_server
- swift_object_server
- swift_proxy_server
+ check_mode: false
register: container_facts
- name: Checking free port for Swift Account Server
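
kolla_container_facts now takes an explicit action and container_engine, and the task runs with check_mode: false so the registered facts exist even during a --check run. Those facts then gate the port prechecks: a port only needs to be free when the owning container is not already running. A hedged sketch of the first check that follows, matching the precheck pattern used elsewhere in this patch (the port variable is assumed from the role's defaults):

    - name: Checking free port for Swift Account Server
      wait_for:
        host: "{{ api_interface_address }}"
        port: "{{ swift_account_server_port }}"
        connect_timeout: 1
        timeout: 1
        state: stopped
      when:
        - container_facts['swift_account_server'] is not defined
        - inventory_hostname in groups['swift-account-server']
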
diff --git a/ansible/roles/swift/tasks/pull.yml b/ansible/roles/swift/tasks/pull.yml
index 622c622764..b93f862571 100644
--- a/ansible/roles/swift/tasks/pull.yml
+++ b/ansible/roles/swift/tasks/pull.yml
@@ -1,7 +1,7 @@
---
- name: Pulling rsyncd image
become: true
- kolla_docker:
+ kolla_container:
action: "pull_image"
common_options: "{{ docker_common_options }}"
image: "{{ swift_rsyncd_image_full }}"
@@ -15,7 +15,7 @@
- name: Pulling swift-proxy-server image
become: true
- kolla_docker:
+ kolla_container:
action: "pull_image"
common_options: "{{ docker_common_options }}"
image: "{{ swift_proxy_server_image_full }}"
@@ -27,7 +27,7 @@
- name: Pulling swift-account image
become: true
- kolla_docker:
+ kolla_container:
action: "pull_image"
common_options: "{{ docker_common_options }}"
image: "{{ swift_account_image_full }}"
@@ -39,7 +39,7 @@
- name: Pulling swift-container image
become: true
- kolla_docker:
+ kolla_container:
action: "pull_image"
common_options: "{{ docker_common_options }}"
image: "{{ swift_container_image_full }}"
@@ -51,7 +51,7 @@
- name: Pulling swift-object image
become: true
- kolla_docker:
+ kolla_container:
action: "pull_image"
common_options: "{{ docker_common_options }}"
image: "{{ swift_object_image_full }}"
@@ -63,7 +63,7 @@
- name: Pulling swift-object-expirer image
become: true
- kolla_docker:
+ kolla_container:
action: "pull_image"
common_options: "{{ docker_common_options }}"
image: "{{ swift_object_expirer_image_full }}"
diff --git a/ansible/roles/swift/tasks/reconfigure.yml b/ansible/roles/swift/tasks/reconfigure.yml
index 745626c44d..d3c70e83f6 100644
--- a/ansible/roles/swift/tasks/reconfigure.yml
+++ b/ansible/roles/swift/tasks/reconfigure.yml
@@ -31,7 +31,8 @@
- name: Ensuring the containers up
become: true
- kolla_docker:
+ kolla_container:
+ common_options: "{{ docker_common_options }}"
name: "{{ item.name }}"
action: "get_container_state"
register: container_state
@@ -44,7 +45,7 @@
- name: Check the configs
become: true
- command: "{{ kolla_container_engine }}exec -u root {{ item.name }} /usr/local/bin/kolla_set_configs --check"
+ command: "{{ kolla_container_engine }} exec -u root {{ item.name }} /usr/local/bin/kolla_set_configs --check"
changed_when: false
failed_when: false
register: check_results
@@ -57,7 +58,8 @@
# just remove the container and start again
- name: Containers config strategy
become: true
- kolla_docker:
+ kolla_container:
+ common_options: "{{ docker_common_options }}"
name: "{{ item.name }}"
action: "get_container_env"
register: container_envs
@@ -67,7 +69,8 @@
- name: Remove the containers
become: true
- kolla_docker:
+ kolla_container:
+ common_options: "{{ docker_common_options }}"
name: "{{ item[0]['name'] }}"
action: "remove_container"
register: remove_containers
@@ -87,7 +90,8 @@
- name: Restart containers
become: true
- kolla_docker:
+ kolla_container:
+ common_options: "{{ docker_common_options }}"
name: "{{ item[0]['name'] }}"
action: "restart_container"
when:
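
This file keeps Swift's legacy reconfigure flow while swapping in kolla_container: check the config inside each running container with kolla_set_configs --check, read the container's KOLLA_CONFIG_STRATEGY environment, then either remove and recreate (COPY_ONCE, config baked in at first start) or simply restart (COPY_ALWAYS, config re-copied on every start). A rough sketch of the removal decision, using hypothetical loop variables rather than the file's literal with_together items:

    # hedged reconstruction; the item fields are illustrative names only
    - name: Remove stale COPY_ONCE containers (sketch)
      become: true
      kolla_container:
        common_options: "{{ docker_common_options }}"
        name: "{{ item.name }}"
        action: "remove_container"
      when:
        - item.env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
        - item.check_rc | int != 0
      with_items: "{{ swift_config_check_results }}"  # hypothetical list
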
diff --git a/ansible/roles/swift/tasks/register.yml b/ansible/roles/swift/tasks/register.yml
index d35b0b9b5c..42d0f97c79 100644
--- a/ansible/roles/swift/tasks/register.yml
+++ b/ansible/roles/swift/tasks/register.yml
@@ -9,7 +9,8 @@
- name: Creating the ResellerAdmin role
become: true
kolla_toolbox:
- module_name: "os_keystone_role"
+ container_engine: "{{ kolla_container_engine }}"
+ module_name: openstack.cloud.identity_role
module_args:
name: "ResellerAdmin"
region_name: "{{ openstack_region_name }}"
diff --git a/ansible/roles/swift/tasks/rolling_upgrade.yml b/ansible/roles/swift/tasks/rolling_upgrade.yml
index 8899927ffc..1af79c1fb1 100644
--- a/ansible/roles/swift/tasks/rolling_upgrade.yml
+++ b/ansible/roles/swift/tasks/rolling_upgrade.yml
@@ -30,7 +30,8 @@
# rolling upgrade will be finished.
- name: Gracefully shutdown swift services in storage nodes
become: true
- kolla_docker:
+ kolla_container:
+ common_options: "{{ docker_common_options }}"
action: "stop_container"
name: "{{ item.name }}"
when: inventory_hostname in groups[item.group]
@@ -47,14 +48,15 @@
- name: Gracefully shutdown swift proxy services in proxy nodes
become: true
- kolla_docker:
+ kolla_container:
+ common_options: "{{ docker_common_options }}"
action: "stop_container"
name: "swift_proxy_server"
when: inventory_hostname in groups['swift-proxy-server']
- name: Start new swift proxy server container
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
image: "{{ swift_proxy_server_image_full }}"
diff --git a/ansible/roles/swift/tasks/start.yml b/ansible/roles/swift/tasks/start.yml
index 7848e4d4fa..c97d0affaa 100644
--- a/ansible/roles/swift/tasks/start.yml
+++ b/ansible/roles/swift/tasks/start.yml
@@ -35,7 +35,7 @@
- name: Starting swift-rsyncd container
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
image: "{{ swift_rsyncd_image_full }}"
@@ -50,7 +50,7 @@
- name: Starting swift-account-server container
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
name: "swift_account_server"
@@ -60,11 +60,12 @@
- "{{ node_config_directory }}/swift-account-server/:{{ container_config_directory }}/:ro"
- "{{ swift_devices_mount_point }}:{{ swift_devices_mount_point }}:shared"
- "/etc/localtime:/etc/localtime:ro"
+ - "swift_recon_cache:/var/cache/swift"
when: inventory_hostname in groups['swift-account-server']
- name: Starting swift-account-auditor container
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
image: "{{ swift_account_image_full }}"
@@ -79,7 +80,7 @@
- name: Starting swift-account-replication-server container
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
environment:
@@ -97,7 +98,7 @@
- name: Starting swift-account-replicator container
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
environment:
@@ -114,7 +115,7 @@
- name: Starting swift-account-reaper container
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
image: "{{ swift_account_image_full }}"
@@ -128,7 +129,7 @@
- name: Starting swift-container-server container
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
image: "{{ swift_container_image_full }}"
@@ -138,11 +139,12 @@
- "{{ node_config_directory }}/swift-container-server/:{{ container_config_directory }}/:ro"
- "{{ swift_devices_mount_point }}:{{ swift_devices_mount_point }}:shared"
- "/etc/localtime:/etc/localtime:ro"
+ - "swift_recon_cache:/var/cache/swift"
when: inventory_hostname in groups['swift-container-server']
- name: Starting swift-container-auditor container
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
image: "{{ swift_container_image_full }}"
@@ -157,7 +159,7 @@
- name: Starting swift-container-replication-server container
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
environment:
@@ -175,7 +177,7 @@
- name: Starting swift-container-replicator container
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
environment:
@@ -192,7 +194,7 @@
- name: Starting swift-container-updater container
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
image: "{{ swift_container_image_full }}"
@@ -207,7 +209,7 @@
- name: Starting swift-object-server container
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
image: "{{ swift_object_image_full }}"
@@ -222,7 +224,7 @@
- name: Starting swift-object-auditor container
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
image: "{{ swift_object_image_full }}"
@@ -237,7 +239,7 @@
- name: Starting swift-object-replication-server container
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
environment:
@@ -255,7 +257,7 @@
- name: Starting swift-object-replicator container
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
environment:
@@ -272,7 +274,7 @@
- name: Starting swift-object-updater container
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
image: "{{ swift_object_image_full }}"
@@ -287,7 +289,7 @@
- name: Starting swift-object-expirer container
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
image: "{{ swift_object_expirer_image_full }}"
@@ -302,7 +304,7 @@
- name: Starting swift-proxy-server container
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
image: "{{ swift_proxy_server_image_full }}"
diff --git a/ansible/roles/swift/tasks/stop.yml b/ansible/roles/swift/tasks/stop.yml
index 39fe0329ac..ae3b39fe3b 100644
--- a/ansible/roles/swift/tasks/stop.yml
+++ b/ansible/roles/swift/tasks/stop.yml
@@ -1,7 +1,7 @@
---
- name: Stopping swift-rsyncd container
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "swift_rsyncd"
@@ -12,7 +12,7 @@
- name: Stopping swift-account-server container
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "swift_account_server"
@@ -22,7 +22,7 @@
- name: Stopping swift-account-auditor container
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "swift_account_auditor"
@@ -32,7 +32,7 @@
- name: Stopping swift-account-replicator container
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "swift_account_replicator"
@@ -42,7 +42,7 @@
- name: Stopping swift-account-reaper container
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "swift_account_reaper"
@@ -52,7 +52,7 @@
- name: Stopping swift-container-server container
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "swift_container_server"
@@ -62,7 +62,7 @@
- name: Stopping swift-container-auditor container
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "swift_container_auditor"
@@ -72,7 +72,7 @@
- name: Stopping swift-container-replicator container
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "swift_container_replicator"
@@ -82,7 +82,7 @@
- name: Stopping swift-container-updater container
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "swift_container_updater"
@@ -92,7 +92,7 @@
- name: Stopping swift-object-server container
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "swift_object_server"
@@ -102,7 +102,7 @@
- name: Stopping swift-object-auditor container
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "swift_object_auditor"
@@ -112,7 +112,7 @@
- name: Stopping swift-object-replicator container
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "swift_object_replicator"
@@ -122,7 +122,7 @@
- name: Stopping swift-object-updater container
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "swift_object_updater"
@@ -132,7 +132,7 @@
- name: Stopping swift-object-expirer container
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "swift_object_expirer"
@@ -142,7 +142,7 @@
- name: Stopping swift-proxy-server container
become: true
- kolla_docker:
+ kolla_container:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "swift_proxy_server"
diff --git a/ansible/roles/swift/templates/proxy-server.conf.j2 b/ansible/roles/swift/templates/proxy-server.conf.j2
index c2544cd0fa..4b6982a334 100644
--- a/ansible/roles/swift/templates/proxy-server.conf.j2
+++ b/ansible/roles/swift/templates/proxy-server.conf.j2
@@ -46,7 +46,7 @@ password = {{ swift_keystone_password }}
delay_auth_decision = {{ swift_delay_auth_decision }}
cafile = {{ openstack_cacert }}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
@@ -99,5 +99,5 @@ use = egg:swift#s3api
[filter:s3token]
use = egg:swift#s3token
-auth_uri = {{ keystone_internal_url }}
+auth_uri = {{ keystone_internal_url }}/v3
{% endif %}
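
Two changes here: memcache_security_strategy is no longer hard-coded to ENCRYPT but taken from a variable, and the s3token auth_uri pins the /v3 path so S3 credential validation hits the versioned Keystone endpoint. Assuming the new variable keeps the old behaviour as its default:

    # assumed group_vars default, preserving the previously hard-coded value
    memcache_security_strategy: "ENCRYPT"
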
diff --git a/ansible/roles/swift/templates/swift-account-auditor.json.j2 b/ansible/roles/swift/templates/swift-account-auditor.json.j2
index 38e65d81aa..4599a6be41 100644
--- a/ansible/roles/swift/templates/swift-account-auditor.json.j2
+++ b/ansible/roles/swift/templates/swift-account-auditor.json.j2
@@ -25,6 +25,12 @@
"owner": "swift",
"perm": "0600",
"optional": true
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
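
This Jinja fragment, repeated verbatim across the Swift (and later tacker/telegraf) templates in this patch, conditionally appends one more entry to the config_files array when kolla_copy_ca_into_containers is enabled; placing the {% if %} before the comma keeps the JSON valid in both branches. Rendered, the extra entry looks like this (the source path assumes the usual container_config_directory of /var/lib/kolla/config_files):

    {
      "source": "/var/lib/kolla/config_files/ca-certificates",
      "dest": "/var/lib/kolla/share/ca-certificates",
      "owner": "root",
      "perm": "0600"
    }
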
diff --git a/ansible/roles/swift/templates/swift-account-reaper.json.j2 b/ansible/roles/swift/templates/swift-account-reaper.json.j2
index b93ccf36cf..ec45a6f6bf 100644
--- a/ansible/roles/swift/templates/swift-account-reaper.json.j2
+++ b/ansible/roles/swift/templates/swift-account-reaper.json.j2
@@ -25,6 +25,12 @@
"owner": "swift",
"perm": "0600",
"optional": true
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/swift/templates/swift-account-replication-server.json.j2 b/ansible/roles/swift/templates/swift-account-replication-server.json.j2
index 1c9c50b44a..296244493e 100644
--- a/ansible/roles/swift/templates/swift-account-replication-server.json.j2
+++ b/ansible/roles/swift/templates/swift-account-replication-server.json.j2
@@ -25,6 +25,12 @@
"owner": "swift",
"perm": "0600",
"optional": true
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/swift/templates/swift-account-replicator.json.j2 b/ansible/roles/swift/templates/swift-account-replicator.json.j2
index a49731935b..ec57074a66 100644
--- a/ansible/roles/swift/templates/swift-account-replicator.json.j2
+++ b/ansible/roles/swift/templates/swift-account-replicator.json.j2
@@ -25,6 +25,12 @@
"owner": "swift",
"perm": "0600",
"optional": true
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/swift/templates/swift-account-server.json.j2 b/ansible/roles/swift/templates/swift-account-server.json.j2
index 998e06b138..ee66f112e6 100644
--- a/ansible/roles/swift/templates/swift-account-server.json.j2
+++ b/ansible/roles/swift/templates/swift-account-server.json.j2
@@ -25,6 +25,12 @@
"owner": "swift",
"perm": "0600",
"optional": true
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/swift/templates/swift-container-auditor.json.j2 b/ansible/roles/swift/templates/swift-container-auditor.json.j2
index 7044109718..0ce8103ad6 100644
--- a/ansible/roles/swift/templates/swift-container-auditor.json.j2
+++ b/ansible/roles/swift/templates/swift-container-auditor.json.j2
@@ -25,6 +25,12 @@
"owner": "swift",
"perm": "0600",
"optional": true
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/swift/templates/swift-container-replication-server.json.j2 b/ansible/roles/swift/templates/swift-container-replication-server.json.j2
index 02c202cab5..4a3415bd07 100644
--- a/ansible/roles/swift/templates/swift-container-replication-server.json.j2
+++ b/ansible/roles/swift/templates/swift-container-replication-server.json.j2
@@ -25,6 +25,12 @@
"owner": "swift",
"perm": "0600",
"optional": true
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/swift/templates/swift-container-replicator.json.j2 b/ansible/roles/swift/templates/swift-container-replicator.json.j2
index 76d0a190df..ac9d7b8822 100644
--- a/ansible/roles/swift/templates/swift-container-replicator.json.j2
+++ b/ansible/roles/swift/templates/swift-container-replicator.json.j2
@@ -25,6 +25,12 @@
"owner": "swift",
"perm": "0600",
"optional": true
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/swift/templates/swift-container-server.json.j2 b/ansible/roles/swift/templates/swift-container-server.json.j2
index a9870e5bd2..ff52bd0922 100644
--- a/ansible/roles/swift/templates/swift-container-server.json.j2
+++ b/ansible/roles/swift/templates/swift-container-server.json.j2
@@ -25,6 +25,12 @@
"owner": "swift",
"perm": "0600",
"optional": true
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/swift/templates/swift-container-updater.json.j2 b/ansible/roles/swift/templates/swift-container-updater.json.j2
index 0f59961b6f..42f070e074 100644
--- a/ansible/roles/swift/templates/swift-container-updater.json.j2
+++ b/ansible/roles/swift/templates/swift-container-updater.json.j2
@@ -31,6 +31,12 @@
"owner": "swift",
"perm": "0600",
"optional": true
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/swift/templates/swift-object-auditor.json.j2 b/ansible/roles/swift/templates/swift-object-auditor.json.j2
index 51df1a2eee..c25aadb854 100644
--- a/ansible/roles/swift/templates/swift-object-auditor.json.j2
+++ b/ansible/roles/swift/templates/swift-object-auditor.json.j2
@@ -39,6 +39,12 @@
"owner": "swift",
"perm": "0600",
"optional": true
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/swift/templates/swift-object-expirer.json.j2 b/ansible/roles/swift/templates/swift-object-expirer.json.j2
index 639f41e812..f000f7ad64 100644
--- a/ansible/roles/swift/templates/swift-object-expirer.json.j2
+++ b/ansible/roles/swift/templates/swift-object-expirer.json.j2
@@ -45,6 +45,12 @@
"owner": "swift",
"perm": "0600",
"optional": true
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/swift/templates/swift-object-replication-server.json.j2 b/ansible/roles/swift/templates/swift-object-replication-server.json.j2
index c5ddf90d31..f9697a1fee 100644
--- a/ansible/roles/swift/templates/swift-object-replication-server.json.j2
+++ b/ansible/roles/swift/templates/swift-object-replication-server.json.j2
@@ -39,6 +39,12 @@
"owner": "swift",
"perm": "0600",
"optional": true
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/swift/templates/swift-object-replicator.json.j2 b/ansible/roles/swift/templates/swift-object-replicator.json.j2
index 88c46cd342..a66f7c19b6 100644
--- a/ansible/roles/swift/templates/swift-object-replicator.json.j2
+++ b/ansible/roles/swift/templates/swift-object-replicator.json.j2
@@ -39,6 +39,12 @@
"owner": "swift",
"perm": "0600",
"optional": true
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/swift/templates/swift-object-server.json.j2 b/ansible/roles/swift/templates/swift-object-server.json.j2
index 2b0687c154..d9344cffe2 100644
--- a/ansible/roles/swift/templates/swift-object-server.json.j2
+++ b/ansible/roles/swift/templates/swift-object-server.json.j2
@@ -39,6 +39,12 @@
"owner": "swift",
"perm": "0600",
"optional": true
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/swift/templates/swift-object-updater.json.j2 b/ansible/roles/swift/templates/swift-object-updater.json.j2
index 1b6469c36b..2c665e5b98 100644
--- a/ansible/roles/swift/templates/swift-object-updater.json.j2
+++ b/ansible/roles/swift/templates/swift-object-updater.json.j2
@@ -39,6 +39,12 @@
"owner": "swift",
"perm": "0600",
"optional": true
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/swift/templates/swift-proxy-server.json.j2 b/ansible/roles/swift/templates/swift-proxy-server.json.j2
index a1f4daea59..d0fa0b6f39 100644
--- a/ansible/roles/swift/templates/swift-proxy-server.json.j2
+++ b/ansible/roles/swift/templates/swift-proxy-server.json.j2
@@ -45,6 +45,12 @@
"owner": "swift",
"perm": "0600",
"optional": true
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/swift/templates/swift-rsyncd.json.j2 b/ansible/roles/swift/templates/swift-rsyncd.json.j2
index d3580cc77a..29045d9875 100644
--- a/ansible/roles/swift/templates/swift-rsyncd.json.j2
+++ b/ansible/roles/swift/templates/swift-rsyncd.json.j2
@@ -6,6 +6,12 @@
"dest": "/etc/rsyncd.conf",
"owner": "swift",
"perm": "0640"
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
]
}
diff --git a/ansible/roles/sysctl/defaults/main.yml b/ansible/roles/sysctl/defaults/main.yml
new file mode 100644
index 0000000000..0a42489d50
--- /dev/null
+++ b/ansible/roles/sysctl/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+sysctl_path: "/usr/sbin/sysctl"
diff --git a/ansible/roles/sysctl/tasks/main.yml b/ansible/roles/sysctl/tasks/main.yml
new file mode 100644
index 0000000000..8d6a72efaf
--- /dev/null
+++ b/ansible/roles/sysctl/tasks/main.yml
@@ -0,0 +1,20 @@
+---
+- name: Check IPv6 support
+ command: "{{ sysctl_path }} -n net.ipv6.conf.all.disable_ipv6"
+ register: ipv6_disabled
+ changed_when: false
+
+- name: Setting sysctl values
+ become: true
+ vars:
+ should_set: "{{ item.value != 'KOLLA_UNSET' }}"
+ sysctl:
+ name: "{{ item.name }}"
+ state: "{{ should_set | ternary('present', 'absent') }}"
+ value: "{{ should_set | ternary(item.value, omit) }}"
+ sysctl_set: "{{ should_set }}"
+ sysctl_file: "{{ kolla_sysctl_conf_path }}"
+ with_items: "{{ settings }}"
+ when:
+ - item.value != 'KOLLA_SKIP'
+ - not ('ipv6' in item.name and ipv6_disabled.stdout | bool)
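
The new role centralizes sysctl handling around two sentinel values: KOLLA_SKIP leaves a key completely untouched, while KOLLA_UNSET removes it from the persisted file (state absent); IPv6 keys are additionally skipped when the kernel reports IPv6 disabled. The defaults file makes the sysctl binary path overridable per distribution. A minimal caller sketch, with illustrative settings values (kolla_sysctl_conf_path is assumed to be set globally):

    - name: Apply host networking sysctls (sketch)
      include_role:
        name: sysctl
      vars:
        settings:
          - { name: "net.ipv4.ip_forward", value: 1 }
          - { name: "net.ipv4.conf.all.rp_filter", value: "KOLLA_UNSET" }   # remove from file
          - { name: "net.ipv6.conf.all.forwarding", value: "KOLLA_SKIP" }   # leave untouched
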
diff --git a/ansible/roles/tacker/defaults/main.yml b/ansible/roles/tacker/defaults/main.yml
index 8ec8233787..cd3c767cb7 100644
--- a/ansible/roles/tacker/defaults/main.yml
+++ b/ansible/roles/tacker/defaults/main.yml
@@ -15,12 +15,15 @@ tacker_services:
mode: "http"
external: false
port: "{{ tacker_server_port }}"
+ listen_port: "{{ tacker_server_listen_port }}"
custom_member_list: "{{ tacker_haproxy_members.split(';') }}"
tacker_server_external:
enabled: "{{ enable_tacker }}"
mode: "http"
external: true
- port: "{{ tacker_server_port }}"
+ external_fqdn: "{{ tacker_external_fqdn }}"
+ port: "{{ tacker_server_public_port }}"
+ listen_port: "{{ tacker_server_listen_port }}"
custom_member_list: "{{ tacker_haproxy_members.split(';') }}"
tacker-conductor:
container_name: "tacker_conductor"
@@ -32,6 +35,13 @@ tacker_services:
dimensions: "{{ tacker_conductor_dimensions }}"
healthcheck: "{{ tacker_conductor_healthcheck }}"
+####################
+# Config Validate
+####################
+tacker_config_validation:
+tacker_config_validation:
+  - generator: "/tacker/etc/config-generator.conf"
+    config: "/etc/tacker/tacker.conf"
+
####################
# Database
####################
@@ -58,11 +68,11 @@ tacker_database_shard:
########
tacker_tag: "{{ openstack_tag }}"
-tacker_server_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/tacker-server"
+tacker_server_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}tacker-server"
tacker_server_tag: "{{ tacker_tag }}"
tacker_server_image_full: "{{ tacker_server_image }}:{{ tacker_server_tag }}"
-tacker_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/tacker-conductor"
+tacker_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}tacker-conductor"
tacker_conductor_tag: "{{ tacker_tag }}"
tacker_conductor_image_full: "{{ tacker_conductor_image }}:{{ tacker_conductor_tag }}"
@@ -97,7 +107,7 @@ tacker_conductor_healthcheck:
tacker_server_default_volumes:
- "{{ node_config_directory }}/tacker-server/:{{ container_config_directory }}/:ro"
- - "{{ kolla_dev_repos_directory ~ '/tacker/tacker:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/tacker' if tacker_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/tacker:/dev-mode/tacker' if tacker_dev_mode | bool else '' }}"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
@@ -106,7 +116,7 @@ tacker_server_default_volumes:
- "kolla_tacker_csar_files:/var/lib/tacker/csar_files/"
tacker_conductor_default_volumes:
- "{{ node_config_directory }}/tacker-conductor/:{{ container_config_directory }}/:ro"
- - "{{ kolla_dev_repos_directory ~ '/tacker/tacker:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/tacker' if tacker_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/tacker:/dev-mode/tacker' if tacker_dev_mode | bool else '' }}"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
@@ -127,9 +137,6 @@ tacker_hosts: "{{ [groups['tacker'] | first] }}"
####################
# OpenStack
####################
-tacker_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ tacker_server_port }}"
-tacker_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ tacker_server_port }}"
-
tacker_logging_debug: "{{ openstack_logging_debug }}"
tacker_keystone_user: "tacker"
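
The dev-mode bind mounts switch from overlaying the Python package into the image's site-packages (which couples the host-side path to distro_python_version) to mounting the whole repository at a fixed /dev-mode/<project> path; installing from that checkout is then presumably handled by tooling inside the image, which this patch does not show. The pattern as it now appears:

    # fixed, python-version-independent mount point
    - "{{ kolla_dev_repos_directory ~ '/tacker:/dev-mode/tacker' if tacker_dev_mode | bool else '' }}"
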
diff --git a/ansible/roles/tacker/handlers/main.yml b/ansible/roles/tacker/handlers/main.yml
index 9976ad07e0..12588132ee 100644
--- a/ansible/roles/tacker/handlers/main.yml
+++ b/ansible/roles/tacker/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "tacker-conductor"
service: "{{ tacker_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -12,15 +12,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart tacker-server container
vars:
service_name: "tacker-server"
service: "{{ tacker_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -28,5 +26,3 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/tacker/tasks/bootstrap.yml b/ansible/roles/tacker/tasks/bootstrap.yml
index 623ca52130..4b2c6dcbdc 100644
--- a/ansible/roles/tacker/tasks/bootstrap.yml
+++ b/ansible/roles/tacker/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating tacker database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -17,6 +18,7 @@
- name: Creating tacker database user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
diff --git a/ansible/roles/tacker/tasks/bootstrap_service.yml b/ansible/roles/tacker/tasks/bootstrap_service.yml
index 7ccfa80888..7075bd44f4 100644
--- a/ansible/roles/tacker/tasks/bootstrap_service.yml
+++ b/ansible/roles/tacker/tasks/bootstrap_service.yml
@@ -3,7 +3,7 @@
vars:
tacker_server: "{{ tacker_services['tacker-server'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -14,7 +14,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_tacker"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ tacker_server.volumes | reject('equalto', '') | list }}"
run_once: True
delegate_to: "{{ groups[tacker_server.group][0] }}"
diff --git a/ansible/roles/tacker/tasks/check-containers.yml b/ansible/roles/tacker/tasks/check-containers.yml
index df89b9e5ef..b7e2f7c29f 100644
--- a/ansible/roles/tacker/tasks/check-containers.yml
+++ b/ansible/roles/tacker/tasks/check-containers.yml
@@ -1,16 +1,3 @@
---
-- name: Check tacker container
- become: true
- kolla_docker:
- action: "compare_container"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - item.value.host_in_groups | bool
- - item.value.enabled | bool
- with_dict: "{{ tacker_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
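
The per-role compare loop is replaced by a shared role. A hedged reconstruction of its core task (not the literal file) shows the same compare_container call, now driven generically by the role's <project>_services dict and the new select_services_enabled_and_mapped_to_host filter:

    - name: Check containers (hedged reconstruction of service-check-containers)
      become: true
      kolla_container:
        action: "compare_container"
        common_options: "{{ docker_common_options }}"
        name: "{{ item.value.container_name }}"
        image: "{{ item.value.image }}"
        volumes: "{{ item.value.volumes | default(omit) }}"
        dimensions: "{{ item.value.dimensions | default(omit) }}"
        healthcheck: "{{ item.value.healthcheck | default(omit) }}"
      with_dict: "{{ lookup('vars', project_name ~ '_services') | select_services_enabled_and_mapped_to_host }}"
      notify:
        - "Restart {{ item.key }} container"
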
diff --git a/ansible/roles/tacker/tasks/config.yml b/ansible/roles/tacker/tasks/config.yml
index ecdf54db28..d8cc9f6f99 100644
--- a/ansible/roles/tacker/tasks/config.yml
+++ b/ansible/roles/tacker/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - item.value.host_in_groups | bool
- - item.value.enabled | bool
- with_dict: "{{ tacker_services }}"
+ with_dict: "{{ tacker_services | select_services_enabled_and_mapped_to_host }}"
- name: Check if policies shall be overwritten
stat:
@@ -41,12 +38,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- with_dict: "{{ tacker_services }}"
- when:
- - item.value.host_in_groups | bool
- - item.value.enabled
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ tacker_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over tacker.conf
vars:
@@ -61,12 +53,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/tacker.conf"
mode: "0660"
become: true
- with_dict: "{{ tacker_services }}"
- when:
- - item.value.host_in_groups | bool
- - item.value.enabled | bool
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ tacker_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over existing policy file
template:
@@ -75,9 +62,5 @@
mode: "0660"
become: true
when:
- - item.value.host_in_groups | bool
- - item.value.enabled | bool
- tacker_policy_file is defined
- with_dict: "{{ tacker_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ tacker_services | select_services_enabled_and_mapped_to_host }}"
diff --git a/ansible/roles/tacker/tasks/config_validate.yml b/ansible/roles/tacker/tasks/config_validate.yml
new file mode 100644
index 0000000000..cc200fdbac
--- /dev/null
+++ b/ansible/roles/tacker/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ tacker_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ tacker_config_validation }}"
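
Each tacker_config_validation entry pairs an oslo.config generator spec shipped in the image with the rendered config file to check; the shared service-config-validate role presumably drives oslo-config-validator inside each container, roughly like this conceptual sketch (not the shared role's literal task):

    - name: Validate tacker.conf (conceptual sketch)
      become: true
      command: >
        {{ kolla_container_engine }} exec tacker_server
        oslo-config-validator
        --config-file /tacker/etc/config-generator.conf
        --input-file /etc/tacker/tacker.conf
      changed_when: false
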
diff --git a/ansible/roles/tacker/tasks/precheck.yml b/ansible/roles/tacker/tasks/precheck.yml
index 8bb0bfddb6..5bdb592b4f 100644
--- a/ansible/roles/tacker/tasks/precheck.yml
+++ b/ansible/roles/tacker/tasks/precheck.yml
@@ -8,8 +8,11 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- tacker_server
+ check_mode: false
register: container_facts
- name: Checking free port for Tacker Server
diff --git a/ansible/roles/tacker/tasks/upgrade.yml b/ansible/roles/tacker/tasks/upgrade.yml
index 4a53114758..afeb387ddc 100644
--- a/ansible/roles/tacker/tasks/upgrade.yml
+++ b/ansible/roles/tacker/tasks/upgrade.yml
@@ -13,7 +13,8 @@
service_name: "tacker-conductor"
service: "{{ tacker_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
+ common_options: "{{ docker_common_options }}"
action: "stop_and_remove_container"
name: "{{ service.container_name }}"
when:
@@ -24,7 +25,8 @@
service_name: "tacker-server"
service: "{{ tacker_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
+ common_options: "{{ docker_common_options }}"
action: "stop_and_remove_container"
name: "{{ service.container_name }}"
when:
diff --git a/ansible/roles/tacker/templates/tacker-conductor.json.j2 b/ansible/roles/tacker/templates/tacker-conductor.json.j2
index 009e64acac..02cc0ef094 100644
--- a/ansible/roles/tacker/templates/tacker-conductor.json.j2
+++ b/ansible/roles/tacker/templates/tacker-conductor.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/tacker/{{ tacker_policy_file }}",
"owner": "tacker",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/tacker/templates/tacker-server.json.j2 b/ansible/roles/tacker/templates/tacker-server.json.j2
index b2b17f90f2..950d63dc2f 100644
--- a/ansible/roles/tacker/templates/tacker-server.json.j2
+++ b/ansible/roles/tacker/templates/tacker-server.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/tacker/{{ tacker_policy_file }}",
"owner": "tacker",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/tacker/templates/tacker.conf.j2 b/ansible/roles/tacker/templates/tacker.conf.j2
index c5fff78155..21d2fde582 100644
--- a/ansible/roles/tacker/templates/tacker.conf.j2
+++ b/ansible/roles/tacker/templates/tacker.conf.j2
@@ -44,7 +44,7 @@ password = {{ tacker_keystone_password }}
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
@@ -68,11 +68,18 @@ topics = {{ tacker_enabled_notification_topics | map(attribute='name') | join(',
driver = noop
{% endif %}
-{% if om_enable_rabbitmq_tls | bool %}
[oslo_messaging_rabbit]
+heartbeat_in_pthread = false
+{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
{% if tacker_policy_file is defined %}
[oslo_policy]
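
The [oslo_messaging_rabbit] section is now always emitted: heartbeat_in_pthread is pinned to false (the pthread-based heartbeat is known to misbehave under eventlet-style services), and two operator switches add durable queues for classic mirrored HA or quorum queues. Assumed globals.yml shape; the two modes target different queue types, so typically only one is enabled:

    om_enable_rabbitmq_high_availability: false
    om_enable_rabbitmq_quorum_queues: true
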
diff --git a/ansible/roles/telegraf/defaults/main.yml b/ansible/roles/telegraf/defaults/main.yml
index b826fcbc4f..9a90d56925 100644
--- a/ansible/roles/telegraf/defaults/main.yml
+++ b/ansible/roles/telegraf/defaults/main.yml
@@ -12,7 +12,7 @@ telegraf_services:
####################
# Docker
####################
-telegraf_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/telegraf"
+telegraf_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}telegraf"
telegraf_tag: "{{ openstack_tag }}"
telegraf_image_full: "{{ telegraf_image }}:{{ telegraf_tag }}"
telegraf_dimensions: "{{ default_container_dimensions }}"
@@ -31,9 +31,8 @@ telegraf_extra_volumes: "{{ default_extra_volumes }}"
####################
# Protocols
####################
-elasticsearch_proto: "http"
haproxy_proto: "http"
influxdb_proto: "http"
rabbitmq_proto: "http"
mariadb_proto: "tcp"
-outward_rabbitmq_proto: "http"
+opensearch_proto: "http"
diff --git a/ansible/roles/telegraf/handlers/main.yml b/ansible/roles/telegraf/handlers/main.yml
index 01b85a27ff..0928b259e7 100644
--- a/ansible/roles/telegraf/handlers/main.yml
+++ b/ansible/roles/telegraf/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "telegraf"
service: "{{ telegraf_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -15,5 +15,3 @@
image: "{{ service.image }}"
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/telegraf/tasks/check-containers.yml b/ansible/roles/telegraf/tasks/check-containers.yml
index 744a7bb391..b7e2f7c29f 100644
--- a/ansible/roles/telegraf/tasks/check-containers.yml
+++ b/ansible/roles/telegraf/tasks/check-containers.yml
@@ -1,17 +1,3 @@
---
-- name: Check telegraf containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes }}"
- pid_mode: "{{ item.value.pid_mode }}"
- dimensions: "{{ item.value.dimensions }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ telegraf_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/telegraf/tasks/config.yml b/ansible/roles/telegraf/tasks/config.yml
index a0a947f2ee..f08b4e7123 100644
--- a/ansible/roles/telegraf/tasks/config.yml
+++ b/ansible/roles/telegraf/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ telegraf_services }}"
+ with_dict: "{{ telegraf_services | select_services_enabled_and_mapped_to_host }}"
- include_tasks: copy-certs.yml
when:
@@ -22,12 +19,7 @@
dest: "{{ node_config_directory }}/telegraf/config.json"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ telegraf_services }}"
- notify:
- - Restart telegraf container
+ with_dict: "{{ telegraf_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over telegraf config file
vars:
@@ -37,15 +29,11 @@
dest: "{{ node_config_directory }}/telegraf/telegraf.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_first_found:
- "{{ node_custom_config }}/telegraf/{{ inventory_hostname }}/telegraf.conf"
- "{{ node_custom_config }}/telegraf/telegraf.conf"
- "telegraf.conf.j2"
- notify:
- - Restart telegraf container
- name: Copying over telegraf plugin files
vars:
@@ -55,10 +43,6 @@
dest: "{{ node_config_directory }}/telegraf/config"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - item.value.enabled | bool
+ when: service | service_enabled_and_mapped_to_host
with_fileglob:
- "{{ role_path }}/templates/config/*.conf"
- notify:
- - Restart telegraf container
diff --git a/ansible/roles/telegraf/tasks/config_validate.yml b/ansible/roles/telegraf/tasks/config_validate.yml
new file mode 100644
index 0000000000..ed97d539c0
--- /dev/null
+++ b/ansible/roles/telegraf/tasks/config_validate.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible/roles/telegraf/templates/telegraf.conf.j2 b/ansible/roles/telegraf/templates/telegraf.conf.j2
index 0e7a0bb251..a8c88f5e44 100644
--- a/ansible/roles/telegraf/templates/telegraf.conf.j2
+++ b/ansible/roles/telegraf/templates/telegraf.conf.j2
@@ -55,9 +55,9 @@
[[inputs.memcached]]
servers = ["{{ api_interface_address | put_address_in_context('url') }}:{{ memcached_port }}"]
{% endif %}
-{% if inventory_hostname in groups['elasticsearch'] and enable_elasticsearch | bool %}
+{% if inventory_hostname in groups['opensearch'] and enable_opensearch | bool %}
[[inputs.elasticsearch]]
- servers = ["{{ elasticsearch_proto }}://{{ api_interface_address | put_address_in_context('url') }}:{{ elasticsearch_port }}"]
+ servers = ["{{ opensearch_proto }}://{{ api_interface_address | put_address_in_context('url') }}:{{ opensearch_port }}"]
local = true
cluster_health = true
{% endif %}
@@ -67,24 +67,10 @@
username = "{{ rabbitmq_user }}"
password = "{{ rabbitmq_password }}"
{% endif %}
-{% if inventory_hostname in groups['outward-rabbitmq'] and enable_outward_rabbitmq | bool %}
-[[inputs.rabbitmq]]
- url = "{{ outward_rabbitmq_proto }}://{{ api_interface_address | put_address_in_context('url') }}:{{ outward_rabbitmq_management_port }}"
- username = "{{ outward_rabbitmq_user }}"
- password = "{{ outward_rabbitmq_password }}"
-{% endif %}
{% if inventory_hostname in groups['redis'] and enable_redis | bool %}
[[inputs.redis]]
servers = ["tcp://:{{ redis_master_password }}@{{ api_interface_address | put_address_in_context('url') }}:{{ redis_port }}"]
{% endif %}
-{% if inventory_hostname in groups['zookeeper'] and enable_zookeeper | bool %}
-[[inputs.zookeeper]]
- servers = ["{{ api_interface_address | put_address_in_context('url') }}:{{ zookeeper_client_port }}"]
-{% endif %}
-{% if inventory_hostname in groups['kafka'] and enable_kafka | bool %}
-[[inputs.kafka_consumer]]
- brokers = ["{{ api_interface_address | put_address_in_context('url') }}:{{ kafka_port }}"]
-{% endif %}
{% if inventory_hostname in groups['mariadb'] and (enable_mariadb or enable_external_mariadb_load_balancer) | bool %}
[[inputs.mysql]]
servers = ["{{ database_user }}:{{ database_password }}@{{ mariadb_proto }}({{ api_interface_address | put_address_in_context('url') }}:{{ database_port }})/"]
diff --git a/ansible/roles/telegraf/templates/telegraf.json.j2 b/ansible/roles/telegraf/templates/telegraf.json.j2
index 5bb8d40b76..8e5bd71a43 100644
--- a/ansible/roles/telegraf/templates/telegraf.json.j2
+++ b/ansible/roles/telegraf/templates/telegraf.json.j2
@@ -13,7 +13,13 @@
"owner": "telegraf",
"perm": "0600",
"optional": true
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
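The `kolla_copy_ca_into_containers` branch above is the standard hook for shipping the deployment's CA bundle into a container. Assuming kolla's usual `container_config_directory` of /var/lib/kolla/config_files (an assumption; the value is not shown in this diff), the extra entry renders roughly as:

    {
        "source": "/var/lib/kolla/config_files/ca-certificates",
        "dest": "/var/lib/kolla/share/ca-certificates",
        "owner": "root",
        "perm": "0600"
    }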
diff --git a/ansible/roles/trove/defaults/main.yml b/ansible/roles/trove/defaults/main.yml
index 1971f4a36a..1977860bd9 100644
--- a/ansible/roles/trove/defaults/main.yml
+++ b/ansible/roles/trove/defaults/main.yml
@@ -14,11 +14,16 @@ trove_services:
mode: "http"
external: false
port: "{{ trove_api_port }}"
+ listen_port: "{{ trove_api_listen_port }}"
+ tls_backend: "{{ trove_enable_tls_backend }}"
trove_api_external:
enabled: "{{ enable_trove }}"
mode: "http"
external: true
- port: "{{ trove_api_port }}"
+ listen_port: "{{ trove_api_listen_port }}"
+ tls_backend: "{{ trove_enable_tls_backend }}"
+ external_fqdn: "{{ trove_external_fqdn }}"
+ port: "{{ trove_api_public_port }}"
trove-conductor:
container_name: trove_conductor
group: trove-conductor
@@ -36,6 +41,12 @@ trove_services:
dimensions: "{{ trove_taskmanager_dimensions }}"
healthcheck: "{{ trove_taskmanager_healthcheck }}"
+####################
+# Config Validate
+####################
+trove_config_validation:
+ - generator: "/trove/tools/trove-config-generator.conf"
+ config: "/etc/trove/trove.conf"
####################
# Database
@@ -63,15 +74,15 @@ trove_database_shard:
####################
trove_tag: "{{ openstack_tag }}"
-trove_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/trove-conductor"
+trove_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}trove-conductor"
trove_conductor_tag: "{{ trove_tag }}"
trove_conductor_image_full: "{{ trove_conductor_image }}:{{ trove_conductor_tag }}"
-trove_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/trove-api"
+trove_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}trove-api"
trove_api_tag: "{{ trove_tag }}"
trove_api_image_full: "{{ trove_api_image }}:{{ trove_api_tag }}"
-trove_taskmanager_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/trove-taskmanager"
+trove_taskmanager_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}trove-taskmanager"
trove_taskmanager_tag: "{{ trove_tag }}"
trove_taskmanager_image_full: "{{ trove_taskmanager_image }}:{{ trove_taskmanager_tag }}"
@@ -123,21 +134,21 @@ trove_api_default_volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/trove/trove:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/trove' if trove_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/trove:/dev-mode/trove' if trove_dev_mode | bool else '' }}"
- "trove:/var/lib/trove/"
trove_conductor_default_volumes:
- "{{ node_config_directory }}/trove-conductor/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/trove/trove:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/trove' if trove_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/trove:/dev-mode/trove' if trove_dev_mode | bool else '' }}"
- "trove:/var/lib/trove/"
trove_taskmanager_default_volumes:
- "{{ node_config_directory }}/trove-taskmanager/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/trove/trove:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/trove' if trove_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/trove:/dev-mode/trove' if trove_dev_mode | bool else '' }}"
- "trove:/var/lib/trove/"
trove_extra_volumes: "{{ default_extra_volumes }}"
@@ -148,8 +159,8 @@ trove_taskmanager_extra_volumes: "{{ trove_extra_volumes }}"
####################
# OpenStack
####################
-trove_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ trove_api_port }}/v1.0/%(tenant_id)s"
-trove_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ trove_api_port }}/v1.0/%(tenant_id)s"
+trove_internal_endpoint: "{{ trove_internal_base_endpoint }}/v1.0/%(tenant_id)s"
+trove_public_endpoint: "{{ trove_public_base_endpoint }}/v1.0/%(tenant_id)s"
trove_logging_debug: "{{ openstack_logging_debug }}"
@@ -192,3 +203,10 @@ trove_ks_users:
user: "{{ trove_keystone_user }}"
password: "{{ trove_keystone_password }}"
role: "admin"
+
+####################
+# TLS
+####################
+trove_enable_tls_backend: "{{ kolla_enable_tls_backend }}"
+
+trove_copy_certs: "{{ kolla_copy_ca_into_containers | bool or trove_enable_tls_backend | bool }}"
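The haproxy entries gain a `listen_port`/`port` split so the frontend port can differ from the backend listener, plus `tls_backend` and `external_fqdn` hints for the proxy templates, and the endpoint URLs are rebuilt from shared base-endpoint variables. Those variables are defined outside this excerpt; a plausible shape, labelled as an assumption rather than quoted from the series:

    # Assumed defaults (defined elsewhere, e.g. group_vars/all.yml):
    trove_api_listen_port: "{{ trove_api_port }}"
    trove_external_fqdn: "{{ kolla_external_fqdn }}"
    trove_internal_base_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ trove_api_port }}"
    trove_public_base_endpoint: "{{ public_protocol }}://{{ trove_external_fqdn | put_address_in_context('url') }}:{{ trove_api_public_port }}"

`trove_copy_certs` then ORs the global CA-copy switch with the per-service TLS backend, so the certificate tasks run whenever either is enabled.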
diff --git a/ansible/roles/trove/handlers/main.yml b/ansible/roles/trove/handlers/main.yml
index 245a1858ff..78b0346b96 100644
--- a/ansible/roles/trove/handlers/main.yml
+++ b/ansible/roles/trove/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "trove-api"
service: "{{ trove_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -12,15 +12,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart trove-conductor container
vars:
service_name: "trove-conductor"
service: "{{ trove_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -28,15 +26,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart trove-taskmanager container
vars:
service_name: "trove-taskmanager"
service: "{{ trove_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -44,5 +40,3 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
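Two independent cleanups run through every handler file in this series: `kolla_docker` becomes `kolla_container` (same argument set; the new name reflects that the module drives Docker or Podman according to `kolla_container_engine`), and the `kolla_action != "config"` guard disappears, apparently because handlers are no longer notified at all during config-only runs. The resulting shape, sketched with a hypothetical service:

    - name: Restart example container
      vars:
        service: "{{ example_services['example-api'] }}"
      become: true
      kolla_container:   # formerly kolla_docker; arguments unchanged
        action: "recreate_or_restart_container"
        common_options: "{{ docker_common_options }}"
        name: "{{ service.container_name }}"
        image: "{{ service.image }}"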
diff --git a/ansible/roles/trove/tasks/bootstrap.yml b/ansible/roles/trove/tasks/bootstrap.yml
index c3d5ba5787..4848b06b57 100644
--- a/ansible/roles/trove/tasks/bootstrap.yml
+++ b/ansible/roles/trove/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating trove database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -17,6 +18,7 @@
- name: Creating trove database user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
diff --git a/ansible/roles/trove/tasks/bootstrap_service.yml b/ansible/roles/trove/tasks/bootstrap_service.yml
index 72603c84bc..74766ed07a 100644
--- a/ansible/roles/trove/tasks/bootstrap_service.yml
+++ b/ansible/roles/trove/tasks/bootstrap_service.yml
@@ -3,7 +3,7 @@
vars:
trove_api: "{{ trove_services['trove-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -14,7 +14,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_trove"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ trove_api.volumes | reject('equalto', '') | list }}"
run_once: True
delegate_to: "{{ groups[trove_api.group][0] }}"
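`restart_policy: no` was a YAML trap, since an unquoted `no` parses as boolean false rather than the string the container engine expects; `oneshot` is a kolla-level policy name which, by all appearances here, the container module maps to "run to completion, never restart" on either engine. The bootstrap pattern, sketched with hypothetical names:

    - name: Running example bootstrap container
      become: true
      kolla_container:
        action: "start_container"
        common_options: "{{ docker_common_options }}"
        detach: False
        image: "{{ example_bootstrap_image }}"   # hypothetical
        name: "bootstrap_example"
        restart_policy: oneshot                  # keyword, immune to YAML no -> false coercion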
diff --git a/ansible/roles/trove/tasks/check-containers.yml b/ansible/roles/trove/tasks/check-containers.yml
index 3c4cbd8f4c..b7e2f7c29f 100644
--- a/ansible/roles/trove/tasks/check-containers.yml
+++ b/ansible/roles/trove/tasks/check-containers.yml
@@ -1,17 +1,3 @@
---
-- name: Check trove containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ trove_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
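The per-role comparison task collapses into a shared role here and in every other role in the series. A hypothetical sketch of what service-check-containers presumably does (the real file is outside this excerpt):

    - name: Check containers
      become: true
      kolla_container:
        action: "compare_container"
        common_options: "{{ docker_common_options }}"
        name: "{{ item.value.container_name }}"
        image: "{{ item.value.image }}"
        volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
        dimensions: "{{ item.value.dimensions }}"
        healthcheck: "{{ item.value.healthcheck | default(omit) }}"
      with_dict: "{{ lookup('vars', project_name ~ '_services') | select_services_enabled_and_mapped_to_host }}"
      notify:
        - "Restart {{ item.key }} container"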
diff --git a/ansible/roles/trove/tasks/config.yml b/ansible/roles/trove/tasks/config.yml
index 1bbe2f24e6..1571e43d7f 100644
--- a/ansible/roles/trove/tasks/config.yml
+++ b/ansible/roles/trove/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ trove_services }}"
+ with_dict: "{{ trove_services | select_services_enabled_and_mapped_to_host }}"
- name: Check if policies shall be overwritten
stat:
@@ -33,7 +30,7 @@
- include_tasks: copy-certs.yml
when:
- - kolla_copy_ca_into_containers | bool
+ - trove_copy_certs
- name: Copying over config.json files for services
template:
@@ -41,12 +38,21 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ trove_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ trove_services | select_services_enabled_and_mapped_to_host }}"
+
+- name: Copying over trove-wsgi.conf
+ vars:
+ service: "{{ trove_services['trove-api'] }}"
+ become: true
+ template:
+ src: "{{ item }}"
+ dest: "{{ node_config_directory }}/trove-api/trove-wsgi.conf"
+ mode: "0660"
+ with_first_found:
+ - "{{ node_custom_config }}/trove/{{ inventory_hostname }}/trove-wsgi.conf"
+ - "{{ node_custom_config }}/trove/trove-wsgi.conf"
+ - "trove-wsgi.conf.j2"
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over trove-guestagent.conf
vars:
@@ -63,11 +69,7 @@
become: true
when:
- item.key in services_need_confs
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ trove_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ trove_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over trove.conf
vars:
@@ -82,12 +84,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/trove.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ trove_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ trove_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over existing policy file
template:
@@ -96,7 +93,4 @@
mode: "0660"
when:
- trove_policy_file is defined
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ trove_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ trove_services | select_services_enabled_and_mapped_to_host }}"
diff --git a/ansible/roles/trove/tasks/config_validate.yml b/ansible/roles/trove/tasks/config_validate.yml
new file mode 100644
index 0000000000..b8c4c0166e
--- /dev/null
+++ b/ansible/roles/trove/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ trove_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ trove_config_validation }}"
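`config_validate.yml` plugs the role into the shared validation machinery; roles with nothing to validate (telegraf, earlier in this diff) carry a bare `---` stub so the common playbook can include the file unconditionally. Given the generator/config pairs in `trove_config_validation`, the shared role presumably drives oslo.config's validator; an illustrative sketch, not the shared role's actual task:

    - name: Validating trove.conf
      command: >
        oslo-config-validator
        --config-file {{ item.generator }}
        --input-file {{ item.config }}
      loop: "{{ trove_config_validation }}"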
diff --git a/ansible/roles/trove/tasks/precheck.yml b/ansible/roles/trove/tasks/precheck.yml
index 9954dc9367..60aa0a4e73 100644
--- a/ansible/roles/trove/tasks/precheck.yml
+++ b/ansible/roles/trove/tasks/precheck.yml
@@ -8,14 +8,17 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- trove_api
+ check_mode: false
register: container_facts
- name: Checking free port for Trove API
wait_for:
host: "{{ api_interface_address }}"
- port: "{{ trove_api_port }}"
+ port: "{{ trove_api_listen_port }}"
connect_timeout: 1
timeout: 1
state: stopped
diff --git a/ansible/roles/trove/templates/trove-api.json.j2 b/ansible/roles/trove/templates/trove-api.json.j2
index be2acebeae..606c7de8c1 100644
--- a/ansible/roles/trove/templates/trove-api.json.j2
+++ b/ansible/roles/trove/templates/trove-api.json.j2
@@ -1,24 +1,54 @@
+{% set apache_binary = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %}
+{% set apache_conf_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %}
{
- "command": "trove-api --config-file=/etc/trove/trove.conf",
+ "command": "/usr/sbin/{{ apache_binary }} -DFOREGROUND",
"config_files": [
{
"source": "{{ container_config_directory }}/trove.conf",
"dest": "/etc/trove/trove.conf",
"owner": "trove",
"perm": "0600"
- }{% if trove_policy_file is defined %},
+ },
+ {
+ "source": "{{ container_config_directory }}/trove-wsgi.conf",
+ "dest": "/etc/{{ apache_conf_dir }}/trove-wsgi.conf",
+ "owner": "trove",
+ "perm": "0600"
+ }{% if trove_policy_file is defined %},
{
"source": "{{ container_config_directory }}/{{ trove_policy_file }}",
"dest": "/etc/trove/{{ trove_policy_file }}",
"owner": "trove",
"perm": "0600"
+ }{% endif %}{% if trove_enable_tls_backend | bool %},
+ {
+ "source": "{{ container_config_directory }}/trove-cert.pem",
+ "dest": "/etc/trove/certs/trove-cert.pem",
+ "owner": "trove",
+ "perm": "0600"
+ },
+ {
+ "source": "{{ container_config_directory }}/trove-key.pem",
+ "dest": "/etc/trove/certs/trove-key.pem",
+ "owner": "trove",
+ "perm": "0600"
+ }{% endif %}{% if trove_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
{
"path": "/var/log/kolla/trove",
"owner": "trove:trove",
"recurse": true
+ },
+ {
+ "path": "/var/run/trove",
+ "owner": "trove:trove"
}
]
}
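trove-api stops running the standalone `trove-api` process and serves instead through Apache with mod_wsgi, the two `{% set %}` lines picking distro-specific paths. Rendered on a Debian-family image, the changed fragments come out as (illustration):

    "command": "/usr/sbin/apache2 -DFOREGROUND",
    "dest": "/etc/apache2/conf-enabled/trove-wsgi.conf",

while CentOS-family images get `/usr/sbin/httpd` and `/etc/httpd/conf.d/trove-wsgi.conf`. The new `/var/run/trove` permissions entry presumably covers runtime files the WSGI setup creates; that purpose is inferred, not stated in the diff.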
diff --git a/ansible/roles/trove/templates/trove-conductor.json.j2 b/ansible/roles/trove/templates/trove-conductor.json.j2
index 12d884a5d0..d2c763d836 100644
--- a/ansible/roles/trove/templates/trove-conductor.json.j2
+++ b/ansible/roles/trove/templates/trove-conductor.json.j2
@@ -6,12 +6,18 @@
"dest": "/etc/trove/trove.conf",
"owner": "trove",
"perm": "0600"
- }{% if trove_policy_file is defined %},
+ }{% if trove_policy_file is defined %},
{
"source": "{{ container_config_directory }}/{{ trove_policy_file }}",
"dest": "/etc/trove/{{ trove_policy_file }}",
"owner": "trove",
"perm": "0600"
+ }{% endif %}{% if trove_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/trove/templates/trove-guestagent.conf.j2 b/ansible/roles/trove/templates/trove-guestagent.conf.j2
index cfc1e197da..112b22f7d4 100644
--- a/ansible/roles/trove/templates/trove-guestagent.conf.j2
+++ b/ansible/roles/trove/templates/trove-guestagent.conf.j2
@@ -1,6 +1,26 @@
[DEFAULT]
+log_file = trove-guestagent.log
+log_dir = /var/log/trove/
transport_url = {{ rpc_transport_url }}
control_exchange = trove
root_grant = ALL
root_grant_option = True
debug = {{ trove_logging_debug }}
+
+[service_credentials]
+auth_url = {{ keystone_internal_url }}
+region_name = {{ openstack_region_name }}
+project_name = service
+password = {{ trove_keystone_password }}
+project_domain_name = {{ default_project_domain_name }}
+user_domain_name = {{ default_user_domain_name }}
+username = {{ trove_keystone_user }}
+
+[oslo_messaging_rabbit]
+heartbeat_in_pthread = false
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
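The guest agent gains the same oslo.messaging knobs used elsewhere in kolla: `amqp_durable_queues` under the classic-HA flag, `rabbit_quorum_queue` under the quorum-queue flag (the two are generally treated as mutually exclusive), and `heartbeat_in_pthread` pinned to false. With `om_enable_rabbitmq_quorum_queues: true` and the HA flag off, the block renders as:

    [oslo_messaging_rabbit]
    heartbeat_in_pthread = false
    rabbit_quorum_queue = true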
diff --git a/ansible/roles/trove/templates/trove-taskmanager.json.j2 b/ansible/roles/trove/templates/trove-taskmanager.json.j2
index 7298de345f..47dc30d51c 100644
--- a/ansible/roles/trove/templates/trove-taskmanager.json.j2
+++ b/ansible/roles/trove/templates/trove-taskmanager.json.j2
@@ -18,6 +18,12 @@
"dest": "/etc/trove/{{ trove_policy_file }}",
"owner": "trove",
"perm": "0600"
+ }{% endif %}{% if trove_copy_certs | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/trove/templates/trove-wsgi.conf.j2 b/ansible/roles/trove/templates/trove-wsgi.conf.j2
new file mode 100644
index 0000000000..26449a5384
--- /dev/null
+++ b/ansible/roles/trove/templates/trove-wsgi.conf.j2
@@ -0,0 +1,43 @@
+{% set wsgi_directory = '/var/lib/kolla/venv/bin' %}
+{% if trove_enable_tls_backend | bool %}
+{% if kolla_base_distro in ['centos'] %}
+LoadModule ssl_module /usr/lib64/httpd/modules/mod_ssl.so
+{% else %}
+LoadModule ssl_module /usr/lib/apache2/modules/mod_ssl.so
+{% endif %}
+{% endif %}
+Listen {{ api_interface_address | put_address_in_context('url') }}:{{ trove_api_listen_port }}
+
+ServerSignature Off
+ServerTokens Prod
+TraceEnable off
+TimeOut {{ kolla_httpd_timeout }}
+KeepAliveTimeout {{ kolla_httpd_keep_alive }}
+
+{% if trove_logging_debug | bool %}
+LogLevel info
+{% endif %}
+
+<VirtualHost *:{{ trove_api_listen_port }}>
+    WSGIDaemonProcess trove-api processes={{ trove_api_workers }} threads=1 user=trove group=trove display-name=trove-api
+    WSGIProcessGroup trove-api
+    WSGIScriptAlias / {{ wsgi_directory }}/trove-wsgi
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+        ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/kolla/trove/trove-api-error.log
+    LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
+    CustomLog /var/log/kolla/trove/trove-api-access.log logformat
+
+    <Directory "{{ wsgi_directory }}">
+        Require all granted
+    </Directory>
+
+{% if trove_enable_tls_backend | bool %}
+    SSLEngine On
+    SSLCertificateFile /etc/trove/certs/trove-cert.pem
+    SSLCertificateKeyFile /etc/trove/certs/trove-key.pem
+{% endif %}
+</VirtualHost>
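Per the `with_first_found` list in config.yml above, operators can replace this vhost with `{{ node_custom_config }}/trove/{{ inventory_hostname }}/trove-wsgi.conf` or the global `{{ node_custom_config }}/trove/trove-wsgi.conf` before this template is consulted. Rendered with `trove_enable_tls_backend: true` on a Debian-family base, with an illustrative address and the default 8779 port, the TLS-relevant lines become:

    LoadModule ssl_module /usr/lib/apache2/modules/mod_ssl.so
    Listen 192.0.2.10:8779

    <VirtualHost *:8779>
        SSLEngine On
        SSLCertificateFile /etc/trove/certs/trove-cert.pem
        SSLCertificateKeyFile /etc/trove/certs/trove-key.pem
    </VirtualHost>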
diff --git a/ansible/roles/trove/templates/trove.conf.j2 b/ansible/roles/trove/templates/trove.conf.j2
index 993fe7984c..238575dcdb 100644
--- a/ansible/roles/trove/templates/trove.conf.j2
+++ b/ansible/roles/trove/templates/trove.conf.j2
@@ -2,10 +2,13 @@
debug = {{ trove_logging_debug }}
log_dir = /var/log/kolla/trove
+{% if service_name == "trove-api" %}
+log_file = trove-api.log
+{% endif %}
host = {{ api_interface_address }}
-bind_port = {{ trove_api_port }}
+bind_port = {{ trove_api_listen_port }}
bind_host = {{ api_interface_address }}
trove_api_workers = {{ trove_api_workers }}
auth_strategy = keystone
@@ -16,9 +19,9 @@ transport_url = {{ rpc_transport_url }}
nova_proxy_admin_pass = {{ trove_keystone_password }}
nova_proxy_admin_tenant_name = service
nova_proxy_admin_user = trove
-remote_nova_client = trove.common.single_tenant_remote.nova_client_trove_admin
-remote_cinder_client = trove.common.single_tenant_remote.cinder_client_trove_admin
-remote_neutron_client = trove.common.single_tenant_remote.neutron_client_trove_admin
+remote_nova_client = trove.common.clients_admin.nova_client_trove_admin
+remote_cinder_client = trove.common.clients_admin.cinder_client_trove_admin
+remote_neutron_client = trove.common.clients_admin.neutron_client_trove_admin
{% endif %}
nova_compute_endpoint_type = internalURL
@@ -71,11 +74,18 @@ topics = {{ trove_enabled_notification_topics | map(attribute='name') | join(','
driver = noop
{% endif %}
-{% if om_enable_rabbitmq_tls | bool %}
[oslo_messaging_rabbit]
+heartbeat_in_pthread = false
+{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
{% if enable_osprofiler | bool %}
[profiler]
diff --git a/ansible/roles/venus/defaults/main.yml b/ansible/roles/venus/defaults/main.yml
index a1a7021a11..a8a5ad7a54 100644
--- a/ansible/roles/venus/defaults/main.yml
+++ b/ansible/roles/venus/defaults/main.yml
@@ -18,7 +18,8 @@ venus_services:
enabled: "{{ enable_venus }}"
mode: "http"
external: true
- port: "{{ venus_api_port }}"
+ external_fqdn: "{{ venus_external_fqdn }}"
+ port: "{{ venus_api_public_port }}"
venus-manager:
container_name: venus_manager
group: venus-manager
@@ -27,6 +28,13 @@ venus_services:
volumes: "{{ venus_manager_default_volumes + venus_manager_extra_volumes }}"
dimensions: "{{ venus_manager_dimensions }}"
+####################
+# Config Validate
+####################
+venus_config_validation:
+ - generator: "/venus/tools/config/venus-config-generator.conf"
+ config: "/etc/venus/venus.conf"
+
####################
# Database
####################
@@ -34,17 +42,30 @@ venus_database_name: "venus"
venus_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}venus{% endif %}"
venus_database_address: "{{ database_address | put_address_in_context('url') }}:{{ database_port }}"
+####################
+# Database sharding
+####################
+venus_database_shard_root_user: "{% if enable_proxysql | bool %}root_shard_{{ venus_database_shard_id }}{% else %}{{ database_user }}{% endif %}"
+venus_database_shard_id: "{{ mariadb_default_database_shard_id | int }}"
+venus_database_shard:
+ users:
+ - user: "{{ venus_database_user }}"
+ password: "{{ venus_database_password }}"
+ rules:
+ - schema: "{{ venus_database_name }}"
+ shard_id: "{{ venus_database_shard_id }}"
+
####################
# Docker
####################
venus_tag: "{{ openstack_tag }}"
-venus_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/venus-api"
+venus_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}venus-api"
venus_api_tag: "{{ venus_tag }}"
venus_api_image_full: "{{ venus_api_image }}:{{ venus_api_tag }}"
-venus_manager_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/venus-manager"
+venus_manager_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}venus-manager"
venus_manager_tag: "{{ venus_tag }}"
venus_manager_image_full: "{{ venus_manager_image }}:{{ venus_manager_tag }}"
@@ -69,14 +90,14 @@ venus_api_default_volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/venus/venus:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/venus' if venus_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/venus:/dev-mode/venus' if venus_dev_mode | bool else '' }}"
- "venus:/var/lib/venus/"
venus_manager_default_volumes:
- "{{ node_config_directory }}/venus-manager/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/venus/venus:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/venus' if venus_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/venus:/dev-mode/venus' if venus_dev_mode | bool else '' }}"
- "venus:/var/lib/venus/"
venus_extra_volumes: "{{ default_extra_volumes }}"
@@ -86,9 +107,6 @@ venus_manager_extra_volumes: "{{ venus_extra_volumes }}"
####################
# OpenStack
####################
-venus_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ venus_api_port }}/v1.0/%(tenant_id)s"
-venus_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ venus_api_port }}/v1.0/%(tenant_id)s"
-
venus_logging_debug: "{{ openstack_logging_debug }}"
venus_keystone_user: "venus"
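Venus picks up the database-sharding plumbing the other roles already carry: when ProxySQL fronts MariaDB, bootstrap authenticates as a shard-scoped root user, otherwise it falls back to the plain root account. Worked resolution with illustrative values:

    # enable_proxysql: true, mariadb_default_database_shard_id: 0
    #   venus_database_shard_root_user -> "root_shard_0"
    # enable_proxysql: false
    #   venus_database_shard_root_user -> "{{ database_user }}"

The `venus_database_shard` map then feeds ProxySQL's user and routing-rule generation so queries against the `venus` schema reach the right shard.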
diff --git a/ansible/roles/venus/handlers/main.yml b/ansible/roles/venus/handlers/main.yml
index f1b17b7fe5..1f8b3fdb50 100644
--- a/ansible/roles/venus/handlers/main.yml
+++ b/ansible/roles/venus/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "venus-api"
service: "{{ venus_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -12,15 +12,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart venus-manager container
vars:
service_name: "venus-manager"
service: "{{ venus_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -28,5 +26,3 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/venus/tasks/bootstrap.yml b/ansible/roles/venus/tasks/bootstrap.yml
index 0edce5d50b..79d3b500b7 100644
--- a/ansible/roles/venus/tasks/bootstrap.yml
+++ b/ansible/roles/venus/tasks/bootstrap.yml
@@ -2,11 +2,12 @@
- name: Creating venus database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
login_port: "{{ database_port }}"
- login_user: "{{ database_user }}"
+ login_user: "{{ venus_database_shard_root_user }}"
login_password: "{{ database_password }}"
name: "{{ venus_database_name }}"
run_once: True
@@ -17,11 +18,12 @@
- name: Creating venus database user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
login_port: "{{ database_port }}"
- login_user: "{{ database_user }}"
+ login_user: "{{ venus_database_shard_root_user }}"
login_password: "{{ database_password }}"
name: "{{ venus_database_user }}"
password: "{{ venus_database_password }}"
diff --git a/ansible/roles/venus/tasks/check-containers.yml b/ansible/roles/venus/tasks/check-containers.yml
index c895320362..b7e2f7c29f 100644
--- a/ansible/roles/venus/tasks/check-containers.yml
+++ b/ansible/roles/venus/tasks/check-containers.yml
@@ -1,17 +1,3 @@
---
-- name: Check venus containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ venus_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/venus/tasks/config.yml b/ansible/roles/venus/tasks/config.yml
index 37fccd9767..d0d8119b4f 100644
--- a/ansible/roles/venus/tasks/config.yml
+++ b/ansible/roles/venus/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ venus_services }}"
+ with_dict: "{{ venus_services | select_services_enabled_and_mapped_to_host }}"
- name: Check if policies shall be overwritten
stat:
@@ -41,12 +38,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ venus_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ venus_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over venus.conf
vars:
@@ -61,12 +53,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/venus.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ venus_services }}"
- notify:
- - "Restart {{ item.key }} container"
+ with_dict: "{{ venus_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over existing policy file
template:
@@ -75,7 +62,4 @@
mode: "0660"
when:
- venus_policy_file is defined
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ venus_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ venus_services | select_services_enabled_and_mapped_to_host }}"
diff --git a/ansible/roles/venus/tasks/config_validate.yml b/ansible/roles/venus/tasks/config_validate.yml
new file mode 100644
index 0000000000..57ab862017
--- /dev/null
+++ b/ansible/roles/venus/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ venus_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ venus_config_validation }}"
diff --git a/ansible/roles/venus/tasks/precheck.yml b/ansible/roles/venus/tasks/precheck.yml
index cf40ae64b3..b3debf8fd9 100644
--- a/ansible/roles/venus/tasks/precheck.yml
+++ b/ansible/roles/venus/tasks/precheck.yml
@@ -8,8 +8,11 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- venus_api
+ check_mode: false
register: container_facts
- name: Checking free port for Venus API
diff --git a/ansible/roles/venus/tasks/pull.yml b/ansible/roles/venus/tasks/pull.yml
index 7961b96403..53f9c5fda1 100644
--- a/ansible/roles/venus/tasks/pull.yml
+++ b/ansible/roles/venus/tasks/pull.yml
@@ -1,11 +1,3 @@
---
-- name: Pulling venus images
- become: true
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ item.value.image }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ venus_services }}"
+- import_role:
+    name: service-images-pull
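Pulls move to a shared role as well. A hypothetical sketch of the service-images-pull core (illustrative; the real file is outside this excerpt):

    - name: Pulling images
      become: true
      kolla_container:
        action: "pull_image"
        common_options: "{{ docker_common_options }}"
        image: "{{ item.value.image }}"
      with_dict: "{{ lookup('vars', project_name ~ '_services') | select_services_enabled_and_mapped_to_host }}"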
diff --git a/ansible/roles/venus/templates/venus-api.json.j2 b/ansible/roles/venus/templates/venus-api.json.j2
index 8db87242d0..0a825529d8 100644
--- a/ansible/roles/venus/templates/venus-api.json.j2
+++ b/ansible/roles/venus/templates/venus-api.json.j2
@@ -1,19 +1,25 @@
-{
- "command": "venus-api --config-file /etc/venus/venus.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/venus.conf",
- "dest": "/etc/venus/venus.conf",
- "owner": "venus",
- "perm": "0644"
- }
- ],
- "permissions": [
- {
- "path":"/var/log/kolla/venus/venus-api.log",
- "owner": "venus:venus",
- "recurse": true
- }
- ]
-}
-
+{
+ "command": "venus_api --config-file /etc/venus/venus.conf",
+ "config_files": [
+ {
+ "source": "{{ container_config_directory }}/venus.conf",
+ "dest": "/etc/venus/venus.conf",
+ "owner": "venus",
+ "perm": "0644"
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
+ ],
+ "permissions": [
+ {
+ "path":"/var/log/kolla/venus/venus-api.log",
+ "owner": "venus:venus",
+ "recurse": true
+ }
+ ]
+}
+
diff --git a/ansible/roles/venus/templates/venus-manager.json.j2 b/ansible/roles/venus/templates/venus-manager.json.j2
index 75439d6c6f..02f7503cb3 100644
--- a/ansible/roles/venus/templates/venus-manager.json.j2
+++ b/ansible/roles/venus/templates/venus-manager.json.j2
@@ -1,19 +1,25 @@
-{
- "command": "venus-manager --config-file /etc/venus/venus.conf task start",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/venus.conf",
- "dest": "/etc/venus/venus.conf",
- "owner": "venus",
- "perm": "0644"
- }
- ],
- "permissions": [
- {
- "path":"/var/log/kolla/venus/venus-manager.log",
- "owner": "venus:venus",
- "recurse": true
- }
- ]
-}
-
+{
+ "command": "venus_manager --config-file /etc/venus/venus.conf task start",
+ "config_files": [
+ {
+ "source": "{{ container_config_directory }}/venus.conf",
+ "dest": "/etc/venus/venus.conf",
+ "owner": "venus",
+ "perm": "0644"
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
+ ],
+ "permissions": [
+ {
+ "path":"/var/log/kolla/venus/venus-manager.log",
+ "owner": "venus:venus",
+ "recurse": true
+ }
+ ]
+}
+
diff --git a/ansible/roles/venus/templates/venus.conf.j2 b/ansible/roles/venus/templates/venus.conf.j2
index 7e7b08364b..dbb80c415b 100644
--- a/ansible/roles/venus/templates/venus.conf.j2
+++ b/ansible/roles/venus/templates/venus.conf.j2
@@ -1,35 +1,35 @@
-[DEFAULT]
-my_ip = {{ api_interface_address }}
-periodic_interval = 60
-rootwrap_config = /etc/venus/rootwrap.conf
-api_paste_config = /etc/venus/api-paste.ini
-log_dir = /var/log/kolla/venus/
-debug = {{ venus_logging_debug }}
-auth_strategy = keystone
-os_region_name = {{ openstack_region_name }}
-osapi_venus_listen = {{ api_interface_address }}
-osapi_venus_listen_port = {{ venus_api_port }}
-
-logging_default_format_string = {{ openstack_logging_default_format_string }}
-logging_context_format_string = {{ openstack_logging_context_format_string }}
-
-transport_url = {{ rpc_transport_url }}
-
-[database]
-connection = mysql+pymysql://{{ venus_database_user }}:{{ venus_database_password }}@{{ venus_database_address }}/{{ venus_database_name }}?charset=utf8
-
-[keystone_authtoken]
-cafile = {{ openstack_cacert }}
-project_name = service
-password = {{ venus_keystone_password }}
-username = {{ venus_keystone_user }}
-auth_url = {{ keystone_internal_url }}
-project_domain_id = {{ default_project_domain_id }}
-user_domain_id = {{ default_user_domain_id }}
-auth_type = password
-memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-{% if enable_elasticsearch | bool %}
-[elasticsearch]
-url = {{ elasticsearch_internal_endpoint }}
-{% endif %}
+[DEFAULT]
+my_ip = {{ api_interface_address }}
+periodic_interval = 60
+rootwrap_config = /etc/venus/rootwrap.conf
+api_paste_config = /etc/venus/api-paste.ini
+log_dir = /var/log/kolla/venus/
+debug = {{ venus_logging_debug }}
+auth_strategy = keystone
+os_region_name = {{ openstack_region_name }}
+osapi_venus_listen = {{ api_interface_address }}
+osapi_venus_listen_port = {{ venus_api_port }}
+
+logging_default_format_string = {{ openstack_logging_default_format_string }}
+logging_context_format_string = {{ openstack_logging_context_format_string }}
+
+transport_url = {{ rpc_transport_url }}
+
+[database]
+connection = mysql+pymysql://{{ venus_database_user }}:{{ venus_database_password }}@{{ venus_database_address }}/{{ venus_database_name }}?charset=utf8
+
+[keystone_authtoken]
+cafile = {{ openstack_cacert }}
+project_name = service
+password = {{ venus_keystone_password }}
+username = {{ venus_keystone_user }}
+auth_url = {{ keystone_internal_url }}
+project_domain_id = {{ default_project_domain_id }}
+user_domain_id = {{ default_user_domain_id }}
+auth_type = password
+memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
+
+{% if enable_opensearch | bool %}
+[elasticsearch]
+url = {{ opensearch_internal_endpoint }}
+{% endif %}
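Venus still reads its search backend from the `[elasticsearch]` option group; since OpenSearch keeps an Elasticsearch-compatible API, only the guard and the endpoint variable need to change. Assuming the usual internal endpoint shape and OpenSearch's default port 9200 (illustrative address), the block renders as:

    [elasticsearch]
    url = http://192.0.2.20:9200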
diff --git a/ansible/roles/vitrage/defaults/main.yml b/ansible/roles/vitrage/defaults/main.yml
deleted file mode 100644
index bbe74ebf5c..0000000000
--- a/ansible/roles/vitrage/defaults/main.yml
+++ /dev/null
@@ -1,292 +0,0 @@
----
-vitrage_services:
- vitrage-api:
- container_name: vitrage_api
- group: vitrage-api
- enabled: true
- image: "{{ vitrage_api_image_full }}"
- volumes: "{{ vitrage_api_default_volumes + vitrage_api_extra_volumes }}"
- dimensions: "{{ vitrage_api_dimensions }}"
- healthcheck: "{{ vitrage_api_healthcheck }}"
- haproxy:
- vitrage_api:
- enabled: "{{ enable_vitrage }}"
- mode: "http"
- external: false
- port: "{{ vitrage_api_port }}"
- vitrage_api_external:
- enabled: "{{ enable_vitrage }}"
- mode: "http"
- external: true
- port: "{{ vitrage_api_port }}"
- vitrage-notifier:
- container_name: vitrage_notifier
- group: vitrage-notifier
- enabled: true
- image: "{{ vitrage_notifier_image_full }}"
- volumes: "{{ vitrage_notifier_default_volumes + vitrage_notifier_extra_volumes }}"
- dimensions: "{{ vitrage_notifier_dimensions }}"
- healthcheck: "{{ vitrage_notifier_healthcheck }}"
- vitrage-graph:
- container_name: vitrage_graph
- group: vitrage-graph
- enabled: true
- image: "{{ vitrage_graph_image_full }}"
- volumes: "{{ vitrage_graph_default_volumes + vitrage_graph_extra_volumes }}"
- dimensions: "{{ vitrage_graph_dimensions }}"
- healthcheck: "{{ vitrage_graph_healthcheck }}"
- vitrage-ml:
- container_name: vitrage_ml
- group: vitrage-ml
- enabled: true
- image: "{{ vitrage_ml_image_full }}"
- volumes: "{{ vitrage_ml_default_volumes + vitrage_ml_extra_volumes }}"
- dimensions: "{{ vitrage_ml_dimensions }}"
- healthcheck: "{{ vitrage_ml_healthcheck }}"
- vitrage-persistor:
- container_name: vitrage_persistor
- group: vitrage-persistor
- enabled: true
- image: "{{ vitrage_persistor_image_full }}"
- volumes: "{{ vitrage_persistor_default_volumes + vitrage_persistor_extra_volumes }}"
- dimensions: "{{ vitrage_persistor_dimensions }}"
- healthcheck: "{{ vitrage_persistor_healthcheck }}"
-
-####################
-## Database
-#####################
-vitrage_database_name: "vitrage"
-vitrage_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}vitrage{% endif %}"
-vitrage_database_address: "{{ database_address | put_address_in_context('url') }}:{{ database_port }}"
-
-####################
-# Database sharding
-####################
-vitrage_database_shard_root_user: "{% if enable_proxysql | bool %}root_shard_{{ vitrage_database_shard_id }}{% else %}{{ database_user }}{% endif %}"
-vitrage_database_shard_id: "{{ mariadb_default_database_shard_id | int }}"
-vitrage_database_shard:
- users:
- - user: "{{ vitrage_database_user }}"
- password: "{{ vitrage_database_password }}"
- rules:
- - schema: "{{ vitrage_database_name }}"
- shard_id: "{{ vitrage_database_shard_id }}"
-
-
-####################
-# Docker
-####################
-
-vitrage_tag: "{{ openstack_tag }}"
-
-vitrage_graph_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/vitrage-graph"
-vitrage_graph_tag: "{{ vitrage_tag }}"
-vitrage_graph_image_full: "{{ vitrage_graph_image }}:{{ vitrage_graph_tag }}"
-
-vitrage_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/vitrage-api"
-vitrage_api_tag: "{{ vitrage_tag }}"
-vitrage_api_image_full: "{{ vitrage_api_image }}:{{ vitrage_api_tag }}"
-
-vitrage_notifier_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/vitrage-notifier"
-vitrage_notifier_tag: "{{ vitrage_tag }}"
-vitrage_notifier_image_full: "{{ vitrage_notifier_image }}:{{ vitrage_notifier_tag }}"
-
-vitrage_ml_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/vitrage-ml"
-vitrage_ml_tag: "{{ vitrage_tag }}"
-vitrage_ml_image_full: "{{ vitrage_ml_image }}:{{ vitrage_ml_tag }}"
-
-vitrage_persistor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/vitrage-persistor"
-vitrage_persistor_tag: "{{ vitrage_tag }}"
-vitrage_persistor_image_full: "{{ vitrage_persistor_image }}:{{ vitrage_persistor_tag }}"
-
-vitrage_api_dimensions: "{{ default_container_dimensions }}"
-vitrage_notifier_dimensions: "{{ default_container_dimensions }}"
-vitrage_graph_dimensions: "{{ default_container_dimensions }}"
-vitrage_ml_dimensions: "{{ default_container_dimensions }}"
-vitrage_persistor_dimensions: "{{ default_container_dimensions }}"
-
-vitrage_api_enable_healthchecks: "{{ enable_container_healthchecks }}"
-vitrage_api_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-vitrage_api_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-vitrage_api_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-vitrage_api_healthcheck_test: ["CMD-SHELL", "healthcheck_curl http://{{ api_interface_address | put_address_in_context('url') }}:{{ vitrage_api_port }}"]
-vitrage_api_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-vitrage_api_healthcheck:
- interval: "{{ vitrage_api_healthcheck_interval }}"
- retries: "{{ vitrage_api_healthcheck_retries }}"
- start_period: "{{ vitrage_api_healthcheck_start_period }}"
- test: "{% if vitrage_api_enable_healthchecks | bool %}{{ vitrage_api_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ vitrage_api_healthcheck_timeout }}"
-
-vitrage_notifier_enable_healthchecks: "{{ enable_container_healthchecks }}"
-vitrage_notifier_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-vitrage_notifier_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-vitrage_notifier_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-vitrage_notifier_healthcheck_test: ["CMD-SHELL", "healthcheck_port vitrage-notifier {{ om_rpc_port }}"]
-vitrage_notifier_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-vitrage_notifier_healthcheck:
- interval: "{{ vitrage_notifier_healthcheck_interval }}"
- retries: "{{ vitrage_notifier_healthcheck_retries }}"
- start_period: "{{ vitrage_notifier_healthcheck_start_period }}"
- test: "{% if vitrage_notifier_enable_healthchecks | bool %}{{ vitrage_notifier_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ vitrage_notifier_healthcheck_timeout }}"
-
-vitrage_graph_enable_healthchecks: "{{ enable_container_healthchecks }}"
-vitrage_graph_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-vitrage_graph_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-vitrage_graph_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-vitrage_graph_healthcheck_test: ["CMD-SHELL", "healthcheck_port vitrage-graph {{ om_rpc_port }}"]
-vitrage_graph_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-vitrage_graph_healthcheck:
- interval: "{{ vitrage_graph_healthcheck_interval }}"
- retries: "{{ vitrage_graph_healthcheck_retries }}"
- start_period: "{{ vitrage_graph_healthcheck_start_period }}"
- test: "{% if vitrage_graph_enable_healthchecks | bool %}{{ vitrage_graph_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ vitrage_graph_healthcheck_timeout }}"
-
-vitrage_ml_enable_healthchecks: "{{ enable_container_healthchecks }}"
-vitrage_ml_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-vitrage_ml_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-vitrage_ml_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-vitrage_ml_healthcheck_test: ["CMD-SHELL", "healthcheck_port vitrage-ml {{ om_rpc_port }}"]
-vitrage_ml_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-vitrage_ml_healthcheck:
- interval: "{{ vitrage_ml_healthcheck_interval }}"
- retries: "{{ vitrage_ml_healthcheck_retries }}"
- start_period: "{{ vitrage_ml_healthcheck_start_period }}"
- test: "{% if vitrage_ml_enable_healthchecks | bool %}{{ vitrage_ml_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ vitrage_ml_healthcheck_timeout }}"
-
-vitrage_persistor_enable_healthchecks: "{{ enable_container_healthchecks }}"
-vitrage_persistor_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-vitrage_persistor_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-vitrage_persistor_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-vitrage_persistor_healthcheck_test: ["CMD-SHELL", "healthcheck_port vitrage-persistor {{ om_rpc_port }}"]
-vitrage_persistor_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-vitrage_persistor_healthcheck:
- interval: "{{ vitrage_persistor_healthcheck_interval }}"
- retries: "{{ vitrage_persistor_healthcheck_retries }}"
- start_period: "{{ vitrage_persistor_healthcheck_start_period }}"
- test: "{% if vitrage_persistor_enable_healthchecks | bool %}{{ vitrage_persistor_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ vitrage_persistor_healthcheck_timeout }}"
-
-vitrage_api_default_volumes:
- - "{{ node_config_directory }}/vitrage-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "{{ kolla_dev_repos_directory ~ '/vitrage/vitrage:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/vitrage' if vitrage_dev_mode | bool else '' }}"
- - "kolla_logs:/var/log/kolla/"
-vitrage_notifier_default_volumes:
- - "{{ node_config_directory }}/vitrage-notifier/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "{{ kolla_dev_repos_directory ~ '/vitrage/vitrage:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/vitrage' if vitrage_dev_mode | bool else '' }}"
- - "kolla_logs:/var/log/kolla/"
-vitrage_graph_default_volumes:
- - "{{ node_config_directory }}/vitrage-graph/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "{{ kolla_dev_repos_directory ~ '/vitrage/vitrage:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/vitrage' if vitrage_dev_mode | bool else '' }}"
- - "kolla_logs:/var/log/kolla/"
-vitrage_ml_default_volumes:
- - "{{ node_config_directory }}/vitrage-ml/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "{{ kolla_dev_repos_directory ~ '/vitrage/vitrage:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/vitrage' if vitrage_dev_mode | bool else '' }}"
- - "kolla_logs:/var/log/kolla/"
-vitrage_persistor_default_volumes:
- - "{{ node_config_directory }}/vitrage-persistor/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "{{ kolla_dev_repos_directory ~ '/vitrage/vitrage:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/vitrage' if vitrage_dev_mode | bool else '' }}"
- - "kolla_logs:/var/log/kolla/"
-
-vitrage_extra_volumes: "{{ default_extra_volumes }}"
-vitrage_api_extra_volumes: "{{ vitrage_extra_volumes }}"
-vitrage_notifier_extra_volumes: "{{ vitrage_extra_volumes }}"
-vitrage_graph_extra_volumes: "{{ vitrage_extra_volumes }}"
-vitrage_ml_extra_volumes: "{{ vitrage_extra_volumes }}"
-vitrage_persistor_extra_volumes: "{{ vitrage_extra_volumes }}"
-
-####################
-# OpenStack
-####################
-vitrage_logging_debug: "{{ openstack_logging_debug }}"
-
-vitrage_keystone_user: "vitrage"
-
-openstack_vitrage_auth: "{{ openstack_auth }}"
-
-vitrage_api_workers: "{{ openstack_service_workers }}"
-
-#####################
-# Datasources
-#####################
-vitrage_notifier:
- - name: "aodh"
- enabled: "{{ enable_aodh | bool }}"
- - name: "mistral"
- enabled: "{{ enable_mistral | bool }}"
- - name: "nova"
- enabled: "{{ enable_nova | bool }}"
-
-vitrage_notifiers: "{{ vitrage_notifier | selectattr('enabled', 'equalto', true) | list }}"
-
-vitrage_datasource:
- - name: "static"
- enabled: true
- - name: "nova.host,nova.instance,nova.zone"
- enabled: "{{ enable_nova | bool }}"
- - name: "aodh"
- enabled: "{{ enable_aodh | bool }}"
- - name: "collectd"
- enabled: "{{ enable_collectd | bool }}"
- - name: "cinder.volume"
- enabled: "{{ enable_cinder | bool }}"
- - name: "neutron.network,neutron.port"
- enabled: "{{ enable_neutron | bool }}"
- # TODO(egonzalez) Heat cannot be used with default policy.json due stacks:global_index=rule:deny_everybody.
- # Document process to deploy vitrage+heat.
- - name: "heat.stack"
- enabled: "no"
- - name: "prometheus"
- enabled: "{{ enable_vitrage_prometheus_datasource | bool }}"
-
-vitrage_datasources: "{{ vitrage_datasource | selectattr('enabled', 'equalto', true) | list }}"
-
-
-####################
-# Kolla
-####################
-vitrage_git_repository: "{{ kolla_dev_repos_git }}/{{ project_name }}"
-vitrage_dev_repos_pull: "{{ kolla_dev_repos_pull }}"
-vitrage_dev_mode: "{{ kolla_dev_mode }}"
-vitrage_source_version: "{{ kolla_source_version }}"
-
-####################
-# Notifications
-####################
-vitrage_notification_topics:
- - name: notifications
- enabled: "{{ enable_ceilometer | bool }}"
- - name: vitrage_notifications
- enabled: True
-
-vitrage_enabled_notification_topics: "{{ vitrage_notification_topics | selectattr('enabled', 'equalto', true) | list }}"
-
-####################
-# Keystone
-####################
-vitrage_ks_services:
- - name: "vitrage"
- type: "rca"
- description: "Root Cause Analysis Service"
- endpoints:
- - {'interface': 'internal', 'url': '{{ vitrage_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ vitrage_public_endpoint }}'}
-
-vitrage_ks_users:
- - project: "service"
- user: "{{ vitrage_keystone_user }}"
- password: "{{ vitrage_keystone_password }}"
- role: "admin"
diff --git a/ansible/roles/vitrage/handlers/main.yml b/ansible/roles/vitrage/handlers/main.yml
deleted file mode 100644
index 50cdf2aaa1..0000000000
--- a/ansible/roles/vitrage/handlers/main.yml
+++ /dev/null
@@ -1,80 +0,0 @@
----
-- name: Restart vitrage-api container
- vars:
- service_name: "vitrage-api"
- service: "{{ vitrage_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
-
-- name: Restart vitrage-notifier container
- vars:
- service_name: "vitrage-notifier"
- service: "{{ vitrage_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
-
-- name: Restart vitrage-graph container
- vars:
- service_name: "vitrage-graph"
- service: "{{ vitrage_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
-
-- name: Restart vitrage-ml container
- vars:
- service_name: "vitrage-ml"
- service: "{{ vitrage_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
-
-- name: Restart vitrage-persistor container
- vars:
- service_name: "vitrage-persistor"
- service: "{{ vitrage_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/vitrage/tasks/bootstrap.yml b/ansible/roles/vitrage/tasks/bootstrap.yml
deleted file mode 100644
index 2670fbf7a7..0000000000
--- a/ansible/roles/vitrage/tasks/bootstrap.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-- name: Creating vitrage database
- become: true
- kolla_toolbox:
- module_name: mysql_db
- module_args:
- login_host: "{{ database_address }}"
- login_port: "{{ database_port }}"
- login_user: "{{ vitrage_database_shard_root_user }}"
- login_password: "{{ database_password }}"
- name: "{{ vitrage_database_name }}"
- run_once: True
- delegate_to: "{{ groups['vitrage-api'][0] }}"
- when:
- - not use_preconfigured_databases | bool
-
-- name: Creating vitrage database user and setting permissions
- become: true
- kolla_toolbox:
- module_name: mysql_user
- module_args:
- login_host: "{{ database_address }}"
- login_port: "{{ database_port }}"
- login_user: "{{ vitrage_database_shard_root_user }}"
- login_password: "{{ database_password }}"
- name: "{{ vitrage_database_user }}"
- password: "{{ vitrage_database_password }}"
- host: "%"
- priv: "{{ vitrage_database_name }}.*:ALL"
- append_privs: "yes"
- run_once: True
- delegate_to: "{{ groups['vitrage-api'][0] }}"
- when:
- - not use_preconfigured_databases | bool
-
-- import_tasks: bootstrap_service.yml
diff --git a/ansible/roles/vitrage/tasks/bootstrap_service.yml b/ansible/roles/vitrage/tasks/bootstrap_service.yml
deleted file mode 100644
index 771be16102..0000000000
--- a/ansible/roles/vitrage/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Running Vitrage bootstrap container
- vars:
- vitrage_api: "{{ vitrage_services['vitrage-api'] }}"
- become: true
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ vitrage_api.image }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_vitrage"
- restart_policy: no
- volumes: "{{ vitrage_api.volumes | reject('equalto', '') | list }}"
- run_once: True
- delegate_to: "{{ groups[vitrage_api.group][0] }}"
diff --git a/ansible/roles/vitrage/tasks/check-containers.yml b/ansible/roles/vitrage/tasks/check-containers.yml
deleted file mode 100644
index 5840a73352..0000000000
--- a/ansible/roles/vitrage/tasks/check-containers.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- name: Check vitrage containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ vitrage_services }}"
- notify:
- - "Restart {{ item.key }} container"
diff --git a/ansible/roles/vitrage/tasks/clone.yml b/ansible/roles/vitrage/tasks/clone.yml
deleted file mode 100644
index 439f99f3c3..0000000000
--- a/ansible/roles/vitrage/tasks/clone.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Cloning vitrage source repository for development
- become: true
- git:
- repo: "{{ vitrage_git_repository }}"
- dest: "{{ kolla_dev_repos_directory }}/{{ project_name }}"
- update: "{{ vitrage_dev_repos_pull }}"
- version: "{{ vitrage_source_version }}"
diff --git a/ansible/roles/vitrage/tasks/config.yml b/ansible/roles/vitrage/tasks/config.yml
deleted file mode 100644
index 60d68e4f46..0000000000
--- a/ansible/roles/vitrage/tasks/config.yml
+++ /dev/null
@@ -1,110 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item.key }}"
- state: "directory"
- owner: "{{ config_owner_user }}"
- group: "{{ config_owner_group }}"
- mode: "0770"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ vitrage_services }}"
-
-- name: Check if policies shall be overwritten
- stat:
- path: "{{ item }}"
- run_once: True
- delegate_to: localhost
- register: vitrage_policy
- with_first_found:
- - files: "{{ supported_policy_format_list }}"
- paths:
- - "{{ node_custom_config }}/vitrage/"
- skip: true
-
-- name: Set vitrage policy file
- set_fact:
- vitrage_policy_file: "{{ vitrage_policy.results.0.stat.path | basename }}"
- vitrage_policy_file_path: "{{ vitrage_policy.results.0.stat.path }}"
- when:
- - vitrage_policy.results
-
-- include_tasks: copy-certs.yml
- when:
- - kolla_copy_ca_into_containers | bool
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item.key }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ vitrage_services }}"
- notify:
- - Restart {{ item.key }} container
-
-- name: Copying over vitrage.conf
- vars:
- service_name: "{{ item.key }}"
- merge_configs:
- sources:
- - "{{ role_path }}/templates/vitrage.conf.j2"
- - "{{ node_config_directory }}/config/global.conf"
- - "{{ node_config_directory }}/config/messaging.conf"
- - "{{ node_config_directory }}/config/vitrage.conf"
- - "{{ node_config_directory }}/config/vitrage/{{ item.key }}.conf"
- - "{{ node_config_directory }}/config/vitrage/{{ inventory_hostname }}/vitrage.conf"
- dest: "{{ node_config_directory }}/{{ item.key }}/vitrage.conf"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ vitrage_services }}"
- notify:
- - Restart {{ item.key }} container
-
-- name: Copying over wsgi-vitrage files for services
- template:
- src: "wsgi-vitrage.conf.j2"
- dest: "{{ node_config_directory }}/{{ item }}/wsgi-vitrage.conf"
- mode: "0660"
- become: true
- with_items:
- - "vitrage-api"
- notify:
- - Restart vitrage-api container
-
-- name: Copying over prometheus_conf.yml file for service
- vars:
- service: "{{ vitrage_services['vitrage-graph'] }}"
- template:
- src: "{{ node_custom_config }}/vitrage/prometheus_conf.yaml"
- dest: "{{ node_config_directory }}/vitrage-graph/prometheus_conf.yaml"
- mode: "0660"
- become: true
- when:
- - enable_vitrage_prometheus_datasource | bool
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart vitrage-graph container
-
-- name: Copying over existing policy file
- template:
- src: "{{ vitrage_policy_file_path }}"
- dest: "{{ node_config_directory }}/{{ item.key }}/{{ vitrage_policy_file }}"
- mode: "0660"
- become: true
- when:
- - vitrage_policy_file is defined
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ vitrage_services }}"
- notify:
- - Restart {{ item.key }} container
diff --git a/ansible/roles/vitrage/tasks/copy-certs.yml b/ansible/roles/vitrage/tasks/copy-certs.yml
deleted file mode 100644
index bdd8fe3581..0000000000
--- a/ansible/roles/vitrage/tasks/copy-certs.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: "Copy certificates and keys for {{ project_name }}"
- import_role:
- role: service-cert-copy
- vars:
- project_services: "{{ vitrage_services }}"
diff --git a/ansible/roles/vitrage/tasks/deploy-containers.yml b/ansible/roles/vitrage/tasks/deploy-containers.yml
deleted file mode 100644
index eb24ab5c7a..0000000000
--- a/ansible/roles/vitrage/tasks/deploy-containers.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- import_tasks: check-containers.yml
diff --git a/ansible/roles/vitrage/tasks/deploy.yml b/ansible/roles/vitrage/tasks/deploy.yml
deleted file mode 100644
index 28649bcdcb..0000000000
--- a/ansible/roles/vitrage/tasks/deploy.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- import_tasks: register.yml
-
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-- include_tasks: clone.yml
- when: vitrage_dev_mode | bool
-
-- import_tasks: bootstrap.yml
-
-- name: Flush handlers
- meta: flush_handlers
diff --git a/ansible/roles/vitrage/tasks/loadbalancer.yml b/ansible/roles/vitrage/tasks/loadbalancer.yml
deleted file mode 100644
index 4fba573d24..0000000000
--- a/ansible/roles/vitrage/tasks/loadbalancer.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: "Configure loadbalancer for {{ project_name }}"
- import_role:
- name: loadbalancer-config
- vars:
- project_services: "{{ vitrage_services }}"
- tags: always
diff --git a/ansible/roles/vitrage/tasks/main.yml b/ansible/roles/vitrage/tasks/main.yml
deleted file mode 100644
index bc5d1e6257..0000000000
--- a/ansible/roles/vitrage/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include_tasks: "{{ kolla_action }}.yml"
diff --git a/ansible/roles/vitrage/tasks/precheck.yml b/ansible/roles/vitrage/tasks/precheck.yml
deleted file mode 100644
index d5da64838f..0000000000
--- a/ansible/roles/vitrage/tasks/precheck.yml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-- import_role:
- name: service-precheck
- vars:
- service_precheck_services: "{{ vitrage_services }}"
- service_name: "{{ project_name }}"
-
-- name: Get container facts
- become: true
- kolla_container_facts:
- name:
- - vitrage_api
- register: container_facts
-
-- name: Checking free port for vitrage API
- wait_for:
- host: "{{ 'api' | kolla_address }}"
- port: "{{ vitrage_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - container_facts['vitrage_api'] is not defined
- - inventory_hostname in groups['vitrage-api']
-
-- name: Checking custom prometheus_conf.yaml exists
- stat:
- path: "{{ node_custom_config }}/vitrage/prometheus_conf.yaml"
- delegate_to: localhost
- register: result
- run_once: true
- failed_when: not result.stat.exists
- when:
- - enable_vitrage_prometheus_datasource | bool
diff --git a/ansible/roles/vitrage/tasks/pull.yml b/ansible/roles/vitrage/tasks/pull.yml
deleted file mode 100644
index 53f9c5fda1..0000000000
--- a/ansible/roles/vitrage/tasks/pull.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- import_role:
- role: service-images-pull
diff --git a/ansible/roles/vitrage/tasks/reconfigure.yml b/ansible/roles/vitrage/tasks/reconfigure.yml
deleted file mode 100644
index 5b10a7e111..0000000000
--- a/ansible/roles/vitrage/tasks/reconfigure.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- import_tasks: deploy.yml
diff --git a/ansible/roles/vitrage/tasks/register.yml b/ansible/roles/vitrage/tasks/register.yml
deleted file mode 100644
index 6bbd0561d3..0000000000
--- a/ansible/roles/vitrage/tasks/register.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- import_role:
- name: service-ks-register
- vars:
- service_ks_register_auth: "{{ openstack_vitrage_auth }}"
- service_ks_register_services: "{{ vitrage_ks_services }}"
- service_ks_register_users: "{{ vitrage_ks_users }}"
-
-- name: Adding vitrage user into admin project
- become: true
- kolla_toolbox:
- module_name: "os_user_role"
- module_args:
- user: "{{ vitrage_keystone_user }}"
- role: "admin"
- project: "admin"
- auth: "{{ openstack_vitrage_auth }}"
- endpoint_type: "{{ openstack_interface }}"
- cacert: "{{ openstack_cacert }}"
- region_name: "{{ openstack_region_name }}"
- run_once: True
diff --git a/ansible/roles/vitrage/tasks/stop.yml b/ansible/roles/vitrage/tasks/stop.yml
deleted file mode 100644
index 411079e19f..0000000000
--- a/ansible/roles/vitrage/tasks/stop.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- import_role:
- name: service-stop
- vars:
- project_services: "{{ vitrage_services }}"
- service_name: "{{ project_name }}"
diff --git a/ansible/roles/vitrage/tasks/upgrade.yml b/ansible/roles/vitrage/tasks/upgrade.yml
deleted file mode 100644
index 6ba9f99799..0000000000
--- a/ansible/roles/vitrage/tasks/upgrade.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-- import_tasks: bootstrap_service.yml
-
-- name: Flush handlers
- meta: flush_handlers
diff --git a/ansible/roles/vitrage/templates/vitrage-api.json.j2 b/ansible/roles/vitrage/templates/vitrage-api.json.j2
deleted file mode 100644
index 34f2a9223e..0000000000
--- a/ansible/roles/vitrage/templates/vitrage-api.json.j2
+++ /dev/null
@@ -1,33 +0,0 @@
-{% set apache_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %}
-{% set apache_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %}
-{% set apache_file = '000-default.conf' if kolla_base_distro in ['ubuntu', 'debian'] else 'vitrage-api.conf' %}
-{
- "command": "/usr/sbin/{{ apache_cmd }} -DFOREGROUND",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/vitrage.conf",
- "dest": "/etc/vitrage/vitrage.conf",
- "owner": "vitrage",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/wsgi-vitrage.conf",
- "dest": "/etc/{{ apache_dir }}/{{ apache_file }}",
- "owner": "vitrage",
- "perm": "0644"
- }{% if vitrage_policy_file is defined %},
- {
- "source": "{{ container_config_directory }}/{{ vitrage_policy_file }}",
- "dest": "/etc/vitrage/{{ vitrage_policy_file }}",
- "owner": "vitrage",
- "perm": "0600"
- }{% endif %}
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/vitrage",
- "owner": "vitrage:vitrage",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/vitrage/templates/vitrage-graph.json.j2 b/ansible/roles/vitrage/templates/vitrage-graph.json.j2
deleted file mode 100644
index 59b350712e..0000000000
--- a/ansible/roles/vitrage/templates/vitrage-graph.json.j2
+++ /dev/null
@@ -1,30 +0,0 @@
-{
- "command": "vitrage-graph --config-file /etc/vitrage/vitrage.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/vitrage.conf",
- "dest": "/etc/vitrage/vitrage.conf",
- "owner": "vitrage",
- "perm": "0644"
- }{% if enable_vitrage_prometheus_datasource | bool %},
- {
- "source": "{{ container_config_directory }}/prometheus_conf.yaml",
- "dest": "/etc/vitrage/prometheus_conf.yaml",
- "owner": "vitrage",
- "perm": "0644"
- }{% endif %}{% if vitrage_policy_file is defined %},
- {
- "source": "{{ container_config_directory }}/{{ vitrage_policy_file }}",
- "dest": "/etc/vitrage/{{ vitrage_policy_file }}",
- "owner": "vitrage",
- "perm": "0600"
- }{% endif %}
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/vitrage",
- "owner": "vitrage:vitrage",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/vitrage/templates/vitrage-ml.json.j2 b/ansible/roles/vitrage/templates/vitrage-ml.json.j2
deleted file mode 100644
index 6c629e4008..0000000000
--- a/ansible/roles/vitrage/templates/vitrage-ml.json.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-{
- "command": "vitrage-ml --config-file /etc/vitrage/vitrage.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/vitrage.conf",
- "dest": "/etc/vitrage/vitrage.conf",
- "owner": "vitrage",
- "perm": "0644"
- }{% if vitrage_policy_file is defined %},
- {
- "source": "{{ container_config_directory }}/{{ vitrage_policy_file }}",
- "dest": "/etc/vitrage/{{ vitrage_policy_file }}",
- "owner": "vitrage",
- "perm": "0600"
- }{% endif %}
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/vitrage",
- "owner": "vitrage:vitrage",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/vitrage/templates/vitrage-notifier.json.j2 b/ansible/roles/vitrage/templates/vitrage-notifier.json.j2
deleted file mode 100644
index 8e0047aa09..0000000000
--- a/ansible/roles/vitrage/templates/vitrage-notifier.json.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-{
- "command": "vitrage-notifier --config-file /etc/vitrage/vitrage.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/vitrage.conf",
- "dest": "/etc/vitrage/vitrage.conf",
- "owner": "vitrage",
- "perm": "0644"
- }{% if vitrage_policy_file is defined %},
- {
- "source": "{{ container_config_directory }}/{{ vitrage_policy_file }}",
- "dest": "/etc/vitrage/{{ vitrage_policy_file }}",
- "owner": "vitrage",
- "perm": "0600"
- }{% endif %}
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/vitrage",
- "owner": "vitrage:vitrage",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/vitrage/templates/vitrage-persistor.json.j2 b/ansible/roles/vitrage/templates/vitrage-persistor.json.j2
deleted file mode 100644
index 8a3e917344..0000000000
--- a/ansible/roles/vitrage/templates/vitrage-persistor.json.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-{
- "command": "vitrage-persistor --config-file /etc/vitrage/vitrage.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/vitrage.conf",
- "dest": "/etc/vitrage/vitrage.conf",
- "owner": "vitrage",
- "perm": "0644"
- }{% if vitrage_policy_file is defined %},
- {
- "source": "{{ container_config_directory }}/{{ vitrage_policy_file }}",
- "dest": "/etc/vitrage/{{ vitrage_policy_file }}",
- "owner": "vitrage",
- "perm": "0600"
- }{% endif %}
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/vitrage",
- "owner": "vitrage:vitrage",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/vitrage/templates/vitrage.conf.j2 b/ansible/roles/vitrage/templates/vitrage.conf.j2
deleted file mode 100644
index f4750491b8..0000000000
--- a/ansible/roles/vitrage/templates/vitrage.conf.j2
+++ /dev/null
@@ -1,100 +0,0 @@
-[DEFAULT]
-debug = {{ vitrage_logging_debug }}
-log_dir = /var/log/kolla/vitrage
-
-{% if service_name == 'vitrage-api' %}
-# Force vitrage-api.log or will use app.wsgi
-log_file = /var/log/kolla/vitrage/vitrage-api.log
-{% endif %}
-
-{% if vitrage_notifiers %}
-notifiers = {{ vitrage_notifiers|map(attribute='name')|join(',') }}
-{% endif %}
-
-transport_url = {{ rpc_transport_url }}
-
-[database]
-connection = mysql+pymysql://{{ vitrage_database_user }}:{{ vitrage_database_password }}@{{ vitrage_database_address }}/{{ vitrage_database_name }}
-connection_recycle_time = {{ database_connection_recycle_time }}
-max_pool_size = {{ database_max_pool_size }}
-
-{% if vitrage_datasources %}
-[datasources]
-types = {{ vitrage_datasources|map(attribute='name')|join(',') }}
-{% endif %}
-
-[machine_learning]
-plugins = jaccard_correlation
-
-[keystone_authtoken]
-service_type = rca
-www_authenticate_uri = {{ keystone_internal_url }}
-auth_url = {{ keystone_internal_url }}
-auth_type = password
-project_domain_id = {{ default_project_domain_id }}
-user_domain_id = {{ default_user_domain_id }}
-project_name = service
-username = {{ vitrage_keystone_user }}
-password = {{ vitrage_keystone_password }}
-service_token_roles_required = True
-cafile = {{ openstack_cacert }}
-region_name = {{ openstack_region_name }}
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[service_credentials]
-auth_url = {{ keystone_internal_url }}
-region_name = {{ openstack_region_name }}
-auth_type = password
-project_domain_id = {{ default_project_domain_id }}
-user_domain_id = default
-project_name = admin
-password = {{ vitrage_keystone_password }}
-username = {{ vitrage_keystone_user }}
-interface = internal
-cafile = {{ openstack_cacert }}
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[oslo_messaging_notifications]
-transport_url = {{ notify_transport_url }}
-{% if vitrage_enabled_notification_topics %}
-driver = messagingv2
-topics = {{ vitrage_enabled_notification_topics | map(attribute='name') | join(',') }}
-{% else %}
-driver = noop
-{% endif %}
-
-{% if om_enable_rabbitmq_tls | bool %}
-[oslo_messaging_rabbit]
-ssl = true
-ssl_ca_file = {{ om_rabbitmq_cacert }}
-{% endif %}
-
-[oslo_concurrency]
-lock_path = /var/lib/vitrage/tmp
-
-[oslo_middleware]
-enable_proxy_headers_parsing = True
-
-{% if vitrage_policy_file is defined %}
-[oslo_policy]
-policy_file = {{ vitrage_policy_file }}
-{% endif %}
-
-{% if enable_osprofiler | bool %}
-[profiler]
-enabled = true
-trace_sqlalchemy = true
-hmac_keys = {{ osprofiler_secret }}
-connection_string = {{ osprofiler_backend_connection_string }}
-{% endif %}
-
-{% if enable_vitrage_prometheus_datasource | bool %}
-[prometheus]
-config_file = /etc/vitrage/prometheus_conf.yaml
-{% endif %}
diff --git a/ansible/roles/vitrage/templates/wsgi-vitrage.conf.j2 b/ansible/roles/vitrage/templates/wsgi-vitrage.conf.j2
deleted file mode 100644
index 9b54e8b012..0000000000
--- a/ansible/roles/vitrage/templates/wsgi-vitrage.conf.j2
+++ /dev/null
@@ -1,43 +0,0 @@
-{% set vitrage_log_dir = '/var/log/kolla/vitrage' %}
-Listen {{ 'api' | kolla_address | put_address_in_context('url') }}:{{ vitrage_api_port }}
-
-ServerSignature Off
-ServerTokens Prod
-TraceEnable off
-TimeOut {{ kolla_httpd_timeout }}
-KeepAliveTimeout {{ kolla_httpd_keep_alive }}
-
-ErrorLog "{{ vitrage_log_dir }}/apache-error.log"
-<IfModule log_config_module>
-    CustomLog "{{ vitrage_log_dir }}/apache-access.log" common
-</IfModule>
-
-{% if vitrage_logging_debug | bool %}
-LogLevel info
-{% endif %}
-
-<VirtualHost *:{{ vitrage_api_port }}>
-
-    ## Vhost docroot
-    DocumentRoot "/var/www/cgi-bin/vitrage"
-
-    ## Directories, there should at least be a declaration for /var/www/cgi-bin/vitrage
-
-    <Directory "/var/www/cgi-bin/vitrage">
-        Options Indexes FollowSymLinks MultiViews
-        AllowOverride None
-        Require all granted
-    </Directory>
-
-    ## Logging
-    ErrorLog "{{ vitrage_log_dir }}/vitrage_wsgi_error.log"
-    ServerSignature Off
-    CustomLog "{{ vitrage_log_dir }}/vitrage_wsgi_access.log" combined
-    WSGIApplicationGroup %{GLOBAL}
-    WSGIDaemonProcess vitrage group=vitrage processes={{ vitrage_api_workers }} threads=1 user=vitrage
-    WSGIProcessGroup vitrage
-    WSGIScriptAlias / "/var/www/cgi-bin/vitrage/app.wsgi"
-{% if enable_vitrage_prometheus_datasource | bool %}
-    WSGIPassAuthorization On
-{% endif %}
-</VirtualHost>
diff --git a/ansible/roles/vitrage/vars/main.yml b/ansible/roles/vitrage/vars/main.yml
deleted file mode 100644
index ee0d018c05..0000000000
--- a/ansible/roles/vitrage/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-project_name: "vitrage"
diff --git a/ansible/roles/watcher/defaults/main.yml b/ansible/roles/watcher/defaults/main.yml
index 35df766ecb..f3ea3388a6 100644
--- a/ansible/roles/watcher/defaults/main.yml
+++ b/ansible/roles/watcher/defaults/main.yml
@@ -14,11 +14,14 @@ watcher_services:
mode: "http"
external: false
port: "{{ watcher_api_port }}"
+ listen_port: "{{ watcher_api_listen_port }}"
watcher_api_external:
enabled: "{{ enable_watcher }}"
mode: "http"
external: true
- port: "{{ watcher_api_port }}"
+ external_fqdn: "{{ watcher_external_fqdn }}"
+ port: "{{ watcher_api_public_port }}"
+ listen_port: "{{ watcher_api_listen_port }}"
watcher-applier:
container_name: watcher_applier
group: watcher-applier
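The external VIP entry now carries its own FQDN and public port, the usual shape for serving multiple services behind a single external HAProxy frontend. The companion defaults are defined outside this hunk; presumably they follow the standard group_vars pattern, roughly:

# Presumed companion defaults (sketch; only the key names come from this diff):
watcher_api_listen_port: "{{ watcher_api_port }}"
watcher_external_fqdn: "{{ kolla_external_fqdn }}"
watcher_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else watcher_api_port }}"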
@@ -36,6 +39,12 @@ watcher_services:
dimensions: "{{ watcher_engine_dimensions }}"
healthcheck: "{{ watcher_engine_healthcheck }}"
+####################
+# Config Validate
+####################
+watcher_config_validation:
+ - generator: "/watcher/etc/watcher/oslo-config-generator/watcher.conf"
+    config: "/etc/watcher/watcher.conf"
####################
# Database
####################
@@ -63,15 +72,15 @@ watcher_database_shard:
####################
watcher_tag: "{{ openstack_tag }}"
-watcher_engine_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/watcher-engine"
+watcher_engine_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}watcher-engine"
watcher_engine_tag: "{{ watcher_tag }}"
watcher_engine_image_full: "{{ watcher_engine_image }}:{{ watcher_engine_tag }}"
-watcher_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/watcher-api"
+watcher_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}watcher-api"
watcher_api_tag: "{{ watcher_tag }}"
watcher_api_image_full: "{{ watcher_api_image }}:{{ watcher_api_tag }}"
-watcher_applier_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/watcher-applier"
+watcher_applier_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}watcher-applier"
watcher_applier_tag: "{{ watcher_tag }}"
watcher_applier_image_full: "{{ watcher_applier_image }}:{{ watcher_applier_tag }}"
@@ -122,19 +131,19 @@ watcher_api_default_volumes:
- "{{ node_config_directory }}/watcher-api/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "{{ kolla_dev_repos_directory ~ '/watcher/watcher:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/watcher' if watcher_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/watcher:/dev-mode/watcher' if watcher_dev_mode | bool else '' }}"
- "kolla_logs:/var/log/kolla/"
watcher_applier_default_volumes:
- "{{ node_config_directory }}/watcher-applier/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "{{ kolla_dev_repos_directory ~ '/watcher/watcher:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/watcher' if watcher_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/watcher:/dev-mode/watcher' if watcher_dev_mode | bool else '' }}"
- "kolla_logs:/var/log/kolla/"
watcher_engine_default_volumes:
- "{{ node_config_directory }}/watcher-engine/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "{{ kolla_dev_repos_directory ~ '/watcher/watcher:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/watcher' if watcher_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/watcher:/dev-mode/watcher' if watcher_dev_mode | bool else '' }}"
- "kolla_logs:/var/log/kolla/"
watcher_extra_volumes: "{{ default_extra_volumes }}"
@@ -145,9 +154,6 @@ watcher_engine_extra_volumes: "{{ watcher_extra_volumes }}"
####################
# OpenStack
####################
-watcher_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ watcher_api_port }}"
-watcher_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ watcher_api_port }}"
-
watcher_logging_debug: "{{ openstack_logging_debug }}"
watcher_keystone_user: "watcher"
diff --git a/ansible/roles/watcher/handlers/main.yml b/ansible/roles/watcher/handlers/main.yml
index 89fe153c4a..fc177bf711 100644
--- a/ansible/roles/watcher/handlers/main.yml
+++ b/ansible/roles/watcher/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "watcher-applier"
service: "{{ watcher_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -12,15 +12,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart watcher-engine container
vars:
service_name: "watcher-engine"
service: "{{ watcher_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -28,15 +26,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
- name: Restart watcher-api container
vars:
service_name: "watcher-api"
service: "{{ watcher_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -44,5 +40,3 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
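With checks and notifications centralised (see the service-check-containers import below), each restart handler reduces to the container action itself; the kolla_action != "config" guard becomes redundant because, on this diff's reading, handlers are only ever notified from container checks that the config-generation action does not run. A handler in the new style, condensed from the hunks above:

- name: Restart watcher-api container
  vars:
    service: "{{ watcher_services['watcher-api'] }}"
  become: true
  kolla_container:
    action: "recreate_or_restart_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ service.container_name }}"
    image: "{{ service.image }}"
    volumes: "{{ service.volumes | reject('equalto', '') | list }}"
    dimensions: "{{ service.dimensions }}"
    healthcheck: "{{ service.healthcheck | default(omit) }}"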
diff --git a/ansible/roles/watcher/tasks/bootstrap.yml b/ansible/roles/watcher/tasks/bootstrap.yml
index d1622da412..294c76351e 100644
--- a/ansible/roles/watcher/tasks/bootstrap.yml
+++ b/ansible/roles/watcher/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating Watcher database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -17,6 +18,7 @@
- name: Creating Watcher database user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
diff --git a/ansible/roles/watcher/tasks/bootstrap_service.yml b/ansible/roles/watcher/tasks/bootstrap_service.yml
index 789348a3d3..36a72f3831 100644
--- a/ansible/roles/watcher/tasks/bootstrap_service.yml
+++ b/ansible/roles/watcher/tasks/bootstrap_service.yml
@@ -3,7 +3,7 @@
vars:
watcher_api: "{{ watcher_services['watcher-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -14,7 +14,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_watcher"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ watcher_api.volumes | reject('equalto', '') | list }}"
run_once: True
delegate_to: "{{ groups[watcher_api.group][0] }}"
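restart_policy: oneshot also sidesteps YAML's reading of a bare no as a boolean; oneshot is the explicit policy kolla_container uses for run-once bootstrap containers. Trimmed to its essentials, the bootstrap task now looks like:

- name: Running Watcher bootstrap container
  become: true
  kolla_container:
    action: "start_container"
    detach: False
    name: "bootstrap_watcher"
    restart_policy: oneshot  # run to completion once; the engine never restarts it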
diff --git a/ansible/roles/watcher/tasks/check-containers.yml b/ansible/roles/watcher/tasks/check-containers.yml
index 1c20ba4d69..b7e2f7c29f 100644
--- a/ansible/roles/watcher/tasks/check-containers.yml
+++ b/ansible/roles/watcher/tasks/check-containers.yml
@@ -1,17 +1,3 @@
---
-- name: Check watcher containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ watcher_services }}"
- notify:
- - "Restart {{ item.key }} container"
+- import_role:
+ name: service-check-containers
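The per-role boilerplate moves into the shared service-check-containers role. Conceptually (a sketch of the pattern, not the role's literal source), it repeats the same comparison for every enabled service mapped to the host and notifies the matching restart handler:

- name: Check service containers (conceptual sketch)
  become: true
  kolla_container:
    action: "compare_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ item.value.container_name }}"
    image: "{{ item.value.image }}"
    volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
    dimensions: "{{ item.value.dimensions }}"
    healthcheck: "{{ item.value.healthcheck | default(omit) }}"
  with_dict: "{{ watcher_services | select_services_enabled_and_mapped_to_host }}"
  notify: "Restart {{ item.key }} container"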
diff --git a/ansible/roles/watcher/tasks/config.yml b/ansible/roles/watcher/tasks/config.yml
index 2b1688a09c..94a8e80d63 100644
--- a/ansible/roles/watcher/tasks/config.yml
+++ b/ansible/roles/watcher/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ watcher_services }}"
+ with_dict: "{{ watcher_services | select_services_enabled_and_mapped_to_host }}"
- name: Check if policies shall be overwritten
stat:
@@ -41,12 +38,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ watcher_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ watcher_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over watcher.conf
vars:
@@ -61,12 +53,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/watcher.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ watcher_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ watcher_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over existing policy file
template:
@@ -76,8 +63,4 @@
become: true
when:
- watcher_policy_file is defined
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ watcher_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ watcher_services | select_services_enabled_and_mapped_to_host }}"
diff --git a/ansible/roles/watcher/tasks/config_validate.yml b/ansible/roles/watcher/tasks/config_validate.yml
new file mode 100644
index 0000000000..1e74980f5a
--- /dev/null
+++ b/ansible/roles/watcher/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ watcher_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ watcher_config_validation }}"
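Each generator/config pair feeds the shared service-config-validate role. The underlying idea is an oslo-config-validator run against the rendered file inside the service container; a hand-rolled equivalent (illustrative only, the role's actual tasks differ) would be:

- name: Validate watcher.conf against its oslo.config schema (illustrative)
  become: true
  command: >
    {{ kolla_container_engine }} exec watcher_api
    oslo-config-validator
    --config-file /watcher/etc/watcher/oslo-config-generator/watcher.conf
    --input-file /etc/watcher/watcher.conf
  changed_when: false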
diff --git a/ansible/roles/watcher/tasks/precheck.yml b/ansible/roles/watcher/tasks/precheck.yml
index 75bd1694db..3828d011ef 100644
--- a/ansible/roles/watcher/tasks/precheck.yml
+++ b/ansible/roles/watcher/tasks/precheck.yml
@@ -8,8 +8,11 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- watcher_api
+ check_mode: false
register: container_facts
- name: Checking free port for watcher API
diff --git a/ansible/roles/watcher/templates/watcher-api.json.j2 b/ansible/roles/watcher/templates/watcher-api.json.j2
index 2ff6ac1427..c80bb842e9 100644
--- a/ansible/roles/watcher/templates/watcher-api.json.j2
+++ b/ansible/roles/watcher/templates/watcher-api.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/watcher/{{ watcher_policy_file }}",
"owner": "watcher",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/watcher/templates/watcher-applier.json.j2 b/ansible/roles/watcher/templates/watcher-applier.json.j2
index e8d6ac38a0..2c5fc72686 100644
--- a/ansible/roles/watcher/templates/watcher-applier.json.j2
+++ b/ansible/roles/watcher/templates/watcher-applier.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/watcher/{{ watcher_policy_file }}",
"owner": "watcher",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/watcher/templates/watcher-engine.json.j2 b/ansible/roles/watcher/templates/watcher-engine.json.j2
index 080e88f08a..ee8e20e008 100644
--- a/ansible/roles/watcher/templates/watcher-engine.json.j2
+++ b/ansible/roles/watcher/templates/watcher-engine.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/watcher/{{ watcher_policy_file }}",
"owner": "watcher",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/watcher/templates/watcher.conf.j2 b/ansible/roles/watcher/templates/watcher.conf.j2
index c1c6213b9b..f087b6673d 100644
--- a/ansible/roles/watcher/templates/watcher.conf.j2
+++ b/ansible/roles/watcher/templates/watcher.conf.j2
@@ -32,7 +32,7 @@ service_token_roles_required = True
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
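The memcache security strategy was previously hardcoded; turning it into a variable lets deployments choose MAC or disable it. To keep rendered configs identical, the new variable presumably defaults to the old literal:

# Presumed group_vars default (sketch): preserves the previously hardcoded value
memcache_security_strategy: "ENCRYPT"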
@@ -59,11 +59,18 @@ topics = {{ watcher_enabled_notification_topics | map(attribute='name') | join('
driver = noop
{% endif %}
-{% if om_enable_rabbitmq_tls | bool %}
[oslo_messaging_rabbit]
+heartbeat_in_pthread = false
+{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
{% if watcher_policy_file is defined %}
[oslo_policy]
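[oslo_messaging_rabbit] is now rendered unconditionally, pinning heartbeat_in_pthread to false, while the remaining options key off deployment flags. The toggles involved, with the option each one emits when true (values illustrative):

om_enable_rabbitmq_tls: false                # -> ssl = true, ssl_ca_file = {{ om_rabbitmq_cacert }}
om_enable_rabbitmq_high_availability: false  # -> amqp_durable_queues = true
om_enable_rabbitmq_quorum_queues: true       # -> rabbit_quorum_queue = true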
diff --git a/ansible/roles/zookeeper/defaults/main.yml b/ansible/roles/zookeeper/defaults/main.yml
deleted file mode 100644
index 24e6ec0f0a..0000000000
--- a/ansible/roles/zookeeper/defaults/main.yml
+++ /dev/null
@@ -1,49 +0,0 @@
----
-zookeeper_services:
- zookeeper:
- container_name: zookeeper
- group: zookeeper
- enabled: true
- image: "{{ zookeeper_image_full }}"
- environment:
- ZOO_LOG_DIR: /var/log/kolla/zookeeper
- ZOO_LOG4J_PROP: "{{ zookeeper_log_settings }}"
- volumes: "{{ zookeeper_default_volumes + zookeeper_extra_volumes }}"
- dimensions: "{{ zookeeper_dimensions }}"
- healthcheck: "{{ zookeeper_healthcheck }}"
-
-
-####################
-# Zookeeper
-####################
-zookeeper_log_settings: 'INFO,ROLLINGFILE'
-
-####################
-# Docker
-####################
-zookeeper_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/zookeeper"
-zookeeper_tag: "{{ openstack_tag }}"
-zookeeper_image_full: "{{ zookeeper_image }}:{{ zookeeper_tag }}"
-zookeeper_dimensions: "{{ default_container_dimensions }}"
-
-zookeeper_enable_healthchecks: "{{ enable_container_healthchecks }}"
-zookeeper_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-zookeeper_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-zookeeper_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-zookeeper_healthcheck_test: ["CMD-SHELL", "healthcheck_listen java {{ zookeeper_client_port }}"]
-zookeeper_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-zookeeper_healthcheck:
- interval: "{{ zookeeper_healthcheck_interval }}"
- retries: "{{ zookeeper_healthcheck_retries }}"
- start_period: "{{ zookeeper_healthcheck_start_period }}"
- test: "{% if zookeeper_enable_healthchecks | bool %}{{ zookeeper_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ zookeeper_healthcheck_timeout }}"
-
-zookeeper_default_volumes:
- - "{{ node_config_directory }}/zookeeper/:{{ container_config_directory }}/"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "zookeeper:/var/lib/zookeeper/data"
- - "kolla_logs:/var/log/kolla/"
-
-zookeeper_extra_volumes: "{{ default_extra_volumes }}"
diff --git a/ansible/roles/zookeeper/handlers/main.yml b/ansible/roles/zookeeper/handlers/main.yml
deleted file mode 100644
index 18d063d7ae..0000000000
--- a/ansible/roles/zookeeper/handlers/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- name: Restart zookeeper container
- vars:
- service_name: "zookeeper"
- service: "{{ zookeeper_services[service_name] }}"
- become: true
- kolla_docker:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- environment: "{{ service.environment }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
- when:
- - kolla_action != "config"
diff --git a/ansible/roles/zookeeper/tasks/check-containers.yml b/ansible/roles/zookeeper/tasks/check-containers.yml
deleted file mode 100644
index 3f86d932ba..0000000000
--- a/ansible/roles/zookeeper/tasks/check-containers.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- name: Check zookeeper containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- volumes: "{{ item.value.volumes }}"
- environment: "{{ item.value.environment }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ zookeeper_services }}"
- notify:
- - "Restart {{ item.key }} container"
diff --git a/ansible/roles/zookeeper/tasks/config.yml b/ansible/roles/zookeeper/tasks/config.yml
deleted file mode 100644
index f289882432..0000000000
--- a/ansible/roles/zookeeper/tasks/config.yml
+++ /dev/null
@@ -1,56 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item.key }}"
- state: "directory"
- owner: "{{ config_owner_user }}"
- group: "{{ config_owner_group }}"
- mode: "0770"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ zookeeper_services }}"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item.key }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ zookeeper_services }}"
- notify:
- - Restart zookeeper container
-
-- name: Copying over zookeeper configuration
- merge_configs:
- sources:
- - "{{ role_path }}/templates/{{ item.key }}.cfg.j2"
- - "{{ node_custom_config }}/{{ item.key }}.cfg"
- - "{{ node_custom_config }}/{{ item.key }}/{{ inventory_hostname }}/{{ item.key }}.cfg"
- whitespace: False
- dest: "{{ node_config_directory }}/{{ item.key }}/{{ item.key }}.cfg"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ zookeeper_services }}"
- notify:
- - Restart zookeeper container
-
-- name: Copying over zookeeper instance id
- template:
- src: "myid.j2"
- dest: "{{ node_config_directory }}/{{ item.key }}/myid"
- mode: "0660"
- become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ zookeeper_services }}"
- notify:
- - Restart zookeeper container
diff --git a/ansible/roles/zookeeper/tasks/deploy-containers.yml b/ansible/roles/zookeeper/tasks/deploy-containers.yml
deleted file mode 100644
index eb24ab5c7a..0000000000
--- a/ansible/roles/zookeeper/tasks/deploy-containers.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- import_tasks: check-containers.yml
diff --git a/ansible/roles/zookeeper/tasks/deploy.yml b/ansible/roles/zookeeper/tasks/deploy.yml
deleted file mode 100644
index 49edff81e3..0000000000
--- a/ansible/roles/zookeeper/tasks/deploy.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-- name: Flush handlers
- meta: flush_handlers
diff --git a/ansible/roles/zookeeper/tasks/main.yml b/ansible/roles/zookeeper/tasks/main.yml
deleted file mode 100644
index bc5d1e6257..0000000000
--- a/ansible/roles/zookeeper/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include_tasks: "{{ kolla_action }}.yml"
diff --git a/ansible/roles/zookeeper/tasks/precheck.yml b/ansible/roles/zookeeper/tasks/precheck.yml
deleted file mode 100644
index 0547a6ba7f..0000000000
--- a/ansible/roles/zookeeper/tasks/precheck.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-- import_role:
- name: service-precheck
- vars:
- service_precheck_services: "{{ zookeeper_services }}"
- service_name: "{{ project_name }}"
-
-- name: Get container facts
- become: true
- kolla_container_facts:
- name:
- - zookeeper
- register: container_facts
-
-- name: Checking zookeeper ports are available
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ item }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- with_items:
- - "{{ zookeeper_client_port }}"
- - "{{ zookeeper_peer_port }}"
- - "{{ zookeeper_quorum_port }}"
- when:
- - container_facts['zookeeper'] is not defined
- - inventory_hostname in groups['zookeeper']
diff --git a/ansible/roles/zookeeper/tasks/pull.yml b/ansible/roles/zookeeper/tasks/pull.yml
deleted file mode 100644
index 53f9c5fda1..0000000000
--- a/ansible/roles/zookeeper/tasks/pull.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- import_role:
- role: service-images-pull
diff --git a/ansible/roles/zookeeper/tasks/reconfigure.yml b/ansible/roles/zookeeper/tasks/reconfigure.yml
deleted file mode 100644
index 5b10a7e111..0000000000
--- a/ansible/roles/zookeeper/tasks/reconfigure.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- import_tasks: deploy.yml
diff --git a/ansible/roles/zookeeper/tasks/stop.yml b/ansible/roles/zookeeper/tasks/stop.yml
deleted file mode 100644
index 1f2a10437e..0000000000
--- a/ansible/roles/zookeeper/tasks/stop.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- import_role:
- name: service-stop
- vars:
- project_services: "{{ zookeeper_services }}"
- service_name: "{{ project_name }}"
diff --git a/ansible/roles/zookeeper/tasks/upgrade.yml b/ansible/roles/zookeeper/tasks/upgrade.yml
deleted file mode 100644
index 49edff81e3..0000000000
--- a/ansible/roles/zookeeper/tasks/upgrade.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- import_tasks: config.yml
-
-- import_tasks: check-containers.yml
-
-- name: Flush handlers
- meta: flush_handlers
diff --git a/ansible/roles/zookeeper/templates/myid.j2 b/ansible/roles/zookeeper/templates/myid.j2
deleted file mode 100644
index 7013873cce..0000000000
--- a/ansible/roles/zookeeper/templates/myid.j2
+++ /dev/null
@@ -1,5 +0,0 @@
-{% for host in groups['zookeeper'] -%}
-{% if hostvars[host].ansible_facts.hostname == ansible_facts.hostname -%}
-{{ loop.index }}
-{%- endif %}
-{%- endfor %}
diff --git a/ansible/roles/zookeeper/templates/zookeeper.cfg.j2 b/ansible/roles/zookeeper/templates/zookeeper.cfg.j2
deleted file mode 100644
index c0a8a52e50..0000000000
--- a/ansible/roles/zookeeper/templates/zookeeper.cfg.j2
+++ /dev/null
@@ -1,8 +0,0 @@
-tickTime=2000
-initLimit=10
-syncLimit=5
-dataDir=/var/lib/zookeeper/data
-clientPort={{ zookeeper_client_port }}
-{% for host in groups['zookeeper'] %}
-server.{{ loop.index }}={{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ zookeeper_peer_port }}:{{ zookeeper_quorum_port }}
-{% endfor %}
diff --git a/ansible/roles/zookeeper/templates/zookeeper.json.j2 b/ansible/roles/zookeeper/templates/zookeeper.json.j2
deleted file mode 100644
index 9d9d609901..0000000000
--- a/ansible/roles/zookeeper/templates/zookeeper.json.j2
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- "command": "/opt/zookeeper/bin/zkServer.sh start-foreground /etc/zookeeper/conf/zoo.cfg",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/myid",
- "dest": "/var/lib/zookeeper/data/myid",
- "owner": "zookeeper",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/zookeeper.cfg",
- "dest": "/etc/zookeeper/conf/zoo.cfg",
- "owner": "zookeeper",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/lib/zookeeper",
- "owner": "zookeeper:zookeeper",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/zookeeper",
- "owner": "zookeeper:zookeeper",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/zookeeper/vars/main.yml b/ansible/roles/zookeeper/vars/main.yml
deleted file mode 100644
index e0da94acf3..0000000000
--- a/ansible/roles/zookeeper/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-project_name: "zookeeper"
diff --git a/ansible/roles/zun/defaults/main.yml b/ansible/roles/zun/defaults/main.yml
index 8a2533d007..009368cb1f 100644
--- a/ansible/roles/zun/defaults/main.yml
+++ b/ansible/roles/zun/defaults/main.yml
@@ -14,11 +14,14 @@ zun_services:
mode: "http"
external: false
port: "{{ zun_api_port }}"
+ listen_port: "{{ zun_api_listen_port }}"
zun_api_external:
enabled: "{{ enable_zun }}"
mode: "http"
external: true
- port: "{{ zun_api_port }}"
+ external_fqdn: "{{ zun_external_fqdn }}"
+ port: "{{ zun_api_public_port }}"
+ listen_port: "{{ zun_api_listen_port }}"
zun-wsproxy:
container_name: zun_wsproxy
group: zun-wsproxy
@@ -44,7 +47,7 @@ zun_services:
enabled: true
image: "{{ zun_compute_image_full }}"
privileged: True
- volumes: "{{ zun_compute_default_volumes + zun_compute_extra_volumes }}"
+ volumes: "{{ zun_compute_default_volumes + zun_compute_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
dimensions: "{{ zun_compute_dimensions }}"
healthcheck: "{{ zun_compute_healthcheck }}"
zun-cni-daemon:
@@ -53,10 +56,17 @@ zun_services:
enabled: true
image: "{{ zun_cni_daemon_image_full }}"
privileged: True
- volumes: "{{ zun_cni_daemon_default_volumes + zun_cni_daemon_extra_volumes }}"
+ volumes: "{{ zun_cni_daemon_default_volumes + zun_cni_daemon_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
dimensions: "{{ zun_cni_daemon_dimensions }}"
healthcheck: "{{ zun_cni_daemon_healthcheck }}"
+####################
+# Config Validate
+####################
+zun_config_validation:
+ - generator: "/zun/etc/zun/zun-config-generator.conf"
+    config: "/etc/zun/zun.conf"
+
####################
## Database
####################
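The compute and CNI volume lists now append an engine-specific set via lookup('vars', ...), resolving run_default_volumes_docker or run_default_volumes_podman according to kolla_container_engine. The shape such variables take, with hypothetical values for illustration only:

run_default_volumes_docker: []
run_default_volumes_podman:
  - "/run/netns:/run/netns:shared"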
@@ -83,19 +93,19 @@ zun_database_shard:
####################
zun_tag: "{{ openstack_tag }}"
-zun_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/zun-api"
+zun_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}zun-api"
zun_api_tag: "{{ zun_tag }}"
zun_api_image_full: "{{ zun_api_image }}:{{ zun_api_tag }}"
-zun_wsproxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/zun-wsproxy"
+zun_wsproxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}zun-wsproxy"
zun_wsproxy_tag: "{{ zun_tag }}"
zun_wsproxy_image_full: "{{ zun_wsproxy_image }}:{{ zun_wsproxy_tag }}"
-zun_compute_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/zun-compute"
+zun_compute_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}zun-compute"
zun_compute_tag: "{{ zun_tag }}"
zun_compute_image_full: "{{ zun_compute_image }}:{{ zun_compute_tag }}"
-zun_cni_daemon_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/zun-cni-daemon"
+zun_cni_daemon_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}zun-cni-daemon"
zun_cni_daemon_tag: "{{ zun_tag }}"
zun_cni_daemon_image_full: "{{ zun_cni_daemon_image }}:{{ zun_cni_daemon_tag }}"
@@ -161,21 +171,21 @@ zun_api_default_volumes:
- "{{ node_config_directory }}/zun-api/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "{{ kolla_dev_repos_directory ~ '/zun/zun:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/zun' if zun_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/zun:/dev-mode/zun' if zun_dev_mode | bool else '' }}"
- "kolla_logs:/var/log/kolla/"
zun_wsproxy_default_volumes:
- "{{ node_config_directory }}/zun-wsproxy/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "{{ kolla_dev_repos_directory ~ '/zun/zun:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/zun' if zun_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/zun:/dev-mode/zun' if zun_dev_mode | bool else '' }}"
- "kolla_logs:/var/log/kolla/"
zun_compute_default_volumes:
- "{{ node_config_directory }}/zun-compute/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/zun/zun:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/zun' if zun_dev_mode | bool else '' }}"
- - "/run:/run:shared"
+ - "{{ kolla_dev_repos_directory ~ '/zun:/dev-mode/zun' if zun_dev_mode | bool else '' }}"
+ - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
- "/usr/lib/docker:/usr/lib/docker"
- "/var/lib/docker:/var/lib/docker"
- "/lib/modules:/lib/modules:ro"
@@ -187,7 +197,8 @@ zun_cni_daemon_default_volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/zun/zun:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/zun' if zun_dev_mode | bool else '' }}"
+ - "{{ kolla_dev_repos_directory ~ '/zun:/dev-mode/zun' if zun_dev_mode | bool else '' }}"
+ - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
zun_extra_volumes: "{{ default_extra_volumes }}"
zun_api_extra_volumes: "{{ zun_extra_volumes }}"
@@ -198,8 +209,8 @@ zun_cni_daemon_extra_volumes: "{{ zun_extra_volumes }}"
####################
## OpenStack
####################
-zun_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ zun_api_port }}/v1/"
-zun_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ zun_api_port }}/v1/"
+zun_internal_endpoint: "{{ zun_internal_base_endpoint }}/v1/"
+zun_public_endpoint: "{{ zun_public_base_endpoint }}/v1/"
zun_logging_debug: "{{ openstack_logging_debug }}"
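The versioned endpoints are now composed from shared base-endpoint variables instead of restating protocol, FQDN and port. Judging by the removed lines, the bases presumably resolve along these lines (sketch):

zun_internal_base_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ zun_api_port }}"
zun_public_base_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ zun_api_public_port }}"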
diff --git a/ansible/roles/zun/handlers/main.yml b/ansible/roles/zun/handlers/main.yml
index e1f31f9093..0d21f45753 100644
--- a/ansible/roles/zun/handlers/main.yml
+++ b/ansible/roles/zun/handlers/main.yml
@@ -4,7 +4,7 @@
service_name: "zun-api"
service: "{{ zun_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -13,17 +13,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- listen:
- - zun-api container changed
- when:
- - kolla_action != "config"
- name: Restart zun-wsproxy container
vars:
service_name: "zun-wsproxy"
service: "{{ zun_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -32,17 +28,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- listen:
- - zun-wsproxy container changed
- when:
- - kolla_action != "config"
- name: Restart zun-compute container
vars:
service_name: "zun-compute"
service: "{{ zun_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -51,17 +43,13 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- listen:
- - zun-compute container changed
- when:
- - kolla_action != "config"
- name: Restart zun-cni-daemon container
vars:
service_name: "zun-cni-daemon"
service: "{{ zun_services[service_name] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
@@ -70,10 +58,6 @@
volumes: "{{ service.volumes | reject('equalto', '') | list }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
- listen:
- - zun-cni-daemon container changed
- when:
- - kolla_action != "config"
- name: Copy loopback binary from zun-cni-daemon container to host
vars:
@@ -82,7 +66,6 @@
become: true
command: "{{ kolla_container_engine }} cp {{ service.container_name }}:/opt/loopback /opt/cni/bin/"
# NOTE(yoctozepto): it would be cleaner to listen only on image change
- # but there is no such mechanism (yet) and container change should be
- # good enough (better than including config change triggers)
+ # but there is no such mechanism (yet)
listen:
- - zun-cni-daemon container changed
+ - Restart zun-cni-daemon container
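Zun's custom "<name> container changed" topics existed so this copy step could react to container changes without firing on pure config changes; with checks centralised, the standard restart handler name is the only notification left, so the copy task listens on that instead. The minimal listener shape:

- name: Copy loopback binary from zun-cni-daemon container to host
  become: true
  command: "{{ kolla_container_engine }} cp zun_cni_daemon:/opt/loopback /opt/cni/bin/"
  listen:
    - Restart zun-cni-daemon container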
diff --git a/ansible/roles/zun/tasks/bootstrap.yml b/ansible/roles/zun/tasks/bootstrap.yml
index 6b45af6eaf..fd8a9f9a34 100644
--- a/ansible/roles/zun/tasks/bootstrap.yml
+++ b/ansible/roles/zun/tasks/bootstrap.yml
@@ -2,6 +2,7 @@
- name: Creating Zun database
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
@@ -17,6 +18,7 @@
- name: Creating Zun database user and setting permissions
become: true
kolla_toolbox:
+ container_engine: "{{ kolla_container_engine }}"
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
diff --git a/ansible/roles/zun/tasks/bootstrap_service.yml b/ansible/roles/zun/tasks/bootstrap_service.yml
index cc694f57df..d618b07c23 100644
--- a/ansible/roles/zun/tasks/bootstrap_service.yml
+++ b/ansible/roles/zun/tasks/bootstrap_service.yml
@@ -3,7 +3,7 @@
vars:
zun_api: "{{ zun_services['zun-api'] }}"
become: true
- kolla_docker:
+ kolla_container:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
@@ -14,7 +14,7 @@
labels:
BOOTSTRAP:
name: "bootstrap_zun"
- restart_policy: no
+ restart_policy: oneshot
volumes: "{{ zun_api.volumes | reject('equalto', '') | list }}"
run_once: True
delegate_to: "{{ groups[zun_api.group][0] }}"
diff --git a/ansible/roles/zun/tasks/check-containers.yml b/ansible/roles/zun/tasks/check-containers.yml
index b6576eb206..b7e2f7c29f 100644
--- a/ansible/roles/zun/tasks/check-containers.yml
+++ b/ansible/roles/zun/tasks/check-containers.yml
@@ -1,21 +1,3 @@
---
-- name: Check zun containers
- become: true
- kolla_docker:
- action: "compare_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ item.value.container_name }}"
- image: "{{ item.value.image }}"
- privileged: "{{ item.value.privileged | default(False) }}"
- volumes: "{{ item.value.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ item.value.dimensions }}"
- healthcheck: "{{ item.value.healthcheck | default(omit) }}"
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ zun_services }}"
- notify:
- # NOTE(yoctozepto): Zun differs from other projects because we want
- # to differentiate between config change and container property
- # change
- - "{{ item.key }} container changed"
+- import_role:
+ name: service-check-containers
diff --git a/ansible/roles/zun/tasks/config.yml b/ansible/roles/zun/tasks/config.yml
index 5b10fb0677..502deb50fb 100644
--- a/ansible/roles/zun/tasks/config.yml
+++ b/ansible/roles/zun/tasks/config.yml
@@ -7,10 +7,7 @@
group: "{{ config_owner_group }}"
mode: "0770"
become: true
- when:
- - inventory_hostname in groups[item.value.group]
- - item.value.enabled | bool
- with_dict: "{{ zun_services }}"
+ with_dict: "{{ zun_services | select_services_enabled_and_mapped_to_host }}"
- include_tasks: external_ceph.yml
when:
@@ -46,12 +43,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ zun_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ zun_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over zun.conf
vars:
@@ -66,12 +58,7 @@
dest: "{{ node_config_directory }}/{{ item.key }}/zun.conf"
mode: "0660"
become: true
- when:
- - item.value.enabled | bool
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ zun_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ zun_services | select_services_enabled_and_mapped_to_host }}"
- name: Copying over wsgi-zun files for services
vars:
@@ -81,11 +68,7 @@
dest: "{{ node_config_directory }}/zun-api/wsgi-zun.conf"
mode: "0660"
become: true
- when:
- - inventory_hostname in groups[service.group]
- - service.enabled | bool
- notify:
- - Restart zun-api container
+ when: service | service_enabled_and_mapped_to_host
- name: Copying over existing policy file
template:
@@ -95,7 +78,4 @@
become: true
when:
- zun_policy_file is defined
- - inventory_hostname in groups[item.value.group]
- with_dict: "{{ zun_services }}"
- notify:
- - Restart {{ item.key }} container
+ with_dict: "{{ zun_services | select_services_enabled_and_mapped_to_host }}"
diff --git a/ansible/roles/zun/tasks/config_validate.yml b/ansible/roles/zun/tasks/config_validate.yml
new file mode 100644
index 0000000000..9803248961
--- /dev/null
+++ b/ansible/roles/zun/tasks/config_validate.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-config-validate
+ vars:
+ service_config_validate_services: "{{ zun_services }}"
+ service_name: "{{ project_name }}"
+ service_config_validation: "{{ zun_config_validation }}"
diff --git a/ansible/roles/zun/tasks/external_ceph.yml b/ansible/roles/zun/tasks/external_ceph.yml
index 325059eaa7..e57410b6bb 100644
--- a/ansible/roles/zun/tasks/external_ceph.yml
+++ b/ansible/roles/zun/tasks/external_ceph.yml
@@ -1,22 +1,18 @@
---
- name: Copying over ceph.conf for Zun
copy:
- src: "{{ node_custom_config }}/zun/zun-compute/ceph.conf"
+ src: "{{ node_custom_config }}/zun/zun-compute/{{ ceph_cluster }}.conf"
dest: "{{ node_config_directory }}/zun-compute/"
mode: "0660"
become: true
- notify:
- - Restart zun-compute container
- name: Copy over Ceph keyring files for zun-compute
copy:
- src: "{{ node_custom_config }}/zun/zun-compute/{{ ceph_cinder_keyring }}"
+ src: "{{ node_custom_config }}/zun/zun-compute/{{ ceph_cluster }}.client.{{ ceph_cinder_user }}.keyring"
dest: "{{ node_config_directory }}/zun-compute/"
mode: "0660"
become: true
when: external_ceph_cephx_enabled | bool
- notify:
- - Restart zun-compute container
- name: Ensuring config directory has correct owner and permission
become: true
diff --git a/ansible/roles/zun/tasks/precheck.yml b/ansible/roles/zun/tasks/precheck.yml
index 38585515b4..244f483b60 100644
--- a/ansible/roles/zun/tasks/precheck.yml
+++ b/ansible/roles/zun/tasks/precheck.yml
@@ -8,10 +8,13 @@
- name: Get container facts
become: true
kolla_container_facts:
+ action: get_containers
+ container_engine: "{{ kolla_container_engine }}"
name:
- zun_api
- zun_wsproxy
- zun_cni_daemon
+ check_mode: false
register: container_facts
- name: Checking free port for Zun API
@@ -48,10 +51,9 @@
- inventory_hostname in groups['zun-cni-daemon']
- name: Ensure kuryr enabled for zun
- fail:
- msg: "kuryr is required but not enabled"
+ assert:
+ that: enable_kuryr | bool
+ fail_msg: "kuryr is required but not enabled"
run_once: True
- changed_when: false
when:
- enable_zun | bool
- - not enable_kuryr | bool
diff --git a/ansible/roles/zun/templates/zun-api.json.j2 b/ansible/roles/zun/templates/zun-api.json.j2
index 8b161c2565..18ed961724 100644
--- a/ansible/roles/zun/templates/zun-api.json.j2
+++ b/ansible/roles/zun/templates/zun-api.json.j2
@@ -20,6 +20,12 @@
"dest": "/etc/zun/{{ zun_policy_file }}",
"owner": "zun",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/zun/templates/zun-cni-daemon.json.j2 b/ansible/roles/zun/templates/zun-cni-daemon.json.j2
index 504fc48f90..3e165e06ba 100644
--- a/ansible/roles/zun/templates/zun-cni-daemon.json.j2
+++ b/ansible/roles/zun/templates/zun-cni-daemon.json.j2
@@ -6,7 +6,13 @@
"dest": "/etc/zun/zun.conf",
"owner": "zun",
"perm": "0600"
- }
+ }{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
+ }{% endif %}
],
"permissions": [
{
diff --git a/ansible/roles/zun/templates/zun-compute.json.j2 b/ansible/roles/zun/templates/zun-compute.json.j2
index 36d6527dce..c9f9a7ccdf 100644
--- a/ansible/roles/zun/templates/zun-compute.json.j2
+++ b/ansible/roles/zun/templates/zun-compute.json.j2
@@ -8,15 +8,15 @@
"perm": "0600"
},
{
- "source": "{{ container_config_directory }}/{{ ceph_cinder_keyring }}",
- "dest": "/etc/ceph/{{ ceph_cinder_keyring }}",
+ "source": "{{ container_config_directory }}/{{ ceph_cluster }}.client.{{ ceph_cinder_user }}.keyring",
+ "dest": "/etc/ceph/{{ ceph_cluster }}.client.{{ ceph_cinder_user }}.keyring",
"owner": "zun",
"perm": "0600",
"optional": {{ (not zun_configure_for_cinder_ceph | bool) | string | lower }}
},
{
- "source": "{{ container_config_directory }}/ceph.conf",
- "dest": "/etc/ceph/ceph.conf",
+ "source": "{{ container_config_directory }}/{{ ceph_cluster }}.conf",
+ "dest": "/etc/ceph/{{ ceph_cluster }}.conf",
"owner": "zun",
"perm": "0600",
"optional": {{ (not zun_configure_for_cinder_ceph | bool) | string | lower }}
@@ -26,6 +26,12 @@
"dest": "/etc/zun/{{ zun_policy_file }}",
"owner": "zun",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/zun/templates/zun-wsproxy.json.j2 b/ansible/roles/zun/templates/zun-wsproxy.json.j2
index 90defc385e..89dd398b60 100644
--- a/ansible/roles/zun/templates/zun-wsproxy.json.j2
+++ b/ansible/roles/zun/templates/zun-wsproxy.json.j2
@@ -12,6 +12,12 @@
"dest": "/etc/zun/{{ zun_policy_file }}",
"owner": "zun",
"perm": "0600"
+ }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
+ {
+ "source": "{{ container_config_directory }}/ca-certificates",
+ "dest": "/var/lib/kolla/share/ca-certificates",
+ "owner": "root",
+ "perm": "0600"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/zun/templates/zun.conf.j2 b/ansible/roles/zun/templates/zun.conf.j2
index 55b6613d63..03979ac148 100644
--- a/ansible/roles/zun/templates/zun.conf.j2
+++ b/ansible/roles/zun/templates/zun.conf.j2
@@ -13,6 +13,8 @@ state_path = /var/lib/zun
container_driver = docker
capsule_driver = cri
+sandbox_image = k8s.gcr.io/pause:3.6
+
[network]
driver = kuryr
@@ -39,7 +41,7 @@ region_name = {{ openstack_region_name }}
cafile = {{ openstack_cacert }}
{% if enable_memcached | bool %}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
{% endif %}
@@ -62,7 +64,7 @@ region_name = {{ openstack_region_name }}
cafile = {{ openstack_cacert }}
{% if enable_memcached | bool %}
-memcache_security_strategy = ENCRYPT
+memcache_security_strategy = {{ memcache_security_strategy }}
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
{% endif %}
@@ -114,7 +116,7 @@ host_shared_with_nova = {{ inventory_hostname in groups['compute'] and enable_no
[websocket_proxy]
wsproxy_host = {{ api_interface_address }}
wsproxy_port = {{ zun_wsproxy_port }}
-base_url = {{ zun_wsproxy_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ zun_wsproxy_port }}
+base_url = {{ zun_wsproxy_protocol }}://{{ zun_external_fqdn | put_address_in_context('url') }}:{{ zun_wsproxy_port }}
[docker]
api_url = tcp://{{ api_interface_address | put_address_in_context('url') }}:2375
@@ -124,8 +126,15 @@ docker_remote_api_port = 2375
[cni_daemon]
cni_daemon_port = {{ zun_cni_daemon_port }}
-{% if om_enable_rabbitmq_tls | bool %}
[oslo_messaging_rabbit]
+heartbeat_in_pthread = {{ service_name == 'zun-api' }}
+{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
+{% if om_enable_rabbitmq_high_availability | bool %}
+amqp_durable_queues = true
+{% endif %}
+{% if om_enable_rabbitmq_quorum_queues | bool %}
+rabbit_quorum_queue = true
+{% endif %}
diff --git a/ansible/site.yml b/ansible/site.yml
index 6fbbbe4db9..e4e7ce2337 100644
--- a/ansible/site.yml
+++ b/ansible/site.yml
@@ -7,6 +7,10 @@
- name: Group hosts based on configuration
hosts: all
gather_facts: false
+ max_fail_percentage: >-
+ {{ group_hosts_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
tasks:
- name: Group hosts based on Kolla action
group_by:
@@ -28,9 +32,7 @@
- enable_collectd_{{ enable_collectd | bool }}
- enable_cyborg_{{ enable_cyborg | bool }}
- enable_designate_{{ enable_designate | bool }}
- - enable_elasticsearch_{{ enable_elasticsearch | bool }}
- enable_etcd_{{ enable_etcd | bool }}
- - enable_freezer_{{ enable_freezer | bool }}
- enable_glance_{{ enable_glance | bool }}
- enable_gnocchi_{{ enable_gnocchi | bool }}
- enable_grafana_{{ enable_grafana | bool }}
@@ -40,10 +42,9 @@
- enable_influxdb_{{ enable_influxdb | bool }}
- enable_ironic_{{ enable_ironic | bool }}
- enable_iscsid_{{ enable_iscsid | bool }}
- - enable_kafka_{{ enable_kafka | bool }}
- enable_keystone_{{ enable_keystone | bool }}
- - enable_kibana_{{ enable_kibana | bool }}
- enable_kuryr_{{ enable_kuryr | bool }}
+ - enable_letsencrypt_{{ enable_letsencrypt | bool }}
- enable_loadbalancer_{{ enable_loadbalancer | bool }}
- enable_magnum_{{ enable_magnum | bool }}
- enable_manila_{{ enable_manila | bool }}
@@ -51,32 +52,25 @@
- enable_masakari_{{ enable_masakari | bool }}
- enable_memcached_{{ enable_memcached | bool }}
- enable_mistral_{{ enable_mistral | bool }}
- - enable_monasca_{{ enable_monasca | bool }}
- enable_multipathd_{{ enable_multipathd | bool }}
- - enable_murano_{{ enable_murano | bool }}
- enable_neutron_{{ enable_neutron | bool }}
- enable_nova_{{ enable_nova | bool }}
- enable_octavia_{{ enable_octavia | bool }}
+ - enable_opensearch_{{ enable_opensearch | bool }}
+ - enable_opensearch_dashboards_{{ enable_opensearch_dashboards | bool }}
- enable_openvswitch_{{ enable_openvswitch | bool }}_enable_ovs_dpdk_{{ enable_ovs_dpdk | bool }}
- - enable_outward_rabbitmq_{{ enable_outward_rabbitmq | bool }}
- enable_ovn_{{ enable_ovn | bool }}
- enable_placement_{{ enable_placement | bool }}
- enable_prometheus_{{ enable_prometheus | bool }}
- enable_rabbitmq_{{ enable_rabbitmq | bool }}
- enable_redis_{{ enable_redis | bool }}
- - enable_sahara_{{ enable_sahara | bool }}
- - enable_senlin_{{ enable_senlin | bool }}
- - enable_skydive_{{ enable_skydive | bool }}
- - enable_solum_{{ enable_solum | bool }}
- - enable_storm_{{ enable_storm | bool }}
+ - enable_skyline_{{ enable_skyline | bool }}
- enable_swift_{{ enable_swift | bool }}
- enable_tacker_{{ enable_tacker | bool }}
- enable_telegraf_{{ enable_telegraf | bool }}
- enable_trove_{{ enable_trove | bool }}
- enable_venus_{{ enable_venus | bool }}
- - enable_vitrage_{{ enable_vitrage | bool }}
- enable_watcher_{{ enable_watcher | bool }}
- - enable_zookeeper_{{ enable_zookeeper | bool }}
- enable_zun_{{ enable_zun | bool }}
tags: always
@@ -84,6 +78,10 @@
gather_facts: false
# Apply only when kolla action is 'precheck'.
hosts: kolla_action_precheck
+ max_fail_percentage: >-
+ {{ prechecks_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- role: prechecks
@@ -95,6 +93,10 @@
- kolla-logs
- kolla-toolbox
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ common_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
tags:
- common
roles:
@@ -106,6 +108,10 @@
- loadbalancer
- '&enable_loadbalancer_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ loadbalancer_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
tags:
- haproxy
- keepalived
@@ -155,15 +161,10 @@
tags: designate
when: enable_designate | bool
- include_role:
- name: elasticsearch
+ name: etcd
tasks_from: loadbalancer
- tags: elasticsearch
- when: enable_elasticsearch | bool
- - include_role:
- name: freezer
- tasks_from: loadbalancer
- tags: freezer
- when: enable_freezer | bool
+ tags: etcd
+ when: enable_etcd | bool
- include_role:
name: glance
tasks_from: loadbalancer
@@ -205,10 +206,10 @@
tags: keystone
when: enable_keystone | bool
- include_role:
- name: kibana
+ name: letsencrypt
tasks_from: loadbalancer
- tags: kibana
- when: enable_kibana | bool
+ tags: letsencrypt
+ when: enable_letsencrypt | bool
- include_role:
name: magnum
tasks_from: loadbalancer
@@ -239,16 +240,6 @@
tasks_from: loadbalancer
tags: mistral
when: enable_mistral | bool
- - include_role:
- name: monasca
- tasks_from: loadbalancer
- tags: monasca
- when: enable_monasca | bool
- - include_role:
- name: murano
- tasks_from: loadbalancer
- tags: murano
- when: enable_murano | bool
- include_role:
name: neutron
tasks_from: loadbalancer
@@ -277,6 +268,11 @@
tasks_from: loadbalancer
tags: octavia
when: enable_octavia | bool
+ - include_role:
+ name: opensearch
+ tasks_from: loadbalancer
+ tags: opensearch
+ when: enable_opensearch | bool
- include_role:
name: prometheus
tasks_from: loadbalancer
@@ -289,27 +285,12 @@
vars:
role_rabbitmq_cluster_cookie:
role_rabbitmq_groups:
- when: enable_rabbitmq | bool or enable_outward_rabbitmq | bool
- - include_role:
- name: sahara
- tasks_from: loadbalancer
- tags: sahara
- when: enable_sahara | bool
- - include_role:
- name: senlin
- tasks_from: loadbalancer
- tags: senlin
- when: enable_senlin | bool
- - include_role:
- name: skydive
- tasks_from: loadbalancer
- tags: skydive
- when: enable_skydive | bool
+ when: enable_rabbitmq | bool
- include_role:
- name: solum
+ name: skyline
tasks_from: loadbalancer
- tags: solum
- when: enable_solum | bool
+ tags: skyline
+ when: enable_skyline | bool
- include_role:
name: swift
tasks_from: loadbalancer
@@ -330,11 +311,6 @@
tasks_from: loadbalancer
tags: venus
when: enable_venus | bool
- - include_role:
- name: vitrage
- tasks_from: loadbalancer
- tags: vitrage
- when: enable_vitrage | bool
- include_role:
name: watcher
tasks_from: loadbalancer
@@ -349,25 +325,29 @@
- enable_haproxy | bool
- kolla_action in ['deploy', 'reconfigure', 'upgrade', 'config']
-- name: Apply role collectd
+- name: Apply role letsencrypt
gather_facts: false
hosts:
- - collectd
- - '&enable_collectd_True'
+ - letsencrypt
+ - '&enable_letsencrypt_True'
serial: '{{ kolla_serial|default("0") }}'
roles:
- - { role: collectd,
- tags: collectd }
+ - { role: letsencrypt,
+ tags: letsencrypt }
-- name: Apply role zookeeper
+- name: Apply role collectd
gather_facts: false
hosts:
- - zookeeper
- - '&enable_zookeeper_True'
+ - collectd
+ - '&enable_collectd_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ collectd_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- - { role: zookeeper,
- tags: zookeeper }
+ - { role: collectd,
+ tags: collectd }
- name: Apply role influxdb
gather_facts: false
@@ -375,6 +355,10 @@
- influxdb
- '&enable_influxdb_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ influxdb_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: influxdb,
tags: influxdb }
@@ -385,6 +369,10 @@
- telegraf
- '&enable_telegraf_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ telegraf_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: telegraf,
tags: telegraf }
@@ -395,18 +383,17 @@
- redis
- '&enable_redis_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ redis_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: redis,
tags: redis }
-- name: Apply role mariadb
- gather_facts: false
- hosts:
- - mariadb
- - '&enable_mariadb_True'
- roles:
- - { role: mariadb,
- tags: mariadb }
+# MariaDB deployment is more complicated than other services, so is covered in
+# its own playbook.
+- import_playbook: mariadb.yml
- name: Apply role memcached
gather_facts: false
@@ -414,6 +401,10 @@
- memcached
- '&enable_memcached_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ memcached_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: memcached,
tags: [memcache, memcached] }
@@ -424,7 +415,6 @@
- prometheus
- prometheus-node-exporter
- prometheus-mysqld-exporter
- - prometheus-haproxy-exporter
- prometheus-memcached-exporter
- prometheus-cadvisor
- prometheus-alertmanager
@@ -434,6 +424,10 @@
- prometheus-libvirt-exporter
- '&enable_prometheus_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ prometheus_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: prometheus,
tags: prometheus }
@@ -445,6 +439,10 @@
- tgtd
- '&enable_iscsid_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ iscsid_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: iscsi,
tags: iscsi }
@@ -455,55 +453,25 @@
- multipathd
- '&enable_multipathd_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ multipathd_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: multipathd,
tags: multipathd }
-- name: Apply role rabbitmq
- gather_facts: false
- hosts:
- - rabbitmq
- - '&enable_rabbitmq_True'
- roles:
- - { role: rabbitmq,
- tags: rabbitmq,
- role_rabbitmq_cluster_cookie: '{{ rabbitmq_cluster_cookie }}',
- role_rabbitmq_cluster_port: '{{ rabbitmq_cluster_port }}',
- role_rabbitmq_epmd_port: '{{ rabbitmq_epmd_port }}',
- role_rabbitmq_groups: rabbitmq,
- role_rabbitmq_management_port: '{{ rabbitmq_management_port }}',
- role_rabbitmq_monitoring_password: '{{ rabbitmq_monitoring_password }}',
- role_rabbitmq_monitoring_user: '{{ rabbitmq_monitoring_user }}',
- role_rabbitmq_password: '{{ rabbitmq_password }}',
- role_rabbitmq_port: '{{ rabbitmq_port }}',
- role_rabbitmq_prometheus_port: '{{ rabbitmq_prometheus_port }}',
- role_rabbitmq_user: '{{ rabbitmq_user }}' }
-
-- name: Apply role rabbitmq (outward)
- gather_facts: false
- hosts:
- - outward-rabbitmq
- - '&enable_outward_rabbitmq_True'
- roles:
- - { role: rabbitmq,
- tags: rabbitmq,
- project_name: outward_rabbitmq,
- role_rabbitmq_cluster_cookie: '{{ outward_rabbitmq_cluster_cookie }}',
- role_rabbitmq_cluster_port: '{{ outward_rabbitmq_cluster_port }}',
- role_rabbitmq_epmd_port: '{{ outward_rabbitmq_epmd_port }}',
- role_rabbitmq_groups: outward-rabbitmq,
- role_rabbitmq_management_port: '{{ outward_rabbitmq_management_port }}',
- role_rabbitmq_password: '{{ outward_rabbitmq_password }}',
- role_rabbitmq_port: '{{ outward_rabbitmq_port }}',
- role_rabbitmq_prometheus_port: '{{ outward_rabbitmq_prometheus_port }}',
- role_rabbitmq_user: '{{ outward_rabbitmq_user }}' }
+- import_playbook: rabbitmq.yml
- name: Apply role etcd
gather_facts: false
hosts:
- etcd
- '&enable_etcd_True'
- serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ etcd_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: etcd,
tags: etcd }
@@ -514,50 +482,27 @@
- keystone
- '&enable_keystone_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ keystone_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: keystone,
tags: keystone }
-- name: Apply role elasticsearch
- gather_facts: false
- hosts:
- - elasticsearch
- - '&enable_elasticsearch_True'
- serial: '{{ kolla_serial|default("0") }}'
- roles:
- - { role: elasticsearch,
- tags: elasticsearch }
-
-- name: Apply role kibana
+- name: Apply role opensearch
gather_facts: false
hosts:
- - kibana
- - '&enable_kibana_True'
+ - opensearch
+ - '&enable_opensearch_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ opensearch_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- - { role: kibana,
- tags: kibana }
-
-- name: Apply role kafka
- gather_facts: false
- hosts:
- - kafka
- - '&enable_kafka_True'
- serial: '{{ kolla_serial|default("0") }}'
- roles:
- - { role: kafka,
- tags: kafka }
-
-- name: Apply role storm
- gather_facts: false
- hosts:
- - storm-worker
- - storm-nimbus
- - '&enable_storm_True'
- serial: '{{ kolla_serial|default("0") }}'
- roles:
- - { role: storm,
- tags: storm }
+ - { role: opensearch,
+ tags: opensearch }
- name: Apply role swift
gather_facts: false
@@ -568,6 +513,10 @@
- swift-proxy-server
- '&enable_swift_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ swift_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: swift,
tags: swift }
@@ -580,6 +529,10 @@
- kolla-toolbox
- '&enable_ceph_rgw_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ ceph_rgw_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: ceph-rgw,
tags: ceph-rgw }
@@ -590,6 +543,10 @@
- glance-api
- '&enable_glance_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ glance_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: glance,
tags: glance }
@@ -604,6 +561,10 @@
- ironic-http
- '&enable_ironic_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ ironic_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: ironic,
tags: ironic }
@@ -617,6 +578,10 @@
- cinder-volume
- '&enable_cinder_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ cinder_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: cinder,
tags: cinder }
@@ -627,20 +592,24 @@
- placement-api
- '&enable_placement_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ placement_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: placement,
tags: placement }
-# Nova deployment is more complicated than other services, so is covered in its
-# own playbook.
-- import_playbook: nova.yml
-
- name: Apply role openvswitch
gather_facts: false
hosts:
- openvswitch
- '&enable_openvswitch_True_enable_ovs_dpdk_False'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ openvswitch_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: openvswitch,
tags: openvswitch,
@@ -652,23 +621,48 @@
- openvswitch
- '&enable_openvswitch_True_enable_ovs_dpdk_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ ovs_dpdk_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: ovs-dpdk,
tags: ovs-dpdk,
when: "(enable_openvswitch | bool) and (enable_ovs_dpdk | bool)"}
-- name: Apply role ovn
+- name: Apply role ovn-controller
gather_facts: false
hosts:
- ovn-controller
+ - '&enable_ovn_True'
+ serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ ovn_controller_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
+ roles:
+ - { role: ovn-controller,
+ tags: [ovn, ovn-controller] }
+
+- name: Apply role ovn-db
+ gather_facts: false
+ hosts:
- ovn-nb-db
- ovn-northd
- ovn-sb-db
- '&enable_ovn_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ ovn_db_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- - { role: ovn,
- tags: ovn }
+ - { role: ovn-db,
+ tags: [ovn, ovn-db] }
+
+# Nova deployment is more complicated than other services, so is covered in its
+# own playbook.
+- import_playbook: nova.yml
- name: Apply role neutron
gather_facts: false
@@ -686,6 +680,10 @@
- manila-share
- '&enable_neutron_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ neutron_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: neutron,
tags: neutron }
@@ -696,6 +694,10 @@
- compute
- '&enable_kuryr_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ kuryr_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: kuryr,
tags: kuryr }
@@ -707,6 +709,10 @@
- hacluster-remote
- '&enable_hacluster_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ hacluster_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: hacluster,
tags: hacluster }
@@ -719,6 +725,10 @@
- heat-engine
- '&enable_heat_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ heat_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: heat,
tags: heat }
@@ -729,36 +739,14 @@
- horizon
- '&enable_horizon_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ horizon_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: horizon,
tags: horizon }
-- name: Apply role murano
- gather_facts: false
- hosts:
- - murano-api
- - murano-engine
- - '&enable_murano_True'
- serial: '{{ kolla_serial|default("0") }}'
- roles:
- - { role: murano,
- tags: murano }
-
-- name: Apply role solum
- gather_facts: false
- hosts:
- - solum-api
- - solum-worker
- - solum-deployer
- - solum-conductor
- - solum-application-deployment
- - solum-image-builder
- - '&enable_solum_True'
- serial: '{{ kolla_serial|default("0") }}'
- roles:
- - { role: solum,
- tags: solum }
-
- name: Apply role magnum
gather_facts: false
hosts:
@@ -766,6 +754,10 @@
- magnum-conductor
- '&enable_magnum_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ magnum_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: magnum,
tags: magnum }
@@ -779,21 +771,14 @@
- mistral-event-engine
- '&enable_mistral_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ mistral_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: mistral,
tags: mistral }
-- name: Apply role sahara
- gather_facts: false
- hosts:
- - sahara-api
- - sahara-engine
- - '&enable_sahara_True'
- serial: '{{ kolla_serial|default("0") }}'
- roles:
- - { role: sahara,
- tags: sahara }
-
- name: Apply role manila
gather_facts: false
hosts:
@@ -803,6 +788,10 @@
- manila-scheduler
- '&enable_manila_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ manila_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: manila,
tags: manila }
@@ -815,6 +804,10 @@
- gnocchi-statsd
- '&enable_gnocchi_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ gnocchi_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: gnocchi,
tags: gnocchi }
@@ -828,28 +821,14 @@
- ceilometer-ipmi
- '&enable_ceilometer_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ ceilometer_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: ceilometer,
tags: ceilometer }
-- name: Apply role monasca
- gather_facts: false
- hosts:
- - monasca-agent-collector
- - monasca-agent-forwarder
- - monasca-agent-statsd
- - monasca-api
- - monasca-log-persister
- - monasca-log-metrics
- - monasca-thresh
- - monasca-notification
- - monasca-persister
- - '&enable_monasca_True'
- serial: '{{ kolla_serial|default("0") }}'
- roles:
- - { role: monasca,
- tags: monasca }
-
- name: Apply role aodh
gather_facts: false
hosts:
@@ -859,6 +838,10 @@
- aodh-notifier
- '&enable_aodh_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ aodh_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: aodh,
tags: aodh }
@@ -871,6 +854,10 @@
- barbican-worker
- '&enable_barbican_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ barbican_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: barbican,
tags: barbican }
@@ -883,6 +870,10 @@
- cyborg-conductor
- '&enable_cyborg_True'
serial: '{{ serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ cyborg_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: cyborg,
tags: cyborg }
@@ -899,6 +890,10 @@
- designate-backend-bind9
- '&enable_designate_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ designate_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: designate,
tags: designate }
@@ -911,6 +906,10 @@
- trove-taskmanager
- '&enable_trove_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ trove_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: trove,
tags: trove }
@@ -923,6 +922,10 @@
- watcher-applier
- '&enable_watcher_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ watcher_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: watcher,
tags: watcher }
@@ -933,6 +936,10 @@
- grafana
- '&enable_grafana_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ grafana_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: grafana,
tags: grafana }
@@ -944,34 +951,14 @@
- cloudkitty-processor
- '&enable_cloudkitty_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ cloudkitty_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: cloudkitty,
tags: cloudkitty }
-- name: Apply role freezer
- gather_facts: false
- hosts:
- - freezer-api
- - freezer-scheduler
- - '&enable_freezer_True'
- serial: '{{ kolla_serial|default("0") }}'
- roles:
- - { role: freezer,
- tags: freezer }
-
-- name: Apply role senlin
- gather_facts: false
- hosts:
- - senlin-api
- - senlin-conductor
- - senlin-engine
- - senlin-health-manager
- - '&enable_senlin_True'
- serial: '{{ kolla_serial|default("0") }}'
- roles:
- - { role: senlin,
- tags: senlin }
-
- name: Apply role tacker
gather_facts: false
hosts:
@@ -979,6 +966,10 @@
- tacker-conductor
- '&enable_tacker_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ tacker_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: tacker,
tags: tacker }
@@ -992,6 +983,10 @@
- octavia-worker
- '&enable_octavia_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ octavia_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: octavia,
tags: octavia }
@@ -1005,35 +1000,14 @@
- zun-cni-daemon
- '&enable_zun_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ zun_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: zun,
tags: zun }
-- name: Apply role skydive
- gather_facts: false
- hosts:
- - skydive-agent
- - skydive-analyzer
- - '&enable_skydive_True'
- serial: '{{ kolla_serial|default("0") }}'
- roles:
- - { role: skydive,
- tags: skydive }
-
-- name: Apply role vitrage
- gather_facts: false
- hosts:
- - vitrage-api
- - vitrage-graph
- - vitrage-notifier
- - vitrage-ml
- - vitrage-persistor
- - '&enable_vitrage_True'
- serial: '{{ kolla_serial|default("0") }}'
- roles:
- - { role: vitrage,
- tags: vitrage }
-
- name: Apply role blazar
gather_facts: false
hosts:
@@ -1041,6 +1015,10 @@
- blazar-manager
- '&enable_blazar_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ blazar_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: blazar,
tags: blazar }
@@ -1054,6 +1032,10 @@
- masakari-instancemonitor
- '&enable_masakari_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ masakari_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: masakari,
tags: masakari }
@@ -1065,6 +1047,24 @@
- venus-manager
- '&enable_venus_True'
serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ venus_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
roles:
- { role: venus,
tags: venus }
+
+- name: Apply role skyline
+ gather_facts: false
+ hosts:
+ - skyline
+ - '&enable_skyline_True'
+ serial: '{{ kolla_serial|default("0") }}'
+ max_fail_percentage: >-
+ {{ skyline_max_fail_percentage |
+ default(kolla_max_fail_percentage) |
+ default(100) }}
+ roles:
+ - { role: skyline,
+ tags: skyline }
diff --git a/contrib/demos/tacker/README.rst b/contrib/demos/tacker/README.rst
deleted file mode 100644
index 22138c0009..0000000000
--- a/contrib/demos/tacker/README.rst
+++ /dev/null
@@ -1,20 +0,0 @@
-A Kolla Demo using Tacker
-=========================
-
-By default, the deploy script will spawn 1 Nova instance on a Neutron
-network created from the tools/init-runonce script.
-
-Then run the deploy script:
-
-::
-
- $ ./deploy-tacker-demo
-
-After the demo is deployed, a cleanup script can be used to remove
-resources created by deploy script.
-
-To run the cleanup script:
-
-::
-
- $ ./cleanup-tacker
diff --git a/contrib/demos/tacker/cleanup-tacker b/contrib/demos/tacker/cleanup-tacker
deleted file mode 100644
index a21dc1bd19..0000000000
--- a/contrib/demos/tacker/cleanup-tacker
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-if [[ -f kolla-sample-vnffgd.yaml ]]; then
- echo "Deleting VNFFG"
- openstack vnf graph delete kolla-sample-vnffg
- echo "Deleting VNFFGD"
- openstack vnf graph descriptor delete kolla-sample-vnffgd
- echo "Deleting sample sfc instances"
- openstack server delete kolla_sfc_server kolla_sfc_client
-fi
-echo "Deleting sample VNF"
-openstack vnf delete kolla-sample-vnf
-while openstack vnf list | grep -q kolla-sample-vnf; do
- sleep 1
-done
-echo "Deleting sample VNFD"
-openstack vnf descriptor delete kolla-sample-vnfd
-echo "Deleting sample VIM"
-openstack vim delete kolla-sample-vim
-echo "Removing sample config"
-rm -rf ./kolla-sample-*
diff --git a/contrib/demos/tacker/deploy-tacker-demo b/contrib/demos/tacker/deploy-tacker-demo
deleted file mode 100644
index 990c3e3349..0000000000
--- a/contrib/demos/tacker/deploy-tacker-demo
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/bash
-
-function gen_config {
- echo "Generating sample config"
- DEMO_NET=$(openstack network list | awk '/demo-net/ { print $2 }')
- IMAGE_ID=$(openstack image list | awk '/cirros/ { print $2 }')
- cat > ./kolla-sample-vim.yaml < ./kolla-sample-vnfd.yaml < /proc/sys/net/ipv4/ip_forward
-
- CP11:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: ${DEMO_NET}
- vendor: Tacker
-EOF
-}
-
-function deploy {
- echo "Registering sample VIM"
- openstack vim register --config-file ./kolla-sample-vim.yaml --description "kolla sample vim" --is-default kolla-sample-vim
- echo "Creating sample VNFD"
- openstack vnf descriptor create --vnfd-file ./kolla-sample-vnfd.yaml kolla-sample-vnfd
- echo "Creating sample VNF"
- VNFD_ID=$(openstack vnf descriptor list | awk '/kolla-sample-vnfd/ { print $2 }')
- openstack vnf create --vnfd-id ${VNFD_ID} kolla-sample-vnf
-}
-
-gen_config
-deploy
diff --git a/contrib/demos/tacker/deploy-tacker-demo-sfc b/contrib/demos/tacker/deploy-tacker-demo-sfc
deleted file mode 100644
index 8741d8d64c..0000000000
--- a/contrib/demos/tacker/deploy-tacker-demo-sfc
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/bin/bash
-
-function create_servers {
- echo "Creating SFC demo instances"
- DEMO_NET=$(openstack network list | awk '/demo-net/ { print $2 }')
- IMAGE_ID=$(openstack image list | awk '/cirros/ { print $2 }')
- FLOATING_IP_CLIENT=$(openstack floating ip create public1 -c floating_ip_address -f value)
- FLOATING_IP_SERVER=$(openstack floating ip create public1 -c floating_ip_address -f value)
- openstack server create --wait --flavor m1.tiny --image $IMAGE_ID --nic net-id=$DEMO_NET kolla_sfc_server
- openstack server create --wait --flavor m1.tiny --image $IMAGE_ID --nic net-id=$DEMO_NET kolla_sfc_client
- openstack server add floating ip kolla_sfc_client $FLOATING_IP_CLIENT
- openstack server add floating ip kolla_sfc_server $FLOATING_IP_SERVER
- KOLLA_SFC_CLIENT_PORT=$(openstack port list --server kolla_sfc_client | awk '/ACTIVE/ {print $2}')
-}
-
-function sfc_gen_config {
- echo "Tacker SFC config files"
- cat > ./kolla-sample-vnffgd.yaml <CP12)
- properties:
- id: 51
- policy:
- type: ACL
- criteria:
- - name: block_http
- classifier:
- network_src_port_id: ${KOLLA_SFC_CLIENT_PORT}
- network_id: ${DEMO_NET}
- ip_proto: 6
- destination_port_range: 80-80
- path:
- - forwarder: kolla-sample-vnfd
- capability: CP11
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 1
- dependent_virtual_link: [VL1]
- connection_point: [CP11]
- constituent_vnfs: [kolla-sample-vnfd]
- members: [Forwarding_path1]
-EOF
-}
-
-function deploy_sfc {
- bash ./deploy-tacker-demo
- create_servers
- sfc_gen_config
- echo "Creating VNFFGD"
- openstack vnf graph descriptor create --vnffgd-file kolla-sample-vnffgd.yaml kolla-sample-vnffgd
- echo "Creating VNFFG"
- openstack vnf graph create --vnffgd-name kolla-sample-vnffgd kolla-sample-vnffg
- echo "Tacker sfc client floating ip address: $FLOATING_IP_CLIENT"
- echo "Tacker sfc server floating ip address: $FLOATING_IP_SERVER"
- cat << EOF
-
-Done.
-
-To create simple HTTP server in tacker_sfc_server instance run:
-
-ssh cirros@$FLOATING_IP_SERVER 'while true; \\
- do echo -e "HTTP/1.0 200 OK\r\n\r\nW00t from Kolla HTTP server!" | sudo nc -l -p 80 ; done &'
-
-EOF
-}
-
-deploy_sfc
diff --git a/doc/source/admin/etcd.rst b/doc/source/admin/etcd.rst
new file mode 100644
index 0000000000..6353c33831
--- /dev/null
+++ b/doc/source/admin/etcd.rst
@@ -0,0 +1,100 @@
+.. _etcd:
+
+=============
+Managing etcd
+=============
+
+Kolla Ansible can manage the lifecycle of an etcd cluster and supports the
+following operations:
+
+* Bootstrapping a clean multi-node etcd cluster.
+* Adding a new member to the etcd cluster.
+* Optionally, automatically removing a deleted node from the etcd cluster.
+
+It is highly recommended to read the operator documentation for the version
+of etcd deployed in the cluster.
+
+.. note::
+
+ Once an etcd cluster is bootstrapped, the etcd service takes most of its
+ configuration from the etcd database itself.
+
+ This pattern is very different from many other Kolla Ansible services, and
+ is a source of confusion for operators unfamiliar with etcd.
+
+Cluster vs. Node Bootstrapping
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Kolla Ansible distinguishes between two forms of bootstrapping in an etcd
+cluster:
+
+* Bootstrapping multiple nodes at the same time to bring up a new cluster.
+* Bootstrapping a single node to add it to an existing cluster.
+
+These correspond to the ``new`` and ``existing`` parameters for
+``ETCD_INITIAL_CLUSTER_STATE`` in the upstream documentation. Once an etcd node
+has completed bootstrap, the bootstrap configuration is ignored, even if it is
+changed.
+
+Kolla Ansible will decide to perform a new cluster bootstrap if it detects that
+there is no existing data on the etcd nodes. Otherwise it assumes that there is
+a healthy etcd cluster and it will add a new node to it.
+
+Forcing Bootstrapping
+~~~~~~~~~~~~~~~~~~~~~
+
+Kolla Ansible looks for the ``kolla_etcd`` volume on the node. If this volume
+is available, it assumes that the bootstrap process has run on the node and
+the volume contains the required config.
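+
+You can check whether the volume already exists on a node with ``docker volume
+inspect`` (this returns an error when the volume is absent):
+
+.. code-block:: console
+
+   docker volume inspect kolla_etcd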
+
+However, if the process was interrupted (externally, or by an error), this
+volume might be misconfigured. In order to prevent data loss, manual
+intervention is required.
+
+Before retriggering a bootstrap, make sure that there is no valuable data on
+the volume. This may be the case because the node was never in service, or
+because the data is persisted elsewhere.
+
+To retrigger a bootstrap (for either the cluster, or for a single node),
+remove the volume from all affected nodes by running:
+
+.. code-block:: console
+
+ docker volume rm kolla_etcd
+
+Rerunning Kolla Ansible will then trigger the appropriate workflow and either
+a blank cluster will be bootstrapped, or an empty member will be added to
+the existing cluster.
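+
+For example, a re-run limited to the etcd service (the inventory file name
+``multinode`` here is illustrative) could look like:
+
+.. code-block:: console
+
+   kolla-ansible -i multinode deploy --tags etcd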
+
+Manual Commands
+~~~~~~~~~~~~~~~
+
+In order to manage etcd manually, the ``etcdctl`` command can be used inside
+the ``etcd`` container. This command has been set up with the appropriate
+environment variables for integrating with automation.
+
+``etcdctl`` is configured with JSON output by default; you can override that
+if you are running it yourself:
+
+.. code-block:: console
+
+ # list cluster members in a human-readable table
+ docker exec -it etcd etcdctl -w table member list
+
+Removing Dead Nodes
+~~~~~~~~~~~~~~~~~~~
+
+If ``globals.yml`` has the value ``etcd_remove_deleted_members: "yes"`` then
+etcd nodes that are not in the inventory will be removed from the etcd cluster.
+
+Any errors in the inventory can therefore cause unintended removal.
+
+To manually remove a dead node from the etcd cluster, use the following
+commands:
+
+.. code-block:: console
+
+ # list cluster members and identify dead member
+ docker exec -it etcd etcdctl -w table member list
+ # remove dead member
+ docker exec -it etcd etcdctl member remove MEMBER_ID_IN_HEX
diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst
index 720b663c3f..eb259951c1 100644
--- a/doc/source/admin/index.rst
+++ b/doc/source/admin/index.rst
@@ -9,5 +9,7 @@ Admin Guides
tls
acme
mariadb-backup-and-restore
+ etcd
production-architecture-guide
deployment-philosophy
+ password-rotation
diff --git a/doc/source/admin/mariadb-backup-and-restore.rst b/doc/source/admin/mariadb-backup-and-restore.rst
index ac699b62b6..14b69fefd2 100644
--- a/doc/source/admin/mariadb-backup-and-restore.rst
+++ b/doc/source/admin/mariadb-backup-and-restore.rst
@@ -83,7 +83,7 @@ following options on the first database node:
docker run --rm -it --volumes-from mariadb --name dbrestore \
--volume mariadb_backup:/backup \
- quay.io/openstack.kolla/centos-source-mariadb-server:|KOLLA_OPENSTACK_RELEASE| \
+ quay.io/openstack.kolla/mariadb-server:|KOLLA_OPENSTACK_RELEASE|-rocky-9 \
/bin/bash
(dbrestore) $ cd /backup
(dbrestore) $ rm -rf /backup/restore
@@ -105,7 +105,7 @@ place, again on the first node:
docker run --rm -it --volumes-from mariadb --name dbrestore \
--volume mariadb_backup:/backup \
- quay.io/openstack.kolla/centos-source-mariadb-server:|KOLLA_OPENSTACK_RELEASE| \
+ quay.io/openstack.kolla/mariadb-server:|KOLLA_OPENSTACK_RELEASE|-rocky-9 \
/bin/bash
(dbrestore) $ rm -rf /var/lib/mysql/*
(dbrestore) $ rm -rf /var/lib/mysql/\.[^\.]*
@@ -148,7 +148,7 @@ incremental backup,
docker run --rm -it --volumes-from mariadb --name dbrestore \
--volume mariadb_backup:/backup --tmpfs /backup/restore \
- quay.io/openstack.kolla/centos-source-mariadb-server:|KOLLA_OPENSTACK_RELEASE| \
+ quay.io/openstack.kolla/mariadb-server:|KOLLA_OPENSTACK_RELEASE|-rocky-9 \
/bin/bash
(dbrestore) $ cd /backup
(dbrestore) $ rm -rf /backup/restore
diff --git a/doc/source/admin/password-rotation.rst b/doc/source/admin/password-rotation.rst
new file mode 100644
index 0000000000..0e43c6f88f
--- /dev/null
+++ b/doc/source/admin/password-rotation.rst
@@ -0,0 +1,295 @@
+=================
+Password Rotation
+=================
+
+This guide describes how to change the internal secrets from ``passwords.yml``
+used by Kolla-Ansible. It does not cover every possible ``passwords.yml``
+variable, only the most common ones.
+
+.. warning::
+
+ Always back up your ``passwords.yml`` file before making any changes.
+ Otherwise, it is easy to make unrecoverable mistakes.
+
+.. warning::
+
+ This guide relies on recent changes to Kolla and Kolla-Ansible. You may
+ encounter errors if applying this guide to older deployments. It is
+ recommended that you update your containers and kolla-ansible to the latest
+ available versions before proceeding.
+
+Regenerating secrets
+--------------------
+
+Passwords can be quickly re-generated using ``kolla-genpwd``.
+
+Assuming an existing ``/etc/kolla/passwords.yml`` file, make a backup:
+
+.. code-block:: bash
+
+ cp /etc/kolla/passwords.yml ./passwords.yml.bak
+
+Edit the ``passwords.yml`` file to remove the password strings for any secrets
+that need to be regenerated, i.e. change ``foo: "bar"`` to ``foo:``.
+
+Regenerate the removed passwords:
+
+.. code-block:: bash
+
+ kolla-genpwd -p /etc/kolla/passwords.yml
+
+Applying regenerated secrets
+----------------------------
+
+The majority of the secrets can be applied by simply reconfiguring services
+with ``kolla-ansible reconfigure``. Below is a list of secrets that can be
+applied this way.
+
+
+* ``*_keystone_password``
+* ``*_database_password`` (excluding ``nova_database_password``)
+* ``*_ssh_key`` (excluding ``kolla_ssh_key``)
+* ``keystone_admin_password``
+* ``designate_rndc_key``
+* ``keepalived_password``
+* ``libvirt_sasl_password``
+* ``metadata_secret``
+* ``opensearch_dashboards_password``
+* ``osprofiler_secret``
+* ``prometheus_alertmanager_password``
+* ``qdrouterd_password``
+* ``redis_master_password``
+
+It is possible to change more secrets; however, some require manual steps. The
+manual steps vary depending on the secret. They are listed below in the order
+they should be applied if they are to be changed at the same time. Once all
+manual steps are complete, reconfigure services (``kolla-ansible
+reconfigure``).
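+
+For example, assuming an inventory file named ``inventory``, as used elsewhere
+in this guide:
+
+.. code-block:: bash
+
+   kolla-ansible -i inventory reconfigure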
+
+For simplicity, this guide assumes Docker is being used. The same commands
+should also work for Podman deployments by replacing instances of ``docker``
+with ``podman`` in all relevant commands.
+
+Kolla SSH key
+^^^^^^^^^^^^^
+There is currently no mechanism within Kolla-Ansible to rotate
+``kolla_ssh_key``. It is however a relatively simple task to perform using a
+standard Ansible playbook, or can be performed by hand on smaller deployments.
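+
+A minimal by-hand sketch is shown below. The ``kolla`` user and the key paths
+are illustrative assumptions, not values fixed by Kolla-Ansible:
+
+.. code-block:: bash
+
+   # generate a replacement keypair locally
+   ssh-keygen -t rsa -b 4096 -f ./kolla_ssh_key_new -N ''
+   # install the new public key on each host, keeping the old one until done
+   ssh-copy-id -i ./kolla_ssh_key_new.pub kolla@<host>
+
+Afterwards, remove the old public key from ``~/.ssh/authorized_keys`` on each
+host and update ``kolla_ssh_key`` in ``passwords.yml`` with the new private
+key.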
+
+Horizon Secret Key
+^^^^^^^^^^^^^^^^^^
+The Horizon secret key (``horizon_secret_key``) is unique because it explicitly
+supports rotation. In reality, it is a Django secret key, and is used for
+cryptographic signing, e.g. generating password recovery links. To minimise user
+impact, it is possible to set two secret keys at once. The new one will be used
+for generating new artifacts, while the old one will still be accepted for
+existing artifacts.
+
+Take note of the old password, generate a new one, and take note of it as well.
+
+Add it to the ``passwords.yml`` file, along with the old secret, in this
+exact format (including quotes in the middle):
+
+.. code:: bash
+
+ horizon_secret_key: newsecret' 'oldsecret
+
+It is important to remember to remove the old key and reconfigure services
+again, after all old artifacts have expired e.g. after approximately one to two
+weeks.
+
+Grafana Admin Password
+^^^^^^^^^^^^^^^^^^^^^^
+The Grafana admin password (``grafana_admin_password``) must be rotated
+manually.
+
+#. Generate a new Grafana Admin password.
+
+#. Replace the old password in ``passwords.yml``.
+
+#. Exec into any Grafana container:
+
+ .. code:: bash
+
+ docker exec -it grafana bash
+
+#. Run the password reset command, then enter the new password:
+
+ .. code:: bash
+
+ grafana-cli admin reset-admin-password --password-from-stdin
+
+Database Password
+^^^^^^^^^^^^^^^^^
+The database administrator password (``database_password``) must be rotated
+manually.
+
+#. Generate a new database password.
+
+#. Replace the old password in ``passwords.yml``, take note of both the old and
+ new passwords.
+
+#. SSH to a host running a MariaDB container.
+
+#. Exec into the MariaDB container:
+
+ .. code-block:: bash
+
+ docker exec -it mariadb bash
+
+#. Log in to the database. You will be prompted for the password. Use the
+ old value of ``database_password``:
+
+ .. code:: bash
+
+ mysql --batch -uroot -p
+
+#. Check the current state of the ``root`` user:
+
+ .. code:: bash
+
+ SELECT Host,User,Password FROM mysql.user WHERE User='root';
+
+#. Update the password for the ``root`` user:
+
+ .. code:: bash
+
+ SET PASSWORD FOR 'root'@'%' = PASSWORD('newpassword');
+
+#. Check that the password hash has changed in the user list:
+
+ .. code:: bash
+
+ SELECT Host,User,Password FROM mysql.user WHERE User='root';
+
+#. If there are any remaining root users with the old password e.g.
+ ``root@localhost``, change the password for them too.
+
+Nova Database Password
+^^^^^^^^^^^^^^^^^^^^^^
+The nova database admin user password (``nova_database_password``) must be
+rotated manually.
+
+.. warning::
+
+   From this point onward, the API service may be disrupted.
+
+#. Generate a new Nova database password.
+
+#. Replace the old password in ``passwords.yml``.
+
+#. Exec into the ``nova_conductor`` container:
+
+ .. code:: bash
+
+ docker exec -it nova_conductor bash
+
+#. List the cells:
+
+ .. code:: bash
+
+ nova-manage cell_v2 list_cells --verbose
+
+#. Find the entry for ``cell0``, copy the Database Connection value,
+ replace the password in the string with the new value, and update it
+ with the following command:
+
+ .. code:: bash
+
+ nova-manage cell_v2 update_cell --cell_uuid 00000000-0000-0000-0000-000000000000 --database_connection "CONNECTION WITH NEW PASSWORD HERE" --transport-url "none:///"
+
+ (If the ``cell_uuid`` for ``cell0`` is not
+ ``00000000-0000-0000-0000-000000000000``, change the above command
+ accordingly)
+
+Heat Domain Admin Password
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+The keystone password for the heat domain admin service user
+(``heat_domain_admin_password``) must be rotated manually.
+
+It can be changed by an administrator just like any other standard OpenStack
+user password. Generate a new password, replace the old password in
+``passwords.yml``, then apply the change manually:
+
+.. code-block:: bash
+
+   openstack user set --password <new password> heat_domain_admin --domain heat_user_domain
+
+RabbitMQ Secrets
+^^^^^^^^^^^^^^^^
+RabbitMQ uses two main secrets: an Erlang cookie for cluster membership
+(``rabbitmq_cluster_cookie``), and a RabbitMQ management user password
+(``rabbitmq_password``). There is currently no documented process for
+seamlessly rotating these secrets. Many OpenStack services use RabbitMQ for
+communication and reconfiguring them with the new credentials can take some
+time, resulting in a relatively long API outage.
+
+It is recommended that you stop all services, then stop and destroy the
+RabbitMQ containers and volumes. Because the RabbitMQ containers are destroyed,
+``kolla-ansible deploy`` should be used to restart services rather than
+``kolla-ansible reconfigure``. Detailed steps are listed below:
+
+#. Generate a new ``rabbitmq_cluster_cookie`` and ``rabbitmq_password``.
+
+#. Replace the old values in ``passwords.yml``.
+
+#. Stop OpenStack services:
+
+ .. code-block:: bash
+
+ kolla-ansible -i inventory stop
+
+#. On each node running RabbitMQ, destroy its containers and volumes:
+
+ .. code-block:: bash
+
+ docker stop rabbitmq
+ docker rm rabbitmq
+ docker volume rm rabbitmq
+
+#. Redeploy services:
+
+ .. code-block:: bash
+
+ kolla-ansible -i inventory deploy
+
+Post-redeploy changes
+^^^^^^^^^^^^^^^^^^^^^
+Once services have been redeployed, the existing Memcached data should be
+flushed. The old Memcached password will no longer be used, so any data stored
+using it will be inaccessible.
+
+The instructions below must be run from a host that has access to the network
+the Memcached containers are using. If you are not sure, run them from a host
+that is running Memcached.
+
+#. Install a telnet client:
+
+ .. code-block:: bash
+
+ apt/dnf install telnet
+
+#. Check the config for the IP and port used by Memcached (on every host
+ running Memcached):
+
+ .. code:: bash
+
+ sudo grep command /etc/kolla/memcached/config.json
+
+   The IP and port will be printed after ``-l`` and ``-p``, respectively.
+
+#. For each container, start a Telnet session, clear all data, then
+ exit:
+
+ .. code:: bash
+
+      telnet <memcached ip> <memcached port>
+ flush_all
+ quit
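+
+   Alternatively, the flush can be scripted non-interactively, e.g. with
+   ``nc`` (a sketch; some netcat variants need ``-q 1`` or ``-w 1`` to exit
+   after sending):
+
+   .. code:: bash
+
+      echo flush_all | nc -q 1 <memcached ip> <memcached port>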
+
+Known out-of-scope secrets
+--------------------------
+Below is a list of passwords that are known to be outside the scope of this
+guide.
+
+* ``docker_registry_password`` - kolla-ansible cannot manage docker registries.
diff --git a/doc/source/admin/production-architecture-guide.rst b/doc/source/admin/production-architecture-guide.rst
index e5cd896f9c..7aa2d7e074 100644
--- a/doc/source/admin/production-architecture-guide.rst
+++ b/doc/source/admin/production-architecture-guide.rst
@@ -78,12 +78,6 @@ In Kolla operators should configure following network interfaces:
with the bare metal cloud hosts in order to provide DHCP leases with
PXE boot options. Defaults to ``network_interface``.
-.. warning::
-
- Ansible facts does not recognize interface names containing dashes,
- in example ``br-ex`` or ``bond-0`` cannot be used because ansible will read
- them as ``br_ex`` and ``bond_0`` respectively.
-
.. _address-family-configuration:
Address family configuration (IPv4/IPv6)
diff --git a/doc/source/admin/tls.rst b/doc/source/admin/tls.rst
index f2acb9bb3d..5a42d00489 100644
--- a/doc/source/admin/tls.rst
+++ b/doc/source/admin/tls.rst
@@ -288,6 +288,50 @@ disable verification of the backend certificate:
.. _admin-tls-generating-a-private-ca:
+Generating TLS certificates with Let's Encrypt
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Let's Encrypt is a free, automated, and open certificate authority.
+
+To enable OpenStack to deploy the Let's Encrypt container to fetch
+certificates from the Let's Encrypt certificate authority, the following
+must be configured in ``globals.yml``:
+
+.. code-block:: yaml
+
+ enable_letsencrypt: "yes"
+ letsencrypt_email: ""
+
+The Let's Encrypt container will attempt to renew your certificates every 12
+hours. If the certificates are renewed, they will automatically be deployed
+to the HAProxy containers using SSH.
+
+.. note::
+
+   If ``letsencrypt_email`` is not a valid email address, the letsencrypt
+   role will not work correctly.
+
+.. note::
+
+   If ``enable_letsencrypt`` is set to true, HAProxy's socket will run with
+   the admin access level. This is needed so Let's Encrypt can interact
+ with HAProxy.
+
+You can configure separate ACME servers for internal and external
+certificate requests.
+
+.. code-block:: yaml
+
+ letsencrypt_external_cert_server: ""
+ letsencrypt_internal_cert_server: ""
+
+.. note::
+
+ The ``letsencrypt_external_cert_server`` has a default value of
+ ``https://acme-v02.api.letsencrypt.org/directory``. Ensure that
+ ``letsencrypt_internal_cert_server`` is reachable from the controller
+ if you configure it for internal certificate requests.
+
Generating a Private Certificate Authority
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -334,3 +378,29 @@ options for TLS as is.
If using this option, make sure that all certificates are present on the
appropriate hosts in the appropriate location.
+
+.. _haproxy-tls-settings:
+
+HAProxy TLS related settings
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You can select between different SSL/TLS ciphers by setting the following
+in ``/etc/kolla/globals.yml``:
+
+.. code-block:: yaml
+
+ kolla_haproxy_ssl_settings: "modern" # or "intermediate" or "legacy"
+
+The default value is "modern". These settings are adapted from the
+`Mozilla SSL Configuration Generator `__.
+
+The setting "modern" is recommended for most deployments. The setting
+"intermediate" is recommended for deployments that need to support older
+clients. The setting "legacy" is not recommended, but is left as a
+compatibility option for older deployments.
+
+See the `Mozilla SSL Configuration Generator `__
+for more information on exact supported client versions.
+
+The ``kolla_haproxy_ssl_settings`` setting also affects the Glance and
+Neutron HAProxy TLS settings, if these proxy services are enabled.
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 96ddf85634..35138b7539 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -110,26 +110,48 @@
'octavia',
'oslo.messaging',
'oslotest',
+ 'ovn-octavia-provider',
'swift',
'watcher',
]
# Global variables
# For replacement, use in docs as |VAR_NAME| (note there's no space around variable name)
-# When adding new variables, make sure you add them to GLOBAL_VARIABLE_MAP dictionary as well
+# When adding new variables that you want to use in documentation, make sure
+# you add them to the GLOBAL_VARIABLE_MAP dictionary as well.
+# KOLLA_OPENSTACK_RELEASE_UNMAINTAINED is used only to denote unmaintained
+# branches, and it is not intended to be used for replacing anything in
+# documentation.
KOLLA_OPENSTACK_RELEASE = openstackdocstheme.ext._get_series_name()
+KOLLA_OPENSTACK_RELEASE_UNMAINTAINED = [
+ 'yoga',
+ 'zed',
+]
+
if KOLLA_OPENSTACK_RELEASE == 'latest':
KOLLA_OPENSTACK_RELEASE = 'master'
KOLLA_BRANCH_NAME = 'master'
TESTED_RUNTIMES_GOVERNANCE_URL = 'https://governance.openstack.org/tc/reference/runtimes/'
+elif KOLLA_OPENSTACK_RELEASE in KOLLA_OPENSTACK_RELEASE_UNMAINTAINED:
+ KOLLA_BRANCH_NAME = 'unmaintained/{}'.format(KOLLA_OPENSTACK_RELEASE)
+ TESTED_RUNTIMES_GOVERNANCE_URL =\
+ 'https://governance.openstack.org/tc/reference/runtimes/{}.html'.format(KOLLA_OPENSTACK_RELEASE)
else:
KOLLA_BRANCH_NAME = 'stable/{}'.format(KOLLA_OPENSTACK_RELEASE)
TESTED_RUNTIMES_GOVERNANCE_URL =\
'https://governance.openstack.org/tc/reference/runtimes/{}.html'.format(KOLLA_OPENSTACK_RELEASE)
+ANSIBLE_CORE_VERSION_MIN = '2.16'
+ANSIBLE_CORE_VERSION_MAX = '2.17'
+ANSIBLE_VERSION_MIN = '9'
+ANSIBLE_VERSION_MAX = '10'
+
GLOBAL_VARIABLE_MAP = {
+ '|ANSIBLE_CORE_VERSION_MIN|': ANSIBLE_CORE_VERSION_MIN,
+ '|ANSIBLE_CORE_VERSION_MAX|': ANSIBLE_CORE_VERSION_MAX,
+ '|ANSIBLE_VERSION_MIN|': ANSIBLE_VERSION_MIN,
+ '|ANSIBLE_VERSION_MAX|': ANSIBLE_VERSION_MAX,
'|KOLLA_OPENSTACK_RELEASE|': KOLLA_OPENSTACK_RELEASE,
'|KOLLA_BRANCH_NAME|': KOLLA_BRANCH_NAME,
'|KOLLA_BRANCH_NAME_DASHED|': KOLLA_BRANCH_NAME.replace('/', '-'),
diff --git a/doc/source/contributor/ci.rst b/doc/source/contributor/ci.rst
index aa3f003656..71f9e30324 100644
--- a/doc/source/contributor/ci.rst
+++ b/doc/source/contributor/ci.rst
@@ -2,5 +2,29 @@
Continuous Integration (CI) & Testing
=====================================
-This page is a placeholder for information on the Kolla Ansible Continuous
-Integration (CI) & testing setup.
+Kolla-Ansible uses
+`Zuul `__
+for continuous integration. Similar to testing performed using
+`devstack `__, Kolla-Ansible is
+capable of integrating and testing pre-merged dependencies from many other
+projects.
+
+Debugging with ARA in CI
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Frequently, the need arises to obtain more verbose Ansible logging in CI.
+`ARA `__ is an Ansible plugin that collects a
+large amount of execution information and can render it into a
+browser-friendly format.
+
+This plugin is not enabled by default because there is a per-task overhead.
+However, it's possible to trigger it when trying to debug a failing job.
+
+By adding the text ``#ara`` to the git commit message of the review, the CI
+jobs will enable the plugin and generate a sqlite database containing
+comprehensive logging. It's possible to render an HTML version of this by
+using ``#ara_verbose`` instead. Generating the HTML is not very efficient,
+however, and consumes a large amount of log storage.
+
+Please note that git usually strips lines beginning with ``#`` from the
+commit message. This can be avoided by preceding the string with a space.
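+
+As an illustrative sketch, a commit message that triggers ARA could look like
+this (note the leading space before ``#ara``):
+
+.. code-block:: console
+
+   Fix keystone bootstrap race
+
+    #ara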
diff --git a/doc/source/contributor/kolla-for-openstack-development.rst b/doc/source/contributor/kolla-for-openstack-development.rst
index f420b49f5c..25125ce5e7 100644
--- a/doc/source/contributor/kolla-for-openstack-development.rst
+++ b/doc/source/contributor/kolla-for-openstack-development.rst
@@ -5,15 +5,9 @@ Using Kolla For OpenStack Development
Kolla-ansible can be used to deploy containers in a way suitable for doing
development on OpenStack services.
-.. note::
-
- This functionality is new in the Pike release.
-
Heat was the first service to be supported, and so the following will use
submitting a patch to Heat using Kolla as an example.
-Only source containers are supported.
-
.. warning::
Kolla dev mode is intended for OpenStack hacking or development only.
@@ -41,8 +35,9 @@ Usage
-----
When enabled, the source repo for the service in question will be cloned under
-``/opt/stack/`` on the target node(s). This will be bind mounted into the
-container's virtualenv under the location expected by the service on startup.
+``/opt/stack/`` on the target node(s). This will be bind mounted into the
+container's ``/dev-mode`` directory. From there, it will be installed at
+every startup of the container using the ``kolla_install_projects`` script.
After making code changes, simply restart the container to pick them up:
diff --git a/doc/source/contributor/running-tests.rst b/doc/source/contributor/running-tests.rst
index d5f4e66947..8dadda695a 100644
--- a/doc/source/contributor/running-tests.rst
+++ b/doc/source/contributor/running-tests.rst
@@ -68,25 +68,25 @@ directory use:
tox -e py38 kolla-ansible.tests
To run the tests of a specific file
-``kolla-ansible/tests/test_kolla_docker.py``:
+``kolla-ansible/tests/test_kolla_container.py``:
.. code-block:: console
- tox -e py38 test_kolla_docker
+ tox -e py38 test_kolla_container
To run the tests in the ``ModuleArgsTest`` class in
-the ``kolla-ansible/tests/test_kolla_docker.py`` file:
+the ``kolla-ansible/tests/test_kolla_container.py`` file:
.. code-block:: console
- tox -e py38 test_kolla_docker.ModuleArgsTest
+ tox -e py38 test_kolla_container.ModuleArgsTest
To run the ``ModuleArgsTest.test_module_args`` test method in
-the ``kolla-ansible/tests/test_kolla_docker.py`` file:
+the ``kolla-ansible/tests/test_kolla_container.py`` file:
.. code-block:: console
- tox -e py38 test_kolla_docker.ModuleArgsTest.test_module_args
+ tox -e py38 test_kolla_container.ModuleArgsTest.test_module_args
Debugging unit tests
--------------------
diff --git a/doc/source/contributor/vagrant-dev-env.rst b/doc/source/contributor/vagrant-dev-env.rst
index ed56654f19..ae1ebc43f8 100644
--- a/doc/source/contributor/vagrant-dev-env.rst
+++ b/doc/source/contributor/vagrant-dev-env.rst
@@ -67,12 +67,6 @@ Next install the hostmanager plugin so all hosts are recorded in ``/etc/hosts``
vagrant plugin install vagrant-hostmanager
-If you are going to use VirtualBox, then install vagrant-vbguest:
-
-.. code-block:: console
-
- vagrant plugin install vagrant-vbguest
-
Vagrant supports a wide range of virtualization technologies. If VirtualBox is
used, the vbguest plugin will be required to install the VirtualBox Guest
Additions in the virtual machine:
diff --git a/doc/source/reference/bare-metal/ironic-guide.rst b/doc/source/reference/bare-metal/ironic-guide.rst
index 3ae418e2f2..62fc93673d 100644
--- a/doc/source/reference/bare-metal/ironic-guide.rst
+++ b/doc/source/reference/bare-metal/ironic-guide.rst
@@ -42,6 +42,16 @@ are possible by separating addresses with commas):
- range: "192.168.5.100,192.168.5.110"
routers: "192.168.5.1"
+Together with a router, an NTP (time source) server can be provided. For
+example, it can be the same address as the default router for the range:
+
+.. code-block:: yaml
+
+ ironic_dnsmasq_dhcp_ranges:
+ - range: "192.168.5.100,192.168.5.110"
+ routers: "192.168.5.1"
+ ntp_server: "192.168.5.1"
+
To support DHCP relay, it is also possible to define a netmask in the range.
It is advisable to also provide a router to allow the traffic to reach the
Ironic server.
@@ -69,7 +79,7 @@ The default lease time for each range can be configured globally via
In the same file, specify the PXE bootloader file for Ironic Inspector. The
file is relative to the ``/var/lib/ironic/tftpboot`` directory. The default is
``pxelinux.0``, and should be correct for x86 systems. Other platforms may
-require a differentvalue, for example aarch64 on Debian requires
+require a different value, for example aarch64 on Debian requires
``debian-installer/arm64/bootnetaa64.efi``.
.. code-block:: yaml
@@ -148,6 +158,49 @@ variable ``ironic_enable_keystone_integration`` to ``"yes"``
ironic_enable_keystone_integration: "yes"
+Avoiding problems with high availability
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. note::
+
+   This section assumes that you have not yet deployed the Nova Compute
+   Ironic service. If you have already deployed multiple instances of the
+   service and have one or more baremetal nodes registered, the following
+   operations are non-trivial. You will likely have to use the
+   ``nova-manage`` command (or, pre-Caracal, edit the DB) to ensure that all
+   Ironic nodes are registered with a single Nova Compute Ironic instance.
+   This is an advanced subject and is not covered here. Stop now if you
+   don't know what you are doing.
+
+Nova Compute Ironic HA is known to be unstable. Pending a better solution,
+a workaround is to avoid the feature by running a single Nova Compute Ironic
+instance. For example:
+
+.. code-block:: diff
+
+ - [nova-compute-ironic:children]
+ - nova
+ + [nova-compute-ironic]
+ + controller1
+
+If you choose to do this, it is helpful to pin the service host name
+to a 'synthetic' constant. This means that if you need to re-deploy the
+service to another host, the Ironic nodes will automatically use the new
+service instance. Otherwise you will need to manually move active Ironic
+nodes to the new service, either with the ``nova-manage`` CLI or,
+pre-Caracal, by editing the Nova database.
+
+The config option to pin the host name is ``nova_compute_ironic_custom_host``
+and must be set as a group or host var. Note that, unless you know what you
+are doing, you must not change or set this option if you have already deployed
+Ironic nodes.
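+
+A minimal sketch of pinning the host name via a group variable (the value
+``ironic-top-level`` and the file path are purely illustrative):
+
+.. code-block:: yaml
+
+   # inventory/group_vars/nova-compute-ironic
+   nova_compute_ironic_custom_host: "ironic-top-level"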
+
+This config option is also useful for Ironic Shards. Whilst these are not
+explicitly supported by Kolla Ansible, some further information can be found
+`here `__.
+
+Note that Ironic HA is not affected, and continues to work as normal.
+
Deployment
~~~~~~~~~~
Run the deploy as usual:
diff --git a/doc/source/reference/compute/libvirt-guide.rst b/doc/source/reference/compute/libvirt-guide.rst
index 690d6bce44..e226d2fe3d 100644
--- a/doc/source/reference/compute/libvirt-guide.rst
+++ b/doc/source/reference/compute/libvirt-guide.rst
@@ -29,14 +29,15 @@ authenticated with SASL. This should not be considered as providing a secure,
encrypted channel, since the username/password SASL mechanisms available for
TCP are no longer considered cryptographically secure. However, it does at
least provide some authentication for the libvirt API. For a more secure
-encrypted channel, use :ref`libvirt TLS `.
+encrypted channel, use :ref:`libvirt TLS `.
SASL is enabled according to the ``libvirt_enable_sasl`` flag, which defaults
to ``true``.
The username is configured via ``libvirt_sasl_authname``, and defaults to
-``kolla``. The password is configured via ``libvirt_sasl_password``, and is
-generated with other passwords using and stored in ``passwords.yml``.
+``nova``. The password is configured via ``libvirt_sasl_password``, and is
+generated with other passwords using ``kolla-mergepwd`` and ``kolla-genpwd``
+and stored in ``passwords.yml``.
The list of enabled authentication mechanisms is configured via
``libvirt_sasl_mech_list``, and defaults to ``["SCRAM-SHA-256"]`` if libvirt
@@ -54,6 +55,23 @@ libvirt as a host daemon. However, since the Yoga release, if a libvirt daemon
has already been set up, then Kolla Ansible may be configured to use it. This
may be achieved by setting ``enable_nova_libvirt_container`` to ``false``.
+When the firewall driver is set to ``openvswitch``, libvirt will plug VMs
+directly into the integration bridge, ``br-int``. To do this it uses the
+``ovs-vsctl`` utility. The search path for this binary is controlled by the
+``$PATH`` environment variable (as seen by the libvirt process). There are a
+few options to ensure that this binary can be found:
+
+* Set ``openvswitch_ovs_vsctl_wrapper_enabled`` to ``True``. This will install
+  a wrapper script at ``/usr/bin/ovs-vsctl`` that executes ``ovs-vsctl`` in
+  the context of the ``openvswitch_vswitchd`` container (see the sketch after
+  this list). This option is useful if you do not have Open vSwitch installed
+  on the host. It also has the advantage that the ``ovs-vsctl`` utility will
+  match the version of the server.
+
+* Install Open vSwitch on the hypervisor. Kolla mounts ``/run/openvswitch``
+  from the host into the ``openvswitch_vswitchd`` container. This means that
+  the socket is in the location ``ovs-vsctl`` expects with its default
+  options.
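+
+For the first option, a minimal sketch of the required setting in
+``/etc/kolla/globals.yml``:
+
+.. code-block:: yaml
+
+   openvswitch_ovs_vsctl_wrapper_enabled: True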
+
Migration from container to host
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -76,7 +94,7 @@ to ``true`` will cause the Docker volumes to be removed.
A future extension could support migration of existing VMs, but this is
currently out of scope.
-.. libvirt-tls:
+.. _libvirt-tls:
Libvirt TLS
===========
diff --git a/doc/source/reference/containers/kuryr-guide.rst b/doc/source/reference/containers/kuryr-guide.rst
index 68532e9120..fbcbfd4192 100644
--- a/doc/source/reference/containers/kuryr-guide.rst
+++ b/doc/source/reference/containers/kuryr-guide.rst
@@ -20,7 +20,7 @@ To allow Docker daemon connect to the etcd, add the following in the
.. code-block:: ini
- ExecStart= -H tcp://172.16.1.13:2375 -H unix:///var/run/docker.sock --cluster-store=etcd://172.16.1.13:2379 --cluster-advertise=172.16.1.13:2375
+ ExecStart= -H tcp://172.16.1.13:2375 -H unix:///var/run/docker.sock --cluster-advertise=172.16.1.13:2375
The IP address is host running the etcd service. ```2375``` is port that
allows Docker daemon to be accessed remotely. ```2379``` is the etcd listening
diff --git a/doc/source/reference/databases/mariadb-guide.rst b/doc/source/reference/databases/mariadb-guide.rst
index 305bc67123..71b51f77c4 100644
--- a/doc/source/reference/databases/mariadb-guide.rst
+++ b/doc/source/reference/databases/mariadb-guide.rst
@@ -29,8 +29,9 @@ Deployment
----------
Each shard is identified by an integer ID, defined by ``mariadb_shard_id``.
-The default shard, defined by ``mariadb_default_database_shard_id`` (default 0),
-identifies the shard that will be accessible via HAProxy and available for backing up.
+The default shard, defined by ``mariadb_default_database_shard_id``
+(default 0), identifies the shard that will be accessible via HAProxy and
+available for backing up.
In order to deploy several MariaDB cluster, you will need to edit
inventory file in the way described below:
diff --git a/doc/source/reference/deployment-and-bootstrapping/bifrost.rst b/doc/source/reference/deployment-and-bootstrapping/bifrost.rst
index 669fe893b0..c22208d0ea 100644
--- a/doc/source/reference/deployment-and-bootstrapping/bifrost.rst
+++ b/doc/source/reference/deployment-and-bootstrapping/bifrost.rst
@@ -284,13 +284,15 @@ For development:
.. code-block:: console
- cd kolla-ansible
- tools/kolla-ansible deploy-bifrost
+ pip install -e ./kolla-ansible
+ kolla-ansible deploy-bifrost
+
For Production:
.. code-block:: console
+ pip install -U ./kolla-ansible
kolla-ansible deploy-bifrost
Deploy Bifrost manually
@@ -376,12 +378,14 @@ For Development:
.. code-block:: console
- tools/kolla-ansible deploy-servers
+ pip install -e ./kolla-ansible
+ kolla-ansible deploy-servers
For Production:
.. code-block:: console
+ pip install -U ./kolla-ansible
kolla-ansible deploy-servers
Manually
diff --git a/doc/source/reference/high-availability/haproxy-guide.rst b/doc/source/reference/high-availability/haproxy-guide.rst
index d3337ecab0..0d1219789c 100644
--- a/doc/source/reference/high-availability/haproxy-guide.rst
+++ b/doc/source/reference/high-availability/haproxy-guide.rst
@@ -6,7 +6,7 @@ HAProxy Guide
Kolla Ansible supports a Highly Available (HA) deployment of
Openstack and other services. High-availability in Kolla
-is implented as via Keepalived and HAProxy. Keepalived manages virtual IP
+is implemented via Keepalived and HAProxy. Keepalived manages virtual IP
addresses, while HAProxy load-balances traffic to service backends.
These two components must be installed on the same hosts
and they are deployed to hosts in the ``loadbalancer`` group.
@@ -22,6 +22,26 @@ setting the following in ``/etc/kolla/globals.yml``:
enable_haproxy: "no"
enable_keepalived: "no"
+Single external frontend for services
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A single external frontend for a particular service can be enabled by adding
+the following to ``/etc/kolla/globals.yml`` (feature flag and example
+services):
+
+.. code-block:: yaml
+
+ haproxy_single_external_frontend: true
+
+ nova_external_fqdn: "nova.example.com"
+ neutron_external_fqdn: "neutron.example.com"
+ horizon_external_fqdn: "horizon.example.com"
+ opensearch_external_fqdn: "opensearch.example.com"
+ grafana_external_fqdn: "grafana.example.com"
+
+This functionality allows services to be exposed on separate FQDNs on a
+commonly used port, i.e. 443, instead of the usual high ports.
+
Configuration
~~~~~~~~~~~~~
@@ -51,7 +71,7 @@ Backend weights
When different baremetal are used in infrastructure as haproxy backends
or they are overloaded for some reason, kolla-ansible is able to change
-weight of backend per sevice. Weight can be any integer value from 1 to
+weight of backend per service. Weight can be any integer value from 1 to
256.
To set weight of backend per service, modify inventory file as below:
@@ -62,3 +82,18 @@ To set weight of backend per service, modify inventory file as below:
server1 haproxy_nova_api_weight=10
server2 haproxy_nova_api_weight=2 haproxy_keystone_internal_weight=10
server3 haproxy_keystone_admin_weight=50
+
+HTTP/2 Support
+---------------
+
+HAProxy with HTTP/2 frontend support is enabled by default. It may be
+disabled by setting the following in ``/etc/kolla/globals.yml``:
+
+.. code-block:: yaml
+
+ haproxy_enable_http2: "no"
+
+SSL/TLS Settings
+----------------
+
+For SSL/TLS related settings refer to the :ref:`haproxy-tls-settings` section.
diff --git a/doc/source/reference/logging-and-monitoring/central-logging-guide.rst b/doc/source/reference/logging-and-monitoring/central-logging-guide.rst
index f2a41ab76b..edfdc845d6 100644
--- a/doc/source/reference/logging-and-monitoring/central-logging-guide.rst
+++ b/doc/source/reference/logging-and-monitoring/central-logging-guide.rst
@@ -18,76 +18,90 @@ the following:
enable_central_logging: "yes"
-Elasticsearch
-~~~~~~~~~~~~~
+OpenSearch
+~~~~~~~~~~
-Kolla deploys Elasticsearch as part of the E*K stack to store, organize
-and make logs easily accessible.
+Kolla deploys OpenSearch to store, organize and make logs easily accessible.
-By default Elasticsearch is deployed on port ``9200``.
+By default OpenSearch is deployed on port ``9200``.
.. note::
- Elasticsearch stores a lot of logs, so if you are running centralized logging,
+ OpenSearch stores a lot of logs, so if you are running centralized logging,
remember to give ``/var/lib/docker`` adequate space.
Alternatively it is possible to use a local directory instead of the volume
- ``elasticsearch`` to store the data of Elasticsearch. The path can be set via
- the variable ``elasticsearch_datadir_volume``.
+ ``opensearch`` to store the data of OpenSearch. The path can be set via
+ the variable ``opensearch_datadir_volume``.
-Curator
--------
+Applying log retention policies
+-------------------------------
-To stop your disks filling up, retention policies can be set. These are
-enforced by Elasticsearch Curator which can be enabled by setting the
-following in ``/etc/kolla/globals.yml``:
+To stop your disks filling up, the Index State Management plugin for
+OpenSearch can be used to define log retention policies. A default
+retention policy is applied to all indices which match the
+``opensearch_log_index_prefix``. This policy first closes old indices,
+and then eventually deletes them. It can be customised via the following
+variables:
-.. code-block:: yaml
+- ``opensearch_apply_log_retention_policy``
+- ``opensearch_soft_retention_period_days``
+- ``opensearch_hard_retention_period_days``
- enable_elasticsearch_curator: "yes"
+By default the soft and hard retention periods are 30 and 60 days
+respectively. If you are upgrading from Elasticsearch, and have previously
+configured ``elasticsearch_curator_soft_retention_period_days`` or
+``elasticsearch_curator_hard_retention_period_days``, those variables will
+be used instead of the defaults. You should migrate your configuration to
+use the new variable names before the Caracal release.
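+
+A sketch of overriding the defaults in ``/etc/kolla/globals.yml`` (the
+retention periods shown are illustrative):
+
+.. code-block:: yaml
+
+   opensearch_apply_log_retention_policy: true
+   opensearch_soft_retention_period_days: 90
+   opensearch_hard_retention_period_days: 180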
-Elasticsearch Curator is configured via an actions file. The format of the
-actions file is described in the `Elasticsearch Curator documentation `_.
-A default actions file is provided which closes indices and then deletes them
-some time later. The periods for these operations, as well as the prefix for
-determining which indicies should be managed are defined in the Elasticsearch
-role defaults and can be overridden in ``/etc/kolla/globals.yml`` if required.
+Advanced users may wish to customise the retention policy, which
+is possible by overriding ``opensearch_retention_policy`` with
+a valid policy. See the `Index Management plugin documentation `__
+for further details.
-If the default actions file is not malleable enough, a custom actions file can
-be placed in the Kolla custom config directory, for example:
-``/etc/kolla/config/elasticsearch/elasticsearch-curator-actions.yml``.
+Updating log retention policies
+-------------------------------
-When testing the actions file you may wish to perform a dry run to be certain
-of what Curator will actually do. A dry run can be enabled by setting the
-following in ``/etc/kolla/globals.yml``:
+By design, Kolla Ansible will NOT update an existing retention
+policy in OpenSearch. This is to prevent policy changes that may have
+been made via the OpenSearch Dashboards UI, or external tooling,
+from being wiped out.
-.. code-block:: yaml
+There are three options for modifying an existing policy:
- elasticsearch_curator_dry_run: "yes"
+1. Via the OpenSearch Dashboards UI. See the `Index Management plugin documentation `__
+   for further details.
-The actions which *would* be taken if a dry run were to be disabled are then
-logged in the Elasticsearch Kolla logs folder under
-``/var/log/kolla/elasticsearch/elasticsearch-curator.log``.
+2. Via the OpenSearch API using external tooling.
-Kibana
-~~~~~~
+3. By manually removing the existing policy via the OpenSearch Dashboards
+ UI (or API), before re-applying the updated policy with Kolla Ansible.
-Kolla deploys Kibana as part of the E*K stack in order to allow operators to
+OpenSearch Dashboards
+~~~~~~~~~~~~~~~~~~~~~
+
+Kolla deploys OpenSearch Dashboards to allow operators to
search and visualise logs in a centralised manner.
-After successful deployment, Kibana can be accessed using a browser on
-``:5601``.
+After a successful deployment, OpenSearch Dashboards can be accessed using a
+browser on ``:5601`` or
+``:5601``.
+
+The default username is ``opensearch``; the password can be located under
+```` in ``/etc/kolla/passwords.yml``.
-The default username is ``kibana``, the password can be located under
-```` in ``/etc/kolla/passwords.yml``.
+If you want to prevent OpenSearch Dashboards being exposed on the external
+VIP, you can set ``enable_opensearch_dashboards_external`` to ``false`` in
+``/etc/kolla/globals.yml``.
First Login
-----------
-When Kibana is opened for the first time, it requires creating a default index
-pattern. To view, analyse and search logs, at least one index pattern has to
-be created. To match indices stored in ElasticSearch, we suggest using the
-following configuration:
+When OpenSearch Dashboards is opened for the first time, it requires creating
+a default index pattern. To view, analyse and search logs, at least one
+index pattern has to be created. To match indices stored in OpenSearch,
+we suggest using the following configuration:
#. Index pattern - flog-*
#. Time Filter field name - @timestamp
@@ -125,12 +139,12 @@ services across the cluster.
The current search can also be saved by clicking the ``Save Search`` icon
available from the menu on the right hand side.
-Example: using Kibana to diagnose a common failure
---------------------------------------------------
+Example: using OpenSearch Dashboards to diagnose a common failure
+-----------------------------------------------------------------
-The following example demonstrates how Kibana can be used to diagnose a common
-OpenStack problem, where an instance fails to launch with the error 'No valid
-host was found'.
+The following example demonstrates how OpenSearch Dashboards can be used to
+diagnose a common OpenStack problem, where an instance fails to launch with
+the error 'No valid host was found'.
First, re-run the server creation with ``--debug``:
@@ -148,17 +162,18 @@ example ID looks like this:
X-Compute-Request-Id: req-c076b50a-6a22-48bf-8810-b9f41176a6d5
-Taking the value of ``X-Compute-Request-Id``, enter the value into the Kibana
-search bar, minus the leading ``req-``. Assuming some basic filters have been
-added as shown in the previous section, Kibana should now show the path this
-request made through the OpenStack deployment, starting at a ``nova-api`` on
-a control node, through the ``nova-scheduler``, ``nova-conductor``, and finally
+Taking the value of ``X-Compute-Request-Id``, enter the value into the
+OpenSearch Dashboards search bar, minus the leading ``req-``. Assuming some
+basic filters have been added as shown in the previous section, OpenSearch
+Dashboards should now show the path this request made through the
+OpenStack deployment, starting at a ``nova-api`` on a control node,
+through the ``nova-scheduler``, ``nova-conductor``, and finally
``nova-compute``. Inspecting the ``Payload`` of the entries marked ``ERROR``
should quickly lead to the source of the problem.
While some knowledge is still required of how Nova works in this instance, it
-can still be seen how Kibana helps in tracing this data, particularly in a
-large scale deployment scenario.
+can still be seen how OpenSearch Dashboards helps in tracing this data,
+particularly in a large scale deployment scenario.
Visualize data - Visualize tab
------------------------------
@@ -261,3 +276,13 @@ network equipment. This can be done by configuring custom fluentd inputs.
Configuration of custom fluentd inputs is possible by placing input
configuration files in ``/etc/kolla/config/fluentd/input/*.conf`` on the
control host.
+
+Systemd Logs
+------------
+
+By default, when enabling central logging, we also enable reading ``systemd``
+logs from the ``/var/log/journal`` directory.
+
+To disable this behavior when central logging is enabled, set the value of
+the variable ``enable_fluentd_systemd`` to ``false`` in the configuration
+file ``/etc/kolla/globals.yml``.
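+
+A minimal sketch of this override:
+
+.. code-block:: yaml
+
+   enable_fluentd_systemd: false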
diff --git a/doc/source/reference/logging-and-monitoring/index.rst b/doc/source/reference/logging-and-monitoring/index.rst
index 7c164728e5..09d0175479 100644
--- a/doc/source/reference/logging-and-monitoring/index.rst
+++ b/doc/source/reference/logging-and-monitoring/index.rst
@@ -11,8 +11,5 @@ logging and monitoring services available in kolla.
central-logging-guide
grafana-guide
influxdb-guide
- kafka-guide
- monasca-guide
osprofiler-guide
prometheus-guide
- skydive-guide
diff --git a/doc/source/reference/logging-and-monitoring/kafka-guide.rst b/doc/source/reference/logging-and-monitoring/kafka-guide.rst
deleted file mode 100644
index 2867e9f2fc..0000000000
--- a/doc/source/reference/logging-and-monitoring/kafka-guide.rst
+++ /dev/null
@@ -1,25 +0,0 @@
-.. _kafka-guide:
-
-============
-Apache Kafka
-============
-
-Overview
-~~~~~~~~
-
-`Kafka `_ is a distributed stream processing
-system. It forms the central component of Monasca and in an OpenStack context
-can also be used as an experimental messaging backend in `Oslo messaging
-`_.
-
-Kafka
-~~~~~
-
-A spinning disk array is normally sufficient for Kafka. The data directory
-defaults to a docker volume, ``kafka``. Since it can use a lot of disk space,
-you may wish to store the data on a dedicated device. This can be achieved by
-setting ``kafka_datadir_volume`` in ``/etc/kolla/globals.yml``:
-
-.. code-block:: yaml
-
- kafka_datadir_volume: /mnt/spinning_array/kafka/
diff --git a/doc/source/reference/logging-and-monitoring/monasca-guide.rst b/doc/source/reference/logging-and-monitoring/monasca-guide.rst
deleted file mode 100644
index 12fa09a9e4..0000000000
--- a/doc/source/reference/logging-and-monitoring/monasca-guide.rst
+++ /dev/null
@@ -1,471 +0,0 @@
-.. _monasca-guide:
-
-============================
-Monasca - Monitoring service
-============================
-
-Overview
-~~~~~~~~
-
-Monasca provides monitoring and logging as-a-service for OpenStack. It
-consists of a large number of micro-services coupled together by Apache
-Kafka. If it is enabled in Kolla, it is automatically configured to collect
-logs and metrics from across the control plane. These logs and metrics
-are accessible from the Monasca APIs to anyone with credentials for
-the OpenStack project to which they are posted.
-
-Monasca is not just for the control plane. Monitoring data can just as
-easily be gathered from tenant deployments, by for example baking the
-Monasca Agent into the tenant image, or installing it post-deployment
-using an orchestration tool.
-
-Finally, one of the key tenets of Monasca is that it is scalable. In Kolla
-Ansible, the deployment has been designed from the beginning to work in a
-highly available configuration across multiple nodes. Traffic is typically
-balanced across multiple instances of a service by HAProxy, or in other
-cases using the native load balancing mechanism provided by the service.
-For example, topic partitions in Kafka. Of course, if you start out with
-a single server that's fine too, and if you find that you need to improve
-capacity later on down the line, adding additional nodes should be a
-fairly straightforward exercise.
-
-Pre-deployment configuration
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Before enabling Monasca, read the :ref:`Security impact` section and
-decide whether you need to configure a firewall, and/or wish to prevent
-users from accessing Monasca services.
-
-Enable Monasca in ``/etc/kolla/globals.yml``:
-
-.. code-block:: yaml
-
- enable_monasca: "yes"
-
-If you wish to disable the alerting and notification pipeline to reduce
-resource usage you can set ``/etc/kolla/globals.yml``:
-
-.. code-block:: yaml
-
- monasca_enable_alerting_pipeline: "no"
-
-You can optionally bypass Monasca for control plane logs, and instead have
-them sent directly to Elasticsearch. This should be avoided if you have
-deployed Monasca as a standalone service for the purpose of storing
-logs in a protected silo for security purposes. However, if this is not
-a relevant consideration, for example you have deployed Monasca alongside the
-existing OpenStack control plane, then you may free up some resources by
-setting:
-
-.. code-block:: yaml
-
- monasca_ingest_control_plane_logs: "no"
-
-You should note that when making this change with the default
-``kibana_log_prefix`` prefix of ``flog-``, you will need to create a new
-index pattern in Kibana accordingly. If you wish to continue to search all
-logs using the same index pattern in Kibana, then you can override
-``kibana_log_prefix`` to ``monasca`` or similar in ``/etc/kolla/globals.yml``:
-
-.. code-block:: yaml
-
- kibana_log_prefix: "monasca"
-
-If you have enabled Elasticsearch Curator, it will be configured to rotate
-logs with index patterns matching either ``^flog-.*`` or ``^monasca-.*`` by
-default. If this is undesirable, then you can update the
-``elasticsearch_curator_index_pattern`` variable accordingly.
-
-Stand-alone configuration (optional)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Monasca can be deployed via Kolla Ansible in a standalone configuration. The
-deployment will include all supporting services such as HAProxy, Keepalived,
-MariaDB and Memcached. It can also include Keystone, but you will likely
-want to integrate with the Keystone instance provided by your existing
-OpenStack deployment. Some reasons to perform a standalone deployment are:
-
-* Your OpenStack deployment is *not* managed by Kolla Ansible, but you want
- to take advantage of Monasca support in Kolla Ansible.
-* Your OpenStack deployment *is* managed by Kolla Ansible, but you do not
- want the Monasca deployment to share services with your OpenStack
- deployment. For example, in a combined deployment Monasca will share HAProxy
- and MariaDB with the core OpenStack services.
-* Your OpenStack deployment *is* managed by Kolla Ansible, but you want
- Monasca to be decoupled from the core OpenStack services. For example, you
- may have a dedicated monitoring and logging team, and wish to prevent that
- team accidentally breaking, or redeploying core OpenStack services.
-* You want to deploy Monasca for testing. In this case you will likely want
- to deploy Keystone as well.
-
-To configure a standalone installation you will need to add the following to
-`/etc/kolla/globals.yml``:
-
-.. code-block:: yaml
-
- enable_openstack_core: "no"
- enable_rabbitmq: "no"
- enable_keystone: "yes"
-
-With the above configuration alone Keystone *will* be deployed. If you want
-Monasca to be registered with an external instance of Keystone remove
-`enable_keystone: "yes"` from `/etc/kolla/globals.yml` and add the following,
-additional configuration:
-
-.. code-block:: yaml
-
- keystone_internal_url: "http://172.28.128.254:5000"
- monasca_openstack_auth:
- auth_url: "{{ keystone_internal_url }}"
- username: "admin"
- password: "{{ external_keystone_admin_password }}"
- project_name: "admin"
- domain_name: "default"
- user_domain_name: "default"
-
-In this example it is assumed that the external Keystone's internal URL is
-`http://172.28.128.254:5000`, and that the external Keystone's admin password
-is defined by
-the variable `external_keystone_admin_password` which you will most likely
-want to save in `/etc/kolla/passwords.yml`. Note that the Keystone URLs can
-be obtained from the external OpenStack CLI, for example:
-
-.. code-block:: console
-
- openstack endpoint list --service identity
- +----------------------------------+-----------+--------------+--------------+---------+-----------+----------------------------+
- | ID | Region | Service Name | Service Type | Enabled | Interface | URL |
- +----------------------------------+-----------+--------------+--------------+---------+-----------+----------------------------+
- | 6d768ee2ce1c4302a49e9b7ac2af472c | RegionOne | keystone | identity | True | public | http://172.28.128.254:5000 |
- | e02067a58b1946c7ae53abf0cfd0bf11 | RegionOne | keystone | identity | True | internal | http://172.28.128.254:5000 |
- +----------------------------------+-----------+--------------+--------------+---------+-----------+----------------------------+
-
-If you are also using Kolla Ansible to manage the external OpenStack
-installation, the external Keystone admin password will most likely
-be defined in the *external* `/etc/kolla/passwords.yml` file. For other
-deployment methods you will need to consult the relevant documentation.
-
-Building images
-~~~~~~~~~~~~~~~
-
-To build any custom images required by Monasca see the instructions in the
-Kolla repo: `kolla/doc/source/admin/template-override/monasca.rst`. The
-remaining images may be pulled from a public registry, but if you need to build
-them manually you can use the following commands:
-
-.. code-block:: console
-
- $ kolla-build -t source monasca
- $ kolla-build kafka zookeeper storm elasticsearch logstash kibana
-
-If you are deploying Monasca standalone you will also need the following
-images:
-
-.. code-block:: console
-
- $ kolla-build cron fluentd mariadb kolla-toolbox keystone memcached keepalived haproxy
-
-Deployment
-~~~~~~~~~~
-
-Run the deploy as usual, following whichever procedure you normally use
-to decrypt secrets if you have encrypted them with Ansible Vault:
-
-.. code-block:: console
-
- $ kolla-genpwd
- $ kolla-ansible deploy
-
-Quick start
-~~~~~~~~~~~
-
-The first thing you will want to do is to create a Monasca user to view
-metrics harvested by the Monasca Agent. By default these are saved into the
-`monasca_control_plane` project, which serves as a place to store all
-control plane logs and metrics:
-
-.. code-block:: console
-
- [vagrant@operator kolla]$ openstack project list
- +----------------------------------+-----------------------+
- | ID | Name |
- +----------------------------------+-----------------------+
- | 03cb4b7daf174febbc4362d5c79c5be8 | service |
- | 2642bcc8604f4491a50cb8d47e0ec55b | monasca_control_plane |
- | 6b75784f6bc942c6969bc618b80f4a8c | admin |
- +----------------------------------+-----------------------+
-
-The permissions of Monasca users are governed by the roles which they have
-assigned to them in a given OpenStack project. This is an important point
-and forms the basis of how Monasca supports multi-tenancy.
-
-By default the `admin` role and the `monasca-read-only-user` role are
-configured. The `admin` role grants read/write privileges and the
-`monasca-read-only-user` role grants read privileges to a user.
-
-.. code-block:: console
-
- [vagrant@operator kolla]$ openstack role list
- +----------------------------------+------------------------+
- | ID | Name |
- +----------------------------------+------------------------+
- | 0419463fd5a14ace8e5e1a1a70bbbd84 | agent |
- | 1095e8be44924ae49585adc5d1136f86 | member |
- | 60f60545e65f41749b3612804a7f6558 | admin |
- | 7c184ade893442f78cea8e074b098cfd | _member_ |
- | 7e56318e207a4e85b7d7feeebf4ba396 | reader |
- | fd200a805299455d90444a00db5074b6 | monasca-read-only-user |
- +----------------------------------+------------------------+
-
-Now lets consider the example of creating a monitoring user who has
-read/write privileges in the `monasca_control_plane` project. First
-we create the user:
-
-.. code-block:: console
-
- openstack user create --project monasca_control_plane mon_user
- User Password:
- Repeat User Password:
- +---------------------+----------------------------------+
- | Field | Value |
- +---------------------+----------------------------------+
- | default_project_id | 2642bcc8604f4491a50cb8d47e0ec55b |
- | domain_id | default |
- | enabled | True |
- | id | 088a725872c9410d9c806c24952f9ae1 |
- | name | mon_user |
- | options | {} |
- | password_expires_at | None |
- +---------------------+----------------------------------+
-
-Secondly we assign the user the `admin` role in the `monasca_control_plane`
-project:
-
-.. code-block:: console
-
- openstack role add admin --project monasca_control_plane --user mon_user
-
-Alternatively we could have assigned the user the read only role:
-
-.. code-block:: console
-
- openstack role add monasca_read_only_user --project monasca_control_plane --user mon_user
-
-The user is now active and the credentials can be used to generate an
-OpenStack token which can be added to the Monasca Grafana datasource in
-Grafana. For example, first set the OpenStack credentials for the project
-you wish to view metrics in. This is normally easiest to do by logging into
-Horizon with the user you have configured for monitoring, switching to
-the OpenStack project you wish to view metrics in, and then downloading
-the credentials file for that project. The credentials file can then
-be sourced from the command line. You can then generate a token for the
-datasource using the following command:
-
-.. code-block:: console
-
- openstack token issue
-
-You should then log into Grafana. By default Grafana is available on port
-`3000` on both internal and external VIPs. See the
-:ref:`Grafana guide` for further details. Once in Grafana
-you can select the Monasca datasource and add your token to it. You are
-then ready to view metrics from Monasca.
-
-For log analysis Kibana is also available, by default on port `5601` on both
-internal and external VIPs. Currently the Keystone authentication plugin is
-not configured and the HAProxy endpoints are protected by a password which is
-defined in `/etc/kolla/passwords.yml` under `kibana_password`.
-
-Migrating state from an existing Monasca deployment
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-These steps should be considered after Monasca has been deployed by Kolla. The
-aim here is to provide some general guidelines on how to migrate service
-databases. Migration of time series or log data is not considered.
-
-Migrating service databases
-^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The first step is to dump copies of the existing Monasca database. For example:
-
-.. code-block:: console
-
- mysqldump -h 10.0.0.1 -u monasca_db_user -p monasca_db > monasca_db.sql
-
-This can then be used to replace the Kolla managed Monasca database. Note that
-it is simplest to get the database password, IP and port from the Monasca API
-Kolla config file in `/etc/kolla/monasca-api`. Also note that the commands
-below drop and recreate the database before loading in the existing database.
-
-.. code-block:: console
-
- mysql -h 192.168.0.1 -u monasca -p -e "drop database monasca; create database monasca;"
- mysql -h 192.198.0.1 -u monasca -p monasca < monasca_db.sql
-
-Migrating passwords
-^^^^^^^^^^^^^^^^^^^
-
-The next step is to set the Kolla Ansible service passwords so that they
-match the legacy services. The alternative of changing the passwords to match
-the passwords generated by Kolla Ansible is not considered here.
-
-The passwords which you may wish to set to match the original passwords are:
-
-.. code-block:: console
-
- monasca_agent_password:
-
-These can be found in the Kolla Ansible passwords file.
-
-Stamping the database with an Alembic revision ID (migrations from pre-Rocky)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Kolla Ansible supports deploying Monasca from the Rocky release onwards. If
-you are migrating from Queens or below, your database will not have been
-stamped with a revision ID by Alembic, and this will not be automatic.
-Support for Alembic migrations was added to Monasca in the Rocky release.
-You will first need to make sure that the database you have loaded in has
-been manually migrated to the Queens schema. You can then stamp the database
-from any Monasca API container running the Rocky release onwards. An example
-of how this can be done is given below:
-
-.. code-block:: console
-
- sudo docker exec -it monasca_api monasca_db stamp --from-fingerprint
-
-Applying the configuration
-^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Restart Monasca services on all nodes, for example:
-
-.. code-block:: console
-
- for service in `docker ps | grep monasca_ | awk '{print $11}'`; do docker restart $service; done
-
-Apply the password changes by running the following command:
-
-.. code-block:: console
-
- kolla-ansible reconfigure -t monasca
-
-Cleanup
-~~~~~~~
-
-From time-to-time it may be necessary to manually invoke the Monasca cleanup
-command. Normally this will be triggered automatically during an upgrade for
-services which are removed or disabled by default. However, volume cleanup
-will always need to be addressed manually. It may also be necessary to run the
-cleanup command when disabling certain parts of the Monasca pipeline. A full
-list of scenarios in which you must run the cleanup command is given below.
-Those marked as automatic will be triggered as part of an upgrade.
-
-- Upgrading from Victoria to Wallaby to remove the unused Monasca Log
- Transformer service (automatic).
-- Upgrading from Victoria to Wallaby to remove the Monasca Log Metrics
- service, unless the option to disable it by default has been overridden in
- Wallaby (automatic).
-- Upgrading from Wallaby to Xena to remove the Monasca Log Metrics service
- if the option to disable it by default was overridden in Wallaby (automatic).
-- If you have disabled the alerting pipeline via the
- `monasca_enable_alerting_pipeline` flag after you have deployed the alerting
- services.
-
-The cleanup command can be invoked from the Kolla Ansible CLI, for example:
-
-.. code-block:: console
-
- kolla-ansible monasca_cleanup
-
-Following cleanup, you may also choose to remove unused container volumes.
-It is recommended to run this manually on each Monasca service host. Note
-that `docker prune` will indiscriminately remove all unused volumes,
-which may not always be what you want. If you wish to keep a subset of
-unused volumes, you can remove them individually.
-
-To remove all unused volumes on a host:
-
-.. code-block:: console
-
- docker prune
-
-To remove a single unused volume, run for example:
-
-.. code-block:: console
-
- docker volume rm monasca_log_transformer_data
-
-System requirements and performance impact
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Monasca will deploy the following Docker containers:
-
-* Apache Kafka
-* Apache Storm (optional)
-* Apache Zookeeper
-* Elasticsearch
-* Grafana
-* InfluxDB
-* Kibana
-* Monasca Agent Collector
-* Monasca Agent Forwarder
-* Monasca Agent Statsd
-* Monasca API
-* Monasca Log API
-* Monasca Log Metrics (Logstash, optional, deprecated)
-* Monasca Log Persister (Logstash)
-* Monasca Notification (optional)
-* Monasca Persister
-* Monasca Thresh (Apache Storm topology, optional)
-
-In addition to these, Monasca will also utilise Kolla deployed MariaDB,
-Keystone, Memcached and HAProxy/Keepalived. The Monasca Agent containers
-will, by default, be deployed on all nodes managed by Kolla Ansible. This
-includes all nodes in the control plane as well as compute, storage and
-monitoring nodes.
-
-Whilst these services will run on an all-in-one deployment, in a production
-environment it is recommended to use at least one dedicated monitoring node
-to avoid the risk of starving core OpenStack services of resources. As a
-general rule of thumb, for a standalone monitoring server running Monasca
-in a production environment, you will need at least 32GB RAM and a recent
-multi-core CPU. You will also need enough space to store metrics and logs,
-and to buffer these in Kafka. Whilst Kafka is happy with spinning disks,
-you will likely want to use SSDs to back InfluxDB and Elasticsearch.
-
-If resources are tight, it is possible to disable the alerting and
-notification pipeline which removes the need for Apache Storm, Monasca
-Thresh and Monasca Notification. This can have a significant effect.
-
-.. _Security impact:
-
-Security impact
-~~~~~~~~~~~~~~~
-
-The Monasca API, Log API, Grafana and Kibana ports will be exposed on
-public endpoints via HAProxy/Keepalived. If your public endpoints are
-exposed externally, then you should use a firewall to restrict access.
-You should also consider whether you wish to allow tenants to access
-these services on the internal network.
-
-If you are using the multi-tenant capabilities of Monasca there is a risk
-that tenants could gain access to other tenants logs and metrics. This could
-include logs and metrics for the control plane which could reveal sensitive
-information about the size and nature of the deployment.
-
-Another risk is that users may gain access to system logs via Kibana, which
-is not accessed via the Monasca APIs. Whilst Kolla configures a password out
-of the box to restrict access to Kibana, the password will not apply if a
-user has access to the network on which the individual Kibana service(s) bind
-behind HAProxy. Note that Elasticsearch, which is not protected by a
-password, will also be directly accessible on this network, and therefore
-great care should be taken to ensure that untrusted users do not have access
-to it.
-
-A full evaluation of attack vectors is outside the scope of this document.
-
-Assignee
-~~~~~~~~
-
-Monasca support in Kolla was contributed by StackHPC Ltd. and the Kolla
-community. If you have any issues with the deployment please ask in the
-Kolla IRC channel.
diff --git a/doc/source/reference/logging-and-monitoring/prometheus-guide.rst b/doc/source/reference/logging-and-monitoring/prometheus-guide.rst
index bf9103ca76..30966c1cdc 100644
--- a/doc/source/reference/logging-and-monitoring/prometheus-guide.rst
+++ b/doc/source/reference/logging-and-monitoring/prometheus-guide.rst
@@ -34,6 +34,30 @@ In order to remove leftover volume containing Prometheus 1.x data, execute:
on all hosts wherever Prometheus was previously deployed.
+Basic Auth
+~~~~~~~~~~
+
+Prometheus is protected with basic HTTP authentication. Kolla-ansible will
+create the following users: ``admin``, ``grafana`` (if Grafana is enabled)
+and ``skyline`` (if Skyline is enabled). The Grafana username can be
+overridden using the variable ``prometheus_grafana_user``, and the Skyline
+username can be overridden using the variable ``prometheus_skyline_user``.
+The passwords are defined by the ``prometheus_password``,
+``prometheus_grafana_password`` and ``prometheus_skyline_password`` variables
+in ``passwords.yml``. The list of basic auth users can be extended using the
+``prometheus_basic_auth_users_extra`` variable:
+
+.. code-block:: yaml
+
+ prometheus_basic_auth_users_extra:
+ - username: user
+ password: hello
+ enabled: true
+
+or completely overridden with the ``prometheus_basic_auth_users`` variable.
+
Extending the default command line options
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -82,7 +106,7 @@ following:
static_configs:
- targets:
{% for host in groups['prometheus'] %}
- - '{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ 3456 }}'
+ - '{{ hostvars[host][('ansible_' + hostvars[host]['api_interface'] | replace('-','_'))]['ipv4']['address'] }}:{{ 3456 }}'
{% endfor %}
The jobs, ``custom``, and ``custom_template`` would be appended to the default
@@ -100,7 +124,7 @@ Extra files
Sometimes it is necessary to reference additional files from within
``prometheus.yml``, for example, when defining file service discovery
-configuration. To enable you to do this, kolla-ansible will resursively
+configuration. To enable you to do this, kolla-ansible will recursively
discover any files in ``{{ node_custom_config }}/prometheus/extras`` and
template them. The templated output is then copied to
``/etc/prometheus/extras`` within the container on startup. For example to
@@ -156,3 +180,66 @@ files:
- 192.168.1.1
labels:
job: ipmi_exporter
+
+Metric Instance labels
+~~~~~~~~~~~~~~~~~~~~~~
+
+Previously, Prometheus metrics labelled instances based on their IP
+addresses. This behaviour can now be changed such that instances are
+labelled based on their inventory hostname instead. The IP address remains
+the target address, so even if the hostname is unresolvable, it doesn't
+pose an issue.
+
+The default behavior still labels instances with their IP addresses. However,
+this can be adjusted by changing the ``prometheus_instance_label`` variable.
+This variable accepts the following values:
+
+* ``None``: Instance labels will be IP addresses (default)
+* ``{{ ansible_facts.hostname }}``: Instance labels will be hostnames
+* ``{{ ansible_facts.nodename }}``: Instance labels will be FQDNs
+
+To implement this feature, modify the configuration file
+``/etc/kolla/globals.yml`` and update the ``prometheus_instance_label``
+variable accordingly. Remember, changing this variable will cause Prometheus to
+scrape metrics with new names for a short period. This will result in duplicate
+metrics until all metrics are replaced with their new labels.
+
+.. code-block:: yaml
+
+ prometheus_instance_label: "{{ ansible_facts.hostname }}"
+
+This metric labeling feature may become the default setting in future releases.
+Therefore, if you wish to retain the current default (IP address labels), make
+sure to set the ``prometheus_instance_label`` variable to ``None``.
+
+.. note::
+
+ This feature may generate duplicate metrics temporarily while Prometheus
+ updates the metric labels. Please be aware of this while analyzing metrics
+ during the transition period.
+
+Exporter configuration
+~~~~~~~~~~~~~~~~~~~~~~
+
+Node Exporter
+-------------
+
+Sometimes it can be useful to monitor hosts outside of the Kolla deployment.
+One method of doing this is to configure a list of additional targets using the
+``prometheus_node_exporter_targets_extra`` variable. This should be a
+list of dictionaries with the following keys:
+
+* target: URL of node exporter to scrape
+* labels: (Optional) Labels to set on the metrics scraped from this
+  exporter.
+
+For example:
+
+.. code-block:: yaml
+ :caption: ``/etc/kolla/globals.yml``
+
+ prometheus_node_exporter_targets_extra:
+ - target: http://10.0.0.1:1234
+ labels:
+ instance: host1
+
diff --git a/doc/source/reference/logging-and-monitoring/skydive-guide.rst b/doc/source/reference/logging-and-monitoring/skydive-guide.rst
deleted file mode 100644
index 686031107a..0000000000
--- a/doc/source/reference/logging-and-monitoring/skydive-guide.rst
+++ /dev/null
@@ -1,37 +0,0 @@
-.. _skydive-guide:
-
-====================================
-Skydive - Real time network analyzer
-====================================
-
-Overview
-~~~~~~~~
-Skydive is an open source real-time network topology and protocols analyzer.
-It aims to provide a comprehensive way of understanding what is happening in
-the network infrastructure.
-Skydive agents collect topology information and flows and forward them to a
-central agent for further analysis.
-All the information is stored in an Elasticsearch database.
-
-Configuration on Kolla deployment
----------------------------------
-
-Enable Skydive in ``/etc/kolla/globals.yml`` file:
-
-.. code-block:: yaml
-
- enable_skydive: "yes"
- enable_elasticsearch: "yes"
-
-Verify operation
-----------------
-
-After successful deployment, Skydive can be accessed using a browser on
-``:8085``.
-
-The default username is ``admin``, the password can be located under
-```` in ``/etc/kolla/passwords.yml``.
-
-For more information about how Skydive works, see
-`Skydive – An open source real-time network topology and protocols analyzer
-`__.
diff --git a/doc/source/reference/message-queues/rabbitmq.rst b/doc/source/reference/message-queues/rabbitmq.rst
index b1114c4e08..09395b2552 100644
--- a/doc/source/reference/message-queues/rabbitmq.rst
+++ b/doc/source/reference/message-queues/rabbitmq.rst
@@ -109,3 +109,105 @@ https://erlang.org/doc/man/erl.html#emulator-flags
The ``+sbwt none +sbwtdcpu none +sbwtdio none`` arguments prevent busy waiting
of the scheduler, for more details see:
https://www.rabbitmq.com/runtime.html#busy-waiting.
+
+High Availability
+~~~~~~~~~~~~~~~~~
+
+RabbitMQ offers two options to configure HA:
+
+* Quorum queues (enabled by default and controlled by the
+  ``om_enable_rabbitmq_quorum_queues`` variable)
+* Classic queue mirroring and durable queues (deprecated in RabbitMQ and to
+  be dropped in 4.0, controlled by ``om_enable_rabbitmq_high_availability``)
+
+There are some queue types which are intentionally not mirrored
+using the exclusionary pattern ``^(?!(amq\\.)|(.*_fanout_)|(reply_)).*``.
+
+After enabling one of these values on a running system, there are some
+additional steps needed to migrate from transient to durable queues.
+
+.. warning::
+
+   Since the default changed from non-HA to quorum queues in the Bobcat
+   release, the following procedure must be carried out before a SLURP
+   upgrade to Caracal.
+
+1. Stop all OpenStack services which use RabbitMQ, so that they will not
+ attempt to recreate any queues yet.
+
+ .. code-block:: console
+
+ kolla-ansible stop --tags
+
+2. Generate the new config for all services.
+
+ .. code-block:: console
+
+ kolla-ansible genconfig
+
+3. Reconfigure RabbitMQ if you are using
+ ``om_enable_rabbitmq_high_availability``.
+
+ .. code-block:: console
+
+ kolla-ansible reconfigure --tags rabbitmq
+
+4. Reset the state on each RabbitMQ, to remove the old transient queues and
+ exchanges.
+
+ .. code-block:: console
+
+ kolla-ansible rabbitmq-reset-state
+
+5. Start the OpenStack services again, at which point they will recreate the
+ appropriate queues as durable.
+
+ .. code-block:: console
+
+ kolla-ansible deploy --tags
+
+SLURP
+~~~~~
+
+RabbitMQ has two major version releases per year but does not support jumping
+two versions in one upgrade. So if you want to perform a skip-level upgrade,
+you must first upgrade RabbitMQ to an intermediary version. To do this, Kolla
+provides multiple RabbitMQ versions in the odd OpenStack releases. To use the
+upgrade from Antelope to Caracal as an example, we start on RabbitMQ version
+3.11. In Antelope, you should upgrade to RabbitMQ version 3.12 with the command
+below. You can then proceed with the usual SLURP upgrade to Caracal (and
+therefore RabbitMQ version 3.13).
+
+.. warning::
+
+ This command should be run from the Antelope release.
+
+ Note that this command is NOT idempotent. See "RabbitMQ versions" below for
+ an alternative approach.
+
+.. code-block:: console
+
+ kolla-ansible rabbitmq-upgrade 3.12
+
+RabbitMQ versions
+~~~~~~~~~~~~~~~~~
+
+Alternatively, you can set ``rabbitmq_image`` in your ``globals.yml``
+configuration for idempotence in deployments. As an example, Kolla ships
+versions 3.11, 3.12 and 3.13 of RabbitMQ in Antelope. By default, Antelope
+Kolla-Ansible will deploy version 3.11. If you wish to deploy a later version,
+you must override the image. For example, to use version 3.12, change
+``rabbitmq_image`` in ``globals.yml`` as follows:
+
+.. code-block:: yaml
+
+ rabbitmq_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/rabbitmq-3-12"
+
+You can then upgrade RabbitMQ with the usual command:
+
+.. code-block:: console
+
+ kolla-ansible upgrade --tags rabbitmq
+
+Note again that RabbitMQ does not support upgrades across more than one major
+version, so if you wish to upgrade to version 3.13 you must first upgrade to
+3.12.
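+
+As a sketch, the two-hop path from 3.11 to 3.13 then looks as follows (the
+``rabbitmq-3-13`` image name is an assumption, by analogy with the
+``rabbitmq-3-12`` example above):
+
+.. code-block:: console
+
+   # First hop, with rabbitmq_image pointing at the rabbitmq-3-12 image
+   kolla-ansible upgrade --tags rabbitmq
+   # Second hop, with rabbitmq_image pointing at the rabbitmq-3-13 image
+   kolla-ansible upgrade --tags rabbitmq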
diff --git a/doc/source/reference/networking/designate-guide.rst b/doc/source/reference/networking/designate-guide.rst
index 8a21a04de6..058af33b5f 100644
--- a/doc/source/reference/networking/designate-guide.rst
+++ b/doc/source/reference/networking/designate-guide.rst
@@ -24,13 +24,23 @@ Enable Designate service in ``/etc/kolla/globals.yml``
.. code-block:: yaml
enable_designate: "yes"
+ neutron_dns_domain: "example.org."
+
+.. important::
+ The ``neutron_dns_domain`` value has to be different from
+ ``openstacklocal`` (its default value) and has to end with a period ``.``.
+
+.. important::
+ ``DNS Integration`` is enabled by default and can be disabled by
+ adding ``neutron_dns_integration: no`` to ``/etc/kolla/globals.yml``
+ and reconfiguring with ``--tags neutron``.
Configure Designate options in ``/etc/kolla/globals.yml``
.. important::
Designate MDNS node requires the ``dns_interface`` to be reachable from
- public network.
+ management network.
.. code-block:: yaml
@@ -64,7 +74,7 @@ Infoblox Backend
.. important::
When using Infoblox as the Designate backend the MDNS node
- requires the container to listen on port 53. As this is a privilaged
+ requires the container to listen on port 53. As this is a privileged
port you will need to build your designate-mdns container to run
as the user root rather than designate.
@@ -91,6 +101,15 @@ For more information about how the Infoblox backend works, see
Neutron and Nova Integration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The ``designate-sink`` service is optional; it listens for event
+notifications, such as ``compute.instance.create.end``. Handlers are
+available for Nova and Neutron. Notification events can then be used
+to trigger record creation and deletion.
+
+.. note::
+ The ``designate-sink`` service is disabled by default in Kolla
+ deployments and can be enabled by setting
+ ``designate_enable_notifications_sink: yes``, as shown below.
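+
+A minimal snippet for ``/etc/kolla/globals.yml`` to enable it:
+
+.. code-block:: yaml
+
+   designate_enable_notifications_sink: yes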
+
Create default Designate Zone for Neutron:
.. code-block:: console
diff --git a/doc/source/reference/networking/neutron-extensions.rst b/doc/source/reference/networking/neutron-extensions.rst
index 8a8dbab05a..2e8be4b138 100644
--- a/doc/source/reference/networking/neutron-extensions.rst
+++ b/doc/source/reference/networking/neutron-extensions.rst
@@ -23,6 +23,25 @@ For setting up a testbed environment and creating a port chain, please refer
to :networking-sfc-doc:`networking-sfc documentation
`.
+Neutron FWaaS (Firewall-as-a-Service)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Preparation and deployment
+--------------------------
+
+.. warning::
+
+ FWaaS currently has no support for OVN.
+
+Modify the ``/etc/kolla/globals.yml`` file as the following example shows:
+
+.. code-block:: yaml
+
+ enable_neutron_fwaas: "yes"
+
+For more information on FWaaS in Neutron refer to the
+:neutron-doc:`Neutron FWaaS docs `.
+
Neutron VPNaaS (VPN-as-a-Service)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -90,7 +109,7 @@ For more information on this and VPNaaS in Neutron refer to the
and the `OpenStack wiki `_.
Trunking
---------
+~~~~~~~~
The network trunk service allows multiple networks to be connected to an
instance using a single virtual NIC (vNIC). Multiple networks can be presented
diff --git a/doc/source/reference/networking/neutron.rst b/doc/source/reference/networking/neutron.rst
index eedbf786b6..8a627572d7 100644
--- a/doc/source/reference/networking/neutron.rst
+++ b/doc/source/reference/networking/neutron.rst
@@ -20,13 +20,20 @@ Neutron external interface is used for communication with the external world,
for example provider networks, routers and floating IPs.
For setting up the neutron external interface modify
``/etc/kolla/globals.yml`` setting ``neutron_external_interface`` to the
-desired interface name. This interface is used by hosts in the ``network``
-group. It is also used by hosts in the ``compute`` group if
+desired interface name or comma-separated list of interface names. Its default
+value is ``eth1``. These external interfaces are used by hosts in the
+``network`` group. They are also used by hosts in the ``compute`` group if
``enable_neutron_provider_networks`` is set or DVR is enabled.
-The interface is plugged into a bridge (Open vSwitch or Linux Bridge, depending
-on the driver) defined by ``neutron_bridge_name``, which defaults to ``br-ex``.
-The default Neutron physical network is ``physnet1``.
+The external interfaces are each plugged into a bridge (Open vSwitch or Linux
+Bridge, depending on the driver) defined by ``neutron_bridge_name``, which
+defaults to ``br-ex``. When there are multiple external interfaces,
+``neutron_bridge_name`` should be a comma-separated list of the same length.
+
+The default Neutron physical network is ``physnet1``, or ``physnet1`` to
+``physnetN`` when there are multiple external network interfaces. This may be
+changed by setting ``neutron_physical_networks`` to a comma-separated list of
+networks of the same length.
Example: single interface
-------------------------
@@ -54,6 +61,30 @@ These two lists are "zipped" together, such that ``eth1`` is plugged into the
Ansible maps these interfaces to Neutron physical networks ``physnet1`` and
``physnet2`` respectively.
+Example: custom physical networks
+---------------------------------
+
+Sometimes we may want to customise the physical network names used. This may
+be because not all hosts have access to all physical networks, or to allow
+more descriptive names.
+
+For example, in an environment with a separate physical network for Ironic
+provisioning, controllers might have access to two physical networks:
+
+.. code-block:: yaml
+
+ neutron_external_interface: "eth1,eth2"
+ neutron_bridge_name: "br-ex1,br-ex2"
+ neutron_physical_networks: "physnet1,physnet2"
+
+While compute nodes have access only to ``physnet2``:
+
+.. code-block:: yaml
+
+ neutron_external_interface: "eth1"
+ neutron_bridge_name: "br-ex1"
+ neutron_physical_networks: "physnet2"
+
Example: shared interface
-------------------------
@@ -87,6 +118,47 @@ created and configured by Ansible (this is also necessary when
``neutron_external_interface`` is configured correctly for hosts in the
``compute`` group.
+Internal DNS resolution
+~~~~~~~~~~~~~~~~~~~~~~~
+
+The Networking service enables users to control the name assigned
+to ports using two attributes associated with ports, networks, and
+floating IPs. The following table shows the attributes available for each
+one of these resources:
+
+.. list-table::
+ :header-rows: 1
+ :widths: 30 30 30
+
+ * - Resource
+ - dns_name
+ - dns_domain
+ * - Ports
+ - Yes
+ - Yes
+ * - Networks
+ - No
+ - Yes
+ * - Floating IPs
+ - Yes
+ - Yes
+
+To enable this functionality, you need to set the following in
+``/etc/kolla/globals.yml``:
+
+.. code-block:: yaml
+
+ neutron_dns_integration: "yes"
+ neutron_dns_domain: "example.org."
+
+.. important::
+ The ``neutron_dns_domain`` value has to be different from
+ ``openstacklocal`` (its default value) and has to end with a period ``.``.
+
+.. note::
+ The integration of the Networking service with an external DNSaaS (DNS-as-a-Service)
+ is described in :ref:`designate-guide`.
+
OpenvSwitch (ml2/ovs)
~~~~~~~~~~~~~~~~~~~~~
@@ -109,6 +181,36 @@ to using the native OVS firewall driver by employing a configuration override
[securitygroup]
firewall_driver = openvswitch
+L3 agent high availability
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+L3 and DHCP agents can be created in a high availability (HA) state with:
+
+.. code-block:: yaml
+
+ enable_neutron_agent_ha: "yes"
+
+This allows networking to fail over across controllers if the active agent is
+stopped. If this option is enabled, it can be advantageous to also set:
+
+.. code-block:: yaml
+
+ neutron_l3_agent_failover_delay:
+
+Agents sometimes need to be restarted. This delay (in seconds) is applied
+between the restart operations of each agent. When set properly, it will
+prevent network outages caused by all agents restarting at the same time. The
+exact length of time it takes to restart is dependent on hardware and the
+number of routers present. A general rule of thumb is to set the value to
+``40 + 3n`` where ``n`` is the number of routers. For example, with 5 routers,
+``40 + (3 * 5) = 55``, so the value could be set to 55. A much better
+approach, however, is to first time how long an outage lasts, then set the
+value accordingly.
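+
+For example, with 5 routers the rule of thumb above gives:
+
+.. code-block:: yaml
+
+   neutron_l3_agent_failover_delay: 55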
+
+The default value is 0. A nonzero starting value would only result in
+outages if the failover time was greater than the delay, which would be more
+difficult to diagnose than consistent behaviour.
+
OVN (ml2/ovn)
~~~~~~~~~~~~~
@@ -141,11 +243,20 @@ This might be desired for example when Ironic bare metal nodes are
used as a compute service. Currently OVN is not able to answer DHCP
queries on port type external, this is where Neutron agent helps.
+In order to deploy Neutron OVN Agent you need to set the following:
+
+.. path /etc/kolla/globals.yml
+.. code-block:: yaml
+
+ neutron_enable_ovn_agent: "yes"
+
+Currently the agent is only needed for QoS for hardware offloaded ports.
+
Mellanox Infiniband (ml2/mlnx)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In order to add ``mlnx_infiniband`` to the list of mechanism driver
-for ``neutron`` to support Infiniband virtual funtions, you need to
+for ``neutron`` to support Infiniband virtual functions, you need to
set the following (assuming neutron SR-IOV agent is also enabled using
``enable_neutron_sriov`` flag):
@@ -175,3 +286,25 @@ authentication in external systems (e.g. in ``networking-generic-switch`` or
You can set ``neutron_ssh_key`` variable in ``passwords.yml`` to control the
used key.
+
+Custom Kernel Module Configuration for Neutron
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Neutron may require specific kernel modules for certain functionalities.
+While there are predefined default modules in the Ansible role, users have
+the flexibility to add custom modules as needed.
+
+To add custom kernel modules for Neutron, modify the configuration in
+``/etc/kolla/globals.yml``:
+
+.. code-block:: yaml
+
+ neutron_modules_extra:
+ - name: 'nf_conntrack_tftp'
+ params: 'hashsize=4096'
+
+In this example:
+
+- ``neutron_modules_extra``: Allows users to specify additional modules and
+  their associated parameters. The given configuration adjusts the
+  ``hashsize`` parameter for the ``nf_conntrack_tftp`` module.
diff --git a/doc/source/reference/networking/octavia.rst b/doc/source/reference/networking/octavia.rst
index cbb1772311..53f266065f 100644
--- a/doc/source/reference/networking/octavia.rst
+++ b/doc/source/reference/networking/octavia.rst
@@ -2,10 +2,10 @@
Octavia
=======
-Octavia provides load balancing as a service. This guide covers configuration
-of Octavia for the Amphora driver. See the :octavia-doc:`Octavia documentation
-<>` for full details. The :octavia-doc:`installation guide
-` is a useful reference.
+Octavia provides load balancing as a service. This guide covers two providers:
+
+* Amphora
+* OVN
Enabling Octavia
================
@@ -16,14 +16,22 @@ Enable the octavia service in ``globals.yml``:
enable_octavia: "yes"
+Amphora provider
+================
+
+This section covers configuration of Octavia for the Amphora driver. See the
+:octavia-doc:`Octavia documentation <>` for full details. The
+:octavia-doc:`installation guide ` is a useful
+reference.
+
Certificates
-============
+------------
Octavia requires various TLS certificates for operation. Since the Victoria
release, Kolla Ansible supports generating these certificates automatically.
Option 1: Automatically generating Certificates
------------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Kolla Ansible provides default values for the certificate issuer and owner
fields. You can customize this via ``globals.yml``, for example:
@@ -45,7 +53,7 @@ The certificates and keys will be generated under
``/etc/kolla/config/octavia``.
Option 2: Manually generating certificates
-------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Follow the :octavia-doc:`octavia documentation
` to generate certificates for Amphorae. These
@@ -67,8 +75,18 @@ used to encrypt the CA key:
.. _octavia-network:
+Monitoring certificate expiry
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You can use the following command to check if any of the certificates will
+expire within a given number of days:
+
+.. code-block:: console
+
+ kolla-ansible octavia-certificates --check-expiry <days>
+
Networking
-==========
+----------
Octavia worker and health manager nodes must have access to the Octavia
management network for communication with Amphorae.
@@ -91,7 +109,7 @@ the traffic is also bridged to Open vSwitch on the controllers.
This interface should have an IP address on the Octavia management subnet.
Registering OpenStack resources
-===============================
+-------------------------------
Since the Victoria release, there are two ways to configure Octavia.
@@ -103,7 +121,7 @@ The first option is simpler, and is recommended for new users. The second
option provides more flexibility, at the cost of complexity for the operator.
Option 1: Automatic resource registration (default, recommended)
-================================================================
+----------------------------------------------------------------
For automatic resource registration, Kolla Ansible will register the following
resources:
@@ -115,8 +133,12 @@ resources:
The configuration for these resources may be customised before deployment.
+Note that for this to work, access to the Nova and Neutron APIs is required.
+This is true also for the ``kolla-ansible genconfig`` command and when using
+Ansible check mode.
+
Customize Amphora flavor
-------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~
The default amphora flavor is named ``amphora`` with 1 vCPU, 1GB RAM and 5GB
disk. You can customize this flavor by changing ``octavia_amp_flavor`` in
@@ -147,7 +169,7 @@ The following defaults are used:
disk: 5
Customise network and subnet
-----------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Configure Octavia management network and subnet with ``octavia_amp_network`` in
``globals.yml``. This must be a network that is :ref:`accessible from the
@@ -209,7 +231,7 @@ Once the installation is completed, you need to :ref:`register an amphora image
in glance `.
Option 2: Manual resource registration
-======================================
+--------------------------------------
In this case, Kolla Ansible will not register resources for Octavia. Set
``octavia_auto_configure`` to no in ``globals.yml``:
@@ -241,7 +263,7 @@ as follows:
existing Amphorae.
Amphora flavor
---------------
+~~~~~~~~~~~~~~
Register the flavor in Nova:
@@ -252,7 +274,7 @@ Register the flavor in Nova:
Make a note of the ID of the flavor, or specify one via ``--id``.
Keypair
--------
+~~~~~~~
Register the keypair in Nova:
@@ -261,7 +283,7 @@ Register the keypair in Nova:
openstack keypair create --public-key octavia_ssh_key
Network and subnet
-------------------
+~~~~~~~~~~~~~~~~~~
Register the management network and subnet in Neutron. This must be a network
that is :ref:`accessible from the controllers `. Typically
@@ -281,7 +303,7 @@ a VLAN provider network is used.
Make a note of the ID of the network.
Security group
---------------
+~~~~~~~~~~~~~~
Register the security group in Neutron.
@@ -295,7 +317,7 @@ Register the security group in Neutron.
Make a note of the ID of the security group.
Kolla Ansible configuration
----------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
The following options should be added to ``globals.yml``.
@@ -316,7 +338,7 @@ Now deploy Octavia:
.. _octavia-amphora-image:
Amphora image
-=============
+-------------
It is necessary to build an Amphora image. On CentOS / Rocky 9:
@@ -374,10 +396,10 @@ Register the image in Glance:
the tag is "amphora", octavia uses the tag to determine which image to use.
Debug
-=====
+-----
SSH to an amphora
------------------
+~~~~~~~~~~~~~~~~~
Log in to one of the octavia-worker nodes, and SSH into the amphora.
@@ -391,14 +413,14 @@ login into one of octavia-worker nodes, and ssh into amphora.
octavia-worker nodes.
Upgrade
-=======
+-------
If you upgrade from the Ussuri release, you must disable
``octavia_auto_configure`` in ``globals.yml`` and keep your other octavia
config as before.
Development or Testing
-======================
+----------------------
Kolla Ansible provides a simple way to setup Octavia networking for
development or testing, when using the Neutron Open vSwitch ML2 mechanism
@@ -414,3 +436,35 @@ Add ``octavia_network_type`` to ``globals.yml`` and set the value to ``tenant``
octavia_network_type: "tenant"
Next, follow the deployment instructions as normal.
+
+Failure handling
+----------------
+
+On large deployments, where the neutron-openvswitch-agent sync can take
+more than 5 minutes, you can get an error on the octavia-interface.service
+systemd unit, because it cannot wait for the o-hm0 interface to be attached
+to br-int, or for the Octavia management VxLAN to be configured on that
+host. In this case you have to add ``octavia_interface_wait_timeout`` to
+``globals.yml`` and set the value to the new timeout in seconds:
+
+.. code-block:: yaml
+
+ octavia_interface_wait_timeout: 1800
+
+On deployments with up to 2500 network ports per network node, the sync
+process can take up to 30 minutes. Consider adjusting this value according
+to your deployment size.
+
+OVN provider
+============
+
+This section covers configuration of Octavia for the OVN driver. See the
+:octavia-doc:`Octavia documentation <>` and :ovn-octavia-provider-doc:`OVN
+Octavia provider documentation <>` for full details.
+
+To enable the OVN provider, set the following options in ``globals.yml``:
+
+.. code-block:: yaml
+
+ octavia_provider_drivers: "ovn:OVN provider"
+ octavia_provider_agents: "ovn"
diff --git a/doc/source/reference/networking/sriov.rst b/doc/source/reference/networking/sriov.rst
index 404dddd8ba..73afa7b0f0 100644
--- a/doc/source/reference/networking/sriov.rst
+++ b/doc/source/reference/networking/sriov.rst
@@ -203,8 +203,8 @@ Compute service on the compute node also require the ``alias`` option under the
available_filters = nova.scheduler.filters.all_filters
[pci]
- passthrough_whitelist = [{"vendor_id": "8086", "product_id": "10fb"}]
- alias = [{"vendor_id":"8086", "product_id":"10ed", "device_type":"type-VF", "name":"vf1"}]
+ device_spec = [{"vendor_id": "8086", "product_id": "10fb"}]
+ alias = {"vendor_id":"8086", "product_id":"10ed", "device_type":"type-VF", "name":"vf1"}
Run deployment.
diff --git a/doc/source/reference/orchestration-and-nfv/tacker-guide.rst b/doc/source/reference/orchestration-and-nfv/tacker-guide.rst
index 177d0a9731..78fdf4821b 100644
--- a/doc/source/reference/orchestration-and-nfv/tacker-guide.rst
+++ b/doc/source/reference/orchestration-and-nfv/tacker-guide.rst
@@ -17,7 +17,6 @@ to be enabled to operate correctly.
* Core compute stack (nova, neutron, glance, etc)
* Heat
-* Mistral + Redis
* Barbican (Required only for multinode)
Optionally tacker supports the following services and features.
@@ -43,8 +42,6 @@ In order to enable them, you need to edit the file
enable_tacker: "yes"
enable_barbican: "yes"
- enable_mistral: "yes"
- enable_redis: "yes"
.. warning::
@@ -87,11 +84,6 @@ create a very basic VNF from a cirros image in ``demo-net`` network.
Install python-tackerclient.
-.. note::
-
- Barbican, heat and mistral python clients are in tacker's
- requirements and will be installed as dependency.
-
.. code-block:: console
$ pip install python-tackerclient
diff --git a/doc/source/reference/rating/cloudkitty-guide.rst b/doc/source/reference/rating/cloudkitty-guide.rst
index a95ea6c1ba..061e096754 100644
--- a/doc/source/reference/rating/cloudkitty-guide.rst
+++ b/doc/source/reference/rating/cloudkitty-guide.rst
@@ -29,12 +29,8 @@ CloudKitty Collector backend
CloudKitty natively supports multiple collector backends.
-By default Kolla Ansible uses the Gnocchi backend,
-however we also support using the following backend types:
-
-- ``prometheus`` - Use Prometheus metrics as dataset for cloudkitty to process.
-- ``monasca`` - Use Openstack Monasca metrics as dataset for cloudkitty to
- process.
+By default Kolla Ansible uses the Gnocchi backend. Using data
+collected by Prometheus is also supported.
The configuration parameter related to this option is
``cloudkitty_collector_backend``.
@@ -45,12 +41,6 @@ To use the Prometheus collector backend:
cloudkitty_collector_backend: prometheus
-Alternatively, to use the Monasca collector backend:
-
-.. code-block:: yaml
-
- cloudkitty_collector_backend: monasca
-
CloudKitty Fetcher Backend
~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -72,14 +62,22 @@ Cloudkitty Storage Backend
As for collectors, CloudKitty supports multiple backend to store ratings.
By default, Kolla Ansible uses the InfluxDB based backend.
-Another famous alternative is Elasticsearch and can be activated in Kolla
-Ansible using the ``cloudkitty_storage_backend`` configuration option in
+Another popular alternative is OpenSearch, which can be activated in Kolla
+Ansible using the ``cloudkitty_storage_backend`` configuration option in
your ``globals.yml`` configuration file:
+.. code-block:: yaml
+
+ cloudkitty_storage_backend: opensearch
+
+Using an external Elasticsearch backend is still possible with the following
+configuration:
+
.. code-block:: yaml
cloudkitty_storage_backend: elasticsearch
+ cloudkitty_elasticsearch_url: http://HOST:PORT
-You can only use one backend type at a time, selecting elasticsearch
-will automatically enable Elasticsearch deployment and creation of the
+You can only use one backend type at a time; selecting ``opensearch``
+will automatically enable OpenSearch deployment and creation of the
required CloudKitty index.
diff --git a/doc/source/reference/shared-services/glance-guide.rst b/doc/source/reference/shared-services/glance-guide.rst
index 89b82bb6f0..48e8774aaa 100644
--- a/doc/source/reference/shared-services/glance-guide.rst
+++ b/doc/source/reference/shared-services/glance-guide.rst
@@ -63,6 +63,34 @@ To enable the vmware backend manually:
glance_backend_vmware: "yes"
+Glance with S3 Backend
+~~~~~~~~~~~~~~~~~~~~~~
+
+Configuring Glance for S3 includes the following steps:
+
+#. Enable Glance S3 backend in ``globals.yml``:
+
+   .. code-block:: yaml
+
+      glance_backend_s3: "yes"
+
+#. Configure S3 connection details in ``/etc/kolla/globals.yml``:
+
+ * ``glance_backend_s3_url`` (example: ``http://127.0.0.1:9000``)
+ * ``glance_backend_s3_access_key`` (example: ``minio``)
+ * ``glance_backend_s3_bucket`` (example: ``glance``)
+ * ``glance_backend_s3_secret_key`` (example: ``admin``)
+
+#. If you wish to use a single S3 backend for all supported services,
+   use the following variables:
+
+ * ``s3_url``
+ * ``s3_access_key``
+ * ``s3_glance_bucket``
+ * ``s3_secret_key``
+
+ All Glance S3 configurations use these options as default values.
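+
+As a sketch, the per-service settings above combine in
+``/etc/kolla/globals.yml`` as follows (using the example values given above):
+
+.. code-block:: yaml
+
+   glance_backend_s3: "yes"
+   glance_backend_s3_url: "http://127.0.0.1:9000"
+   glance_backend_s3_access_key: "minio"
+   glance_backend_s3_bucket: "glance"
+   glance_backend_s3_secret_key: "admin"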
+
Swift backend
~~~~~~~~~~~~~
diff --git a/doc/source/reference/shared-services/horizon-guide.rst b/doc/source/reference/shared-services/horizon-guide.rst
index 2660c21fb1..09a1362225 100644
--- a/doc/source/reference/shared-services/horizon-guide.rst
+++ b/doc/source/reference/shared-services/horizon-guide.rst
@@ -18,7 +18,7 @@ Horizon by using a custom python settings file that will override
the default options set on the local_settings file.
As an example, for setting a different (material) theme as the default one,
-a file named custom_local_settings should be created under the directory
+a file named ``_9999-custom-settings.py`` should be created under the directory
``{{ node_custom_config }}/horizon/`` with the following contents:
.. code-block:: python
@@ -45,7 +45,7 @@ This entry updates AVAILABLE_THEMES adding the new theme at the list end.
Theme files have to be copied into:
``{{ node_custom_config }}/horizon/themes/my_custom_theme``.
-The new theme can be set as default in custom_local_settings:
+The new theme can be set as default in ``_9999-custom-settings.py``:
.. code-block:: python
diff --git a/doc/source/reference/shared-services/index.rst b/doc/source/reference/shared-services/index.rst
index 89f0830c58..17884cb763 100644
--- a/doc/source/reference/shared-services/index.rst
+++ b/doc/source/reference/shared-services/index.rst
@@ -11,3 +11,4 @@ like backends, dashboards and so on.
glance-guide
horizon-guide
keystone-guide
+ skyline-guide
diff --git a/doc/source/reference/shared-services/keystone-guide.rst b/doc/source/reference/shared-services/keystone-guide.rst
index dc3d766c54..d0958a3f92 100644
--- a/doc/source/reference/shared-services/keystone-guide.rst
+++ b/doc/source/reference/shared-services/keystone-guide.rst
@@ -32,8 +32,8 @@ a buffer key - three in total. If the rotation interval is set lower than the
sum of the token expiry and token allow expired window, more active keys will
be configured in Keystone as necessary.
-Further infomation on Fernet tokens is available in the :keystone-doc:`Keystone
-documentation `.
+Further information on Fernet tokens is available in the
+:keystone-doc:`Keystone documentation `.
Federated identity
------------------
@@ -96,6 +96,14 @@ used by OpenStack command line client. Example config shown below:
keystone_federation_oidc_jwks_uri: "https:////discovery/v2.0/keys"
+Some identity providers need additional mod_auth_openidc config.
+Example for Keycloak shown below:
+
+.. code-block:: yaml
+
+ keystone_federation_oidc_additional_options:
+ OIDCTokenBindingPolicy: disabled
+
Identity providers configurations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/reference/shared-services/skyline-guide.rst b/doc/source/reference/shared-services/skyline-guide.rst
new file mode 100644
index 0000000000..fb735c997c
--- /dev/null
+++ b/doc/source/reference/shared-services/skyline-guide.rst
@@ -0,0 +1,98 @@
+.. _skyline-guide:
+
+===========================
+Skyline OpenStack dashboard
+===========================
+
+Skyline is a dashboard for OpenStack with a modern technology stack.
+
+Single Sign On (SSO)
+~~~~~~~~~~~~~~~~~~~~
+
+Skyline supports SSO with an OpenID IdP. When you configure an IdP with
+the ``openid`` protocol, Kolla will automatically enable SSO and set up the
+trusted dashboard URL for Keystone. If you don't want to use SSO in Skyline,
+you can disable it by setting ``skyline_enable_sso`` to "no":
+
+.. code-block:: yaml
+
+ skyline_enable_sso: "no"
+
+If you want to enable it without setting up the IdP with Kolla you can simply
+enable it with:
+
+.. code-block:: yaml
+
+ skyline_enable_sso: "yes"
+
+Customize logos
+~~~~~~~~~~~~~~~
+
+To change some of the logos used by Skyline you can overwrite the default
+logos. Not all images can be replaced: you can change the browser icon, the
+two logos on the login screen and the logo in the header once you are logged
+in.
+
+To overwrite the files, create the directory
+``{{ node_custom_config }}/skyline/logos`` and place the files you want to use
+there.
+
+Make sure you have the correct filenames and directory structure as described
+below.
+
+Additionally add the files or directories you created to
+``skyline_custom_logos``, a list of files or directories that will be copied
+inside the container.
+
+.. list-table:: Logos/images that can be overwritten
+ :widths: 30 70
+ :header-rows: 1
+
+ * - Logo/image
+ - Path in ``{{ node_custom_config }}/skyline/logos``
+ * - Browser Icon
+ - ./favicon.ico
+ * - Login page left logo
+ - ./asset/image/logo.png
+ * - Login page right logo
+ - ./asset/image/loginRightLogo.png
+ * - Logo header logged in
+ - ./asset/image/cloud-logo.svg
+
+
+To replace only the browser icon, set:
+
+.. code-block:: yaml
+
+ skyline_custom_logos: ["favicon.ico"]
+
+To replace files in ``asset``, set:
+
+.. code-block:: yaml
+
+ skyline_custom_logos: ["asset"]
+
+To replace all of them, use:
+
+.. code-block:: yaml
+
+ skyline_custom_logos: ["asset", "favicon.ico"]
+
+Since the files are overwritten inside the container, you have to remove the
+container and recreate it if you want to revert to the default logos. Just
+removing the configuration will not remove the files.
+
+External Swift
+~~~~~~~~~~~~~~
+
+If you are running an external Swift compatible object store you can add
+it to the Skyline dashboard. Since Skyline cannot use Keystone's
+endpoint API, you have to tell it the URL of your external service.
+
+You have to set ``skyline_external_swift`` and
+``skyline_external_swift_url`` in your configuration:
+
+.. code-block:: yaml
+
+ skyline_external_swift: "yes"
+ skyline_external_swift_url: "https:///swift"
diff --git a/doc/source/reference/storage/cinder-guide-hnas.rst b/doc/source/reference/storage/cinder-guide-hnas.rst
deleted file mode 100644
index 3c92a98313..0000000000
--- a/doc/source/reference/storage/cinder-guide-hnas.rst
+++ /dev/null
@@ -1,210 +0,0 @@
-.. _cinder-guide-hnas:
-
-========================================================
-Hitachi NAS Platform iSCSI and NFS drives for OpenStack
-========================================================
-
-Overview
-~~~~~~~~
-
-The Block Storage service provides persistent block storage resources that
-Compute instances can consume. This includes secondary attached storage similar
-to the Amazon Elastic Block Storage (EBS) offering. In addition, you can write
-images to a Block Storage device for Compute to use as a bootable persistent
-instance.
-
-Requirements
-------------
-
-- Hitachi NAS Platform Models 3080, 3090, 4040, 4060, 4080, and 4100.
-
-- HNAS/SMU software version is 12.2 or higher.
-
-- HNAS configuration and management utilities to create a storage pool (span)
- and an EVS.
-
- - GUI (SMU).
-
- - SSC CLI.
-
-- You must set an iSCSI domain to EVS
-
-Supported shared file systems and operations
---------------------------------------------
-
-The NFS and iSCSI drivers support these operations:
-
-- Create, delete, attach, and detach volumes.
-
-- Create, list, and delete volume snapshots.
-
-- Create a volume from a snapshot.
-
-- Copy an image to a volume.
-
-- Copy a volume to an image.
-
-- Clone a volume.
-
-- Extend a volume.
-
-- Get volume statistics.
-
-- Manage and unmanage a volume.
-
-- Manage and unmanage snapshots (HNAS NFS only).
-
-Configuration example for Hitachi NAS Platform NFS
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-NFS backend
------------
-
-Enable cinder hnas backend nfs in ``/etc/kolla/globals.yml``
-
-.. code-block:: yaml
-
- enable_cinder_backend_hnas_nfs: "yes"
-
-Create or modify the file ``/etc/kolla/config/cinder.conf`` and
-add the contents:
-
-.. path /etc/kolla/config/cinder.conf
-.. code-block:: ini
-
- [DEFAULT]
- enabled_backends = hnas-nfs
-
- [hnas-nfs]
- volume_driver = cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver
- volume_nfs_backend = hnas_nfs_backend
- hnas_nfs_username = supervisor
- hnas_nfs_mgmt_ip0 =
- hnas_chap_enabled = True
-
- hnas_nfs_svc0_volume_type = nfs_gold
- hnas_nfs_svc0_hdp = /
-
-Then set password for the backend in ``/etc/kolla/passwords.yml``:
-
-.. code-block:: yaml
-
- hnas_nfs_password: supervisor
-
-Configuration on Kolla deployment
----------------------------------
-
-Enable Shared File Systems service and HNAS driver in
-``/etc/kolla/globals.yml``
-
-.. code-block:: yaml
-
- enable_cinder: "yes"
-
-Configuration on HNAS
----------------------
-
-Create the data HNAS network in Kolla OpenStack:
-
-List the available tenants:
-
-.. code-block:: console
-
- openstack project list
-
-Create a network to the given tenant (service), providing the tenant ID,
-a name for the network, the name of the physical network over which the
-virtual network is implemented, and the type of the physical mechanism by
-which the virtual network is implemented:
-
-.. code-block:: console
-
- neutron net-create --tenant-id hnas_network \
- --provider:physical_network=physnet2 --provider:network_type=flat
-
-Create a subnet to the same tenant (service), the gateway IP of this subnet,
-a name for the subnet, the network ID created before, and the CIDR of
-subnet:
-
-.. code-block:: console
-
- neutron subnet-create --tenant-id --gateway \
- --name hnas_subnet
-
-Add the subnet interface to a router, providing the router ID and subnet
-ID created before:
-
-.. code-block:: console
-
- neutron router-interface-add
-
-Create volume
-~~~~~~~~~~~~~
-
-Create a non-bootable volume.
-
-.. code-block:: console
-
- openstack volume create --size 1 my-volume
-
-Verify Operation.
-
-.. code-block:: console
-
- cinder show my-volume
-
- +--------------------------------+--------------------------------------+
- | Property | Value |
- +--------------------------------+--------------------------------------+
- | attachments | [] |
- | availability_zone | nova |
- | bootable | false |
- | consistencygroup_id | None |
- | created_at | 2017-01-17T19:02:45.000000 |
- | description | None |
- | encrypted | False |
- | id | 4f5b8ae8-9781-411e-8ced-de616ae64cfd |
- | metadata | {} |
- | migration_status | None |
- | multiattach | False |
- | name | my-volume |
- | os-vol-host-attr:host | compute@hnas-iscsi#iscsi_gold |
- | os-vol-mig-status-attr:migstat | None |
- | os-vol-mig-status-attr:name_id | None |
- | os-vol-tenant-attr:tenant_id | 16def9176bc64bd283d419ac2651e299 |
- | replication_status | disabled |
- | size | 1 |
- | snapshot_id | None |
- | source_volid | None |
- | status | available |
- | updated_at | 2017-01-17T19:02:46.000000 |
- | user_id | fb318b96929c41c6949360c4ccdbf8c0 |
- | volume_type | None |
- +--------------------------------+--------------------------------------+
-
- nova volume-attach INSTANCE_ID VOLUME_ID auto
-
- +----------+--------------------------------------+
- | Property | Value |
- +----------+--------------------------------------+
- | device | /dev/vdc |
- | id | 4f5b8ae8-9781-411e-8ced-de616ae64cfd |
- | serverId | 3bf5e176-be05-4634-8cbd-e5fe491f5f9c |
- | volumeId | 4f5b8ae8-9781-411e-8ced-de616ae64cfd |
- +----------+--------------------------------------+
-
- openstack volume list
-
- +--------------------------------------+---------------+----------------+------+-------------------------------------------+
- | ID | Display Name | Status | Size | Attached to |
- +--------------------------------------+---------------+----------------+------+-------------------------------------------+
- | 4f5b8ae8-9781-411e-8ced-de616ae64cfd | my-volume | in-use | 1 | Attached to private-instance on /dev/vdb |
- +--------------------------------------+---------------+----------------+------+-------------------------------------------+
-
-For more information about how to manage volumes, see the
-:cinder-doc:`Manage volumes
-`.
-
-For more information about how HNAS driver works, see
-`Hitachi NAS Platform iSCSI and NFS drives for OpenStack
-`__.
diff --git a/doc/source/reference/storage/cinder-guide-pure.rst b/doc/source/reference/storage/cinder-guide-pure.rst
index 306ee5b496..b38c1ea23c 100644
--- a/doc/source/reference/storage/cinder-guide-pure.rst
+++ b/doc/source/reference/storage/cinder-guide-pure.rst
@@ -25,6 +25,34 @@ configure the ``FlashArray FC`` Cinder driver in ``/etc/kolla/globals.yml``.
.. end
+To use the ``Pure Storage FlashArray NVMe-RoCE`` Cinder backend, enable and
+configure the ``FlashArray NVMe-RoCE`` Cinder driver in
+``/etc/kolla/globals.yml``.
+
+.. code-block:: yaml
+
+ enable_cinder_backend_pure_roce: "yes"
+
+.. end
+
+.. note::
+
+ The NVMe-RoCE driver is only supported from OpenStack Zed and later.
+
+To use the ``Pure Storage FlashArray NVMe-TCP`` Cinder backend, enable and
+configure the ``FlashArray NVMe-TCP`` Cinder driver in
+``/etc/kolla/globals.yml``.
+
+.. code-block:: yaml
+
+ enable_cinder_backend_pure_nvme_tcp: "yes"
+
+.. end
+
+.. note::
+
+ The NVMe-TCP driver is only supported from OpenStack 2023.2 (Bobcat) and later.
+
It is important to note that you cannot mix iSCSI and FC Pure Storage
FlashArray drivers in the same OpenStack cluster.
@@ -34,16 +62,16 @@ Also set the values for the following parameters in ``/etc/kolla/globals.yml``:
- ``pure_san_ip``
For details on how to use these parameters, refer to the
-`Pure Storage Cinder Reference Guide__`.
+`Pure Storage Cinder Reference Guide `_.
There are numerous other parameters that can be set for this driver and
these are detailed in the above link.
If you wish to use any of these parameters then refer to the
-`Service Configuration__`
+`Service Configuration `_
documentation for instructions using the INI update strategy.
The use of this backend requires that the ``purestorage`` SDK package is
installed in the ``cinder-volume`` container. To do this follow the steps
-outlined in the `kolla image building guide__`
+outlined in the `kolla image building guide `_
particularly the ``Package Customisation`` and ``Custom Repos`` sections.
diff --git a/doc/source/reference/storage/cinder-guide.rst b/doc/source/reference/storage/cinder-guide.rst
index 56221d322f..41f956da8d 100644
--- a/doc/source/reference/storage/cinder-guide.rst
+++ b/doc/source/reference/storage/cinder-guide.rst
@@ -11,11 +11,27 @@ Cinder can be deployed using Kolla and supports the following storage
backends:
* ceph
-* hnas_nfs
* iscsi
* lvm
* nfs
+HA
+~~
+
+When using cinder-volume in an HA configuration (more than one host in
+cinder-volume/storage group):
+
+- Make sure that the driver you are using supports `Active/Active High Availability `
+ configuration
+- Add ``cinder_cluster_name: example_cluster_name`` to your ``globals.yml`` (or
+ host_vars for advanced multi-cluster configuration), as shown below
+
+.. note::
+
+ In case of non-standard configurations (e.g. mixed HA and non-HA Cinder backends),
+ you can skip the prechecks by setting ``cinder_cluster_skip_precheck`` to
+ ``true``.
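+
+For example, a minimal ``globals.yml`` snippet (the cluster name is
+illustrative):
+
+.. code-block:: yaml
+
+   cinder_cluster_name: "example_cluster_name"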
+
LVM
~~~
@@ -201,6 +217,34 @@ in Kolla, the following parameter must be specified in ``globals.yml``:
All configuration for custom NFS backend should be performed
via ``cinder.conf`` in config overrides directory.
+Cinder-Backup with S3 Backend
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Configuring Cinder-Backup for S3 includes the following steps:
+
+#. Enable Cinder-Backup S3 backend in ``globals.yml``:
+
+   .. code-block:: yaml
+
+      cinder_backup_driver: "s3"
+
+#. Configure S3 connection details in ``/etc/kolla/globals.yml``:
+
+ * ``cinder_backup_s3_url`` (example: ``http://127.0.0.1:9000``)
+ * ``cinder_backup_s3_access_key`` (example: ``minio``)
+ * ``cinder_backup_s3_bucket`` (example: ``cinder``)
+ * ``cinder_backup_s3_secret_key`` (example: ``admin``)
+
+#. If you wish to use a single S3 backend for all supported services,
+   use the following variables:
+
+ * ``s3_url``
+ * ``s3_access_key``
+ * ``s3_glance_bucket``
+ * ``s3_secret_key``
+
+ All Cinder-Backup S3 configurations use these options as default values.
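+
+As a sketch, the per-service settings above combine in
+``/etc/kolla/globals.yml`` as follows (using the example values given above):
+
+.. code-block:: yaml
+
+   cinder_backup_driver: "s3"
+   cinder_backup_s3_url: "http://127.0.0.1:9000"
+   cinder_backup_s3_access_key: "minio"
+   cinder_backup_s3_bucket: "cinder"
+   cinder_backup_s3_secret_key: "admin"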
+
Customizing backend names in cinder.conf
----------------------------------------
@@ -229,9 +273,6 @@ that appears in cinder.conf:
* - Network File System (NFS)
- cinder_backend_nfs_name
- nfs-1
- * - Hitachi NAS Platform NFS
- - cinder_backend_hnas_nfs_name
- - hnas-nfs
* - VMware Virtual Machine Disk File
- cinder_backend_vmwarevc_vmdk_name
- vmwarevc-vmdk
@@ -247,6 +288,12 @@ that appears in cinder.conf:
* - Pure Storage FlashArray for OpenStack
- cinder_backend_pure_fc_name
- Pure-FlashArray-fc
+ * - Pure Storage FlashArray for OpenStack
+ - cinder_backend_pure_roce_name
+ - Pure-FlashArray-roce
+ * - Pure Storage FlashArray for OpenStack
+ - cinder_backend_pure_nvme_tcp_name
+ - Pure-FlashArray-nvme-tcp
These are the names you use when
`configuring `_
diff --git a/doc/source/reference/storage/external-ceph-guide.rst b/doc/source/reference/storage/external-ceph-guide.rst
index fe47442f4e..cf3bd0e5a8 100644
--- a/doc/source/reference/storage/external-ceph-guide.rst
+++ b/doc/source/reference/storage/external-ceph-guide.rst
@@ -30,52 +30,145 @@ Configuring External Ceph
Ceph integration is configured for different OpenStack services independently.
+.. note::
+
+ Commands like ``ceph config generate-minimal-conf`` generate configuration
+ files that have leading tabs. These tabs break Kolla Ansible's ini parser.
+ Be sure to remove the leading tabs from your ``ceph.conf`` files when
+ copying them in the following sections.
+
+When OpenStack services access Ceph via a Ceph client, the Ceph client will
+look for a local keyring. Ceph presets the keyring setting with four keyring
+names by default.
+
+* The four default keyring names are as follows:
+
+ * ``/etc/ceph/$cluster.$name.keyring``
+ * ``/etc/ceph/$cluster.keyring``
+ * ``/etc/ceph/keyring``
+ * ``/etc/ceph/keyring.bin``
+
+The ``$cluster`` metavariable found in the first two default keyring names
+above is your Ceph cluster name as defined by the name of the Ceph
+configuration file: for example, if the Ceph configuration file is named
+``ceph.conf``, then your Ceph cluster name is ceph and the second name above
+would be ``ceph.keyring``. The ``$name`` metavariable is the user type and
+user ID: for example, given the user ``client.admin``, the first name above
+would be ``ceph.client.admin.keyring``. This principle is applied in the
+services documentation below.
+
+.. note::
+
+ More information about user configuration and related keyrings can be found in the
+ official Ceph documentation at https://docs.ceph.com/en/latest/rados/operations/user-management/#keyring-management
+
+.. note::
+
+ The examples below use the default ``$cluster`` and ``$name`` values, which
+ can be configured via kolla-ansible by setting ``ceph_cluster`` and the
+ user per project, or at the host level (nova) in the inventory file.
+
Glance
------
Ceph RBD can be used as a storage backend for Glance images. Configuring Glance
for Ceph includes the following steps:
-#. Enable Glance Ceph backend in ``globals.yml``:
+* Enable Glance Ceph backend in ``globals.yml``:
- .. code-block:: yaml
+ .. code-block:: yaml
- glance_backend_ceph: "yes"
+ glance_backend_ceph: "yes"
-#. Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
+* Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
- * ``ceph_glance_keyring`` (default: ``ceph.client.glance.keyring``)
- * ``ceph_glance_user`` (default: ``glance``)
- * ``ceph_glance_pool_name`` (default: ``images``)
+ * ``ceph_glance_user`` (default: ``glance``)
+ * ``ceph_glance_pool_name`` (default: ``images``)
-#. Copy Ceph configuration file to ``/etc/kolla/config/glance/ceph.conf``
+* Copy Ceph configuration file to ``/etc/kolla/config/glance/ceph.conf``
- .. path /etc/kolla/config/glance/ceph.conf
- .. code-block:: ini
+ .. path /etc/kolla/config/glance/ceph.conf
+ .. code-block:: ini
- [global]
- fsid = 1d89fec3-325a-4963-a950-c4afedd37fe3
- mon_initial_members = ceph-0
- mon_host = 192.168.0.56
- auth_cluster_required = cephx
- auth_service_required = cephx
- auth_client_required = cephx
+ [global]
+ fsid = 1d89fec3-325a-4963-a950-c4afedd37fe3
+ keyring = /etc/ceph/ceph.client.glance.keyring
+ mon_initial_members = ceph-0
+ mon_host = 192.168.0.56
+ auth_cluster_required = cephx
+ auth_service_required = cephx
+ auth_client_required = cephx
-#. Copy Ceph keyring to ``/etc/kolla/config/glance/``
+* Copy Ceph keyring to ``/etc/kolla/config/glance/ceph.client.glance.keyring``
-#. For copy-on-write set following in ``/etc/kolla/config/glance.conf``:
+To configure multiple Ceph backends with Glance, which is useful
+for multistore:
- .. path /etc/kolla/config/glance.conf
- .. code-block:: ini
+* Copy the Ceph configuration files into ``/etc/kolla/config/glance/`` using
+ different names for each
+
+ ``/etc/kolla/config/glance/ceph1.conf``
+
+ .. path /etc/kolla/config/glance/ceph1.conf
+ .. code-block:: ini
+
+ [global]
+ fsid = 1d89fec3-325a-4963-a950-c4afedd37fe3
+ keyring = /etc/ceph/ceph1.client.glance.keyring
+ mon_initial_members = ceph-0
+ mon_host = 192.168.0.56
+ auth_cluster_required = cephx
+ auth_service_required = cephx
+ auth_client_required = cephx
+
+ ``/etc/kolla/config/glance/ceph2.conf``
+
+ .. path /etc/kolla/config/glance/ceph2.conf
+ .. code-block:: ini
+
+ [global]
+ fsid = dbfea068-89ca-4d04-bba0-1b8a56c3abc8
+ keyring = /etc/ceph/ceph2.client.glance.keyring
+ mon_initial_members = ceph-0
+ mon_host = 192.10.0.100
+ auth_cluster_required = cephx
+ auth_service_required = cephx
+ auth_client_required = cephx
+
+* Declare Ceph backends in ``globals.yml``
+
+ .. code-block:: yaml
+
+ glance_ceph_backends:
+ - name: "ceph1-rbd"
+ type: "rbd"
+ cluster: "ceph1"
+ user: "glance"
+ pool: "images"
+ enabled: "{{ glance_backend_ceph | bool }}"
+ - name: "ceph2-rbd"
+ type: "rbd"
+ cluster: "ceph2"
+ user: "glance"
+ pool: "images"
+ enabled: "{{ glance_backend_ceph | bool }}"
+
+* Copy Ceph keyring to ``/etc/kolla/config/glance/ceph1.client.glance.keyring``
+ and analogously to ``/etc/kolla/config/glance/ceph2.client.glance.keyring``
+
+* For copy-on-write set following in ``/etc/kolla/config/glance.conf``:
+
+ .. path /etc/kolla/config/glance.conf
+ .. code-block:: ini
- [GLOBAL]
- show_image_direct_url = True
+ [DEFAULT]
+ show_image_direct_url = True
.. warning::
- ``show_image_direct_url`` can present a security risk if using more
- than just Ceph as Glance backend(s). Please see
- :glance-doc:`Glance show_image_direct_url `
+ ``show_image_direct_url`` can present a security risk if using more
+ than just Ceph as Glance backend(s). Please see
+ :glance-doc:`Glance show_image_direct_url `
Cinder
------
@@ -83,76 +176,160 @@ Cinder
Ceph RBD can be used as a storage backend for Cinder volumes. Configuring
Cinder for Ceph includes following steps:
-#. When using external Ceph, there may be no nodes defined in the storage
- group. This will cause Cinder and related services relying on this group to
- fail. In this case, operator should add some nodes to the storage group,
- all the nodes where ``cinder-volume`` and ``cinder-backup`` will run:
+* When using external Ceph, there may be no nodes defined in the storage
+ group. This will cause Cinder and related services relying on this group to
+ fail. In this case, the operator should add to the storage group all the
+ nodes where ``cinder-volume`` and ``cinder-backup`` will run:
- .. code-block:: ini
+ .. code-block:: ini
+
+ [storage]
+ control01
- [storage]
- control01
+* Enable Cinder Ceph backend in ``globals.yml``:
-#. Enable Cinder Ceph backend in ``globals.yml``:
+ .. code-block:: yaml
- .. code-block:: yaml
+ cinder_backend_ceph: "yes"
- cinder_backend_ceph: "yes"
+* Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
-#. Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
+ * ``ceph_cinder_user`` (default: ``cinder``)
+ * ``ceph_cinder_pool_name`` (default: ``volumes``)
+ * ``ceph_cinder_backup_user`` (default: ``cinder-backup``)
+ * ``ceph_cinder_backup_pool_name`` (default: ``backups``)
- * ``ceph_cinder_keyring`` (default: ``ceph.client.cinder.keyring``)
- * ``ceph_cinder_user`` (default: ``cinder``)
- * ``ceph_cinder_pool_name`` (default: ``volumes``)
- * ``ceph_cinder_backup_keyring``
- (default: ``ceph.client.cinder-backup.keyring``)
- * ``ceph_cinder_backup_user`` (default: ``cinder-backup``)
- * ``ceph_cinder_backup_pool_name`` (default: ``backups``)
+* Copy Ceph configuration file to ``/etc/kolla/config/cinder/ceph.conf``
-#. Copy Ceph configuration file to ``/etc/kolla/config/cinder/ceph.conf``
+ Separate configuration options can be configured for
+ cinder-volume and cinder-backup by adding ceph.conf files to
+ ``/etc/kolla/config/cinder/cinder-volume`` and
+ ``/etc/kolla/config/cinder/cinder-backup`` respectively. They
+ will be merged with ``/etc/kolla/config/cinder/ceph.conf``.
- Separate configuration options can be configured for
- cinder-volume and cinder-backup by adding ceph.conf files to
- ``/etc/kolla/config/cinder/cinder-volume`` and
- ``/etc/kolla/config/cinder/cinder-backup`` respectively. They
- will be merged with ``/etc/kolla/config/cinder/ceph.conf``.
+* Copy Ceph keyring files to:
-#. Copy Ceph keyring files to:
+ * ``/etc/kolla/config/cinder/cinder-volume/ceph.client.cinder.keyring``
+ * ``/etc/kolla/config/cinder/cinder-backup/ceph.client.cinder.keyring``
+ * ``/etc/kolla/config/cinder/cinder-backup/ceph.client.cinder-backup.keyring``
+
+.. note::
- * ``/etc/kolla/config/cinder/cinder-volume/``
- * ``/etc/kolla/config/cinder/cinder-backup/``
- * ``/etc/kolla/config/cinder/cinder-backup/``
+ ``cinder-backup`` requires keyrings for accessing both the volumes
+ and backups pools.
+
+To configure multiple Ceph backends with Cinder, which is useful with
+availability zones:
+
+* Copy their Ceph configuration files into ``/etc/kolla/config/cinder/`` using
+ different names for each
+
+ ``/etc/kolla/config/cinder/ceph1.conf``
+
+ .. path /etc/kolla/config/cinder/ceph1.conf
+ .. code-block:: ini
+
+ [global]
+ fsid = 1d89fec3-325a-4963-a950-c4afedd37fe3
+ mon_initial_members = ceph-0
+ mon_host = 192.168.0.56
+ auth_cluster_required = cephx
+ auth_service_required = cephx
+ auth_client_required = cephx
+
+ ``/etc/kolla/config/cinder/ceph2.conf``
+
+ .. path /etc/kolla/config/cinder/ceph2.conf
+ .. code-block:: ini
+
+ [global]
+ fsid = dbfea068-89ca-4d04-bba0-1b8a56c3abc8
+ mon_initial_members = ceph-0
+ mon_host = 192.10.0.100
+ auth_cluster_required = cephx
+ auth_service_required = cephx
+ auth_client_required = cephx
+
+* Declare Ceph backends in ``globals.yml``
+
+ .. code-block:: yaml
+
+ cinder_ceph_backends:
+ - name: "ceph1-rbd"
+ cluster: "ceph1"
+ user: "cinder"
+ pool: "volumes"
+ enabled: "{{ cinder_backend_ceph | bool }}"
+ - name: "ceph2-rbd"
+ cluster: "ceph2"
+ user: "cinder"
+ pool: "volumes"
+ availability_zone: "az2"
+ enabled: "{{ cinder_backend_ceph | bool }}"
+
+ cinder_backup_ceph_backend:
+ name: "ceph2-backup-rbd"
+ cluster: "ceph2"
+ user: "cinder-backup"
+ pool: "backups"
+ type: rbd
+ enabled: "{{ enable_cinder_backup | bool }}"
+
+* Copy Ceph keyring files for all Ceph backends:
+
+ * ``/etc/kolla/config/cinder/cinder-volume/ceph1.client.cinder.keyring``
+ * ``/etc/kolla/config/cinder/cinder-backup/ceph1.client.cinder.keyring``
+ * ``/etc/kolla/config/cinder/cinder-backup/ceph2.client.cinder.keyring``
+ * ``/etc/kolla/config/cinder/cinder-backup/ceph2.client.cinder-backup.keyring``
.. note::
- ``cinder-backup`` requires two keyrings for accessing volumes
- and backup pool.
+ ``cinder-backup`` requires keyrings for accessing both the volumes
+ and backups pools.
Nova must also be configured to allow access to Cinder volumes:
-#. Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
+* Copy Ceph config and keyring file(s) to:
- * ``ceph_cinder_keyring`` (default: ``ceph.client.cinder.keyring``)
+ * ``/etc/kolla/config/nova/ceph.conf``
+ * ``/etc/kolla/config/nova/ceph.client.cinder.keyring``
-#. Copy Ceph keyring file(s) to:
+To configure a different Ceph backend per nova-compute host, which is
+useful with availability zones:
+
+* Edit the inventory file as described below:
+
+ .. code-block:: ini
- * ``/etc/kolla/config/nova/``
+ [compute]
+ hostname1 ceph_cluster=ceph1
+ hostname2 ceph_cluster=ceph2
+
+* Copy Ceph config and keyring file(s):
+
+ * ``/etc/kolla/config/nova//ceph1.conf``
+ * ``/etc/kolla/config/nova//ceph1.client.cinder.keyring``
+ * ``/etc/kolla/config/nova//ceph2.conf``
+ * ``/etc/kolla/config/nova//ceph2.client.cinder.keyring``
If ``zun`` is enabled, and you wish to use cinder volumes with zun,
it must also be configured to allow access to Cinder volumes:
-#. Enable Cinder Ceph backend for Zun in ``globals.yml``:
+* Enable Cinder Ceph backend for Zun in ``globals.yml``:
+
+ .. code-block:: yaml
- .. code-block:: yaml
+ zun_configure_for_cinder_ceph: "yes"
- zun_configure_for_cinder_ceph: "yes"
+* Copy Ceph configuration file to:
-#. Copy Ceph configuration file to:
- * ``/etc/kolla/config/zun/zun-compute/ceph.conf``
+ * ``/etc/kolla/config/zun/zun-compute/ceph.conf``
-#. Copy Ceph keyring file(s) to:
+* Copy Ceph keyring file(s) to:
- * ``/etc/kolla/config/zun/zun-compute/``
+ * ``/etc/kolla/config/zun/zun-compute/ceph.client.cinder.keyring``
Nova
@@ -165,29 +342,45 @@ not need to be copied between hypervisors.
Configuring Nova for Ceph includes following steps:
-#. Enable Nova Ceph backend in ``globals.yml``:
+* Enable Nova Ceph backend in ``globals.yml``:
+
+ .. code-block:: yaml
+
+ nova_backend_ceph: "yes"
+
+* Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
- .. code-block:: yaml
+ * ``ceph_nova_user`` (by default it's the same as ``ceph_cinder_user``)
+ * ``ceph_nova_pool_name`` (default: ``vms``)
- nova_backend_ceph: "yes"
+* Copy Ceph configuration file to ``/etc/kolla/config/nova/ceph.conf``
+* Copy Ceph keyring file(s) to:
-#. Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
+ * ``/etc/kolla/config/nova/ceph.client.nova.keyring``
- * ``ceph_nova_keyring`` (by default it's the same as
- ``ceph_cinder_keyring``)
- * ``ceph_nova_user`` (by default it's the same as ``ceph_cinder_user``)
- * ``ceph_nova_pool_name`` (default: ``vms``)
+ .. note::
-#. Copy Ceph configuration file to ``/etc/kolla/config/nova/ceph.conf``
-#. Copy Ceph keyring file(s) to:
+ If you are using a Ceph deployment tool that generates separate Ceph
+ keys for Cinder and Nova, you will need to override
+ ``ceph_nova_user`` to match.
- * ``/etc/kolla/config/nova/``
+To configure a different Ceph backend per nova-compute host, which is
+useful with availability zones:
- .. note::
+Edit the inventory file as described below:
- If you are using a Ceph deployment tool that generates separate Ceph
- keys for Cinder and Nova, you will need to override
- ``ceph_nova_keyring`` and ``ceph_nova_user`` to match.
+ .. code-block:: ini
+
+ [compute]
+ hostname1 ceph_cluster=ceph1
+ hostname2 ceph_cluster=ceph2
+
+* Copy Ceph config and keyring file(s):
+
+ * ``/etc/kolla/config/nova//ceph1.conf``
+ * ``/etc/kolla/config/nova//ceph1.client.nova.keyring``
+ * ``/etc/kolla/config/nova//ceph2.conf``
+ * ``/etc/kolla/config/nova//ceph2.client.nova.keyring``
Gnocchi
-------
@@ -195,21 +388,21 @@ Gnocchi
Ceph object storage can be used as a storage backend for Gnocchi metrics.
Configuring Gnocchi for Ceph includes following steps:
-#. Enable Gnocchi Ceph backend in ``globals.yml``:
+* Enable Gnocchi Ceph backend in ``globals.yml``:
- .. code-block:: yaml
+ .. code-block:: yaml
- gnocchi_backend_storage: "ceph"
+ gnocchi_backend_storage: "ceph"
-#. Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
+* Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
- * ``ceph_gnocchi_keyring``
- (default: ``ceph.client.gnocchi.keyring``)
- * ``ceph_gnocchi_user`` (default: ``gnocchi``)
- * ``ceph_gnocchi_pool_name`` (default: ``gnocchi``)
+ * ``ceph_gnocchi_user`` (default: ``gnocchi``)
+ * ``ceph_gnocchi_pool_name`` (default: ``gnocchi``)
-#. Copy Ceph configuration file to ``/etc/kolla/config/gnocchi/ceph.conf``
-#. Copy Ceph keyring to ``/etc/kolla/config/gnocchi/``
+* Copy Ceph configuration file to
+ ``/etc/kolla/config/gnocchi/ceph.conf``
+* Copy Ceph keyring to
+ ``/etc/kolla/config/gnocchi/ceph.client.gnocchi.keyring``
Manila
------
@@ -217,32 +410,104 @@ Manila
CephFS can be used as a storage backend for Manila shares. Configuring Manila
for Ceph includes following steps:
-#. Enable Manila Ceph backend in ``globals.yml``:
-
- .. code-block:: yaml
-
- enable_manila_backend_cephfs_native: "yes"
-
-#. Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
-
- * ``ceph_manila_keyring`` (default: ``ceph.client.manila.keyring``)
- * ``ceph_manila_user`` (default: ``manila``)
-
- .. note::
-
- Required Ceph identity caps for manila user are documented in
- :manila-doc:`CephFS Native driver `.
-
-#. Copy Ceph configuration file to ``/etc/kolla/config/manila/ceph.conf``
-#. Copy Ceph keyring to ``/etc/kolla/config/manila/``
-
-#. If using multiple filesystems (Ceph Pacific+), set
- ``manila_cephfs_filesystem_name`` in ``/etc/kolla/globals.yml`` to the
- name of the Ceph filesystem Manila should use.
- By default, Manila will use the first filesystem returned by
- the ``ceph fs volume ls`` command.
-
-#. Setup Manila in the usual way
+* Enable Manila Ceph backend in ``globals.yml``:
+
+ .. code-block:: yaml
+
+ enable_manila_backend_cephfs_native: "yes"
+
+* Configure Ceph authentication details in ``/etc/kolla/globals.yml``:
+
+ * ``ceph_manila_user`` (default: ``manila``)
+
+ .. note::
+
+ Required Ceph identity caps for the manila user are documented in
+ :manila-doc:`CephFS Native driver `.
+
+* Copy Ceph configuration file to ``/etc/kolla/config/manila/ceph.conf``
+* Copy Ceph keyring to ``/etc/kolla/config/manila/ceph.client.manila.keyring``
+
+To configure multiple Ceph backends with Manila, which is useful for
+use with availability zones:
+
+* Copy their Ceph configuration files into ``/etc/kolla/config/manila/``,
+ using a different name for each:
+
+ ``/etc/kolla/config/manila/ceph1.conf``
+
+ .. path /etc/kolla/config/manila/ceph1.conf
+ .. code-block:: ini
+
+ [global]
+ fsid = 1d89fec3-325a-4963-a950-c4afedd37fe3
+ mon_initial_members = ceph-0
+ mon_host = 192.168.0.56
+ auth_cluster_required = cephx
+ auth_service_required = cephx
+ auth_client_required = cephx
+
+ ``/etc/kolla/config/manila/ceph2.conf``
+
+ .. path /etc/kolla/config/manila/ceph2.conf
+ .. code-block:: ini
+
+ [global]
+ fsid = dbfea068-89ca-4d04-bba0-1b8a56c3abc8
+ mon_initial_members = ceph-0
+ mon_host = 192.10.0.100
+ auth_cluster_required = cephx
+ auth_service_required = cephx
+ auth_client_required = cephx
+
+* Declare Ceph backends in ``globals.yml``:
+
+ .. code-block:: yaml
+
+ manila_ceph_backends:
+ - name: "cephfsnative1"
+ share_name: "CEPHFS1"
+ driver: "cephfsnative"
+ cluster: "ceph1"
+ enabled: "{{ enable_manila_backend_cephfs_native | bool }}"
+ protocols:
+ - "CEPHFS"
+ - name: "cephfsnative2"
+ share_name: "CEPHFS2"
+ driver: "cephfsnative"
+ cluster: "ceph2"
+ enabled: "{{ enable_manila_backend_cephfs_native | bool }}"
+ protocols:
+ - "CEPHFS"
+ - name: "cephfsnfs1"
+ share_name: "CEPHFSNFS1"
+ driver: "cephfsnfs"
+ cluster: "ceph1"
+ enabled: "{{ enable_manila_backend_cephfs_nfs | bool }}"
+ protocols:
+ - "NFS"
+ - "CIFS"
+ - name: "cephfsnfs2"
+ share_name: "CEPHFSNFS2"
+ driver: "cephfsnfs"
+ cluster: "ceph2"
+ enabled: "{{ enable_manila_backend_cephfs_nfs | bool }}"
+ protocols:
+ - "NFS"
+ - "CIFS"
+
+* Copy Ceph keyring files for all Ceph backends:
+
+ * ``/etc/kolla/config/manila/manila-share/ceph1.client.manila.keyring``
+ * ``/etc/kolla/config/manila/manila-share/ceph2.client.manila.keyring``
+
+* If using multiple filesystems (Ceph Pacific+), set
+ ``manila_cephfs_filesystem_name`` in ``/etc/kolla/globals.yml`` to the
+ name of the Ceph filesystem Manila should use.
+ By default, Manila will use the first filesystem returned by
+ the ``ceph fs volume ls`` command.
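+
+ For example, assuming your Ceph filesystem is named ``cephfs`` (an
+ illustrative name):
+
+ .. code-block:: yaml
+
+    manila_cephfs_filesystem_name: "cephfs"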
+
+* Set up Manila in the usual way
For more details on the rest of the Manila setup, such as creating the share
type ``default_share_type``, please see :doc:`Manila in Kolla `.
@@ -327,6 +592,6 @@ When configuring Zun with Cinder volumes, kolla-ansible installs some
Ceph client packages on zun-compute hosts. You can set the version
of the Ceph packages installed by,
-#. Configuring Ceph version details in ``/etc/kolla/globals.yml``:
+* Configuring Ceph version details in ``/etc/kolla/globals.yml``:
- * ``ceph_version`` (default: ``pacific``)
+ * ``ceph_version`` (default: ``pacific``)
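+
+ For example, to pin a different release (the release name below is
+ illustrative; pick one provided by your package repositories):
+
+ .. code-block:: yaml
+
+    ceph_version: "quincy"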
diff --git a/doc/source/reference/storage/index.rst b/doc/source/reference/storage/index.rst
index ae37699e00..ad18929a64 100644
--- a/doc/source/reference/storage/index.rst
+++ b/doc/source/reference/storage/index.rst
@@ -10,7 +10,6 @@ supported by kolla.
external-ceph-guide
cinder-guide
- cinder-guide-hnas
cinder-guide-quobyte
cinder-guide-pure
manila-guide
diff --git a/doc/source/user/adding-and-removing-hosts.rst b/doc/source/user/adding-and-removing-hosts.rst
index 60fd396d31..524e3ead3f 100644
--- a/doc/source/user/adding-and-removing-hosts.rst
+++ b/doc/source/user/adding-and-removing-hosts.rst
@@ -171,6 +171,14 @@ For each host, clean up its services:
openstack compute service delete --os-compute-api-version 2.53 $id
done
+If the node is also running the ``etcd`` service, set
+``etcd_remove_deleted_members: "yes"`` in ``globals.yml`` to automatically
+remove nodes from the ``etcd`` cluster that have been removed from the inventory.
+
+Alternatively, the ``etcd`` members can be removed manually with ``etcdctl``.
+For more details, please consult the ``runtime reconfiguration`` documentation
+section for the version of etcd in operation.
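+
+For example, in ``globals.yml``:
+
+.. code-block:: yaml
+
+   etcd_remove_deleted_members: "yes"
+
+A minimal manual sketch with ``etcdctl`` (assuming you run it where the etcd
+client can reach the cluster, e.g. inside an etcd container; the member ID
+comes from the list output):
+
+.. code-block:: console
+
+   etcdctl member list
+   etcdctl member remove <member-id>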
+
.. _removing-existing-compute-nodes:
Removing existing compute nodes
diff --git a/doc/source/user/ansible-tuning.rst b/doc/source/user/ansible-tuning.rst
index 019dddedd6..aa8db1cd2d 100644
--- a/doc/source/user/ansible-tuning.rst
+++ b/doc/source/user/ansible-tuning.rst
@@ -139,3 +139,24 @@ facts via facter:
.. code-block:: yaml
kolla_ansible_setup_gather_subset: "all,!facter"
+
+Max failure percentage
+----------------------
+
+It is possible to specify a `maximum failure percentage
+`__
+using ``kolla_max_fail_percentage``. By default this is undefined, which is
+equivalent to a value of 100, meaning that Ansible will continue execution
+until all hosts have failed or completed. For example:
+
+.. code-block:: yaml
+
+ kolla_max_fail_percentage: 50
+
+A max fail percentage may be set for specific services using
+``_max_fail_percentage``. For example:
+
+.. code-block:: yaml
+
+ kolla_max_fail_percentage: 50
+ nova_max_fail_percentage: 25
diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst
index fcd9f96b72..4c4e698a66 100644
--- a/doc/source/user/index.rst
+++ b/doc/source/user/index.rst
@@ -6,6 +6,7 @@ User Guides
:maxdepth: 2
quickstart
+ quickstart-development
support-matrix
virtual-environments
multinode
diff --git a/doc/source/user/multi-regions.rst b/doc/source/user/multi-regions.rst
index 5485bb3246..dcee26d162 100644
--- a/doc/source/user/multi-regions.rst
+++ b/doc/source/user/multi-regions.rst
@@ -76,7 +76,8 @@ the value of ``kolla_internal_fqdn`` in RegionOne:
username: "{{ keystone_admin_user }}"
password: "{{ keystone_admin_password }}"
user_domain_name: "{{ default_user_domain_name }}"
- system_scope: "all"
+ project_name: "{{ keystone_admin_project }}"
+ domain_name: "default"
.. note::
diff --git a/doc/source/user/multinode.rst b/doc/source/user/multinode.rst
index 5074fa728b..21bf67fa45 100644
--- a/doc/source/user/multinode.rst
+++ b/doc/source/user/multinode.rst
@@ -60,7 +60,7 @@ controls how ansible interacts with remote hosts.
Ansible uses SSH to connect the deployment host and target hosts. For more
information about SSH authentication please reference
- `Ansible documentation `__.
+ `Ansible documentation `__.
.. code-block:: ini
@@ -75,7 +75,7 @@ controls how ansible interacts with remote hosts.
Additional inventory parameters might be required according to your
environment setup. Reference `Ansible Documentation
- `__ for more
+ `__ for more
information.
@@ -177,3 +177,19 @@ Run the deployment:
kolla-ansible deploy -i
+Validate generated configuration files of enabled services:
+
+.. code-block:: console
+
+ kolla-ansible validate-config -i
+
+.. note::
+
+ Due to the nature of the configuration generation, the validation can
+ currently only be done after the first deployment, as some validations
+ need access to the running containers.
+ The validation tasks can be found - and altered - in each Ansible role under
+ ``kolla-ansible/ansible/roles/$role/tasks/config_validate.yml``.
+ The validation for most OpenStack services is done by the special role
+ ``service-config-validate``.
+
diff --git a/doc/source/user/operating-kolla.rst b/doc/source/user/operating-kolla.rst
index 5ddc224864..a54d21eefb 100644
--- a/doc/source/user/operating-kolla.rst
+++ b/doc/source/user/operating-kolla.rst
@@ -56,6 +56,16 @@ deployment.
Limitations and Recommendations
-------------------------------
+.. warning::
+
+ Please note that using the Ansible ``--limit`` option is not recommended.
+ There are known bugs with it, e.g. when `upgrading parts of nova
+ `__.
+ We accept bug reports for this and try to fix issues as they become known.
+ The core problem is how the ``register:`` keyword works and how it
+ interacts with the ``--limit`` option. You can find more information in the
+ above bug report.
+
.. note::
Please note that when the ``use_preconfigured_databases`` flag is set to
@@ -65,25 +75,7 @@ Limitations and Recommendations
.. note::
If you have separate keys for nova and cinder, please be sure to set
- ``ceph_nova_keyring: ceph.client.nova.keyring`` and ``ceph_nova_user: nova``
- in ``/etc/kolla/globals.yml``
-
-Ubuntu Focal 20.04
-------------------
-
-The Victoria release adds support for Ubuntu Focal 20.04 as a host operating
-system. Ubuntu users upgrading from Ussuri should first upgrade OpenStack
-containers to Victoria, which uses the Ubuntu Focal 20.04 base container image.
-Hosts should then be upgraded to Ubuntu Focal 20.04.
-
-CentOS Stream 8
----------------
-
-The Wallaby release adds support for CentOS Stream 8 as a host operating
-system. CentOS Stream 8 support will also be added to a Victoria stable
-release. CentOS Linux users upgrading from Victoria should first migrate hosts
-and container images from CentOS Linux to CentOS Stream before upgrading to
-Wallaby.
+ ``ceph_nova_user: nova`` in ``/etc/kolla/globals.yml``
Preparation (the foreword)
--------------------------
@@ -112,6 +104,13 @@ First, upgrade the ``kolla-ansible`` package:
If you are running from Git repository, then just checkout the desired
branch and run ``pip3 install --upgrade`` with the repository directory.
+If performing a skip-level (SLURP) upgrade, update ``ansible`` or
+``ansible-core`` to a version supported by the release you're upgrading to.
+
+.. code-block:: console
+
+ pip3 install --upgrade 'ansible-core>=|ANSIBLE_CORE_VERSION_MIN|,<|ANSIBLE_CORE_VERSION_MAX|.99'
+
If upgrading to a Yoga release or later, install or upgrade Ansible Galaxy
dependencies:
@@ -176,6 +175,16 @@ issues:
At a convenient time, the upgrade can now be run.
+SLURP extra preparations
+++++++++++++++++++++++++
+
+RabbitMQ has two major version releases per year but does not support jumping
+two versions in one upgrade. So if you want to perform a skip-level upgrade,
+you must first upgrade RabbitMQ to an intermediate version. Please see the
+`RabbitMQ SLURP section
+`__
+for details.
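+
+A possible sketch, assuming your release provides the ``rabbitmq-upgrade``
+command (the target version below is illustrative; take the real intermediate
+version from the linked section):
+
+.. code-block:: console
+
+   kolla-ansible -i INVENTORY rabbitmq-upgrade 3.12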
+
Perform the Upgrade
-------------------
@@ -246,6 +255,10 @@ necessary update containers, without generating configuration.
``kolla-ansible -i INVENTORY prune-images`` is used to prune orphaned Docker
images on hosts.
+``kolla-ansible -i INVENTORY genconfig`` is used to generate configuration
+files for enabled OpenStack services without restarting the containers, so
+the new configuration is not applied right away.
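+
+A possible workflow sketch (assuming the pre-generated configuration is then
+applied by redeploying containers with ``deploy-containers``):
+
+.. code-block:: console
+
+   kolla-ansible -i INVENTORY genconfig
+   kolla-ansible -i INVENTORY deploy-containers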
+
``kolla-ansible -i INVENTORY1 -i INVENTORY2 ...`` Multiple inventories can be
specified by passing the ``--inventory`` or ``-i`` command line option multiple
times. This can be useful to share configuration between multiple environments.
diff --git a/doc/source/user/quickstart-development.rst b/doc/source/user/quickstart-development.rst
new file mode 100644
index 0000000000..e769ecbb5a
--- /dev/null
+++ b/doc/source/user/quickstart-development.rst
@@ -0,0 +1,355 @@
+.. _quickstart-development:
+
+===========================
+Quick Start for development
+===========================
+
+This guide provides step-by-step instructions to deploy OpenStack using Kolla
+Ansible on bare metal servers or virtual machines. For deployment/evaluation,
+we have the :kolla-ansible-doc:`quickstart ` guide.
+
+Recommended reading
+~~~~~~~~~~~~~~~~~~~
+
+It's beneficial to learn the basics of both `Ansible `__
+and `Docker `__ before running Kolla Ansible.
+
+Host machine requirements
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The host machine must satisfy the following minimum requirements:
+
+- 2 network interfaces
+- 8GB main memory
+- 40GB disk space
+
+See the :kolla-ansible-doc:`support matrix ` for details
+of supported host Operating Systems. Kolla Ansible supports the default Python
+3.x versions provided by the supported Operating Systems. For more information
+see `tested runtimes <|TESTED_RUNTIMES_GOVERNANCE_URL|>`_.
+
+Install dependencies
+~~~~~~~~~~~~~~~~~~~~
+
+Typically, commands in this section that use the system package manager must
+be run with root privileges.
+
+It is generally recommended to use a virtual environment to install Kolla
+Ansible and its dependencies, to avoid conflicts with the system site packages.
+Note that this is independent of the use of a virtual environment for remote
+execution, which is described in
+:kolla-ansible-doc:`Virtual Environments `.
+
+#. For Debian or Ubuntu, update the package index:
+
+ .. code-block:: console
+
+ sudo apt update
+
+#. Install Python build dependencies:
+
+ For CentOS or Rocky, run:
+
+ .. code-block:: console
+
+ sudo dnf install git python3-devel libffi-devel gcc openssl-devel python3-libselinux
+
+ For Debian or Ubuntu, run:
+
+ .. code-block:: console
+
+ sudo apt install git python3-dev libffi-dev gcc libssl-dev
+
+Install dependencies for the virtual environment
+------------------------------------------------
+
+#. Install the virtual environment dependencies.
+
+ For CentOS or Rocky, you don't need to do anything.
+
+ For Debian or Ubuntu, run:
+
+ .. code-block:: console
+
+ sudo apt install python3-venv
+
+#. Create a virtual environment and activate it:
+
+ .. code-block:: console
+
+ python3 -m venv /path/to/venv
+ source /path/to/venv/bin/activate
+
+ The virtual environment should be activated before running any commands that
+ depend on packages installed in it.
+
+#. Ensure the latest version of pip is installed:
+
+ .. code-block:: console
+
+ pip install -U pip
+
+#. Install `Ansible `__. Kolla Ansible requires at least
+ Ansible ``|ANSIBLE_VERSION_MIN|`` (or ansible-core
+ ``|ANSIBLE_CORE_VERSION_MIN|``) and supports up to ``|ANSIBLE_VERSION_MAX|``
+ (or ansible-core ``|ANSIBLE_CORE_VERSION_MAX|``).
+
+ .. code-block:: console
+
+ pip install 'ansible-core>=|ANSIBLE_CORE_VERSION_MIN|,<|ANSIBLE_CORE_VERSION_MAX|.99'
+
+Install Kolla-ansible
+~~~~~~~~~~~~~~~~~~~~~
+
+#. Clone the ``kolla-ansible`` repository from git.
+
+ .. code-block:: console
+
+ git clone --branch |KOLLA_BRANCH_NAME| https://opendev.org/openstack/kolla-ansible
+
+#. Install the requirements of ``kolla`` and ``kolla-ansible``:
+
+ .. code-block:: console
+
+ pip install ./kolla-ansible
+
+#. Create the ``/etc/kolla`` directory.
+
+ .. code-block:: console
+
+ sudo mkdir -p /etc/kolla
+ sudo chown $USER:$USER /etc/kolla
+
+#. Copy the configuration files to the ``/etc/kolla`` directory.
+ ``kolla-ansible`` holds the configuration files (``globals.yml`` and
+ ``passwords.yml``) in ``etc/kolla``.
+
+ .. code-block:: console
+
+ cp -r kolla-ansible/etc/kolla/* /etc/kolla
+
+#. Copy the inventory files to the current directory. ``kolla-ansible`` holds
+ inventory files (``all-in-one`` and ``multinode``) in the
+ ``ansible/inventory`` directory.
+
+ .. code-block:: console
+
+ cp kolla-ansible/ansible/inventory/* .
+
+Install Ansible Galaxy requirements
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Install Ansible Galaxy dependencies:
+
+.. code-block:: console
+
+ kolla-ansible install-deps
+
+Prepare initial configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Inventory
+---------
+
+The next step is to prepare our inventory file. An inventory is an Ansible file
+where we specify hosts and the groups that they belong to. We can use this to
+define node roles and access credentials.
+
+Kolla Ansible comes with ``all-in-one`` and ``multinode`` example inventory
+files. The difference between them is that the former is ready for deploying
+single node OpenStack on localhost. In this guide we will show the
+``all-in-one`` installation.
+
+Kolla passwords
+---------------
+
+Passwords used in our deployment are stored in the
+``/etc/kolla/passwords.yml`` file. All passwords are blank in this file and
+have to be filled either manually or by running the random password generator:
+
+.. code-block:: console
+
+ cd kolla-ansible/tools
+ ./generate_passwords.py
+
+Kolla globals.yml
+-----------------
+
+``globals.yml`` is the main configuration file for Kolla Ansible and is
+stored by default in ``/etc/kolla/globals.yml``.
+There are a few options that are required to deploy Kolla Ansible:
+
+* Image options
+
+ You have to specify the images that are going to be used for the deployment.
+ In this guide,
+ `Quay.io `__-provided,
+ pre-built images are going to be used. To learn more about the building
+ mechanism, please refer to :kolla-doc:`Building Container Images
+ `.
+
+ Kolla provides a choice of several Linux distributions in containers:
+
+ - CentOS Stream (``centos``)
+ - Debian (``debian``)
+ - Rocky (``rocky``)
+ - Ubuntu (``ubuntu``)
+
+ For newcomers, we recommend using Rocky Linux 9 or Ubuntu 22.04.
+
+ .. code-block:: yaml
+
+ kolla_base_distro: "rocky"
+
+* AArch64 options
+
+ Kolla provides images for both x86-64 and aarch64 architectures. They are not
+ "multiarch", so users of aarch64 need to set the ``openstack_tag_suffix``
+ option:
+
+ .. code-block:: yaml
+
+ openstack_tag_suffix: "-aarch64"
+
+ This way, images built for the aarch64 architecture will be used.
+
+
+* Networking
+
+ Kolla Ansible requires a few networking options to be set.
+ We need to set the network interfaces used by OpenStack.
+
+ The first interface to set is ``network_interface``. This is the default
+ interface for multiple management-type networks.
+
+ .. code-block:: yaml
+
+ network_interface: "eth0"
+
+ The second interface is dedicated to Neutron external (or public)
+ networks; it can be VLAN or flat, depending on how the networks are created.
+ This interface should be active without an IP address. If not, instances
+ won't be able to access the external networks.
+
+ .. code-block:: yaml
+
+ neutron_external_interface: "eth1"
+
+ To learn more about network configuration, refer to
+ :kolla-ansible-doc:`Network overview
+ `.
+
+ Next, we need to provide a floating IP for management traffic. This IP will
+ be managed by keepalived to provide high availability, and should be set to
+ an *unused* address in the management network that is connected to our
+ ``network_interface``. If you use an existing OpenStack installation for your
+ deployment, make sure the IP is allowed in the configuration of your VM.
+
+ .. code-block:: yaml
+
+ kolla_internal_vip_address: "10.1.0.250"
+
+* Enable additional services
+
+ By default, Kolla Ansible provides a bare compute kit; however, it does
+ provide support for a vast selection of additional services. To enable
+ them, set ``enable_*`` to "yes".
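+
+ For example, to enable the Block Storage service:
+
+ .. code-block:: yaml
+
+    enable_cinder: "yes"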
+
+ Kolla now supports many OpenStack services; there is
+ `a list of available services
+ `_.
+ For more information about service configuration, please refer to the
+ :kolla-ansible-doc:`Services Reference Guide
+ `.
+
+* Multiple globals files
+
+ For more granular control, any option from the main
+ ``globals.yml`` file can be set using multiple YAML files. Simply
+ create a directory called ``globals.d`` under ``/etc/kolla/`` and place
+ all the relevant ``*.yml`` files there. The ``kolla-ansible`` script
+ will automatically add all of them as arguments to the ``ansible-playbook``
+ command.
+
+ An example use case would be an operator who wants to enable cinder
+ and all its options at a later stage than the initial deployment, without
+ tampering with the existing ``globals.yml`` file. That can be achieved using
+ a separate ``cinder.yml`` file placed under the ``/etc/kolla/globals.d/``
+ directory, with all the relevant options added there.
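+
+ A minimal sketch of such a file (the option names are taken from
+ ``globals.yml``; enabling the LVM backend is illustrative):
+
+ .. path /etc/kolla/globals.d/cinder.yml
+ .. code-block:: yaml
+
+    enable_cinder: "yes"
+    enable_cinder_backend_lvm: "yes"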
+
+* Virtual environment
+
+ It is recommended to use a virtual environment to execute tasks on the remote
+ hosts. This is covered in
+ :kolla-ansible-doc:`Virtual Environments `.
+
+Deployment
+~~~~~~~~~~
+
+After the configuration is set, we can proceed to the deployment phase. First
+we need to set up basic host-level dependencies, like Docker.
+
+Kolla Ansible provides a playbook that will install all required services in
+the correct versions.
+
+The following assumes the use of the ``all-in-one`` inventory. If using a
+different inventory, such as ``multinode``, replace the ``-i`` argument
+accordingly.
+
+#. Bootstrap servers with kolla deploy dependencies:
+
+ .. code-block:: console
+
+ cd kolla-ansible/tools
+ ./kolla-ansible -i ../../all-in-one bootstrap-servers
+
+#. Do pre-deployment checks for hosts:
+
+ .. code-block:: console
+
+ ./kolla-ansible -i ../../all-in-one prechecks
+
+#. Finally proceed to actual OpenStack deployment:
+
+ .. code-block:: console
+
+ ./kolla-ansible -i ../../all-in-one deploy
+
+When this playbook finishes, OpenStack should be up, running and functional!
+If an error occurs during execution, refer to the
+:kolla-ansible-doc:`troubleshooting guide `.
+
+Using OpenStack
+~~~~~~~~~~~~~~~
+
+#. Install the OpenStack CLI client:
+
+ .. code-block:: console
+
+ pip install python-openstackclient -c https://releases.openstack.org/constraints/upper/|KOLLA_OPENSTACK_RELEASE|
+
+#. OpenStack requires a ``clouds.yaml`` file where credentials for the
+ admin user are set. To generate this file:
+
+ .. code-block:: console
+
+ cd kolla-ansible/tools
+ ./kolla-ansible post-deploy
+
+ * The file will be generated in ``/etc/kolla/clouds.yaml``. You can use it
+ by copying it to ``/etc/openstack`` or ``~/.config/openstack``, or by
+ setting the ``OS_CLIENT_CONFIG_FILE`` environment variable.
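+
+ A usage sketch (assuming the generated file defines a cloud named
+ ``kolla-admin``; adjust to the name your file actually contains):
+
+ .. code-block:: console
+
+    export OS_CLIENT_CONFIG_FILE=/etc/kolla/clouds.yaml
+    openstack --os-cloud=kolla-admin endpoint list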
+
+#. Depending on how you installed Kolla Ansible, there is a script that will
+ create example networks, images, and so on.
+
+ .. warning::
+
+ You are free to use the following ``init-runonce`` script for demo
+ purposes but note it does **not** have to be run in order to use your
+ cloud. Depending on your customisations, it may not work, or it may
+ conflict with the resources you want to create. You have been warned.
+
+ .. code-block:: console
+
+ kolla-ansible/tools/init-runonce
diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst
index 743a968193..513f495c4b 100644
--- a/doc/source/user/quickstart.rst
+++ b/doc/source/user/quickstart.rst
@@ -1,11 +1,12 @@
.. quickstart:
-===========
-Quick Start
-===========
+=====================================
+Quick Start for deployment/evaluation
+=====================================
This guide provides step by step instructions to deploy OpenStack using Kolla
-Ansible on bare metal servers or virtual machines.
+Ansible on bare metal servers or virtual machines. For developers, we have the
+:kolla-ansible-doc:`developer quickstart `.
Recommended reading
~~~~~~~~~~~~~~~~~~~
@@ -27,7 +28,6 @@ of supported host Operating Systems. Kolla Ansible supports the default Python
3.x versions provided by the supported Operating Systems. For more information
see `tested runtimes <|TESTED_RUNTIMES_GOVERNANCE_URL|>`_.
-
Install dependencies
~~~~~~~~~~~~~~~~~~~~
@@ -48,26 +48,24 @@ execution, which is described in
#. Install Python build dependencies:
- For CentOS, Rocky or openEuler, run:
+ For CentOS or Rocky, run:
.. code-block:: console
- sudo dnf install python3-devel libffi-devel gcc openssl-devel python3-libselinux
+ sudo dnf install git python3-devel libffi-devel gcc openssl-devel python3-libselinux
For Debian or Ubuntu, run:
.. code-block:: console
- sudo apt install python3-dev libffi-dev gcc libssl-dev
+ sudo apt install git python3-dev libffi-dev gcc libssl-dev
-Install dependencies using a virtual environment
+Install dependencies for the virtual environment
------------------------------------------------
-If not installing Kolla Ansible in a virtual environment, skip this section.
-
#. Install the virtual environment dependencies.
- For CentOS, Rocky or openEuler, you don't need to do anything.
+ For CentOS or Rocky, you don't need to do anything.
For Debian or Ubuntu, run:
@@ -92,86 +90,24 @@ If not installing Kolla Ansible in a virtual environment, skip this section.
pip install -U pip
#. Install `Ansible `__. Kolla Ansible requires at least
- Ansible ``4`` and supports up to ``5``.
-
- .. code-block:: console
-
- pip install 'ansible>=4,<6'
-
-Install dependencies not using a virtual environment
-----------------------------------------------------
-
-If installing Kolla Ansible in a virtual environment, skip this section.
-
-#. Install ``pip``.
-
- For CentOS, Rocky or openEuler, run:
-
- .. code-block:: console
-
- sudo dnf install python3-pip
-
- For Debian or Ubuntu, run:
-
- .. code-block:: console
-
- sudo apt install python3-pip
-
-#. Ensure the latest version of pip is installed:
-
- .. code-block:: console
-
- sudo pip3 install -U pip
-
-#. Install `Ansible `__. Kolla Ansible requires at least
- Ansible ``4`` and supports up to ``5``.
-
- For CentOS or Rocky, run:
+ Ansible ``|ANSIBLE_VERSION_MIN|`` (or ansible-core
+ ``|ANSIBLE_CORE_VERSION_MIN|``) and supports up to ``|ANSIBLE_VERSION_MAX|``
+ (or ansible-core ``|ANSIBLE_CORE_VERSION_MAX|``).
.. code-block:: console
- sudo dnf install ansible
-
- For openEuler, run:
-
- .. code-block:: console
-
- sudo pip install ansible
-
- For Debian or Ubuntu, run:
-
- .. code-block:: console
-
- sudo apt install ansible
-
- .. note::
+ pip install 'ansible-core>=|ANSIBLE_CORE_VERSION_MIN|,<|ANSIBLE_CORE_VERSION_MAX|.99'
- If the installed Ansible version does not meet the requirements, one can
- use pip: ``sudo pip install -U 'ansible>=4,<6'``.
- Beware system package upgrades might interfere with that so it
- is recommended to uninstall the system package first. One might be better
- off with the virtual environment method to avoid this pitfall.
Install Kolla-ansible
~~~~~~~~~~~~~~~~~~~~~
-Install Kolla-ansible for deployment or evaluation
---------------------------------------------------
-
#. Install kolla-ansible and its dependencies using ``pip``.
- If using a virtual environment:
-
.. code-block:: console
pip install git+https://opendev.org/openstack/kolla-ansible@|KOLLA_BRANCH_NAME|
- If not using a virtual environment:
-
- .. code-block:: console
-
- sudo pip3 install git+https://opendev.org/openstack/kolla-ansible@|KOLLA_BRANCH_NAME|
-
#. Create the ``/etc/kolla`` directory.
.. code-block:: console
@@ -181,112 +117,25 @@ Install Kolla-ansible for deployment or evaluation
#. Copy ``globals.yml`` and ``passwords.yml`` to ``/etc/kolla`` directory.
- If using a virtual environment:
-
.. code-block:: console
cp -r /path/to/venv/share/kolla-ansible/etc_examples/kolla/* /etc/kolla
- If not using a virtual environment, run:
-
- .. code-block:: console
-
- cp -r /usr/local/share/kolla-ansible/etc_examples/kolla/* /etc/kolla
-
-#. Copy ``all-in-one`` and ``multinode`` inventory files to
- the current directory.
-
- If using a virtual environment:
-
- .. code-block:: console
-
- cp /path/to/venv/share/kolla-ansible/ansible/inventory/* .
-
- For ``all-in-one`` scenario in virtual environment add the following
- to the very beginning of the inventory:
-
- .. code-block:: console
-
- localhost ansible_python_interpreter=python
-
- If not using a virtual environment, run:
-
- .. code-block:: console
-
- cp /usr/local/share/kolla-ansible/ansible/inventory/* .
-
-Install Kolla for development
------------------------------
-
-#. Clone ``kolla-ansible`` repository from git.
-
- .. code-block:: console
-
- git clone --branch |KOLLA_BRANCH_NAME| https://opendev.org/openstack/kolla-ansible
-
-#. Install requirements of ``kolla`` and ``kolla-ansible``:
-
- If using a virtual environment:
-
- .. code-block:: console
-
- pip install ./kolla-ansible
-
- If not using a virtual environment:
-
- .. code-block:: console
-
- sudo pip3 install ./kolla-ansible
-
-#. Create the ``/etc/kolla`` directory.
-
- .. code-block:: console
-
- sudo mkdir -p /etc/kolla
- sudo chown $USER:$USER /etc/kolla
-
-#. Copy the configuration files to ``/etc/kolla`` directory.
- ``kolla-ansible`` holds the configuration files ( ``globals.yml`` and
- ``passwords.yml``) in ``etc/kolla``.
+#. Copy ``all-in-one`` inventory file to the current directory.
.. code-block:: console
- cp -r kolla-ansible/etc/kolla/* /etc/kolla
-
-#. Copy the inventory files to the current directory. ``kolla-ansible`` holds
- inventory files ( ``all-in-one`` and ``multinode``) in the
- ``ansible/inventory`` directory.
-
- .. code-block:: console
-
- cp kolla-ansible/ansible/inventory/* .
+ cp /path/to/venv/share/kolla-ansible/ansible/inventory/all-in-one .
Install Ansible Galaxy requirements
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Install Ansible Galaxy dependencies (Yoga release onwards):
+Install Ansible Galaxy dependencies:
.. code-block:: console
kolla-ansible install-deps
-Configure Ansible
-~~~~~~~~~~~~~~~~~
-
-For best results, Ansible configuration should be tuned for your environment.
-For example, add the following options to the Ansible configuration file
-``/etc/ansible/ansible.cfg``:
-
-.. path /etc/ansible/ansible.cfg
-.. code-block:: ini
-
- [defaults]
- host_key_checking=False
- pipelining=True
- forks=100
-
-Further information on tuning Ansible is available `here
-`__.
Prepare initial configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -300,55 +149,8 @@ define node roles and access credentials.
Kolla Ansible comes with ``all-in-one`` and ``multinode`` example inventory
files. The difference between them is that the former is ready for deploying
-single node OpenStack on localhost. If you need to use separate host or more
-than one node, edit ``multinode`` inventory:
-
-#. Edit the first section of ``multinode`` with connection details of your
- environment, for example:
-
- .. code-block:: ini
-
- [control]
- 10.0.0.[10:12] ansible_user=ubuntu ansible_password=foobar ansible_become=true
- # Ansible supports syntax like [10:12] - that means 10, 11 and 12.
- # Become clause means "use sudo".
-
- [network:children]
- control
- # when you specify group_name:children, it will use contents of group specified.
-
- [compute]
- 10.0.0.[13:14] ansible_user=ubuntu ansible_password=foobar ansible_become=true
-
- [monitoring]
- 10.0.0.10
- # This group is for monitoring node.
- # Fill it with one of the controllers' IP address or some others.
-
- [storage:children]
- compute
-
- [deployment]
- localhost ansible_connection=local become=true
- # use localhost and sudo
-
- To learn more about inventory files, check
- `Ansible documentation `_.
-
-#. Check whether the configuration of inventory is correct or not, run:
-
- .. code-block:: console
-
- ansible -i multinode all -m ping
-
- .. note::
-
- Distributions might not come with Python pre-installed. That will cause
- errors in the ``ping`` module. To quickly install Python with Ansible you
- can run: for Debian or Ubuntu:
- ``ansible -i multinode all -m raw -a "apt -y install python3"``,
- and for CentOS, Rocky or openEuler:
- ``ansible -i multinode all -m raw -a "dnf -y install python3"``.
+single node OpenStack on localhost. In this guide we will show the
+``all-in-one`` installation.
Kolla passwords
---------------
@@ -357,23 +159,15 @@ Passwords used in our deployment are stored in ``/etc/kolla/passwords.yml``
file. All passwords are blank in this file and have to be filled either
manually or by running random password generator:
-For deployment or evaluation, run:
-
.. code-block:: console
kolla-genpwd
-For development, run:
-
-.. code-block:: console
-
- cd kolla-ansible/tools
- ./generate_passwords.py
-
Kolla globals.yml
-----------------
-``globals.yml`` is the main configuration file for Kolla Ansible.
+``globals.yml`` is the main configuration file for Kolla Ansible and is
+stored by default in the ``/etc/kolla/globals.yml`` file.
There are a few options that are required to deploy Kolla Ansible:
* Image options
@@ -398,6 +192,19 @@ There are a few options that are required to deploy Kolla Ansible:
kolla_base_distro: "rocky"
+* AArch64 options
+
+ Kolla provides images for both x86-64 and aarch64 architectures. They are not
+ "multiarch", so users of aarch64 need to set the ``openstack_tag_suffix``
+ option:
+
+ .. code-block:: yaml
+
+ openstack_tag_suffix: "-aarch64"
+
+ This way, images built for the aarch64 architecture will be used.
+
+
* Networking
Kolla Ansible requires a few networking options to be set.
@@ -426,7 +233,8 @@ There are a few options that are required to deploy Kolla Ansible:
Next we need to provide floating IP for management traffic. This IP will be
managed by keepalived to provide high availability, and should be set to be
*not used* address in management network that is connected to our
- ``network_interface``.
+ ``network_interface``. If you use an existing OpenStack installation for your
+ deployment, make sure the IP is allowed in the configuration of your VM.
.. code-block:: console
@@ -436,18 +244,13 @@ There are a few options that are required to deploy Kolla Ansible:
By default Kolla Ansible provides a bare compute kit, however it does provide
support for a vast selection of additional services. To enable them, set
- ``enable_*`` to "yes". For example, to enable Block Storage service:
-
- .. code-block:: console
-
- enable_cinder: "yes"
+ ``enable_*`` to "yes".
Kolla now supports many OpenStack services, there is
`a list of available services
`_.
For more information about service configuration, Please refer to the
- :kolla-ansible-doc:`Services Reference Guide
- `.
+ :kolla-ansible-doc:`Services Reference Guide `.
* Multiple globals files
@@ -479,50 +282,27 @@ need to setup basic host-level dependencies, like docker.
Kolla Ansible provides a playbook that will install all required services in
the correct versions.
-The following assumes the use of the ``multinode`` inventory. If using a
-different inventory, such as ``all-in-one``, replace the ``-i`` argument
+The following assumes the use of the ``all-in-one`` inventory. If using a
+different inventory, such as ``multinode``, replace the ``-i`` argument
accordingly.
-* For deployment or evaluation, run:
-
- #. Bootstrap servers with kolla deploy dependencies:
-
- .. code-block:: console
-
- kolla-ansible -i ./multinode bootstrap-servers
-
- #. Do pre-deployment checks for hosts:
-
- .. code-block:: console
-
- kolla-ansible -i ./multinode prechecks
-
- #. Finally proceed to actual OpenStack deployment:
-
- .. code-block:: console
-
- kolla-ansible -i ./multinode deploy
-
-* For development, run:
+#. Bootstrap servers with kolla deploy dependencies:
- #. Bootstrap servers with kolla deploy dependencies:
-
- .. code-block:: console
+ .. code-block:: console
- cd kolla-ansible/tools
- ./kolla-ansible -i ../../multinode bootstrap-servers
+ kolla-ansible -i ./all-in-one bootstrap-servers
- #. Do pre-deployment checks for hosts:
+#. Do pre-deployment checks for hosts:
- .. code-block:: console
+ .. code-block:: console
- ./kolla-ansible -i ../../multinode prechecks
+ kolla-ansible -i ./all-in-one prechecks
- #. Finally proceed to actual OpenStack deployment:
+#. Finally proceed to actual OpenStack deployment:
- .. code-block:: console
+ .. code-block:: console
- ./kolla-ansible -i ../../multinode deploy
+ kolla-ansible -i ./all-in-one deploy
When this playbook finishes, OpenStack should be up, running and functional!
If error occurs during execution, refer to
@@ -540,22 +320,15 @@ Using OpenStack
#. OpenStack requires a ``clouds.yaml`` file where credentials for the
admin user are set. To generate this file:
- * For deployment or evaluation, run:
-
- .. code-block:: console
-
- kolla-ansible post-deploy
-
- * For development, run:
+ .. code-block:: console
- .. code-block:: console
+ kolla-ansible post-deploy
- cd kolla-ansible/tools
- ./kolla-ansible post-deploy
+ .. note::
- * The file will be generated in /etc/kolla/clouds.yaml, you can use it by
- copying it to /etc/openstack or ~/.config/openstack or setting
- OS_CLIENT_CONFIG_FILE environment variable.
+ The file will be generated in ``/etc/kolla/clouds.yaml``; you can use it
+ by copying it to ``/etc/openstack`` or ``~/.config/openstack``, or by
+ setting the ``OS_CLIENT_CONFIG_FILE`` environment variable.
#. Depending on how you installed Kolla Ansible, there is a script that will
create example networks, images, and so on.
@@ -567,22 +340,6 @@ Using OpenStack
cloud. Depending on your customisations, it may not work, or it may
conflict with the resources you want to create. You have been warned.
- * For deployment or evaluation, run:
-
- If using a virtual environment:
-
- .. code-block:: console
-
- /path/to/venv/share/kolla-ansible/init-runonce
-
- If not using a virtual environment:
-
- .. code-block:: console
-
- /usr/local/share/kolla-ansible/init-runonce
-
- * For development, run:
-
- .. code-block:: console
+ .. code-block:: console
- kolla-ansible/tools/init-runonce
+ /path/to/venv/share/kolla-ansible/init-runonce
diff --git a/doc/source/user/security.rst b/doc/source/user/security.rst
index d0e185bce5..b1cb4149aa 100644
--- a/doc/source/user/security.rst
+++ b/doc/source/user/security.rst
@@ -41,7 +41,7 @@ This absolutely solves the problem of persistent data, but it introduces
another security issue, permissions. With this host bind mount solution
the data in ``var/lib/mysql`` will be owned by the mysql user in the
container. Unfortunately, that mysql user in the container could have
-any UID/GID and thats who will own the data outside the container
+any UID/GID and that's who will own the data outside the container
introducing a potential security risk. Additionally, this method
dirties the host and requires host permissions to the directories
to bind mount.
@@ -98,8 +98,8 @@ The following variables should be configured in Kolla Ansible's
* Bool - set to true or false
-Prerequsites
-============
+Prerequisites
+=============
Firewalld needs to be installed beforehand.
Kayobe can be used to automate the installation and configuration of firewalld
diff --git a/doc/source/user/support-matrix.rst b/doc/source/user/support-matrix.rst
index be24706c12..a92cf4ba29 100644
--- a/doc/source/user/support-matrix.rst
+++ b/doc/source/user/support-matrix.rst
@@ -7,31 +7,14 @@ Supported Operating Systems
Kolla Ansible supports the following host Operating Systems (OS):
-.. note::
-
- CentOS 7 is no longer supported as a host OS. The Train release supports
- both CentOS 7 and 8, and provides a route for migration. See the `Kolla
- Ansible Train documentation
- `_ for
- information on migrating to CentOS 8.
-
-.. note::
-
- CentOS Linux 8 (as opposed to CentOS Stream 8) is no longer supported as a
- host OS. The Victoria release will in future support both CentOS Linux 8 and
- CentOS Stream 8, and provides a route for migration.
-
.. note::
CentOS Stream 9 is supported as a host OS while Kolla does not publish CS9
based images. Users can build them on their own. We recommend using Rocky
Linux 9 images instead.
-
-
* CentOS Stream 9
-* Debian Bullseye (11)
-* openEuler 22.03 LTS
+* Debian Bookworm (12)
* Rocky Linux 9
* Ubuntu Jammy (22.04)
diff --git a/doc/source/user/virtual-environments.rst b/doc/source/user/virtual-environments.rst
index 5bf780eec0..5d52a7e61f 100644
--- a/doc/source/user/virtual-environments.rst
+++ b/doc/source/user/virtual-environments.rst
@@ -26,7 +26,7 @@ python virtual environment on the Ansible control host. For example:
source /path/to/venv/bin/activate
pip install -U pip
pip install kolla-ansible
- pip install 'ansible>=4,<6'
+ pip install 'ansible>=6,<8'
deactivate
To use the virtual environment, it should first be activated:
diff --git a/etc/kolla/globals.yml b/etc/kolla/globals.yml
index 63ca05575e..2afc0a1554 100644
--- a/etc/kolla/globals.yml
+++ b/etc/kolla/globals.yml
@@ -24,6 +24,12 @@
# Dummy variable to allow Ansible to accept this file.
workaround_ansible_issue_8743: yes
+# This variable may be used to set the maximum failure percentage for all
+# plays. More fine-grained control is possible via per-service variables, e.g.
+# nova_max_fail_percentage. The default behaviour is to set a max fail
+# percentage of 100, which is equivalent to not setting it.
+#kolla_max_fail_percentage:
+
###############
# Kolla options
###############
@@ -71,12 +77,13 @@ workaround_ansible_issue_8743: yes
# Optionally change the path to sysctl.conf modified by Kolla Ansible plays.
#kolla_sysctl_conf_path: /etc/sysctl.conf
-################
+##################
# Container engine
-################
+##################
+
+# Valid options are [ docker, podman ]
+#kolla_container_engine: docker
-# Valid options are [ docker ]
-# kolla_container_engine: docker
################
# Docker options
@@ -115,7 +122,7 @@ workaround_ansible_issue_8743: yes
# interface by default. This interface must contain an IP address.
# It is possible for hosts to have non-matching names of interfaces - these can
# be set in an inventory file per host or per group or stored separately, see
-# http://docs.ansible.com/ansible/intro_inventory.html
+# http://docs.ansible.com/ansible/latest/intro_inventory.html
# Yet another way to workaround the naming problem is to create a bond for the
# interface on all hosts and give the bond name here. Similar strategy can be
# followed for other types of interfaces.
@@ -238,7 +245,7 @@ workaround_ansible_issue_8743: yes
#kolla_copy_ca_into_containers: "no"
#haproxy_backend_cacert: "{{ 'ca-certificates.crt' if kolla_base_distro in ['debian', 'ubuntu'] else 'ca-bundle.trust.crt' }}"
#haproxy_backend_cacert_dir: "/etc/ssl/certs"
-
+#database_enable_tls_backend: "{{ 'yes' if kolla_enable_tls_backend | bool and enable_proxysql | bool else 'no' }}"
##################
# Backend options
##################
@@ -261,6 +268,26 @@ workaround_ansible_issue_8743: yes
# Please read the docs for more details.
#acme_client_servers: []
+####################
+# LetsEncrypt options
+####################
+# This option is required for the letsencrypt role to work properly.
+#letsencrypt_email: ""
+
+####################
+# LetsEncrypt certificate server options
+####################
+#letsencrypt_cert_server: "https://acme-v02.api.letsencrypt.org/directory"
+# attempt to renew Let's Encrypt certificate every 12 hours
+#letsencrypt_cron_renew_schedule: "0 */12 * * *"
+
+####################
+# LetsEncrypt external account binding options
+####################
+#letsencrypt_external_account_binding: "no"
+#letsencrypt_eab_hmac: ""
+#letsencrypt_eab_key_id: ""
+
################
# Region options
################
@@ -294,7 +321,6 @@ workaround_ansible_issue_8743: yes
#enable_neutron: "{{ enable_openstack_core | bool }}"
#enable_nova: "{{ enable_openstack_core | bool }}"
#enable_rabbitmq: "{{ 'yes' if om_rpc_transport == 'rabbit' or om_notify_transport == 'rabbit' else 'no' }}"
-#enable_outward_rabbitmq: "{{ enable_murano | bool }}"
# OpenStack services can be enabled or disabled with these options
#enable_aodh: "no"
@@ -308,58 +334,49 @@ workaround_ansible_issue_8743: yes
#enable_ceph_rgw_loadbalancer: "{{ enable_ceph_rgw | bool }}"
#enable_cinder: "no"
#enable_cinder_backup: "yes"
-#enable_cinder_backend_hnas_nfs: "no"
#enable_cinder_backend_iscsi: "{{ enable_cinder_backend_lvm | bool }}"
#enable_cinder_backend_lvm: "no"
#enable_cinder_backend_nfs: "no"
#enable_cinder_backend_quobyte: "no"
#enable_cinder_backend_pure_iscsi: "no"
#enable_cinder_backend_pure_fc: "no"
+#enable_cinder_backend_pure_roce: "no"
+#enable_cinder_backend_pure_nvme_tcp: "no"
#enable_cloudkitty: "no"
#enable_collectd: "no"
#enable_cyborg: "no"
#enable_designate: "no"
#enable_destroy_images: "no"
-#enable_elasticsearch: "{{ 'yes' if enable_central_logging | bool or enable_osprofiler | bool or enable_skydive | bool or enable_monasca | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'elasticsearch') else 'no' }}"
-#enable_elasticsearch_curator: "no"
#enable_etcd: "no"
#enable_fluentd: "yes"
-#enable_freezer: "no"
+#enable_fluentd_systemd: "{{ (enable_fluentd | bool) and (enable_central_logging | bool) }}"
#enable_gnocchi: "no"
#enable_gnocchi_statsd: "no"
-#enable_grafana: "{{ enable_monasca | bool }}"
+#enable_grafana: "no"
#enable_grafana_external: "{{ enable_grafana | bool }}"
#enable_heat: "{{ enable_openstack_core | bool }}"
#enable_horizon: "{{ enable_openstack_core | bool }}"
#enable_horizon_blazar: "{{ enable_blazar | bool }}"
#enable_horizon_cloudkitty: "{{ enable_cloudkitty | bool }}"
#enable_horizon_designate: "{{ enable_designate | bool }}"
-#enable_horizon_freezer: "{{ enable_freezer | bool }}"
+#enable_horizon_fwaas: "{{ enable_neutron_fwaas | bool }}"
#enable_horizon_heat: "{{ enable_heat | bool }}"
#enable_horizon_ironic: "{{ enable_ironic | bool }}"
#enable_horizon_magnum: "{{ enable_magnum | bool }}"
#enable_horizon_manila: "{{ enable_manila | bool }}"
#enable_horizon_masakari: "{{ enable_masakari | bool }}"
#enable_horizon_mistral: "{{ enable_mistral | bool }}"
-#enable_horizon_monasca: "{{ enable_monasca | bool }}"
-#enable_horizon_murano: "{{ enable_murano | bool }}"
#enable_horizon_neutron_vpnaas: "{{ enable_neutron_vpnaas | bool }}"
#enable_horizon_octavia: "{{ enable_octavia | bool }}"
-#enable_horizon_sahara: "{{ enable_sahara | bool }}"
-#enable_horizon_senlin: "{{ enable_senlin | bool }}"
-#enable_horizon_solum: "{{ enable_solum | bool }}"
#enable_horizon_tacker: "{{ enable_tacker | bool }}"
#enable_horizon_trove: "{{ enable_trove | bool }}"
-#enable_horizon_vitrage: "{{ enable_vitrage | bool }}"
#enable_horizon_watcher: "{{ enable_watcher | bool }}"
#enable_horizon_zun: "{{ enable_zun | bool }}"
-#enable_influxdb: "{{ enable_monasca | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'influxdb') }}"
+#enable_influxdb: "{{ enable_cloudkitty | bool and cloudkitty_storage_backend == 'influxdb' }}"
#enable_ironic: "no"
#enable_ironic_neutron_agent: "{{ enable_neutron | bool and enable_ironic | bool }}"
+#enable_ironic_prometheus_exporter: "{{ enable_ironic | bool and enable_prometheus | bool }}"
#enable_iscsid: "{{ enable_cinder | bool and enable_cinder_backend_iscsi | bool }}"
-#enable_kafka: "{{ enable_monasca | bool }}"
-#enable_kibana: "{{ 'yes' if enable_central_logging | bool or enable_monasca | bool else 'no' }}"
-#enable_kibana_external: "{{ enable_kibana | bool }}"
#enable_kuryr: "no"
#enable_magnum: "no"
#enable_manila: "no"
@@ -371,12 +388,11 @@ workaround_ansible_issue_8743: yes
#enable_mariabackup: "no"
#enable_masakari: "no"
#enable_mistral: "no"
-#enable_monasca: "no"
#enable_multipathd: "no"
-#enable_murano: "no"
#enable_neutron_vpnaas: "no"
#enable_neutron_sriov: "no"
#enable_neutron_dvr: "no"
+#enable_neutron_fwaas: "no"
#enable_neutron_qos: "no"
#enable_neutron_agent_ha: "no"
#enable_neutron_bgp_dragent: "no"
@@ -391,19 +407,19 @@ workaround_ansible_issue_8743: yes
#enable_nova_ssh: "yes"
#enable_octavia: "no"
#enable_octavia_driver_agent: "{{ enable_octavia | bool and neutron_plugin_agent == 'ovn' }}"
+#enable_octavia_jobboard: "{{ enable_octavia | bool and 'amphora' in octavia_provider_drivers }}"
+#enable_opensearch: "{{ enable_central_logging | bool or enable_osprofiler | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'opensearch') }}"
+#enable_opensearch_dashboards: "{{ enable_opensearch | bool }}"
+#enable_opensearch_dashboards_external: "{{ enable_opensearch_dashboards | bool }}"
#enable_openvswitch: "{{ enable_neutron | bool and neutron_plugin_agent != 'linuxbridge' }}"
#enable_ovn: "{{ enable_neutron | bool and neutron_plugin_agent == 'ovn' }}"
#enable_ovs_dpdk: "no"
#enable_osprofiler: "no"
#enable_placement: "{{ enable_nova | bool or enable_zun | bool }}"
#enable_prometheus: "no"
-#enable_proxysql: "no"
+#enable_proxysql: "yes"
#enable_redis: "no"
-#enable_sahara: "no"
-#enable_senlin: "no"
-#enable_skydive: "no"
-#enable_solum: "no"
-#enable_storm: "{{ enable_monasca | bool }}"
+#enable_skyline: "no"
#enable_swift: "no"
#enable_swift_s3api: "no"
#enable_tacker: "no"
@@ -411,11 +427,18 @@ workaround_ansible_issue_8743: yes
#enable_trove: "no"
#enable_trove_singletenant: "no"
#enable_venus: "no"
-#enable_vitrage: "no"
#enable_watcher: "no"
-#enable_zookeeper: "{{ enable_kafka | bool or enable_storm | bool }}"
#enable_zun: "no"
+#############
+# S3 options
+#############
+# Common options for S3 Cinder Backup and Glance S3 backend.
+#s3_url:
+#s3_bucket:
+#s3_access_key:
+#s3_secret_key:
+
##################
# RabbitMQ options
##################
@@ -448,26 +471,20 @@ workaround_ansible_issue_8743: yes
#external_ceph_cephx_enabled: "yes"
# Glance
-#ceph_glance_keyring: "ceph.client.glance.keyring"
#ceph_glance_user: "glance"
#ceph_glance_pool_name: "images"
# Cinder
-#ceph_cinder_keyring: "ceph.client.cinder.keyring"
#ceph_cinder_user: "cinder"
#ceph_cinder_pool_name: "volumes"
-#ceph_cinder_backup_keyring: "ceph.client.cinder-backup.keyring"
#ceph_cinder_backup_user: "cinder-backup"
#ceph_cinder_backup_pool_name: "backups"
# Nova
-#ceph_nova_keyring: "{{ ceph_cinder_keyring }}"
-#ceph_nova_user: "nova"
+#ceph_nova_user: "{{ ceph_cinder_user }}"
#ceph_nova_pool_name: "vms"
# Gnocchi
-#ceph_gnocchi_keyring: "ceph.client.gnocchi.keyring"
#ceph_gnocchi_user: "gnocchi"
#ceph_gnocchi_pool_name: "gnocchi"
# Manila
-#ceph_manila_keyring: "ceph.client.manila.keyring"
#ceph_manila_user: "manila"
#############################
@@ -485,6 +502,9 @@ workaround_ansible_issue_8743: yes
# 28800(8 hour), 43200(12 hour), 86400(1 day), 604800(1 week).
#fernet_token_expiry: 86400
+# Whether or not to apply changes to service user passwords when services are
+# reconfigured
+#update_keystone_service_user_passwords: "true"
########################
# Glance - Image Options
@@ -494,6 +514,7 @@ workaround_ansible_issue_8743: yes
#glance_backend_file: "yes"
#glance_backend_swift: "no"
#glance_backend_vmware: "no"
+#glance_backend_s3: "no"
#enable_glance_image_cache: "no"
#glance_enable_property_protection: "no"
#glance_enable_interoperable_image_import: "no"
@@ -502,6 +523,14 @@ workaround_ansible_issue_8743: yes
# the default value is "no".
#glance_enable_rolling_upgrade: "no"
+####################
+# Glance S3 Backend
+####################
+#glance_backend_s3_url: "{{ s3_url }}"
+#glance_backend_s3_bucket: "{{ s3_bucket }}"
+#glance_backend_s3_access_key: "{{ s3_access_key }}"
+#glance_backend_s3_secret_key: "{{ s3_secret_key }}"
+
####################
# Osprofiler options
####################
@@ -535,11 +564,17 @@ workaround_ansible_issue_8743: yes
# Valid options are [ '', redis, etcd ]
#cinder_coordination_backend: "{{ 'redis' if enable_redis|bool else 'etcd' if enable_etcd|bool else '' }}"
-# Valid options are [ nfs, swift, ceph ]
+# Valid options are [ nfs, swift, ceph, s3 ]
#cinder_backup_driver: "ceph"
#cinder_backup_share: ""
#cinder_backup_mount_options_nfs: ""
+# Cinder backup S3 options
+#cinder_backup_s3_url: "{{ s3_url }}"
+#cinder_backup_s3_bucket: "{{ s3_bucket }}"
+#cinder_backup_s3_access_key: "{{ s3_access_key }}"
+#cinder_backup_s3_secret_key: "{{ s3_secret_key }}"
+
#######################
# Cloudkitty options
#######################
@@ -573,13 +608,11 @@ workaround_ansible_issue_8743: yes
# The number of fake driver per compute node
#num_nova_fake_per_node: 5
-# The flag "nova_safety_upgrade" need to be consider when
-# "nova_enable_rolling_upgrade" is enabled. The "nova_safety_upgrade"
-# controls whether the nova services are all stopped before rolling
-# upgrade to the new version, for the safety and availability.
-# If "nova_safety_upgrade" is "yes", that will stop all nova services (except
-# nova-compute) for no failed API operations before upgrade to the
-# new version. And opposite.
+# The "nova_safety_upgrade" controls whether the nova services
+# are all stopped before rolling upgrade to the new version,
+# for the safety and availability. If "nova_safety_upgrade" is "yes",
+# that will stop all nova services (except nova-compute) for no failed
+# API operations before upgrade to the new version. And opposite.
#nova_safety_upgrade: "no"
# Valid options are [ none, novnc, spice ]
@@ -597,7 +630,7 @@ workaround_ansible_issue_8743: yes
#############################
# Horizon - Dashboard Options
#############################
-#horizon_backend_database: "{{ enable_murano | bool }}"
+#horizon_backend_database: false
#############################
# Ironic options
@@ -715,7 +748,7 @@ workaround_ansible_issue_8743: yes
#enable_prometheus_mysqld_exporter: "{{ enable_mariadb | bool }}"
#enable_prometheus_node_exporter: "{{ enable_prometheus | bool }}"
#enable_prometheus_cadvisor: "{{ enable_prometheus | bool }}"
-#enable_prometheus_fluentd_integration: "{{ enable_prometheus | bool and enable fluentd | bool }}"
+#enable_prometheus_fluentd_integration: "{{ enable_prometheus | bool and enable_fluentd | bool }}"
#enable_prometheus_memcached: "{{ enable_prometheus | bool }}"
#enable_prometheus_alertmanager: "{{ enable_prometheus | bool }}"
#enable_prometheus_alertmanager_external: "{{ enable_prometheus_alertmanager | bool }}"
@@ -725,7 +758,6 @@ workaround_ansible_issue_8743: yes
#enable_prometheus_blackbox_exporter: "{{ enable_prometheus | bool }}"
#enable_prometheus_libvirt_exporter: "{{ enable_prometheus | bool and enable_nova | bool and nova_compute_virt_type in ['kvm', 'qemu'] }}"
#enable_prometheus_etcd_integration: "{{ enable_prometheus | bool and enable_etcd | bool }}"
-#enable_prometheus_msteams: "no"
# The labels to add to any time series or alerts when communicating with external systems (federation, remote storage, Alertmanager).
# prometheus_external_labels:
@@ -743,7 +775,6 @@ workaround_ansible_issue_8743: yes
# Extra parameters passed to Prometheus exporters.
#prometheus_blackbox_exporter_cmdline_extras:
#prometheus_elasticsearch_exporter_cmdline_extras:
-#prometheus_haproxy_exporter_cmdline_extras:
#prometheus_memcached_exporter_cmdline_extras:
#prometheus_mysqld_exporter_cmdline_extras:
#prometheus_node_exporter_cmdline_extras:
@@ -755,15 +786,6 @@ workaround_ansible_issue_8743: yes
# - host1:port1
# - host2:port2
-#########
-# Freezer
-#########
-# Freezer can utilize two different database backends, elasticsearch or mariadb.
-# Elasticsearch is preferred, however it is not compatible with the version deployed
-# by kolla-ansible. You must first setup an external elasticsearch with 2.3.0.
-# By default, kolla-ansible deployed mariadb is the used database backend.
-#freezer_database_backend: "mariadb"
-
##########
# Telegraf
##########
@@ -860,3 +882,10 @@ workaround_ansible_issue_8743: yes
# this is UDP port
#hacluster_corosync_port: 5405
+
+##############
+# etcd options
+##############
+# If `etcd_remove_deleted_members` is enabled, Kolla Ansible will automatically
+# remove etcd members from the cluster that are no longer in the inventory.
+#etcd_remove_deleted_members: "no"
diff --git a/etc/kolla/passwords.yml b/etc/kolla/passwords.yml
index ca1ca5ee40..5ce37b5b54 100644
--- a/etc/kolla/passwords.yml
+++ b/etc/kolla/passwords.yml
@@ -92,12 +92,6 @@ cloudkitty_keystone_password:
cyborg_database_password:
cyborg_keystone_password:
-freezer_database_password:
-freezer_keystone_password:
-
-sahara_database_password:
-sahara_keystone_password:
-
designate_database_password:
designate_keystone_password:
# This option must be UUID4 value in string format
@@ -113,14 +107,6 @@ heat_database_password:
heat_keystone_password:
heat_domain_admin_password:
-murano_database_password:
-murano_keystone_password:
-murano_agent_rabbitmq_password:
-
-monasca_agent_password:
-monasca_database_password:
-monasca_keystone_password:
-
ironic_database_password:
ironic_keystone_password:
@@ -142,12 +128,6 @@ ceilometer_keystone_password:
watcher_database_password:
watcher_keystone_password:
-senlin_database_password:
-senlin_keystone_password:
-
-solum_database_password:
-solum_keystone_password:
-
horizon_secret_key:
horizon_database_password:
@@ -157,6 +137,7 @@ manila_database_password:
manila_keystone_password:
octavia_database_password:
+octavia_persistence_database_password:
octavia_keystone_password:
octavia_ca_password:
octavia_client_ca_password:
@@ -167,9 +148,6 @@ tacker_keystone_password:
zun_database_password:
zun_keystone_password:
-vitrage_database_password:
-vitrage_keystone_password:
-
venus_database_password:
venus_keystone_password:
@@ -178,6 +156,10 @@ masakari_keystone_password:
memcache_secret_key:
+skyline_secret_key:
+skyline_database_password:
+skyline_keystone_password:
+
# HMAC secret key
osprofiler_secret:
@@ -205,6 +187,10 @@ neutron_ssh_key:
private_key:
public_key:
+haproxy_ssh_key:
+ private_key:
+ public_key:
+
####################
# Gnocchi options
####################
@@ -218,8 +204,6 @@ gnocchi_user_id:
rabbitmq_password:
rabbitmq_monitoring_password:
rabbitmq_cluster_cookie:
-outward_rabbitmq_password:
-outward_rabbitmq_cluster_cookie:
####################
# HAProxy options
@@ -227,11 +211,6 @@ outward_rabbitmq_cluster_cookie:
haproxy_password:
keepalived_password:
-####################
-# Kibana options
-####################
-kibana_password:
-
####################
# etcd options
####################
@@ -247,6 +226,10 @@ redis_master_password:
####################
prometheus_mysql_exporter_database_password:
prometheus_alertmanager_password:
+prometheus_password:
+prometheus_grafana_password:
+prometheus_skyline_password:
+prometheus_bcrypt_salt:
###############################
# OpenStack identity federation
@@ -268,3 +251,8 @@ libvirt_sasl_password:
############
proxysql_admin_password:
proxysql_stats_password:
+
+############
+# OpenSearch
+############
+opensearch_dashboards_password:
diff --git a/kolla_ansible/ansible.py b/kolla_ansible/ansible.py
new file mode 100644
index 0000000000..7a29ae9f5c
--- /dev/null
+++ b/kolla_ansible/ansible.py
@@ -0,0 +1,318 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import os
+import subprocess # nosec
+import sys
+
+from typing import List
+from typing import Tuple
+
+from kolla_ansible import utils
+
+
+DEFAULT_CONFIG_PATH = "/etc/kolla"
+
+CONFIG_PATH_ENV = "KOLLA_CONFIG_PATH"
+
+LOG = logging.getLogger(__name__)
+
+
+def add_ansible_args(parser):
+ """Add arguments required for running Ansible playbooks to a parser."""
+ parser.add_argument(
+ "-b",
+ "--become",
+ action="store_true",
+ help="run operations with become (nopasswd implied)",
+ )
+ parser.add_argument(
+ "-C",
+ "--check",
+ action="store_true",
+ help="don't make any changes; instead, try to predict "
+ "some of the changes that may occur",
+ )
+ parser.add_argument(
+ "-D",
+ "--diff",
+ action="store_true",
+ help="when changing (small) files and templates, show "
+ "the differences in those files; works great "
+ "with --check",
+ )
+ parser.add_argument(
+ "-e",
+ "--extra-vars",
+ metavar="EXTRA_VARS",
+ action="append",
+ help="set additional variables as key=value or "
+ "YAML/JSON",
+ )
+ parser.add_argument(
+ "-i",
+ "--inventory",
+ metavar="INVENTORY",
+ action="append",
+ help="specify inventory host path ",
+ )
+ parser.add_argument(
+ "-l",
+ "--limit",
+ metavar="SUBSET",
+ help="further limit selected hosts to an additional "
+ "pattern",
+ )
+ parser.add_argument(
+ "--skip-tags",
+ metavar="TAGS",
+ help="only run plays and tasks whose tags do not "
+ "match these values",
+ )
+ parser.add_argument(
+ "-t",
+ "--tags",
+ metavar="TAGS",
+ help="only run plays and tasks tagged with these "
+ "values",
+ )
+ parser.add_argument(
+ "-lt",
+ "--list-tasks",
+ action="store_true",
+ help="only print names of tasks, don't run them, "
+ "note this has no affect on kolla-ansible.",
+ )
+ parser.add_argument(
+ "-p", "--playbook",
+ metavar="PLAYBOOKS",
+ action="append",
+ help="Specify custom playbooks for kolla ansible "
+ "to use"
+ ),
+ parser.add_argument(
+ "--vault-id",
+ metavar="VAULT_IDS",
+ action="append",
+ help="the vault identity to use. "
+ "This argument may be specified multiple times.",
+ default=[]
+    )
+ parser.add_argument(
+ "--vault-password-file",
+ "--vault-pass-file",
+ metavar="VAULT_PASSWORD_FILES",
+ action="append",
+ help="vault password file",
+ default=[]
+    )
+ parser.add_argument(
+ "-J",
+ "--ask-vault-password",
+ "--ask-vault-pass",
+ action="store_true",
+ help="ask for vault password"
+ )
+
+
+def add_kolla_ansible_args(parser):
+ """Add arguments required for running Kolla Ansible to a parser."""
+ default_config_path = os.getenv(CONFIG_PATH_ENV, DEFAULT_CONFIG_PATH)
+ parser.add_argument(
+ "--configdir",
+ default=default_config_path,
+ dest="kolla_config_path",
+ help="path to Kolla configuration."
+ "(default=$%s or %s)" % (CONFIG_PATH_ENV, DEFAULT_CONFIG_PATH),
+ )
+ parser.add_argument(
+ "--passwords",
+ dest="kolla_passwords",
+ help="Path to the kolla ansible passwords file"
+ )
+
+
+def _get_inventory_paths(parsed_args) -> List[str]:
+ """Return path to the Kolla Ansible inventory."""
+ if parsed_args.inventory:
+ return parsed_args.inventory
+
+ default_inventory = os.path.join(
+ os.path.abspath(parsed_args.kolla_config_path),
+ "ansible", "inventory", "all-in-one")
+ return [default_inventory]
+
+
+def _validate_args(parsed_args, playbooks: list) -> None:
+ """Validate Kolla Ansible arguments."""
+ result = utils.is_readable_dir(
+ os.path.abspath(parsed_args.kolla_config_path))
+ if not result["result"]:
+ LOG.error(
+ "Kolla Ansible configuration path %s is invalid: %s",
+ os.path.abspath(parsed_args.kolla_config_path),
+ result["message"],
+ )
+ sys.exit(1)
+
+ inventories = _get_inventory_paths(parsed_args)
+ for inventory in inventories:
+ result = utils.is_readable_dir(inventory)
+ if not result["result"]:
+ # NOTE(mgoddard): Previously the inventory was a file, now it is a
+ # directory to allow us to support inventory host_vars. Support
+ # both formats for now.
+ result_f = utils.is_readable_file(inventory)
+ if not result_f["result"]:
+ LOG.error(
+ "Kolla inventory %s is invalid: %s",
+ inventory, result["message"]
+ )
+ sys.exit(1)
+
+ for playbook in playbooks:
+ result = utils.is_readable_file(playbook)
+ if not result["result"]:
+ LOG.error(
+ "Kolla Ansible playbook %s is invalid: %s",
+ playbook, result["message"]
+ )
+ sys.exit(1)
+
+ if parsed_args.kolla_passwords:
+ passwd_file = parsed_args.kolla_passwords
+ else:
+ passwd_file = os.path.join(
+ os.path.abspath(parsed_args.kolla_config_path), "passwords.yml")
+ result = utils.is_readable_file(passwd_file)
+ if not result["result"]:
+ LOG.error("Kolla Ansible passwords file %s is invalid: %s",
+ passwd_file, result["message"])
+
+ globals_file = os.path.join(os.path.abspath(
+ os.path.abspath(parsed_args.kolla_config_path)), "globals.yml")
+ result = utils.is_readable_file(globals_file)
+ if not result["result"]:
+ LOG.error("Kolla ansible globals file %s is invalid %s",
+ globals_file, result["message"])
+
+
+def _get_vars_files(config_path: str) -> List[str]:
+ """Return a list of Kolla Ansible configuration variable files.
+
+    The globals.d directory in the configuration path is searched for
+    variable files. The files are sorted alphabetically by name, giving a
+    predictable override ordering.
+ """
+ vars_path = os.path.join(config_path, "globals.d")
+ result = utils.is_readable_dir(vars_path)
+ if not result["result"]:
+ return []
+
+ vars_files = []
+ for vars_file in os.listdir(vars_path):
+ abs_path = os.path.join(vars_path, vars_file)
+ if utils.is_readable_file(abs_path)["result"]:
+ root, ext = os.path.splitext(vars_file)
+ if ext in (".yml", ".yaml", ".json"):
+ vars_files.append(abs_path)
+
+ return sorted(vars_files)
+
+
+def build_args(parsed_args,
+ playbooks: list,
+               extra_vars: dict = None,
+ verbose_level: int = None) -> Tuple[str, List[str]]:
+ """Build arguments required for running Ansible playbooks."""
+ args = list()
+ if verbose_level:
+ args += ["-" + "v" * verbose_level]
+ if parsed_args.list_tasks:
+ args += ["--list-tasks"]
+ inventories = _get_inventory_paths(parsed_args)
+ for inventory in inventories:
+ args += ["--inventory", inventory]
+ args += ["-e", "@%s" % os.path.join(
+ os.path.abspath(parsed_args.kolla_config_path),
+ "globals.yml")]
+ args += ["-e", "@%s" % os.path.join(
+ os.path.abspath(parsed_args.kolla_config_path),
+ "passwords.yml")]
+ for vault_id in parsed_args.vault_id:
+ args += ["--vault-id", vault_id]
+ for vault_pass_file in parsed_args.vault_password_file:
+ args += ["--vault-password-file", vault_pass_file]
+ if parsed_args.ask_vault_password:
+ args += "--ask-vault-password"
+ vars_files = _get_vars_files(
+ os.path.abspath(parsed_args.kolla_config_path))
+ for vars_file in vars_files:
+ args += ["-e", "@%s" % vars_file]
+ if parsed_args.extra_vars:
+ for extra_var in parsed_args.extra_vars:
+ args += ["-e", extra_var]
+ if extra_vars:
+ for extra_var_name, extra_var_value in extra_vars.items():
+ args += ["-e", "%s=%s" % (extra_var_name, extra_var_value)]
+ args += ["-e", "CONFIG_DIR=%s" %
+ os.path.abspath(parsed_args.kolla_config_path)]
+ if parsed_args.become:
+ args += ["--become"]
+ if parsed_args.check:
+ args += ["--check"]
+ if parsed_args.diff:
+ args += ["--diff"]
+ if parsed_args.limit:
+ args += ["--limit", parsed_args.limit]
+ if parsed_args.skip_tags:
+ args += ["--skip-tags", parsed_args.skip_tags]
+ if parsed_args.tags:
+ args += ["--tags", parsed_args.tags]
+ args += [" ".join(playbooks)]
+ return ("ansible-playbook", args)
+
+
+def run_playbooks(parsed_args, playbooks: list, extra_vars: dict = None,
+ quiet: bool = False, verbose_level: int = 0) -> None:
+ """Run a Kolla Ansible playbook."""
+ LOG.debug("Parsed arguments: %s" % parsed_args)
+ _validate_args(parsed_args, playbooks)
+ (executable, args) = build_args(
+ parsed_args,
+ playbooks,
+ extra_vars=extra_vars,
+ verbose_level=verbose_level,
+ )
+
+ try:
+ utils.run_command(executable, args, quiet=quiet)
+ except subprocess.CalledProcessError as e:
+ LOG.error(
+ "Kolla Ansible playbook(s) %s exited %d", ", ".join(
+ playbooks), e.returncode
+ )
+ sys.exit(e.returncode)
+
+
+def install_galaxy_collections(force: bool = True) -> None:
+ """Install Ansible Galaxy collection dependencies.
+
+    Installs collection dependencies specified in kolla-ansible,
+    and if present, in the kolla-ansible configuration.
+
+    :param force: Whether to force reinstallation of collections.
+ """
+ requirements = utils.get_data_files_path("requirements.yml")
+ requirements_core = utils.get_data_files_path("requirements-core.yml")
+ utils.galaxy_collection_install(requirements, force=force)
+ utils.galaxy_collection_install(requirements_core, force=force)
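A minimal usage sketch of the helpers above, assuming the ``kolla_ansible`` package is importable; the playbook path and host pattern are illustrative, and ``build_args`` itself performs no validation:

```python
# Sketch: inspect the ansible-playbook invocation that build_args assembles.
import argparse

from kolla_ansible import ansible

parser = argparse.ArgumentParser()
ansible.add_ansible_args(parser)
ansible.add_kolla_ansible_args(parser)
parsed_args = parser.parse_args(["--become", "--limit", "controllers"])

executable, args = ansible.build_args(
    parsed_args, playbooks=["/path/to/site.yml"], verbose_level=1)
print(executable, " ".join(args))
# e.g. ansible-playbook -v --inventory .../inventory/all-in-one
#      -e @/etc/kolla/globals.yml -e @/etc/kolla/passwords.yml ... --become
```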
diff --git a/kolla_ansible/cli/__init__.py b/kolla_ansible/cli/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/kolla_ansible/cli/commands.py b/kolla_ansible/cli/commands.py
new file mode 100644
index 0000000000..fa3b1908cc
--- /dev/null
+++ b/kolla_ansible/cli/commands.py
@@ -0,0 +1,471 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from cliff.command import Command
+
+from kolla_ansible import ansible
+from kolla_ansible import utils
+
+# Serial is not recommended and disabled by default.
+# Users can enable it by configuring the variable.
+ANSIBLE_SERIAL = 0
+
+
+def _get_playbook_path(playbook):
+ """Return the absolute path of Kolla Ansible playbook"""
+ return utils.get_data_files_path("ansible", "%s.yml" % playbook)
+
+
+def _choose_playbooks(parsed_args, kolla_playbook="site"):
+ """Return user defined playbook if set, otherwise return Kolla playbook"""
+ if parsed_args.playbook:
+ playbooks = parsed_args.playbook
+ else:
+ playbooks = [_get_playbook_path(kolla_playbook)]
+ return playbooks
+
+
+class KollaAnsibleMixin:
+ """Mixin class for commands running Kolla Ansible."""
+
+ def get_parser(self, prog_name):
+ parser = super(KollaAnsibleMixin, self).get_parser(prog_name)
+ ansible_group = parser.add_argument_group("Ansible arguments")
+ ka_group = parser.add_argument_group("Kolla Ansible arguments")
+ self.add_ansible_args(ansible_group)
+ self.add_kolla_ansible_args(ka_group)
+ return parser
+
+ def add_kolla_ansible_args(self, group):
+ ansible.add_kolla_ansible_args(group)
+
+ def add_ansible_args(self, group):
+ ansible.add_ansible_args(group)
+
+ def _get_verbosity_args(self):
+ """Add quietness and verbosity level arguments."""
+ # Cliff's default verbosity level is 1, 0 means quiet.
+ verbosity_args = {}
+ if self.app.options.verbose_level:
+ ansible_verbose_level = self.app.options.verbose_level - 1
+ verbosity_args["verbose_level"] = ansible_verbose_level
+ else:
+ verbosity_args["quiet"] = True
+ return verbosity_args
+
+ def run_playbooks(self, parsed_args, *args, **kwargs):
+ kwargs.update(self._get_verbosity_args())
+ return ansible.run_playbooks(parsed_args, *args, **kwargs)
+
+
+class GatherFacts(KollaAnsibleMixin, Command):
+ """Gather Ansible facts on hosts"""
+
+ def take_action(self, parsed_args):
+ self.app.LOG.info("Gathering Ansible facts")
+
+ playbooks = _choose_playbooks(parsed_args, "gather-facts")
+
+ self.run_playbooks(parsed_args, playbooks)
+
+
+class InstallDeps(KollaAnsibleMixin, Command):
+ """Install Ansible Galaxy dependencies"""
+
+ def take_action(self, parsed_args):
+ self.app.LOG.info("Installing Ansible Galaxy dependencies")
+ ansible.install_galaxy_collections()
+
+
+class Prechecks(KollaAnsibleMixin, Command):
+ """Do pre-deployment checks for hosts"""
+
+ def take_action(self, parsed_args):
+ self.app.LOG.info("Pre-deployment checking")
+
+ extra_vars = {}
+ extra_vars["kolla_action"] = "precheck"
+
+        playbooks = _choose_playbooks(parsed_args)
+
+ self.run_playbooks(parsed_args, playbooks, extra_vars=extra_vars)
+
+
+class GenConfig(KollaAnsibleMixin, Command):
+ """Generate configuration files for services. No container changes!"""
+
+ def take_action(self, parsed_args):
+ self.app.LOG.info(
+ "Generate configuration files for enabled OpenStack services")
+
+ extra_vars = {}
+ extra_vars["kolla_action"] = "config"
+
+ playbooks = _choose_playbooks(parsed_args)
+
+ self.run_playbooks(parsed_args, playbooks, extra_vars=extra_vars)
+
+
+class Reconfigure(KollaAnsibleMixin, Command):
+ """Reconfigure enabled OpenStack service"""
+
+ def take_action(self, parsed_args):
+ self.app.LOG.info("Reconfigure OpenStack service")
+
+ extra_vars = {}
+ extra_vars["kolla_action"] = "reconfigure"
+ extra_vars["kolla_serial"] = ANSIBLE_SERIAL
+
+ playbooks = _choose_playbooks(parsed_args)
+
+ self.run_playbooks(parsed_args, playbooks, extra_vars=extra_vars)
+
+
+class ValidateConfig(KollaAnsibleMixin, Command):
+ """Validate configuration files for enabled OpenStack services"""
+
+ def take_action(self, parsed_args):
+ self.app.LOG.info("Validate configuration files for enabled "
+ "OpenStack services")
+
+ extra_vars = {}
+ extra_vars["kolla_action"] = "config_validate"
+
+ playbooks = _choose_playbooks(parsed_args)
+
+ self.run_playbooks(parsed_args, playbooks, extra_vars=extra_vars)
+
+
+class BootstrapServers(KollaAnsibleMixin, Command):
+ """Bootstrap servers with Kolla Ansible deploy dependencies"""
+
+ def take_action(self, parsed_args):
+ self.app.LOG.info("Bootstrapping servers")
+
+ extra_vars = {}
+ extra_vars["kolla_action"] = "bootstrap-servers"
+
+ playbooks = _choose_playbooks(parsed_args, "kolla-host")
+
+ self.run_playbooks(parsed_args, playbooks, extra_vars=extra_vars)
+
+
+class Pull(KollaAnsibleMixin, Command):
+ """Pull all images for containers. Only pulls, no container changes."""
+
+ def take_action(self, parsed_args):
+ self.app.LOG.info("Pulling Docker images")
+
+ extra_vars = {}
+ extra_vars["kolla_action"] = "pull"
+
+ playbooks = _choose_playbooks(parsed_args)
+
+ self.run_playbooks(parsed_args, playbooks, extra_vars=extra_vars)
+
+
+class Certificates(KollaAnsibleMixin, Command):
+ """Generate self-signed certificate for TLS *For Development Only*"""
+
+ def take_action(self, parsed_args):
+ self.app.LOG.info("Generate TLS Certificates")
+
+ playbooks = _choose_playbooks(parsed_args, "certificates")
+
+ self.run_playbooks(parsed_args, playbooks)
+
+
+class OctaviaCertificates(KollaAnsibleMixin, Command):
+ """Generate certificates for octavia deployment"""
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ group = parser.add_argument_group("Octavia certificates action")
+ group.add_argument(
+ "--check-expiry",
+ type=int,
+ help="Check if the certificates will expire "
+ "within given number of days",
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ extra_vars = {}
+
+ if hasattr(parsed_args, "check_expiry") \
+ and parsed_args.check_expiry is not None:
+ self.app.LOG.info("Checking if certificates expire "
+ "within given number of days.")
+ extra_vars["octavia_certs_check_expiry"] = "yes"
+ extra_vars["octavia_certs_expiry_limit"] = parsed_args.check_expiry
+ else:
+ self.app.LOG.info("Generate octavia Certificates")
+
+ playbooks = _choose_playbooks(parsed_args, "octavia-certificates")
+
+ self.run_playbooks(parsed_args, playbooks, extra_vars=extra_vars)
+
+
+class Deploy(KollaAnsibleMixin, Command):
+ """Generate config, bootstrap and start all Kolla Ansible containers"""
+
+ def take_action(self, parsed_args):
+ self.app.LOG.info("Deploying Playbooks")
+
+ extra_vars = {}
+ extra_vars["kolla_action"] = "deploy"
+
+ playbooks = _choose_playbooks(parsed_args)
+
+ self.run_playbooks(parsed_args, playbooks, extra_vars=extra_vars)
+
+
+class DeployContainers(KollaAnsibleMixin, Command):
+ """Only deploy and start containers (no config updates or bootstrapping)"""
+
+ def take_action(self, parsed_args):
+ self.app.LOG.info("Deploying Containers")
+
+ extra_vars = {}
+ extra_vars["kolla_action"] = "deploy-containers"
+
+ playbooks = _choose_playbooks(parsed_args)
+
+ self.run_playbooks(parsed_args, playbooks, extra_vars=extra_vars)
+
+
+class Postdeploy(KollaAnsibleMixin, Command):
+ """Do post deploy on deploy node"""
+
+ def take_action(self, parsed_args):
+ self.app.LOG.info("Post-Deploying Playbooks")
+
+ playbooks = _choose_playbooks(parsed_args, "post-deploy")
+
+ self.run_playbooks(parsed_args, playbooks)
+
+
+class Upgrade(KollaAnsibleMixin, Command):
+ """Upgrades existing OpenStack Environment"""
+
+ def take_action(self, parsed_args):
+ self.app.LOG.info("Upgrading OpenStack Environment")
+
+ extra_vars = {}
+ extra_vars["kolla_action"] = "upgrade"
+ extra_vars["kolla_serial"] = ANSIBLE_SERIAL
+
+ playbooks = _choose_playbooks(parsed_args)
+
+ self.run_playbooks(parsed_args, playbooks, extra_vars=extra_vars)
+
+
+class Stop(KollaAnsibleMixin, Command):
+ """Stop Kolla Ansible containers"""
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ group = parser.add_argument_group("Stop action")
+ group.add_argument(
+ "--yes-i-really-really-mean-it",
+ action="store_true",
+ required=True,
+ help="WARNING! This action will remove the Openstack deployment!",
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ self.app.LOG.info("Stop Kolla containers")
+
+ extra_vars = {}
+ extra_vars["kolla_action"] = "stop"
+
+ playbooks = _choose_playbooks(parsed_args)
+
+ self.run_playbooks(parsed_args, playbooks, extra_vars=extra_vars)
+
+
+class Destroy(KollaAnsibleMixin, Command):
+ """Destroy Kolla Ansible containers, volumes and host configuration!"""
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ group = parser.add_argument_group("Destroy action")
+ group.add_argument(
+ "--yes-i-really-really-mean-it",
+ action="store_true",
+ required=True,
+ help="WARNING! This action will remove the Openstack deployment!",
+ )
+ group.add_argument(
+ "--include-dev",
+ action="store_true",
+ help="Remove devevelopment environment",
+ )
+ group.add_argument(
+ "--include-images",
+ action="store_true",
+ help="Remove leftover container images",
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ self.app.LOG.warning("WARNING: This will PERMANENTLY DESTROY "
+ "all deployed kolla containers, volumes "
+ "and host configuration. There is no way "
+ "to recover from this action!")
+
+ extra_vars = {}
+ extra_vars["kolla_action"] = "destroy"
+ extra_vars["destroy_include_dev"] = (
+ "yes" if parsed_args.include_dev else "no"
+ )
+ extra_vars["destroy_include_images"] = (
+ "yes" if parsed_args.include_images else "no"
+ )
+
+ playbooks = _choose_playbooks(parsed_args, "destroy")
+
+ self.run_playbooks(parsed_args, playbooks, extra_vars=extra_vars)
+
+
+class PruneImages(KollaAnsibleMixin, Command):
+ """Prune orphaned Kolla Ansible docker images"""
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ group = parser.add_argument_group("Prune images action")
+ group.add_argument(
+ "--yes-i-really-really-mean-it",
+ action="store_true",
+ required=True,
+ help="WARNING! This action will remove all orphaned images!",
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ self.app.LOG.info("Prune orphaned Kolla images")
+
+ playbooks = _choose_playbooks(parsed_args, "prune-images")
+
+ self.run_playbooks(parsed_args, playbooks)
+
+
+class BifrostDeploy(KollaAnsibleMixin, Command):
+ """Deploy and start bifrost container"""
+
+ def take_action(self, parsed_args):
+ self.app.LOG.info("Deploying Bifrost")
+
+ extra_vars = {}
+ extra_vars["kolla_action"] = "deploy"
+
+ playbooks = _choose_playbooks(parsed_args, "bifrost")
+
+ self.run_playbooks(parsed_args, playbooks, extra_vars=extra_vars)
+
+
+class BifrostDeployServers(KollaAnsibleMixin, Command):
+ """Enroll and deploy servers with bifrost"""
+
+ def take_action(self, parsed_args):
+ self.app.LOG.info("Deploying servers with bifrost")
+
+ extra_vars = {}
+ extra_vars["kolla_action"] = "deploy-servers"
+
+ playbooks = _choose_playbooks(parsed_args, "bifrost")
+
+ self.run_playbooks(parsed_args, playbooks, extra_vars=extra_vars)
+
+
+class BifrostUpgrade(KollaAnsibleMixin, Command):
+ """Upgrades an existing bifrost container"""
+
+ def take_action(self, parsed_args):
+ self.app.LOG.info("Upgrading Bifrost")
+
+ extra_vars = {}
+ extra_vars["kolla_action"] = "upgrade"
+
+ playbooks = _choose_playbooks(parsed_args, "bifrost")
+
+ self.run_playbooks(parsed_args, playbooks, extra_vars=extra_vars)
+
+
+class RabbitMQResetState(KollaAnsibleMixin, Command):
+ """Force reset the state of RabbitMQ"""
+
+ def take_action(self, parsed_args):
+ self.app.LOG.info("Force reset the state of RabbitMQ")
+
+ playbooks = _choose_playbooks(parsed_args, "rabbitmq-reset-state")
+
+ self.run_playbooks(parsed_args, playbooks)
+
+
+class MariaDBBackup(KollaAnsibleMixin, Command):
+ """Take a backup of MariaDB databases. See help for options."""
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ group = parser.add_argument_group("MariaDB backup type")
+ group.add_argument(
+ "--full",
+ action="store_const",
+ const="full",
+ dest="mariadb_backup_type",
+ default="full"
+ )
+ group.add_argument(
+ "--incremental",
+ action="store_const",
+ const="incremental",
+ dest="mariadb_backup_type"
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ self.app.LOG.info("Backup MariaDB databases")
+
+ extra_vars = {}
+ extra_vars["kolla_action"] = "backup"
+ extra_vars["mariadb_backup_type"] = parsed_args.mariadb_backup_type
+
+ playbooks = _choose_playbooks(parsed_args, "mariadb_backup")
+
+ self.run_playbooks(parsed_args, playbooks, extra_vars=extra_vars)
+
+
+class MariaDBRecovery(KollaAnsibleMixin, Command):
+ """Recover a completely stopped MariaDB cluster"""
+
+ def take_action(self, parsed_args):
+ self.app.LOG.info("Attempting to restart MariaDB cluster")
+
+ extra_vars = {}
+ extra_vars["kolla_action"] = "deploy"
+
+ playbooks = _choose_playbooks(parsed_args, "mariadb_recovery")
+
+ self.run_playbooks(parsed_args, playbooks, extra_vars=extra_vars)
+
+
+class NovaLibvirtCleanup(KollaAnsibleMixin, Command):
+ """Clean up disabled nova_libvirt containers"""
+
+ def take_action(self, parsed_args):
+ self.app.LOG.info("Cleanup disabled nova_libvirt containers")
+
+ playbooks = _choose_playbooks(parsed_args, "nova-libvirt-cleanup")
+
+ self.run_playbooks(parsed_args, playbooks)
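The mixin above is designed for reuse; a hypothetical extra command could look like this (the ``smoke-test`` playbook and its entry-point registration under the ``kolla_ansible.cli`` namespace are assumptions, not part of this change):

```python
# Hypothetical example: a new CLI command built on KollaAnsibleMixin.
# "smoke-test" is an assumed playbook name; real commands are registered
# as entry points in the "kolla_ansible.cli" namespace used by the app.
from cliff.command import Command

from kolla_ansible.cli.commands import KollaAnsibleMixin, _choose_playbooks


class SmokeTest(KollaAnsibleMixin, Command):
    """Run a hypothetical smoke-test playbook"""

    def take_action(self, parsed_args):
        self.app.LOG.info("Running smoke tests")
        playbooks = _choose_playbooks(parsed_args, "smoke-test")
        self.run_playbooks(parsed_args, playbooks)
```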
diff --git a/kolla_ansible/cmd/genpwd.py b/kolla_ansible/cmd/genpwd.py
index b8b176e338..89b7bbd0ab 100755
--- a/kolla_ansible/cmd/genpwd.py
+++ b/kolla_ansible/cmd/genpwd.py
@@ -16,9 +16,11 @@
import hmac
import os
import random
+import stat
import string
import sys
+from ansible.utils.encrypt import random_salt
from cryptography import fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
@@ -55,7 +57,7 @@ def generate_RSA(bits=4096):
def genpwd(passwords_file, length, uuid_keys, ssh_keys, blank_keys,
- fernet_keys, hmac_md5_keys):
+ fernet_keys, hmac_md5_keys, bcrypt_keys):
try:
with open(passwords_file, 'r') as f:
passwords = yaml.safe_load(f.read())
@@ -63,6 +65,14 @@ def genpwd(passwords_file, length, uuid_keys, ssh_keys, blank_keys,
print(f"ERROR: Passwords file \"{passwords_file}\" is missing")
sys.exit(1)
+ if os.stat(passwords_file).st_mode & stat.S_IROTH:
+ print(f"WARNING: Passwords file \"{passwords_file}\" is"
+ " world-readable. The permissions will be changed.")
+
+ if os.stat(passwords_file).st_mode & stat.S_IWOTH:
+ print(f"WARNING: Passwords file \"{passwords_file}\" is"
+ " world-writeable. The permissions will be changed.")
+
if not isinstance(passwords, dict):
print("ERROR: Passwords file not in expected key/value format")
sys.exit(1)
@@ -89,6 +99,11 @@ def genpwd(passwords_file, length, uuid_keys, ssh_keys, blank_keys,
.hexdigest())
elif k in fernet_keys:
passwords[k] = fernet.Fernet.generate_key().decode()
+ elif k in bcrypt_keys:
+ # NOTE(wszusmki) To be compatible with the ansible
+ # password_hash filter, we use the utility function from the
+ # ansible library.
+ passwords[k] = random_salt(22)
else:
passwords[k] = ''.join([
random.SystemRandom().choice(
@@ -96,7 +111,15 @@ def genpwd(passwords_file, length, uuid_keys, ssh_keys, blank_keys,
for n in range(length)
])
- with open(passwords_file, 'w') as f:
+ try:
+ os.remove(passwords_file)
+ except OSError:
+ pass
+
+ flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
+ mode = 0o640
+
+ with os.fdopen(os.open(passwords_file, flags, mode=mode), 'w') as f:
f.write(yaml.safe_dump(passwords, default_flow_style=False))
@@ -120,8 +143,9 @@ def main():
# SSH key pair
ssh_keys = ['kolla_ssh_key', 'nova_ssh_key',
- 'keystone_ssh_key', 'bifrost_ssh_key', 'octavia_amp_ssh_key',
- 'neutron_ssh_key']
+ 'keystone_ssh_key', 'bifrost_ssh_key',
+ 'octavia_amp_ssh_key', 'neutron_ssh_key',
+ 'haproxy_ssh_key']
# If these keys are None, leave them as None
blank_keys = ['docker_registry_password']
@@ -133,11 +157,14 @@ def main():
# Fernet keys
fernet_keys = ['barbican_crypto_key']
+ # bcrypt salts
+ bcrypt_keys = ['prometheus_bcrypt_salt']
+
# length of password
length = 40
genpwd(passwords_file, length, uuid_keys, ssh_keys, blank_keys,
- fernet_keys, hmac_md5_keys)
+ fernet_keys, hmac_md5_keys, bcrypt_keys)
if __name__ == '__main__':
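For context, a sketch of how such a salt is consumed, mirroring what Ansible's ``password_hash`` filter does via passlib (the passlib dependency and the example password are assumptions):

```python
# Sketch: hash a password with a pre-generated bcrypt salt, as Ansible's
# password_hash filter does through passlib. passlib is assumed installed;
# random_salt(22) is the same call genpwd uses for prometheus_bcrypt_salt.
from ansible.utils.encrypt import random_salt
from passlib.hash import bcrypt

salt = random_salt(22)
print(bcrypt.using(salt=salt).hash("example-password"))
```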
diff --git a/kolla_ansible/cmd/kolla_ansible.py b/kolla_ansible/cmd/kolla_ansible.py
new file mode 100644
index 0000000000..58467e0b10
--- /dev/null
+++ b/kolla_ansible/cmd/kolla_ansible.py
@@ -0,0 +1,50 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sys
+
+from cliff.app import App
+from cliff.commandmanager import CommandManager
+
+from kolla_ansible import version
+
+
+class KollaAnsibleApp(App):
+
+ def __init__(self):
+ release_version = version.version_info.release_string()
+ super().__init__(
+ description="Kolla Ansible Command Line Interface (CLI)",
+ version=release_version,
+ command_manager=CommandManager("kolla_ansible.cli"),
+ deferred_help=True,
+ )
+
+ def initialize_app(self, argv):
+ self.LOG.debug("initialize_app")
+
+ def prepare_to_run_command(self, cmd):
+ self.LOG.debug("prepare_to_run_command %s", cmd.__class__.__name__)
+
+ def clean_up(self, cmd, result, err):
+ self.LOG.debug("clean_up %s", cmd.__class__.__name__)
+ if err:
+ self.LOG.debug("got an error: %s", err)
+
+
+def main(argv=sys.argv[1:]):
+ myapp = KollaAnsibleApp()
+ return myapp.run(argv)
+
+
+if __name__ == "__main__":
+ sys.exit(main(sys.argv[1:]))
diff --git a/kolla_ansible/cmd/mergepwd.py b/kolla_ansible/cmd/mergepwd.py
index f7d3c7c50a..00f38d9760 100755
--- a/kolla_ansible/cmd/mergepwd.py
+++ b/kolla_ansible/cmd/mergepwd.py
@@ -13,6 +13,8 @@
# limitations under the License.
import argparse
+import os
+import stat
import sys
import yaml
@@ -21,9 +23,21 @@ def mergepwd(old, new, final, clean=False):
with open(old, "r") as old_file:
old_passwords = yaml.safe_load(old_file)
+ if os.stat(old).st_mode & stat.S_IROTH:
+ print(f"WARNING: Passwords file \"{old}\" is world-readable.")
+
+ if os.stat(old).st_mode & stat.S_IWOTH:
+ print(f"WARNING: Passwords file \"{old}\" is world-writeable.")
+
with open(new, "r") as new_file:
new_passwords = yaml.safe_load(new_file)
+ if os.stat(new).st_mode & stat.S_IROTH:
+ print(f"WARNING: Passwords file \"{new}\" is world-readable.")
+
+ if os.stat(new).st_mode & stat.S_IWOTH:
+ print(f"WARNING: Passwords file \"{new}\" is world-writeable.")
+
if not isinstance(old_passwords, dict):
print("ERROR: Old passwords file not in expected key/value format")
sys.exit(1)
@@ -41,7 +55,15 @@ def mergepwd(old, new, final, clean=False):
# old behavior
new_passwords.update(old_passwords)
- with open(final, "w") as destination:
+ try:
+ os.remove(final)
+ except OSError:
+ pass
+
+ flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
+ mode = 0o640
+
+ with os.fdopen(os.open(final, flags, mode=mode), 'w') as destination:
yaml.safe_dump(new_passwords, destination, default_flow_style=False)
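A standalone sketch of the secure rewrite pattern used above (the file name is illustrative):

```python
# Recreate the file with mode 0o640 so it is never world-readable, rather
# than relying on the process umask as a plain open() would. O_CREAT only
# applies the mode to newly created files, hence the removal first.
import os

try:
    os.remove("passwords.yml")  # illustrative path
except OSError:
    pass

flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
with os.fdopen(os.open("passwords.yml", flags, mode=0o640), "w") as f:
    f.write("example_password: secret\n")
```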
diff --git a/kolla_ansible/cmd/readpwd.py b/kolla_ansible/cmd/readpwd.py
index 87bdf6ff5b..b06c7f7df8 100755
--- a/kolla_ansible/cmd/readpwd.py
+++ b/kolla_ansible/cmd/readpwd.py
@@ -14,6 +14,7 @@
import argparse
import os
+import stat
import sys
import hvac
@@ -29,6 +30,14 @@ def readpwd(passwords_file, vault_kv_path, vault_mount_point, vault_namespace,
with open(passwords_file, 'r') as f:
passwords = yaml.safe_load(f.read())
+ if os.stat(passwords_file).st_mode & stat.S_IROTH:
+ print(f"WARNING: Passwords file \"{passwords_file}\" is"
+ " world-readable. The permissions will be changed.")
+
+ if os.stat(passwords_file).st_mode & stat.S_IWOTH:
+ print(f"WARNING: Passwords file \"{passwords_file}\" is"
+ " world-writeable. The permissions will be changed.")
+
if not isinstance(passwords, dict):
print("ERROR: Passwords file not in expected key/value format")
sys.exit(1)
@@ -53,7 +62,15 @@ def readpwd(passwords_file, vault_kv_path, vault_mount_point, vault_namespace,
except KeyError:
vault_kv_passwords[password_key] = password_data['data']['data']
- with open(passwords_file, 'w') as f:
+ try:
+ os.remove(passwords_file)
+ except OSError:
+ pass
+
+ flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
+ mode = 0o640
+
+ with os.fdopen(os.open(passwords_file, flags, mode=mode), 'w') as f:
yaml.safe_dump(vault_kv_passwords, f)
diff --git a/kolla_ansible/cmd/writepwd.py b/kolla_ansible/cmd/writepwd.py
index 9a3eb0d810..0227fcdd44 100755
--- a/kolla_ansible/cmd/writepwd.py
+++ b/kolla_ansible/cmd/writepwd.py
@@ -14,6 +14,7 @@
import argparse
import os
+import stat
import sys
import hvac
@@ -25,9 +26,18 @@
def writepwd(passwords_file, vault_kv_path, vault_mount_point, vault_namespace,
vault_addr, vault_role_id, vault_secret_id, vault_token,
vault_cacert):
+
with open(passwords_file, 'r') as f:
passwords = yaml.safe_load(f.read())
+ if os.stat(passwords_file).st_mode & stat.S_IROTH:
+ print(f"WARNING: Passwords file \"{passwords_file}\" is"
+ " world-readable.")
+
+ if os.stat(passwords_file).st_mode & stat.S_IWOTH:
+ print(f"WARNING: Passwords file \"{passwords_file}\" is"
+ " world-writeable.")
+
if not isinstance(passwords, dict):
print("ERROR: Passwords file not in expected key/value format")
sys.exit(1)
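The world-readable/world-writeable checks repeated across these tools reduce to a small stat test; a minimal sketch, assuming a ``passwords.yml`` exists in the working directory:

```python
# Sketch of the permission checks used by the password tools.
import os
import stat

st_mode = os.stat("passwords.yml").st_mode
if st_mode & stat.S_IROTH:
    print("WARNING: file is world-readable")
if st_mode & stat.S_IWOTH:
    print("WARNING: file is world-writeable")
```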
diff --git a/kolla_ansible/database_shards.py b/kolla_ansible/database_shards.py
index b607e5bbbc..5afdac8a2c 100644
--- a/kolla_ansible/database_shards.py
+++ b/kolla_ansible/database_shards.py
@@ -87,8 +87,8 @@ def database_shards_info(context, hostnames):
host_shard_id = host.get('mariadb_shard_id')
if host_shard_id is None:
- raise FilterError(f"'mariadb_shard_id' is undefined "
- "for host '{hostname}'")
+ raise FilterError("'mariadb_shard_id' is undefined "
+ f"for host '{hostname}'")
else:
host_shard_id = str(host_shard_id)
@@ -121,6 +121,7 @@ def database_shards_info(context, hostnames):
raise FilterError("'mariadb_shard_backup_user_prefix' "
"variable is unavailable")
db_user = f"{db_backup_prefix}{host_shard_id}"
+ db_password = host.get('mariadb_backup_database_password')
user_dict = {'password': db_password, 'user': db_user,
'shard_id': host_shard_id}
shards_info['users'].append(user_dict)
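With this change the backup user carries its own credential; judging by the unit-test expectations further below, the ``users`` portion of the returned structure looks roughly like this (values illustrative):

```python
# Illustrative shape of shards_info["users"] after the fix: the backup user
# now gets mariadb_backup_database_password instead of the root password.
users = [
    {"user": "root_shard_0", "password": "SECRET", "shard_id": "0"},
    {"user": "backup_shard_0", "password": "SECRET1", "shard_id": "0"},
]
```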
diff --git a/kolla_ansible/kolla_address.py b/kolla_ansible/kolla_address.py
index 12dff1ad1e..4538e75a2c 100644
--- a/kolla_ansible/kolla_address.py
+++ b/kolla_ansible/kolla_address.py
@@ -48,7 +48,7 @@ def kolla_address(context, network_name, hostname=None):
raise FilterError("'inventory_hostname' variable is unavailable")
hostvars = context.get('hostvars')
- if isinstance(hostvars, Undefined):
+ if hostvars is None or isinstance(hostvars, Undefined):
raise FilterError("'hostvars' variable is unavailable")
host = hostvars.get(hostname)
diff --git a/kolla_ansible/kolla_url.py b/kolla_ansible/kolla_url.py
new file mode 100644
index 0000000000..a217491e6f
--- /dev/null
+++ b/kolla_ansible/kolla_url.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2022 StackHPC Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from kolla_ansible.put_address_in_context import put_address_in_context
+
+
+def kolla_url(fqdn, protocol, port, path='', context='url'):
+ """generates url
+
+ :param fqdn:
+ :param protocol: http, ws, https or wss
+ :param port: port (omits 80 on http and 443 on https in output)
+ :param path: path - optional
+ :returns: string with url
+ """
+
+ fqdn = put_address_in_context(fqdn, context)
+ port = int(port)
+
+ if ((protocol == 'http' and port == 80) or
+ (protocol == 'https' and port == 443) or
+ (protocol == 'ws' and port == 80) or
+ (protocol == 'wss' and port == 443)):
+ address = f"{protocol}://{fqdn}{path}"
+ else:
+ address = f"{protocol}://{fqdn}:{port}{path}"
+
+ return address
diff --git a/kolla_ansible/nova_filters.py b/kolla_ansible/nova_filters.py
index 3a613cf3ea..4bb5cbdf66 100644
--- a/kolla_ansible/nova_filters.py
+++ b/kolla_ansible/nova_filters.py
@@ -36,8 +36,7 @@ def extract_cell(list_cells_cli_output, cell_name):
# NOTE(priteau): regexp doesn't support passwords containing spaces
p = re.compile(
r'\| +(?P[^ ]+)? +'
- r'\| +(?!00000000-0000-0000-0000-000000000000)'
- r'(?P[0-9a-f\-]+) +'
+ r'\| +(?P[0-9a-f\-]+) +'
r'\| +(?P[^ ]+) +'
r'\| +(?P[^ ]+) +'
r'\| +(?P[^ ]+) +'
diff --git a/kolla_ansible/tests/unit/test_address_filters.py b/kolla_ansible/tests/unit/test_address_filters.py
index be2cee78d9..589531639a 100644
--- a/kolla_ansible/tests/unit/test_address_filters.py
+++ b/kolla_ansible/tests/unit/test_address_filters.py
@@ -20,6 +20,7 @@
from kolla_ansible.exception import FilterError
from kolla_ansible.kolla_address import kolla_address
+from kolla_ansible.kolla_url import kolla_url
from kolla_ansible.put_address_in_context import put_address_in_context
from kolla_ansible.tests.unit.helpers import _to_bool
@@ -323,3 +324,66 @@ def test_valid_ipv6_config_do_not_ignore_any_vip_address(self):
},
})
self.assertEqual(addr, kolla_address(context, 'api'))
+
+
+class TestKollaUrlFilter(unittest.TestCase):
+
+ def test_https_443_path(self):
+ protocol = 'https'
+ fqdn = 'kolla.external'
+ port = 443
+ path = '/v2'
+ self.assertEqual("https://kolla.external/v2",
+ kolla_url(fqdn, protocol, port, path))
+
+ def test_http_80_path(self):
+ protocol = 'http'
+ fqdn = 'kolla.external'
+ port = 80
+ path = '/v2'
+ self.assertEqual("http://kolla.external/v2",
+ kolla_url(fqdn, protocol, port, path))
+
+ def test_https_8443_path(self):
+ protocol = 'https'
+ fqdn = 'kolla.external'
+ port = 8443
+ path = '/v2'
+ self.assertEqual("https://kolla.external:8443/v2",
+ kolla_url(fqdn, protocol, port, path))
+
+ def test_http_8080_path(self):
+ protocol = 'http'
+ fqdn = 'kolla.external'
+ port = 8080
+ path = '/v2'
+ self.assertEqual("http://kolla.external:8080/v2",
+ kolla_url(fqdn, protocol, port, path))
+
+ def test_https_443_nopath(self):
+ protocol = 'https'
+ fqdn = 'kolla.external'
+ port = 443
+ self.assertEqual("https://kolla.external",
+ kolla_url(fqdn, protocol, port))
+
+ def test_http_80_nopath(self):
+ protocol = 'http'
+ fqdn = 'kolla.external'
+ port = 80
+ self.assertEqual("http://kolla.external",
+ kolla_url(fqdn, protocol, port))
+
+ def test_https_8443_nopath(self):
+ protocol = 'https'
+ fqdn = 'kolla.external'
+ port = 8443
+ self.assertEqual("https://kolla.external:8443",
+ kolla_url(fqdn, protocol, port))
+
+ def test_http_8080_nopath(self):
+ protocol = 'http'
+ fqdn = 'kolla.external'
+ port = 8080
+ self.assertEqual("http://kolla.external:8080",
+ kolla_url(fqdn, protocol, port))
diff --git a/kolla_ansible/tests/unit/test_database_filters.py b/kolla_ansible/tests/unit/test_database_filters.py
index 947bd3d572..79f6947e4c 100644
--- a/kolla_ansible/tests/unit/test_database_filters.py
+++ b/kolla_ansible/tests/unit/test_database_filters.py
@@ -53,6 +53,7 @@ def test_valid_shards_info_with_backup_user(self):
root_prefix = 'root_shard_'
backup_prefix = 'backup_shard_'
db_cred = 'SECRET'
+ backup_db_cred = 'SECRET1'
db_shards = ['0', '1']
context = self._make_context({
@@ -62,6 +63,7 @@ def test_valid_shards_info_with_backup_user(self):
'mariadb_shard_id': db_shards[0],
'enable_mariabackup': enable_mariabackup,
'database_password': db_cred,
+ 'mariadb_backup_database_password': backup_db_cred,
'mariadb_shard_root_user_prefix': root_prefix,
'mariadb_shard_backup_user_prefix': backup_prefix,
},
@@ -69,6 +71,7 @@ def test_valid_shards_info_with_backup_user(self):
'mariadb_shard_id': db_shards[0],
'enable_mariabackup': enable_mariabackup,
'database_password': db_cred,
+ 'mariadb_backup_database_password': backup_db_cred,
'mariadb_shard_root_user_prefix': root_prefix,
'mariadb_shard_backup_user_prefix': backup_prefix,
},
@@ -76,6 +79,7 @@ def test_valid_shards_info_with_backup_user(self):
'mariadb_shard_id': db_shards[1],
'enable_mariabackup': enable_mariabackup,
'database_password': db_cred,
+ 'mariadb_backup_database_password': backup_db_cred,
'mariadb_shard_root_user_prefix': root_prefix,
'mariadb_shard_backup_user_prefix': backup_prefix,
},
@@ -103,7 +107,7 @@ def test_valid_shards_info_with_backup_user(self):
"user": f"{root_prefix}0"
},
{
- "password": db_cred,
+ "password": backup_db_cred,
"shard_id": db_shards[0],
"user": f"{backup_prefix}0"
},
@@ -113,7 +117,7 @@ def test_valid_shards_info_with_backup_user(self):
"user": f"{root_prefix}1"
},
{
- "password": db_cred,
+ "password": backup_db_cred,
"shard_id": db_shards[1],
"user": f"{backup_prefix}1"
}
@@ -188,6 +192,7 @@ def test_valid_shards_info_with_different_users_and_pass(self):
root_prefix_2 = 'batman_shard_'
backup_prefix = 'backupman_shard_'
db_cred = 'kRypTonyte'
+ backup_db_cred = 'kRypTonyte1'
db_shards = ['0', '1']
context = self._make_context({
@@ -197,6 +202,7 @@ def test_valid_shards_info_with_different_users_and_pass(self):
'mariadb_shard_id': db_shards[0],
'enable_mariabackup': enable_mariabackup,
'database_password': db_cred,
+ 'mariadb_backup_database_password': backup_db_cred,
'mariadb_shard_root_user_prefix': root_prefix,
'mariadb_shard_backup_user_prefix': backup_prefix,
},
@@ -204,6 +210,7 @@ def test_valid_shards_info_with_different_users_and_pass(self):
'mariadb_shard_id': db_shards[0],
'enable_mariabackup': enable_mariabackup,
'database_password': db_cred,
+ 'mariadb_backup_database_password': backup_db_cred,
'mariadb_shard_root_user_prefix': root_prefix,
'mariadb_shard_backup_user_prefix': backup_prefix,
},
@@ -211,6 +218,7 @@ def test_valid_shards_info_with_different_users_and_pass(self):
'mariadb_shard_id': db_shards[1],
'enable_mariabackup': 'no',
'database_password': db_cred,
+ 'mariadb_backup_database_password': backup_db_cred,
'mariadb_shard_root_user_prefix': root_prefix_2,
},
},
@@ -237,7 +245,7 @@ def test_valid_shards_info_with_different_users_and_pass(self):
"user": f"{root_prefix}0"
},
{
- "password": db_cred,
+ "password": backup_db_cred,
"shard_id": db_shards[0],
"user": f"{backup_prefix}0"
},
diff --git a/kolla_ansible/utils.py b/kolla_ansible/utils.py
new file mode 100644
index 0000000000..d2183de212
--- /dev/null
+++ b/kolla_ansible/utils.py
@@ -0,0 +1,191 @@
+# Copyright (c) 2017 StackHPC Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import glob
+import json
+import logging
+import os
+import subprocess # nosec
+import sys
+import yaml
+
+from importlib.metadata import Distribution
+from time import sleep
+
+LOG = logging.getLogger(__name__)
+
+
+def get_data_files_path(*relative_path) -> str:
+ """Given a relative path to a data file, return the absolute path"""
+ # Detect editable pip install / python setup.py develop and use a path
+ # relative to the source directory
+ return os.path.join(_get_base_path(), *relative_path)
+
+
+def _detect_install_prefix(path: str) -> str | None:
+ script_path = os.path.realpath(path)
+ script_path = os.path.normpath(script_path)
+ components = script_path.split(os.sep)
+ # use heuristic: anything before the last 'lib' in path is the prefix
+ if 'lib' not in components:
+ return None
+ last_lib = len(components) - 1 - components[::-1].index('lib')
+ prefix = components[:last_lib]
+ prefix_path = os.sep.join(prefix)
+ return prefix_path
+
+
+def _get_direct_url_if_editable(dist: Distribution) -> str | None:
+ direct_url = os.path.join(dist._path, 'direct_url.json')
+ editable = None
+ if os.path.isfile(direct_url):
+ with open(direct_url, 'r') as f:
+ direct_url_content = json.loads(f.readline().strip())
+ dir_info = direct_url_content.get('dir_info')
+ if dir_info is not None:
+ editable = dir_info.get('editable')
+ if editable:
+ url = direct_url_content['url']
+ prefix = 'file://'
+ if url.startswith(prefix):
+ return url[len(prefix):]
+
+ return None
+
+
+def _get_base_path() -> str:
+ """Return location where kolla-ansible package is installed."""
+ override = os.environ.get("KOLLA_ANSIBLE_DATA_FILES_PATH")
+ if override:
+ return os.path.join(override)
+
+ kolla_ansible_dist = list(Distribution.discover(name="kolla_ansible"))
+ if kolla_ansible_dist:
+ direct_url = _get_direct_url_if_editable(kolla_ansible_dist[0])
+ if direct_url:
+ return direct_url
+
+ egg_glob = os.path.join(
+ sys.prefix, 'lib*', 'python*', '*-packages', 'kolla-ansible.egg-link'
+ )
+ egg_link = glob.glob(egg_glob)
+ if egg_link:
+ with open(egg_link[0], "r") as f:
+ realpath = f.readline().strip()
+ return os.path.join(realpath)
+
+ prefix = _detect_install_prefix(__file__)
+ if prefix:
+ return os.path.join(prefix, "share", "kolla-ansible")
+
+ # Assume uninstalled
+ return os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")
+
+
+def galaxy_collection_install(requirements_file: str,
+ collections_path: str = None,
+ force: bool = False) -> None:
+ """Install ansible collections needed by kolla-ansible roles."""
+ requirements = read_yaml_file(requirements_file)
+ if not isinstance(requirements, dict):
+ # Handle legacy role list format, which causes the command to fail.
+ return
+ args = ["collection", "install"]
+ if collections_path:
+ args += ["--collections-path", collections_path]
+ args += ["--requirements-file", requirements_file]
+ if force:
+ args += ["--force"]
+
+ for retry_count in range(1, 6):
+ try:
+ run_command("ansible-galaxy", args)
+ except subprocess.CalledProcessError as e:
+ if retry_count < 5:
+ LOG.warning(f"Failed to install Ansible collections from "
+ f"{requirements_file} using Ansible Galaxy "
+ f"(error: {e}) (retry: {retry_count}/5)")
+ sleep(2)
+ continue
+ else:
+ LOG.error(f"Failed to install Ansible collections from "
+ f"{requirements_file} using Ansible Galaxy "
+ f"(error: {e}) after 5 retries")
+ LOG.error("Exiting")
+ sys.exit(e.returncode)
+ break
+
+
+def read_file(path: os.path, mode: str = "r") -> str | bytes:
+ """Read the content of a file."""
+ with open(path, mode) as f:
+ return f.read()
+
+
+def read_yaml_file(path: str):
+ """Read and decode a YAML file."""
+ try:
+ content = read_file(path)
+ except IOError as e:
+ print("Failed to open YAML file %s: %s" %
+ (path, repr(e)))
+ sys.exit(1)
+ try:
+ return yaml.safe_load(content)
+ except yaml.YAMLError as e:
+ print("Failed to decode YAML file %s: %s" %
+ (path, repr(e)))
+ sys.exit(1)
+
+
+def is_readable_dir(path: str) -> dict:
+ """Check whether a path references a readable directory."""
+ if not os.path.exists(path):
+ return {"result": False, "message": "Path does not exist"}
+ if not os.path.isdir(path):
+ return {"result": False, "message": "Path is not a directory"}
+ if not os.access(path, os.R_OK):
+ return {"result": False, "message": "Directory is not readable"}
+ return {"result": True}
+
+
+def is_readable_file(path: str) -> dict:
+ """Check whether a path references a readable file."""
+ if not os.path.exists(path):
+ return {"result": False, "message": "Path does not exist"}
+ if not os.path.isfile(path):
+ return {"result": False, "message": "Path is not a file"}
+ if not os.access(path, os.R_OK):
+ return {"result": False, "message": "File is not readable"}
+ return {"result": True}
+
+
+def run_command(executable: str,
+ args: list,
+ quiet: bool = False,
+ **kwargs) -> None:
+ """Run a command, checking the output.
+
+ :param quiet: Redirect output to /dev/null
+ """
+ full_cmd = [executable] + args
+ cmd_string = " ".join(full_cmd)
+ LOG.debug("Running command: %s", cmd_string)
+
+    if quiet:
+        kwargs["stdout"] = subprocess.DEVNULL
+        kwargs["stderr"] = subprocess.DEVNULL
+    subprocess.run(full_cmd, check=True, shell=False, **kwargs)  # nosec
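A short sketch of data-files resolution using the environment override documented above (the path is illustrative):

```python
# The KOLLA_ANSIBLE_DATA_FILES_PATH override takes precedence over
# editable-install and prefix detection in _get_base_path().
import os

from kolla_ansible import utils

os.environ["KOLLA_ANSIBLE_DATA_FILES_PATH"] = "/opt/kolla-ansible"
print(utils.get_data_files_path("ansible", "site.yml"))
# /opt/kolla-ansible/ansible/site.yml
```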
diff --git a/lint-requirements.txt b/lint-requirements.txt
index 1b0057f493..1dd34e7f8e 100644
--- a/lint-requirements.txt
+++ b/lint-requirements.txt
@@ -1,8 +1,10 @@
-ansible>=4,<6 # GPLv3
-ansible-lint>=6.0.0,<7.0.0 # MIT
+ansible>=8,<10 # GPLv3
+ansible-lint>=6.22.0,<7.0.0 # MIT
bandit>=1.1.0 # Apache-2.0
bashate>=0.5.1 # Apache-2.0
+codespell<3 # GPLv2
doc8>=0.6.0 # Apache-2.0
-hacking>=3.0.1,<3.1.0 # Apache-2.0
+hacking>=3.0.1 # Apache-2.0
+pycodestyle>=2.11.0 # MIT
reno>=3.1.0 # Apache-2.0
yamllint>=1.22.0 #GPL3
diff --git a/releasenotes/notes/1875223-05552108375d005a.yaml b/releasenotes/notes/1875223-05552108375d005a.yaml
new file mode 100644
index 0000000000..84f61b224f
--- /dev/null
+++ b/releasenotes/notes/1875223-05552108375d005a.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+    Fixes issues with OVN NB/SB DB deployment, where the first node needs
+    to be re-bootstrapped. `LP#1875223 `__
diff --git a/releasenotes/notes/Fix-proxysql-mariadb-backup-database-password-54f43ecf102fb897.yaml b/releasenotes/notes/Fix-proxysql-mariadb-backup-database-password-54f43ecf102fb897.yaml
new file mode 100644
index 0000000000..d2cddc5dbd
--- /dev/null
+++ b/releasenotes/notes/Fix-proxysql-mariadb-backup-database-password-54f43ecf102fb897.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+    Fix MariaDB backup when ``enable_proxysql`` is enabled.
diff --git a/releasenotes/notes/add-a-flag-to-handle-rabbitmq-high-availability-44c709318be6cb7b.yaml b/releasenotes/notes/add-a-flag-to-handle-rabbitmq-high-availability-44c709318be6cb7b.yaml
new file mode 100644
index 0000000000..87c4e3a66d
--- /dev/null
+++ b/releasenotes/notes/add-a-flag-to-handle-rabbitmq-high-availability-44c709318be6cb7b.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+    Adds the flag ``om_enable_rabbitmq_high_availability``. Setting this to
+ ``true`` will enable both durable queues and classic mirrored queues in
+ RabbitMQ. Note that classic queue mirroring and transient (aka non-durable)
+ queues are deprecated and subject to removal in RabbitMQ version 4.0 (date
+ of release unknown).
+ Changes the pattern used in classic mirroring to exclude some queue types.
+ This pattern is ``^(?!(amq\\.)|(.*_fanout_)|(reply_)).*``.
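To make the exclusion concrete, a quick check of the pattern (the doubled backslash above is YAML escaping; the queue names are illustrative):

```python
# Quick check of the classic-mirroring pattern from this note. The doubled
# backslash in the YAML renders as a single one in the actual regex.
import re

pattern = re.compile(r"^(?!(amq\.)|(.*_fanout_)|(reply_)).*")

for name in ("notifications.info", "amq.direct",
             "reply_4ca8ff46", "pool_fanout_b6a0"):
    print(name, bool(pattern.match(name)))
# Only notifications.info matches, so only it would be mirrored.
```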
diff --git a/releasenotes/notes/add-ceph-metrics-scrape-interval-3ee39fba696860e9.yaml b/releasenotes/notes/add-ceph-metrics-scrape-interval-3ee39fba696860e9.yaml
new file mode 100644
index 0000000000..ba9a62de80
--- /dev/null
+++ b/releasenotes/notes/add-ceph-metrics-scrape-interval-3ee39fba696860e9.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Added a new variable ``prometheus_ceph_exporter_interval`` for controlling
+ the scrape interval of Ceph metrics.
diff --git a/releasenotes/notes/add-docker-image-name-prefix-1503a64978740808.yaml b/releasenotes/notes/add-docker-image-name-prefix-1503a64978740808.yaml
new file mode 100644
index 0000000000..2864a25533
--- /dev/null
+++ b/releasenotes/notes/add-docker-image-name-prefix-1503a64978740808.yaml
@@ -0,0 +1,9 @@
+---
+features:
+ - |
+    Adds ``docker_image_name_prefix``, which allows defining a prefix
+    for image names.
+fixes:
+ - |
+ Fixes `LP#2073541
+ `__
diff --git a/releasenotes/notes/add-graceful-timeout-argument-a8b71a389351599b.yaml b/releasenotes/notes/add-graceful-timeout-argument-a8b71a389351599b.yaml
index 0775df5825..371c0c7daf 100644
--- a/releasenotes/notes/add-graceful-timeout-argument-a8b71a389351599b.yaml
+++ b/releasenotes/notes/add-graceful-timeout-argument-a8b71a389351599b.yaml
@@ -1,4 +1,4 @@
---
features:
-  - Add graceful timeout argument to kolla_docker library for stoping,
-    restaring container.
+  - Add graceful timeout argument to kolla_docker library for stopping,
+    restarting containers.
diff --git a/releasenotes/notes/add-haproxy-http2-support-3a8575889cabe064.yaml b/releasenotes/notes/add-haproxy-http2-support-3a8575889cabe064.yaml
new file mode 100644
index 0000000000..c188f3eaec
--- /dev/null
+++ b/releasenotes/notes/add-haproxy-http2-support-3a8575889cabe064.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Adds http/2 support to HAProxy frontends.
diff --git a/releasenotes/notes/add-horizon-limitrequestbody-4f79433fa2cf1f6d.yaml b/releasenotes/notes/add-horizon-limitrequestbody-4f79433fa2cf1f6d.yaml
new file mode 100644
index 0000000000..7b93905f15
--- /dev/null
+++ b/releasenotes/notes/add-horizon-limitrequestbody-4f79433fa2cf1f6d.yaml
@@ -0,0 +1,9 @@
+---
+features:
+ - |
+    Since CVE-2022-29404 was fixed, the default value for the
+    LimitRequestBody directive in the Apache HTTP Server has been changed
+    from 0 (unlimited) to 1073741824 (1 GiB). This limits the size of
+    images (for example) uploaded in Horizon. The limit can now be set via
+ ``horizon_httpd_limitrequestbody``.
+ `LP#2012588 `__
diff --git a/releasenotes/notes/add-ironic-prometheus-exporter-218a9985905602fd.yaml b/releasenotes/notes/add-ironic-prometheus-exporter-218a9985905602fd.yaml
new file mode 100644
index 0000000000..9e8d0761a6
--- /dev/null
+++ b/releasenotes/notes/add-ironic-prometheus-exporter-218a9985905602fd.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ Adds support for deploying the ironic-prometheus-exporter, 'a Tool to
+ expose hardware sensor data in the Prometheus format through an HTTP
+ endpoint'.
+ See https://opendev.org/openstack/ironic-prometheus-exporter for more details
+ about the exporter.
diff --git a/releasenotes/notes/add-keystone-oidc-forwarded-headers-option-d153c6292cf20b26.yaml b/releasenotes/notes/add-keystone-oidc-forwarded-headers-option-d153c6292cf20b26.yaml
new file mode 100644
index 0000000000..9414e567dd
--- /dev/null
+++ b/releasenotes/notes/add-keystone-oidc-forwarded-headers-option-d153c6292cf20b26.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+    Add an option to set the ``OIDCXForwardedHeaders`` directive in Keystone.
+    This is useful when Keystone is behind a proxy which adds headers to the
+    request. The new option is ``keystone_federation_oidc_forwarded_headers``.
+ The default value is empty, to preserve the current behavior.
+ `LP#2080402 `__
diff --git a/releasenotes/notes/add-lets-encrypt-intergration-9e5f9846536379af.yaml b/releasenotes/notes/add-lets-encrypt-intergration-9e5f9846536379af.yaml
new file mode 100644
index 0000000000..f5b27f8c77
--- /dev/null
+++ b/releasenotes/notes/add-lets-encrypt-intergration-9e5f9846536379af.yaml
@@ -0,0 +1,10 @@
+---
+features:
+  - Add Let's Encrypt TLS certificate service integration into OpenStack
+    deployment. Enables trusted TLS certificate generation for secure
+    communication with OpenStack HAProxy instances; setting
+    ``letsencrypt_email`` and ``kolla_internal_fqdn`` and/or
+    ``kolla_external_fqdn`` is required. One container runs an Apache
+    ACME client webserver and one runs Lego for certificate retrieval
+    and renewal. The Lego container starts a cron job which attempts
+    to renew certificates every 12 hours.
diff --git a/releasenotes/notes/add-letsencrypt-eab-support-7951e7a572718ce9.yaml b/releasenotes/notes/add-letsencrypt-eab-support-7951e7a572718ce9.yaml
new file mode 100644
index 0000000000..ac473fb0dd
--- /dev/null
+++ b/releasenotes/notes/add-letsencrypt-eab-support-7951e7a572718ce9.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Adds support for external account binding (EAB) in Let's Encrypt.
diff --git a/releasenotes/notes/add-monasca-notification-03283c42a8df3d71.yaml b/releasenotes/notes/add-monasca-notification-03283c42a8df3d71.yaml
index aab4c19bdd..ccabf36751 100644
--- a/releasenotes/notes/add-monasca-notification-03283c42a8df3d71.yaml
+++ b/releasenotes/notes/add-monasca-notification-03283c42a8df3d71.yaml
@@ -2,6 +2,6 @@
features:
- |
Add support for deploying the Monasca Notification service. The
- Notification service is responsible for notifiying users when
+ Notification service is responsible for notifying users when
an alert, as defined via the Monasca API, is generated by the
Monasca Thresh topology.
diff --git a/releasenotes/notes/add-neutron-custom-kernel-modules-d105d3f84665e0a4.yaml b/releasenotes/notes/add-neutron-custom-kernel-modules-d105d3f84665e0a4.yaml
new file mode 100644
index 0000000000..3ffba775b6
--- /dev/null
+++ b/releasenotes/notes/add-neutron-custom-kernel-modules-d105d3f84665e0a4.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+    Added the capability to specify custom kernel modules for Neutron:
+    ``neutron_modules_default``: lists default modules.
+    ``neutron_modules_extra``: for custom modules and parameters.
diff --git a/releasenotes/notes/add-opensearch-53ef174195acce45.yaml b/releasenotes/notes/add-opensearch-53ef174195acce45.yaml
new file mode 100644
index 0000000000..45148297bc
--- /dev/null
+++ b/releasenotes/notes/add-opensearch-53ef174195acce45.yaml
@@ -0,0 +1,17 @@
+---
+features:
+ - |
+ Adds support for deploying OpenSearch and OpenSearch Dashboards. These
+ services directly replace Elasticsearch and Kibana, which are now
+ end-of-life. Support for sending logs to a remote Elasticsearch (or
+ OpenSearch) cluster is maintained.
+upgrade:
+ - |
+ If you are currently deploying Elasticsearch with Kolla Ansible, you
+ should back up the data before starting the upgrade. The contents of
+ the Elasticsearch data volume will be automatically moved to
+ the OpenSearch volume. The Elasticsearch, Elasticsearch Curator and
+ Kibana containers will be removed automatically. The inventory must be
+ updated so that the ``elasticsearch`` group is renamed to
+ ``opensearch``, and the ``kibana`` group is renamed to
+ ``opensearch-dashboards``.
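
For a YAML-format inventory, the group rename might look like the sketch below; the nesting is illustrative, and INI-format inventories need the equivalent section header rename::

    all:
      children:
        opensearch:               # formerly "elasticsearch"
          children:
            control: {}
        opensearch-dashboards:    # formerly "kibana"
          children:
            opensearch: {}
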
diff --git a/releasenotes/notes/add-opensearch-uri-68a657c55ce9c9f1.yaml b/releasenotes/notes/add-opensearch-uri-68a657c55ce9c9f1.yaml
new file mode 100644
index 0000000000..720b1866df
--- /dev/null
+++ b/releasenotes/notes/add-opensearch-uri-68a657c55ce9c9f1.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Fixes OpenSearch deployment with TLS enabled on the internal VIP.
diff --git a/releasenotes/notes/add-parameter-fluentd-enable-watch-timer-08fbe65ba2b549e8.yaml b/releasenotes/notes/add-parameter-fluentd-enable-watch-timer-08fbe65ba2b549e8.yaml
new file mode 100644
index 0000000000..a0e8316d47
--- /dev/null
+++ b/releasenotes/notes/add-parameter-fluentd-enable-watch-timer-08fbe65ba2b549e8.yaml
@@ -0,0 +1,11 @@
+---
+features:
+ - |
+ With the boolean parameter ``fluentd_enable_watch_timer`` it is
+ now possible to enable the additional watch timer of Fluentd.
+
+ The default value of ``fluentd_enable_watch_timer`` is set
+ to ``false``.
+
+ More details about the watch timer in Fluentd can be found at
+ https://docs.fluentd.org/input/tail#enable_watch_timer.
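
Opting in is a one-line change in ``globals.yml``::

    fluentd_enable_watch_timer: true

Per the linked Fluentd documentation, this enables a periodic timer watch on tailed files in addition to the default inotify-based watching.
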
diff --git a/releasenotes/notes/add-skyline-support-a3fb6aabeeb1d8da.yaml b/releasenotes/notes/add-skyline-support-a3fb6aabeeb1d8da.yaml
new file mode 100644
index 0000000000..e760ee4da5
--- /dev/null
+++ b/releasenotes/notes/add-skyline-support-a3fb6aabeeb1d8da.yaml
@@ -0,0 +1,3 @@
+---
+features:
+ - Add Skyline Ansible role.
diff --git a/releasenotes/notes/add-support-for-custom-alertmanager-notifications-27f5d0474f470512.yaml b/releasenotes/notes/add-support-for-custom-alertmanager-notifications-27f5d0474f470512.yaml
index 7c25f5c51f..c7fd1ae99f 100644
--- a/releasenotes/notes/add-support-for-custom-alertmanager-notifications-27f5d0474f470512.yaml
+++ b/releasenotes/notes/add-support-for-custom-alertmanager-notifications-27f5d0474f470512.yaml
@@ -1,5 +1,5 @@
---
features:
- |
- Adds possibility for inlcuding custom alert notification templates with
+ Adds possibility for including custom alert notification templates with
Prometheus Alertmanager.
diff --git a/releasenotes/notes/add-systemd-container-control-b85dff9ec5fae313.yaml b/releasenotes/notes/add-systemd-container-control-b85dff9ec5fae313.yaml
new file mode 100644
index 0000000000..33833f6556
--- /dev/null
+++ b/releasenotes/notes/add-systemd-container-control-b85dff9ec5fae313.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ Adds support for container state control through systemd in kolla_docker.
+ Every container logs only to journald and has its own unit file in
+ ``/etc/systemd/system`` named **kolla-<container name>-container.service**.
+ Systemd control is implemented in the new file
+ ``ansible/module_utils/kolla_systemd_worker.py``.
diff --git a/releasenotes/notes/add-trove-singletenant-dd02a7b7cc1a4f99.yaml b/releasenotes/notes/add-trove-singletenant-dd02a7b7cc1a4f99.yaml
index 8615136879..bf88cd08e2 100644
--- a/releasenotes/notes/add-trove-singletenant-dd02a7b7cc1a4f99.yaml
+++ b/releasenotes/notes/add-trove-singletenant-dd02a7b7cc1a4f99.yaml
@@ -2,5 +2,5 @@
features:
- |
Add "enable_trove_singletenant" option to enable the Trove single
- tenant functionnality. This feature will allow Trove to create
+ tenant functionality. This feature will allow Trove to create
Nova instances in a different tenant than the user tenant.
diff --git a/releasenotes/notes/adds-net-raw-capability-to-ironic-dnsmasq-40f5894c5180b12d.yaml b/releasenotes/notes/adds-net-raw-capability-to-ironic-dnsmasq-40f5894c5180b12d.yaml
new file mode 100644
index 0000000000..bd9022b7f0
--- /dev/null
+++ b/releasenotes/notes/adds-net-raw-capability-to-ironic-dnsmasq-40f5894c5180b12d.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixes an issue with ironic dnsmasq failing to start in deployments
+ using podman because it requires the NET_RAW capability. See `LP#2055282
+ `__.
diff --git a/releasenotes/notes/adds-node-exporter-targets-extra-c037d4755d1002e8.yaml b/releasenotes/notes/adds-node-exporter-targets-extra-c037d4755d1002e8.yaml
new file mode 100644
index 0000000000..39a25b042c
--- /dev/null
+++ b/releasenotes/notes/adds-node-exporter-targets-extra-c037d4755d1002e8.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Adds ``prometheus_node_exporter_targets_extra`` to add additional scrape
+ targets to the node exporter job. See :kolla-ansible-doc:`documentation
+ ` for more
+ information.
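
One plausible shape for the extra targets in ``globals.yml``; the exact entry schema is an assumption (modeled on Prometheus static scrape configs), so check the referenced documentation before use::

    prometheus_node_exporter_targets_extra:
      - targets:
          - "192.0.2.10:9100"   # hypothetical host outside the inventory
        labels:
          env: "lab"            # illustrative label
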
diff --git a/releasenotes/notes/adds-ovs-vsctl-wrapper-da3dbbb19d5cc6f5.yaml b/releasenotes/notes/adds-ovs-vsctl-wrapper-da3dbbb19d5cc6f5.yaml
new file mode 100644
index 0000000000..56c4a16593
--- /dev/null
+++ b/releasenotes/notes/adds-ovs-vsctl-wrapper-da3dbbb19d5cc6f5.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Adds a new flag, ``openvswitch_ovs_vsctl_wrapper_enabled``, which
+ installs a wrapper script at ``/usr/bin/ovs-vsctl`` that executes
+ ``docker exec`` into the openvswitchd container.
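
Enabling the wrapper would then be a one-line change in ``globals.yml``::

    openvswitch_ovs_vsctl_wrapper_enabled: true

Operators can afterwards run ``ovs-vsctl`` commands on the host and have them executed inside the openvswitchd container.
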
diff --git a/releasenotes/notes/ansible-2.14-d83c5ce197321353.yaml b/releasenotes/notes/ansible-2.14-d83c5ce197321353.yaml
new file mode 100644
index 0000000000..15a4646aa8
--- /dev/null
+++ b/releasenotes/notes/ansible-2.14-d83c5ce197321353.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+ - |
+ Minimum supported Ansible version is now ``6`` (ansible-core 2.13)
+ and maximum supported is ``7`` (ansible-core 2.14). Due to a regression in
+ ``ansible-core``, it must not be greater than ``2.14.2``.
diff --git a/releasenotes/notes/ansible-2.15-ee1fb1ff0078fbf5.yaml b/releasenotes/notes/ansible-2.15-ee1fb1ff0078fbf5.yaml
new file mode 100644
index 0000000000..4a7345d583
--- /dev/null
+++ b/releasenotes/notes/ansible-2.15-ee1fb1ff0078fbf5.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ Minimum supported Ansible version is now ``7`` (ansible-core 2.14)
+ and maximum supported is ``8`` (ansible-core 2.15).
diff --git a/releasenotes/notes/ansible-core-2-16-257dc4502ede5b88.yaml b/releasenotes/notes/ansible-core-2-16-257dc4502ede5b88.yaml
new file mode 100644
index 0000000000..1af4221535
--- /dev/null
+++ b/releasenotes/notes/ansible-core-2-16-257dc4502ede5b88.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ The minimum supported Ansible version is now ``8`` (ansible-core 2.15),
+ and the maximum supported is ``9`` (ansible-core 2.16).
diff --git a/releasenotes/notes/automated-blackbox-endpoints-b0ffd7bf9e3d16de.yaml b/releasenotes/notes/automated-blackbox-endpoints-b0ffd7bf9e3d16de.yaml
new file mode 100644
index 0000000000..bff81b2403
--- /dev/null
+++ b/releasenotes/notes/automated-blackbox-endpoints-b0ffd7bf9e3d16de.yaml
@@ -0,0 +1,14 @@
+---
+features:
+ - |
+ Blackbox monitoring endpoint configuration is now automated for many common
+ services. The default endpoint list,
+ ``prometheus_blackbox_exporter_endpoints_default``, varies according to the
+ services that are enabled. Custom endpoints can be added to
+ ``prometheus_blackbox_exporter_endpoints_custom``.
+upgrade:
+ - |
+ ``prometheus_blackbox_exporter_endpoints`` will now be automatically
+ populated with endpoints for many common services. Custom endpoints should
+ be migrated to ``prometheus_blackbox_exporter_endpoints_custom`` to avoid
+ overriding the default configuration.
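
A sketch of adding a custom endpoint in ``globals.yml``. The variable name comes from the note above, but the per-entry schema below is purely an assumption; consult the blackbox exporter documentation for the exact format::

    prometheus_blackbox_exporter_endpoints_custom:
      - name: "example_service"             # hypothetical endpoint name
        module: "http_2xx"                  # standard blackbox HTTP probe module
        target: "https://example.com:8080"  # hypothetical target
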
diff --git a/releasenotes/notes/bifrost-deploy-verbosity-f6a90727649285b5.yaml b/releasenotes/notes/bifrost-deploy-verbosity-f6a90727649285b5.yaml
index 009307927a..aef8c9bd76 100644
--- a/releasenotes/notes/bifrost-deploy-verbosity-f6a90727649285b5.yaml
+++ b/releasenotes/notes/bifrost-deploy-verbosity-f6a90727649285b5.yaml
@@ -1,7 +1,6 @@
---
features:
- |
- With the parameter ``bifrost_deploy_verbosity`` it is possible
- to set the verbosity of the bootstrap of Bifrost.
-
- By default ``-vvvv`` is set.
+ Adds the ``bifrost_deploy_verbosity`` parameter, which allows changing
+ the verbosity of the Bifrost bootstrap task.
+ The default value is ``-vvvv``.
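
For example, to make the bootstrap less chatty, one could lower the verbosity in ``globals.yml``::

    bifrost_deploy_verbosity: "-vv"
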
diff --git a/releasenotes/notes/bug-1814515-217c5cd59d7d251f.yaml b/releasenotes/notes/bug-1814515-217c5cd59d7d251f.yaml
new file mode 100644
index 0000000000..211e99103d
--- /dev/null
+++ b/releasenotes/notes/bug-1814515-217c5cd59d7d251f.yaml
@@ -0,0 +1,15 @@
+---
+fixes:
+ - |
+ Fixes a problem where changes to a package's file manifest were
+ not reflected in the devmode-enabled container.
+ `LP#1814515 `__
+
+upgrade:
+ - |
+ Changes the strategy of installing projects in dev mode in containers.
+ Instead of bind mounting the project's git repository to the venv
+ of the container, the repository is bind mounted to
+ /dev-mode/ from which it is installed using pip
+ on every startup of the container via the kolla_install_projects script.
+ Also updates docs to reflect the changes.
diff --git a/releasenotes/notes/bug-1850733-aa3b0d335c8e4e1e.yaml b/releasenotes/notes/bug-1850733-aa3b0d335c8e4e1e.yaml
new file mode 100644
index 0000000000..9883b74b9b
--- /dev/null
+++ b/releasenotes/notes/bug-1850733-aa3b0d335c8e4e1e.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Puts ``memcache_security_strategy`` in a single place in ``all.yml``.
+ For possible config options, see the `docs `__.
+
+ `LP#1850733 `__
diff --git a/releasenotes/notes/bug-1863510-e39da141cdd07c41.yaml b/releasenotes/notes/bug-1863510-e39da141cdd07c41.yaml
new file mode 100644
index 0000000000..8d33808bdc
--- /dev/null
+++ b/releasenotes/notes/bug-1863510-e39da141cdd07c41.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+ Fixes unintentional triggering of Ansible handlers.
+ Due to an Ansible quirk, when one container of a group
+ changes, all containers in that group are restarted.
+ This can cause problems with some services.
+ `LP#1863510 `__
diff --git a/releasenotes/notes/bug-1906306-1247de365435e26a.yaml b/releasenotes/notes/bug-1906306-1247de365435e26a.yaml
new file mode 100644
index 0000000000..811584a6e0
--- /dev/null
+++ b/releasenotes/notes/bug-1906306-1247de365435e26a.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+ - |
+ Introduced a separate role for managing sysctl settings. This role
+ automatically detects support for IPv6 and, if unsupported, skips the IPv6
+ sysctl settings. This role expands the previous backportable fix of this
+ issue, reviewed at
+ https://review.opendev.org/c/openstack/kolla-ansible/+/905831. For more
+ details, see `LP#1906306 `__.
diff --git a/releasenotes/notes/bug-1906306-640d5085576656f9.yaml b/releasenotes/notes/bug-1906306-640d5085576656f9.yaml
new file mode 100644
index 0000000000..dafb761401
--- /dev/null
+++ b/releasenotes/notes/bug-1906306-640d5085576656f9.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+ Adds conditionals to skip IPv6 sysctl settings on systems
+ that have IPv6 disabled in the kernel.
+ Changing sysctl settings related to IPv6 on those
+ systems leads to errors.
+ `LP#1906306 `__
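
A minimal sketch of the guarded pattern this fix describes; the task names, the registered variable, and the example sysctl key are illustrative rather than the role's actual code::

    - name: Check whether the kernel has IPv6 support
      ansible.builtin.stat:
        path: /proc/sys/net/ipv6
      register: ipv6_support    # illustrative variable name

    - name: Apply an IPv6 sysctl setting only when IPv6 is available
      ansible.posix.sysctl:
        name: "net.ipv6.conf.all.forwarding"   # example setting only
        value: "1"
      when: ipv6_support.stat.exists
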
diff --git a/releasenotes/notes/bug-1915302-a668b00dddaff476.yaml b/releasenotes/notes/bug-1915302-a668b00dddaff476.yaml
new file mode 100644
index 0000000000..247db8d8ce
--- /dev/null
+++ b/releasenotes/notes/bug-1915302-a668b00dddaff476.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixed nova-cell not updating the cell0 database address when VIP changes.
+ For more details, refer to `LP#1915302 `__.
diff --git a/releasenotes/notes/bug-1937120-cd1ad24a9a4be739.yaml b/releasenotes/notes/bug-1937120-cd1ad24a9a4be739.yaml
new file mode 100644
index 0000000000..2eb07029bb
--- /dev/null
+++ b/releasenotes/notes/bug-1937120-cd1ad24a9a4be739.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fixes trove module imports.
+ The path to the modules needed by trove-api changed in the source
+ trove package, so the configuration was updated.
+ `LP#1937120 `__
diff --git a/releasenotes/notes/bug-1947710-6d0975ae72f43ada.yaml b/releasenotes/notes/bug-1947710-6d0975ae72f43ada.yaml
index c3c62719dc..028a7143cc 100644
--- a/releasenotes/notes/bug-1947710-6d0975ae72f43ada.yaml
+++ b/releasenotes/notes/bug-1947710-6d0975ae72f43ada.yaml
@@ -2,6 +2,6 @@
fixes:
- |
Fixes the copy job for grafana custom home dashboard file.
- The copy job for the grafana home dashboard file needs to run priviliged,
+ The copy job for the grafana home dashboard file needs to run privileged,
otherwise permission denied error occurs.
`LP#1947710 `__
diff --git a/releasenotes/notes/bug-1982777-0cac9753aa18a037.yaml b/releasenotes/notes/bug-1982777-0cac9753aa18a037.yaml
index 0a80e3ce38..0ad2ad327d 100644
--- a/releasenotes/notes/bug-1982777-0cac9753aa18a037.yaml
+++ b/releasenotes/notes/bug-1982777-0cac9753aa18a037.yaml
@@ -1,8 +1,7 @@
---
fixes:
- |
- Fixes 1982777.
+ Fixes `LP#1982777 `__.
Set multipathd user_friendly_names to "no"
to make os-brick able to resize volumes online.
Adds ability to override multipathd config.
- `LP#1982777 `__
diff --git a/releasenotes/notes/bug-1987982-fix-keystone-log_bin_trust_function_creators-variables-8628098891513dac.yaml b/releasenotes/notes/bug-1987982-fix-keystone-log_bin_trust_function_creators-variables-8628098891513dac.yaml
index 5319b1cd01..06681dda98 100644
--- a/releasenotes/notes/bug-1987982-fix-keystone-log_bin_trust_function_creators-variables-8628098891513dac.yaml
+++ b/releasenotes/notes/bug-1987982-fix-keystone-log_bin_trust_function_creators-variables-8628098891513dac.yaml
@@ -1,6 +1,6 @@
---
fixes:
- |
- Fixed `bug #1987982 `_
+ Fixed `bug #1987982 `_.
This bug caused the database log_bin_trust_function_creators variable
not to be set back to "OFF" after a keystone upgrade.
diff --git a/releasenotes/notes/bug-1993285-127fe764e461465a.yaml b/releasenotes/notes/bug-1993285-127fe764e461465a.yaml
new file mode 100644
index 0000000000..068c6e73f1
--- /dev/null
+++ b/releasenotes/notes/bug-1993285-127fe764e461465a.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes handling of openvswitch on ``manila-share`` nodes.
+ `LP#1993285 `__
diff --git a/releasenotes/notes/bug-1995248-f454ec0198bf7c36.yaml b/releasenotes/notes/bug-1995248-f454ec0198bf7c36.yaml
new file mode 100644
index 0000000000..7029e6f6bd
--- /dev/null
+++ b/releasenotes/notes/bug-1995248-f454ec0198bf7c36.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes missing logrotate configuration for proxysql logs.
+ `LP#1995248 `__
diff --git a/releasenotes/notes/bug-1999081-769f1012263a48fd.yaml b/releasenotes/notes/bug-1999081-769f1012263a48fd.yaml
new file mode 100644
index 0000000000..8e8a6c275a
--- /dev/null
+++ b/releasenotes/notes/bug-1999081-769f1012263a48fd.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ The precheck for RabbitMQ failed incorrectly when
+ ``kolla_externally_managed_cert`` was set to ``true``.
+ `LP#1999081 `__
diff --git a/releasenotes/notes/bug-2003079-911114b36ae745be.yaml b/releasenotes/notes/bug-2003079-911114b36ae745be.yaml
new file mode 100644
index 0000000000..326e4de310
--- /dev/null
+++ b/releasenotes/notes/bug-2003079-911114b36ae745be.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fixes the ``kolla_docker`` module not taking into account
+ the ``common_options`` parameter, which meant the module's
+ default values were always used.
+ `LP#2003079 `__
diff --git a/releasenotes/notes/bug-2004224-0e600c99f8e5b83f.yaml b/releasenotes/notes/bug-2004224-0e600c99f8e5b83f.yaml
new file mode 100644
index 0000000000..16fee5a895
--- /dev/null
+++ b/releasenotes/notes/bug-2004224-0e600c99f8e5b83f.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixes a keystone task which was connecting via SSH instead of
+ running locally.
+ `LP#2004224 `__
diff --git a/releasenotes/notes/bug-2006051-135bd20d5c465517.yaml b/releasenotes/notes/bug-2006051-135bd20d5c465517.yaml
new file mode 100644
index 0000000000..2a27a82881
--- /dev/null
+++ b/releasenotes/notes/bug-2006051-135bd20d5c465517.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fixes a 504 timeout when scraping the openstack exporter.
+ Ensures that the HAProxy server timeout is the same as the
+ scrape timeout for the openstack exporter backend.
+ `LP#2006051 `__
diff --git a/releasenotes/notes/bug-2009884-a13cd185a29faf9a.yaml b/releasenotes/notes/bug-2009884-a13cd185a29faf9a.yaml
new file mode 100644
index 0000000000..944f58849a
--- /dev/null
+++ b/releasenotes/notes/bug-2009884-a13cd185a29faf9a.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes non-persistent Neutron agent state data.
+ `LP#2009884 `__
diff --git a/releasenotes/notes/bug-2012292-fix-designate-pools-update-a367caf1d85c7326e.yaml b/releasenotes/notes/bug-2012292-fix-designate-pools-update-a367caf1d85c7326e.yaml
new file mode 100644
index 0000000000..1074a0f0c3
--- /dev/null
+++ b/releasenotes/notes/bug-2012292-fix-designate-pools-update-a367caf1d85c7326e.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixes improper use of the ``--file`` parameter with the
+ ``designate-manage pool update`` command.
+ `LP#2012292 `__
diff --git a/releasenotes/notes/bug-2015589-94427c14cd857c98.yaml b/releasenotes/notes/bug-2015589-94427c14cd857c98.yaml
new file mode 100644
index 0000000000..0f4eae173f
--- /dev/null
+++ b/releasenotes/notes/bug-2015589-94427c14cd857c98.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes SASL account creation being attempted before the config file is ready.
+ `LP#2015589 `__
diff --git a/releasenotes/notes/bug-2020152-165c87048d92dedb.yaml b/releasenotes/notes/bug-2020152-165c87048d92dedb.yaml
new file mode 100644
index 0000000000..4b95a26b94
--- /dev/null
+++ b/releasenotes/notes/bug-2020152-165c87048d92dedb.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Sets correct permissions for the opensearch-dashboards data location.
+ `LP#2020152 <https://bugs.launchpad.net/kolla-ansible/+bug/2020152>`__
diff --git a/releasenotes/notes/bug-2023502-68acc8637510cb22.yaml b/releasenotes/notes/bug-2023502-68acc8637510cb22.yaml
new file mode 100644
index 0000000000..195c61b7bc
--- /dev/null
+++ b/releasenotes/notes/bug-2023502-68acc8637510cb22.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fixes an issue with octavia security group rule creation when using
+ IPv6 configuration for the octavia management network.
+ See `LP#2023502 `__
+ for more details.
diff --git a/releasenotes/notes/bug-2024541-98c2864f16f8eb59.yaml b/releasenotes/notes/bug-2024541-98c2864f16f8eb59.yaml
new file mode 100644
index 0000000000..1fe3e25181
--- /dev/null
+++ b/releasenotes/notes/bug-2024541-98c2864f16f8eb59.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fixes glance-api failing to start the privsep daemon when
+ ``cinder_backend_ceph`` is set to true.
+ See `LP#2024541 `__
+ for more details.
diff --git a/releasenotes/notes/bug-2024554-6eb811364536f1e8.yaml b/releasenotes/notes/bug-2024554-6eb811364536f1e8.yaml
new file mode 100644
index 0000000000..19de8634b4
--- /dev/null
+++ b/releasenotes/notes/bug-2024554-6eb811364536f1e8.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fixes `LP#2024554 `__.
+ Adds host and ``mariadb_port`` to the wsrep sync status check.
+ This is so that non-standard ports can be used for
+ MariaDB deployments.
diff --git a/releasenotes/notes/bug-2036390-d087c5bfd504c9f3.yaml b/releasenotes/notes/bug-2036390-d087c5bfd504c9f3.yaml
new file mode 100644
index 0000000000..601821bda7
--- /dev/null
+++ b/releasenotes/notes/bug-2036390-d087c5bfd504c9f3.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ ``enable_keystone_federation`` and ``keystone_enable_federation_openid``
+ were not explicitly handled as booleans in various templates in the
+ keystone role.
+ `LP#2036390 `__
diff --git a/releasenotes/notes/bug-2041864-f19f9a6afd0955e8.yaml b/releasenotes/notes/bug-2041864-f19f9a6afd0955e8.yaml
new file mode 100644
index 0000000000..f5e8ee8e00
--- /dev/null
+++ b/releasenotes/notes/bug-2041864-f19f9a6afd0955e8.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes pulling of ``ovs-dpdk`` images.
+ `LP#2041864 `__
diff --git a/releasenotes/notes/bug-2045660-inability-to-override-horizon-policy-files-c405906a9faf8f3b.yaml b/releasenotes/notes/bug-2045660-inability-to-override-horizon-policy-files-c405906a9faf8f3b.yaml
new file mode 100644
index 0000000000..7e3c382a11
--- /dev/null
+++ b/releasenotes/notes/bug-2045660-inability-to-override-horizon-policy-files-c405906a9faf8f3b.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Starting with ansible-core 2.13, the list concatenation format changed,
+ which resulted in an inability to override horizon policy files.
+ See `LP#2045660 `__
+ for more details.
diff --git a/releasenotes/notes/bug-2048130-23b8174396bd3c69.yaml b/releasenotes/notes/bug-2048130-23b8174396bd3c69.yaml
new file mode 100644
index 0000000000..a1cd5b878a
--- /dev/null
+++ b/releasenotes/notes/bug-2048130-23b8174396bd3c69.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes long service restarts while using systemd.
+ `LP#2048130 `__.
diff --git a/releasenotes/notes/bug-2048223-bb66fa11c6b36c5e.yaml b/releasenotes/notes/bug-2048223-bb66fa11c6b36c5e.yaml
new file mode 100644
index 0000000000..eb06cd79c7
--- /dev/null
+++ b/releasenotes/notes/bug-2048223-bb66fa11c6b36c5e.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fixes an issue with high CPU usage of the cAdvisor container by setting the
+ per-container housekeeping interval to the same value as the Prometheus
+ scrape interval. `LP#2048223
+ `__
diff --git a/releasenotes/notes/bug-2048525-53ac0711f2c8ae4b.yaml b/releasenotes/notes/bug-2048525-53ac0711f2c8ae4b.yaml
new file mode 100644
index 0000000000..10d3635bce
--- /dev/null
+++ b/releasenotes/notes/bug-2048525-53ac0711f2c8ae4b.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes glance image import
+ `LP#2048525 `__.
diff --git a/releasenotes/notes/bug-2048700-98eb939b72079173.yaml b/releasenotes/notes/bug-2048700-98eb939b72079173.yaml
new file mode 100644
index 0000000000..a4aec372bf
--- /dev/null
+++ b/releasenotes/notes/bug-2048700-98eb939b72079173.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixes Nova operations using the ``scp`` command, such as cold migration or
+ resize, on Debian Bookworm. `LP#2048700
+ `__
diff --git a/releasenotes/notes/bug-2049607-fb79ea2960b91bde.yaml b/releasenotes/notes/bug-2049607-fb79ea2960b91bde.yaml
new file mode 100644
index 0000000000..13a353d55a
--- /dev/null
+++ b/releasenotes/notes/bug-2049607-fb79ea2960b91bde.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixes configuration of nova-compute and nova-compute-ironic
+ to enable exposing vendordata over configdrive.
+ `LP#2049607 `__
diff --git a/releasenotes/notes/bug-2052501-6dfd9e5443fdc6d1.yaml b/releasenotes/notes/bug-2052501-6dfd9e5443fdc6d1.yaml
new file mode 100644
index 0000000000..7e6116cbe3
--- /dev/null
+++ b/releasenotes/notes/bug-2052501-6dfd9e5443fdc6d1.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes mariadb role deployment when using Ansible check mode.
+ `LP#2052501 `__
diff --git a/releasenotes/notes/bug-2052706-dfbbc75fc72c74d1.yaml b/releasenotes/notes/bug-2052706-dfbbc75fc72c74d1.yaml
new file mode 100644
index 0000000000..6bca1929df
--- /dev/null
+++ b/releasenotes/notes/bug-2052706-dfbbc75fc72c74d1.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fixes kolla-ansible removing an inventory file placed in
+ ``/etc/kolla/``.
+ See `LP#2052706 `__
+ for more details.
diff --git a/releasenotes/notes/bug-2054867-33bf1caa05cd004d.yaml b/releasenotes/notes/bug-2054867-33bf1caa05cd004d.yaml
new file mode 100644
index 0000000000..96bcb629a2
--- /dev/null
+++ b/releasenotes/notes/bug-2054867-33bf1caa05cd004d.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes the incorrect dictionary key reference in the 'Copy Policy File' task.
+ `LP#2054867 `__
diff --git a/releasenotes/notes/bug-2056332-0edb6cfd2efc4c1a.yaml b/releasenotes/notes/bug-2056332-0edb6cfd2efc4c1a.yaml
new file mode 100644
index 0000000000..13ef26e904
--- /dev/null
+++ b/releasenotes/notes/bug-2056332-0edb6cfd2efc4c1a.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixed an issue with ``openvswitch`` bridge creation when
+ ``neutron_bridge_name`` was specified as two bridges. For details, see
+ `LP#2056332 `__.
diff --git a/releasenotes/notes/bug-2056667-c6d6896855b393f4.yaml b/releasenotes/notes/bug-2056667-c6d6896855b393f4.yaml
new file mode 100644
index 0000000000..003de7c827
--- /dev/null
+++ b/releasenotes/notes/bug-2056667-c6d6896855b393f4.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixed the use of Redis as coordination backend. For details, see
+ `LP#2056667 `__.
diff --git a/releasenotes/notes/bug-2058372-f94889bae9522ae4.yaml b/releasenotes/notes/bug-2058372-f94889bae9522ae4.yaml
new file mode 100644
index 0000000000..ddab948698
--- /dev/null
+++ b/releasenotes/notes/bug-2058372-f94889bae9522ae4.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixed the wrong configuration of the ovs-dpdk service, which broke the
+ deployment of Kolla-Ansible. For details, see `bug 2058372
+ `__.
diff --git a/releasenotes/notes/bug-2058492-b86e8eceb04eec67.yaml b/releasenotes/notes/bug-2058492-b86e8eceb04eec67.yaml
new file mode 100644
index 0000000000..f1c1e735c3
--- /dev/null
+++ b/releasenotes/notes/bug-2058492-b86e8eceb04eec67.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ An incorrect condition in the Podman support code prevented the
+ retrieval of facts for all containers when no names were provided.
+ `LP#2058492 `__
diff --git a/releasenotes/notes/bug-2058644-1db8786303234787.yaml b/releasenotes/notes/bug-2058644-1db8786303234787.yaml
new file mode 100644
index 0000000000..ec1286d322
--- /dev/null
+++ b/releasenotes/notes/bug-2058644-1db8786303234787.yaml
@@ -0,0 +1,15 @@
+---
+upgrade:
+ - |
+ MariaDB backup now uses the same image as the running MariaDB server. The
+ following variables relating to MariaDB backups are no longer used and have
+ been removed:
+
+ * ``mariabackup_image``
+ * ``mariabackup_tag``
+ * ``mariabackup_image_full``
+fixes:
+ - |
+ Modifies the MariaDB backup procedure to use the same container image
+ as the running MariaDB server container. This should prevent
+ compatibility issues that may cause the backup to fail.
diff --git a/releasenotes/notes/bug-2058656-ad68bb260327a267.yaml b/releasenotes/notes/bug-2058656-ad68bb260327a267.yaml
new file mode 100644
index 0000000000..33b4e2c73f
--- /dev/null
+++ b/releasenotes/notes/bug-2058656-ad68bb260327a267.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes keystone service configuration for haproxy when using federation.
+ `LP#2058656 `__
diff --git a/releasenotes/notes/bug-2061889-f54e356f43c0fae3.yaml b/releasenotes/notes/bug-2061889-f54e356f43c0fae3.yaml
new file mode 100644
index 0000000000..b148344d5c
--- /dev/null
+++ b/releasenotes/notes/bug-2061889-f54e356f43c0fae3.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixes MariaDB backup failure due to missing ``CREATE`` privileges
+ on the ``mariadb_backup_history`` table. `LP#2061889
+ `__
diff --git a/releasenotes/notes/bug-2063896-f6a4853a2e5046da.yaml b/releasenotes/notes/bug-2063896-f6a4853a2e5046da.yaml
new file mode 100644
index 0000000000..ec57688c6f
--- /dev/null
+++ b/releasenotes/notes/bug-2063896-f6a4853a2e5046da.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixes a bug where the loadbalancer upgrade task failed
+ when Podman was used as the container engine.
+ `LP#2063896 `__
diff --git a/releasenotes/notes/bug-2067278-82287115c972a04e.yaml b/releasenotes/notes/bug-2067278-82287115c972a04e.yaml
new file mode 100644
index 0000000000..a79400c9fd
--- /dev/null
+++ b/releasenotes/notes/bug-2067278-82287115c972a04e.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fixes a bug in kolla_podman_worker where missing commas
+ in a list of strings created implicit concatenation of items
+ that should be separate.
+ `LP#2067278 `__
diff --git a/releasenotes/notes/bug-2067999-5d009f15e1cc3185.yaml b/releasenotes/notes/bug-2067999-5d009f15e1cc3185.yaml
new file mode 100644
index 0000000000..ef1c80441d
--- /dev/null
+++ b/releasenotes/notes/bug-2067999-5d009f15e1cc3185.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes redundant copying of grafana custom config files.
+ `LP#2067999 `__
diff --git a/releasenotes/notes/bug-2071912-89d2fba8865ddf40.yaml b/releasenotes/notes/bug-2071912-89d2fba8865ddf40.yaml
new file mode 100644
index 0000000000..f9fae9c1e6
--- /dev/null
+++ b/releasenotes/notes/bug-2071912-89d2fba8865ddf40.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixes a Podman failure when ``enable_container_healthchecks``
+ is set to "no".
+ `LP#2071912 `__
diff --git a/releasenotes/notes/bug-2072554-d113b89975985520.yaml b/releasenotes/notes/bug-2072554-d113b89975985520.yaml
new file mode 100644
index 0000000000..6599ea61a2
--- /dev/null
+++ b/releasenotes/notes/bug-2072554-d113b89975985520.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Adds database configuration necessary for barbican.
+ `LP#2072554 `__
diff --git a/releasenotes/notes/bug-2073159-c54c773c72c8fb11.yaml b/releasenotes/notes/bug-2073159-c54c773c72c8fb11.yaml
new file mode 100644
index 0000000000..15d7087780
--- /dev/null
+++ b/releasenotes/notes/bug-2073159-c54c773c72c8fb11.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixes behaviour of Change Password screen in Horizon until
+ `bug #2073639