Merge pull request #559 from stackhpc/upstream/2023.1-2024-04-29
Synchronise 2023.1 with upstream
markgoddard authored Apr 29, 2024
2 parents 313b8ba + 7020b9c commit d154133
Showing 14 changed files with 96 additions and 49 deletions.
7 changes: 2 additions & 5 deletions ansible/roles/keystone/defaults/main.yml
@@ -204,18 +204,15 @@ keystone_enabled_notification_topics: "{{ keystone_notification_topics | selecta
# Keystone
####################
keystone_service_endpoints:
  - {'interface': 'admin', 'url': '{{ keystone_internal_url }}', 'state': 'absent'}
  - {'interface': 'internal', 'url': '{{ keystone_internal_url }}'}
  - {'interface': 'public', 'url': '{{ keystone_public_url }}'}

# TODO(yoctozepto): Remove admin_endpoint leftovers in Antelope (2023.1).
keystone_service_admin_endpoint: {'interface': 'admin', 'url': '{{ keystone_internal_url }}'}
keystone_create_admin_endpoint: false

keystone_ks_services:
- name: "keystone"
type: "identity"
description: "Openstack Identity Service"
endpoints: "{{ keystone_service_endpoints + ([keystone_service_admin_endpoint] if kolla_action == 'upgrade' or keystone_create_admin_endpoint | bool else []) }}"
endpoints: "{{ keystone_service_endpoints }}"

keystone_ks_roles:
  - service
5 changes: 1 addition & 4 deletions ansible/roles/keystone/tasks/upgrade.yml
@@ -49,10 +49,7 @@
  when:
    - not use_preconfigured_databases | bool

# TODO(yoctozepto): Remove after Zed (in AA).
# This is needed to update the admin endpoint as the port has
# changed in the same release (Zed), i.e., the admin endpoint uses the
# same port as the other ones (public, internal).
# NOTE(mgoddard): Remove the admin endpoint.
- import_role:
    name: service-ks-register
  vars:
2 changes: 1 addition & 1 deletion ansible/roles/mariadb/tasks/recover_cluster.yml
@@ -48,7 +48,7 @@
# WSREP: Recovered position: <UUID>:<seqno>.
- name: Get MariaDB wsrep recovery seqno
  become: true
  shell: tail -n 200 /tmp/mariadb_tmp.log | awk -F" " '$0~/Recovered position/{print $NF;exit;}' | awk -F":" '{print $2}'
  shell: awk -F" " '/Recovered position/{seqno=$NF} END{split(seqno, a, ":"); print a[2]}' /tmp/mariadb_tmp.log
  register: wsrep_recovery_seqno

- name: Removing MariaDB log file from /tmp
41 changes: 41 additions & 0 deletions ansible/roles/opensearch/handlers/main.yml
@@ -1,4 +1,45 @@
---
- name: Disable shard allocation
  become: true
  vars:
    opensearch_shard_body: {"transient": {"cluster.routing.allocation.enable": "none"}}
  kolla_toolbox:
    container_engine: "{{ kolla_container_engine }}"
    module_name: uri
    module_args:
      url: "{{ opensearch_internal_endpoint }}/_cluster/settings"
      method: PUT
      status_code: 200
      return_content: yes
      body: "{{ opensearch_shard_body | to_json }}" # noqa jinja[invalid]
      body_format: json
  delegate_to: "{{ groups['opensearch'][0] }}"
  run_once: true
  listen: "Restart opensearch container"
  when:
    - kolla_action == "upgrade"

- name: Perform a flush
  become: true
  kolla_toolbox:
    container_engine: "{{ kolla_container_engine }}"
    module_name: uri
    module_args:
      url: "{{ opensearch_internal_endpoint }}/_flush"
      method: POST
      status_code: 200
      return_content: yes
      body_format: json
  delegate_to: "{{ groups['opensearch'][0] }}"
  run_once: true
  retries: 10
  delay: 5
  register: result
  until: ('status' in result) and result.status == 200
  listen: "Restart opensearch container"
  when:
    - kolla_action == "upgrade"

- name: Restart opensearch container
  vars:
    service_name: "opensearch"
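
A minimal, generic sketch (play and names are hypothetical, not taken from the
kolla-ansible code base) of the 'listen' pattern the handlers above rely on:
every handler listening on the notified topic runs, in the order the handlers
are defined, but only when something actually notifies a restart. This is what
makes repeated upgrade runs idempotent, since shard allocation is now only
disabled when OpenSearch is really about to restart.

- hosts: all
  tasks:
    - name: Change something that requires a restart
      command: /bin/true
      changed_when: true
      notify: "Restart example container"

  handlers:
    - name: Prepare for restart
      debug:
        msg: "Runs first, and only when a restart was notified"
      listen: "Restart example container"

    - name: Restart example container
      debug:
        msg: "Runs second"
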
35 changes: 0 additions & 35 deletions ansible/roles/opensearch/tasks/upgrade.yml
@@ -1,39 +1,4 @@
---
- name: Disable shard allocation
  become: true
  vars:
    opensearch_shard_body: {"transient": {"cluster.routing.allocation.enable": "none"}}
  kolla_toolbox:
    container_engine: "{{ kolla_container_engine }}"
    module_name: uri
    module_args:
      url: "{{ opensearch_internal_endpoint }}/_cluster/settings"
      method: PUT
      status_code: 200
      return_content: yes
      body: "{{ opensearch_shard_body | to_json }}" # noqa jinja[invalid]
      body_format: json
  delegate_to: "{{ groups['opensearch'][0] }}"
  run_once: true

- name: Perform a flush
  become: true
  kolla_toolbox:
    container_engine: "{{ kolla_container_engine }}"
    module_name: uri
    module_args:
      url: "{{ opensearch_internal_endpoint }}/_flush"
      method: POST
      status_code: 200
      return_content: yes
      body_format: json
  delegate_to: "{{ groups['opensearch'][0] }}"
  run_once: true
  retries: 10
  delay: 5
  register: result
  until: ('status' in result) and result.status == 200

- import_tasks: config-host.yml

- import_tasks: config.yml
1 change: 1 addition & 0 deletions ansible/roles/service-ks-register/defaults/main.yml
@@ -15,6 +15,7 @@ service_ks_register_domain: "default"
# fields:
# 'url'
# 'interface'
# 'state' (optional)
service_ks_register_services: []
# A list of users and associated roles for this service to register with
# Keystone. Each item should provide the following fields:
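
For illustration, a hedged sketch (service name, type and URLs are made up) of
how a role can use the new optional 'state' field to have an obsolete endpoint
removed while the other endpoints are kept:

service_ks_register_services:
  - name: "example"
    type: "example"
    description: "Example Service"
    endpoints:
      - {'interface': 'internal', 'url': 'http://internal.example:5000'}
      - {'interface': 'public', 'url': 'http://public.example:5000'}
      - {'interface': 'admin', 'url': 'http://internal.example:5000', 'state': 'absent'}
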
1 change: 1 addition & 0 deletions ansible/roles/service-ks-register/tasks/main.yml
@@ -36,6 +36,7 @@
auth: "{{ service_ks_register_auth }}"
interface: "{{ service_ks_register_interface }}"
cacert: "{{ service_ks_cacert }}"
state: "{{ item.1.state | default(omit) }}"
with_subelements:
- "{{ service_ks_register_services }}"
- endpoints
4 changes: 4 additions & 0 deletions ansible/roles/skyline/templates/gunicorn.py.j2
@@ -20,6 +20,10 @@ timeout = {{ skyline_gunicorn_timeout }}
keepalive = {{ skyline_gunicorn_keepalive }}
reuse_port = True
proc_name = "{{ project_name }}"
{% if skyline_ssl_certfile and skyline_ssl_keyfile %}
keyfile = "{{ skyline_ssl_keyfile }}"
certfile = "{{ skyline_ssl_certfile }}"
{% endif %}

logconfig_dict = {
"version": 1,
18 changes: 14 additions & 4 deletions doc/source/user/operating-kolla.rst
@@ -56,6 +56,16 @@ deployment.
Limitations and Recommendations
-------------------------------

.. warning::

   Please note that using the Ansible ``--limit`` option is not recommended,
   as there are known bugs with it, for example when `upgrading parts of nova
   <https://bugs.launchpad.net/kolla-ansible/+bug/2054348>`__.
   We accept bug reports for such issues and try to fix them once they are
   known. The core problem is how the ``register:`` keyword works and how it
   interacts with the ``--limit`` option, as sketched below. You can find more
   information in the bug report above.
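
A minimal, hypothetical sketch (not taken from the kolla-ansible code base) of
why ``register:`` and ``--limit`` interact badly: the second task reads a
variable registered on the first controller, so it fails with an undefined
variable whenever that host is excluded by ``--limit``.

- hosts: control
  tasks:
    - name: Register a value on every controller
      command: hostname
      register: controller_hostname

    - name: Use the value registered on the first controller
      debug:
        msg: "{{ hostvars[groups['control'][0]].controller_hostname.stdout }}"
      # If --limit excludes the first controller, the task above never runs
      # there, so controller_hostname is undefined for that host and this fails.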

.. note::

   Please note that when the ``use_preconfigured_databases`` flag is set to
@@ -72,10 +82,10 @@ Ubuntu Jammy 22.04
------------------

The Zed release adds support for Ubuntu Jammy 22.04 as a host operating
system. Ubuntu Jammy 22.04 support will also be addeed to a Yoga stable
release. Ubuntu Focal 20.04 users upgrading from Yoga should first upgrade
OpenStack containers to Zed, which uses the Ubuntu Jammy 22.04 base container
image. Hosts should then be upgraded to Ubuntu Jammy 22.04.
system. Ubuntu Jammy 22.04 support was also added to the Yoga stable
release. Ubuntu Focal 20.04 users upgrading from Yoga can thus directly
upgrade to Ubuntu Jammy 22.04 on the host and then upgrade to the Zed release.


CentOS Stream 8
---------------
@@ -0,0 +1,6 @@
---
fixes:
  - |
    Fixes an idempotency issue in the OpenSearch upgrade tasks where subsequent
    runs of kolla-ansible upgrade would leave shard allocation disabled.
    `LP#2049512 <https://launchpad.net/bugs/2049512>`__
@@ -0,0 +1,8 @@
---
fixes:
  - |
    Fixed an issue where the MariaDB Cluster recovery process would fail if the
    sequence number was not found in the logs. The recovery process now checks
    the complete log file for the sequence number and recovers the cluster.
    See `LP#1821173 <https://bugs.launchpad.net/kolla-ansible/+bug/1821173>`__
    for details.
@@ -0,0 +1,6 @@
---
fixes:
  - |
    Fixes an issue where the Keystone admin endpoint would be recreated when
    upgrading Keystone. The endpoint is now explicitly removed during the
    upgrade process.
1 change: 1 addition & 0 deletions tests/templates/ansible.cfg.j2
@@ -7,3 +7,4 @@ inject_facts_as_vars = False
[ssh_connection]
pipelining = True
retries = 3
ssh_args = -C -o ControlMaster=auto -o ControlPersist=300
10 changes: 10 additions & 0 deletions tests/test-core-openstack.sh
@@ -407,12 +407,22 @@ function test_instance_boot {
    fi
}

function test_keystone_admin_endpoint {
echo "TESTING: Keystone admin endpoint removal"
if [[ $(openstack endpoint list --service keystone --interface admin -f value | wc -l) -ne 0 ]]; then
echo "ERROR: Found Keystone admin endpoint"
exit 1
fi
echo "SUCCESS: Keystone admin endpoint removal"
}

function test_openstack_logged {
    . /etc/kolla/admin-openrc.sh
    . ~/openstackclient-venv/bin/activate
    test_smoke
    test_neutron_modules
    test_instance_boot
    test_keystone_admin_endpoint

    # Check for x86_64 architecture to run q35 tests
    if [[ $(uname -m) == "x86_64" ]]; then