feat: add OSA migration toolkit (#324)
* wip: add osa migration toolkit
* wip: added nova config extraction
* wip: added nova atmosphere config gen
* wip: clean-up nova configs
* wip: clean-up heat configs
* wip: add heat migration
* chore: refactor to migrate_db_from_osa
* chore: remove extra task
* chore: add magnum migrate
* chore: added haproxy migrate
* chore: misc fixes for osa_config_diff
* chore: misc fixes for osa_config_diff
* chore: add more services to osa migration
* chore: add pubkey generation to nova
* chore: add nova migration for computes
* chore: add multi-config file migration
* fix: multi-config file
* chore: add neutron migration
* chore: add neutron migration
* chore: add horizon migration
* chore: fix horizon migration
* chore: add cinder migration
* chore: add cinder migration for backends
* fix: enable cinder to do online volume resizes
* chore: turn off cinder services
* fix: allow uppercase backends
* fix: allow storage init with uppercase
* fix: allow for octavia migration from OSA
* move resource generation out
* chore: auto-detect health manager ips
* fix: discover facts for other controllers
* chore(octavia): migrate ca for amphora
* fix: use int for private key size
* chore: add designate migration
* fix: use yaml for designate_pools
* chore: add managed_resource_tenant_id
diff --git a/charts/nova/values.yaml b/charts/nova/values.yaml
index 8609d41..1a0da3e 100644
--- a/charts/nova/values.yaml
+++ b/charts/nova/values.yaml
@@ -1356,14 +1356,10 @@
osapi_compute_listen_port: null
osapi_compute_workers: 1
metadata_workers: 1
- use_neutron: true
- firewall_driver: nova.virt.firewall.NoopFirewallDriver
- linuxnet_interface_driver: openvswitch
compute_driver: libvirt.LibvirtDriver
my_ip: 0.0.0.0
instance_usage_audit: True
instance_usage_audit_period: hour
- notify_on_state_change: vm_and_task_state
resume_guests_state_on_host_boot: True
vnc:
novncproxy_host: 0.0.0.0
@@ -1403,6 +1399,8 @@
auth_type: password
auth_version: v3
memcache_security_strategy: ENCRYPT
+ notifications:
+ notify_on_state_change: vm_and_task_state
service_user:
auth_type: password
send_service_user_token: false
diff --git a/galaxy.yml b/galaxy.yml
index f09587c..82b4ded 100644
--- a/galaxy.yml
+++ b/galaxy.yml
@@ -14,6 +14,7 @@
community.general: 4.5.0
kubernetes.core: 2.3.2
openstack.cloud: 1.7.0
+ community.mysql: 3.6.0
tags:
- application
- cloud
diff --git a/hack/sync-charts.sh b/hack/sync-charts.sh
index 1f5853f..53ae45a 100755
--- a/hack/sync-charts.sh
+++ b/hack/sync-charts.sh
@@ -136,6 +136,11 @@
NOVA_VERISON=0.3.1
curl -sL https://tarballs.opendev.org/openstack/openstack-helm/nova-${NOVA_VERISON}.tgz \
| tar -xz -C ${ATMOSPHERE}/charts
+curl 'https://review.opendev.org/changes/openstack%2Fopenstack-helm~873446/revisions/1/patch?download' \
+ | base64 --decode \
+ | filterdiff -p1 -x 'releasenotes/*' \
+ | filterdiff -p2 -x 'Chart.yaml' \
+ | patch -p2 -d ${ATMOSPHERE}/charts/nova
SENLIN_VERSION=0.2.8
curl -sL https://tarballs.opendev.org/openstack/openstack-helm/senlin-${SENLIN_VERSION}.tgz \
diff --git a/playbooks/migrate_from_osa.yml b/playbooks/migrate_from_osa.yml
new file mode 100644
index 0000000..4a718ca
--- /dev/null
+++ b/playbooks/migrate_from_osa.yml
@@ -0,0 +1,111 @@
+# Copyright (c) 2023 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This playbook is used to migrate from OpenStack Ansible to Atmosphere; you
+# should run it in the following manner:
+#
+# ansible-playbook \
+# -i inventory/hosts.ini \
+# -i /opt/openstack-ansible/inventory/dynamic_inventory.py \
+# vexxhost.atmosphere.migrate_from_osa
+
+- name: Migrate from OpenStack Ansible
+ hosts: controllers[0]
+ become: true
+ tasks:
+ - name: Migrate Keystone
+ ansible.builtin.import_role:
+ name: keystone
+ tasks_from: migrate_from_osa
+ tags:
+ - keystone
+
+ - name: Migrate Barbican
+ ansible.builtin.import_role:
+ name: barbican
+ tasks_from: migrate_from_osa
+ tags:
+ - barbican
+
+ - name: Migrate Glance
+ ansible.builtin.import_role:
+ name: glance
+ tasks_from: migrate_from_osa
+ tags:
+ - glance
+
+ - name: Migrate Cinder
+ ansible.builtin.import_role:
+ name: cinder
+ tasks_from: migrate_from_osa
+ tags:
+ - cinder
+
+ - name: Migrate Placement
+ ansible.builtin.import_role:
+ name: placement
+ tasks_from: migrate_from_osa
+ tags:
+ - placement
+
+ - name: Migrate Nova
+ ansible.builtin.import_role:
+ name: nova
+ tasks_from: migrate_from_osa
+ tags:
+ - nova
+
+ - name: Migrate Neutron
+ ansible.builtin.import_role:
+ name: neutron
+ tasks_from: migrate_from_osa
+ tags:
+ - neutron
+
+ # TODO(mnaser): Senlin
+
+ - name: Migrate Designate
+ ansible.builtin.import_role:
+ name: designate
+ tasks_from: migrate_from_osa
+ tags:
+ - designate
+
+ - name: Migrate Heat
+ ansible.builtin.import_role:
+ name: heat
+ tasks_from: migrate_from_osa
+ tags:
+ - heat
+
+ - name: Migrate Octavia
+ ansible.builtin.import_role:
+ name: octavia
+ tasks_from: migrate_from_osa
+ tags:
+ - octavia
+
+ - name: Migrate Magnum
+ ansible.builtin.import_role:
+ name: magnum
+ tasks_from: migrate_from_osa
+ tags:
+ - magnum
+
+ - name: Migrate Horizon
+ ansible.builtin.import_role:
+ name: horizon
+ tasks_from: migrate_from_osa
+ tags:
+ - horizon
diff --git a/plugins/filter/from_ini.py b/plugins/filter/from_ini.py
new file mode 100644
index 0000000..d77e3ea
--- /dev/null
+++ b/plugins/filter/from_ini.py
@@ -0,0 +1,99 @@
+# Copyright (c) 2022 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from functools import partial
+
+from ansible.errors import AnsibleFilterError
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.collections import is_string
+from ansible.module_utils.six.moves import configparser
+
+DOCUMENTATION = """
+ name: from_ini
+ short_description: Parse an INI file
+ version_added: 1.0.0
+ description:
+ - Parse an INI file and return a dictionary
+ options:
+ _input:
+ type: string
+ required: true
+ description:
+ - The INI file contents to parse
+ author:
+ - Mohammed Naser <mnaser@vexxhost.com>
+"""
+
+EXAMPLES = """
+ - name: Parse an INI file
+ ansible.builtin.debug:
+ msg: "{{ lookup('file', 'config.ini') | from_ini }}"
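+
+  # A minimal sketch of reading a single option out of a parsed file; the
+  # file name and option used here are illustrative.
+  - name: Show a single option from a parsed INI file
+    ansible.builtin.debug:
+      msg: "{{ (lookup('file', 'nova.conf') | from_ini)['DEFAULT']['debug'] }}"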
+"""
+
+RETURN = """
+ _value:
+ description: The parsed INI file
+ type: dict
+"""
+
+
+def from_ini(value):
+ if not is_string(value):
+ raise AnsibleFilterError(
+ "Invalid value type (%s) for from_ini (%r)" % (type(value), value)
+ )
+
+ parser = configparser.RawConfigParser()
+ parser.optionxform = partial(to_text, errors="surrogate_or_strict")
+ parser.read_string(value)
+
+ data = {}
+
+ def _parse_section(section):
+ data = dict(section)
+ data.pop('__name__', None)
+ for opt, val in data.items():
+            if val.isdigit():
+                val = int(val)
+            elif val.lower() in ("true", "false"):
+                val = val.lower() == "true"
+            elif val.lower() in ("none", "null"):
+                val = None
+            else:
+                # NOTE: attempt numeric conversion before falling back to a
+                # quote-stripped string so float values are not left unparsed.
+                try:
+                    val = float(val)
+                except ValueError:
+                    val = val.strip('"')
+ data[opt] = val
+
+ return data
+
+ data = dict(parser._sections)
+ for k in data:
+ data[k] = _parse_section(data[k])
+ if parser._defaults:
+ data["DEFAULT"] = _parse_section(parser._defaults)
+
+ return data
+
+
+class FilterModule(object):
+ def filters(self):
+ return {"from_ini": from_ini}
diff --git a/roles/barbican/tasks/migrate_from_osa.yml b/roles/barbican/tasks/migrate_from_osa.yml
new file mode 100644
index 0000000..0679ac6
--- /dev/null
+++ b/roles/barbican/tasks/migrate_from_osa.yml
@@ -0,0 +1,46 @@
+# Copyright (c) 2023 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+- name: Generate configuration difference
+ ansible.builtin.include_role:
+ name: osa_config_diff
+ vars:
+ osa_config_diff_containers_group: barbican_all
+ osa_config_diff_chart_ref: "{{ barbican_helm_chart_ref }}"
+ osa_config_diff_release_namespace: "{{ barbican_helm_release_namespace }}"
+ osa_config_diff_release_values: "{{ _barbican_helm_values | combine(barbican_helm_values, recursive=True) }}"
+ osa_config_diff_config_files:
+ barbican.conf: /etc/barbican/barbican.conf
+
+- name: Migrate the database
+ ansible.builtin.include_role:
+ name: migrate_db_from_osa
+ vars:
+ migrate_db_from_osa_pxc_namespace: "{{ barbican_helm_release_namespace }}"
+ migrate_db_from_osa_containers_group: barbican_all
+ migrate_db_from_osa_databases:
+ barbican: barbican
+
+- name: Run deployment flow
+ ansible.builtin.import_tasks:
+ file: main.yml
+
+- name: Migrate HAproxy
+ ansible.builtin.include_role:
+ name: migrate_haproxy_from_osa
+ vars:
+ migrate_haproxy_from_osa_group: barbican_all
+ migrate_haproxy_from_osa_service_namespace: "{{ barbican_helm_release_namespace }}"
+ migrate_haproxy_from_osa_service_name: barbican-api
+ migrate_haproxy_from_osa_haproxy_backend: barbican
diff --git a/roles/cinder/tasks/migrate_from_osa.yml b/roles/cinder/tasks/migrate_from_osa.yml
new file mode 100644
index 0000000..7ad88cc
--- /dev/null
+++ b/roles/cinder/tasks/migrate_from_osa.yml
@@ -0,0 +1,65 @@
+# Copyright (c) 2023 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+- name: Generate configuration difference
+ ansible.builtin.include_role:
+ name: osa_config_diff
+ vars:
+ osa_config_diff_containers_group: cinder_volume
+ osa_config_diff_chart_ref: "{{ cinder_helm_chart_ref }}"
+ osa_config_diff_release_namespace: "{{ cinder_helm_release_namespace }}"
+ osa_config_diff_release_values: "{{ _cinder_helm_values | combine(cinder_helm_values, recursive=True) }}"
+ osa_config_diff_config_files:
+ backends.conf: /etc/cinder/cinder.conf
+ cinder.conf: /etc/cinder/cinder.conf
+
+- name: Turn off the "cinder-volume" service
+ run_once: true
+ delegate_to: "{{ item }}"
+ ansible.builtin.service:
+ name: cinder-volume
+ state: stopped
+ enabled: false
+ loop: "{{ groups['cinder_volume'] }}"
+
+- name: Turn off the "cinder-backup" service
+ run_once: true
+ delegate_to: "{{ item }}"
+ ansible.builtin.service:
+ name: cinder-backup
+ state: stopped
+ enabled: false
+ loop: "{{ groups['cinder_backup'] }}"
+
+- name: Migrate the database
+ ansible.builtin.include_role:
+ name: migrate_db_from_osa
+ vars:
+ migrate_db_from_osa_pxc_namespace: "{{ cinder_helm_release_namespace }}"
+ migrate_db_from_osa_containers_group: cinder_all
+ migrate_db_from_osa_databases:
+ cinder: cinder
+
+- name: Run deployment flow
+ ansible.builtin.import_tasks:
+ file: main.yml
+
+- name: Migrate HAproxy
+ ansible.builtin.include_role:
+ name: migrate_haproxy_from_osa
+ vars:
+ migrate_haproxy_from_osa_group: cinder_all
+ migrate_haproxy_from_osa_service_namespace: "{{ cinder_helm_release_namespace }}"
+ migrate_haproxy_from_osa_service_name: cinder-api
+ migrate_haproxy_from_osa_haproxy_backend: cinder_api
diff --git a/roles/designate/README.md b/roles/designate/README.md
index f6f9647..fd5d092 100644
--- a/roles/designate/README.md
+++ b/roles/designate/README.md
@@ -29,7 +29,7 @@
"registering nameservers"). This is out of the scope of this document.
```yaml
-designate_pools: |
+designate_pools:
- name: default
description: Default PowerDNS Pool
diff --git a/roles/designate/meta/main.yml b/roles/designate/meta/main.yml
index 7887ded..5bf3ab4 100644
--- a/roles/designate/meta/main.yml
+++ b/roles/designate/meta/main.yml
@@ -24,6 +24,7 @@
dependencies:
- role: defaults
+ - role: openstacksdk
- role: openstack_helm_endpoints
vars:
openstack_helm_endpoints_chart: designate
diff --git a/roles/designate/tasks/lookups.yml b/roles/designate/tasks/lookups.yml
new file mode 100644
index 0000000..6b70c9b
--- /dev/null
+++ b/roles/designate/tasks/lookups.yml
@@ -0,0 +1,21 @@
+# Copyright (c) 2023 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+- name: Get project ID for managed resources
+ run_once: true
+ openstack.cloud.project_info:
+ cloud: atmosphere
+ name: service
+ domain: service
+ register: _designate_project_info
diff --git a/roles/designate/tasks/main.yml b/roles/designate/tasks/main.yml
index bf1022e..612d484 100644
--- a/roles/designate/tasks/main.yml
+++ b/roles/designate/tasks/main.yml
@@ -34,6 +34,10 @@
name: "{{ designate_helm_release_name }}"
namespace: "{{ designate_helm_release_namespace }}"
+- name: Lookup resources
+ ansible.builtin.import_tasks:
+ file: lookups.yml
+
- name: Deploy Helm chart
run_once: true
kubernetes.core.helm:
diff --git a/roles/designate/tasks/migrate_from_osa.yml b/roles/designate/tasks/migrate_from_osa.yml
new file mode 100644
index 0000000..3166b42
--- /dev/null
+++ b/roles/designate/tasks/migrate_from_osa.yml
@@ -0,0 +1,51 @@
+# Copyright (c) 2023 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+- name: Lookup resources
+ ansible.builtin.import_tasks:
+ file: lookups.yml
+
+- name: Generate configuration difference
+ ansible.builtin.include_role:
+ name: osa_config_diff
+ vars:
+ osa_config_diff_containers_group: designate_all
+ osa_config_diff_chart_ref: "{{ designate_helm_chart_ref }}"
+ osa_config_diff_release_namespace: "{{ designate_helm_release_namespace }}"
+ osa_config_diff_release_values: "{{ _designate_helm_values | combine(designate_helm_values, recursive=True) }}"
+ osa_config_diff_config_files:
+ designate.conf: /etc/designate/designate.conf
+ pools.yaml: /etc/designate/pools.yaml
+
+- name: Migrate the database
+ ansible.builtin.include_role:
+ name: migrate_db_from_osa
+ vars:
+ migrate_db_from_osa_pxc_namespace: "{{ designate_helm_release_namespace }}"
+ migrate_db_from_osa_containers_group: designate_all
+ migrate_db_from_osa_databases:
+ designate: designate
+
+- name: Run deployment flow
+ ansible.builtin.import_tasks:
+ file: main.yml
+
+- name: Migrate HAproxy
+ ansible.builtin.include_role:
+ name: migrate_haproxy_from_osa
+ vars:
+ migrate_haproxy_from_osa_group: designate_all
+ migrate_haproxy_from_osa_service_namespace: "{{ designate_helm_release_namespace }}"
+ migrate_haproxy_from_osa_service_name: designate-api
+ migrate_haproxy_from_osa_haproxy_backend: designate_api
diff --git a/roles/designate/vars/main.yml b/roles/designate/vars/main.yml
index 7d89daf..09ac03c 100644
--- a/roles/designate/vars/main.yml
+++ b/roles/designate/vars/main.yml
@@ -13,11 +13,14 @@
# under the License.
_designate_helm_values:
- conf:
- pools: "{{ designate_pools }}"
endpoints: "{{ openstack_helm_endpoints }}"
images:
tags: "{{ atmosphere_images | vexxhost.atmosphere.openstack_helm_image_tags('designate') }}"
+ conf:
+ designate:
+ service:central:
+ managed_resource_tenant_id: "{{ _designate_project_info.openstack_projects[0].id }}"
+ pools: "{{ designate_pools | to_yaml }}"
pod:
replicas:
api: 3
diff --git a/roles/glance/tasks/migrate_from_osa.yml b/roles/glance/tasks/migrate_from_osa.yml
new file mode 100644
index 0000000..faeb320
--- /dev/null
+++ b/roles/glance/tasks/migrate_from_osa.yml
@@ -0,0 +1,46 @@
+# Copyright (c) 2023 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+- name: Generate configuration difference
+ ansible.builtin.include_role:
+ name: osa_config_diff
+ vars:
+ osa_config_diff_containers_group: glance_all
+ osa_config_diff_chart_ref: "{{ glance_helm_chart_ref }}"
+ osa_config_diff_release_namespace: "{{ glance_helm_release_namespace }}"
+ osa_config_diff_release_values: "{{ _glance_helm_values | combine(glance_helm_values, recursive=True) }}"
+ osa_config_diff_config_files:
+ glance.conf: /etc/glance/glance.conf
+
+- name: Migrate the database
+ ansible.builtin.include_role:
+ name: migrate_db_from_osa
+ vars:
+ migrate_db_from_osa_pxc_namespace: "{{ glance_helm_release_namespace }}"
+ migrate_db_from_osa_containers_group: glance_all
+ migrate_db_from_osa_databases:
+ glance: glance
+
+- name: Run deployment flow
+ ansible.builtin.import_tasks:
+ file: main.yml
+
+- name: Migrate HAproxy
+ ansible.builtin.include_role:
+ name: migrate_haproxy_from_osa
+ vars:
+ migrate_haproxy_from_osa_group: glance_all
+ migrate_haproxy_from_osa_service_namespace: "{{ glance_helm_release_namespace }}"
+ migrate_haproxy_from_osa_service_name: glance-api
+ migrate_haproxy_from_osa_haproxy_backend: glance_api
diff --git a/roles/heat/tasks/migrate_from_osa.yml b/roles/heat/tasks/migrate_from_osa.yml
new file mode 100644
index 0000000..b37b81d
--- /dev/null
+++ b/roles/heat/tasks/migrate_from_osa.yml
@@ -0,0 +1,57 @@
+# Copyright (c) 2023 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+- name: Generate configuration difference
+ ansible.builtin.include_role:
+ name: osa_config_diff
+ vars:
+ osa_config_diff_containers_group: heat_all
+ osa_config_diff_chart_ref: "{{ heat_helm_chart_ref }}"
+ osa_config_diff_release_namespace: "{{ heat_helm_release_namespace }}"
+ osa_config_diff_release_values: "{{ _heat_helm_values | combine(heat_helm_values, recursive=True) }}"
+ osa_config_diff_config_files:
+ heat.conf: /etc/heat/heat.conf
+
+- name: Migrate the database
+ ansible.builtin.include_role:
+ name: migrate_db_from_osa
+ vars:
+ migrate_db_from_osa_pxc_namespace: "{{ heat_helm_release_namespace }}"
+ migrate_db_from_osa_containers_group: heat_all
+ migrate_db_from_osa_databases:
+ heat: heat
+
+- name: Run deployment flow
+ ansible.builtin.import_tasks:
+ file: main.yml
+
+- name: Migrate HAproxy (API)
+ ansible.builtin.include_role:
+ name: migrate_haproxy_from_osa
+ vars:
+ migrate_haproxy_from_osa_group: heat_all
+ migrate_haproxy_from_osa_service_namespace: "{{ heat_helm_release_namespace }}"
+ migrate_haproxy_from_osa_service_name: heat-api
+ migrate_haproxy_from_osa_haproxy_backend: heat_api
+
+- name: Migrate HAproxy (CloudFormation)
+ ansible.builtin.include_role:
+ name: migrate_haproxy_from_osa
+ vars:
+ migrate_haproxy_from_osa_group: heat_all
+ migrate_haproxy_from_osa_service_namespace: "{{ heat_helm_release_namespace }}"
+ migrate_haproxy_from_osa_service_name: heat-cfn
+ migrate_haproxy_from_osa_haproxy_backend: heat_api_cfn
+
+# TODO: fix haproxy configs for api and cfn
diff --git a/roles/heat/vars/main.yml b/roles/heat/vars/main.yml
index 370fc07..0374cf7 100644
--- a/roles/heat/vars/main.yml
+++ b/roles/heat/vars/main.yml
@@ -26,11 +26,22 @@
heat:
DEFAULT:
auth_encryption_key: "{{ heat_auth_encryption_key }}"
+ default_deployment_signal_transport: HEAT_SIGNAL
+ default_software_config_transport: POLL_SERVER_HEAT
log_config_append: null
+ num_engine_workers: 8
region_name_for_services: "{{ openstack_helm_endpoints['identity']['auth']['heat']['region_name'] }}"
server_keystone_endpoint_type: public
clients_keystone:
endpoint_type: publicURL
+ ec2authtoken:
+ auth_uri: http://keystone-api.openstack.svc.cluster.local:5000
+ heat_api:
+ workers: 8
+ heat_api_cfn:
+ workers: 8
+ heat_api_cloudwatch:
+ workers: 8
oslo_messaging_notifications:
driver: noop
manifests:
diff --git a/roles/horizon/tasks/migrate_from_osa.yml b/roles/horizon/tasks/migrate_from_osa.yml
new file mode 100644
index 0000000..b9e2082
--- /dev/null
+++ b/roles/horizon/tasks/migrate_from_osa.yml
@@ -0,0 +1,42 @@
+# Copyright (c) 2023 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# NOTE(mnaser): Horizon does not use any form of standardized configuration
+# format, so we can't show a `diff` of the configuration files.
+- name: Verify configuration manually
+ ansible.builtin.pause:
+
+- name: Migrate the database
+ ansible.builtin.include_role:
+ name: migrate_db_from_osa
+ vars:
+ migrate_db_from_osa_pxc_namespace: "{{ horizon_helm_release_namespace }}"
+ migrate_db_from_osa_containers_group: horizon_all
+ migrate_db_from_osa_databases: {}
+
+- name: Run deployment flow
+ ansible.builtin.import_tasks:
+ file: main.yml
+
+- name: Migrate HAproxy
+ ansible.builtin.include_role:
+ name: migrate_haproxy_from_osa
+ vars:
+ migrate_haproxy_from_osa_group: horizon_all
+ migrate_haproxy_from_osa_service_namespace: "{{ horizon_helm_release_namespace }}"
+ migrate_haproxy_from_osa_service_name: horizon-int
+ migrate_haproxy_from_osa_haproxy_backend: horizon
+ # NOTE(mnaser): The default health checks for some reason return 403, so we
+ # update this accordingly.
+ migrate_haproxy_from_osa_expected_http_code: 403
diff --git a/roles/keystone/tasks/migrate_from_osa.yml b/roles/keystone/tasks/migrate_from_osa.yml
new file mode 100644
index 0000000..ecfe4f1
--- /dev/null
+++ b/roles/keystone/tasks/migrate_from_osa.yml
@@ -0,0 +1,46 @@
+# Copyright (c) 2023 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+- name: Generate configuration difference
+ ansible.builtin.include_role:
+ name: osa_config_diff
+ vars:
+ osa_config_diff_containers_group: keystone_all
+ osa_config_diff_chart_ref: "{{ keystone_helm_chart_ref }}"
+ osa_config_diff_release_namespace: "{{ keystone_helm_release_namespace }}"
+ osa_config_diff_release_values: "{{ _keystone_helm_values | combine(keystone_helm_values, recursive=True) }}"
+ osa_config_diff_config_files:
+ keystone.conf: /etc/keystone/keystone.conf
+
+- name: Migrate the database
+ ansible.builtin.include_role:
+ name: migrate_db_from_osa
+ vars:
+ migrate_db_from_osa_pxc_namespace: "{{ keystone_helm_release_namespace }}"
+ migrate_db_from_osa_containers_group: keystone_all
+ migrate_db_from_osa_databases:
+ keystone: keystone
+
+- name: Run deployment flow
+ ansible.builtin.import_tasks:
+ file: main.yml
+
+- name: Migrate HAproxy
+ ansible.builtin.include_role:
+ name: migrate_haproxy_from_osa
+ vars:
+ migrate_haproxy_from_osa_group: keystone_all
+ migrate_haproxy_from_osa_service_namespace: "{{ keystone_helm_release_namespace }}"
+ migrate_haproxy_from_osa_service_name: keystone-api
+ migrate_haproxy_from_osa_haproxy_backend: keystone_service
diff --git a/roles/magnum/tasks/migrate_from_osa.yml b/roles/magnum/tasks/migrate_from_osa.yml
new file mode 100644
index 0000000..84f1ab6
--- /dev/null
+++ b/roles/magnum/tasks/migrate_from_osa.yml
@@ -0,0 +1,46 @@
+# Copyright (c) 2023 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+- name: Generate configuration difference
+ ansible.builtin.include_role:
+ name: osa_config_diff
+ vars:
+ osa_config_diff_containers_group: magnum_all
+ osa_config_diff_chart_ref: "{{ magnum_helm_chart_ref }}"
+ osa_config_diff_release_namespace: "{{ magnum_helm_release_namespace }}"
+ osa_config_diff_release_values: "{{ _magnum_helm_values | combine(magnum_helm_values, recursive=True) }}"
+ osa_config_diff_config_files:
+ magnum.conf: /etc/magnum/magnum.conf
+
+- name: Migrate the database
+ ansible.builtin.include_role:
+ name: migrate_db_from_osa
+ vars:
+ migrate_db_from_osa_pxc_namespace: "{{ magnum_helm_release_namespace }}"
+ migrate_db_from_osa_containers_group: magnum_all
+ migrate_db_from_osa_databases:
+ magnum_service: magnum
+
+- name: Run deployment flow
+ ansible.builtin.import_tasks:
+ file: main.yml
+
+- name: Migrate HAproxy
+ ansible.builtin.include_role:
+ name: migrate_haproxy_from_osa
+ vars:
+ migrate_haproxy_from_osa_group: magnum_all
+ migrate_haproxy_from_osa_service_namespace: "{{ magnum_helm_release_namespace }}"
+ migrate_haproxy_from_osa_service_name: magnum-api
+ migrate_haproxy_from_osa_haproxy_backend: magnum
diff --git a/roles/migrate_db_from_osa/README.md b/roles/migrate_db_from_osa/README.md
new file mode 100644
index 0000000..752bc8d
--- /dev/null
+++ b/roles/migrate_db_from_osa/README.md
@@ -0,0 +1,10 @@
+# `migrate_db_from_osa`
+
+This role migrates the database of a service from an OpenStack Ansible
+deployment to an Atmosphere deployment. It takes care of the following (see
+the usage sketch after the list):
+
+* Ensure that the database does not already exist in the Atmosphere database
+* Shut off all of the containers that are using the database
+* Dump the database from the OpenStack Ansible Galera cluster
+* Restore the database into the Atmosphere Percona XtraDB Cluster
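+
+A minimal usage sketch, with illustrative values for the variables documented
+in `defaults/main.yml`:
+
+```yaml
+- name: Migrate the database
+  ansible.builtin.include_role:
+    name: migrate_db_from_osa
+  vars:
+    migrate_db_from_osa_pxc_namespace: openstack
+    migrate_db_from_osa_containers_group: magnum_all
+    migrate_db_from_osa_databases:
+      magnum_service: magnum
+```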
diff --git a/roles/migrate_db_from_osa/defaults/main.yml b/roles/migrate_db_from_osa/defaults/main.yml
new file mode 100644
index 0000000..a48c531
--- /dev/null
+++ b/roles/migrate_db_from_osa/defaults/main.yml
@@ -0,0 +1,27 @@
+# Copyright (c) 2023 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Namespace where the Percona XtraDB Cluster is deployed
+# migrate_db_from_osa_pxc_namespace:
+
+# Inventory group for the OpenStack Ansible Galera cluster
+migrate_db_from_osa_galera_group: galera_all
+
+# Inventory group for containers to stop
+# migrate_db_from_osa_containers_group:
+
+# Mapping of databases to migrate, where the key is the OSA database name and
+# the value is the Atmosphere database name
+# migrate_db_from_osa_databases:
+# magnum_service: magnum
diff --git a/roles/migrate_db_from_osa/meta/main.yml b/roles/migrate_db_from_osa/meta/main.yml
new file mode 100644
index 0000000..3ba8401
--- /dev/null
+++ b/roles/migrate_db_from_osa/meta/main.yml
@@ -0,0 +1,24 @@
+# Copyright (c) 2022 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+galaxy_info:
+ author: VEXXHOST, Inc.
+ description: Ansible role for migrating databases from OSA
+ license: Apache-2.0
+ min_ansible_version: 5.5.0
+ standalone: false
+ platforms:
+ - name: Ubuntu
+ versions:
+ - focal
diff --git a/roles/migrate_db_from_osa/tasks/main.yml b/roles/migrate_db_from_osa/tasks/main.yml
new file mode 100644
index 0000000..6ec3c3c
--- /dev/null
+++ b/roles/migrate_db_from_osa/tasks/main.yml
@@ -0,0 +1,74 @@
+# Copyright (c) 2023 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+- name: Get the Kubernetes service for Percona XtraDB Cluster
+ run_once: true
+ kubernetes.core.k8s_info:
+ kind: Service
+ name: "{{ openstack_helm_endpoints.oslo_db.hosts.default }}"
+ namespace: "{{ migrate_db_from_osa_pxc_namespace }}"
+ register: _pxc_service
+
+- name: Fail if the databases already exist
+ run_once: true
+ check_mode: true
+ delegate_to: "{{ groups[migrate_db_from_osa_galera_group][0] }}"
+ community.mysql.mysql_db:
+ login_host: "{{ _pxc_service.resources[0].spec.clusterIP }}"
+ login_user: root
+ login_password: "{{ openstack_helm_endpoints.oslo_db.auth.admin.password }}"
+ name: "{{ item.value }}"
+ register: _pxc_db_check
+ failed_when: _pxc_db_check is not changed
+ loop: "{{ migrate_db_from_osa_databases | dict2items }}"
+
+- name: Shut off all the containers
+ run_once: true
+ delegate_to: "{{ hostvars[item]['physical_host'] }}"
+ ansible.builtin.command: "lxc-stop -n {{ item }}"
+ loop: "{{ groups[migrate_db_from_osa_containers_group] }}"
+ register: _lxc_stop
+ changed_when: _lxc_stop.rc == 0
+ failed_when: _lxc_stop.rc != 0 and 'is not running' not in _lxc_stop.stderr
+
+- name: Wait for the containers to stop
+ run_once: true
+ delegate_to: "{{ hostvars[item]['physical_host'] }}"
+ ansible.builtin.wait_for:
+ path: "/var/lib/lxc/{{ item }}/state"
+ state: absent
+ loop: "{{ groups[migrate_db_from_osa_containers_group] }}"
+
+- name: Dump the database
+ run_once: true
+ delegate_to: "{{ groups[migrate_db_from_osa_galera_group][0] }}"
+ community.mysql.mysql_db:
+ state: dump
+ name: "{{ item.key }}"
+ dump_extra_args: --skip_add_locks
+ skip_lock_tables: true
+ target: "/root/{{ item.key }}-{{ ansible_date_time.epoch }}.sql"
+ loop: "{{ migrate_db_from_osa_databases | dict2items }}"
+
+- name: Restore the database
+ run_once: true
+ delegate_to: "{{ groups[migrate_db_from_osa_galera_group][0] }}"
+ community.mysql.mysql_db:
+ login_host: "{{ _pxc_service.resources[0].spec.clusterIP }}"
+ login_user: root
+ login_password: "{{ openstack_helm_endpoints.oslo_db.auth.admin.password }}"
+ state: import
+ name: "{{ item.value }}"
+ target: "/root/{{ item.key }}-{{ ansible_date_time.epoch }}.sql"
+ loop: "{{ migrate_db_from_osa_databases | dict2items }}"
diff --git a/roles/migrate_haproxy_from_osa/README.md b/roles/migrate_haproxy_from_osa/README.md
new file mode 100644
index 0000000..9ea9c7e
--- /dev/null
+++ b/roles/migrate_haproxy_from_osa/README.md
@@ -0,0 +1,10 @@
+# `migrate_haproxy_from_osa`
+
+This role repoints HAproxy for a specific service from an OpenStack Ansible
+deployment to an Atmosphere deployment. It takes care of the following (see
+the usage sketch after the list):
+
+* Comment out all of the previous records pointing at the OpenStack Ansible
+ deployment
+* Add a new record pointing at the Atmosphere deployment
+* Reload HAproxy in order to start sending traffic to Atmosphere
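+
+A minimal usage sketch, with illustrative values for the variables documented
+in `defaults/main.yml`:
+
+```yaml
+- name: Migrate HAproxy
+  ansible.builtin.include_role:
+    name: migrate_haproxy_from_osa
+  vars:
+    migrate_haproxy_from_osa_group: barbican_all
+    migrate_haproxy_from_osa_service_namespace: openstack
+    migrate_haproxy_from_osa_service_name: barbican-api
+    migrate_haproxy_from_osa_haproxy_backend: barbican
+```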
diff --git a/roles/migrate_haproxy_from_osa/defaults/main.yml b/roles/migrate_haproxy_from_osa/defaults/main.yml
new file mode 100644
index 0000000..7291d07
--- /dev/null
+++ b/roles/migrate_haproxy_from_osa/defaults/main.yml
@@ -0,0 +1,31 @@
+# Copyright (c) 2023 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Inventory group for the HAProxy containers
+migrate_haproxy_from_osa_haproxy_group: haproxy_all
+
+# Inventory group for the target service
+# migrate_haproxy_from_osa_group:
+
+# Namespace for the target service
+# migrate_haproxy_from_osa_service_namespace:
+
+# Name of the target service
+# migrate_haproxy_from_osa_service_name:
+
+# Name of the HAproxy backend inside OpenStack Ansible
+# migrate_haproxy_from_osa_haproxy_backend:
+
+# Expected HTTP code for the HAproxy health check (optional override)
+# migrate_haproxy_from_osa_expected_http_code:
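+
+# Example (illustrative): services whose health check returns 403, such as
+# the Horizon dashboard, can override the expected code:
+# migrate_haproxy_from_osa_expected_http_code: 403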
diff --git a/roles/migrate_haproxy_from_osa/handlers/main.yml b/roles/migrate_haproxy_from_osa/handlers/main.yml
new file mode 100644
index 0000000..ba4dff4
--- /dev/null
+++ b/roles/migrate_haproxy_from_osa/handlers/main.yml
@@ -0,0 +1,21 @@
+# Copyright (c) 2023 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+- name: Reload HAproxy
+ run_once: true
+ delegate_to: "{{ item }}"
+ ansible.builtin.service:
+ name: haproxy
+ state: reloaded
+ loop: "{{ groups[migrate_haproxy_from_osa_haproxy_group] }}"
diff --git a/roles/migrate_haproxy_from_osa/meta/main.yml b/roles/migrate_haproxy_from_osa/meta/main.yml
new file mode 100644
index 0000000..3ba8401
--- /dev/null
+++ b/roles/migrate_haproxy_from_osa/meta/main.yml
@@ -0,0 +1,24 @@
+# Copyright (c) 2022 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+galaxy_info:
+ author: VEXXHOST, Inc.
+  description: Ansible role for migrating HAproxy configuration from OSA
+ license: Apache-2.0
+ min_ansible_version: 5.5.0
+ standalone: false
+ platforms:
+ - name: Ubuntu
+ versions:
+ - focal
diff --git a/roles/migrate_haproxy_from_osa/tasks/main.yml b/roles/migrate_haproxy_from_osa/tasks/main.yml
new file mode 100644
index 0000000..9416815
--- /dev/null
+++ b/roles/migrate_haproxy_from_osa/tasks/main.yml
@@ -0,0 +1,61 @@
+# Copyright (c) 2023 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+- name: Comment out all of the old containers in HAproxy
+ run_once: true
+ delegate_to: "{{ item.0 }}"
+ ansible.builtin.replace:
+ path: /etc/haproxy/haproxy.cfg
+ regexp: '(\s+)server {{ item.1 }}'
+ replace: '\1#server {{ item.1 }}'
+ with_nested:
+ - "{{ groups[migrate_haproxy_from_osa_haproxy_group] }}"
+ - "{{ groups[migrate_haproxy_from_osa_group] }}"
+
+- name: Get the Cluster IP for the service
+ run_once: true
+ kubernetes.core.k8s_info:
+ api_version: v1
+ kind: Service
+ name: "{{ migrate_haproxy_from_osa_service_name }}"
+ namespace: "{{ migrate_haproxy_from_osa_service_namespace }}"
+ register: _migrate_haproxy_from_osa_service
+
+- name: Add a record to HAproxy for the new service
+ run_once: true
+ delegate_to: "{{ item }}"
+ ansible.builtin.lineinfile:
+ path: /etc/haproxy/haproxy.cfg
+ line: ' server k8s {{ cluster_ip }}:{{ port }} check port {{ port }} inter 1000 rise 3 fall 3'
+ insertafter: 'backend {{ migrate_haproxy_from_osa_haproxy_backend }}-back'
+ state: present
+ loop: "{{ groups[migrate_haproxy_from_osa_haproxy_group] }}"
+ vars:
+ cluster_ip: "{{ _migrate_haproxy_from_osa_service.resources[0].spec.clusterIP }}"
+ port: "{{ _migrate_haproxy_from_osa_service.resources[0].spec.ports[0].port }}"
+ notify:
+ - Reload HAproxy
+
+- name: Update expected HTTP code
+ run_once: true
+ delegate_to: "{{ item }}"
+ ansible.builtin.lineinfile:
+ path: /etc/haproxy/haproxy.cfg
+ line: ' http-check expect status {{ migrate_haproxy_from_osa_expected_http_code }}'
+ insertafter: 'backend {{ migrate_haproxy_from_osa_haproxy_backend }}-back'
+ state: present
+ loop: "{{ groups[migrate_haproxy_from_osa_haproxy_group] }}"
+ when: migrate_haproxy_from_osa_expected_http_code is defined
+ notify:
+ - Reload HAproxy
diff --git a/roles/neutron/tasks/migrate_from_osa.yml b/roles/neutron/tasks/migrate_from_osa.yml
new file mode 100644
index 0000000..4f0cc42
--- /dev/null
+++ b/roles/neutron/tasks/migrate_from_osa.yml
@@ -0,0 +1,99 @@
+# Copyright (c) 2023 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+- name: Generate configuration difference (control plane)
+ ansible.builtin.include_role:
+ name: osa_config_diff
+ vars:
+ osa_config_diff_containers_group: neutron_server
+ osa_config_diff_chart_ref: "{{ neutron_helm_chart_ref }}"
+ osa_config_diff_release_namespace: "{{ neutron_helm_release_namespace }}"
+ osa_config_diff_release_values: "{{ _neutron_helm_values | combine(neutron_helm_values, recursive=True) }}"
+ osa_config_diff_config_files:
+ neutron.conf: /etc/neutron/neutron.conf
+ ml2_conf.ini: /etc/neutron/plugins/ml2/ml2_conf.ini
+
+- name: Generate configuration difference (control plane agents)
+ ansible.builtin.include_role:
+ name: osa_config_diff
+ vars:
+ osa_config_diff_containers_group: network_hosts
+ osa_config_diff_chart_ref: "{{ neutron_helm_chart_ref }}"
+ osa_config_diff_release_namespace: "{{ neutron_helm_release_namespace }}"
+ osa_config_diff_release_values: "{{ _neutron_helm_values | combine(neutron_helm_values, recursive=True) }}"
+ osa_config_diff_config_files:
+ ml2_conf.ini: /etc/neutron/plugins/ml2/ml2_conf.ini
+ dhcp_agent.ini: /etc/neutron/dhcp_agent.ini
+ l3_agent.ini: /etc/neutron/l3_agent.ini
+ linuxbridge_agent.ini: /etc/neutron/plugins/ml2/linuxbridge_agent.ini
+ metadata_agent.ini: /etc/neutron/metadata_agent.ini
+
+- name: Migrate the databases
+ ansible.builtin.include_role:
+ name: migrate_db_from_osa
+ vars:
+ migrate_db_from_osa_pxc_namespace: "{{ neutron_helm_release_namespace }}"
+ migrate_db_from_osa_containers_group: neutron_server
+ migrate_db_from_osa_databases:
+ neutron: neutron
+
+- name: Run deployment flow
+ ansible.builtin.import_tasks:
+ file: main.yml
+
+- name: Get the Kubernetes service for RabbitMQ
+ run_once: true
+ kubernetes.core.k8s_info:
+ kind: Service
+ name: "{{ openstack_helm_endpoints.oslo_messaging.hosts.default }}"
+ namespace: "{{ neutron_helm_release_namespace }}"
+ register: _neutron_rabbitmq_service
+
+- name: Update RabbitMQ configuration for Neutron agents
+ delegate_to: "{{ item.0 }}"
+ community.general.ini_file:
+ path: /etc/neutron/neutron.conf
+ section: "{{ item.1.section }}"
+ option: "{{ item.1.option }}"
+ value: "{{ item.1.value }}"
+ mode: 0644
+ with_nested:
+ - "{{ groups['neutron_linuxbridge_agent'] }}"
+ - - section: DEFAULT
+ option: transport_url
+ value: "rabbit://neutron:{{ openstack_helm_endpoints.oslo_messaging.auth.neutron.password }}@{{ cluster_ip }}:5672/neutron"
+ - section: oslo_messaging_notifications
+ option: transport_url
+ value: "rabbit://nova:{{ openstack_helm_endpoints.oslo_messaging.auth.neutron.password }}@{{ cluster_ip }}:5672/neutron"
+ - section: oslo_messaging_rabbit
+ option: ssl
+ value: false
+ vars:
+ cluster_ip: "{{ _neutron_rabbitmq_service.resources[0].spec.clusterIP }}"
+
+- name: Restart all Neutron agents
+ delegate_to: "{{ item }}"
+ ansible.builtin.service:
+ name: neutron.slice
+ state: restarted
+ with_items: "{{ groups['neutron_linuxbridge_agent'] }}"
+
+- name: Migrate HAproxy
+ ansible.builtin.include_role:
+ name: migrate_haproxy_from_osa
+ vars:
+ migrate_haproxy_from_osa_group: neutron_server
+ migrate_haproxy_from_osa_service_namespace: "{{ neutron_helm_release_namespace }}"
+ migrate_haproxy_from_osa_service_name: neutron-server
+ migrate_haproxy_from_osa_haproxy_backend: neutron_server
diff --git a/roles/nova/tasks/generate_public_key.yml b/roles/nova/tasks/generate_public_key.yml
new file mode 100644
index 0000000..872ea71
--- /dev/null
+++ b/roles/nova/tasks/generate_public_key.yml
@@ -0,0 +1,44 @@
+# Copyright (c) 2023 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+- name: Generate public key for SSH private key
+ become: false
+ delegate_to: localhost
+ block:
+ - name: Generate temporary file for SSH public key
+ changed_when: false
+ ansible.builtin.tempfile:
+ state: file
+ prefix: nova_ssh_key_
+ register: _nova_ssh_key_tempfile
+ # NOTE(mnaser): It's important to add a trailing newline at the end of this
+  # string or else `ssh-keygen` will not be happy.
+ - name: Write contents of current private SSH key
+ changed_when: false
+ ansible.builtin.copy:
+ dest: "{{ _nova_ssh_key_tempfile.path }}"
+ content: "{{ nova_ssh_key }}\n"
+ mode: "0600"
+ - name: Generate public key for SSH private key
+ changed_when: false
+ community.crypto.openssh_keypair:
+ path: "{{ _nova_ssh_key_tempfile.path }}"
+ regenerate: never
+ register: _nova_ssh_publickey
+ always:
+ - name: Delete temporary file for public SSH key
+ changed_when: false
+ ansible.builtin.file:
+ path: "{{ _nova_ssh_key_tempfile.path }}"
+ state: absent
diff --git a/roles/nova/tasks/main.yml b/roles/nova/tasks/main.yml
index 518409d..f1d868e 100644
--- a/roles/nova/tasks/main.yml
+++ b/roles/nova/tasks/main.yml
@@ -35,35 +35,8 @@
namespace: "{{ nova_helm_release_namespace }}"
- name: Generate public key for SSH private key
- become: false
- delegate_to: localhost
- block:
- - name: Generate temporary file for SSH public key
- changed_when: false
- ansible.builtin.tempfile:
- state: file
- prefix: nova_ssh_key_
- register: _nova_ssh_key_tempfile
- # NOTE(mnaser): It's important to add a trailing newline at the end of this
- # string or else `ssh-keygen` will not be happy.`
- - name: Write contents of current private SSH key
- changed_when: false
- ansible.builtin.copy:
- dest: "{{ _nova_ssh_key_tempfile.path }}"
- content: "{{ nova_ssh_key }}\n"
- mode: "0600"
- - name: Generate public key for SSH private key
- changed_when: false
- community.crypto.openssh_keypair:
- path: "{{ _nova_ssh_key_tempfile.path }}"
- regenerate: never
- register: _nova_ssh_publickey
- always:
- - name: Delete temporary file for public SSH key
- changed_when: false
- ansible.builtin.file:
- path: "{{ _nova_ssh_key_tempfile.path }}"
- state: absent
+ ansible.builtin.import_tasks:
+ file: generate_public_key.yml
- name: Deploy Helm chart
run_once: true
diff --git a/roles/nova/tasks/migrate_from_osa.yml b/roles/nova/tasks/migrate_from_osa.yml
new file mode 100644
index 0000000..62a070c
--- /dev/null
+++ b/roles/nova/tasks/migrate_from_osa.yml
@@ -0,0 +1,135 @@
+# Copyright (c) 2023 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+- name: Generate public key for SSH private key
+ ansible.builtin.import_tasks:
+ file: generate_public_key.yml
+
+- name: Generate configuration difference
+ ansible.builtin.include_role:
+ name: osa_config_diff
+ vars:
+ osa_config_diff_containers_group: nova_conductor
+ osa_config_diff_chart_ref: "{{ nova_helm_chart_ref }}"
+ osa_config_diff_release_namespace: "{{ nova_helm_release_namespace }}"
+ osa_config_diff_release_values: "{{ _nova_helm_values | combine(nova_helm_values, recursive=True) }}"
+ osa_config_diff_config_files:
+ nova.conf: /etc/nova/nova.conf
+
+- name: Migrate the databases
+ ansible.builtin.include_role:
+ name: migrate_db_from_osa
+ vars:
+ migrate_db_from_osa_pxc_namespace: "{{ nova_helm_release_namespace }}"
+ migrate_db_from_osa_containers_group: nova_conductor
+ migrate_db_from_osa_databases:
+ nova: nova
+ nova_api: nova_api
+ nova_cell0: nova_cell0
+
+- name: Get the Kubernetes service for Percona XtraDB Cluster
+ run_once: true
+ kubernetes.core.k8s_info:
+ kind: Service
+ name: "{{ openstack_helm_endpoints.oslo_db.hosts.default }}"
+ namespace: "{{ nova_helm_release_namespace }}"
+ register: _nova_pxc_service
+
+- name: Update cell mappings
+ run_once: true
+ delegate_to: "{{ groups['galera_all'][0] }}"
+ community.mysql.mysql_query:
+ login_host: "{{ _nova_pxc_service.resources[0].spec.clusterIP }}"
+ login_user: root
+ login_password: "{{ openstack_helm_endpoints.oslo_db.auth.admin.password }}"
+ login_db: nova_api
+ query: UPDATE cell_mappings SET transport_url = %(transport_url)s, database_connection = %(database_connection)s WHERE name = %(name)s
+ named_args: "{{ item }}"
+ loop:
+ - name: cell0
+ transport_url: none:/
+ database_connection: "mysql+pymysql://nova:{{ openstack_helm_endpoints.oslo_db_cell0.auth.nova.password }}@percona-xtradb-haproxy.openstack.svc.cluster.local:3306/nova_cell0"
+ - name: cell1
+ transport_url: "rabbit://nova:{{ openstack_helm_endpoints.oslo_messaging.auth.nova.password }}@rabbitmq-nova.openstack.svc.cluster.local:5672/nova"
+ database_connection: "mysql+pymysql://nova:{{ openstack_helm_endpoints.oslo_db.auth.nova.password }}@percona-xtradb-haproxy.openstack.svc.cluster.local:3306/nova"
+ loop_control:
+ label: "{{ item.name }}"
+
+- name: Run deployment flow
+ ansible.builtin.import_tasks:
+ file: main.yml
+
+- name: Get the Kubernetes service for RabbitMQ
+ run_once: true
+ kubernetes.core.k8s_info:
+ kind: Service
+ name: "{{ openstack_helm_endpoints.oslo_messaging.hosts.default }}"
+ namespace: "{{ nova_helm_release_namespace }}"
+ register: _nova_rabbitmq_service
+
+- name: Update RabbitMQ configuration for compute nodes
+ delegate_to: "{{ item.0 }}"
+ community.general.ini_file:
+ path: /etc/nova/nova.conf
+ section: "{{ item.1.section }}"
+ option: "{{ item.1.option }}"
+ value: "{{ item.1.value }}"
+ mode: 0644
+ with_nested:
+ - "{{ groups['nova_compute'] }}"
+ - - section: DEFAULT
+ option: transport_url
+ value: "rabbit://nova:{{ openstack_helm_endpoints.oslo_messaging.auth.nova.password }}@{{ cluster_ip }}:5672/nova"
+ - section: oslo_messaging_notifications
+ option: transport_url
+ value: "rabbit://nova:{{ openstack_helm_endpoints.oslo_messaging.auth.nova.password }}@{{ cluster_ip }}:5672/nova"
+ - section: oslo_messaging_rabbit
+ option: ssl
+ value: false
+ vars:
+ cluster_ip: "{{ _nova_rabbitmq_service.resources[0].spec.clusterIP }}"
+
+- name: Restart all compute services
+ delegate_to: "{{ item }}"
+ ansible.builtin.service:
+ name: nova-compute
+ state: restarted
+ with_items: "{{ groups['nova_compute'] }}"
+
+- name: Migrate HAproxy (API)
+ ansible.builtin.include_role:
+ name: migrate_haproxy_from_osa
+ vars:
+ migrate_haproxy_from_osa_group: nova_conductor
+ migrate_haproxy_from_osa_service_namespace: "{{ nova_helm_release_namespace }}"
+ migrate_haproxy_from_osa_service_name: nova-api
+ migrate_haproxy_from_osa_haproxy_backend: nova_api_os_compute
+
+- name: Migrate HAproxy (Metadata)
+ ansible.builtin.include_role:
+ name: migrate_haproxy_from_osa
+ vars:
+ migrate_haproxy_from_osa_group: nova_conductor
+ migrate_haproxy_from_osa_service_namespace: "{{ nova_helm_release_namespace }}"
+ migrate_haproxy_from_osa_service_name: nova-metadata
+ migrate_haproxy_from_osa_haproxy_backend: nova_api_metadata
+
+- name: Migrate HAproxy (VNC)
+ ansible.builtin.include_role:
+ name: migrate_haproxy_from_osa
+ vars:
+ migrate_haproxy_from_osa_group: nova_conductor
+ migrate_haproxy_from_osa_service_namespace: "{{ nova_helm_release_namespace }}"
+ migrate_haproxy_from_osa_service_name: nova-novncproxy
+ migrate_haproxy_from_osa_haproxy_backend: nova_console
diff --git a/roles/octavia/defaults/main.yml b/roles/octavia/defaults/main.yml
index 215b0af..39494fa 100644
--- a/roles/octavia/defaults/main.yml
+++ b/roles/octavia/defaults/main.yml
@@ -22,10 +22,20 @@
# List of annotations to apply to the Ingress
octavia_ingress_annotations: {}
+# TLS settings
+octavia_tls_server_common_name: octavia-server
+octavia_tls_server_private_key_algorithm: ECDSA
+octavia_tls_server_private_key_size: 256
+octavia_tls_client_common_name: octavia-client
+octavia_tls_client_private_key_algorithm: ECDSA
+octavia_tls_client_private_key_size: 256
+
# Heartbeat key
octavia_heartbeat_key: "{{ undef(hint='You must specify a Octavia heartbeat key') }}"
# Octavia management subnet (CIDR)
+octavia_management_network_name: lb-mgmt-net
+octavia_management_subnet_name: lb-mgmt-subnet
octavia_management_subnet_cidr: "172.24.0.0/22"
# Octavia Amphora flavor
@@ -34,6 +44,9 @@
octavia_amphora_flavor_ram: 2048
octavia_amphora_flavor_disk: 0
+# Octavia Amphora security group
+octavia_amphora_security_group_name: lb-mgmt-sec-grp
+
# Octavia Amphora Image
octavia_amphora_image_name: amphora-x64-haproxy
octavia_amphora_image_url: https://tarballs.opendev.org/openstack/octavia/test-images/test-only-amphora-x64-haproxy-ubuntu-focal.qcow2
diff --git a/roles/octavia/tasks/generate_resources.yml b/roles/octavia/tasks/generate_resources.yml
new file mode 100644
index 0000000..db7ab01
--- /dev/null
+++ b/roles/octavia/tasks/generate_resources.yml
@@ -0,0 +1,149 @@
+# Copyright (c) 2022 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+- name: Create management network
+ openstack.cloud.network:
+ cloud: atmosphere
+ # Network settings
+ name: "{{ octavia_management_network_name }}"
+ register: _octavia_management_network
+
+- name: Create management subnet
+ openstack.cloud.subnet:
+ cloud: atmosphere
+ # Subnet settings
+ network_name: "{{ octavia_management_network_name }}"
+ name: "{{ octavia_management_subnet_name }}"
+ cidr: "{{ octavia_management_subnet_cidr }}"
+
+- name: Create health manager security group
+ openstack.cloud.security_group:
+ cloud: atmosphere
+ name: lb-health-mgr-sec-grp
+ register: _octavia_health_manager_sg
+
+- name: Create health manager security group rules
+ openstack.cloud.security_group_rule:
+ cloud: atmosphere
+ security_group: "{{ _octavia_health_manager_sg.id }}"
+ direction: ingress
+ ethertype: IPv4
+ protocol: tcp
+ port_range_min: "{{ item }}"
+ port_range_max: "{{ item }}"
+ loop:
+ - 5555
+ - 10514
+ - 20514
+
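+# NOTE: octavia_health_manager_ip is only defined when migrating from OSA (it
+# is set in migrate_from_osa.yml); in that case the port keeps its existing
+# address, otherwise fixed_ips is omitted and Neutron allocates one.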
+- name: Create health manager networking ports
+ openstack.cloud.port:
+ cloud: atmosphere
+ name: "octavia-health-manager-port-{{ hostvars[item]['inventory_hostname_short'] }}"
+ device_owner: octavia:health-mgr
+ network: "{{ _octavia_management_network.id }}"
+ fixed_ips: >-
+ {{
+ [
+ {
+ "ip_address": hostvars[item]['octavia_health_manager_ip']
+ }
+ ]
+ if hostvars[item]['octavia_health_manager_ip'] is defined else omit
+ }}
+ security_groups:
+ - "{{ _octavia_health_manager_sg.id }}"
+ loop: "{{ groups['controllers'] }}"
+ loop_control:
+ index_var: _octavia_health_manager_port_index
+
+# NOTE(mnaser): Since we're running the playbook targeted at the first
+# controller only, we need to manually discover the facts for the
+# other controllers.
+- name: Discover facts for other controllers
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ ansible.builtin.setup:
+ gather_subset: network
+ loop: "{{ groups['controllers'] }}"
+
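+# NOTE: the binding host is set through the CLI here; this assumes the pinned
+# openstack.cloud release does not expose binding:host_id on the port module.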
+- name: Set binding for ports
+ changed_when: false
+ ansible.builtin.shell: |
+ openstack port set \
+ --host {{ hostvars[item]['ansible_fqdn'] }} \
+ octavia-health-manager-port-{{ hostvars[item]['inventory_hostname_short'] }}
+ environment:
+ OS_CLOUD: atmosphere
+ loop: "{{ groups['controllers'] }}"
+
+- name: Get health manager networking ports
+ openstack.cloud.port_info:
+ cloud: atmosphere
+ port: "octavia-health-manager-port-{{ hostvars[item]['ansible_fqdn'] | split('.') | first }}"
+ loop: "{{ groups['controllers'] }}"
+ register: _octavia_health_manager_ports
+
+- name: Set controller_ip_port_list
+ ansible.builtin.set_fact:
+ _octavia_controller_ip_port_list: "{{ (_octavia_controller_ip_port_list | d([]) + [item.openstack_ports[0].fixed_ips[0].ip_address + ':5555']) | unique }}"
+ loop: "{{ _octavia_health_manager_ports.results }}"
+ loop_control:
+ label: "{{ item.openstack_ports[0].name }}"
+
+- name: Create amphora security group
+ openstack.cloud.security_group:
+ cloud: atmosphere
+ name: "{{ octavia_amphora_security_group_name }}"
+ register: _octavia_amphora_sg
+
+- name: Create amphora security group rules
+ openstack.cloud.security_group_rule:
+ cloud: atmosphere
+ security_group: "{{ _octavia_amphora_sg.id }}"
+ direction: ingress
+ ethertype: IPv4
+ protocol: tcp
+ port_range_min: "{{ item.0 }}"
+ port_range_max: "{{ item.0 }}"
+ remote_ip_prefix: "{{ item.1.openstack_ports[0].fixed_ips[0].ip_address }}/32"
+ with_nested:
+ - [22, 9443]
+ - "{{ _octavia_health_manager_ports.results }}"
+
+- name: Create amphora flavor
+ openstack.cloud.compute_flavor:
+ cloud: atmosphere
+ name: "{{ octavia_amphora_flavor_name }}"
+ vcpus: "{{ octavia_amphora_flavor_vcpus }}"
+ ram: "{{ octavia_amphora_flavor_ram }}"
+ disk: "{{ octavia_amphora_flavor_disk }}"
+ is_public: false
+ register: _octavia_amphora_flavor
+
+- name: Upload Amphora image
+ ansible.builtin.include_role:
+ name: glance_image
+ vars:
+ glance_image_name: "{{ octavia_amphora_image_name }}"
+ glance_image_url: "{{ octavia_amphora_image_url }}"
+ glance_image_container_format: "{{ octavia_amphora_image_container_format }}"
+ glance_image_disk_format: "{{ octavia_amphora_image_disk_format }}"
+ glance_image_tags: "{{ octavia_amphora_image_tags }}"
+
+- name: Get Amphora image information
+ openstack.cloud.image_info:
+ cloud: atmosphere
+ image: "{{ octavia_amphora_image_name }}"
+ register: _octavia_amphora_image
diff --git a/roles/octavia/tasks/main.yml b/roles/octavia/tasks/main.yml
index 101a421..22b21ee 100644
--- a/roles/octavia/tasks/main.yml
+++ b/roles/octavia/tasks/main.yml
@@ -34,120 +34,9 @@
name: "{{ octavia_helm_release_name }}"
namespace: "{{ octavia_helm_release_namespace }}"
-- name: Create management network
- openstack.cloud.network:
- cloud: atmosphere
- # Network settings
- name: lb-mgmt-net
- register: _octavia_management_network
-
-- name: Create management subnet
- openstack.cloud.subnet:
- cloud: atmosphere
- # Subnet settings
- network_name: lb-mgmt-net
- name: lb-mgmt-subnet
- cidr: "{{ octavia_management_subnet_cidr }}"
-
-- name: Create health manager security group
- openstack.cloud.security_group:
- cloud: atmosphere
- name: lb-health-mgr-sec-grp
- register: _octavia_health_manager_sg
-
-- name: Create health manager security group rules
- openstack.cloud.security_group_rule:
- cloud: atmosphere
- security_group: "{{ _octavia_health_manager_sg.id }}"
- direction: ingress
- ethertype: IPv4
- protocol: tcp
- port_range_min: "{{ item }}"
- port_range_max: "{{ item }}"
- loop:
- - 5555
- - 10514
- - 20514
-
-- name: Create health manager networking ports
- openstack.cloud.port:
- cloud: atmosphere
- name: "octavia-health-manager-port-{{ hostvars[item]['inventory_hostname_short'] }}"
- device_owner: octavia:health-mgr
- network: "{{ _octavia_management_network.id }}"
- security_groups:
- - "{{ _octavia_health_manager_sg.id }}"
- loop: "{{ groups['controllers'] }}"
-
-- name: Set binding for ports
- changed_when: false
- ansible.builtin.shell: |
- openstack port set \
- --host {{ hostvars[item]['ansible_fqdn'] }} \
- octavia-health-manager-port-{{ hostvars[item]['inventory_hostname_short'] }}
- environment:
- OS_CLOUD: atmosphere
- loop: "{{ groups['controllers'] }}"
-
-- name: Get health manager networking ports
- openstack.cloud.port_info:
- cloud: atmosphere
- port: "octavia-health-manager-port-{{ hostvars[item]['ansible_fqdn'] | split('.') | first }}"
- loop: "{{ groups['controllers'] }}"
- register: _octavia_health_manager_ports
-
-- name: Set controller_ip_port_list
- ansible.builtin.set_fact:
- _octavia_controller_ip_port_list: "{{ (_octavia_controller_ip_port_list | d([]) + [item.openstack_ports[0].fixed_ips[0].ip_address + ':5555']) | unique }}"
- loop: "{{ _octavia_health_manager_ports.results }}"
- loop_control:
- label: "{{ item.openstack_ports[0].name }}"
-
-- name: Create amphora security group
- openstack.cloud.security_group:
- cloud: atmosphere
- name: lb-mgmt-sec-grp
- register: _octavia_amphora_sg
-
-- name: Create amphora security group rules
- openstack.cloud.security_group_rule:
- cloud: atmosphere
- security_group: "{{ _octavia_amphora_sg.id }}"
- direction: ingress
- ethertype: IPv4
- protocol: tcp
- port_range_min: "{{ item.0 }}"
- port_range_max: "{{ item.0 }}"
- remote_ip_prefix: "{{ item.1.openstack_ports[0].fixed_ips[0].ip_address }}/32"
- with_nested:
- - [22, 9443]
- - "{{ _octavia_health_manager_ports.results }}"
-
-- name: Create amphora flavor
- openstack.cloud.compute_flavor:
- cloud: atmosphere
- name: "{{ octavia_amphora_flavor_name }}"
- vcpus: "{{ octavia_amphora_flavor_vcpus }}"
- ram: "{{ octavia_amphora_flavor_ram }}"
- disk: "{{ octavia_amphora_flavor_disk }}"
- is_public: false
- register: _octavia_amphora_flavor
-
-- name: Upload Amphora image
- ansible.builtin.include_role:
- name: glance_image
- vars:
- glance_image_name: "{{ octavia_amphora_image_name }}"
- glance_image_url: "{{ octavia_amphora_image_url }}"
- glance_image_container_format: "{{ octavia_amphora_image_container_format }}"
- glance_image_disk_format: "{{ octavia_amphora_image_disk_format }}"
- glance_image_tags: "{{ octavia_amphora_image_tags }}"
-
-- name: Get Amphora image information
- openstack.cloud.image_info:
- cloud: atmosphere
- image: "{{ octavia_amphora_image_name }}"
- register: _octavia_amphora_image
+- name: Generate resources
+ ansible.builtin.import_tasks:
+ file: generate_resources.yml
- name: Create CAs & Issuers
kubernetes.core.k8s:
@@ -160,13 +49,11 @@
namespace: openstack
spec:
isCA: true
- commonName: "{{ item }}"
+ commonName: "{{ octavia_tls_server_common_name if item == 'octavia-server' else octavia_tls_client_common_name }}"
secretName: "{{ item }}-ca"
duration: 87600h
renewBefore: 720h
- privateKey:
- algorithm: ECDSA
- size: 256
+ privateKey: "{{ private_key | from_yaml }}"
issuerRef:
name: self-signed
kind: ClusterIssuer
@@ -180,6 +67,12 @@
spec:
ca:
secretName: "{{ item }}-ca"
+ vars:
+ # NOTE(mnaser): Unfortunately, Ansible renders all variables as strings so
+ # we do this workaround to make sure the size is an integer.
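+ # For example, templating the size straight into the mapping would render it
+ # as the string "256" and fail the Certificate schema validation; parsing
+ # this block with from_yaml yields a proper integer.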
+ private_key: |
+ algorithm: "{{ octavia_tls_server_private_key_algorithm if item == 'octavia-server' else octavia_tls_client_private_key_algorithm }}"
+ size: {{ octavia_tls_server_private_key_size if item == 'octavia-server' else octavia_tls_client_private_key_size }}
loop:
- octavia-client
- octavia-server
@@ -194,16 +87,23 @@
name: octavia-client-certs
namespace: openstack
spec:
- commonName: octavia-client
+ commonName: "{{ octavia_tls_client_common_name }}"
secretName: octavia-client-certs
additionalOutputFormats:
- type: CombinedPEM
duration: 87600h
renewBefore: 720h
+ privateKey: "{{ private_key | from_yaml }}"
issuerRef:
name: octavia-client
kind: Issuer
group: cert-manager.io
+ vars:
+ # NOTE(mnaser): Unfortunately, Ansible renders all variables as strings so
+ # we do this workaround to make sure the size is an integer.
+ private_key: |
+ algorithm: "{{ octavia_tls_client_private_key_algorithm }}"
+ size: {{ octavia_tls_client_private_key_size }}
- name: Create admin compute quotaset
openstack.cloud.quota:
diff --git a/roles/octavia/tasks/migrate_from_osa.yml b/roles/octavia/tasks/migrate_from_osa.yml
new file mode 100644
index 0000000..4f292ea
--- /dev/null
+++ b/roles/octavia/tasks/migrate_from_osa.yml
@@ -0,0 +1,141 @@
+# Copyright (c) 2023 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
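+# NOTE: each OSA health manager container exposes its management address under
+# container_networks.lbaas_address.address; the task below pins that address to
+# the controller with the same index so generate_resources.yml can recreate the
+# health manager ports with their existing IPs.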
+- name: Map existing health manager IP addresses to existing controllers
+ run_once: true
+ delegate_to: "{{ groups['controllers'][_octavia_health_manager_ip] }}"
+ delegate_facts: true
+ ansible.builtin.set_fact:
+ octavia_health_manager_ip: "{{ item }}"
+ loop: "{{ groups['octavia-health-manager'] | map('extract', hostvars, ['container_networks', 'lbaas_address', 'address']) | list }}"
+ loop_control:
+ index_var: _octavia_health_manager_ip
+
+- name: Slurp configuration file for Octavia
+ run_once: true
+ delegate_to: "{{ groups['octavia_all'][0] }}"
+ ansible.builtin.slurp:
+ src: /etc/octavia/octavia.conf
+ register: _octavia_conf
+
+- name: Generate fact with Octavia configuration file
+ run_once: true
+ ansible.builtin.set_fact:
+ _octavia_conf: "{{ _octavia_conf['content'] | b64decode | vexxhost.atmosphere.from_ini }}"
+
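+# NOTE: the cert-manager.io/* annotations below mirror what cert-manager sets
+# on the secrets it manages, so the Certificates created by the deployment flow
+# should adopt the imported OSA CAs and client certificate instead of
+# reissuing them.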
+- name: Create secrets for server CA, client CA and client certificates
+ run_once: true
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ - apiVersion: v1
+ kind: Secret
+ metadata:
+ name: octavia-server-ca
+ namespace: "{{ octavia_helm_release_namespace }}"
+ annotations:
+ cert-manager.io/alt-names: ""
+ cert-manager.io/certificate-name: octavia-server-ca
+ cert-manager.io/common-name: octavia-server
+ cert-manager.io/ip-sans: ""
+ cert-manager.io/issuer-group: cert-manager.io
+ cert-manager.io/issuer-kind: ClusterIssuer
+ cert-manager.io/issuer-name: self-signed
+ cert-manager.io/uri-sans: ""
+ type: kubernetes.io/tls
+ stringData:
+ ca.crt: "{{ lookup('file', _octavia_cert_dir ~ '/ca_server_01.pem') }}"
+ tls.crt: "{{ lookup('file', _octavia_cert_dir ~ '/ca_server_01.pem') }}"
+ tls.key: "{{ lookup('pipe', 'openssl rsa -in ' ~ _octavia_cert_dir ~ '/private/cakey.pem -passin pass:' ~ _octavia_cert_passphrase) }}"
+
+ - apiVersion: v1
+ kind: Secret
+ metadata:
+ name: octavia-client-ca
+ namespace: "{{ octavia_helm_release_namespace }}"
+ annotations:
+ cert-manager.io/alt-names: ""
+ cert-manager.io/certificate-name: octavia-client-ca
+ cert-manager.io/common-name: octavia-client
+ cert-manager.io/ip-sans: ""
+ cert-manager.io/issuer-group: cert-manager.io
+ cert-manager.io/issuer-kind: ClusterIssuer
+ cert-manager.io/issuer-name: self-signed
+ cert-manager.io/uri-sans: ""
+ type: kubernetes.io/tls
+ stringData:
+ ca.crt: "{{ lookup('file', _octavia_cert_dir ~ '/ca_01.pem') }}"
+ tls.crt: "{{ lookup('file', _octavia_cert_dir ~ '/ca_01.pem') }}"
+ tls.key: "{{ lookup('pipe', 'openssl rsa -in ' ~ _octavia_cert_dir ~ '/ca_01.key -passin pass:' ~ _octavia_cert_passphrase) }}"
+
+ - apiVersion: v1
+ kind: Secret
+ metadata:
+ name: octavia-client-certs
+ namespace: "{{ octavia_helm_release_namespace }}"
+ annotations:
+ cert-manager.io/alt-names: ""
+ cert-manager.io/certificate-name: octavia-client-certs
+ cert-manager.io/common-name: octavia-client
+ cert-manager.io/ip-sans: ""
+ cert-manager.io/issuer-group: cert-manager.io
+ cert-manager.io/issuer-kind: Issuer
+ cert-manager.io/issuer-name: octavia-client
+ cert-manager.io/uri-sans: ""
+ type: kubernetes.io/tls
+ stringData:
+ ca.crt: "{{ lookup('file', _octavia_cert_dir ~ '/ca_01.pem') }}"
+ tls-combined.pem: "{{ lookup('file', _octavia_cert_dir ~ '/client.pem') }}"
+ tls.crt: "{{ lookup('file', _octavia_cert_dir ~ '/client-.pem') }}"
+ tls.key: "{{ lookup('file', _octavia_cert_dir ~ '/client.key') }}"
+ vars:
+ _octavia_cert_dir: "{{ lookup('env', 'HOME') }}/openstack-ansible/octavia"
+ _octavia_cert_passphrase: "{{ _octavia_conf.certificates.ca_private_key_passphrase }}"
+
+- name: Generate resources
+ ansible.builtin.import_tasks:
+ file: generate_resources.yml
+
+- name: Generate configuration difference
+ ansible.builtin.include_role:
+ name: osa_config_diff
+ vars:
+ osa_config_diff_containers_group: octavia_all
+ osa_config_diff_chart_ref: "{{ octavia_helm_chart_ref }}"
+ osa_config_diff_release_namespace: "{{ octavia_helm_release_namespace }}"
+ osa_config_diff_release_values: "{{ _octavia_helm_values | combine(octavia_helm_values, recursive=True) }}"
+ osa_config_diff_config_files:
+ octavia.conf: /etc/octavia/octavia.conf
+
+- name: Migrate the database
+ ansible.builtin.include_role:
+ name: migrate_db_from_osa
+ vars:
+ migrate_db_from_osa_pxc_namespace: "{{ octavia_helm_release_namespace }}"
+ migrate_db_from_osa_containers_group: octavia_all
+ migrate_db_from_osa_databases:
+ octavia: octavia
+
+- name: Run deployment flow
+ ansible.builtin.import_tasks:
+ file: main.yml
+
+- name: Migrate HAproxy
+ ansible.builtin.include_role:
+ name: migrate_haproxy_from_osa
+ vars:
+ migrate_haproxy_from_osa_group: octavia_all
+ migrate_haproxy_from_osa_service_namespace: "{{ octavia_helm_release_namespace }}"
+ migrate_haproxy_from_osa_service_name: octavia-api
+ migrate_haproxy_from_osa_haproxy_backend: octavia
diff --git a/roles/osa_config_diff/README.md b/roles/osa_config_diff/README.md
new file mode 100644
index 0000000..2f1354e
--- /dev/null
+++ b/roles/osa_config_diff/README.md
@@ -0,0 +1,4 @@
+# `osa_config_diff`
+
+This role compares the configuration files of an existing OpenStack Ansible
+deployment against the configuration an Atmosphere deployment would generate,
+prints the difference and pauses so it can be reviewed before migrating.
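+
+A minimal invocation, mirroring how the placement role calls it:
+
+```yaml
+- name: Generate configuration difference
+  ansible.builtin.include_role:
+    name: osa_config_diff
+  vars:
+    osa_config_diff_containers_group: placement_all
+    osa_config_diff_chart_ref: "{{ placement_helm_chart_ref }}"
+    osa_config_diff_release_namespace: "{{ placement_helm_release_namespace }}"
+    osa_config_diff_release_values: "{{ _placement_helm_values | combine(placement_helm_values, recursive=True) }}"
+    osa_config_diff_config_files:
+      placement.conf: /etc/placement/placement.conf
+```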
diff --git a/roles/osa_config_diff/defaults/main.yml b/roles/osa_config_diff/defaults/main.yml
new file mode 100644
index 0000000..88ace8b
--- /dev/null
+++ b/roles/osa_config_diff/defaults/main.yml
@@ -0,0 +1,31 @@
+# Copyright (c) 2023 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Inventory group for the containers
+# osa_config_diff_containers_group:
+
+# Configuration files to compare, keyed by the ConfigMap key used in the
+# Atmosphere chart, with the path of the matching OpenStack Ansible
+# configuration file as the value, for example:
+#   octavia.conf: /etc/octavia/octavia.conf
+# osa_config_diff_config_files:
+
+# Path to the Helm chart
+# osa_config_diff_chart_ref:
+
+# Namespace for the Helm chart
+# osa_config_diff_release_namespace:
+
+# Release values for the Helm chart
+# osa_config_diff_release_values:
diff --git a/roles/osa_config_diff/meta/main.yml b/roles/osa_config_diff/meta/main.yml
new file mode 100644
index 0000000..03acaa7
--- /dev/null
+++ b/roles/osa_config_diff/meta/main.yml
@@ -0,0 +1,24 @@
+# Copyright (c) 2022 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+galaxy_info:
+ author: VEXXHOST, Inc.
+ description: Ansible role for OSA config diff
+ license: Apache-2.0
+ min_ansible_version: 5.5.0
+ standalone: false
+ platforms:
+ - name: Ubuntu
+ versions:
+ - focal
diff --git a/roles/osa_config_diff/tasks/main.yml b/roles/osa_config_diff/tasks/main.yml
new file mode 100644
index 0000000..872f017
--- /dev/null
+++ b/roles/osa_config_diff/tasks/main.yml
@@ -0,0 +1,86 @@
+# Copyright (c) 2023 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+- name: Reset the value of facts used for generating the configuration
+ run_once: true
+ ansible.builtin.set_fact:
+ _osa_config_diff_osa: {}
+ _osa_config_diff_atmosphere: {}
+
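+# NOTE: when the service runs inside an LXC container (is_metal is false), the
+# file is read from the container rootfs on the physical host; on metal hosts
+# the path is used as-is.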
+- name: Slurp the configuration files for the service
+ run_once: true
+ delegate_to: "{{ hostvars[container]['physical_host'] }}"
+ ansible.builtin.slurp:
+ path: "{{ prefix }}{{ item.value }}"
+ register: _osa_config_diff_file
+ loop: "{{ osa_config_diff_config_files | dict2items }}"
+ vars:
+ container: "{{ groups[osa_config_diff_containers_group][0] }}"
+ prefix: "{% if hostvars[container].get('is_metal') == False %}/var/lib/lxc/{{ container }}/rootfs{% endif %}"
+
+- name: Generate dictionary with all OpenStack Ansible configuration files (INI)
+ run_once: true
+ when: item.item.key.endswith('.conf')
+ ansible.builtin.set_fact:
+ _osa_config_diff_osa: "{{ _osa_config_diff_osa | combine({ item.item.key: item.content | b64decode | vexxhost.atmosphere.from_ini }) }}"
+ loop: "{{ _osa_config_diff_file.results }}"
+ loop_control:
+ label: "{{ item.item.key }}"
+
+- name: Generate dictionary with all OpenStack Ansible configuration files (YAML)
+ run_once: true
+ when: item.item.key.endswith('.yaml')
+ ansible.builtin.set_fact:
+ _osa_config_diff_osa: "{{ _osa_config_diff_osa | combine({ item.item.key: item.content | b64decode | from_yaml }) }}"
+ loop: "{{ _osa_config_diff_file.results }}"
+ loop_control:
+ label: "{{ item.item.key }}"
+
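+# NOTE: helm_template renders the chart locally without installing a release;
+# only configmap-etc.yaml is rendered so its data keys can be decoded and
+# compared against the files slurped from OSA above.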
+- name: Generate configuration using Atmosphere
+ run_once: true
+ changed_when: false
+ kubernetes.core.helm_template:
+ chart_ref: "{{ osa_config_diff_chart_ref }}"
+ release_namespace: "{{ osa_config_diff_release_namespace }}"
+ release_values: "{{ osa_config_diff_release_values }}"
+ show_only:
+ - templates/configmap-etc.yaml
+ register: _osa_config_diff_helm
+
+- name: Generate dictionary with all Atmosphere configuration files (INI)
+ run_once: true
+ when: item.key.endswith('.conf')
+ ansible.builtin.set_fact:
+ _osa_config_diff_atmosphere: "{{ _osa_config_diff_atmosphere | combine({item.key: _file_contents | vexxhost.atmosphere.from_ini }) }}"
+ loop: "{{ osa_config_diff_config_files | dict2items }}"
+ vars:
+ _file_contents: "{{ (_osa_config_diff_helm.stdout | from_yaml).data[item.key] | b64decode }}"
+
+- name: Generate dictionary with all Atmosphere configuration files (YAML)
+ run_once: true
+ when: item.key.endswith('.yaml')
+ ansible.builtin.set_fact:
+ _osa_config_diff_atmosphere: "{{ _osa_config_diff_atmosphere | combine({item.key: _file_contents | from_yaml }) }}"
+ loop: "{{ osa_config_diff_config_files | dict2items }}"
+ vars:
+ _file_contents: "{{ (_osa_config_diff_helm.stdout | from_yaml).data[item.key] | b64decode }}"
+
+- name: Print difference between Atmosphere and OpenStack Ansible
+ run_once: true
+ ansible.utils.fact_diff:
+ before: "{{ _osa_config_diff_osa }}"
+ after: "{{ _osa_config_diff_atmosphere }}"
+
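+# NOTE: pause with no arguments blocks until the operator presses Enter, so the
+# diff printed above can be reviewed before the migration continues.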
+- name: Pause to verify the configuration
+ ansible.builtin.pause:
diff --git a/roles/placement/tasks/migrate_from_osa.yml b/roles/placement/tasks/migrate_from_osa.yml
new file mode 100644
index 0000000..5cbb28b
--- /dev/null
+++ b/roles/placement/tasks/migrate_from_osa.yml
@@ -0,0 +1,46 @@
+# Copyright (c) 2023 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+- name: Generate configuration difference
+ ansible.builtin.include_role:
+ name: osa_config_diff
+ vars:
+ osa_config_diff_containers_group: placement_all
+ osa_config_diff_chart_ref: "{{ placement_helm_chart_ref }}"
+ osa_config_diff_release_namespace: "{{ placement_helm_release_namespace }}"
+ osa_config_diff_release_values: "{{ _placement_helm_values | combine(placement_helm_values, recursive=True) }}"
+ osa_config_diff_config_files:
+ placement.conf: /etc/placement/placement.conf
+
+- name: Migrate the database
+ ansible.builtin.include_role:
+ name: migrate_db_from_osa
+ vars:
+ migrate_db_from_osa_pxc_namespace: "{{ placement_helm_release_namespace }}"
+ migrate_db_from_osa_containers_group: placement_all
+ migrate_db_from_osa_databases:
+ placement: placement
+
+- name: Run deployment flow
+ ansible.builtin.import_tasks:
+ file: main.yml
+
+- name: Migrate HAproxy
+ ansible.builtin.include_role:
+ name: migrate_haproxy_from_osa
+ vars:
+ migrate_haproxy_from_osa_group: placement_all
+ migrate_haproxy_from_osa_service_namespace: "{{ placement_helm_release_namespace }}"
+ migrate_haproxy_from_osa_service_name: placement-api
+ migrate_haproxy_from_osa_haproxy_backend: placement
diff --git a/tests/unit/plugins/filter/test_from_ini.py b/tests/unit/plugins/filter/test_from_ini.py
new file mode 100644
index 0000000..eee4a2e
--- /dev/null
+++ b/tests/unit/plugins/filter/test_from_ini.py
@@ -0,0 +1,56 @@
+# Copyright (c) 2023 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import textwrap
+
+import pytest
+from ansible_collections.vexxhost.atmosphere.plugins.filter.from_ini import (
+ from_ini,
+)
+
+
+@pytest.mark.parametrize(
+ "test_input,expected",
+ [
+ (
+ textwrap.dedent(
+ """
+ """
+ ),
+ {}
+ ),
+ (
+ textwrap.dedent(
+ """
+ [DEFAULT]
+ foo = bar
+
+ [oslo_messaging]
+ transport_url = rabbit://guest:guest@localhost:5672/
+ """
+ ),
+ {
+ "DEFAULT": {
+ "foo": "bar",
+ },
+ "oslo_messaging": {
+ "transport_url": "rabbit://guest:guest@localhost:5672/",
+ },
+ }
+ ),
+ ],
+)
+def test_from_ini(test_input, expected):
+ assert from_ini(test_input) == expected
+