Add Flux CD for Helm deployment

Sem-Ver: feature
Change-Id: I27b08b4be9504045727a4cc2793f7d71c190a1c1
diff --git a/.ansible-lint b/.ansible-lint
index 9b58a7d..15fb549 100644
--- a/.ansible-lint
+++ b/.ansible-lint
@@ -1,3 +1,6 @@
 ---
 exclude_paths:
-  - roles/kube_prometheus_stack/files/
\ No newline at end of file
+  - roles/kube_prometheus_stack/files/
+
+warn_list:
+  - yaml[line-length]
diff --git a/playbooks/openstack.yml b/playbooks/openstack.yml
index 9d8442b..4736047 100644
--- a/playbooks/openstack.yml
+++ b/playbooks/openstack.yml
@@ -20,6 +20,18 @@
       tags:
         - cilium
 
+- hosts: controllers
+  gather_facts: false
+  become: true
+  roles:
+    - role: flux
+      tags:
+        - flux
+
+- hosts: controllers[0]
+  gather_facts: false
+  become: true
+  roles:
     - role: csi
       tags:
         - csi
diff --git a/releasenotes/notes/switch-to-fluxcd-10de5b94a893b285.yaml b/releasenotes/notes/switch-to-fluxcd-10de5b94a893b285.yaml
new file mode 100644
index 0000000..667b232
--- /dev/null
+++ b/releasenotes/notes/switch-to-fluxcd-10de5b94a893b285.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - FluxCD is now used to deploy the Helm charts, which speeds up deployments
+    and will eventually allow dropping the client-side Helm CLI.
diff --git a/roles/ceph_csi_rbd/meta/main.yml b/roles/ceph_csi_rbd/meta/main.yml
index 38ad5f0..9ad753d 100644
--- a/roles/ceph_csi_rbd/meta/main.yml
+++ b/roles/ceph_csi_rbd/meta/main.yml
@@ -21,10 +21,3 @@
     - name: Ubuntu
       versions:
         - focal
-
-dependencies:
-  - role: helm_repository
-    vars:
-      helm_repository_name: ceph
-      helm_repository_repo_url: https://ceph.github.io/csi-charts
-  - cilium
diff --git a/roles/ceph_csi_rbd/tasks/main.yml b/roles/ceph_csi_rbd/tasks/main.yml
index b8839ee..5cdcbd4 100644
--- a/roles/ceph_csi_rbd/tasks/main.yml
+++ b/roles/ceph_csi_rbd/tasks/main.yml
@@ -54,32 +54,52 @@
     _ceph_rbd_csi_ceph_keyring: "{{ _ceph_key.stdout | from_json | first }}"
 
 - name: Deploy Helm chart
-  kubernetes.core.helm:
-    name: ceph-csi-rbd
-    chart_ref: ceph/ceph-csi-rbd
-    chart_version: 3.5.1
-    release_namespace: kube-system
-    kubeconfig: /etc/kubernetes/admin.conf
-    values:
-      csiConfig:
-        - clusterID: "{{ ceph_csi_rbd_ceph_fsid }}"
-          monitors: "{{ ceph_monitors }}"
-      nodeplugin:
-        httpMetrics:
-          containerPort: 8081
-      provisioner:
-        nodeSelector:
-          openstack-control-plane: enabled
-      storageClass:
-        create: true
-        name: general
-        annotations:
-          storageclass.kubernetes.io/is-default-class: "true"
-        clusterID: "{{ ceph_csi_rbd_ceph_fsid }}"
-        pool: "{{ ceph_csi_rbd_pool }}"
-        mountOptions:
-          - discard
-      secret:
-        create: true
-        userID: "{{ ceph_csi_rbd_id }}"
-        userKey: "{{ _ceph_rbd_csi_ceph_keyring.key }}"
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      - apiVersion: source.toolkit.fluxcd.io/v1beta2
+        kind: HelmRepository
+        metadata:
+          name: ceph
+          namespace: kube-system
+        spec:
+          interval: 60s
+          url: https://ceph.github.io/csi-charts
+
+      - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+        kind: HelmRelease
+        metadata:
+          name: ceph-csi-rbd
+          namespace: kube-system
+        spec:
+          interval: 60s
+          chart:
+            spec:
+              chart: ceph-csi-rbd
+              version: 3.5.1
+              sourceRef:
+                kind: HelmRepository
+                name: ceph
+          values:
+            csiConfig:
+              - clusterID: "{{ ceph_csi_rbd_ceph_fsid }}"
+                monitors: "{{ ceph_monitors }}"
+            nodeplugin:
+              httpMetrics:
+                containerPort: 8081
+            provisioner:
+              nodeSelector:
+                openstack-control-plane: enabled
+            storageClass:
+              create: true
+              name: general
+              annotations:
+                storageclass.kubernetes.io/is-default-class: "true"
+              clusterID: "{{ ceph_csi_rbd_ceph_fsid }}"
+              pool: "{{ ceph_csi_rbd_pool }}"
+              mountOptions:
+                - discard
+            secret:
+              create: true
+              userID: "{{ ceph_csi_rbd_id }}"
+              userKey: "{{ _ceph_rbd_csi_ceph_keyring.key }}"
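
Each converted role now applies a HelmRepository and a HelmRelease and leaves
the actual chart installation to the Flux helm-controller, which reconciles
asynchronously. A minimal sketch (not part of this patch; the task name and
timeouts are illustrative) of how a role could block until the release above
reports Ready, mirroring the kube-prometheus-stack wait task further down:

- name: Wait for ceph-csi-rbd HelmRelease to be ready
  kubernetes.core.k8s_info:
    api_version: helm.toolkit.fluxcd.io/v2beta1
    kind: HelmRelease
    name: ceph-csi-rbd
    namespace: kube-system
    wait: true
    wait_sleep: 5
    wait_timeout: 600
    wait_condition:
      type: Ready
      status: true
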
diff --git a/roles/cert_manager/meta/main.yml b/roles/cert_manager/meta/main.yml
index 23ab375..a8f7def 100644
--- a/roles/cert_manager/meta/main.yml
+++ b/roles/cert_manager/meta/main.yml
@@ -21,12 +21,3 @@
     - name: Ubuntu
       versions:
         - focal
-
-dependencies:
-  - role: helm_repository
-    vars:
-      helm_repository_name: jetstack
-      helm_repository_repo_url: https://charts.jetstack.io
-  - cilium
-  - ingress_nginx
-  - openstack_namespace
diff --git a/roles/cert_manager/tasks/main.yml b/roles/cert_manager/tasks/main.yml
index 88cbd8e..c2ff92d 100644
--- a/roles/cert_manager/tasks/main.yml
+++ b/roles/cert_manager/tasks/main.yml
@@ -12,37 +12,69 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-- name: Deploy Helm chart
-  kubernetes.core.helm:
-    name: cert-manager
-    chart_ref: jetstack/cert-manager
-    chart_version: v1.7.1
-    release_namespace: cert-manager
-    create_namespace: true
-    kubeconfig: /etc/kubernetes/admin.conf
-    values:
-      installCRDs: true
-      volumes:
-        - name: etc-ssl-certs
-          hostPath:
-            path: /etc/ssl/certs
-      volumeMounts:
-        - name: etc-ssl-certs
-          mountPath: /etc/ssl/certs
-          readOnly: true
-      nodeSelector:
-        openstack-control-plane: enabled
-      webhook:
-        nodeSelector:
-          openstack-control-plane: enabled
-      cainjector:
-        nodeSelector:
-          openstack-control-plane: enabled
-      startupapicheck:
-        nodeSelector:
-          openstack-control-plane: enabled
+- name: Create namespace
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      apiVersion: v1
+      kind: Namespace
+      metadata:
+        name: cert-manager
 
-- name: Create issuer
+- name: Deploy Helm chart
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      - apiVersion: source.toolkit.fluxcd.io/v1beta2
+        kind: HelmRepository
+        metadata:
+          name: jetstack
+          namespace: cert-manager
+        spec:
+          interval: 60s
+          url: https://charts.jetstack.io
+
+      - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+        kind: HelmRelease
+        metadata:
+          name: cert-manager
+          namespace: cert-manager
+        spec:
+          interval: 60s
+          chart:
+            spec:
+              chart: cert-manager
+              version: v1.7.1
+              sourceRef:
+                kind: HelmRepository
+                name: jetstack
+          install:
+            crds: CreateReplace
+          upgrade:
+            crds: CreateReplace
+          values:
+            installCRDs: true
+            volumes:
+              - name: etc-ssl-certs
+                hostPath:
+                  path: /etc/ssl/certs
+            volumeMounts:
+              - name: etc-ssl-certs
+                mountPath: /etc/ssl/certs
+                readOnly: true
+            nodeSelector:
+              openstack-control-plane: enabled
+            webhook:
+              nodeSelector:
+                openstack-control-plane: enabled
+            cainjector:
+              nodeSelector:
+                openstack-control-plane: enabled
+            startupapicheck:
+              nodeSelector:
+                openstack-control-plane: enabled
+
+- name: Create Issuer
   kubernetes.core.k8s:
     state: present
     definition:
@@ -52,6 +84,13 @@
         name: openstack
         namespace: openstack
       spec: "{{ cert_manager_issuer }}"
+  # NOTE(mnaser): Since we haven't moved to the operator pattern yet, we need to
+  #               keep retrying a few times as the CRDs might not be installed
+  #               yet.
+  retries: 60
+  delay: 5
+  register: _result
+  until: _result is not failed
 
 - name: Bootstrap self-signed PKI
   block:
diff --git a/roles/cilium/meta/main.yml b/roles/cilium/meta/main.yml
index 4a45f6f..0db2ae7 100644
--- a/roles/cilium/meta/main.yml
+++ b/roles/cilium/meta/main.yml
@@ -21,9 +21,3 @@
     - name: Ubuntu
       versions:
         - focal
-
-dependencies:
-  - role: helm_repository
-    vars:
-      helm_repository_name: cilium
-      helm_repository_repo_url: https://helm.cilium.io/
diff --git a/roles/cilium/tasks/main.yml b/roles/cilium/tasks/main.yml
index dadbf7e..170d1b4 100644
--- a/roles/cilium/tasks/main.yml
+++ b/roles/cilium/tasks/main.yml
@@ -12,6 +12,11 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+- name: Configure Helm repository
+  kubernetes.core.helm_repository:
+    name: cilium
+    repo_url: https://helm.cilium.io/
+
 - name: Deploy Helm chart
   kubernetes.core.helm:
     name: cilium
diff --git a/roles/flux/tasks/main.yml b/roles/flux/tasks/main.yml
new file mode 100644
index 0000000..bb89eff
--- /dev/null
+++ b/roles/flux/tasks/main.yml
@@ -0,0 +1,10 @@
+- name: Install Flux CLI
+  ansible.builtin.unarchive:
+    src: https://github.com/fluxcd/flux2/releases/download/v0.32.0/flux_0.32.0_linux_amd64.tar.gz
+    dest: /usr/local/bin
+    remote_src: true
+
+- name: Install Flux on the cluster
+  run_once: true
+  changed_when: false
+  ansible.builtin.command: flux install
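
flux install is idempotent, so re-running the play is safe: the CLI is
unpacked on every controller, while run_once limits the cluster-side install
to a single host. A hedged follow-up sketch (not part of this patch) that
verifies the controllers came up, assuming the stock flux check subcommand:

- name: Verify Flux components
  run_once: true
  changed_when: false
  ansible.builtin.command: flux check
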
diff --git a/roles/helm_repository/meta/main.yml b/roles/helm_repository/meta/main.yml
deleted file mode 100644
index b488e3c..0000000
--- a/roles/helm_repository/meta/main.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright (c) 2022 VEXXHOST, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-galaxy_info:
-  author: VEXXHOST, Inc.
-  description: Ansible role for managing Helm repositories
-  license: Apache-2.0
-  min_ansible_version: 5.5.0
-  platforms:
-    - name: Ubuntu
-      versions:
-        - focal
-
-dependencies:
-  - helm
diff --git a/roles/helm_repository/tasks/main.yml b/roles/helm_repository/tasks/main.yml
deleted file mode 100644
index a7dd7e2..0000000
--- a/roles/helm_repository/tasks/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2022 VEXXHOST, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-- name: Configure Helm repository ({{ helm_repository_name }})
-  kubernetes.core.helm_repository:
-    name: "{{ helm_repository_name }}"
-    repo_url: "{{ helm_repository_repo_url }}"
diff --git a/roles/ingress_nginx/meta/main.yml b/roles/ingress_nginx/meta/main.yml
index 54da99c..39fac40 100644
--- a/roles/ingress_nginx/meta/main.yml
+++ b/roles/ingress_nginx/meta/main.yml
@@ -21,10 +21,3 @@
     - name: Ubuntu
       versions:
         - focal
-
-dependencies:
-  - role: helm_repository
-    vars:
-      helm_repository_name: ingress-nginx
-      helm_repository_repo_url: https://kubernetes.github.io/ingress-nginx
-  - openstack_namespace
diff --git a/roles/ingress_nginx/tasks/main.yml b/roles/ingress_nginx/tasks/main.yml
index 0c886ad..73b1b7b 100644
--- a/roles/ingress_nginx/tasks/main.yml
+++ b/roles/ingress_nginx/tasks/main.yml
@@ -13,29 +13,49 @@
 # under the License.
 
 - name: Deploy Helm chart
-  kubernetes.core.helm:
-    name: ingress-nginx
-    chart_ref: ingress-nginx/ingress-nginx
-    chart_version: 4.0.17
-    release_namespace: openstack
-    kubeconfig: /etc/kubernetes/admin.conf
-    values:
-      controller:
-        config:
-          proxy-buffer-size: 16k
-        dnsPolicy: ClusterFirstWithHostNet
-        hostNetwork: true
-        ingressClassResource:
-          name: openstack
-        ingressClass: openstack
-        extraArgs:
-          default-ssl-certificate: ingress-nginx/wildcard
-        kind: DaemonSet
-        nodeSelector:
-          openstack-control-plane: enabled
-        service:
-          type: ClusterIP
-        admissionWebhooks:
-          port: 7443
-      defaultBackend:
-        enabled: true
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      - apiVersion: source.toolkit.fluxcd.io/v1beta2
+        kind: HelmRepository
+        metadata:
+          name: ingress-nginx
+          namespace: openstack
+        spec:
+          interval: 60s
+          url: https://kubernetes.github.io/ingress-nginx
+
+      - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+        kind: HelmRelease
+        metadata:
+          name: ingress-nginx
+          namespace: openstack
+        spec:
+          interval: 60s
+          chart:
+            spec:
+              chart: ingress-nginx
+              version: 4.0.17
+              sourceRef:
+                kind: HelmRepository
+                name: ingress-nginx
+          values:
+            controller:
+              config:
+                proxy-buffer-size: 16k
+              dnsPolicy: ClusterFirstWithHostNet
+              hostNetwork: true
+              ingressClassResource:
+                name: openstack
+              ingressClass: openstack
+              extraArgs:
+                default-ssl-certificate: ingress-nginx/wildcard
+              kind: DaemonSet
+              nodeSelector:
+                openstack-control-plane: enabled
+              service:
+                type: ClusterIP
+              admissionWebhooks:
+                port: 7443
+            defaultBackend:
+              enabled: true
diff --git a/roles/ipmi_exporter/tasks/main.yml b/roles/ipmi_exporter/tasks/main.yml
index 58056ef..8c2ca9f 100644
--- a/roles/ipmi_exporter/tasks/main.yml
+++ b/roles/ipmi_exporter/tasks/main.yml
@@ -12,167 +12,162 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-- name: Create ConfigMap
+- name: Deploy service
   kubernetes.core.k8s:
     state: present
     definition:
-      apiVersion: v1
-      kind: ConfigMap
-      metadata:
-        name: ipmi-exporter
-        namespace: monitoring
-        labels:
-          application: ipmi-exporter
-      data:
-        config.yml: "{{ ipmi_exporter_config | to_yaml }}"
-
-- name: Create DaemonSet
-  kubernetes.core.k8s:
-    state: present
-    definition:
-      apiVersion: apps/v1
-      kind: DaemonSet
-      metadata:
-        name: ipmi-exporter
-        namespace: monitoring
-        labels:
-          application: ipmi-exporter
-      spec:
-        selector:
-          matchLabels:
+      - apiVersion: v1
+        kind: ConfigMap
+        metadata:
+          name: ipmi-exporter
+          namespace: monitoring
+          labels:
             application: ipmi-exporter
-        template:
-          metadata:
-            annotations:
-              config-hash: "{{ ipmi_exporter_config | to_yaml | hash('md5') }}"
-            labels:
+        data:
+          config.yml: "{{ ipmi_exporter_config | to_yaml }}"
+
+      - apiVersion: apps/v1
+        kind: DaemonSet
+        metadata:
+          name: ipmi-exporter
+          namespace: monitoring
+          labels:
+            application: ipmi-exporter
+        spec:
+          selector:
+            matchLabels:
               application: ipmi-exporter
-              job: ipmi
-          spec:
-            containers:
-              - name: exporter
-                image: "{{ ipmi_exporter_image_repository }}/ipmi-exporter:{{ ipmi_exporter_image_tag }}"
-                ports:
-                  - name: metrics
-                    containerPort: 9290
-                securityContext:
-                  privileged: true
-                volumeMounts:
-                  - name: dev-ipmi0
-                    mountPath: /dev/ipmi0
-                  - name: ipmi-exporter
-                    mountPath: /config.yml
-                    subPath: config.yml
-            volumes:
-              - name: dev-ipmi0
-                hostPath:
-                  path: /dev/ipmi0
-              - name: ipmi-exporter
-                configMap:
-                  name: ipmi-exporter
-            affinity:
-              nodeAffinity:
-                requiredDuringSchedulingIgnoredDuringExecution:
-                  nodeSelectorTerms:
-                    - matchExpressions:
-                        - key: feature.node.kubernetes.io/cpu-cpuid.HYPERVISOR
-                          operator: NotIn
-                          values: ["true"]
+          template:
+            metadata:
+              annotations:
+                config-hash: "{{ ipmi_exporter_config | to_yaml | hash('md5') }}"
+              labels:
+                application: ipmi-exporter
+                job: ipmi
+            spec:
+              containers:
+                - name: exporter
+                  image: "{{ ipmi_exporter_image_repository }}/ipmi-exporter:{{ ipmi_exporter_image_tag }}"
+                  ports:
+                    - name: metrics
+                      containerPort: 9290
+                  securityContext:
+                    privileged: true
+                  volumeMounts:
+                    - name: dev-ipmi0
+                      mountPath: /dev/ipmi0
+                    - name: ipmi-exporter
+                      mountPath: /config.yml
+                      subPath: config.yml
+              volumes:
+                - name: dev-ipmi0
+                  hostPath:
+                    path: /dev/ipmi0
+                - name: ipmi-exporter
+                  configMap:
+                    name: ipmi-exporter
+              affinity:
+                nodeAffinity:
+                  requiredDuringSchedulingIgnoredDuringExecution:
+                    nodeSelectorTerms:
+                      - matchExpressions:
+                          - key: feature.node.kubernetes.io/cpu-cpuid.HYPERVISOR
+                            operator: NotIn
+                            values: ["true"]
 
-- name: Create PodMonitor
-  kubernetes.core.k8s:
-    state: present
-    definition:
-      apiVersion: monitoring.coreos.com/v1
-      kind: PodMonitor
-      metadata:
-        name: ipmi-exporter
-        namespace: monitoring
-        labels:
-          application: ipmi-exporter
-          release: kube-prometheus-stack
-      spec:
-        jobLabel: job
-        podMetricsEndpoints:
-          - port: metrics
-            path: /metrics
-            interval: 60s
-            relabelings:
-              - sourceLabels: ["__meta_kubernetes_pod_node_name"]
-                targetLabel: instance
-              - action: labeldrop
-                regex: ^(container|endpoint|namespace|pod)$
-        selector:
-          matchLabels:
+      - apiVersion: monitoring.coreos.com/v1
+        kind: PodMonitor
+        metadata:
+          name: ipmi-exporter
+          namespace: monitoring
+          labels:
             application: ipmi-exporter
+            release: kube-prometheus-stack
+        spec:
+          jobLabel: job
+          podMetricsEndpoints:
+            - port: metrics
+              path: /metrics
+              interval: 60s
+              relabelings:
+                - sourceLabels: ["__meta_kubernetes_pod_node_name"]
+                  targetLabel: instance
+                - action: labeldrop
+                  regex: ^(container|endpoint|namespace|pod)$
+          selector:
+            matchLabels:
+              application: ipmi-exporter
 
-- name: Create PrometheusRule
-  kubernetes.core.k8s:
-    state: present
-    definition:
-      apiVersion: monitoring.coreos.com/v1
-      kind: PrometheusRule
-      metadata:
-        name: ipmi-exporter
-        namespace: monitoring
-        labels:
-          application: ipmi-exporter
-          release: kube-prometheus-stack
-      spec:
-        groups:
-          - name: rules
-            rules:
-              - alert: IpmiCollectorDown
-                expr: ipmi_up == 0
-          - name: collectors-state-warning
-            rules:
-              - alert: IpmiCurrent
-                expr: ipmi_current_state == 1
-                labels:
-                  severity: warning
-              - alert: IpmiFanSpeed
-                expr: ipmi_fan_speed_state == 1
-                labels:
-                  severity: warning
-              - alert: IpmiPower
-                expr: ipmi_power_state == 1
-                labels:
-                  severity: warning
-              - alert: IpmiSensor
-                expr: ipmi_sensor_state == 1
-                labels:
-                  severity: warning
-              - alert: IpmiTemperature
-                expr: ipmi_temperature_state == 1
-                labels:
-                  severity: warning
-              - alert: IpmiVoltage
-                expr: ipmi_voltage_state == 1
-                labels:
-                  severity: warning
-          - name: collectors-state-critical
-            rules:
-              - alert: IpmiCurrent
-                expr: ipmi_current_state == 2
-                labels:
-                  severity: critical
-              - alert: IpmiFanSpeed
-                expr: ipmi_fan_speed_state == 2
-                labels:
-                  severity: critical
-              - alert: IpmiPower
-                expr: ipmi_power_state == 2
-                labels:
-                  severity: critical
-              - alert: IpmiSensor
-                expr: ipmi_sensor_state == 2
-                labels:
-                  severity: critical
-              - alert: IpmiTemperature
-                expr: ipmi_temperature_state == 2
-                labels:
-                  severity: critical
-              - alert: IpmiVoltage
-                expr: ipmi_voltage_state == 2
-                labels:
-                  severity: critical
+      - apiVersion: monitoring.coreos.com/v1
+        kind: PrometheusRule
+        metadata:
+          name: ipmi-exporter
+          namespace: monitoring
+          labels:
+            application: ipmi-exporter
+            release: kube-prometheus-stack
+        spec:
+          groups:
+            - name: rules
+              rules:
+                - alert: IpmiCollectorDown
+                  expr: ipmi_up == 0
+            - name: collectors-state-warning
+              rules:
+                - alert: IpmiCurrent
+                  expr: ipmi_current_state == 1
+                  labels:
+                    severity: warning
+                - alert: IpmiFanSpeed
+                  expr: ipmi_fan_speed_state == 1
+                  labels:
+                    severity: warning
+                - alert: IpmiPower
+                  expr: ipmi_power_state == 1
+                  labels:
+                    severity: warning
+                - alert: IpmiSensor
+                  expr: ipmi_sensor_state == 1
+                  labels:
+                    severity: warning
+                - alert: IpmiTemperature
+                  expr: ipmi_temperature_state == 1
+                  labels:
+                    severity: warning
+                - alert: IpmiVoltage
+                  expr: ipmi_voltage_state == 1
+                  labels:
+                    severity: warning
+            - name: collectors-state-critical
+              rules:
+                - alert: IpmiCurrent
+                  expr: ipmi_current_state == 2
+                  labels:
+                    severity: critical
+                - alert: IpmiFanSpeed
+                  expr: ipmi_fan_speed_state == 2
+                  labels:
+                    severity: critical
+                - alert: IpmiPower
+                  expr: ipmi_power_state == 2
+                  labels:
+                    severity: critical
+                - alert: IpmiSensor
+                  expr: ipmi_sensor_state == 2
+                  labels:
+                    severity: critical
+                - alert: IpmiTemperature
+                  expr: ipmi_temperature_state == 2
+                  labels:
+                    severity: critical
+                - alert: IpmiVoltage
+                  expr: ipmi_voltage_state == 2
+                  labels:
+                    severity: critical
+  # NOTE(mnaser): Since we haven't moved to the operator pattern yet, we need to
+  #               keep retrying a few times as the CRDs might not be installed
+  #               yet.
+  retries: 60
+  delay: 5
+  register: _result
+  until: _result is not failed
diff --git a/roles/keepalived/tasks/main.yml b/roles/keepalived/tasks/main.yml
index 8e4a583..dedddfc 100644
--- a/roles/keepalived/tasks/main.yml
+++ b/roles/keepalived/tasks/main.yml
@@ -12,195 +12,170 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-- name: Create Secret
+- name: Deploy service
   when: keepalived_enabled | bool
   kubernetes.core.k8s:
     state: present
     definition:
-      apiVersion: v1
-      kind: Secret
-      metadata:
-        name: keepalived-etc
-        namespace: openstack
-      stringData:
-        keepalived.conf: |
-          global_defs {
-            default_interface {{ keepalived_interface }}
-          }
-
-          vrrp_instance VI_1 {
-            interface {{ keepalived_interface }}
-
-            state BACKUP
-            virtual_router_id {{ keepalived_vrid }}
-            priority 150
-            nopreempt
-
-            virtual_ipaddress {
-              {{ keepalived_vip }}
+      - apiVersion: v1
+        kind: Secret
+        metadata:
+          name: keepalived-etc
+          namespace: openstack
+        stringData:
+          keepalived.conf: |
+            global_defs {
+              default_interface {{ keepalived_interface }}
             }
 
-            authentication {
-              auth_type PASS
-              auth_pass {{ keepalived_password }}
+            vrrp_instance VI_1 {
+              interface {{ keepalived_interface }}
+
+              state BACKUP
+              virtual_router_id {{ keepalived_vrid }}
+              priority 150
+              nopreempt
+
+              virtual_ipaddress {
+                {{ keepalived_vip }}
+              }
+
+              authentication {
+                auth_type PASS
+                auth_pass {{ keepalived_password }}
+              }
             }
-          }
 
-- name: Create ConfigMap
-  when: keepalived_enabled | bool
-  kubernetes.core.k8s:
-    state: present
-    definition:
-      apiVersion: v1
-      kind: ConfigMap
-      metadata:
-        name: keepalived-bin
-        namespace: openstack
-      data:
-        wait-for-ip.sh: |
-          #!/bin/sh -x
+      - apiVersion: v1
+        kind: ConfigMap
+        metadata:
+          name: keepalived-bin
+          namespace: openstack
+        data:
+          wait-for-ip.sh: |
+            #!/bin/sh -x
 
-          while true; do
-              ip -4 addr list dev {{ keepalived_interface }} | grep {{ keepalived_interface }}
+            while true; do
+                ip -4 addr list dev {{ keepalived_interface }} | grep {{ keepalived_interface }}
 
-              # We detected an IP address
-              if [ $? -eq 0 ]; then
-                  break
-              fi
+                # We detected an IP address
+                if [ $? -eq 0 ]; then
+                    break
+                fi
 
-              sleep 1
-          done
+                sleep 1
+            done
 
-- name: Create Role
-  when: keepalived_enabled | bool
-  kubernetes.core.k8s:
-    state: present
-    definition:
-      apiVersion: rbac.authorization.k8s.io/v1
-      kind: Role
-      metadata:
-        name: keepalived
-        namespace: openstack
-      rules:
-        - apiGroups:
-            - ""
-          resources:
-            - pods
-          verbs:
-            - list
-            - get
-
-- name: Create ServiceAccount
-  when: keepalived_enabled | bool
-  kubernetes.core.k8s:
-    state: present
-    definition:
-      apiVersion: v1
-      automountServiceAccountToken: true
-      kind: ServiceAccount
-      metadata:
-        name: keepalived
-        namespace: openstack
-
-- name: Create ServiceAccount
-  when: keepalived_enabled | bool
-  kubernetes.core.k8s:
-    state: present
-    definition:
-      apiVersion: rbac.authorization.k8s.io/v1
-      kind: RoleBinding
-      metadata:
-        name: keepalived
-        namespace: openstack
-      roleRef:
-        apiGroup: rbac.authorization.k8s.io
+      - apiVersion: rbac.authorization.k8s.io/v1
         kind: Role
-        name: keepalived
-      subjects:
-        - kind: ServiceAccount
+        metadata:
+          name: keepalived
+          namespace: openstack
+        rules:
+          - apiGroups:
+              - ""
+            resources:
+              - pods
+            verbs:
+              - list
+              - get
+
+      - apiVersion: v1
+        automountServiceAccountToken: true
+        kind: ServiceAccount
+        metadata:
           name: keepalived
           namespace: openstack
 
-- name: Create DaemonSet
-  when: keepalived_enabled | bool
-  kubernetes.core.k8s:
-    state: present
-    definition:
-      apiVersion: apps/v1
-      kind: DaemonSet
-      metadata:
-        name: keepalived
-        namespace: openstack
-      spec:
-        selector:
-          matchLabels:
-            application: keepalived
-        template:
-          metadata:
-            labels:
+      - apiVersion: rbac.authorization.k8s.io/v1
+        kind: RoleBinding
+        metadata:
+          name: keepalived
+          namespace: openstack
+        roleRef:
+          apiGroup: rbac.authorization.k8s.io
+          kind: Role
+          name: keepalived
+        subjects:
+          - kind: ServiceAccount
+            name: keepalived
+            namespace: openstack
+
+      - apiVersion: apps/v1
+        kind: DaemonSet
+        metadata:
+          name: keepalived
+          namespace: openstack
+        spec:
+          selector:
+            matchLabels:
               application: keepalived
-          spec:
-            automountServiceAccountToken: true
-            initContainers:
-              - name: init
-                image: "{{ keepalived_image_repository }}/kubernetes-entrypoint:latest"
-                env:
-                  - name: NAMESPACE
-                    valueFrom:
-                      fieldRef:
-                        apiVersion: v1
-                        fieldPath: metadata.namespace
-                  - name: POD_NAME
-                    valueFrom:
-                      fieldRef:
-                        apiVersion: v1
-                        fieldPath: metadata.name
-                  - name: DEPENDENCY_POD_JSON
-                    value: '[{"labels":{"application":"neutron","component":"neutron-ovs-agent"},"requireSameNode":true}]'
-              - name: wait-for-ip
-                image: "{{ keepalived_image_repository }}/keepalived:{{ keepalived_image_tag }}"
-                command:
-                  - /bin/wait-for-ip.sh
-                volumeMounts:
-                  - mountPath: /bin/wait-for-ip.sh
-                    mountPropagation: None
+          template:
+            metadata:
+              labels:
+                application: keepalived
+            spec:
+              automountServiceAccountToken: true
+              initContainers:
+                - name: init
+                  image: "{{ keepalived_image_repository }}/kubernetes-entrypoint:latest"
+                  env:
+                    - name: NAMESPACE
+                      valueFrom:
+                        fieldRef:
+                          apiVersion: v1
+                          fieldPath: metadata.namespace
+                    - name: POD_NAME
+                      valueFrom:
+                        fieldRef:
+                          apiVersion: v1
+                          fieldPath: metadata.name
+                    - name: DEPENDENCY_POD_JSON
+                      value: '[{"labels":{"application":"neutron","component":"neutron-ovs-agent"},"requireSameNode":true}]'
+                - name: wait-for-ip
+                  image: "{{ keepalived_image_repository }}/keepalived:{{ keepalived_image_tag }}"
+                  command:
+                    - /bin/wait-for-ip.sh
+                  volumeMounts:
+                    - mountPath: /bin/wait-for-ip.sh
+                      mountPropagation: None
+                      name: keepalived-bin
+                      readOnly: true
+                      subPath: wait-for-ip.sh
+              containers:
+                - name: keepalived
+                  image: "{{ keepalived_image_repository }}/keepalived:{{ keepalived_image_tag }}"
+                  command:
+                    - keepalived
+                    - -f
+                    - /etc/keepalived/keepalived.conf
+                    - --dont-fork
+                    - --log-console
+                    - --log-detail
+                    - --dump-conf
+                  securityContext:
+                    allowPrivilegeEscalation: true
+                    capabilities:
+                      add:
+                        - NET_ADMIN
+                        - NET_BROADCAST
+                        - NET_RAW
+                  volumeMounts:
+                    - mountPath: /etc/keepalived
+                      mountPropagation: None
+                      name: keepalived-etc
+                      readOnly: true
+              hostNetwork: true
+              nodeSelector:
+                openstack-control-plane: enabled
+              serviceAccountName: keepalived
+              volumes:
+                - name: keepalived-etc
+                  secret:
+                    optional: false
+                    secretName: keepalived-etc
+                - configMap:
+                    defaultMode: 0755
                     name: keepalived-bin
-                    readOnly: true
-                    subPath: wait-for-ip.sh
-            containers:
-              - name: keepalived
-                image: "{{ keepalived_image_repository }}/keepalived:{{ keepalived_image_tag }}"
-                command:
-                  - keepalived
-                  - -f
-                  - /etc/keepalived/keepalived.conf
-                  - --dont-fork
-                  - --log-console
-                  - --log-detail
-                  - --dump-conf
-                securityContext:
-                  allowPrivilegeEscalation: true
-                  capabilities:
-                    add:
-                      - NET_ADMIN
-                      - NET_BROADCAST
-                      - NET_RAW
-                volumeMounts:
-                  - mountPath: /etc/keepalived
-                    mountPropagation: None
-                    name: keepalived-etc
-                    readOnly: true
-            hostNetwork: true
-            nodeSelector:
-              openstack-control-plane: enabled
-            serviceAccountName: keepalived
-            volumes:
-              - name: keepalived-etc
-                secret:
-                  optional: false
-                  secretName: keepalived-etc
-              - configMap:
-                  defaultMode: 0755
+                    optional: false
                   name: keepalived-bin
-                  optional: false
-                name: keepalived-bin
diff --git a/roles/kube_prometheus_stack/meta/main.yml b/roles/kube_prometheus_stack/meta/main.yml
index a2141a2..560fc41 100644
--- a/roles/kube_prometheus_stack/meta/main.yml
+++ b/roles/kube_prometheus_stack/meta/main.yml
@@ -21,9 +21,3 @@
     - name: Ubuntu
       versions:
         - focal
-
-dependencies:
-  - role: helm_repository
-    vars:
-      helm_repository_name: prometheus-community
-      helm_repository_repo_url: https://prometheus-community.github.io/helm-charts
diff --git a/roles/kube_prometheus_stack/tasks/main.yml b/roles/kube_prometheus_stack/tasks/main.yml
index 4e866a9..da6a0f3 100644
--- a/roles/kube_prometheus_stack/tasks/main.yml
+++ b/roles/kube_prometheus_stack/tasks/main.yml
@@ -50,34 +50,54 @@
         healthcheck-client.crt: "{{ _etcd_healthcheck_client_crt.content }}"
         healthcheck-client.key: "{{ _etcd_healthcheck_client_key.content }}"
 
-- name: Create CRDs for Prometheus Operator
+- name: Deploy Helm chart
   kubernetes.core.k8s:
     state: present
-    definition: "{{ lookup('ansible.builtin.url', 'https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_' ~ item ~ '.yaml', split_lines=false) | regex_replace('- =$', '- \"=\"', multiline=True) | from_yaml_all }}" # yamllint disable-line rule:line-length
-    apply: true
-    server_side_apply:
-      field_manager: Atmosphere
-      force_conflicts: true
-  loop:
-    - alertmanagerconfigs
-    - alertmanagers
-    - podmonitors
-    - probes
-    - prometheuses
-    - prometheusrules
-    - servicemonitors
-    - thanosrulers
-  # NOTE(mnaser): We replace `- =` with `- "="` to avoid a YAML error, this also
-  #               breaks idempotency so we flip to `changed_when: false`.
-  #               See: https://github.com/yaml/pyyaml/issues/619
-  changed_when: false
+    definition:
+      - apiVersion: source.toolkit.fluxcd.io/v1beta2
+        kind: HelmRepository
+        metadata:
+          name: prometheus-community
+          namespace: monitoring
+        spec:
+          interval: 60s
+          url: https://prometheus-community.github.io/helm-charts
 
-- name: Deploy Helm chart
-  kubernetes.core.helm:
+      - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+        kind: HelmRelease
+        metadata:
+          name: kube-prometheus-stack
+          namespace: monitoring
+        spec:
+          interval: 60s
+          chart:
+            spec:
+              chart: kube-prometheus-stack
+              version: 36.2.0
+              sourceRef:
+                kind: HelmRepository
+                name: prometheus-community
+          install:
+            crds: CreateReplace
+          upgrade:
+            crds: CreateReplace
+          values: "{{ _kube_prometheus_stack_values | combine(kube_prometheus_stack_values, recursive=True) }}"
+
+# NOTE(mnaser): Since runs are so fast, the Prometheus Operator resources are
+#               created before the MutatingWebhookConfiguration is ready, so
+#               the resources end up being changed on the second apply.
+#
+#               The workaround for now is to wait for the operator to come up,
+#               which means the MutatingWebhookConfiguration is also ready.
+- name: Wait for the Helm chart to be deployed
+  kubernetes.core.k8s_info:
+    api_version: helm.toolkit.fluxcd.io/v2beta1
+    kind: HelmRelease
     name: kube-prometheus-stack
-    chart_ref: prometheus-community/kube-prometheus-stack
-    chart_version: 36.2.0
-    release_namespace: monitoring
-    kubeconfig: /etc/kubernetes/admin.conf
-    values: "{{ _kube_prometheus_stack_values | combine(kube_prometheus_stack_values, recursive=True) }}"
+    namespace: monitoring
+    wait_sleep: 5
+    wait_timeout: 600
     wait: true
+    wait_condition:
+      type: Ready
+      status: true
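
The wait above keys on the Ready condition that the helm-controller sets once
reconciliation succeeds; the status stanza it polls for looks roughly like
this (illustrative values):

status:
  conditions:
    - type: Ready
      status: "True"
      reason: ReconciliationSucceeded
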
diff --git a/roles/node_feature_discovery/meta/main.yml b/roles/node_feature_discovery/meta/main.yml
index e8aae1c..09f196a 100644
--- a/roles/node_feature_discovery/meta/main.yml
+++ b/roles/node_feature_discovery/meta/main.yml
@@ -21,9 +21,3 @@
     - name: Ubuntu
       versions:
         - focal
-
-dependencies:
-  - role: helm_repository
-    vars:
-      helm_repository_name: node-feature-discovery
-      helm_repository_repo_url: https://kubernetes-sigs.github.io/node-feature-discovery/charts
diff --git a/roles/node_feature_discovery/tasks/main.yml b/roles/node_feature_discovery/tasks/main.yml
index c37e1e5..39d3c14 100644
--- a/roles/node_feature_discovery/tasks/main.yml
+++ b/roles/node_feature_discovery/tasks/main.yml
@@ -13,16 +13,36 @@
 # under the License.
 
 - name: Deploy Helm chart
-  kubernetes.core.helm:
-    name: node-feature-discovery
-    chart_ref: node-feature-discovery/node-feature-discovery
-    chart_version: 0.10.0
-    release_namespace: monitoring
-    kubeconfig: /etc/kubernetes/admin.conf
-    values:
-      image:
-        repository: "{{ atmosphere_image_repository | default('us-docker.pkg.dev/vexxhost-infra/openstack') }}/node-feature-discovery"
-        tag: 0.10.0
-      master:
-        nodeSelector:
-          openstack-control-plane: enabled
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      - apiVersion: source.toolkit.fluxcd.io/v1beta2
+        kind: HelmRepository
+        metadata:
+          name: node-feature-discovery
+          namespace: monitoring
+        spec:
+          interval: 60s
+          url: https://kubernetes-sigs.github.io/node-feature-discovery/charts
+
+      - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+        kind: HelmRelease
+        metadata:
+          name: node-feature-discovery
+          namespace: monitoring
+        spec:
+          interval: 60s
+          chart:
+            spec:
+              chart: node-feature-discovery
+              version: 0.10.0
+              sourceRef:
+                kind: HelmRepository
+                name: node-feature-discovery
+          values:
+            image:
+              repository: "{{ atmosphere_image_repository | default('us-docker.pkg.dev/vexxhost-infra/openstack') }}/node-feature-discovery"
+              tag: 0.10.0
+            master:
+              nodeSelector:
+                openstack-control-plane: enabled
diff --git a/roles/openstack_exporter/tasks/main.yml b/roles/openstack_exporter/tasks/main.yml
index 18cc16e..d809237 100644
--- a/roles/openstack_exporter/tasks/main.yml
+++ b/roles/openstack_exporter/tasks/main.yml
@@ -29,363 +29,354 @@
     project: service
     domain: service
 
-- name: Create Secret
+- name: Deploy service
   kubernetes.core.k8s:
     state: present
     definition:
-      apiVersion: v1
-      kind: Secret
-      metadata:
-        name: openstack-config
-        namespace: monitoring
-      type: Opaque
-      stringData:
-        clouds.yaml: |
-          clouds:
-            openstack:
-              auth:
-                auth_url: http://keystone-api.openstack.svc.cluster.local:5000
-                project_domain_name: service
-                project_name: service
-                user_domain_name: service
-                username: openstack-exporter-{{ openstack_helm_endpoints_region_name }}
-                password: {{ openstack_helm_endpoints_openstack_exporter_keystone_password }}
-              region_name: {{ openstack_helm_endpoints_region_name }}
-              interface: internal
-              identity_api_version: 3
-              identity_interface: internal
+      - apiVersion: v1
+        kind: Secret
+        metadata:
+          name: openstack-config
+          namespace: monitoring
+        type: Opaque
+        stringData:
+          clouds.yaml: |
+            clouds:
+              openstack:
+                auth:
+                  auth_url: http://keystone-api.openstack.svc.cluster.local:5000
+                  project_domain_name: service
+                  project_name: service
+                  user_domain_name: service
+                  username: openstack-exporter-{{ openstack_helm_endpoints_region_name }}
+                  password: {{ openstack_helm_endpoints_openstack_exporter_keystone_password }}
+                region_name: {{ openstack_helm_endpoints_region_name }}
+                interface: internal
+                identity_api_version: 3
+                identity_interface: internal
 
-- name: Create deploy
-  kubernetes.core.k8s:
-    state: present
-    definition:
-      apiVersion: apps/v1
-      kind: Deployment
-      metadata:
-        name: openstack-exporter
-        namespace: monitoring
-        labels:
-          application: openstack-exporter
-      spec:
-        replicas: 1
-        selector:
-          matchLabels:
+      - apiVersion: apps/v1
+        kind: Deployment
+        metadata:
+          name: openstack-exporter
+          namespace: monitoring
+          labels:
             application: openstack-exporter
-        template:
-          metadata:
-            labels:
+        spec:
+          replicas: 1
+          selector:
+            matchLabels:
               application: openstack-exporter
-          spec:
-            nodeSelector:
-              openstack-control-plane: enabled
-            containers:
-              - name: openstack-exporter
-                image: "{{ openstack_exporter_image_repository }}/openstack-exporter-linux-amd64:{{ openstack_exporter_image_tag }}"
-                args:
-                  - --endpoint-type
-                  - internal
-                  - default
-                  - --collect-metric-time
-                  - --disable-service.identity
-                  - --disable-service.image
-                  - --disable-metric=cinder-limits_volume_max_gb
-                  - --disable-metric=cinder-limits_volume_used_gb
-                  - --disable-metric=cinder-volumes
-                  - --disable-metric=cinder-volume_status
-                  - --disable-metric=neutron-floating_ips
-                  - --disable-metric=neutron-networks
-                  - --disable-metric=neutron-security_groups
-                  - --disable-metric=neutron-subnets
-                  - --disable-metric=neutron-routers
-                  - --disable-metric=nova-flavors
-                  - --disable-metric=nova-availability_zones
-                  - --disable-metric=nova-security_groups
-                  - --disable-metric=nova-limits_vcpus_max
-                  - --disable-metric=nova-limits_vcpus_used
-                  - --disable-metric=nova-limits_memory_max
-                  - --disable-metric=nova-limits_memory_used
-                port:
-                  name: metrics
-                  containerPort: 9180
-                volumeMounts:
-                  - name: openstack-config
-                    mountPath: "/etc/openstack"
-            volumes:
-              - name: openstack-config
-                secret:
-                  secretName: openstack-config
+          template:
+            metadata:
+              labels:
+                application: openstack-exporter
+            spec:
+              nodeSelector:
+                openstack-control-plane: enabled
+              containers:
+                - name: openstack-exporter
+                  image: "{{ openstack_exporter_image_repository }}/openstack-exporter-linux-amd64:{{ openstack_exporter_image_tag }}"
+                  args:
+                    - --endpoint-type
+                    - internal
+                    - default
+                    - --collect-metric-time
+                    - --disable-service.identity
+                    - --disable-service.image
+                    - --disable-metric=cinder-limits_volume_max_gb
+                    - --disable-metric=cinder-limits_volume_used_gb
+                    - --disable-metric=cinder-volumes
+                    - --disable-metric=cinder-volume_status
+                    - --disable-metric=neutron-floating_ips
+                    - --disable-metric=neutron-networks
+                    - --disable-metric=neutron-security_groups
+                    - --disable-metric=neutron-subnets
+                    - --disable-metric=neutron-routers
+                    - --disable-metric=nova-flavors
+                    - --disable-metric=nova-availability_zones
+                    - --disable-metric=nova-security_groups
+                    - --disable-metric=nova-limits_vcpus_max
+                    - --disable-metric=nova-limits_vcpus_used
+                    - --disable-metric=nova-limits_memory_max
+                    - --disable-metric=nova-limits_memory_used
+                  ports:
+                    - name: metrics
+                      containerPort: 9180
+                  volumeMounts:
+                    - name: openstack-config
+                      mountPath: "/etc/openstack"
+              volumes:
+                - name: openstack-config
+                  secret:
+                    secretName: openstack-config
 
-- name: Create service
-  kubernetes.core.k8s:
-    state: present
-    definition:
-      apiVersion: v1
-      kind: Service
-      metadata:
-        name: openstack-exporter
-        namespace: monitoring
-        labels:
-          application: openstack-exporter
-      spec:
-        clusterIP: None
-        ports:
-          - name: metrics
-            port: 9180
-            targetPort: metrics
-        selector:
-          application: openstack-exporter
-
-- name: Create service monitor
-  kubernetes.core.k8s:
-    state: present
-    definition:
-      apiVersion: monitoring.coreos.com/v1
-      kind: ServiceMonitor
-      metadata:
-        name: openstack-exporter
-        namespace: monitoring
-        labels:
-          application: openstack-exporter
-      spec:
-        endpoints:
-          - interval: 1m
-            scrapeTimeout: 30s
-            port: metrics
-            relabelings:
-              - action: replace
-                regex: (.*)
-                replacement: default
-                targetLabel: instance
-        jobLabel: jobLabel
-        namespaceSelector:
-          any: true
-        selector:
-          matchLabels:
+      - apiVersion: v1
+        kind: Service
+        metadata:
+          name: openstack-exporter
+          namespace: monitoring
+          labels:
+            application: openstack-exporter
+        spec:
+          clusterIP: None
+          ports:
+            - name: metrics
+              port: 9180
+              targetPort: metrics
+          selector:
             application: openstack-exporter
 
-- name: Create Prometheus Rule
-  kubernetes.core.k8s:
-    state: present
-    definition:
-      apiVersion: monitoring.coreos.com/v1
-      kind: PrometheusRule
-      metadata:
-        name: openstack-exporter
-        namespace: monitoring
-        labels:
-          application: openstack-exporter
-      spec:
-        groups:
-          - name: cinder
-            rules:
-              - alert: CinderAgentDown
-                annotations:
-                  description: |
-                    '{% raw %}The service {{ $labels.exported_service }} running
-                    on {{ $labels.hostname }} is being reported as down.{% endraw %}'
-                  summary: |
-                    '{% raw %}[{{ $labels.hostname }}] {{ $labels.exported_service }}
-                    down{% endraw %}'
-                expr: |
-                  openstack_cinder_agent_state != 1
-                labels:
-                  severity: warning
-              - alert: CinderAgentDown
-                annotations:
-                  description: |
-                    '{% raw %}The service {{ $labels.exported_service }} running on
-                    {{ $labels.hostname }} is being reported as down for 5 minutes.
-                    This can affect volume operations so it must be resolved as
-                    quickly as possible.{% endraw %}'
-                  summary: |
-                    '{% raw %}[{{ $labels.hostname }}] {{ $labels.exported_service }}
-                    down{% endraw %}'
-                expr: |
-                  openstack_cinder_agent_state != 1
-                for: 5m
-                labels:
-                  severity: critical
-              - alert: CinderAgentDisabled
-                annotations:
-                  description: |
-                    '{% raw %}The service {{ $labels.exported_service }} running on {{ $labels.hostname }}
-                    has been disabled for 60 minutes.  This can affect volume operations so it must be
-                    resolved as quickly as possible.{% endraw %}'
-                  summary: |
-                    '{% raw %}[{{ $labels.hostname }}] {{ $labels.exported_service }}
-                    disabled{% endraw %}'
-                expr: |
-                  openstack_cinder_agent_state{adminState!="enabled"}
-                for: 1h
-                labels:
-                  severity: warning
-              - alert: CinderVolumeInError
-                annotations:
-                  description: |
-                    '{% raw %}The volume {{ $labels.id }} has been in ERROR state for over 24 hours.
-                    It must be cleaned up or removed in order to provide a consistent customer
-                    experience.{% endraw %}'
-                  summary: |
-                    '{% raw %}[{{ $labels.id }}] Volume in ERROR state{% endraw %}'
-                expr: |
-                  openstack_cinder_volume_status{status=~"error.*"}
-                for: 24h
-                labels:
-                  severity: warning
-          - name: neutron
-            rules:
-              - alert: NeutronAgentDown
-                annotations:
-                  description: |
-                    '{% raw %}The service {{ $labels.exported_service }} running on {{ $labels.hostname }}
-                    is being reported as down.{% endraw %}'
-                  summary: |
-                    '{% raw %}[{{ $labels.hostname }}] {{ $labels.exported_service }}
-                    down{% endraw %}'
-                expr: |
-                  openstack_neutron_agent_state != 1
-                labels:
-                  severity: warning
-              - alert: NeutronAgentDown
-                annotations:
-                  description: |
-                    '{% raw %}The service {{ $labels.exported_service }} running on {{ $labels.hostname }}
-                    is being reported as down for 5 minutes. This can affect network operations so it
-                    must be resolved as quickly as possible.{% endraw %}'
-                  summary: |
-                    '{% raw %}[{{ $labels.hostname }}] {{ $labels.exported_service }}
-                    down{% endraw %}'
-                expr: |
-                  openstack_neutron_agent_state != 1
-                for: 5m
-                labels:
-                  severity: critical
-              - alert: NeutronAgentDisabled
-                annotations:
-                  description: |
-                    '{% raw %}The service {{ $labels.exported_service }} running on {{ $labels.hostname }}
-                    has been disabled for 60 minutes.  This can affect network operations so it must be
-                    resolved as quickly as possible.{% endraw %}'
-                  summary: |
-                    '{% raw %}[{{ $labels.hostname }}] {{ $labels.exported_service }}
-                    disabled{% endraw %}'
-                expr: |
-                  openstack_neutron_agent_state{adminState!="up"}
-                for: 1h
-                labels:
-                  severity: warning
-              - alert: NeutronBindingFailedPorts
-                annotations:
-                  description: |
-                    '{% raw %}The NIC {{ $labels.mac_address }} of {{ $labels.device_owner }}
-                    has binding failed port now.{% endraw %}'
-                  summary: |
-                    '{% raw %}[{{ $labels.device_owner }}] {{ $labels.mac_address }}
-                    binding failed{% endraw %}'
-                expr: |
-                  openstack_neutron_port{binding_vif_type="binding_failed"} != 0
-                labels:
-                  severity: warning
-              - alert: NeutronNetworkOutOfIPs
-                annotations:
-                  description: |
-                    '{% raw %}The subnet {{ $labels.subnet_name }} within {{ $labels.network_name }}
-                    is currently at {{ $value }}% utilization.  If the IP addresses run out, it will
-                    impact the provisioning of new ports.{% endraw %}'
-                  summary: |
-                    '{% raw %}[{{ $labels.network_name }}] {{ $labels.subnet_name }}
-                    running out of IPs{% endraw %}'
-                expr: |
-                  sum by (network_id) (openstack_neutron_network_ip_availabilities_used{project_id!=""}) / sum by (network_id)
-                  (openstack_neutron_network_ip_availabilities_total{project_id!=""}) * 100 > 80
-                labels:
-                  severity: warning
-          - name: nova
-            rules:
-              - alert: NovaAgentDown
-                annotations:
-                  description: |
-                    '{% raw %}The service {{ $labels.exported_service }} running on {{ $labels.hostname }}
-                    is being reported as down.{% endraw %}'
-                  summary: |
-                    '{% raw %}[{{ $labels.hostname }}] {{ $labels.exported_service }}
-                    down{% endraw %}'
-                expr: |
-                  openstack_nova_agent_state != 1
-                labels:
-                  severity: warning
-              - alert: NovaAgentDown
-                annotations:
-                  description: |
-                    '{% raw %}The service {{ $labels.exported_service }} running on {{ $labels.hostname }} is
-                    being reported as down.  This can affect compute operations so it must be resolved as
-                    quickly as possible.{% endraw %}'
-                  summary: |
-                    '{% raw %}[{{ $labels.hostname }}] {{ $labels.exported_service }}
-                    down{% endraw %}'
-                expr: |
-                  openstack_nova_agent_state != 1
-                for: 5m
-                labels:
-                  severity: critical
-              - alert: NovaAgentDisabled
-                annotations:
-                  description: |
-                    '{% raw %}The service {{ $labels.exported_service }} running on {{ $labels.hostname }} has been
-                    disabled for 60 minutes.  This can affect compute operations so it must be resolved as quickly
-                    as possible.{% endraw %}'
-                  summary: |
-                    '{% raw %}[{{ $labels.hostname }}] {{ $labels.exported_service }}
-                    disabled{% endraw %}'
-                expr: |
-                  openstack_nova_agent_state{adminState!="enabled"}
-                for: 1h
-                labels:
-                  severity: warning
-              - alert: NovaInstanceInError
-                annotations:
-                  description: |
-                    '{% raw %}The instance {{ $labels.id }} has been in ERROR state for over 24 hours.
-                    It must be cleaned up or removed in order to provide a consistent customer
-                    experience.{% endraw %}'
-                  summary: '{% raw %}[{{ $labels.id }}] Instance in ERROR state{% endraw %}'
-                expr: |
-                  openstack_nova_server_status{status="ERROR"}
-                for: 24h
-                labels:
-                  severity: warning
-              - alert: NovaFailureRisk
-                annotations:
-                  description: |
-                    '{% raw %}The cloud capacity will be at {{ $value }} in the event of the failure of
-                    a single hypervisor which puts the cloud at risk of not being able to recover should
-                    any hypervisor failures occur.  Please ensure that adequate amount of infrastructure
-                    is assigned to this deployment to prevent this.{% endraw %}'
-                  summary: '{% raw %}[nova] Failure risk{% endraw %}'
-                expr: |
-                  (sum(openstack_nova_memory_available_bytes-openstack_nova_memory_used_bytes) - max(openstack_nova_memory_used_bytes))
-                  / sum(openstack_nova_memory_available_bytes-openstack_nova_memory_used_bytes) * 100 < 0.25
-                for: 6h
-                labels:
-                  severity: warning
-              - alert: NovaCapacity
-                annotations:
-                  description: |
-                    '{% raw %}The cloud capacity is currently at `{{ $value }}` which means there is a
-                    risk of running out of capacity due to the timeline required to add new nodes.
-                    Please ensure that adequate amount of infrastructure is assigned to this deployment
-                    to prevent this.{% endraw %}'
-                  summary: '{% raw %}[nova] Capacity risk{% endraw %}'
-                expr: |
-                  sum (
-                      openstack_nova_memory_used_bytes
-                    + on(hostname) group_left(adminState)
-                      (0 * openstack_nova_agent_state{exported_service="nova-compute",adminState="enabled"})
-                  ) / sum (
-                      openstack_nova_memory_available_bytes
-                    + on(hostname) group_left(adminState)
-                      (0 * openstack_nova_agent_state{exported_service="nova-compute",adminState="enabled"})
-                  ) * 100 > 75
-                for: 6h
-                labels:
-                  severity: warning
+      - apiVersion: monitoring.coreos.com/v1
+        kind: ServiceMonitor
+        metadata:
+          name: openstack-exporter
+          namespace: monitoring
+          labels:
+            application: openstack-exporter
+        spec:
+          endpoints:
+            - interval: 1m
+              scrapeTimeout: 30s
+              port: metrics
+              relabelings:
+                - action: replace
+                  regex: (.*)
+                  replacement: default
+                  targetLabel: instance
+          jobLabel: jobLabel
+          namespaceSelector:
+            any: true
+          selector:
+            matchLabels:
+              application: openstack-exporter
+
+      - apiVersion: monitoring.coreos.com/v1
+        kind: PrometheusRule
+        metadata:
+          name: openstack-exporter
+          namespace: monitoring
+          labels:
+            application: openstack-exporter
+        spec:
+          groups:
+            - name: cinder
+              rules:
+                - alert: CinderAgentDown
+                  annotations:
+                    description: |
+                      '{% raw %}The service {{ $labels.exported_service }} running
+                      on {{ $labels.hostname }} is being reported as down.{% endraw %}'
+                    summary: |
+                      '{% raw %}[{{ $labels.hostname }}] {{ $labels.exported_service }}
+                      down{% endraw %}'
+                  expr: |
+                    openstack_cinder_agent_state != 1
+                  labels:
+                    severity: warning
+                - alert: CinderAgentDown
+                  annotations:
+                    description: |
+                      '{% raw %}The service {{ $labels.exported_service }} running on
+                      {{ $labels.hostname }} is being reported as down for 5 minutes.
+                      This can affect volume operations so it must be resolved as
+                      quickly as possible.{% endraw %}'
+                    summary: |
+                      '{% raw %}[{{ $labels.hostname }}] {{ $labels.exported_service }}
+                      down{% endraw %}'
+                  expr: |
+                    openstack_cinder_agent_state != 1
+                  for: 5m
+                  labels:
+                    severity: critical
+                - alert: CinderAgentDisabled
+                  annotations:
+                    description: |
+                      '{% raw %}The service {{ $labels.exported_service }} running on {{ $labels.hostname }}
+                      has been disabled for 60 minutes. This can affect volume operations so it must be
+                      resolved as quickly as possible.{% endraw %}'
+                    summary: |
+                      '{% raw %}[{{ $labels.hostname }}] {{ $labels.exported_service }}
+                      disabled{% endraw %}'
+                  expr: |
+                    openstack_cinder_agent_state{adminState!="enabled"}
+                  for: 1h
+                  labels:
+                    severity: warning
+                - alert: CinderVolumeInError
+                  annotations:
+                    description: |
+                      '{% raw %}The volume {{ $labels.id }} has been in ERROR state for over 24 hours.
+                      It must be cleaned up or removed in order to provide a consistent customer
+                      experience.{% endraw %}'
+                    summary: |
+                      '{% raw %}[{{ $labels.id }}] Volume in ERROR state{% endraw %}'
+                  expr: |
+                    openstack_cinder_volume_status{status=~"error.*"}
+                  for: 24h
+                  labels:
+                    severity: warning
+            - name: neutron
+              rules:
+                - alert: NeutronAgentDown
+                  annotations:
+                    description: |
+                      '{% raw %}The service {{ $labels.exported_service }} running on {{ $labels.hostname }}
+                      is being reported as down.{% endraw %}'
+                    summary: |
+                      '{% raw %}[{{ $labels.hostname }}] {{ $labels.exported_service }}
+                      down{% endraw %}'
+                  expr: |
+                    openstack_neutron_agent_state != 1
+                  labels:
+                    severity: warning
+                - alert: NeutronAgentDown
+                  annotations:
+                    description: |
+                      '{% raw %}The service {{ $labels.exported_service }} running on {{ $labels.hostname }}
+                      is being reported as down for 5 minutes. This can affect network operations so it
+                      must be resolved as quickly as possible.{% endraw %}'
+                    summary: |
+                      '{% raw %}[{{ $labels.hostname }}] {{ $labels.exported_service }}
+                      down{% endraw %}'
+                  expr: |
+                    openstack_neutron_agent_state != 1
+                  for: 5m
+                  labels:
+                    severity: critical
+                - alert: NeutronAgentDisabled
+                  annotations:
+                    description: |
+                      '{% raw %}The service {{ $labels.exported_service }} running on {{ $labels.hostname }}
+                      has been disabled for 60 minutes. This can affect network operations so it must be
+                      resolved as quickly as possible.{% endraw %}'
+                    summary: |
+                      '{% raw %}[{{ $labels.hostname }}] {{ $labels.exported_service }}
+                      disabled{% endraw %}'
+                  expr: |
+                    openstack_neutron_agent_state{adminState!="up"}
+                  for: 1h
+                  labels:
+                    severity: warning
+                - alert: NeutronBindingFailedPorts
+                  annotations:
+                    description: |
+                      '{% raw %}The NIC {{ $labels.mac_address }} of {{ $labels.device_owner }}
+                      currently has a port that failed to bind.{% endraw %}'
+                    summary: |
+                      '{% raw %}[{{ $labels.device_owner }}] {{ $labels.mac_address }}
+                      binding failed{% endraw %}'
+                  expr: |
+                    openstack_neutron_port{binding_vif_type="binding_failed"} != 0
+                  labels:
+                    severity: warning
+                - alert: NeutronNetworkOutOfIPs
+                  annotations:
+                    description: |
+                      '{% raw %}The subnet {{ $labels.subnet_name }} within {{ $labels.network_name }}
+                      is currently at {{ $value }}% utilization. If the IP addresses run out, it will
+                      impact the provisioning of new ports.{% endraw %}'
+                    summary: |
+                      '{% raw %}[{{ $labels.network_name }}] {{ $labels.subnet_name }}
+                      running out of IPs{% endraw %}'
+                  expr: |
+                    sum by (network_id) (openstack_neutron_network_ip_availabilities_used{project_id!=""}) / sum by (network_id)
+                    (openstack_neutron_network_ip_availabilities_total{project_id!=""}) * 100 > 80
+                  labels:
+                    severity: warning
+            - name: nova
+              rules:
+                - alert: NovaAgentDown
+                  annotations:
+                    description: |
+                      '{% raw %}The service {{ $labels.exported_service }} running on {{ $labels.hostname }}
+                      is being reported as down.{% endraw %}'
+                    summary: |
+                      '{% raw %}[{{ $labels.hostname }}] {{ $labels.exported_service }}
+                      down{% endraw %}'
+                  expr: |
+                    openstack_nova_agent_state != 1
+                  labels:
+                    severity: warning
+                - alert: NovaAgentDown
+                  annotations:
+                    description: |
+                      '{% raw %}The service {{ $labels.exported_service }} running on {{ $labels.hostname }}
+                      is being reported as down for 5 minutes. This can affect compute operations so it
+                      must be resolved as quickly as possible.{% endraw %}'
+                    summary: |
+                      '{% raw %}[{{ $labels.hostname }}] {{ $labels.exported_service }}
+                      down{% endraw %}'
+                  expr: |
+                    openstack_nova_agent_state != 1
+                  for: 5m
+                  labels:
+                    severity: critical
+                - alert: NovaAgentDisabled
+                  annotations:
+                    description: |
+                      '{% raw %}The service {{ $labels.exported_service }} running on {{ $labels.hostname }} has been
+                      disabled for 60 minutes. This can affect compute operations so it must be resolved as quickly
+                      as possible.{% endraw %}'
+                    summary: |
+                      '{% raw %}[{{ $labels.hostname }}] {{ $labels.exported_service }}
+                      disabled{% endraw %}'
+                  expr: |
+                    openstack_nova_agent_state{adminState!="enabled"}
+                  for: 1h
+                  labels:
+                    severity: warning
+                - alert: NovaInstanceInError
+                  annotations:
+                    description: |
+                      '{% raw %}The instance {{ $labels.id }} has been in ERROR state for over 24 hours.
+                      It must be cleaned up or removed in order to provide a consistent customer
+                      experience.{% endraw %}'
+                    summary: '{% raw %}[{{ $labels.id }}] Instance in ERROR state{% endraw %}'
+                  expr: |
+                    openstack_nova_server_status{status="ERROR"}
+                  for: 24h
+                  labels:
+                    severity: warning
+                - alert: NovaFailureRisk
+                  annotations:
+                    description: |
+                      '{% raw %}The cloud capacity will be at {{ $value }}% in the event of the failure
+                      of a single hypervisor, which puts the cloud at risk of not being able to recover
+                      should any hypervisor failures occur. Please ensure that an adequate amount of
+                      infrastructure is assigned to this deployment to prevent this.{% endraw %}'
+                    summary: '{% raw %}[nova] Failure risk{% endraw %}'
+                  expr: |
+                    (sum(openstack_nova_memory_available_bytes-openstack_nova_memory_used_bytes) - max(openstack_nova_memory_used_bytes))
+                    / sum(openstack_nova_memory_available_bytes-openstack_nova_memory_used_bytes) * 100 < 0.25
+                  for: 6h
+                  labels:
+                    severity: warning
+                - alert: NovaCapacity
+                  annotations:
+                    description: |
+                      '{% raw %}The cloud capacity is currently at `{{ $value }}%`, which means there is
+                      a risk of running out of capacity due to the lead time required to add new nodes.
+                      Please ensure that an adequate amount of infrastructure is assigned to this
+                      deployment to prevent this.{% endraw %}'
+                    summary: '{% raw %}[nova] Capacity risk{% endraw %}'
+                  expr: |
+                    sum (
+                        openstack_nova_memory_used_bytes
+                      + on(hostname) group_left(adminState)
+                        (0 * openstack_nova_agent_state{exported_service="nova-compute",adminState="enabled"})
+                    ) / sum (
+                        openstack_nova_memory_available_bytes
+                      + on(hostname) group_left(adminState)
+                        (0 * openstack_nova_agent_state{exported_service="nova-compute",adminState="enabled"})
+                    ) * 100 > 75
+                  for: 6h
+                  labels:
+                    severity: warning
+  # NOTE(mnaser): Since we haven't moved to the operator pattern yet, we need
+  #               to keep retrying (for up to 5 minutes) as the CRDs might not
+  #               be installed yet.
+  retries: 60
+  delay: 5
+  register: _result
+  until: _result is not failed
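The `retries`/`until` loop above simply keeps re-applying the manifests until the prometheus-operator CRDs exist. An equivalent, more explicit approach is to wait for the CRDs first and then apply once; a minimal sketch, assuming the upstream CRD names shipped by kube-prometheus-stack:

    - name: Wait for the Prometheus Operator CRDs
      kubernetes.core.k8s_info:
        api_version: apiextensions.k8s.io/v1
        kind: CustomResourceDefinition
        name: "{{ item }}"
      loop:
        - servicemonitors.monitoring.coreos.com
        - prometheusrules.monitoring.coreos.com
      register: _crds
      retries: 60
      delay: 5
      until: _crds.resources | length > 0  # empty list until the CRD is registered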
diff --git a/roles/openstack_helm_barbican/defaults/main.yml b/roles/openstack_helm_barbican/defaults/main.yml
index c752a56..aad757c 100644
--- a/roles/openstack_helm_barbican/defaults/main.yml
+++ b/roles/openstack_helm_barbican/defaults/main.yml
@@ -11,24 +11,6 @@
 #    :local:
 
 
-# .. envvar:: openstack_helm_barbican_chart_repo_name [[[
-#
-# Helm repository name for the chart.
-openstack_helm_barbican_chart_repo_name: openstack-helm
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_barbican_chart_repo_url [[[
-#
-# Helm repository URL for the chart.
-openstack_helm_barbican_chart_repo_url: https://tarballs.opendev.org/openstack/openstack-helm/
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_barbican_chart_name [[[
-#
-# Helm chart name (will also be used for release name)
-openstack_helm_barbican_chart_name: barbican
-
-                                                                   # ]]]
 # .. envvar:: openstack_helm_barbican_image_repository [[[
 #
 # Image repository location to be prefixed for all images
diff --git a/roles/openstack_helm_barbican/meta/main.yml b/roles/openstack_helm_barbican/meta/main.yml
index d7db878..0a1f600 100644
--- a/roles/openstack_helm_barbican/meta/main.yml
+++ b/roles/openstack_helm_barbican/meta/main.yml
@@ -21,9 +21,3 @@
     - name: Ubuntu
       versions:
         - focal
-
-dependencies:
-  - role: helm_repository
-    vars:
-      helm_repository_name: "{{ openstack_helm_barbican_chart_repo_name }}"
-      helm_repository_repo_url: "{{ openstack_helm_barbican_chart_repo_url }}"
diff --git a/roles/openstack_helm_barbican/tasks/main.yml b/roles/openstack_helm_barbican/tasks/main.yml
index e428b35..11baace 100644
--- a/roles/openstack_helm_barbican/tasks/main.yml
+++ b/roles/openstack_helm_barbican/tasks/main.yml
@@ -16,18 +16,62 @@
   ansible.builtin.include_role:
     name: openstack_helm_endpoints
   vars:
-    openstack_helm_endpoints_repo_name: "{{ openstack_helm_barbican_chart_repo_name }}"
-    openstack_helm_endpoints_repo_url: "{{ openstack_helm_barbican_chart_repo_url }}"
-    openstack_helm_endpoints_chart: "{{ openstack_helm_barbican_chart_name }}"
+    openstack_helm_endpoints_repo_name: openstack-helm
+    openstack_helm_endpoints_repo_url: https://tarballs.opendev.org/openstack/openstack-helm/
+    openstack_helm_endpoints_chart: barbican
 
 - name: Deploy Helm chart
-  kubernetes.core.helm:
-    name: "{{ openstack_helm_barbican_chart_name }}"
-    chart_ref: "{{ openstack_helm_barbican_chart_repo_name }}/{{ openstack_helm_barbican_chart_name }}"
-    chart_version: 0.2.12
-    release_namespace: openstack
-    kubeconfig: /etc/kubernetes/admin.conf
-    values: "{{ _openstack_helm_barbican_values | combine(openstack_helm_barbican_values, recursive=True) }}"
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      - apiVersion: source.toolkit.fluxcd.io/v1beta2
+        kind: HelmRepository
+        metadata:
+          name: openstack-helm
+          namespace: openstack
+        spec:
+          interval: 60s
+          url: https://tarballs.opendev.org/openstack/openstack-helm/
+
+      - apiVersion: v1
+        kind: Secret
+        metadata:
+          name: atmosphere-barbican
+          namespace: openstack
+        stringData:
+          values.yaml: "{{ _openstack_helm_barbican_values | combine(openstack_helm_barbican_values, recursive=True) | to_nice_yaml }}"
+
+      - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+        kind: HelmRelease
+        metadata:
+          name: barbican
+          namespace: openstack
+        spec:
+          interval: 60s
+          chart:
+            spec:
+              chart: barbican
+              version: 0.2.12
+              sourceRef:
+                kind: HelmRepository
+                name: openstack-helm
+          install:
+            disableWait: true
+          valuesFrom:
+            - kind: Secret
+              name: atmosphere-barbican
+            - kind: Secret
+              name: percona-xtradb
+              valuesKey: root
+              targetPath: endpoints.oslo_db.auth.admin.password
+            - kind: Secret
+              name: rabbitmq-barbican-default-user
+              valuesKey: username
+              targetPath: endpoints.oslo_messaging.auth.admin.username
+            - kind: Secret
+              name: rabbitmq-barbican-default-user
+              valuesKey: password
+              targetPath: endpoints.oslo_messaging.auth.admin.password
 
 - name: Create Ingress
   ansible.builtin.include_role:
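The `valuesFrom` list above is order-sensitive: Flux first merges the full `values.yaml` document from the `atmosphere-barbican` Secret, then grafts each individual Secret key (`valuesKey`) onto its dotted `targetPath`. The net result handed to Helm is roughly the following values fragment (a sketch; the angle-bracket placeholders stand for Secret data resolved at reconcile time):

    endpoints:
      oslo_db:
        auth:
          admin:
            password: "<root key of Secret/percona-xtradb>"
      oslo_messaging:
        auth:
          admin:
            username: "<username key of Secret/rabbitmq-barbican-default-user>"
            password: "<password key of Secret/rabbitmq-barbican-default-user>"

The same layering repeats for the other services below, so the per-service RabbitMQ and shared Percona XtraDB credentials never need to be templated into the values Secret itself.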
diff --git a/roles/openstack_helm_cinder/defaults/main.yml b/roles/openstack_helm_cinder/defaults/main.yml
index 5b661bc..97380e2 100644
--- a/roles/openstack_helm_cinder/defaults/main.yml
+++ b/roles/openstack_helm_cinder/defaults/main.yml
@@ -11,24 +11,6 @@
 #    :local:
 
 
-# .. envvar:: openstack_helm_cinder_chart_repo_name [[[
-#
-# Helm repository name for the chart.
-openstack_helm_cinder_chart_repo_name: openstack-helm
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_cinder_chart_repo_url [[[
-#
-# Helm repository URL for the chart.
-openstack_helm_cinder_chart_repo_url: https://tarballs.opendev.org/openstack/openstack-helm/
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_cinder_chart_name [[[
-#
-# Helm chart name (will also be used for release name)
-openstack_helm_cinder_chart_name: cinder
-
-                                                                   # ]]]
 # .. envvar:: openstack_helm_cinder_image_repository [[[
 #
 # Image repository location to be prefixed for all images
diff --git a/roles/openstack_helm_cinder/meta/main.yml b/roles/openstack_helm_cinder/meta/main.yml
index 655009f..1329a76 100644
--- a/roles/openstack_helm_cinder/meta/main.yml
+++ b/roles/openstack_helm_cinder/meta/main.yml
@@ -21,9 +21,3 @@
     - name: Ubuntu
       versions:
         - focal
-
-dependencies:
-  - role: helm_repository
-    vars:
-      helm_repository_name: "{{ openstack_helm_cinder_chart_repo_name }}"
-      helm_repository_repo_url: "{{ openstack_helm_cinder_chart_repo_url }}"
diff --git a/roles/openstack_helm_cinder/tasks/main.yml b/roles/openstack_helm_cinder/tasks/main.yml
index 51abc1c..fef14b2 100644
--- a/roles/openstack_helm_cinder/tasks/main.yml
+++ b/roles/openstack_helm_cinder/tasks/main.yml
@@ -16,18 +16,62 @@
   ansible.builtin.include_role:
     name: openstack_helm_endpoints
   vars:
-    openstack_helm_endpoints_repo_name: "{{ openstack_helm_cinder_chart_repo_name }}"
-    openstack_helm_endpoints_repo_url: "{{ openstack_helm_cinder_chart_repo_url }}"
-    openstack_helm_endpoints_chart: "{{ openstack_helm_cinder_chart_name }}"
+    openstack_helm_endpoints_repo_name: openstack-helm
+    openstack_helm_endpoints_repo_url: https://tarballs.opendev.org/openstack/openstack-helm/
+    openstack_helm_endpoints_chart: cinder
 
 - name: Deploy Helm chart
-  kubernetes.core.helm:
-    name: "{{ openstack_helm_cinder_chart_name }}"
-    chart_ref: "{{ openstack_helm_cinder_chart_repo_name }}/{{ openstack_helm_cinder_chart_name }}"
-    chart_version: 0.2.15
-    release_namespace: openstack
-    kubeconfig: /etc/kubernetes/admin.conf
-    values: "{{ _openstack_helm_cinder_values | combine(openstack_helm_cinder_values, recursive=True) }}"
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      - apiVersion: source.toolkit.fluxcd.io/v1beta2
+        kind: HelmRepository
+        metadata:
+          name: openstack-helm
+          namespace: openstack
+        spec:
+          interval: 60s
+          url: https://tarballs.opendev.org/openstack/openstack-helm/
+
+      - apiVersion: v1
+        kind: Secret
+        metadata:
+          name: atmosphere-cinder
+          namespace: openstack
+        stringData:
+          values.yaml: "{{ _openstack_helm_cinder_values | combine(openstack_helm_cinder_values, recursive=True) | to_nice_yaml }}"
+
+      - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+        kind: HelmRelease
+        metadata:
+          name: cinder
+          namespace: openstack
+        spec:
+          interval: 60s
+          chart:
+            spec:
+              chart: cinder
+              version: 0.2.25
+              sourceRef:
+                kind: HelmRepository
+                name: openstack-helm
+          install:
+            disableWait: true
+          valuesFrom:
+            - kind: Secret
+              name: atmosphere-cinder
+            - kind: Secret
+              name: percona-xtradb
+              valuesKey: root
+              targetPath: endpoints.oslo_db.auth.admin.password
+            - kind: Secret
+              name: rabbitmq-cinder-default-user
+              valuesKey: username
+              targetPath: endpoints.oslo_messaging.auth.admin.username
+            - kind: Secret
+              name: rabbitmq-cinder-default-user
+              valuesKey: password
+              targetPath: endpoints.oslo_messaging.auth.admin.password
 
 - name: Create Ingress
   ansible.builtin.include_role:
diff --git a/roles/openstack_helm_endpoints/tasks/main.yml b/roles/openstack_helm_endpoints/tasks/main.yml
index 7562a0f..4764ad7 100644
--- a/roles/openstack_helm_endpoints/tasks/main.yml
+++ b/roles/openstack_helm_endpoints/tasks/main.yml
@@ -12,68 +12,24 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-- name: Get Helm values if chart is provided
-  block:
-    - name: Get the default values for the Helm chart
-      ansible.builtin.command: helm show values {{ openstack_helm_endpoints_repo_name }}/{{ openstack_helm_endpoints_chart }}
-      changed_when: false
-      register: _helm_show_values
-
-    - name: Retrieve list of all the needed endpoints
-      ansible.builtin.set_fact:
-        openstack_helm_endpoints_list: |-
-          {{ _helm_show_values.stdout | from_yaml | community.general.json_query('keys(endpoints)') | difference(_openstack_helm_endpoints_ignore) }}
+- name: Retrieve list of all the needed endpoints
+  ansible.builtin.set_fact:
+    openstack_helm_endpoints_list: |-
+      {{ lookup('ansible.builtin.url', 'https://opendev.org/openstack/' ~ openstack_helm_endpoints_repo_name ~ '/raw/branch/master/' ~ openstack_helm_endpoints_chart ~ '/values.yaml', split_lines=False) | from_yaml | community.general.json_query('keys(endpoints)') | difference(_openstack_helm_endpoints_ignore) }}
   when:
     - openstack_helm_endpoints_list is not defined or openstack_helm_endpoints_list == None
 
 # NOTE(mnaser): Since we manage one-RabbitMQ per service, we create the RabbitMQ
 #               cluster here and then append the necessary values to be used
 #               inside the `oslo_messaging` section.
-- name: Configure oslo.messaging
-  block:
-    - name: Create RabbitMQ cluster
-      ansible.builtin.include_role:
-        name: rabbitmq
-      vars:
-        rabbitmq_cluster_name: "{{ openstack_helm_endpoints_chart }}"
-
-    - name: Grab RabbitMQ cluster secret
-      kubernetes.core.k8s_info:
-        api_version: v1
-        kind: Secret
-        name: "rabbitmq-{{ openstack_helm_endpoints_chart }}-default-user"
-        namespace: openstack
-      register: _openstack_helm_endpoints_rabbitmq_cluster_secret
-
-    - name: Cache fact with RabbitMQ cluster credentials
-      ansible.builtin.set_fact:
-        _openstack_helm_endpoints_rabbitmq_cluster_username: |-
-          {{ _openstack_helm_endpoints_rabbitmq_cluster_secret.resources[0]['data']['username'] | b64decode }}
-        _openstack_helm_endpoints_rabbitmq_cluster_password: |-
-          {{ _openstack_helm_endpoints_rabbitmq_cluster_secret.resources[0]['data']['password'] | b64decode }}
+- name: Create RabbitMQ cluster
+  ansible.builtin.include_role:
+    name: rabbitmq
+  vars:
+    rabbitmq_cluster_name: "{{ openstack_helm_endpoints_chart }}"
   when:
     - '"oslo_messaging" in openstack_helm_endpoints_list'
 
-# NOTE(mnaser): Since we deploy the database using the operator and we let it
-#               generate the root password, we look it up if the fact has not
-#               been cached from a previous run.
-- name: Append endpoints for "oslo_db"
-  block:
-    - name: Grab Percona XtraDB cluster secret
-      kubernetes.core.k8s_info:
-        api_version: v1
-        kind: Secret
-        name: percona-xtradb
-        namespace: openstack
-      register: _openstack_helm_endpoints_oslo_db_secret
-
-    - name: Cache fact with Percona XtraDB password
-      ansible.builtin.set_fact:
-        openstack_helm_endpoints_maridb_admin_password: "{{ _openstack_helm_endpoints_oslo_db_secret.resources[0]['data']['root'] | b64decode }}"
-  when:
-    - '"oslo_db" in openstack_helm_endpoints_list'
-    - openstack_helm_endpoints_maridb_admin_password is not defined
-
 - name: Reset value for OpenStack_Helm endpoints
   ansible.builtin.set_fact:
     openstack_helm_endpoints: "{{ openstack_helm_endpoints_config }}"
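The single lookup above replaces shelling out to `helm show values`: it fetches the chart's default `values.yaml` straight from opendev.org and keeps only the top-level keys under `endpoints`. A standalone sketch of the same pipeline, using barbican as an assumed example chart:

    - name: Show which endpoints a chart declares
      ansible.builtin.debug:
        msg: >-
          {{ lookup('ansible.builtin.url',
                    'https://opendev.org/openstack/openstack-helm/raw/branch/master/barbican/values.yaml',
                    split_lines=False)
             | from_yaml
             | community.general.json_query('keys(endpoints)') }}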
diff --git a/roles/openstack_helm_endpoints/vars/main.yml b/roles/openstack_helm_endpoints/vars/main.yml
index 799eb09..9bba6ab 100644
--- a/roles/openstack_helm_endpoints/vars/main.yml
+++ b/roles/openstack_helm_endpoints/vars/main.yml
@@ -30,24 +30,11 @@
 
 _openstack_helm_endpoints_oslo_db:
   oslo_db:
-    auth:
-      admin:
-        password: "{{ openstack_helm_endpoints_maridb_admin_password }}"
     hosts:
       default: percona-xtradb-haproxy
 
 _openstack_helm_endpoints_oslo_messaging:
   oslo_messaging:
-    auth:
-      user:
-        username: "{{ _openstack_helm_endpoints_rabbitmq_cluster_username }}"
-        password: "{{ _openstack_helm_endpoints_rabbitmq_cluster_password }}"
-      # NOTE(mnaser): The following is not actually used by the chart, however,
-      #               since we are actually doing dynamic lookups to generate
-      #               endpoints, we add it here.
-      admin:
-        username: "{{ _openstack_helm_endpoints_rabbitmq_cluster_username }}"
-        password: "{{ _openstack_helm_endpoints_rabbitmq_cluster_password }}"
     statefulset: null
     hosts:
       default: "rabbitmq-{{ openstack_helm_endpoints_chart }}"
@@ -229,8 +216,6 @@
 _openstack_helm_endpoints_oslo_db_api:
   oslo_db_api:
     auth:
-      admin:
-        password: "{{ openstack_helm_endpoints_maridb_admin_password }}"
       nova:
         password: "{{ openstack_helm_endpoints_nova_mariadb_password }}"
     hosts:
@@ -239,8 +224,6 @@
 _openstack_helm_endpoints_oslo_db_cell0:
   oslo_db_cell0:
     auth:
-      admin:
-        password: "{{ openstack_helm_endpoints_maridb_admin_password }}"
       nova:
         password: "{{ openstack_helm_endpoints_nova_mariadb_password }}"
     hosts:
diff --git a/roles/openstack_helm_glance/defaults/main.yml b/roles/openstack_helm_glance/defaults/main.yml
index 331d5f3..c9878e2 100644
--- a/roles/openstack_helm_glance/defaults/main.yml
+++ b/roles/openstack_helm_glance/defaults/main.yml
@@ -11,24 +11,6 @@
 #    :local:
 
 
-# .. envvar:: openstack_helm_glance_chart_repo_name [[[
-#
-# Helm repository name for the chart.
-openstack_helm_glance_chart_repo_name: openstack-helm
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_glance_chart_repo_url [[[
-#
-# Helm repository URL for the chart.
-openstack_helm_glance_chart_repo_url: https://tarballs.opendev.org/openstack/openstack-helm/
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_glance_chart_name [[[
-#
-# Helm chart name (will also be used for release name)
-openstack_helm_glance_chart_name: glance
-
-                                                                   # ]]]
 # .. envvar:: openstack_helm_glance_image_repository [[[
 #
 # Image repository location to be prefixed for all images
diff --git a/roles/openstack_helm_glance/meta/main.yml b/roles/openstack_helm_glance/meta/main.yml
index e9a1e0a..37cf04b 100644
--- a/roles/openstack_helm_glance/meta/main.yml
+++ b/roles/openstack_helm_glance/meta/main.yml
@@ -23,8 +23,4 @@
         - focal
 
 dependencies:
-  - role: helm_repository
-    vars:
-      helm_repository_name: "{{ openstack_helm_glance_chart_repo_name }}"
-      helm_repository_repo_url: "{{ openstack_helm_glance_chart_repo_url }}"
-  - openstacksdk
+  - role: openstacksdk
diff --git a/roles/openstack_helm_glance/tasks/main.yml b/roles/openstack_helm_glance/tasks/main.yml
index b722cc7..5194df3 100644
--- a/roles/openstack_helm_glance/tasks/main.yml
+++ b/roles/openstack_helm_glance/tasks/main.yml
@@ -16,18 +16,62 @@
   ansible.builtin.include_role:
     name: openstack_helm_endpoints
   vars:
-    openstack_helm_endpoints_repo_name: "{{ openstack_helm_glance_chart_repo_name }}"
-    openstack_helm_endpoints_repo_url: "{{ openstack_helm_glance_chart_repo_url }}"
-    openstack_helm_endpoints_chart: "{{ openstack_helm_glance_chart_name }}"
+    openstack_helm_endpoints_repo_name: openstack-helm
+    openstack_helm_endpoints_repo_url: https://tarballs.opendev.org/openstack/openstack-helm/
+    openstack_helm_endpoints_chart: glance
 
 - name: Deploy Helm chart
-  kubernetes.core.helm:
-    name: "{{ openstack_helm_glance_chart_name }}"
-    chart_ref: "{{ openstack_helm_glance_chart_repo_name }}/{{ openstack_helm_glance_chart_name }}"
-    chart_version: 0.2.10
-    release_namespace: openstack
-    kubeconfig: /etc/kubernetes/admin.conf
-    values: "{{ _openstack_helm_glance_values | combine(openstack_helm_glance_values, recursive=True) }}"
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      - apiVersion: source.toolkit.fluxcd.io/v1beta2
+        kind: HelmRepository
+        metadata:
+          name: openstack-helm
+          namespace: openstack
+        spec:
+          interval: 60s
+          url: https://tarballs.opendev.org/openstack/openstack-helm/
+
+      - apiVersion: v1
+        kind: Secret
+        metadata:
+          name: atmosphere-glance
+          namespace: openstack
+        stringData:
+          values.yaml: "{{ _openstack_helm_glance_values | combine(openstack_helm_glance_values, recursive=True) | to_nice_yaml }}"
+
+      - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+        kind: HelmRelease
+        metadata:
+          name: glance
+          namespace: openstack
+        spec:
+          interval: 60s
+          chart:
+            spec:
+              chart: glance
+              version: 0.2.10
+              sourceRef:
+                kind: HelmRepository
+                name: openstack-helm
+          install:
+            disableWait: true
+          valuesFrom:
+            - kind: Secret
+              name: atmosphere-glance
+            - kind: Secret
+              name: percona-xtradb
+              valuesKey: root
+              targetPath: endpoints.oslo_db.auth.admin.password
+            - kind: Secret
+              name: rabbitmq-glance-default-user
+              valuesKey: username
+              targetPath: endpoints.oslo_messaging.auth.admin.username
+            - kind: Secret
+              name: rabbitmq-glance-default-user
+              valuesKey: password
+              targetPath: endpoints.oslo_messaging.auth.admin.password
 
 - name: Create Ingress
   ansible.builtin.include_role:
@@ -39,6 +83,7 @@
     openstack_helm_ingress_annotations: "{{ _openstack_helm_glance_ingress_annotations | combine(openstack_helm_glance_ingress_annotations) }}"
 
 - name: Create images
+  when: openstack_helm_glance_images | length > 0
   block:
     - name: Wait until image service ready
       kubernetes.core.k8s_info:
@@ -55,8 +100,9 @@
 
     - name: Download images
       ansible.builtin.get_url:
-        url: "{{ item.source_url| regex_replace('\\/$', '') }}/{{ item.image_file }}"
+        url: "{{ item.source_url | regex_replace('\\/$', '') }}/{{ item.image_file }}"
         dest: "/tmp/{{ item.image_file }}"
+        mode: "0600"
       loop: "{{ openstack_helm_glance_images }}"
 
     - name: Upload images
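The download task above joins `source_url` (with any trailing slash stripped) and `image_file`, so each entry in `openstack_helm_glance_images` needs at least those two fields. A hypothetical inventory snippet (any fields beyond the two used here are illustrative only):

    openstack_helm_glance_images:
      - name: cirros            # illustrative; consumed by the upload step
        source_url: http://download.cirros-cloud.net/0.5.2/
        image_file: cirros-0.5.2-x86_64-disk.img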
diff --git a/roles/openstack_helm_heat/defaults/main.yml b/roles/openstack_helm_heat/defaults/main.yml
index 2941e21..9cd7010 100644
--- a/roles/openstack_helm_heat/defaults/main.yml
+++ b/roles/openstack_helm_heat/defaults/main.yml
@@ -11,24 +11,6 @@
 #    :local:
 
 
-# .. envvar:: openstack_helm_heat_chart_repo_name [[[
-#
-# Helm repository name for the chart.
-openstack_helm_heat_chart_repo_name: openstack-helm
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_heat_chart_repo_url [[[
-#
-# Helm repository URL for the chart.
-openstack_helm_heat_chart_repo_url: https://tarballs.opendev.org/openstack/openstack-helm/
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_heat_chart_name [[[
-#
-# Helm chart name (will also be used for release name)
-openstack_helm_heat_chart_name: heat
-
-                                                                   # ]]]
 # .. envvar:: openstack_helm_heat_image_repository [[[
 #
 # Image repository location to be prefixed for all images
diff --git a/roles/openstack_helm_heat/meta/main.yml b/roles/openstack_helm_heat/meta/main.yml
index 3309539..813e309 100644
--- a/roles/openstack_helm_heat/meta/main.yml
+++ b/roles/openstack_helm_heat/meta/main.yml
@@ -21,9 +21,3 @@
     - name: Ubuntu
       versions:
         - focal
-
-dependencies:
-  - role: helm_repository
-    vars:
-      helm_repository_name: "{{ openstack_helm_heat_chart_repo_name }}"
-      helm_repository_repo_url: "{{ openstack_helm_heat_chart_repo_url }}"
diff --git a/roles/openstack_helm_heat/tasks/main.yml b/roles/openstack_helm_heat/tasks/main.yml
index 36cc360..c3616e7 100644
--- a/roles/openstack_helm_heat/tasks/main.yml
+++ b/roles/openstack_helm_heat/tasks/main.yml
@@ -16,46 +16,62 @@
   ansible.builtin.include_role:
     name: openstack_helm_endpoints
   vars:
-    openstack_helm_endpoints_repo_name: "{{ openstack_helm_heat_chart_repo_name }}"
-    openstack_helm_endpoints_repo_url: "{{ openstack_helm_heat_chart_repo_url }}"
-    openstack_helm_endpoints_chart: "{{ openstack_helm_heat_chart_name }}"
-
-- name: Generate Helm values comparison
-  ansible.builtin.include_role:
-    name: helm_diff
-  vars:
-    helm_diff_release_name: "{{ openstack_helm_heat_chart_name }}"
-    helm_diff_release_namespace: openstack
-    helm_diff_values: "{{ _openstack_helm_heat_values }}"
-  when:
-    - openstack_helm_heat_diff | bool
-
-- name: Migrate database from MariaDB to Percona XtraDB Cluster
-  ansible.builtin.include_role:
-    name: openstack_helm_migrate_to_percona_xtradb_cluster
-  vars:
-    openstack_helm_migrate_to_percona_xtradb_cluster_release_name: "{{ openstack_helm_heat_chart_name }}"
-    openstack_helm_migrate_to_percona_xtradb_cluster_release_namespace: openstack
-    openstack_helm_migrate_to_percona_xtradb_cluster_databases:
-      - heat
-    openstack_helm_migrate_to_percona_xtradb_cluster_services:
-      - kind: Deployment
-        name: heat-api
-      - kind: Deployment
-        name: heat-cfn
-      - kind: Deployment
-        name: heat-engine
-  when:
-    - openstack_helm_heat_migrate_from_mariadb | bool
+    openstack_helm_endpoints_repo_name: openstack-helm
+    openstack_helm_endpoints_repo_url: https://tarballs.opendev.org/openstack/openstack-helm/
+    openstack_helm_endpoints_chart: heat
 
 - name: Deploy Helm chart
-  kubernetes.core.helm:
-    name: "{{ openstack_helm_heat_chart_name }}"
-    chart_ref: "{{ openstack_helm_heat_chart_repo_name }}/{{ openstack_helm_heat_chart_name }}"
-    chart_version: 0.2.8
-    release_namespace: openstack
-    kubeconfig: /etc/kubernetes/admin.conf
-    values: "{{ _openstack_helm_heat_values }}"
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      - apiVersion: source.toolkit.fluxcd.io/v1beta2
+        kind: HelmRepository
+        metadata:
+          name: openstack-helm
+          namespace: openstack
+        spec:
+          interval: 60s
+          url: https://tarballs.opendev.org/openstack/openstack-helm/
+
+      - apiVersion: v1
+        kind: Secret
+        metadata:
+          name: atmosphere-heat
+          namespace: openstack
+        stringData:
+          values.yaml: "{{ _openstack_helm_heat_values | to_nice_yaml }}"
+
+      - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+        kind: HelmRelease
+        metadata:
+          name: heat
+          namespace: openstack
+        spec:
+          interval: 60s
+          chart:
+            spec:
+              chart: heat
+              version: 0.2.8
+              sourceRef:
+                kind: HelmRepository
+                name: openstack-helm
+          install:
+            disableWait: true
+          valuesFrom:
+            - kind: Secret
+              name: atmosphere-heat
+            - kind: Secret
+              name: percona-xtradb
+              valuesKey: root
+              targetPath: endpoints.oslo_db.auth.admin.password
+            - kind: Secret
+              name: rabbitmq-heat-default-user
+              valuesKey: username
+              targetPath: endpoints.oslo_messaging.auth.admin.username
+            - kind: Secret
+              name: rabbitmq-heat-default-user
+              valuesKey: password
+              targetPath: endpoints.oslo_messaging.auth.admin.password
 
 - name: Create Ingress
   ansible.builtin.include_role:
diff --git a/roles/openstack_helm_horizon/defaults/main.yml b/roles/openstack_helm_horizon/defaults/main.yml
index 2faf05b..1a61592 100644
--- a/roles/openstack_helm_horizon/defaults/main.yml
+++ b/roles/openstack_helm_horizon/defaults/main.yml
@@ -11,24 +11,6 @@
 #    :local:
 
 
-# .. envvar:: openstack_helm_horizon_chart_repo_name [[[
-#
-# Helm repository name for the chart.
-openstack_helm_horizon_chart_repo_name: openstack-helm
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_horizon_chart_repo_url [[[
-#
-# Helm repository URL for the chart.
-openstack_helm_horizon_chart_repo_url: https://tarballs.opendev.org/openstack/openstack-helm/
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_horizon_chart_name [[[
-#
-# Helm chart name (will also be used for release name)
-openstack_helm_horizon_chart_name: horizon
-
-                                                                   # ]]]
 # .. envvar:: openstack_helm_horizon_image_repository [[[
 #
 # Image repository location to be prefixed for all images
diff --git a/roles/openstack_helm_horizon/meta/main.yml b/roles/openstack_helm_horizon/meta/main.yml
index b468259..67b3399 100644
--- a/roles/openstack_helm_horizon/meta/main.yml
+++ b/roles/openstack_helm_horizon/meta/main.yml
@@ -21,9 +21,3 @@
     - name: Ubuntu
       versions:
         - focal
-
-dependencies:
-  - role: helm_repository
-    vars:
-      helm_repository_name: "{{ openstack_helm_horizon_chart_repo_name }}"
-      helm_repository_repo_url: "{{ openstack_helm_horizon_chart_repo_url }}"
diff --git a/roles/openstack_helm_horizon/tasks/main.yml b/roles/openstack_helm_horizon/tasks/main.yml
index abdc87b..9eb13f5 100644
--- a/roles/openstack_helm_horizon/tasks/main.yml
+++ b/roles/openstack_helm_horizon/tasks/main.yml
@@ -16,18 +16,54 @@
   ansible.builtin.include_role:
     name: openstack_helm_endpoints
   vars:
-    openstack_helm_endpoints_repo_name: "{{ openstack_helm_horizon_chart_repo_name }}"
-    openstack_helm_endpoints_repo_url: "{{ openstack_helm_horizon_chart_repo_url }}"
-    openstack_helm_endpoints_chart: "{{ openstack_helm_horizon_chart_name }}"
+    openstack_helm_endpoints_repo_name: openstack-helm
+    openstack_helm_endpoints_repo_url: https://tarballs.opendev.org/openstack/openstack-helm/
+    openstack_helm_endpoints_chart: horizon
 
 - name: Deploy Helm chart
-  kubernetes.core.helm:
-    name: "{{ openstack_helm_horizon_chart_name }}"
-    chart_ref: "{{ openstack_helm_horizon_chart_repo_name }}/{{ openstack_helm_horizon_chart_name }}"
-    chart_version: 0.2.24
-    release_namespace: openstack
-    kubeconfig: /etc/kubernetes/admin.conf
-    values: "{{ _openstack_helm_horizon_values | combine(openstack_helm_horizon_values, recursive=True) }}"
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      - apiVersion: source.toolkit.fluxcd.io/v1beta2
+        kind: HelmRepository
+        metadata:
+          name: openstack-helm
+          namespace: openstack
+        spec:
+          interval: 60s
+          url: https://tarballs.opendev.org/openstack/openstack-helm/
+
+      - apiVersion: v1
+        kind: Secret
+        metadata:
+          name: atmosphere-horizon
+          namespace: openstack
+        stringData:
+          values.yaml: "{{ _openstack_helm_horizon_values | combine(openstack_helm_horizon_values, recursive=True) | to_nice_yaml }}"
+
+      - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+        kind: HelmRelease
+        metadata:
+          name: horizon
+          namespace: openstack
+        spec:
+          interval: 60s
+          chart:
+            spec:
+              chart: horizon
+              version: 0.2.24
+              sourceRef:
+                kind: HelmRepository
+                name: openstack-helm
+          install:
+            disableWait: true
+          valuesFrom:
+            - kind: Secret
+              name: atmosphere-horizon
+            - kind: Secret
+              name: percona-xtradb
+              valuesKey: root
+              targetPath: endpoints.oslo_db.auth.admin.password
 
 - name: Create Ingress
   ansible.builtin.include_role:
diff --git a/roles/openstack_helm_infra_ceph_provisioners/defaults/main.yml b/roles/openstack_helm_infra_ceph_provisioners/defaults/main.yml
index 636d0e9..31f39db 100644
--- a/roles/openstack_helm_infra_ceph_provisioners/defaults/main.yml
+++ b/roles/openstack_helm_infra_ceph_provisioners/defaults/main.yml
@@ -11,24 +11,6 @@
 #    :local:
 
 
-# .. envvar:: openstack_helm_infra_ceph_provisioners_chart_repo_name [[[
-#
-# Helm repository name for the chart.
-openstack_helm_infra_ceph_provisioners_chart_repo_name: openstack-helm-infra
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_infra_ceph_provisioners_chart_repo_url [[[
-#
-# Helm repository URL for the chart.
-openstack_helm_infra_ceph_provisioners_chart_repo_url: https://tarballs.opendev.org/openstack/openstack-helm-infra/
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_infra_ceph_provisioners_chart_name [[[
-#
-# Helm chart name (will also be used for release name)
-openstack_helm_infra_ceph_provisioners_chart_name: ceph-provisioners
-
-                                                                   # ]]]
 # .. envvar:: openstack_helm_infra_ceph_provisioners_ceph_mon_group [[[
 #
 # Ansible inventory group containing Ceph monitors.
diff --git a/roles/openstack_helm_infra_ceph_provisioners/meta/main.yml b/roles/openstack_helm_infra_ceph_provisioners/meta/main.yml
index 8a0117c..10d37da 100644
--- a/roles/openstack_helm_infra_ceph_provisioners/meta/main.yml
+++ b/roles/openstack_helm_infra_ceph_provisioners/meta/main.yml
@@ -23,8 +23,4 @@
         - focal
 
 dependencies:
-  - role: helm_repository
-    vars:
-      helm_repository_name: "{{ openstack_helm_infra_ceph_provisioners_chart_repo_name }}"
-      helm_repository_repo_url: "{{ openstack_helm_infra_ceph_provisioners_chart_repo_url }}"
-  - ceph_csi_rbd
+  - role: ceph_csi_rbd
diff --git a/roles/openstack_helm_infra_ceph_provisioners/tasks/main.yml b/roles/openstack_helm_infra_ceph_provisioners/tasks/main.yml
index 17e08e3..8a2113f 100644
--- a/roles/openstack_helm_infra_ceph_provisioners/tasks/main.yml
+++ b/roles/openstack_helm_infra_ceph_provisioners/tasks/main.yml
@@ -103,27 +103,42 @@
         key: "{{ _openstack_helm_infra_ceph_provisioners_keyring.key }}"
 
 - name: Deploy Helm chart
-  kubernetes.core.helm:
-    name: "{{ openstack_helm_infra_ceph_provisioners_chart_name }}"
-    chart_ref: "{{ openstack_helm_infra_ceph_provisioners_chart_repo_name }}/{{ openstack_helm_infra_ceph_provisioners_chart_name }}"
-    chart_version: 0.1.17
-    release_namespace: openstack
-    kubeconfig: /etc/kubernetes/admin.conf
-    values:
-      network:
-        public: "{{ openstack_helm_infra_ceph_provisioners_ceph_public_network }}"
-        cluster: "{{ openstack_helm_infra_ceph_provisioners_ceph_cluster_network }}"
-      conf:
-        ceph:
-          global:
-            fsid: "{{ openstack_helm_infra_ceph_provisioners_ceph_fsid }}"
-      manifests:
-        configmap_bin: false
-        configmap_bin_common: false
-        deployment_rbd_provisioner: false
-        deployment_csi_rbd_provisioner: false
-        deployment_cephfs_provisioner: false
-        job_cephfs_client_key: false
-        job_namespace_client_key_cleaner: false
-        job_namespace_client_key: false
-        storageclass: false
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      - apiVersion: source.toolkit.fluxcd.io/v1beta2
+        kind: HelmRepository
+        metadata:
+          name: openstack-helm-infra
+          namespace: openstack
+        spec:
+          interval: 60s
+          url: https://tarballs.opendev.org/openstack/openstack-helm-infra/
+
+      - apiVersion: v1
+        kind: Secret
+        metadata:
+          name: atmosphere-ceph-provisioners
+          namespace: openstack
+        stringData:
+          values.yaml: "{{ openstack_helm_infra_ceph_provisioners_values | to_nice_yaml }}"
+
+      - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+        kind: HelmRelease
+        metadata:
+          name: ceph-provisioners
+          namespace: openstack
+        spec:
+          interval: 60s
+          chart:
+            spec:
+              chart: ceph-provisioners
+              version: 0.1.8
+              sourceRef:
+                kind: HelmRepository
+                name: openstack-helm-infra
+          install:
+            disableWait: true
+          valuesFrom:
+            - kind: Secret
+              name: atmosphere-ceph-provisioners
diff --git a/roles/openstack_helm_infra_ceph_provisioners/vars/main.yml b/roles/openstack_helm_infra_ceph_provisioners/vars/main.yml
new file mode 100644
index 0000000..df99dca
--- /dev/null
+++ b/roles/openstack_helm_infra_ceph_provisioners/vars/main.yml
@@ -0,0 +1,19 @@
+---
+openstack_helm_infra_ceph_provisioners_values:
+  network:
+    public: "{{ openstack_helm_infra_ceph_provisioners_ceph_public_network }}"
+    cluster: "{{ openstack_helm_infra_ceph_provisioners_ceph_cluster_network }}"
+  conf:
+    ceph:
+      global:
+        fsid: "{{ openstack_helm_infra_ceph_provisioners_ceph_fsid }}"
+  manifests:
+    configmap_bin: false
+    configmap_bin_common: false
+    deployment_rbd_provisioner: false
+    deployment_csi_rbd_provisioner: false
+    deployment_cephfs_provisioner: false
+    job_cephfs_client_key: false
+    job_namespace_client_key_cleaner: false
+    job_namespace_client_key: false
+    storageclass: false
diff --git a/roles/openstack_helm_infra_libvirt/defaults/main.yml b/roles/openstack_helm_infra_libvirt/defaults/main.yml
index 1dc7b6b..0c63a4d 100644
--- a/roles/openstack_helm_infra_libvirt/defaults/main.yml
+++ b/roles/openstack_helm_infra_libvirt/defaults/main.yml
@@ -11,24 +11,6 @@
 #    :local:
 
 
-# .. envvar:: openstack_helm_infra_libvirt_chart_repo_name [[[
-#
-# Helm repository name for the chart.
-openstack_helm_infra_libvirt_chart_repo_name: openstack-helm-infra
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_infra_libvirt_chart_repo_url [[[
-#
-# Helm repository URL for the chart.
-openstack_helm_infra_libvirt_chart_repo_url: https://tarballs.opendev.org/openstack/openstack-helm-infra/
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_infra_libvirt_chart_name [[[
-#
-# Helm chart name (will also be used for release name)
-openstack_helm_infra_libvirt_chart_name: libvirt
-
-                                                                   # ]]]
 # .. envvar:: openstack_helm_infra_libvirt_image_repository [[[
 #
 # Image repository location to be prefixed for all images
diff --git a/roles/openstack_helm_infra_libvirt/meta/main.yml b/roles/openstack_helm_infra_libvirt/meta/main.yml
index 249b087..bc97584 100644
--- a/roles/openstack_helm_infra_libvirt/meta/main.yml
+++ b/roles/openstack_helm_infra_libvirt/meta/main.yml
@@ -21,9 +21,3 @@
     - name: Ubuntu
       versions:
         - focal
-
-dependencies:
-  - role: helm_repository
-    vars:
-      helm_repository_name: "{{ openstack_helm_infra_libvirt_chart_repo_name }}"
-      helm_repository_repo_url: "{{ openstack_helm_infra_libvirt_chart_repo_url }}"
diff --git a/roles/openstack_helm_infra_libvirt/tasks/main.yml b/roles/openstack_helm_infra_libvirt/tasks/main.yml
index 46df5d1..56e848d 100644
--- a/roles/openstack_helm_infra_libvirt/tasks/main.yml
+++ b/roles/openstack_helm_infra_libvirt/tasks/main.yml
@@ -16,15 +16,47 @@
   ansible.builtin.include_role:
     name: openstack_helm_endpoints
   vars:
-    openstack_helm_endpoints_repo_name: "{{ openstack_helm_infra_libvirt_chart_repo_name }}"
-    openstack_helm_endpoints_repo_url: "{{ openstack_helm_infra_libvirt_chart_repo_url }}"
-    openstack_helm_endpoints_chart: "{{ openstack_helm_infra_libvirt_chart_name }}"
+    openstack_helm_endpoints_repo_name: openstack-helm-infra
+    openstack_helm_endpoints_repo_url: https://tarballs.opendev.org/openstack/openstack-helm-infra/
+    openstack_helm_endpoints_chart: libvirt
 
 - name: Deploy Helm chart
-  kubernetes.core.helm:
-    name: "{{ openstack_helm_infra_libvirt_chart_name }}"
-    chart_ref: "{{ openstack_helm_infra_libvirt_chart_repo_name }}/{{ openstack_helm_infra_libvirt_chart_name }}"
-    chart_version: 0.1.8
-    release_namespace: openstack
-    kubeconfig: /etc/kubernetes/admin.conf
-    values: "{{ _openstack_helm_infra_libvirt_values | combine(openstack_helm_infra_libvirt_values, recursive=True) }}"
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      - apiVersion: source.toolkit.fluxcd.io/v1beta2
+        kind: HelmRepository
+        metadata:
+          name: openstack-helm-infra
+          namespace: openstack
+        spec:
+          interval: 60s
+          url: https://tarballs.opendev.org/openstack/openstack-helm-infra/
+
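+      # The rendered chart values are stored in a Secret and consumed by the HelmRelease below via valuesFrom.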
+      - apiVersion: v1
+        kind: Secret
+        metadata:
+          name: atmosphere-libvirt
+          namespace: openstack
+        stringData:
+          values.yaml: "{{ _openstack_helm_infra_libvirt_values | combine(openstack_helm_infra_libvirt_values, recursive=True) | to_nice_yaml }}"
+
+      - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+        kind: HelmRelease
+        metadata:
+          name: libvirt
+          namespace: openstack
+        spec:
+          interval: 60s
+          chart:
+            spec:
+              chart: libvirt
+              version: 0.1.8
+              sourceRef:
+                kind: HelmRepository
+                name: openstack-helm-infra
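+          # disableWait lets Flux finish the install without blocking on resource readiness.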
+          install:
+            disableWait: true
+          valuesFrom:
+            - kind: Secret
+              name: atmosphere-libvirt
diff --git a/roles/openstack_helm_infra_memcached/defaults/main.yml b/roles/openstack_helm_infra_memcached/defaults/main.yml
index cee6841..7d46c47 100644
--- a/roles/openstack_helm_infra_memcached/defaults/main.yml
+++ b/roles/openstack_helm_infra_memcached/defaults/main.yml
@@ -11,30 +11,6 @@
 #    :local:
 
 
-# .. envvar:: openstack_helm_infra_memcached_chart_repo_name [[[
-#
-# Helm repository name for the chart.
-openstack_helm_infra_memcached_chart_repo_name: openstack-helm-infra
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_infra_memcached_chart_repo_url [[[
-#
-# Helm repository URL for the chart.
-openstack_helm_infra_memcached_chart_repo_url: https://tarballs.opendev.org/openstack/openstack-helm-infra/
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_infra_memcached_chart_name [[[
-#
-# Helm chart name (will also be used for release name)
-openstack_helm_infra_memcached_chart_name: memcached
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_infra_memcached_diff [[[
-#
-# Disable a diff of the release values and ask for manual confirmation
-openstack_helm_infra_memcached_diff: false
-
-                                                                   # ]]]
 # .. envvar:: openstack_helm_infra_memcached_values [[[
 #
 # Overrides for Helm chart values
diff --git a/roles/openstack_helm_infra_memcached/meta/main.yml b/roles/openstack_helm_infra_memcached/meta/main.yml
index ec3be63..cd0fc41 100644
--- a/roles/openstack_helm_infra_memcached/meta/main.yml
+++ b/roles/openstack_helm_infra_memcached/meta/main.yml
@@ -21,9 +21,3 @@
     - name: Ubuntu
       versions:
         - focal
-
-dependencies:
-  - role: helm_repository
-    vars:
-      helm_repository_name: "{{ openstack_helm_infra_memcached_chart_repo_name }}"
-      helm_repository_repo_url: "{{ openstack_helm_infra_memcached_chart_repo_url }}"
diff --git a/roles/openstack_helm_infra_memcached/tasks/main.yml b/roles/openstack_helm_infra_memcached/tasks/main.yml
index 491e3bc..f3b3666 100644
--- a/roles/openstack_helm_infra_memcached/tasks/main.yml
+++ b/roles/openstack_helm_infra_memcached/tasks/main.yml
@@ -16,107 +16,116 @@
   ansible.builtin.include_role:
     name: openstack_helm_endpoints
   vars:
-    openstack_helm_endpoints_repo_name: "{{ openstack_helm_infra_memcached_chart_repo_name }}"
-    openstack_helm_endpoints_repo_url: "{{ openstack_helm_infra_memcached_chart_repo_url }}"
-    openstack_helm_endpoints_chart: "{{ openstack_helm_infra_memcached_chart_name }}"
-
-- name: Generate Helm values comparison
-  ansible.builtin.include_role:
-    name: helm_diff
-  vars:
-    helm_diff_release_name: "{{ openstack_helm_infra_memcached_chart_name }}"
-    helm_diff_release_namespace: openstack
-    helm_diff_values: "{{ _openstack_helm_infra_memcached_values }}"
-  when:
-    - openstack_helm_infra_memcached_diff | bool
+    openstack_helm_endpoints_repo_name: openstack-helm-infra
+    openstack_helm_endpoints_repo_url: https://tarballs.opendev.org/openstack/openstack-helm-infra/
+    openstack_helm_endpoints_chart: memcached
 
 - name: Deploy Helm chart
-  kubernetes.core.helm:
-    name: "{{ openstack_helm_infra_memcached_chart_name }}"
-    chart_ref: "{{ openstack_helm_infra_memcached_chart_repo_name }}/{{ openstack_helm_infra_memcached_chart_name }}"
-    chart_version: 0.1.6
-    release_namespace: openstack
-    create_namespace: true
-    kubeconfig: /etc/kubernetes/admin.conf
-    values: "{{ _openstack_helm_infra_memcached_values }}"
-
-- name: Create Service for metrics
   kubernetes.core.k8s:
     state: present
     definition:
-      apiVersion: v1
-      kind: Service
-      metadata:
-        name: memcached-metrics
-        namespace: openstack
-        labels:
-          application: memcached
-          component: server
-      spec:
-        selector:
-          application: memcached
-          component: server
-        ports:
-          - name: metrics
-            port: 9150
-            targetPort: 9150
+      - apiVersion: source.toolkit.fluxcd.io/v1beta2
+        kind: HelmRepository
+        metadata:
+          name: openstack-helm-infra
+          namespace: openstack
+        spec:
+          interval: 60s
+          url: https://tarballs.opendev.org/openstack/openstack-helm-infra/
 
-- name: Create ServiceMonitor
-  kubernetes.core.k8s:
-    state: present
-    definition:
-      apiVersion: monitoring.coreos.com/v1
-      kind: ServiceMonitor
-      metadata:
-        name: memcached
-        namespace: monitoring
-        labels:
-          release: kube-prometheus-stack
-      spec:
-        jobLabel: application
-        endpoints:
-          - port: "metrics"
-            path: "/metrics"
-            relabelings:
-              - sourceLabels: ["__meta_kubernetes_pod_name"]
-                targetLabel: "instance"
-              - action: "labeldrop"
-                regex: "^(container|endpoint|namespace|pod|service)$"
-        namespaceSelector:
-          matchNames:
-            - openstack
-        selector:
-          matchLabels:
+      - apiVersion: v1
+        kind: Secret
+        metadata:
+          name: atmosphere-memcached
+          namespace: openstack
+        stringData:
+          values.yaml: "{{ _openstack_helm_infra_memcached_values | to_nice_yaml }}"
+
+      - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+        kind: HelmRelease
+        metadata:
+          name: memcached
+          namespace: openstack
+        spec:
+          interval: 60s
+          chart:
+            spec:
+              chart: memcached
+              version: 0.1.6
+              sourceRef:
+                kind: HelmRepository
+                name: openstack-helm-infra
+          install:
+            disableWait: true
+          valuesFrom:
+            - kind: Secret
+              name: atmosphere-memcached
+
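+      # The metrics Service, ServiceMonitor and PrometheusRule previously applied as separate
+      # tasks now ride along in the same apply.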
+      - apiVersion: v1
+        kind: Service
+        metadata:
+          name: memcached-metrics
+          namespace: openstack
+          labels:
             application: memcached
             component: server
+        spec:
+          selector:
+            application: memcached
+            component: server
+          ports:
+            - name: metrics
+              port: 9150
+              targetPort: 9150
 
-- name: Create PrometheusRule
-  kubernetes.core.k8s:
-    state: present
-    definition:
-      apiVersion: monitoring.coreos.com/v1
-      kind: PrometheusRule
-      metadata:
-        name: memcached
-        namespace: monitoring
-        labels:
-          release: kube-prometheus-stack
-      spec:
-        groups:
-          - name: memcached
-            rules:
-              - alert: MemcachedDown
-                expr: memcached_up == 0
-                for: 5m
-                labels:
-                  severity: critical
-              - alert: MemcachedConnectionLimitApproaching
-                expr: (memcached_current_connections / memcached_max_connections * 100) > 80
-                for: 5m
-                labels:
-                  severity: warning
-              - alert: MemcachedConnectionLimitApproaching
-                expr: (memcached_current_connections / memcached_max_connections * 100) > 95
-                for: 5m
-                labels:
-                  severity: critical
+      - apiVersion: monitoring.coreos.com/v1
+        kind: ServiceMonitor
+        metadata:
+          name: memcached
+          namespace: monitoring
+          labels:
+            release: kube-prometheus-stack
+        spec:
+          jobLabel: application
+          endpoints:
+            - port: "metrics"
+              path: "/metrics"
+              relabelings:
+                - sourceLabels: ["__meta_kubernetes_pod_name"]
+                  targetLabel: "instance"
+                - action: "labeldrop"
+                  regex: "^(container|endpoint|namespace|pod|service)$"
+          namespaceSelector:
+            matchNames:
+              - openstack
+          selector:
+            matchLabels:
+              application: memcached
+              component: server
+
+      - apiVersion: monitoring.coreos.com/v1
+        kind: PrometheusRule
+        metadata:
+          name: memcached
+          namespace: monitoring
+          labels:
+            release: kube-prometheus-stack
+        spec:
+          groups:
+            - name: memcached
+              rules:
+                - alert: MemcachedDown
+                  expr: memcached_up == 0
+                  for: 5m
+                  labels:
+                    severity: critical
+                - alert: MemcachedConnectionLimitApproaching
+                  expr: (memcached_current_connections / memcached_max_connections * 100) > 80
+                  for: 5m
+                  labels:
+                    severity: warning
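+                # NOTE: the same alert name is reused with an escalating severity label once usage crosses 95%.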
+                - alert: MemcachedConnectionLimitApproaching
+                  expr: (memcached_current_connections / memcached_max_connections * 100) > 95
+                  for: 5m
+                  labels:
+                    severity: critical
diff --git a/roles/openstack_helm_infra_openvswitch/defaults/main.yml b/roles/openstack_helm_infra_openvswitch/defaults/main.yml
index eb1f4e5..fdab6e9 100644
--- a/roles/openstack_helm_infra_openvswitch/defaults/main.yml
+++ b/roles/openstack_helm_infra_openvswitch/defaults/main.yml
@@ -11,24 +11,6 @@
 #    :local:
 
 
-# .. envvar:: openstack_helm_infra_openvswitch_chart_repo_name [[[
-#
-# Helm repository name for the chart.
-openstack_helm_infra_openvswitch_chart_repo_name: openstack-helm-infra
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_infra_openvswitch_chart_repo_url [[[
-#
-# Helm repository URL for the chart.
-openstack_helm_infra_openvswitch_chart_repo_url: https://tarballs.opendev.org/openstack/openstack-helm-infra/
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_infra_openvswitch_chart_name [[[
-#
-# Helm chart name (will also be used for release name)
-openstack_helm_infra_openvswitch_chart_name: openvswitch
-
-                                                                   # ]]]
 # .. envvar:: openstack_helm_infra_openvswitch_image_repository [[[
 #
 # Image repository location to be prefixed for all images
diff --git a/roles/openstack_helm_infra_openvswitch/meta/main.yml b/roles/openstack_helm_infra_openvswitch/meta/main.yml
index 085b6a5..6cf0382 100644
--- a/roles/openstack_helm_infra_openvswitch/meta/main.yml
+++ b/roles/openstack_helm_infra_openvswitch/meta/main.yml
@@ -21,10 +21,3 @@
     - name: Ubuntu
       versions:
         - focal
-
-dependencies:
-  - role: helm_repository
-    vars:
-      helm_repository_name: "{{ openstack_helm_infra_openvswitch_chart_repo_name }}"
-      helm_repository_repo_url: "{{ openstack_helm_infra_openvswitch_chart_repo_url }}"
-  - openstack_namespace
diff --git a/roles/openstack_helm_infra_openvswitch/tasks/main.yml b/roles/openstack_helm_infra_openvswitch/tasks/main.yml
index 372a5e4..e82fd3a 100644
--- a/roles/openstack_helm_infra_openvswitch/tasks/main.yml
+++ b/roles/openstack_helm_infra_openvswitch/tasks/main.yml
@@ -16,15 +16,48 @@
   ansible.builtin.include_role:
     name: openstack_helm_endpoints
   vars:
-    openstack_helm_endpoints_repo_name: "{{ openstack_helm_infra_openvswitch_chart_repo_name }}"
-    openstack_helm_endpoints_repo_url: "{{ openstack_helm_infra_openvswitch_chart_repo_url }}"
-    openstack_helm_endpoints_chart: "{{ openstack_helm_infra_openvswitch_chart_name }}"
+    openstack_helm_endpoints_repo_name: openstack-helm-infra
+    openstack_helm_endpoints_repo_url: https://tarballs.opendev.org/openstack/openstack-helm-infra/
+    openstack_helm_endpoints_chart: openvswitch
 
 - name: Deploy Helm chart
-  kubernetes.core.helm:
-    name: "{{ openstack_helm_infra_openvswitch_chart_name }}"
-    chart_ref: "{{ openstack_helm_infra_openvswitch_chart_repo_name }}/{{ openstack_helm_infra_openvswitch_chart_name }}"
-    chart_version: 0.1.6
-    release_namespace: openstack
-    kubeconfig: /etc/kubernetes/admin.conf
-    values: "{{ _openstack_helm_infra_openvswitch_values | combine(openstack_helm_infra_openvswitch_values, recursive=True) }}"
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      - apiVersion: source.toolkit.fluxcd.io/v1beta2
+        kind: HelmRepository
+        metadata:
+          name: openstack-helm-infra
+          namespace: openstack
+        spec:
+          interval: 60s
+          url: https://tarballs.opendev.org/openstack/openstack-helm-infra/
+
+      - apiVersion: v1
+        kind: Secret
+        metadata:
+          name: atmosphere-openvswitch
+          namespace: openstack
+        stringData:
+          values.yaml: "{{ _openstack_helm_infra_openvswitch_values | combine(openstack_helm_infra_openvswitch_values, recursive=True) | to_nice_yaml }}"
+
+      - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+        kind: HelmRelease
+        metadata:
+          name: openvswitch
+          namespace: openstack
+        spec:
+          interval: 60s
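+          # NOTE: a longer Helm action timeout, presumably to accommodate the per-node Open vSwitch rollout.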
+          timeout: 60m
+          chart:
+            spec:
+              chart: openvswitch
+              version: 0.1.6
+              sourceRef:
+                kind: HelmRepository
+                name: openstack-helm-infra
+          install:
+            disableWait: true
+          valuesFrom:
+            - kind: Secret
+              name: atmosphere-openvswitch
diff --git a/roles/openstack_helm_keystone/defaults/main.yml b/roles/openstack_helm_keystone/defaults/main.yml
index 919050f..bc43a20 100644
--- a/roles/openstack_helm_keystone/defaults/main.yml
+++ b/roles/openstack_helm_keystone/defaults/main.yml
@@ -11,24 +11,6 @@
 #    :local:
 
 
-# .. envvar:: openstack_helm_keystone_chart_repo_name [[[
-#
-# Helm repository name for the chart.
-openstack_helm_keystone_chart_repo_name: openstack-helm
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_keystone_chart_repo_url [[[
-#
-# Helm repository URL for the chart.
-openstack_helm_keystone_chart_repo_url: https://tarballs.opendev.org/openstack/openstack-helm/
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_keystone_chart_name [[[
-#
-# Helm chart name (will also be used for release name)
-openstack_helm_keystone_chart_name: keystone
-
-                                                                   # ]]]
 # .. envvar:: openstack_helm_keystone_image_repository [[[
 #
 # Image repository location to be prefixed for all images
diff --git a/roles/openstack_helm_keystone/meta/main.yml b/roles/openstack_helm_keystone/meta/main.yml
index 8c7d01b..2b0e0af 100644
--- a/roles/openstack_helm_keystone/meta/main.yml
+++ b/roles/openstack_helm_keystone/meta/main.yml
@@ -21,9 +21,3 @@
     - name: Ubuntu
       versions:
         - focal
-
-dependencies:
-  - role: helm_repository
-    vars:
-      helm_repository_name: "{{ openstack_helm_keystone_chart_repo_name }}"
-      helm_repository_repo_url: "{{ openstack_helm_keystone_chart_repo_url }}"
diff --git a/roles/openstack_helm_keystone/tasks/main.yml b/roles/openstack_helm_keystone/tasks/main.yml
index 97b313a..0f753ff 100644
--- a/roles/openstack_helm_keystone/tasks/main.yml
+++ b/roles/openstack_helm_keystone/tasks/main.yml
@@ -16,19 +16,62 @@
   ansible.builtin.include_role:
     name: openstack_helm_endpoints
   vars:
-    openstack_helm_endpoints_repo_name: "{{ openstack_helm_keystone_chart_repo_name }}"
-    openstack_helm_endpoints_repo_url: "{{ openstack_helm_keystone_chart_repo_url }}"
-    openstack_helm_endpoints_chart: "{{ openstack_helm_keystone_chart_name }}"
+    openstack_helm_endpoints_repo_name: openstack-helm
+    openstack_helm_endpoints_repo_url: https://tarballs.opendev.org/openstack/openstack-helm/
+    openstack_helm_endpoints_chart: keystone
 
 - name: Deploy Helm chart
-  kubernetes.core.helm:
-    name: "{{ openstack_helm_keystone_chart_name }}"
-    chart_ref: "{{ openstack_helm_keystone_chart_repo_name }}/{{ openstack_helm_keystone_chart_name }}"
-    chart_version: 0.2.19
-    release_namespace: openstack
-    create_namespace: true
-    kubeconfig: /etc/kubernetes/admin.conf
-    values: "{{ _openstack_helm_keystone_values | combine(openstack_helm_keystone_values, recursive=True) }}"
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      - apiVersion: source.toolkit.fluxcd.io/v1beta2
+        kind: HelmRepository
+        metadata:
+          name: openstack-helm
+          namespace: openstack
+        spec:
+          interval: 60s
+          url: https://tarballs.opendev.org/openstack/openstack-helm/
+
+      - apiVersion: v1
+        kind: Secret
+        metadata:
+          name: atmosphere-keystone
+          namespace: openstack
+        stringData:
+          values.yaml: "{{ _openstack_helm_keystone_values | combine(openstack_helm_keystone_values, recursive=True) | to_nice_yaml }}"
+
+      - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+        kind: HelmRelease
+        metadata:
+          name: keystone
+          namespace: openstack
+        spec:
+          interval: 60s
+          chart:
+            spec:
+              chart: keystone
+              version: 0.2.19
+              sourceRef:
+                kind: HelmRepository
+                name: openstack-helm
+          install:
+            disableWait: true
+          valuesFrom:
+            - kind: Secret
+              name: atmosphere-keystone
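+            # Database and messaging credentials are injected from the operator-managed Secrets
+            # directly into the chart values.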
+            - kind: Secret
+              name: percona-xtradb
+              valuesKey: root
+              targetPath: endpoints.oslo_db.auth.admin.password
+            - kind: Secret
+              name: rabbitmq-keystone-default-user
+              valuesKey: username
+              targetPath: endpoints.oslo_messaging.auth.admin.username
+            - kind: Secret
+              name: rabbitmq-keystone-default-user
+              valuesKey: password
+              targetPath: endpoints.oslo_messaging.auth.admin.password
 
 - name: Create Ingress
   ansible.builtin.include_role:
diff --git a/roles/openstack_helm_migrate_to_percona_xtradb_cluster/tasks/main.yml b/roles/openstack_helm_migrate_to_percona_xtradb_cluster/tasks/main.yml
deleted file mode 100644
index a45f7d0..0000000
--- a/roles/openstack_helm_migrate_to_percona_xtradb_cluster/tasks/main.yml
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright (c) 2022 VEXXHOST, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-- name: Get the IP address for the legacy MariaDB service
-  kubernetes.core.k8s_info:
-    api_version: v1
-    kind: Service
-    name: mariadb
-    namespace: openstack
-  register: _openstack_helm_migrate_to_percona_xtradb_cluster_legacy_service
-  when: _openstack_helm_migrate_to_percona_xtradb_cluster_legacy_ip is undefined
-
-- name: Get the IP address for the new Percona XtraDB service
-  kubernetes.core.k8s_info:
-    api_version: v1
-    kind: Service
-    name: percona-xtradb-haproxy
-    namespace: openstack
-  register: _openstack_helm_migrate_to_percona_xtradb_cluster_service
-  when: _openstack_helm_migrate_to_percona_xtradb_cluster_ip is undefined
-
-- name: Get current values for Helm chart & fail if it already points to Percona XtraDB Cluster
-  kubernetes.core.helm_info:
-    name: "{{ openstack_helm_migrate_to_percona_xtradb_cluster_release_name }}"
-    release_namespace: "{{ openstack_helm_migrate_to_percona_xtradb_cluster_release_namespace }}"
-  register: _openstack_helm_migrate_to_percona_xtradb_cluster_helm_info
-  failed_when: _oslo_db_endpoints.get('hosts', {}).get('default', '') == 'percona-xtradb-haproxy'
-  vars:
-    _oslo_db_endpoints: "{{ _openstack_helm_migrate_to_percona_xtradb_cluster_helm_info.status['values']['endpoints']['oslo_db'] }}"
-
-- name: Set facts for database endpoints
-  ansible.builtin.set_fact:
-    _openstack_helm_migrate_to_percona_xtradb_cluster_legacy_ip: |-
-      {{ _openstack_helm_migrate_to_percona_xtradb_cluster_legacy_service.resources[0]['spec']['clusterIP'] }}
-    _openstack_helm_migrate_to_percona_xtradb_cluster_legacy_password: |-
-      {{ _openstack_helm_migrate_to_percona_xtradb_cluster_helm_info.status['values']['endpoints']['oslo_db']['auth']['admin']['password'] }}
-    _openstack_helm_migrate_to_percona_xtradb_cluster_ip: |-
-      {{ _openstack_helm_migrate_to_percona_xtradb_cluster_service.resources[0]['spec']['clusterIP'] }}
-    _openstack_helm_migrate_to_percona_xtradb_cluster_password: |-
-      {{ openstack_helm_endpoints['oslo_db']['auth']['admin']['password'] }}
-
-- name: Ensure PyMySQL packages are installed
-  ansible.builtin.pip:
-    name: PyMySQL
-
-- name: Check if database already exists & fail if it already exists
-  community.mysql.mysql_db:
-    login_host: "{{ _openstack_helm_migrate_to_percona_xtradb_cluster_ip }}"
-    login_user: root
-    login_password: "{{ _openstack_helm_migrate_to_percona_xtradb_cluster_password }}"
-    name: "{{ item }}"
-    state: present
-  check_mode: true
-  register: _openstack_helm_migrate_to_percona_xtradb_cluster_db_check
-  failed_when: _openstack_helm_migrate_to_percona_xtradb_cluster_db_check is not changed
-  loop: "{{ openstack_helm_migrate_to_percona_xtradb_cluster_databases }}"
-
-- name: Scale down replicas to 0 for database facing services
-  kubernetes.core.k8s_scale:
-    api_version: v1
-    kind: "{{ item.kind }}"
-    name: "{{ item.name }}"
-    namespace: "{{ openstack_helm_migrate_to_percona_xtradb_cluster_release_namespace }}"
-    replicas: 0
-  loop: "{{ openstack_helm_migrate_to_percona_xtradb_cluster_services }}"
-
-- name: Create temporary file for database dump
-  ansible.builtin.tempfile:
-    state: file
-    prefix: "{{ openstack_helm_migrate_to_percona_xtradb_cluster_release_name }}"
-    suffix: .sql
-  register: _openstack_helm_migrate_to_percona_xtradb_cluster_file
-
-- name: Dump all of the databases to the local system
-  community.mysql.mysql_db:
-    login_host: "{{ _openstack_helm_migrate_to_percona_xtradb_cluster_legacy_ip }}"
-    login_user: root
-    login_password: "{{ _openstack_helm_migrate_to_percona_xtradb_cluster_legacy_password }}"
-    name: "{{ openstack_helm_migrate_to_percona_xtradb_cluster_databases }}"
-    state: dump
-    target: "{{ _openstack_helm_migrate_to_percona_xtradb_cluster_file.path }}"
-    skip_lock_tables: true
-    dump_extra_args: --skip-add-locks
-  async: 7200
-  poll: 5
-
-- name: Import databases to the new Percona XtraDB Cluster
-  community.mysql.mysql_db:
-    login_host: "{{ _openstack_helm_migrate_to_percona_xtradb_cluster_ip }}"
-    login_user: root
-    login_password: "{{ _openstack_helm_migrate_to_percona_xtradb_cluster_password }}"
-    name: |-
-      {{
-        (openstack_helm_migrate_to_percona_xtradb_cluster_databases | length > 1) |
-          ternary('all', openstack_helm_migrate_to_percona_xtradb_cluster_databases)
-      }}
-    state: import
-    target: "{{ _openstack_helm_migrate_to_percona_xtradb_cluster_file.path }}"
-  async: 7200
-  poll: 5
diff --git a/roles/openstack_helm_neutron/defaults/main.yml b/roles/openstack_helm_neutron/defaults/main.yml
index 4ddafe0..95ffe95 100644
--- a/roles/openstack_helm_neutron/defaults/main.yml
+++ b/roles/openstack_helm_neutron/defaults/main.yml
@@ -11,24 +11,6 @@
 #    :local:
 
 
-# .. envvar:: openstack_helm_neutron_chart_repo_name [[[
-#
-# Helm repository name for the chart.
-openstack_helm_neutron_chart_repo_name: openstack-helm
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_neutron_chart_repo_url [[[
-#
-# Helm repository URL for the chart.
-openstack_helm_neutron_chart_repo_url: https://tarballs.opendev.org/openstack/openstack-helm/
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_neutron_chart_name [[[
-#
-# Helm chart name (will also be used for release name)
-openstack_helm_neutron_chart_name: neutron
-
-                                                                   # ]]]
 # .. envvar:: openstack_helm_neutron_image_repository [[[
 #
 # Image repository location to be prefixed for all images
diff --git a/roles/openstack_helm_neutron/meta/main.yml b/roles/openstack_helm_neutron/meta/main.yml
index ce58dc4..ea1d39e 100644
--- a/roles/openstack_helm_neutron/meta/main.yml
+++ b/roles/openstack_helm_neutron/meta/main.yml
@@ -23,8 +23,4 @@
         - focal
 
 dependencies:
-  - role: helm_repository
-    vars:
-      helm_repository_name: "{{ openstack_helm_neutron_chart_repo_name }}"
-      helm_repository_repo_url: "{{ openstack_helm_neutron_chart_repo_url }}"
-  - openstacksdk
+  - role: openstacksdk
diff --git a/roles/openstack_helm_neutron/tasks/main.yml b/roles/openstack_helm_neutron/tasks/main.yml
index d9f470e..1ab9702 100644
--- a/roles/openstack_helm_neutron/tasks/main.yml
+++ b/roles/openstack_helm_neutron/tasks/main.yml
@@ -16,18 +16,62 @@
   ansible.builtin.include_role:
     name: openstack_helm_endpoints
   vars:
-    openstack_helm_endpoints_repo_name: "{{ openstack_helm_neutron_chart_repo_name }}"
-    openstack_helm_endpoints_repo_url: "{{ openstack_helm_neutron_chart_repo_url }}"
-    openstack_helm_endpoints_chart: "{{ openstack_helm_neutron_chart_name }}"
+    openstack_helm_endpoints_repo_name: openstack-helm
+    openstack_helm_endpoints_repo_url: https://tarballs.opendev.org/openstack/openstack-helm/
+    openstack_helm_endpoints_chart: neutron
 
 - name: Deploy Helm chart
-  kubernetes.core.helm:
-    name: "{{ openstack_helm_neutron_chart_name }}"
-    chart_ref: "{{ openstack_helm_neutron_chart_repo_name }}/{{ openstack_helm_neutron_chart_name }}"
-    chart_version: 0.2.14
-    release_namespace: openstack
-    kubeconfig: /etc/kubernetes/admin.conf
-    values: "{{ _openstack_helm_neutron_values | combine(openstack_helm_neutron_values, recursive=True) }}"
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      - apiVersion: source.toolkit.fluxcd.io/v1beta2
+        kind: HelmRepository
+        metadata:
+          name: openstack-helm
+          namespace: openstack
+        spec:
+          interval: 60s
+          url: https://tarballs.opendev.org/openstack/openstack-helm/
+
+      - apiVersion: v1
+        kind: Secret
+        metadata:
+          name: atmosphere-neutron
+          namespace: openstack
+        stringData:
+          values.yaml: "{{ _openstack_helm_neutron_values | combine(openstack_helm_neutron_values, recursive=True) | to_nice_yaml }}"
+
+      - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+        kind: HelmRelease
+        metadata:
+          name: neutron
+          namespace: openstack
+        spec:
+          interval: 60s
+          chart:
+            spec:
+              chart: neutron
+              version: 0.2.14
+              sourceRef:
+                kind: HelmRepository
+                name: openstack-helm
+          install:
+            disableWait: true
+          valuesFrom:
+            - kind: Secret
+              name: atmosphere-neutron
+            - kind: Secret
+              name: percona-xtradb
+              valuesKey: root
+              targetPath: endpoints.oslo_db.auth.admin.password
+            - kind: Secret
+              name: rabbitmq-neutron-default-user
+              valuesKey: username
+              targetPath: endpoints.oslo_messaging.auth.admin.username
+            - kind: Secret
+              name: rabbitmq-neutron-default-user
+              valuesKey: password
+              targetPath: endpoints.oslo_messaging.auth.admin.password
 
 - name: Create Ingress
   ansible.builtin.include_role:
@@ -38,50 +82,53 @@
     openstack_helm_ingress_service_port: 9696
     openstack_helm_ingress_annotations: "{{ openstack_helm_neutron_ingress_annotations }}"
 
-- name: Wait until network service ready
-  kubernetes.core.k8s_info:
-    api_version: apps/v1
-    kind: Deployment
-    name: neutron-server
-    namespace: openstack
-    wait_sleep: 10
-    wait_timeout: 600
-    wait: true
-    wait_condition:
-      type: Available
-      status: true
-
 - name: Create networks
-  openstack.cloud.network:
-    cloud: atmosphere
-    # Network settings
-    name: "{{ item.name }}"
-    external: "{{ item.external | default(omit) }}"
-    shared: "{{ item.shared | default(omit) }}"
-    mtu_size: "{{ item.mtu_size | default(omit) }}"
-    port_security_enabled: "{{ item.port_security_enabled | default(omit) }}"
-    provider_network_type: "{{ item.provider_network_type | default(omit) }}"
-    provider_physical_network: "{{ item.provider_physical_network | default(omit) }}"
-    provider_segmentation_id: "{{ item.provider_segmentation_id | default(omit) }}"
-  loop: "{{ openstack_helm_neutron_networks }}"
+  when: openstack_helm_neutron_networks | length > 0
+  block:
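+    # The readiness wait and network creation now only run when networks are actually configured.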
+    - name: Wait until network service ready
+      kubernetes.core.k8s_info:
+        api_version: apps/v1
+        kind: Deployment
+        name: neutron-server
+        namespace: openstack
+        wait_sleep: 10
+        wait_timeout: 600
+        wait: true
+        wait_condition:
+          type: Available
+          status: true
 
-- name: Create subnets
-  openstack.cloud.subnet:
-    cloud: atmosphere
-    # Subnet settings
-    network_name: "{{ item.0.name }}"
-    name: "{{ item.1.name }}"
-    ip_version: "{{ item.1.ip_version | default(omit) }}"
-    cidr: "{{ item.1.cidr | default(omit) }}"
-    gateway_ip: "{{ item.1.gateway_ip | default(omit) }}"
-    no_gateway_ip: "{{ item.1.no_gateway_ip | default(omit) }}"
-    allocation_pool_start: "{{ item.1.allocation_pool_start | default(omit) }}"
-    allocation_pool_end: "{{ item.1.allocation_pool_end | default(omit) }}"
-    dns_nameservers: "{{ item.1.dns_nameservers | default(omit) }}"
-    enable_dhcp: "{{ item.1.enable_dhcp | default(omit) }}"
-    host_routes: "{{ item.1.host_routes | default(omit) }}"
-    ipv6_address_mode: "{{ item.1.ipv6_address_mode | default(omit) }}"
-    ipv6_ra_mode: "{{ item.1.ipv6_ra_mode | default(omit) }}"
-  with_subelements:
-    - "{{ openstack_helm_neutron_networks }}"
-    - subnets
+    - name: Create networks
+      openstack.cloud.network:
+        cloud: atmosphere
+        # Network settings
+        name: "{{ item.name }}"
+        external: "{{ item.external | default(omit) }}"
+        shared: "{{ item.shared | default(omit) }}"
+        mtu_size: "{{ item.mtu_size | default(omit) }}"
+        port_security_enabled: "{{ item.port_security_enabled | default(omit) }}"
+        provider_network_type: "{{ item.provider_network_type | default(omit) }}"
+        provider_physical_network: "{{ item.provider_physical_network | default(omit) }}"
+        provider_segmentation_id: "{{ item.provider_segmentation_id | default(omit) }}"
+      loop: "{{ openstack_helm_neutron_networks }}"
+
+    - name: Create subnets
+      openstack.cloud.subnet:
+        cloud: atmosphere
+        # Subnet settings
+        network_name: "{{ item.0.name }}"
+        name: "{{ item.1.name }}"
+        ip_version: "{{ item.1.ip_version | default(omit) }}"
+        cidr: "{{ item.1.cidr | default(omit) }}"
+        gateway_ip: "{{ item.1.gateway_ip | default(omit) }}"
+        no_gateway_ip: "{{ item.1.no_gateway_ip | default(omit) }}"
+        allocation_pool_start: "{{ item.1.allocation_pool_start | default(omit) }}"
+        allocation_pool_end: "{{ item.1.allocation_pool_end | default(omit) }}"
+        dns_nameservers: "{{ item.1.dns_nameservers | default(omit) }}"
+        enable_dhcp: "{{ item.1.enable_dhcp | default(omit) }}"
+        host_routes: "{{ item.1.host_routes | default(omit) }}"
+        ipv6_address_mode: "{{ item.1.ipv6_address_mode | default(omit) }}"
+        ipv6_ra_mode: "{{ item.1.ipv6_ra_mode | default(omit) }}"
+      with_subelements:
+        - "{{ openstack_helm_neutron_networks }}"
+        - subnets
diff --git a/roles/openstack_helm_nova/defaults/main.yml b/roles/openstack_helm_nova/defaults/main.yml
index a683ea5..4a81d5a 100644
--- a/roles/openstack_helm_nova/defaults/main.yml
+++ b/roles/openstack_helm_nova/defaults/main.yml
@@ -11,24 +11,6 @@
 #    :local:
 
 
-# .. envvar:: openstack_helm_nova_chart_repo_name [[[
-#
-# Helm repository name for the chart.
-openstack_helm_nova_chart_repo_name: openstack-helm
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_nova_chart_repo_url [[[
-#
-# Helm repository URL for the chart.
-openstack_helm_nova_chart_repo_url: https://tarballs.opendev.org/openstack/openstack-helm/
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_nova_chart_name [[[
-#
-# Helm chart name (will also be used for release name)
-openstack_helm_nova_chart_name: nova
-
-                                                                   # ]]]
 # .. envvar:: openstack_helm_nova_image_repository [[[
 #
 # Image repository location to be prefixed for all images
diff --git a/roles/openstack_helm_nova/meta/main.yml b/roles/openstack_helm_nova/meta/main.yml
index 9d2a79f..0f9ae3f 100644
--- a/roles/openstack_helm_nova/meta/main.yml
+++ b/roles/openstack_helm_nova/meta/main.yml
@@ -23,8 +23,4 @@
         - focal
 
 dependencies:
-  - role: helm_repository
-    vars:
-      helm_repository_name: "{{ openstack_helm_nova_chart_repo_name }}"
-      helm_repository_repo_url: "{{ openstack_helm_nova_chart_repo_url }}"
-  - openstacksdk
+  - role: openstacksdk
diff --git a/roles/openstack_helm_nova/tasks/main.yml b/roles/openstack_helm_nova/tasks/main.yml
index 4ed0fe8..1598006 100644
--- a/roles/openstack_helm_nova/tasks/main.yml
+++ b/roles/openstack_helm_nova/tasks/main.yml
@@ -16,41 +16,9 @@
   ansible.builtin.include_role:
     name: openstack_helm_endpoints
   vars:
-    openstack_helm_endpoints_repo_name: "{{ openstack_helm_nova_chart_repo_name }}"
-    openstack_helm_endpoints_repo_url: "{{ openstack_helm_nova_chart_repo_url }}"
-    openstack_helm_endpoints_chart: "{{ openstack_helm_nova_chart_name }}"
-
-- name: Generate Helm values comparison
-  ansible.builtin.include_role:
-    name: helm_diff
-  vars:
-    helm_diff_release_name: "{{ openstack_helm_nova_chart_name }}"
-    helm_diff_release_namespace: openstack
-    helm_diff_values: "{{ _openstack_helm_nova_values }}"
-  when:
-    - openstack_helm_nova_diff | bool
-
-- name: Migrate database from MariaDB to Percona XtraDB Cluster
-  ansible.builtin.include_role:
-    name: openstack_helm_migrate_to_percona_xtradb_cluster
-  vars:
-    openstack_helm_migrate_to_percona_xtradb_cluster_release_name: "{{ openstack_helm_nova_chart_name }}"
-    openstack_helm_migrate_to_percona_xtradb_cluster_release_namespace: openstack
-    openstack_helm_migrate_to_percona_xtradb_cluster_databases:
-      - nova
-      - nova_api
-      - nova_cell0
-    openstack_helm_migrate_to_percona_xtradb_cluster_services:
-      - kind: Deployment
-        name: nova-api-metadata
-      - kind: Deployment
-        name: nova-api-osapi
-      - kind: Deployment
-        name: nova-conductor
-      - kind: Deployment
-        name: nova-scheduler
-  when:
-    - openstack_helm_nova_migrate_from_mariadb | bool
+    openstack_helm_endpoints_repo_name: openstack-helm
+    openstack_helm_endpoints_repo_url: https://tarballs.opendev.org/openstack/openstack-helm/
+    openstack_helm_endpoints_chart: nova
 
 - name: Generate public key for SSH private key
   become: false
@@ -69,6 +37,7 @@
       ansible.builtin.copy:
         dest: "{{ _nova_ssh_key_tempfile.path }}"
         content: "{{ openstack_helm_nova_ssh_key }}\n"
+        mode: "0600"
     - name: Generate public key for SSH private key
       changed_when: false
       community.crypto.openssh_keypair:
@@ -83,16 +52,65 @@
         state: absent
 
 - name: Deploy Helm chart
-  kubernetes.core.helm:
-    name: "{{ openstack_helm_nova_chart_name }}"
-    chart_ref: "{{ openstack_helm_nova_chart_repo_name }}/{{ openstack_helm_nova_chart_name }}"
-    chart_version: 0.2.32
-    release_namespace: openstack
-    kubeconfig: /etc/kubernetes/admin.conf
-    values: "{{ _openstack_helm_nova_values }}"
-    # NOTE(mnaser): This is a a workaround due to the fact that Nova's online
-    #               data migrations take forever.
-    timeout: 10m
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      - apiVersion: source.toolkit.fluxcd.io/v1beta2
+        kind: HelmRepository
+        metadata:
+          name: openstack-helm
+          namespace: openstack
+        spec:
+          interval: 60s
+          url: https://tarballs.opendev.org/openstack/openstack-helm/
+
+      - apiVersion: v1
+        kind: Secret
+        metadata:
+          name: atmosphere-nova
+          namespace: openstack
+        stringData:
+          values.yaml: "{{ _openstack_helm_nova_values | to_nice_yaml }}"
+
+      - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+        kind: HelmRelease
+        metadata:
+          name: nova
+          namespace: openstack
+        spec:
+          interval: 60s
+          chart:
+            spec:
+              chart: nova
+              version: 0.2.32
+              sourceRef:
+                kind: HelmRepository
+                name: openstack-helm
+          install:
+            disableWait: true
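+          # NOTE: with waiting disabled, the previous 10m timeout workaround for Nova's slow
+          # online data migrations is no longer needed.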
+          valuesFrom:
+            - kind: Secret
+              name: atmosphere-nova
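+            # Nova talks to three database endpoints (main, API and cell0), all sharing the same root credential.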
+            - kind: Secret
+              name: percona-xtradb
+              valuesKey: root
+              targetPath: endpoints.oslo_db.auth.admin.password
+            - kind: Secret
+              name: percona-xtradb
+              valuesKey: root
+              targetPath: endpoints.oslo_db_api.auth.admin.password
+            - kind: Secret
+              name: percona-xtradb
+              valuesKey: root
+              targetPath: endpoints.oslo_db_cell0.auth.admin.password
+            - kind: Secret
+              name: rabbitmq-nova-default-user
+              valuesKey: username
+              targetPath: endpoints.oslo_messaging.auth.admin.username
+            - kind: Secret
+              name: rabbitmq-nova-default-user
+              valuesKey: password
+              targetPath: endpoints.oslo_messaging.auth.admin.password
 
 - name: Create Ingress
   ansible.builtin.include_role:
@@ -112,31 +130,34 @@
     openstack_helm_ingress_service_port: 6080
     openstack_helm_ingress_annotations: "{{ openstack_helm_nova_ingress_annotations }}"
 
-- name: Wait until compute api service ready
-  kubernetes.core.k8s_info:
-    api_version: apps/v1
-    kind: Deployment
-    name: nova-api-osapi
-    namespace: openstack
-    wait_sleep: 10
-    wait_timeout: 600
-    wait: true
-    wait_condition:
-      type: Available
-      status: true
-
 - name: Create flavors
-  openstack.cloud.compute_flavor:
-    cloud: atmosphere
-    # Flavor settings
-    flavorid: "{{ item.flavorid | default(omit) }}"
-    name: "{{ item.name }}"
-    vcpus: "{{ item.vcpus }}"
-    ram: "{{ item.ram }}"
-    disk: "{{ item.disk | default(omit) }}"
-    ephemeral: "{{ item.ephemeral | default(omit) }}"
-    swap: "{{ item.swap | default(omit) }}"
-    is_public: "{{ item.is_public | default(omit) }}"
-    rxtx_factor: "{{ item.rxtx_factor | default(omit) }}"
-    extra_specs: "{{ item.extra_specs | default(omit) }}"
-  loop: "{{ openstack_helm_nova_flavors }}"
+  when: openstack_helm_nova_flavors | length > 0
+  block:
+    - name: Wait until compute api service ready
+      kubernetes.core.k8s_info:
+        api_version: apps/v1
+        kind: Deployment
+        name: nova-api-osapi
+        namespace: openstack
+        wait_sleep: 10
+        wait_timeout: 600
+        wait: true
+        wait_condition:
+          type: Available
+          status: true
+
+    - name: Create flavors
+      openstack.cloud.compute_flavor:
+        cloud: atmosphere
+        # Flavor settings
+        flavorid: "{{ item.flavorid | default(omit) }}"
+        name: "{{ item.name }}"
+        vcpus: "{{ item.vcpus }}"
+        ram: "{{ item.ram }}"
+        disk: "{{ item.disk | default(omit) }}"
+        ephemeral: "{{ item.ephemeral | default(omit) }}"
+        swap: "{{ item.swap | default(omit) }}"
+        is_public: "{{ item.is_public | default(omit) }}"
+        rxtx_factor: "{{ item.rxtx_factor | default(omit) }}"
+        extra_specs: "{{ item.extra_specs | default(omit) }}"
+      loop: "{{ openstack_helm_nova_flavors }}"
diff --git a/roles/openstack_helm_placement/defaults/main.yml b/roles/openstack_helm_placement/defaults/main.yml
index f6f6c5a..8436b2c 100644
--- a/roles/openstack_helm_placement/defaults/main.yml
+++ b/roles/openstack_helm_placement/defaults/main.yml
@@ -11,24 +11,6 @@
 #    :local:
 
 
-# .. envvar:: openstack_helm_placement_chart_repo_name [[[
-#
-# Helm repository name for the chart.
-openstack_helm_placement_chart_repo_name: openstack-helm
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_placement_chart_repo_url [[[
-#
-# Helm repository URL for the chart.
-openstack_helm_placement_chart_repo_url: https://tarballs.opendev.org/openstack/openstack-helm/
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_placement_chart_name [[[
-#
-# Helm chart name (will also be used for release name)
-openstack_helm_placement_chart_name: placement
-
-                                                                   # ]]]
 # .. envvar:: openstack_helm_placement_image_repository [[[
 #
 # Image repository location to be prefixed for all images
diff --git a/roles/openstack_helm_placement/meta/main.yml b/roles/openstack_helm_placement/meta/main.yml
index 83f5e78..7b99251 100644
--- a/roles/openstack_helm_placement/meta/main.yml
+++ b/roles/openstack_helm_placement/meta/main.yml
@@ -21,9 +21,3 @@
     - name: Ubuntu
       versions:
         - focal
-
-dependencies:
-  - role: helm_repository
-    vars:
-      helm_repository_name: "{{ openstack_helm_placement_chart_repo_name }}"
-      helm_repository_repo_url: "{{ openstack_helm_placement_chart_repo_url }}"
diff --git a/roles/openstack_helm_placement/tasks/main.yml b/roles/openstack_helm_placement/tasks/main.yml
index 9bd589a..88208e7 100644
--- a/roles/openstack_helm_placement/tasks/main.yml
+++ b/roles/openstack_helm_placement/tasks/main.yml
@@ -16,18 +16,54 @@
   ansible.builtin.include_role:
     name: openstack_helm_endpoints
   vars:
-    openstack_helm_endpoints_repo_name: "{{ openstack_helm_placement_chart_repo_name }}"
-    openstack_helm_endpoints_repo_url: "{{ openstack_helm_placement_chart_repo_url }}"
-    openstack_helm_endpoints_chart: "{{ openstack_helm_placement_chart_name }}"
+    openstack_helm_endpoints_repo_name: openstack-helm
+    openstack_helm_endpoints_repo_url: https://tarballs.opendev.org/openstack/openstack-helm/
+    openstack_helm_endpoints_chart: placement
 
 - name: Deploy Helm chart
-  kubernetes.core.helm:
-    name: "{{ openstack_helm_placement_chart_name }}"
-    chart_ref: "{{ openstack_helm_placement_chart_repo_name }}/{{ openstack_helm_placement_chart_name }}"
-    chart_version: 0.2.5
-    release_namespace: openstack
-    kubeconfig: /etc/kubernetes/admin.conf
-    values: "{{ _openstack_helm_placement_values | combine(openstack_helm_placement_values, recursive=True) }}"
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      - apiVersion: source.toolkit.fluxcd.io/v1beta2
+        kind: HelmRepository
+        metadata:
+          name: openstack-helm
+          namespace: openstack
+        spec:
+          interval: 60s
+          url: https://tarballs.opendev.org/openstack/openstack-helm/
+
+      - apiVersion: v1
+        kind: Secret
+        metadata:
+          name: atmosphere-placement
+          namespace: openstack
+        stringData:
+          values.yaml: "{{ _openstack_helm_placement_values | combine(openstack_helm_placement_values, recursive=True) | to_nice_yaml }}"
+
+      - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+        kind: HelmRelease
+        metadata:
+          name: placement
+          namespace: openstack
+        spec:
+          interval: 60s
+          chart:
+            spec:
+              chart: placement
+              version: 0.2.10
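+              # NOTE: chart version bumped from 0.2.5 as part of this migration.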
+              sourceRef:
+                kind: HelmRepository
+                name: openstack-helm
+          install:
+            disableWait: true
+          valuesFrom:
+            - kind: Secret
+              name: atmosphere-placement
+            - kind: Secret
+              name: percona-xtradb
+              valuesKey: root
+              targetPath: endpoints.oslo_db.auth.admin.password
 
 - name: Create Ingress
   ansible.builtin.include_role:
diff --git a/roles/openstack_helm_senlin/defaults/main.yml b/roles/openstack_helm_senlin/defaults/main.yml
index bfe689b..770364b 100644
--- a/roles/openstack_helm_senlin/defaults/main.yml
+++ b/roles/openstack_helm_senlin/defaults/main.yml
@@ -11,24 +11,6 @@
 #    :local:
 
 
-# .. envvar:: openstack_helm_senlin_chart_repo_name [[[
-#
-# Helm repository name for the chart.
-openstack_helm_senlin_chart_repo_name: openstack-helm
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_senlin_chart_repo_url [[[
-#
-# Helm repository URL for the chart.
-openstack_helm_senlin_chart_repo_url: https://tarballs.opendev.org/openstack/openstack-helm/
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_senlin_chart_name [[[
-#
-# Helm chart name (will also be used for release name)
-openstack_helm_senlin_chart_name: senlin
-
-                                                                   # ]]]
 # .. envvar:: openstack_helm_senlin_image_repository [[[
 #
 # Image repository location to be prefixed for all images
diff --git a/roles/openstack_helm_senlin/meta/main.yml b/roles/openstack_helm_senlin/meta/main.yml
index 77da952..5a0c03c 100644
--- a/roles/openstack_helm_senlin/meta/main.yml
+++ b/roles/openstack_helm_senlin/meta/main.yml
@@ -21,9 +21,3 @@
     - name: Ubuntu
       versions:
         - focal
-
-dependencies:
-  - role: helm_repository
-    vars:
-      helm_repository_name: "{{ openstack_helm_senlin_chart_repo_name }}"
-      helm_repository_repo_url: "{{ openstack_helm_senlin_chart_repo_url }}"
diff --git a/roles/openstack_helm_senlin/tasks/main.yml b/roles/openstack_helm_senlin/tasks/main.yml
index d9a1b3d..0a957dd 100644
--- a/roles/openstack_helm_senlin/tasks/main.yml
+++ b/roles/openstack_helm_senlin/tasks/main.yml
@@ -16,48 +16,62 @@
   ansible.builtin.include_role:
     name: openstack_helm_endpoints
   vars:
-    openstack_helm_endpoints_repo_name: "{{ openstack_helm_senlin_chart_repo_name }}"
-    openstack_helm_endpoints_repo_url: "{{ openstack_helm_senlin_chart_repo_url }}"
-    openstack_helm_endpoints_chart: "{{ openstack_helm_senlin_chart_name }}"
-
-- name: Generate Helm values comparison
-  ansible.builtin.include_role:
-    name: helm_diff
-  vars:
-    helm_diff_release_name: "{{ openstack_helm_senlin_chart_name }}"
-    helm_diff_release_namespace: openstack
-    helm_diff_values: "{{ _openstack_helm_senlin_values }}"
-  when:
-    - openstack_helm_senlin_diff | bool
-
-- name: Migrate database from MariaDB to Percona XtraDB Cluster
-  ansible.builtin.include_role:
-    name: openstack_helm_migrate_to_percona_xtradb_cluster
-  vars:
-    openstack_helm_migrate_to_percona_xtradb_cluster_release_name: "{{ openstack_helm_senlin_chart_name }}"
-    openstack_helm_migrate_to_percona_xtradb_cluster_release_namespace: openstack
-    openstack_helm_migrate_to_percona_xtradb_cluster_databases:
-      - senlin
-    openstack_helm_migrate_to_percona_xtradb_cluster_services:
-      - kind: Deployment
-        name: senlin-api
-      - kind: Deployment
-        name: senlin-conductor
-      - kind: Deployment
-        name: senlin-engine
-      - kind: Deployment
-        name: senlin-health-manager
-  when:
-    - openstack_helm_senlin_migrate_from_mariadb | bool
+    openstack_helm_endpoints_repo_name: openstack-helm
+    openstack_helm_endpoints_repo_url: https://tarballs.opendev.org/openstack/openstack-helm/
+    openstack_helm_endpoints_chart: senlin
 
 - name: Deploy Helm chart
-  kubernetes.core.helm:
-    name: "{{ openstack_helm_senlin_chart_name }}"
-    chart_ref: "{{ openstack_helm_senlin_chart_repo_name }}/{{ openstack_helm_senlin_chart_name }}"
-    chart_version: 0.2.6
-    release_namespace: openstack
-    kubeconfig: /etc/kubernetes/admin.conf
-    values: "{{ _openstack_helm_senlin_values }}"
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      - apiVersion: source.toolkit.fluxcd.io/v1beta2
+        kind: HelmRepository
+        metadata:
+          name: openstack-helm
+          namespace: openstack
+        spec:
+          interval: 60s
+          url: https://tarballs.opendev.org/openstack/openstack-helm/
+
+      - apiVersion: v1
+        kind: Secret
+        metadata:
+          name: atmosphere-senlin
+          namespace: openstack
+        stringData:
+          values.yaml: "{{ _openstack_helm_senlin_values | to_nice_yaml }}"
+
+      - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+        kind: HelmRelease
+        metadata:
+          name: senlin
+          namespace: openstack
+        spec:
+          interval: 60s
+          chart:
+            spec:
+              chart: senlin
+              version: 0.2.6
+              sourceRef:
+                kind: HelmRepository
+                name: openstack-helm
+          install:
+            disableWait: true
+          valuesFrom:
+            - kind: Secret
+              name: atmosphere-senlin
+            - kind: Secret
+              name: percona-xtradb
+              valuesKey: root
+              targetPath: endpoints.oslo_db.auth.admin.password
+            - kind: Secret
+              name: rabbitmq-senlin-default-user
+              valuesKey: username
+              targetPath: endpoints.oslo_messaging.auth.admin.username
+            - kind: Secret
+              name: rabbitmq-senlin-default-user
+              valuesKey: password
+              targetPath: endpoints.oslo_messaging.auth.admin.password
 
 - name: Create Ingress
   ansible.builtin.include_role:
diff --git a/roles/openstack_helm_tempest/defaults/main.yml b/roles/openstack_helm_tempest/defaults/main.yml
index 6ccf3b3..944faa0 100644
--- a/roles/openstack_helm_tempest/defaults/main.yml
+++ b/roles/openstack_helm_tempest/defaults/main.yml
@@ -11,24 +11,6 @@
 #    :local:
 
 
-# .. envvar:: openstack_helm_tempest_chart_repo_name [[[
-#
-# Helm repository name for the chart.
-openstack_helm_tempest_chart_repo_name: openstack-helm
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_tempest_chart_repo_url [[[
-#
-# Helm repository URL for the chart.
-openstack_helm_tempest_chart_repo_url: https://tarballs.opendev.org/openstack/openstack-helm/
-
-                                                                   # ]]]
-# .. envvar:: openstack_helm_tempest_chart_name [[[
-#
-# Helm chart name (will also be used for release name)
-openstack_helm_tempest_chart_name: tempest
-
-                                                                   # ]]]
 # .. envvar:: openstack_helm_tempest_image_repository [[[
 #
 # Image repository location to be prefixed for all images
diff --git a/roles/openstack_helm_tempest/meta/main.yml b/roles/openstack_helm_tempest/meta/main.yml
index 53ed300..09e2ee9 100644
--- a/roles/openstack_helm_tempest/meta/main.yml
+++ b/roles/openstack_helm_tempest/meta/main.yml
@@ -23,8 +23,4 @@
         - focal
 
 dependencies:
-  - role: helm_repository
-    vars:
-      helm_repository_name: "{{ openstack_helm_tempest_chart_repo_name }}"
-      helm_repository_repo_url: "{{ openstack_helm_tempest_chart_repo_url }}"
-  - openstacksdk
+  - role: openstacksdk
diff --git a/roles/openstack_helm_tempest/tasks/main.yml b/roles/openstack_helm_tempest/tasks/main.yml
index b0c5033..1237bee 100644
--- a/roles/openstack_helm_tempest/tasks/main.yml
+++ b/roles/openstack_helm_tempest/tasks/main.yml
@@ -16,9 +16,9 @@
   ansible.builtin.include_role:
     name: openstack_helm_endpoints
   vars:
-    openstack_helm_endpoints_repo_name: "{{ openstack_helm_tempest_chart_repo_name }}"
-    openstack_helm_endpoints_repo_url: "{{ openstack_helm_tempest_chart_repo_url }}"
-    openstack_helm_endpoints_chart: "{{ openstack_helm_tempest_chart_name }}"
+    openstack_helm_endpoints_repo_name: openstack-helm
+    openstack_helm_endpoints_repo_url: https://tarballs.opendev.org/openstack/openstack-helm/
+    openstack_helm_endpoints_chart: tempest
 
 - name: Configure tempest
   block:
@@ -79,17 +79,22 @@
         - openstack_helm_tempest_values.conf.tempest.network.public_network_id is not defined
         - _openstack_helm_tempest_test_network.openstack_networks[0].id is defined
 
+- name: Configure Helm repository
+  kubernetes.core.helm_repository:
+    name: openstack-helm
+    repo_url: https://tarballs.opendev.org/openstack/openstack-helm/
+
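+# NOTE: tempest stays on the client-side Helm CLI for now; the run blocks on the chart and any
+# failure is tolerated so the job result can be inspected below.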
 - name: Deploy Helm chart
+  failed_when: false
   kubernetes.core.helm:
-    name: "{{ openstack_helm_tempest_chart_name }}"
-    chart_ref: "{{ openstack_helm_tempest_chart_repo_name }}/{{ openstack_helm_tempest_chart_name }}"
+    name: tempest
+    chart_ref: openstack-helm/tempest
     chart_version: 0.2.3
     release_namespace: openstack
     kubeconfig: /etc/kubernetes/admin.conf
     wait: true
     wait_timeout: 20m
     values: "{{ _openstack_helm_tempest_values | combine(openstack_helm_tempest_values, recursive=True) }}"
-  ignore_errors: true
 
 - name: Get tempest job object
   kubernetes.core.k8s_info:
diff --git a/roles/openstacksdk/tasks/main.yml b/roles/openstacksdk/tasks/main.yml
index 370ed94..f444d9d 100644
--- a/roles/openstacksdk/tasks/main.yml
+++ b/roles/openstacksdk/tasks/main.yml
@@ -29,6 +29,7 @@
     state: directory
     owner: root
     group: root
+    mode: "0600"
 
 - name: Generate cloud config file
   become: true
diff --git a/roles/percona_xtradb_cluster/meta/main.yml b/roles/percona_xtradb_cluster/meta/main.yml
index 792c6b1..0a13f94 100644
--- a/roles/percona_xtradb_cluster/meta/main.yml
+++ b/roles/percona_xtradb_cluster/meta/main.yml
@@ -21,9 +21,3 @@
     - name: Ubuntu
       versions:
         - focal
-
-dependencies:
-  - role: helm_repository
-    vars:
-      helm_repository_name: percona
-      helm_repository_repo_url: https://percona.github.io/percona-helm-charts/
diff --git a/roles/percona_xtradb_cluster/tasks/main.yml b/roles/percona_xtradb_cluster/tasks/main.yml
index f92c767..6aa01fd 100644
--- a/roles/percona_xtradb_cluster/tasks/main.yml
+++ b/roles/percona_xtradb_cluster/tasks/main.yml
@@ -12,173 +12,190 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-- name: Deploy Helm chart
-  kubernetes.core.helm:
-    name: pxc-operator
-    chart_ref: percona/pxc-operator
-    chart_version: 1.10.0
-    release_namespace: openstack
-    create_namespace: true
-    kubeconfig: /etc/kubernetes/admin.conf
-    values:
-      nodeSelector:
-        openstack-control-plane: enabled
+- name: Deploy operator
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      - apiVersion: source.toolkit.fluxcd.io/v1beta2
+        kind: HelmRepository
+        metadata:
+          name: percona
+          namespace: openstack
+        spec:
+          interval: 60s
+          url: https://percona.github.io/percona-helm-charts/
+
+      - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+        kind: HelmRelease
+        metadata:
+          name: pxc-operator
+          namespace: openstack
+        spec:
+          interval: 60s
+          chart:
+            spec:
+              chart: pxc-operator
+              version: 1.10.0
+              sourceRef:
+                kind: HelmRepository
+                name: percona
+          install:
+            crds: CreateReplace
+          upgrade:
+            crds: CreateReplace
+          values:
+            nodeSelector:
+              openstack-control-plane: enabled
 
 - name: Deploy cluster
   kubernetes.core.k8s:
     state: present
     definition:
-      apiVersion: pxc.percona.com/v1-10-0
-      kind: PerconaXtraDBCluster
-      metadata:
-        name: percona-xtradb
-        namespace: openstack
-      spec:
-        crVersion: 1.10.0
-        secretsName: percona-xtradb
-        pxc:
-          size: 3
-          # NOTE(mnaser): https://jira.percona.com/browse/PXC-3914
-          image: us-docker.pkg.dev/vexxhost-infra/openstack/percona-xtradb-cluster:5.7.36-31.55-socatfix
-          autoRecovery: true
-          configuration: |
-            [mysqld]
-            max_connections=8192
-          sidecars:
-            - name: exporter
-              image: quay.io/prometheus/mysqld-exporter:v0.14.0
-              ports:
-                - name: metrics
-                  containerPort: 9104
-              livenessProbe:
-                httpGet:
-                  path: /
-                  port: 9104
-              env:
-                - name: MONITOR_PASSWORD
-                  valueFrom:
-                    secretKeyRef:
-                      name: percona-xtradb
-                      key: monitor
-                - name: DATA_SOURCE_NAME
-                  value: "monitor:$(MONITOR_PASSWORD)@(localhost:3306)/"
-          nodeSelector:
-            openstack-control-plane: enabled
-          volumeSpec:
-            persistentVolumeClaim:
-              resources:
-                requests:
-                  storage: 160Gi
-        haproxy:
-          enabled: true
-          size: 3
-          image: percona/percona-xtradb-cluster-operator:1.10.0-haproxy
-          nodeSelector:
-            openstack-control-plane: enabled
-    wait: true
-    wait_timeout: 600
-    wait_condition:
-      type: "ready"
-      status: "True"
+      - apiVersion: pxc.percona.com/v1-10-0
+        kind: PerconaXtraDBCluster
+        metadata:
+          name: percona-xtradb
+          namespace: openstack
+        spec:
+          crVersion: 1.10.0
+          secretsName: percona-xtradb
+          pxc:
+            size: 3
+            # NOTE(mnaser): https://jira.percona.com/browse/PXC-3914
+            image: us-docker.pkg.dev/vexxhost-infra/openstack/percona-xtradb-cluster:5.7.36-31.55-socatfix
+            autoRecovery: true
+            configuration: |
+              [mysqld]
+              max_connections=8192
+            sidecars:
+              - name: exporter
+                image: quay.io/prometheus/mysqld-exporter:v0.14.0
+                ports:
+                  - name: metrics
+                    containerPort: 9104
+                livenessProbe:
+                  httpGet:
+                    path: /
+                    port: 9104
+                env:
+                  - name: MONITOR_PASSWORD
+                    valueFrom:
+                      secretKeyRef:
+                        name: percona-xtradb
+                        key: monitor
+                  - name: DATA_SOURCE_NAME
+                    value: "monitor:$(MONITOR_PASSWORD)@(localhost:3306)/"
+            nodeSelector:
+              openstack-control-plane: enabled
+            volumeSpec:
+              persistentVolumeClaim:
+                resources:
+                  requests:
+                    storage: 160Gi
+          haproxy:
+            enabled: true
+            size: 3
+            image: percona/percona-xtradb-cluster-operator:1.10.0-haproxy
+            nodeSelector:
+              openstack-control-plane: enabled
 
-- name: Create PodMonitor
-  kubernetes.core.k8s:
-    state: present
-    definition:
-      apiVersion: monitoring.coreos.com/v1
-      kind: PodMonitor
-      metadata:
-        name: percona-xtradb-pxc
-        namespace: monitoring
-        labels:
-          release: kube-prometheus-stack
-      spec:
-        jobLabel: app.kubernetes.io/component
-        podMetricsEndpoints:
-          - port: metrics
-            path: /metrics
-            relabelings:
-              - sourceLabels: ["__meta_kubernetes_pod_name"]
-                targetLabel: "instance"
-              - action: "labeldrop"
-                regex: "^(container|endpoint|namespace|pod|service)$"
-        namespaceSelector:
-          matchNames:
-            - openstack
-        selector:
-          matchLabels:
-            app.kubernetes.io/component: pxc
-            app.kubernetes.io/instance: percona-xtradb
+      - apiVersion: monitoring.coreos.com/v1
+        kind: PodMonitor
+        metadata:
+          name: percona-xtradb-pxc
+          namespace: monitoring
+          labels:
+            release: kube-prometheus-stack
+        spec:
+          jobLabel: app.kubernetes.io/component
+          podMetricsEndpoints:
+            - port: metrics
+              path: /metrics
+              relabelings:
+                - sourceLabels: ["__meta_kubernetes_pod_name"]
+                  targetLabel: "instance"
+                - action: "labeldrop"
+                  regex: "^(container|endpoint|namespace|pod|service)$"
+          namespaceSelector:
+            matchNames:
+              - openstack
+          selector:
+            matchLabels:
+              app.kubernetes.io/component: pxc
+              app.kubernetes.io/instance: percona-xtradb
 
-- name: Create PrometheusRule
-  kubernetes.core.k8s:
-    state: present
-    definition:
-      apiVersion: monitoring.coreos.com/v1
-      kind: PrometheusRule
-      metadata:
-        name: percona-xtradb-pxc
-        namespace: monitoring
-        labels:
-          release: kube-prometheus-stack
-      spec:
-        groups:
-          # TODO: basic rules
-          - name: general
-            rules:
-              - alert: MySQLDown
-                expr: mysql_up != 1
-                for: 5m
-                labels:
-                  severity: critical
-              - alert: MysqlTooManyConnections
-                expr: max_over_time(mysql_global_status_threads_connected[1m]) / mysql_global_variables_max_connections * 100 > 80
-                for: 2m
-                labels:
-                  severity: warning
-              - alert: MysqlHighThreadsRunning
-                expr: max_over_time(mysql_global_status_threads_running[1m]) / mysql_global_variables_max_connections * 100 > 60
-                for: 2m
-                labels:
-                  severity: warning
-              - alert: MysqlSlowQueries
-                expr: increase(mysql_global_status_slow_queries[1m]) > 0
-                for: 2m
-                labels:
-                  severity: warning
-          - name: galera
-            rules:
-              - alert: MySQLGaleraNotReady
-                expr: mysql_global_status_wsrep_ready != 1
-                for: 5m
-                labels:
-                  severity: critical
-              - alert: MySQLGaleraOutOfSync
-                expr: mysql_global_status_wsrep_local_state != 4 and mysql_global_variables_wsrep_desync == 0
-                for: 5m
-                labels:
-                  severity: critical
-              - alert: MySQLGaleraDonorFallingBehind
-                expr: mysql_global_status_wsrep_local_state == 2 and mysql_global_status_wsrep_local_recv_queue > 100
-                for: 5m
-                labels:
-                  severity: warning
-              - alert: MySQLReplicationNotRunning
-                expr: mysql_slave_status_slave_io_running == 0 or mysql_slave_status_slave_sql_running == 0
-                for: 2m
-                labels:
-                  severity: critical
-              - alert: MySQLReplicationLag
-                expr: (instance:mysql_slave_lag_seconds > 30) and on(instance) (predict_linear(instance:mysql_slave_lag_seconds[5m], 60 * 2) > 0)
-                for: 1m
-                labels:
-                  severity: critical
-              - alert: MySQLHeartbeatLag
-                expr: (instance:mysql_heartbeat_lag_seconds > 30) and on(instance) (predict_linear(instance:mysql_heartbeat_lag_seconds[5m], 60 * 2) > 0)
-                for: 1m
-                labels:
-                  severity: critical
-              - alert: MySQLInnoDBLogWaits
-                expr: rate(mysql_global_status_innodb_log_waits[15m]) > 10
-                labels:
-                  severity: warning
+      - apiVersion: monitoring.coreos.com/v1
+        kind: PrometheusRule
+        metadata:
+          name: percona-xtradb-pxc
+          namespace: monitoring
+          labels:
+            release: kube-prometheus-stack
+        spec:
+          groups:
+            # TODO: basic rules
+            - name: general
+              rules:
+                - alert: MySQLDown
+                  expr: mysql_up != 1
+                  for: 5m
+                  labels:
+                    severity: critical
+                - alert: MysqlTooManyConnections
+                  expr: max_over_time(mysql_global_status_threads_connected[1m]) / mysql_global_variables_max_connections * 100 > 80
+                  for: 2m
+                  labels:
+                    severity: warning
+                - alert: MysqlHighThreadsRunning
+                  expr: max_over_time(mysql_global_status_threads_running[1m]) / mysql_global_variables_max_connections * 100 > 60
+                  for: 2m
+                  labels:
+                    severity: warning
+                - alert: MysqlSlowQueries
+                  expr: increase(mysql_global_status_slow_queries[1m]) > 0
+                  for: 2m
+                  labels:
+                    severity: warning
+            - name: galera
+              rules:
+                - alert: MySQLGaleraNotReady
+                  expr: mysql_global_status_wsrep_ready != 1
+                  for: 5m
+                  labels:
+                    severity: critical
+                - alert: MySQLGaleraOutOfSync
+                  expr: mysql_global_status_wsrep_local_state != 4 and mysql_global_variables_wsrep_desync == 0
+                  for: 5m
+                  labels:
+                    severity: critical
+                - alert: MySQLGaleraDonorFallingBehind
+                  expr: mysql_global_status_wsrep_local_state == 2 and mysql_global_status_wsrep_local_recv_queue > 100
+                  for: 5m
+                  labels:
+                    severity: warning
+                - alert: MySQLReplicationNotRunning
+                  expr: mysql_slave_status_slave_io_running == 0 or mysql_slave_status_slave_sql_running == 0
+                  for: 2m
+                  labels:
+                    severity: critical
+                - alert: MySQLReplicationLag
+                  expr: (instance:mysql_slave_lag_seconds > 30) and on(instance) (predict_linear(instance:mysql_slave_lag_seconds[5m], 60 * 2) > 0)
+                  for: 1m
+                  labels:
+                    severity: critical
+                - alert: MySQLHeartbeatLag
+                  expr: (instance:mysql_heartbeat_lag_seconds > 30) and on(instance) (predict_linear(instance:mysql_heartbeat_lag_seconds[5m], 60 * 2) > 0)
+                  for: 1m
+                  labels:
+                    severity: critical
+                - alert: MySQLInnoDBLogWaits
+                  expr: rate(mysql_global_status_innodb_log_waits[15m]) > 10
+                  labels:
+                    severity: warning
+  # NOTE(mnaser): Flux installs the Helm charts asynchronously, so the CRDs
+  #               referenced here may not exist yet when this task runs;
+  #               retry until they do.
+  retries: 60
+  delay: 5
+  register: _result
+  until: _result is not failed
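The retry loop above polls for up to five minutes (60 retries at a 5 second
delay) because the CRDs only appear once Flux reconciles the operator's
HelmRelease. A more explicit gate would wait on the CRD itself before applying
the cluster resources; in this sketch, the perconaxtradbclusters.pxc.percona.com
CRD name is an assumption taken from the pxc-operator chart:

    # Sketch only: block until the operator's CRD reports Established.
    - name: Wait until the PerconaXtraDBCluster CRD is established
      kubernetes.core.k8s_info:
        api_version: apiextensions.k8s.io/v1
        kind: CustomResourceDefinition
        name: perconaxtradbclusters.pxc.percona.com
        kubeconfig: /etc/kubernetes/admin.conf
      register: _pxc_crd
      retries: 60
      delay: 5
      until: >-
        _pxc_crd.resources | length > 0 and
        (_pxc_crd.resources[0].status.conditions | default([])
        | selectattr('type', 'equalto', 'Established')
        | selectattr('status', 'equalto', 'True')
        | list | length > 0)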
diff --git a/roles/prometheus_pushgateway/meta/main.yml b/roles/prometheus_pushgateway/meta/main.yml
index cadc6e9..0a3a806 100644
--- a/roles/prometheus_pushgateway/meta/main.yml
+++ b/roles/prometheus_pushgateway/meta/main.yml
@@ -21,9 +21,3 @@
     - name: Ubuntu
       versions:
         - focal
-
-dependencies:
-  - role: helm_repository
-    vars:
-      helm_repository_name: prometheus-community
-      helm_repository_repo_url: https://prometheus-community.github.io/helm-charts
diff --git a/roles/prometheus_pushgateway/tasks/main.yml b/roles/prometheus_pushgateway/tasks/main.yml
index 5042914..20a1b63 100644
--- a/roles/prometheus_pushgateway/tasks/main.yml
+++ b/roles/prometheus_pushgateway/tasks/main.yml
@@ -13,22 +13,42 @@
 # under the License.
 
 - name: Deploy Helm chart
-  kubernetes.core.helm:
-    name: prometheus-pushgateway
-    chart_ref: prometheus-community/prometheus-pushgateway
-    chart_version: 1.16.0
-    release_namespace: monitoring
-    kubeconfig: /etc/kubernetes/admin.conf
-    values:
-      nodeSelector:
-        openstack-control-plane: enabled
-      serviceMonitor:
-        enabled: true
-        namespace: monitoring
-        additionalLabels:
-          release: kube-prometheus-stack
-        relabelings:
-          - sourceLabels: ["__meta_kubernetes_pod_name"]
-            targetLabel: "instance"
-          - regex: "^(container|endpoint|namespace|pod|service)$"
-            action: "labeldrop"
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      - apiVersion: source.toolkit.fluxcd.io/v1beta2
+        kind: HelmRepository
+        metadata:
+          name: prometheus-community
+          namespace: monitoring
+        spec:
+          interval: 60s
+          url: https://prometheus-community.github.io/helm-charts
+
+      - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+        kind: HelmRelease
+        metadata:
+          name: prometheus-pushgateway
+          namespace: monitoring
+        spec:
+          interval: 60s
+          chart:
+            spec:
+              chart: prometheus-pushgateway
+              version: 1.16.0
+              sourceRef:
+                kind: HelmRepository
+                name: prometheus-community
+          values:
+            nodeSelector:
+              openstack-control-plane: enabled
+            serviceMonitor:
+              enabled: true
+              namespace: monitoring
+              additionalLabels:
+                release: kube-prometheus-stack
+              relabelings:
+                - sourceLabels: ["__meta_kubernetes_pod_name"]
+                  targetLabel: "instance"
+                - regex: "^(container|endpoint|namespace|pod|service)$"
+                  action: "labeldrop"
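Inline values on a HelmRelease behave like a client-side values file passed to
helm upgrade. Flux can also source overrides from a ConfigMap or Secret through
valuesFrom, which keeps configuration editable without touching the release
object. A sketch, where the pushgateway-values ConfigMap is hypothetical:

    apiVersion: helm.toolkit.fluxcd.io/v2beta1
    kind: HelmRelease
    metadata:
      name: prometheus-pushgateway
      namespace: monitoring
    spec:
      interval: 60s
      chart:
        spec:
          chart: prometheus-pushgateway
          version: 1.16.0
          sourceRef:
            kind: HelmRepository
            name: prometheus-community
      valuesFrom:
        # Hypothetical ConfigMap holding the same overrides as the inline values.
        - kind: ConfigMap
          name: pushgateway-values
          valuesKey: values.yaml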
diff --git a/roles/rabbitmq/meta/main.yml b/roles/rabbitmq/meta/main.yml
index da0f4ee..dbe22a6 100644
--- a/roles/rabbitmq/meta/main.yml
+++ b/roles/rabbitmq/meta/main.yml
@@ -23,4 +23,4 @@
         - focal
 
 dependencies:
-  - rabbitmq_operator
+  - role: rabbitmq_operator
diff --git a/roles/rabbitmq/tasks/main.yml b/roles/rabbitmq/tasks/main.yml
index 1ff2f81..8e51bfe 100644
--- a/roles/rabbitmq/tasks/main.yml
+++ b/roles/rabbitmq/tasks/main.yml
@@ -43,3 +43,10 @@
     wait_condition:
       type: ClusterAvailable
       status: "True"
+  # NOTE(mnaser): Flux installs the operator chart asynchronously, so the
+  #               RabbitmqCluster CRD may not exist yet when this task runs;
+  #               retry until it does.
+  retries: 60
+  delay: 5
+  register: _result
+  until: _result is not failed
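Rather than retrying the RabbitmqCluster wait blindly, the play could gate on
the operator's HelmRelease first, since the helm-controller sets a Ready
condition on it once the chart and its CRDs are installed. A sketch under that
assumption:

    # Sketch only: wait for Flux to finish reconciling the operator chart.
    - name: Wait for the rabbitmq-cluster-operator HelmRelease to be Ready
      kubernetes.core.k8s_info:
        api_version: helm.toolkit.fluxcd.io/v2beta1
        kind: HelmRelease
        name: rabbitmq-cluster-operator
        namespace: openstack
        kubeconfig: /etc/kubernetes/admin.conf
      register: _rmq_release
      retries: 60
      delay: 5
      until: >-
        _rmq_release.resources | length > 0 and
        (_rmq_release.resources[0].status.conditions | default([])
        | selectattr('type', 'equalto', 'Ready')
        | selectattr('status', 'equalto', 'True')
        | list | length > 0)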
diff --git a/roles/rabbitmq_operator/meta/main.yml b/roles/rabbitmq_operator/meta/main.yml
index 4ff64d0..1b188e7 100644
--- a/roles/rabbitmq_operator/meta/main.yml
+++ b/roles/rabbitmq_operator/meta/main.yml
@@ -21,9 +21,3 @@
     - name: Ubuntu
       versions:
         - focal
-
-dependencies:
-  - role: helm_repository
-    vars:
-      helm_repository_name: bitnami
-      helm_repository_repo_url: https://charts.bitnami.com/bitnami
diff --git a/roles/rabbitmq_operator/tasks/main.yml b/roles/rabbitmq_operator/tasks/main.yml
index 49d2e8b..175bead 100644
--- a/roles/rabbitmq_operator/tasks/main.yml
+++ b/roles/rabbitmq_operator/tasks/main.yml
@@ -13,107 +13,134 @@
 # under the License.
 
 - name: Deploy Helm chart
-  kubernetes.core.helm:
-    name: rabbitmq-cluster-operator
-    chart_ref: bitnami/rabbitmq-cluster-operator
-    chart_version: 2.5.2
-    release_namespace: openstack
-    kubeconfig: /etc/kubernetes/admin.conf
-    values:
-      rabbitmqImage:
-        repository: library/rabbitmq
-        tag: 3.10.2-management
-      credentialUpdaterImage:
-        repository: rabbitmqoperator/default-user-credential-updater
-        tag: 1.0.2
-      clusterOperator:
-        image:
-          repository: rabbitmqoperator/cluster-operator
-          tag: 1.13.1
-        fullnameOverride: rabbitmq-cluster-operator
-        nodeSelector:
-          openstack-control-plane: enabled
-      msgTopologyOperator:
-        image:
-          repository: rabbitmqoperator/messaging-topology-operator
-          tag: 1.6.0
-        fullnameOverride: rabbitmq-messaging-topology-operator
-        nodeSelector:
-          openstack-control-plane: enabled
-      useCertManager: true
-
-- name: Create PodMonitor
   kubernetes.core.k8s:
     state: present
     definition:
-      apiVersion: monitoring.coreos.com/v1
-      kind: PodMonitor
-      metadata:
-        name: rabbitmq
-        namespace: monitoring
-        labels:
-          release: kube-prometheus-stack
-      spec:
-        jobLabel: app.kubernetes.io/component
-        podMetricsEndpoints:
-          - port: prometheus
-            path: /metrics
-            relabelings:
-              - sourceLabels: ["__meta_kubernetes_pod_name"]
-                targetLabel: "instance"
-              - action: "labeldrop"
-                regex: "^(container|endpoint|namespace|pod|service)$"
-        namespaceSelector:
-          matchNames:
-            - openstack
-        selector:
-          matchLabels:
-            app.kubernetes.io/component: rabbitmq
+      - apiVersion: source.toolkit.fluxcd.io/v1beta2
+        kind: HelmRepository
+        metadata:
+          name: bitnami
+          namespace: openstack
+        spec:
+          interval: 60s
+          url: https://charts.bitnami.com/bitnami
 
-- name: Create PrometheusRule
+      - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+        kind: HelmRelease
+        metadata:
+          name: rabbitmq-cluster-operator
+          namespace: openstack
+        spec:
+          interval: 60s
+          chart:
+            spec:
+              chart: rabbitmq-cluster-operator
+              version: 2.5.2
+              sourceRef:
+                kind: HelmRepository
+                name: bitnami
+          install:
+            crds: CreateReplace
+          upgrade:
+            crds: CreateReplace
+          values:
+            rabbitmqImage:
+              repository: library/rabbitmq
+              tag: 3.10.2-management
+            credentialUpdaterImage:
+              repository: rabbitmqoperator/default-user-credential-updater
+              tag: 1.0.2
+            clusterOperator:
+              image:
+                repository: rabbitmqoperator/cluster-operator
+                tag: 1.13.1
+              fullnameOverride: rabbitmq-cluster-operator
+              nodeSelector:
+                openstack-control-plane: enabled
+            msgTopologyOperator:
+              image:
+                repository: rabbitmqoperator/messaging-topology-operator
+                tag: 1.6.0
+              fullnameOverride: rabbitmq-messaging-topology-operator
+              nodeSelector:
+                openstack-control-plane: enabled
+            useCertManager: true
+
+- name: Deploy monitoring for RabbitMQ
   kubernetes.core.k8s:
     state: present
     definition:
-      apiVersion: monitoring.coreos.com/v1
-      kind: PrometheusRule
-      metadata:
-        name: rabbitmq
-        namespace: monitoring
-        labels:
-          release: kube-prometheus-stack
-      spec:
-        groups:
-          - name: limits
-            rules:
-              - alert: RabbitmqMemoryHigh
-                expr: rabbitmq_process_resident_memory_bytes / rabbitmq_resident_memory_limit_bytes > 0.80
-                labels:
-                  severity: warning
-              - alert: RabbitmqMemoryHigh
-                expr: rabbitmq_process_resident_memory_bytes / rabbitmq_resident_memory_limit_bytes > 0.95
-                labels:
-                  severity: critical
-              - alert: RabbitmqFileDescriptorsUsage
-                expr: rabbitmq_process_open_fds / rabbitmq_process_max_fds > 0.80
-                labels:
-                  severity: warning
-              - alert: RabbitmqFileDescriptorsUsage
-                expr: rabbitmq_process_open_fds / rabbitmq_process_max_fds > 0.95
-                labels:
-                  severity: critical
-              - alert: RabbitmqConnections
-                expr: rabbitmq_connections > 1000
-                labels:
-                  severity: warning
-          - name: msgs
-            rules:
-              - alert: RabbitmqUnackedMessages
-                expr: sum(rabbitmq_queue_messages_unacked) BY (queue) > 1000
-                for: 5m
-                labels:
-                  severity: warning
-              - alert: RabbitmqUnackedMessages
-                expr: sum(rabbitmq_queue_messages_unacked) BY (queue) > 1000
-                for: 1h
-                labels:
-                  severity: critical
+      - apiVersion: monitoring.coreos.com/v1
+        kind: PodMonitor
+        metadata:
+          name: rabbitmq
+          namespace: monitoring
+          labels:
+            release: kube-prometheus-stack
+        spec:
+          jobLabel: app.kubernetes.io/component
+          podMetricsEndpoints:
+            - port: prometheus
+              path: /metrics
+              relabelings:
+                - sourceLabels: ["__meta_kubernetes_pod_name"]
+                  targetLabel: "instance"
+                - action: "labeldrop"
+                  regex: "^(container|endpoint|namespace|pod|service)$"
+          namespaceSelector:
+            matchNames:
+              - openstack
+          selector:
+            matchLabels:
+              app.kubernetes.io/component: rabbitmq
+
+      - apiVersion: monitoring.coreos.com/v1
+        kind: PrometheusRule
+        metadata:
+          name: rabbitmq
+          namespace: monitoring
+          labels:
+            release: kube-prometheus-stack
+        spec:
+          groups:
+            - name: limits
+              rules:
+                - alert: RabbitmqMemoryHigh
+                  expr: rabbitmq_process_resident_memory_bytes / rabbitmq_resident_memory_limit_bytes > 0.80
+                  labels:
+                    severity: warning
+                - alert: RabbitmqMemoryHigh
+                  expr: rabbitmq_process_resident_memory_bytes / rabbitmq_resident_memory_limit_bytes > 0.95
+                  labels:
+                    severity: critical
+                - alert: RabbitmqFileDescriptorsUsage
+                  expr: rabbitmq_process_open_fds / rabbitmq_process_max_fds > 0.80
+                  labels:
+                    severity: warning
+                - alert: RabbitmqFileDescriptorsUsage
+                  expr: rabbitmq_process_open_fds / rabbitmq_process_max_fds > 0.95
+                  labels:
+                    severity: critical
+                - alert: RabbitmqConnections
+                  expr: rabbitmq_connections > 1000
+                  labels:
+                    severity: warning
+            - name: msgs
+              rules:
+                - alert: RabbitmqUnackedMessages
+                  expr: sum(rabbitmq_queue_messages_unacked) BY (queue) > 1000
+                  for: 5m
+                  labels:
+                    severity: warning
+                - alert: RabbitmqUnackedMessages
+                  expr: sum(rabbitmq_queue_messages_unacked) BY (queue) > 1000
+                  for: 1h
+                  labels:
+                    severity: critical
+  # NOTE(mnaser): The PodMonitor and PrometheusRule CRDs are installed
+  #               asynchronously by the monitoring stack, so they may not
+  #               exist yet when this task runs; retry until they do.
+  retries: 60
+  delay: 5
+  register: _result
+  until: _result is not failed
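One operational consequence of moving to Flux is that manual helm upgrade runs
against these releases get reverted on the next reconcile. Pausing
reconciliation goes through the HelmRelease itself via its spec.suspend field;
a sketch using the k8s module in patch mode:

    # Sketch only: suspend (and later resume) reconciliation of a release.
    - name: Suspend reconciliation of the rabbitmq-cluster-operator release
      kubernetes.core.k8s:
        state: patched
        api_version: helm.toolkit.fluxcd.io/v2beta1
        kind: HelmRelease
        name: rabbitmq-cluster-operator
        namespace: openstack
        kubeconfig: /etc/kubernetes/admin.conf
        definition:
          spec:
            suspend: true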
diff --git a/tox.ini b/tox.ini
index 306e9e9..e668c8b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -29,7 +29,7 @@
 [testenv:linters]
 deps =
     {[testenv:build]deps}
-    ansible-lint<6.3.0
+    ansible-lint==6.4.0
 commands =
     ansible-lint {toxinidir}/roles {posargs}
 
@@ -53,4 +53,4 @@
 deps =
     -r{toxinidir}/doc/requirements.txt
 commands =
-    sphinx-build -E -W -d doc/build/doctrees -b html doc/source/ doc/build/html
\ No newline at end of file
+    sphinx-build -E -W -d doc/build/doctrees -b html doc/source/ doc/build/html