Merge "Add support for exchanging tokens in Keycloak" into main
diff --git a/.ansible-lint b/.ansible-lint
index c81fa73..d6ddfd2 100644
--- a/.ansible-lint
+++ b/.ansible-lint
@@ -1,11 +1,13 @@
 ---
 exclude_paths:
+  - .ansible
   - .github
   - atmosphere
   - charts
   - molecule
   - playbooks
   - plugins/filter
+  - plugins/modules/subnet.py
   - roles/defaults/vars/main.yml
   - roles/kube_prometheus_stack/files/jsonnet
   - roles/storpool_csi/files
diff --git a/.charts.yml b/.charts.yml
index 69b6746..6b1ed14 100644
--- a/.charts.yml
+++ b/.charts.yml
@@ -175,7 +175,6 @@
       gerrit:
         review.opendev.org:
           - 934929
-          - 934931
   - name: placement
     version: 0.3.9
     repository: *openstack_helm_repository
diff --git a/.github/workflows/manila.yml b/.github/workflows/manila.yml
index aae04df..f43339d 100644
--- a/.github/workflows/manila.yml
+++ b/.github/workflows/manila.yml
@@ -23,9 +23,8 @@
       - main
     paths:
       - .github/workflows/manila.yml
-  release:
-    types:
-      - published
+    tags:
+      - v*
   workflow_dispatch:
 
 jobs:
diff --git a/.gitignore b/.gitignore
index 2ad4866..8aca44a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,6 +25,7 @@
 collections/
 *.egg-info
 CHANGELOG.rst
+target
 
 ## Editors
 .idea
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index bd2d792..0fe99aa 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,4 +1,4 @@
-exclude: "^(roles/kube_prometheus_stack/files/jsonnet|charts)"
+exclude: "^(roles/kube_prometheus_stack/files/jsonnet|charts|plugins/modules/subnet.py)"
 
 repos:
   - repo: local
@@ -41,6 +41,6 @@
         args: ["--profile", "black", "--filter-files"]
 
   - repo: https://github.com/ansible/ansible-lint.git
-    rev: v24.7.0
+    rev: v25.1.2
     hooks:
       - id: ansible-lint
diff --git a/Cargo.lock b/Cargo.lock
new file mode 100644
index 0000000..6c32272
--- /dev/null
+++ b/Cargo.lock
@@ -0,0 +1,2678 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 4
+
+[[package]]
+name = "addr2line"
+version = "0.24.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1"
+dependencies = [
+ "gimli",
+]
+
+[[package]]
+name = "adler2"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"
+
+[[package]]
+name = "aho-corasick"
+version = "1.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "android-tzdata"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
+
+[[package]]
+name = "android_system_properties"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "anstream"
+version = "0.6.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b"
+dependencies = [
+ "anstyle",
+ "anstyle-parse",
+ "anstyle-query",
+ "anstyle-wincon",
+ "colorchoice",
+ "is_terminal_polyfill",
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle"
+version = "1.0.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9"
+
+[[package]]
+name = "anstyle-parse"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9"
+dependencies = [
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle-query"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c"
+dependencies = [
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "anstyle-wincon"
+version = "3.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e"
+dependencies = [
+ "anstyle",
+ "once_cell",
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "anyhow"
+version = "1.0.95"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04"
+
+[[package]]
+name = "arc-swap"
+version = "1.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457"
+
+[[package]]
+name = "async-trait"
+version = "0.1.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "644dd749086bf3771a2fbc5f256fdb982d53f011c7d5d560304eafeecebce79d"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "atmosphere"
+version = "0.0.0"
+dependencies = [
+ "rustainers",
+ "tokio",
+]
+
+[[package]]
+name = "atomic-waker"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"
+
+[[package]]
+name = "autocfg"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
+
+[[package]]
+name = "backtrace"
+version = "0.3.74"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a"
+dependencies = [
+ "addr2line",
+ "cfg-if",
+ "libc",
+ "miniz_oxide",
+ "object",
+ "rustc-demangle",
+ "windows-targets",
+]
+
+[[package]]
+name = "base64"
+version = "0.22.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
+
+[[package]]
+name = "bitflags"
+version = "2.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36"
+
+[[package]]
+name = "bollard"
+version = "0.18.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "97ccca1260af6a459d75994ad5acc1651bcabcbdbc41467cc9786519ab854c30"
+dependencies = [
+ "base64",
+ "bollard-stubs",
+ "bytes",
+ "futures-core",
+ "futures-util",
+ "hex",
+ "http",
+ "http-body-util",
+ "hyper",
+ "hyper-named-pipe",
+ "hyper-util",
+ "hyperlocal",
+ "log",
+ "pin-project-lite",
+ "serde",
+ "serde_derive",
+ "serde_json",
+ "serde_repr",
+ "serde_urlencoded",
+ "thiserror 2.0.11",
+ "tokio",
+ "tokio-util",
+ "tower-service",
+ "url",
+ "winapi",
+]
+
+[[package]]
+name = "bollard-stubs"
+version = "1.47.1-rc.27.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f179cfbddb6e77a5472703d4b30436bff32929c0aa8a9008ecf23d1d3cdd0da"
+dependencies = [
+ "serde",
+ "serde_repr",
+ "serde_with",
+]
+
+[[package]]
+name = "build-it"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fb13a73a10ff7e425c3b397e54848825ce396d7e6c16be83bc9af81923e51191"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "bumpalo"
+version = "3.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf"
+
+[[package]]
+name = "byteorder"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
+
+[[package]]
+name = "bytes"
+version = "1.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9"
+
+[[package]]
+name = "cc"
+version = "1.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0c3d1b2e905a3a7b00a6141adb0e4c0bb941d11caf55349d863942a1cc44e3c9"
+dependencies = [
+ "shlex",
+]
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "chrono"
+version = "0.4.39"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825"
+dependencies = [
+ "android-tzdata",
+ "iana-time-zone",
+ "js-sys",
+ "num-traits",
+ "serde",
+ "wasm-bindgen",
+ "windows-targets",
+]
+
+[[package]]
+name = "clap"
+version = "4.5.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "92b7b18d71fad5313a1e320fa9897994228ce274b60faa4d694fe0ea89cd9e6d"
+dependencies = [
+ "clap_builder",
+ "clap_derive",
+]
+
+[[package]]
+name = "clap_builder"
+version = "4.5.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a35db2071778a7344791a4fb4f95308b5673d219dee3ae348b86642574ecc90c"
+dependencies = [
+ "anstream",
+ "anstyle",
+ "clap_lex",
+ "strsim",
+]
+
+[[package]]
+name = "clap_derive"
+version = "4.5.28"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bf4ced95c6f4a675af3da73304b9ac4ed991640c36374e4b46795c49e17cf1ed"
+dependencies = [
+ "heck",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "clap_lex"
+version = "0.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6"
+
+[[package]]
+name = "colorchoice"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990"
+
+[[package]]
+name = "core-foundation"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f"
+dependencies = [
+ "core-foundation-sys",
+ "libc",
+]
+
+[[package]]
+name = "core-foundation"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63"
+dependencies = [
+ "core-foundation-sys",
+ "libc",
+]
+
+[[package]]
+name = "core-foundation-sys"
+version = "0.8.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
+
+[[package]]
+name = "deranged"
+version = "0.3.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4"
+dependencies = [
+ "powerfmt",
+ "serde",
+]
+
+[[package]]
+name = "displaydoc"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "either"
+version = "1.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0"
+
+[[package]]
+name = "encoding_rs"
+version = "0.8.35"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "env_filter"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "186e05a59d4c50738528153b83b0b0194d3a29507dfec16eccd4b342903397d0"
+dependencies = [
+ "log",
+ "regex",
+]
+
+[[package]]
+name = "env_logger"
+version = "0.11.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dcaee3d8e3cfc3fd92428d477bc97fc29ec8716d180c0d74c643bb26166660e0"
+dependencies = [
+ "anstream",
+ "anstyle",
+ "env_filter",
+ "humantime",
+ "log",
+]
+
+[[package]]
+name = "equivalent"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f"
+
+[[package]]
+name = "errno"
+version = "0.3.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d"
+dependencies = [
+ "libc",
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "fastrand"
+version = "2.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
+
+[[package]]
+name = "filetime"
+version = "0.2.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "libredox",
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "fnv"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
+
+[[package]]
+name = "foreign-types"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
+dependencies = [
+ "foreign-types-shared",
+]
+
+[[package]]
+name = "foreign-types-shared"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
+
+[[package]]
+name = "form_urlencoded"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456"
+dependencies = [
+ "percent-encoding",
+]
+
+[[package]]
+name = "futures"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "futures-executor",
+ "futures-io",
+ "futures-sink",
+ "futures-task",
+ "futures-util",
+]
+
+[[package]]
+name = "futures-channel"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10"
+dependencies = [
+ "futures-core",
+ "futures-sink",
+]
+
+[[package]]
+name = "futures-core"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e"
+
+[[package]]
+name = "futures-executor"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f"
+dependencies = [
+ "futures-core",
+ "futures-task",
+ "futures-util",
+]
+
+[[package]]
+name = "futures-io"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6"
+
+[[package]]
+name = "futures-macro"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "futures-sink"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7"
+
+[[package]]
+name = "futures-task"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988"
+
+[[package]]
+name = "futures-util"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "futures-io",
+ "futures-macro",
+ "futures-sink",
+ "futures-task",
+ "memchr",
+ "pin-project-lite",
+ "pin-utils",
+ "slab",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7"
+dependencies = [
+ "cfg-if",
+ "js-sys",
+ "libc",
+ "wasi 0.11.0+wasi-snapshot-preview1",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "wasi 0.13.3+wasi-0.2.2",
+ "windows-targets",
+]
+
+[[package]]
+name = "gimli"
+version = "0.31.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"
+
+[[package]]
+name = "gitea-sdk"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7afd05f4bb5661b4562f00ad4e37d6af7184205d382b8d697bc5958331ca4909"
+dependencies = [
+ "base64",
+ "build-it",
+ "reqwest",
+ "serde",
+ "serde_json",
+]
+
+[[package]]
+name = "h2"
+version = "0.4.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e"
+dependencies = [
+ "atomic-waker",
+ "bytes",
+ "fnv",
+ "futures-core",
+ "futures-sink",
+ "http",
+ "indexmap 2.7.1",
+ "slab",
+ "tokio",
+ "tokio-util",
+ "tracing",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
+
+[[package]]
+name = "hashbrown"
+version = "0.15.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289"
+
+[[package]]
+name = "heck"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
+
+[[package]]
+name = "hex"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
+
+[[package]]
+name = "http"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea"
+dependencies = [
+ "bytes",
+ "fnv",
+ "itoa",
+]
+
+[[package]]
+name = "http-body"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184"
+dependencies = [
+ "bytes",
+ "http",
+]
+
+[[package]]
+name = "http-body-util"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f"
+dependencies = [
+ "bytes",
+ "futures-util",
+ "http",
+ "http-body",
+ "pin-project-lite",
+]
+
+[[package]]
+name = "httparse"
+version = "1.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f2d708df4e7140240a16cd6ab0ab65c972d7433ab77819ea693fde9c43811e2a"
+
+[[package]]
+name = "httpdate"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
+
+[[package]]
+name = "humantime"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
+
+[[package]]
+name = "hyper"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80"
+dependencies = [
+ "bytes",
+ "futures-channel",
+ "futures-util",
+ "h2",
+ "http",
+ "http-body",
+ "httparse",
+ "httpdate",
+ "itoa",
+ "pin-project-lite",
+ "smallvec",
+ "tokio",
+ "want",
+]
+
+[[package]]
+name = "hyper-named-pipe"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "73b7d8abf35697b81a825e386fc151e0d503e8cb5fcb93cc8669c376dfd6f278"
+dependencies = [
+ "hex",
+ "hyper",
+ "hyper-util",
+ "pin-project-lite",
+ "tokio",
+ "tower-service",
+ "winapi",
+]
+
+[[package]]
+name = "hyper-rustls"
+version = "0.27.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2"
+dependencies = [
+ "futures-util",
+ "http",
+ "hyper",
+ "hyper-util",
+ "log",
+ "rustls",
+ "rustls-native-certs",
+ "rustls-pki-types",
+ "tokio",
+ "tokio-rustls",
+ "tower-service",
+]
+
+[[package]]
+name = "hyper-timeout"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0"
+dependencies = [
+ "hyper",
+ "hyper-util",
+ "pin-project-lite",
+ "tokio",
+ "tower-service",
+]
+
+[[package]]
+name = "hyper-tls"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0"
+dependencies = [
+ "bytes",
+ "http-body-util",
+ "hyper",
+ "hyper-util",
+ "native-tls",
+ "tokio",
+ "tokio-native-tls",
+ "tower-service",
+]
+
+[[package]]
+name = "hyper-util"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4"
+dependencies = [
+ "bytes",
+ "futures-channel",
+ "futures-util",
+ "http",
+ "http-body",
+ "hyper",
+ "pin-project-lite",
+ "socket2",
+ "tokio",
+ "tower-service",
+ "tracing",
+]
+
+[[package]]
+name = "hyperlocal"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "986c5ce3b994526b3cd75578e62554abd09f0899d6206de48b3e96ab34ccc8c7"
+dependencies = [
+ "hex",
+ "http-body-util",
+ "hyper",
+ "hyper-util",
+ "pin-project-lite",
+ "tokio",
+ "tower-service",
+]
+
+[[package]]
+name = "iana-time-zone"
+version = "0.1.61"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220"
+dependencies = [
+ "android_system_properties",
+ "core-foundation-sys",
+ "iana-time-zone-haiku",
+ "js-sys",
+ "wasm-bindgen",
+ "windows-core",
+]
+
+[[package]]
+name = "iana-time-zone-haiku"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f"
+dependencies = [
+ "cc",
+]
+
+[[package]]
+name = "icu_collections"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526"
+dependencies = [
+ "displaydoc",
+ "yoke",
+ "zerofrom",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_locid"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637"
+dependencies = [
+ "displaydoc",
+ "litemap",
+ "tinystr",
+ "writeable",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_locid_transform"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e"
+dependencies = [
+ "displaydoc",
+ "icu_locid",
+ "icu_locid_transform_data",
+ "icu_provider",
+ "tinystr",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_locid_transform_data"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e"
+
+[[package]]
+name = "icu_normalizer"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f"
+dependencies = [
+ "displaydoc",
+ "icu_collections",
+ "icu_normalizer_data",
+ "icu_properties",
+ "icu_provider",
+ "smallvec",
+ "utf16_iter",
+ "utf8_iter",
+ "write16",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_normalizer_data"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516"
+
+[[package]]
+name = "icu_properties"
+version = "1.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5"
+dependencies = [
+ "displaydoc",
+ "icu_collections",
+ "icu_locid_transform",
+ "icu_properties_data",
+ "icu_provider",
+ "tinystr",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_properties_data"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569"
+
+[[package]]
+name = "icu_provider"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9"
+dependencies = [
+ "displaydoc",
+ "icu_locid",
+ "icu_provider_macros",
+ "stable_deref_trait",
+ "tinystr",
+ "writeable",
+ "yoke",
+ "zerofrom",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_provider_macros"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "idna"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e"
+dependencies = [
+ "idna_adapter",
+ "smallvec",
+ "utf8_iter",
+]
+
+[[package]]
+name = "idna_adapter"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71"
+dependencies = [
+ "icu_normalizer",
+ "icu_properties",
+]
+
+[[package]]
+name = "imagebumper"
+version = "0.1.0"
+dependencies = [
+ "async-trait",
+ "clap",
+ "env_logger",
+ "gitea-sdk",
+ "log",
+ "octocrab",
+ "regex",
+ "reqwest",
+ "serde_json",
+ "tokio",
+ "url",
+]
+
+[[package]]
+name = "indexmap"
+version = "1.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
+dependencies = [
+ "autocfg",
+ "hashbrown 0.12.3",
+ "serde",
+]
+
+[[package]]
+name = "indexmap"
+version = "2.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652"
+dependencies = [
+ "equivalent",
+ "hashbrown 0.15.2",
+ "serde",
+]
+
+[[package]]
+name = "ipnet"
+version = "2.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130"
+
+[[package]]
+name = "iri-string"
+version = "0.7.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc0f0a572e8ffe56e2ff4f769f32ffe919282c3916799f8b68688b6030063bea"
+dependencies = [
+ "memchr",
+ "serde",
+]
+
+[[package]]
+name = "is_terminal_polyfill"
+version = "1.70.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf"
+
+[[package]]
+name = "itoa"
+version = "1.0.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674"
+
+[[package]]
+name = "js-sys"
+version = "0.3.77"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f"
+dependencies = [
+ "once_cell",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "jsonwebtoken"
+version = "9.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a87cc7a48537badeae96744432de36f4be2b4a34a05a5ef32e9dd8a1c169dde"
+dependencies = [
+ "base64",
+ "js-sys",
+ "pem",
+ "ring",
+ "serde",
+ "serde_json",
+ "simple_asn1",
+]
+
+[[package]]
+name = "libc"
+version = "0.2.169"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a"
+
+[[package]]
+name = "libredox"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d"
+dependencies = [
+ "bitflags",
+ "libc",
+ "redox_syscall",
+]
+
+[[package]]
+name = "linux-raw-sys"
+version = "0.4.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab"
+
+[[package]]
+name = "litemap"
+version = "0.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104"
+
+[[package]]
+name = "log"
+version = "0.4.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f"
+
+[[package]]
+name = "memchr"
+version = "2.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
+
+[[package]]
+name = "mime"
+version = "0.3.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
+
+[[package]]
+name = "miniz_oxide"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b3b1c9bd4fe1f0f8b387f6eb9eb3b4a1aa26185e5750efb9140301703f62cd1b"
+dependencies = [
+ "adler2",
+]
+
+[[package]]
+name = "mio"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd"
+dependencies = [
+ "libc",
+ "wasi 0.11.0+wasi-snapshot-preview1",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "native-tls"
+version = "0.2.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0dab59f8e050d5df8e4dd87d9206fb6f65a483e20ac9fda365ade4fab353196c"
+dependencies = [
+ "libc",
+ "log",
+ "openssl",
+ "openssl-probe",
+ "openssl-sys",
+ "schannel",
+ "security-framework 2.11.1",
+ "security-framework-sys",
+ "tempfile",
+]
+
+[[package]]
+name = "netlink-packet-core"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72724faf704479d67b388da142b186f916188505e7e0b26719019c525882eda4"
+dependencies = [
+ "anyhow",
+ "byteorder",
+ "netlink-packet-utils",
+]
+
+[[package]]
+name = "netlink-packet-route"
+version = "0.19.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "74c171cd77b4ee8c7708da746ce392440cb7bcf618d122ec9ecc607b12938bf4"
+dependencies = [
+ "anyhow",
+ "byteorder",
+ "libc",
+ "log",
+ "netlink-packet-core",
+ "netlink-packet-utils",
+]
+
+[[package]]
+name = "netlink-packet-utils"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ede8a08c71ad5a95cdd0e4e52facd37190977039a4704eb82a283f713747d34"
+dependencies = [
+ "anyhow",
+ "byteorder",
+ "paste",
+ "thiserror 1.0.69",
+]
+
+[[package]]
+name = "netlink-proto"
+version = "0.11.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72452e012c2f8d612410d89eea01e2d9b56205274abb35d53f60200b2ec41d60"
+dependencies = [
+ "bytes",
+ "futures",
+ "log",
+ "netlink-packet-core",
+ "netlink-sys",
+ "thiserror 2.0.11",
+]
+
+[[package]]
+name = "netlink-sys"
+version = "0.8.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "16c903aa70590cb93691bf97a767c8d1d6122d2cc9070433deb3bbf36ce8bd23"
+dependencies = [
+ "bytes",
+ "futures",
+ "libc",
+ "log",
+ "tokio",
+]
+
+[[package]]
+name = "nix"
+version = "0.27.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053"
+dependencies = [
+ "bitflags",
+ "cfg-if",
+ "libc",
+]
+
+[[package]]
+name = "num-bigint"
+version = "0.4.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9"
+dependencies = [
+ "num-integer",
+ "num-traits",
+]
+
+[[package]]
+name = "num-conv"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9"
+
+[[package]]
+name = "num-integer"
+version = "0.1.46"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f"
+dependencies = [
+ "num-traits",
+]
+
+[[package]]
+name = "num-traits"
+version = "0.2.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "object"
+version = "0.36.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "octocrab"
+version = "0.43.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "27527d68322f4c603319f7958973db8f9fa4be62c0e3fafe084f5562cf6353df"
+dependencies = [
+ "arc-swap",
+ "async-trait",
+ "base64",
+ "bytes",
+ "cfg-if",
+ "chrono",
+ "either",
+ "futures",
+ "futures-util",
+ "http",
+ "http-body",
+ "http-body-util",
+ "hyper",
+ "hyper-rustls",
+ "hyper-timeout",
+ "hyper-util",
+ "jsonwebtoken",
+ "once_cell",
+ "percent-encoding",
+ "pin-project",
+ "secrecy",
+ "serde",
+ "serde_json",
+ "serde_path_to_error",
+ "serde_urlencoded",
+ "snafu",
+ "tokio",
+ "tower",
+ "tower-http",
+ "tracing",
+ "url",
+ "web-time",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.20.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "945462a4b81e43c4e3ba96bd7b49d834c6f61198356aa858733bc4acf3cbe62e"
+
+[[package]]
+name = "openssl"
+version = "0.10.71"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e14130c6a98cd258fdcb0fb6d744152343ff729cbfcb28c656a9d12b999fbcd"
+dependencies = [
+ "bitflags",
+ "cfg-if",
+ "foreign-types",
+ "libc",
+ "once_cell",
+ "openssl-macros",
+ "openssl-sys",
+]
+
+[[package]]
+name = "openssl-macros"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "openssl-probe"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e"
+
+[[package]]
+name = "openssl-src"
+version = "300.4.2+3.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "168ce4e058f975fe43e89d9ccf78ca668601887ae736090aacc23ae353c298e2"
+dependencies = [
+ "cc",
+]
+
+[[package]]
+name = "openssl-sys"
+version = "0.9.106"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8bb61ea9811cc39e3c2069f40b8b8e2e70d8569b361f879786cc7ed48b777cdd"
+dependencies = [
+ "cc",
+ "libc",
+ "openssl-src",
+ "pkg-config",
+ "vcpkg",
+]
+
+[[package]]
+name = "ovsinit"
+version = "0.1.0"
+dependencies = [
+ "clap",
+ "env_logger",
+ "futures",
+ "futures-util",
+ "ipnet",
+ "libc",
+ "log",
+ "netlink-packet-route",
+ "rtnetlink",
+ "serde",
+ "serde_json",
+ "thiserror 2.0.11",
+ "tokio",
+]
+
+[[package]]
+name = "passwd"
+version = "0.1.0"
+dependencies = [
+ "thiserror 2.0.11",
+]
+
+[[package]]
+name = "paste"
+version = "1.0.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
+
+[[package]]
+name = "pem"
+version = "3.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae"
+dependencies = [
+ "base64",
+ "serde",
+]
+
+[[package]]
+name = "percent-encoding"
+version = "2.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
+
+[[package]]
+name = "pin-project"
+version = "1.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dfe2e71e1471fe07709406bf725f710b02927c9c54b2b5b2ec0e8087d97c327d"
+dependencies = [
+ "pin-project-internal",
+]
+
+[[package]]
+name = "pin-project-internal"
+version = "1.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6e859e6e5bd50440ab63c47e3ebabc90f26251f7c73c3d3e837b74a1cc3fa67"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "pin-project-lite"
+version = "0.2.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b"
+
+[[package]]
+name = "pin-utils"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
+
+[[package]]
+name = "pkg-config"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2"
+
+[[package]]
+name = "powerfmt"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391"
+
+[[package]]
+name = "ppv-lite86"
+version = "0.2.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04"
+dependencies = [
+ "zerocopy",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.93"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.38"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "rand"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
+dependencies = [
+ "libc",
+ "rand_chacha",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
+dependencies = [
+ "ppv-lite86",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
+dependencies = [
+ "getrandom 0.2.15",
+]
+
+[[package]]
+name = "redox_syscall"
+version = "0.5.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834"
+dependencies = [
+ "bitflags",
+]
+
+[[package]]
+name = "regex"
+version = "1.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-automata",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.4.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
+
+[[package]]
+name = "reqwest"
+version = "0.12.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "43e734407157c3c2034e0258f5e4473ddb361b1e85f95a66690d67264d7cd1da"
+dependencies = [
+ "base64",
+ "bytes",
+ "encoding_rs",
+ "futures-core",
+ "futures-util",
+ "h2",
+ "http",
+ "http-body",
+ "http-body-util",
+ "hyper",
+ "hyper-rustls",
+ "hyper-tls",
+ "hyper-util",
+ "ipnet",
+ "js-sys",
+ "log",
+ "mime",
+ "native-tls",
+ "once_cell",
+ "percent-encoding",
+ "pin-project-lite",
+ "rustls-pemfile",
+ "serde",
+ "serde_json",
+ "serde_urlencoded",
+ "sync_wrapper",
+ "system-configuration",
+ "tokio",
+ "tokio-native-tls",
+ "tower",
+ "tower-service",
+ "url",
+ "wasm-bindgen",
+ "wasm-bindgen-futures",
+ "web-sys",
+ "windows-registry",
+]
+
+[[package]]
+name = "ring"
+version = "0.17.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e75ec5e92c4d8aede845126adc388046234541629e76029599ed35a003c7ed24"
+dependencies = [
+ "cc",
+ "cfg-if",
+ "getrandom 0.2.15",
+ "libc",
+ "untrusted",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "rtnetlink"
+version = "0.14.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b684475344d8df1859ddb2d395dd3dac4f8f3422a1aa0725993cb375fc5caba5"
+dependencies = [
+ "futures",
+ "log",
+ "netlink-packet-core",
+ "netlink-packet-route",
+ "netlink-packet-utils",
+ "netlink-proto",
+ "netlink-sys",
+ "nix",
+ "thiserror 1.0.69",
+ "tokio",
+]
+
+[[package]]
+name = "rustainers"
+version = "0.1.0"
+dependencies = [
+ "bollard",
+ "bytes",
+ "futures-util",
+ "passwd",
+ "rand",
+ "tar",
+ "thiserror 2.0.11",
+ "tokio",
+]
+
+[[package]]
+name = "rustc-demangle"
+version = "0.1.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f"
+
+[[package]]
+name = "rustix"
+version = "0.38.44"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154"
+dependencies = [
+ "bitflags",
+ "errno",
+ "libc",
+ "linux-raw-sys",
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "rustls"
+version = "0.23.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "47796c98c480fce5406ef69d1c76378375492c3b0a0de587be0c1d9feb12f395"
+dependencies = [
+ "log",
+ "once_cell",
+ "ring",
+ "rustls-pki-types",
+ "rustls-webpki",
+ "subtle",
+ "zeroize",
+]
+
+[[package]]
+name = "rustls-native-certs"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3"
+dependencies = [
+ "openssl-probe",
+ "rustls-pki-types",
+ "schannel",
+ "security-framework 3.2.0",
+]
+
+[[package]]
+name = "rustls-pemfile"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50"
+dependencies = [
+ "rustls-pki-types",
+]
+
+[[package]]
+name = "rustls-pki-types"
+version = "1.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c"
+
+[[package]]
+name = "rustls-webpki"
+version = "0.102.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9"
+dependencies = [
+ "ring",
+ "rustls-pki-types",
+ "untrusted",
+]
+
+[[package]]
+name = "rustversion"
+version = "1.0.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4"
+
+[[package]]
+name = "ryu"
+version = "1.0.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd"
+
+[[package]]
+name = "schannel"
+version = "0.1.27"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d"
+dependencies = [
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "secrecy"
+version = "0.10.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e891af845473308773346dc847b2c23ee78fe442e0472ac50e22a18a93d3ae5a"
+dependencies = [
+ "zeroize",
+]
+
+[[package]]
+name = "security-framework"
+version = "2.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02"
+dependencies = [
+ "bitflags",
+ "core-foundation 0.9.4",
+ "core-foundation-sys",
+ "libc",
+ "security-framework-sys",
+]
+
+[[package]]
+name = "security-framework"
+version = "3.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316"
+dependencies = [
+ "bitflags",
+ "core-foundation 0.10.0",
+ "core-foundation-sys",
+ "libc",
+ "security-framework-sys",
+]
+
+[[package]]
+name = "security-framework-sys"
+version = "2.14.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32"
+dependencies = [
+ "core-foundation-sys",
+ "libc",
+]
+
+[[package]]
+name = "serde"
+version = "1.0.217"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.217"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "serde_json"
+version = "1.0.138"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d434192e7da787e94a6ea7e9670b26a036d0ca41e0b7efb2676dd32bae872949"
+dependencies = [
+ "itoa",
+ "memchr",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "serde_path_to_error"
+version = "0.1.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6"
+dependencies = [
+ "itoa",
+ "serde",
+]
+
+[[package]]
+name = "serde_repr"
+version = "0.1.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "serde_urlencoded"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd"
+dependencies = [
+ "form_urlencoded",
+ "itoa",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "serde_with"
+version = "3.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d6b6f7f2fcb69f747921f79f3926bd1e203fce4fef62c268dd3abfb6d86029aa"
+dependencies = [
+ "base64",
+ "chrono",
+ "hex",
+ "indexmap 1.9.3",
+ "indexmap 2.7.1",
+ "serde",
+ "serde_derive",
+ "serde_json",
+ "time",
+]
+
+[[package]]
+name = "shlex"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
+
+[[package]]
+name = "simple_asn1"
+version = "0.6.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb"
+dependencies = [
+ "num-bigint",
+ "num-traits",
+ "thiserror 2.0.11",
+ "time",
+]
+
+[[package]]
+name = "slab"
+version = "0.4.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "smallvec"
+version = "1.14.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd"
+
+[[package]]
+name = "snafu"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "223891c85e2a29c3fe8fb900c1fae5e69c2e42415e3177752e8718475efa5019"
+dependencies = [
+ "snafu-derive",
+]
+
+[[package]]
+name = "snafu-derive"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "03c3c6b7927ffe7ecaa769ee0e3994da3b8cafc8f444578982c83ecb161af917"
+dependencies = [
+ "heck",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "socket2"
+version = "0.5.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8"
+dependencies = [
+ "libc",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "stable_deref_trait"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
+
+[[package]]
+name = "strsim"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
+
+[[package]]
+name = "subtle"
+version = "2.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
+
+[[package]]
+name = "syn"
+version = "2.0.98"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "sync_wrapper"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263"
+dependencies = [
+ "futures-core",
+]
+
+[[package]]
+name = "synstructure"
+version = "0.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "system-configuration"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b"
+dependencies = [
+ "bitflags",
+ "core-foundation 0.9.4",
+ "system-configuration-sys",
+]
+
+[[package]]
+name = "system-configuration-sys"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4"
+dependencies = [
+ "core-foundation-sys",
+ "libc",
+]
+
+[[package]]
+name = "tar"
+version = "0.4.43"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c65998313f8e17d0d553d28f91a0df93e4dbbbf770279c7bc21ca0f09ea1a1f6"
+dependencies = [
+ "filetime",
+ "libc",
+ "xattr",
+]
+
+[[package]]
+name = "tempfile"
+version = "3.17.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "22e5a0acb1f3f55f65cc4a866c361b2fb2a0ff6366785ae6fbb5f85df07ba230"
+dependencies = [
+ "cfg-if",
+ "fastrand",
+ "getrandom 0.3.1",
+ "once_cell",
+ "rustix",
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "thiserror"
+version = "1.0.69"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52"
+dependencies = [
+ "thiserror-impl 1.0.69",
+]
+
+[[package]]
+name = "thiserror"
+version = "2.0.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc"
+dependencies = [
+ "thiserror-impl 2.0.11",
+]
+
+[[package]]
+name = "thiserror-impl"
+version = "1.0.69"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "thiserror-impl"
+version = "2.0.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "time"
+version = "0.3.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21"
+dependencies = [
+ "deranged",
+ "itoa",
+ "num-conv",
+ "powerfmt",
+ "serde",
+ "time-core",
+ "time-macros",
+]
+
+[[package]]
+name = "time-core"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"
+
+[[package]]
+name = "time-macros"
+version = "0.2.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de"
+dependencies = [
+ "num-conv",
+ "time-core",
+]
+
+[[package]]
+name = "tinystr"
+version = "0.7.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f"
+dependencies = [
+ "displaydoc",
+ "zerovec",
+]
+
+[[package]]
+name = "tokio"
+version = "1.43.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e"
+dependencies = [
+ "backtrace",
+ "bytes",
+ "libc",
+ "mio",
+ "pin-project-lite",
+ "socket2",
+ "tokio-macros",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "tokio-macros"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "tokio-native-tls"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2"
+dependencies = [
+ "native-tls",
+ "tokio",
+]
+
+[[package]]
+name = "tokio-rustls"
+version = "0.26.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37"
+dependencies = [
+ "rustls",
+ "tokio",
+]
+
+[[package]]
+name = "tokio-util"
+version = "0.7.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078"
+dependencies = [
+ "bytes",
+ "futures-core",
+ "futures-sink",
+ "pin-project-lite",
+ "tokio",
+]
+
+[[package]]
+name = "tower"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9"
+dependencies = [
+ "futures-core",
+ "futures-util",
+ "pin-project-lite",
+ "sync_wrapper",
+ "tokio",
+ "tokio-util",
+ "tower-layer",
+ "tower-service",
+ "tracing",
+]
+
+[[package]]
+name = "tower-http"
+version = "0.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697"
+dependencies = [
+ "bitflags",
+ "bytes",
+ "futures-util",
+ "http",
+ "http-body",
+ "iri-string",
+ "pin-project-lite",
+ "tower",
+ "tower-layer",
+ "tower-service",
+ "tracing",
+]
+
+[[package]]
+name = "tower-layer"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e"
+
+[[package]]
+name = "tower-service"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3"
+
+[[package]]
+name = "tracing"
+version = "0.1.41"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0"
+dependencies = [
+ "log",
+ "pin-project-lite",
+ "tracing-attributes",
+ "tracing-core",
+]
+
+[[package]]
+name = "tracing-attributes"
+version = "0.1.28"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "tracing-core"
+version = "0.1.33"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c"
+dependencies = [
+ "once_cell",
+]
+
+[[package]]
+name = "try-lock"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b"
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034"
+
+[[package]]
+name = "untrusted"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
+
+[[package]]
+name = "url"
+version = "2.5.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60"
+dependencies = [
+ "form_urlencoded",
+ "idna",
+ "percent-encoding",
+ "serde",
+]
+
+[[package]]
+name = "utf16_iter"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246"
+
+[[package]]
+name = "utf8_iter"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
+
+[[package]]
+name = "utf8parse"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
+
+[[package]]
+name = "vcpkg"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
+
+[[package]]
+name = "want"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e"
+dependencies = [
+ "try-lock",
+]
+
+[[package]]
+name = "wasi"
+version = "0.11.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
+
+[[package]]
+name = "wasi"
+version = "0.13.3+wasi-0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2"
+dependencies = [
+ "wit-bindgen-rt",
+]
+
+[[package]]
+name = "wasm-bindgen"
+version = "0.2.100"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5"
+dependencies = [
+ "cfg-if",
+ "once_cell",
+ "rustversion",
+ "wasm-bindgen-macro",
+]
+
+[[package]]
+name = "wasm-bindgen-backend"
+version = "0.2.100"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6"
+dependencies = [
+ "bumpalo",
+ "log",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-futures"
+version = "0.4.50"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61"
+dependencies = [
+ "cfg-if",
+ "js-sys",
+ "once_cell",
+ "wasm-bindgen",
+ "web-sys",
+]
+
+[[package]]
+name = "wasm-bindgen-macro"
+version = "0.2.100"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407"
+dependencies = [
+ "quote",
+ "wasm-bindgen-macro-support",
+]
+
+[[package]]
+name = "wasm-bindgen-macro-support"
+version = "0.2.100"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-backend",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-shared"
+version = "0.2.100"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "web-sys"
+version = "0.3.77"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2"
+dependencies = [
+ "js-sys",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "web-time"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb"
+dependencies = [
+ "js-sys",
+ "serde",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+
+[[package]]
+name = "windows-core"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-registry"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0"
+dependencies = [
+ "windows-result",
+ "windows-strings",
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-result"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-strings"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10"
+dependencies = [
+ "windows-result",
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.59.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
+dependencies = [
+ "windows_aarch64_gnullvm",
+ "windows_aarch64_msvc",
+ "windows_i686_gnu",
+ "windows_i686_gnullvm",
+ "windows_i686_msvc",
+ "windows_x86_64_gnu",
+ "windows_x86_64_gnullvm",
+ "windows_x86_64_msvc",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
+
+[[package]]
+name = "windows_i686_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
+
+[[package]]
+name = "wit-bindgen-rt"
+version = "0.33.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c"
+dependencies = [
+ "bitflags",
+]
+
+[[package]]
+name = "write16"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936"
+
+[[package]]
+name = "writeable"
+version = "0.5.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51"
+
+[[package]]
+name = "xattr"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e105d177a3871454f754b33bb0ee637ecaaac997446375fd3e5d43a2ed00c909"
+dependencies = [
+ "libc",
+ "linux-raw-sys",
+ "rustix",
+]
+
+[[package]]
+name = "yoke"
+version = "0.7.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40"
+dependencies = [
+ "serde",
+ "stable_deref_trait",
+ "yoke-derive",
+ "zerofrom",
+]
+
+[[package]]
+name = "yoke-derive"
+version = "0.7.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+ "synstructure",
+]
+
+[[package]]
+name = "zerocopy"
+version = "0.7.35"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0"
+dependencies = [
+ "byteorder",
+ "zerocopy-derive",
+]
+
+[[package]]
+name = "zerocopy-derive"
+version = "0.7.35"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "zerofrom"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e"
+dependencies = [
+ "zerofrom-derive",
+]
+
+[[package]]
+name = "zerofrom-derive"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+ "synstructure",
+]
+
+[[package]]
+name = "zeroize"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde"
+
+[[package]]
+name = "zerovec"
+version = "0.10.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079"
+dependencies = [
+ "yoke",
+ "zerofrom",
+ "zerovec-derive",
+]
+
+[[package]]
+name = "zerovec-derive"
+version = "0.10.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..d874e76
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,10 @@
+[package]
+name = "atmosphere"
+edition = "2021"
+
+[dev-dependencies]
+rustainers = { path = "crates/rustainers" }
+tokio = { version = "1", features = ["macros", "rt-multi-thread"] }
+
+[workspace]
+members = [ "crates/*" ]
diff --git a/charts/barbican/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl b/charts/barbican/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
index 6b77004..da3c481 100644
--- a/charts/barbican/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+++ b/charts/barbican/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
@@ -70,6 +70,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.bootstrap }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.bootstrap }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.bootstrap }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.bootstrap }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "bootstrap" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/barbican/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl b/charts/barbican/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
index 4696c88..5c35dd0 100644
--- a/charts/barbican/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
+++ b/charts/barbican/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
@@ -68,6 +68,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_sync }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_sync }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_sync }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_sync }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "db_sync" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/barbican/templates/deployment-api.yaml b/charts/barbican/templates/deployment-api.yaml
index 8ae9ea4..1e5cbba 100644
--- a/charts/barbican/templates/deployment-api.yaml
+++ b/charts/barbican/templates/deployment-api.yaml
@@ -47,6 +47,12 @@
 {{ dict "envAll" $envAll "podName" "barbican-api" "containerNames" (list "init" "barbican-api") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
 {{ dict "envAll" $envAll "application" "barbican" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+{{ with .Values.pod.priorityClassName.barbican_api }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.barbican_api }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       affinity:
 {{ tuple $envAll "barbican" "api" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
diff --git a/charts/barbican/templates/pod-test.yaml b/charts/barbican/templates/pod-test.yaml
index f414356..783d328 100644
--- a/charts/barbican/templates/pod-test.yaml
+++ b/charts/barbican/templates/pod-test.yaml
@@ -33,6 +33,12 @@
     {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
 {{ dict "envAll" $envAll "podName" "barbican-test" "containerNames" (list "init" "barbican-test") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 4 }}
 spec:
+{{ with .Values.pod.priorityClassName.barbican_tests }}
+  priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.barbican_tests }}
+  runtimeClassName: {{ . }}
+{{ end }}
   serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "test" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 2 }}
   nodeSelector:
diff --git a/charts/barbican/values.yaml b/charts/barbican/values.yaml
index e2e87bf..67bdbbc 100644
--- a/charts/barbican/values.yaml
+++ b/charts/barbican/values.yaml
@@ -55,6 +55,16 @@
       - image_repo_sync
 
 pod:
+  priorityClassName:
+    barbican_api: null
+    barbican_tests: null
+    bootstrap: null
+    db_sync: null
+  runtimeClassName:
+    barbican_api: null
+    barbican_tests: null
+    bootstrap: null
+    db_sync: null
   security_context:
     barbican:
       pod:
diff --git a/charts/ceph-provisioners/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl b/charts/ceph-provisioners/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
index 4696c88..5c35dd0 100644
--- a/charts/ceph-provisioners/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
+++ b/charts/ceph-provisioners/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
@@ -68,6 +68,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_sync }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_sync }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_sync }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_sync }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "db_sync" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/cinder/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl b/charts/cinder/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
index 4696c88..5c35dd0 100644
--- a/charts/cinder/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
+++ b/charts/cinder/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
@@ -68,6 +68,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_sync }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_sync }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_sync }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_sync }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "db_sync" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/cinder/templates/cron-job-cinder-volume-usage-audit.yaml b/charts/cinder/templates/cron-job-cinder-volume-usage-audit.yaml
index 897b5b6..14c18f5 100644
--- a/charts/cinder/templates/cron-job-cinder-volume-usage-audit.yaml
+++ b/charts/cinder/templates/cron-job-cinder-volume-usage-audit.yaml
@@ -50,6 +50,12 @@
 {{ dict "envAll" $envAll "podName" $serviceAccountName "containerNames" (list "cinder-volume-usage-audit" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 12 }}
         spec:
 {{ dict "envAll" $envAll "application" "volume_usage_audit" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 10 }}
+{{ with .Values.pod.priorityClassName.cinder_api }}
+          priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.cinder_api }}
+          runtimeClassName: {{ . }}
+{{ end }}
           serviceAccountName: {{ $serviceAccountName }}
           restartPolicy: OnFailure
 {{ if $envAll.Values.pod.tolerations.cinder.enabled }}
diff --git a/charts/cinder/templates/deployment-api.yaml b/charts/cinder/templates/deployment-api.yaml
index 641ed3b..117e503 100644
--- a/charts/cinder/templates/deployment-api.yaml
+++ b/charts/cinder/templates/deployment-api.yaml
@@ -46,6 +46,12 @@
 {{ tuple "cinder_api" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
 {{ dict "envAll" $envAll "podName" "cinder-api" "containerNames" (list "cinder-api" "ceph-coordination-volume-perms" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
+{{ with .Values.pod.priorityClassName.cinder_api }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.cinder_api }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "cinder_api" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       affinity:
diff --git a/charts/cinder/templates/deployment-backup.yaml b/charts/cinder/templates/deployment-backup.yaml
index 3657446..c3abd3b 100644
--- a/charts/cinder/templates/deployment-backup.yaml
+++ b/charts/cinder/templates/deployment-backup.yaml
@@ -48,6 +48,12 @@
 {{ tuple "cinder_backup" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
 {{ dict "envAll" $envAll "podName" "cinder-backup" "containerNames" (list "cinder-backup" "ceph-coordination-volume-perms" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
+{{ with .Values.pod.priorityClassName.cinder_backup }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.cinder_backup }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "cinder_backup" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       affinity:
diff --git a/charts/cinder/templates/deployment-scheduler.yaml b/charts/cinder/templates/deployment-scheduler.yaml
index 03206a8..f5b6638 100644
--- a/charts/cinder/templates/deployment-scheduler.yaml
+++ b/charts/cinder/templates/deployment-scheduler.yaml
@@ -46,6 +46,12 @@
 {{ tuple "cinder_scheduler" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
 {{ dict "envAll" $envAll "podName" "cinder-scheduler" "containerNames" (list "cinder-scheduler" "ceph-coordination-volume-perms" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
+{{ with .Values.pod.priorityClassName.cinder_scheduler }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.cinder_scheduler }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "cinder_scheduler" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       affinity:
diff --git a/charts/cinder/templates/deployment-volume.yaml b/charts/cinder/templates/deployment-volume.yaml
index a21c13e..fb0d626 100644
--- a/charts/cinder/templates/deployment-volume.yaml
+++ b/charts/cinder/templates/deployment-volume.yaml
@@ -48,6 +48,12 @@
 {{ tuple "cinder_volume" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
 {{ dict "envAll" $envAll "podName" "cinder-volume" "containerNames" (list "cinder-volume" "ceph-coordination-volume-perms" "init-cinder-conf" "init" ) | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
+{{ with .Values.pod.priorityClassName.cinder_volume }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.cinder_volume }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "cinder_volume" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       affinity:
diff --git a/charts/cinder/templates/pod-rally-test.yaml b/charts/cinder/templates/pod-rally-test.yaml
index 3ed52cd..e44bdcf 100644
--- a/charts/cinder/templates/pod-rally-test.yaml
+++ b/charts/cinder/templates/pod-rally-test.yaml
@@ -38,6 +38,12 @@
 {{ end }}
   nodeSelector:
     {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}
+{{ with .Values.pod.priorityClassName.cinder_tests }}
+  priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.cinder_tests }}
+  runtimeClassName: {{ . }}
+{{ end }}
   serviceAccountName: {{ $serviceAccountName }}
   initContainers:
 {{ tuple $envAll "tests" $mounts_tests_init | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 4 }}
diff --git a/charts/cinder/values.yaml b/charts/cinder/values.yaml
index 6f1d32a..8a2299c 100644
--- a/charts/cinder/values.yaml
+++ b/charts/cinder/values.yaml
@@ -76,6 +76,22 @@
       failed: 1
 
 pod:
+  priorityClassName:
+    cinder_api: null
+    cinder_backup: null
+    cinder_scheduler: null
+    cinder_tests: null
+    cinder_volume_usage_audit: null
+    cinder_volume: null
+    db_sync: null
+  runtimeClassName:
+    cinder_api: null
+    cinder_backup: null
+    cinder_scheduler: null
+    cinder_tests: null
+    cinder_volume_usage_audit: null
+    cinder_volume: null
+    db_sync: null
   security_context:
     volume_usage_audit:
       pod:
diff --git a/charts/designate/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl b/charts/designate/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
index 4696c88..5c35dd0 100644
--- a/charts/designate/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
+++ b/charts/designate/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
@@ -68,6 +68,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_sync }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_sync }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_sync }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_sync }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "db_sync" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/designate/templates/deployment-api.yaml b/charts/designate/templates/deployment-api.yaml
index e9df6b6..2f6ed7c 100644
--- a/charts/designate/templates/deployment-api.yaml
+++ b/charts/designate/templates/deployment-api.yaml
@@ -41,6 +41,12 @@
         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
     spec:
+{{ with .Values.pod.priorityClassName.designate_api }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.designate_api }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "designate" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       affinity:
diff --git a/charts/designate/templates/deployment-central.yaml b/charts/designate/templates/deployment-central.yaml
index 02d9f3c..c3e7dcd 100644
--- a/charts/designate/templates/deployment-central.yaml
+++ b/charts/designate/templates/deployment-central.yaml
@@ -41,6 +41,12 @@
         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
     spec:
+{{ with .Values.pod.priorityClassName.designate_central }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.designate_central }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       affinity:
 {{ tuple $envAll "designate" "central" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
diff --git a/charts/designate/templates/deployment-mdns.yaml b/charts/designate/templates/deployment-mdns.yaml
index d58f630..9312bc7 100644
--- a/charts/designate/templates/deployment-mdns.yaml
+++ b/charts/designate/templates/deployment-mdns.yaml
@@ -41,6 +41,12 @@
         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
     spec:
+{{ with .Values.pod.priorityClassName.designate_mdns }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.designate_mdns }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "designate" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       affinity:
diff --git a/charts/designate/templates/deployment-producer.yaml b/charts/designate/templates/deployment-producer.yaml
index 491dbad..68e46fb 100644
--- a/charts/designate/templates/deployment-producer.yaml
+++ b/charts/designate/templates/deployment-producer.yaml
@@ -41,6 +41,12 @@
         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
     spec:
+{{ with .Values.pod.priorityClassName.designate_producer }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.designate_producer }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       affinity:
 {{ tuple $envAll "designate" "producer" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
diff --git a/charts/designate/templates/deployment-sink.yaml b/charts/designate/templates/deployment-sink.yaml
index e577d9d..63222a1 100644
--- a/charts/designate/templates/deployment-sink.yaml
+++ b/charts/designate/templates/deployment-sink.yaml
@@ -41,6 +41,12 @@
         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
     spec:
+{{ with .Values.pod.priorityClassName.designate_sink }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.designate_sink }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       affinity:
 {{ tuple $envAll "designate" "sink" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
diff --git a/charts/designate/templates/deployment-worker.yaml b/charts/designate/templates/deployment-worker.yaml
index 74f9c99..12d0b96 100644
--- a/charts/designate/templates/deployment-worker.yaml
+++ b/charts/designate/templates/deployment-worker.yaml
@@ -41,6 +41,12 @@
         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
     spec:
+{{ with .Values.pod.priorityClassName.designate_worker }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.designate_worker }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       affinity:
 {{ tuple $envAll "designate" "worker" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
diff --git a/charts/designate/values.yaml b/charts/designate/values.yaml
index 7169414..6e520ca 100644
--- a/charts/designate/values.yaml
+++ b/charts/designate/values.yaml
@@ -66,6 +66,22 @@
       - image_repo_sync
 
 pod:
+  priorityClassName:
+    designate_api: null
+    designate_central: null
+    designate_mdns: null
+    designate_producer: null
+    designate_sink: null
+    designate_worker: null
+    db_sync: null
+  runtimeClassName:
+    designate_api: null
+    designate_central: null
+    designate_mdns: null
+    designate_producer: null
+    designate_sink: null
+    designate_worker: null
+    db_sync: null
   affinity:
     anti:
       type:
diff --git a/charts/glance/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl b/charts/glance/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
index 4696c88..5c35dd0 100644
--- a/charts/glance/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
+++ b/charts/glance/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
@@ -68,6 +68,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_sync }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_sync }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_sync }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_sync }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "db_sync" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/glance/templates/deployment-api.yaml b/charts/glance/templates/deployment-api.yaml
index d88856f..9f14d1b 100644
--- a/charts/glance/templates/deployment-api.yaml
+++ b/charts/glance/templates/deployment-api.yaml
@@ -75,6 +75,12 @@
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
 {{ dict "envAll" $envAll "podName" "glance-api" "containerNames" ( list "glance-perms" "glance-api" "init" "nginx" ) | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
+{{ with .Values.pod.priorityClassName.glance_api }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.glance_api }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "glance" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       affinity:
diff --git a/charts/glance/templates/pod-rally-test.yaml b/charts/glance/templates/pod-rally-test.yaml
index 0ca17eb..1148e8a 100644
--- a/charts/glance/templates/pod-rally-test.yaml
+++ b/charts/glance/templates/pod-rally-test.yaml
@@ -44,6 +44,12 @@
     {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}
 {{ dict "envAll" $envAll "application" "test" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 2 }}
   restartPolicy: Never
+{{ with .Values.pod.priorityClassName.glance_tests }}
+  priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.glance_tests }}
+  runtimeClassName: {{ . }}
+{{ end }}
   serviceAccountName: {{ $serviceAccountName }}
   initContainers:
 {{ tuple $envAll "tests" $mounts_tests_init | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 4 }}
diff --git a/charts/glance/values.yaml b/charts/glance/values.yaml
index 85ddf18..6eae655 100644
--- a/charts/glance/values.yaml
+++ b/charts/glance/values.yaml
@@ -795,6 +795,14 @@
         default: 80
 
 pod:
+  priorityClassName:
+    glance_api: null
+    glance_tests: null
+    db_sync: null
+  runtimeClassName:
+    glance_api: null
+    glance_tests: null
+    db_sync: null
   security_context:
     glance:
       pod:
diff --git a/charts/heat/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl b/charts/heat/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
index 6b77004..da3c481 100644
--- a/charts/heat/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+++ b/charts/heat/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
@@ -70,6 +70,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.bootstrap }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.bootstrap }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.bootstrap }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.bootstrap }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "bootstrap" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/heat/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl b/charts/heat/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
index 4696c88..5c35dd0 100644
--- a/charts/heat/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
+++ b/charts/heat/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
@@ -68,6 +68,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_sync }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_sync }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_sync }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_sync }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "db_sync" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/heat/templates/cron-job-engine-cleaner.yaml b/charts/heat/templates/cron-job-engine-cleaner.yaml
index a7eded4..32674b6 100644
--- a/charts/heat/templates/cron-job-engine-cleaner.yaml
+++ b/charts/heat/templates/cron-job-engine-cleaner.yaml
@@ -52,6 +52,12 @@
             configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
 {{ dict "envAll" $envAll "podName" "heat-engine-cleaner" "containerNames" (list "heat-engine-cleaner" "init" ) | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
         spec:
+{{ with .Values.pod.priorityClassName.heat_engine_cleaner }}
+          priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.heat_engine_cleaner }}
+          runtimeClassName: {{ . }}
+{{ end }}
           serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "engine_cleaner" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 10 }}
           restartPolicy: OnFailure
diff --git a/charts/heat/templates/cron-job-purge-deleted.yaml b/charts/heat/templates/cron-job-purge-deleted.yaml
index 4d83c29..285c2d3 100644
--- a/charts/heat/templates/cron-job-purge-deleted.yaml
+++ b/charts/heat/templates/cron-job-purge-deleted.yaml
@@ -47,6 +47,12 @@
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 12 }}
 {{ dict "envAll" $envAll "podName" "heat-purge-deleted" "containerNames" (list "init" "heat-purge-deleted" ) | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
         spec:
+{{ with .Values.pod.priorityClassName.heat_purge_deleted }}
+          priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.heat_purge_deleted }}
+          runtimeClassName: {{ . }}
+{{ end }}
           serviceAccountName: {{ $serviceAccountName }}
           restartPolicy: OnFailure
 {{ if $envAll.Values.pod.tolerations.heat.enabled }}
diff --git a/charts/heat/templates/deployment-api.yaml b/charts/heat/templates/deployment-api.yaml
index ca34656..f737c84 100644
--- a/charts/heat/templates/deployment-api.yaml
+++ b/charts/heat/templates/deployment-api.yaml
@@ -45,6 +45,12 @@
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
 {{ dict "envAll" $envAll "podName" "heat-api" "containerNames" (list "heat-api" "init" ) | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
+{{ with .Values.pod.priorityClassName.heat_api }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.heat_api }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "heat" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       affinity:
diff --git a/charts/heat/templates/deployment-cfn.yaml b/charts/heat/templates/deployment-cfn.yaml
index 8401543..5f5584e 100644
--- a/charts/heat/templates/deployment-cfn.yaml
+++ b/charts/heat/templates/deployment-cfn.yaml
@@ -45,6 +45,12 @@
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
 {{ dict "envAll" $envAll "podName" "heat-cfn" "containerNames" (list "heat-cfn" "init" ) | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
+{{ with .Values.pod.priorityClassName.heat_cfn }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.heat_cfn }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "heat" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       affinity:
diff --git a/charts/heat/templates/deployment-cloudwatch.yaml b/charts/heat/templates/deployment-cloudwatch.yaml
index f1f7353..0c9dec5 100644
--- a/charts/heat/templates/deployment-cloudwatch.yaml
+++ b/charts/heat/templates/deployment-cloudwatch.yaml
@@ -44,6 +44,12 @@
         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
     spec:
+{{ with .Values.pod.priorityClassName.heat_cloudwatch }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.heat_cloudwatch }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "heat" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       affinity:
diff --git a/charts/heat/templates/deployment-engine.yaml b/charts/heat/templates/deployment-engine.yaml
index 7b7b8ad..d228a87 100644
--- a/charts/heat/templates/deployment-engine.yaml
+++ b/charts/heat/templates/deployment-engine.yaml
@@ -53,6 +53,12 @@
 {{ dict "envAll" $envAll "podName" "heat-engine" "containerNames" (list "heat-engine" "init" ) | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
 {{- end }}
     spec:
+{{ with .Values.pod.priorityClassName.heat_engine }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.heat_engine }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "heat" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       affinity:
diff --git a/charts/heat/templates/job-trusts.yaml b/charts/heat/templates/job-trusts.yaml
index ae5bc64..e539e7f 100644
--- a/charts/heat/templates/job-trusts.yaml
+++ b/charts/heat/templates/job-trusts.yaml
@@ -43,6 +43,12 @@
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
 {{ dict "envAll" $envAll "podName" "heat-trusts" "containerNames" (list "heat-trusts" "init" ) | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.heat_trusts }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.heat_trusts }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.heat_trusts }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.heat_trusts }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "trusts" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       restartPolicy: OnFailure
diff --git a/charts/heat/templates/pod-rally-test.yaml b/charts/heat/templates/pod-rally-test.yaml
index ac6c636..610048c 100644
--- a/charts/heat/templates/pod-rally-test.yaml
+++ b/charts/heat/templates/pod-rally-test.yaml
@@ -43,6 +43,12 @@
 {{ tuple $envAll "heat" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 2 }}
 {{ end }}
   restartPolicy: Never
+{{ with .Values.pod.priorityClassName.heat_tests }}
+  priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.heat_tests }}
+  runtimeClassName: {{ . }}
+{{ end }}
   serviceAccountName: {{ $serviceAccountName }}
   initContainers:
 {{ tuple $envAll "tests" $mounts_tests_init | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 4 }}
diff --git a/charts/heat/values.yaml b/charts/heat/values.yaml
index 1cf8bdc..e242d7d 100644
--- a/charts/heat/values.yaml
+++ b/charts/heat/values.yaml
@@ -1015,6 +1015,28 @@
         default: 80
 
 pod:
+  priorityClassName:
+    heat_api: null
+    heat_cfn: null
+    heat_cloudwatch: null
+    heat_tests: null
+    heat_engine_cleaner: null
+    heat_purge_deleted: null
+    heat_engine: null
+    heat_trusts: null
+    bootstrap: null
+    db_sync: null
+  runtimeClassName:
+    heat_api: null
+    heat_cfn: null
+    heat_cloudwatch: null
+    heat_tests: null
+    heat_engine_cleaner: null
+    heat_purge_deleted: null
+    heat_engine: null
+    heat_trusts: null
+    bootstrap: null
+    db_sync: null
   security_context:
     heat:
       pod:
diff --git a/charts/horizon/charts/helm-toolkit/templates/manifests/_job-db-init-mysql.tpl b/charts/horizon/charts/helm-toolkit/templates/manifests/_job-db-init-mysql.tpl
index b8a1dce..1db62b0 100644
--- a/charts/horizon/charts/helm-toolkit/templates/manifests/_job-db-init-mysql.tpl
+++ b/charts/horizon/charts/helm-toolkit/templates/manifests/_job-db-init-mysql.tpl
@@ -71,6 +71,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_init }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_init }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_init }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_init }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "db_init" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/horizon/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl b/charts/horizon/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
index 4696c88..5c35dd0 100644
--- a/charts/horizon/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
+++ b/charts/horizon/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
@@ -68,6 +68,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_sync }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_sync }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_sync }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_sync }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "db_sync" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/horizon/templates/deployment.yaml b/charts/horizon/templates/deployment.yaml
index 0037fef..666edd6 100644
--- a/charts/horizon/templates/deployment.yaml
+++ b/charts/horizon/templates/deployment.yaml
@@ -48,6 +48,12 @@
 {{- end }}
 {{ dict "envAll" $envAll "podName" "horizon" "containerNames" (list "horizon" "init" ) | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
+{{ with .Values.pod.priorityClassName.horizon }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.horizon }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "horizon" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       affinity:
diff --git a/charts/horizon/templates/job-db-sync.yaml b/charts/horizon/templates/job-db-sync.yaml
index c2229cd..8ae8643 100644
--- a/charts/horizon/templates/job-db-sync.yaml
+++ b/charts/horizon/templates/job-db-sync.yaml
@@ -41,6 +41,12 @@
       annotations:
 {{ dict "envAll" $envAll "podName" "horizon-db-sync" "containerNames" (list "horizon-db-sync" "init" ) | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
+{{ with .Values.pod.priorityClassName.db_sync }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.db_sync }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "db_sync" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       restartPolicy: OnFailure
diff --git a/charts/horizon/templates/pod-helm-tests.yaml b/charts/horizon/templates/pod-helm-tests.yaml
index 7d16303..4513053 100644
--- a/charts/horizon/templates/pod-helm-tests.yaml
+++ b/charts/horizon/templates/pod-helm-tests.yaml
@@ -34,6 +34,12 @@
 spec:
 {{ dict "envAll" $envAll "application" "test" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 2 }}
   restartPolicy: Never
+{{ with .Values.pod.priorityClassName.horizon_tests }}
+  priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.horizon_tests }}
+  runtimeClassName: {{ . }}
+{{ end }}
   serviceAccountName: {{ $serviceAccountName }}
 {{ if $envAll.Values.pod.tolerations.horizon.enabled }}
 {{ tuple $envAll "horizon" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 2 }}
diff --git a/charts/horizon/values.yaml b/charts/horizon/values.yaml
index 4a06045..33a74d2 100644
--- a/charts/horizon/values.yaml
+++ b/charts/horizon/values.yaml
@@ -1103,6 +1103,16 @@
           service: dashboard
 
 pod:
+  priorityClassName:
+    horizon: null
+    horizon_tests: null
+    db_init: null
+    db_sync: null
+  runtimeClassName:
+    horizon: null
+    horizon_tests: null
+    db_init: null
+    db_sync: null
   security_context:
     horizon:
       pod:
diff --git a/charts/ironic/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl b/charts/ironic/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
index 6b77004..da3c481 100644
--- a/charts/ironic/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+++ b/charts/ironic/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
@@ -70,6 +70,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.bootstrap }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.bootstrap }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.bootstrap }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.bootstrap }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "bootstrap" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/ironic/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl b/charts/ironic/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
index 4696c88..5c35dd0 100644
--- a/charts/ironic/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
+++ b/charts/ironic/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
@@ -68,6 +68,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_sync }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_sync }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_sync }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_sync }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "db_sync" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/ironic/templates/deployment-api.yaml b/charts/ironic/templates/deployment-api.yaml
index 7d4b838..99e25b6 100644
--- a/charts/ironic/templates/deployment-api.yaml
+++ b/charts/ironic/templates/deployment-api.yaml
@@ -45,6 +45,12 @@
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
 {{ tuple "ironic_api" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
     spec:
+{{ with .Values.pod.priorityClassName.ironic_api }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.ironic_api }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       affinity:
 {{ tuple $envAll "ironic" "api" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
diff --git a/charts/ironic/templates/statefulset-conductor.yaml b/charts/ironic/templates/statefulset-conductor.yaml
index bcf6238..fc0031e 100644
--- a/charts/ironic/templates/statefulset-conductor.yaml
+++ b/charts/ironic/templates/statefulset-conductor.yaml
@@ -44,6 +44,12 @@
         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
     spec:
+{{ with .Values.pod.priorityClassName.ironic_conductor }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.ironic_conductor }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       affinity:
 {{ tuple $envAll "ironic" "conductor" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
diff --git a/charts/ironic/values.yaml b/charts/ironic/values.yaml
index a94bc3f..bc4b034 100644
--- a/charts/ironic/values.yaml
+++ b/charts/ironic/values.yaml
@@ -107,6 +107,7 @@
       api_url: null
     database:
       connection: null
+      max_retries: -1
     deploy:
       http_root: /var/lib/openstack-helm/httpboot
     glance:
@@ -639,6 +640,16 @@
         default: 24220
 
 pod:
+  priorityClassName:
+    ironic_api: null
+    ironic_conductor: null
+    bootstrap: null
+    db_sync: null
+  runtimeClassName:
+    ironic_api: null
+    ironic_conductor: null
+    bootstrap: null
+    db_sync: null
   affinity:
     anti:
       type:
diff --git a/charts/keystone/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl b/charts/keystone/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
index 6b77004..da3c481 100644
--- a/charts/keystone/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+++ b/charts/keystone/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
@@ -70,6 +70,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.bootstrap }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.bootstrap }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.bootstrap }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.bootstrap }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "bootstrap" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/keystone/charts/helm-toolkit/templates/manifests/_job-db-init-mysql.tpl b/charts/keystone/charts/helm-toolkit/templates/manifests/_job-db-init-mysql.tpl
index b8a1dce..1db62b0 100644
--- a/charts/keystone/charts/helm-toolkit/templates/manifests/_job-db-init-mysql.tpl
+++ b/charts/keystone/charts/helm-toolkit/templates/manifests/_job-db-init-mysql.tpl
@@ -71,6 +71,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_init }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_init }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_init }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_init }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "db_init" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/keystone/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl b/charts/keystone/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
index 4696c88..5c35dd0 100644
--- a/charts/keystone/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
+++ b/charts/keystone/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
@@ -68,6 +68,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_sync }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_sync }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_sync }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_sync }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "db_sync" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/keystone/templates/cron-job-credential-rotate.yaml b/charts/keystone/templates/cron-job-credential-rotate.yaml
index 5906079..6fabd9d 100644
--- a/charts/keystone/templates/cron-job-credential-rotate.yaml
+++ b/charts/keystone/templates/cron-job-credential-rotate.yaml
@@ -70,6 +70,12 @@
           labels:
 {{ tuple $envAll "keystone" "credential-rotate" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 12 }}
         spec:
+{{ with .Values.pod.priorityClassName.keystone_credential_rotate }}
+          priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.keystone_credential_rotate }}
+          runtimeClassName: {{ . }}
+{{ end }}
           serviceAccountName: {{ $serviceAccountName }}
           initContainers:
 {{ tuple $envAll "credential_rotate" $mounts_keystone_credential_rotate_init | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 12 }}
diff --git a/charts/keystone/templates/cron-job-fernet-rotate.yaml b/charts/keystone/templates/cron-job-fernet-rotate.yaml
index a059f92..e911842 100644
--- a/charts/keystone/templates/cron-job-fernet-rotate.yaml
+++ b/charts/keystone/templates/cron-job-fernet-rotate.yaml
@@ -71,6 +71,12 @@
           labels:
 {{ tuple $envAll "keystone" "fernet-rotate" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 12 }}
         spec:
+{{ with .Values.pod.priorityClassName.keystone_fernet_rotate }}
+          priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.keystone_fernet_rotate }}
+          runtimeClassName: {{ . }}
+{{ end }}
           serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "fernet_rotate" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 10 }}
           initContainers:
diff --git a/charts/keystone/templates/deployment-api.yaml b/charts/keystone/templates/deployment-api.yaml
index ee4e13c..36050a5 100644
--- a/charts/keystone/templates/deployment-api.yaml
+++ b/charts/keystone/templates/deployment-api.yaml
@@ -54,6 +54,12 @@
 {{ dict "envAll" $envAll "podName" "keystone-api" "containerNames" (list "keystone-api") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
 {{ dict "envAll" $envAll "application" "keystone" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+{{ with .Values.pod.priorityClassName.keystone_api }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.keystone_api }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       affinity:
 {{ tuple $envAll "keystone" "api" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
diff --git a/charts/keystone/templates/job-credential-cleanup.yaml b/charts/keystone/templates/job-credential-cleanup.yaml
index 9f26802..3eac2b3 100644
--- a/charts/keystone/templates/job-credential-cleanup.yaml
+++ b/charts/keystone/templates/job-credential-cleanup.yaml
@@ -45,6 +45,12 @@
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
 {{ dict "envAll" $envAll "podName" "keystone-credential-cleanup" "containerNames" (list "keystone-credential-cleanup") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
+{{ with .Values.pod.priorityClassName.keystone_credential_cleanup }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.keystone_credential_cleanup }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceName }}
       restartPolicy: Never
 {{ if $envAll.Values.pod.tolerations.keystone.enabled }}
diff --git a/charts/keystone/templates/job-credential-setup.yaml b/charts/keystone/templates/job-credential-setup.yaml
index 38ff3c8..c8710b6 100644
--- a/charts/keystone/templates/job-credential-setup.yaml
+++ b/charts/keystone/templates/job-credential-setup.yaml
@@ -74,6 +74,12 @@
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
 {{ dict "envAll" $envAll "podName" "keystone-credential-setup" "containerNames" (list "keystone-credential-setup") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
+{{ with .Values.pod.priorityClassName.keystone_credential_setup }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.keystone_credential_setup }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "credential_setup" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       initContainers:
diff --git a/charts/keystone/templates/job-domain-manage.yaml b/charts/keystone/templates/job-domain-manage.yaml
index 87c82eb..41bc9b1 100644
--- a/charts/keystone/templates/job-domain-manage.yaml
+++ b/charts/keystone/templates/job-domain-manage.yaml
@@ -42,6 +42,12 @@
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
 {{ dict "envAll" $envAll "podName" "keystone-domain-manage" "containerNames" (list "keystone-domain-manage" "keystone-domain-manage-init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
+{{ with .Values.pod.priorityClassName.keystone_domain_manage }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.keystone_domain_manage }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "domain_manage" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       restartPolicy: OnFailure
diff --git a/charts/keystone/templates/job-fernet-setup.yaml b/charts/keystone/templates/job-fernet-setup.yaml
index d52aa6c..f41e788 100644
--- a/charts/keystone/templates/job-fernet-setup.yaml
+++ b/charts/keystone/templates/job-fernet-setup.yaml
@@ -74,6 +74,12 @@
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
 {{ dict "envAll" $envAll "podName" "keystone-fernet-setup" "containerNames" (list "keystone-fernet-setup") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
+{{ with .Values.pod.priorityClassName.keystone_fernet_setup }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.keystone_fernet_setup }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "fernet_setup" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       initContainers:
diff --git a/charts/keystone/templates/pod-rally-test.yaml b/charts/keystone/templates/pod-rally-test.yaml
index ad5b23a..155dbae 100644
--- a/charts/keystone/templates/pod-rally-test.yaml
+++ b/charts/keystone/templates/pod-rally-test.yaml
@@ -42,6 +42,12 @@
 {{ dict "envAll" $envAll "application" "test" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 2 }}
   nodeSelector:
     {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}
+{{ with .Values.pod.priorityClassName.keystone_tests }}
+  priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.keystone_tests }}
+  runtimeClassName: {{ . }}
+{{ end }}
   serviceAccountName: {{ $serviceAccountName }}
   initContainers:
 {{ tuple $envAll "tests" $mounts_tests_init | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 4 }}
diff --git a/charts/keystone/values.yaml b/charts/keystone/values.yaml
index 27e767c..65e6f65 100644
--- a/charts/keystone/values.yaml
+++ b/charts/keystone/values.yaml
@@ -158,6 +158,30 @@
           service: local_image_registry
 
 pod:
+  priorityClassName:
+    keystone_api: null
+    keystone_tests: null
+    keystone_credential_rotate: null
+    keystone_fernet_rotate: null
+    keystone_credential_setup: null
+    keystone_fernet_setup: null
+    keystone_domain_manage: null
+    keystone_credential_cleanup: null
+    bootstrap: null
+    db_init: null
+    db_sync: null
+  runtimeClassName:
+    keystone_api: null
+    keystone_tests: null
+    keystone_credential_rotate: null
+    keystone_fernet_rotate: null
+    keystone_credential_setup: null
+    keystone_fernet_setup: null
+    keystone_domain_manage: null
+    keystone_credential_cleanup: null
+    bootstrap: null
+    db_init: null
+    db_sync: null
   security_context:
     keystone:
       pod:
diff --git a/charts/libvirt/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl b/charts/libvirt/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
index 4696c88..5c35dd0 100644
--- a/charts/libvirt/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
+++ b/charts/libvirt/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
@@ -68,6 +68,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_sync }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_sync }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_sync }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_sync }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "db_sync" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/magnum/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl b/charts/magnum/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
index 6b77004..da3c481 100644
--- a/charts/magnum/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+++ b/charts/magnum/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
@@ -70,6 +70,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.bootstrap }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.bootstrap }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.bootstrap }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.bootstrap }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "bootstrap" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/magnum/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl b/charts/magnum/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
index 4696c88..5c35dd0 100644
--- a/charts/magnum/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
+++ b/charts/magnum/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
@@ -68,6 +68,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_sync }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_sync }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_sync }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_sync }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "db_sync" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/magnum/templates/deployment-api.yaml b/charts/magnum/templates/deployment-api.yaml
index 9eb4967..720a4ba 100644
--- a/charts/magnum/templates/deployment-api.yaml
+++ b/charts/magnum/templates/deployment-api.yaml
@@ -44,6 +44,12 @@
         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
     spec:
+{{ with .Values.pod.priorityClassName.magnum_api }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.magnum_api }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       affinity:
 {{ tuple $envAll "magnum" "api" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
diff --git a/charts/magnum/templates/statefulset-conductor.yaml b/charts/magnum/templates/statefulset-conductor.yaml
index 44d8b02..fe1cc84 100644
--- a/charts/magnum/templates/statefulset-conductor.yaml
+++ b/charts/magnum/templates/statefulset-conductor.yaml
@@ -45,6 +45,12 @@
         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
     spec:
+{{ with .Values.pod.priorityClassName.magnum_conductor }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.magnum_conductor }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       affinity:
 {{ tuple $envAll "magnum" "conductor" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
diff --git a/charts/magnum/values.yaml b/charts/magnum/values.yaml
index 88b4203..29b8198 100644
--- a/charts/magnum/values.yaml
+++ b/charts/magnum/values.yaml
@@ -482,6 +482,16 @@
         default: 24220
 
 pod:
+  priorityClassName:
+    magnum_api: null
+    magnum_conductor: null
+    bootstrap: null
+    db_sync: null
+  runtimeClassName:
+    magnum_api: null
+    magnum_conductor: null
+    bootstrap: null
+    db_sync: null
   user:
     magnum:
       uid: 42424
diff --git a/charts/manila/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl b/charts/manila/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
index 6b77004..da3c481 100644
--- a/charts/manila/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+++ b/charts/manila/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
@@ -70,6 +70,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.bootstrap }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.bootstrap }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.bootstrap }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.bootstrap }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "bootstrap" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/manila/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl b/charts/manila/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
index 4696c88..5c35dd0 100644
--- a/charts/manila/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
+++ b/charts/manila/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
@@ -68,6 +68,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_sync }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_sync }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_sync }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_sync }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "db_sync" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/manila/templates/deployment-api.yaml b/charts/manila/templates/deployment-api.yaml
index bcc352f..0338ee1 100644
--- a/charts/manila/templates/deployment-api.yaml
+++ b/charts/manila/templates/deployment-api.yaml
@@ -46,6 +46,12 @@
 {{ dict "envAll" $envAll "podName" "manila-api" "containerNames" (list "init" "manila-api") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
 {{ dict "envAll" $envAll "application" "manila" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+{{ with .Values.pod.priorityClassName.manila_api }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.manila_api }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       affinity:
 {{ tuple $envAll "manila" "api" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
diff --git a/charts/manila/templates/deployment-data.yaml b/charts/manila/templates/deployment-data.yaml
index 21db299..6c981a6 100644
--- a/charts/manila/templates/deployment-data.yaml
+++ b/charts/manila/templates/deployment-data.yaml
@@ -46,6 +46,12 @@
 {{ dict "envAll" $envAll "podName" "manila-data" "containerNames" (list "init" "manila-data") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
 {{ dict "envAll" $envAll "application" "manila" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+{{ with .Values.pod.priorityClassName.manila_data }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.manila_data }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       affinity:
 {{ tuple $envAll "manila" "data" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
diff --git a/charts/manila/templates/deployment-scheduler.yaml b/charts/manila/templates/deployment-scheduler.yaml
index 4858dfc..c8749a2 100644
--- a/charts/manila/templates/deployment-scheduler.yaml
+++ b/charts/manila/templates/deployment-scheduler.yaml
@@ -46,6 +46,12 @@
 {{ dict "envAll" $envAll "podName" "manila-scheduler" "containerNames" (list "init" "manila-scheduler") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
 {{ dict "envAll" $envAll "application" "manila" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+{{ with .Values.pod.priorityClassName.manila_scheduler }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.manila_scheduler }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       affinity:
 {{ tuple $envAll "manila" "scheduler" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
diff --git a/charts/manila/templates/deployment-share.yaml b/charts/manila/templates/deployment-share.yaml
index 87bff3b..2d7a6c1 100644
--- a/charts/manila/templates/deployment-share.yaml
+++ b/charts/manila/templates/deployment-share.yaml
@@ -46,6 +46,12 @@
 {{ dict "envAll" $envAll "podName" "manila-share" "containerNames" (list "init" "manila-share" "manila-share-init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
 {{ dict "envAll" $envAll "application" "manila" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+{{ with .Values.pod.priorityClassName.manila_share }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.manila_share }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       affinity:
 {{ tuple $envAll "manila" "share" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
diff --git a/charts/manila/templates/pod-rally-test.yaml b/charts/manila/templates/pod-rally-test.yaml
index 928c183..940989b 100644
--- a/charts/manila/templates/pod-rally-test.yaml
+++ b/charts/manila/templates/pod-rally-test.yaml
@@ -42,6 +42,12 @@
 {{ dict "envAll" $envAll "application" "test" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 2 }}
   nodeSelector:
     {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}
+{{ with .Values.pod.priorityClassName.manila_tests }}
+  priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.manila_tests }}
+  runtimeClassName: {{ . }}
+{{ end }}
   serviceAccountName: {{ $serviceAccountName }}
   initContainers:
 {{ tuple $envAll "tests" $mounts_tests_init | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 4 }}
@@ -62,27 +68,27 @@
 {{- dict "enabled" .Values.manifests.certificates "name" .Values.secrets.tls.share.api.internal | include "helm-toolkit.snippets.tls_volume_mount"  | indent 8 }}
 {{- end }}
       env:
-{{- with $env := dict "ksUserSecret" .Values.secrets.share.admin "useCA" (and .Values.manifests.certificates .Values.secrets.tls.share.api.internal) }}
-{{- include "helm-toolkit.snippets.manila_openrc_env_vars" $env | indent 8 }}
+{{- with $env := dict "ksUserSecret" .Values.secrets.identity.admin "useCA" (and .Values.manifests.certificates .Values.secrets.tls.share.api.internal) }}
+{{- include "helm-toolkit.snippets.keystone_openrc_env_vars" $env | indent 8 }}
 {{- end }}
         - name: SERVICE_OS_SERVICE_NAME
           value: "test"
-{{- with $env := dict "ksUserSecret" .Values.secrets.share.test }}
-{{- include "helm-toolkit.snippets.manila_user_create_env_vars" $env | indent 8 }}
+{{- with $env := dict "ksUserSecret" .Values.secrets.identity.test }}
+{{- include "helm-toolkit.snippets.keystone_user_create_env_vars" $env | indent 8 }}
 {{- end }}
         - name: SERVICE_OS_ROLE
-          value: {{ .Values.endpoints.share.auth.test.role | quote }}
+          value: {{ .Values.endpoints.identity.auth.test.role | quote }}
   containers:
     - name: manila-test
 {{ tuple $envAll "test" | include "helm-toolkit.snippets.image" | indent 6 }}
 {{ tuple $envAll $envAll.Values.pod.resources.jobs.tests | include "helm-toolkit.snippets.kubernetes_resources" | indent 6 }}
 {{ dict "envAll" $envAll "application" "test" "container" "manila_test" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 6}}
       env:
-{{- with $env := dict "ksUserSecret" .Values.secrets.share.admin "useCA" (and .Values.manifests.certificates .Values.secrets.tls.share.api.internal) }}
-{{- include "helm-toolkit.snippets.manila_openrc_env_vars" $env | indent 8 }}
+{{- with $env := dict "ksUserSecret" .Values.secrets.identity.admin "useCA" (and .Values.manifests.certificates .Values.secrets.tls.share.api.internal) }}
+{{- include "helm-toolkit.snippets.keystone_openrc_env_vars" $env | indent 8 }}
 {{- end }}
-{{- with $env := dict "ksUserSecret" .Values.secrets.share.test }}
-{{- include "helm-toolkit.snippets.manila_user_create_env_vars" $env | indent 8 }}
+{{- with $env := dict "ksUserSecret" .Values.secrets.identity.test }}
+{{- include "helm-toolkit.snippets.keystone_user_create_env_vars" $env | indent 8 }}
 {{- end }}
         - name: RALLY_ENV_NAME
           value: {{.deployment_name}}
diff --git a/charts/manila/values.yaml b/charts/manila/values.yaml
index f820bc1..4219862 100644
--- a/charts/manila/values.yaml
+++ b/charts/manila/values.yaml
@@ -66,6 +66,22 @@
       - image_repo_sync
 
 pod:
+  priorityClassName:
+    manila_api: null
+    manila_data: null
+    manila_scheduler: null
+    manila_tests: null
+    manila_share: null
+    bootstrap: null
+    db_sync: null
+  runtimeClassName:
+    manila_api: null
+    manila_data: null
+    manila_scheduler: null
+    manila_tests: null
+    manila_share: null
+    bootstrap: null
+    db_sync: null
   security_context:
     manila:
       pod:
@@ -829,6 +845,7 @@
   identity:
     admin: manila-keystone-admin
     manila: manila-keystone-user
+    test: manila-keystone-test
   oslo_db:
     admin: manila-db-admin
     manila: manila-db-user
@@ -890,6 +907,14 @@
         project_name: service
         user_domain_name: service
         project_domain_name: service
+      test:
+        role: admin
+        region_name: RegionOne
+        username: manila-test
+        password: password
+        project_name: test
+        user_domain_name: service
+        project_domain_name: service
     hosts:
       default: keystone
       internal: keystone-api
@@ -1061,7 +1086,7 @@
   job_ks_service: true
   job_ks_user: true
   pdb_api: true
-  pod_test: true
+  pod_rally_test: true
   secret_db: true
   network_policy: false
   secret_ingress_tls: true
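Two behavioral notes in this values change: the rally test pod now takes its credentials from the shared identity secrets (a new manila-keystone-test secret plus an endpoints.identity.auth.test entry) rather than manila-specific share secrets, and the manifest toggle is renamed from pod_test to pod_rally_test. Existing overrides keyed on the old name need to follow; for example, to keep the test pod disabled:

  manifests:
    pod_rally_test: false   # formerly manifests.pod_test
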
diff --git a/charts/memcached/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl b/charts/memcached/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
index 4696c88..5c35dd0 100644
--- a/charts/memcached/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
+++ b/charts/memcached/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
@@ -68,6 +68,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_sync }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_sync }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_sync }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_sync }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "db_sync" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/neutron/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl b/charts/neutron/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
index 6b77004..da3c481 100644
--- a/charts/neutron/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+++ b/charts/neutron/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
@@ -70,6 +70,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.bootstrap }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.bootstrap }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.bootstrap }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.bootstrap }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "bootstrap" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/neutron/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl b/charts/neutron/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
index 4696c88..5c35dd0 100644
--- a/charts/neutron/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
+++ b/charts/neutron/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
@@ -68,6 +68,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_sync }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_sync }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_sync }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_sync }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "db_sync" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/neutron/templates/bin/_neutron-dhcp-agent.sh.tpl b/charts/neutron/templates/bin/_neutron-dhcp-agent.sh.tpl
index 0f73e5a..b6296f4 100644
--- a/charts/neutron/templates/bin/_neutron-dhcp-agent.sh.tpl
+++ b/charts/neutron/templates/bin/_neutron-dhcp-agent.sh.tpl
@@ -17,6 +17,9 @@
 set -x
 exec neutron-dhcp-agent \
   --config-file /etc/neutron/neutron.conf \
+{{- if ( has "ovn" .Values.network.backend ) }}
+  --config-file /tmp/pod-shared/ovn.ini \
+{{- end }}
 {{- if and ( empty .Values.conf.neutron.DEFAULT.host ) ( .Values.pod.use_fqdn.neutron_agent ) }}
   --config-file /tmp/pod-shared/neutron-agent.ini \
 {{- end }}
diff --git a/charts/neutron/templates/bin/_neutron-openvswitch-agent-init.sh.tpl b/charts/neutron/templates/bin/_neutron-openvswitch-agent-init.sh.tpl
index bd0a64a..c15e40a 100644
--- a/charts/neutron/templates/bin/_neutron-openvswitch-agent-init.sh.tpl
+++ b/charts/neutron/templates/bin/_neutron-openvswitch-agent-init.sh.tpl
@@ -435,13 +435,14 @@
   if [ -n "$iface" ] && [ "$iface" != "null" ] && ( ip link show $iface 1>/dev/null 2>&1 );
   then
     ovs-vsctl --db=unix:${OVS_SOCKET} --may-exist add-port $bridge $iface
-    migrate_ip_from_nic $iface $bridge
     if [[ "${DPDK_ENABLED}" != "true" ]]; then
       ip link set dev $iface up
     fi
   fi
 done
 
+/usr/local/bin/ovsinit /tmp/auto_bridge_add
+
 tunnel_types="{{- .Values.conf.plugins.openvswitch_agent.agent.tunnel_types -}}"
 if [[ -n "${tunnel_types}" ]] ; then
     tunnel_interface="{{- .Values.network.interface.tunnel -}}"
diff --git a/charts/neutron/templates/configmap-etc.yaml b/charts/neutron/templates/configmap-etc.yaml
index 82865c0..9d00130 100644
--- a/charts/neutron/templates/configmap-etc.yaml
+++ b/charts/neutron/templates/configmap-etc.yaml
@@ -155,7 +155,7 @@
 
 {{- if empty $envAll.Values.conf.dhcp_agent.DEFAULT.interface_driver -}}
 {{- $_ := set $envAll.Values "__interface_driver" ( list ) }}
-{{- if ( has "openvswitch" $envAll.Values.network.backend ) -}}
+{{- if or ( has "openvswitch" $envAll.Values.network.backend ) ( has "ovn" $envAll.Values.network.backend ) -}}
 {{ $__interface_driver := append $envAll.Values.__interface_driver "openvswitch" }}
 {{- $_ := set $envAll.Values "__interface_driver" $__interface_driver }}
 {{- end -}}
@@ -165,6 +165,9 @@
 {{- end -}}
 {{- $_ := set $envAll.Values.conf.dhcp_agent.DEFAULT "interface_driver" $envAll.Values.__interface_driver -}}
 {{- end -}}
+{{- if and (has "ovn" $envAll.Values.network.backend) (empty $envAll.Values.conf.dhcp_agent.ovs.ovsdb_connection) -}}
+{{- $_ := set $envAll.Values.conf.dhcp_agent.ovs "ovsdb_connection" "unix:/run/openvswitch/db.sock" -}}
+{{- end -}}
 
 {{- if empty $envAll.Values.conf.l3_agent.DEFAULT.interface_driver -}}
 {{- $_ := set $envAll.Values "__interface_driver" ( list ) }}
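When "ovn" is among network.backend and no ovsdb_connection is supplied, the DHCP agent is pointed at the local OVS unix socket; an explicit setting still wins. A hedged override sketch (the TCP endpoint is illustrative):

  conf:
    dhcp_agent:
      ovs:
        ovsdb_connection: tcp:127.0.0.1:6640   # omit to fall back to unix:/run/openvswitch/db.sock
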
diff --git a/charts/neutron/templates/daemonset-bagpipe-bgp.yaml b/charts/neutron/templates/daemonset-bagpipe-bgp.yaml
index b6d2157..e2bd90b 100644
--- a/charts/neutron/templates/daemonset-bagpipe-bgp.yaml
+++ b/charts/neutron/templates/daemonset-bagpipe-bgp.yaml
@@ -57,6 +57,12 @@
 {{ tuple "neutron_bagpipe_bgp" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
     spec:
 {{ dict "envAll" $envAll "application" "neutron_bagpipe_bgp" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+{{ with .Values.pod.priorityClassName.bagpipe_bgp }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.bagpipe_bgp }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ if $envAll.Values.pod.tolerations.neutron.enabled }}
 {{ tuple $envAll "neutron" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
diff --git a/charts/neutron/templates/daemonset-bgp-dragent.yaml b/charts/neutron/templates/daemonset-bgp-dragent.yaml
index b0494c3..8a6a309 100644
--- a/charts/neutron/templates/daemonset-bgp-dragent.yaml
+++ b/charts/neutron/templates/daemonset-bgp-dragent.yaml
@@ -56,6 +56,12 @@
 {{ tuple "neutron_bgp_dragent" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
     spec:
 {{ dict "envAll" $envAll "application" "neutron_bgp_dragent" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+{{ with .Values.pod.priorityClassName.bgp_dragent }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.bgp_dragent }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ if $envAll.Values.pod.tolerations.neutron.enabled }}
 {{ tuple $envAll "neutron" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
diff --git a/charts/neutron/templates/daemonset-dhcp-agent.yaml b/charts/neutron/templates/daemonset-dhcp-agent.yaml
index 17e15f8..9fe058d 100644
--- a/charts/neutron/templates/daemonset-dhcp-agent.yaml
+++ b/charts/neutron/templates/daemonset-dhcp-agent.yaml
@@ -79,6 +79,12 @@
 {{ dict "envAll" $envAll "podName" "neutron-dhcp-agent-default" "containerNames" (list "neutron-dhcp-agent" "neutron-dhcp-agent-init" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
 {{ dict "envAll" $envAll "application" "neutron_dhcp_agent" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+{{ with .Values.pod.priorityClassName.neutron_dhcp_agent }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.neutron_dhcp_agent }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ if $envAll.Values.pod.tolerations.neutron.enabled }}
 {{ tuple $envAll "neutron" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
@@ -94,6 +100,19 @@
       {{- end }}
       initContainers:
 {{ tuple $envAll "pod_dependency" $mounts_neutron_dhcp_agent_init | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
+        {{- if ( has "ovn" .Values.network.backend ) }}
+        - name: ovn-neutron-init
+{{ tuple $envAll "neutron_dhcp" | include "helm-toolkit.snippets.image" | indent 10 }}
+          command:
+            - /tmp/neutron-ovn-init.sh
+          volumeMounts:
+            - name: pod-shared
+              mountPath: /tmp/pod-shared
+            - name: neutron-bin
+              mountPath: /tmp/neutron-ovn-init.sh
+              subPath: neutron-ovn-init.sh
+              readOnly: true
+        {{- end }}
         - name: neutron-dhcp-agent-init
 {{ tuple $envAll "neutron_dhcp" | include "helm-toolkit.snippets.image" | indent 10 }}
 {{ tuple $envAll $envAll.Values.pod.resources.agent.dhcp | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
@@ -245,6 +264,10 @@
               mountPath: /run/netns
               mountPropagation: Bidirectional
             {{- end }}
+            {{- if ( has "ovn" .Values.network.backend ) }}
+            - name: run-openvswitch
+              mountPath: /run/openvswitch
+            {{- end }}
 {{- dict "enabled" $envAll.Values.manifests.certificates "name" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal "path" "/etc/rabbitmq/certs" | include "helm-toolkit.snippets.tls_volume_mount" | indent 12 }}
 {{ if $mounts_neutron_dhcp_agent.volumeMounts }}{{ toYaml $mounts_neutron_dhcp_agent.volumeMounts | indent 12 }}{{ end }}
       volumes:
@@ -274,6 +297,11 @@
           hostPath:
             path: /run/netns
         {{- end }}
+        {{- if ( has "ovn" .Values.network.backend ) }}
+        - name: run-openvswitch
+          hostPath:
+            path: /run/openvswitch
+        {{- end }}
 {{- dict "enabled" $envAll.Values.manifests.certificates "name" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include "helm-toolkit.snippets.tls_volume" | indent 8 }}
 {{ if $mounts_neutron_dhcp_agent.volumes }}{{ toYaml $mounts_neutron_dhcp_agent.volumes | indent 8 }}{{ end }}
 {{- end }}
diff --git a/charts/neutron/templates/daemonset-l2gw-agent.yaml b/charts/neutron/templates/daemonset-l2gw-agent.yaml
index e948198..ab984a3 100644
--- a/charts/neutron/templates/daemonset-l2gw-agent.yaml
+++ b/charts/neutron/templates/daemonset-l2gw-agent.yaml
@@ -80,6 +80,12 @@
 {{ tuple "neutron_l2gw_agent" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
     spec:
 {{ dict "envAll" $envAll "application" "neutron_l2gw_agent" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+{{ with .Values.pod.priorityClassName.neutron_l2gw_agent }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.neutron_l2gw_agent }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ if $envAll.Values.pod.tolerations.neutron.enabled }}
 {{ tuple $envAll "neutron" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
diff --git a/charts/neutron/templates/daemonset-l3-agent.yaml b/charts/neutron/templates/daemonset-l3-agent.yaml
index b4bbd09..21b45d7 100644
--- a/charts/neutron/templates/daemonset-l3-agent.yaml
+++ b/charts/neutron/templates/daemonset-l3-agent.yaml
@@ -80,6 +80,12 @@
 {{ dict "envAll" $envAll "podName" "neutron-l3-agent-default" "containerNames" (list "neutron-l3-agent" "init"  "neutron-l3-agent-init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
 {{ dict "envAll" $envAll "application" "neutron_l3_agent" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+{{ with .Values.pod.priorityClassName.neutron_l3_agent }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.neutron_l3_agent }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ if $envAll.Values.pod.tolerations.neutron.enabled }}
 {{ tuple $envAll "neutron" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
diff --git a/charts/neutron/templates/daemonset-lb-agent.yaml b/charts/neutron/templates/daemonset-lb-agent.yaml
index 35ff8fe..77337f7 100644
--- a/charts/neutron/templates/daemonset-lb-agent.yaml
+++ b/charts/neutron/templates/daemonset-lb-agent.yaml
@@ -55,6 +55,12 @@
 {{ tuple "neutron_lb_agent" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
     spec:
 {{ dict "envAll" $envAll "application" "neutron_lb_agent" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+{{ with .Values.pod.priorityClassName.neutron_lb_agent }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.neutron_lb_agent }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ if $envAll.Values.pod.tolerations.neutron.enabled }}
 {{ tuple $envAll "neutron" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
diff --git a/charts/neutron/templates/daemonset-metadata-agent.yaml b/charts/neutron/templates/daemonset-metadata-agent.yaml
index fc9a75e..229ed5c 100644
--- a/charts/neutron/templates/daemonset-metadata-agent.yaml
+++ b/charts/neutron/templates/daemonset-metadata-agent.yaml
@@ -76,6 +76,12 @@
 {{ dict "envAll" $envAll "podName" "neutron-metadata-agent-default" "containerNames" (list "neutron-metadata-agent" "neutron-metadata-agent-init" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
 {{ dict "envAll" $envAll "application" "neutron_metadata_agent" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+{{ with .Values.pod.priorityClassName.neutron_metadata_agent }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.neutron_metadata_agent }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ if $envAll.Values.pod.tolerations.neutron.enabled }}
 {{ tuple $envAll "neutron" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
diff --git a/charts/neutron/templates/daemonset-netns-cleanup-cron.yaml b/charts/neutron/templates/daemonset-netns-cleanup-cron.yaml
index d43c595..df50c45 100644
--- a/charts/neutron/templates/daemonset-netns-cleanup-cron.yaml
+++ b/charts/neutron/templates/daemonset-netns-cleanup-cron.yaml
@@ -48,6 +48,12 @@
 {{ dict "envAll" $envAll "podName" "neutron-netns-cleanup-cron-default" "containerNames" (list "neutron-netns-cleanup-cron" "init" ) | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
 {{ dict "envAll" $envAll "application" "neutron_netns_cleanup_cron" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+{{ with .Values.pod.priorityClassName.neutron_netns_cleanup_cron }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.neutron_netns_cleanup_cron }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ if $envAll.Values.pod.tolerations.neutron.enabled }}
 {{ tuple $envAll "neutron" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
diff --git a/charts/neutron/templates/daemonset-neutron-ovn-vpn-agent.yaml b/charts/neutron/templates/daemonset-neutron-ovn-vpn-agent.yaml
index fc6d805..c3af50b 100644
--- a/charts/neutron/templates/daemonset-neutron-ovn-vpn-agent.yaml
+++ b/charts/neutron/templates/daemonset-neutron-ovn-vpn-agent.yaml
@@ -78,6 +78,12 @@
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
     spec:
 {{ dict "envAll" $envAll "application" "ovn_vpn_agent" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+{{ with .Values.pod.priorityClassName.ovn_vpn_agent }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.ovn_vpn_agent }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ if $envAll.Values.pod.tolerations.neutron.enabled }}
 {{ tuple $envAll "neutron" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
diff --git a/charts/neutron/templates/daemonset-ovn-metadata-agent.yaml b/charts/neutron/templates/daemonset-ovn-metadata-agent.yaml
index 47e1256..80ca3f0 100644
--- a/charts/neutron/templates/daemonset-ovn-metadata-agent.yaml
+++ b/charts/neutron/templates/daemonset-ovn-metadata-agent.yaml
@@ -76,6 +76,12 @@
 {{ dict "envAll" $envAll "podName" "neutron-ovn-metadata-agent-default" "containerNames" (list "neutron-ovn-metadata-agent" "neutron-ovn-metadata-agent-init" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
 {{ dict "envAll" $envAll "application" "neutron_ovn_metadata_agent" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+{{ with .Values.pod.priorityClassName.neutron_ovn_metadata_agent }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.neutron_ovn_metadata_agent }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ if $envAll.Values.pod.tolerations.neutron.enabled }}
 {{ tuple $envAll "neutron" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
diff --git a/charts/neutron/templates/daemonset-ovs-agent.yaml b/charts/neutron/templates/daemonset-ovs-agent.yaml
index 0ea60f5..c6eb4c0 100644
--- a/charts/neutron/templates/daemonset-ovs-agent.yaml
+++ b/charts/neutron/templates/daemonset-ovs-agent.yaml
@@ -59,6 +59,12 @@
 {{ dict "envAll" $envAll "podName" "$configMapName" "containerNames" (list "neutron-ovs-agent" "init" "neutron-openvswitch-agent-kernel-modules" "neutron-ovs-agent-init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
 {{ dict "envAll" $envAll "application" "neutron_ovs_agent" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+{{ with .Values.pod.priorityClassName.neutron_ovs_agent }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.neutron_ovs_agent }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       nodeSelector:
         {{ .Values.labels.ovs.node_selector_key }}: {{ .Values.labels.ovs.node_selector_value }}
diff --git a/charts/neutron/templates/daemonset-sriov-agent.yaml b/charts/neutron/templates/daemonset-sriov-agent.yaml
index 5b96cd7..efd48e0 100644
--- a/charts/neutron/templates/daemonset-sriov-agent.yaml
+++ b/charts/neutron/templates/daemonset-sriov-agent.yaml
@@ -62,6 +62,12 @@
 {{ dict "envAll" $envAll "podName" "neutron-sriov-agent-default" "containerNames" (list "neutron-sriov-agent-init" "init" "neutron-sriov-agent") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
 {{ dict "envAll" $envAll "application" "neutron_sriov_agent" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+{{ with .Values.pod.priorityClassName.neutron_sriov_agent }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.neutron_sriov_agent }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       nodeSelector:
         {{ .Values.labels.sriov.node_selector_key }}: {{ .Values.labels.sriov.node_selector_value }}
diff --git a/charts/neutron/templates/deployment-ironic-agent.yaml b/charts/neutron/templates/deployment-ironic-agent.yaml
index 1b468e2..8d2663f 100644
--- a/charts/neutron/templates/deployment-ironic-agent.yaml
+++ b/charts/neutron/templates/deployment-ironic-agent.yaml
@@ -49,6 +49,12 @@
 {{ tuple "neutron_ironic_agent" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
     spec:
 {{ dict "envAll" $envAll "application" "neutron_ironic_agent" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+{{ with .Values.pod.priorityClassName.neutron_ironic_agent }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.neutron_ironic_agent }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       affinity:
 {{ tuple $envAll "neutron" "ironic_agent" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
diff --git a/charts/neutron/templates/deployment-rpc_server.yaml b/charts/neutron/templates/deployment-rpc_server.yaml
index 1866e21..f5f7240 100644
--- a/charts/neutron/templates/deployment-rpc_server.yaml
+++ b/charts/neutron/templates/deployment-rpc_server.yaml
@@ -49,6 +49,12 @@
 {{ dict "envAll" $envAll "podName" "neutron-rpc-server" "containerNames" (list "neutron-rpc-server" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
 {{ dict "envAll" $envAll "application" "neutron_rpc_server" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+{{ with .Values.pod.priorityClassName.neutron_rpc_server }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.neutron_rpc_server }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       affinity:
 {{ tuple $envAll "neutron" "rpc_server" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
diff --git a/charts/neutron/templates/deployment-server.yaml b/charts/neutron/templates/deployment-server.yaml
index 457401b..464b3c3 100644
--- a/charts/neutron/templates/deployment-server.yaml
+++ b/charts/neutron/templates/deployment-server.yaml
@@ -81,6 +81,12 @@
 {{ dict "envAll" $envAll "podName" "neutron-server" "containerNames" (list "neutron-server" "init" "nginx") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
 {{ dict "envAll" $envAll "application" "neutron_server" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+{{ with .Values.pod.priorityClassName.neutron_server }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.neutron_server }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       affinity:
 {{ tuple $envAll "neutron" "server" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
diff --git a/charts/neutron/templates/pod-rally-test.yaml b/charts/neutron/templates/pod-rally-test.yaml
index 5ef57fa..a1e3e1a 100644
--- a/charts/neutron/templates/pod-rally-test.yaml
+++ b/charts/neutron/templates/pod-rally-test.yaml
@@ -44,6 +44,12 @@
 {{ tuple $envAll "neutron" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 2 }}
 {{ end }}
   restartPolicy: Never
+{{ with .Values.pod.priorityClassName.neutron_tests }}
+  priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.neutron_tests }}
+  runtimeClassName: {{ . }}
+{{ end }}
   serviceAccountName: {{ $serviceAccountName }}
   initContainers:
 {{ tuple $envAll "tests" $mounts_tests_init | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 4 }}
diff --git a/charts/neutron/values.yaml b/charts/neutron/values.yaml
index b1ff456..57cb3b7 100644
--- a/charts/neutron/values.yaml
+++ b/charts/neutron/values.yaml
@@ -389,6 +389,44 @@
           service: local_image_registry
 
 pod:
+  priorityClassName:
+    bagpipe_bgp: null
+    bgp_dragent: null
+    neutron_dhcp_agent: null
+    neutron_l2gw_agent: null
+    neutron_l3_agent: null
+    neutron_lb_agent: null
+    neutron_metadata_agent: null
+    neutron_netns_cleanup_cron: null
+    ovn_vpn_agent: null
+    neutron_ovn_metadata_agent: null
+    neutron_ovs_agent: null
+    neutron_sriov_agent: null
+    neutron_ironic_agent: null
+    neutron_rpc_server: null
+    neutron_server: null
+    neutron_tests: null
+    bootstrap: null
+    db_sync: null
+  runtimeClassName:
+    bagpipe_bgp: null
+    bgp_dragent: null
+    neutron_dhcp_agent: null
+    neutron_l2gw_agent: null
+    neutron_l3_agent: null
+    neutron_lb_agent: null
+    neutron_metadata_agent: null
+    neutron_netns_cleanup_cron: null
+    ovn_vpn_agent: null
+    neutron_ovn_metadata_agent: null
+    neutron_ovs_agent: null
+    neutron_sriov_agent: null
+    neutron_ironic_agent: null
+    neutron_rpc_server: null
+    neutron_server: null
+    neutron_tests: null
+    bootstrap: null
+    db_sync: null
   sidecars:
     neutron_policy_server: false
   use_fqdn:
@@ -2149,6 +2187,8 @@
       interface_driver: null
       dnsmasq_config_file: /etc/neutron/dnsmasq.conf
       force_metadata: True
+    # NOTE(mnaser): This has to be here in order for the DHCP agent to work with OVN.
+    ovs: {}
   dnsmasq: |
       #no-hosts
       #port=5353
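The empty ovs: {} stanza (see the NOTE above it) exists so the configmap-etc template has a map to write the ovsdb_connection default into; removing it would break the OVN-backed DHCP agent path. A minimal sketch of the values that activate that path:

  network:
    backend:
      - ovn   # enables the ovn-neutron-init container and /run/openvswitch mounts above
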
diff --git a/charts/nova/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl b/charts/nova/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
index 6b77004..da3c481 100644
--- a/charts/nova/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+++ b/charts/nova/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
@@ -70,6 +70,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.bootstrap }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.bootstrap }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.bootstrap }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.bootstrap }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "bootstrap" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/nova/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl b/charts/nova/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
index 4696c88..5c35dd0 100644
--- a/charts/nova/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
+++ b/charts/nova/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
@@ -68,6 +68,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_sync }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_sync }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_sync }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_sync }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "db_sync" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/nova/templates/cron-job-archive-deleted-rows.yaml b/charts/nova/templates/cron-job-archive-deleted-rows.yaml
index 7316b3a..48e83ab 100644
--- a/charts/nova/templates/cron-job-archive-deleted-rows.yaml
+++ b/charts/nova/templates/cron-job-archive-deleted-rows.yaml
@@ -42,6 +42,12 @@
           labels:
 {{ tuple $envAll "nova" "archive-deleted-rows" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 12 }}
         spec:
+{{ with .Values.pod.priorityClassName.nova_archive_deleted_rows }}
+          priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.nova_archive_deleted_rows }}
+          runtimeClassName: {{ . }}
+{{ end }}
           serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "archive_deleted_rows" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 10 }}
           restartPolicy: OnFailure
diff --git a/charts/nova/templates/cron-job-cell-setup.yaml b/charts/nova/templates/cron-job-cell-setup.yaml
index b90b84e..23840ce 100644
--- a/charts/nova/templates/cron-job-cell-setup.yaml
+++ b/charts/nova/templates/cron-job-cell-setup.yaml
@@ -42,6 +42,12 @@
           labels:
 {{ tuple $envAll "nova" "cell-setup" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 12 }}
         spec:
+{{ with .Values.pod.priorityClassName.nova_cell_setup }}
+          priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.nova_cell_setup }}
+          runtimeClassName: {{ . }}
+{{ end }}
           serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "cell_setup" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 10 }}
           restartPolicy: OnFailure
diff --git a/charts/nova/templates/cron-job-service-cleaner.yaml b/charts/nova/templates/cron-job-service-cleaner.yaml
index dd61db7..c515304 100644
--- a/charts/nova/templates/cron-job-service-cleaner.yaml
+++ b/charts/nova/templates/cron-job-service-cleaner.yaml
@@ -42,6 +42,12 @@
           labels:
 {{ tuple $envAll "nova" "service-cleaner" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 12 }}
         spec:
+{{ with .Values.pod.priorityClassName.nova_service_cleaner }}
+          priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.nova_service_cleaner }}
+          runtimeClassName: {{ . }}
+{{ end }}
           serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "service_cleaner" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 10 }}
           restartPolicy: OnFailure
diff --git a/charts/nova/templates/daemonset-compute.yaml b/charts/nova/templates/daemonset-compute.yaml
index 3ad00ff..e62ea76 100644
--- a/charts/nova/templates/daemonset-compute.yaml
+++ b/charts/nova/templates/daemonset-compute.yaml
@@ -90,6 +90,12 @@
 {{ tuple "nova_compute" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
 {{ dict "envAll" $envAll "podName" "nova-compute-default" "containerNames" (list "nova-compute" "init" "nova-compute-init" "nova-compute-vnc-init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
+{{ with .Values.pod.priorityClassName.nova_compute }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.nova_compute }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "nova" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       nodeSelector:
diff --git a/charts/nova/templates/deployment-api-metadata.yaml b/charts/nova/templates/deployment-api-metadata.yaml
index 51e30c9..c4c64f9 100644
--- a/charts/nova/templates/deployment-api-metadata.yaml
+++ b/charts/nova/templates/deployment-api-metadata.yaml
@@ -60,6 +60,12 @@
 {{ tuple "nova_api_metadata" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
 {{ dict "envAll" $envAll "podName" "nova-api-metadata" "containerNames" (list "nova-api-metadata-init" "nova-api" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
+{{ with .Values.pod.priorityClassName.nova_api_metadata }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.nova_api_metadata }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "nova" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       affinity:
diff --git a/charts/nova/templates/deployment-api-osapi.yaml b/charts/nova/templates/deployment-api-osapi.yaml
index b203ba6..70a65f5 100644
--- a/charts/nova/templates/deployment-api-osapi.yaml
+++ b/charts/nova/templates/deployment-api-osapi.yaml
@@ -60,6 +60,12 @@
 {{ tuple "nova_api_osapi" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
 {{ dict "envAll" $envAll "podName" "nova-api-osapi" "containerNames" (list "nova-osapi" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
+{{ with .Values.pod.priorityClassName.nova_api_osapi }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.nova_api_osapi }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "nova" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       affinity:
diff --git a/charts/nova/templates/deployment-conductor.yaml b/charts/nova/templates/deployment-conductor.yaml
index b58b385..50a7ddb 100644
--- a/charts/nova/templates/deployment-conductor.yaml
+++ b/charts/nova/templates/deployment-conductor.yaml
@@ -69,6 +69,12 @@
 {{ tuple "nova_conductor" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
 {{ dict "envAll" $envAll "podName" "nova-conductor" "containerNames" (list "nova-conductor" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
+{{ with .Values.pod.priorityClassName.nova_conductor }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.nova_conductor }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "nova" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       affinity:
diff --git a/charts/nova/templates/deployment-novncproxy.yaml b/charts/nova/templates/deployment-novncproxy.yaml
index f4c1d8b..670e1cc 100644
--- a/charts/nova/templates/deployment-novncproxy.yaml
+++ b/charts/nova/templates/deployment-novncproxy.yaml
@@ -58,6 +58,12 @@
 {{ tuple "nova_novncproxy" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
 {{ dict "envAll" $envAll "podName" "nova-novncproxy" "containerNames" (list "nova-novncproxy" "nova-novncproxy-init-assets" "nova-novncproxy-init" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
+{{ with .Values.pod.priorityClassName.nova_novncproxy }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.nova_novncproxy }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "nova" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       affinity:
diff --git a/charts/nova/templates/deployment-scheduler.yaml b/charts/nova/templates/deployment-scheduler.yaml
index bba444c..a8a529a 100644
--- a/charts/nova/templates/deployment-scheduler.yaml
+++ b/charts/nova/templates/deployment-scheduler.yaml
@@ -69,6 +69,12 @@
 {{ tuple "nova_scheduler" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
 {{ dict "envAll" $envAll "podName" "nova-scheduler" "containerNames" (list "nova-scheduler" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
+{{ with .Values.pod.priorityClassName.nova_scheduler }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.nova_scheduler }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "nova" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       affinity:
diff --git a/charts/nova/templates/deployment-spiceproxy.yaml b/charts/nova/templates/deployment-spiceproxy.yaml
index eca1628..68b3c00 100644
--- a/charts/nova/templates/deployment-spiceproxy.yaml
+++ b/charts/nova/templates/deployment-spiceproxy.yaml
@@ -55,6 +55,12 @@
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
 {{ tuple "nova_spiceproxy" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
     spec:
+{{ with .Values.pod.priorityClassName.nova_spiceproxy }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.nova_spiceproxy }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "nova" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       affinity:
diff --git a/charts/nova/templates/job-bootstrap.yaml b/charts/nova/templates/job-bootstrap.yaml
index de8812d..72cc319 100644
--- a/charts/nova/templates/job-bootstrap.yaml
+++ b/charts/nova/templates/job-bootstrap.yaml
@@ -40,6 +40,12 @@
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
 {{ dict "envAll" $envAll "application" "bootstrap" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+{{ with .Values.pod.priorityClassName.bootstrap }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.bootstrap }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       nodeSelector:
diff --git a/charts/nova/templates/pod-rally-test.yaml b/charts/nova/templates/pod-rally-test.yaml
index d53f204..659d4b5 100644
--- a/charts/nova/templates/pod-rally-test.yaml
+++ b/charts/nova/templates/pod-rally-test.yaml
@@ -44,6 +44,12 @@
 {{ tuple $envAll "nova" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 2 }}
 {{ end }}
   restartPolicy: Never
+{{ with .Values.pod.priorityClassName.nova_tests }}
+  priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.nova_tests }}
+  runtimeClassName: {{ . }}
+{{ end }}
   serviceAccountName: {{ $serviceAccountName }}
   initContainers:
 {{ tuple $envAll "tests" $mounts_tests_init | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 4 }}
diff --git a/charts/nova/templates/statefulset-compute-ironic.yaml b/charts/nova/templates/statefulset-compute-ironic.yaml
index 377555d..deb6c7c 100644
--- a/charts/nova/templates/statefulset-compute-ironic.yaml
+++ b/charts/nova/templates/statefulset-compute-ironic.yaml
@@ -45,14 +45,18 @@
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
 {{ dict "envAll" $envAll "podName" "nova-compute-default" "containerNames" (list "nova-compute") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
+{{ with .Values.pod.priorityClassName.nova_compute_ironic }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.nova_compute_ironic }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "nova" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       affinity:
 {{ tuple $envAll "nova" "compute-ironic" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
       nodeSelector:
         {{ .Values.labels.agent.compute_ironic.node_selector_key }}: {{ .Values.labels.agent.compute_ironic.node_selector_value }}
-      securityContext:
-        runAsUser: 0
       hostPID: true
       dnsPolicy: ClusterFirstWithHostNet
       initContainers:
diff --git a/charts/nova/values.yaml b/charts/nova/values.yaml
index e0c5866..2b1a924 100644
--- a/charts/nova/values.yaml
+++ b/charts/nova/values.yaml
@@ -2037,6 +2037,36 @@
         default: 80
 
 pod:
+  priorityClassName:
+    nova_compute: null
+    nova_api_metadata: null
+    nova_api_osapi: null
+    nova_conductor: null
+    nova_novncproxy: null
+    nova_scheduler: null
+    nova_spiceproxy: null
+    nova_archive_deleted_rows: null
+    nova_cell_setup: null
+    nova_service_cleaner: null
+    nova_compute_ironic: null
+    nova_tests: null
+    bootstrap: null
+    db_sync: null
+  runtimeClassName:
+    nova_compute: null
+    nova_api_metadata: null
+    nova_api_osapi: null
+    nova_conductor: null
+    nova_novncproxy: null
+    nova_scheduler: null
+    nova_spiceproxy: null
+    nova_archive_deleted_rows: null
+    nova_cell_setup: null
+    nova_service_cleaner: null
+    nova_compute_ironic: null
+    nova_tests: null
+    bootstrap: null
+    db_sync: null
   probes:
     rpc_timeout: 60
     rpc_retries: 2
diff --git a/charts/octavia/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl b/charts/octavia/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
index 6b77004..da3c481 100644
--- a/charts/octavia/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+++ b/charts/octavia/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
@@ -70,6 +70,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.bootstrap }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.bootstrap }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.bootstrap }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.bootstrap }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "bootstrap" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/octavia/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl b/charts/octavia/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
index 4696c88..5c35dd0 100644
--- a/charts/octavia/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
+++ b/charts/octavia/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
@@ -68,6 +68,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_sync }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_sync }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_sync }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_sync }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "db_sync" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/octavia/templates/daemonset-health-manager.yaml b/charts/octavia/templates/daemonset-health-manager.yaml
index a355e86..1fcda41 100644
--- a/charts/octavia/templates/daemonset-health-manager.yaml
+++ b/charts/octavia/templates/daemonset-health-manager.yaml
@@ -46,6 +46,12 @@
         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
     spec:
+{{ with .Values.pod.priorityClassName.octavia_health_manager }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.octavia_health_manager }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       dnsPolicy: ClusterFirstWithHostNet
       hostNetwork: true
diff --git a/charts/octavia/templates/deployment-api.yaml b/charts/octavia/templates/deployment-api.yaml
index da15972..e584f76 100644
--- a/charts/octavia/templates/deployment-api.yaml
+++ b/charts/octavia/templates/deployment-api.yaml
@@ -45,6 +45,12 @@
         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
     spec:
+{{ with .Values.pod.priorityClassName.octavia_api }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.octavia_api }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       affinity:
 {{ tuple $envAll "octavia" "api" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
diff --git a/charts/octavia/templates/deployment-housekeeping.yaml b/charts/octavia/templates/deployment-housekeeping.yaml
index 39903b6..584ca18 100644
--- a/charts/octavia/templates/deployment-housekeeping.yaml
+++ b/charts/octavia/templates/deployment-housekeeping.yaml
@@ -45,6 +45,12 @@
         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
     spec:
+{{ with .Values.pod.priorityClassName.octavia_housekeeping }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.octavia_housekeeping }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       dnsPolicy: ClusterFirstWithHostNet
       hostNetwork: true
diff --git a/charts/octavia/templates/deployment-worker.yaml b/charts/octavia/templates/deployment-worker.yaml
index 54cf68d..561a604 100644
--- a/charts/octavia/templates/deployment-worker.yaml
+++ b/charts/octavia/templates/deployment-worker.yaml
@@ -45,6 +45,12 @@
         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
     spec:
+{{ with .Values.pod.priorityClassName.octavia_worker }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.octavia_worker }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       dnsPolicy: ClusterFirstWithHostNet
       hostNetwork: true
@@ -99,4 +105,4 @@
             secretName: octavia-etc
             defaultMode: 0444
 {{ if $mounts_octavia_worker.volumes }}{{ toYaml $mounts_octavia_worker.volumes | indent 8 }}{{ end }}
-{{- end }}
\ No newline at end of file
+{{- end }}
diff --git a/charts/octavia/values.yaml b/charts/octavia/values.yaml
index b15114a..36fcbf0 100644
--- a/charts/octavia/values.yaml
+++ b/charts/octavia/values.yaml
@@ -526,6 +526,20 @@
         public: 80
 
 pod:
+  priorityClassName:
+    octavia_health_manager: null
+    octavia_api: null
+    octavia_housekeeping: null
+    octavia_worker: null
+    bootstrap: null
+    db_sync: null
+  runtimeClassName:
+    octavia_health_manager: null
+    octavia_api: null
+    octavia_housekeeping: null
+    octavia_worker: null
+    bootstrap: null
+    db_sync: null
   user:
     octavia:
       uid: 42424
diff --git a/charts/openvswitch/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl b/charts/openvswitch/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
index 4696c88..5c35dd0 100644
--- a/charts/openvswitch/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
+++ b/charts/openvswitch/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
@@ -68,6 +68,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_sync }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_sync }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_sync }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_sync }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "db_sync" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/ovn/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl b/charts/ovn/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
index 4696c88..5c35dd0 100644
--- a/charts/ovn/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
+++ b/charts/ovn/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
@@ -68,6 +68,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_sync }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_sync }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_sync }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_sync }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "db_sync" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/ovn/templates/bin/_ovn-controller-init.sh.tpl b/charts/ovn/templates/bin/_ovn-controller-init.sh.tpl
index 1d303c8..049f731 100644
--- a/charts/ovn/templates/bin/_ovn-controller-init.sh.tpl
+++ b/charts/ovn/templates/bin/_ovn-controller-init.sh.tpl
@@ -25,58 +25,6 @@
   echo ${ip}
 }
 
-function get_ip_prefix_from_interface {
-  local interface=$1
-  local prefix=$(ip -4 -o addr s "${interface}" | awk '{ print $4; exit }' | awk -F '/' 'NR==1 {print $2}')
-  if [ -z "${prefix}" ] ; then
-    exit 1
-  fi
-  echo ${prefix}
-}
-
-function migrate_ip_from_nic {
-  src_nic=$1
-  bridge_name=$2
-
-  # Enabling explicit error handling: We must avoid to lose the IP
-  # address in the migration process. Hence, on every error, we
-  # attempt to assign the IP back to the original NIC and exit.
-  set +e
-
-  ip=$(get_ip_address_from_interface ${src_nic})
-  prefix=$(get_ip_prefix_from_interface ${src_nic})
-
-  bridge_ip=$(get_ip_address_from_interface "${bridge_name}")
-  bridge_prefix=$(get_ip_prefix_from_interface "${bridge_name}")
-
-  ip link set ${bridge_name} up
-
-  if [[ -n "${ip}" && -n "${prefix}" ]]; then
-    ip addr flush dev ${src_nic}
-    if [ $? -ne 0 ] ; then
-      ip addr add ${ip}/${prefix} dev ${src_nic}
-      echo "Error while flushing IP from ${src_nic}."
-      exit 1
-    fi
-
-    ip addr add ${ip}/${prefix} dev "${bridge_name}"
-    if [ $? -ne 0 ] ; then
-      echo "Error assigning IP to bridge "${bridge_name}"."
-      ip addr add ${ip}/${prefix} dev ${src_nic}
-      exit 1
-    fi
-  elif [[ -n "${bridge_ip}" && -n "${bridge_prefix}" ]]; then
-    echo "Bridge '${bridge_name}' already has IP assigned. Keeping the same:: IP:[${bridge_ip}]; Prefix:[${bridge_prefix}]..."
-  elif [[ -z "${bridge_ip}" && -z "${ip}" ]]; then
-    echo "Interface and bridge have no ips configured. Leaving as is."
-  else
-    echo "Interface ${src_nic} has invalid IP address. IP:[${ip}]; Prefix:[${prefix}]..."
-    exit 1
-  fi
-
-  set -e
-}
-
 function get_current_system_id {
   ovs-vsctl --if-exists get Open_vSwitch . external_ids:system-id | tr -d '"'
 }
@@ -174,6 +122,7 @@
   if [ -n "$iface" ] && [ "$iface" != "null" ] && ( ip link show $iface 1>/dev/null 2>&1 );
   then
     ovs-vsctl --may-exist add-port $bridge $iface
-    migrate_ip_from_nic $iface $bridge
   fi
 done
+
+/usr/local/bin/ovsinit /tmp/auto_bridge_add
diff --git a/charts/ovn/templates/deployment-northd.yaml b/charts/ovn/templates/deployment-northd.yaml
index 2dbbb68..baf5a0c 100644
--- a/charts/ovn/templates/deployment-northd.yaml
+++ b/charts/ovn/templates/deployment-northd.yaml
@@ -49,6 +49,8 @@
         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
     spec:
       serviceAccountName: {{ $serviceAccountName }}
+      affinity:
+{{- tuple $envAll "ovn" "ovn_northd" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
       nodeSelector:
         {{ .Values.labels.ovn_northd.node_selector_key }}: {{ .Values.labels.ovn_northd.node_selector_value }}
       initContainers:
diff --git a/charts/patches/0001-fix-Use-more-standard-configs-for-staffeln-DBdropjob.patch b/charts/patches/0001-fix-Use-more-standard-configs-for-staffeln-DBdropjob.patch
deleted file mode 100644
index 1a491cf..0000000
--- a/charts/patches/0001-fix-Use-more-standard-configs-for-staffeln-DBdropjob.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From f74a254e87acaafb9493630cb8521fda145c6c5c Mon Sep 17 00:00:00 2001
-From: ricolin <rlin@vexxhost.com>
-Date: Wed, 8 Jan 2025 21:29:08 +0800
-Subject: [PATCH] fix: Use more standard configs for staffeln DB drop job
-
----
- charts/staffeln/templates/job-db-drop.yaml | 6 ++----
- 1 file changed, 2 insertions(+), 4 deletions(-)
-
-diff --git a/charts/staffeln/templates/job-db-drop.yaml b/charts/staffeln/templates/job-db-drop.yaml
-index dffa5aba..519e0b31 100644
---- a/charts/staffeln/templates/job-db-drop.yaml
-+++ b/charts/staffeln/templates/job-db-drop.yaml
-@@ -13,11 +13,9 @@ limitations under the License.
- */}}
- 
- {{- if .Values.manifests.job_db_drop }}
--{{- $serviceName := "staffeln" -}}
--{{- $dbToDrop := dict "adminSecret" .Values.secrets.oslo_db.admin "configFile" (printf "/etc/%s/%s.conf" $serviceName $serviceName ) "logConfigFile" (printf "/etc/%s/logging.conf" $serviceName ) "configDbSection" "DEFAULT" "configDbKey" "sql_connection" -}}
--{{- $dbDropJob := dict "envAll" . "serviceName" $serviceName "dbToDrop" $dbToDrop -}}
-+{{- $dbDropJob := dict "envAll" . "serviceName" "staffeln" -}}
- {{- if .Values.manifests.certificates -}}
--{{- $_ := set $dbToDrop "dbAdminTlsSecret" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}
-+{{- $_ := set $dbDropJob "dbAdminTlsSecret" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}
- {{- end -}}
- {{- if .Values.pod.tolerations.staffeln.enabled -}}
- {{- $_ := set $dbDropJob "tolerationsEnabled" true -}}
--- 
-2.25.1
-
diff --git a/charts/patches/barbican/0003-Enable-priority-runtime-ClassName-for-Barbican.patch b/charts/patches/barbican/0003-Enable-priority-runtime-ClassName-for-Barbican.patch
new file mode 100644
index 0000000..a956746
--- /dev/null
+++ b/charts/patches/barbican/0003-Enable-priority-runtime-ClassName-for-Barbican.patch
@@ -0,0 +1,63 @@
+From e7640e5fc0179fabfa063e89f7e7a7612e1c807e Mon Sep 17 00:00:00 2001
+From: Dong Ma <dong.ma@vexxhost.com>
+Date: Mon, 10 Feb 2025 06:32:12 +0000
+Subject: [PATCH] Enable {priority,runtime}ClassName for Barbican
+
+---
+ barbican/templates/deployment-api.yaml |  6 +++
+ barbican/templates/pod-test.yaml       |  6 +++
+ barbican/values.yaml                   |  8 ++++
+
+diff --git a/barbican/templates/deployment-api.yaml b/barbican/templates/deployment-api.yaml
+index 8ae9ea42..1e5cbbac 100644
+--- a/barbican/templates/deployment-api.yaml
++++ b/barbican/templates/deployment-api.yaml
+@@ -47,6 +47,12 @@ spec:
+ {{ dict "envAll" $envAll "podName" "barbican-api" "containerNames" (list "init" "barbican-api") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
+ {{ dict "envAll" $envAll "application" "barbican" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
++{{ with .Values.pod.priorityClassName.barbican_api }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.barbican_api }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       affinity:
+ {{ tuple $envAll "barbican" "api" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+diff --git a/barbican/templates/pod-test.yaml b/barbican/templates/pod-test.yaml
+index f4143564..783d328b 100644
+--- a/barbican/templates/pod-test.yaml
++++ b/barbican/templates/pod-test.yaml
+@@ -33,6 +33,12 @@ metadata:
+     {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
+ {{ dict "envAll" $envAll "podName" "barbican-test" "containerNames" (list "init" "barbican-test") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 4 }}
+ spec:
++{{ with .Values.pod.priorityClassName.barbican_tests }}
++  priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.barbican_tests }}
++  runtimeClassName: {{ . }}
++{{ end }}
+   serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "test" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 2 }}
+   nodeSelector:
+diff --git a/barbican/values.yaml b/barbican/values.yaml
+index e2e87bf7..17aee723 100644
+--- a/barbican/values.yaml
++++ b/barbican/values.yaml
+@@ -55,6 +55,14 @@ images:
+       - image_repo_sync
+ 
+ pod:
++  priorityClassName:
++    barbican_api: null
++    barbican_tests: null
++    db_sync: null
++  runtimeClassName:
++    barbican_api: null
++    barbican_tests: null
++    db_sync: null
+   security_context:
+     barbican:
+       pod:
diff --git a/charts/patches/barbican/0004-Fix-the-missing-priority-runtime-ClassName.patch b/charts/patches/barbican/0004-Fix-the-missing-priority-runtime-ClassName.patch
new file mode 100644
index 0000000..ea1de3e
--- /dev/null
+++ b/charts/patches/barbican/0004-Fix-the-missing-priority-runtime-ClassName.patch
@@ -0,0 +1,39 @@
+From 1df9feeb330507bc2f988951e2b4813408621cb3 Mon Sep 17 00:00:00 2001
+From: Dong Ma <dong.ma@vexxhost.com>
+Date: Thu, 13 Feb 2025 16:15:52 +0000
+Subject: [PATCH] Fix the missing {priority,runtime}ClassName
+
+diff --git a/barbican/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl b/barbican/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+index 6b77004f..da3c4819 100644
+--- a/barbican/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
++++ b/barbican/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+@@ -70,6 +70,12 @@ spec:
+       annotations:
+ {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
+     spec:
++{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.bootstrap }}
++      priorityClassName: {{ $envAll.Values.pod.priorityClassName.bootstrap }}
++{{- end }}
++{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.bootstrap }}
++      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.bootstrap }}
++{{- end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       restartPolicy: OnFailure
+       {{ tuple $envAll "bootstrap" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
+diff --git a/barbican/values.yaml b/barbican/values.yaml
+index 17aee723..67bdbbc1 100644
+--- a/barbican/values.yaml
++++ b/barbican/values.yaml
+@@ -58,10 +58,12 @@ pod:
+   priorityClassName:
+     barbican_api: null
+     barbican_tests: null
++    bootstrap: null
+     db_sync: null
+   runtimeClassName:
+     barbican_api: null
+     barbican_tests: null
++    bootstrap: null
+     db_sync: null
+   security_context:
+     barbican:
diff --git a/charts/patches/cinder/0003-Enable-priority-runtime-ClassName-for-Cinder.patch b/charts/patches/cinder/0003-Enable-priority-runtime-ClassName-for-Cinder.patch
new file mode 100644
index 0000000..7397b0e
--- /dev/null
+++ b/charts/patches/cinder/0003-Enable-priority-runtime-ClassName-for-Cinder.patch
@@ -0,0 +1,146 @@
+From 5fa9fbab45a5cd489007ca0286e7203accd1f398 Mon Sep 17 00:00:00 2001
+From: Mohammed Naser <mnaser@vexxhost.com>
+Date: Thu, 6 Feb 2025 09:33:47 -0500
+Subject: [PATCH] Enable {priority,runtime}ClassName for Cinder
+
+---
+ .../cron-job-cinder-volume-usage-audit.yaml      |  6 ++++++
+ cinder/templates/deployment-api.yaml             |  6 ++++++
+ cinder/templates/deployment-backup.yaml          |  6 ++++++
+ cinder/templates/deployment-scheduler.yaml       |  6 ++++++
+ cinder/templates/deployment-volume.yaml          |  6 ++++++
+ cinder/templates/pod-rally-test.yaml             |  6 ++++++
+ cinder/values.yaml                               | 16 ++++++++++++++++
+ 7 files changed, 52 insertions(+)
+
+diff --git a/cinder/templates/cron-job-cinder-volume-usage-audit.yaml b/cinder/templates/cron-job-cinder-volume-usage-audit.yaml
+index 1d935f12..18d9e28e 100644
+--- a/cinder/templates/cron-job-cinder-volume-usage-audit.yaml
++++ b/cinder/templates/cron-job-cinder-volume-usage-audit.yaml
+@@ -50,6 +50,12 @@ spec:
+ {{ dict "envAll" $envAll "podName" $serviceAccountName "containerNames" (list "cinder-volume-usage-audit" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 12 }}
+         spec:
+ {{ dict "envAll" $envAll "application" "volume_usage_audit" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 10 }}
++{{ with .Values.pod.priorityClassName.cinder_api }}
++          priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.cinder_api }}
++          runtimeClassName: {{ . }}
++{{ end }}
+           serviceAccountName: {{ $serviceAccountName }}
+           restartPolicy: OnFailure
+ {{ if $envAll.Values.pod.tolerations.cinder.enabled }}
+diff --git a/cinder/templates/deployment-api.yaml b/cinder/templates/deployment-api.yaml
+index f29d103f..3b1a30d6 100644
+--- a/cinder/templates/deployment-api.yaml
++++ b/cinder/templates/deployment-api.yaml
+@@ -46,6 +46,12 @@ spec:
+ {{ tuple "cinder_api" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
+ {{ dict "envAll" $envAll "podName" "cinder-api" "containerNames" (list "cinder-api" "ceph-coordination-volume-perms" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
++{{ with .Values.pod.priorityClassName.cinder_api }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.cinder_api }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "cinder_api" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       affinity:
+diff --git a/cinder/templates/deployment-backup.yaml b/cinder/templates/deployment-backup.yaml
+index ccd84fc0..617031a0 100644
+--- a/cinder/templates/deployment-backup.yaml
++++ b/cinder/templates/deployment-backup.yaml
+@@ -48,6 +48,12 @@ spec:
+ {{ tuple "cinder_backup" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
+ {{ dict "envAll" $envAll "podName" "cinder-backup" "containerNames" (list "cinder-backup" "ceph-coordination-volume-perms" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
++{{ with .Values.pod.priorityClassName.cinder_backup }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.cinder_backup }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "cinder_backup" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       affinity:
+diff --git a/cinder/templates/deployment-scheduler.yaml b/cinder/templates/deployment-scheduler.yaml
+index 73e14e55..016ade95 100644
+--- a/cinder/templates/deployment-scheduler.yaml
++++ b/cinder/templates/deployment-scheduler.yaml
+@@ -46,6 +46,12 @@ spec:
+ {{ tuple "cinder_scheduler" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
+ {{ dict "envAll" $envAll "podName" "cinder-scheduler" "containerNames" (list "cinder-scheduler" "ceph-coordination-volume-perms" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
++{{ with .Values.pod.priorityClassName.cinder_scheduler }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.cinder_scheduler }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "cinder_scheduler" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       affinity:
+diff --git a/cinder/templates/deployment-volume.yaml b/cinder/templates/deployment-volume.yaml
+index c28518da..83430c33 100644
+--- a/cinder/templates/deployment-volume.yaml
++++ b/cinder/templates/deployment-volume.yaml
+@@ -48,6 +48,12 @@ spec:
+ {{ tuple "cinder_volume" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
+ {{ dict "envAll" $envAll "podName" "cinder-volume" "containerNames" (list "cinder-volume" "ceph-coordination-volume-perms" "init-cinder-conf" "init" ) | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
++{{ with .Values.pod.priorityClassName.cinder_volume }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.cinder_volume }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "cinder_volume" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       affinity:
+diff --git a/cinder/templates/pod-rally-test.yaml b/cinder/templates/pod-rally-test.yaml
+index 34316c65..a2852359 100644
+--- a/cinder/templates/pod-rally-test.yaml
++++ b/cinder/templates/pod-rally-test.yaml
+@@ -38,6 +38,12 @@ spec:
+ {{ end }}
+   nodeSelector:
+     {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}
++{{ with .Values.pod.priorityClassName.cinder_tests }}
++  priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.cinder_tests }}
++  runtimeClassName: {{ . }}
++{{ end }}
+   serviceAccountName: {{ $serviceAccountName }}
+   initContainers:
+ {{ tuple $envAll "tests" $mounts_tests_init | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 4 }}
+diff --git a/cinder/values.yaml b/cinder/values.yaml
+index 6b335be1..1377e4cd 100644
+--- a/cinder/values.yaml
++++ b/cinder/values.yaml
+@@ -76,6 +76,22 @@ jobs:
+       failed: 1
+ 
+ pod:
++  priorityClassName:
++    cinder_api: null
++    cinder_backup: null
++    cinder_scheduler: null
++    cinder_tests: null
++    cinder_volume_usage_audit: null
++    cinder_volume: null
++    db_sync: null
++  runtimeClassName:
++    cinder_api: null
++    cinder_backup: null
++    cinder_scheduler: null
++    cinder_tests: null
++    cinder_volume_usage_audit: null
++    cinder_volume: null
++    db_sync: null
+   security_context:
+     volume_usage_audit:
+       pod:
+-- 
+2.43.0
diff --git a/charts/patches/designate/0002-Enable-priority-runtime-ClassName-for-Designate.patch b/charts/patches/designate/0002-Enable-priority-runtime-ClassName-for-Designate.patch
new file mode 100644
index 0000000..f8db3f7
--- /dev/null
+++ b/charts/patches/designate/0002-Enable-priority-runtime-ClassName-for-Designate.patch
@@ -0,0 +1,147 @@
+From 6d9700b99f7dc8462faed77ee404682c4c042c20 Mon Sep 17 00:00:00 2001
+From: Dong Ma <dong.ma@vexxhost.com>
+Date: Mon, 10 Feb 2025 08:55:22 +0000
+Subject: [PATCH] Enable {priority,runtime}ClassName for Designate
+
+---
+ designate/templates/deployment-api.yaml      |  6 ++++++
+ designate/templates/deployment-central.yaml  |  6 ++++++
+ designate/templates/deployment-mdns.yaml     |  6 ++++++
+ designate/templates/deployment-producer.yaml |  6 ++++++
+ designate/templates/deployment-sink.yaml     |  6 ++++++
+ designate/templates/deployment-worker.yaml   |  6 ++++++
+ designate/values.yaml                        | 16 ++++++++++++++++
+ 7 files changed, 52 insertions(+)
+
+diff --git a/designate/templates/deployment-api.yaml b/designate/templates/deployment-api.yaml
+index e9df6b6e..2f6ed7ca 100644
+--- a/designate/templates/deployment-api.yaml
++++ b/designate/templates/deployment-api.yaml
+@@ -41,6 +41,12 @@ spec:
+         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+     spec:
++{{ with .Values.pod.priorityClassName.designate_api }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.designate_api }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "designate" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       affinity:
+diff --git a/designate/templates/deployment-central.yaml b/designate/templates/deployment-central.yaml
+index 02d9f3ca..c3e7dcdb 100644
+--- a/designate/templates/deployment-central.yaml
++++ b/designate/templates/deployment-central.yaml
+@@ -41,6 +41,12 @@ spec:
+         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+     spec:
++{{ with .Values.pod.priorityClassName.designate_central }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.designate_central }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       affinity:
+ {{ tuple $envAll "designate" "central" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+diff --git a/designate/templates/deployment-mdns.yaml b/designate/templates/deployment-mdns.yaml
+index d58f6304..9312bc7d 100644
+--- a/designate/templates/deployment-mdns.yaml
++++ b/designate/templates/deployment-mdns.yaml
+@@ -41,6 +41,12 @@ spec:
+         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+     spec:
++{{ with .Values.pod.priorityClassName.designate_mdns }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.designate_mdns }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "designate" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       affinity:
+diff --git a/designate/templates/deployment-producer.yaml b/designate/templates/deployment-producer.yaml
+index 491dbad8..68e46fb4 100644
+--- a/designate/templates/deployment-producer.yaml
++++ b/designate/templates/deployment-producer.yaml
+@@ -41,6 +41,12 @@ spec:
+         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+     spec:
++{{ with .Values.pod.priorityClassName.designate_producer }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.designate_producer }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       affinity:
+ {{ tuple $envAll "designate" "producer" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+diff --git a/designate/templates/deployment-sink.yaml b/designate/templates/deployment-sink.yaml
+index e577d9d8..63222a1a 100644
+--- a/designate/templates/deployment-sink.yaml
++++ b/designate/templates/deployment-sink.yaml
+@@ -41,6 +41,12 @@ spec:
+         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+     spec:
++{{ with .Values.pod.priorityClassName.designate_sink }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.designate_sink }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       affinity:
+ {{ tuple $envAll "designate" "sink" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+diff --git a/designate/templates/deployment-worker.yaml b/designate/templates/deployment-worker.yaml
+index 74f9c998..12d0b96a 100644
+--- a/designate/templates/deployment-worker.yaml
++++ b/designate/templates/deployment-worker.yaml
+@@ -41,6 +41,12 @@ spec:
+         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+     spec:
++{{ with .Values.pod.priorityClassName.designate_worker }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.designate_worker }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       affinity:
+ {{ tuple $envAll "designate" "worker" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+diff --git a/designate/values.yaml b/designate/values.yaml
+index 71694146..6e520ca4 100644
+--- a/designate/values.yaml
++++ b/designate/values.yaml
+@@ -66,6 +66,22 @@ images:
+       - image_repo_sync
+ 
+ pod:
++  priorityClassName:
++    designate_api: null
++    designate_central: null
++    designate_mdns: null
++    designate_producer: null
++    designate_sink: null
++    designate_worker: null
++    db_sync: null
++  runtimeClassName:
++    designate_api: null
++    designate_central: null
++    designate_mdns: null
++    designate_producer: null
++    designate_sink: null
++    designate_worker: null
++    db_sync: null
+   affinity:
+     anti:
+       type:
+-- 
+2.34.1
+
diff --git a/charts/patches/glance/0002-Enable-priority-runtime-ClassName-for-Glance.patch b/charts/patches/glance/0002-Enable-priority-runtime-ClassName-for-Glance.patch
new file mode 100644
index 0000000..1f541ae
--- /dev/null
+++ b/charts/patches/glance/0002-Enable-priority-runtime-ClassName-for-Glance.patch
@@ -0,0 +1,67 @@
+From 20be35a44b530e59c93fd6cee36b6518308932bb Mon Sep 17 00:00:00 2001
+From: Dong Ma <dong.ma@vexxhost.com>
+Date: Mon, 10 Feb 2025 09:56:42 +0000
+Subject: [PATCH] Enable {priority,runtime}ClassName for Glance
+
+---
+ glance/templates/deployment-api.yaml | 6 ++++++
+ glance/templates/pod-rally-test.yaml | 6 ++++++
+ glance/values.yaml                   | 8 ++++++++
+ 3 files changed, 20 insertions(+)
+
+diff --git a/glance/templates/deployment-api.yaml b/glance/templates/deployment-api.yaml
+index d88856ff..9f14d1b2 100644
+--- a/glance/templates/deployment-api.yaml
++++ b/glance/templates/deployment-api.yaml
+@@ -75,6 +75,12 @@ spec:
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+ {{ dict "envAll" $envAll "podName" "glance-api" "containerNames" ( list "glance-perms" "glance-api" "init" "nginx" ) | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
++{{ with .Values.pod.priorityClassName.glance_api }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.glance_api }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "glance" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       affinity:
+diff --git a/glance/templates/pod-rally-test.yaml b/glance/templates/pod-rally-test.yaml
+index 0ca17eb2..1148e8a9 100644
+--- a/glance/templates/pod-rally-test.yaml
++++ b/glance/templates/pod-rally-test.yaml
+@@ -44,6 +44,12 @@ spec:
+     {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}
+ {{ dict "envAll" $envAll "application" "test" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 2 }}
+   restartPolicy: Never
++{{ with .Values.pod.priorityClassName.glance_tests }}
++  priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.glance_tests }}
++  runtimeClassName: {{ . }}
++{{ end }}
+   serviceAccountName: {{ $serviceAccountName }}
+   initContainers:
+ {{ tuple $envAll "tests" $mounts_tests_init | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 4 }}
+diff --git a/glance/values.yaml b/glance/values.yaml
+index 85ddf18b..6eae655a 100644
+--- a/glance/values.yaml
++++ b/glance/values.yaml
+@@ -795,6 +795,14 @@ endpoints:
+         default: 80
+ 
+ pod:
++  priorityClassName:
++    glance_api: null
++    glance_tests: null
++    db_sync: null
++  runtimeClassName:
++    glance_api: null
++    glance_tests: null
++    db_sync: null
+   security_context:
+     glance:
+       pod:
+-- 
+2.34.1
+
diff --git a/charts/patches/heat/0002-Enable-priority-runtime-ClassName-for-Heat.patch b/charts/patches/heat/0002-Enable-priority-runtime-ClassName-for-Heat.patch
new file mode 100644
index 0000000..bbcf954
--- /dev/null
+++ b/charts/patches/heat/0002-Enable-priority-runtime-ClassName-for-Heat.patch
@@ -0,0 +1,167 @@
+From 42c3cd6735ecc24f89ae3a0d83cf272eccfb343c Mon Sep 17 00:00:00 2001
+From: Dong Ma <dong.ma@vexxhost.com>
+Date: Mon, 10 Feb 2025 11:24:08 +0000
+Subject: [PATCH] Enable {priority,runtime}ClassName for Heat
+
+---
+ heat/templates/cron-job-engine-cleaner.yaml   |  6 ++++++
+ heat/templates/cron-job-purge-deleted.yaml    |  6 ++++++
+ heat/templates/deployment-api.yaml            |  6 ++++++
+ heat/templates/deployment-cfn.yaml            |  6 ++++++
+ heat/templates/deployment-cloudwatch.yaml     |  6 ++++++
+ heat/templates/deployment-engine.yaml         |  6 ++++++
+ heat/templates/pod-rally-test.yaml            |  6 ++++++
+ heat/values.yaml                              | 18 ++++++++++++++++++
+ 8 files changed, 60 insertions(+)
+
+diff --git a/heat/templates/cron-job-engine-cleaner.yaml b/heat/templates/cron-job-engine-cleaner.yaml
+index a7eded42..32674b6a 100644
+--- a/heat/templates/cron-job-engine-cleaner.yaml
++++ b/heat/templates/cron-job-engine-cleaner.yaml
+@@ -52,6 +52,12 @@ spec:
+             configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+ {{ dict "envAll" $envAll "podName" "heat-engine-cleaner" "containerNames" (list "heat-engine-cleaner" "init" ) | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+         spec:
++{{ with .Values.pod.priorityClassName.heat_engine_cleaner }}
++          priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.heat_engine_cleaner }}
++          runtimeClassName: {{ . }}
++{{ end }}
+           serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "engine_cleaner" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 10 }}
+           restartPolicy: OnFailure
+diff --git a/heat/templates/cron-job-purge-deleted.yaml b/heat/templates/cron-job-purge-deleted.yaml
+index 4d83c294..285c2d3e 100644
+--- a/heat/templates/cron-job-purge-deleted.yaml
++++ b/heat/templates/cron-job-purge-deleted.yaml
+@@ -47,6 +47,12 @@ spec:
+ {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 12 }}
+ {{ dict "envAll" $envAll "podName" "heat-purge-deleted" "containerNames" (list "init" "heat-purge-deleted" ) | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+         spec:
++{{ with .Values.pod.priorityClassName.heat_purge_deleted }}
++          priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.heat_purge_deleted }}
++          runtimeClassName: {{ . }}
++{{ end }}
+           serviceAccountName: {{ $serviceAccountName }}
+           restartPolicy: OnFailure
+ {{ if $envAll.Values.pod.tolerations.heat.enabled }}
+diff --git a/heat/templates/deployment-api.yaml b/heat/templates/deployment-api.yaml
+index ca346563..f737c84f 100644
+--- a/heat/templates/deployment-api.yaml
++++ b/heat/templates/deployment-api.yaml
+@@ -45,6 +45,12 @@ spec:
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+ {{ dict "envAll" $envAll "podName" "heat-api" "containerNames" (list "heat-api" "init" ) | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
++{{ with .Values.pod.priorityClassName.heat_api }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.heat_api }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "heat" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       affinity:
+diff --git a/heat/templates/deployment-cfn.yaml b/heat/templates/deployment-cfn.yaml
+index 84015438..5f5584ef 100644
+--- a/heat/templates/deployment-cfn.yaml
++++ b/heat/templates/deployment-cfn.yaml
+@@ -45,6 +45,12 @@ spec:
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+ {{ dict "envAll" $envAll "podName" "heat-cfn" "containerNames" (list "heat-cfn" "init" ) | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
++{{ with .Values.pod.priorityClassName.heat_cfn }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.heat_cfn }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "heat" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       affinity:
+diff --git a/heat/templates/deployment-cloudwatch.yaml b/heat/templates/deployment-cloudwatch.yaml
+index f1f73535..0c9dec54 100644
+--- a/heat/templates/deployment-cloudwatch.yaml
++++ b/heat/templates/deployment-cloudwatch.yaml
+@@ -44,6 +44,12 @@ spec:
+         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+     spec:
++{{ with .Values.pod.priorityClassName.heat_cloudwatch }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.heat_cloudwatch }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "heat" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       affinity:
+diff --git a/heat/templates/deployment-engine.yaml b/heat/templates/deployment-engine.yaml
+index 7b7b8adf..d228a876 100644
+--- a/heat/templates/deployment-engine.yaml
++++ b/heat/templates/deployment-engine.yaml
+@@ -53,6 +53,12 @@ spec:
+ {{ dict "envAll" $envAll "podName" "heat-engine" "containerNames" (list "heat-engine" "init" ) | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+ {{- end }}
+     spec:
++{{ with .Values.pod.priorityClassName.heat_engine }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.heat_engine }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "heat" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       affinity:
+diff --git a/heat/templates/pod-rally-test.yaml b/heat/templates/pod-rally-test.yaml
+index ac6c636e..610048cf 100644
+--- a/heat/templates/pod-rally-test.yaml
++++ b/heat/templates/pod-rally-test.yaml
+@@ -43,6 +43,12 @@ spec:
+ {{ tuple $envAll "heat" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 2 }}
+ {{ end }}
+   restartPolicy: Never
++{{ with .Values.pod.priorityClassName.heat_tests }}
++  priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.heat_tests }}
++  runtimeClassName: {{ . }}
++{{ end }}
+   serviceAccountName: {{ $serviceAccountName }}
+   initContainers:
+ {{ tuple $envAll "tests" $mounts_tests_init | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 4 }}
+diff --git a/heat/values.yaml b/heat/values.yaml
+index 1cf8bdc4..af99c49c 100644
+--- a/heat/values.yaml
++++ b/heat/values.yaml
+@@ -1015,6 +1015,24 @@ endpoints:
+         default: 80
+ 
+ pod:
++  priorityClassName:
++    heat_api: null
++    heat_cfn: null
++    heat_cloudwatch: null
++    heat_tests: null
++    heat_engine_cleaner: null
++    heat_purge_deleted: null
++    heat_engine: null
++    db_sync: null
++  runtimeClassName:
++    heat_api: null
++    heat_cfn: null
++    heat_cloudwatch: null
++    heat_tests: null
++    heat_engine_cleaner: null
++    heat_purge_deleted: null
++    heat_engine: null
++    db_sync: null
+   security_context:
+     heat:
+       pod:
+-- 
+2.34.1
+
diff --git a/charts/patches/heat/0003-Fix-the-missing-priority-runtime-ClassName.patch b/charts/patches/heat/0003-Fix-the-missing-priority-runtime-ClassName.patch
new file mode 100644
index 0000000..7d29b82
--- /dev/null
+++ b/charts/patches/heat/0003-Fix-the-missing-priority-runtime-ClassName.patch
@@ -0,0 +1,61 @@
+From 1df9feeb330507bc2f988951e2b4813408621cb3 Mon Sep 17 00:00:00 2001
+From: Dong Ma <dong.ma@vexxhost.com>
+Date: Thu, 13 Feb 2025 16:15:52 +0000
+Subject: [PATCH] Fix the missing {priority,runtime}ClassName
+
+diff --git a/heat/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl b/heat/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+index 6b77004f..da3c4819 100644
+--- a/heat/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
++++ b/heat/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+@@ -70,6 +70,12 @@ spec:
+       annotations:
+ {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
+     spec:
++{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.bootstrap }}
++      priorityClassName: {{ $envAll.Values.pod.priorityClassName.bootstrap }}
++{{- end }}
++{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.bootstrap }}
++      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.bootstrap }}
++{{- end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       restartPolicy: OnFailure
+       {{ tuple $envAll "bootstrap" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
+diff --git a/heat/templates/job-trusts.yaml b/heat/templates/job-trusts.yaml
+index ae5bc644..e539e7f9 100644
+--- a/heat/templates/job-trusts.yaml
++++ b/heat/templates/job-trusts.yaml
+@@ -43,6 +43,12 @@ spec:
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+ {{ dict "envAll" $envAll "podName" "heat-trusts" "containerNames" (list "heat-trusts" "init" ) | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
++{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.heat_trusts }}
++      priorityClassName: {{ $envAll.Values.pod.priorityClassName.heat_trusts }}
++{{- end }}
++{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.heat_trusts }}
++      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.heat_trusts }}
++{{- end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "trusts" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       restartPolicy: OnFailure
+diff --git a/heat/values.yaml b/heat/values.yaml
+index af99c49c..e242d7d3 100644
+--- a/heat/values.yaml
++++ b/heat/values.yaml
+@@ -1023,6 +1023,8 @@ pod:
+     heat_engine_cleaner: null
+     heat_purge_deleted: null
+     heat_engine: null
++    heat_trusts: null
++    bootstrap: null
+     db_sync: null
+   runtimeClassName:
+     heat_api: null
+@@ -1032,6 +1034,8 @@ pod:
+     heat_engine_cleaner: null
+     heat_purge_deleted: null
+     heat_engine: null
++    heat_trusts: null
++    bootstrap: null
+     db_sync: null
+   security_context:
+     heat:
diff --git a/charts/patches/helm-toolkit/0002-Add-priority-runtime-ClassName-for-db_sync.patch b/charts/patches/helm-toolkit/0002-Add-priority-runtime-ClassName-for-db_sync.patch
new file mode 100644
index 0000000..1d32782
--- /dev/null
+++ b/charts/patches/helm-toolkit/0002-Add-priority-runtime-ClassName-for-db_sync.patch
@@ -0,0 +1,28 @@
+From ab35df279310c02396f3ef66e58949f3d6556105 Mon Sep 17 00:00:00 2001
+From: Mohammed Naser <mnaser@vexxhost.com>
+Date: Thu, 6 Feb 2025 09:18:59 -0500
+Subject: [PATCH] Add {priority,runtime}ClassName for db_sync
+
+---
+ helm-toolkit/templates/manifests/_job-db-sync.tpl | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/helm-toolkit/templates/manifests/_job-db-sync.tpl b/helm-toolkit/templates/manifests/_job-db-sync.tpl
+index 4696c88f..67fac86b 100644
+--- a/helm-toolkit/templates/manifests/_job-db-sync.tpl
++++ b/helm-toolkit/templates/manifests/_job-db-sync.tpl
+@@ -68,6 +68,12 @@ spec:
+       annotations:
+ {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
+     spec:
++{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_sync }}
++      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_sync }}
++{{- end }}
++{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_sync }}
++      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_sync }}
++{{- end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       restartPolicy: OnFailure
+       {{ tuple $envAll "db_sync" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
+-- 
+2.43.0
diff --git a/charts/patches/horizon/0004-Enable-priority-runtime-ClassName-for-Horizon.patch b/charts/patches/horizon/0004-Enable-priority-runtime-ClassName-for-Horizon.patch
new file mode 100644
index 0000000..12f018f
--- /dev/null
+++ b/charts/patches/horizon/0004-Enable-priority-runtime-ClassName-for-Horizon.patch
@@ -0,0 +1,85 @@
+From 2db1eab6b5df7070e93d11cbbbcd492aa849ad9c Mon Sep 17 00:00:00 2001
+From: Dong Ma <dong.ma@vexxhost.com>
+Date: Mon, 10 Feb 2025 13:14:30 +0000
+Subject: [PATCH] Enable {priority,runtime}ClassName for Horizon
+
+---
+ horizon/templates/deployment.yaml     | 6 ++++++
+ horizon/templates/job-db-sync.yaml    | 6 ++++++
+ horizon/templates/pod-helm-tests.yaml | 6 ++++++
+ horizon/values.yaml                   | 8 ++++++++
+ 4 files changed, 26 insertions(+)
+
+diff --git a/horizon/templates/deployment.yaml b/horizon/templates/deployment.yaml
+index 0037fef3..666edd65 100644
+--- a/horizon/templates/deployment.yaml
++++ b/horizon/templates/deployment.yaml
+@@ -48,6 +48,12 @@ spec:
+ {{- end }}
+ {{ dict "envAll" $envAll "podName" "horizon" "containerNames" (list "horizon" "init" ) | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
++{{ with .Values.pod.priorityClassName.horizon }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.horizon }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "horizon" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       affinity:
+diff --git a/horizon/templates/job-db-sync.yaml b/horizon/templates/job-db-sync.yaml
+index c2229cdd..8ae8643f 100644
+--- a/horizon/templates/job-db-sync.yaml
++++ b/horizon/templates/job-db-sync.yaml
+@@ -41,6 +41,12 @@ spec:
+       annotations:
+ {{ dict "envAll" $envAll "podName" "horizon-db-sync" "containerNames" (list "horizon-db-sync" "init" ) | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
++{{ with .Values.pod.priorityClassName.db_sync }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.db_sync }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "db_sync" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       restartPolicy: OnFailure
+diff --git a/horizon/templates/pod-helm-tests.yaml b/horizon/templates/pod-helm-tests.yaml
+index 7d163039..4513053b 100644
+--- a/horizon/templates/pod-helm-tests.yaml
++++ b/horizon/templates/pod-helm-tests.yaml
+@@ -34,6 +34,12 @@ metadata:
+ spec:
+ {{ dict "envAll" $envAll "application" "test" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 2 }}
+   restartPolicy: Never
++{{ with .Values.pod.priorityClassName.horizon_tests }}
++  priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.horizon_tests }}
++  runtimeClassName: {{ . }}
++{{ end }}
+   serviceAccountName: {{ $serviceAccountName }}
+ {{ if $envAll.Values.pod.tolerations.horizon.enabled }}
+ {{ tuple $envAll "horizon" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 2 }}
+diff --git a/horizon/values.yaml b/horizon/values.yaml
+index 4a060452..b3782c57 100644
+--- a/horizon/values.yaml
++++ b/horizon/values.yaml
+@@ -1103,6 +1103,14 @@ dependencies:
+           service: dashboard
+ 
+ pod:
++  priorityClassName:
++    horizon: null
++    horizon_tests: null
++    db_sync: null
++  runtimeClassName:
++    horizon: null
++    horizon_tests: null
++    db_sync: null
+   security_context:
+     horizon:
+       pod:
+-- 
+2.34.1
+
diff --git a/charts/patches/horizon/0005-Fix-the-missing-priority-runtime-ClassName.patch b/charts/patches/horizon/0005-Fix-the-missing-priority-runtime-ClassName.patch
new file mode 100644
index 0000000..80160cd
--- /dev/null
+++ b/charts/patches/horizon/0005-Fix-the-missing-priority-runtime-ClassName.patch
@@ -0,0 +1,39 @@
+From 1df9feeb330507bc2f988951e2b4813408621cb3 Mon Sep 17 00:00:00 2001
+From: Dong Ma <dong.ma@vexxhost.com>
+Date: Thu, 13 Feb 2025 16:15:52 +0000
+Subject: [PATCH] Fix the missing {priority,runtime}ClassName
+
+diff --git a/horizon/charts/helm-toolkit/templates/manifests/_job-db-init-mysql.tpl b/horizon/charts/helm-toolkit/templates/manifests/_job-db-init-mysql.tpl
+index b8a1dce3..1db62b01 100644
+--- a/horizon/charts/helm-toolkit/templates/manifests/_job-db-init-mysql.tpl
++++ b/horizon/charts/helm-toolkit/templates/manifests/_job-db-init-mysql.tpl
+@@ -71,6 +71,12 @@ spec:
+       annotations:
+ {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
+     spec:
++{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_init }}
++      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_init }}
++{{- end }}
++{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_init }}
++      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_init }}
++{{- end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       restartPolicy: OnFailure
+       {{ tuple $envAll "db_init" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
+diff --git a/horizon/values.yaml b/horizon/values.yaml
+index b3782c57..33a74d2e 100644
+--- a/horizon/values.yaml
++++ b/horizon/values.yaml
+@@ -1106,10 +1106,12 @@ pod:
+   priorityClassName:
+     horizon: null
+     horizon_tests: null
++    db_init: null
+     db_sync: null
+   runtimeClassName:
+     horizon: null
+     horizon_tests: null
++    db_init: null
+     db_sync: null
+   security_context:
+     horizon:
diff --git a/charts/patches/ironic/0001-Enable-priority-runtime-ClassName-for-Ironic.patch b/charts/patches/ironic/0001-Enable-priority-runtime-ClassName-for-Ironic.patch
new file mode 100644
index 0000000..71e821b
--- /dev/null
+++ b/charts/patches/ironic/0001-Enable-priority-runtime-ClassName-for-Ironic.patch
@@ -0,0 +1,47 @@
+From 0b672d9b62ce39a07969bb1910aba9c4c4cdadc1 Mon Sep 17 00:00:00 2001
+From: Dong Ma <dong.ma@vexxhost.com>
+Date: Mon, 10 Feb 2025 13:28:41 +0000
+Subject: [PATCH] Enable {priority,runtime}ClassName for Ironic
+
+---
+ ironic/templates/deployment-api.yaml | 6 ++++++
+ ironic/values.yaml                   | 6 ++++++
+ 2 files changed, 12 insertions(+)
+
+diff --git a/ironic/templates/deployment-api.yaml b/ironic/templates/deployment-api.yaml
+index 7d4b8387..99e25b65 100644
+--- a/ironic/templates/deployment-api.yaml
++++ b/ironic/templates/deployment-api.yaml
+@@ -45,6 +45,12 @@ spec:
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+ {{ tuple "ironic_api" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
+     spec:
++{{ with .Values.pod.priorityClassName.ironic_api }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.ironic_api }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       affinity:
+ {{ tuple $envAll "ironic" "api" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+diff --git a/ironic/values.yaml b/ironic/values.yaml
+index a94bc3f2..d28463aa 100644
+--- a/ironic/values.yaml
++++ b/ironic/values.yaml
+@@ -639,6 +639,12 @@ endpoints:
+         default: 24220
+ 
+ pod:
++  priorityClassName:
++    ironic_api: null
++    db_sync: null
++  runtimeClassName:
++    ironic_api: null
++    db_sync: null
+   affinity:
+     anti:
+       type:
+-- 
+2.34.1
+
diff --git a/charts/patches/ironic/0002-Fix-the-missing-priority-runtime-ClassName.patch b/charts/patches/ironic/0002-Fix-the-missing-priority-runtime-ClassName.patch
new file mode 100644
index 0000000..9c908e0
--- /dev/null
+++ b/charts/patches/ironic/0002-Fix-the-missing-priority-runtime-ClassName.patch
@@ -0,0 +1,65 @@
+From 1df9feeb330507bc2f988951e2b4813408621cb3 Mon Sep 17 00:00:00 2001
+From: Dong Ma <dong.ma@vexxhost.com>
+Date: Thu, 13 Feb 2025 16:15:52 +0000
+Subject: [PATCH] Fix the missing {priority,runtime}ClassName
+
+diff --git a/ironic/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl b/ironic/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+index 6b77004f..da3c4819 100644
+--- a/ironic/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
++++ b/ironic/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+@@ -70,6 +70,12 @@ spec:
+       annotations:
+ {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
+     spec:
++{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.bootstrap }}
++      priorityClassName: {{ $envAll.Values.pod.priorityClassName.bootstrap }}
++{{- end }}
++{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.bootstrap }}
++      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.bootstrap }}
++{{- end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       restartPolicy: OnFailure
+       {{ tuple $envAll "bootstrap" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
+diff --git a/ironic/templates/statefulset-conductor.yaml b/ironic/templates/statefulset-conductor.yaml
+index bcf6238c..fc0031ef 100644
+--- a/ironic/templates/statefulset-conductor.yaml
++++ b/ironic/templates/statefulset-conductor.yaml
+@@ -44,6 +44,12 @@ spec:
+         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+     spec:
++{{ with .Values.pod.priorityClassName.ironic_conductor }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.ironic_conductor }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       affinity:
+ {{ tuple $envAll "ironic" "conductor" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+diff --git a/ironic/values.yaml b/ironic/values.yaml
+index d28463aa..bc4b0349 100644
+--- a/ironic/values.yaml
++++ b/ironic/values.yaml
+@@ -107,6 +107,7 @@ conf:
+       api_url: null
+     database:
+       connection: null
++      max_retries: -1
+     deploy:
+       http_root: /var/lib/openstack-helm/httpboot
+     glance:
+@@ -641,9 +642,13 @@ endpoints:
+ pod:
+   priorityClassName:
+     ironic_api: null
++    ironic_conductor: null
++    bootstrap: null
+     db_sync: null
+   runtimeClassName:
+     ironic_api: null
++    ironic_conductor: null
++    bootstrap: null
+     db_sync: null
+   affinity:
+     anti:
diff --git a/charts/patches/keystone/0001-Enable-priority-runtime-ClassName-for-Keystone.patch b/charts/patches/keystone/0001-Enable-priority-runtime-ClassName-for-Keystone.patch
new file mode 100644
index 0000000..3212d71
--- /dev/null
+++ b/charts/patches/keystone/0001-Enable-priority-runtime-ClassName-for-Keystone.patch
@@ -0,0 +1,107 @@
+From 9f293fef1eed2e6ef7aa372ad5a7c9293b11a15f Mon Sep 17 00:00:00 2001
+From: Dong Ma <dong.ma@vexxhost.com>
+Date: Mon, 10 Feb 2025 14:58:50 +0000
+Subject: [PATCH] Enable {priority,runtime}ClassName for Keystone
+
+---
+ keystone/templates/cron-job-credential-rotate.yaml  |  6 ++++++
+ keystone/templates/cron-job-fernet-rotate.yaml      |  6 ++++++
+ keystone/templates/deployment-api.yaml              |  6 ++++++
+ keystone/templates/pod-rally-test.yaml              |  6 ++++++
+ keystone/values.yaml                                | 12 ++++++++++++
+ 5 files changed, 36 insertions(+)
+
+diff --git a/keystone/templates/cron-job-credential-rotate.yaml b/keystone/templates/cron-job-credential-rotate.yaml
+index 59060796..6fabd9dd 100644
+--- a/keystone/templates/cron-job-credential-rotate.yaml
++++ b/keystone/templates/cron-job-credential-rotate.yaml
+@@ -70,6 +70,12 @@ spec:
+           labels:
+ {{ tuple $envAll "keystone" "credential-rotate" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 12 }}
+         spec:
++{{ with .Values.pod.priorityClassName.keystone_credential_rotate }}
++          priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.keystone_credential_rotate }}
++          runtimeClassName: {{ . }}
++{{ end }}
+           serviceAccountName: {{ $serviceAccountName }}
+           initContainers:
+ {{ tuple $envAll "credential_rotate" $mounts_keystone_credential_rotate_init | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 12 }}
+diff --git a/keystone/templates/cron-job-fernet-rotate.yaml b/keystone/templates/cron-job-fernet-rotate.yaml
+index a059f929..e9118423 100644
+--- a/keystone/templates/cron-job-fernet-rotate.yaml
++++ b/keystone/templates/cron-job-fernet-rotate.yaml
+@@ -71,6 +71,12 @@ spec:
+           labels:
+ {{ tuple $envAll "keystone" "fernet-rotate" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 12 }}
+         spec:
++{{ with .Values.pod.priorityClassName.keystone_fernet_rotate }}
++          priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.keystone_fernet_rotate }}
++          runtimeClassName: {{ . }}
++{{ end }}
+           serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "fernet_rotate" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 10 }}
+           initContainers:
+diff --git a/keystone/templates/deployment-api.yaml b/keystone/templates/deployment-api.yaml
+index ee4e13c9..36050a5c 100644
+--- a/keystone/templates/deployment-api.yaml
++++ b/keystone/templates/deployment-api.yaml
+@@ -54,6 +54,12 @@ spec:
+ {{ dict "envAll" $envAll "podName" "keystone-api" "containerNames" (list "keystone-api") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
+ {{ dict "envAll" $envAll "application" "keystone" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
++{{ with .Values.pod.priorityClassName.keystone_api }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.keystone_api }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       affinity:
+ {{ tuple $envAll "keystone" "api" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+diff --git a/keystone/templates/pod-rally-test.yaml b/keystone/templates/pod-rally-test.yaml
+index ad5b23a0..155dbae3 100644
+--- a/keystone/templates/pod-rally-test.yaml
++++ b/keystone/templates/pod-rally-test.yaml
+@@ -42,6 +42,12 @@ spec:
+ {{ dict "envAll" $envAll "application" "test" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 2 }}
+   nodeSelector:
+     {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}
++{{ with .Values.pod.priorityClassName.keystone_tests }}
++  priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.keystone_tests }}
++  runtimeClassName: {{ . }}
++{{ end }}
+   serviceAccountName: {{ $serviceAccountName }}
+   initContainers:
+ {{ tuple $envAll "tests" $mounts_tests_init | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 4 }}
+diff --git a/keystone/values.yaml b/keystone/values.yaml
+index 27e767cf..c23a4ac0 100644
+--- a/keystone/values.yaml
++++ b/keystone/values.yaml
+@@ -158,6 +158,18 @@ dependencies:
+           service: local_image_registry
+ 
+ pod:
++  priorityClassName:
++    keystone_api: null
++    keystone_tests: null
++    keystone_credential_rotate: null
++    keystone_fernet_rotate: null
++    db_sync: null
++  runtimeClassName:
++    keystone_api: null
++    keystone_tests: null
++    keystone_credential_rotate: null
++    keystone_fernet_rotate: null
++    db_sync: null
+   security_context:
+     keystone:
+       pod:
+-- 
+2.34.1
+
diff --git a/charts/patches/keystone/0002-Fix-the-missing-priority-runtime-ClassName.patch b/charts/patches/keystone/0002-Fix-the-missing-priority-runtime-ClassName.patch
new file mode 100644
index 0000000..71059c7
--- /dev/null
+++ b/charts/patches/keystone/0002-Fix-the-missing-priority-runtime-ClassName.patch
@@ -0,0 +1,136 @@
+From 1df9feeb330507bc2f988951e2b4813408621cb3 Mon Sep 17 00:00:00 2001
+From: Dong Ma <dong.ma@vexxhost.com>
+Date: Thu, 13 Feb 2025 16:15:52 +0000
+Subject: [PATCH] Fix the missing {priority,runtime}ClassName
+
+diff --git a/keystone/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl b/keystone/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+index 6b77004f..da3c4819 100644
+--- a/keystone/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
++++ b/keystone/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+@@ -70,6 +70,12 @@ spec:
+       annotations:
+ {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
+     spec:
++{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.bootstrap }}
++      priorityClassName: {{ $envAll.Values.pod.priorityClassName.bootstrap }}
++{{- end }}
++{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.bootstrap }}
++      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.bootstrap }}
++{{- end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       restartPolicy: OnFailure
+       {{ tuple $envAll "bootstrap" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
+diff --git a/keystone/charts/helm-toolkit/templates/manifests/_job-db-init-mysql.tpl b/keystone/charts/helm-toolkit/templates/manifests/_job-db-init-mysql.tpl
+index b8a1dce3..1db62b01 100644
+--- a/keystone/charts/helm-toolkit/templates/manifests/_job-db-init-mysql.tpl
++++ b/keystone/charts/helm-toolkit/templates/manifests/_job-db-init-mysql.tpl
+@@ -71,6 +71,12 @@ spec:
+       annotations:
+ {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
+     spec:
++{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_init }}
++      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_init }}
++{{- end }}
++{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_init }}
++      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_init }}
++{{- end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       restartPolicy: OnFailure
+       {{ tuple $envAll "db_init" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
+diff --git a/keystone/templates/job-credential-cleanup.yaml b/keystone/templates/job-credential-cleanup.yaml
+index 9f268027..3eac2b39 100644
+--- a/keystone/templates/job-credential-cleanup.yaml
++++ b/keystone/templates/job-credential-cleanup.yaml
+@@ -45,6 +45,12 @@ spec:
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+ {{ dict "envAll" $envAll "podName" "keystone-credential-cleanup" "containerNames" (list "keystone-credential-cleanup") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
++{{ with .Values.pod.priorityClassName.keystone_credential_cleanup }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.keystone_credential_cleanup }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceName }}
+       restartPolicy: Never
+ {{ if $envAll.Values.pod.tolerations.keystone.enabled }}
+diff --git a/keystone/templates/job-credential-setup.yaml b/keystone/templates/job-credential-setup.yaml
+index 38ff3c8b..c8710b63 100644
+--- a/keystone/templates/job-credential-setup.yaml
++++ b/keystone/templates/job-credential-setup.yaml
+@@ -74,6 +74,12 @@ spec:
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+ {{ dict "envAll" $envAll "podName" "keystone-credential-setup" "containerNames" (list "keystone-credential-setup") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
++{{ with .Values.pod.priorityClassName.keystone_credential_setup }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.keystone_credential_setup }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "credential_setup" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       initContainers:
+diff --git a/keystone/templates/job-domain-manage.yaml b/keystone/templates/job-domain-manage.yaml
+index 87c82ebd..41bc9b11 100644
+--- a/keystone/templates/job-domain-manage.yaml
++++ b/keystone/templates/job-domain-manage.yaml
+@@ -42,6 +42,12 @@ spec:
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+ {{ dict "envAll" $envAll "podName" "keystone-domain-manage" "containerNames" (list "keystone-domain-manage" "keystone-domain-manage-init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
++{{ with .Values.pod.priorityClassName.keystone_domain_manage }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.keystone_domain_manage }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "domain_manage" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       restartPolicy: OnFailure
+diff --git a/keystone/templates/job-fernet-setup.yaml b/keystone/templates/job-fernet-setup.yaml
+index d52aa6ce..f41e788d 100644
+--- a/keystone/templates/job-fernet-setup.yaml
++++ b/keystone/templates/job-fernet-setup.yaml
+@@ -74,6 +74,12 @@ spec:
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+ {{ dict "envAll" $envAll "podName" "keystone-fernet-setup" "containerNames" (list "keystone-fernet-setup") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
++{{ with .Values.pod.priorityClassName.keystone_fernet_setup }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.keystone_fernet_setup }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "fernet_setup" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       initContainers:
+diff --git a/keystone/values.yaml b/keystone/values.yaml
+index c23a4ac0..65e6f658 100644
+--- a/keystone/values.yaml
++++ b/keystone/values.yaml
+@@ -163,12 +163,24 @@ pod:
+     keystone_tests: null
+     keystone_credential_rotate: null
+     keystone_fernet_rotate: null
++    keystone_credential_setup: null
++    keystone_fernet_setup: null
++    keystone_domain_manage: null
++    keystone_credential_cleanup: null
++    bootstrap: null
++    db_init: null
+     db_sync: null
+   runtimeClassName:
+     keystone_api: null
+     keystone_tests: null
+     keystone_credential_rotate: null
+     keystone_fernet_rotate: null
++    keystone_credential_setup: null
++    keystone_fernet_setup: null
++    keystone_domain_manage: null
++    keystone_credential_cleanup: null
++    bootstrap: null
++    db_init: null
+     db_sync: null
+   security_context:
+     keystone:
diff --git a/charts/patches/magnum/0002-Enable-priority-runtime-ClassName-for-Magnum.patch b/charts/patches/magnum/0002-Enable-priority-runtime-ClassName-for-Magnum.patch
new file mode 100644
index 0000000..1cff876
--- /dev/null
+++ b/charts/patches/magnum/0002-Enable-priority-runtime-ClassName-for-Magnum.patch
@@ -0,0 +1,67 @@
+From 88432c586b587bdd7d44a8d529d62e88d811f281 Mon Sep 17 00:00:00 2001
+From: Dong Ma <dong.ma@vexxhost.com>
+Date: Mon, 10 Feb 2025 15:37:34 +0000
+Subject: [PATCH] Enable {priority,runtime}ClassName for Magnum
+
+---
+ magnum/templates/deployment-api.yaml        | 6 ++++++
+ magnum/templates/statefulset-conductor.yaml | 6 ++++++
+ magnum/values.yaml                          | 8 ++++++++
+ 3 files changed, 20 insertions(+)
+
+diff --git a/magnum/templates/deployment-api.yaml b/magnum/templates/deployment-api.yaml
+index 9eb4967f..720a4bac 100644
+--- a/magnum/templates/deployment-api.yaml
++++ b/magnum/templates/deployment-api.yaml
+@@ -44,6 +44,12 @@ spec:
+         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+     spec:
++{{ with .Values.pod.priorityClassName.magnum_api }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.magnum_api }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       affinity:
+ {{ tuple $envAll "magnum" "api" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+diff --git a/magnum/templates/statefulset-conductor.yaml b/magnum/templates/statefulset-conductor.yaml
+index 44d8b027..fe1cc84e 100644
+--- a/magnum/templates/statefulset-conductor.yaml
++++ b/magnum/templates/statefulset-conductor.yaml
+@@ -45,6 +45,12 @@ spec:
+         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+     spec:
++{{ with .Values.pod.priorityClassName.magnum_conductor }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.magnum_conductor }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       affinity:
+ {{ tuple $envAll "magnum" "conductor" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+diff --git a/magnum/values.yaml b/magnum/values.yaml
+index 88b4203e..341f1ecd 100644
+--- a/magnum/values.yaml
++++ b/magnum/values.yaml
+@@ -482,6 +482,14 @@ endpoints:
+         default: 24220
+ 
+ pod:
++  priorityClassName:
++    magnum_api: null
++    magnum_conductor: null
++    db_sync: null
++  runtimeClassName:
++    magnum_api: null
++    magnum_conductor: null
++    db_sync: null
+   user:
+     magnum:
+       uid: 42424
+-- 
+2.34.1
+
diff --git a/charts/patches/magnum/0003-Fix-the-missing-priority-runtime-ClassName.patch b/charts/patches/magnum/0003-Fix-the-missing-priority-runtime-ClassName.patch
new file mode 100644
index 0000000..9a80b3a
--- /dev/null
+++ b/charts/patches/magnum/0003-Fix-the-missing-priority-runtime-ClassName.patch
@@ -0,0 +1,39 @@
+From 1df9feeb330507bc2f988951e2b4813408621cb3 Mon Sep 17 00:00:00 2001
+From: Dong Ma <dong.ma@vexxhost.com>
+Date: Thu, 13 Feb 2025 16:15:52 +0000
+Subject: [PATCH] Fix the missing {priority,runtime}ClassName
+
+diff --git a/magnum/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl b/magnum/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+index 6b77004f..da3c4819 100644
+--- a/magnum/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
++++ b/magnum/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+@@ -70,6 +70,12 @@ spec:
+       annotations:
+ {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
+     spec:
++{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.bootstrap }}
++      priorityClassName: {{ $envAll.Values.pod.priorityClassName.bootstrap }}
++{{- end }}
++{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.bootstrap }}
++      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.bootstrap }}
++{{- end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       restartPolicy: OnFailure
+       {{ tuple $envAll "bootstrap" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
+diff --git a/magnum/values.yaml b/magnum/values.yaml
+index 341f1ecd..29b8198b 100644
+--- a/magnum/values.yaml
++++ b/magnum/values.yaml
+@@ -485,10 +485,12 @@ pod:
+   priorityClassName:
+     magnum_api: null
+     magnum_conductor: null
++    bootstrap: null
+     db_sync: null
+   runtimeClassName:
+     magnum_api: null
+     magnum_conductor: null
++    bootstrap: null
+     db_sync: null
+   user:
+     magnum:
diff --git a/charts/patches/manila/0003-Enable-priority-runtime-ClassName-for-Manila.patch b/charts/patches/manila/0003-Enable-priority-runtime-ClassName-for-Manila.patch
new file mode 100644
index 0000000..50c5783
--- /dev/null
+++ b/charts/patches/manila/0003-Enable-priority-runtime-ClassName-for-Manila.patch
@@ -0,0 +1,198 @@
+From 01653c9c731dc2b66a79426f1021052186eebe40 Mon Sep 17 00:00:00 2001
+From: Dong Ma <dong.ma@vexxhost.com>
+Date: Tue, 11 Feb 2025 05:18:42 +0000
+Subject: [PATCH] Enable {priority,runtime}ClassName for Manila
+
+Also fix pod-rally-test.yaml template
+
+---
+ manila/templates/deployment-api.yaml        |  6 +++++
+ manila/templates/deployment-data.yaml       |  6 +++++
+ manila/templates/deployment-scheduler.yaml  |  6 +++++
+ manila/templates/deployment-share.yaml      |  6 +++++
+ manila/templates/pod-rally-test.yaml        | 24 +++++++++++-------
+ manila/values.yaml                          | 25 ++++++++++++++++++-
+ 6 files changed, 63 insertions(+), 10 deletions(-)
+
+diff --git a/manila/templates/deployment-api.yaml b/manila/templates/deployment-api.yaml
+index bcc352f0..0338ee17 100644
+--- a/manila/templates/deployment-api.yaml
++++ b/manila/templates/deployment-api.yaml
+@@ -46,6 +46,12 @@ spec:
+ {{ dict "envAll" $envAll "podName" "manila-api" "containerNames" (list "init" "manila-api") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
+ {{ dict "envAll" $envAll "application" "manila" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
++{{ with .Values.pod.priorityClassName.manila_api }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.manila_api }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       affinity:
+ {{ tuple $envAll "manila" "api" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+diff --git a/manila/templates/deployment-data.yaml b/manila/templates/deployment-data.yaml
+index 21db299e..6c981a67 100644
+--- a/manila/templates/deployment-data.yaml
++++ b/manila/templates/deployment-data.yaml
+@@ -46,6 +46,12 @@ spec:
+ {{ dict "envAll" $envAll "podName" "manila-data" "containerNames" (list "init" "manila-data") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
+ {{ dict "envAll" $envAll "application" "manila" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
++{{ with .Values.pod.priorityClassName.manila_data }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.manila_data }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       affinity:
+ {{ tuple $envAll "manila" "data" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+diff --git a/manila/templates/deployment-scheduler.yaml b/manila/templates/deployment-scheduler.yaml
+index 4858dfce..c8749a25 100644
+--- a/manila/templates/deployment-scheduler.yaml
++++ b/manila/templates/deployment-scheduler.yaml
+@@ -46,6 +46,12 @@ spec:
+ {{ dict "envAll" $envAll "podName" "manila-scheduler" "containerNames" (list "init" "manila-scheduler") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
+ {{ dict "envAll" $envAll "application" "manila" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
++{{ with .Values.pod.priorityClassName.manila_scheduler }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.manila_scheduler }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       affinity:
+ {{ tuple $envAll "manila" "scheduler" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+diff --git a/manila/templates/deployment-share.yaml b/manila/templates/deployment-share.yaml
+index 87bff3b1..2d7a6c19 100644
+--- a/manila/templates/deployment-share.yaml
++++ b/manila/templates/deployment-share.yaml
+@@ -46,6 +46,12 @@ spec:
+ {{ dict "envAll" $envAll "podName" "manila-share" "containerNames" (list "init" "manila-share" "manila-share-init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
+ {{ dict "envAll" $envAll "application" "manila" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
++{{ with .Values.pod.priorityClassName.manila_share }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.manila_share }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       affinity:
+ {{ tuple $envAll "manila" "share" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+diff --git a/manila/templates/pod-rally-test.yaml b/manila/templates/pod-rally-test.yaml
+index 928c1831..940989bb 100644
+--- a/manila/templates/pod-rally-test.yaml
++++ b/manila/templates/pod-rally-test.yaml
+@@ -42,6 +42,12 @@ spec:
+ {{ dict "envAll" $envAll "application" "test" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 2 }}
+   nodeSelector:
+     {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}
++{{ with .Values.pod.priorityClassName.manila_tests }}
++  priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.manila_tests }}
++  runtimeClassName: {{ . }}
++{{ end }}
+   serviceAccountName: {{ $serviceAccountName }}
+   initContainers:
+ {{ tuple $envAll "tests" $mounts_tests_init | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 4 }}
+@@ -62,27 +68,27 @@ spec:
+ {{- dict "enabled" .Values.manifests.certificates "name" .Values.secrets.tls.share.api.internal | include "helm-toolkit.snippets.tls_volume_mount"  | indent 8 }}
+ {{- end }}
+       env:
+-{{- with $env := dict "ksUserSecret" .Values.secrets.share.admin "useCA" (and .Values.manifests.certificates .Values.secrets.tls.share.api.internal) }}
+-{{- include "helm-toolkit.snippets.manila_openrc_env_vars" $env | indent 8 }}
++{{- with $env := dict "ksUserSecret" .Values.secrets.identity.admin "useCA" (and .Values.manifests.certificates .Values.secrets.tls.share.api.internal) }}
++{{- include "helm-toolkit.snippets.keystone_openrc_env_vars" $env | indent 8 }}
+ {{- end }}
+         - name: SERVICE_OS_SERVICE_NAME
+           value: "test"
+-{{- with $env := dict "ksUserSecret" .Values.secrets.share.test }}
+-{{- include "helm-toolkit.snippets.manila_user_create_env_vars" $env | indent 8 }}
++{{- with $env := dict "ksUserSecret" .Values.secrets.identity.test }}
++{{- include "helm-toolkit.snippets.keystone_user_create_env_vars" $env | indent 8 }}
+ {{- end }}
+         - name: SERVICE_OS_ROLE
+-          value: {{ .Values.endpoints.share.auth.test.role | quote }}
++          value: {{ .Values.endpoints.identity.auth.test.role | quote }}
+   containers:
+     - name: manila-test
+ {{ tuple $envAll "test" | include "helm-toolkit.snippets.image" | indent 6 }}
+ {{ tuple $envAll $envAll.Values.pod.resources.jobs.tests | include "helm-toolkit.snippets.kubernetes_resources" | indent 6 }}
+ {{ dict "envAll" $envAll "application" "test" "container" "manila_test" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 6}}
+       env:
+-{{- with $env := dict "ksUserSecret" .Values.secrets.share.admin "useCA" (and .Values.manifests.certificates .Values.secrets.tls.share.api.internal) }}
+-{{- include "helm-toolkit.snippets.manila_openrc_env_vars" $env | indent 8 }}
++{{- with $env := dict "ksUserSecret" .Values.secrets.identity.admin "useCA" (and .Values.manifests.certificates .Values.secrets.tls.share.api.internal) }}
++{{- include "helm-toolkit.snippets.keystone_openrc_env_vars" $env | indent 8 }}
+ {{- end }}
+-{{- with $env := dict "ksUserSecret" .Values.secrets.share.test }}
+-{{- include "helm-toolkit.snippets.manila_user_create_env_vars" $env | indent 8 }}
++{{- with $env := dict "ksUserSecret" .Values.secrets.identity.test }}
++{{- include "helm-toolkit.snippets.keystone_user_create_env_vars" $env | indent 8 }}
+ {{- end }}
+         - name: RALLY_ENV_NAME
+           value: {{.deployment_name}}
+diff --git a/manila/values.yaml b/manila/values.yaml
+index f820bc1f..400d52da 100644
+--- a/manila/values.yaml
++++ b/manila/values.yaml
+@@ -66,6 +66,20 @@ images:
+       - image_repo_sync
+ 
+ pod:
++  priorityClassName:
++    manila_api: null
++    manila_data: null
++    manila_scheduler: null
++    manila_tests: null
++    manila_share: null
++    db_sync: null
++  runtimeClassName:
++    manila_api: null
++    manila_data: null
++    manila_scheduler: null
++    manila_tests: null
++    manila_share: null
++    db_sync: null
+   security_context:
+     manila:
+       pod:
+@@ -829,6 +843,7 @@ secrets:
+   identity:
+     admin: manila-keystone-admin
+     manila: manila-keystone-user
++    test: manila-keystone-test
+   oslo_db:
+     admin: manila-db-admin
+     manila: manila-db-user
+@@ -890,6 +905,14 @@ endpoints:
+         project_name: service
+         user_domain_name: service
+         project_domain_name: service
++      test:
++        role: admin
++        region_name: RegionOne
++        username: manila-test
++        password: password
++        project_name: test
++        user_domain_name: service
++        project_domain_name: service
+     hosts:
+       default: keystone
+       internal: keystone-api
+@@ -1061,7 +1084,7 @@ manifests:
+   job_ks_service: true
+   job_ks_user: true
+   pdb_api: true
+-  pod_test: true
++  pod_rally_test: true
+   secret_db: true
+   network_policy: false
+   secret_ingress_tls: true
+-- 
+2.34.1
+
diff --git a/charts/patches/manila/0004-Fix-the-missing-priority-runtime-ClassName.patch b/charts/patches/manila/0004-Fix-the-missing-priority-runtime-ClassName.patch
new file mode 100644
index 0000000..ea30640
--- /dev/null
+++ b/charts/patches/manila/0004-Fix-the-missing-priority-runtime-ClassName.patch
@@ -0,0 +1,42 @@
+From 1df9feeb330507bc2f988951e2b4813408621cb3 Mon Sep 17 00:00:00 2001
+From: Dong Ma <dong.ma@vexxhost.com>
+Date: Thu, 13 Feb 2025 16:15:52 +0000
+Subject: [PATCH] Fix the missing {priority,runtime}ClassName
+
+diff --git a/manila/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl b/manila/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+index 6b77004f..da3c4819 100644
+--- a/manila/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
++++ b/manila/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+@@ -70,6 +70,12 @@ spec:
+       annotations:
+ {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
+     spec:
++{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.bootstrap }}
++      priorityClassName: {{ $envAll.Values.pod.priorityClassName.bootstrap }}
++{{- end }}
++{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.bootstrap }}
++      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.bootstrap }}
++{{- end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       restartPolicy: OnFailure
+       {{ tuple $envAll "bootstrap" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
+diff --git a/manila/values.yaml b/manila/values.yaml
+index 400d52da..42198625 100644
+--- a/manila/values.yaml
++++ b/manila/values.yaml
+@@ -72,6 +72,7 @@ pod:
+     manila_scheduler: null
+     manila_tests: null
+     manila_share: null
++    bootstrap: null
+     db_sync: null
+   runtimeClassName:
+     manila_api: null
+@@ -79,6 +80,7 @@ pod:
+     manila_scheduler: null
+     manila_tests: null
+     manila_share: null
++    bootstrap: null
+     db_sync: null
+   security_context:
+     manila:
diff --git a/charts/patches/neutron/0001-Switch-Neutron-to-ovsinit.patch b/charts/patches/neutron/0001-Switch-Neutron-to-ovsinit.patch
new file mode 100644
index 0000000..0c1d7d3
--- /dev/null
+++ b/charts/patches/neutron/0001-Switch-Neutron-to-ovsinit.patch
@@ -0,0 +1,32 @@
+From 3e0120d8457faf947f6f5d3ed79a1f08a0d271cd Mon Sep 17 00:00:00 2001
+From: Mohammed Naser <mnaser@vexxhost.com>
+Date: Mon, 17 Feb 2025 10:58:17 -0500
+Subject: [PATCH] Switch Neutron to ovsinit
+
+---
+ neutron/templates/bin/_neutron-openvswitch-agent-init.sh.tpl | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/neutron/templates/bin/_neutron-openvswitch-agent-init.sh.tpl b/neutron/templates/bin/_neutron-openvswitch-agent-init.sh.tpl
+index bd0a64ac..c15e40a5 100644
+--- a/neutron/templates/bin/_neutron-openvswitch-agent-init.sh.tpl
++++ b/neutron/templates/bin/_neutron-openvswitch-agent-init.sh.tpl
+@@ -435,13 +435,14 @@ do
+   if [ -n "$iface" ] && [ "$iface" != "null" ] && ( ip link show $iface 1>/dev/null 2>&1 );
+   then
+     ovs-vsctl --db=unix:${OVS_SOCKET} --may-exist add-port $bridge $iface
+-    migrate_ip_from_nic $iface $bridge
+     if [[ "${DPDK_ENABLED}" != "true" ]]; then
+       ip link set dev $iface up
+     fi
+   fi
+ done
+ 
++/usr/local/bin/ovsinit /tmp/auto_bridge_add
++
+ tunnel_types="{{- .Values.conf.plugins.openvswitch_agent.agent.tunnel_types -}}"
+ if [[ -n "${tunnel_types}" ]] ; then
+     tunnel_interface="{{- .Values.network.interface.tunnel -}}"
+-- 
+2.47.0
+
diff --git a/charts/patches/neutron/0004-add-support-for-ovn-dhcp-agent.patch b/charts/patches/neutron/0004-add-support-for-ovn-dhcp-agent.patch
new file mode 100644
index 0000000..1146298
--- /dev/null
+++ b/charts/patches/neutron/0004-add-support-for-ovn-dhcp-agent.patch
@@ -0,0 +1,97 @@
+diff --git a/neutron/templates/bin/_neutron-dhcp-agent.sh.tpl b/neutron/templates/bin/_neutron-dhcp-agent.sh.tpl
+index 0f73e5a3..b6296f48 100644
+--- a/neutron/templates/bin/_neutron-dhcp-agent.sh.tpl
++++ b/neutron/templates/bin/_neutron-dhcp-agent.sh.tpl
+@@ -17,6 +17,9 @@ limitations under the License.
+ set -x
+ exec neutron-dhcp-agent \
+   --config-file /etc/neutron/neutron.conf \
++{{- if ( has "ovn" .Values.network.backend ) }}
++  --config-file /tmp/pod-shared/ovn.ini \
++{{- end }}
+ {{- if and ( empty .Values.conf.neutron.DEFAULT.host ) ( .Values.pod.use_fqdn.neutron_agent ) }}
+   --config-file /tmp/pod-shared/neutron-agent.ini \
+ {{- end }}
+diff --git a/neutron/templates/configmap-etc.yaml b/neutron/templates/configmap-etc.yaml
+index 82865c09..9d001304 100644
+--- a/neutron/templates/configmap-etc.yaml
++++ b/neutron/templates/configmap-etc.yaml
+@@ -155,7 +155,7 @@ limitations under the License.
+ 
+ {{- if empty $envAll.Values.conf.dhcp_agent.DEFAULT.interface_driver -}}
+ {{- $_ := set $envAll.Values "__interface_driver" ( list ) }}
+-{{- if ( has "openvswitch" $envAll.Values.network.backend ) -}}
++{{- if or ( has "openvswitch" $envAll.Values.network.backend ) ( has "ovn" $envAll.Values.network.backend ) -}}
+ {{ $__interface_driver := append $envAll.Values.__interface_driver "openvswitch" }}
+ {{- $_ := set $envAll.Values "__interface_driver" $__interface_driver }}
+ {{- end -}}
+@@ -165,6 +165,9 @@ limitations under the License.
+ {{- end -}}
+ {{- $_ := set $envAll.Values.conf.dhcp_agent.DEFAULT "interface_driver" $envAll.Values.__interface_driver -}}
+ {{- end -}}
++{{- if and (has "ovn" $envAll.Values.network.backend) (empty $envAll.Values.conf.dhcp_agent.ovs.ovsdb_connection) -}}
++{{- $_ := set $envAll.Values.conf.dhcp_agent.ovs "ovsdb_connection" "unix:/run/openvswitch/db.sock" -}}
++{{- end -}}
+ 
+ {{- if empty $envAll.Values.conf.l3_agent.DEFAULT.interface_driver -}}
+ {{- $_ := set $envAll.Values "__interface_driver" ( list ) }}
+diff --git a/neutron/templates/daemonset-dhcp-agent.yaml b/neutron/templates/daemonset-dhcp-agent.yaml
+index 17e15f8e..af724875 100644
+--- a/neutron/templates/daemonset-dhcp-agent.yaml
++++ b/neutron/templates/daemonset-dhcp-agent.yaml
+@@ -94,6 +94,19 @@ spec:
+       {{- end }}
+       initContainers:
+ {{ tuple $envAll "pod_dependency" $mounts_neutron_dhcp_agent_init | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
++        {{- if ( has "ovn" .Values.network.backend ) }}
++        - name: ovn-neutron-init
++{{ tuple $envAll "neutron_dhcp" | include "helm-toolkit.snippets.image" | indent 10 }}
++          command:
++            - /tmp/neutron-ovn-init.sh
++          volumeMounts:
++            - name: pod-shared
++              mountPath: /tmp/pod-shared
++            - name: neutron-bin
++              mountPath: /tmp/neutron-ovn-init.sh
++              subPath: neutron-ovn-init.sh
++              readOnly: true
++        {{- end }}
+         - name: neutron-dhcp-agent-init
+ {{ tuple $envAll "neutron_dhcp" | include "helm-toolkit.snippets.image" | indent 10 }}
+ {{ tuple $envAll $envAll.Values.pod.resources.agent.dhcp | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+@@ -245,6 +258,10 @@ spec:
+               mountPath: /run/netns
+               mountPropagation: Bidirectional
+             {{- end }}
++            {{- if ( has "ovn" .Values.network.backend ) }}
++            - name: run-openvswitch
++              mountPath: /run/openvswitch
++            {{- end }}
+ {{- dict "enabled" $envAll.Values.manifests.certificates "name" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal "path" "/etc/rabbitmq/certs" | include "helm-toolkit.snippets.tls_volume_mount" | indent 12 }}
+ {{ if $mounts_neutron_dhcp_agent.volumeMounts }}{{ toYaml $mounts_neutron_dhcp_agent.volumeMounts | indent 12 }}{{ end }}
+       volumes:
+@@ -274,6 +291,11 @@ spec:
+           hostPath:
+             path: /run/netns
+         {{- end }}
++        {{- if ( has "ovn" .Values.network.backend ) }}
++        - name: run-openvswitch
++          hostPath:
++            path: /run/openvswitch
++        {{- end }}
+ {{- dict "enabled" $envAll.Values.manifests.certificates "name" $envAll.Values.endpoints.oslo_messaging.auth.admin.secret.tls.internal | include "helm-toolkit.snippets.tls_volume" | indent 8 }}
+ {{ if $mounts_neutron_dhcp_agent.volumes }}{{ toYaml $mounts_neutron_dhcp_agent.volumes | indent 8 }}{{ end }}
+ {{- end }}
+diff --git a/neutron/values.yaml b/neutron/values.yaml
+index b1ff4569..83e34c08 100644
+--- a/neutron/values.yaml
++++ b/neutron/values.yaml
+@@ -2149,6 +2149,8 @@ conf:
+       interface_driver: null
+       dnsmasq_config_file: /etc/neutron/dnsmasq.conf
+       force_metadata: True
++    # NOTE(mnaser): This has to be here in order for the DHCP agent to work with OVN.
++    ovs: {}
+   dnsmasq: |
+       #no-hosts
+       #port=5353
\ No newline at end of file
diff --git a/charts/patches/neutron/0004-nic-name-feature.patch b/charts/patches/neutron/0004-nic-name-feature.patch
new file mode 100644
index 0000000..c2325e7
--- /dev/null
+++ b/charts/patches/neutron/0004-nic-name-feature.patch
@@ -0,0 +1,61 @@
+diff --git a/charts/neutron/templates/bin/_neutron-openvswitch-agent-init.sh.tpl b/charts/neutron/templates/bin/_neutron-openvswitch-agent-init.sh.tpl
+index bd0a64a..08833a5 100644
+--- a/charts/neutron/templates/bin/_neutron-openvswitch-agent-init.sh.tpl
++++ b/charts/neutron/templates/bin/_neutron-openvswitch-agent-init.sh.tpl
+@@ -196,6 +196,12 @@ function process_dpdk_nics {
+   while IFS= read -r nic; do
+     local port_name=$(get_dpdk_config_value ${nic} '.name')
+     local pci_id=$(get_dpdk_config_value ${nic} '.pci_id')
++    local iface=$(get_dpdk_config_value ${nic} '.iface')
++    if [ -n ${iface} ] && [ -z ${pci_id} ]; then
++      local pci_id=$(get_address_by_nicname ${iface})
++    else
++      iface=$(get_name_by_pci_id "${pci_id}")
++    fi
+     local bridge=$(get_dpdk_config_value ${nic} '.bridge')
+     local vf_index=$(get_dpdk_config_value ${nic} '.vf_index')
+
+@@ -203,8 +209,6 @@ function process_dpdk_nics {
+       migrate_ip "${pci_id}" "${bridge}"
+     fi
+
+-    iface=$(get_name_by_pci_id "${pci_id}")
+-
+     if [ -n "${iface}" ]; then
+       ip link set ${iface} promisc on
+       if [ -n "${vf_index}" ]; then
+@@ -292,6 +296,12 @@ function process_dpdk_bonds {
+     echo $bond | jq -r -c '.nics[]' > /tmp/nics_array
+     while IFS= read -r nic; do
+       local pci_id=$(get_dpdk_config_value ${nic} '.pci_id')
++      local iface=$(get_dpdk_config_value ${nic} '.iface')
++      if [ -n ${iface} ] && [ -z ${pci_id} ]; then
++        local pci_id=$(get_address_by_nicname ${iface})
++      else
++        iface=$(get_name_by_pci_id "${pci_id}")
++      fi
+       local nic_name=$(get_dpdk_config_value ${nic} '.name')
+       local pmd_rxq_affinity=$(get_dpdk_config_value ${nic} '.pmd_rxq_affinity')
+       local vf_index=$(get_dpdk_config_value ${nic} '.vf_index')
+@@ -302,8 +312,6 @@ function process_dpdk_bonds {
+         ip_migrated=true
+       fi
+
+-      iface=$(get_name_by_pci_id "${pci_id}")
+-
+       if [ -n "${iface}" ]; then
+         ip link set ${iface} promisc on
+         if [ -n "${vf_index}" ]; then
+@@ -407,6 +415,12 @@ function get_driver_by_address {
+   fi
+ }
+
++function get_address_by_nicname {
++  if [[ -e /sys/class/net/$1/device ]]; then
++    readlink -f /sys/class/net/$1/device | xargs basename
++  fi
++}
++
+ function init_ovs_dpdk_bridge {
+   bridge=$1
+   ovs-vsctl --db=unix:${OVS_SOCKET} --may-exist add-br ${bridge} \
diff --git a/charts/patches/neutron/0005-Enable-priority-runtime-ClassName-for-Neutron.patch b/charts/patches/neutron/0005-Enable-priority-runtime-ClassName-for-Neutron.patch
new file mode 100644
index 0000000..83020c1
--- /dev/null
+++ b/charts/patches/neutron/0005-Enable-priority-runtime-ClassName-for-Neutron.patch
@@ -0,0 +1,347 @@
+From c11ca88302027988fa44059991141395475c103e Mon Sep 17 00:00:00 2001
+From: Dong Ma <dong.ma@vexxhost.com>
+Date: Tue, 11 Feb 2025 11:03:48 +0000
+Subject: [PATCH] Enable {priority,runtime}ClassName for Neutron
+
+---
+ .../templates/daemonset-bagpipe-bgp.yaml      |  6 ++++
+ .../templates/daemonset-bgp-dragent.yaml      |  6 ++++
+ .../templates/daemonset-dhcp-agent.yaml       |  6 ++++
+ .../templates/daemonset-l2gw-agent.yaml       |  6 ++++
+ .../neutron/templates/daemonset-l3-agent.yaml |  6 ++++
+ .../neutron/templates/daemonset-lb-agent.yaml |  6 ++++
+ .../templates/daemonset-metadata-agent.yaml   |  6 ++++
+ .../daemonset-netns-cleanup-cron.yaml         |  6 ++++
+ .../daemonset-neutron-ovn-vpn-agent.yaml      |  6 ++++
+ .../daemonset-ovn-metadata-agent.yaml         |  6 ++++
+ .../templates/daemonset-ovs-agent.yaml        |  6 ++++
+ .../templates/daemonset-sriov-agent.yaml      |  6 ++++
+ .../templates/deployment-ironic-agent.yaml    |  6 ++++
+ .../templates/deployment-rpc_server.yaml      |  6 ++++
+ .../neutron/templates/deployment-server.yaml  |  6 ++++
+ neutron/templates/pod-rally-test.yaml  |  6 ++++
+ neutron/values.yaml                    | 36 +++++++++++++++++++
+ 17 files changed, 132 insertions(+)
+
+diff --git a/neutron/templates/daemonset-bagpipe-bgp.yaml b/neutron/templates/daemonset-bagpipe-bgp.yaml
+index b6d2157e..fd4f0930 100644
+--- a/neutron/templates/daemonset-bagpipe-bgp.yaml
++++ b/neutron/templates/daemonset-bagpipe-bgp.yaml
+@@ -57,6 +57,12 @@ spec:
+ {{ tuple "neutron_bagpipe_bgp" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
+     spec:
+ {{ dict "envAll" $envAll "application" "neutron_bagpipe_bgp" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
++{{ with .Values.pod.priorityClassName.neutron_bagpipe_bgp }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.neutron_bagpipe_bgp }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ if $envAll.Values.pod.tolerations.neutron.enabled }}
+ {{ tuple $envAll "neutron" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
+diff --git a/neutron/templates/daemonset-bgp-dragent.yaml b/neutron/templates/daemonset-bgp-dragent.yaml
+index b0494c3e..caa61391 100644
+--- a/neutron/templates/daemonset-bgp-dragent.yaml
++++ b/neutron/templates/daemonset-bgp-dragent.yaml
+@@ -56,6 +56,12 @@ spec:
+ {{ tuple "neutron_bgp_dragent" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
+     spec:
+ {{ dict "envAll" $envAll "application" "neutron_bgp_dragent" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
++{{ with .Values.pod.priorityClassName.neutron_bgp_dragent }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.neutron_bgp_dragent }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ if $envAll.Values.pod.tolerations.neutron.enabled }}
+ {{ tuple $envAll "neutron" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
+diff --git a/neutron/templates/daemonset-dhcp-agent.yaml b/neutron/templates/daemonset-dhcp-agent.yaml
+index 17e15f8e..b51d179d 100644
+--- a/neutron/templates/daemonset-dhcp-agent.yaml
++++ b/neutron/templates/daemonset-dhcp-agent.yaml
+@@ -79,6 +79,12 @@ spec:
+ {{ dict "envAll" $envAll "podName" "neutron-dhcp-agent-default" "containerNames" (list "neutron-dhcp-agent" "neutron-dhcp-agent-init" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
+ {{ dict "envAll" $envAll "application" "neutron_dhcp_agent" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
++{{ with .Values.pod.priorityClassName.neutron_dhcp_agent }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.neutron_dhcp_agent }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ if $envAll.Values.pod.tolerations.neutron.enabled }}
+ {{ tuple $envAll "neutron" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
+diff --git a/neutron/templates/daemonset-l2gw-agent.yaml b/neutron/templates/daemonset-l2gw-agent.yaml
+index e9481981..ab984a30 100644
+--- a/neutron/templates/daemonset-l2gw-agent.yaml
++++ b/neutron/templates/daemonset-l2gw-agent.yaml
+@@ -80,6 +80,12 @@ spec:
+ {{ tuple "neutron_l2gw_agent" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
+     spec:
+ {{ dict "envAll" $envAll "application" "neutron_l2gw_agent" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
++{{ with .Values.pod.priorityClassName.neutron_l2gw_agent }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.neutron_l2gw_agent }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ if $envAll.Values.pod.tolerations.neutron.enabled }}
+ {{ tuple $envAll "neutron" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
+diff --git a/neutron/templates/daemonset-l3-agent.yaml b/neutron/templates/daemonset-l3-agent.yaml
+index b4bbd096..21b45d71 100644
+--- a/neutron/templates/daemonset-l3-agent.yaml
++++ b/neutron/templates/daemonset-l3-agent.yaml
+@@ -80,6 +80,12 @@ spec:
+ {{ dict "envAll" $envAll "podName" "neutron-l3-agent-default" "containerNames" (list "neutron-l3-agent" "init"  "neutron-l3-agent-init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
+ {{ dict "envAll" $envAll "application" "neutron_l3_agent" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
++{{ with .Values.pod.priorityClassName.neutron_l3_agent }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.neutron_l3_agent }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ if $envAll.Values.pod.tolerations.neutron.enabled }}
+ {{ tuple $envAll "neutron" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
+diff --git a/neutron/templates/daemonset-lb-agent.yaml b/neutron/templates/daemonset-lb-agent.yaml
+index 35ff8fe0..77337f7a 100644
+--- a/neutron/templates/daemonset-lb-agent.yaml
++++ b/neutron/templates/daemonset-lb-agent.yaml
+@@ -55,6 +55,12 @@ spec:
+ {{ tuple "neutron_lb_agent" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
+     spec:
+ {{ dict "envAll" $envAll "application" "neutron_lb_agent" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
++{{ with .Values.pod.priorityClassName.neutron_lb_agent }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.neutron_lb_agent }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ if $envAll.Values.pod.tolerations.neutron.enabled }}
+ {{ tuple $envAll "neutron" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
+diff --git a/neutron/templates/daemonset-metadata-agent.yaml b/neutron/templates/daemonset-metadata-agent.yaml
+index fc9a75ee..229ed5c6 100644
+--- a/neutron/templates/daemonset-metadata-agent.yaml
++++ b/neutron/templates/daemonset-metadata-agent.yaml
+@@ -76,6 +76,12 @@ spec:
+ {{ dict "envAll" $envAll "podName" "neutron-metadata-agent-default" "containerNames" (list "neutron-metadata-agent" "neutron-metadata-agent-init" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
+ {{ dict "envAll" $envAll "application" "neutron_metadata_agent" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
++{{ with .Values.pod.priorityClassName.neutron_metadata_agent }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.neutron_metadata_agent }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ if $envAll.Values.pod.tolerations.neutron.enabled }}
+ {{ tuple $envAll "neutron" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
+diff --git a/neutron/templates/daemonset-netns-cleanup-cron.yaml b/neutron/templates/daemonset-netns-cleanup-cron.yaml
+index d43c5950..df50c45a 100644
+--- a/neutron/templates/daemonset-netns-cleanup-cron.yaml
++++ b/neutron/templates/daemonset-netns-cleanup-cron.yaml
+@@ -48,6 +48,12 @@ spec:
+ {{ dict "envAll" $envAll "podName" "neutron-netns-cleanup-cron-default" "containerNames" (list "neutron-netns-cleanup-cron" "init" ) | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
+ {{ dict "envAll" $envAll "application" "neutron_netns_cleanup_cron" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
++{{ with .Values.pod.priorityClassName.neutron_netns_cleanup_cron }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.neutron_netns_cleanup_cron }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ if $envAll.Values.pod.tolerations.neutron.enabled }}
+ {{ tuple $envAll "neutron" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
+diff --git a/neutron/templates/daemonset-neutron-ovn-vpn-agent.yaml b/neutron/templates/daemonset-neutron-ovn-vpn-agent.yaml
+index fc6d8055..30c372f9 100644
+--- a/neutron/templates/daemonset-neutron-ovn-vpn-agent.yaml
++++ b/neutron/templates/daemonset-neutron-ovn-vpn-agent.yaml
+@@ -78,6 +78,12 @@ spec:
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+     spec:
+ {{ dict "envAll" $envAll "application" "ovn_vpn_agent" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
++{{ with .Values.pod.priorityClassName.neutron_ovn_vpn_agent }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.neutron_ovn_vpn_agent }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ if $envAll.Values.pod.tolerations.neutron.enabled }}
+ {{ tuple $envAll "neutron" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
+diff --git a/neutron/templates/daemonset-ovn-metadata-agent.yaml b/neutron/templates/daemonset-ovn-metadata-agent.yaml
+index 47e12567..80ca3f07 100644
+--- a/neutron/templates/daemonset-ovn-metadata-agent.yaml
++++ b/neutron/templates/daemonset-ovn-metadata-agent.yaml
+@@ -76,6 +76,12 @@ spec:
+ {{ dict "envAll" $envAll "podName" "neutron-ovn-metadata-agent-default" "containerNames" (list "neutron-ovn-metadata-agent" "neutron-ovn-metadata-agent-init" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
+ {{ dict "envAll" $envAll "application" "neutron_ovn_metadata_agent" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
++{{ with .Values.pod.priorityClassName.neutron_ovn_metadata_agent }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.neutron_ovn_metadata_agent }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ if $envAll.Values.pod.tolerations.neutron.enabled }}
+ {{ tuple $envAll "neutron" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
+diff --git a/neutron/templates/daemonset-ovs-agent.yaml b/neutron/templates/daemonset-ovs-agent.yaml
+index 0ea60f58..c6eb4c01 100644
+--- a/neutron/templates/daemonset-ovs-agent.yaml
++++ b/neutron/templates/daemonset-ovs-agent.yaml
+@@ -59,6 +59,12 @@ spec:
+ {{ dict "envAll" $envAll "podName" "$configMapName" "containerNames" (list "neutron-ovs-agent" "init" "neutron-openvswitch-agent-kernel-modules" "neutron-ovs-agent-init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
+ {{ dict "envAll" $envAll "application" "neutron_ovs_agent" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
++{{ with .Values.pod.priorityClassName.neutron_ovs_agent }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.neutron_ovs_agent }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       nodeSelector:
+         {{ .Values.labels.ovs.node_selector_key }}: {{ .Values.labels.ovs.node_selector_value }}
+diff --git a/neutron/templates/daemonset-sriov-agent.yaml b/neutron/templates/daemonset-sriov-agent.yaml
+index 5b96cd7c..efd48e01 100644
+--- a/neutron/templates/daemonset-sriov-agent.yaml
++++ b/neutron/templates/daemonset-sriov-agent.yaml
+@@ -62,6 +62,12 @@ spec:
+ {{ dict "envAll" $envAll "podName" "neutron-sriov-agent-default" "containerNames" (list "neutron-sriov-agent-init" "init" "neutron-sriov-agent") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
+ {{ dict "envAll" $envAll "application" "neutron_sriov_agent" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
++{{ with .Values.pod.priorityClassName.neutron_sriov_agent }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.neutron_sriov_agent }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       nodeSelector:
+         {{ .Values.labels.sriov.node_selector_key }}: {{ .Values.labels.sriov.node_selector_value }}
+diff --git a/neutron/templates/deployment-ironic-agent.yaml b/neutron/templates/deployment-ironic-agent.yaml
+index 1b468e2b..8d2663f9 100644
+--- a/neutron/templates/deployment-ironic-agent.yaml
++++ b/neutron/templates/deployment-ironic-agent.yaml
+@@ -49,6 +49,12 @@ spec:
+ {{ tuple "neutron_ironic_agent" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
+     spec:
+ {{ dict "envAll" $envAll "application" "neutron_ironic_agent" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
++{{ with .Values.pod.priorityClassName.neutron_ironic_agent }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.neutron_ironic_agent }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       affinity:
+ {{ tuple $envAll "neutron" "ironic_agent" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+diff --git a/neutron/templates/deployment-rpc_server.yaml b/neutron/templates/deployment-rpc_server.yaml
+index 1866e21e..f5f72403 100644
+--- a/neutron/templates/deployment-rpc_server.yaml
++++ b/neutron/templates/deployment-rpc_server.yaml
+@@ -49,6 +49,12 @@ spec:
+ {{ dict "envAll" $envAll "podName" "neutron-rpc-server" "containerNames" (list "neutron-rpc-server" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
+ {{ dict "envAll" $envAll "application" "neutron_rpc_server" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
++{{ with .Values.pod.priorityClassName.neutron_rpc_server }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.neutron_rpc_server }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       affinity:
+ {{ tuple $envAll "neutron" "rpc_server" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+diff --git a/neutron/templates/deployment-server.yaml b/neutron/templates/deployment-server.yaml
+index 457401b4..464b3c3c 100644
+--- a/neutron/templates/deployment-server.yaml
++++ b/neutron/templates/deployment-server.yaml
+@@ -81,6 +81,12 @@ spec:
+ {{ dict "envAll" $envAll "podName" "neutron-server" "containerNames" (list "neutron-server" "init" "nginx") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
+ {{ dict "envAll" $envAll "application" "neutron_server" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
++{{ with .Values.pod.priorityClassName.neutron_server }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.neutron_server }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       affinity:
+ {{ tuple $envAll "neutron" "server" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+diff --git a/neutron/templates/pod-rally-test.yaml b/neutron/templates/pod-rally-test.yaml
+index 5ef57fa3..a1e3e1ad 100644
+--- a/neutron/templates/pod-rally-test.yaml
++++ b/neutron/templates/pod-rally-test.yaml
+@@ -44,6 +44,12 @@ spec:
+ {{ tuple $envAll "neutron" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 2 }}
+ {{ end }}
+   restartPolicy: Never
++{{ with .Values.pod.priorityClassName.neutron_tests }}
++  priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.neutron_tests }}
++  runtimeClassName: {{ . }}
++{{ end }}
+   serviceAccountName: {{ $serviceAccountName }}
+   initContainers:
+ {{ tuple $envAll "tests" $mounts_tests_init | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 4 }}
+diff --git a/neutron/values.yaml b/neutron/values.yaml
+index b1ff4569..0f559976 100644
+--- a/neutron/values.yaml
++++ b/neutron/values.yaml
+@@ -389,6 +389,42 @@ dependencies:
+           service: local_image_registry
+ 
+ pod:
++  priorityClassName:
++    neutron_bagpipe_bgp: null
++    neutron_bgp_dragent: null
++    neutron_dhcp_agent: null
++    neutron_l2gw_agent: null
++    neutron_l3_agent: null
++    neutron_lb_agent: null
++    neutron_metadata_agent: null
++    neutron_netns_cleanup_cron: null
++    neutron_ovn_vpn_agent: null
++    neutron_ovn_metadata_agent: null
++    neutron_ovs_agent: null
++    neutron_sriov_agent: null
++    neutron_ironic_agent: null
++    neutron_rpc_server: null
++    neutron_server: null
++    neutron_tests: null
++    db_sync: null
++  runtimeClassName:
++    neutron_bagpipe_bgp: null
++    neutron_bgp_dragent: null
++    neutron_dhcp_agent: null
++    neutron_l2gw_agent: null
++    neutron_l3_agent: null
++    neutron_lb_agent: null
++    neutron_metadata_agent: null
++    neutron_netns_cleanup_cron: null
++    neutron_ovn_vpn_agent: null
++    neutron_ovn_metadata_agent: null
++    neutron_ovs_agent: null
++    neutron_sriov_agent: null
++    neutron_ironic_agent: null
++    neutron_rpc_server: null
++    neutron_server: null
++    neutron_tests: null
++    db_sync: null
+   sidecars:
+     neutron_policy_server: false
+   use_fqdn:
+-- 
+2.34.1
+
diff --git a/charts/patches/neutron/0006-Fix-the-missing-priority-runtime-ClassName.patch b/charts/patches/neutron/0006-Fix-the-missing-priority-runtime-ClassName.patch
new file mode 100644
index 0000000..10c015b
--- /dev/null
+++ b/charts/patches/neutron/0006-Fix-the-missing-priority-runtime-ClassName.patch
@@ -0,0 +1,126 @@
+From 1df9feeb330507bc2f988951e2b4813408621cb3 Mon Sep 17 00:00:00 2001
+From: Dong Ma <dong.ma@vexxhost.com>
+Date: Thu, 13 Feb 2025 16:15:52 +0000
+Subject: [PATCH] Fix the missing {priority,runtime}ClassName
+
+diff --git a/neutron/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl b/neutron/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+index 6b77004f..da3c4819 100644
+--- a/neutron/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
++++ b/neutron/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+@@ -70,6 +70,12 @@ spec:
+       annotations:
+ {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
+     spec:
++{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.bootstrap }}
++      priorityClassName: {{ $envAll.Values.pod.priorityClassName.bootstrap }}
++{{- end }}
++{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.bootstrap }}
++      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.bootstrap }}
++{{- end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       restartPolicy: OnFailure
+       {{ tuple $envAll "bootstrap" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
+diff --git a/neutron/templates/daemonset-bagpipe-bgp.yaml b/neutron/templates/daemonset-bagpipe-bgp.yaml
+index fd4f0930..e2bd90b5 100644
+--- a/neutron/templates/daemonset-bagpipe-bgp.yaml
++++ b/neutron/templates/daemonset-bagpipe-bgp.yaml
+@@ -57,10 +57,10 @@ spec:
+ {{ tuple "neutron_bagpipe_bgp" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
+     spec:
+ {{ dict "envAll" $envAll "application" "neutron_bagpipe_bgp" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+-{{ with .Values.pod.priorityClassName.neutron_bagpipe_bgp }}
++{{ with .Values.pod.priorityClassName.bagpipe_bgp }}
+       priorityClassName: {{ . }}
+ {{ end }}
+-{{ with .Values.pod.runtimeClassName.neutron_bagpipe_bgp }}
++{{ with .Values.pod.runtimeClassName.bagpipe_bgp }}
+       runtimeClassName: {{ . }}
+ {{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+diff --git a/neutron/templates/daemonset-bgp-dragent.yaml b/neutron/templates/daemonset-bgp-dragent.yaml
+index caa61391..8a6a3099 100644
+--- a/neutron/templates/daemonset-bgp-dragent.yaml
++++ b/neutron/templates/daemonset-bgp-dragent.yaml
+@@ -56,10 +56,10 @@ spec:
+ {{ tuple "neutron_bgp_dragent" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
+     spec:
+ {{ dict "envAll" $envAll "application" "neutron_bgp_dragent" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+-{{ with .Values.pod.priorityClassName.neutron_bgp_dragent }}
++{{ with .Values.pod.priorityClassName.bgp_dragent }}
+       priorityClassName: {{ . }}
+ {{ end }}
+-{{ with .Values.pod.runtimeClassName.neutron_bgp_dragent }}
++{{ with .Values.pod.runtimeClassName.bgp_dragent }}
+       runtimeClassName: {{ . }}
+ {{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+diff --git a/neutron/templates/daemonset-neutron-ovn-vpn-agent.yaml b/neutron/templates/daemonset-neutron-ovn-vpn-agent.yaml
+index 30c372f9..c3af50b6 100644
+--- a/neutron/templates/daemonset-neutron-ovn-vpn-agent.yaml
++++ b/neutron/templates/daemonset-neutron-ovn-vpn-agent.yaml
+@@ -78,10 +78,10 @@ spec:
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+     spec:
+ {{ dict "envAll" $envAll "application" "ovn_vpn_agent" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+-{{ with .Values.pod.priorityClassName.neutron_ovn_vpn_agent }}
++{{ with .Values.pod.priorityClassName.ovn_vpn_agent }}
+       priorityClassName: {{ . }}
+ {{ end }}
+-{{ with .Values.pod.runtimeClassName.neutron_ovn_vpn_agent }}
++{{ with .Values.pod.runtimeClassName.ovn_vpn_agent }}
+       runtimeClassName: {{ . }}
+ {{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+diff --git a/neutron/values.yaml b/neutron/values.yaml
+index dbb4fd00..57cb3b70 100644
+--- a/neutron/values.yaml
++++ b/neutron/values.yaml
+@@ -390,15 +390,15 @@ dependencies:
+ 
+ pod:
+   priorityClassName:
+-    neutron_bagpipe_bgp: null
+-    neutron_bgp_dragent: null
++    bagpipe_bgp: null
++    bgp_dragent: null
+     neutron_dhcp_agent: null
+     neutron_l2gw_agent: null
+     neutron_l3_agent: null
+     neutron_lb_agent: null
+     neutron_metadata_agent: null
+     neutron_netns_cleanup_cron: null
+-    neutron_ovn_vpn_agent: null
++    ovn_vpn_agent: null
+     neutron_ovn_metadata_agent: null
+     neutron_ovs_agent: null
+     neutron_sriov_agent: null
+@@ -406,17 +406,18 @@ pod:
+     neutron_rpc_server: null
+     neutron_server: null
+     neutron_tests: null
++    bootstrap: null
+     db_sync: null
+   runtimeClassName:
+-    neutron_bagpipe_bgp: null
+-    neutron_bgp_dragent: null
++    bagpipe_bgp: null
++    bgp_dragent: null
+     neutron_dhcp_agent: null
+     neutron_l2gw_agent: null
+     neutron_l3_agent: null
+     neutron_lb_agent: null
+     neutron_metadata_agent: null
+     neutron_netns_cleanup_cron: null
+-    neutron_ovn_vpn_agent: null
++    ovn_vpn_agent: null
+     neutron_ovn_metadata_agent: null
+     neutron_ovs_agent: null
+     neutron_sriov_agent: null
+@@ -424,6 +425,7 @@ pod:
+     neutron_rpc_server: null
+     neutron_server: null
+     neutron_tests: null
++    bootstrap: null
+     db_sync: null
+   sidecars:
+     neutron_policy_server: false
diff --git a/charts/patches/nova/0001-Resolve-two-redundant-securityContext-problems.patch b/charts/patches/nova/0001-Resolve-two-redundant-securityContext-problems.patch
new file mode 100644
index 0000000..e76bfde
--- /dev/null
+++ b/charts/patches/nova/0001-Resolve-two-redundant-securityContext-problems.patch
@@ -0,0 +1,25 @@
+From f2940941f44ee41bc631941ea5fc316ac8b8253b Mon Sep 17 00:00:00 2001
+From: Dong Ma <dong.ma@vexxhost.com>
+Date: Tue, 11 Feb 2025 15:19:31 +0000
+Subject: [PATCH] Resolve two redundant securityContext problems
+
+---
+ nova/templates/statefulset-compute-ironic.yaml | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/nova/templates/statefulset-compute-ironic.yaml b/nova/templates/statefulset-compute-ironic.yaml
+index 377555d6..37d3fc5a 100644
+--- a/nova/templates/statefulset-compute-ironic.yaml
++++ b/nova/templates/statefulset-compute-ironic.yaml
+@@ -51,8 +51,6 @@ spec:
+ {{ tuple $envAll "nova" "compute-ironic" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+       nodeSelector:
+         {{ .Values.labels.agent.compute_ironic.node_selector_key }}: {{ .Values.labels.agent.compute_ironic.node_selector_value }}
+-      securityContext:
+-        runAsUser: 0
+       hostPID: true
+       dnsPolicy: ClusterFirstWithHostNet
+       initContainers:
+-- 
+2.34.1
+
diff --git a/charts/patches/nova/0002-Enable-priority-runtime-ClassName-for-Nova.patch b/charts/patches/nova/0002-Enable-priority-runtime-ClassName-for-Nova.patch
new file mode 100644
index 0000000..ce6b1e0
--- /dev/null
+++ b/charts/patches/nova/0002-Enable-priority-runtime-ClassName-for-Nova.patch
@@ -0,0 +1,267 @@
+From 5be27382cc31cf6540abe3d6bcfd05f33ba1077e Mon Sep 17 00:00:00 2001
+From: Dong Ma <dong.ma@vexxhost.com>
+Date: Tue, 11 Feb 2025 16:47:00 +0000
+Subject: [PATCH] Enable {priority,runtime}ClassName for Nova
+
+---
+ .../cron-job-archive-deleted-rows.yaml        |  6 ++++
+ .../nova/templates/cron-job-cell-setup.yaml   |  6 ++++
+ .../templates/cron-job-service-cleaner.yaml   |  6 ++++
+ .../nova/templates/daemonset-compute.yaml     |  6 ++++
+ .../templates/deployment-api-metadata.yaml    |  6 ++++
+ .../nova/templates/deployment-api-osapi.yaml  |  6 ++++
+ .../nova/templates/deployment-conductor.yaml  |  6 ++++
+ .../nova/templates/deployment-novncproxy.yaml |  6 ++++
+ .../nova/templates/deployment-scheduler.yaml  |  6 ++++
+ .../nova/templates/deployment-spiceproxy.yaml |  6 ++++
+ .../nova/templates/pod-rally-test.yaml        |  6 ++++
+ .../templates/statefulset-compute-ironic.yaml |  6 ++++
+ .../nova/values.yaml                          | 28 +++++++++++++++++++
+ 13 files changed, 100 insertions(+)
+
+diff --git a/nova/templates/cron-job-archive-deleted-rows.yaml b/nova/templates/cron-job-archive-deleted-rows.yaml
+index 7316b3ac..48e83ab2 100644
+--- a/nova/templates/cron-job-archive-deleted-rows.yaml
++++ b/nova/templates/cron-job-archive-deleted-rows.yaml
+@@ -42,6 +42,12 @@ spec:
+           labels:
+ {{ tuple $envAll "nova" "archive-deleted-rows" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 12 }}
+         spec:
++{{ with .Values.pod.priorityClassName.nova_archive_deleted_rows }}
++          priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.nova_archive_deleted_rows }}
++          runtimeClassName: {{ . }}
++{{ end }}
+           serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "archive_deleted_rows" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 10 }}
+           restartPolicy: OnFailure
+diff --git a/nova/templates/cron-job-cell-setup.yaml b/nova/templates/cron-job-cell-setup.yaml
+index b90b84e8..23840ce4 100644
+--- a/nova/templates/cron-job-cell-setup.yaml
++++ b/nova/templates/cron-job-cell-setup.yaml
+@@ -42,6 +42,12 @@ spec:
+           labels:
+ {{ tuple $envAll "nova" "cell-setup" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 12 }}
+         spec:
++{{ with .Values.pod.priorityClassName.nova_cell_setup }}
++          priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.nova_cell_setup }}
++          runtimeClassName: {{ . }}
++{{ end }}
+           serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "cell_setup" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 10 }}
+           restartPolicy: OnFailure
+diff --git a/nova/templates/cron-job-service-cleaner.yaml b/nova/templates/cron-job-service-cleaner.yaml
+index dd61db79..c5153043 100644
+--- a/nova/templates/cron-job-service-cleaner.yaml
++++ b/nova/templates/cron-job-service-cleaner.yaml
+@@ -42,6 +42,12 @@ spec:
+           labels:
+ {{ tuple $envAll "nova" "service-cleaner" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 12 }}
+         spec:
++{{ with .Values.pod.priorityClassName.nova_service_cleaner }}
++          priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.nova_service_cleaner }}
++          runtimeClassName: {{ . }}
++{{ end }}
+           serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "service_cleaner" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 10 }}
+           restartPolicy: OnFailure
+diff --git a/nova/templates/daemonset-compute.yaml b/nova/templates/daemonset-compute.yaml
+index 3ad00ff2..e62ea760 100644
+--- a/nova/templates/daemonset-compute.yaml
++++ b/nova/templates/daemonset-compute.yaml
+@@ -90,6 +90,12 @@ spec:
+ {{ tuple "nova_compute" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
+ {{ dict "envAll" $envAll "podName" "nova-compute-default" "containerNames" (list "nova-compute" "init" "nova-compute-init" "nova-compute-vnc-init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
++{{ with .Values.pod.priorityClassName.nova_compute }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.nova_compute }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "nova" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       nodeSelector:
+diff --git a/nova/templates/deployment-api-metadata.yaml b/nova/templates/deployment-api-metadata.yaml
+index 51e30c9d..c4c64f92 100644
+--- a/nova/templates/deployment-api-metadata.yaml
++++ b/nova/templates/deployment-api-metadata.yaml
+@@ -60,6 +60,12 @@ spec:
+ {{ tuple "nova_api_metadata" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
+ {{ dict "envAll" $envAll "podName" "nova-api-metadata" "containerNames" (list "nova-api-metadata-init" "nova-api" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
++{{ with .Values.pod.priorityClassName.nova_api_metadata }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.nova_api_metadata }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "nova" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       affinity:
+diff --git a/nova/templates/deployment-api-osapi.yaml b/nova/templates/deployment-api-osapi.yaml
+index b203ba6c..70a65f59 100644
+--- a/nova/templates/deployment-api-osapi.yaml
++++ b/nova/templates/deployment-api-osapi.yaml
+@@ -60,6 +60,12 @@ spec:
+ {{ tuple "nova_api_osapi" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
+ {{ dict "envAll" $envAll "podName" "nova-api-osapi" "containerNames" (list "nova-osapi" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
++{{ with .Values.pod.priorityClassName.nova_api_osapi }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.nova_api_osapi }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "nova" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       affinity:
+diff --git a/nova/templates/deployment-conductor.yaml b/nova/templates/deployment-conductor.yaml
+index b58b3855..50a7ddbf 100644
+--- a/nova/templates/deployment-conductor.yaml
++++ b/nova/templates/deployment-conductor.yaml
+@@ -69,6 +69,12 @@ spec:
+ {{ tuple "nova_conductor" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
+ {{ dict "envAll" $envAll "podName" "nova-conductor" "containerNames" (list "nova-conductor" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
++{{ with .Values.pod.priorityClassName.nova_conductor }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.nova_conductor }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "nova" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       affinity:
+diff --git a/nova/templates/deployment-novncproxy.yaml b/nova/templates/deployment-novncproxy.yaml
+index f4c1d8ba..670e1ccb 100644
+--- a/nova/templates/deployment-novncproxy.yaml
++++ b/nova/templates/deployment-novncproxy.yaml
+@@ -58,6 +58,12 @@ spec:
+ {{ tuple "nova_novncproxy" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
+ {{ dict "envAll" $envAll "podName" "nova-novncproxy" "containerNames" (list "nova-novncproxy" "nova-novncproxy-init-assets" "nova-novncproxy-init" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
++{{ with .Values.pod.priorityClassName.nova_novncproxy }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.nova_novncproxy }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "nova" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       affinity:
+diff --git a/nova/templates/deployment-scheduler.yaml b/nova/templates/deployment-scheduler.yaml
+index bba444c9..a8a529a9 100644
+--- a/nova/templates/deployment-scheduler.yaml
++++ b/nova/templates/deployment-scheduler.yaml
+@@ -69,6 +69,12 @@ spec:
+ {{ tuple "nova_scheduler" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
+ {{ dict "envAll" $envAll "podName" "nova-scheduler" "containerNames" (list "nova-scheduler" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
++{{ with .Values.pod.priorityClassName.nova_scheduler }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.nova_scheduler }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "nova" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       affinity:
+diff --git a/nova/templates/deployment-spiceproxy.yaml b/nova/templates/deployment-spiceproxy.yaml
+index eca1628e..68b3c004 100644
+--- a/nova/templates/deployment-spiceproxy.yaml
++++ b/nova/templates/deployment-spiceproxy.yaml
+@@ -55,6 +55,12 @@ spec:
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+ {{ tuple "nova_spiceproxy" . | include "helm-toolkit.snippets.custom_pod_annotations" | indent 8 }}
+     spec:
++{{ with .Values.pod.priorityClassName.nova_spiceproxy }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.nova_spiceproxy }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "nova" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       affinity:
+diff --git a/nova/templates/pod-rally-test.yaml b/nova/templates/pod-rally-test.yaml
+index d53f2047..659d4b50 100644
+--- a/nova/templates/pod-rally-test.yaml
++++ b/nova/templates/pod-rally-test.yaml
+@@ -44,6 +44,12 @@ spec:
+ {{ tuple $envAll "nova" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 2 }}
+ {{ end }}
+   restartPolicy: Never
++{{ with .Values.pod.priorityClassName.nova_tests }}
++  priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.nova_tests }}
++  runtimeClassName: {{ . }}
++{{ end }}
+   serviceAccountName: {{ $serviceAccountName }}
+   initContainers:
+ {{ tuple $envAll "tests" $mounts_tests_init | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 4 }}
+diff --git a/nova/templates/statefulset-compute-ironic.yaml b/nova/templates/statefulset-compute-ironic.yaml
+index 37d3fc5a..deb6c7c0 100644
+--- a/nova/templates/statefulset-compute-ironic.yaml
++++ b/nova/templates/statefulset-compute-ironic.yaml
+@@ -45,6 +45,12 @@ spec:
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+ {{ dict "envAll" $envAll "podName" "nova-compute-default" "containerNames" (list "nova-compute") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
++{{ with .Values.pod.priorityClassName.nova_compute_ironic }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.nova_compute_ironic }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "nova" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       affinity:
+diff --git a/nova/values.yaml b/nova/values.yaml
+index e0c5866e..4967835c 100644
+--- a/nova/values.yaml
++++ b/nova/values.yaml
+@@ -2037,6 +2037,34 @@ endpoints:
+         default: 80
+ 
+ pod:
++  priorityClassName:
++    nova_compute: null
++    nova_api_metadata: null
++    nova_api_osapi: null
++    nova_conductor: null
++    nova_novncproxy: null
++    nova_scheduler: null
++    nova_spiceproxy: null
++    nova_archive_deleted_rows: null
++    nova_cell_setup: null
++    nova_service_cleaner: null
++    nova_compute_ironic: null
++    nova_tests: null
++    db_sync: null
++  runtimeClassName:
++    nova_compute: null
++    nova_api_metadata: null
++    nova_api_osapi: null
++    nova_conductor: null
++    nova_novncproxy: null
++    nova_scheduler: null
++    nova_spiceproxy: null
++    nova_archive_deleted_rows: null
++    nova_cell_setup: null
++    nova_service_cleaner: null
++    nova_compute_ironic: null
++    nova_tests: null
++    db_sync: null
+   probes:
+     rpc_timeout: 60
+     rpc_retries: 2
+-- 
+2.34.1
+
diff --git a/charts/patches/nova/0003-Fix-the-missing-priority-runtime-ClassName.patch b/charts/patches/nova/0003-Fix-the-missing-priority-runtime-ClassName.patch
new file mode 100644
index 0000000..880ecc4
--- /dev/null
+++ b/charts/patches/nova/0003-Fix-the-missing-priority-runtime-ClassName.patch
@@ -0,0 +1,59 @@
+From 1df9feeb330507bc2f988951e2b4813408621cb3 Mon Sep 17 00:00:00 2001
+From: Dong Ma <dong.ma@vexxhost.com>
+Date: Thu, 13 Feb 2025 16:15:52 +0000
+Subject: [PATCH] Fix the missing {priority,runtime}ClassName
+
+diff --git a/nova/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl b/nova/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+index 6b77004f..da3c4819 100644
+--- a/nova/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
++++ b/nova/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+@@ -70,6 +70,12 @@ spec:
+       annotations:
+ {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
+     spec:
++{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.bootstrap }}
++      priorityClassName: {{ $envAll.Values.pod.priorityClassName.bootstrap }}
++{{- end }}
++{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.bootstrap }}
++      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.bootstrap }}
++{{- end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       restartPolicy: OnFailure
+       {{ tuple $envAll "bootstrap" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
+diff --git a/nova/templates/job-bootstrap.yaml b/nova/templates/job-bootstrap.yaml
+index de8812dd..72cc319d 100644
+--- a/nova/templates/job-bootstrap.yaml
++++ b/nova/templates/job-bootstrap.yaml
+@@ -40,6 +40,12 @@ spec:
+ {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
+     spec:
+ {{ dict "envAll" $envAll "application" "bootstrap" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
++{{ with .Values.pod.priorityClassName.bootstrap }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.bootstrap }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       restartPolicy: OnFailure
+       nodeSelector:
+diff --git a/nova/values.yaml b/nova/values.yaml
+index 4967835c..2b1a924a 100644
+--- a/nova/values.yaml
++++ b/nova/values.yaml
+@@ -2050,6 +2050,7 @@ pod:
+     nova_service_cleaner: null
+     nova_compute_ironic: null
+     nova_tests: null
++    bootstrap: null
+     db_sync: null
+   runtimeClassName:
+     nova_compute: null
+@@ -2064,6 +2065,7 @@ pod:
+     nova_service_cleaner: null
+     nova_compute_ironic: null
+     nova_tests: null
++    bootstrap: null
+     db_sync: null
+   probes:
+     rpc_timeout: 60
diff --git a/charts/patches/octavia/0002-Enable-priority-runtime-ClassName-for-Octavia.patch b/charts/patches/octavia/0002-Enable-priority-runtime-ClassName-for-Octavia.patch
new file mode 100644
index 0000000..842edd6
--- /dev/null
+++ b/charts/patches/octavia/0002-Enable-priority-runtime-ClassName-for-Octavia.patch
@@ -0,0 +1,114 @@
+From 128338c29bab59691b7a3bb610dc9f7889587871 Mon Sep 17 00:00:00 2001
+From: Dong Ma <dong.ma@vexxhost.com>
+Date: Wed, 12 Feb 2025 02:49:32 +0000
+Subject: [PATCH] Enable {priority,runtime}ClassName for Octavia
+
+---
+ .../octavia/templates/daemonset-health-manager.yaml  |  6 ++++++
+ .../octavia/templates/deployment-api.yaml            |  6 ++++++
+ .../octavia/templates/deployment-housekeeping.yaml   |  6 ++++++
+ .../octavia/templates/deployment-worker.yaml         |  8 +++++++-
+ .../octavia/values.yaml                              | 12 ++++++++++++
+ 5 files changed, 37 insertions(+), 1 deletion(-)
+
+diff --git a/octavia/templates/daemonset-health-manager.yaml b/octavia/templates/daemonset-health-manager.yaml
+index a355e86e..1fcda415 100644
+--- a/octavia/templates/daemonset-health-manager.yaml
++++ b/octavia/templates/daemonset-health-manager.yaml
+@@ -46,6 +46,12 @@ spec:
+         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+     spec:
++{{ with .Values.pod.priorityClassName.octavia_health_manager }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.octavia_health_manager }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       dnsPolicy: ClusterFirstWithHostNet
+       hostNetwork: true
+diff --git a/octavia/templates/deployment-api.yaml b/octavia/templates/deployment-api.yaml
+index da159724..e584f768 100644
+--- a/octavia/templates/deployment-api.yaml
++++ b/octavia/templates/deployment-api.yaml
+@@ -45,6 +45,12 @@ spec:
+         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+     spec:
++{{ with .Values.pod.priorityClassName.octavia_api }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.octavia_api }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       affinity:
+ {{ tuple $envAll "octavia" "api" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+diff --git a/octavia/templates/deployment-housekeeping.yaml b/octavia/templates/deployment-housekeeping.yaml
+index 39903b6b..584ca180 100644
+--- a/octavia/templates/deployment-housekeeping.yaml
++++ b/octavia/templates/deployment-housekeeping.yaml
+@@ -45,6 +45,12 @@ spec:
+         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+     spec:
++{{ with .Values.pod.priorityClassName.octavia_housekeeping }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.octavia_housekeeping }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       dnsPolicy: ClusterFirstWithHostNet
+       hostNetwork: true
+diff --git a/octavia/templates/deployment-worker.yaml b/octavia/templates/deployment-worker.yaml
+index 54cf68d7..561a604c 100644
+--- a/octavia/templates/deployment-worker.yaml
++++ b/octavia/templates/deployment-worker.yaml
+@@ -45,6 +45,12 @@ spec:
+         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+     spec:
++{{ with .Values.pod.priorityClassName.octavia_worker }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.octavia_worker }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       dnsPolicy: ClusterFirstWithHostNet
+       hostNetwork: true
+@@ -99,4 +105,4 @@ spec:
+             secretName: octavia-etc
+             defaultMode: 0444
+ {{ if $mounts_octavia_worker.volumes }}{{ toYaml $mounts_octavia_worker.volumes | indent 8 }}{{ end }}
+-{{- end }}
+\ No newline at end of file
++{{- end }}
+diff --git a/octavia/values.yaml b/octavia/values.yaml
+index b15114a5..448c781e 100644
+--- a/octavia/values.yaml
++++ b/octavia/values.yaml
+@@ -526,6 +526,18 @@ endpoints:
+         public: 80
+ 
+ pod:
++  priorityClassName:
++    octavia_health_manager: null
++    octavia_api: null
++    octavia_housekeeping: null
++    octavia_worker: null
++    db_sync: null
++  runtimeClassName:
++    octavia_health_manager: null
++    octavia_api: null
++    octavia_housekeeping: null
++    octavia_worker: null
++    db_sync: null
+   user:
+     octavia:
+       uid: 42424
+-- 
+2.34.1
+
diff --git a/charts/patches/octavia/0003-Fix-the-missing-priority-runtime-ClassName.patch b/charts/patches/octavia/0003-Fix-the-missing-priority-runtime-ClassName.patch
new file mode 100644
index 0000000..84e16c1
--- /dev/null
+++ b/charts/patches/octavia/0003-Fix-the-missing-priority-runtime-ClassName.patch
@@ -0,0 +1,41 @@
+From 1df9feeb330507bc2f988951e2b4813408621cb3 Mon Sep 17 00:00:00 2001
+From: Dong Ma <dong.ma@vexxhost.com>
+Date: Thu, 13 Feb 2025 16:15:52 +0000
+Subject: [PATCH] Fix the missing {priority,runtime}ClassName
+
+diff --git a/octavia/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl b/octavia/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+index 6b77004f..da3c4819 100644
+--- a/octavia/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
++++ b/octavia/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+@@ -70,6 +70,12 @@ spec:
+       annotations:
+ {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
+     spec:
++{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.bootstrap }}
++      priorityClassName: {{ $envAll.Values.pod.priorityClassName.bootstrap }}
++{{- end }}
++{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.bootstrap }}
++      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.bootstrap }}
++{{- end }}
+       serviceAccountName: {{ $serviceAccountName }}
+       restartPolicy: OnFailure
+       {{ tuple $envAll "bootstrap" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
+diff --git a/octavia/values.yaml b/octavia/values.yaml
+index 448c781e..36fcbf0b 100644
+--- a/octavia/values.yaml
++++ b/octavia/values.yaml
+@@ -531,12 +531,14 @@ pod:
+     octavia_api: null
+     octavia_housekeeping: null
+     octavia_worker: null
++    bootstrap: null
+     db_sync: null
+   runtimeClassName:
+     octavia_health_manager: null
+     octavia_api: null
+     octavia_housekeeping: null
+     octavia_worker: null
++    bootstrap: null
+     db_sync: null
+   user:
+     octavia:
diff --git a/charts/patches/ovn/0002-add-ovn-network-logging-parser.patch b/charts/patches/ovn/0002-add-ovn-network-logging-parser.patch
new file mode 100644
index 0000000..cb50183
--- /dev/null
+++ b/charts/patches/ovn/0002-add-ovn-network-logging-parser.patch
@@ -0,0 +1,333 @@
+From fea8c6e46350ecdaa6bf43aaafc22313910f7cf4 Mon Sep 17 00:00:00 2001
+From: ricolin <rlin@vexxhost.com>
+Date: Wed, 13 Nov 2024 16:49:53 +0800
+Subject: [PATCH] Add OVN network logging parser
+
+Change-Id: I03a1c600c161536e693743219912199fabc1e5a5
+---
+
+diff --git a/ovn/templates/bin/_ovn-network-logging-parser.sh.tpl b/ovn/templates/bin/_ovn-network-logging-parser.sh.tpl
+new file mode 100644
+index 0000000..06eaaa7
+--- /dev/null
++++ b/ovn/templates/bin/_ovn-network-logging-parser.sh.tpl
+@@ -0,0 +1,28 @@
++#!/bin/bash
++
++{{/*
++Licensed under the Apache License, Version 2.0 (the "License");
++you may not use this file except in compliance with the License.
++You may obtain a copy of the License at
++
++   http://www.apache.org/licenses/LICENSE-2.0
++
++Unless required by applicable law or agreed to in writing, software
++distributed under the License is distributed on an "AS IS" BASIS,
++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++See the License for the specific language governing permissions and
++limitations under the License.
++*/}}
++
++set -ex
++COMMAND="${@:-start}"
++
++function start () {
++  exec uwsgi --ini /etc/neutron/neutron-ovn-network-logging-parser-uwsgi.ini
++}
++
++function stop () {
++  kill -TERM 1
++}
++
++$COMMAND
+diff --git a/ovn/templates/configmap-bin.yaml b/ovn/templates/configmap-bin.yaml
+index 82001f9..7754747 100644
+--- a/ovn/templates/configmap-bin.yaml
++++ b/ovn/templates/configmap-bin.yaml
+@@ -26,4 +26,6 @@
+ {{- end }}
+   ovn-controller-init.sh: |
+ {{ tuple "bin/_ovn-controller-init.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
++  ovn-network-logging-parser.sh: |
++{{ tuple "bin/_ovn-network-logging-parser.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+ {{- end }}
+diff --git a/ovn/templates/configmap-etc.yaml b/ovn/templates/configmap-etc.yaml
+index 47b84be..0d221f1 100644
+--- a/ovn/templates/configmap-etc.yaml
++++ b/ovn/templates/configmap-etc.yaml
+@@ -17,6 +17,12 @@
+ {{- $envAll := index . 1 }}
+ {{- with $envAll }}
+ 
++{{- if empty (index .Values.conf.ovn_network_logging_parser_uwsgi.uwsgi "http-socket") -}}
++{{- $http_socket_port := tuple "ovn_logging_parser" "service" "api" . | include "helm-toolkit.endpoints.endpoint_port_lookup" | toString }}
++{{- $http_socket := printf "0.0.0.0:%s" $http_socket_port }}
++{{- $_ := set .Values.conf.ovn_network_logging_parser_uwsgi.uwsgi "http-socket" $http_socket -}}
++{{- end -}}
++
+ ---
+ apiVersion: v1
+ kind: Secret
+@@ -25,7 +31,7 @@
+ type: Opaque
+ data:
+   auto_bridge_add: {{ toJson $envAll.Values.conf.auto_bridge_add | b64enc }}
+-
++  neutron-ovn-network-logging-parser-uwsgi.ini: {{ include "helm-toolkit.utils.to_oslo_conf" .Values.conf.ovn_network_logging_parser_uwsgi | b64enc }}
+ {{- end }}
+ {{- end }}
+ 
+diff --git a/ovn/templates/daemonset-controller.yaml b/ovn/templates/daemonset-controller.yaml
+index ae6b33c..3c2933f 100644
+--- a/ovn/templates/daemonset-controller.yaml
++++ b/ovn/templates/daemonset-controller.yaml
+@@ -156,6 +156,52 @@
+               mountPath: /var/log/ovn
+             - name: run-openvswitch
+               mountPath: /run/ovn
++        {{- if .Values.pod.sidecars.vector }}
++        - name: vector
++{{ tuple $envAll "vector" | include "helm-toolkit.snippets.image" | indent 10 }}
++{{ tuple $envAll $envAll.Values.pod.resources.vector | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
++{{ dict "envAll" $envAll "application" "ovn_controller" "container" "vector" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
++          command:
++            - vector
++            - --config
++            - /etc/vector/vector.toml
++          volumeMounts:
++            - name: vector-config
++              mountPath: /etc/vector
++            - name: logs
++              mountPath: /logs
++            - name: vector-data
++              mountPath: /var/lib/vector
++        {{- end }}
++        {{- if .Values.pod.sidecars.ovn_logging_parser }}
++        - name: log-parser
++{{ tuple $envAll "ovn_logging_parser" | include "helm-toolkit.snippets.image" | indent 10 }}
++{{ tuple $envAll $envAll.Values.pod.resources.ovn_logging_parser | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
++{{ dict "envAll" $envAll "application" "ovn_controller" "container" "ovn_logging_parser" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
++          command:
++            - /tmp/ovn-network-logging-parser.sh
++            - start
++          env:
++            - name: VECTOR_HTTP_ENDPOINT
++              value: http://localhost:5001
++          ports:
++            - name: http
++              containerPort: {{ tuple "ovn_logging_parser" "service" "api" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
++              protocol: TCP
++          volumeMounts:
++            - name: neutron-etc
++              mountPath: /etc/neutron/neutron.conf
++              subPath: neutron.conf
++              readOnly: true
++            - name: ovn-bin
++              mountPath: /tmp/ovn-network-logging-parser.sh
++              subPath: ovn-network-logging-parser.sh
++              readOnly: true
++            - name: ovn-etc
++              mountPath: /etc/neutron/neutron-ovn-network-logging-parser-uwsgi.ini
++              subPath: neutron-ovn-network-logging-parser-uwsgi.ini
++              readOnly: true
++        {{- end }}
+       volumes:
+         - name: ovn-bin
+           configMap:
+@@ -179,4 +225,17 @@
+             type: DirectoryOrCreate
+         - name: gw-enabled
+           emptyDir: {}
++        {{- if .Values.pod.sidecars.vector }}
++        - name: vector-config
++          secret:
++            secretName: ovn-vector-config
++        - name: vector-data
++          emptyDir: {}
++        {{- end }}
++        {{- if .Values.pod.sidecars.ovn_logging_parser }}
++        - name: neutron-etc
++          secret:
++            secretName: neutron-etc
++            defaultMode: 0444
++        {{- end }}
+ {{- end }}
+diff --git a/ovn/templates/secret-vector.yaml b/ovn/templates/secret-vector.yaml
+new file mode 100644
+index 0000000..989f3af
+--- /dev/null
++++ b/ovn/templates/secret-vector.yaml
+@@ -0,0 +1,26 @@
++{{/*
++Licensed under the Apache License, Version 2.0 (the "License");
++you may not use this file except in compliance with the License.
++You may obtain a copy of the License at
++
++   http://www.apache.org/licenses/LICENSE-2.0
++
++Unless required by applicable law or agreed to in writing, software
++distributed under the License is distributed on an "AS IS" BASIS,
++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++See the License for the specific language governing permissions and
++limitations under the License.
++*/}}
++
++{{- if .Values.pod.sidecars.vector }}
++{{- $envAll := . }}
++
++---
++apiVersion: v1
++kind: Secret
++metadata:
++  name: ovn-vector-config
++type: Opaque
++data:
++{{- include "helm-toolkit.snippets.values_template_renderer" (dict "envAll" $envAll "template" .Values.conf.vector "key" "vector.toml" "format" "Secret" ) | indent 2 }}
++{{- end }}
+diff --git a/ovn/values.yaml b/ovn/values.yaml
+index ca60650..0191c85 100644
+--- a/ovn/values.yaml
++++ b/ovn/values.yaml
+@@ -27,6 +27,8 @@
+     ovn_controller_kubectl: docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_jammy
+     dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal
+     image_repo_sync: docker.io/library/docker:17.07.0
++    vector: docker.io/timberio/vector:0.39.0-debian
++    ovn_logging_parser: docker.io/openstackhelm/neutron:2024.1-ubuntu_jammy
+   pull_policy: "IfNotPresent"
+   local_registry:
+     active: false
+@@ -82,6 +84,55 @@
+   #   br-private: eth0
+   #   br-public: eth1
+   auto_bridge_add: {}
++  ovn_network_logging_parser_uwsgi:
++    uwsgi:
++      add-header: "Connection: close"
++      buffer-size: 65535
++      die-on-term: true
++      enable-threads: true
++      exit-on-reload: false
++      hook-master-start: unix_signal:15 gracefully_kill_them_all
++      lazy-apps: true
++      log-x-forwarded-for: true
++      master: true
++      processes: 1
++      procname-prefix-spaced: "neutron-ovn-network-logging-parser:"
++      route-user-agent: '^kube-probe.* donotlog:'
++      thunder-lock: true
++      worker-reload-mercy: 80
++      wsgi-file: /var/lib/openstack/bin/neutron-ovn-network-logging-parser-wsgi
++  vector: |
++    [sources.file_logs]
++    type = "file"
++    include = [ "/logs/ovn-controller.log" ]
++
++    [sinks.ovn_log_parser_in]
++    type = "http"
++    inputs = ["file_logs"]
++    uri = "{{ tuple "ovn_logging_parser" "default" "api" . | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup" }}"
++    encoding.codec = "json"
++    method = "post"
++
++    [sources.ovn_log_parser_out]
++    type = "http_server"
++    address = "0.0.0.0:5001"
++    encoding = "json"
++
++    [transforms.parse_log_message]
++    type = "remap"
++    inputs = ["ovn_log_parser_out"]
++    source = '''
++      del(.source_type)
++      del(.path)
++    '''
++
++    [sinks.loki_sink]
++    type = "loki"
++    labels.event_source = "network_logs"
++    inputs = ["parse_log_message"]
++    endpoint = "http://loki.monitoring:3100"
++    encoding.codec = "json"
++    tenant_id = "{{`{{ project_id }}`}}"
+ 
+ pod:
+   # NOTE: should be same as nova.pod.use_fqdn.compute
+@@ -102,6 +153,12 @@
+         controller:
+           readOnlyRootFilesystem: true
+           privileged: true
++        ovn_logging_parser:
++          allowPrivilegeEscalation: false
++          readOnlyRootFilesystem: true
++        vector:
++          allowPrivilegeEscalation: false
++          readOnlyRootFilesystem: true
+   tolerations:
+     ovn_ovsdb_nb:
+       enabled: false
+@@ -216,6 +273,20 @@
+       limits:
+         memory: "1024Mi"
+         cpu: "2000m"
++    ovn_logging_parser:
++      requests:
++        memory: "128Mi"
++        cpu: "100m"
++      limits:
++        memory: "256Mi"
++        cpu: "500m"
++    vector:
++      requests:
++        memory: "128Mi"
++        cpu: "100m"
++      limits:
++        memory: "256Mi"
++        cpu: "500m"
+     jobs:
+       image_repo_sync:
+         requests:
+@@ -225,6 +296,10 @@
+           memory: "1024Mi"
+           cpu: "2000m"
+ 
++  sidecars:
++    ovn_logging_parser: false
++    vector: false
++
+ secrets:
+   oci_image_registry:
+     ovn: ovn-oci-image-registry-key
+@@ -283,6 +358,22 @@
+         default: 6642
+       raft:
+         default: 6644
++  ovn_logging_parser:
++    name: ovn-logging-parser
++    namespace: null
++    hosts:
++      default: localhost
++    host_fqdn_override:
++      default: localhost
++    scheme:
++      default: 'http'
++      service: 'http'
++    path:
++      default: "/logs"
++    port:
++      api:
++        default: 9697
++        service: 9697
+ 
+ network_policy:
+   ovn_ovsdb_nb:
+diff --git a/releasenotes/notes/ovn-a82eced671495a3d.yaml b/releasenotes/notes/ovn-a82eced671495a3d.yaml
+new file mode 100644
+index 0000000..c429489
+--- /dev/null
++++ b/releasenotes/notes/ovn-a82eced671495a3d.yaml
+@@ -0,0 +1,4 @@
++---
++ovn:
++  - Add OVN network logging parser
++...
diff --git a/charts/patches/ovn/0003-Switch-OVN-to-ovsinit.patch b/charts/patches/ovn/0003-Switch-OVN-to-ovsinit.patch
new file mode 100644
index 0000000..ba04dcf
--- /dev/null
+++ b/charts/patches/ovn/0003-Switch-OVN-to-ovsinit.patch
@@ -0,0 +1,84 @@
+From 6c2dac4c0bcd71d400c113b922ba862d7945a09e Mon Sep 17 00:00:00 2001
+From: Mohammed Naser <mnaser@vexxhost.com>
+Date: Mon, 17 Feb 2025 11:00:30 -0500
+Subject: [PATCH] Switch OVN to ovsinit
+
+---
+ ovn/templates/bin/_ovn-controller-init.sh.tpl | 55 +------------------
+ 1 file changed, 2 insertions(+), 53 deletions(-)
+
+diff --git a/ovn/templates/bin/_ovn-controller-init.sh.tpl b/ovn/templates/bin/_ovn-controller-init.sh.tpl
+index 357c069d..006582f9 100644
+--- a/ovn/templates/bin/_ovn-controller-init.sh.tpl
++++ b/ovn/templates/bin/_ovn-controller-init.sh.tpl
+@@ -25,58 +25,6 @@ function get_ip_address_from_interface {
+   echo ${ip}
+ }
+ 
+-function get_ip_prefix_from_interface {
+-  local interface=$1
+-  local prefix=$(ip -4 -o addr s "${interface}" | awk '{ print $4; exit }' | awk -F '/' 'NR==1 {print $2}')
+-  if [ -z "${prefix}" ] ; then
+-    exit 1
+-  fi
+-  echo ${prefix}
+-}
+-
+-function migrate_ip_from_nic {
+-  src_nic=$1
+-  bridge_name=$2
+-
+-  # Enabling explicit error handling: We must avoid to lose the IP
+-  # address in the migration process. Hence, on every error, we
+-  # attempt to assign the IP back to the original NIC and exit.
+-  set +e
+-
+-  ip=$(get_ip_address_from_interface ${src_nic})
+-  prefix=$(get_ip_prefix_from_interface ${src_nic})
+-
+-  bridge_ip=$(get_ip_address_from_interface "${bridge_name}")
+-  bridge_prefix=$(get_ip_prefix_from_interface "${bridge_name}")
+-
+-  ip link set ${bridge_name} up
+-
+-  if [[ -n "${ip}" && -n "${prefix}" ]]; then
+-    ip addr flush dev ${src_nic}
+-    if [ $? -ne 0 ] ; then
+-      ip addr add ${ip}/${prefix} dev ${src_nic}
+-      echo "Error while flushing IP from ${src_nic}."
+-      exit 1
+-    fi
+-
+-    ip addr add ${ip}/${prefix} dev "${bridge_name}"
+-    if [ $? -ne 0 ] ; then
+-      echo "Error assigning IP to bridge "${bridge_name}"."
+-      ip addr add ${ip}/${prefix} dev ${src_nic}
+-      exit 1
+-    fi
+-  elif [[ -n "${bridge_ip}" && -n "${bridge_prefix}" ]]; then
+-    echo "Bridge '${bridge_name}' already has IP assigned. Keeping the same:: IP:[${bridge_ip}]; Prefix:[${bridge_prefix}]..."
+-  elif [[ -z "${bridge_ip}" && -z "${ip}" ]]; then
+-    echo "Interface and bridge have no ips configured. Leaving as is."
+-  else
+-    echo "Interface ${src_nic} has invalid IP address. IP:[${ip}]; Prefix:[${prefix}]..."
+-    exit 1
+-  fi
+-
+-  set -e
+-}
+-
+ function get_current_system_id {
+   ovs-vsctl --if-exists get Open_vSwitch . external_ids:system-id | tr -d '"'
+ }
+@@ -174,6 +122,7 @@ do
+   if [ -n "$iface" ] && [ "$iface" != "null" ] && ( ip link show $iface 1>/dev/null 2>&1 );
+   then
+     ovs-vsctl --may-exist add-port $bridge $iface
+-    migrate_ip_from_nic $iface $bridge
+   fi
+ done
++
++/usr/local/bin/ovsinit /tmp/auto_bridge_add
+-- 
+2.47.0
+
diff --git a/charts/patches/ovn/0003-add-ovn-northd-pod-affinity.patch b/charts/patches/ovn/0003-add-ovn-northd-pod-affinity.patch
new file mode 100644
index 0000000..37acdae
--- /dev/null
+++ b/charts/patches/ovn/0003-add-ovn-northd-pod-affinity.patch
@@ -0,0 +1,13 @@
+diff --git a/charts/ovn/templates/deployment-northd.yaml b/charts/ovn/templates/deployment-northd.yaml
+index 2dbbb689..baf5a0c7 100644
+--- a/ovn/templates/deployment-northd.yaml
++++ b/ovn/templates/deployment-northd.yaml
+@@ -49,6 +49,8 @@ spec:
+         configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
+     spec:
+       serviceAccountName: {{ $serviceAccountName }}
++      affinity:
++{{- tuple $envAll "ovn" "ovn_northd" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+       nodeSelector:
+         {{ .Values.labels.ovn_northd.node_selector_key }}: {{ .Values.labels.ovn_northd.node_selector_value }}
+       initContainers:
diff --git a/charts/patches/placement/0002-Enable-priority-runtime-ClassName-for-Placement.patch b/charts/patches/placement/0002-Enable-priority-runtime-ClassName-for-Placement.patch
new file mode 100644
index 0000000..02041af
--- /dev/null
+++ b/charts/patches/placement/0002-Enable-priority-runtime-ClassName-for-Placement.patch
@@ -0,0 +1,47 @@
+From 9a7ec6f874f55d5c19068c54a1a2e0dc7f5f8235 Mon Sep 17 00:00:00 2001
+From: Dong Ma <dong.ma@vexxhost.com>
+Date: Wed, 12 Feb 2025 03:36:18 +0000
+Subject: [PATCH] Enable {priority,runtime}ClassName for Placement
+
+---
+ placement/templates/deployment.yaml | 6 ++++++
+ placement/values.yaml               | 6 ++++++
+ 2 files changed, 12 insertions(+)
+
+diff --git a/placement/templates/deployment.yaml b/placement/templates/deployment.yaml
+index 3082216d..da02d33b 100644
+--- a/placement/templates/deployment.yaml
++++ b/placement/templates/deployment.yaml
+@@ -47,6 +47,12 @@ spec:
+         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
+ {{ dict "envAll" $envAll "podName" "placement-api" "containerNames" (list "placement-api" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
+     spec:
++{{ with .Values.pod.priorityClassName.placement }}
++      priorityClassName: {{ . }}
++{{ end }}
++{{ with .Values.pod.runtimeClassName.placement }}
++      runtimeClassName: {{ . }}
++{{ end }}
+       serviceAccountName: {{ $serviceAccountName }}
+ {{ dict "envAll" $envAll "application" "placement" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+       affinity:
+diff --git a/placement/values.yaml b/placement/values.yaml
+index 0f27a247..20e46564 100644
+--- a/placement/values.yaml
++++ b/placement/values.yaml
+@@ -278,6 +278,12 @@ endpoints:
+         service: 8778
+ 
+ pod:
++  priorityClassName:
++    placement: null
++    db_sync: null
++  runtimeClassName:
++    placement: null
++    db_sync: null
+   security_context:
+     placement:
+       pod:
+-- 
+2.34.1
+
diff --git a/charts/placement/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl b/charts/placement/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
index 4696c88..5c35dd0 100644
--- a/charts/placement/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
+++ b/charts/placement/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
@@ -68,6 +68,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_sync }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_sync }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_sync }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_sync }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "db_sync" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/placement/templates/deployment.yaml b/charts/placement/templates/deployment.yaml
index 3082216..da02d33 100644
--- a/charts/placement/templates/deployment.yaml
+++ b/charts/placement/templates/deployment.yaml
@@ -47,6 +47,12 @@
         configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
 {{ dict "envAll" $envAll "podName" "placement-api" "containerNames" (list "placement-api" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
+{{ with .Values.pod.priorityClassName.placement }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.placement }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "placement" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       affinity:
diff --git a/charts/placement/values.yaml b/charts/placement/values.yaml
index 0f27a24..20e4656 100644
--- a/charts/placement/values.yaml
+++ b/charts/placement/values.yaml
@@ -278,6 +278,12 @@
         service: 8778
 
 pod:
+  priorityClassName:
+    placement: null
+    db_sync: null
+  runtimeClassName:
+    placement: null
+    db_sync: null
   security_context:
     placement:
       pod:
diff --git a/charts/staffeln/charts/helm-toolkit/Chart.yaml b/charts/staffeln/charts/helm-toolkit/Chart.yaml
index e6aec81..d4c0ea2 100644
--- a/charts/staffeln/charts/helm-toolkit/Chart.yaml
+++ b/charts/staffeln/charts/helm-toolkit/Chart.yaml
@@ -9,4 +9,4 @@
 sources:
 - https://opendev.org/openstack/openstack-helm-infra
 - https://opendev.org/openstack/openstack-helm
-version: 0.2.54
+version: 0.2.69
diff --git a/charts/staffeln/charts/helm-toolkit/requirements.lock b/charts/staffeln/charts/helm-toolkit/requirements.lock
new file mode 100644
index 0000000..e28bc5d
--- /dev/null
+++ b/charts/staffeln/charts/helm-toolkit/requirements.lock
@@ -0,0 +1,3 @@
+dependencies: []
+digest: sha256:643d5437104296e21d906ecb15b2c96ad278f20cfc4af53b12bb6069bd853726
+generated: "0001-01-01T00:00:00Z"
diff --git a/charts/staffeln/charts/helm-toolkit/templates/endpoints/_authenticated_endpoint_uri_lookup.tpl b/charts/staffeln/charts/helm-toolkit/templates/endpoints/_authenticated_endpoint_uri_lookup.tpl
index 12b84de..d7390d8 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/endpoints/_authenticated_endpoint_uri_lookup.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/endpoints/_authenticated_endpoint_uri_lookup.tpl
@@ -50,7 +50,7 @@
 {{- $endpointScheme := tuple $type $endpoint $port $context | include "helm-toolkit.endpoints.keystone_endpoint_scheme_lookup" }}
 {{- $userMap := index $context.Values.endpoints ( $type | replace "-" "_" ) "auth" $userclass }}
 {{- $endpointUser := index $userMap "username" }}
-{{- $endpointPass := index $userMap "password" }}
+{{- $endpointPass := index $userMap "password" | urlquery }}
 {{- $endpointHost := tuple $type $endpoint $context | include "helm-toolkit.endpoints.endpoint_host_lookup" }}
 {{- $endpointPort := tuple $type $endpoint $port $context | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
 {{- $endpointPath := tuple $type $endpoint $port $context | include "helm-toolkit.endpoints.keystone_endpoint_path_lookup" }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/endpoints/_authenticated_transport_endpoint_uri_lookup.tpl b/charts/staffeln/charts/helm-toolkit/templates/endpoints/_authenticated_transport_endpoint_uri_lookup.tpl
index b7cf287..b9ac9d9 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/endpoints/_authenticated_transport_endpoint_uri_lookup.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/endpoints/_authenticated_transport_endpoint_uri_lookup.tpl
@@ -100,7 +100,7 @@
 {{-   $ssMap := index $context.Values.endpoints ( $type | replace "-" "_" ) "statefulset" | default false}}
 {{-   $hostFqdnOverride := index $context.Values.endpoints ( $type | replace "-" "_" ) "host_fqdn_override" }}
 {{-   $endpointUser := index $userMap "username" }}
-{{-   $endpointPass := index $userMap "password" }}
+{{-   $endpointPass := index $userMap "password" | urlquery }}
 {{-   $endpointHostSuffix := tuple $type $endpoint $context | include "helm-toolkit.endpoints.endpoint_host_lookup" }}
 {{-   $endpointPort := tuple $type $endpoint $port $context | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
 {{-   $local := dict "endpointCredsAndHosts" list -}}
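Editorial note on the two `urlquery` changes above: without escaping, a password containing reserved URI characters would corrupt the generated connection URI. A minimal Python sketch of the same idea, with an illustrative (hypothetical) password; Go's `urlquery` (url.QueryEscape) behaves similarly, differing only in how spaces are encoded:

```python
from urllib.parse import quote

# Hypothetical credentials; '@' and '#' would break an unescaped URI.
user = "staffeln"
password = "p@ss#word"
host, port = "rabbitmq.openstack.svc", 5672

# Percent-encode the password before embedding it in the URI.
uri = f"rabbit://{user}:{quote(password, safe='')}@{host}:{port}/"
print(uri)  # rabbit://staffeln:p%40ss%23word@rabbitmq.openstack.svc:5672/
```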
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_ingress.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_ingress.tpl
index 4c476b2..cacb4b8 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_ingress.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_ingress.tpl
@@ -59,7 +59,7 @@
               default: 9311
               public: 80
     usage: |
-      {{- include "helm-toolkit.manifests.ingress" ( dict "envAll" . "backendServiceType" "key-manager" "backendPort" "b-api" "endpoint" "public" ) -}}
+      {{- include "helm-toolkit.manifests.ingress" ( dict "envAll" . "backendServiceType" "key-manager" "backendPort" "b-api" "endpoint" "public" "pathType" "Prefix" ) -}}
     return: |
       ---
       apiVersion: networking.k8s.io/v1
@@ -67,16 +67,16 @@
       metadata:
         name: barbican
         annotations:
-          kubernetes.io/ingress.class: "nginx"
           nginx.ingress.kubernetes.io/rewrite-target: /
 
       spec:
+        ingressClassName: "nginx"
         rules:
           - host: barbican
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -86,7 +86,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -96,7 +96,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -108,10 +108,10 @@
       metadata:
         name: barbican-namespace-fqdn
         annotations:
-          kubernetes.io/ingress.class: "nginx"
           nginx.ingress.kubernetes.io/rewrite-target: /
 
       spec:
+        ingressClassName: "nginx"
         tls:
           - secretName: barbican-tls-public
             hosts:
@@ -121,7 +121,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -133,10 +133,10 @@
       metadata:
         name: barbican-cluster-fqdn
         annotations:
-          kubernetes.io/ingress.class: "nginx-cluster"
           nginx.ingress.kubernetes.io/rewrite-target: /
 
       spec:
+        ingressClassName: "nginx-cluster"
         tls:
           - secretName: barbican-tls-public
             hosts:
@@ -146,7 +146,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -194,7 +194,7 @@
               default: 9311
               public: 80
     usage: |
-      {{- include "helm-toolkit.manifests.ingress" ( dict "envAll" . "backendServiceType" "key-manager" "backendPort" "b-api" "endpoint" "public" ) -}}
+      {{- include "helm-toolkit.manifests.ingress" ( dict "envAll" . "backendServiceType" "key-manager" "backendPort" "b-api" "endpoint" "public" "pathType" "Prefix" ) -}}
     return: |
       ---
       apiVersion: networking.k8s.io/v1
@@ -202,10 +202,10 @@
       metadata:
         name: barbican
         annotations:
-          kubernetes.io/ingress.class: "nginx"
           nginx.ingress.kubernetes.io/rewrite-target: /
 
       spec:
+        ingressClassName: "nginx"
         tls:
           - secretName: barbican-tls-public
             hosts:
@@ -217,7 +217,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -227,7 +227,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -237,7 +237,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -294,7 +294,7 @@
                 name: ca-issuer
                 kind: Issuer
     usage: |
-      {{- include "helm-toolkit.manifests.ingress" ( dict "envAll" . "backendServiceType" "key-manager" "backendPort" "b-api" "endpoint" "public" "certIssuer" "ca-issuer" ) -}}
+      {{- include "helm-toolkit.manifests.ingress" ( dict "envAll" . "backendServiceType" "key-manager" "backendPort" "b-api" "endpoint" "public" "certIssuer" "ca-issuer" "pathType" "Prefix" ) -}}
     return: |
       ---
       apiVersion: networking.k8s.io/v1
@@ -302,12 +302,12 @@
       metadata:
         name: barbican
         annotations:
-          kubernetes.io/ingress.class: "nginx"
           cert-manager.io/issuer: ca-issuer
           certmanager.k8s.io/issuer: ca-issuer
           nginx.ingress.kubernetes.io/backend-protocol: https
           nginx.ingress.kubernetes.io/secure-backends: "true"
       spec:
+        ingressClassName: "nginx"
         tls:
           - secretName: barbican-tls-public-certmanager
             hosts:
@@ -319,7 +319,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -329,7 +329,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -339,7 +339,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -396,7 +396,7 @@
                 name: ca-issuer
                 kind: ClusterIssuer
     usage: |
-      {{- include "helm-toolkit.manifests.ingress" ( dict "envAll" . "backendServiceType" "key-manager" "backendPort" "b-api" "endpoint" "public" "certIssuer" "ca-issuer") -}}
+      {{- include "helm-toolkit.manifests.ingress" ( dict "envAll" . "backendServiceType" "key-manager" "backendPort" "b-api" "endpoint" "public" "certIssuer" "ca-issuer" "pathType" "Prefix" ) -}}
     return: |
       ---
       apiVersion: networking.k8s.io/v1
@@ -404,12 +404,12 @@
       metadata:
         name: barbican
         annotations:
-          kubernetes.io/ingress.class: "nginx"
           cert-manager.io/cluster-issuer: ca-issuer
           certmanager.k8s.io/cluster-issuer: ca-issuer
           nginx.ingress.kubernetes.io/backend-protocol: https
           nginx.ingress.kubernetes.io/secure-backends: "true"
       spec:
+        ingressClassName: "nginx"
         tls:
           - secretName: barbican-tls-public-certmanager
             hosts:
@@ -421,7 +421,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -431,7 +431,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -441,7 +441,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -479,7 +479,7 @@
             grafana:
               public: grafana-tls-public
     usage: |
-      {{- $ingressOpts := dict "envAll" . "backendService" "grafana" "backendServiceType" "grafana" "backendPort" "dashboard" -}}
+      {{- $ingressOpts := dict "envAll" . "backendService" "grafana" "backendServiceType" "grafana" "backendPort" "dashboard" "pathType" "Prefix" -}}
       {{ $ingressOpts | include "helm-toolkit.manifests.ingress" }}
     return: |
       ---
@@ -488,16 +488,16 @@
       metadata:
         name: grafana
         annotations:
-          kubernetes.io/ingress.class: "nginx"
           nginx.ingress.kubernetes.io/rewrite-target: /
 
       spec:
+        ingressClassName: "nginx"
         rules:
           - host: grafana
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: grafana-dashboard
@@ -507,7 +507,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: grafana-dashboard
@@ -517,7 +517,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: grafana-dashboard
@@ -529,10 +529,10 @@
       metadata:
         name: grafana-namespace-fqdn
         annotations:
-          kubernetes.io/ingress.class: "nginx"
           nginx.ingress.kubernetes.io/rewrite-target: /
 
       spec:
+        ingressClassName: "nginx"
         tls:
           - secretName: grafana-tls-public
             hosts:
@@ -543,7 +543,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: grafana-dashboard
@@ -553,7 +553,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: grafana-dashboard
@@ -565,10 +565,10 @@
       metadata:
         name: grafana-cluster-fqdn
         annotations:
-          kubernetes.io/ingress.class: "nginx-cluster"
           nginx.ingress.kubernetes.io/rewrite-target: /
 
       spec:
+        ingressClassName: "nginx-cluster"
         tls:
           - secretName: grafana-tls-public
             hosts:
@@ -579,7 +579,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: grafana-dashboard
@@ -589,7 +589,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: grafana-dashboard
@@ -602,11 +602,12 @@
 {{- $vHost := index . "vHost" -}}
 {{- $backendName := index . "backendName" -}}
 {{- $backendPort := index . "backendPort" -}}
+{{- $pathType := index . "pathType" -}}
 - host: {{ $vHost }}
   http:
     paths:
       - path: /
-        pathType: ImplementationSpecific
+        pathType: {{ $pathType }}
         backend:
           service:
             name: {{ $backendName }}
@@ -624,6 +625,7 @@
 {{- $backendServiceType := index . "backendServiceType" -}}
 {{- $backendPort := index . "backendPort" -}}
 {{- $endpoint := index . "endpoint" | default "public" -}}
+{{- $pathType := index . "pathType" | default "Prefix" -}}
 {{- $certIssuer := index . "certIssuer" | default "" -}}
 {{- $ingressName := tuple $backendServiceType $endpoint $envAll | include "helm-toolkit.endpoints.hostname_short_endpoint_lookup" }}
 {{- $backendName := tuple $backendServiceType "internal" $envAll | include "helm-toolkit.endpoints.hostname_short_endpoint_lookup" }}
@@ -639,7 +641,6 @@
 metadata:
   name: {{ $ingressName }}
   annotations:
-    kubernetes.io/ingress.class: {{ index $envAll.Values.network $backendService "ingress" "classes" "namespace" | quote }}
 {{- if $certIssuer }}
     cert-manager.io/{{ $certIssuerType }}: {{ $certIssuer }}
     certmanager.k8s.io/{{ $certIssuerType }}: {{ $certIssuer }}
@@ -650,6 +651,7 @@
 {{- end }}
 {{ toYaml (index $envAll.Values.network $backendService "ingress" "annotations") | indent 4 }}
 spec:
+  ingressClassName: {{ index $envAll.Values.network $backendService "ingress" "classes" "namespace" | quote }}
 {{- $host := index $envAll.Values.endpoints ( $backendServiceType | replace "-" "_" ) "hosts" }}
 {{- if $certIssuer }}
 {{- $secretName := index $envAll.Values.secrets "tls" ( $backendServiceType | replace "-" "_" ) $backendService $endpoint }}
@@ -681,7 +683,7 @@
 {{- end }}
   rules:
 {{- range $key1, $vHost := tuple $hostName (printf "%s.%s" $hostName $envAll.Release.Namespace) (printf "%s.%s.svc.%s" $hostName $envAll.Release.Namespace $envAll.Values.endpoints.cluster_domain_suffix) }}
-{{- $hostRules := dict "vHost" $vHost "backendName" $backendName "backendPort" $backendPort }}
+{{- $hostRules := dict "vHost" $vHost "backendName" $backendName "backendPort" $backendPort "pathType" $pathType }}
 {{ $hostRules | include "helm-toolkit.manifests.ingress._host_rules" | indent 4 }}
 {{- end }}
 {{- if not ( hasSuffix ( printf ".%s.svc.%s" $envAll.Release.Namespace $envAll.Values.endpoints.cluster_domain_suffix) $hostNameFull) }}
@@ -695,9 +697,9 @@
 metadata:
   name: {{ printf "%s-%s-%s" $ingressName $ingressController "fqdn" }}
   annotations:
-    kubernetes.io/ingress.class: {{ index $envAll.Values.network $backendService "ingress" "classes" $ingressController | quote }}
 {{ toYaml (index $envAll.Values.network $backendService "ingress" "annotations") | indent 4 }}
 spec:
+  ingressClassName: {{ index $envAll.Values.network $backendService "ingress" "classes" $ingressController | quote }}
 {{- $host := index $envAll.Values.endpoints ( $backendServiceType | replace "-" "_" ) "host_fqdn_override" }}
 {{- if hasKey $host $endpoint }}
 {{- $endpointHost := index $host $endpoint }}
@@ -719,7 +721,7 @@
 {{- end }}
   rules:
 {{- range $vHost := $vHosts }}
-{{- $hostNameFullRules := dict "vHost" $vHost "backendName" $backendName "backendPort" $backendPort }}
+{{- $hostNameFullRules := dict "vHost" $vHost "backendName" $backendName "backendPort" $backendPort "pathType" $pathType }}
 {{ $hostNameFullRules | include "helm-toolkit.manifests.ingress._host_rules" | indent 4 }}
 {{- end }}
 {{- end }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
index 5d98c8b..da3c481 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
@@ -51,6 +51,7 @@
 {{ toYaml $jobLabels | indent 4 }}
 {{- end }}
   annotations:
+{{ tuple $serviceAccountName $envAll | include "helm-toolkit.snippets.custom_job_annotations" | indent 4 -}}
 {{- if $jobAnnotations }}
 {{ toYaml $jobAnnotations | indent 4 }}
 {{- end }}
@@ -69,6 +70,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.bootstrap }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.bootstrap }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.bootstrap }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.bootstrap }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "bootstrap" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-db-drop-mysql.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-db-drop-mysql.tpl
index 62ed119..2b7ff2c 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-db-drop-mysql.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-db-drop-mysql.tpl
@@ -54,6 +54,7 @@
   annotations:
     "helm.sh/hook": pre-delete
     "helm.sh/hook-delete-policy": hook-succeeded
+{{ tuple $serviceAccountName $envAll | include "helm-toolkit.snippets.custom_job_annotations" | indent 4 -}}
 {{- if $jobAnnotations }}
 {{ toYaml $jobAnnotations | indent 4 }}
 {{- end }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-db-init-mysql.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-db-init-mysql.tpl
index 745e8da..b8a1dce 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-db-init-mysql.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-db-init-mysql.tpl
@@ -52,6 +52,7 @@
 {{ toYaml $jobLabels | indent 4 }}
 {{- end }}
   annotations:
+{{ tuple $serviceAccountName $envAll | include "helm-toolkit.snippets.custom_job_annotations" | indent 4 -}}
 {{- if $jobAnnotations }}
 {{ toYaml $jobAnnotations | indent 4 }}
 {{- end }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
index 24d2496..5c35dd0 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
@@ -49,6 +49,7 @@
 {{ toYaml $jobLabels | indent 4 }}
 {{- end }}
   annotations:
+{{ tuple $serviceAccountName $envAll | include "helm-toolkit.snippets.custom_job_annotations" | indent 4 -}}
 {{- if $jobAnnotations }}
 {{ toYaml $jobAnnotations | indent 4 }}
 {{- end }}
@@ -67,6 +68,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_sync }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_sync }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_sync }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_sync }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "db_sync" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-ks-endpoints.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-ks-endpoints.tpl
index 3a7df7f..d69c9e6 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-ks-endpoints.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-ks-endpoints.tpl
@@ -52,6 +52,7 @@
 {{ toYaml $jobLabels | indent 4 }}
 {{- end }}
   annotations:
+{{ tuple $serviceAccountName $envAll | include "helm-toolkit.snippets.custom_job_annotations" | indent 4 -}}
 {{- if $jobAnnotations }}
 {{ toYaml $jobAnnotations | indent 4 }}
 {{- end }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-ks-service.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-ks-service.tpl
index a109e3c..9604c63 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-ks-service.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-ks-service.tpl
@@ -52,6 +52,7 @@
 {{ toYaml $jobLabels | indent 4 }}
 {{- end }}
   annotations:
+{{ tuple $serviceAccountName $envAll | include "helm-toolkit.snippets.custom_job_annotations" | indent 4 -}}
 {{- if $jobAnnotations }}
 {{ toYaml $jobAnnotations | indent 4 }}
 {{- end }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-ks-user.yaml.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-ks-user.yaml.tpl
index 905eb71..58dcdc5 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-ks-user.yaml.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-ks-user.yaml.tpl
@@ -74,6 +74,7 @@
 {{ toYaml $jobLabels | indent 4 }}
 {{- end }}
   annotations:
+{{ tuple $serviceAccountName $envAll | include "helm-toolkit.snippets.custom_job_annotations" | indent 4 -}}
 {{- if $jobAnnotations }}
 {{ toYaml $jobAnnotations | indent 4 }}
 {{- end }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-rabbit-init.yaml.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-rabbit-init.yaml.tpl
index 6982064..2cfadaf 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-rabbit-init.yaml.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-rabbit-init.yaml.tpl
@@ -42,6 +42,7 @@
 {{ toYaml $jobLabels | indent 4 }}
 {{- end }}
   annotations:
+{{ tuple $serviceAccountName $envAll | include "helm-toolkit.snippets.custom_job_annotations" | indent 4 -}}
 {{- if $jobAnnotations }}
 {{ toYaml $jobAnnotations | indent 4 }}
 {{- end }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-s3-bucket.yaml.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-s3-bucket.yaml.tpl
index 29cb993..b5fdc09 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-s3-bucket.yaml.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-s3-bucket.yaml.tpl
@@ -49,6 +49,7 @@
 {{- end }}
   annotations:
     {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
+{{ tuple $serviceAccountName $envAll | include "helm-toolkit.snippets.custom_job_annotations" | indent 4 -}}
 {{- if $jobAnnotations }}
 {{ toYaml $jobAnnotations | indent 4 }}
 {{- end }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-s3-user.yaml.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-s3-user.yaml.tpl
index 50d9af5..77d1a71 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-s3-user.yaml.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-s3-user.yaml.tpl
@@ -47,6 +47,7 @@
   annotations:
     "helm.sh/hook-delete-policy": before-hook-creation
     {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
+{{ tuple $serviceAccountName $envAll | include "helm-toolkit.snippets.custom_job_annotations" | indent 4 -}}
 {{- if $jobAnnotations }}
 {{ toYaml $jobAnnotations | indent 4 }}
 {{- end }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_secret-registry.yaml.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_secret-registry.yaml.tpl
index 4854bb1..7ad505b 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_secret-registry.yaml.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_secret-registry.yaml.tpl
@@ -17,6 +17,11 @@
   Creates a manifest for a authenticating a registry with a secret
 examples:
   - values: |
+      annotations:
+        secret:
+          oci_image_registry:
+            {{ $serviceName }}:
+              custom.tld/key: "value"
       secrets:
         oci_image_registry:
           {{ $serviceName }}: {{ $keyName }}
@@ -36,30 +41,8 @@
     kind: Secret
     metadata:
       name: {{ $secretName }}
-    type: kubernetes.io/dockerconfigjson
-    data:
-      dockerconfigjson: {{ $dockerAuth }}
-
-  - values: |
-      secrets:
-        oci_image_registry:
-          {{ $serviceName }}: {{ $keyName }}
-      endpoints:
-        oci_image_registry:
-          name: oci-image-registry
-          auth:
-            enabled: true
-             {{ $serviceName }}:
-                name: {{ $userName }}
-                password: {{ $password }}
-  usage: |
-    {{- include "helm-toolkit.manifests.secret_registry" ( dict "envAll" . "registryUser" .Chart.Name ) -}}
-  return: |
-    ---
-    apiVersion: v1
-    kind: Secret
-    metadata:
-      name: {{ $secretName }}
+      annotations:
+        custom.tld/key: "value"
     type: kubernetes.io/dockerconfigjson
     data:
       dockerconfigjson: {{ $dockerAuth }}
@@ -87,6 +70,8 @@
 kind: Secret
 metadata:
   name: {{ $secretName }}
+  annotations:
+{{ tuple "oci_image_registry" $registryUser $envAll | include "helm-toolkit.snippets.custom_secret_annotations" | indent 4 }}
 type: kubernetes.io/dockerconfigjson
 data:
   .dockerconfigjson: {{ $dockerAuth }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_secret-tls.yaml.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_secret-tls.yaml.tpl
index 24a7045..c800340 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_secret-tls.yaml.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_secret-tls.yaml.tpl
@@ -17,6 +17,11 @@
   Creates a manifest for a services public tls secret
 examples:
   - values: |
+      annotations:
+        secret:
+          tls:
+            key_manager_api_public:
+              custom.tld/key: "value"
       secrets:
         tls:
           key_manager:
@@ -41,6 +46,8 @@
     kind: Secret
     metadata:
       name: barbican-tls-public
+      annotations:
+        custom.tld/key: "value"
     type: kubernetes.io/tls
     data:
       tls.key: Rk9PLUtFWQo=
@@ -88,11 +95,15 @@
 {{- if kindIs "map" $endpointHost }}
 {{- if hasKey $endpointHost "tls" }}
 {{- if and $endpointHost.tls.key $endpointHost.tls.crt }}
+
+{{- $customAnnotationKey := printf "%s_%s_%s" ( $backendServiceType | replace "-" "_" ) $backendService $endpoint }}
 ---
 apiVersion: v1
 kind: Secret
 metadata:
   name: {{ index $envAll.Values.secrets.tls ( $backendServiceType | replace "-" "_" ) $backendService $endpoint }}
+  annotations:
+{{ tuple "tls" $customAnnotationKey $envAll | include "helm-toolkit.snippets.custom_secret_annotations" | indent 4 }}
 type: kubernetes.io/tls
 data:
   tls.key: {{ $endpointHost.tls.key | b64enc }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/scripts/_db-drop.py.tpl b/charts/staffeln/charts/helm-toolkit/templates/scripts/_db-drop.py.tpl
index 03884fa..c6a7521 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/scripts/_db-drop.py.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/scripts/_db-drop.py.tpl
@@ -33,6 +33,7 @@
     PARSER_OPTS = {"strict": False}
 import logging
 from sqlalchemy import create_engine
+from sqlalchemy import text
 
 # Create logger, console handler and formatter
 logger = logging.getLogger('OpenStack-Helm DB Drop')
@@ -124,7 +125,12 @@
 
 # Delete DB
 try:
-    root_engine.execute("DROP DATABASE IF EXISTS {0}".format(database))
+    with root_engine.connect() as connection:
+        connection.execute(text("DROP DATABASE IF EXISTS {0}".format(database)))
+        try:
+            connection.commit()
+        except AttributeError:
+            pass
     logger.info("Deleted database {0}".format(database))
 except:
     logger.critical("Could not drop database {0}".format(database))
@@ -132,7 +138,12 @@
 
 # Delete DB User
 try:
-    root_engine.execute("DROP USER IF EXISTS {0}".format(user))
+    with root_engine.connect() as connection:
+        connection.execute(text("DROP USER IF EXISTS {0}".format(user)))
+        try:
+            connection.commit()
+        except AttributeError:
+            pass
     logger.info("Deleted user {0}".format(user))
 except:
     logger.critical("Could not delete user {0}".format(user))
diff --git a/charts/staffeln/charts/helm-toolkit/templates/scripts/_db-init.py.tpl b/charts/staffeln/charts/helm-toolkit/templates/scripts/_db-init.py.tpl
index 6027b95..1917f78 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/scripts/_db-init.py.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/scripts/_db-init.py.tpl
@@ -33,6 +33,7 @@
     PARSER_OPTS = {"strict": False}
 import logging
 from sqlalchemy import create_engine
+from sqlalchemy import text
 
 # Create logger, console handler and formatter
 logger = logging.getLogger('OpenStack-Helm DB Init')
@@ -124,7 +125,12 @@
 
 # Create DB
 try:
-    root_engine.execute("CREATE DATABASE IF NOT EXISTS {0}".format(database))
+    with root_engine.connect() as connection:
+        connection.execute(text("CREATE DATABASE IF NOT EXISTS {0}".format(database)))
+        try:
+            connection.commit()
+        except AttributeError:
+            pass
     logger.info("Created database {0}".format(database))
 except:
     logger.critical("Could not create database {0}".format(database))
@@ -132,11 +138,16 @@
 
 # Create DB User
 try:
-    root_engine.execute(
-        "CREATE USER IF NOT EXISTS \'{0}\'@\'%%\' IDENTIFIED BY \'{1}\' {2}".format(
-            user, password, mysql_x509))
-    root_engine.execute(
-        "GRANT ALL ON `{0}`.* TO \'{1}\'@\'%%\'".format(database, user))
+    with root_engine.connect() as connection:
+        connection.execute(
+            text("CREATE USER IF NOT EXISTS \'{0}\'@\'%%\' IDENTIFIED BY \'{1}\' {2}".format(
+                user, password, mysql_x509)))
+        connection.execute(
+            text("GRANT ALL ON `{0}`.* TO \'{1}\'@\'%%\'".format(database, user)))
+        try:
+            connection.commit()
+        except AttributeError:
+            pass
     logger.info("Created user {0} for {1}".format(user, database))
 except:
     logger.critical("Could not create user {0} for {1}".format(user, database))
diff --git a/charts/staffeln/charts/helm-toolkit/templates/scripts/db-backup-restore/_backup_main.sh.tpl b/charts/staffeln/charts/helm-toolkit/templates/scripts/db-backup-restore/_backup_main.sh.tpl
index 3963bd4..695cb2e 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/scripts/db-backup-restore/_backup_main.sh.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/scripts/db-backup-restore/_backup_main.sh.tpl
@@ -49,6 +49,13 @@
 #                                          A random number between min and max delay is generated
 #                                          to set the delay.
 #
+#         RGW backup throttling variables:
+#           export THROTTLE_BACKUPS_ENABLED   Boolean variable to enable backup throttling
+#           export THROTTLE_LIMIT             Number of simultaneous RGW upload sessions
+#           export THROTTLE_LOCK_EXPIRE_AFTER Time in seconds after which an orphaned flag file expires
+#           export THROTTLE_RETRY_AFTER       Time in seconds to wait before retry
+#           export THROTTLE_CONTAINER_NAME    Name of RGW container to place flag files into
+#
 # The database-specific functions that need to be implemented are:
 #   dump_databases_to_directory <directory> <err_logfile> [scope]
 #       where:
@@ -84,8 +91,10 @@
 #      specified by the "LOCAL_DAYS_TO_KEEP" variable.
 #   4) Removing remote backup tarballs (from the remote gateway) which are older
 #      than the number of days specified by the "REMOTE_DAYS_TO_KEEP" variable.
+#   5) Throttling the load on the remote storage gateway from the client side by
+#      using a dedicated RGW container to store flag files that mark upload
+#      sessions in progress
 #
-
 # Note: not using set -e in this script because more elaborate error handling
 # is needed.
 
@@ -95,7 +104,7 @@
   log ERROR "${DB_NAME}_backup" "${DB_NAMESPACE} namespace: ${MSG}"
   rm -f $ERR_LOG_FILE
   rm -rf $TMP_DIR
-  exit $ERRCODE
+  exit 0
 }
 
 log_verify_backup_exit() {
@@ -104,7 +113,7 @@
   log ERROR "${DB_NAME}_verify_backup" "${DB_NAMESPACE} namespace: ${MSG}"
   rm -f $ERR_LOG_FILE
   # rm -rf $TMP_DIR
-  exit $ERRCODE
+  exit 0
 }
 
 
@@ -218,6 +227,113 @@
   echo "Sleeping for ${DELAY} seconds to spread the load in time..."
   sleep ${DELAY}
 
+  #---------------------------------------------------------------------------
+  # Remote backup throttling
+  export THROTTLE_BACKUPS_ENABLED=$(echo $THROTTLE_BACKUPS_ENABLED | sed 's/"//g')
+  if $THROTTLE_BACKUPS_ENABLED; then
+    # Remove the quotes that were added to the constants when they were
+    # read from the secret.
+    export THROTTLE_LIMIT=$(echo $THROTTLE_LIMIT | sed 's/"//g')
+    export THROTTLE_LOCK_EXPIRE_AFTER=$(echo $THROTTLE_LOCK_EXPIRE_AFTER | sed 's/"//g')
+    export THROTTLE_RETRY_AFTER=$(echo $THROTTLE_RETRY_AFTER | sed 's/"//g')
+    export THROTTLE_CONTAINER_NAME=$(echo $THROTTLE_CONTAINER_NAME | sed 's/"//g')
+
+    # Ensure the throttle container exists, creating it if necessary.
+    RESULT=$(openstack container list 2>&1)
+
+    if [[ $? -eq 0 ]]; then
+      echo $RESULT | grep $THROTTLE_CONTAINER_NAME
+      if [[ $? -ne 0 ]]; then
+        # Find the swift URL from the keystone endpoint list
+        SWIFT_URL=$(openstack catalog show object-store -c endpoints | grep public | awk '{print $4}')
+        if [[ $? -ne 0 ]]; then
+          log WARN "${DB_NAME}_backup" "Unable to get object-store endpoints from keystone catalog."
+          return 2
+        fi
+
+        # Get a token from keystone
+        TOKEN=$(openstack token issue -f value -c id)
+        if [[ $? -ne 0 ]]; then
+          log WARN "${DB_NAME}_backup" "Unable to get keystone token."
+          return 2
+        fi
+
+        # Create the container
+        RES_FILE=$(mktemp -p /tmp)
+        curl -g -i -X PUT ${SWIFT_URL}/${THROTTLE_CONTAINER_NAME} \
+            -H "X-Auth-Token: ${TOKEN}" \
+            -H "X-Storage-Policy: ${STORAGE_POLICY}" 2>&1 > $RES_FILE
+
+        if [[ $? -ne 0 || $(grep "HTTP" $RES_FILE | awk '{print $2}') -ge 400 ]]; then
+          log WARN "${DB_NAME}_backup" "Unable to create container ${THROTTLE_CONTAINER_NAME}"
+          cat $RES_FILE
+          rm -f $RES_FILE
+          return 2
+        fi
+        rm -f $RES_FILE
+
+        swift stat $THROTTLE_CONTAINER_NAME
+        if [[ $? -ne 0 ]]; then
+          log WARN "${DB_NAME}_backup" "Unable to retrieve container ${THROTTLE_CONTAINER_NAME} details after creation."
+          return 2
+        fi
+      fi
+    else
+      echo $RESULT | grep -E "HTTP 401|HTTP 403"
+      if [[ $? -eq 0 ]]; then
+        log ERROR "${DB_NAME}_backup" "Access denied by keystone: ${RESULT}"
+        return 1
+      else
+        echo $RESULT | grep -E "ConnectionError|Failed to discover available identity versions|Service Unavailable|HTTP 50"
+        if [[ $? -eq 0 ]]; then
+          log WARN "${DB_NAME}_backup" "Could not reach the RGW: ${RESULT}"
+          # In this case, keystone or the site/node may be temporarily down.
+          # Return slightly different error code so the calling code can retry
+          return 2
+        else
+          log ERROR "${DB_NAME}_backup" "Could not get container list: ${RESULT}"
+          return 1
+        fi
+      fi
+    fi
+
+    NUMBER_OF_SESSIONS=$(openstack object list $THROTTLE_CONTAINER_NAME -f value | wc -l)
+    log INFO  "${DB_NAME}_backup"  "There are ${NUMBER_OF_SESSIONS} remote sessions right now."
+    while [[ ${NUMBER_OF_SESSIONS} -ge ${THROTTLE_LIMIT} ]]
+    do
+      log INFO "${DB_NAME}_backup" "Current number of active uploads is ${NUMBER_OF_SESSIONS}>=${THROTTLE_LIMIT}!"
+      log INFO "${DB_NAME}_backup" "Retrying in ${THROTTLE_RETRY_AFTER} seconds...."
+      sleep ${THROTTLE_RETRY_AFTER}
+      NUMBER_OF_SESSIONS=$(openstack object list $THROTTLE_CONTAINER_NAME -f value | wc -l)
+      log INFO  "${DB_NAME}_backup"  "There are ${NUMBER_OF_SESSIONS} remote sessions right now."
+    done
+
+    # Create a lock file in the throttle container
+    THROTTLE_FILEPATH=$(mktemp -d)
+    THROTTLE_FILE=${CONTAINER_NAME}.lock
+    date +%s > $THROTTLE_FILEPATH/$THROTTLE_FILE
+
+    # Create an object to store the file
+    openstack object create --name $THROTTLE_FILE $THROTTLE_CONTAINER_NAME $THROTTLE_FILEPATH/$THROTTLE_FILE
+    if [[ $? -ne 0 ]]; then
+      log WARN "${DB_NAME}_backup" "Cannot create throttle container object ${THROTTLE_FILE}!"
+      return 2
+    fi
+
+    swift post  $THROTTLE_CONTAINER_NAME $THROTTLE_FILE -H "X-Delete-After:${THROTTLE_LOCK_EXPIRE_AFTER}"
+    if [[ $? -ne 0 ]]; then
+      log WARN "${DB_NAME}_backup" "Cannot set throttle container object ${THROTTLE_FILE} expiration header!"
+      return 2
+    fi
+    openstack object show $THROTTLE_CONTAINER_NAME $THROTTLE_FILE
+    if [[ $? -ne 0 ]]; then
+      log WARN "${DB_NAME}_backup" "Unable to retrieve throttle container object $THROTTLE_FILE after creation."
+      return 2
+    fi
+  fi
+
+  #---------------------------------------------------------------------------
+
   # Create an object to store the file
   openstack object create --name $FILE $CONTAINER_NAME $FILEPATH/$FILE
   if [[ $? -ne 0 ]]; then
@@ -243,7 +359,25 @@
       log ERROR "${DB_NAME}_backup" "Mismatch between the local backup & remote backup MD5 hash values"
       return 2
   fi
-  rm -rf ${REMOTE_FILE}
+  rm -f ${REMOTE_FILE}
+
+  #---------------------------------------------------------------------------
+  # Remote backup throttling
+  export THROTTLE_BACKUPS_ENABLED=$(echo $THROTTLE_BACKUPS_ENABLED | sed 's/"//g')
+  if $THROTTLE_BACKUPS_ENABLED; then
+    # Remove flag file
+    # Delete an object to remove the flag file
+    openstack object delete $THROTTLE_CONTAINER_NAME $THROTTLE_FILE
+    if [[ $? -ne 0 ]]; then
+      log WARN "${DB_NAME}_backup" "Cannot delete throttle container object ${THROTTLE_FILE}"
+      return 0
+    else
+      log INFO "${DB_NAME}_backup" "The throttle container object ${THROTTLE_FILE} has been successfully removed."
+    fi
+    rm -f ${THROTTLE_FILEPATH}/${THROTTLE_FILE}
+  fi
+
+  #---------------------------------------------------------------------------
 
   log INFO "${DB_NAME}_backup" "Created file $FILE in container $CONTAINER_NAME successfully."
   return 0
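The throttling flow added above reduces to: count lock objects in a dedicated container, wait while that count is at the limit, then drop a self-expiring lock object for the duration of the upload and delete it afterwards. A minimal Python sketch of that flow, shelling out to the same `openstack`/`swift` CLI calls the script uses; container name and limits are illustrative:

```python
import subprocess, tempfile, time
from pathlib import Path

THROTTLE_CONTAINER = "backup-throttle"   # illustrative values
THROTTLE_LIMIT = 3
THROTTLE_RETRY_AFTER = 30
THROTTLE_LOCK_EXPIRE_AFTER = 7200

def run(*args):
    return subprocess.run(args, capture_output=True, text=True)

def active_sessions():
    # One lock object per in-flight upload session.
    out = run("openstack", "object", "list", THROTTLE_CONTAINER, "-f", "value")
    return len(out.stdout.splitlines())

def acquire_lock(name):
    while active_sessions() >= THROTTLE_LIMIT:
        time.sleep(THROTTLE_RETRY_AFTER)
    lock = Path(tempfile.mkdtemp()) / f"{name}.lock"
    lock.write_text(str(int(time.time())))
    run("openstack", "object", "create", "--name", lock.name,
        THROTTLE_CONTAINER, str(lock))
    # Let RGW expire the lock if the job dies before releasing it.
    run("swift", "post", THROTTLE_CONTAINER, lock.name,
        "-H", f"X-Delete-After:{THROTTLE_LOCK_EXPIRE_AFTER}")
    return lock.name

def release_lock(lock_name):
    run("openstack", "object", "delete", THROTTLE_CONTAINER, lock_name)
```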
diff --git a/charts/staffeln/charts/helm-toolkit/templates/snippets/_custom_job_annotations.tpl b/charts/staffeln/charts/helm-toolkit/templates/snippets/_custom_job_annotations.tpl
new file mode 100644
index 0000000..fc42614
--- /dev/null
+++ b/charts/staffeln/charts/helm-toolkit/templates/snippets/_custom_job_annotations.tpl
@@ -0,0 +1,76 @@
+{{/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{/*
+abstract: |
+  Adds custom annotations to the job spec of a component.
+examples:
+  - values: |
+      annotations:
+        job:
+          default:
+            custom.tld/key: "value"
+            custom.tld/key2: "value2"
+          keystone_domain_manage:
+            another.tld/foo: "bar"
+    usage: |
+      {{ tuple "keystone_domain_manage" . | include "helm-toolkit.snippets.custom_job_annotations" }}
+    return: |
+      another.tld/foo: bar
+  - values: |
+      annotations:
+        job:
+          default:
+            custom.tld/key: "value"
+            custom.tld/key2: "value2"
+          keystone_domain_manage:
+            another.tld/foo: "bar"
+    usage: |
+      {{ tuple "keystone_bootstrap" . | include "helm-toolkit.snippets.custom_job_annotations" }}
+    return: |
+      custom.tld/key: "value"
+      custom.tld/key2: "value2"
+  - values: |
+      annotations:
+        job:
+          default:
+            custom.tld/key: "value"
+            custom.tld/key2: "value2"
+          keystone_domain_manage:
+            another.tld/foo: "bar"
+          keystone_bootstrap:
+    usage: |
+      {{ tuple "keystone_bootstrap" . | include "helm-toolkit.snippets.custom_job_annotations" }}
+    return: |
+      custom.tld/key: "value"
+      custom.tld/key2: "value2"
+*/}}
+
+{{- define "helm-toolkit.snippets.custom_job_annotations" -}}
+{{- $envAll := index . 1 -}}
+{{- $component := index . 0 | replace "-" "_" -}}
+{{- if (hasKey $envAll.Values "annotations") -}}
+{{- if (hasKey $envAll.Values.annotations "job") -}}
+{{- $annotationsMap := $envAll.Values.annotations.job -}}
+{{- $defaultAnnotations := dict -}}
+{{- if (hasKey $annotationsMap "default" ) -}}
+{{- $defaultAnnotations = $annotationsMap.default -}}
+{{- end -}}
+{{- $annotations := index $annotationsMap $component | default $defaultAnnotations -}}
+{{- if (not (empty $annotations)) -}}
+{{- toYaml $annotations -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
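The lookup performed by `helm-toolkit.snippets.custom_job_annotations` above reduces to: use the per-component map when it is present and non-empty, otherwise fall back to `annotations.job.default`. A plain-Python rendering of that fallback, reusing the values from the snippet's own examples:

```python
# Values taken from the snippet's documentation examples above.
values = {
    "annotations": {
        "job": {
            "default": {"custom.tld/key": "value", "custom.tld/key2": "value2"},
            "keystone_domain_manage": {"another.tld/foo": "bar"},
            "keystone_bootstrap": None,  # explicitly empty -> falls back to default
        }
    }
}

def custom_job_annotations(component: str, values: dict) -> dict:
    jobs = values.get("annotations", {}).get("job", {})
    default = jobs.get("default", {})
    # Mirrors the template: an absent or empty per-component entry uses the default.
    return jobs.get(component.replace("-", "_")) or default

print(custom_job_annotations("keystone_domain_manage", values))  # {'another.tld/foo': 'bar'}
print(custom_job_annotations("keystone_bootstrap", values))      # default annotations
```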
diff --git a/charts/staffeln/charts/helm-toolkit/templates/snippets/_custom_pod_annotations.tpl b/charts/staffeln/charts/helm-toolkit/templates/snippets/_custom_pod_annotations.tpl
new file mode 100644
index 0000000..ecff6e9
--- /dev/null
+++ b/charts/staffeln/charts/helm-toolkit/templates/snippets/_custom_pod_annotations.tpl
@@ -0,0 +1,76 @@
+{{/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{/*
+abstract: |
+  Adds custom annotations to the pod spec of a component.
+examples:
+  - values: |
+      annotations:
+        pod:
+          default:
+            custom.tld/key: "value"
+            custom.tld/key2: "value2"
+          nova_compute:
+            another.tld/foo: "bar"
+    usage: |
+      {{ tuple "nova_compute" . | include "helm-toolkit.snippets.custom_pod_annotations" }}
+    return: |
+      another.tld/foo: bar
+  - values: |
+      annotations:
+        pod:
+          default:
+            custom.tld/key: "value"
+            custom.tld/key2: "value2"
+          nova_compute:
+            another.tld/foo: "bar"
+    usage: |
+      {{ tuple "nova_api" . | include "helm-toolkit.snippets.custom_pod_annotations" }}
+    return: |
+      custom.tld/key: "value"
+      custom.tld/key2: "value2"
+  - values: |
+      annotations:
+        pod:
+          default:
+            custom.tld/key: "value"
+            custom.tld/key2: "value2"
+          nova_compute:
+            another.tld/foo: "bar"
+          nova_api:
+    usage: |
+      {{ tuple "nova_api" . | include "helm-toolkit.snippets.custom_pod_annotations" }}
+    return: |
+      custom.tld/key: "value"
+      custom.tld/key2: "value2"
+*/}}
+
+{{- define "helm-toolkit.snippets.custom_pod_annotations" -}}
+{{- $component := index . 0 -}}
+{{- $envAll := index . 1 -}}
+{{- if (hasKey $envAll.Values "annotations") -}}
+{{- if (hasKey $envAll.Values.annotations "pod") -}}
+{{- $annotationsMap := $envAll.Values.annotations.pod -}}
+{{- $defaultAnnotations := dict -}}
+{{- if (hasKey $annotationsMap "default" ) -}}
+{{- $defaultAnnotations = $annotationsMap.default -}}
+{{- end -}}
+{{- $annotations := index $annotationsMap $component | default $defaultAnnotations -}}
+{{- if (not (empty $annotations)) -}}
+{{- toYaml $annotations -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/snippets/_custom_secret_annotations.tpl b/charts/staffeln/charts/helm-toolkit/templates/snippets/_custom_secret_annotations.tpl
new file mode 100644
index 0000000..19c4380
--- /dev/null
+++ b/charts/staffeln/charts/helm-toolkit/templates/snippets/_custom_secret_annotations.tpl
@@ -0,0 +1,81 @@
+{{/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{/*
+abstract: |
+  Adds custom annotations to the secret spec of a component.
+examples:
+  - values: |
+      annotations:
+        secret:
+          default:
+            custom.tld/key: "value"
+            custom.tld/key2: "value2"
+          identity:
+            admin:
+              another.tld/foo: "bar"
+    usage: |
+      {{ tuple "identity" "admin" . | include "helm-toolkit.snippets.custom_secret_annotations" }}
+    return: |
+      another.tld/foo: bar
+  - values: |
+      annotations:
+        secret:
+          default:
+            custom.tld/key: "value"
+            custom.tld/key2: "value2"
+          identity:
+            admin:
+              another.tld/foo: "bar"
+    usage: |
+      {{ tuple "oslo_db" "admin" . | include "helm-toolkit.snippets.custom_secret_annotations" }}
+    return: |
+      custom.tld/key: "value"
+      custom.tld/key2: "value2"
+  - values: |
+      annotations:
+        secret:
+          default:
+            custom.tld/key: "value"
+            custom.tld/key2: "value2"
+          identity:
+            admin:
+              another.tld/foo: "bar"
+          oslo_db:
+            admin:
+    usage: |
+      {{ tuple "oslo_db" "admin" . | include "helm-toolkit.snippets.custom_secret_annotations" }}
+    return: |
+      custom.tld/key: "value"
+      custom.tld/key2: "value2"
+*/}}
+
+{{- define "helm-toolkit.snippets.custom_secret_annotations" -}}
+{{- $secretType := index . 0 -}}
+{{- $userClass := index . 1 | replace "-" "_" -}}
+{{- $envAll := index . 2 -}}
+{{- if (hasKey $envAll.Values "annotations") -}}
+{{- if (hasKey $envAll.Values.annotations "secret") -}}
+{{- $annotationsMap := index $envAll.Values.annotations.secret $secretType | default dict -}}
+{{- $defaultAnnotations := dict -}}
+{{- if (hasKey $envAll.Values.annotations.secret "default" ) -}}
+{{- $defaultAnnotations = $envAll.Values.annotations.secret.default -}}
+{{- end -}}
+{{- $annotations := index $annotationsMap $userClass | default $defaultAnnotations -}}
+{{- if (not (empty $annotations)) -}}
+{{- toYaml $annotations -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/snippets/_image.tpl b/charts/staffeln/charts/helm-toolkit/templates/snippets/_image.tpl
index 029c93d..678b844 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/snippets/_image.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/snippets/_image.tpl
@@ -19,7 +19,7 @@
   images:
     tags:
       test_image: docker.io/port/test:version-foo
-      image_foo: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
+      image_foo: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal
     pull_policy: IfNotPresent
     local_registry:
       active: true
diff --git a/charts/staffeln/charts/helm-toolkit/templates/snippets/_kubernetes_entrypoint_init_container.tpl b/charts/staffeln/charts/helm-toolkit/templates/snippets/_kubernetes_entrypoint_init_container.tpl
index bed712e..ad628da 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/snippets/_kubernetes_entrypoint_init_container.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/snippets/_kubernetes_entrypoint_init_container.tpl
@@ -19,7 +19,7 @@
 values: |
   images:
     tags:
-      dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
+      dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal
     pull_policy: IfNotPresent
     local_registry:
       active: true
@@ -76,7 +76,7 @@
   {{ tuple . "calico_node" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" }}
 return: |
   - name: init
-    image: "quay.io/airshipit/kubernetes-entrypoint:v1.0.0"
+    image: "quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal"
     imagePullPolicy: IfNotPresent
     securityContext:
       allowPrivilegeEscalation: false
diff --git a/charts/staffeln/charts/helm-toolkit/templates/snippets/_rgw_s3_bucket_user_env_vars_rook.tpl b/charts/staffeln/charts/helm-toolkit/templates/snippets/_rgw_s3_bucket_user_env_vars_rook.tpl
new file mode 100644
index 0000000..08521e0
--- /dev/null
+++ b/charts/staffeln/charts/helm-toolkit/templates/snippets/_rgw_s3_bucket_user_env_vars_rook.tpl
@@ -0,0 +1,28 @@
+{{/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
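+{{/*
+abstract: |
+  Exposes the S3 credentials of each bucket listed under
+  .Values.storage.s3.buckets as environment variables. For every bucket,
+  the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY keys of the secret named
+  by "name" are mapped to <CLIENT>_S3_ACCESS_KEY and <CLIENT>_S3_SECRET_KEY,
+  where <CLIENT> is the upper-cased "client" value with dashes replaced by
+  underscores.
+*/}}
+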
+{{- define "helm-toolkit.snippets.rgw_s3_bucket_user_env_vars_rook" }}
+{{- range $s3Bucket := .Values.storage.s3.buckets }}
+- name: {{ printf "%s_S3_ACCESS_KEY" ($s3Bucket.client | replace "-" "_" | upper) }}
+  valueFrom:
+    secretKeyRef:
+      name: {{ $s3Bucket.name }}
+      key: AWS_ACCESS_KEY_ID
+- name: {{ printf "%s_S3_SECRET_KEY" ($s3Bucket.client | replace "-" "_" | upper) }}
+  valueFrom:
+    secretKeyRef:
+      name: {{ $s3Bucket.name }}
+      key: AWS_SECRET_ACCESS_KEY
+{{- end }}
+{{- end }}
diff --git a/charts/staffeln/templates/deployment-api.yaml b/charts/staffeln/templates/deployment-api.yaml
index 577a062..e61236c 100644
--- a/charts/staffeln/templates/deployment-api.yaml
+++ b/charts/staffeln/templates/deployment-api.yaml
@@ -46,6 +46,12 @@
 {{ dict "envAll" $envAll "podName" "staffeln-api" "containerNames" (list "init" "staffeln-api") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
 {{ dict "envAll" $envAll "application" "staffeln" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+{{ with .Values.pod.priorityClassName.staffeln_api }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.staffeln_api }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       affinity:
 {{ tuple $envAll "staffeln" "api" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
diff --git a/charts/staffeln/templates/deployment-conductor.yaml b/charts/staffeln/templates/deployment-conductor.yaml
index cf0ad77..a88b97f 100644
--- a/charts/staffeln/templates/deployment-conductor.yaml
+++ b/charts/staffeln/templates/deployment-conductor.yaml
@@ -79,6 +79,12 @@
 {{ dict "envAll" $envAll "podName" "staffeln-conductor" "containerNames" (list "init" "staffeln-conductor") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
 {{ dict "envAll" $envAll "application" "staffeln" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+{{ with .Values.pod.priorityClassName.staffeln_conductor }}
+      priorityClassName: {{ . }}
+{{ end }}
+{{ with .Values.pod.runtimeClassName.staffeln_conductor }}
+      runtimeClassName: {{ . }}
+{{ end }}
       serviceAccountName: {{ $serviceAccountName }}
       affinity:
 {{ tuple $envAll "staffeln" "conductor" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
diff --git a/charts/staffeln/values.yaml b/charts/staffeln/values.yaml
index d705115..e6e09c4 100644
--- a/charts/staffeln/values.yaml
+++ b/charts/staffeln/values.yaml
@@ -54,6 +54,18 @@
       - image_repo_sync
 
 pod:
+  priorityClassName:
+    staffeln_api: null
+    staffeln_conductor: null
+    staffeln_tests: null
+    bootstrap: null
+    db_sync: null
+  runtimeClassName:
+    staffeln_api: null
+    staffeln_conductor: null
+    staffeln_tests: null
+    bootstrap: null
+    db_sync: null
   security_context:
     staffeln:
       pod:
diff --git a/charts/tempest/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl b/charts/tempest/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
index 4696c88..5c35dd0 100644
--- a/charts/tempest/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
+++ b/charts/tempest/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
@@ -68,6 +68,12 @@
       annotations:
 {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
     spec:
+{{- if and $envAll.Values.pod.priorityClassName $envAll.Values.pod.priorityClassName.db_sync }}
+      priorityClassName: {{ $envAll.Values.pod.priorityClassName.db_sync }}
+{{- end }}
+{{- if and $envAll.Values.pod.runtimeClassName $envAll.Values.pod.runtimeClassName.db_sync }}
+      runtimeClassName: {{ $envAll.Values.pod.runtimeClassName.db_sync }}
+{{- end }}
       serviceAccountName: {{ $serviceAccountName }}
       restartPolicy: OnFailure
       {{ tuple $envAll "db_sync" | include "helm-toolkit.snippets.kubernetes_image_pull_secrets" | indent 6 }}
diff --git a/crates/imagebumper/Cargo.toml b/crates/imagebumper/Cargo.toml
new file mode 100644
index 0000000..b8af954
--- /dev/null
+++ b/crates/imagebumper/Cargo.toml
@@ -0,0 +1,17 @@
+[package]
+name = "imagebumper"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+async-trait = "0.1.86"
+clap = { version = "4.5.29", features = ["derive"] }
+env_logger = { version = "0.11.6", features = ["unstable-kv"] }
+gitea-sdk = "0.5.0"
+log = { version = "0.4.25", features = ["kv"] }
+octocrab = "0.43.0"
+regex = "1.11.1"
+reqwest = { version = "0.12.12", features = ["json", "native-tls-vendored"] }
+serde_json = "1.0.138"
+tokio = { version = "1.43.0", features = ["fs", "macros", "rt-multi-thread"] }
+url = "2.5.4"
diff --git a/crates/imagebumper/src/clients/github.rs b/crates/imagebumper/src/clients/github.rs
new file mode 100644
index 0000000..b8ac5f3
--- /dev/null
+++ b/crates/imagebumper/src/clients/github.rs
@@ -0,0 +1,36 @@
+use crate::RepositoryClient;
+use async_trait::async_trait;
+use octocrab::Octocrab;
+use std::error::Error;
+use std::sync::Arc;
+
+pub struct Client {
+    client: Arc<Octocrab>,
+}
+
+impl Client {
+    pub fn new() -> Self {
+        Client {
+            client: octocrab::instance(),
+        }
+    }
+}
+
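+// Resolves the tip of `branch` through the GitHub REST API by taking the
+// first entry of the list-commits response for the repository.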
+#[async_trait]
+impl RepositoryClient for Client {
+    async fn get_latest_commit(
+        &self,
+        repository: &crate::repository::Repository,
+        branch: &str,
+    ) -> Result<String, Box<dyn Error>> {
+        let commits = self
+            .client
+            .repos(repository.owner.clone(), repository.name.clone())
+            .list_commits()
+            .branch(branch)
+            .send()
+            .await?;
+
+        Ok(commits.items[0].sha.clone())
+    }
+}
diff --git a/crates/imagebumper/src/clients/mod.rs b/crates/imagebumper/src/clients/mod.rs
new file mode 100644
index 0000000..8c2bfde
--- /dev/null
+++ b/crates/imagebumper/src/clients/mod.rs
@@ -0,0 +1,19 @@
+pub mod github;
+pub mod opendev;
+
+use crate::clients::github::Client as GitHubClient;
+use crate::clients::opendev::Client as OpenDevClient;
+
+pub struct ClientSet {
+    pub github: GitHubClient,
+    pub opendev: OpenDevClient,
+}
+
+impl ClientSet {
+    pub fn new() -> Self {
+        ClientSet {
+            github: GitHubClient::new(),
+            opendev: OpenDevClient::new(),
+        }
+    }
+}
diff --git a/crates/imagebumper/src/clients/opendev.rs b/crates/imagebumper/src/clients/opendev.rs
new file mode 100644
index 0000000..252ed60
--- /dev/null
+++ b/crates/imagebumper/src/clients/opendev.rs
@@ -0,0 +1,35 @@
+use crate::RepositoryClient;
+use async_trait::async_trait;
+use gitea_sdk::Auth;
+use gitea_sdk::Client as GiteaClient;
+use std::error::Error;
+
+pub struct Client {
+    client: GiteaClient,
+}
+
+impl Client {
+    pub fn new() -> Self {
+        Client {
+            client: GiteaClient::new("https://opendev.org", Auth::None::<String>),
+        }
+    }
+}
+
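+// Resolves the tip of `branch` through the Gitea API exposed by opendev.org,
+// using the commit id reported for that branch.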
+#[async_trait]
+impl RepositoryClient for Client {
+    async fn get_latest_commit(
+        &self,
+        repository: &crate::repository::Repository,
+        branch: &str,
+    ) -> Result<String, Box<dyn Error>> {
+        let branch_info = self
+            .client
+            .repos(repository.owner.clone(), repository.name.clone())
+            .get_branch(branch)
+            .send(&self.client)
+            .await?;
+
+        Ok(branch_info.commit.id)
+    }
+}
diff --git a/crates/imagebumper/src/lib.rs b/crates/imagebumper/src/lib.rs
new file mode 100644
index 0000000..6142e02
--- /dev/null
+++ b/crates/imagebumper/src/lib.rs
@@ -0,0 +1,15 @@
+pub mod repository;
+pub mod clients;
+
+use async_trait::async_trait;
+use std::any::Any;
+use std::error::Error;
+
+#[async_trait]
+pub trait RepositoryClient: Any + Send + Sync {
+    async fn get_latest_commit(
+        &self,
+        repository: &crate::repository::Repository,
+        branch: &str,
+    ) -> Result<String, Box<dyn Error>>;
+}
diff --git a/crates/imagebumper/src/main.rs b/crates/imagebumper/src/main.rs
new file mode 100644
index 0000000..9958331
--- /dev/null
+++ b/crates/imagebumper/src/main.rs
@@ -0,0 +1,176 @@
+use clap::Parser;
+use imagebumper::clients::ClientSet;
+use imagebumper::repository::Repository;
+use log::error;
+use log::{info, warn};
+use regex::Regex;
+use std::collections::HashMap;
+use std::path::{Path, PathBuf};
+use tokio::fs;
+use tokio::io::AsyncWriteExt;
+
+#[derive(Parser, Debug)]
+#[clap(author, version, about)]
+struct Args {
+    #[clap(short, long)]
+    branch: String,
+
+    #[clap(required = true)]
+    files: Vec<PathBuf>,
+}
+
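+/// Maps the `*_GIT_REF` build arguments used in the Dockerfiles to the
+/// upstream repositories whose latest commit should be used as the new ref.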
+fn get_repo_map(clientset: &ClientSet) -> HashMap<&'static str, Repository> {
+    let mut map = HashMap::new();
+
+    map.insert(
+        "BARBICAN_GIT_REF",
+        Repository::from_url(clientset, "https://opendev.org/openstack/barbican.git").unwrap(),
+    );
+    map.insert(
+        "CINDER_GIT_REF",
+        Repository::from_url(clientset, "https://opendev.org/openstack/cinder.git").unwrap(),
+    );
+    map.insert(
+        "DESIGNATE_GIT_REF",
+        Repository::from_url(clientset, "https://opendev.org/openstack/designate.git").unwrap(),
+    );
+    map.insert(
+        "GLANCE_GIT_REF",
+        Repository::from_url(clientset, "https://opendev.org/openstack/glance.git").unwrap(),
+    );
+    map.insert(
+        "HEAT_GIT_REF",
+        Repository::from_url(clientset, "https://opendev.org/openstack/heat.git").unwrap(),
+    );
+    map.insert(
+        "HORIZON_GIT_REF",
+        Repository::from_url(clientset, "https://opendev.org/openstack/horizon.git").unwrap(),
+    );
+    map.insert(
+        "IRONIC_GIT_REF",
+        Repository::from_url(clientset, "https://opendev.org/openstack/ironic.git").unwrap(),
+    );
+    map.insert(
+        "KEYSTONE_GIT_REF",
+        Repository::from_url(clientset, "https://opendev.org/openstack/keystone.git").unwrap(),
+    );
+    map.insert(
+        "KUBERNETES_ENTRYPOINT_GIT_REF",
+        Repository::from_url(clientset, "https://opendev.org/airship/kubernetes-entrypoint").unwrap(),
+    );
+    map.insert(
+        "MAGNUM_GIT_REF",
+        Repository::from_url(clientset, "https://opendev.org/openstack/magnum.git").unwrap(),
+    );
+    map.insert(
+        "MANILA_GIT_REF",
+        Repository::from_url(clientset, "https://opendev.org/openstack/manila.git").unwrap(),
+    );
+    map.insert(
+        "NETOFFLOAD_GIT_REF",
+        Repository::from_url(clientset, "https://github.com/vexxhost/netoffload.git").unwrap(),
+    );
+    map.insert(
+        "NEUTRON_GIT_REF",
+        Repository::from_url(clientset, "https://opendev.org/openstack/neutron.git").unwrap(),
+    );
+    map.insert(
+        "NEUTRON_VPNAAS_GIT_REF",
+        Repository::from_url(clientset, "https://opendev.org/openstack/neutron-vpnaas.git").unwrap(),
+    );
+    map.insert(
+        "NETWORKING_BAREMETAL_GIT_REF",
+        Repository::from_url(clientset, "https://opendev.org/openstack/networking-baremetal.git").unwrap(),
+    );
+    map.insert(
+        "POLICY_SERVER_GIT_REF",
+        Repository::from_url(clientset, "https://github.com/vexxhost/neutron-policy-server.git").unwrap(),
+    );
+    map.insert(
+        "LOG_PASER_GIT_REF",
+        Repository::from_url(clientset, "https://github.com/vexxhost/neutron-ovn-network-logging-parser.git")
+            .unwrap(),
+    );
+    map.insert(
+        "NOVA_GIT_REF",
+        Repository::from_url(clientset, "https://opendev.org/openstack/nova.git").unwrap(),
+    );
+    map.insert(
+        "SCHEDULER_FILTERS_GIT_REF",
+        Repository::from_url(clientset, "https://github.com/vexxhost/nova-scheduler-filters.git").unwrap(),
+    );
+    map.insert(
+        "OCTAVIA_GIT_REF",
+        Repository::from_url(clientset, "https://opendev.org/openstack/octavia.git").unwrap(),
+    );
+    map.insert(
+        "REQUIREMENTS_GIT_REF",
+        Repository::from_url(clientset, "https://opendev.org/openstack/requirements.git").unwrap(),
+    );
+    map.insert(
+        "PLACEMENT_GIT_REF",
+        Repository::from_url(clientset, "https://opendev.org/openstack/placement.git").unwrap(),
+    );
+    map.insert(
+        "STAFFELN_GIT_REF",
+        Repository::from_url(clientset, "https://github.com/vexxhost/staffeln.git").unwrap(),
+    );
+    map.insert(
+        "TEMPEST_GIT_REF",
+        Repository::from_url(clientset, "https://opendev.org/openstack/tempest.git").unwrap(),
+    );
+
+    map
+}
+
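+/// Rewrites every `ARG <NAME>_GIT_REF=<ref>` line in the Dockerfile at `path`
+/// to point at the latest commit of the matching repository on `branch`.
+/// Illustrative only: a line such as `ARG NOVA_GIT_REF=abc123` would have
+/// `abc123` replaced with the current tip of openstack/nova.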
+async fn update_dockerfile(clientset: &ClientSet, path: &Path, branch: &str) -> Result<(), Box<dyn std::error::Error>> {
+    let content = fs::read_to_string(path).await?;
+    let re = Regex::new(r"(ARG\s+(\w+_GIT_REF)=)(\S+)")?;
+    let mut new_content = content.clone();
+
+    for cap in re.captures_iter(&content) {
+        let arg_name = cap.get(2).unwrap().as_str();
+        if let Some(repo) = get_repo_map(clientset).get(arg_name) {
+            let new_git_ref = match repo.get_latest_commit(branch).await {
+                Ok(commit) => commit,
+                Err(e) => {
+                    error!(arg = arg_name, error = e.to_string().as_str().trim(); "Failed to get latest commit");
+                    continue;
+                }
+            };
+
+            new_content = new_content.replace(
+                &format!("{}{}", &cap[1], &cap[3]),
+                &format!("{}{}", &cap[1], new_git_ref),
+            );
+
+            info!(arg = arg_name, path = path.to_str(), ref = new_git_ref.as_str(); "Updated Dockerfile");
+        } else {
+            error!(arg = arg_name; "No repository URL found.");
+        }
+    }
+
+    if new_content != content {
+        let mut file = fs::File::create(path).await?;
+        file.write_all(new_content.as_bytes()).await?;
+    }
+    Ok(())
+}
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    env_logger::init();
+    let args = Args::parse();
+
+    let clientset = ClientSet::new();
+
+    for file_path in args.files {
+        if file_path.is_file() {
+            update_dockerfile(&clientset, &file_path, &args.branch).await?;
+        } else {
+            warn!("{:?} is not a file, skipping", file_path);
+        }
+    }
+
+    Ok(())
+}
diff --git a/crates/imagebumper/src/repository.rs b/crates/imagebumper/src/repository.rs
new file mode 100644
index 0000000..2f1a274
--- /dev/null
+++ b/crates/imagebumper/src/repository.rs
@@ -0,0 +1,66 @@
+use crate::clients::ClientSet;
+use crate::RepositoryClient;
+use std::error::Error;
+use url::Url;
+
+pub struct Repository<'a> {
+    pub owner: String,
+    pub name: String,
+    client: &'a dyn RepositoryClient,
+}
+
+impl<'a> Repository<'a> {
+    pub fn from_url(clientset: &'a ClientSet, url: &str) -> Result<Self, Box<dyn Error>> {
+        let url = url.trim_end_matches(".git");
+        let parsed_url = Url::parse(url)?;
+        let hostname = parsed_url.host_str().ok_or("Invalid repository URL")?;
+        let parts: Vec<&str> = parsed_url
+            .path_segments()
+            .ok_or("Invalid repository URL")?
+            .collect();
+        if parts.len() < 2 {
+            return Err("Invalid repository URL".into());
+        }
+
+        let client: &dyn RepositoryClient = match hostname {
+            "opendev.org" => &clientset.opendev,
+            "github.com" => &clientset.github,
+            _ => return Err("Unsupported repository host".into()),
+        };
+
+        Ok(Repository {
+            owner: parts[parts.len() - 2].to_string(),
+            name: parts[parts.len() - 1].to_string(),
+            client,
+        })
+    }
+
+    pub async fn get_latest_commit(&self, branch: &str) -> Result<String, Box<dyn Error>> {
+        self.client.get_latest_commit(self, branch).await
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[tokio::test]
+    async fn test_from_url_for_opendev() {
+        let clientset = ClientSet::new();
+        let repo =
+            Repository::from_url(&clientset, "https://opendev.org/openstack/nova.git").unwrap();
+
+        assert_eq!(repo.owner, "openstack");
+        assert_eq!(repo.name, "nova");
+    }
+
+    #[tokio::test]
+    async fn test_from_url_for_github() {
+        let clientset = ClientSet::new();
+        let repo =
+            Repository::from_url(&clientset, "https://github.com/vexxhost/atmosphere.git").unwrap();
+
+        assert_eq!(repo.owner, "vexxhost");
+        assert_eq!(repo.name, "atmosphere");
+    }
+}
diff --git a/crates/ovsinit/Cargo.toml b/crates/ovsinit/Cargo.toml
new file mode 100644
index 0000000..ac7d810
--- /dev/null
+++ b/crates/ovsinit/Cargo.toml
@@ -0,0 +1,19 @@
+[package]
+name = "ovsinit"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+clap = { version = "4.5.29", features = ["derive"] }
+env_logger = { version = "0.11.6", features = ["unstable-kv"] }
+futures = "0.3.31"
+futures-util = "0.3.31"
+ipnet = "2.11.0"
+libc = "0.2.169"
+log = { version = "0.4.25", features = ["kv"] }
+netlink-packet-route = "0.19.0"
+rtnetlink = "0.14.1"
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "1.0"
+thiserror = "2.0.11"
+tokio = { version = "1", features = ["rt-multi-thread", "macros"] }
diff --git a/crates/ovsinit/src/config.rs b/crates/ovsinit/src/config.rs
new file mode 100644
index 0000000..7c3d6b7
--- /dev/null
+++ b/crates/ovsinit/src/config.rs
@@ -0,0 +1,82 @@
+use serde::Deserialize;
+use std::collections::HashMap;
+use std::{fs::File, path::PathBuf};
+use thiserror::Error;
+use log::{error, info};
+
+#[derive(Deserialize)]
+pub struct NetworkConfig {
+    #[serde(flatten)]
+    pub bridges: HashMap<String, Option<String>>,
+}
+
+#[derive(Debug, Error)]
+pub enum NetworkConfigError {
+    #[error("Failed to open file: {0}")]
+    OpenFile(#[from] std::io::Error),
+
+    #[error("Failed to parse JSON: {0}")]
+    ParseJson(#[from] serde_json::Error),
+}
+
+impl NetworkConfig {
+    pub fn from_path(path: &PathBuf) -> Result<Self, NetworkConfigError> {
+        let file = File::open(path)?;
+        NetworkConfig::from_file(file)
+    }
+
+    pub fn from_file(file: File) -> Result<Self, NetworkConfigError> {
+        let config: NetworkConfig = serde_json::from_reader(file)?;
+        Ok(config)
+    }
+
+    pub fn bridges_with_interfaces_iter(&self) -> impl Iterator<Item = (&String, &String)> {
+        self.bridges.iter().filter_map(|(k, v)| {
+            if let Some(v) = v {
+                Some((k, v))
+            } else {
+                info!(bridge = k.as_str(); "Bridge has no interface, skipping.");
+
+                None
+            }
+        })
+    }
+
+    #[allow(dead_code)]
+    pub fn from_string(json: &str) -> Result<Self, NetworkConfigError> {
+        let config: NetworkConfig = serde_json::from_str(json)?;
+        Ok(config)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_null_interface() {
+        let config = NetworkConfig::from_string("{\"br-ex\": null}").unwrap();
+
+        assert_eq!(config.bridges.len(), 1);
+        assert_eq!(config.bridges.get("br-ex"), Some(&None));
+    }
+
+    #[test]
+    fn test_bridges_with_interfaces_iter_with_null_interface() {
+        let config = NetworkConfig::from_string("{\"br-ex\": null}").unwrap();
+
+        let mut iter = config.bridges_with_interfaces_iter();
+        assert_eq!(iter.next(), None);
+    }
+
+    #[test]
+    fn test_bridges_with_interfaces_iter_with_interface() {
+        let config = NetworkConfig::from_string("{\"br-ex\": \"bond0\"}").unwrap();
+
+        let mut iter = config.bridges_with_interfaces_iter();
+        assert_eq!(
+            iter.next(),
+            Some((&"br-ex".to_string(), &"bond0".to_string()))
+        );
+    }
+}
diff --git a/crates/ovsinit/src/lib.rs b/crates/ovsinit/src/lib.rs
new file mode 100644
index 0000000..80fb9cd
--- /dev/null
+++ b/crates/ovsinit/src/lib.rs
@@ -0,0 +1,353 @@
+extern crate ipnet;
+
+mod routes;
+
+use futures_util::stream::TryStreamExt;
+use ipnet::IpNet;
+use log::{error, info};
+use netlink_packet_route::{
+    address::{AddressAttribute, AddressMessage},
+    route::{RouteAttribute, RouteMessage, RouteScope},
+    AddressFamily,
+};
+use rtnetlink::{Handle, IpVersion};
+use std::net::IpAddr;
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+pub enum InterfaceError {
+    #[error("Interface {0} not found")]
+    NotFound(String),
+
+    #[error(transparent)]
+    NetlinkError(#[from] rtnetlink::Error),
+
+    #[error(transparent)]
+    IpNetError(#[from] ipnet::PrefixLenError),
+
+    #[error(transparent)]
+    RouteError(#[from] routes::RouteError),
+}
+
+#[derive(Error, Debug)]
+pub enum InterfaceMigrationError {
+    #[error(transparent)]
+    InterfaceError(#[from] InterfaceError),
+
+    #[error("IP configuration on both interfaces")]
+    IpConflict,
+}
+
+pub struct Interface {
+    name: String,
+    index: u32,
+    address_messages: Vec<AddressMessage>,
+    route_messages: Vec<RouteMessage>,
+}
+
+impl Interface {
+    pub async fn new(handle: &Handle, name: String) -> Result<Self, InterfaceError> {
+        let index = handle
+            .link()
+            .get()
+            .match_name(name.clone())
+            .execute()
+            .try_next()
+            .await
+            .map_err(|e| match e {
+                rtnetlink::Error::NetlinkError(inner) if -inner.raw_code() == libc::ENODEV => {
+                    InterfaceError::NotFound(name.clone())
+                }
+                _ => InterfaceError::NetlinkError(e),
+            })?
+            .map(|link| link.header.index)
+            .ok_or_else(|| InterfaceError::NotFound(name.clone()))?;
+
+        let address_messages: Vec<AddressMessage> = handle
+            .address()
+            .get()
+            .set_link_index_filter(index)
+            .execute()
+            .map_err(InterfaceError::NetlinkError)
+            .try_filter(|msg| futures::future::ready(msg.header.family == AddressFamily::Inet))
+            .try_collect()
+            .await?;
+
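+        // Collect the IPv4 routes whose output interface is this link,
+        // excluding local-type routes.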
+        let route_messages: Vec<RouteMessage> = handle
+            .route()
+            .get(IpVersion::V4)
+            .execute()
+            .map_err(InterfaceError::NetlinkError)
+            .try_filter(move |route_msg| {
+                let matches = route_msg
+                    .attributes
+                    .iter()
+                    .any(|attr| matches!(attr, RouteAttribute::Oif(idx) if *idx == index))
+                    && route_msg.header.kind != netlink_packet_route::route::RouteType::Local;
+
+                futures_util::future::ready(matches)
+            })
+            .try_collect()
+            .await?;
+
+        Ok(Self {
+            name,
+            index,
+            address_messages,
+            route_messages,
+        })
+    }
+
+    fn addresses(&self) -> Vec<IpNet> {
+        self.address_messages
+            .iter()
+            .filter_map(|msg| {
+                msg.attributes.iter().find_map(|nla| {
+                    if let AddressAttribute::Address(ip) = nla {
+                        IpNet::new(*ip, msg.header.prefix_len).ok()
+                    } else {
+                        None
+                    }
+                })
+            })
+            .collect()
+    }
+
+    fn routes(&self) -> Result<Vec<routes::Route>, routes::RouteError> {
+        self.route_messages
+            .iter()
+            .filter_map(|msg| {
+                if msg.header.scope == RouteScope::Link {
+                    return None;
+                }
+
+                Some(routes::Route::from_message(msg.clone()))
+            })
+            .collect::<Result<Vec<routes::Route>, routes::RouteError>>()
+    }
+
+    async fn up(&self, handle: &Handle) -> Result<(), InterfaceError> {
+        handle
+            .link()
+            .set(self.index)
+            .up()
+            .execute()
+            .await
+            .map_err(InterfaceError::NetlinkError)
+    }
+
+    async fn restore(&self, handle: &Handle) -> Result<(), InterfaceError> {
+        self.migrate_addresses_from_interface(handle, self).await?;
+        self.migrate_routes_from_interface(handle, self).await?;
+
+        Ok(())
+    }
+
+    async fn flush(&self, handle: &Handle) -> Result<(), InterfaceError> {
+        for msg in self.address_messages.iter() {
+            handle.address().del(msg.clone()).execute().await?;
+        }
+
+        // NOTE(mnaser): Once the interface has no more addresses, it will
+        //               automatically lose all of its routes.
+
+        Ok(())
+    }
+
+    async fn migrate_addresses_from_interface(
+        &self,
+        handle: &Handle,
+        src_interface: &Interface,
+    ) -> Result<(), InterfaceError> {
+        for msg in src_interface.address_messages.iter() {
+            let ip = msg.attributes.iter().find_map(|nla| match nla {
+                AddressAttribute::Address(ip) => Some(ip),
+                _ => None,
+            });
+
+            if let Some(ip) = ip {
+                handle
+                    .address()
+                    .add(self.index, *ip, msg.header.prefix_len)
+                    .replace()
+                    .execute()
+                    .await?;
+            }
+        }
+
+        Ok(())
+    }
+
+    async fn migrate_routes_from_interface(
+        &self,
+        handle: &Handle,
+        src_interface: &Interface,
+    ) -> Result<(), InterfaceError> {
+        for route in src_interface.routes()?.iter() {
+            let mut request = handle.route().add();
+            request = request.protocol(route.protocol);
+
+            match route.destination.addr() {
+                IpAddr::V4(ipv4) => {
+                    let mut request = request
+                        .v4()
+                        .replace()
+                        .destination_prefix(ipv4, route.destination.prefix_len());
+
+                    if let IpAddr::V4(gateway) = route.gateway {
+                        request = request.gateway(gateway);
+                    }
+
+                    request.execute().await?;
+                }
+                IpAddr::V6(ipv6) => {
+                    let mut request = request
+                        .v6()
+                        .replace()
+                        .destination_prefix(ipv6, route.destination.prefix_len());
+
+                    if let IpAddr::V6(gateway) = route.gateway {
+                        request = request.gateway(gateway);
+                    }
+
+                    request.execute().await?;
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    pub async fn migrate_from_interface(
+        &self,
+        handle: &Handle,
+        src_interface: &Interface,
+    ) -> Result<(), InterfaceMigrationError> {
+        self.up(handle).await?;
+
+        match (
+            src_interface.address_messages.is_empty(),
+            self.address_messages.is_empty(),
+        ) {
+            (false, false) => {
+                // Both source and destination interfaces have IPs assigned
+                error!(
+                    src_interface = src_interface.name.as_str(),
+                    dst_interface = self.name.as_str(),
+                    src_ip_addresses = format!("{:?}", src_interface.addresses()).as_str(),
+                    dst_ip_addresses = format!("{:?}", self.addresses()).as_str();
+                    "Both source and destination interfaces have IPs assigned. This is not safe in production, please fix manually."
+                );
+
+                Err(InterfaceMigrationError::IpConflict)
+            }
+            (false, true) => {
+                // Source interface has IPs, destination interface has no IPs
+                info!(
+                    src_interface = src_interface.name.as_str(),
+                    dst_interface = self.name.as_str(),
+                    ip_addresses = format!("{:?}", src_interface.addresses()).as_str(),
+                    routes = format!("{:?}", src_interface.routes()).as_str();
+                    "Migrating IP addresses from interface to bridge."
+                );
+
+                if let Err(e) = src_interface.flush(handle).await {
+                    error!(
+                        src_interface = src_interface.name.as_str(),
+                        error = e.to_string().as_str();
+                        "Error while flushing IPs from source interface."
+                    );
+
+                    if let Err(restore_err) = src_interface.restore(handle).await {
+                        error!(
+                            src_interface = src_interface.name.as_str(),
+                            error = restore_err.to_string().as_str();
+                            "Error while restoring IPs to source interface."
+                        );
+                    }
+
+                    return Err(InterfaceMigrationError::InterfaceError(e));
+                }
+
+                info!(
+                    src_interface = src_interface.name.as_str(),
+                    dst_interface = self.name.as_str();
+                    "Successfully flushed IP addresses from source interface."
+                );
+
+                if let Err(e) = self
+                    .migrate_addresses_from_interface(handle, src_interface)
+                    .await
+                {
+                    error!(
+                        dst_interface = self.name.as_str(),
+                        error = e.to_string().as_str();
+                        "Error while migrating IP addresses to destination interface."
+                    );
+
+                    if let Err(restore_err) = src_interface.restore(handle).await {
+                        error!(
+                            src_interface = src_interface.name.as_str(),
+                            error = restore_err.to_string().as_str();
+                            "Error while restoring IPs to source interface."
+                        );
+                    }
+
+                    return Err(InterfaceMigrationError::InterfaceError(e));
+                }
+
+                info!(
+                    src_interface = src_interface.name.as_str(),
+                    dst_interface = self.name.as_str();
+                    "Successfully migrated IP addresseses to new interface."
+                );
+
+                if let Err(e) = self
+                    .migrate_routes_from_interface(handle, src_interface)
+                    .await
+                {
+                    error!(
+                        dst_interface = self.name.as_str(),
+                        routes = format!("{:?}", src_interface.routes()).as_str(),
+                        error = e.to_string().as_str();
+                        "Error while migrating routes to destination interface."
+                    );
+
+                    if let Err(restore_err) = src_interface.restore(handle).await {
+                        error!(
+                            src_interface = src_interface.name.as_str(),
+                            routes = format!("{:?}", src_interface.routes()).as_str(),
+                            error = restore_err.to_string().as_str();
+                            "Error while restoring source interface."
+                        );
+                    }
+
+                    return Err(InterfaceMigrationError::InterfaceError(e));
+                }
+
+                Ok(())
+            }
+            (true, false) => {
+                // Destination interface has IPs, source interface has no IPs
+                info!(
+                    src_interface = src_interface.name.as_str(),
+                    dst_interface = self.name.as_str(),
+                    ip_addresses = format!("{:?}", self.addresses()).as_str();
+                    "Bridge already has IPs assigned. Skipping migration."
+                );
+
+                Ok(())
+            }
+            (true, true) => {
+                // Neither interface has IPs
+                info!(
+                    src_interface = src_interface.name.as_str(),
+                    dst_interface = self.name.as_str();
+                    "Neither interface nor bridge have IPs assigned. Skipping migration."
+                );
+
+                Ok(())
+            }
+        }
+    }
+}
diff --git a/crates/ovsinit/src/main.rs b/crates/ovsinit/src/main.rs
new file mode 100644
index 0000000..fb77530
--- /dev/null
+++ b/crates/ovsinit/src/main.rs
@@ -0,0 +1,63 @@
+mod config;
+
+use clap::Parser;
+use env_logger::Env;
+use log::error;
+use rtnetlink::Handle;
+use std::{path::PathBuf, process};
+
+#[derive(Parser, Debug)]
+#[command(version, about, long_about = None)]
+struct Cli {
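+    // Path to a JSON map of bridge name to physical interface, for example
+    // {"br-ex": "bond0"}; bridges with a null interface are skipped.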
+    #[arg(default_value = "/tmp/auto_bridge_add", help = "Path to the JSON file")]
+    config: PathBuf,
+}
+
+#[tokio::main]
+async fn main() {
+    let cli = Cli::parse();
+
+    let env = Env::default()
+        .filter_or("MY_LOG_LEVEL", "info")
+        .write_style_or("MY_LOG_STYLE", "always");
+    env_logger::init_from_env(env);
+
+    let network_config = match config::NetworkConfig::from_path(&cli.config) {
+        Ok(network_config) => network_config,
+        Err(e) => {
+            error!("Failed to load network config: {}", e);
+
+            process::exit(1);
+        }
+    };
+
+    let (connection, handle, _) = rtnetlink::new_connection().expect("Failed to create connection");
+    tokio::spawn(connection);
+
+    for (bridge_name, interface_name) in network_config.bridges_with_interfaces_iter() {
+        let interface = get_interface(&handle, interface_name).await;
+        let bridge = get_interface(&handle, bridge_name).await;
+
+        if let Err(e) = bridge.migrate_from_interface(&handle, &interface).await {
+            error!(
+                "Failed to migrate from {} to {}: {}",
+                interface_name, bridge_name, e
+            );
+            process::exit(1);
+        }
+    }
+}
+
+async fn get_interface(handle: &Handle, name: &str) -> ovsinit::Interface {
+    match ovsinit::Interface::new(handle, name.to_string()).await {
+        Ok(interface) => interface,
+        Err(ovsinit::InterfaceError::NotFound(name)) => {
+            error!(interface = name.as_str(); "Interface not found.");
+            process::exit(1);
+        }
+        Err(e) => {
+            error!(error = e.to_string().as_str(); "Failed to lookup interface.");
+            process::exit(1);
+        }
+    }
+}
diff --git a/crates/ovsinit/src/routes.rs b/crates/ovsinit/src/routes.rs
new file mode 100644
index 0000000..a4e0130
--- /dev/null
+++ b/crates/ovsinit/src/routes.rs
@@ -0,0 +1,150 @@
+use ipnet::IpNet;
+use log::error;
+use netlink_packet_route::{
+    route::{RouteAddress, RouteAttribute, RouteMessage, RouteProtocol},
+    AddressFamily,
+};
+use std::{
+    fmt,
+    net::{IpAddr, Ipv4Addr, Ipv6Addr},
+};
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+pub enum RouteError {
+    #[error("Invalid gateway")]
+    InvalidGateway,
+
+    #[error("Invalid destination")]
+    InvalidDestination,
+
+    #[error("Invalid prefix length")]
+    InvalidPrefixLength,
+
+    #[error("Missing gateway")]
+    MissingGateway,
+
+    #[error("Missing destination")]
+    MissingDestination,
+}
+
+pub struct Route {
+    pub protocol: RouteProtocol,
+    pub destination: IpNet,
+    pub gateway: IpAddr,
+}
+
+impl fmt::Debug for Route {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{} via {}", self.destination, self.gateway)
+    }
+}
+
+impl Route {
+    pub fn from_message(message: RouteMessage) -> Result<Self, RouteError> {
+        let mut gateway = None;
+        let mut destination = None;
+
+        for nla in message.attributes.iter() {
+            if let RouteAttribute::Gateway(ip) = nla {
+                gateway = match ip {
+                    RouteAddress::Inet(ip) => Some(IpAddr::V4(*ip)),
+                    RouteAddress::Inet6(ip) => Some(IpAddr::V6(*ip)),
+                    _ => return Err(RouteError::InvalidGateway),
+                };
+            }
+
+            if let RouteAttribute::Destination(ref ip) = nla {
+                destination = match ip {
+                    RouteAddress::Inet(ip) => Some(
+                        IpNet::new(IpAddr::V4(*ip), message.header.destination_prefix_length)
+                            .map_err(|_| RouteError::InvalidPrefixLength)?,
+                    ),
+                    RouteAddress::Inet6(ip) => Some(
+                        IpNet::new(IpAddr::V6(*ip), message.header.destination_prefix_length)
+                            .map_err(|_| RouteError::InvalidPrefixLength)?,
+                    ),
+                    _ => return Err(RouteError::InvalidDestination),
+                };
+            }
+        }
+
+        let gateway = match gateway {
+            Some(gateway) => gateway,
+            None => return Err(RouteError::MissingGateway),
+        };
+
+        let destination = match destination {
+            Some(destination) => destination,
+            None => match message.header.address_family {
+                AddressFamily::Inet => IpNet::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0)
+                    .map_err(|_| RouteError::InvalidPrefixLength)?,
+                AddressFamily::Inet6 => IpNet::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0)
+                    .map_err(|_| RouteError::InvalidPrefixLength)?,
+                _ => return Err(RouteError::InvalidDestination),
+            },
+        };
+
+        Ok(Route {
+            protocol: message.header.protocol,
+            destination,
+            gateway,
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use netlink_packet_route::AddressFamily;
+    use std::net::Ipv4Addr;
+
+    #[tokio::test]
+    async fn test_default_ipv4_route() {
+        let mut message = RouteMessage::default();
+
+        message.header.address_family = AddressFamily::Inet;
+        message.header.destination_prefix_length = 0;
+        message.header.protocol = RouteProtocol::Static;
+        message
+            .attributes
+            .push(RouteAttribute::Gateway(RouteAddress::Inet(Ipv4Addr::new(
+                192, 168, 1, 1,
+            ))));
+
+        let route = Route::from_message(message).unwrap();
+
+        assert_eq!(route.protocol, RouteProtocol::Static);
+        assert_eq!(
+            route.destination,
+            IpNet::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0).unwrap()
+        );
+        assert_eq!(route.gateway, IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)));
+    }
+
+    #[tokio::test]
+    async fn test_default_ipv6_route() {
+        let mut message = RouteMessage::default();
+
+        message.header.address_family = AddressFamily::Inet6;
+        message.header.destination_prefix_length = 0;
+        message.header.protocol = RouteProtocol::Static;
+        message
+            .attributes
+            .push(RouteAttribute::Gateway(RouteAddress::Inet6(Ipv6Addr::new(
+                0, 0, 0, 0, 0, 0, 0, 1,
+            ))));
+
+        let route = Route::from_message(message).unwrap();
+
+        assert_eq!(route.protocol, RouteProtocol::Static);
+        assert_eq!(
+            route.destination,
+            IpNet::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0).unwrap()
+        );
+        assert_eq!(
+            route.gateway,
+            IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))
+        );
+    }
+}
diff --git a/crates/passwd/Cargo.toml b/crates/passwd/Cargo.toml
new file mode 100644
index 0000000..25b3dc1
--- /dev/null
+++ b/crates/passwd/Cargo.toml
@@ -0,0 +1,7 @@
+[package]
+name = "passwd"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+thiserror = "2.0.11"
diff --git a/crates/passwd/src/lib.rs b/crates/passwd/src/lib.rs
new file mode 100644
index 0000000..395a441
--- /dev/null
+++ b/crates/passwd/src/lib.rs
@@ -0,0 +1,81 @@
+use std::num::ParseIntError;
+use thiserror::Error;
+
+#[derive(Debug, Error)]
+pub enum PasswdEntryError {
+    #[error("Failed to parse ID: {0}")]
+    FailedToParseID(#[from] ParseIntError),
+
+    #[error("Invalid passwd entry: {0}")]
+    InvalidPasswdEntry(String),
+}
+
+#[derive(Debug, Clone)]
+pub struct PasswdEntry {
+    pub name: String,
+    pub passwd: String,
+    pub uid: u32,
+    pub gid: u32,
+    pub gecos: String,
+    pub dir: String,
+    pub shell: String,
+}
+
+impl PasswdEntry {
+    pub fn from_line(line: &str) -> Result<PasswdEntry, PasswdEntryError> {
+        let parts: Vec<&str> = line.split(":").map(|part| part.trim()).collect();
+
+        if parts.len() != 7 {
+            return Err(PasswdEntryError::InvalidPasswdEntry(line.to_string()));
+        }
+
+        Ok(PasswdEntry {
+            name: parts[0].to_string(),
+            passwd: parts[1].to_string(),
+            uid: parts[2].parse()?,
+            gid: parts[3].parse()?,
+            gecos: parts[4].to_string(),
+            dir: parts[5].to_string(),
+            shell: parts[6].to_string(),
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_from_line_valid() {
+        let line = "username:x:1000:1000:User Name:/home/username:/bin/bash";
+        let entry = PasswdEntry::from_line(line).unwrap();
+        assert_eq!(entry.name, "username");
+        assert_eq!(entry.passwd, "x");
+        assert_eq!(entry.uid, 1000);
+        assert_eq!(entry.gid, 1000);
+        assert_eq!(entry.gecos, "User Name");
+        assert_eq!(entry.dir, "/home/username");
+        assert_eq!(entry.shell, "/bin/bash");
+    }
+
+    #[test]
+    fn test_from_line_invalid_uid() {
+        let line = "username:x:invalid:1000:User Name:/home/username:/bin/bash";
+        let result = PasswdEntry::from_line(line);
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn test_from_line_invalid_gid() {
+        let line = "username:x:1000:invalid:User Name:/home/username:/bin/bash";
+        let result = PasswdEntry::from_line(line);
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn test_from_line_missing_fields() {
+        let line = "username:x:1000:1000:User Name:/home/username";
+        let result = PasswdEntry::from_line(line);
+        assert!(result.is_err());
+    }
+}
diff --git a/crates/rustainers/Cargo.toml b/crates/rustainers/Cargo.toml
new file mode 100644
index 0000000..3c5adb5
--- /dev/null
+++ b/crates/rustainers/Cargo.toml
@@ -0,0 +1,14 @@
+[package]
+name = "rustainers"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+bollard = "0.18.1"
+bytes = "1.10.0"
+futures-util = "0.3.31"
+rand = "0.8"
+tar = "0.4.43"
+thiserror = "2.0.11"
+tokio = { version = "1", features = ["macros", "rt-multi-thread"] }
+passwd = { path = "../passwd" }
diff --git a/crates/rustainers/src/lib.rs b/crates/rustainers/src/lib.rs
new file mode 100644
index 0000000..65f1505
--- /dev/null
+++ b/crates/rustainers/src/lib.rs
@@ -0,0 +1,244 @@
+extern crate tar;
+
+use bollard::container::{Config, CreateContainerOptions, StartContainerOptions};
+use bollard::exec::{CreateExecOptions, StartExecResults};
+use bollard::Docker;
+use bytes::{BufMut, BytesMut};
+use futures_util::stream::StreamExt;
+use futures_util::TryStreamExt;
+use passwd::PasswdEntry;
+use rand::Rng;
+use std::collections::HashMap;
+use std::io::Read;
+use thiserror::Error;
+
+#[derive(Debug, Error)]
+pub enum DockerContainerGuardError {
+    #[error("Docker API error: {0}")]
+    DockerError(#[from] bollard::errors::Error),
+
+    #[error("File not found: {0}")]
+    FileNotFound(String),
+
+    #[error("Failed to extract file: {0}")]
+    FileExtractionFailed(#[from] std::io::Error),
+
+    #[error("Too many files in tarball")]
+    TooManyFilesInTarball,
+
+    #[error("Failed to parse password entry: {0}")]
+    FailedToParsePasswdEntry(#[from] passwd::PasswdEntryError),
+
+    #[error("User not found: {0}")]
+    UserNotFound(String),
+}
+
+#[derive(Debug)]
+pub struct DockerContainerGuard {
+    pub id: String,
+    pub image: String,
+    docker: Docker,
+}
+
+impl DockerContainerGuard {
+    /// Spawns a new container using Bollard.
+    ///
+    /// The container is automatically cleaned up when the guard goes out of scope.
+    pub async fn spawn(image_name: &str) -> Result<Self, DockerContainerGuardError> {
+        let docker = Docker::connect_with_local_defaults()?;
+
+        let container_name: String = rand::thread_rng()
+            .sample_iter(&rand::distributions::Alphanumeric)
+            .take(10)
+            .map(char::from)
+            .collect();
+
+        docker
+            .create_image(
+                Some(bollard::image::CreateImageOptions {
+                    from_image: image_name,
+                    ..Default::default()
+                }),
+                None,
+                None,
+            )
+            .try_collect::<Vec<_>>()
+            .await?;
+
+        let container = docker
+            .create_container(
+                Some(CreateContainerOptions {
+                    name: container_name,
+                    ..Default::default()
+                }),
+                Config {
+                    image: Some(image_name),
+                    cmd: Some(vec!["sh"]),
+                    tty: Some(true),
+                    ..Default::default()
+                },
+            )
+            .await?;
+
+        docker
+            .start_container(&container.id, None::<StartContainerOptions<String>>)
+            .await?;
+
+        Ok(Self {
+            id: container.id,
+            image: image_name.to_string(),
+            docker,
+        })
+    }
+
+    /// Executes a command inside the container using Bollard.
+    ///
+    /// Returns the output as a String.
+    pub async fn exec(&self, cmd: Vec<&str>) -> Result<String, bollard::errors::Error> {
+        let exec_instance = self
+            .docker
+            .create_exec(
+                &self.id,
+                CreateExecOptions {
+                    attach_stdout: Some(true),
+                    attach_stderr: Some(true),
+                    cmd: Some(cmd.iter().map(|s| s.to_string()).collect()),
+                    ..Default::default()
+                },
+            )
+            .await?;
+        let start_exec_result = self.docker.start_exec(&exec_instance.id, None).await?;
+
+        if let StartExecResults::Attached {
+            output: out_stream, ..
+        } = start_exec_result
+        {
+            let output = out_stream
+                .filter_map(|chunk| async {
+                    match chunk {
+                        Ok(bollard::container::LogOutput::StdOut { message })
+                        | Ok(bollard::container::LogOutput::StdErr { message }) => {
+                            Some(String::from_utf8_lossy(&message).to_string())
+                        }
+                        _ => None,
+                    }
+                })
+                .fold(String::new(), |mut acc, item| async move {
+                    acc.push_str(&item);
+                    acc
+                })
+                .await;
+
+            return Ok(output);
+        }
+
+        Ok(String::new())
+    }
+
+    /// Reads a file from the container and returns its contents as a String.
+    pub async fn read_file(&self, path: &str) -> Result<String, DockerContainerGuardError> {
+        let bytes = self
+            .docker
+            .download_from_container::<String>(
+                &self.id,
+                Some(bollard::container::DownloadFromContainerOptions { path: path.into() }),
+            )
+            .try_fold(BytesMut::new(), |mut bytes, b| async move {
+                bytes.put(b);
+                Ok(bytes)
+            })
+            .await?;
+
+        if bytes.len() == 0 {
+            return Err(DockerContainerGuardError::FileNotFound(path.into()));
+        }
+
+        for file in tar::Archive::new(&bytes[..]).entries()? {
+            let mut s = String::new();
+            file?.read_to_string(&mut s)?;
+            return Ok(s);
+        }
+
+        Err(DockerContainerGuardError::FileNotFound(path.into()))
+    }
+
+    /// Gets a HashMap of all users in the container, keyed by user name.
+    pub async fn get_users(
+        &self,
+    ) -> Result<HashMap<String, PasswdEntry>, DockerContainerGuardError> {
+        let output = self.read_file("/etc/passwd").await?;
+
+        return output
+            .lines()
+            .map(|line| {
+                PasswdEntry::from_line(line)
+                    .map(|entry| (entry.name.clone(), entry))
+                    .map_err(DockerContainerGuardError::from)
+            })
+            .collect();
+    }
+
+    /// Gets a specific user from the container by name.
+    pub async fn get_user(&self, name: &str) -> Result<PasswdEntry, DockerContainerGuardError> {
+        let users = self.get_users().await?;
+        let user = users
+            .get(name)
+            .ok_or_else(|| DockerContainerGuardError::UserNotFound(name.into()))?;
+
+        Ok(user.clone())
+    }
+}
+
+impl Drop for DockerContainerGuard {
+    fn drop(&mut self) {
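+        // Drop is synchronous, so the container removal is handed off to a
+        // background tokio task that force-removes it.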
+        let docker = self.docker.clone();
+        let container_id = self.id.clone();
+
+        tokio::spawn(async move {
+            docker
+                .remove_container(
+                    &container_id,
+                    Some(bollard::container::RemoveContainerOptions {
+                        force: true,
+                        ..Default::default()
+                    }),
+                )
+                .await
+        });
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[tokio::test]
+    async fn test_container_exec() -> Result<(), DockerContainerGuardError> {
+        let guard = DockerContainerGuard::spawn("registry.atmosphere.dev/docker.io/library/alpine:latest").await?;
+
+        let output = guard.exec(vec!["echo", "hello from container"]).await?;
+        assert!(output.contains("hello from container"));
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_container_read_file() -> Result<(), DockerContainerGuardError> {
+        let guard = DockerContainerGuard::spawn("registry.atmosphere.dev/docker.io/library/alpine:latest").await?;
+
+        let file = guard.read_file("/usr/lib/os-release").await?;
+        assert!(!file.is_empty());
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_container_get_user() -> Result<(), DockerContainerGuardError> {
+        let guard = DockerContainerGuard::spawn("registry.atmosphere.dev/docker.io/library/alpine:latest").await?;
+
+        let user = guard.get_user("root").await?;
+        assert_eq!(user.name, "root");
+
+        Ok(())
+    }
+}
diff --git a/doc/source/release-notes.rst b/doc/source/release-notes.rst
index c8fc6d0..6cadd61 100644
--- a/doc/source/release-notes.rst
+++ b/doc/source/release-notes.rst
@@ -5,6 +5,9 @@
 .. release-notes:: Development Release
    :branch: main
 
+.. release-notes:: OpenStack Dalmatian (2024.2)
+   :branch: stable/2024.2
+
 .. release-notes:: OpenStack Caracal (2024.1)
    :branch: stable/2024.1
 
diff --git a/docker-bake.hcl b/docker-bake.hcl
index fb1b0fe..cb068e3 100644
--- a/docker-bake.hcl
+++ b/docker-bake.hcl
@@ -15,6 +15,17 @@
     }
 }
 
+target "ovsinit" {
+    context = "images/ovsinit"
+    platforms = ["linux/amd64", "linux/arm64"]
+
+    contexts = {
+        "runtime" = "docker-image://docker.io/library/debian:bullseye-slim"
+        "rust" = "docker-image://docker.io/library/rust:1.84-bullseye"
+        "src" = "./crates/ovsinit"
+    }
+}
+
 target "ubuntu-cloud-archive" {
     context = "images/ubuntu-cloud-archive"
     platforms = ["linux/amd64", "linux/arm64"]
@@ -79,7 +90,7 @@
     platforms = ["linux/amd64", "linux/arm64"]
 
     contexts = {
-        "golang" = "docker-image://docker.io/library/golang:1.21"
+        "golang" = "docker-image://docker.io/library/golang:1.23"
     }
 
     tags = [
@@ -128,6 +139,7 @@
 
     args = {
         PROJECT = "nova"
+        SHELL = "/bin/bash"
     }
 
     tags = [
@@ -160,6 +172,7 @@
     contexts = {
         "golang" = "docker-image://docker.io/library/golang:1.20"
         "openvswitch" = "target:openvswitch"
+        "ovsinit" = "target:ovsinit"
     }
 
     args = {
@@ -216,8 +229,9 @@
     }
 
     contexts = {
-        "openstack-venv-builder" = "target:openstack-venv-builder"
         "openstack-python-runtime" = "target:openstack-python-runtime"
+        "openstack-venv-builder" = "target:openstack-venv-builder"
+        "ovsinit" = "target:ovsinit"
     }
 
     tags = [
diff --git a/galaxy.yml b/galaxy.yml
index ea31365..d16583d 100644
--- a/galaxy.yml
+++ b/galaxy.yml
@@ -15,7 +15,7 @@
   community.general: 7.3.0
   community.mysql: 3.6.0
   kubernetes.core: 2.4.0
-  openstack.cloud: 1.7.0
+  openstack.cloud: ">=2.0.0"
   vexxhost.ceph: 3.0.1
   vexxhost.kubernetes: ">=2.0.1"
 tags:
diff --git a/images/barbican/Dockerfile b/images/barbican/Dockerfile
index 500cc4d..8f0dfcb 100644
--- a/images/barbican/Dockerfile
+++ b/images/barbican/Dockerfile
@@ -3,7 +3,7 @@
 # Atmosphere-Rebuild-Time: 2024-06-25T22:49:25Z
 
 FROM openstack-venv-builder AS build
-ARG BARBICAN_GIT_REF=ca57ef5436e20e90cf6cd6853efe3c89a9afd986
+ARG BARBICAN_GIT_REF=b5841df387e5ab38caf173950a1d98ab37a51453
 ADD --keep-git-dir=true https://opendev.org/openstack/barbican.git#${BARBICAN_GIT_REF} /src/barbican
 RUN git -C /src/barbican fetch --unshallow
 RUN --mount=type=cache,mode=0755,target=/root/.cache/pip,sharing=private <<EOF bash -xe
diff --git a/images/cinder/Dockerfile b/images/cinder/Dockerfile
index 24db11a..6c384ff 100644
--- a/images/cinder/Dockerfile
+++ b/images/cinder/Dockerfile
@@ -3,7 +3,7 @@
 # Atmosphere-Rebuild-Time: 2024-06-25T22:49:25Z
 
 FROM openstack-venv-builder AS build
-ARG CINDER_GIT_REF=b0f0b9015b9dfa228dff98eeee5116d8eca1c3cc
+ARG CINDER_GIT_REF=9d1a7de850ad06e9f8e242ecfeb070da22c688c4
 ADD --keep-git-dir=true https://opendev.org/openstack/cinder.git#${CINDER_GIT_REF} /src/cinder
 RUN git -C /src/cinder fetch --unshallow
 COPY patches/cinder /patches/cinder
@@ -18,17 +18,17 @@
         storpool.spopenstack
 EOF
 ADD --chmod=644 \
-    https://github.com/storpool/storpool-openstack-integration/raw/master/drivers/cinder/openstack/bobcat/storpool.py \
+    https://github.com/storpool/storpool-openstack-integration/raw/master/drivers/cinder/openstack/caracal/storpool.py \
     /var/lib/openstack/lib/python3.10/site-packages/cinder/volume/drivers/storpool.py
 ADD --chmod=644 \
-    https://github.com/storpool/storpool-openstack-integration/raw/master/drivers/os_brick/openstack/bobcat/storpool.py \
+    https://github.com/storpool/storpool-openstack-integration/raw/master/drivers/os_brick/openstack/caracal/storpool.py \
     /var/lib/openstack/lib/python3.10/site-packages/os_brick/initiator/connectors/storpool.py
 
 FROM openstack-python-runtime
 RUN <<EOF bash -xe
 apt-get update -qq
 apt-get install -qq -y --no-install-recommends \
-    ceph-common lsscsi nfs-common nvme-cli python3-rados python3-rbd qemu-utils qemu-block-extra sysfsutils udev util-linux
+    ceph-common dmidecode lsscsi nfs-common nvme-cli python3-rados python3-rbd qemu-utils qemu-block-extra sysfsutils udev util-linux
 apt-get clean
 rm -rf /var/lib/apt/lists/*
 EOF
diff --git a/images/designate/Dockerfile b/images/designate/Dockerfile
index 83e737d..72bf3d2 100644
--- a/images/designate/Dockerfile
+++ b/images/designate/Dockerfile
@@ -3,7 +3,7 @@
 # Atmosphere-Rebuild-Time: 2024-06-25T22:49:25Z
 
 FROM openstack-venv-builder AS build
-ARG DESIGNATE_GIT_REF=097ffc6df181290eba1bcd7c492b1b505bc15434
+ARG DESIGNATE_GIT_REF=505ea9b1245e07b28e12c1be3ca5d5e86d77efaf
 ADD --keep-git-dir=true https://opendev.org/openstack/designate.git#${DESIGNATE_GIT_REF} /src/designate
 RUN git -C /src/designate fetch --unshallow
 COPY patches/designate /patches/designate
diff --git a/images/glance/Dockerfile b/images/glance/Dockerfile
index 62509ec..1d2ad18 100644
--- a/images/glance/Dockerfile
+++ b/images/glance/Dockerfile
@@ -3,10 +3,10 @@
 # Atmosphere-Rebuild-Time: 2024-06-25T22:49:25Z
 
 FROM openstack-venv-builder AS build
-ARG GLANCE_GIT_REF=0bcd6cd71c09917c6734421374fd598d73e8d0cc
+ARG GLANCE_GIT_REF=d1cc917a29c9d2e87b1bad51a33a8a2500eb69c6
 ADD --keep-git-dir=true https://opendev.org/openstack/glance.git#${GLANCE_GIT_REF} /src/glance
 RUN git -C /src/glance fetch --unshallow
-ADD --keep-git-dir=true https://opendev.org/openstack/glance_store.git#master /src/glance_store
+ADD --keep-git-dir=true https://opendev.org/openstack/glance_store.git#stable/2024.2 /src/glance_store
 RUN git -C /src/glance_store fetch --unshallow
 RUN --mount=type=cache,mode=0755,target=/root/.cache/pip,sharing=private <<EOF bash -xe
 pip3 install \
@@ -17,14 +17,14 @@
         storpool.spopenstack
 EOF
 ADD --chmod=644 \
-    https://github.com/storpool/storpool-openstack-integration/raw/master/drivers/os_brick/openstack/bobcat/storpool.py \
+    https://github.com/storpool/storpool-openstack-integration/raw/master/drivers/os_brick/openstack/caracal/storpool.py \
     /var/lib/openstack/lib/python3.10/site-packages/os_brick/initiator/connectors/storpool.py
 
 FROM openstack-python-runtime
 RUN <<EOF bash -xe
 apt-get update -qq
 apt-get install -qq -y --no-install-recommends \
-    ceph-common lsscsi nvme-cli python3-rados python3-rbd qemu-block-extra qemu-utils sysfsutils udev util-linux
+    ceph-common dmidecode lsscsi nvme-cli python3-rados python3-rbd qemu-block-extra qemu-utils sysfsutils udev util-linux
 apt-get clean
 rm -rf /var/lib/apt/lists/*
 EOF
diff --git a/images/heat/Dockerfile b/images/heat/Dockerfile
index 318c06f..72493d7 100644
--- a/images/heat/Dockerfile
+++ b/images/heat/Dockerfile
@@ -3,7 +3,7 @@
 # Atmosphere-Rebuild-Time: 2024-06-25T22:49:25Z
 
 FROM openstack-venv-builder AS build
-ARG HEAT_GIT_REF=80eea85194825773d1b60ecc4386b2d5ba52a066
+ARG HEAT_GIT_REF=64bdbb9bc66c38760989dd7bb2574ccc14069872
 ADD --keep-git-dir=true https://opendev.org/openstack/heat.git#${HEAT_GIT_REF} /src/heat
 RUN git -C /src/heat fetch --unshallow
 RUN --mount=type=cache,mode=0755,target=/root/.cache/pip,sharing=private <<EOF bash -xe
diff --git a/images/horizon/Dockerfile b/images/horizon/Dockerfile
index 2b7b0a0..62cb2b3 100644
--- a/images/horizon/Dockerfile
+++ b/images/horizon/Dockerfile
@@ -3,23 +3,23 @@
 # Atmosphere-Rebuild-Time: 2024-06-25T22:49:25Z
 
 FROM openstack-venv-builder AS build
-ARG HORIZON_GIT_REF=14212342cf8f7eb987e50de112958af31063e02e
+ARG HORIZON_GIT_REF=23d0b9525f7c11288d503123e29db0bd66f9ca88
 ADD --keep-git-dir=true https://opendev.org/openstack/horizon.git#${HORIZON_GIT_REF} /src/horizon
 RUN git -C /src/horizon fetch --unshallow
-ADD --keep-git-dir=true https://opendev.org/openstack/designate-dashboard.git#master /src/designate-dashboard
+ADD --keep-git-dir=true https://opendev.org/openstack/designate-dashboard.git#stable/2024.2 /src/designate-dashboard
 RUN git -C /src/designate-dashboard fetch --unshallow
-ADD --keep-git-dir=true https://opendev.org/openstack/heat-dashboard.git#3070b2c8d5cc6e070df01233ec50b32ea987d92d /src/heat-dashboard
+ADD --keep-git-dir=true https://opendev.org/openstack/heat-dashboard.git#stable/2024.2 /src/heat-dashboard
 RUN git -C /src/heat-dashboard fetch --unshallow
-ADD --keep-git-dir=true https://opendev.org/openstack/ironic-ui.git#master /src/ironic-ui
+ADD --keep-git-dir=true https://opendev.org/openstack/ironic-ui.git#stable/2024.2 /src/ironic-ui
 RUN git -C /src/ironic-ui fetch --unshallow
 ARG MAGNUM_UI_REF=c9fdb537eaded73e81ea296d893e45d753337dc7
 ADD --keep-git-dir=true https://opendev.org/openstack/magnum-ui.git#${MAGNUM_UI_REF} /src/magnum-ui
 RUN git -C /src/magnum-ui fetch --unshallow
-ADD --keep-git-dir=true https://opendev.org/openstack/manila-ui.git#master /src/manila-ui
+ADD --keep-git-dir=true https://opendev.org/openstack/manila-ui.git#stable/2024.2 /src/manila-ui
 RUN git -C /src/manila-ui fetch --unshallow
-ADD --keep-git-dir=true https://opendev.org/openstack/neutron-vpnaas-dashboard.git#master /src/neutron-vpnaas-dashboard
+ADD --keep-git-dir=true https://opendev.org/openstack/neutron-vpnaas-dashboard.git#stable/2024.2 /src/neutron-vpnaas-dashboard
 RUN git -C /src/neutron-vpnaas-dashboard fetch --unshallow
-ADD --keep-git-dir=true https://opendev.org/openstack/octavia-dashboard.git#master /src/octavia-dashboard
+ADD --keep-git-dir=true https://opendev.org/openstack/octavia-dashboard.git#stable/2024.2 /src/octavia-dashboard
 RUN git -C /src/octavia-dashboard fetch --unshallow
 COPY patches/horizon /patches/horizon
 RUN git -C /src/horizon apply --verbose /patches/horizon/*
diff --git a/images/ironic/Dockerfile b/images/ironic/Dockerfile
index 967437e..5f81ed0 100644
--- a/images/ironic/Dockerfile
+++ b/images/ironic/Dockerfile
@@ -3,7 +3,7 @@
 # Atmosphere-Rebuild-Time: 2024-06-25T22:49:25Z
 
 FROM openstack-venv-builder AS build
-ARG IRONIC_GIT_REF=22aa29b864eecd00bfb7c67cc2075030da1eb1d0
+ARG IRONIC_GIT_REF=5aa51d6985d25acd6abfb161c62c66facc20a6ca
 ADD --keep-git-dir=true https://opendev.org/openstack/ironic.git#${IRONIC_GIT_REF} /src/ironic
 RUN git -C /src/ironic fetch --unshallow
 RUN --mount=type=cache,mode=0755,target=/root/.cache/pip,sharing=private <<EOF bash -xe
diff --git a/images/keystone/Dockerfile b/images/keystone/Dockerfile
index ba76495..084869f 100644
--- a/images/keystone/Dockerfile
+++ b/images/keystone/Dockerfile
@@ -3,14 +3,14 @@
 # Atmosphere-Rebuild-Time: 2024-06-25T22:49:25Z
 
 FROM openstack-venv-builder AS build
-ARG KEYSTONE_GIT_REF=8ca73f758bb613a57815fbe4ae78e3d2afa4af49
+ARG KEYSTONE_GIT_REF=47891f4ae8fd7876e5a7657f58c32c371feeddc3
 ADD --keep-git-dir=true https://opendev.org/openstack/keystone.git#${KEYSTONE_GIT_REF} /src/keystone
 RUN git -C /src/keystone fetch --unshallow
 RUN --mount=type=cache,mode=0755,target=/root/.cache/pip,sharing=private <<EOF bash -xe
 pip3 install \
     --constraint /upper-constraints.txt \
         /src/keystone[ldap] \
-        keystone-keycloak-backend==0.1.8
+        keystone-keycloak-backend==0.2.0
 EOF
 
 FROM openstack-python-runtime
diff --git a/images/kubernetes-entrypoint/Dockerfile b/images/kubernetes-entrypoint/Dockerfile
index 67250af..98d162d 100644
--- a/images/kubernetes-entrypoint/Dockerfile
+++ b/images/kubernetes-entrypoint/Dockerfile
@@ -3,7 +3,7 @@
 # Atmosphere-Rebuild-Time: 2024-06-25T22:49:25Z
 
 FROM golang AS build
-ARG KUBERNETES_ENTRYPOINT_GIT_REF=4fbcf7ce324dc66e78480f73035e31434cfea1e8
+ARG KUBERNETES_ENTRYPOINT_GIT_REF=df2f40f3dec3aca3e648f4f351ff5ccfbd659b59
 ADD https://opendev.org/airship/kubernetes-entrypoint.git#${KUBERNETES_ENTRYPOINT_GIT_REF} /src
 WORKDIR /src
 RUN CGO_ENABLED=0 GOOS=linux go build -o /main
diff --git a/images/magnum/Dockerfile b/images/magnum/Dockerfile
index a8a63c6..c2cb92c 100644
--- a/images/magnum/Dockerfile
+++ b/images/magnum/Dockerfile
@@ -11,13 +11,10 @@
 RUN mv /${TARGETOS}-${TARGETARCH}/helm /usr/bin/helm
 
 FROM openstack-venv-builder AS build
-ARG MAGNUM_GIT_REF=c613ea4e419edc0086116da07e93cf19206746e1
+ARG MAGNUM_GIT_REF=db197e08a09da93062fc4222180051dadfc0f0d8
 ADD --keep-git-dir=true https://opendev.org/openstack/magnum.git#${MAGNUM_GIT_REF} /src/magnum
 RUN git -C /src/magnum fetch --unshallow
-COPY patches/magnum /patches/magnum
-RUN git -C /src/magnum apply --verbose /patches/magnum/*
 RUN --mount=type=cache,mode=0755,target=/root/.cache/pip,sharing=private <<EOF bash -xe
-sed -i s/^oslo.db===.*$/oslo.db==14.1.0/ /upper-constraints.txt
 pip3 install \
     --constraint /upper-constraints.txt \
         /src/magnum \
diff --git a/images/magnum/patches/magnum/0000-Revert-Remove-use-of-autocommit.patch b/images/magnum/patches/magnum/0000-Revert-Remove-use-of-autocommit.patch
deleted file mode 100644
index 67058bf..0000000
--- a/images/magnum/patches/magnum/0000-Revert-Remove-use-of-autocommit.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From 4f309a12433956d09af30eec5a80129dfd069e36 Mon Sep 17 00:00:00 2001
-From: Mohammed Naser <mnaser@vexxhost.com>
-Date: Tue, 24 Sep 2024 20:12:33 +0000
-Subject: [PATCH] Revert "Remove use of autocommit"
-
-This reverts commit d544698fae220549f68afa218dd252366fe90b27.
-
-Reason for revert: Broken networking.
-
-Change-Id: I002d4825308afb462e698ff69c69977f6da3d9a8
----
-
-diff --git a/magnum/db/sqlalchemy/api.py b/magnum/db/sqlalchemy/api.py
-index 0ec4380..f6f3087 100644
---- a/magnum/db/sqlalchemy/api.py
-+++ b/magnum/db/sqlalchemy/api.py
-@@ -47,7 +47,9 @@
- def _create_facade_lazily():
-     global _FACADE
-     if _FACADE is None:
--        _FACADE = db_session.EngineFacade.from_config(CONF)
-+        # FIXME(karolinku): autocommit=True it's not compatible with
-+        # SQLAlchemy 2.0, and will be removed in future
-+        _FACADE = db_session.EngineFacade.from_config(CONF, autocommit=True)
-         if profiler_sqlalchemy:
-             if CONF.profiler.enabled and CONF.profiler.trace_sqlalchemy:
-                 profiler_sqlalchemy.add_tracing(sa, _FACADE.get_engine(), "db")
-diff --git a/magnum/db/sqlalchemy/models.py b/magnum/db/sqlalchemy/models.py
-index 92b474d..0b7ae94 100644
---- a/magnum/db/sqlalchemy/models.py
-+++ b/magnum/db/sqlalchemy/models.py
-@@ -93,8 +93,7 @@
-         if session is None:
-             session = db_api.get_session()
- 
--        with session.begin():
--            super(MagnumBase, self).save(session)
-+        super(MagnumBase, self).save(session)
- 
- 
- Base = declarative_base(cls=MagnumBase)
diff --git a/images/manila/Dockerfile b/images/manila/Dockerfile
index f9caa61..52ac86e 100644
--- a/images/manila/Dockerfile
+++ b/images/manila/Dockerfile
@@ -3,7 +3,7 @@
 # Atmosphere-Rebuild-Time: 2024-06-25T22:49:25Z
 
 FROM openstack-venv-builder AS build
-ARG MANILA_GIT_REF=d8987589ae88ae9b2769fbe6f26d5b6994098038
+ARG MANILA_GIT_REF=09f3ab0a229362c00bb55f704cfeae43bccd3c8d
 ADD --keep-git-dir=true https://opendev.org/openstack/manila.git#${MANILA_GIT_REF} /src/manila
 RUN git -C /src/manila fetch --unshallow
 RUN --mount=type=cache,mode=0755,target=/root/.cache/pip,sharing=private <<EOF bash -xe
diff --git a/images/neutron/Dockerfile b/images/neutron/Dockerfile
index db5b52a..854ad0a 100644
--- a/images/neutron/Dockerfile
+++ b/images/neutron/Dockerfile
@@ -3,13 +3,13 @@
 # Atmosphere-Rebuild-Time: 2025-01-24T11:51:19Z
 
 FROM openstack-venv-builder AS build
-ARG NEUTRON_GIT_REF=019294c71d94b788c14b23dc1da3c21f51bcdb0b
+ARG NEUTRON_GIT_REF=804d6006e3f09c214d6de8a3f23de70c44f1d51d
 ADD --keep-git-dir=true https://opendev.org/openstack/neutron.git#${NEUTRON_GIT_REF} /src/neutron
 RUN git -C /src/neutron fetch --unshallow
-ARG NEUTRON_VPNAAS_GIT_REF=7bc6d94305d34269d7522a9850c22aa42b50cdab
+ARG NEUTRON_VPNAAS_GIT_REF=990e478b1e6db459b6cb9aec53ce808e2957bb65
 ADD --keep-git-dir=true https://opendev.org/openstack/neutron-vpnaas.git#${NEUTRON_VPNAAS_GIT_REF} /src/neutron-vpnaas
 RUN git -C /src/neutron-vpnaas fetch --unshallow
-ARG NETWORKING_BAREMETAL_GIT_REF=8b92ad81c0bdbfde60a6f0c47ff0133c08bb617e
+ARG NETWORKING_BAREMETAL_GIT_REF=1fba63ce21619d3fe70117c6679e53629c612bc1
 ADD --keep-git-dir=true https://opendev.org/openstack/networking-baremetal.git#${NETWORKING_BAREMETAL_GIT_REF} /src/networking-baremetal
 RUN git -C /src/networking-baremetal fetch --unshallow
 ARG POLICY_SERVER_GIT_REF=d87012b56741cb2ad44fa4dec9c5f24001ad60fe
@@ -36,4 +36,5 @@
 apt-get clean
 rm -rf /var/lib/apt/lists/*
 EOF
+COPY --from=ovsinit /usr/local/bin/ovsinit /usr/local/bin/ovsinit
 COPY --from=build --link /var/lib/openstack /var/lib/openstack
diff --git a/images/nova/Dockerfile b/images/nova/Dockerfile
index 378b95a..497d9f2 100644
--- a/images/nova/Dockerfile
+++ b/images/nova/Dockerfile
@@ -3,11 +3,9 @@
 # Atmosphere-Rebuild-Time: 2024-12-17T01:27:44Z
 
 FROM openstack-venv-builder AS build
-ARG NOVA_GIT_REF=c199becf52267ba37c5191f6f82e29bb5232b607
+ARG NOVA_GIT_REF=1b28f649feaf2c9929f15214814f8af950e5c19c
 ADD --keep-git-dir=true https://opendev.org/openstack/nova.git#${NOVA_GIT_REF} /src/nova
 RUN git -C /src/nova fetch --unshallow
-COPY patches/nova /patches/nova
-RUN git -C /src/nova apply --verbose /patches/nova/*
 ARG SCHEDULER_FILTERS_GIT_REF=77ed1c2ca70f4166a6d0995c7d3d90822f0ca6c0
 ADD --keep-git-dir=true https://github.com/vexxhost/nova-scheduler-filters.git#${SCHEDULER_FILTERS_GIT_REF} /src/nova-scheduler-filters
 RUN git -C /src/nova-scheduler-filters fetch --unshallow
@@ -21,7 +19,7 @@
         storpool.spopenstack
 EOF
 ADD --chmod=644 \
-    https://github.com/storpool/storpool-openstack-integration/raw/master/drivers/os_brick/openstack/bobcat/storpool.py \
+    https://github.com/storpool/storpool-openstack-integration/raw/master/drivers/os_brick/openstack/caracal/storpool.py \
     /var/lib/openstack/lib/python3.10/site-packages/os_brick/initiator/connectors/storpool.py
 
 FROM openstack-python-runtime
@@ -29,7 +27,7 @@
 RUN <<EOF bash -xe
 apt-get update -qq
 apt-get install -qq -y --no-install-recommends \
-    ceph-common genisoimage iproute2 libosinfo-bin lsscsi ndctl nfs-common nvme-cli openssh-client ovmf python3-libvirt python3-rados python3-rbd qemu-efi-aarch64 qemu-block-extra qemu-utils sysfsutils udev util-linux swtpm swtpm-tools libtpms0
+    ceph-common dmidecode genisoimage iproute2 libosinfo-bin lsscsi ndctl nfs-common nvme-cli openssh-client ovmf python3-libvirt python3-rados python3-rbd qemu-efi-aarch64 qemu-block-extra qemu-utils sysfsutils udev util-linux swtpm swtpm-tools libtpms0
 apt-get clean
 rm -rf /var/lib/apt/lists/*
 EOF
diff --git a/images/nova/patches/nova/0000-Fix-port-group-network-metadata-generation.patch b/images/nova/patches/nova/0000-Fix-port-group-network-metadata-generation.patch
deleted file mode 100644
index 98be564..0000000
--- a/images/nova/patches/nova/0000-Fix-port-group-network-metadata-generation.patch
+++ /dev/null
@@ -1,50 +0,0 @@
-From 8558f59630f81beba2789e6deef2cb5e6b367f20 Mon Sep 17 00:00:00 2001
-From: Mohammed Naser <mnaser@vexxhost.com>
-Date: Thu, 04 Jul 2024 14:09:23 -0400
-Subject: [PATCH] Fix port group network metadata generation
-
-When switching to using OpenStack SDK, there was a change missed
-that didn't account for the SDK returning generators instead of
-a list, so the loop on ports and port groups made it so that it
-started returning an empty list afterwards.
-
-Since there is no a masse of ports for a baremetal system usually,
-we take the generator into a list right away to prevent this.
-
-Closes-Bug: #2071972
-Change-Id: I90766f8c225d834bb2eec606754107ea6a212f6d
----
-
-diff --git a/nova/tests/unit/virt/ironic/test_driver.py b/nova/tests/unit/virt/ironic/test_driver.py
-index 736eac2..bf32918 100644
---- a/nova/tests/unit/virt/ironic/test_driver.py
-+++ b/nova/tests/unit/virt/ironic/test_driver.py
-@@ -2785,8 +2785,8 @@
-                                            node_id=self.node.id,
-                                            address='00:00:00:00:00:02',
-                                            port_group_id=portgroup.id)
--        self.mock_conn.ports.return_value = [port1, port2]
--        self.mock_conn.port_groups.return_value = [portgroup]
-+        self.mock_conn.ports.return_value = iter([port1, port2])
-+        self.mock_conn.port_groups.return_value = iter([portgroup])
- 
-         metadata = self.driver._get_network_metadata(self.node,
-                                                      self.network_info)
-diff --git a/nova/virt/ironic/driver.py b/nova/virt/ironic/driver.py
-index 3af85d3..c990218 100644
---- a/nova/virt/ironic/driver.py
-+++ b/nova/virt/ironic/driver.py
-@@ -1050,10 +1050,10 @@
-         :param network_info: Instance network information.
-         """
-         base_metadata = netutils.get_network_metadata(network_info)
--        ports = self.ironic_connection.ports(node=node.id, details=True)
--        port_groups = self.ironic_connection.port_groups(
-+        ports = list(self.ironic_connection.ports(node=node.id, details=True))
-+        port_groups = list(self.ironic_connection.port_groups(
-             node=node.id, details=True,
--        )
-+        ))
-         vif_id_to_objects = {'ports': {}, 'portgroups': {}}
-         for collection, name in ((ports, 'ports'),
-                                  (port_groups, 'portgroups')):
diff --git a/images/octavia/Dockerfile b/images/octavia/Dockerfile
index 14c7db2..ef9a147 100644
--- a/images/octavia/Dockerfile
+++ b/images/octavia/Dockerfile
@@ -3,10 +3,10 @@
 # Atmosphere-Rebuild-Time: 2024-06-25T22:49:25Z
 
 FROM openstack-venv-builder AS build
-ARG OCTAVIA_GIT_REF=824b51a1dad80292b7a8ad5d61bf3ce706b1fb29
+ARG OCTAVIA_GIT_REF=e15cb80d8f325e7474fb2175a1a8e9805a473295
 ADD --keep-git-dir=true https://opendev.org/openstack/octavia.git#${OCTAVIA_GIT_REF} /src/octavia
 RUN git -C /src/octavia fetch --unshallow
-ADD --keep-git-dir=true https://opendev.org/openstack/ovn-octavia-provider.git#master /src/ovn-octavia-provider
+ADD --keep-git-dir=true https://opendev.org/openstack/ovn-octavia-provider.git#stable/2024.2 /src/ovn-octavia-provider
 RUN git -C /src/ovn-octavia-provider fetch --unshallow
 RUN --mount=type=cache,mode=0755,target=/root/.cache/pip,sharing=private <<EOF bash -xe
 pip3 install \
diff --git a/images/openstack-venv-builder/Dockerfile b/images/openstack-venv-builder/Dockerfile
index a1346a5..70bab69 100644
--- a/images/openstack-venv-builder/Dockerfile
+++ b/images/openstack-venv-builder/Dockerfile
@@ -3,7 +3,7 @@
 # Atmosphere-Rebuild-Time: 2024-06-25T22:49:25Z
 
 FROM ubuntu-cloud-archive AS requirements
-ARG REQUIREMENTS_GIT_REF=18098b9abacbd8d7257bebc1b302294f634441ab
+ARG REQUIREMENTS_GIT_REF=d620ff557c22c4b8c1398b6dc84772c341fb4d5a
 ADD --keep-git-dir=true https://opendev.org/openstack/requirements.git#${REQUIREMENTS_GIT_REF} /src/requirements
 RUN cp /src/requirements/upper-constraints.txt /upper-constraints.txt
 RUN <<EOF sh -xe
diff --git a/images/openvswitch/Dockerfile b/images/openvswitch/Dockerfile
index 3ee23da..d107114 100644
--- a/images/openvswitch/Dockerfile
+++ b/images/openvswitch/Dockerfile
@@ -4,7 +4,7 @@
 
 FROM centos
 ADD --chmod=755 https://github.com/krallin/tini/releases/download/v0.19.0/tini /tini
-ARG OVS_SERIES=3.2
+ARG OVS_SERIES=3.3
 ARG OVS_VERSION=${OVS_SERIES}.0-80
 RUN <<EOF sh -xe
 dnf -y --setopt=install_weak_deps=False --setopt=tsflags=nodocs install \
@@ -14,7 +14,8 @@
     openvswitch${OVS_SERIES}-${OVS_VERSION}.el9s \
     openvswitch${OVS_SERIES}-test-${OVS_VERSION}.el9s \
     python3-netifaces \
-    tcpdump
+    tcpdump \
+    jq
 dnf -y clean all
 rm -rf /var/cache/dnf
 usermod -u 42424 openvswitch
diff --git a/images/ovn/Dockerfile b/images/ovn/Dockerfile
index d628380..0509356 100644
--- a/images/ovn/Dockerfile
+++ b/images/ovn/Dockerfile
@@ -17,7 +17,7 @@
 ARG TARGETPLATFORM
 ADD --chmod=755 https://dl.k8s.io/release/v1.29.3/bin/${TARGETPLATFORM}/kubectl /usr/local/bin/kubectl
 ARG OVN_SERIES=24.03
-ARG OVN_VERSION=${OVN_SERIES}.1-44
+ARG OVN_VERSION=${OVN_SERIES}.2-34
 RUN <<EOF sh -xe
 dnf -y --setopt=install_weak_deps=False --setopt=tsflags=nodocs install \
     firewalld-filesystem hostname ovn${OVN_SERIES}-${OVN_VERSION}.el9s procps-ng
@@ -34,7 +34,7 @@
 COPY --from=ovn-kubernetes --link /src/dist/images/ovndb-raft-functions.sh /root/ovndb-raft-functions.sh
 COPY --from=ovn-kubernetes --link /src/dist/images/ovnkube.sh /root/ovnkube.sh
 COPY --from=ovn-kubernetes --link /usr/bin/ovn-kube-util /usr/bin/ovn-kube-util
-
+COPY --from=ovsinit /usr/local/bin/ovsinit /usr/local/bin/ovsinit
 RUN <<EOF bash -xe
     usermod -u 42424 openvswitch
     mkdir -p  /var/log/ovn /var/lib/ovn /var/run/ovn
diff --git a/images/ovsinit/Dockerfile b/images/ovsinit/Dockerfile
new file mode 100644
index 0000000..edb2201
--- /dev/null
+++ b/images/ovsinit/Dockerfile
@@ -0,0 +1,11 @@
+# SPDX-FileCopyrightText: © 2025 VEXXHOST, Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+# Atmosphere-Rebuild-Time: 2025-02-16T12:56:04Z
+
+FROM rust AS builder
+WORKDIR /src
+COPY --from=src / /src
+RUN cargo install --path .
+
+FROM runtime
+COPY --from=builder /usr/local/cargo/bin/ovsinit /usr/local/bin/ovsinit
diff --git a/images/placement/Dockerfile b/images/placement/Dockerfile
index 958b35f..1ce55c8 100644
--- a/images/placement/Dockerfile
+++ b/images/placement/Dockerfile
@@ -3,7 +3,7 @@
 # Atmosphere-Rebuild-Time: 2024-06-25T22:49:25Z
 
 FROM openstack-venv-builder AS build
-ARG PLACEMENT_GIT_REF=96a9aeb3b4a6ffff5bbf247b213409395239fc7a
+ARG PLACEMENT_GIT_REF=828b2559a1b3c0b59c543e851c6ea3efb1baae20
 ADD --keep-git-dir=true https://opendev.org/openstack/placement.git#${PLACEMENT_GIT_REF} /src/placement
 RUN git -C /src/placement fetch --unshallow
 RUN --mount=type=cache,mode=0755,target=/root/.cache/pip,sharing=private <<EOF bash -xe
diff --git a/images/tempest/Dockerfile b/images/tempest/Dockerfile
index 5cabb86..4edec8e 100644
--- a/images/tempest/Dockerfile
+++ b/images/tempest/Dockerfile
@@ -10,7 +10,7 @@
     /src/octavia_tempest_plugin/contrib/test_server/test_server.go
 
 FROM openstack-venv-builder AS build
-ARG TEMPEST_GIT_REF=c0da6e843a74c2392c8e87e8ff36d2fea12949c4
+ARG TEMPEST_GIT_REF=338a3b7224a55e88fc46d7f80e8896a3231b910e
 ADD --keep-git-dir=true https://opendev.org/openstack/tempest.git#${TEMPEST_GIT_REF} /src/tempest
 RUN git -C /src/tempest fetch --unshallow
 ADD --keep-git-dir=true https://opendev.org/openstack/barbican-tempest-plugin.git#master /src/barbican-tempest-plugin
diff --git a/internal/openstack_helm/ironic.go b/internal/openstack_helm/ironic.go
new file mode 100644
index 0000000..ff19502
--- /dev/null
+++ b/internal/openstack_helm/ironic.go
@@ -0,0 +1,5 @@
+package openstack_helm
+
+type IronicConf struct {
+	Database *DatabaseConf `yaml:"database,omitempty"`
+}
diff --git a/internal/openstack_helm/memcached.go b/internal/openstack_helm/memcached.go
new file mode 100644
index 0000000..0051a08
--- /dev/null
+++ b/internal/openstack_helm/memcached.go
@@ -0,0 +1,5 @@
+package openstack_helm
+
+type MemcachedConf struct {
+	Database *DatabaseConf `yaml:"database,omitempty"`
+}
diff --git a/internal/openstack_helm/openstack_helm.go b/internal/openstack_helm/openstack_helm.go
index 5280e16..e7a509d 100644
--- a/internal/openstack_helm/openstack_helm.go
+++ b/internal/openstack_helm/openstack_helm.go
@@ -7,18 +7,35 @@
 )
 
 type HelmValues struct {
+	Pod  `yaml:"pod"`
 	Conf `yaml:"conf"`
 }
 
+type PodPriorityClassConfig map[string]string
+type PodRuntimeClassConfig map[string]string
+
+type PodMount map[string]interface{}
+
+type Pod struct {
+	PriorityClass PodPriorityClassConfig `yaml:"priorityClassName,omitempty"`
+	RuntimeClass  PodRuntimeClassConfig  `yaml:"runtimeClassName,omitempty"`
+	Mounts        map[string]PodMount    `yaml:"mounts,omitempty"`
+	AntiAffinityType PodAntiAffinityTypeConfig `yaml:"affinity.anti.type,omitempty"`
+}
+
+type PodAntiAffinityTypeConfig map[string]interface{}
+
 type Conf struct {
 	Barbican  *BarbicanConf  `yaml:"barbican,omitempty"`
 	Cinder    *CinderConf    `yaml:"cinder,omitempty"`
 	Designate *DesignateConf `yaml:"designate,omitempty"`
 	Glance    *GlanceConf    `yaml:"glance,omitempty"`
 	Heat      *HeatConf      `yaml:"heat,omitempty"`
+	Ironic    *IronicConf    `yaml:"ironic,omitempty"`
 	Keystone  *KeystoneConf  `yaml:"keystone,omitempty"`
 	Magnum    *MagnumConf    `yaml:"magnum,omitempty"`
 	Manila    *ManilaConf    `yaml:"manila,omitempty"`
+	Memcached *MemcachedConf `yaml:"memcached,omitempty"`
 	Neutron   *NeutronConf   `yaml:"neutron,omitempty"`
 	Nova      *NovaConf      `yaml:"nova,omitempty"`
 	Octavia   *OctaviaConf   `yaml:"octavia,omitempty"`
diff --git a/internal/testutils/oslo_db.go b/internal/testutils/oslo_db.go
index b7aabd1..3a9159d 100644
--- a/internal/testutils/oslo_db.go
+++ b/internal/testutils/oslo_db.go
@@ -1,9 +1,11 @@
 package testutils
 
 import (
+	"strings"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 
 	"github.com/vexxhost/atmosphere/internal/openstack_helm"
 )
@@ -13,3 +15,49 @@
 	assert.Equal(t, 5, config.MaxPoolSize)
 	assert.Equal(t, -1, config.MaxRetries)
 }
+
+func podNameForClass(pod string) string {
+	// There are a few pods which are built/created inside "helm-toolkit" so
+	// we cannot refer to them by their full name or the code will get real
+	// messy.
+	if strings.HasSuffix(pod, "db_init") {
+		return "db_init"
+	} else if strings.HasSuffix(pod, "db_sync") {
+		return "db_sync"
+	} else if strings.HasSuffix(pod, "_bootstrap") {
+		return "bootstrap"
+	}
+
+	return pod
+}
+
+func TestAllPodsHaveRuntimeClass(t *testing.T, vals *openstack_helm.HelmValues) {
+	for pod := range vals.Pod.Mounts {
+		podName := podNameForClass(pod)
+		assert.Contains(t, vals.Pod.RuntimeClass, podName)
+	}
+}
+
+func TestAllPodsHavePriorityClass(t *testing.T, vals *openstack_helm.HelmValues) {
+	for pod := range vals.Pod.Mounts {
+		podName := podNameForClass(pod)
+		assert.Contains(t, vals.Pod.PriorityClass, podName)
+	}
+}
+
+func TestAllPodsHaveAntiAffinityType(t *testing.T, vals *openstack_helm.HelmValues) {
+	for pod := range vals.Pod.AntiAffinityType {
+		podName := podNameForClass(pod)
+
+		expected := "requiredDuringSchedulingIgnoredDuringExecution"
+
+		defaultRaw, ok := vals.Pod.AntiAffinityType["default"]
+		require.True(t, ok, "default key not found in affinity.anti.type block")
+
+		actual, ok := defaultRaw.(string)
+		require.True(t, ok, "default anti affinity type is not a string")
+
+		assert.Equal(t, expected, actual, "anti affinity type does not match expected value")
+		assert.Contains(t, vals.Pod.AntiAffinityType, podName)
+	}
+}
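
The helpers above are intended to be called from per-chart test packages. A minimal, hypothetical sketch of such a caller follows; the package name, the values.yaml path, and the use of gopkg.in/yaml.v3 for decoding are assumptions for illustration, not part of this change. Any decoder that honours the struct's yaml tags would work equally well.

package glance_test // hypothetical chart test package

import (
	"os"
	"testing"

	"gopkg.in/yaml.v3"

	"github.com/vexxhost/atmosphere/internal/openstack_helm"
	"github.com/vexxhost/atmosphere/internal/testutils"
)

// Hypothetical example: decode a chart's overrides into HelmValues and run
// the shared pod-class checks introduced in this change.
func TestPodClasses(t *testing.T) {
	raw, err := os.ReadFile("values.yaml") // path is an assumption
	if err != nil {
		t.Fatal(err)
	}

	var vals openstack_helm.HelmValues
	if err := yaml.Unmarshal(raw, &vals); err != nil {
		t.Fatal(err)
	}

	testutils.TestAllPodsHaveRuntimeClass(t, &vals)
	testutils.TestAllPodsHavePriorityClass(t, &vals)
	testutils.TestAllPodsHaveAntiAffinityType(t, &vals)
}
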
diff --git a/molecule/default/requirements.txt b/molecule/default/requirements.txt
index 862d238..4544af7 100644
--- a/molecule/default/requirements.txt
+++ b/molecule/default/requirements.txt
@@ -1,3 +1,3 @@
 molecule==3.5.2 # https://github.com/ansible-community/molecule/issues/3435
-openstacksdk==0.61.0
+openstacksdk
 netaddr
diff --git a/molecule/keycloak/verify.yml b/molecule/keycloak/verify.yml
index e481319..7a4a179 100644
--- a/molecule/keycloak/verify.yml
+++ b/molecule/keycloak/verify.yml
@@ -48,15 +48,15 @@
       #              we try a few more times.
       retries: 30
       delay: 1
-      until: identity_user_info_result.openstack_users | length > 0
+      until: identity_user_info_result.users | length > 0
 
     - name: Assert that the user exists
       run_once: true
       ansible.builtin.assert:
         that:
-          - identity_user_info_result.openstack_users | length > 0
-          - identity_user_info_result.openstack_users[0].id == keycloak_user_info.id | regex_replace('-', '')
-          - identity_user_info_result.openstack_users[0].name == keycloak_user_info.username
+          - identity_user_info_result.users | length > 0
+          - identity_user_info_result.users[0].id == keycloak_user_info.id | regex_replace('-', '')
+          - identity_user_info_result.users[0].name == keycloak_user_info.username
 
     # TODO: Simulate Keystone authentication
     # TODO: Simulate Horizon login
diff --git a/plugins/modules/subnet.py b/plugins/modules/subnet.py
new file mode 100644
index 0000000..7c50aee
--- /dev/null
+++ b/plugins/modules/subnet.py
@@ -0,0 +1,486 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Benno Joy <benno@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = '''
+---
+module: subnet
+short_description: Add/Remove subnet to an OpenStack network
+author: OpenStack Ansible SIG
+description:
+   - Add or Remove a subnet to an OpenStack network
+options:
+    state:
+        description:
+            - Indicate desired state of the resource
+        choices: ['present', 'absent']
+        default: present
+        type: str
+    allocation_pool_start:
+        description:
+            - From the subnet pool the starting address from which the IP
+              should be allocated.
+        type: str
+    allocation_pool_end:
+        description:
+            - From the subnet pool the last IP that should be assigned to the
+              virtual machines.
+        type: str
+    allocation_pools:
+        description:
+            - List of allocation pools to assign to the subnet. Each element
+              consists of a 'start' and 'end' value.
+        type: list
+        elements: dict
+    cidr:
+        description:
+            - The CIDR representation of the subnet that should be assigned to
+              the subnet. Required when I(state) is 'present' and a subnetpool
+              is not specified.
+        type: str
+    description:
+        description:
+            - Description of the subnet
+        type: str
+    disable_gateway_ip:
+        description:
+            - The gateway IP would not be assigned for this subnet
+        type: bool
+        aliases: ['no_gateway_ip']
+        default: 'false'
+    dns_nameservers:
+        description:
+            - List of DNS nameservers for this subnet.
+        type: list
+        elements: str
+    extra_attrs:
+        description:
+            - Dictionary with extra key/value pairs passed to the API
+        required: false
+        aliases: ['extra_specs']
+        default: {}
+        type: dict
+    host_routes:
+        description:
+            - A list of host route dictionaries for the subnet.
+        type: list
+        elements: dict
+        suboptions:
+            destination:
+                description: The destination network (CIDR).
+                type: str
+                required: true
+            nexthop:
+                description: The next hop (aka gateway) for the I(destination).
+                type: str
+                required: true
+    gateway_ip:
+        description:
+            - The ip that would be assigned to the gateway for this subnet
+        type: str
+    ip_version:
+        description:
+            - The IP version of the subnet 4 or 6
+        default: 4
+        type: int
+        choices: [4, 6]
+    is_dhcp_enabled:
+        description:
+            - Whether DHCP should be enabled for this subnet.
+        type: bool
+        aliases: ['enable_dhcp']
+        default: 'true'
+    ipv6_ra_mode:
+        description:
+            - IPv6 router advertisement mode
+        choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
+        type: str
+    ipv6_address_mode:
+        description:
+            - IPv6 address mode
+        choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
+        type: str
+    name:
+        description:
+            - The name of the subnet that should be created. Although Neutron
+              allows for non-unique subnet names, this module enforces subnet
+              name uniqueness.
+        required: true
+        type: str
+    network:
+        description:
+            - Name or id of the network to which the subnet should be attached
+            - Required when I(state) is 'present'
+        aliases: ['network_name']
+        type: str
+    project:
+        description:
+            - Project name or ID containing the subnet (name admin-only)
+        type: str
+    prefix_length:
+        description:
+            - The prefix length to use for subnet allocation from a subnet pool
+        type: str
+    use_default_subnet_pool:
+        description:
+            - Use the default subnetpool for I(ip_version) to obtain a CIDR.
+        type: bool
+        aliases: ['use_default_subnetpool']
+    subnet_pool:
+        description:
+            - The subnet pool name or ID from which to obtain a CIDR
+        type: str
+        required: false
+extends_documentation_fragment:
+- openstack.cloud.openstack
+'''
+
+EXAMPLES = '''
+# Create a new (or update an existing) subnet on the specified network
+- openstack.cloud.subnet:
+    state: present
+    network_name: network1
+    name: net1subnet
+    cidr: 192.168.0.0/24
+    dns_nameservers:
+       - 8.8.8.7
+       - 8.8.8.8
+    host_routes:
+       - destination: 0.0.0.0/0
+         nexthop: 12.34.56.78
+       - destination: 192.168.0.0/24
+         nexthop: 192.168.0.1
+
+# Delete a subnet
+- openstack.cloud.subnet:
+    state: absent
+    name: net1subnet
+
+# Create an ipv6 stateless subnet
+- openstack.cloud.subnet:
+    state: present
+    name: intv6
+    network_name: internal
+    ip_version: 6
+    cidr: 2db8:1::/64
+    dns_nameservers:
+        - 2001:4860:4860::8888
+        - 2001:4860:4860::8844
+    ipv6_ra_mode: dhcpv6-stateless
+    ipv6_address_mode: dhcpv6-stateless
+'''
+
+RETURN = '''
+id:
+    description: Id of subnet
+    returned: On success when subnet exists.
+    type: str
+subnet:
+    description: Dictionary describing the subnet.
+    returned: On success when subnet exists.
+    type: dict
+    contains:
+        allocation_pools:
+            description: Allocation pools associated with this subnet.
+            returned: success
+            type: list
+            elements: dict
+        cidr:
+            description: Subnet's CIDR.
+            returned: success
+            type: str
+        created_at:
+            description: Created at timestamp
+            type: str
+        description:
+            description: Description
+            type: str
+        dns_nameservers:
+            description: DNS name servers for this subnet.
+            returned: success
+            type: list
+            elements: str
+        dns_publish_fixed_ip:
+            description: Whether to publish DNS records for fixed IPs.
+            returned: success
+            type: bool
+        gateway_ip:
+            description: Subnet's gateway ip.
+            returned: success
+            type: str
+        host_routes:
+            description: A list of host routes.
+            returned: success
+            type: str
+        id:
+            description: Unique UUID.
+            returned: success
+            type: str
+        ip_version:
+            description: IP version for this subnet.
+            returned: success
+            type: int
+        ipv6_address_mode:
+            description: |
+                The IPv6 address modes which are 'dhcpv6-stateful',
+                'dhcpv6-stateless' or 'slaac'.
+            returned: success
+            type: str
+        ipv6_ra_mode:
+            description: |
+                The IPv6 router advertisements modes which can be 'slaac',
+                'dhcpv6-stateful', 'dhcpv6-stateless'.
+            returned: success
+            type: str
+        is_dhcp_enabled:
+            description: DHCP enable flag for this subnet.
+            returned: success
+            type: bool
+        name:
+            description: Name given to the subnet.
+            returned: success
+            type: str
+        network_id:
+            description: Network ID this subnet belongs in.
+            returned: success
+            type: str
+        prefix_length:
+            description: |
+                The prefix length to use for subnet allocation from a subnet
+                pool.
+            returned: success
+            type: str
+        project_id:
+            description: Project id associated with this subnet.
+            returned: success
+            type: str
+        revision_number:
+            description: Revision number of the resource
+            returned: success
+            type: int
+        segment_id:
+            description: The ID of the segment this subnet is associated with.
+            returned: success
+            type: str
+        service_types:
+            description: Service types for this subnet
+            returned: success
+            type: list
+        subnet_pool_id:
+            description: The subnet pool ID from which to obtain a CIDR.
+            returned: success
+            type: str
+        tags:
+            description: Tags
+            type: str
+        updated_at:
+            description: Timestamp when the subnet was last updated.
+            returned: success
+            type: str
+        use_default_subnet_pool:
+            description: |
+                Whether to use the default subnet pool to obtain a CIDR.
+            returned: success
+            type: bool
+'''
+
+from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
+
+
+class SubnetModule(OpenStackModule):
+    ipv6_mode_choices = ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
+    argument_spec = dict(
+        name=dict(required=True),
+        network=dict(aliases=['network_name']),
+        cidr=dict(),
+        description=dict(),
+        ip_version=dict(type='int', default=4, choices=[4, 6]),
+        is_dhcp_enabled=dict(type='bool', default=True,
+                             aliases=['enable_dhcp']),
+        gateway_ip=dict(),
+        disable_gateway_ip=dict(
+            type='bool', default=False, aliases=['no_gateway_ip']),
+        dns_nameservers=dict(type='list', elements='str'),
+        allocation_pool_start=dict(),
+        allocation_pool_end=dict(),
+        allocation_pools=dict(type='list', elements='dict'),
+        host_routes=dict(type='list', elements='dict'),
+        ipv6_ra_mode=dict(choices=ipv6_mode_choices),
+        ipv6_address_mode=dict(choices=ipv6_mode_choices),
+        subnet_pool=dict(),
+        prefix_length=dict(),
+        use_default_subnet_pool=dict(
+            type='bool', aliases=['use_default_subnetpool']),
+        extra_attrs=dict(type='dict', default=dict(), aliases=['extra_specs']),
+        state=dict(default='present',
+                   choices=['absent', 'present']),
+        project=dict(),
+    )
+
+    module_kwargs = dict(
+        supports_check_mode=True,
+        required_together=[['allocation_pool_end', 'allocation_pool_start']],
+        required_if=[
+            ('state', 'present', ('network',)),
+            ('state', 'present',
+             ('cidr', 'use_default_subnet_pool', 'subnet_pool'), True),
+        ],
+        mutually_exclusive=[
+            ('use_default_subnet_pool', 'subnet_pool'),
+            ('allocation_pool_start', 'allocation_pools'),
+            ('allocation_pool_end', 'allocation_pools')
+        ]
+    )
+
+    # resource attributes obtainable directly from params
+    attr_params = ('cidr', 'description',
+                   'dns_nameservers', 'gateway_ip', 'host_routes',
+                   'ip_version', 'ipv6_address_mode', 'ipv6_ra_mode',
+                   'is_dhcp_enabled', 'name', 'prefix_length',
+                   'use_default_subnet_pool',)
+
+    def _validate_update(self, subnet, update):
+        """ Check for differences in non-updatable values """
+        # Ref.: https://docs.openstack.org/api-ref/network/v2/index.html#update-subnet
+        for attr in ('cidr', 'ip_version', 'ipv6_ra_mode', 'ipv6_address_mode',
+                     'prefix_length', 'use_default_subnet_pool'):
+            if attr in update and update[attr] != subnet[attr]:
+                self.fail_json(
+                    msg='Cannot update {0} in existing subnet'.format(attr))
+
+    def _system_state_change(self, subnet, network, project, subnet_pool):
+        state = self.params['state']
+        if state == 'absent':
+            return subnet is not None
+        # else state is present
+        if not subnet:
+            return True
+        params = self._build_params(network, project, subnet_pool)
+        updates = self._build_updates(subnet, params)
+        self._validate_update(subnet, updates)
+        return bool(updates)
+
+    def _build_pool(self):
+        pool_start = self.params['allocation_pool_start']
+        pool_end = self.params['allocation_pool_end']
+        if pool_start:
+            return [dict(start=pool_start, end=pool_end)]
+        return None
+
+    def _build_params(self, network, project, subnet_pool):
+        params = {attr: self.params[attr] for attr in self.attr_params}
+        params['network_id'] = network.id
+        if project:
+            params['project_id'] = project.id
+        if subnet_pool:
+            params['subnet_pool_id'] = subnet_pool.id
+        if self.params['allocation_pool_start']:
+            params['allocation_pools'] = self._build_pool()
+        else:
+            params['allocation_pools'] = self.params['allocation_pools']
+        params = self._add_extra_attrs(params)
+        params = {k: v for k, v in params.items() if v is not None}
+        if self.params['disable_gateway_ip']:
+            params['gateway_ip'] = None
+        return params
+
+    def _build_updates(self, subnet, params):
+        # Sort lists before doing comparisons
+        if 'dns_nameservers' in params:
+            params['dns_nameservers'].sort()
+            subnet['dns_nameservers'].sort()
+
+        if 'host_routes' in params:
+            params['host_routes'].sort(key=lambda r: sorted(r.items()))
+            subnet['host_routes'].sort(key=lambda r: sorted(r.items()))
+
+        if 'allocation_pools' in params:
+            params['allocation_pools'].sort(key=lambda r: sorted(r.items()))
+            subnet['allocation_pools'].sort(key=lambda r: sorted(r.items()))
+
+        updates = {k: params[k] for k in params if params[k] != subnet[k]}
+        if self.params['disable_gateway_ip'] and subnet.gateway_ip:
+            updates['gateway_ip'] = None
+        return updates
+
+    def _add_extra_attrs(self, params):
+        duplicates = set(self.params['extra_attrs']) & set(params)
+        if duplicates:
+            self.fail_json(msg='Duplicate key(s) {0} in extra_specs'
+                           .format(list(duplicates)))
+        params.update(self.params['extra_attrs'])
+        return params
+
+    def run(self):
+        state = self.params['state']
+        network_name_or_id = self.params['network']
+        project_name_or_id = self.params['project']
+        subnet_pool_name_or_id = self.params['subnet_pool']
+        subnet_name = self.params['name']
+        gateway_ip = self.params['gateway_ip']
+        disable_gateway_ip = self.params['disable_gateway_ip']
+
+        # fail early if incompatible options have been specified
+        if disable_gateway_ip and gateway_ip:
+            self.fail_json(msg='no_gateway_ip is not allowed with gateway_ip')
+
+        subnet_pool_filters = {}
+        filters = {}
+
+        project = None
+        if project_name_or_id:
+            project = self.conn.identity.find_project(project_name_or_id,
+                                                      ignore_missing=False)
+            subnet_pool_filters['project_id'] = project.id
+            filters['project_id'] = project.id
+
+        network = None
+        if network_name_or_id:
+            # At this point filters can only contain project_id
+            network = self.conn.network.find_network(network_name_or_id,
+                                                     ignore_missing=False,
+                                                     **filters)
+            filters['network_id'] = network.id
+
+        subnet_pool = None
+        if subnet_pool_name_or_id:
+            subnet_pool = self.conn.network.find_subnet_pool(
+                subnet_pool_name_or_id,
+                ignore_missing=False,
+                **subnet_pool_filters)
+            filters['subnet_pool_id'] = subnet_pool.id
+
+        subnet = self.conn.network.find_subnet(subnet_name, **filters)
+
+        if self.ansible.check_mode:
+            self.exit_json(changed=self._system_state_change(
+                subnet, network, project, subnet_pool))
+
+        changed = False
+        if state == 'present':
+            params = self._build_params(network, project, subnet_pool)
+            if subnet is None:
+                subnet = self.conn.network.create_subnet(**params)
+                changed = True
+            else:
+                updates = self._build_updates(subnet, params)
+                if updates:
+                    self._validate_update(subnet, updates)
+                    subnet = self.conn.network.update_subnet(subnet, **updates)
+                    changed = True
+            self.exit_json(changed=changed, subnet=subnet, id=subnet.id)
+        elif state == 'absent' and subnet is not None:
+            self.conn.network.delete_subnet(subnet)
+            changed = True
+        self.exit_json(changed=changed)
+
+
+def main():
+    module = SubnetModule()
+    module()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/releasenotes/notes/add-extra-keycloak-realm-options-a8b14740bd999ebb.yaml b/releasenotes/notes/add-extra-keycloak-realm-options-a8b14740bd999ebb.yaml
new file mode 100644
index 0000000..4a7a43b
--- /dev/null
+++ b/releasenotes/notes/add-extra-keycloak-realm-options-a8b14740bd999ebb.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - The Keystone role now supports additional parameters when creating the
+    Keycloak realm to allow for the configuration of options such as password
+    policy, brute force protection, and more.
diff --git a/releasenotes/notes/add-glance-image-tempfile-path-6c1ec42dccba948a.yaml b/releasenotes/notes/add-glance-image-tempfile-path-6c1ec42dccba948a.yaml
new file mode 100644
index 0000000..370e0cb
--- /dev/null
+++ b/releasenotes/notes/add-glance-image-tempfile-path-6c1ec42dccba948a.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - Add the ``glance_image_tempfile_path`` variable to allow users to change the
+    temporary path used for downloading images before uploading them to Glance.
diff --git a/releasenotes/notes/add-mfa-config-options-6f2d6811bca1a789.yaml b/releasenotes/notes/add-mfa-config-options-6f2d6811bca1a789.yaml
new file mode 100644
index 0000000..cb2d445
--- /dev/null
+++ b/releasenotes/notes/add-mfa-config-options-6f2d6811bca1a789.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - The Keystone role now supports configuring multi-factor authentication for
+    the users within the Atmosphere realm.
diff --git a/releasenotes/notes/add-missing-osbrick-helper-0bc348399986a5d6.yaml b/releasenotes/notes/add-missing-osbrick-helper-0bc348399986a5d6.yaml
new file mode 100644
index 0000000..e663f72
--- /dev/null
+++ b/releasenotes/notes/add-missing-osbrick-helper-0bc348399986a5d6.yaml
@@ -0,0 +1,15 @@
+---
+fixes:
+  - The ``[privsep_osbrick]/helper_command`` configuration value was not
+    configured in either the Cinder or Nova services, which led to the
+    inability to run certain CLI commands since they fell back to a plain
+    ``sudo`` instead.  This has been fixed by adding the missing helper command
+    configuration to both services.
+  - The ``dmidecode`` package which is required by the ``os-brick`` library for
+    certain operations was not installed on the images that needed it, which
+    can cause NVMe-oF discovery issues.  The package has been added to all
+    images that require it.
+  - The ``[cinder]/auth_type`` configuration value was not set, resulting in
+    the entire Cinder section not being rendered in the configuration file.  It
+    is now set to ``password``, which fully renders the Cinder section
+    for OpenStack Nova.
diff --git a/releasenotes/notes/add-missing-shell-dc5f8d4fca30eca6.yaml b/releasenotes/notes/add-missing-shell-dc5f8d4fca30eca6.yaml
new file mode 100644
index 0000000..025b2f1
--- /dev/null
+++ b/releasenotes/notes/add-missing-shell-dc5f8d4fca30eca6.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+  - The ``nova`` user within the ``nova-ssh`` image was missing the ``SHELL``
+    build argument, which would cause live and cold migrations to fail.  This
+    has been resolved by adding the missing build argument.
diff --git a/releasenotes/notes/add-ovsinit-56990eaaf93c6f9d.yaml b/releasenotes/notes/add-ovsinit-56990eaaf93c6f9d.yaml
new file mode 100644
index 0000000..5482a80
--- /dev/null
+++ b/releasenotes/notes/add-ovsinit-56990eaaf93c6f9d.yaml
@@ -0,0 +1,9 @@
+---
+features:
+  - Introduced a new Rust-based binary, ``ovsinit``, which handles
+    the migration of IP addresses from a physical interface to an OVS bridge
+    during the Neutron or OVN initialization process.
+fixes:
+  - During the Neutron or OVN initialization process, the routes assigned to
+    the physical interface are now moved to the OVS bridge
+    to maintain the connectivity of the host.
diff --git a/releasenotes/notes/adding-nicname-as-an-option-f7e790ea8174e6af.yaml b/releasenotes/notes/adding-nicname-as-an-option-f7e790ea8174e6af.yaml
new file mode 100644
index 0000000..103a6db
--- /dev/null
+++ b/releasenotes/notes/adding-nicname-as-an-option-f7e790ea8174e6af.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    It is now possible to configure DPDK interfaces using their interface names in addition
+    to the ``pci_id``, which eases deployment in heterogeneous environments.
diff --git a/releasenotes/notes/allow-configuring-ingress-class-name-0c50f395d9a1b213.yaml b/releasenotes/notes/allow-configuring-ingress-class-name-0c50f395d9a1b213.yaml
new file mode 100644
index 0000000..23172db
--- /dev/null
+++ b/releasenotes/notes/allow-configuring-ingress-class-name-0c50f395d9a1b213.yaml
@@ -0,0 +1,7 @@
+---
+features:
+  - |
+    All roles that deploy ``Ingress`` resources as part of the deployment
+    process now support the ability to specify the class name to use for the
+    ``Ingress`` resource.  This is done by setting the
+    ``<role>_ingress_class_name`` variable to the desired class name.
diff --git a/releasenotes/notes/allow-using-default-cert-b28067c8a1525e1f.yaml b/releasenotes/notes/allow-using-default-cert-b28067c8a1525e1f.yaml
new file mode 100644
index 0000000..aac6ce0
--- /dev/null
+++ b/releasenotes/notes/allow-using-default-cert-b28067c8a1525e1f.yaml
@@ -0,0 +1,7 @@
+---
+features:
+  - |
+    It's now possible to use the default TLS certificates configured within the
+    ingress by using the ``ingress_use_default_tls_certificate`` variable which
+    will omit the ``tls`` section from any ``Ingress`` resources managed by
+    Atmosphere.
diff --git a/releasenotes/notes/barbican-priority-runtime-class-b84c8515f03e18c5.yaml b/releasenotes/notes/barbican-priority-runtime-class-b84c8515f03e18c5.yaml
new file mode 100644
index 0000000..4414c45
--- /dev/null
+++ b/releasenotes/notes/barbican-priority-runtime-class-b84c8515f03e18c5.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - The Barbican role now allows users to configure the ``priorityClassName`` and
+    the ``runtimeClassName`` for all of the different components of the service.
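
For example, the classes can be supplied through the role's Helm values override
(a sketch based on the chart keys exercised by the tests added in this change;
``platform`` and ``kata-clh`` are illustrative class names that must already
exist in the cluster):

    barbican_helm_values:
      pod:
        priorityClassName:
          barbican_api: platform
        runtimeClassName:
          barbican_api: kata-clh
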
diff --git a/releasenotes/notes/bump-openstack-collection-382923f617548b01.yaml b/releasenotes/notes/bump-openstack-collection-382923f617548b01.yaml
new file mode 100644
index 0000000..60eb020
--- /dev/null
+++ b/releasenotes/notes/bump-openstack-collection-382923f617548b01.yaml
@@ -0,0 +1,4 @@
+---
+other:
+  - The Atmosphere collection now uses the new major version of the OpenStack
+    collection as a dependency.
diff --git a/releasenotes/notes/bump-openvswitch-435cea61eec39371.yaml b/releasenotes/notes/bump-openvswitch-435cea61eec39371.yaml
new file mode 100644
index 0000000..2b1dc99
--- /dev/null
+++ b/releasenotes/notes/bump-openvswitch-435cea61eec39371.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+  - The Open vSwitch version has been bumped to 3.3.0 in order to resolve packet
+    drops accompanied by ``Packet dropped. Max recirculation depth exceeded.``
+    log messages in the Open vSwitch log.
diff --git a/releasenotes/notes/bump-ovn-version-d4216ca44d5e6f41.yaml b/releasenotes/notes/bump-ovn-version-d4216ca44d5e6f41.yaml
new file mode 100644
index 0000000..6988615
--- /dev/null
+++ b/releasenotes/notes/bump-ovn-version-d4216ca44d5e6f41.yaml
@@ -0,0 +1,4 @@
+---
+upgrade:
+  - |
+    Bump OVN from 24.03.1-44 to 24.03.2.34.
diff --git a/releasenotes/notes/bump-storpool-caracal-525bae827bef1f62.yaml b/releasenotes/notes/bump-storpool-caracal-525bae827bef1f62.yaml
new file mode 100644
index 0000000..13de75f
--- /dev/null
+++ b/releasenotes/notes/bump-storpool-caracal-525bae827bef1f62.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - The StorPool driver has been updated from the Bobcat release to the Caracal
+    release.
diff --git a/releasenotes/notes/cinder-priority-runtime-class-910112b1da7bd5c1.yaml b/releasenotes/notes/cinder-priority-runtime-class-910112b1da7bd5c1.yaml
new file mode 100644
index 0000000..6852e16
--- /dev/null
+++ b/releasenotes/notes/cinder-priority-runtime-class-910112b1da7bd5c1.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - The Cinder role now allows users to configure the ``priorityClassName`` and
+    the ``runtimeClassName`` for all of the different components of the service.
diff --git a/releasenotes/notes/designate-priority-runtime-class-63f9e7efe1b3e494.yaml b/releasenotes/notes/designate-priority-runtime-class-63f9e7efe1b3e494.yaml
new file mode 100644
index 0000000..a42e7c2
--- /dev/null
+++ b/releasenotes/notes/designate-priority-runtime-class-63f9e7efe1b3e494.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - The Designate role now allows users to configure the ``priorityClassName`` and
+    the ``runtimeClassName`` for all of the different components of the service.
diff --git a/releasenotes/notes/enable-ovn-affinity-rules-54efa650be79426c.yaml b/releasenotes/notes/enable-ovn-affinity-rules-54efa650be79426c.yaml
new file mode 100644
index 0000000..601f254
--- /dev/null
+++ b/releasenotes/notes/enable-ovn-affinity-rules-54efa650be79426c.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Applied the same pod affinity rules used for the OVN NB/SB StatefulSets to the
+    ``ovn-northd`` deployment and changed the default pod affinity rules from
+    preferred during scheduling to required during scheduling.
diff --git a/releasenotes/notes/enable-ovn-northd-liveness-probe-8b80c6e4399c5225.yaml b/releasenotes/notes/enable-ovn-northd-liveness-probe-8b80c6e4399c5225.yaml
new file mode 100644
index 0000000..45bbb1c
--- /dev/null
+++ b/releasenotes/notes/enable-ovn-northd-liveness-probe-8b80c6e4399c5225.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - The ``ovn-northd`` service did not have a liveness probe enabled,
+    which could result in the pod failing readiness checks but not being
+    automatically restarted.  The liveness probe is now enabled by
+    default, which will restart any stuck ``ovn-northd`` processes.
diff --git a/releasenotes/notes/fix-aio-max-limit-228f73927b88d3ee.yaml b/releasenotes/notes/fix-aio-max-limit-228f73927b88d3ee.yaml
new file mode 100644
index 0000000..1f3ecad
--- /dev/null
+++ b/releasenotes/notes/fix-aio-max-limit-228f73927b88d3ee.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+  - |
+    This fix introduces a kernel parameter adjustment for ``aio-max-nr``, ensuring
+    that the system can handle more asynchronous I/O events and preventing VM
+    startup failures related to AIO limits.
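
The equivalent manual adjustment would be an ``ansible.posix.sysctl`` task along
the lines of the following (illustrative only; the value shown is an assumption,
not necessarily the default applied by this change):

    - name: Raise the asynchronous I/O event limit
      ansible.posix.sysctl:
        name: fs.aio-max-nr
        value: "1048576"
        state: present
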
diff --git a/releasenotes/notes/fix-two-redundant-securityContext-problems-28bfb724627e8920.yaml b/releasenotes/notes/fix-two-redundant-securityContext-problems-28bfb724627e8920.yaml
new file mode 100644
index 0000000..799899e
--- /dev/null
+++ b/releasenotes/notes/fix-two-redundant-securityContext-problems-28bfb724627e8920.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    Fix two redundant ``securityContext`` problems in the
+    ``statefulset-compute-ironic.yaml`` template.
diff --git a/releasenotes/notes/glance-priority-runtime-class-8902ce859fba65f6.yaml b/releasenotes/notes/glance-priority-runtime-class-8902ce859fba65f6.yaml
new file mode 100644
index 0000000..0f7dd5b
--- /dev/null
+++ b/releasenotes/notes/glance-priority-runtime-class-8902ce859fba65f6.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - The Glance role now allows users to configure the ``priorityClassName`` and
+    the ``runtimeClassName`` for all of the different components of the service.
diff --git a/releasenotes/notes/heat-priority-runtime-class-493ffeb8be07ac6a.yaml b/releasenotes/notes/heat-priority-runtime-class-493ffeb8be07ac6a.yaml
new file mode 100644
index 0000000..7bf3b8f
--- /dev/null
+++ b/releasenotes/notes/heat-priority-runtime-class-493ffeb8be07ac6a.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - The Heat role now allows users to configure the ``priorityClassName`` and
+    the ``runtimeClassName`` for all of the different components of the service.
diff --git a/releasenotes/notes/horizon-priority-runtime-class-0004e6be3fdeab2b.yaml b/releasenotes/notes/horizon-priority-runtime-class-0004e6be3fdeab2b.yaml
new file mode 100644
index 0000000..a6a4437
--- /dev/null
+++ b/releasenotes/notes/horizon-priority-runtime-class-0004e6be3fdeab2b.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - The Horizon role now allows users to configure the ``priorityClassName`` and
+    the ``runtimeClassName`` for all of the different components of the service.
diff --git a/releasenotes/notes/horizon-security-improvements-22b2535a85daab75.yaml b/releasenotes/notes/horizon-security-improvements-22b2535a85daab75.yaml
new file mode 100644
index 0000000..2b52c90
--- /dev/null
+++ b/releasenotes/notes/horizon-security-improvements-22b2535a85daab75.yaml
@@ -0,0 +1,8 @@
+---
+security:
+  - The Horizon service now runs as the non-privileged user ``horizon`` in the
+    container.
+  - The Horizon service ``ALLOWED_HOSTS`` setting is now configured to point
+    to the configured endpoints for the service.
+  - The CORS headers are now configured to only allow requests from the
+    configured endpoints for the service.
diff --git a/releasenotes/notes/ironic-priority-runtime-class-260a89c958179e92.yaml b/releasenotes/notes/ironic-priority-runtime-class-260a89c958179e92.yaml
new file mode 100644
index 0000000..ed17275
--- /dev/null
+++ b/releasenotes/notes/ironic-priority-runtime-class-260a89c958179e92.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - The Ironic role now allows users to configure the ``priorityClassName`` and
+    the ``runtimeClassName`` for all of the different components of the service.
diff --git a/releasenotes/notes/keystone-priority-runtime-class-3d41226e8815f369.yaml b/releasenotes/notes/keystone-priority-runtime-class-3d41226e8815f369.yaml
new file mode 100644
index 0000000..81ffbc2
--- /dev/null
+++ b/releasenotes/notes/keystone-priority-runtime-class-3d41226e8815f369.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - The Keystone role now allows users to configure the ``priorityClassName`` and
+    the ``runtimeClassName`` for all of the different components of the service.
diff --git a/releasenotes/notes/magnum-priority-runtime-class-1fa01f838854cb94.yaml b/releasenotes/notes/magnum-priority-runtime-class-1fa01f838854cb94.yaml
new file mode 100644
index 0000000..246b3d2
--- /dev/null
+++ b/releasenotes/notes/magnum-priority-runtime-class-1fa01f838854cb94.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - The Magnum role now allows users to configure the ``priorityClassName`` and
+    the ``runtimeClassName`` for all of the different components of the service.
diff --git a/releasenotes/notes/manila-priority-runtime-class-2b73aa2ad577d258.yaml b/releasenotes/notes/manila-priority-runtime-class-2b73aa2ad577d258.yaml
new file mode 100644
index 0000000..90ff058
--- /dev/null
+++ b/releasenotes/notes/manila-priority-runtime-class-2b73aa2ad577d258.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - The Manila role now allows users to configure the ``priorityClassName`` and
+    the ``runtimeClassName`` for all of the different components of the service.
diff --git a/releasenotes/notes/neutron-priority-runtime-class-b23c083ebd115e08.yaml b/releasenotes/notes/neutron-priority-runtime-class-b23c083ebd115e08.yaml
new file mode 100644
index 0000000..57f9400
--- /dev/null
+++ b/releasenotes/notes/neutron-priority-runtime-class-b23c083ebd115e08.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - The Neutron role now allows users to configure the ``priorityClassName`` and
+    the ``runtimeClassName`` for all of the different components of the service.
diff --git a/releasenotes/notes/nova-priority-runtime-class-97013402a7abf251.yaml b/releasenotes/notes/nova-priority-runtime-class-97013402a7abf251.yaml
new file mode 100644
index 0000000..a5b2aae
--- /dev/null
+++ b/releasenotes/notes/nova-priority-runtime-class-97013402a7abf251.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - The Nova role now allows users to configure the ``priorityClassName`` and
+    the ``runtimeClassName`` for all of the different components of the service.
diff --git a/releasenotes/notes/octavia-priority-runtime-class-3803f91e26a627a4.yaml b/releasenotes/notes/octavia-priority-runtime-class-3803f91e26a627a4.yaml
new file mode 100644
index 0000000..eb2bcce
--- /dev/null
+++ b/releasenotes/notes/octavia-priority-runtime-class-3803f91e26a627a4.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - The Octavia role now allows users to configure the ``priorityClassName`` and
+    the ``runtimeClassName`` for all of the different components of the service.
diff --git a/releasenotes/notes/ovn-dhcp-agent-6da645f88a2c39c3.yaml b/releasenotes/notes/ovn-dhcp-agent-6da645f88a2c39c3.yaml
new file mode 100644
index 0000000..a92726e
--- /dev/null
+++ b/releasenotes/notes/ovn-dhcp-agent-6da645f88a2c39c3.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    Neutron now supports using the built-in DHCP agent alongside OVN (Open Virtual
+    Network) for cases where DHCP relay is necessary.
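
For example, the agent could be enabled through the role's Helm values override
(a sketch; both ``neutron_helm_values`` and the ``manifests.daemonset_dhcp_agent``
key are assumptions based on the openstack-helm chart layout rather than part of
this change):

    neutron_helm_values:
      manifests:
        daemonset_dhcp_agent: true
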
diff --git a/releasenotes/notes/placement-priority-runtime-class-3d5598c95c26dc32.yaml b/releasenotes/notes/placement-priority-runtime-class-3d5598c95c26dc32.yaml
new file mode 100644
index 0000000..6a60037
--- /dev/null
+++ b/releasenotes/notes/placement-priority-runtime-class-3d5598c95c26dc32.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - The Placement role now allows users to configure the ``priorityClassName`` and
+    the ``runtimeClassName`` for all of the different components of the service.
diff --git a/releasenotes/notes/staffeln-priority-runtime-class-d7a4ae951ddcc214.yaml b/releasenotes/notes/staffeln-priority-runtime-class-d7a4ae951ddcc214.yaml
new file mode 100644
index 0000000..4a00dc6
--- /dev/null
+++ b/releasenotes/notes/staffeln-priority-runtime-class-d7a4ae951ddcc214.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - The Staffeln role now allows users to configure the ``priorityClassName`` and
+    the ``runtimeClassName`` for all of the different components of the service.
diff --git a/releasenotes/notes/use-internal-endpoint-for-magnum-capi-client-in-default-da61531ce88c94aa.yaml b/releasenotes/notes/use-internal-endpoint-for-magnum-capi-client-in-default-da61531ce88c94aa.yaml
new file mode 100644
index 0000000..d621716
--- /dev/null
+++ b/releasenotes/notes/use-internal-endpoint-for-magnum-capi-client-in-default-da61531ce88c94aa.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+  - |
+    The Cluster API driver for Magnum is now configured to use the internal
+    endpoints by default in order to avoid going through the ingress and
+    leverage client-side load balancing.
diff --git a/requirements.txt b/requirements.txt
index 9157d5b..038719c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,6 @@
 ansible-core>=2.15.9
 jmespath>=1.0.1
-openstacksdk<0.99.0
+openstacksdk>1
 docker-image-py>=0.1.12
 rjsonnet>=0.5.2
 netaddr>=0.8.0
diff --git a/roles/barbican/defaults/main.yml b/roles/barbican/defaults/main.yml
index 8d2abc6..011e4d6 100644
--- a/roles/barbican/defaults/main.yml
+++ b/roles/barbican/defaults/main.yml
@@ -20,7 +20,11 @@
 barbican_helm_kubeconfig: "{{ kubeconfig_path | default('/etc/kubernetes/admin.conf') }}"
 barbican_helm_values: {}
 
+# Class name to use for the Ingress
+barbican_ingress_class_name: "{{ atmosphere_ingress_class_name }}"
+
 # List of annotations to apply to the Ingress
 barbican_ingress_annotations: {}
+
 # Barbican key encryption key
 barbican_kek: "{{ undef(hint='You must specify a Barbican key encryption key') }}"
diff --git a/roles/barbican/tasks/main.yml b/roles/barbican/tasks/main.yml
index d7261dc..d4992b6 100644
--- a/roles/barbican/tasks/main.yml
+++ b/roles/barbican/tasks/main.yml
@@ -30,6 +30,7 @@
     openstack_helm_ingress_service_name: barbican-api
     openstack_helm_ingress_service_port: 9311
     openstack_helm_ingress_annotations: "{{ barbican_ingress_annotations }}"
+    openstack_helm_ingress_class_name: "{{ barbican_ingress_class_name }}"
 
 - name: Create creator role
   openstack.cloud.identity_role:
diff --git a/roles/barbican/tests/priorityclass_test.yaml b/roles/barbican/tests/priorityclass_test.yaml
new file mode 100644
index 0000000..21063c0
--- /dev/null
+++ b/roles/barbican/tests/priorityclass_test.yaml
@@ -0,0 +1,65 @@
+suite: priorityclass
+tests:
+  - it: should support not having a priority class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/pod-test.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    set:
+      bootstrap:
+        enabled: true
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/pod-test.yaml
+        documentIndex: 1
+        notExists:
+          path: spec.priorityClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 1
+        notExists:
+          path: spec.template.spec.priorityClassName
+
+  - it: should support setting a priority class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/pod-test.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    set:
+      pod:
+        priorityClassName:
+          barbican_api: platform
+          barbican_tests: platform
+          db_sync: platform
+          bootstrap: platform
+      bootstrap:
+        enabled: true
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/pod-test.yaml
+        documentIndex: 1
+        equal:
+          path: spec.priorityClassName
+          value: platform
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 1
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
diff --git a/roles/barbican/tests/runtimeclass_test.yaml b/roles/barbican/tests/runtimeclass_test.yaml
new file mode 100644
index 0000000..7c55c02
--- /dev/null
+++ b/roles/barbican/tests/runtimeclass_test.yaml
@@ -0,0 +1,65 @@
+suite: runtimeclass
+tests:
+  - it: should support not having a runtime class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/pod-test.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    set:
+      bootstrap:
+        enabled: true
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/pod-test.yaml
+        documentIndex: 1
+        notExists:
+          path: spec.runtimeClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 1
+        notExists:
+          path: spec.template.spec.runtimeClassName
+
+  - it: should support setting a runtime class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/pod-test.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    set:
+      pod:
+        runtimeClassName:
+          barbican_api: kata-clh
+          barbican_tests: kata-clh
+          db_sync: kata-clh
+          bootstrap: kata-clh
+      bootstrap:
+        enabled: true
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/pod-test.yaml
+        documentIndex: 1
+        equal:
+          path: spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 1
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
diff --git a/roles/barbican/vars_test.go b/roles/barbican/vars_test.go
index 8a0dccc..ca6a897 100644
--- a/roles/barbican/vars_test.go
+++ b/roles/barbican/vars_test.go
@@ -36,4 +36,6 @@
 	require.NoError(t, err)
 
 	testutils.TestDatabaseConf(t, vals.Conf.Barbican.Database)
+	testutils.TestAllPodsHaveRuntimeClass(t, vals)
+	testutils.TestAllPodsHavePriorityClass(t, vals)
 }
diff --git a/roles/cinder/defaults/main.yml b/roles/cinder/defaults/main.yml
index 9a2012f..51ac2dc 100644
--- a/roles/cinder/defaults/main.yml
+++ b/roles/cinder/defaults/main.yml
@@ -20,5 +20,8 @@
 cinder_helm_kubeconfig: "{{ kubeconfig_path | default('/etc/kubernetes/admin.conf') }}"
 cinder_helm_values: {}
 
+# Class name to use for the Ingress
+cinder_ingress_class_name: "{{ atmosphere_ingress_class_name }}"
+
 # List of annotations to apply to the Ingress
 cinder_ingress_annotations: {}
diff --git a/roles/cinder/tasks/main.yml b/roles/cinder/tasks/main.yml
index aa7b213..cde4251 100644
--- a/roles/cinder/tasks/main.yml
+++ b/roles/cinder/tasks/main.yml
@@ -40,3 +40,4 @@
     openstack_helm_ingress_service_name: cinder-api
     openstack_helm_ingress_service_port: 8776
     openstack_helm_ingress_annotations: "{{ _cinder_ingress_annotations | combine(cinder_ingress_annotations) }}"
+    openstack_helm_ingress_class_name: "{{ cinder_ingress_class_name }}"
diff --git a/roles/cinder/tests/priorityclass_test.yaml b/roles/cinder/tests/priorityclass_test.yaml
new file mode 100644
index 0000000..4ca4967
--- /dev/null
+++ b/roles/cinder/tests/priorityclass_test.yaml
@@ -0,0 +1,96 @@
+suite: priorityclass
+tests:
+  - it: should support not having a priority class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/deployment-scheduler.yaml
+      - templates/deployment-volume.yaml
+      - templates/cron-job-cinder-volume-usage-audit.yaml
+      - templates/deployment-backup.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/deployment-scheduler.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/deployment-volume.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/cron-job-cinder-volume-usage-audit.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.jobTemplate.spec.template.spec.priorityClassName
+      - template: templates/deployment-backup.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.priorityClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+
+  - it: should support setting a priority class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/deployment-scheduler.yaml
+      - templates/deployment-volume.yaml
+      - templates/cron-job-cinder-volume-usage-audit.yaml
+      - templates/deployment-backup.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+    set:
+      pod:
+        priorityClassName:
+          cinder_api: platform
+          cinder_backup: platform
+          cinder_scheduler: platform
+          cinder_tests: platform
+          cinder_volume_usage_audit: platform
+          cinder_volume: platform
+          db_sync: platform
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/deployment-scheduler.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/deployment-volume.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/cron-job-cinder-volume-usage-audit.yaml
+        documentIndex: 3
+        equal:
+          path: spec.jobTemplate.spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/deployment-backup.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 3
+        equal:
+          path: spec.priorityClassName
+          value: platform
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
diff --git a/roles/cinder/tests/runtimeclass_test.yaml b/roles/cinder/tests/runtimeclass_test.yaml
new file mode 100644
index 0000000..151958a
--- /dev/null
+++ b/roles/cinder/tests/runtimeclass_test.yaml
@@ -0,0 +1,96 @@
+suite: runtimeclass
+tests:
+  - it: should support not having a runtime class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/deployment-scheduler.yaml
+      - templates/deployment-volume.yaml
+      - templates/cron-job-cinder-volume-usage-audit.yaml
+      - templates/deployment-backup.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/deployment-scheduler.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/deployment-volume.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/cron-job-cinder-volume-usage-audit.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.jobTemplate.spec.template.spec.runtimeClassName
+      - template: templates/deployment-backup.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.runtimeClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+
+  - it: should support setting a runtime class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/deployment-scheduler.yaml
+      - templates/deployment-volume.yaml
+      - templates/cron-job-cinder-volume-usage-audit.yaml
+      - templates/deployment-backup.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+    set:
+      pod:
+        runtimeClassName:
+          cinder_api: kata-clh
+          cinder_backup: kata-clh
+          cinder_scheduler: kata-clh
+          cinder_tests: kata-clh
+          cinder_volume_usage_audit: kata-clh
+          cinder_volume: kata-clh
+          db_sync: kata-clh
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/deployment-scheduler.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/deployment-volume.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/cron-job-cinder-volume-usage-audit.yaml
+        documentIndex: 3
+        equal:
+          path: spec.jobTemplate.spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/deployment-backup.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 3
+        equal:
+          path: spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
diff --git a/roles/cinder/vars/main.yml b/roles/cinder/vars/main.yml
index 4917bd9..ef3d6cd 100644
--- a/roles/cinder/vars/main.yml
+++ b/roles/cinder/vars/main.yml
@@ -52,6 +52,8 @@
         pool_timeout: 30
       oslo_messaging_notifications:
         driver: noop
+      privsep_osbrick:
+        helper_command: sudo cinder-rootwrap /etc/cinder/rootwrap.conf privsep-helper --config-file /etc/cinder/cinder.conf
   manifests:
     ingress_api: false
     job_clean: false
diff --git a/roles/cinder/vars_test.go b/roles/cinder/vars_test.go
index c501802..c2ba7d3 100644
--- a/roles/cinder/vars_test.go
+++ b/roles/cinder/vars_test.go
@@ -36,4 +36,6 @@
 	require.NoError(t, err)
 
 	testutils.TestDatabaseConf(t, vals.Conf.Cinder.Database)
+	testutils.TestAllPodsHaveRuntimeClass(t, vals)
+	testutils.TestAllPodsHavePriorityClass(t, vals)
 }
diff --git a/roles/designate/defaults/main.yml b/roles/designate/defaults/main.yml
index 7932b41..91bf556 100644
--- a/roles/designate/defaults/main.yml
+++ b/roles/designate/defaults/main.yml
@@ -20,6 +20,9 @@
 designate_helm_kubeconfig: "{{ kubeconfig_path | default('/etc/kubernetes/admin.conf') }}"
 designate_helm_values: {}
 
+# Class name to use for the Ingress
+designate_ingress_class_name: "{{ atmosphere_ingress_class_name }}"
+
 # List of annotations to apply to the Ingress
 designate_ingress_annotations: {}
 
diff --git a/roles/designate/tasks/main.yml b/roles/designate/tasks/main.yml
index fe90969..e4c969d 100644
--- a/roles/designate/tasks/main.yml
+++ b/roles/designate/tasks/main.yml
@@ -34,3 +34,4 @@
     openstack_helm_ingress_service_name: designate-api
     openstack_helm_ingress_service_port: 9001
     openstack_helm_ingress_annotations: "{{ designate_ingress_annotations }}"
+    openstack_helm_ingress_class_name: "{{ designate_ingress_class_name }}"
diff --git a/roles/designate/tests/priorityclass_test.yaml b/roles/designate/tests/priorityclass_test.yaml
new file mode 100644
index 0000000..c8de434
--- /dev/null
+++ b/roles/designate/tests/priorityclass_test.yaml
@@ -0,0 +1,96 @@
+suite: priorityclass
+tests:
+  - it: should support not having a priority class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/deployment-central.yaml
+      - templates/deployment-mdns.yaml
+      - templates/deployment-producer.yaml
+      - templates/deployment-sink.yaml
+      - templates/deployment-worker.yaml
+      - templates/job-db-sync.yaml
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/deployment-central.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/deployment-mdns.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/deployment-producer.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      # - template: templates/deployment-sink.yaml
+      #   documentIndex: 3
+      #   notExists:
+      #     path: spec.template.spec.priorityClassName
+      - template: templates/deployment-worker.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+
+  - it: should support setting a priority class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/deployment-central.yaml
+      - templates/deployment-mdns.yaml
+      - templates/deployment-producer.yaml
+      - templates/deployment-sink.yaml
+      - templates/deployment-worker.yaml
+      - templates/job-db-sync.yaml
+    set:
+      pod:
+        priorityClassName:
+          designate_api: platform
+          designate_central: platform
+          designate_mdns: platform
+          designate_producer: platform
+          designate_sink: platform
+          designate_worker: platform
+          db_sync: platform
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/deployment-central.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/deployment-mdns.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/deployment-producer.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      # - template: templates/deployment-sink.yaml
+      #   documentIndex: 3
+      #   equal:
+      #     path: spec.template.spec.priorityClassName
+      #     value: platform
+      - template: templates/deployment-worker.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
diff --git a/roles/designate/tests/runtimeclass_test.yaml b/roles/designate/tests/runtimeclass_test.yaml
new file mode 100644
index 0000000..ac10d05
--- /dev/null
+++ b/roles/designate/tests/runtimeclass_test.yaml
@@ -0,0 +1,96 @@
+suite: runtimeclass
+tests:
+  - it: should support not having a runtime class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/deployment-central.yaml
+      - templates/deployment-mdns.yaml
+      - templates/deployment-producer.yaml
+      - templates/deployment-sink.yaml
+      - templates/deployment-worker.yaml
+      - templates/job-db-sync.yaml
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/deployment-central.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/deployment-mdns.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/deployment-producer.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      # - template: templates/deployment-sink.yaml
+      #   documentIndex: 3
+      #   notExists:
+      #     path: spec.template.spec.runtimeClassName
+      - template: templates/deployment-worker.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+
+  - it: should support setting a runtime class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/deployment-central.yaml
+      - templates/deployment-mdns.yaml
+      - templates/deployment-producer.yaml
+      - templates/deployment-sink.yaml
+      - templates/deployment-worker.yaml
+      - templates/job-db-sync.yaml
+    set:
+      pod:
+        runtimeClassName:
+          designate_api: kata-clh
+          designate_central: kata-clh
+          designate_mdns: kata-clh
+          designate_producer: kata-clh
+          designate_sink: kata-clh
+          designate_worker: kata-clh
+          db_sync: kata-clh
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/deployment-central.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/deployment-mdns.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/deployment-producer.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      # - template: templates/deployment-sink.yaml
+      #   documentIndex: 3
+      #   equal:
+      #     path: spec.template.spec.runtimeClassName
+      #     value: kata-clh
+      - template: templates/deployment-worker.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
diff --git a/roles/designate/vars/main.yml b/roles/designate/vars/main.yml
index e6bfe9b..1bf3970 100644
--- a/roles/designate/vars/main.yml
+++ b/roles/designate/vars/main.yml
@@ -26,7 +26,7 @@
         max_pool_size: 5
         pool_timeout: 30
       service:central:
-        managed_resource_tenant_id: "{{ _designate_project_info.openstack_projects[0].id }}"
+        managed_resource_tenant_id: "{{ _designate_project_info.projects[0].id }}"
     pools: "{{ designate_pools | to_yaml }}"
   pod:
     replicas:
diff --git a/roles/designate/vars_test.go b/roles/designate/vars_test.go
index e3ec382..36be484 100644
--- a/roles/designate/vars_test.go
+++ b/roles/designate/vars_test.go
@@ -36,4 +36,6 @@
 	require.NoError(t, err)
 
 	testutils.TestDatabaseConf(t, vals.Conf.Designate.Database)
+	testutils.TestAllPodsHaveRuntimeClass(t, vals)
+	testutils.TestAllPodsHavePriorityClass(t, vals)
 }
diff --git a/roles/glance/defaults/main.yml b/roles/glance/defaults/main.yml
index d747588..62c026b 100644
--- a/roles/glance/defaults/main.yml
+++ b/roles/glance/defaults/main.yml
@@ -20,6 +20,9 @@
 glance_helm_kubeconfig: "{{ kubeconfig_path | default('/etc/kubernetes/admin.conf') }}"
 glance_helm_values: {}
 
+# Class name to use for the Ingress
+glance_ingress_class_name: "{{ atmosphere_ingress_class_name }}"
+
 # List of annotations to apply to the Ingress
 glance_ingress_annotations: {}
 
diff --git a/roles/glance/tasks/main.yml b/roles/glance/tasks/main.yml
index 32cf42c..c36357f 100644
--- a/roles/glance/tasks/main.yml
+++ b/roles/glance/tasks/main.yml
@@ -30,6 +30,7 @@
     openstack_helm_ingress_service_name: glance-api
     openstack_helm_ingress_service_port: 9292
     openstack_helm_ingress_annotations: "{{ _glance_ingress_annotations | combine(glance_ingress_annotations) }}"
+    openstack_helm_ingress_class_name: "{{ glance_ingress_class_name }}"
 
 - name: Create images
   ansible.builtin.include_role:
diff --git a/roles/glance/tests/priorityclass_test.yaml b/roles/glance/tests/priorityclass_test.yaml
new file mode 100644
index 0000000..deeed31
--- /dev/null
+++ b/roles/glance/tests/priorityclass_test.yaml
@@ -0,0 +1,48 @@
+suite: priorityclass
+tests:
+  - it: should support not having a priority class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.priorityClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+
+  - it: should support setting a priority class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+    set:
+      pod:
+        priorityClassName:
+          glance_api: platform
+          glance_tests: platform
+          db_sync: platform
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 3
+        equal:
+          path: spec.priorityClassName
+          value: platform
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
diff --git a/roles/glance/tests/runtimeclass_test.yaml b/roles/glance/tests/runtimeclass_test.yaml
new file mode 100644
index 0000000..282a1c8
--- /dev/null
+++ b/roles/glance/tests/runtimeclass_test.yaml
@@ -0,0 +1,48 @@
+suite: runtimeclass
+tests:
+  - it: should support not having a runtime class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.runtimeClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+
+  - it: should support setting a runtime class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+    set:
+      pod:
+        runtimeClassName:
+          glance_api: kata-clh
+          glance_tests: kata-clh
+          db_sync: kata-clh
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 3
+        equal:
+          path: spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
diff --git a/roles/glance/vars_test.go b/roles/glance/vars_test.go
index aa0455c..9b95a4c 100644
--- a/roles/glance/vars_test.go
+++ b/roles/glance/vars_test.go
@@ -36,4 +36,6 @@
 	require.NoError(t, err)
 
 	testutils.TestDatabaseConf(t, vals.Conf.Glance.Database)
+	testutils.TestAllPodsHaveRuntimeClass(t, vals)
+	testutils.TestAllPodsHavePriorityClass(t, vals)
 }
diff --git a/roles/glance_image/defaults/main.yml b/roles/glance_image/defaults/main.yml
index d906e1f..92541d0 100644
--- a/roles/glance_image/defaults/main.yml
+++ b/roles/glance_image/defaults/main.yml
@@ -12,6 +12,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+# Optional path in which the temporary image download directory is created
+# glance_image_tempfile_path:
+
 glance_image_http_proxy: "{{ http_proxy | default('') }}"
 glance_image_https_proxy: "{{ https_proxy | default('') }}"
 glance_image_no_proxy: "{{ no_proxy | default('') }}"
diff --git a/roles/glance_image/tasks/main.yml b/roles/glance_image/tasks/main.yml
index 5f56e55..1f0059a 100644
--- a/roles/glance_image/tasks/main.yml
+++ b/roles/glance_image/tasks/main.yml
@@ -26,10 +26,11 @@
 
 - name: Download image and upload into Glance
   run_once: true
-  when: _image_info.openstack_image == None
+  when: _image_info.images | length == 0
   block:
     - name: Generate temporary work directory
       ansible.builtin.tempfile:
+        path: "{{ glance_image_tempfile_path | default(omit) }}"
         state: directory
       register: _workdir
 
@@ -38,6 +39,7 @@
         url: "{{ glance_image_url }}"
         dest: "{{ _workdir.path }}/{{ glance_image_url | basename }}"
         mode: "0600"
+        tmp_dest: "{{ _workdir.path }}"
       register: _get_url
       retries: 3
       delay: "{{ 15 | random + 3 }}"
diff --git a/roles/heat/defaults/main.yml b/roles/heat/defaults/main.yml
index 3ea9ded..6a7173d 100644
--- a/roles/heat/defaults/main.yml
+++ b/roles/heat/defaults/main.yml
@@ -20,6 +20,10 @@
 heat_helm_kubeconfig: "{{ kubeconfig_path | default('/etc/kubernetes/admin.conf') }}"
 heat_helm_values: {}
 
+# Class name to use for the Ingress
+heat_ingress_class_name: "{{ atmosphere_ingress_class_name }}"
+
+# List of annotations to apply to the Ingress
 heat_ingress_annotations: {}
 
 # Encryption key for Heat to use for encrypting sensitive data
diff --git a/roles/heat/tasks/main.yml b/roles/heat/tasks/main.yml
index 96c301a..3a2790e 100644
--- a/roles/heat/tasks/main.yml
+++ b/roles/heat/tasks/main.yml
@@ -30,6 +30,7 @@
     openstack_helm_ingress_service_name: heat-api
     openstack_helm_ingress_service_port: 8004
     openstack_helm_ingress_annotations: "{{ _heat_ingress_annotations | combine(heat_ingress_annotations, recursive=True) }}"
+    openstack_helm_ingress_class_name: "{{ heat_ingress_class_name }}"
 
 - name: Create Ingress
   ansible.builtin.include_role:
@@ -39,3 +40,4 @@
     openstack_helm_ingress_service_name: heat-cfn
     openstack_helm_ingress_service_port: 8000
     openstack_helm_ingress_annotations: "{{ _heat_ingress_annotations | combine(heat_ingress_annotations, recursive=True) }}"
+    openstack_helm_ingress_class_name: "{{ heat_ingress_class_name }}"
diff --git a/roles/heat/tests/priorityclass_test.yaml b/roles/heat/tests/priorityclass_test.yaml
new file mode 100644
index 0000000..bef0fb4
--- /dev/null
+++ b/roles/heat/tests/priorityclass_test.yaml
@@ -0,0 +1,123 @@
+suite: priorityclass
+tests:
+  - it: should support not having a priority class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/deployment-cfn.yaml
+      - templates/deployment-cloudwatch.yaml
+      - templates/cron-job-engine-cleaner.yaml
+      - templates/cron-job-purge-deleted.yaml
+      - templates/deployment-engine.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+      - templates/job-trusts.yaml
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/deployment-cfn.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/cron-job-engine-cleaner.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.jobTemplate.spec.template.spec.priorityClassName
+      - template: templates/cron-job-purge-deleted.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.jobTemplate.spec.template.spec.priorityClassName
+      - template: templates/deployment-engine.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.priorityClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/job-trusts.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+
+  - it: should support setting a priority class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/deployment-cfn.yaml
+      - templates/deployment-cloudwatch.yaml
+      - templates/cron-job-engine-cleaner.yaml
+      - templates/cron-job-purge-deleted.yaml
+      - templates/deployment-engine.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+      - templates/job-trusts.yaml
+    set:
+      pod:
+        priorityClassName:
+          heat_api: platform
+          heat_cfn: platform
+          heat_cloudwatch: platform
+          heat_tests: platform
+          heat_engine_cleaner: platform
+          heat_purge_deleted: platform
+          heat_engine: platform
+          db_sync: platform
+          bootstrap: platform
+          heat_trusts: platform
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/deployment-cfn.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/cron-job-engine-cleaner.yaml
+        documentIndex: 3
+        equal:
+          path: spec.jobTemplate.spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/cron-job-purge-deleted.yaml
+        documentIndex: 3
+        equal:
+          path: spec.jobTemplate.spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/deployment-engine.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 3
+        equal:
+          path: spec.priorityClassName
+          value: platform
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/job-trusts.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
diff --git a/roles/heat/tests/runtimeclass_test.yaml b/roles/heat/tests/runtimeclass_test.yaml
new file mode 100644
index 0000000..754538b
--- /dev/null
+++ b/roles/heat/tests/runtimeclass_test.yaml
@@ -0,0 +1,123 @@
+suite: runtimeclass
+tests:
+  - it: should support not having a runtime class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/deployment-cfn.yaml
+      - templates/deployment-cloudwatch.yaml
+      - templates/cron-job-engine-cleaner.yaml
+      - templates/cron-job-purge-deleted.yaml
+      - templates/deployment-engine.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+      - templates/job-trusts.yaml
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/deployment-cfn.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/cron-job-engine-cleaner.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.jobTemplate.spec.template.spec.runtimeClassName
+      - template: templates/cron-job-purge-deleted.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.jobTemplate.spec.template.spec.runtimeClassName
+      - template: templates/deployment-engine.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.runtimeClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/job-trusts.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+
+  - it: should support setting a runtime class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/deployment-cfn.yaml
+      - templates/deployment-cloudwatch.yaml
+      - templates/cron-job-engine-cleaner.yaml
+      - templates/cron-job-purge-deleted.yaml
+      - templates/deployment-engine.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+      - templates/job-trusts.yaml
+    set:
+      pod:
+        runtimeClassName:
+          heat_api: kata-clh
+          heat_cfn: kata-clh
+          heat_cloudwatch: kata-clh
+          heat_tests: kata-clh
+          heat_engine_cleaner: kata-clh
+          heat_purge_deleted: kata-clh
+          heat_engine: kata-clh
+          db_sync: kata-clh
+          bootstrap: kata-clh
+          heat_trusts: kata-clh
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/deployment-cfn.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/cron-job-engine-cleaner.yaml
+        documentIndex: 3
+        equal:
+          path: spec.jobTemplate.spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/cron-job-purge-deleted.yaml
+        documentIndex: 3
+        equal:
+          path: spec.jobTemplate.spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/deployment-engine.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 3
+        equal:
+          path: spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-trusts.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
diff --git a/roles/heat/vars_test.go b/roles/heat/vars_test.go
index a87f90c..be07332 100644
--- a/roles/heat/vars_test.go
+++ b/roles/heat/vars_test.go
@@ -36,4 +36,6 @@
 	require.NoError(t, err)
 
 	testutils.TestDatabaseConf(t, vals.Conf.Heat.Database)
+	testutils.TestAllPodsHaveRuntimeClass(t, vals)
+	testutils.TestAllPodsHavePriorityClass(t, vals)
 }
diff --git a/roles/horizon/defaults/main.yml b/roles/horizon/defaults/main.yml
index 116a87a..441fe4d 100644
--- a/roles/horizon/defaults/main.yml
+++ b/roles/horizon/defaults/main.yml
@@ -20,5 +20,8 @@
 horizon_helm_kubeconfig: "{{ kubeconfig_path | default('/etc/kubernetes/admin.conf') }}"
 horizon_helm_values: {}
 
+# Class name to use for the Ingress
+horizon_ingress_class_name: "{{ atmosphere_ingress_class_name }}"
+
 # List of annotations to apply to the Ingress
 horizon_ingress_annotations: {}
diff --git a/roles/horizon/tasks/main.yml b/roles/horizon/tasks/main.yml
index 3cd1653..5c9e4e0 100644
--- a/roles/horizon/tasks/main.yml
+++ b/roles/horizon/tasks/main.yml
@@ -30,3 +30,4 @@
     openstack_helm_ingress_service_name: horizon-int
     openstack_helm_ingress_service_port: 80
     openstack_helm_ingress_annotations: "{{ _horizon_ingress_annotations | combine(horizon_ingress_annotations) }}"
+    openstack_helm_ingress_class_name: "{{ horizon_ingress_class_name }}"
diff --git a/roles/horizon/tests/priorityclass_test.yaml b/roles/horizon/tests/priorityclass_test.yaml
new file mode 100644
index 0000000..08ec868
--- /dev/null
+++ b/roles/horizon/tests/priorityclass_test.yaml
@@ -0,0 +1,60 @@
+suite: priorityclass
+tests:
+  - it: should support not having a priority class
+    templates:
+      - templates/deployment.yaml
+      - templates/pod-helm-tests.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-db-init.yaml
+    asserts:
+      - template: templates/deployment.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/pod-helm-tests.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.priorityClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/job-db-init.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+
+  - it: should support setting a priority class
+    templates:
+      - templates/deployment.yaml
+      - templates/pod-helm-tests.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-db-init.yaml
+    set:
+      pod:
+        priorityClassName:
+          horizon: platform
+          horizon_tests: platform
+          db_sync: platform
+          db_init: platform
+    asserts:
+      - template: templates/deployment.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/pod-helm-tests.yaml
+        documentIndex: 3
+        equal:
+          path: spec.priorityClassName
+          value: platform
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/job-db-init.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
diff --git a/roles/horizon/tests/runtimeclass_test.yaml b/roles/horizon/tests/runtimeclass_test.yaml
new file mode 100644
index 0000000..d4dda15
--- /dev/null
+++ b/roles/horizon/tests/runtimeclass_test.yaml
@@ -0,0 +1,60 @@
+suite: runtimeclass
+tests:
+  - it: should support not having a runtime class
+    templates:
+      - templates/deployment.yaml
+      - templates/pod-helm-tests.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-db-init.yaml
+    asserts:
+      - template: templates/deployment.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/pod-helm-tests.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.runtimeClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/job-db-init.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+
+  - it: should support setting a runtime class
+    templates:
+      - templates/deployment.yaml
+      - templates/pod-helm-tests.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-db-init.yaml
+    set:
+      pod:
+        runtimeClassName:
+          horizon: kata-clh
+          horizon_tests: kata-clh
+          db_sync: kata-clh
+          db_init: kata-clh
+    asserts:
+      - template: templates/deployment.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/pod-helm-tests.yaml
+        documentIndex: 3
+        equal:
+          path: spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-db-init.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
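
The two test suites above pin the chart's `pod.runtimeClassName` and `pod.priorityClassName` trees. For completeness, a minimal sketch of setting the same keys through the existing `horizon_helm_values` hook (assuming, as with the role's other overrides, that these values are merged over the built-in `_horizon_helm_values`):

    horizon_helm_values:
      pod:
        runtimeClassName:
          horizon: kata-clh       # illustrative; same value the tests use
        priorityClassName:
          horizon: platform       # illustrative; same value the tests use
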
diff --git a/roles/horizon/vars/main.yml b/roles/horizon/vars/main.yml
index 3e4f541..e0499bb 100644
--- a/roles/horizon/vars/main.yml
+++ b/roles/horizon/vars/main.yml
@@ -17,6 +17,16 @@
   images:
     tags: "{{ atmosphere_images | vexxhost.atmosphere.openstack_helm_image_tags('horizon') }}"
   pod:
+    security_context:
+      horizon:
+        pod:
+          fsGroup: 42424
+      db_sync:
+        pod:
+          fsGroup: 42424
+      tests:
+        pod:
+          fsGroup: 42424
     replicas:
       server: 3
   conf:
@@ -24,6 +34,8 @@
       local_settings:
         config:
           disallow_iframe_embed: "True"
+          allowed_hosts:
+            - "{{ openstack_helm_endpoints_horizon_api_host }}"
           secure_proxy_ssl_header: "True"
           horizon_images_upload_mode: direct
           openstack_enable_password_retrieve: "True"
@@ -55,3 +67,5 @@
 _horizon_ingress_annotations:
   nginx.ingress.kubernetes.io/proxy-body-size: "5000m"
   nginx.ingress.kubernetes.io/proxy-request-buffering: "off"
+  nginx.ingress.kubernetes.io/enable-cors: "true"
+  nginx.ingress.kubernetes.io/cors-allow-origin: "{{ openstack_helm_endpoints_horizon_api_host }}"
diff --git a/roles/horizon/vars_test.go b/roles/horizon/vars_test.go
new file mode 100644
index 0000000..0799720
--- /dev/null
+++ b/roles/horizon/vars_test.go
@@ -0,0 +1,40 @@
+package horizon
+
+import (
+	_ "embed"
+	"os"
+	"testing"
+
+	"github.com/goccy/go-yaml"
+	"github.com/stretchr/testify/require"
+
+	"github.com/vexxhost/atmosphere/internal/openstack_helm"
+	"github.com/vexxhost/atmosphere/internal/testutils"
+)
+
+var (
+	//go:embed vars/main.yml
+	varsFile []byte
+	vars     Vars
+)
+
+type Vars struct {
+	openstack_helm.HelmValues `yaml:"_horizon_helm_values"`
+}
+
+func TestMain(m *testing.M) {
+	t := &testing.T{}
+	err := yaml.UnmarshalWithOptions(varsFile, &vars)
+	require.NoError(t, err)
+
+	code := m.Run()
+	os.Exit(code)
+}
+
+func TestHelmValues(t *testing.T) {
+	vals, err := openstack_helm.CoalescedHelmValues("../../charts/horizon", &vars.HelmValues)
+	require.NoError(t, err)
+
+	testutils.TestAllPodsHaveRuntimeClass(t, vals)
+	testutils.TestAllPodsHavePriorityClass(t, vals)
+}
diff --git a/roles/ingress/defaults/main.yml b/roles/ingress/defaults/main.yml
index 05a2cc3..82ea1e0 100644
--- a/roles/ingress/defaults/main.yml
+++ b/roles/ingress/defaults/main.yml
@@ -38,3 +38,6 @@
 
 # List of annotations to apply to all Ingress resources as default
 ingress_default_annotations: "{{ ingress_global_annotations | default(atmosphere_ingress_annotations) }}"
+
+# Use the ingress controller's default TLS certificate (omit the per-Ingress TLS block and certificate Secret)
+ingress_use_default_tls_certificate: false
diff --git a/roles/ingress/templates/ingress.yml.j2 b/roles/ingress/templates/ingress.yml.j2
index 04135bc..6da2788 100644
--- a/roles/ingress/templates/ingress.yml.j2
+++ b/roles/ingress/templates/ingress.yml.j2
@@ -21,7 +21,9 @@
                 name: {{ ingress_service_name }}
                 port:
                   number: {{ ingress_service_port }}
+{% if not ingress_use_default_tls_certificate %}
   tls:
     - secretName: {{ ingress_secret_name | default(ingress_service_name ~ '-certs') }}
       hosts:
         - {{ ingress_host }}
+{% endif %}
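
A short sketch of the new toggle (the variable is the one added above; values are illustrative): with the default `false` the Ingress keeps its per-host `tls` block and certificate Secret, while `true` drops the block so the ingress controller answers with its default certificate instead.

    # group_vars sketch -- illustrative
    ingress_use_default_tls_certificate: true   # omit tls/secretName, serve the controller's default cert
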

diff --git a/roles/ironic/defaults/main.yml b/roles/ironic/defaults/main.yml
index 4df68da..3b0ef6d 100644
--- a/roles/ironic/defaults/main.yml
+++ b/roles/ironic/defaults/main.yml
@@ -20,6 +20,9 @@
 ironic_helm_kubeconfig: "{{ kubeconfig_path | default('/etc/kubernetes/admin.conf') }}"
 ironic_helm_values: {}
 
+# Class name to use for the Ingress
+ironic_ingress_class_name: "{{ atmosphere_ingress_class_name }}"
+
 # List of annotations to apply to the Ingress
 ironic_ingress_annotations: {}
 
diff --git a/roles/ironic/tasks/main.yml b/roles/ironic/tasks/main.yml
index bd975f1..a70ef8b 100644
--- a/roles/ironic/tasks/main.yml
+++ b/roles/ironic/tasks/main.yml
@@ -94,3 +94,4 @@
     openstack_helm_ingress_service_name: ironic-api
     openstack_helm_ingress_service_port: 6385
     openstack_helm_ingress_annotations: "{{ ironic_ingress_annotations }}"
+    openstack_helm_ingress_class_name: "{{ ironic_ingress_class_name }}"
diff --git a/roles/ironic/tasks/network/create.yml b/roles/ironic/tasks/network/create.yml
index 8032983..a459b74 100644
--- a/roles/ironic/tasks/network/create.yml
+++ b/roles/ironic/tasks/network/create.yml
@@ -25,7 +25,7 @@
 
 - name: Create bare metal network subnet
   run_once: true
-  openstack.cloud.subnet:
+  vexxhost.atmosphere.subnet:
     cloud: atmosphere
     # Subnet settings
     network_name: "{{ ironic_bare_metal_subnet_name }}"
diff --git a/roles/ironic/tasks/network/lookup.yml b/roles/ironic/tasks/network/lookup.yml
index 8838ac9..95a8f71 100644
--- a/roles/ironic/tasks/network/lookup.yml
+++ b/roles/ironic/tasks/network/lookup.yml
@@ -23,11 +23,11 @@
 - name: Assert that we match a single network only
   ansible.builtin.assert:
     that:
-      - ironic_bare_metal_networks_info.openstack_networks | length == 1
-    fail_msg: "Expected exactly one network, but found {{ ironic_bare_metal_networks_info.openstack_networks | length }}"
+      - ironic_bare_metal_networks_info.networks | length == 1
+    fail_msg: "Expected exactly one network, but found {{ ironic_bare_metal_networks_info.networks | length }}"
     success_msg: "Successfully matched a single network"
   run_once: true
 
 - name: Set fact with bare metal network information
   ansible.builtin.set_fact:
-    ironic_bare_metal_network: "{{ ironic_bare_metal_networks_info.openstack_networks[0] }}"
+    ironic_bare_metal_network: "{{ ironic_bare_metal_networks_info.networks[0] }}"
diff --git a/roles/ironic/tests/priorityclass_test.yaml b/roles/ironic/tests/priorityclass_test.yaml
new file mode 100644
index 0000000..aeaba8f
--- /dev/null
+++ b/roles/ironic/tests/priorityclass_test.yaml
@@ -0,0 +1,60 @@
+suite: priorityclass
+tests:
+  - it: should support not having a priority class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/statefulset-conductor.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/statefulset-conductor.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+
+  - it: should support setting a priority class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/statefulset-conductor.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    set:
+      pod:
+        priorityClassName:
+          ironic_api: platform
+          ironic_conductor: platform
+          db_sync: platform
+          bootstrap: platform
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/statefulset-conductor.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
diff --git a/roles/ironic/tests/runtimeclass_test.yaml b/roles/ironic/tests/runtimeclass_test.yaml
new file mode 100644
index 0000000..f2a4c33
--- /dev/null
+++ b/roles/ironic/tests/runtimeclass_test.yaml
@@ -0,0 +1,60 @@
+suite: runtimeclass
+tests:
+  - it: should support not having a runtime class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/statefulset-conductor.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/statefulset-conductor.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+
+  - it: should support setting a runtime class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/statefulset-conductor.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    set:
+      pod:
+        runtimeClassName:
+          ironic_api: kata-clh
+          ironic_conductor: kata-clh
+          db_sync: kata-clh
+          bootstrap: kata-clh
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/statefulset-conductor.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
diff --git a/roles/ironic/vars/main.yml b/roles/ironic/vars/main.yml
index 51d32b4..94e01c8 100644
--- a/roles/ironic/vars/main.yml
+++ b/roles/ironic/vars/main.yml
@@ -50,8 +50,13 @@
         rbac_service_role_elevated_access: true
       conductor:
         clean_step_priority_override: deploy.erase_devices_express:5
-        deploy_kernel: "{{ ironic_python_agent_deploy_kernel.openstack_image.id }}"
-        deploy_ramdisk: "{{ ironic_python_agent_deploy_ramdisk.openstack_image.id }}"
+        deploy_kernel: "{{ ironic_python_agent_deploy_kernel.images.0.id }}"
+        deploy_ramdisk: "{{ ironic_python_agent_deploy_ramdisk.images.0.id }}"
+      database:
+        connection_recycle_time: 600
+        max_overflow: 50
+        max_pool_size: 5
+        pool_timeout: 30
       deploy:
         erase_devices_priority: 0
         erase_devices_metadata_priority: 0
diff --git a/roles/ironic/vars_test.go b/roles/ironic/vars_test.go
new file mode 100644
index 0000000..658e211
--- /dev/null
+++ b/roles/ironic/vars_test.go
@@ -0,0 +1,41 @@
+package ironic
+
+import (
+	_ "embed"
+	"os"
+	"testing"
+
+	"github.com/goccy/go-yaml"
+	"github.com/stretchr/testify/require"
+
+	"github.com/vexxhost/atmosphere/internal/openstack_helm"
+	"github.com/vexxhost/atmosphere/internal/testutils"
+)
+
+var (
+	//go:embed vars/main.yml
+	varsFile []byte
+	vars     Vars
+)
+
+type Vars struct {
+	openstack_helm.HelmValues `yaml:"_ironic_helm_values"`
+}
+
+func TestMain(m *testing.M) {
+	t := &testing.T{}
+	err := yaml.UnmarshalWithOptions(varsFile, &vars)
+	require.NoError(t, err)
+
+	code := m.Run()
+	os.Exit(code)
+}
+
+func TestHelmValues(t *testing.T) {
+	vals, err := openstack_helm.CoalescedHelmValues("../../charts/ironic", &vars.HelmValues)
+	require.NoError(t, err)
+
+	testutils.TestDatabaseConf(t, vals.Conf.Ironic.Database)
+	testutils.TestAllPodsHaveRuntimeClass(t, vals)
+	testutils.TestAllPodsHavePriorityClass(t, vals)
+}
diff --git a/roles/keystone/defaults/main.yml b/roles/keystone/defaults/main.yml
index f71c59f..67031c5 100644
--- a/roles/keystone/defaults/main.yml
+++ b/roles/keystone/defaults/main.yml
@@ -20,6 +20,9 @@
 keystone_helm_kubeconfig: "{{ kubeconfig_path | default('/etc/kubernetes/admin.conf') }}"
 keystone_helm_values: {}
 
+# Class name to use for the Ingress
+keystone_ingress_class_name: "{{ atmosphere_ingress_class_name }}"
+
 # List of annotations to apply to the Ingress
 keystone_ingress_annotations: {}
 
@@ -34,6 +37,16 @@
 keystone_keycloak_admin_password: "{{ keycloak_admin_password }}"
 keystone_keycloak_realm: atmosphere
 keystone_keycloak_realm_name: Atmosphere
+# keystone_keycloak_realm_default_password_policy:
+# keystone_keycloak_realm_default_brute_force_protected:
+# keystone_keycloak_realm_default_brute_force_failure_factor:
+# keystone_keycloak_realm_default_brute_force_wait_increment_seconds:
+# keystone_keycloak_realm_default_brute_force_max_failure_wait_seconds:
+# keystone_keycloak_realm_default_brute_force_max_delta_time_seconds:
+# keystone_keycloak_realm_default_minimum_quick_login_wait_seconds:
+# keystone_keycloak_realm_default_quick_login_check_milli_seconds:
+# keystone_keycloak_realm_default_totp_default_action:
+
 keystone_keycloak_client_id: keystone
 # keystone_keycloak_client_secret:
 keystone_keycloak_scopes: "openid email profile"
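
The commented-out realm defaults above are meant to be supplied per deployment; a minimal sketch of turning on a password policy, brute-force protection, and a mandatory TOTP required action (all values illustrative; the policy string uses Keycloak's password-policy syntax):

    keystone_keycloak_realm_default_password_policy: "length(12) and digits(1) and upperCase(1)"
    keystone_keycloak_realm_default_brute_force_protected: true
    keystone_keycloak_realm_default_brute_force_failure_factor: 5
    keystone_keycloak_realm_default_brute_force_wait_increment_seconds: 60
    keystone_keycloak_realm_default_totp_default_action: true
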
diff --git a/roles/keystone/tasks/main.yml b/roles/keystone/tasks/main.yml
index a35a9ce..e5abcb0 100644
--- a/roles/keystone/tasks/main.yml
+++ b/roles/keystone/tasks/main.yml
@@ -29,6 +29,36 @@
     realm: "{{ item.keycloak_realm }}"
     display_name: "{{ item.label }}"
     enabled: true
+    password_policy: "{{ item.keycloak_password_policy | default(keystone_keycloak_realm_default_password_policy | default(omit)) }}"
+    brute_force_protected: "{{ item.keycloak_brute_force_protected | default(keystone_keycloak_realm_default_brute_force_protected | default(omit)) }}"
+    failure_factor: "{{ item.keycloak_brute_force_failure_factor | default(keystone_keycloak_realm_default_brute_force_failure_factor | default(omit)) }}"
+    wait_increment_seconds: "{{ item.keycloak_brute_force_wait_increment_seconds | default(keystone_keycloak_realm_default_brute_force_wait_increment_seconds | default(omit)) }}"
+    max_failure_wait_seconds: "{{ item.keycloak_brute_force_max_failure_wait_seconds | default(keystone_keycloak_realm_default_brute_force_max_failure_wait_seconds | default(omit)) }}"
+    max_delta_time_seconds: "{{ item.keycloak_brute_force_max_delta_time_seconds | default(keystone_keycloak_realm_default_brute_force_max_delta_time_seconds | default(omit)) }}"
+    minimum_quick_login_wait_seconds: "{{ item.keycloak_minimum_quick_login_wait_seconds | default(keystone_keycloak_realm_default_minimum_quick_login_wait_seconds | default(omit)) }}"
+    quick_login_check_milli_seconds: "{{ item.keycloak_quick_login_check_milli_seconds | default(keystone_keycloak_realm_default_quick_login_check_milli_seconds | default(omit)) }}"
+  loop: "{{ keystone_domains }}"
+  loop_control:
+    label: "{{ item.name }}"
+
+- name: Setup Keycloak Authentication Required Actions (MFA)
+  community.general.keycloak_authentication_required_actions:
+    # Keycloak settings
+    auth_keycloak_url: "{{ item.keycloak_server_url }}"
+    auth_realm: "{{ item.keycloak_user_realm_name }}"
+    auth_client_id: "{{ item.keycloak_admin_client_id }}"
+    auth_username: "{{ item.keycloak_admin_user }}"
+    auth_password: "{{ item.keycloak_admin_password }}"
+    validate_certs: "{{ cluster_issuer_type != 'self-signed' }}"
+    # Realm settings
+    realm: "{{ item.name }}"
+    required_actions:
+      - alias: "CONFIGURE_TOTP"
+        name: "Configure OTP"
+        providerId: "CONFIGURE_TOTP"
+        defaultAction: "{{ item.keycloak_totp_default_action | default(keystone_keycloak_realm_default_totp_default_action | default(omit)) }}"
+        enabled: true
+    state: present
   loop: "{{ keystone_domains }}"
   loop_control:
     label: "{{ item.name }}"
@@ -78,6 +108,7 @@
     openstack_helm_ingress_service_name: keystone-api
     openstack_helm_ingress_service_port: 5000
     openstack_helm_ingress_annotations: "{{ keystone_ingress_annotations }}"
+    openstack_helm_ingress_class_name: "{{ keystone_ingress_class_name }}"
 
 - name: Validate if ingress is reachable
   ansible.builtin.uri:
@@ -119,6 +150,7 @@
   vexxhost.atmosphere.federation_idp:
     name: "{{ item.domain.name }}"
     domain_id: "{{ item.domain.id }}"
+    is_enabled: true
     remote_ids:
       - "{{ item.item | vexxhost.atmosphere.issuer_from_domain }}"
   loop: "{{ keystone_domains_result.results }}"
diff --git a/roles/keystone/tests/priorityclass_test.yaml b/roles/keystone/tests/priorityclass_test.yaml
new file mode 100644
index 0000000..8524f9b
--- /dev/null
+++ b/roles/keystone/tests/priorityclass_test.yaml
@@ -0,0 +1,144 @@
+suite: priorityclass
+tests:
+  - it: should support not having a priority class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/cron-job-credential-rotate.yaml
+      - templates/cron-job-fernet-rotate.yaml
+      - templates/job-credential-cleanup.yaml
+      - templates/job-credential-setup.yaml
+      - templates/job-domain-manage.yaml
+      - templates/job-fernet-setup.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-db-init.yaml
+      - templates/job-bootstrap.yaml
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/cron-job-credential-rotate.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.jobTemplate.spec.template.spec.priorityClassName
+      - template: templates/cron-job-fernet-rotate.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.jobTemplate.spec.template.spec.priorityClassName
+      - template: templates/job-credential-cleanup.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/job-fernet-setup.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/job-credential-setup.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/job-domain-manage.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.priorityClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/job-db-init.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+
+  - it: should support setting a priority class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/cron-job-credential-rotate.yaml
+      - templates/cron-job-fernet-rotate.yaml
+      - templates/job-credential-cleanup.yaml
+      - templates/job-credential-setup.yaml
+      - templates/job-domain-manage.yaml
+      - templates/job-fernet-setup.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-db-init.yaml
+      - templates/job-bootstrap.yaml
+    set:
+      pod:
+        priorityClassName:
+          keystone_api: platform
+          keystone_tests: platform
+          keystone_credential_rotate: platform
+          keystone_fernet_rotate: platform
+          keystone_credential_setup: platform
+          keystone_fernet_setup: platform
+          keystone_credential_cleanup: platform
+          keystone_domain_manage: platform
+          db_sync: platform
+          db_init: platform
+          bootstrap: platform
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/cron-job-credential-rotate.yaml
+        documentIndex: 5
+        equal:
+          path: spec.jobTemplate.spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/cron-job-fernet-rotate.yaml
+        documentIndex: 5
+        equal:
+          path: spec.jobTemplate.spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/job-credential-setup.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/job-fernet-setup.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/job-credential-cleanup.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/job-domain-manage.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 3
+        equal:
+          path: spec.priorityClassName
+          value: platform
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/job-db-init.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
diff --git a/roles/keystone/tests/runtimeclass_test.yaml b/roles/keystone/tests/runtimeclass_test.yaml
new file mode 100644
index 0000000..4478d86
--- /dev/null
+++ b/roles/keystone/tests/runtimeclass_test.yaml
@@ -0,0 +1,144 @@
+suite: runtimeclass
+tests:
+  - it: should support not having a runtime class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/cron-job-credential-rotate.yaml
+      - templates/cron-job-fernet-rotate.yaml
+      - templates/job-credential-cleanup.yaml
+      - templates/job-credential-setup.yaml
+      - templates/job-domain-manage.yaml
+      - templates/job-fernet-setup.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-db-init.yaml
+      - templates/job-bootstrap.yaml
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/cron-job-credential-rotate.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.jobTemplate.spec.template.spec.runtimeClassName
+      - template: templates/cron-job-fernet-rotate.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.jobTemplate.spec.template.spec.runtimeClassName
+      - template: templates/job-credential-cleanup.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/job-fernet-setup.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/job-credential-setup.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/job-domain-manage.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.runtimeClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/job-db-init.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+
+  - it: should support setting a runtime class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/cron-job-credential-rotate.yaml
+      - templates/cron-job-fernet-rotate.yaml
+      - templates/job-credential-cleanup.yaml
+      - templates/job-credential-setup.yaml
+      - templates/job-domain-manage.yaml
+      - templates/job-fernet-setup.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-db-init.yaml
+      - templates/job-bootstrap.yaml
+    set:
+      pod:
+        runtimeClassName:
+          keystone_api: kata-clh
+          keystone_tests: kata-clh
+          keystone_credential_rotate: kata-clh
+          keystone_fernet_rotate: kata-clh
+          keystone_credential_setup: kata-clh
+          keystone_fernet_setup: kata-clh
+          keystone_credential_cleanup: kata-clh
+          keystone_domain_manage: kata-clh
+          db_sync: kata-clh
+          db_init: kata-clh
+          bootstrap: kata-clh
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/cron-job-credential-rotate.yaml
+        documentIndex: 5
+        equal:
+          path: spec.jobTemplate.spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/cron-job-fernet-rotate.yaml
+        documentIndex: 5
+        equal:
+          path: spec.jobTemplate.spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-credential-setup.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-fernet-setup.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-credential-cleanup.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-domain-manage.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 3
+        equal:
+          path: spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-db-init.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
diff --git a/roles/keystone/vars_test.go b/roles/keystone/vars_test.go
index c5bfe19..f79886a 100644
--- a/roles/keystone/vars_test.go
+++ b/roles/keystone/vars_test.go
@@ -29,4 +29,6 @@
 	require.NoError(t, err)
 
 	testutils.TestDatabaseConf(t, vals.Conf.Keystone.Database)
+	testutils.TestAllPodsHaveRuntimeClass(t, vals)
+	testutils.TestAllPodsHavePriorityClass(t, vals)
 }
diff --git a/roles/magnum/defaults/main.yml b/roles/magnum/defaults/main.yml
index 740fbb0..84355f7 100644
--- a/roles/magnum/defaults/main.yml
+++ b/roles/magnum/defaults/main.yml
@@ -20,6 +20,10 @@
 magnum_helm_kubeconfig: "{{ kubeconfig_path | default('/etc/kubernetes/admin.conf') }}"
 magnum_helm_values: {}
 
+# Class name to use for the Ingress
+magnum_ingress_class_name: "{{ atmosphere_ingress_class_name }}"
+magnum_registry_ingress_class_name: "{{ atmosphere_ingress_class_name }}"
+
 # List of annotations to apply to the Ingress
 magnum_ingress_annotations: {}
 magnum_registry_ingress_annotations: {}
diff --git a/roles/magnum/tasks/main.yml b/roles/magnum/tasks/main.yml
index fc9b7a0..4805cbc 100644
--- a/roles/magnum/tasks/main.yml
+++ b/roles/magnum/tasks/main.yml
@@ -128,6 +128,7 @@
     openstack_helm_ingress_service_name: magnum-api
     openstack_helm_ingress_service_port: 9511
     openstack_helm_ingress_annotations: "{{ magnum_ingress_annotations }}"
+    openstack_helm_ingress_class_name: "{{ magnum_ingress_class_name }}"
 
 - name: Deploy magnum registry
   run_once: true
@@ -204,6 +205,7 @@
     openstack_helm_ingress_service_name: magnum-registry
     openstack_helm_ingress_service_port: 5000
     openstack_helm_ingress_annotations: "{{ _magnum_registry_ingress_annotations | combine(magnum_registry_ingress_annotations) }}"
+    openstack_helm_ingress_class_name: "{{ magnum_registry_ingress_class_name }}"
 
 - name: Upload images
   ansible.builtin.include_role:
diff --git a/roles/magnum/tests/priorityclass_test.yaml b/roles/magnum/tests/priorityclass_test.yaml
new file mode 100644
index 0000000..2b69337
--- /dev/null
+++ b/roles/magnum/tests/priorityclass_test.yaml
@@ -0,0 +1,65 @@
+suite: priorityclass
+tests:
+  - it: should support not having a priority class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/statefulset-conductor.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    set:
+      bootstrap:
+        enabled: true
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/statefulset-conductor.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 1
+        notExists:
+          path: spec.template.spec.priorityClassName
+
+  - it: should support setting a priority class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/statefulset-conductor.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    set:
+      pod:
+        priorityClassName:
+          magnum_api: platform
+          magnum_conductor: platform
+          db_sync: platform
+          bootstrap: platform
+      bootstrap:
+        enabled: true
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/statefulset-conductor.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 1
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
diff --git a/roles/magnum/tests/runtimeclass_test.yaml b/roles/magnum/tests/runtimeclass_test.yaml
new file mode 100644
index 0000000..b939eba
--- /dev/null
+++ b/roles/magnum/tests/runtimeclass_test.yaml
@@ -0,0 +1,65 @@
+suite: runtimeclass
+tests:
+  - it: should support not having a runtime class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/statefulset-conductor.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    set:
+      bootstrap:
+        enabled: true
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/statefulset-conductor.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 1
+        notExists:
+          path: spec.template.spec.runtimeClassName
+
+  - it: should support setting a runtime class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/statefulset-conductor.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    set:
+      pod:
+        runtimeClassName:
+          magnum_api: kata-clh
+          magnum_conductor: kata-clh
+          db_sync: kata-clh
+          bootstrap: kata-clh
+      bootstrap:
+        enabled: true
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/statefulset-conductor.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 1
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
diff --git a/roles/magnum/vars/main.yml b/roles/magnum/vars/main.yml
index 95582ac..5fff81e 100644
--- a/roles/magnum/vars/main.yml
+++ b/roles/magnum/vars/main.yml
@@ -27,6 +27,7 @@
         region_name: "{{ openstack_helm_endpoints_barbican_region_name }}"
       capi_client:
         ca_file: /etc/ssl/certs/ca-certificates.crt
+        endpoint_type: internalURL
       cinder_client:
         endpoint_type: internalURL
         region_name: "{{ openstack_helm_endpoints_cinder_region_name }}"
diff --git a/roles/magnum/vars_test.go b/roles/magnum/vars_test.go
index 21e8fd6..c727b6c 100644
--- a/roles/magnum/vars_test.go
+++ b/roles/magnum/vars_test.go
@@ -36,4 +36,6 @@
 	require.NoError(t, err)
 
 	testutils.TestDatabaseConf(t, vals.Conf.Magnum.Database)
+	testutils.TestAllPodsHaveRuntimeClass(t, vals)
+	testutils.TestAllPodsHavePriorityClass(t, vals)
 }
diff --git a/roles/manila/defaults/main.yml b/roles/manila/defaults/main.yml
index d0c66f0..e17830c 100644
--- a/roles/manila/defaults/main.yml
+++ b/roles/manila/defaults/main.yml
@@ -20,6 +20,9 @@
 manila_helm_kubeconfig: "{{ kubeconfig_path | default('/etc/kubernetes/admin.conf') }}"
 manila_helm_values: {}
 
+# Class name to use for the Ingress
+manila_ingress_class_name: "{{ atmosphere_ingress_class_name }}"
+
 # List of annotations to apply to the Ingress
 manila_ingress_annotations: {}
 
diff --git a/roles/manila/tasks/generate_resources.yml b/roles/manila/tasks/generate_resources.yml
index 9bfa6c8..08c5278 100644
--- a/roles/manila/tasks/generate_resources.yml
+++ b/roles/manila/tasks/generate_resources.yml
@@ -43,7 +43,7 @@
 - name: Create generic share driver security group tcp rules
   openstack.cloud.security_group_rule:
     cloud: atmosphere
-    security_group: "{{ _manila_service_security_group.id }}"
+    security_group: "{{ _manila_service_security_group.security_group.id }}"
     direction: ingress
     ethertype: IPv4
     protocol: tcp
@@ -58,7 +58,7 @@
 - name: Create generic share driver security group icmp rules
   openstack.cloud.security_group_rule:
     cloud: atmosphere
-    security_group: "{{ _manila_service_security_group.id }}"
+    security_group: "{{ _manila_service_security_group.security_group.id }}"
     direction: ingress
     ethertype: IPv4
     protocol: icmp
diff --git a/roles/manila/tasks/main.yml b/roles/manila/tasks/main.yml
index 8ae97e8..540ce34 100644
--- a/roles/manila/tasks/main.yml
+++ b/roles/manila/tasks/main.yml
@@ -38,6 +38,7 @@
     openstack_helm_ingress_service_name: manila-api
     openstack_helm_ingress_service_port: 8786
     openstack_helm_ingress_annotations: "{{ manila_ingress_annotations }}"
+    openstack_helm_ingress_class_name: "{{ manila_ingress_class_name }}"
 
 - name: Update service tenant quotas
   openstack.cloud.quota:
diff --git a/roles/manila/tests/priorityclass_test.yaml b/roles/manila/tests/priorityclass_test.yaml
new file mode 100644
index 0000000..9591737
--- /dev/null
+++ b/roles/manila/tests/priorityclass_test.yaml
@@ -0,0 +1,96 @@
+suite: priorityclass
+tests:
+  - it: should support not having a priority class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/deployment-data.yaml
+      - templates/deployment-scheduler.yaml
+      - templates/deployment-share.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/deployment-data.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/deployment-scheduler.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/deployment-share.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 1
+        notExists:
+          path: spec.priorityClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 1
+        notExists:
+          path: spec.template.spec.priorityClassName
+
+  - it: should support setting a priority class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/deployment-data.yaml
+      - templates/deployment-scheduler.yaml
+      - templates/deployment-share.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    set:
+      pod:
+        priorityClassName:
+          manila_api: platform
+          manila_data: platform
+          manila_scheduler: platform
+          manila_share: platform
+          manila_tests: platform
+          db_sync: platform
+          bootstrap: platform
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/deployment-data.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/deployment-scheduler.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/deployment-share.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 1
+        equal:
+          path: spec.priorityClassName
+          value: platform
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 1
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
diff --git a/roles/manila/tests/runtimeclass_test.yaml b/roles/manila/tests/runtimeclass_test.yaml
new file mode 100644
index 0000000..1a80ac3
--- /dev/null
+++ b/roles/manila/tests/runtimeclass_test.yaml
@@ -0,0 +1,96 @@
+suite: runtimeclass
+tests:
+  - it: should support not having a runtime class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/deployment-data.yaml
+      - templates/deployment-scheduler.yaml
+      - templates/deployment-share.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/deployment-data.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/deployment-scheduler.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/deployment-share.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 1
+        notExists:
+          path: spec.runtimeClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 1
+        notExists:
+          path: spec.template.spec.runtimeClassName
+
+  - it: should support setting a runtime class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/deployment-data.yaml
+      - templates/deployment-scheduler.yaml
+      - templates/deployment-share.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    set:
+      pod:
+        runtimeClassName:
+          manila_api: kata-clh
+          manila_data: kata-clh
+          manila_scheduler: kata-clh
+          manila_tests: kata-clh
+          manila_share: kata-clh
+          db_sync: kata-clh
+          bootstrap: kata-clh
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/deployment-data.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/deployment-scheduler.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/deployment-share.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 1
+        equal:
+          path: spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 1
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
diff --git a/roles/manila/vars/main.yml b/roles/manila/vars/main.yml
index ce2175d..3c86604 100644
--- a/roles/manila/vars/main.yml
+++ b/roles/manila/vars/main.yml
@@ -58,7 +58,7 @@
         path_to_private_key: /etc/manila/ssh-keys/id_rsa
         path_to_public_key: /etc/manila/ssh-keys/id_rsa.pub
         service_image_name: "{{ manila_image_name }}"
-        service_instance_flavor_id: "{{ _manila_flavor.id }}"
+        service_instance_flavor_id: "{{ _manila_flavor.flavor.id }}"
         service_instance_security_group: manila-service-security-group
       oslo_messaging_notifications:
         driver: noop
diff --git a/roles/manila/vars_test.go b/roles/manila/vars_test.go
index 627920b..14f578f 100644
--- a/roles/manila/vars_test.go
+++ b/roles/manila/vars_test.go
@@ -36,4 +36,6 @@
 	require.NoError(t, err)
 
 	testutils.TestDatabaseConf(t, vals.Conf.Manila.Database)
+	testutils.TestAllPodsHaveRuntimeClass(t, vals)
+	testutils.TestAllPodsHavePriorityClass(t, vals)
 }
diff --git a/roles/memcached/vars_test.go b/roles/memcached/vars_test.go
new file mode 100644
index 0000000..81062ef
--- /dev/null
+++ b/roles/memcached/vars_test.go
@@ -0,0 +1,40 @@
+package memcached
+
+import (
+	_ "embed"
+	"os"
+	"testing"
+
+	"github.com/goccy/go-yaml"
+	"github.com/stretchr/testify/require"
+
+	"github.com/vexxhost/atmosphere/internal/openstack_helm"
+	"github.com/vexxhost/atmosphere/internal/testutils"
+)
+
+var (
+	//go:embed vars/main.yml
+	varsFile []byte
+	vars     Vars
+)
+
+type Vars struct {
+	openstack_helm.HelmValues `yaml:"_memcached_helm_values"`
+}
+
+func TestMain(m *testing.M) {
+	t := &testing.T{}
+	err := yaml.UnmarshalWithOptions(varsFile, &vars)
+	require.NoError(t, err)
+
+	code := m.Run()
+	os.Exit(code)
+}
+
+func TestHelmValues(t *testing.T) {
+	vals, err := openstack_helm.CoalescedHelmValues("../../charts/memcached", &vars.HelmValues)
+	require.NoError(t, err)
+
+	testutils.TestAllPodsHaveRuntimeClass(t, vals)
+	testutils.TestAllPodsHavePriorityClass(t, vals)
+}
diff --git a/roles/neutron/defaults/main.yml b/roles/neutron/defaults/main.yml
index 04d48ac..b8579eb 100644
--- a/roles/neutron/defaults/main.yml
+++ b/roles/neutron/defaults/main.yml
@@ -23,6 +23,9 @@
 # List of networks to provision inside OpenStack
 neutron_networks: []
 
+# Class name to use for the Ingress
+neutron_ingress_class_name: "{{ atmosphere_ingress_class_name }}"
+
 # List of annotations to apply to the Ingress
 neutron_ingress_annotations: {}
 
diff --git a/roles/neutron/tasks/main.yml b/roles/neutron/tasks/main.yml
index 874ed3c..47ea874 100644
--- a/roles/neutron/tasks/main.yml
+++ b/roles/neutron/tasks/main.yml
@@ -49,6 +49,7 @@
     openstack_helm_ingress_service_name: neutron-server
     openstack_helm_ingress_service_port: 9696
     openstack_helm_ingress_annotations: "{{ neutron_ingress_annotations }}"
+    openstack_helm_ingress_class_name: "{{ neutron_ingress_class_name }}"
 
 - name: Create networks
   when: neutron_networks | length > 0
@@ -87,7 +88,7 @@
       until: _result is not failed
 
     - name: Create subnets
-      openstack.cloud.subnet:
+      vexxhost.atmosphere.subnet:
         cloud: atmosphere
         # Subnet settings
         network_name: "{{ item.0.name }}"
diff --git a/roles/neutron/tests/priorityclass_test.yaml b/roles/neutron/tests/priorityclass_test.yaml
new file mode 100644
index 0000000..27e52b3
--- /dev/null
+++ b/roles/neutron/tests/priorityclass_test.yaml
@@ -0,0 +1,257 @@
+suite: priorityclass
+tests:
+  - it: should support not having a priority class
+    templates:
+      - templates/daemonset-bagpipe-bgp.yaml
+      - templates/daemonset-bgp-dragent.yaml
+      - templates/daemonset-dhcp-agent.yaml
+      - templates/daemonset-l2gw-agent.yaml
+      - templates/daemonset-l3-agent.yaml
+      - templates/daemonset-lb-agent.yaml
+      - templates/daemonset-metadata-agent.yaml
+      - templates/daemonset-netns-cleanup-cron.yaml
+      - templates/daemonset-neutron-ovn-vpn-agent.yaml
+      - templates/daemonset-ovn-metadata-agent.yaml
+      - templates/daemonset-ovs-agent.yaml
+      - templates/daemonset-sriov-agent.yaml
+      - templates/deployment-ironic-agent.yaml
+      - templates/deployment-rpc_server.yaml
+      - templates/deployment-server.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    set:
+      bootstrap:
+        enabled: true
+      network:
+        backend:
+          - openvswitch
+          - sriov
+          - linuxbridge
+      manifests:
+        daemonset_l2gw_agent: true
+        daemonset_bagpipe_bgp: true
+        daemonset_bgp_dragent: true
+        daemonset_ovn_metadata_agent: true
+        daemonset_ovn_vpn_agent: true
+        deployment_ironic_agent: true
+    asserts:
+      - template: templates/daemonset-bagpipe-bgp.yaml
+        documentIndex: 2
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/daemonset-bgp-dragent.yaml
+        documentIndex: 2
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/daemonset-dhcp-agent.yaml
+        documentIndex: 4
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/daemonset-l2gw-agent.yaml
+        documentIndex: 2
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/daemonset-l3-agent.yaml
+        documentIndex: 4
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/daemonset-lb-agent.yaml
+        documentIndex: 4
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/daemonset-metadata-agent.yaml
+        documentIndex: 4
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/daemonset-netns-cleanup-cron.yaml
+        documentIndex: 2
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/daemonset-neutron-ovn-vpn-agent.yaml
+        documentIndex: 4
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/daemonset-ovn-metadata-agent.yaml
+        documentIndex: 4
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/daemonset-ovs-agent.yaml
+        documentIndex: 4
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/daemonset-sriov-agent.yaml
+        documentIndex: 2
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/deployment-ironic-agent.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/deployment-rpc_server.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/deployment-server.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.priorityClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 1
+        notExists:
+          path: spec.template.spec.priorityClassName
+
+  - it: should support setting a priority class
+    templates:
+      - templates/daemonset-bagpipe-bgp.yaml
+      - templates/daemonset-bgp-dragent.yaml
+      - templates/daemonset-dhcp-agent.yaml
+      - templates/daemonset-l2gw-agent.yaml
+      - templates/daemonset-l3-agent.yaml
+      - templates/daemonset-lb-agent.yaml
+      - templates/daemonset-metadata-agent.yaml
+      - templates/daemonset-netns-cleanup-cron.yaml
+      - templates/daemonset-neutron-ovn-vpn-agent.yaml
+      - templates/daemonset-ovn-metadata-agent.yaml
+      - templates/daemonset-ovs-agent.yaml
+      - templates/daemonset-sriov-agent.yaml
+      - templates/deployment-ironic-agent.yaml
+      - templates/deployment-rpc_server.yaml
+      - templates/deployment-server.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    set:
+      bootstrap:
+        enabled: true
+      network:
+        backend:
+          - openvswitch
+          - sriov
+          - linuxbridge
+      pod:
+        priorityClassName:
+          bagpipe_bgp: platform
+          bgp_dragent: platform
+          neutron_dhcp_agent: platform
+          neutron_l2gw_agent: platform
+          neutron_l3_agent: platform
+          neutron_lb_agent: platform
+          neutron_metadata_agent: platform
+          neutron_netns_cleanup_cron: platform
+          ovn_vpn_agent: platform
+          neutron_ovn_metadata_agent: platform
+          neutron_ovs_agent: platform
+          neutron_sriov_agent: platform
+          neutron_ironic_agent: platform
+          neutron_rpc_server: platform
+          neutron_server: platform
+          neutron_tests: platform
+          db_sync: platform
+          bootstrap: platform
+      manifests:
+        daemonset_l2gw_agent: true
+        daemonset_bagpipe_bgp: true
+        daemonset_bgp_dragent: true
+        daemonset_ovn_metadata_agent: true
+        daemonset_ovn_vpn_agent: true
+        deployment_ironic_agent: true
+    asserts:
+      - template: templates/daemonset-bagpipe-bgp.yaml
+        documentIndex: 2
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/daemonset-bgp-dragent.yaml
+        documentIndex: 2
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/daemonset-dhcp-agent.yaml
+        documentIndex: 4
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/daemonset-l2gw-agent.yaml
+        documentIndex: 2
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/daemonset-l3-agent.yaml
+        documentIndex: 4
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/daemonset-lb-agent.yaml
+        documentIndex: 4
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/daemonset-metadata-agent.yaml
+        documentIndex: 4
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/daemonset-netns-cleanup-cron.yaml
+        documentIndex: 2
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/daemonset-neutron-ovn-vpn-agent.yaml
+        documentIndex: 4
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/daemonset-ovn-metadata-agent.yaml
+        documentIndex: 4
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/daemonset-ovs-agent.yaml
+        documentIndex: 4
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/daemonset-sriov-agent.yaml
+        documentIndex: 2
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/deployment-ironic-agent.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/deployment-rpc_server.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/deployment-server.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 3
+        equal:
+          path: spec.priorityClassName
+          value: platform
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
diff --git a/roles/neutron/tests/runtimeclass_test.yaml b/roles/neutron/tests/runtimeclass_test.yaml
new file mode 100644
index 0000000..c4efae7
--- /dev/null
+++ b/roles/neutron/tests/runtimeclass_test.yaml
@@ -0,0 +1,257 @@
+suite: runtimeclass
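+# Each template renders several Kubernetes documents; documentIndex selects the
+# one the assertion runs against (here, the DaemonSet/Deployment/Job/Pod spec).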
+tests:
+  - it: should support not having a runtime class
+    templates:
+      - templates/daemonset-bagpipe-bgp.yaml
+      - templates/daemonset-bgp-dragent.yaml
+      - templates/daemonset-dhcp-agent.yaml
+      - templates/daemonset-l2gw-agent.yaml
+      - templates/daemonset-l3-agent.yaml
+      - templates/daemonset-lb-agent.yaml
+      - templates/daemonset-metadata-agent.yaml
+      - templates/daemonset-netns-cleanup-cron.yaml
+      - templates/daemonset-neutron-ovn-vpn-agent.yaml
+      - templates/daemonset-ovn-metadata-agent.yaml
+      - templates/daemonset-ovs-agent.yaml
+      - templates/daemonset-sriov-agent.yaml
+      - templates/deployment-ironic-agent.yaml
+      - templates/deployment-rpc_server.yaml
+      - templates/deployment-server.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    set:
+      bootstrap:
+        enabled: true
+      network:
+        backend:
+          - openvswitch
+          - sriov
+          - linuxbridge
+      manifests:
+        daemonset_l2gw_agent: true
+        daemonset_bagpipe_bgp: true
+        daemonset_bgp_dragent: true
+        daemonset_ovn_metadata_agent: true
+        daemonset_ovn_vpn_agent: true
+        deployment_ironic_agent: true
+    asserts:
+      - template: templates/daemonset-bagpipe-bgp.yaml
+        documentIndex: 2
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/daemonset-bgp-dragent.yaml
+        documentIndex: 2
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/daemonset-dhcp-agent.yaml
+        documentIndex: 4
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/daemonset-l2gw-agent.yaml
+        documentIndex: 2
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/daemonset-l3-agent.yaml
+        documentIndex: 4
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/daemonset-lb-agent.yaml
+        documentIndex: 4
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/daemonset-metadata-agent.yaml
+        documentIndex: 4
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/daemonset-netns-cleanup-cron.yaml
+        documentIndex: 2
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/daemonset-neutron-ovn-vpn-agent.yaml
+        documentIndex: 4
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/daemonset-ovn-metadata-agent.yaml
+        documentIndex: 4
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/daemonset-ovs-agent.yaml
+        documentIndex: 4
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/daemonset-sriov-agent.yaml
+        documentIndex: 2
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/deployment-ironic-agent.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/deployment-rpc_server.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/deployment-server.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.runtimeClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+
+  - it: should support setting a runtime class
+    templates:
+      - templates/daemonset-bagpipe-bgp.yaml
+      - templates/daemonset-bgp-dragent.yaml
+      - templates/daemonset-dhcp-agent.yaml
+      - templates/daemonset-l2gw-agent.yaml
+      - templates/daemonset-l3-agent.yaml
+      - templates/daemonset-lb-agent.yaml
+      - templates/daemonset-metadata-agent.yaml
+      - templates/daemonset-netns-cleanup-cron.yaml
+      - templates/daemonset-neutron-ovn-vpn-agent.yaml
+      - templates/daemonset-ovn-metadata-agent.yaml
+      - templates/daemonset-ovs-agent.yaml
+      - templates/daemonset-sriov-agent.yaml
+      - templates/deployment-ironic-agent.yaml
+      - templates/deployment-rpc_server.yaml
+      - templates/deployment-server.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    set:
+      bootstrap:
+        enabled: true
+      network:
+        backend:
+          - openvswitch
+          - sriov
+          - linuxbridge
+      pod:
+        runtimeClassName:
+          bagpipe_bgp: kata-clh
+          bgp_dragent: kata-clh
+          neutron_dhcp_agent: kata-clh
+          neutron_l2gw_agent: kata-clh
+          neutron_l3_agent: kata-clh
+          neutron_lb_agent: kata-clh
+          neutron_metadata_agent: kata-clh
+          neutron_netns_cleanup_cron: kata-clh
+          ovn_vpn_agent: kata-clh
+          neutron_ovn_metadata_agent: kata-clh
+          neutron_ovs_agent: kata-clh
+          neutron_sriov_agent: kata-clh
+          neutron_ironic_agent: kata-clh
+          neutron_rpc_server: kata-clh
+          neutron_server: kata-clh
+          neutron_tests: kata-clh
+          db_sync: kata-clh
+          bootstrap: kata-clh
+      manifests:
+        daemonset_l2gw_agent: true
+        daemonset_bagpipe_bgp: true
+        daemonset_bgp_dragent: true
+        daemonset_ovn_metadata_agent: true
+        daemonset_ovn_vpn_agent: true
+        deployment_ironic_agent: true
+    asserts:
+      - template: templates/daemonset-bagpipe-bgp.yaml
+        documentIndex: 2
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/daemonset-bgp-dragent.yaml
+        documentIndex: 2
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/daemonset-dhcp-agent.yaml
+        documentIndex: 4
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/daemonset-l2gw-agent.yaml
+        documentIndex: 2
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/daemonset-l3-agent.yaml
+        documentIndex: 4
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/daemonset-lb-agent.yaml
+        documentIndex: 4
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/daemonset-metadata-agent.yaml
+        documentIndex: 4
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/daemonset-netns-cleanup-cron.yaml
+        documentIndex: 2
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/daemonset-neutron-ovn-vpn-agent.yaml
+        documentIndex: 4
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/daemonset-ovn-metadata-agent.yaml
+        documentIndex: 4
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/daemonset-ovs-agent.yaml
+        documentIndex: 4
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/daemonset-sriov-agent.yaml
+        documentIndex: 2
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/deployment-ironic-agent.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/deployment-rpc_server.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/deployment-server.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 3
+        equal:
+          path: spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
diff --git a/roles/neutron/vars_test.go b/roles/neutron/vars_test.go
index 8358366..1ce11e2 100644
--- a/roles/neutron/vars_test.go
+++ b/roles/neutron/vars_test.go
@@ -36,4 +36,6 @@
 	require.NoError(t, err)
 
 	testutils.TestDatabaseConf(t, vals.Conf.Neutron.Database)
+	testutils.TestAllPodsHaveRuntimeClass(t, vals)
+	testutils.TestAllPodsHavePriorityClass(t, vals)
 }
diff --git a/roles/nova/defaults/main.yml b/roles/nova/defaults/main.yml
index e02faac..a951a5f 100644
--- a/roles/nova/defaults/main.yml
+++ b/roles/nova/defaults/main.yml
@@ -26,6 +26,10 @@
 # List of flavors to provision inside Nova
 nova_flavors: []
 
+# Class names to use for the Ingresses
+nova_api_ingress_class_name: "{{ atmosphere_ingress_class_name }}"
+nova_novnc_ingress_class_name: "{{ atmosphere_ingress_class_name }}"
+
 # List of annotations to apply to the Ingress
 nova_api_ingress_annotations: {}
 nova_novnc_ingress_annotations: {}
diff --git a/roles/nova/tasks/main.yml b/roles/nova/tasks/main.yml
index ed10aa5..a86aa0c 100644
--- a/roles/nova/tasks/main.yml
+++ b/roles/nova/tasks/main.yml
@@ -49,6 +49,7 @@
     openstack_helm_ingress_service_name: nova-api
     openstack_helm_ingress_service_port: 8774
     openstack_helm_ingress_annotations: "{{ nova_api_ingress_annotations }}"
+    openstack_helm_ingress_class_name: "{{ nova_api_ingress_class_name }}"
 
 - name: Create Ingress
   ansible.builtin.include_role:
@@ -58,6 +59,7 @@
     openstack_helm_ingress_service_name: nova-novncproxy
     openstack_helm_ingress_service_port: 6080
     openstack_helm_ingress_annotations: "{{ _nova_novnc_ingress_annotations | combine(nova_novnc_ingress_annotations) }}"
+    openstack_helm_ingress_class_name: "{{ nova_novnc_ingress_class_name }}"
 
 - name: Create flavors
   when: nova_flavors | length > 0
diff --git a/roles/nova/tests/priorityclass_test.yaml b/roles/nova/tests/priorityclass_test.yaml
new file mode 100644
index 0000000..cf23b0a
--- /dev/null
+++ b/roles/nova/tests/priorityclass_test.yaml
@@ -0,0 +1,178 @@
+suite: priorityclass
+tests:
+  - it: should support not having a priority class
+    templates:
+      - templates/daemonset-compute.yaml
+      - templates/deployment-api-metadata.yaml
+      - templates/deployment-api-osapi.yaml
+      - templates/deployment-conductor.yaml
+      - templates/deployment-novncproxy.yaml
+      - templates/deployment-scheduler.yaml
+      - templates/deployment-spiceproxy.yaml
+      - templates/cron-job-archive-deleted-rows.yaml
+      - templates/cron-job-cell-setup.yaml
+      - templates/cron-job-service-cleaner.yaml
+      - templates/statefulset-compute-ironic.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    set:
+      manifests:
+        cron_job_archive_deleted_rows: true
+        statefulset_compute_ironic: true
+    asserts:
+      - template: templates/daemonset-compute.yaml
+        documentIndex: 4
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/deployment-api-metadata.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/deployment-api-osapi.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/deployment-conductor.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/deployment-novncproxy.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/deployment-scheduler.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/statefulset-compute-ironic.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/cron-job-archive-deleted-rows.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.jobTemplate.spec.template.spec.priorityClassName
+      - template: templates/cron-job-cell-setup.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.jobTemplate.spec.template.spec.priorityClassName
+      - template: templates/cron-job-service-cleaner.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.jobTemplate.spec.template.spec.priorityClassName
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.priorityClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+
+  - it: should support setting a priority class
+    templates:
+      - templates/daemonset-compute.yaml
+      - templates/deployment-api-metadata.yaml
+      - templates/deployment-api-osapi.yaml
+      - templates/deployment-conductor.yaml
+      - templates/deployment-novncproxy.yaml
+      - templates/deployment-scheduler.yaml
+      - templates/deployment-spiceproxy.yaml
+      - templates/cron-job-archive-deleted-rows.yaml
+      - templates/cron-job-cell-setup.yaml
+      - templates/cron-job-service-cleaner.yaml
+      - templates/statefulset-compute-ironic.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    set:
+      manifests:
+        cron_job_archive_deleted_rows: true
+        statefulset_compute_ironic: true
+      pod:
+        priorityClassName:
+          nova_compute: platform
+          nova_api_metadata: platform
+          nova_api_osapi: platform
+          nova_conductor: platform
+          nova_novncproxy: platform
+          nova_scheduler: platform
+          nova_spiceproxy: platform
+          nova_archive_deleted_rows: platform
+          nova_cell_setup: platform
+          nova_service_cleaner: platform
+          nova_compute_ironic: platform
+          nova_tests: platform
+          db_sync: platform
+          bootstrap: platform
+    asserts:
+      - template: templates/daemonset-compute.yaml
+        documentIndex: 4
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/deployment-api-metadata.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/deployment-api-osapi.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/deployment-conductor.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/deployment-novncproxy.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/deployment-scheduler.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/cron-job-archive-deleted-rows.yaml
+        documentIndex: 3
+        equal:
+          path: spec.jobTemplate.spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/cron-job-cell-setup.yaml
+        documentIndex: 3
+        equal:
+          path: spec.jobTemplate.spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/cron-job-service-cleaner.yaml
+        documentIndex: 3
+        equal:
+          path: spec.jobTemplate.spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/statefulset-compute-ironic.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 3
+        equal:
+          path: spec.priorityClassName
+          value: platform
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
diff --git a/roles/nova/tests/runtimeclass_test.yaml b/roles/nova/tests/runtimeclass_test.yaml
new file mode 100644
index 0000000..3561a38
--- /dev/null
+++ b/roles/nova/tests/runtimeclass_test.yaml
@@ -0,0 +1,182 @@
+suite: runtimeclass
+tests:
+  - it: should support not having a runtime class
+    templates:
+      - templates/daemonset-compute.yaml
+      - templates/deployment-api-metadata.yaml
+      - templates/deployment-api-osapi.yaml
+      - templates/deployment-conductor.yaml
+      - templates/deployment-novncproxy.yaml
+      - templates/deployment-scheduler.yaml
+      - templates/deployment-spiceproxy.yaml
+      - templates/cron-job-archive-deleted-rows.yaml
+      - templates/cron-job-cell-setup.yaml
+      - templates/cron-job-service-cleaner.yaml
+      - templates/statefulset-compute-ironic.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    set:
+      console:
+        console_kind: spice
+      manifests:
+        cron_job_archive_deleted_rows: true
+        statefulset_compute_ironic: true
+    asserts:
+      - template: templates/daemonset-compute.yaml
+        documentIndex: 4
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/deployment-api-metadata.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/deployment-api-osapi.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/deployment-conductor.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/deployment-scheduler.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/deployment-spiceproxy.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/statefulset-compute-ironic.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/cron-job-archive-deleted-rows.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.jobTemplate.spec.template.spec.runtimeClassName
+      - template: templates/cron-job-cell-setup.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.jobTemplate.spec.template.spec.runtimeClassName
+      - template: templates/cron-job-service-cleaner.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.jobTemplate.spec.template.spec.runtimeClassName
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.runtimeClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+
+  - it: should support setting a runtime class
+    templates:
+      - templates/daemonset-compute.yaml
+      - templates/deployment-api-metadata.yaml
+      - templates/deployment-api-osapi.yaml
+      - templates/deployment-conductor.yaml
+      - templates/deployment-novncproxy.yaml
+      - templates/deployment-scheduler.yaml
+      - templates/deployment-spiceproxy.yaml
+      - templates/cron-job-archive-deleted-rows.yaml
+      - templates/cron-job-cell-setup.yaml
+      - templates/cron-job-service-cleaner.yaml
+      - templates/statefulset-compute-ironic.yaml
+      - templates/pod-rally-test.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    set:
+      console:
+        console_kind: spice
+      manifests:
+        cron_job_archive_deleted_rows: true
+        statefulset_compute_ironic: true
+      pod:
+        runtimeClassName:
+          nova_compute: kata-clh
+          nova_api_metadata: kata-clh
+          nova_api_osapi: kata-clh
+          nova_conductor: kata-clh
+          nova_novncproxy: kata-clh
+          nova_scheduler: kata-clh
+          nova_spiceproxy: kata-clh
+          nova_archive_deleted_rows: kata-clh
+          nova_cell_setup: kata-clh
+          nova_service_cleaner: kata-clh
+          nova_compute_ironic: kata-clh
+          nova_tests: kata-clh
+          db_sync: kata-clh
+          bootstrap: kata-clh
+    asserts:
+      - template: templates/daemonset-compute.yaml
+        documentIndex: 4
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/deployment-api-metadata.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/deployment-api-osapi.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/deployment-conductor.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/deployment-scheduler.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/deployment-spiceproxy.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/cron-job-archive-deleted-rows.yaml
+        documentIndex: 3
+        equal:
+          path: spec.jobTemplate.spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/cron-job-cell-setup.yaml
+        documentIndex: 3
+        equal:
+          path: spec.jobTemplate.spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/cron-job-service-cleaner.yaml
+        documentIndex: 3
+        equal:
+          path: spec.jobTemplate.spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/statefulset-compute-ironic.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/pod-rally-test.yaml
+        documentIndex: 3
+        equal:
+          path: spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
diff --git a/roles/nova/vars/main.yml b/roles/nova/vars/main.yml
index bd72f52..10c67a6 100644
--- a/roles/nova/vars/main.yml
+++ b/roles/nova/vars/main.yml
@@ -62,10 +62,7 @@
       cache:
         backend: oslo_cache.memcache_pool
       cinder:
-        catalog_info: volumev3::internalURL
-        os_region_name: "{{ openstack_helm_endpoints_nova_region_name }}"
-        username: "nova-{{ openstack_helm_endpoints_nova_region_name }}"
-        password: "{{ openstack_helm_endpoints_nova_keystone_password }}"
+        auth_type: password
       conductor:
         workers: 8
       compute:
@@ -111,6 +108,8 @@
         driver: noop
       os_vif_ovs:
         ovsdb_connection: unix:/run/openvswitch/db.sock
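+      # Launch the os-brick privsep helper through nova-rootwrap so volume
+      # operations can escalate privileges.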
+      privsep_osbrick:
+        helper_command: sudo nova-rootwrap /etc/nova/rootwrap.conf privsep-helper --config-file /etc/nova/nova.conf
       scheduler:
         max_attempts: 3
         workers: 8
diff --git a/roles/nova/vars_test.go b/roles/nova/vars_test.go
index 716b946..93046f8 100644
--- a/roles/nova/vars_test.go
+++ b/roles/nova/vars_test.go
@@ -36,4 +36,6 @@
 	require.NoError(t, err)
 
 	testutils.TestDatabaseConf(t, vals.Conf.Nova.Database)
+	testutils.TestAllPodsHaveRuntimeClass(t, vals)
+	testutils.TestAllPodsHavePriorityClass(t, vals)
 }
diff --git a/roles/octavia/defaults/main.yml b/roles/octavia/defaults/main.yml
index 19b31f8..3f6427b 100644
--- a/roles/octavia/defaults/main.yml
+++ b/roles/octavia/defaults/main.yml
@@ -20,6 +20,9 @@
 octavia_helm_kubeconfig: "{{ kubeconfig_path | default('/etc/kubernetes/admin.conf') }}"
 octavia_helm_values: {}
 
+# Class name to use for the Ingress
+octavia_ingress_class_name: "{{ atmosphere_ingress_class_name }}"
+
 # List of annotations to apply to the Ingress
 octavia_ingress_annotations: {}
 
diff --git a/roles/octavia/tasks/generate_resources.yml b/roles/octavia/tasks/generate_resources.yml
index 5960aa1..e582be4 100644
--- a/roles/octavia/tasks/generate_resources.yml
+++ b/roles/octavia/tasks/generate_resources.yml
@@ -20,13 +20,13 @@
   register: _octavia_management_network
 
 - name: Create management subnet
-  openstack.cloud.subnet:
+  vexxhost.atmosphere.subnet:
     cloud: atmosphere
     # Subnet settings
     network_name: "{{ octavia_management_network_name }}"
     name: "{{ octavia_management_subnet_name }}"
     cidr: "{{ octavia_management_subnet_cidr }}"
-    no_gateway_ip: true
+    disable_gateway_ip: true
 
 - name: Create health manager security group
   openstack.cloud.security_group:
@@ -37,7 +37,7 @@
 - name: Create health manager security group rules
   openstack.cloud.security_group_rule:
     cloud: atmosphere
-    security_group: "{{ _octavia_health_manager_sg.id }}"
+    security_group: "{{ _octavia_health_manager_sg.security_group.id }}"
     direction: ingress
     ethertype: IPv4
     protocol: "{{ item.protocol }}"
@@ -67,7 +67,7 @@
         if hostvars[item]['octavia_health_manager_ip'] is defined else omit
       }}
     security_groups:
-      - "{{ _octavia_health_manager_sg.id }}"
+      - "{{ _octavia_health_manager_sg.security_group.id }}"
   loop: "{{ groups['controllers'] }}"
   loop_control:
     index_var: _octavia_health_manager_port_index
@@ -110,10 +110,10 @@
 
 - name: Set controller_ip_port_list
   ansible.builtin.set_fact:
-    _octavia_controller_ip_port_list: "{{ (_octavia_controller_ip_port_list | d([]) + [item.openstack_ports[0].fixed_ips[0].ip_address + ':5555']) | unique }}"
+    _octavia_controller_ip_port_list: "{{ (_octavia_controller_ip_port_list | d([]) + [item.ports[0].fixed_ips[0].ip_address + ':5555']) | unique }}"
   loop: "{{ _octavia_health_manager_ports.results }}"
   loop_control:
-    label: "{{ item.openstack_ports[0].name }}"
+    label: "{{ item.ports[0].name }}"
 
 - name: Create amphora security group
   openstack.cloud.security_group:
@@ -124,13 +124,13 @@
 - name: Create amphora security group rules
   openstack.cloud.security_group_rule:
     cloud: atmosphere
-    security_group: "{{ _octavia_amphora_sg.id }}"
+    security_group: "{{ _octavia_amphora_sg.security_group.id }}"
     direction: ingress
     ethertype: IPv4
     protocol: tcp
     port_range_min: "{{ item.0 }}"
     port_range_max: "{{ item.0 }}"
-    remote_ip_prefix: "{{ item.1.openstack_ports[0].fixed_ips[0].ip_address }}/32"
+    remote_ip_prefix: "{{ item.1.ports[0].fixed_ips[0].ip_address }}/32"
   with_nested:
     - [22, 9443]
     - "{{ _octavia_health_manager_ports.results }}"
diff --git a/roles/octavia/tasks/main.yml b/roles/octavia/tasks/main.yml
index b2c94fd..4432856 100644
--- a/roles/octavia/tasks/main.yml
+++ b/roles/octavia/tasks/main.yml
@@ -138,3 +138,4 @@
     openstack_helm_ingress_service_name: octavia-api
     openstack_helm_ingress_service_port: 9876
     openstack_helm_ingress_annotations: "{{ octavia_ingress_annotations }}"
+    openstack_helm_ingress_class_name: "{{ octavia_ingress_class_name }}"
diff --git a/roles/octavia/tests/priorityclass_test.yaml b/roles/octavia/tests/priorityclass_test.yaml
new file mode 100644
index 0000000..1732a9a
--- /dev/null
+++ b/roles/octavia/tests/priorityclass_test.yaml
@@ -0,0 +1,84 @@
+suite: priorityclass
+tests:
+  - it: should support not having a priority class
+    templates:
+      - templates/daemonset-health-manager.yaml
+      - templates/deployment-api.yaml
+      - templates/deployment-housekeeping.yaml
+      - templates/deployment-worker.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    asserts:
+      - template: templates/daemonset-health-manager.yaml
+        documentIndex: 4
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/deployment-housekeeping.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/deployment-worker.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 1
+        notExists:
+          path: spec.template.spec.priorityClassName
+
+  - it: should support setting a priority class
+    templates:
+      - templates/daemonset-health-manager.yaml
+      - templates/deployment-api.yaml
+      - templates/deployment-housekeeping.yaml
+      - templates/deployment-worker.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    set:
+      pod:
+        priorityClassName:
+          octavia_health_manager: platform
+          octavia_api: platform
+          octavia_housekeeping: platform
+          octavia_worker: platform
+          db_sync: platform
+          bootstrap: platform
+    asserts:
+      - template: templates/daemonset-health-manager.yaml
+        documentIndex: 4
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/deployment-housekeeping.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/deployment-worker.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 1
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
diff --git a/roles/octavia/tests/runtimeclass_test.yaml b/roles/octavia/tests/runtimeclass_test.yaml
new file mode 100644
index 0000000..53f10fc
--- /dev/null
+++ b/roles/octavia/tests/runtimeclass_test.yaml
@@ -0,0 +1,84 @@
+suite: runtimeclass
+tests:
+  - it: should support not having a runtime class
+    templates:
+      - templates/daemonset-health-manager.yaml
+      - templates/deployment-api.yaml
+      - templates/deployment-housekeeping.yaml
+      - templates/deployment-worker.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    asserts:
+      - template: templates/daemonset-health-manager.yaml
+        documentIndex: 4
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/deployment-housekeeping.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/deployment-worker.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 1
+        notExists:
+          path: spec.template.spec.runtimeClassName
+
+  - it: should support setting a runtime class
+    templates:
+      - templates/daemonset-health-manager.yaml
+      - templates/deployment-api.yaml
+      - templates/deployment-housekeeping.yaml
+      - templates/deployment-worker.yaml
+      - templates/job-db-sync.yaml
+      - templates/job-bootstrap.yaml
+    set:
+      pod:
+        runtimeClassName:
+          octavia_health_manager: kata-clh
+          octavia_api: kata-clh
+          octavia_housekeeping: kata-clh
+          octavia_worker: kata-clh
+          db_sync: kata-clh
+          bootstrap: kata-clh
+    asserts:
+      - template: templates/daemonset-health-manager.yaml
+        documentIndex: 4
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/deployment-housekeeping.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/deployment-worker.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-bootstrap.yaml
+        documentIndex: 1
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
diff --git a/roles/octavia/vars/main.yml b/roles/octavia/vars/main.yml
index 8423aba..1f78f70 100644
--- a/roles/octavia/vars/main.yml
+++ b/roles/octavia/vars/main.yml
@@ -98,10 +98,10 @@
         endpoint_type: internalURL
       controller_worker:
         amp_boot_network_list: "{{ _octavia_management_network.id }}"
-        amp_flavor_id: "{{ _octavia_amphora_flavor.id }}"
-        amp_image_owner_id: "{{ _octavia_amphora_image.openstack_image.owner }}"
-        amp_secgroup_list: "{{ _octavia_amphora_sg.id }}"
-        amp_ssh_key_name: "{{ octavia_amphora_ssh_keypair.key.name }}"
+        amp_flavor_id: "{{ _octavia_amphora_flavor.flavor.id }}"
+        amp_image_owner_id: "{{ _octavia_amphora_image.images.0.owner }}"
+        amp_secgroup_list: "{{ _octavia_amphora_sg.security_group.id }}"
+        amp_ssh_key_name: "{{ octavia_amphora_ssh_keypair.keypair.name }}"
         client_ca: /etc/octavia/certs/client/ca.crt
         volume_driver: volume_cinder_driver
         workers: 4
diff --git a/roles/octavia/vars_test.go b/roles/octavia/vars_test.go
index 8c99a14..4e7068e 100644
--- a/roles/octavia/vars_test.go
+++ b/roles/octavia/vars_test.go
@@ -36,4 +36,6 @@
 	require.NoError(t, err)
 
 	testutils.TestDatabaseConf(t, vals.Conf.Octavia.Database)
+	testutils.TestAllPodsHaveRuntimeClass(t, vals)
+	testutils.TestAllPodsHavePriorityClass(t, vals)
 }
diff --git a/roles/openstack_helm_ingress/defaults/main.yml b/roles/openstack_helm_ingress/defaults/main.yml
index f3c0133..3e530ab 100644
--- a/roles/openstack_helm_ingress/defaults/main.yml
+++ b/roles/openstack_helm_ingress/defaults/main.yml
@@ -21,3 +21,6 @@
 # this is useful when you want to use a single certificate for all services and
 # use DNS-01 challenge to issue the certificate.
 # openstack_helm_ingress_wildcard_domain: cloud.atmosphere.dev
+
+# Class name to use for the Ingress
+openstack_helm_ingress_class_name: "{{ atmosphere_ingress_class_name }}"
diff --git a/roles/openstack_helm_ingress/tasks/main.yml b/roles/openstack_helm_ingress/tasks/main.yml
index bf1d770..ee9e0f7 100644
--- a/roles/openstack_helm_ingress/tasks/main.yml
+++ b/roles/openstack_helm_ingress/tasks/main.yml
@@ -56,3 +56,4 @@
     ingress_service_name: "{{ openstack_helm_ingress_service_name }}"
     ingress_service_port: "{{ openstack_helm_ingress_service_port }}"
     ingress_secret_name: "{{ openstack_helm_ingress_secret_name | default(openstack_helm_ingress_service_name ~ '-certs') }}"
+    ingress_class_name: "{{ openstack_helm_ingress_class_name }}"
diff --git a/roles/openstacksdk/defaults/main.yml b/roles/openstacksdk/defaults/main.yml
index 27dc31a..96db768 100644
--- a/roles/openstacksdk/defaults/main.yml
+++ b/roles/openstacksdk/defaults/main.yml
@@ -12,4 +12,4 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-openstacksdk_version: "0.61.0"
+# openstacksdk_version:
diff --git a/roles/openstacksdk/tasks/main.yml b/roles/openstacksdk/tasks/main.yml
index 1a4c3b4..5f082a1 100644
--- a/roles/openstacksdk/tasks/main.yml
+++ b/roles/openstacksdk/tasks/main.yml
@@ -15,7 +15,7 @@
 - name: Install openstacksdk
   ansible.builtin.pip:
     name: openstacksdk
-    version: "{{ openstacksdk_version }}"
+    version: "{{ openstacksdk_version | default(omit) }}"
 
 - name: Create openstack config directory
   become: true
diff --git a/roles/ovn/vars/main.yml b/roles/ovn/vars/main.yml
index 994894f..a24b7e9 100644
--- a/roles/ovn/vars/main.yml
+++ b/roles/ovn/vars/main.yml
@@ -34,6 +34,23 @@
     ovn_ovsdb_sb:
       size: 20Gi
   pod:
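+    # Require pod anti-affinity on the hostname so replicas are spread across
+    # hosts, and enable a liveness probe for ovn-northd.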
+    probes:
+      ovn_northd:
+        northd:
+          liveness:
+            enabled: true
+            params:
+              initialDelaySeconds: 30
+              timeoutSeconds: 30
+              periodSeconds: 60
+    affinity:
+      anti:
+        type:
+          default: requiredDuringSchedulingIgnoredDuringExecution
+        topologyKey:
+          default: kubernetes.io/hostname
+        weight:
+          default: 10
     replicas:
       ovn_ovsdb_nb: 3
       ovn_ovsdb_sb: 3
diff --git a/roles/ovn/vars_test.go b/roles/ovn/vars_test.go
new file mode 100644
index 0000000..ee4d95b
--- /dev/null
+++ b/roles/ovn/vars_test.go
@@ -0,0 +1,39 @@
+package ovn
+
+import (
+	_ "embed"
+	"os"
+	"testing"
+
+	"github.com/goccy/go-yaml"
+	"github.com/stretchr/testify/require"
+
+	"github.com/vexxhost/atmosphere/internal/openstack_helm"
+	"github.com/vexxhost/atmosphere/internal/testutils"
+)
+
+var (
+	//go:embed vars/main.yml
+	varsFile []byte
+	vars     Vars
+)
+
+type Vars struct {
+	openstack_helm.HelmValues `yaml:"_ovn_helm_values"`
+}
+
+func TestMain(m *testing.M) {
+	t := &testing.T{}
+	err := yaml.UnmarshalWithOptions(varsFile, &vars)
+	require.NoError(t, err)
+
+	code := m.Run()
+	os.Exit(code)
+}
+
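+// TestHelmValues layers the role's overrides on top of the OVN chart defaults
+// and checks that every pod carries the expected anti-affinity type.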
+func TestHelmValues(t *testing.T) {
+	vals, err := openstack_helm.CoalescedHelmValues("../../charts/ovn", &vars.HelmValues)
+	require.NoError(t, err)
+
+	testutils.TestAllPodsHaveAntiAffinityType(t, vals)
+}
diff --git a/roles/placement/defaults/main.yml b/roles/placement/defaults/main.yml
index bc72b55..e29baa5 100644
--- a/roles/placement/defaults/main.yml
+++ b/roles/placement/defaults/main.yml
@@ -20,5 +20,8 @@
 placement_helm_kubeconfig: "{{ kubeconfig_path | default('/etc/kubernetes/admin.conf') }}"
 placement_helm_values: {}
 
+# Class name to use for the Ingress
+placement_ingress_class_name: "{{ atmosphere_ingress_class_name }}"
+
 # List of annotations to apply to the Ingress
 placement_ingress_annotations: {}
diff --git a/roles/placement/tasks/main.yml b/roles/placement/tasks/main.yml
index cebee35..cb33025 100644
--- a/roles/placement/tasks/main.yml
+++ b/roles/placement/tasks/main.yml
@@ -30,3 +30,4 @@
     openstack_helm_ingress_service_name: placement-api
     openstack_helm_ingress_service_port: 8778
     openstack_helm_ingress_annotations: "{{ placement_ingress_annotations }}"
+    openstack_helm_ingress_class_name: "{{ placement_ingress_class_name }}"
diff --git a/roles/placement/tests/priorityclass_test.yaml b/roles/placement/tests/priorityclass_test.yaml
new file mode 100644
index 0000000..e7e0900
--- /dev/null
+++ b/roles/placement/tests/priorityclass_test.yaml
@@ -0,0 +1,36 @@
+suite: priorityclass
+tests:
+  - it: should support not having a priority class
+    templates:
+      - templates/deployment.yaml
+      - templates/job-db-sync.yaml
+    asserts:
+      - template: templates/deployment.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+
+  - it: should support setting a priority class
+    templates:
+      - templates/deployment.yaml
+      - templates/job-db-sync.yaml
+    set:
+      pod:
+        priorityClassName:
+          placement: platform
+          db_sync: platform
+    asserts:
+      - template: templates/deployment.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
diff --git a/roles/placement/tests/runtimeclass_test.yaml b/roles/placement/tests/runtimeclass_test.yaml
new file mode 100644
index 0000000..79bf19d
--- /dev/null
+++ b/roles/placement/tests/runtimeclass_test.yaml
@@ -0,0 +1,36 @@
+suite: runtimeclass
+tests:
+  - it: should support not having a runtime class
+    templates:
+      - templates/deployment.yaml
+      - templates/job-db-sync.yaml
+    asserts:
+      - template: templates/deployment.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+
+  - it: should support setting a runtime class
+    templates:
+      - templates/deployment.yaml
+      - templates/job-db-sync.yaml
+    set:
+      pod:
+        runtimeClassName:
+          placement: kata-clh
+          db_sync: kata-clh
+    asserts:
+      - template: templates/deployment.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
diff --git a/roles/placement/vars_test.go b/roles/placement/vars_test.go
index fa3fac3..f0cb72f 100644
--- a/roles/placement/vars_test.go
+++ b/roles/placement/vars_test.go
@@ -36,4 +36,6 @@
 	require.NoError(t, err)
 
 	testutils.TestDatabaseConf(t, vals.Conf.Placement.Database)
+	testutils.TestAllPodsHaveRuntimeClass(t, vals)
+	testutils.TestAllPodsHavePriorityClass(t, vals)
 }
diff --git a/roles/rook_ceph_cluster/defaults/main.yml b/roles/rook_ceph_cluster/defaults/main.yml
index 4c2545b..d5b1c2d 100644
--- a/roles/rook_ceph_cluster/defaults/main.yml
+++ b/roles/rook_ceph_cluster/defaults/main.yml
@@ -22,6 +22,9 @@
 rook_ceph_cluster_helm_kubeconfig: "{{ kubeconfig_path | default('/etc/kubernetes/admin.conf') }}"
 rook_ceph_cluster_helm_values: {}
 
+# Class name to use for the Ingress
+rook_ceph_cluster_ingress_class_name: "{{ atmosphere_ingress_class_name }}"
+
 # List of annotations to apply to the Ingress
 rook_ceph_cluster_radosgw_annotations: {}
 
diff --git a/roles/rook_ceph_cluster/tasks/main.yml b/roles/rook_ceph_cluster/tasks/main.yml
index 92f3842..a957efb 100644
--- a/roles/rook_ceph_cluster/tasks/main.yml
+++ b/roles/rook_ceph_cluster/tasks/main.yml
@@ -96,13 +96,21 @@
     password: "{{ openstack_helm_endpoints.identity.auth.rgw.password }}"
     domain: service
 
+# NOTE(mnaser): https://storyboard.openstack.org/#!/story/2010579
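+# Work around the issue tracked above by granting the role through the
+# OpenStack CLI instead of the openstack.cloud.role_assignment module.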
 - name: Grant access to "service" project
-  openstack.cloud.role_assignment:
-    cloud: atmosphere
-    domain: service
-    user: "{{ openstack_helm_endpoints.identity.auth.rgw.username }}"
-    project: service
-    role: admin
+  changed_when: false
+  ansible.builtin.shell: |
+    set -o posix
+    source /etc/profile.d/atmosphere.sh
+    openstack role add \
+      --user-domain service \
+      --project service \
+      --user {{ openstack_helm_endpoints.identity.auth.rgw.username }} \
+      admin
+  args:
+    executable: /bin/bash
+  environment:
+    OS_CLOUD: atmosphere
 
 - name: Create OpenStack service
   openstack.cloud.catalog_service:
@@ -132,3 +140,4 @@
     openstack_helm_ingress_service_name: rook-ceph-rgw-{{ rook_ceph_cluster_name }}
     openstack_helm_ingress_service_port: 80
     openstack_helm_ingress_annotations: "{{ _rook_ceph_cluster_radosgw_annotations | combine(rook_ceph_cluster_radosgw_annotations, recursive=True) }}"
+    openstack_helm_ingress_class_name: "{{ rook_ceph_cluster_ingress_class_name }}"
diff --git a/roles/staffeln/tests/priorityclass_test.yaml b/roles/staffeln/tests/priorityclass_test.yaml
new file mode 100644
index 0000000..943c8ac
--- /dev/null
+++ b/roles/staffeln/tests/priorityclass_test.yaml
@@ -0,0 +1,48 @@
+suite: priorityclass
+tests:
+  - it: should support not having a priority class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/deployment-conductor.yaml
+      - templates/job-db-sync.yaml
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/deployment-conductor.yaml
+        documentIndex: 5
+        notExists:
+          path: spec.template.spec.priorityClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.priorityClassName
+
+  - it: should support setting a priority class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/deployment-conductor.yaml
+      - templates/job-db-sync.yaml
+    set:
+      pod:
+        priorityClassName:
+          staffeln_api: platform
+          staffeln_conductor: platform
+          db_sync: platform
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/deployment-conductor.yaml
+        documentIndex: 5
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.priorityClassName
+          value: platform
diff --git a/roles/staffeln/tests/runtimeclass_test.yaml b/roles/staffeln/tests/runtimeclass_test.yaml
new file mode 100644
index 0000000..4835aa1
--- /dev/null
+++ b/roles/staffeln/tests/runtimeclass_test.yaml
@@ -0,0 +1,48 @@
+suite: runtimeclass
+tests:
+  - it: should support not having a runtime class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/deployment-conductor.yaml
+      - templates/job-db-sync.yaml
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/deployment-conductor.yaml
+        documentIndex: 5
+        notExists:
+          path: spec.template.spec.runtimeClassName
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        notExists:
+          path: spec.template.spec.runtimeClassName
+
+  - it: should support setting a runtime class
+    templates:
+      - templates/deployment-api.yaml
+      - templates/deployment-conductor.yaml
+      - templates/job-db-sync.yaml
+    set:
+      pod:
+        runtimeClassName:
+          staffeln_api: kata-clh
+          staffeln_conductor: kata-clh
+          db_sync: kata-clh
+    asserts:
+      - template: templates/deployment-api.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/deployment-conductor.yaml
+        documentIndex: 5
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
+      - template: templates/job-db-sync.yaml
+        documentIndex: 3
+        equal:
+          path: spec.template.spec.runtimeClassName
+          value: kata-clh
diff --git a/roles/staffeln/vars_test.go b/roles/staffeln/vars_test.go
index 07ec9ab..9377c01 100644
--- a/roles/staffeln/vars_test.go
+++ b/roles/staffeln/vars_test.go
@@ -36,4 +36,6 @@
 	require.NoError(t, err)
 
 	testutils.TestDatabaseConf(t, vals.Conf.Staffeln.Database)
+	testutils.TestAllPodsHaveRuntimeClass(t, vals)
+	testutils.TestAllPodsHavePriorityClass(t, vals)
 }
diff --git a/roles/sysctl/defaults/main.yml b/roles/sysctl/defaults/main.yml
index 509597e..dbb09e9 100644
--- a/roles/sysctl/defaults/main.yml
+++ b/roles/sysctl/defaults/main.yml
@@ -15,6 +15,8 @@
 #
 # List of ``sysctl`` parameters to set
 sysctls:
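+  # Raise the kernel's async I/O limit; the stock default is typically too low
+  # for hosts running many QEMU-backed instances.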
+  - name: fs.aio-max-nr
+    value: 1048576
   - name: net.ipv4.tcp_timestamps
     value: 0
   - name: net.ipv4.tcp_sack
diff --git a/roles/tempest/tasks/main.yml b/roles/tempest/tasks/main.yml
index 8ea8e8f..3440b2d 100644
--- a/roles/tempest/tasks/main.yml
+++ b/roles/tempest/tasks/main.yml
@@ -34,10 +34,10 @@
         conf:
           tempest:
             compute:
-              image_ref: "{{ _tempest_test_image.openstack_image.id }}"
+              image_ref: "{{ _tempest_test_image.images.0.id }}"
       when:
         - tempest_helm_values.conf.tempest.compute.image_ref is not defined
-        - _tempest_test_image.openstack_image.id is defined
+        - _tempest_test_image.images | length > 0
 
     - name: Get test flavor object
       openstack.cloud.compute_flavor_info:
@@ -53,10 +53,10 @@
         conf:
           tempest:
             compute:
-              flavor_ref: "{{ _tempest_test_flavor.openstack_flavors[0].id }}"
+              flavor_ref: "{{ _tempest_test_flavor.flavors[0].id }}"
       when:
         - tempest_helm_values.conf.tempest.compute.flavor_ref is not defined
-        - _tempest_test_flavor.openstack_flavors[0].id is defined
+        - _tempest_test_flavor.flavors[0].id is defined
 
     - name: Get test network object
       openstack.cloud.networks_info:
@@ -72,10 +72,10 @@
         conf:
           tempest:
             network:
-              public_network_id: "{{ _tempest_test_network.openstack_networks[0].id }}"
+              public_network_id: "{{ _tempest_test_network.networks[0].id }}"
       when:
         - tempest_helm_values.conf.tempest.network.public_network_id is not defined
-        - _tempest_test_network.openstack_networks[0].id is defined
+        - _tempest_test_network.networks[0].id is defined
 
 - name: Deploy Helm chart
   failed_when: false
diff --git a/roles/tempest/vars/main.yml b/roles/tempest/vars/main.yml
index ea508f9..5c92ad6 100644
--- a/roles/tempest/vars/main.yml
+++ b/roles/tempest/vars/main.yml
@@ -33,7 +33,8 @@
         endpoint_type: internal
         fixed_network_name: public
       dashboard:
-        dashboard_url: "http://horizon-int.openstack.svc.cluster.local"
+        dashboard_url: "https://{{ openstack_helm_endpoints_horizon_api_host }}"
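+        # A self-signed cluster issuer yields a certificate tempest cannot
+        # verify, so skip TLS validation in that case.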
+        disable_ssl_certificate_validation: "{{ cluster_issuer_type == 'self-signed' }}"
       identity:
         v3_endpoint_type: internal
       image:
diff --git a/tests/image_tests.rs b/tests/image_tests.rs
new file mode 100644
index 0000000..0e6797b
--- /dev/null
+++ b/tests/image_tests.rs
@@ -0,0 +1,21 @@
+use rustainers::DockerContainerGuard;
+use rustainers::DockerContainerGuardError;
+use std::env;
+
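+// Checks that the published nova-ssh image ships a "nova" user with the
+// expected UID/GID, home directory and shell; REGISTRY and TAG select the
+// image to pull and default to the library registry and the "main" tag.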
+#[tokio::test]
+async fn test_nova_ssh() -> Result<(), DockerContainerGuardError> {
+    let guard = DockerContainerGuard::spawn(&format!(
+        "{}/nova-ssh:{}",
+        env::var("REGISTRY").unwrap_or_else(|_| "harbor.atmosphere.dev/library".to_string()),
+        env::var("TAG").unwrap_or_else(|_| "main".to_string())
+    ))
+    .await?;
+
+    let user = guard.get_user("nova").await?;
+    assert_eq!(user.uid, 42424);
+    assert_eq!(user.gid, 42424);
+    assert_eq!(user.dir, "/var/lib/nova");
+    assert_eq!(user.shell, "/bin/bash");
+
+    Ok(())
+}
diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml
index 970c59b..4179269 100644
--- a/zuul.d/jobs.yaml
+++ b/zuul.d/jobs.yaml
@@ -6,6 +6,14 @@
       go_version: 1.21.13
 
 - job:
+    name: atmosphere-cargo-test
+    run:
+      - zuul.d/playbooks/cargo-test/run.yml
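+    # Soft dependency: the cargo tests still run when atmosphere-build-images
+    # is skipped, falling back to the previously published images.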
+    dependencies:
+      - name: atmosphere-build-images
+        soft: true
+
+- job:
     name: atmosphere-golang-go-test
     parent: atmosphere-golang-go
     vars:
@@ -151,8 +159,6 @@
 - job:
     name: atmosphere-molecule-aio-ovn
     parent: atmosphere-molecule-aio-full
-    # NOTE(mnaser): https://github.com/vexxhost/atmosphere/issues/662
-    voting: false
     vars:
       tox_envlist: molecule-aio-ovn
 
diff --git a/zuul.d/playbooks/cargo-test/run.yml b/zuul.d/playbooks/cargo-test/run.yml
new file mode 100644
index 0000000..48ecdb3
--- /dev/null
+++ b/zuul.d/playbooks/cargo-test/run.yml
@@ -0,0 +1,23 @@
+- hosts: all
+  tasks:
+    - name: Install Docker
+      ansible.builtin.include_role:
+        name: ensure-docker
+
+    - name: Install Rust
+      ansible.builtin.include_role:
+        name: ensure-rust
+
+    - name: Ensure required packages are installed
+      become: true
+      ansible.builtin.apt:
+        name: ["build-essential"]
+        state: present
+
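+    # If this buildset produced image artifacts, test against the freshly
+    # built images in the CI registry (tagged with the change number);
+    # otherwise use the published images tagged after the branch.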
+    - name: Run "cargo test"
+      ansible.builtin.command: cargo test --workspace
+      args:
+        chdir: "{{ zuul.project.src_dir }}"
+      environment:
+        REGISTRY: "{{ (zuul.artifacts | default([]) | length > 0) | ternary('harbor.atmosphere.dev/ci', 'harbor.atmosphere.dev/library') }}"
+        TAG: "{{ (zuul.artifacts | default([]) | length > 0) | ternary(zuul.change, zuul.branch.replace('stable/', '')) }}"
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 9d34783..aa27cd0 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -15,6 +15,7 @@
 - project:
     check:
       jobs:
+        - atmosphere-cargo-test
         - atmosphere-chart-vendor
         - atmosphere-check-commit
         - atmosphere-golang-go-test
@@ -39,6 +40,7 @@
             dependencies: *image_build_jobs
     gate:
       jobs:
+        - atmosphere-cargo-test
         - atmosphere-chart-vendor
         - atmosphere-check-commit
         - atmosphere-golang-go-test