Merge "Upgrade Storpool driver to Caracal" into stable/2024.1
diff --git a/.ansible-lint b/.ansible-lint
index c81fa73..2776884 100644
--- a/.ansible-lint
+++ b/.ansible-lint
@@ -1,5 +1,6 @@
 ---
 exclude_paths:
+  - .ansible
   - .github
   - atmosphere
   - charts
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index bd2d792..2c0ca4e 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -41,6 +41,6 @@
         args: ["--profile", "black", "--filter-files"]
 
   - repo: https://github.com/ansible/ansible-lint.git
-    rev: v24.7.0
+    rev: v25.1.2
     hooks:
       - id: ansible-lint
diff --git a/Cargo.lock b/Cargo.lock
index fb7ed86..1ea12db 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1,6 +1,6 @@
 # This file is automatically @generated by Cargo.
 # It is not intended for manual editing.
-version = 3
+version = 4
 
 [[package]]
 name = "addr2line"
@@ -18,6 +18,15 @@
 checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"
 
 [[package]]
+name = "aho-corasick"
+version = "1.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
 name = "android-tzdata"
 version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -33,6 +42,62 @@
 ]
 
 [[package]]
+name = "anstream"
+version = "0.6.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b"
+dependencies = [
+ "anstyle",
+ "anstyle-parse",
+ "anstyle-query",
+ "anstyle-wincon",
+ "colorchoice",
+ "is_terminal_polyfill",
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle"
+version = "1.0.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9"
+
+[[package]]
+name = "anstyle-parse"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9"
+dependencies = [
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle-query"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c"
+dependencies = [
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "anstyle-wincon"
+version = "3.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e"
+dependencies = [
+ "anstyle",
+ "once_cell",
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "anyhow"
+version = "1.0.95"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04"
+
+[[package]]
 name = "atmosphere"
 version = "0.0.0"
 dependencies = [
@@ -98,7 +163,7 @@
  "serde_json",
  "serde_repr",
  "serde_urlencoded",
- "thiserror",
+ "thiserror 2.0.11",
  "tokio",
  "tokio-util",
  "tower-service",
@@ -164,6 +229,52 @@
 ]
 
 [[package]]
+name = "clap"
+version = "4.5.29"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8acebd8ad879283633b343856142139f2da2317c96b05b4dd6181c61e2480184"
+dependencies = [
+ "clap_builder",
+ "clap_derive",
+]
+
+[[package]]
+name = "clap_builder"
+version = "4.5.29"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6ba32cbda51c7e1dfd49acc1457ba1a7dec5b64fe360e828acb13ca8dc9c2f9"
+dependencies = [
+ "anstream",
+ "anstyle",
+ "clap_lex",
+ "strsim",
+]
+
+[[package]]
+name = "clap_derive"
+version = "4.5.28"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bf4ced95c6f4a675af3da73304b9ac4ed991640c36374e4b46795c49e17cf1ed"
+dependencies = [
+ "heck",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "clap_lex"
+version = "0.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6"
+
+[[package]]
+name = "colorchoice"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990"
+
+[[package]]
 name = "core-foundation-sys"
 version = "0.8.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -191,6 +302,29 @@
 ]
 
 [[package]]
+name = "env_filter"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "186e05a59d4c50738528153b83b0b0194d3a29507dfec16eccd4b342903397d0"
+dependencies = [
+ "log",
+ "regex",
+]
+
+[[package]]
+name = "env_logger"
+version = "0.11.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dcaee3d8e3cfc3fd92428d477bc97fc29ec8716d180c0d74c643bb26166660e0"
+dependencies = [
+ "anstream",
+ "anstyle",
+ "env_filter",
+ "humantime",
+ "log",
+]
+
+[[package]]
 name = "equivalent"
 version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -234,12 +368,28 @@
 ]
 
 [[package]]
+name = "futures"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "futures-executor",
+ "futures-io",
+ "futures-sink",
+ "futures-task",
+ "futures-util",
+]
+
+[[package]]
 name = "futures-channel"
 version = "0.3.31"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10"
 dependencies = [
  "futures-core",
+ "futures-sink",
 ]
 
 [[package]]
@@ -249,6 +399,23 @@
 checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e"
 
 [[package]]
+name = "futures-executor"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f"
+dependencies = [
+ "futures-core",
+ "futures-task",
+ "futures-util",
+]
+
+[[package]]
+name = "futures-io"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6"
+
+[[package]]
 name = "futures-macro"
 version = "0.3.31"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -277,9 +444,13 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
 dependencies = [
+ "futures-channel",
  "futures-core",
+ "futures-io",
  "futures-macro",
+ "futures-sink",
  "futures-task",
+ "memchr",
  "pin-project-lite",
  "pin-utils",
  "slab",
@@ -315,6 +486,12 @@
 checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289"
 
 [[package]]
+name = "heck"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
+
+[[package]]
 name = "hex"
 version = "0.4.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -367,6 +544,12 @@
 checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
 
 [[package]]
+name = "humantime"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
+
+[[package]]
 name = "hyper"
 version = "1.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -620,6 +803,18 @@
 ]
 
 [[package]]
+name = "ipnet"
+version = "2.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130"
+
+[[package]]
+name = "is_terminal_polyfill"
+version = "1.70.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf"
+
+[[package]]
 name = "itoa"
 version = "1.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -697,6 +892,81 @@
 ]
 
 [[package]]
+name = "netlink-packet-core"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72724faf704479d67b388da142b186f916188505e7e0b26719019c525882eda4"
+dependencies = [
+ "anyhow",
+ "byteorder",
+ "netlink-packet-utils",
+]
+
+[[package]]
+name = "netlink-packet-route"
+version = "0.19.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "74c171cd77b4ee8c7708da746ce392440cb7bcf618d122ec9ecc607b12938bf4"
+dependencies = [
+ "anyhow",
+ "byteorder",
+ "libc",
+ "log",
+ "netlink-packet-core",
+ "netlink-packet-utils",
+]
+
+[[package]]
+name = "netlink-packet-utils"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ede8a08c71ad5a95cdd0e4e52facd37190977039a4704eb82a283f713747d34"
+dependencies = [
+ "anyhow",
+ "byteorder",
+ "paste",
+ "thiserror 1.0.69",
+]
+
+[[package]]
+name = "netlink-proto"
+version = "0.11.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72452e012c2f8d612410d89eea01e2d9b56205274abb35d53f60200b2ec41d60"
+dependencies = [
+ "bytes",
+ "futures",
+ "log",
+ "netlink-packet-core",
+ "netlink-sys",
+ "thiserror 2.0.11",
+]
+
+[[package]]
+name = "netlink-sys"
+version = "0.8.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "16c903aa70590cb93691bf97a767c8d1d6122d2cc9070433deb3bbf36ce8bd23"
+dependencies = [
+ "bytes",
+ "futures",
+ "libc",
+ "log",
+ "tokio",
+]
+
+[[package]]
+name = "nix"
+version = "0.27.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053"
+dependencies = [
+ "bitflags",
+ "cfg-if",
+ "libc",
+]
+
+[[package]]
 name = "num-conv"
 version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -727,13 +997,38 @@
 checksum = "945462a4b81e43c4e3ba96bd7b49d834c6f61198356aa858733bc4acf3cbe62e"
 
 [[package]]
+name = "ovsinit"
+version = "0.1.0"
+dependencies = [
+ "clap",
+ "env_logger",
+ "futures",
+ "futures-util",
+ "ipnet",
+ "libc",
+ "log",
+ "netlink-packet-route",
+ "rtnetlink",
+ "serde",
+ "serde_json",
+ "thiserror 2.0.11",
+ "tokio",
+]
+
+[[package]]
 name = "passwd"
 version = "0.1.0"
 dependencies = [
- "thiserror",
+ "thiserror 2.0.11",
 ]
 
 [[package]]
+name = "paste"
+version = "1.0.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
+
+[[package]]
 name = "percent-encoding"
 version = "2.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -824,6 +1119,53 @@
 ]
 
 [[package]]
+name = "regex"
+version = "1.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-automata",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.4.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
+
+[[package]]
+name = "rtnetlink"
+version = "0.14.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b684475344d8df1859ddb2d395dd3dac4f8f3422a1aa0725993cb375fc5caba5"
+dependencies = [
+ "futures",
+ "log",
+ "netlink-packet-core",
+ "netlink-packet-route",
+ "netlink-packet-utils",
+ "netlink-proto",
+ "netlink-sys",
+ "nix",
+ "thiserror 1.0.69",
+ "tokio",
+]
+
+[[package]]
 name = "rustainers"
 version = "0.1.0"
 dependencies = [
@@ -833,7 +1175,7 @@
  "passwd",
  "rand",
  "tar",
- "thiserror",
+ "thiserror 2.0.11",
  "tokio",
 ]
 
@@ -978,6 +1320,12 @@
 checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
 
 [[package]]
+name = "strsim"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
+
+[[package]]
 name = "syn"
 version = "2.0.98"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1012,11 +1360,31 @@
 
 [[package]]
 name = "thiserror"
+version = "1.0.69"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52"
+dependencies = [
+ "thiserror-impl 1.0.69",
+]
+
+[[package]]
+name = "thiserror"
 version = "2.0.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc"
 dependencies = [
- "thiserror-impl",
+ "thiserror-impl 2.0.11",
+]
+
+[[package]]
+name = "thiserror-impl"
+version = "1.0.69"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
 ]
 
 [[package]]
@@ -1172,6 +1540,12 @@
 checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
 
 [[package]]
+name = "utf8parse"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
+
+[[package]]
 name = "want"
 version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
diff --git a/Cargo.toml b/Cargo.toml
index e5a3d8e..746bdd0 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -7,4 +7,4 @@
 tokio = { version = "1", features = ["macros", "rt-multi-thread"] }
 
 [workspace]
-members = ["crates/passwd", "crates/rustainers"]
+members = [ "crates/ovsinit","crates/passwd", "crates/rustainers"]
diff --git a/charts/neutron/templates/bin/_neutron-openvswitch-agent-init.sh.tpl b/charts/neutron/templates/bin/_neutron-openvswitch-agent-init.sh.tpl
index bd0a64a..c15e40a 100644
--- a/charts/neutron/templates/bin/_neutron-openvswitch-agent-init.sh.tpl
+++ b/charts/neutron/templates/bin/_neutron-openvswitch-agent-init.sh.tpl
@@ -435,13 +435,14 @@
   if [ -n "$iface" ] && [ "$iface" != "null" ] && ( ip link show $iface 1>/dev/null 2>&1 );
   then
     ovs-vsctl --db=unix:${OVS_SOCKET} --may-exist add-port $bridge $iface
-    migrate_ip_from_nic $iface $bridge
     if [[ "${DPDK_ENABLED}" != "true" ]]; then
       ip link set dev $iface up
     fi
   fi
 done
 
+/usr/local/bin/ovsinit /tmp/auto_bridge_add
+
 tunnel_types="{{- .Values.conf.plugins.openvswitch_agent.agent.tunnel_types -}}"
 if [[ -n "${tunnel_types}" ]] ; then
     tunnel_interface="{{- .Values.network.interface.tunnel -}}"
diff --git a/charts/ovn/templates/bin/_ovn-controller-init.sh.tpl b/charts/ovn/templates/bin/_ovn-controller-init.sh.tpl
index 1d303c8..049f731 100644
--- a/charts/ovn/templates/bin/_ovn-controller-init.sh.tpl
+++ b/charts/ovn/templates/bin/_ovn-controller-init.sh.tpl
@@ -25,58 +25,6 @@
   echo ${ip}
 }
 
-function get_ip_prefix_from_interface {
-  local interface=$1
-  local prefix=$(ip -4 -o addr s "${interface}" | awk '{ print $4; exit }' | awk -F '/' 'NR==1 {print $2}')
-  if [ -z "${prefix}" ] ; then
-    exit 1
-  fi
-  echo ${prefix}
-}
-
-function migrate_ip_from_nic {
-  src_nic=$1
-  bridge_name=$2
-
-  # Enabling explicit error handling: We must avoid to lose the IP
-  # address in the migration process. Hence, on every error, we
-  # attempt to assign the IP back to the original NIC and exit.
-  set +e
-
-  ip=$(get_ip_address_from_interface ${src_nic})
-  prefix=$(get_ip_prefix_from_interface ${src_nic})
-
-  bridge_ip=$(get_ip_address_from_interface "${bridge_name}")
-  bridge_prefix=$(get_ip_prefix_from_interface "${bridge_name}")
-
-  ip link set ${bridge_name} up
-
-  if [[ -n "${ip}" && -n "${prefix}" ]]; then
-    ip addr flush dev ${src_nic}
-    if [ $? -ne 0 ] ; then
-      ip addr add ${ip}/${prefix} dev ${src_nic}
-      echo "Error while flushing IP from ${src_nic}."
-      exit 1
-    fi
-
-    ip addr add ${ip}/${prefix} dev "${bridge_name}"
-    if [ $? -ne 0 ] ; then
-      echo "Error assigning IP to bridge "${bridge_name}"."
-      ip addr add ${ip}/${prefix} dev ${src_nic}
-      exit 1
-    fi
-  elif [[ -n "${bridge_ip}" && -n "${bridge_prefix}" ]]; then
-    echo "Bridge '${bridge_name}' already has IP assigned. Keeping the same:: IP:[${bridge_ip}]; Prefix:[${bridge_prefix}]..."
-  elif [[ -z "${bridge_ip}" && -z "${ip}" ]]; then
-    echo "Interface and bridge have no ips configured. Leaving as is."
-  else
-    echo "Interface ${src_nic} has invalid IP address. IP:[${ip}]; Prefix:[${prefix}]..."
-    exit 1
-  fi
-
-  set -e
-}
-
 function get_current_system_id {
   ovs-vsctl --if-exists get Open_vSwitch . external_ids:system-id | tr -d '"'
 }
@@ -174,6 +122,7 @@
   if [ -n "$iface" ] && [ "$iface" != "null" ] && ( ip link show $iface 1>/dev/null 2>&1 );
   then
     ovs-vsctl --may-exist add-port $bridge $iface
-    migrate_ip_from_nic $iface $bridge
   fi
 done
+
+/usr/local/bin/ovsinit /tmp/auto_bridge_add
diff --git a/charts/patches/0001-fix-Use-more-standard-configs-for-staffeln-DBdropjob.patch b/charts/patches/0001-fix-Use-more-standard-configs-for-staffeln-DBdropjob.patch
deleted file mode 100644
index 1a491cf..0000000
--- a/charts/patches/0001-fix-Use-more-standard-configs-for-staffeln-DBdropjob.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From f74a254e87acaafb9493630cb8521fda145c6c5c Mon Sep 17 00:00:00 2001
-From: ricolin <rlin@vexxhost.com>
-Date: Wed, 8 Jan 2025 21:29:08 +0800
-Subject: [PATCH] fix: Use more standard configs for staffeln DB drop job
-
----
- charts/staffeln/templates/job-db-drop.yaml | 6 ++----
- 1 file changed, 2 insertions(+), 4 deletions(-)
-
-diff --git a/charts/staffeln/templates/job-db-drop.yaml b/charts/staffeln/templates/job-db-drop.yaml
-index dffa5aba..519e0b31 100644
---- a/charts/staffeln/templates/job-db-drop.yaml
-+++ b/charts/staffeln/templates/job-db-drop.yaml
-@@ -13,11 +13,9 @@ limitations under the License.
- */}}
- 
- {{- if .Values.manifests.job_db_drop }}
--{{- $serviceName := "staffeln" -}}
--{{- $dbToDrop := dict "adminSecret" .Values.secrets.oslo_db.admin "configFile" (printf "/etc/%s/%s.conf" $serviceName $serviceName ) "logConfigFile" (printf "/etc/%s/logging.conf" $serviceName ) "configDbSection" "DEFAULT" "configDbKey" "sql_connection" -}}
--{{- $dbDropJob := dict "envAll" . "serviceName" $serviceName "dbToDrop" $dbToDrop -}}
-+{{- $dbDropJob := dict "envAll" . "serviceName" "staffeln" -}}
- {{- if .Values.manifests.certificates -}}
--{{- $_ := set $dbToDrop "dbAdminTlsSecret" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}
-+{{- $_ := set $dbDropJob "dbAdminTlsSecret" .Values.endpoints.oslo_db.auth.admin.secret.tls.internal -}}
- {{- end -}}
- {{- if .Values.pod.tolerations.staffeln.enabled -}}
- {{- $_ := set $dbDropJob "tolerationsEnabled" true -}}
--- 
-2.25.1
-
diff --git a/charts/patches/neutron/0001-Switch-Neutron-to-ovsinit.patch b/charts/patches/neutron/0001-Switch-Neutron-to-ovsinit.patch
new file mode 100644
index 0000000..0c1d7d3
--- /dev/null
+++ b/charts/patches/neutron/0001-Switch-Neutron-to-ovsinit.patch
@@ -0,0 +1,32 @@
+From 3e0120d8457faf947f6f5d3ed79a1f08a0d271cd Mon Sep 17 00:00:00 2001
+From: Mohammed Naser <mnaser@vexxhost.com>
+Date: Mon, 17 Feb 2025 10:58:17 -0500
+Subject: [PATCH] Switch Neutron to ovsinit
+
+---
+ neutron/templates/bin/_neutron-openvswitch-agent-init.sh.tpl | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/neutron/templates/bin/_neutron-openvswitch-agent-init.sh.tpl b/neutron/templates/bin/_neutron-openvswitch-agent-init.sh.tpl
+index bd0a64ac..c15e40a5 100644
+--- a/neutron/templates/bin/_neutron-openvswitch-agent-init.sh.tpl
++++ b/neutron/templates/bin/_neutron-openvswitch-agent-init.sh.tpl
+@@ -435,13 +435,14 @@ do
+   if [ -n "$iface" ] && [ "$iface" != "null" ] && ( ip link show $iface 1>/dev/null 2>&1 );
+   then
+     ovs-vsctl --db=unix:${OVS_SOCKET} --may-exist add-port $bridge $iface
+-    migrate_ip_from_nic $iface $bridge
+     if [[ "${DPDK_ENABLED}" != "true" ]]; then
+       ip link set dev $iface up
+     fi
+   fi
+ done
+ 
++/usr/local/bin/ovsinit /tmp/auto_bridge_add
++
+ tunnel_types="{{- .Values.conf.plugins.openvswitch_agent.agent.tunnel_types -}}"
+ if [[ -n "${tunnel_types}" ]] ; then
+     tunnel_interface="{{- .Values.network.interface.tunnel -}}"
+-- 
+2.47.0
+
diff --git a/charts/patches/ovn/0003-Switch-OVN-to-ovsinit.patch b/charts/patches/ovn/0003-Switch-OVN-to-ovsinit.patch
new file mode 100644
index 0000000..ba04dcf
--- /dev/null
+++ b/charts/patches/ovn/0003-Switch-OVN-to-ovsinit.patch
@@ -0,0 +1,84 @@
+From 6c2dac4c0bcd71d400c113b922ba862d7945a09e Mon Sep 17 00:00:00 2001
+From: Mohammed Naser <mnaser@vexxhost.com>
+Date: Mon, 17 Feb 2025 11:00:30 -0500
+Subject: [PATCH] Switch OVN to ovsinit
+
+---
+ ovn/templates/bin/_ovn-controller-init.sh.tpl | 55 +------------------
+ 1 file changed, 2 insertions(+), 53 deletions(-)
+
+diff --git a/ovn/templates/bin/_ovn-controller-init.sh.tpl b/ovn/templates/bin/_ovn-controller-init.sh.tpl
+index 357c069d..006582f9 100644
+--- a/ovn/templates/bin/_ovn-controller-init.sh.tpl
++++ b/ovn/templates/bin/_ovn-controller-init.sh.tpl
+@@ -25,58 +25,6 @@ function get_ip_address_from_interface {
+   echo ${ip}
+ }
+ 
+-function get_ip_prefix_from_interface {
+-  local interface=$1
+-  local prefix=$(ip -4 -o addr s "${interface}" | awk '{ print $4; exit }' | awk -F '/' 'NR==1 {print $2}')
+-  if [ -z "${prefix}" ] ; then
+-    exit 1
+-  fi
+-  echo ${prefix}
+-}
+-
+-function migrate_ip_from_nic {
+-  src_nic=$1
+-  bridge_name=$2
+-
+-  # Enabling explicit error handling: We must avoid to lose the IP
+-  # address in the migration process. Hence, on every error, we
+-  # attempt to assign the IP back to the original NIC and exit.
+-  set +e
+-
+-  ip=$(get_ip_address_from_interface ${src_nic})
+-  prefix=$(get_ip_prefix_from_interface ${src_nic})
+-
+-  bridge_ip=$(get_ip_address_from_interface "${bridge_name}")
+-  bridge_prefix=$(get_ip_prefix_from_interface "${bridge_name}")
+-
+-  ip link set ${bridge_name} up
+-
+-  if [[ -n "${ip}" && -n "${prefix}" ]]; then
+-    ip addr flush dev ${src_nic}
+-    if [ $? -ne 0 ] ; then
+-      ip addr add ${ip}/${prefix} dev ${src_nic}
+-      echo "Error while flushing IP from ${src_nic}."
+-      exit 1
+-    fi
+-
+-    ip addr add ${ip}/${prefix} dev "${bridge_name}"
+-    if [ $? -ne 0 ] ; then
+-      echo "Error assigning IP to bridge "${bridge_name}"."
+-      ip addr add ${ip}/${prefix} dev ${src_nic}
+-      exit 1
+-    fi
+-  elif [[ -n "${bridge_ip}" && -n "${bridge_prefix}" ]]; then
+-    echo "Bridge '${bridge_name}' already has IP assigned. Keeping the same:: IP:[${bridge_ip}]; Prefix:[${bridge_prefix}]..."
+-  elif [[ -z "${bridge_ip}" && -z "${ip}" ]]; then
+-    echo "Interface and bridge have no ips configured. Leaving as is."
+-  else
+-    echo "Interface ${src_nic} has invalid IP address. IP:[${ip}]; Prefix:[${prefix}]..."
+-    exit 1
+-  fi
+-
+-  set -e
+-}
+-
+ function get_current_system_id {
+   ovs-vsctl --if-exists get Open_vSwitch . external_ids:system-id | tr -d '"'
+ }
+@@ -174,6 +122,7 @@ do
+   if [ -n "$iface" ] && [ "$iface" != "null" ] && ( ip link show $iface 1>/dev/null 2>&1 );
+   then
+     ovs-vsctl --may-exist add-port $bridge $iface
+-    migrate_ip_from_nic $iface $bridge
+   fi
+ done
++
++/usr/local/bin/ovsinit /tmp/auto_bridge_add
+-- 
+2.47.0
+
diff --git a/charts/staffeln/charts/helm-toolkit/Chart.yaml b/charts/staffeln/charts/helm-toolkit/Chart.yaml
index e6aec81..d4c0ea2 100644
--- a/charts/staffeln/charts/helm-toolkit/Chart.yaml
+++ b/charts/staffeln/charts/helm-toolkit/Chart.yaml
@@ -9,4 +9,4 @@
 sources:
 - https://opendev.org/openstack/openstack-helm-infra
 - https://opendev.org/openstack/openstack-helm
-version: 0.2.54
+version: 0.2.69
diff --git a/charts/staffeln/charts/helm-toolkit/requirements.lock b/charts/staffeln/charts/helm-toolkit/requirements.lock
new file mode 100644
index 0000000..e28bc5d
--- /dev/null
+++ b/charts/staffeln/charts/helm-toolkit/requirements.lock
@@ -0,0 +1,3 @@
+dependencies: []
+digest: sha256:643d5437104296e21d906ecb15b2c96ad278f20cfc4af53b12bb6069bd853726
+generated: "0001-01-01T00:00:00Z"
diff --git a/charts/staffeln/charts/helm-toolkit/templates/endpoints/_authenticated_endpoint_uri_lookup.tpl b/charts/staffeln/charts/helm-toolkit/templates/endpoints/_authenticated_endpoint_uri_lookup.tpl
index 12b84de..d7390d8 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/endpoints/_authenticated_endpoint_uri_lookup.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/endpoints/_authenticated_endpoint_uri_lookup.tpl
@@ -50,7 +50,7 @@
 {{- $endpointScheme := tuple $type $endpoint $port $context | include "helm-toolkit.endpoints.keystone_endpoint_scheme_lookup" }}
 {{- $userMap := index $context.Values.endpoints ( $type | replace "-" "_" ) "auth" $userclass }}
 {{- $endpointUser := index $userMap "username" }}
-{{- $endpointPass := index $userMap "password" }}
+{{- $endpointPass := index $userMap "password" | urlquery }}
 {{- $endpointHost := tuple $type $endpoint $context | include "helm-toolkit.endpoints.endpoint_host_lookup" }}
 {{- $endpointPort := tuple $type $endpoint $port $context | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
 {{- $endpointPath := tuple $type $endpoint $port $context | include "helm-toolkit.endpoints.keystone_endpoint_path_lookup" }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/endpoints/_authenticated_transport_endpoint_uri_lookup.tpl b/charts/staffeln/charts/helm-toolkit/templates/endpoints/_authenticated_transport_endpoint_uri_lookup.tpl
index b7cf287..b9ac9d9 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/endpoints/_authenticated_transport_endpoint_uri_lookup.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/endpoints/_authenticated_transport_endpoint_uri_lookup.tpl
@@ -100,7 +100,7 @@
 {{-   $ssMap := index $context.Values.endpoints ( $type | replace "-" "_" ) "statefulset" | default false}}
 {{-   $hostFqdnOverride := index $context.Values.endpoints ( $type | replace "-" "_" ) "host_fqdn_override" }}
 {{-   $endpointUser := index $userMap "username" }}
-{{-   $endpointPass := index $userMap "password" }}
+{{-   $endpointPass := index $userMap "password" | urlquery }}
 {{-   $endpointHostSuffix := tuple $type $endpoint $context | include "helm-toolkit.endpoints.endpoint_host_lookup" }}
 {{-   $endpointPort := tuple $type $endpoint $port $context | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
 {{-   $local := dict "endpointCredsAndHosts" list -}}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_ingress.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_ingress.tpl
index 4c476b2..cacb4b8 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_ingress.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_ingress.tpl
@@ -59,7 +59,7 @@
               default: 9311
               public: 80
     usage: |
-      {{- include "helm-toolkit.manifests.ingress" ( dict "envAll" . "backendServiceType" "key-manager" "backendPort" "b-api" "endpoint" "public" ) -}}
+      {{- include "helm-toolkit.manifests.ingress" ( dict "envAll" . "backendServiceType" "key-manager" "backendPort" "b-api" "endpoint" "public" "pathType" "Prefix" ) -}}
     return: |
       ---
       apiVersion: networking.k8s.io/v1
@@ -67,16 +67,16 @@
       metadata:
         name: barbican
         annotations:
-          kubernetes.io/ingress.class: "nginx"
           nginx.ingress.kubernetes.io/rewrite-target: /
 
       spec:
+        ingressClassName: "nginx"
         rules:
           - host: barbican
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -86,7 +86,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -96,7 +96,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -108,10 +108,10 @@
       metadata:
         name: barbican-namespace-fqdn
         annotations:
-          kubernetes.io/ingress.class: "nginx"
           nginx.ingress.kubernetes.io/rewrite-target: /
 
       spec:
+        ingressClassName: "nginx"
         tls:
           - secretName: barbican-tls-public
             hosts:
@@ -121,7 +121,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -133,10 +133,10 @@
       metadata:
         name: barbican-cluster-fqdn
         annotations:
-          kubernetes.io/ingress.class: "nginx-cluster"
           nginx.ingress.kubernetes.io/rewrite-target: /
 
       spec:
+        ingressClassName: "nginx-cluster"
         tls:
           - secretName: barbican-tls-public
             hosts:
@@ -146,7 +146,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -194,7 +194,7 @@
               default: 9311
               public: 80
     usage: |
-      {{- include "helm-toolkit.manifests.ingress" ( dict "envAll" . "backendServiceType" "key-manager" "backendPort" "b-api" "endpoint" "public" ) -}}
+      {{- include "helm-toolkit.manifests.ingress" ( dict "envAll" . "backendServiceType" "key-manager" "backendPort" "b-api" "endpoint" "public" "pathType" "Prefix" ) -}}
     return: |
       ---
       apiVersion: networking.k8s.io/v1
@@ -202,10 +202,10 @@
       metadata:
         name: barbican
         annotations:
-          kubernetes.io/ingress.class: "nginx"
           nginx.ingress.kubernetes.io/rewrite-target: /
 
       spec:
+        ingressClassName: "nginx"
         tls:
           - secretName: barbican-tls-public
             hosts:
@@ -217,7 +217,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -227,7 +227,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -237,7 +237,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -294,7 +294,7 @@
                 name: ca-issuer
                 kind: Issuer
     usage: |
-      {{- include "helm-toolkit.manifests.ingress" ( dict "envAll" . "backendServiceType" "key-manager" "backendPort" "b-api" "endpoint" "public" "certIssuer" "ca-issuer" ) -}}
+      {{- include "helm-toolkit.manifests.ingress" ( dict "envAll" . "backendServiceType" "key-manager" "backendPort" "b-api" "endpoint" "public" "certIssuer" "ca-issuer" "pathType" "Prefix" ) -}}
     return: |
       ---
       apiVersion: networking.k8s.io/v1
@@ -302,12 +302,12 @@
       metadata:
         name: barbican
         annotations:
-          kubernetes.io/ingress.class: "nginx"
           cert-manager.io/issuer: ca-issuer
           certmanager.k8s.io/issuer: ca-issuer
           nginx.ingress.kubernetes.io/backend-protocol: https
           nginx.ingress.kubernetes.io/secure-backends: "true"
       spec:
+        ingressClassName: "nginx"
         tls:
           - secretName: barbican-tls-public-certmanager
             hosts:
@@ -319,7 +319,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -329,7 +329,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -339,7 +339,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -396,7 +396,7 @@
                 name: ca-issuer
                 kind: ClusterIssuer
     usage: |
-      {{- include "helm-toolkit.manifests.ingress" ( dict "envAll" . "backendServiceType" "key-manager" "backendPort" "b-api" "endpoint" "public" "certIssuer" "ca-issuer") -}}
+      {{- include "helm-toolkit.manifests.ingress" ( dict "envAll" . "backendServiceType" "key-manager" "backendPort" "b-api" "endpoint" "public" "certIssuer" "ca-issuer" "pathType" "Prefix" ) -}}
     return: |
       ---
       apiVersion: networking.k8s.io/v1
@@ -404,12 +404,12 @@
       metadata:
         name: barbican
         annotations:
-          kubernetes.io/ingress.class: "nginx"
           cert-manager.io/cluster-issuer: ca-issuer
           certmanager.k8s.io/cluster-issuer: ca-issuer
           nginx.ingress.kubernetes.io/backend-protocol: https
           nginx.ingress.kubernetes.io/secure-backends: "true"
       spec:
+        ingressClassName: "nginx"
         tls:
           - secretName: barbican-tls-public-certmanager
             hosts:
@@ -421,7 +421,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -431,7 +431,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -441,7 +441,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: barbican-api
@@ -479,7 +479,7 @@
             grafana:
               public: grafana-tls-public
     usage: |
-      {{- $ingressOpts := dict "envAll" . "backendService" "grafana" "backendServiceType" "grafana" "backendPort" "dashboard" -}}
+      {{- $ingressOpts := dict "envAll" . "backendService" "grafana" "backendServiceType" "grafana" "backendPort" "dashboard" "pathType" "Prefix" -}}
       {{ $ingressOpts | include "helm-toolkit.manifests.ingress" }}
     return: |
       ---
@@ -488,16 +488,16 @@
       metadata:
         name: grafana
         annotations:
-          kubernetes.io/ingress.class: "nginx"
           nginx.ingress.kubernetes.io/rewrite-target: /
 
       spec:
+        ingressClassName: "nginx"
         rules:
           - host: grafana
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: grafana-dashboard
@@ -507,7 +507,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: grafana-dashboard
@@ -517,7 +517,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: grafana-dashboard
@@ -529,10 +529,10 @@
       metadata:
         name: grafana-namespace-fqdn
         annotations:
-          kubernetes.io/ingress.class: "nginx"
           nginx.ingress.kubernetes.io/rewrite-target: /
 
       spec:
+        ingressClassName: "nginx"
         tls:
           - secretName: grafana-tls-public
             hosts:
@@ -543,7 +543,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: grafana-dashboard
@@ -553,7 +553,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: grafana-dashboard
@@ -565,10 +565,10 @@
       metadata:
         name: grafana-cluster-fqdn
         annotations:
-          kubernetes.io/ingress.class: "nginx-cluster"
           nginx.ingress.kubernetes.io/rewrite-target: /
 
       spec:
+        ingressClassName: "nginx-cluster"
         tls:
           - secretName: grafana-tls-public
             hosts:
@@ -579,7 +579,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: grafana-dashboard
@@ -589,7 +589,7 @@
             http:
               paths:
                 - path: /
-                  pathType: ImplementationSpecific
+                  pathType: Prefix
                   backend:
                     service:
                       name: grafana-dashboard
@@ -602,11 +602,12 @@
 {{- $vHost := index . "vHost" -}}
 {{- $backendName := index . "backendName" -}}
 {{- $backendPort := index . "backendPort" -}}
+{{- $pathType := index . "pathType" -}}
 - host: {{ $vHost }}
   http:
     paths:
       - path: /
-        pathType: ImplementationSpecific
+        pathType: {{ $pathType }}
         backend:
           service:
             name: {{ $backendName }}
@@ -624,6 +625,7 @@
 {{- $backendServiceType := index . "backendServiceType" -}}
 {{- $backendPort := index . "backendPort" -}}
 {{- $endpoint := index . "endpoint" | default "public" -}}
+{{- $pathType := index . "pathType" | default "Prefix" -}}
 {{- $certIssuer := index . "certIssuer" | default "" -}}
 {{- $ingressName := tuple $backendServiceType $endpoint $envAll | include "helm-toolkit.endpoints.hostname_short_endpoint_lookup" }}
 {{- $backendName := tuple $backendServiceType "internal" $envAll | include "helm-toolkit.endpoints.hostname_short_endpoint_lookup" }}
@@ -639,7 +641,6 @@
 metadata:
   name: {{ $ingressName }}
   annotations:
-    kubernetes.io/ingress.class: {{ index $envAll.Values.network $backendService "ingress" "classes" "namespace" | quote }}
 {{- if $certIssuer }}
     cert-manager.io/{{ $certIssuerType }}: {{ $certIssuer }}
     certmanager.k8s.io/{{ $certIssuerType }}: {{ $certIssuer }}
@@ -650,6 +651,7 @@
 {{- end }}
 {{ toYaml (index $envAll.Values.network $backendService "ingress" "annotations") | indent 4 }}
 spec:
+  ingressClassName: {{ index $envAll.Values.network $backendService "ingress" "classes" "namespace" | quote }}
 {{- $host := index $envAll.Values.endpoints ( $backendServiceType | replace "-" "_" ) "hosts" }}
 {{- if $certIssuer }}
 {{- $secretName := index $envAll.Values.secrets "tls" ( $backendServiceType | replace "-" "_" ) $backendService $endpoint }}
@@ -681,7 +683,7 @@
 {{- end }}
   rules:
 {{- range $key1, $vHost := tuple $hostName (printf "%s.%s" $hostName $envAll.Release.Namespace) (printf "%s.%s.svc.%s" $hostName $envAll.Release.Namespace $envAll.Values.endpoints.cluster_domain_suffix) }}
-{{- $hostRules := dict "vHost" $vHost "backendName" $backendName "backendPort" $backendPort }}
+{{- $hostRules := dict "vHost" $vHost "backendName" $backendName "backendPort" $backendPort "pathType" $pathType }}
 {{ $hostRules | include "helm-toolkit.manifests.ingress._host_rules" | indent 4 }}
 {{- end }}
 {{- if not ( hasSuffix ( printf ".%s.svc.%s" $envAll.Release.Namespace $envAll.Values.endpoints.cluster_domain_suffix) $hostNameFull) }}
@@ -695,9 +697,9 @@
 metadata:
   name: {{ printf "%s-%s-%s" $ingressName $ingressController "fqdn" }}
   annotations:
-    kubernetes.io/ingress.class: {{ index $envAll.Values.network $backendService "ingress" "classes" $ingressController | quote }}
 {{ toYaml (index $envAll.Values.network $backendService "ingress" "annotations") | indent 4 }}
 spec:
+  ingressClassName: {{ index $envAll.Values.network $backendService "ingress" "classes" $ingressController | quote }}
 {{- $host := index $envAll.Values.endpoints ( $backendServiceType | replace "-" "_" ) "host_fqdn_override" }}
 {{- if hasKey $host $endpoint }}
 {{- $endpointHost := index $host $endpoint }}
@@ -719,7 +721,7 @@
 {{- end }}
   rules:
 {{- range $vHost := $vHosts }}
-{{- $hostNameFullRules := dict "vHost" $vHost "backendName" $backendName "backendPort" $backendPort }}
+{{- $hostNameFullRules := dict "vHost" $vHost "backendName" $backendName "backendPort" $backendPort "pathType" $pathType }}
 {{ $hostNameFullRules | include "helm-toolkit.manifests.ingress._host_rules" | indent 4 }}
 {{- end }}
 {{- end }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
index 5d98c8b..6b77004 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-bootstrap.tpl
@@ -51,6 +51,7 @@
 {{ toYaml $jobLabels | indent 4 }}
 {{- end }}
   annotations:
+{{ tuple $serviceAccountName $envAll | include "helm-toolkit.snippets.custom_job_annotations" | indent 4 -}}
 {{- if $jobAnnotations }}
 {{ toYaml $jobAnnotations | indent 4 }}
 {{- end }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-db-drop-mysql.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-db-drop-mysql.tpl
index 62ed119..2b7ff2c 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-db-drop-mysql.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-db-drop-mysql.tpl
@@ -54,6 +54,7 @@
   annotations:
     "helm.sh/hook": pre-delete
     "helm.sh/hook-delete-policy": hook-succeeded
+{{ tuple $serviceAccountName $envAll | include "helm-toolkit.snippets.custom_job_annotations" | indent 4 -}}
 {{- if $jobAnnotations }}
 {{ toYaml $jobAnnotations | indent 4 }}
 {{- end }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-db-init-mysql.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-db-init-mysql.tpl
index 745e8da..b8a1dce 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-db-init-mysql.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-db-init-mysql.tpl
@@ -52,6 +52,7 @@
 {{ toYaml $jobLabels | indent 4 }}
 {{- end }}
   annotations:
+{{ tuple $serviceAccountName $envAll | include "helm-toolkit.snippets.custom_job_annotations" | indent 4 -}}
 {{- if $jobAnnotations }}
 {{ toYaml $jobAnnotations | indent 4 }}
 {{- end }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
index 24d2496..4696c88 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-db-sync.tpl
@@ -49,6 +49,7 @@
 {{ toYaml $jobLabels | indent 4 }}
 {{- end }}
   annotations:
+{{ tuple $serviceAccountName $envAll | include "helm-toolkit.snippets.custom_job_annotations" | indent 4 -}}
 {{- if $jobAnnotations }}
 {{ toYaml $jobAnnotations | indent 4 }}
 {{- end }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-ks-endpoints.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-ks-endpoints.tpl
index 3a7df7f..d69c9e6 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-ks-endpoints.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-ks-endpoints.tpl
@@ -52,6 +52,7 @@
 {{ toYaml $jobLabels | indent 4 }}
 {{- end }}
   annotations:
+{{ tuple $serviceAccountName $envAll | include "helm-toolkit.snippets.custom_job_annotations" | indent 4 -}}
 {{- if $jobAnnotations }}
 {{ toYaml $jobAnnotations | indent 4 }}
 {{- end }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-ks-service.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-ks-service.tpl
index a109e3c..9604c63 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-ks-service.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-ks-service.tpl
@@ -52,6 +52,7 @@
 {{ toYaml $jobLabels | indent 4 }}
 {{- end }}
   annotations:
+{{ tuple $serviceAccountName $envAll | include "helm-toolkit.snippets.custom_job_annotations" | indent 4 -}}
 {{- if $jobAnnotations }}
 {{ toYaml $jobAnnotations | indent 4 }}
 {{- end }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-ks-user.yaml.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-ks-user.yaml.tpl
index 905eb71..58dcdc5 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-ks-user.yaml.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-ks-user.yaml.tpl
@@ -74,6 +74,7 @@
 {{ toYaml $jobLabels | indent 4 }}
 {{- end }}
   annotations:
+{{ tuple $serviceAccountName $envAll | include "helm-toolkit.snippets.custom_job_annotations" | indent 4 -}}
 {{- if $jobAnnotations }}
 {{ toYaml $jobAnnotations | indent 4 }}
 {{- end }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-rabbit-init.yaml.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-rabbit-init.yaml.tpl
index 6982064..2cfadaf 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-rabbit-init.yaml.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-rabbit-init.yaml.tpl
@@ -42,6 +42,7 @@
 {{ toYaml $jobLabels | indent 4 }}
 {{- end }}
   annotations:
+{{ tuple $serviceAccountName $envAll | include "helm-toolkit.snippets.custom_job_annotations" | indent 4 -}}
 {{- if $jobAnnotations }}
 {{ toYaml $jobAnnotations | indent 4 }}
 {{- end }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-s3-bucket.yaml.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-s3-bucket.yaml.tpl
index 29cb993..b5fdc09 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-s3-bucket.yaml.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-s3-bucket.yaml.tpl
@@ -49,6 +49,7 @@
 {{- end }}
   annotations:
     {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
+{{ tuple $serviceAccountName $envAll | include "helm-toolkit.snippets.custom_job_annotations" | indent 4 -}}
 {{- if $jobAnnotations }}
 {{ toYaml $jobAnnotations | indent 4 }}
 {{- end }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-s3-user.yaml.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-s3-user.yaml.tpl
index 50d9af5..77d1a71 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-s3-user.yaml.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_job-s3-user.yaml.tpl
@@ -47,6 +47,7 @@
   annotations:
     "helm.sh/hook-delete-policy": before-hook-creation
     {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
+{{ tuple $serviceAccountName $envAll | include "helm-toolkit.snippets.custom_job_annotations" | indent 4 -}}
 {{- if $jobAnnotations }}
 {{ toYaml $jobAnnotations | indent 4 }}
 {{- end }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_secret-registry.yaml.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_secret-registry.yaml.tpl
index 4854bb1..7ad505b 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_secret-registry.yaml.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_secret-registry.yaml.tpl
@@ -17,6 +17,11 @@
  Creates a manifest for authenticating a registry with a secret
 examples:
   - values: |
+      annotations:
+        secret:
+          oci_image_registry:
+            {{ $serviceName }}:
+              custom.tld/key: "value"
       secrets:
         oci_image_registry:
           {{ $serviceName }}: {{ $keyName }}
@@ -36,30 +41,8 @@
     kind: Secret
     metadata:
       name: {{ $secretName }}
-    type: kubernetes.io/dockerconfigjson
-    data:
-      dockerconfigjson: {{ $dockerAuth }}
-
-  - values: |
-      secrets:
-        oci_image_registry:
-          {{ $serviceName }}: {{ $keyName }}
-      endpoints:
-        oci_image_registry:
-          name: oci-image-registry
-          auth:
-            enabled: true
-             {{ $serviceName }}:
-                name: {{ $userName }}
-                password: {{ $password }}
-  usage: |
-    {{- include "helm-toolkit.manifests.secret_registry" ( dict "envAll" . "registryUser" .Chart.Name ) -}}
-  return: |
-    ---
-    apiVersion: v1
-    kind: Secret
-    metadata:
-      name: {{ $secretName }}
+      annotations:
+        custom.tld/key: "value"
     type: kubernetes.io/dockerconfigjson
     data:
       dockerconfigjson: {{ $dockerAuth }}
@@ -87,6 +70,8 @@
 kind: Secret
 metadata:
   name: {{ $secretName }}
+  annotations:
+{{ tuple "oci_image_registry" $registryUser $envAll | include "helm-toolkit.snippets.custom_secret_annotations" | indent 4 }}
 type: kubernetes.io/dockerconfigjson
 data:
   .dockerconfigjson: {{ $dockerAuth }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/manifests/_secret-tls.yaml.tpl b/charts/staffeln/charts/helm-toolkit/templates/manifests/_secret-tls.yaml.tpl
index 24a7045..c800340 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/manifests/_secret-tls.yaml.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/manifests/_secret-tls.yaml.tpl
@@ -17,6 +17,11 @@
  Creates a manifest for a service's public tls secret
 examples:
   - values: |
+      annotations:
+        secret:
+          tls:
+            key_manager_api_public:
+              custom.tld/key: "value"
       secrets:
         tls:
           key_manager:
@@ -41,6 +46,8 @@
     kind: Secret
     metadata:
       name: barbican-tls-public
+      annotations:
+        custom.tld/key: "value"
     type: kubernetes.io/tls
     data:
       tls.key: Rk9PLUtFWQo=
@@ -88,11 +95,15 @@
 {{- if kindIs "map" $endpointHost }}
 {{- if hasKey $endpointHost "tls" }}
 {{- if and $endpointHost.tls.key $endpointHost.tls.crt }}
+
+{{- $customAnnotationKey := printf "%s_%s_%s" ( $backendServiceType | replace "-" "_" ) $backendService $endpoint }}
 ---
 apiVersion: v1
 kind: Secret
 metadata:
   name: {{ index $envAll.Values.secrets.tls ( $backendServiceType | replace "-" "_" ) $backendService $endpoint }}
+  annotations:
+{{ tuple "tls" $customAnnotationKey $envAll | include "helm-toolkit.snippets.custom_secret_annotations" | indent 4 }}
 type: kubernetes.io/tls
 data:
   tls.key: {{ $endpointHost.tls.key | b64enc }}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/scripts/_db-drop.py.tpl b/charts/staffeln/charts/helm-toolkit/templates/scripts/_db-drop.py.tpl
index 03884fa..c6a7521 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/scripts/_db-drop.py.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/scripts/_db-drop.py.tpl
@@ -33,6 +33,7 @@
     PARSER_OPTS = {"strict": False}
 import logging
 from sqlalchemy import create_engine
+from sqlalchemy import text
 
 # Create logger, console handler and formatter
 logger = logging.getLogger('OpenStack-Helm DB Drop')
@@ -124,7 +125,12 @@
 
 # Delete DB
 try:
-    root_engine.execute("DROP DATABASE IF EXISTS {0}".format(database))
+    with root_engine.connect() as connection:
+        connection.execute(text("DROP DATABASE IF EXISTS {0}".format(database)))
+        try:
+            connection.commit()
+        except AttributeError:
+            pass
     logger.info("Deleted database {0}".format(database))
 except:
     logger.critical("Could not drop database {0}".format(database))
@@ -132,7 +138,12 @@
 
 # Delete DB User
 try:
-    root_engine.execute("DROP USER IF EXISTS {0}".format(user))
+    with root_engine.connect() as connection:
+        connection.execute(text("DROP USER IF EXISTS {0}".format(user)))
+        try:
+            connection.commit()
+        except AttributeError:
+            pass
     logger.info("Deleted user {0}".format(user))
 except:
     logger.critical("Could not delete user {0}".format(user))
diff --git a/charts/staffeln/charts/helm-toolkit/templates/scripts/_db-init.py.tpl b/charts/staffeln/charts/helm-toolkit/templates/scripts/_db-init.py.tpl
index 6027b95..1917f78 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/scripts/_db-init.py.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/scripts/_db-init.py.tpl
@@ -33,6 +33,7 @@
     PARSER_OPTS = {"strict": False}
 import logging
 from sqlalchemy import create_engine
+from sqlalchemy import text
 
 # Create logger, console handler and formatter
 logger = logging.getLogger('OpenStack-Helm DB Init')
@@ -124,7 +125,12 @@
 
 # Create DB
 try:
-    root_engine.execute("CREATE DATABASE IF NOT EXISTS {0}".format(database))
+    with root_engine.connect() as connection:
+        connection.execute(text("CREATE DATABASE IF NOT EXISTS {0}".format(database)))
+        try:
+            connection.commit()
+        except AttributeError:
+            pass
     logger.info("Created database {0}".format(database))
 except:
     logger.critical("Could not create database {0}".format(database))
@@ -132,11 +138,16 @@
 
 # Create DB User
 try:
-    root_engine.execute(
-        "CREATE USER IF NOT EXISTS \'{0}\'@\'%%\' IDENTIFIED BY \'{1}\' {2}".format(
-            user, password, mysql_x509))
-    root_engine.execute(
-        "GRANT ALL ON `{0}`.* TO \'{1}\'@\'%%\'".format(database, user))
+    with root_engine.connect() as connection:
+        connection.execute(
+            text("CREATE USER IF NOT EXISTS \'{0}\'@\'%%\' IDENTIFIED BY \'{1}\' {2}".format(
+                user, password, mysql_x509)))
+        connection.execute(
+            text("GRANT ALL ON `{0}`.* TO \'{1}\'@\'%%\'".format(database, user)))
+        try:
+            connection.commit()
+        except AttributeError:
+            pass
     logger.info("Created user {0} for {1}".format(user, database))
 except:
     logger.critical("Could not create user {0} for {1}".format(user, database))
diff --git a/charts/staffeln/charts/helm-toolkit/templates/scripts/db-backup-restore/_backup_main.sh.tpl b/charts/staffeln/charts/helm-toolkit/templates/scripts/db-backup-restore/_backup_main.sh.tpl
index 3963bd4..695cb2e 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/scripts/db-backup-restore/_backup_main.sh.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/scripts/db-backup-restore/_backup_main.sh.tpl
@@ -49,6 +49,13 @@
 #                                          A random number between min and max delay is generated
 #                                          to set the delay.
 #
+#         RGW backup throttling variables:
+#           export THROTTLE_BACKUPS_ENABLED   Boolean variable to enable backup throttling
+#           export THROTTLE_LIMIT             Number of simultaneous RGW upload sessions
+#           export THROTTLE_LOCK_EXPIRE_AFTER Time in seconds after which an orphaned flag file expires
+#           export THROTTLE_RETRY_AFTER       Time in seconds to wait before retrying
+#           export THROTTLE_CONTAINER_NAME    Name of the RGW container to place flag files into
+#
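+#         For illustration only (hypothetical values, not shipped defaults):
+#           export THROTTLE_BACKUPS_ENABLED=true
+#           export THROTTLE_LIMIT=5
+#           export THROTTLE_LOCK_EXPIRE_AFTER=3600
+#           export THROTTLE_RETRY_AFTER=300
+#           export THROTTLE_CONTAINER_NAME=backup-throttle
+#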
 # The database-specific functions that need to be implemented are:
 #   dump_databases_to_directory <directory> <err_logfile> [scope]
 #       where:
@@ -84,8 +91,10 @@
 #      specified by the "LOCAL_DAYS_TO_KEEP" variable.
 #   4) Removing remote backup tarballs (from the remote gateway) which are older
 #      than the number of days specified by the "REMOTE_DAYS_TO_KEEP" variable.
+#   5) Controlling the remote storage gateway load from the client side by
+#      throttling uploads, using a dedicated RGW container to store flag files
+#      that mark upload sessions in progress.
 #
-
 # Note: not using set -e in this script because more elaborate error handling
 # is needed.
 
@@ -95,7 +104,7 @@
   log ERROR "${DB_NAME}_backup" "${DB_NAMESPACE} namespace: ${MSG}"
   rm -f $ERR_LOG_FILE
   rm -rf $TMP_DIR
-  exit $ERRCODE
+  exit 0
 }
 
 log_verify_backup_exit() {
@@ -104,7 +113,7 @@
   log ERROR "${DB_NAME}_verify_backup" "${DB_NAMESPACE} namespace: ${MSG}"
   rm -f $ERR_LOG_FILE
   # rm -rf $TMP_DIR
-  exit $ERRCODE
+  exit 0
 }
 
 
@@ -218,6 +227,113 @@
   echo "Sleeping for ${DELAY} seconds to spread the load in time..."
   sleep ${DELAY}
 
+  #---------------------------------------------------------------------------
+  # Remote backup throttling
+  export THROTTLE_BACKUPS_ENABLED=$(echo $THROTTLE_BACKUPS_ENABLED | sed 's/"//g')
+  if $THROTTLE_BACKUPS_ENABLED; then
+    # Remove the quotes that get added to these values when they are read
+    # from the secret.
+    export THROTTLE_LIMIT=$(echo $THROTTLE_LIMIT | sed 's/"//g')
+    export THROTTLE_LOCK_EXPIRE_AFTER=$(echo $THROTTLE_LOCK_EXPIRE_AFTER | sed 's/"//g')
+    export THROTTLE_RETRY_AFTER=$(echo $THROTTLE_RETRY_AFTER | sed 's/"//g')
+    export THROTTLE_CONTAINER_NAME=$(echo $THROTTLE_CONTAINER_NAME | sed 's/"//g')
+
+    # Make sure the throttle container exists, creating it if necessary.
+    RESULT=$(openstack container list 2>&1)
+
+    if [[ $? -eq 0 ]]; then
+      echo $RESULT | grep $THROTTLE_CONTAINER_NAME
+      if [[ $? -ne 0 ]]; then
+        # Find the swift URL from the keystone endpoint list
+        SWIFT_URL=$(openstack catalog show object-store -c endpoints | grep public | awk '{print $4}')
+        if [[ $? -ne 0 ]]; then
+          log WARN "${DB_NAME}_backup" "Unable to get object-store enpoints from keystone catalog."
+          return 2
+        fi
+
+        # Get a token from keystone
+        TOKEN=$(openstack token issue -f value -c id)
+        if [[ $? -ne 0 ]]; then
+          log WARN "${DB_NAME}_backup" "Unable to get  keystone token."
+          return 2
+        fi
+
+        # Create the container
+        RES_FILE=$(mktemp -p /tmp)
+        curl -g -i -X PUT ${SWIFT_URL}/${THROTTLE_CONTAINER_NAME} \
+            -H "X-Auth-Token: ${TOKEN}" \
+            -H "X-Storage-Policy: ${STORAGE_POLICY}" 2>&1 > $RES_FILE
+
+        if [[ $? -ne 0 || $(grep "HTTP" $RES_FILE | awk '{print $2}') -ge 400 ]]; then
+          log WARN "${DB_NAME}_backup" "Unable to create container ${THROTTLE_CONTAINER_NAME}"
+          cat $RES_FILE
+          rm -f $RES_FILE
+          return 2
+        fi
+        rm -f $RES_FILE
+
+        swift stat $THROTTLE_CONTAINER_NAME
+        if [[ $? -ne 0 ]]; then
+          log WARN "${DB_NAME}_backup" "Unable to retrieve container ${THROTTLE_CONTAINER_NAME} details after creation."
+          return 2
+        fi
+      fi
+    else
+      echo $RESULT | grep -E "HTTP 401|HTTP 403"
+      if [[ $? -eq 0 ]]; then
+        log ERROR "${DB_NAME}_backup" "Access denied by keystone: ${RESULT}"
+        return 1
+      else
+        echo $RESULT | grep -E "ConnectionError|Failed to discover available identity versions|Service Unavailable|HTTP 50"
+        if [[ $? -eq 0 ]]; then
+          log WARN "${DB_NAME}_backup" "Could not reach the RGW: ${RESULT}"
+          # In this case, keystone or the site/node may be temporarily down.
+          # Return a slightly different error code so the calling code can retry.
+          return 2
+        else
+          log ERROR "${DB_NAME}_backup" "Could not get container list: ${RESULT}"
+          return 1
+        fi
+      fi
+    fi
+
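+    # Each object in the throttle container marks one upload session in
+    # progress, so wait until the count drops below the configured limit.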
+    NUMBER_OF_SESSIONS=$(openstack object list $THROTTLE_CONTAINER_NAME -f value | wc -l)
+    log INFO  "${DB_NAME}_backup"  "There are ${NUMBER_OF_SESSIONS} remote sessions right now."
+    while [[ ${NUMBER_OF_SESSIONS} -ge ${THROTTLE_LIMIT} ]]
+    do
+      log INFO "${DB_NAME}_backup" "Current number of active uploads is ${NUMBER_OF_SESSIONS}>=${THROTTLE_LIMIT}!"
+      log INFO "${DB_NAME}_backup" "Retrying in ${THROTTLE_RETRY_AFTER} seconds...."
+      sleep ${THROTTLE_RETRY_AFTER}
+      NUMBER_OF_SESSIONS=$(openstack object list $THROTTLE_CONTAINER_NAME -f value | wc -l)
+      log INFO  "${DB_NAME}_backup"  "There are ${NUMBER_OF_SESSIONS} remote sessions right now."
+    done
+
+    # Create a lock file in THROTTLE_CONTAINER
+    THROTTLE_FILEPATH=$(mktemp -d)
+    THROTTLE_FILE=${CONTAINER_NAME}.lock
+    date +%s > $THROTTLE_FILEPATH/$THROTTLE_FILE
+
+    # Create an object to store the file
+    openstack object create --name $THROTTLE_FILE $THROTTLE_CONTAINER_NAME $THROTTLE_FILEPATH/$THROTTLE_FILE
+    if [[ $? -ne 0 ]]; then
+      log WARN "${DB_NAME}_backup" "Cannot create throttle container object ${THROTTLE_FILE}!"
+      return 2
+    fi
+
+    swift post $THROTTLE_CONTAINER_NAME $THROTTLE_FILE -H "X-Delete-After:${THROTTLE_LOCK_EXPIRE_AFTER}"
+    if [[ $? -ne 0 ]]; then
+      log WARN "${DB_NAME}_backup" "Cannot set throttle container object ${THROTTLE_FILE} expiration header!"
+      return 2
+    fi
+    openstack object show $THROTTLE_CONTAINER_NAME $THROTTLE_FILE
+    if [[ $? -ne 0 ]]; then
+      log WARN "${DB_NAME}_backup" "Unable to retrieve throttle container object $THROTTLE_FILE after creation."
+      return 2
+    fi
+  fi
+
+  #---------------------------------------------------------------------------
+
   # Create an object to store the file
   openstack object create --name $FILE $CONTAINER_NAME $FILEPATH/$FILE
   if [[ $? -ne 0 ]]; then
@@ -243,7 +359,25 @@
       log ERROR "${DB_NAME}_backup" "Mismatch between the local backup & remote backup MD5 hash values"
       return 2
   fi
-  rm -rf ${REMOTE_FILE}
+  rm -f ${REMOTE_FILE}
+
+  #---------------------------------------------------------------------------
+  # Remote backup throttling
+  export THROTTLE_BACKUPS_ENABLED=$(echo $THROTTLE_BACKUPS_ENABLED | sed 's/"//g')
+  if $THROTTLE_BACKUPS_ENABLED; then
+    # Remove flag file
+    # Delete an object to remove the flag file
+    openstack object delete $THROTTLE_CONTAINER_NAME $THROTTLE_FILE
+    if [[ $? -ne 0 ]]; then
+      log WARN "${DB_NAME}_backup" "Cannot delete throttle container object ${THROTTLE_FILE}"
+      return 0
+    else
+      log INFO "${DB_NAME}_backup" "The throttle container object ${THROTTLE_FILE} has been successfully removed."
+    fi
+    rm -f ${THROTTLE_FILEPATH}/${THROTTLE_FILE}
+  fi
+
+  #---------------------------------------------------------------------------
 
   log INFO "${DB_NAME}_backup" "Created file $FILE in container $CONTAINER_NAME successfully."
   return 0
diff --git a/charts/staffeln/charts/helm-toolkit/templates/snippets/_custom_job_annotations.tpl b/charts/staffeln/charts/helm-toolkit/templates/snippets/_custom_job_annotations.tpl
new file mode 100644
index 0000000..fc42614
--- /dev/null
+++ b/charts/staffeln/charts/helm-toolkit/templates/snippets/_custom_job_annotations.tpl
@@ -0,0 +1,76 @@
+{{/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{/*
+abstract: |
+  Adds custom annotations to the job spec of a component.
+examples:
+  - values: |
+      annotations:
+        job:
+          default:
+            custom.tld/key: "value"
+            custom.tld/key2: "value2"
+          keystone_domain_manage:
+            another.tld/foo: "bar"
+    usage: |
+      {{ tuple "keystone_domain_manage" . | include "helm-toolkit.snippets.custom_job_annotations" }}
+    return: |
+      another.tld/foo: bar
+  - values: |
+      annotations:
+        job:
+          default:
+            custom.tld/key: "value"
+            custom.tld/key2: "value2"
+          keystone_domain_manage:
+            another.tld/foo: "bar"
+    usage: |
+      {{ tuple "keystone_bootstrap" . | include "helm-toolkit.snippets.custom_job_annotations" }}
+    return: |
+      custom.tld/key: "value"
+      custom.tld/key2: "value2"
+  - values: |
+      annotations:
+        job:
+          default:
+            custom.tld/key: "value"
+            custom.tld/key2: "value2"
+          keystone_domain_manage:
+            another.tld/foo: "bar"
+          keystone_bootstrap:
+    usage: |
+      {{ tuple "keystone_bootstrap" . | include "helm-toolkit.snippets.custom_job_annotations" }}
+    return: |
+      custom.tld/key: "value"
+      custom.tld/key2: "value2"
+*/}}
+
+{{- define "helm-toolkit.snippets.custom_job_annotations" -}}
+{{- $envAll := index . 1 -}}
+{{- $component := index . 0 | replace "-" "_" -}}
+{{- if (hasKey $envAll.Values "annotations") -}}
+{{- if (hasKey $envAll.Values.annotations "job") -}}
+{{- $annotationsMap := $envAll.Values.annotations.job -}}
+{{- $defaultAnnotations := dict -}}
+{{- if (hasKey $annotationsMap "default" ) -}}
+{{- $defaultAnnotations = $annotationsMap.default -}}
+{{- end -}}
+{{- $annotations := index $annotationsMap $component | default $defaultAnnotations -}}
+{{- if (not (empty $annotations)) -}}
+{{- toYaml $annotations -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/snippets/_custom_pod_annotations.tpl b/charts/staffeln/charts/helm-toolkit/templates/snippets/_custom_pod_annotations.tpl
new file mode 100644
index 0000000..ecff6e9
--- /dev/null
+++ b/charts/staffeln/charts/helm-toolkit/templates/snippets/_custom_pod_annotations.tpl
@@ -0,0 +1,76 @@
+{{/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{/*
+abstract: |
+  Adds custom annotations to the pod spec of a component.
+examples:
+  - values: |
+      annotations:
+        pod:
+          default:
+            custom.tld/key: "value"
+            custom.tld/key2: "value2"
+          nova_compute:
+            another.tld/foo: "bar"
+    usage: |
+      {{ tuple "nova_compute" . | include "helm-toolkit.snippets.custom_pod_annotations" }}
+    return: |
+      another.tld/foo: bar
+  - values: |
+      annotations:
+        pod:
+          default:
+            custom.tld/key: "value"
+            custom.tld/key2: "value2"
+          nova_compute:
+            another.tld/foo: "bar"
+    usage: |
+      {{ tuple "nova_api" . | include "helm-toolkit.snippets.custom_pod_annotations" }}
+    return: |
+      custom.tld/key: "value"
+      custom.tld/key2: "value2"
+  - values: |
+      annotations:
+        pod:
+          default:
+            custom.tld/key: "value"
+            custom.tld/key2: "value2"
+          nova_compute:
+            another.tld/foo: "bar"
+          nova_api:
+    usage: |
+      {{ tuple "nova_api" . | include "helm-toolkit.snippets.custom_pod_annotations" }}
+    return: |
+      custom.tld/key: "value"
+      custom.tld/key2: "value2"
+*/}}
+
+{{- define "helm-toolkit.snippets.custom_pod_annotations" -}}
+{{- $component := index . 0 -}}
+{{- $envAll := index . 1 -}}
+{{- if (hasKey $envAll.Values "annotations") -}}
+{{- if (hasKey $envAll.Values.annotations "pod") -}}
+{{- $annotationsMap := $envAll.Values.annotations.pod -}}
+{{- $defaultAnnotations := dict -}}
+{{- if (hasKey $annotationsMap "default" ) -}}
+{{- $defaultAnnotations = $annotationsMap.default -}}
+{{- end -}}
+{{- $annotations := index $annotationsMap $component | default $defaultAnnotations -}}
+{{- if (not (empty $annotations)) -}}
+{{- toYaml $annotations -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/snippets/_custom_secret_annotations.tpl b/charts/staffeln/charts/helm-toolkit/templates/snippets/_custom_secret_annotations.tpl
new file mode 100644
index 0000000..19c4380
--- /dev/null
+++ b/charts/staffeln/charts/helm-toolkit/templates/snippets/_custom_secret_annotations.tpl
@@ -0,0 +1,81 @@
+{{/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{/*
+abstract: |
+  Adds custom annotations to the secret spec of a component.
+examples:
+  - values: |
+      annotations:
+        secret:
+          default:
+            custom.tld/key: "value"
+            custom.tld/key2: "value2"
+          identity:
+            admin:
+              another.tld/foo: "bar"
+    usage: |
+      {{ tuple "identity" "admin" . | include "helm-toolkit.snippets.custom_secret_annotations" }}
+    return: |
+      another.tld/foo: bar
+  - values: |
+      annotations:
+        secret:
+          default:
+            custom.tld/key: "value"
+            custom.tld/key2: "value2"
+          identity:
+            admin:
+              another.tld/foo: "bar"
+    usage: |
+      {{ tuple "oslo_db" "admin" . | include "helm-toolkit.snippets.custom_secret_annotations" }}
+    return: |
+      custom.tld/key: "value"
+      custom.tld/key2: "value2"
+  - values: |
+      annotations:
+        secret:
+          default:
+            custom.tld/key: "value"
+            custom.tld/key2: "value2"
+          identity:
+            admin:
+              another.tld/foo: "bar"
+          oslo_db:
+            admin:
+    usage: |
+      {{ tuple "oslo_db" "admin" . | include "helm-toolkit.snippets.custom_secret_annotations" }}
+    return: |
+      custom.tld/key: "value"
+      custom.tld/key2: "value2"
+*/}}
+
+{{- define "helm-toolkit.snippets.custom_secret_annotations" -}}
+{{- $secretType := index . 0 -}}
+{{- $userClass := index . 1 | replace "-" "_" -}}
+{{- $envAll := index . 2 -}}
+{{- if (hasKey $envAll.Values "annotations") -}}
+{{- if (hasKey $envAll.Values.annotations "secret") -}}
+{{- $annotationsMap := index $envAll.Values.annotations.secret $secretType | default dict -}}
+{{- $defaultAnnotations := dict -}}
+{{- if (hasKey $envAll.Values.annotations.secret "default" ) -}}
+{{- $defaultAnnotations = $envAll.Values.annotations.secret.default -}}
+{{- end -}}
+{{- $annotations := index $annotationsMap $userClass | default $defaultAnnotations -}}
+{{- if (not (empty $annotations)) -}}
+{{- toYaml $annotations -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/staffeln/charts/helm-toolkit/templates/snippets/_image.tpl b/charts/staffeln/charts/helm-toolkit/templates/snippets/_image.tpl
index 029c93d..678b844 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/snippets/_image.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/snippets/_image.tpl
@@ -19,7 +19,7 @@
   images:
     tags:
       test_image: docker.io/port/test:version-foo
-      image_foo: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
+      image_foo: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal
     pull_policy: IfNotPresent
     local_registry:
       active: true
diff --git a/charts/staffeln/charts/helm-toolkit/templates/snippets/_kubernetes_entrypoint_init_container.tpl b/charts/staffeln/charts/helm-toolkit/templates/snippets/_kubernetes_entrypoint_init_container.tpl
index bed712e..ad628da 100644
--- a/charts/staffeln/charts/helm-toolkit/templates/snippets/_kubernetes_entrypoint_init_container.tpl
+++ b/charts/staffeln/charts/helm-toolkit/templates/snippets/_kubernetes_entrypoint_init_container.tpl
@@ -19,7 +19,7 @@
 values: |
   images:
     tags:
-      dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
+      dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal
     pull_policy: IfNotPresent
     local_registry:
       active: true
@@ -76,7 +76,7 @@
   {{ tuple . "calico_node" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" }}
 return: |
   - name: init
-    image: "quay.io/airshipit/kubernetes-entrypoint:v1.0.0"
+    image: "quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal"
     imagePullPolicy: IfNotPresent
     securityContext:
       allowPrivilegeEscalation: false
diff --git a/charts/staffeln/charts/helm-toolkit/templates/snippets/_rgw_s3_bucket_user_env_vars_rook.tpl b/charts/staffeln/charts/helm-toolkit/templates/snippets/_rgw_s3_bucket_user_env_vars_rook.tpl
new file mode 100644
index 0000000..08521e0
--- /dev/null
+++ b/charts/staffeln/charts/helm-toolkit/templates/snippets/_rgw_s3_bucket_user_env_vars_rook.tpl
@@ -0,0 +1,28 @@
+{{/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
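+{{/*
+abstract: |
+  Generates S3 credential environment variables for each bucket defined under
+  .Values.storage.s3.buckets, reading them from the corresponding Rook-managed
+  bucket secret. The example below is illustrative; the bucket and client
+  names are hypothetical.
+examples:
+  - values: |
+      storage:
+        s3:
+          buckets:
+            - name: openstack-loki
+              client: loki
+    usage: |
+      {{ include "helm-toolkit.snippets.rgw_s3_bucket_user_env_vars_rook" . }}
+    return: |
+      - name: LOKI_S3_ACCESS_KEY
+        valueFrom:
+          secretKeyRef:
+            name: openstack-loki
+            key: AWS_ACCESS_KEY_ID
+      - name: LOKI_S3_SECRET_KEY
+        valueFrom:
+          secretKeyRef:
+            name: openstack-loki
+            key: AWS_SECRET_ACCESS_KEY
+*/}}
+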
+{{- define "helm-toolkit.snippets.rgw_s3_bucket_user_env_vars_rook" }}
+{{- range $s3Bucket := .Values.storage.s3.buckets }}
+- name: {{ printf "%s_S3_ACCESS_KEY" ($s3Bucket.client | replace "-" "_" | upper) }}
+  valueFrom:
+    secretKeyRef:
+      name: {{ $s3Bucket.name }}
+      key: AWS_ACCESS_KEY_ID
+- name: {{ printf "%s_S3_SECRET_KEY" ($s3Bucket.client | replace "-" "_" | upper) }}
+  valueFrom:
+    secretKeyRef:
+      name: {{ $s3Bucket.name }}
+      key: AWS_SECRET_ACCESS_KEY
+{{- end }}
+{{- end }}
diff --git a/crates/ovsinit/Cargo.toml b/crates/ovsinit/Cargo.toml
new file mode 100644
index 0000000..ac7d810
--- /dev/null
+++ b/crates/ovsinit/Cargo.toml
@@ -0,0 +1,19 @@
+[package]
+name = "ovsinit"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+clap = { version = "4.5.29", features = ["derive"] }
+env_logger = { version = "0.11.6", features = ["unstable-kv"] }
+futures = "0.3.31"
+futures-util = "0.3.31"
+ipnet = "2.11.0"
+libc = "0.2.169"
+log = { version = "0.4.25", features = ["kv"] }
+netlink-packet-route = "0.19.0"
+rtnetlink = "0.14.1"
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "1.0"
+thiserror = "2.0.11"
+tokio = { version = "1", features = ["rt-multi-thread", "macros"] }
diff --git a/crates/ovsinit/src/config.rs b/crates/ovsinit/src/config.rs
new file mode 100644
index 0000000..7c3d6b7
--- /dev/null
+++ b/crates/ovsinit/src/config.rs
@@ -0,0 +1,82 @@
+use serde::Deserialize;
+use std::collections::HashMap;
+use std::{fs::File, path::PathBuf};
+use thiserror::Error;
+use log::{error, info};
+
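+/// Parsed contents of the bridge mapping file (`/tmp/auto_bridge_add` by
+/// default): a flat JSON object mapping each OVS bridge name to the physical
+/// interface whose addresses should be migrated to it, or `null` to skip it,
+/// e.g. `{"br-ex": "bond0"}`.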
+#[derive(Deserialize)]
+pub struct NetworkConfig {
+    #[serde(flatten)]
+    pub bridges: HashMap<String, Option<String>>,
+}
+
+#[derive(Debug, Error)]
+pub enum NetworkConfigError {
+    #[error("Failed to open file: {0}")]
+    OpenFile(#[from] std::io::Error),
+
+    #[error("Failed to parse JSON: {0}")]
+    ParseJson(#[from] serde_json::Error),
+}
+
+impl NetworkConfig {
+    pub fn from_path(path: &PathBuf) -> Result<Self, NetworkConfigError> {
+        let file = File::open(path)?;
+        NetworkConfig::from_file(file)
+    }
+
+    pub fn from_file(file: File) -> Result<Self, NetworkConfigError> {
+        let config: NetworkConfig = serde_json::from_reader(file)?;
+        Ok(config)
+    }
+
+    pub fn bridges_with_interfaces_iter(&self) -> impl Iterator<Item = (&String, &String)> {
+        self.bridges.iter().filter_map(|(k, v)| {
+            if let Some(v) = v {
+                Some((k, v))
+            } else {
+                info!(bridge = k.as_str(); "Bridge has no interface, skipping.");
+
+                None
+            }
+        })
+    }
+
+    #[allow(dead_code)]
+    pub fn from_string(json: &str) -> Result<Self, NetworkConfigError> {
+        let config: NetworkConfig = serde_json::from_str(json)?;
+        Ok(config)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_null_interface() {
+        let config = NetworkConfig::from_string("{\"br-ex\": null}").unwrap();
+
+        assert_eq!(config.bridges.len(), 1);
+        assert_eq!(config.bridges.get("br-ex"), Some(&None));
+    }
+
+    #[test]
+    fn test_bridges_with_interfaces_iter_with_null_interface() {
+        let config = NetworkConfig::from_string("{\"br-ex\": null}").unwrap();
+
+        let mut iter = config.bridges_with_interfaces_iter();
+        assert_eq!(iter.next(), None);
+    }
+
+    #[test]
+    fn test_bridges_with_interfaces_iter_with_interface() {
+        let config = NetworkConfig::from_string("{\"br-ex\": \"bond0\"}").unwrap();
+
+        let mut iter = config.bridges_with_interfaces_iter();
+        assert_eq!(
+            iter.next(),
+            Some((&"br-ex".to_string(), &"bond0".to_string()))
+        );
+    }
+}
diff --git a/crates/ovsinit/src/lib.rs b/crates/ovsinit/src/lib.rs
new file mode 100644
index 0000000..80fb9cd
--- /dev/null
+++ b/crates/ovsinit/src/lib.rs
@@ -0,0 +1,353 @@
+extern crate ipnet;
+
+mod routes;
+
+use futures_util::stream::TryStreamExt;
+use ipnet::IpNet;
+use log::{error, info};
+use netlink_packet_route::{
+    address::{AddressAttribute, AddressMessage},
+    route::{RouteAttribute, RouteMessage, RouteScope},
+    AddressFamily,
+};
+use rtnetlink::{Handle, IpVersion};
+use std::net::IpAddr;
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+pub enum InterfaceError {
+    #[error("Interface {0} not found")]
+    NotFound(String),
+
+    #[error(transparent)]
+    NetlinkError(#[from] rtnetlink::Error),
+
+    #[error(transparent)]
+    IpNetError(#[from] ipnet::PrefixLenError),
+
+    #[error(transparent)]
+    RouteError(#[from] routes::RouteError),
+}
+
+#[derive(Error, Debug)]
+pub enum InterfaceMigrationError {
+    #[error(transparent)]
+    InterfaceError(#[from] InterfaceError),
+
+    #[error("IP configuration on both interfaces")]
+    IpConflict,
+}
+
+pub struct Interface {
+    name: String,
+    index: u32,
+    address_messages: Vec<AddressMessage>,
+    route_messages: Vec<RouteMessage>,
+}
+
+impl Interface {
+    pub async fn new(handle: &Handle, name: String) -> Result<Self, InterfaceError> {
+        let index = handle
+            .link()
+            .get()
+            .match_name(name.clone())
+            .execute()
+            .try_next()
+            .await
+            .map_err(|e| match e {
+                rtnetlink::Error::NetlinkError(inner) if -inner.raw_code() == libc::ENODEV => {
+                    InterfaceError::NotFound(name.clone())
+                }
+                _ => InterfaceError::NetlinkError(e),
+            })?
+            .map(|link| link.header.index)
+            .ok_or_else(|| InterfaceError::NotFound(name.clone()))?;
+
+        let address_messages: Vec<AddressMessage> = handle
+            .address()
+            .get()
+            .set_link_index_filter(index)
+            .execute()
+            .map_err(InterfaceError::NetlinkError)
+            .try_filter(|msg| futures::future::ready(msg.header.family == AddressFamily::Inet))
+            .try_collect()
+            .await?;
+
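+        // Collect the IPv4 routes whose output interface is this link,
+        // skipping kernel-generated local routes.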
+        let route_messages: Vec<RouteMessage> = handle
+            .route()
+            .get(IpVersion::V4)
+            .execute()
+            .map_err(InterfaceError::NetlinkError)
+            .try_filter(move |route_msg| {
+                let matches = route_msg
+                    .attributes
+                    .iter()
+                    .any(|attr| matches!(attr, RouteAttribute::Oif(idx) if *idx == index))
+                    && route_msg.header.kind != netlink_packet_route::route::RouteType::Local;
+
+                futures_util::future::ready(matches)
+            })
+            .try_collect()
+            .await?;
+
+        Ok(Self {
+            name,
+            index,
+            address_messages,
+            route_messages,
+        })
+    }
+
+    fn addresses(&self) -> Vec<IpNet> {
+        self.address_messages
+            .iter()
+            .filter_map(|msg| {
+                msg.attributes.iter().find_map(|nla| {
+                    if let AddressAttribute::Address(ip) = nla {
+                        IpNet::new(*ip, msg.header.prefix_len).ok()
+                    } else {
+                        None
+                    }
+                })
+            })
+            .collect()
+    }
+
+    fn routes(&self) -> Result<Vec<routes::Route>, routes::RouteError> {
+        self.route_messages
+            .iter()
+            .filter_map(|msg| {
+                if msg.header.scope == RouteScope::Link {
+                    return None;
+                }
+
+                Some(routes::Route::from_message(msg.clone()))
+            })
+            .collect::<Result<Vec<routes::Route>, routes::RouteError>>()
+    }
+
+    async fn up(&self, handle: &Handle) -> Result<(), InterfaceError> {
+        handle
+            .link()
+            .set(self.index)
+            .up()
+            .execute()
+            .await
+            .map_err(InterfaceError::NetlinkError)
+    }
+
+    async fn restore(&self, handle: &Handle) -> Result<(), InterfaceError> {
+        self.migrate_addresses_from_interface(handle, self).await?;
+        self.migrate_routes_from_interface(handle, self).await?;
+
+        Ok(())
+    }
+
+    async fn flush(&self, handle: &Handle) -> Result<(), InterfaceError> {
+        for msg in self.address_messages.iter() {
+            handle.address().del(msg.clone()).execute().await?;
+        }
+
+        // NOTE(mnaser): Once the interface has no more addresses, it will
+        //               automatically lose all of its routes.
+
+        Ok(())
+    }
+
+    async fn migrate_addresses_from_interface(
+        &self,
+        handle: &Handle,
+        src_interface: &Interface,
+    ) -> Result<(), InterfaceError> {
+        for msg in src_interface.address_messages.iter() {
+            let ip = msg.attributes.iter().find_map(|nla| match nla {
+                AddressAttribute::Address(ip) => Some(ip),
+                _ => None,
+            });
+
+            if let Some(ip) = ip {
+                handle
+                    .address()
+                    .add(self.index, *ip, msg.header.prefix_len)
+                    .replace()
+                    .execute()
+                    .await?;
+            }
+        }
+
+        Ok(())
+    }
+
+    async fn migrate_routes_from_interface(
+        &self,
+        handle: &Handle,
+        src_interface: &Interface,
+    ) -> Result<(), InterfaceError> {
+        for route in src_interface.routes()?.iter() {
+            let mut request = handle.route().add();
+            request = request.protocol(route.protocol);
+
+            match route.destination.addr() {
+                IpAddr::V4(ipv4) => {
+                    let mut request = request
+                        .v4()
+                        .replace()
+                        .destination_prefix(ipv4, route.destination.prefix_len());
+
+                    if let IpAddr::V4(gateway) = route.gateway {
+                        request = request.gateway(gateway);
+                    }
+
+                    request.execute().await?;
+                }
+                IpAddr::V6(ipv6) => {
+                    let mut request = request
+                        .v6()
+                        .replace()
+                        .destination_prefix(ipv6, route.destination.prefix_len());
+
+                    if let IpAddr::V6(gateway) = route.gateway {
+                        request = request.gateway(gateway);
+                    }
+
+                    request.execute().await?;
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    pub async fn migrate_from_interface(
+        &self,
+        handle: &Handle,
+        src_interface: &Interface,
+    ) -> Result<(), InterfaceMigrationError> {
+        self.up(handle).await?;
+
+        match (
+            src_interface.address_messages.is_empty(),
+            self.address_messages.is_empty(),
+        ) {
+            (false, false) => {
+                // Both source and destination interfaces have IPs assigned
+                error!(
+                    src_interface = src_interface.name.as_str(),
+                    dst_interface = self.name.as_str(),
+                    src_ip_addresses = format!("{:?}", src_interface.addresses()).as_str(),
+                    dst_ip_addresses = format!("{:?}", self.addresses()).as_str();
+                    "Both source and destination interfaces have IPs assigned. This is not safe in production, please fix manually."
+                );
+
+                Err(InterfaceMigrationError::IpConflict)
+            }
+            (false, true) => {
+                // Source interface has IPs, destination interface has no IPs
+                info!(
+                    src_interface = src_interface.name.as_str(),
+                    dst_interface = self.name.as_str(),
+                    ip_addresses = format!("{:?}", src_interface.addresses()).as_str(),
+                    routes = format!("{:?}", src_interface.routes()).as_str();
+                    "Migrating IP addresses from interface to bridge."
+                );
+
+                if let Err(e) = src_interface.flush(handle).await {
+                    error!(
+                        src_interface = src_interface.name.as_str(),
+                        error = e.to_string().as_str();
+                        "Error while flushing IPs from source interface."
+                    );
+
+                    if let Err(restore_err) = src_interface.restore(handle).await {
+                        error!(
+                            src_interface = src_interface.name.as_str(),
+                            error = restore_err.to_string().as_str();
+                            "Error while restoring IPs to source interface."
+                        );
+                    }
+
+                    return Err(InterfaceMigrationError::InterfaceError(e));
+                }
+
+                info!(
+                    src_interface = src_interface.name.as_str(),
+                    dst_interface = self.name.as_str();
+                    "Successfully flushed IP addresses from source interface."
+                );
+
+                if let Err(e) = self
+                    .migrate_addresses_from_interface(handle, src_interface)
+                    .await
+                {
+                    error!(
+                        dst_interface = self.name.as_str(),
+                        error = e.to_string().as_str();
+                        "Error while migrating IP addresses to destination interface."
+                    );
+
+                    if let Err(restore_err) = src_interface.restore(handle).await {
+                        error!(
+                            src_interface = src_interface.name.as_str(),
+                            error = restore_err.to_string().as_str();
+                            "Error while restoring IPs to source interface."
+                        );
+                    }
+
+                    return Err(InterfaceMigrationError::InterfaceError(e));
+                }
+
+                info!(
+                    src_interface = src_interface.name.as_str(),
+                    dst_interface = self.name.as_str();
+                    "Successfully migrated IP addresseses to new interface."
+                );
+
+                if let Err(e) = self
+                    .migrate_routes_from_interface(handle, src_interface)
+                    .await
+                {
+                    error!(
+                        dst_interface = self.name.as_str(),
+                        routes = format!("{:?}", src_interface.routes()).as_str(),
+                        error = e.to_string().as_str();
+                        "Error while migrating routes to destination interface."
+                    );
+
+                    if let Err(restore_err) = src_interface.restore(handle).await {
+                        error!(
+                            src_interface = src_interface.name.as_str(),
+                            routes = format!("{:?}", src_interface.routes()).as_str(),
+                            error = restore_err.to_string().as_str();
+                            "Error while restoring source interface."
+                        );
+                    }
+
+                    return Err(InterfaceMigrationError::InterfaceError(e));
+                }
+
+                Ok(())
+            }
+            (true, false) => {
+                // Destination interface has IPs, source interface has no IPs
+                info!(
+                    src_interface = src_interface.name.as_str(),
+                    dst_interface = self.name.as_str(),
+                    ip_addresses = format!("{:?}", self.addresses()).as_str();
+                    "Bridge already has IPs assigned. Skipping migration."
+                );
+
+                Ok(())
+            }
+            (true, true) => {
+                // Neither interface has IPs
+                info!(
+                    src_interface = src_interface.name.as_str(),
+                    dst_interface = self.name.as_str();
+                    "Neither interface nor bridge have IPs assigned. Skipping migration."
+                );
+
+                Ok(())
+            }
+        }
+    }
+}
diff --git a/crates/ovsinit/src/main.rs b/crates/ovsinit/src/main.rs
new file mode 100644
index 0000000..fb77530
--- /dev/null
+++ b/crates/ovsinit/src/main.rs
@@ -0,0 +1,63 @@
+mod config;
+
+use clap::Parser;
+use env_logger::Env;
+use log::error;
+use rtnetlink::Handle;
+use std::{path::PathBuf, process};
+
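+/// Command-line arguments: an optional path to the bridge mapping JSON file,
+/// defaulting to `/tmp/auto_bridge_add` (e.g. `ovsinit /path/to/mapping.json`,
+/// where the path shown is only illustrative).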
+#[derive(Parser, Debug)]
+#[command(version, about, long_about = None)]
+struct Cli {
+    #[arg(default_value = "/tmp/auto_bridge_add", help = "Path to the JSON file")]
+    config: PathBuf,
+}
+
+#[tokio::main]
+async fn main() {
+    let cli = Cli::parse();
+
+    let env = Env::default()
+        .filter_or("MY_LOG_LEVEL", "info")
+        .write_style_or("MY_LOG_STYLE", "always");
+    env_logger::init_from_env(env);
+
+    let network_config = match config::NetworkConfig::from_path(&cli.config) {
+        Ok(network_config) => network_config,
+        Err(e) => {
+            error!("Failed to load network config: {}", e);
+
+            process::exit(1);
+        }
+    };
+
+    let (connection, handle, _) = rtnetlink::new_connection().expect("Failed to create connection");
+    tokio::spawn(connection);
+
+    for (bridge_name, interface_name) in network_config.bridges_with_interfaces_iter() {
+        let interface = get_interface(&handle, interface_name).await;
+        let bridge = get_interface(&handle, bridge_name).await;
+
+        if let Err(e) = bridge.migrate_from_interface(&handle, &interface).await {
+            error!(
+                "Failed to migrate from {} to {}: {}",
+                interface_name, bridge_name, e
+            );
+            process::exit(1);
+        }
+    }
+}
+
+async fn get_interface(handle: &Handle, name: &str) -> ovsinit::Interface {
+    match ovsinit::Interface::new(handle, name.to_string()).await {
+        Ok(interface) => interface,
+        Err(ovsinit::InterfaceError::NotFound(name)) => {
+            error!(interface = name.as_str(); "Interface not found.");
+            process::exit(1);
+        }
+        Err(e) => {
+            error!(error = e.to_string().as_str(); "Failed to lookup interface.");
+            process::exit(1);
+        }
+    }
+}
diff --git a/crates/ovsinit/src/routes.rs b/crates/ovsinit/src/routes.rs
new file mode 100644
index 0000000..a4e0130
--- /dev/null
+++ b/crates/ovsinit/src/routes.rs
@@ -0,0 +1,150 @@
+use ipnet::IpNet;
+use log::error;
+use netlink_packet_route::{
+    route::{RouteAddress, RouteAttribute, RouteMessage, RouteProtocol},
+    AddressFamily,
+};
+use std::{
+    fmt,
+    net::{IpAddr, Ipv4Addr, Ipv6Addr},
+};
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+pub enum RouteError {
+    #[error("Invalid gateway")]
+    InvalidGateway,
+
+    #[error("Invalid destination")]
+    InvalidDestination,
+
+    #[error("Invalid prefix length")]
+    InvalidPrefixLength,
+
+    #[error("Missing gateway")]
+    MissingGateway,
+
+    #[error("Missing destination")]
+    MissingDestination,
+}
+
+pub struct Route {
+    pub protocol: RouteProtocol,
+    pub destination: IpNet,
+    pub gateway: IpAddr,
+}
+
+impl fmt::Debug for Route {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{} via {}", self.destination, self.gateway)
+    }
+}
+
+impl Route {
+    pub fn from_message(message: RouteMessage) -> Result<Self, RouteError> {
+        let mut gateway = None;
+        let mut destination = None;
+
+        for nla in message.attributes.iter() {
+            if let RouteAttribute::Gateway(ip) = nla {
+                gateway = match ip {
+                    RouteAddress::Inet(ip) => Some(IpAddr::V4(*ip)),
+                    RouteAddress::Inet6(ip) => Some(IpAddr::V6(*ip)),
+                    _ => return Err(RouteError::InvalidGateway),
+                };
+            }
+
+            if let RouteAttribute::Destination(ref ip) = nla {
+                destination = match ip {
+                    RouteAddress::Inet(ip) => Some(
+                        IpNet::new(IpAddr::V4(*ip), message.header.destination_prefix_length)
+                            .map_err(|_| RouteError::InvalidPrefixLength)?,
+                    ),
+                    RouteAddress::Inet6(ip) => Some(
+                        IpNet::new(IpAddr::V6(*ip), message.header.destination_prefix_length)
+                            .map_err(|_| RouteError::InvalidPrefixLength)?,
+                    ),
+                    _ => return Err(RouteError::InvalidDestination),
+                };
+            }
+        }
+
+        let gateway = match gateway {
+            Some(gateway) => gateway,
+            None => return Err(RouteError::MissingGateway),
+        };
+
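+        // A message without a destination attribute describes the default
+        // route, so fall back to 0.0.0.0/0 or ::/0 for the address family.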
+        let destination = match destination {
+            Some(destination) => destination,
+            None => match message.header.address_family {
+                AddressFamily::Inet => IpNet::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0)
+                    .map_err(|_| RouteError::InvalidPrefixLength)?,
+                AddressFamily::Inet6 => IpNet::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0)
+                    .map_err(|_| RouteError::InvalidPrefixLength)?,
+                _ => return Err(RouteError::InvalidDestination),
+            },
+        };
+
+        Ok(Route {
+            protocol: message.header.protocol,
+            destination,
+            gateway,
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use netlink_packet_route::AddressFamily;
+    use std::net::Ipv4Addr;
+
+    #[tokio::test]
+    async fn test_default_ipv4_route() {
+        let mut message = RouteMessage::default();
+
+        message.header.address_family = AddressFamily::Inet;
+        message.header.destination_prefix_length = 0;
+        message.header.protocol = RouteProtocol::Static;
+        message
+            .attributes
+            .push(RouteAttribute::Gateway(RouteAddress::Inet(Ipv4Addr::new(
+                192, 168, 1, 1,
+            ))));
+
+        let route = Route::from_message(message).unwrap();
+
+        assert_eq!(route.protocol, RouteProtocol::Static);
+        assert_eq!(
+            route.destination,
+            IpNet::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0).unwrap()
+        );
+        assert_eq!(route.gateway, IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)));
+    }
+
+    #[tokio::test]
+    async fn test_default_ipv6_route() {
+        let mut message = RouteMessage::default();
+
+        message.header.address_family = AddressFamily::Inet6;
+        message.header.destination_prefix_length = 0;
+        message.header.protocol = RouteProtocol::Static;
+        message
+            .attributes
+            .push(RouteAttribute::Gateway(RouteAddress::Inet6(Ipv6Addr::new(
+                0, 0, 0, 0, 0, 0, 0, 1,
+            ))));
+
+        let route = Route::from_message(message).unwrap();
+
+        assert_eq!(route.protocol, RouteProtocol::Static);
+        assert_eq!(
+            route.destination,
+            IpNet::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0).unwrap()
+        );
+        assert_eq!(
+            route.gateway,
+            IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))
+        );
+    }
+}
diff --git a/crates/rustainers/src/lib.rs b/crates/rustainers/src/lib.rs
index f5d224f..65f1505 100644
--- a/crates/rustainers/src/lib.rs
+++ b/crates/rustainers/src/lib.rs
@@ -214,7 +214,7 @@
 
     #[tokio::test]
     async fn test_container_exec() -> Result<(), DockerContainerGuardError> {
-        let guard = DockerContainerGuard::spawn("alpine:latest").await?;
+        let guard = DockerContainerGuard::spawn("registry.atmosphere.dev/docker.io/library/alpine:latest").await?;
 
         let output = guard.exec(vec!["echo", "hello from container"]).await?;
         assert!(output.contains("hello from container"));
@@ -224,7 +224,7 @@
 
     #[tokio::test]
     async fn test_container_read_file() -> Result<(), DockerContainerGuardError> {
-        let guard = DockerContainerGuard::spawn("alpine:latest").await?;
+        let guard = DockerContainerGuard::spawn("registry.atmosphere.dev/docker.io/library/alpine:latest").await?;
 
         let file = guard.read_file("/usr/lib/os-release").await?;
         assert!(file.len() > 0);
@@ -234,7 +234,7 @@
 
     #[tokio::test]
     async fn test_container_get_user() -> Result<(), DockerContainerGuardError> {
-        let guard = DockerContainerGuard::spawn("alpine:latest").await?;
+        let guard = DockerContainerGuard::spawn("registry.atmosphere.dev/docker.io/library/alpine:latest").await?;
 
         let user = guard.get_user("root").await?;
         assert_eq!(user.name, "root");
diff --git a/docker-bake.hcl b/docker-bake.hcl
index 14746b6..371ecbb 100644
--- a/docker-bake.hcl
+++ b/docker-bake.hcl
@@ -15,6 +15,17 @@
     }
 }
 
+target "ovsinit" {
+    context = "images/ovsinit"
+    platforms = ["linux/amd64", "linux/arm64"]
+
+    contexts = {
+        "runtime" = "docker-image://docker.io/library/debian:bullseye-slim"
+        "rust" = "docker-image://docker.io/library/rust:1.84-bullseye"
+        "src" = "./crates/ovsinit"
+    }
+}
+
 target "ubuntu-cloud-archive" {
     context = "images/ubuntu-cloud-archive"
     platforms = ["linux/amd64", "linux/arm64"]
@@ -161,6 +172,7 @@
     contexts = {
         "golang" = "docker-image://docker.io/library/golang:1.20"
         "openvswitch" = "target:openvswitch"
+        "ovsinit" = "target:ovsinit"
     }
 
     args = {
@@ -217,8 +229,9 @@
     }
 
     contexts = {
-        "openstack-venv-builder" = "target:openstack-venv-builder"
         "openstack-python-runtime" = "target:openstack-python-runtime"
+        "openstack-venv-builder" = "target:openstack-venv-builder"
+        "ovsinit" = "target:ovsinit"
     }
 
     tags = [
diff --git a/images/neutron/Dockerfile b/images/neutron/Dockerfile
index a7b0796..a64ceb6 100644
--- a/images/neutron/Dockerfile
+++ b/images/neutron/Dockerfile
@@ -36,4 +36,5 @@
 apt-get clean
 rm -rf /var/lib/apt/lists/*
 EOF
+COPY --from=ovsinit /usr/local/bin/ovsinit /usr/local/bin/ovsinit
 COPY --from=build --link /var/lib/openstack /var/lib/openstack
diff --git a/images/ovn/Dockerfile b/images/ovn/Dockerfile
index 7a9bd06..961d611 100644
--- a/images/ovn/Dockerfile
+++ b/images/ovn/Dockerfile
@@ -34,7 +34,7 @@
 COPY --from=ovn-kubernetes --link /src/dist/images/ovndb-raft-functions.sh /root/ovndb-raft-functions.sh
 COPY --from=ovn-kubernetes --link /src/dist/images/ovnkube.sh /root/ovnkube.sh
 COPY --from=ovn-kubernetes --link /usr/bin/ovn-kube-util /usr/bin/ovn-kube-util
-
+COPY --from=ovsinit /usr/local/bin/ovsinit /usr/local/bin/ovsinit
 RUN <<EOF bash -xe
     usermod -u 42424 openvswitch
     mkdir -p  /var/log/ovn /var/lib/ovn /var/run/ovn
diff --git a/images/ovsinit/Dockerfile b/images/ovsinit/Dockerfile
new file mode 100644
index 0000000..edb2201
--- /dev/null
+++ b/images/ovsinit/Dockerfile
@@ -0,0 +1,11 @@
+# SPDX-FileCopyrightText: © 2025 VEXXHOST, Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+# Atmosphere-Rebuild-Time: 2025-02-16T12:56:04Z
+
+FROM rust AS builder
+WORKDIR /src
+COPY --from=src / /src
+RUN cargo install --path .
+
+FROM runtime
+COPY --from=builder /usr/local/cargo/bin/ovsinit /usr/local/bin/ovsinit
diff --git a/releasenotes/notes/add-ovsinit-56990eaaf93c6f9d.yaml b/releasenotes/notes/add-ovsinit-56990eaaf93c6f9d.yaml
new file mode 100644
index 0000000..5482a80
--- /dev/null
+++ b/releasenotes/notes/add-ovsinit-56990eaaf93c6f9d.yaml
@@ -0,0 +1,9 @@
+---
+features:
+  - Introduced a new Rust-based binary ``ovsinit`` that handles the migration
+    of IP addresses from a physical interface to an OVS bridge during the
+    Neutron or OVN initialization process.
+fixes:
+  - During the Neutron or OVN initialization process, the routes assigned to
+    the physical interface are now moved to the OVS bridge to maintain the
+    connectivity of the host.