chore(openstack): Sync charts (#688)

diff --git a/charts/openvswitch/Chart.yaml b/charts/openvswitch/Chart.yaml
index abce993..3d58163 100644
--- a/charts/openvswitch/Chart.yaml
+++ b/charts/openvswitch/Chart.yaml
@@ -9,4 +9,4 @@
 sources:
 - https://github.com/openvswitch/ovs
 - https://opendev.org/openstack/openstack-helm
-version: 0.1.15
+version: 0.1.19
diff --git a/charts/openvswitch/charts/helm-toolkit/Chart.yaml b/charts/openvswitch/charts/helm-toolkit/Chart.yaml
index e6aec81..1ee9758 100644
--- a/charts/openvswitch/charts/helm-toolkit/Chart.yaml
+++ b/charts/openvswitch/charts/helm-toolkit/Chart.yaml
@@ -9,4 +9,4 @@
 sources:
 - https://opendev.org/openstack/openstack-helm-infra
 - https://opendev.org/openstack/openstack-helm
-version: 0.2.54
+version: 0.2.55
diff --git a/charts/openvswitch/charts/helm-toolkit/requirements.lock b/charts/openvswitch/charts/helm-toolkit/requirements.lock
index 9792284..be213ec 100644
--- a/charts/openvswitch/charts/helm-toolkit/requirements.lock
+++ b/charts/openvswitch/charts/helm-toolkit/requirements.lock
@@ -1,3 +1,3 @@
 dependencies: []
 digest: sha256:643d5437104296e21d906ecb15b2c96ad278f20cfc4af53b12bb6069bd853726
-generated: "2023-08-09T12:11:24.478182096Z"
+generated: "2023-11-02T02:34:47.093652653Z"
diff --git a/charts/openvswitch/charts/helm-toolkit/templates/manifests/_ingress.tpl b/charts/openvswitch/charts/helm-toolkit/templates/manifests/_ingress.tpl
index 4c476b2..972e429 100644
--- a/charts/openvswitch/charts/helm-toolkit/templates/manifests/_ingress.tpl
+++ b/charts/openvswitch/charts/helm-toolkit/templates/manifests/_ingress.tpl
@@ -67,10 +67,10 @@
       metadata:
         name: barbican
         annotations:
-          kubernetes.io/ingress.class: "nginx"
           nginx.ingress.kubernetes.io/rewrite-target: /
 
       spec:
+        ingressClassName: "nginx"
         rules:
           - host: barbican
             http:
@@ -108,10 +108,10 @@
       metadata:
         name: barbican-namespace-fqdn
         annotations:
-          kubernetes.io/ingress.class: "nginx"
           nginx.ingress.kubernetes.io/rewrite-target: /
 
       spec:
+        ingressClassName: "nginx"
         tls:
           - secretName: barbican-tls-public
             hosts:
@@ -133,10 +133,10 @@
       metadata:
         name: barbican-cluster-fqdn
         annotations:
-          kubernetes.io/ingress.class: "nginx-cluster"
           nginx.ingress.kubernetes.io/rewrite-target: /
 
       spec:
+        ingressClassName: "nginx-cluster"
         tls:
           - secretName: barbican-tls-public
             hosts:
@@ -202,10 +202,10 @@
       metadata:
         name: barbican
         annotations:
-          kubernetes.io/ingress.class: "nginx"
           nginx.ingress.kubernetes.io/rewrite-target: /
 
       spec:
+        ingressClassName: "nginx"
         tls:
           - secretName: barbican-tls-public
             hosts:
@@ -302,12 +302,12 @@
       metadata:
         name: barbican
         annotations:
-          kubernetes.io/ingress.class: "nginx"
           cert-manager.io/issuer: ca-issuer
           certmanager.k8s.io/issuer: ca-issuer
           nginx.ingress.kubernetes.io/backend-protocol: https
           nginx.ingress.kubernetes.io/secure-backends: "true"
       spec:
+        ingressClassName: "nginx"
         tls:
           - secretName: barbican-tls-public-certmanager
             hosts:
@@ -404,12 +404,12 @@
       metadata:
         name: barbican
         annotations:
-          kubernetes.io/ingress.class: "nginx"
           cert-manager.io/cluster-issuer: ca-issuer
           certmanager.k8s.io/cluster-issuer: ca-issuer
           nginx.ingress.kubernetes.io/backend-protocol: https
           nginx.ingress.kubernetes.io/secure-backends: "true"
       spec:
+        ingressClassName: "nginx"
         tls:
           - secretName: barbican-tls-public-certmanager
             hosts:
@@ -488,10 +488,10 @@
       metadata:
         name: grafana
         annotations:
-          kubernetes.io/ingress.class: "nginx"
           nginx.ingress.kubernetes.io/rewrite-target: /
 
       spec:
+        ingressClassName: "nginx"
         rules:
           - host: grafana
             http:
@@ -529,10 +529,10 @@
       metadata:
         name: grafana-namespace-fqdn
         annotations:
-          kubernetes.io/ingress.class: "nginx"
           nginx.ingress.kubernetes.io/rewrite-target: /
 
       spec:
+        ingressClassName: "nginx"
         tls:
           - secretName: grafana-tls-public
             hosts:
@@ -565,10 +565,10 @@
       metadata:
         name: grafana-cluster-fqdn
         annotations:
-          kubernetes.io/ingress.class: "nginx-cluster"
           nginx.ingress.kubernetes.io/rewrite-target: /
 
       spec:
+        ingressClassName: "nginx-cluster"
         tls:
           - secretName: grafana-tls-public
             hosts:
@@ -639,7 +639,6 @@
 metadata:
   name: {{ $ingressName }}
   annotations:
-    kubernetes.io/ingress.class: {{ index $envAll.Values.network $backendService "ingress" "classes" "namespace" | quote }}
 {{- if $certIssuer }}
     cert-manager.io/{{ $certIssuerType }}: {{ $certIssuer }}
     certmanager.k8s.io/{{ $certIssuerType }}: {{ $certIssuer }}
@@ -650,6 +649,7 @@
 {{- end }}
 {{ toYaml (index $envAll.Values.network $backendService "ingress" "annotations") | indent 4 }}
 spec:
+  ingressClassName: {{ index $envAll.Values.network $backendService "ingress" "classes" "namespace" | quote }}
 {{- $host := index $envAll.Values.endpoints ( $backendServiceType | replace "-" "_" ) "hosts" }}
 {{- if $certIssuer }}
 {{- $secretName := index $envAll.Values.secrets "tls" ( $backendServiceType | replace "-" "_" ) $backendService $endpoint }}
@@ -695,9 +695,9 @@
 metadata:
   name: {{ printf "%s-%s-%s" $ingressName $ingressController "fqdn" }}
   annotations:
-    kubernetes.io/ingress.class: {{ index $envAll.Values.network $backendService "ingress" "classes" $ingressController | quote }}
 {{ toYaml (index $envAll.Values.network $backendService "ingress" "annotations") | indent 4 }}
 spec:
+  ingressClassName: {{ index $envAll.Values.network $backendService "ingress" "classes" $ingressController | quote }}
 {{- $host := index $envAll.Values.endpoints ( $backendServiceType | replace "-" "_" ) "host_fqdn_override" }}
 {{- if hasKey $host $endpoint }}
 {{- $endpointHost := index $host $endpoint }}
diff --git a/charts/openvswitch/requirements.lock b/charts/openvswitch/requirements.lock
index c3f6c8e..b9613ad 100644
--- a/charts/openvswitch/requirements.lock
+++ b/charts/openvswitch/requirements.lock
@@ -1,6 +1,6 @@
 dependencies:
 - name: helm-toolkit
   repository: file://../helm-toolkit
-  version: 0.2.54
-digest: sha256:dd4dba67518d3c1ed79bf1663fbb9379b51c4a5d985f8a4884f4e9d168ab940d
-generated: "2023-08-09T12:11:40.236995086Z"
+  version: 0.2.55
+digest: sha256:fe6ad5ce9983b4435c76e650bb0050609eb56467a4429aa844e50d954d2e91c8
+generated: "2023-11-02T02:35:07.681193516Z"
diff --git a/charts/openvswitch/templates/bin/_openvswitch-vswitchd.sh.tpl b/charts/openvswitch/templates/bin/_openvswitch-vswitchd.sh.tpl
index f85d0c7..dad613c 100644
--- a/charts/openvswitch/templates/bin/_openvswitch-vswitchd.sh.tpl
+++ b/charts/openvswitch/templates/bin/_openvswitch-vswitchd.sh.tpl
@@ -77,19 +77,38 @@
 
   # No need to create the cgroup if lcore_mask or pmd_cpu_mask is not set.
   if [[ -n ${PMD_CPU_MASK} || -n ${LCORE_MASK} ]]; then
-      # Setup Cgroups to use when breaking out of Kubernetes defined groups
-      mkdir -p /sys/fs/cgroup/cpuset/osh-openvswitch
-      target_mems="/sys/fs/cgroup/cpuset/osh-openvswitch/cpuset.mems"
-      target_cpus="/sys/fs/cgroup/cpuset/osh-openvswitch/cpuset.cpus"
+      if [ "$(stat -fc %T /sys/fs/cgroup/)" = "cgroup2fs" ]; then
+          # Setup Cgroups to use when breaking out of Kubernetes defined groups
+          mkdir -p /sys/fs/cgroup/osh-openvswitch
+          target_mems="/sys/fs/cgroup/osh-openvswitch/cpuset.mems"
+          target_cpus="/sys/fs/cgroup/osh-openvswitch/cpuset.cpus"
+          touch $target_mems
+          touch $target_cpus
 
-      # Ensure the write target for the for cpuset.mem for the pod exists
-      if [[ -f "$target_mems" && -f "$target_cpus" ]]; then
-        # Write cpuset.mem and cpuset.cpus for new cgroup and add current task to new cgroup
-        cat /sys/fs/cgroup/cpuset/cpuset.mems > "$target_mems"
-        cat /sys/fs/cgroup/cpuset/cpuset.cpus > "$target_cpus"
-        echo $$ > /sys/fs/cgroup/cpuset/osh-openvswitch/tasks
+          # Ensure the write targets for cpuset.mems and cpuset.cpus for the pod exist
+          if [[ -f "$target_mems" && -f "$target_cpus" ]]; then
+            # Write cpuset.mem and cpuset.cpus for new cgroup and add current task to new cgroup
+            cat /sys/fs/cgroup/cpuset.mems.effective > "$target_mems"
+            cat /sys/fs/cgroup/cpuset.cpus.effective > "$target_cpus"
+            echo $$ > /sys/fs/cgroup/osh-openvswitch/cgroup.procs
+          else
+            echo "ERROR: Could not find write target for either cpuset.mems: $target_mems or cpuset.cpus: $target_cpus"
+          fi
       else
-        echo "ERROR: Could not find write target for either cpuset.mems: $target_mems or cpuset.cpus: $target_cpus"
+          # Setup Cgroups to use when breaking out of Kubernetes defined groups
+          mkdir -p /sys/fs/cgroup/cpuset/osh-openvswitch
+          target_mems="/sys/fs/cgroup/cpuset/osh-openvswitch/cpuset.mems"
+          target_cpus="/sys/fs/cgroup/cpuset/osh-openvswitch/cpuset.cpus"
+
+          # Ensure the write targets for cpuset.mems and cpuset.cpus for the pod exist
+          if [[ -f "$target_mems" && -f "$target_cpus" ]]; then
+            # Write cpuset.mem and cpuset.cpus for new cgroup and add current task to new cgroup
+            cat /sys/fs/cgroup/cpuset/cpuset.mems > "$target_mems"
+            cat /sys/fs/cgroup/cpuset/cpuset.cpus > "$target_cpus"
+            echo $$ > /sys/fs/cgroup/cpuset/osh-openvswitch/tasks
+          else
+            echo "ERROR: Could not find write target for either cpuset.mems: $target_mems or cpuset.cpus: $target_cpus"
+          fi
       fi
   fi
 {{- end }}
@@ -107,13 +126,37 @@
   ovs-appctl -T1 -t /run/openvswitch/ovs-vswitchd.${PID}.ctl exit
 }
 
+find_latest_ctl_file() {
+    latest_file=""
+    latest_file=$(ls -lt /run/openvswitch/*.ctl | awk 'NR==1 {if ($3 == "{{ .Values.conf.poststart.rootUser }}") print $NF}')
+
+    echo "$latest_file"
+}
+
 function poststart () {
   # This enables the usage of 'ovs-appctl' from neutron-ovs-agent pod.
+
+  # Wait for potential new ctl file before continuing
+  timeout={{ .Values.conf.poststart.timeout }}
+  start_time=$(date +%s)
+  while true; do
+      latest_ctl_file=$(find_latest_ctl_file)
+      if [ -n "$latest_ctl_file" ]; then
+          break
+      fi
+      current_time=$(date +%s)
+      if (( current_time - start_time >= timeout )); then
+          break
+      fi
+      sleep 1
+  done
+
   until [ -f $OVS_PID ]
   do
       echo "Waiting for file $OVS_PID"
       sleep 1
   done
+
   PID=$(cat $OVS_PID)
   OVS_CTL=/run/openvswitch/ovs-vswitchd.${PID}.ctl
 
@@ -123,6 +166,11 @@
       sleep 1
   done
   chown {{ .Values.pod.user.nova.uid }}.{{ .Values.pod.user.nova.uid }} ${OVS_CTL}
+
+{{- if .Values.conf.poststart.extraCommand }}
+{{ .Values.conf.poststart.extraCommand | indent 2 }}
+{{- end }}
+
 }
 
 $COMMAND
diff --git a/charts/openvswitch/templates/daemonset.yaml b/charts/openvswitch/templates/daemonset.yaml
index 244ffb8..7984023 100644
--- a/charts/openvswitch/templates/daemonset.yaml
+++ b/charts/openvswitch/templates/daemonset.yaml
@@ -56,6 +56,9 @@
 
 {{- if .Values.manifests.daemonset }}
 {{- $envAll := . }}
+
+{{- $serviceAccountName := "openvswitch-server" }}
+{{ tuple $envAll "vswitchd" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
 ---
 apiVersion: apps/v1
 kind: DaemonSet
@@ -80,6 +83,7 @@
 {{ dict "envAll" $envAll "podName" "openvswitch" "containerNames" (list "openvswitch-db" "openvswitch-db-perms" "openvswitch-vswitchd" "openvswitch-vswitchd-modules" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
       shareProcessNamespace: true
+      serviceAccountName: {{ $serviceAccountName }}
 {{ dict "envAll" $envAll "application" "ovs" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
       nodeSelector:
         {{ .Values.labels.ovs.node_selector_key }}: {{ .Values.labels.ovs.node_selector_value }}
diff --git a/charts/openvswitch/values.yaml b/charts/openvswitch/values.yaml
index 4c6971c..f967c75 100644
--- a/charts/openvswitch/values.yaml
+++ b/charts/openvswitch/values.yaml
@@ -208,6 +208,10 @@
   secret_registry: true
 
 conf:
+  poststart:
+    timeout: 5
+    rootUser: "root"
+    extraCommand: null
   openvswitch_db_server:
     ptcp_port: null
   ovs_other_config: