chore: bundle more Helm charts
diff --git a/charts/cilium/templates/NOTES.txt b/charts/cilium/templates/NOTES.txt
new file mode 100644
index 0000000..3024efa
--- /dev/null
+++ b/charts/cilium/templates/NOTES.txt
@@ -0,0 +1,20 @@
+{{- if (and (.Values.preflight.enabled) (not (.Values.agent)) (not (.Values.operator.enabled))) }}
+    You have successfully run the preflight check.
+    Now make sure that the number of READY pods matches the number of running Cilium pods.
+    Then make sure the Cilium preflight deployment is also marked READY 1/1.
+    If you have any issues, please refer to the CNP Validation section in the upgrade guide.
+{{- else if (and (.Values.hubble.enabled) (.Values.hubble.relay.enabled)) }}
+    {{- if (.Values.hubble.ui.enabled) }}
+        You have successfully installed {{ title .Chart.Name }} with Hubble Relay and Hubble UI.
+    {{- else }}
+        You have successfully installed {{ title .Chart.Name }} with Hubble Relay.
+    {{- end }}
+{{- else if .Values.hubble.enabled }}
+    You have successfully installed {{ title .Chart.Name }} with Hubble.
+{{- else }}
+    You have successfully installed {{ title .Chart.Name }}.
+{{- end }}
+
+Your release version is {{ .Chart.Version }}.
+
+For any further help, visit https://docs.cilium.io/en/v{{ (semver .Chart.Version).Major }}.{{ (semver .Chart.Version).Minor }}/gettinghelp
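+{{- /* Illustrative: for a Chart.Version of "1.10.4", the link above renders as https://docs.cilium.io/en/v1.10/gettinghelp */ -}}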
diff --git a/charts/cilium/templates/_clustermesh-apiserver-generate-certs-job-spec.tpl b/charts/cilium/templates/_clustermesh-apiserver-generate-certs-job-spec.tpl
new file mode 100644
index 0000000..62ce848
--- /dev/null
+++ b/charts/cilium/templates/_clustermesh-apiserver-generate-certs-job-spec.tpl
@@ -0,0 +1,48 @@
+{{- define "clustermesh-apiserver-generate-certs.job.spec" }}
+{{- $certValiditySecondsStr := printf "%ds" (mul .Values.clustermesh.apiserver.tls.auto.certValidityDuration 24 60 60) -}}
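+{{- /* Illustrative: a certValidityDuration of 3 (days) yields "259200s" (3 * 24 * 60 * 60). */ -}}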
+spec:
+  template:
+    metadata:
+      labels:
+        k8s-app: clustermesh-apiserver-generate-certs
+        {{- with .Values.clustermesh.apiserver.podLabels }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
+    spec:
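+      # serviceAccount is the deprecated alias of serviceAccountName; both
+      # are set, presumably to keep older API servers that only honor the
+      # legacy field working.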
+      serviceAccount: {{ .Values.serviceAccounts.clustermeshcertgen.name | quote }}
+      serviceAccountName: {{ .Values.serviceAccounts.clustermeshcertgen.name | quote }}
+      containers:
+        - name: certgen
+          image: {{ .Values.certgen.image.repository }}:{{ .Values.certgen.image.tag }}
+          imagePullPolicy: {{ .Values.certgen.image.pullPolicy }}
+          command:
+            - "/usr/bin/cilium-certgen"
+          args:
+            - "--cilium-namespace={{ .Release.Namespace }}"
+            - "--clustermesh-apiserver-ca-cert-reuse-secret"
+            {{- if .Values.debug.enabled }}
+            - "--debug"
+            {{- end }}
+            {{- if not (and .Values.clustermesh.apiserver.tls.ca.cert .Values.clustermesh.apiserver.tls.ca.key) }}
+            - "--clustermesh-apiserver-ca-cert-generate"
+            {{- end }}
+            {{- if not (and .Values.clustermesh.apiserver.tls.server.cert .Values.clustermesh.apiserver.tls.server.key) }}
+            - "--clustermesh-apiserver-server-cert-generate"
+            {{- end }}
+            {{- if not (and .Values.clustermesh.apiserver.tls.admin.cert .Values.clustermesh.apiserver.tls.admin.key) }}
+            - "--clustermesh-apiserver-admin-cert-generate"
+            {{- end }}
+            {{- if not (and .Values.clustermesh.apiserver.tls.client.cert .Values.clustermesh.apiserver.tls.client.key) }}
+            - "--clustermesh-apiserver-client-cert-generate"
+            {{- end }}
+            {{- if not (and .Values.clustermesh.apiserver.tls.remote.cert .Values.clustermesh.apiserver.tls.remote.key) }}
+            - "--clustermesh-apiserver-remote-cert-generate"
+            {{- end }}
+      hostNetwork: true
+      {{- if .Values.imagePullSecrets }}
+      imagePullSecrets:
+      {{ toYaml .Values.imagePullSecrets | indent 6 }}
+      {{- end }}
+      restartPolicy: OnFailure
+  ttlSecondsAfterFinished: {{ .Values.certgen.ttlSecondsAfterFinished }}
+{{- end }}
diff --git a/charts/cilium/templates/_helpers.tpl b/charts/cilium/templates/_helpers.tpl
new file mode 100644
index 0000000..73de1df
--- /dev/null
+++ b/charts/cilium/templates/_helpers.tpl
@@ -0,0 +1,128 @@
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "cilium.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
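+
+{{/*
+Illustrative: a Chart.Name of "cilium" with a (hypothetical) Chart.Version
+of "1.9.0+build1" renders as "cilium-1.9.0_build1", since "+" is not a
+valid character in label values.
+*/}}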
+
+{{/*
+Return the appropriate apiVersion for ingress.
+*/}}
+{{- define "ingress.apiVersion" -}}
+{{- if semverCompare ">=1.16-0, <1.19-0" .Capabilities.KubeVersion.Version -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else if semverCompare "^1.19-0" .Capabilities.KubeVersion.Version -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
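+
+{{/*
+Illustrative: against a v1.21 cluster, "ingress.apiVersion" yields
+"networking.k8s.io/v1"; against v1.18 it yields "networking.k8s.io/v1beta1".
+*/}}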
+
+{{/*
+Return the appropriate backend for Hubble UI ingress.
+*/}}
+{{- define "ingress.paths" -}}
+{{ if semverCompare ">=1.4-0, <1.19-0" .Capabilities.KubeVersion.Version -}}
+backend:
+  serviceName: hubble-ui
+  servicePort: http
+{{- else if semverCompare "^1.19-0" .Capabilities.KubeVersion.Version -}}
+pathType: Prefix
+backend:
+  service:
+    name: hubble-ui
+    port:
+      name: http
+{{- end -}}
+{{- end -}}
+
+
+{{/*
+Generate TLS certificates for Hubble Server and Hubble Relay.
+
+Note: these 2 lines, that are repeated several times below, are a trick to
+ensure the CA certs are generated only once:
+
+    $ca := .ca | default (genCA "hubble-ca.cilium.io" (.Values.hubble.tls.auto.certValidityDuration | int))
+    $_ := set . "ca" $ca
+
+Please, don't try to "simplify" them as without this trick, every generated
+certificate would be signed by a different CA.
+*/}}
+{{- define "hubble.ca.gen-cert-only" }}
+{{- $ca := .ca | default (genCA "hubble-ca.cilium.io" (.Values.hubble.tls.auto.certValidityDuration | int)) -}}
+{{- $_ := set . "ca" $ca -}}
+ca.crt: |-
+{{ $ca.Cert | indent 2 -}}
+{{- end }}
+{{- define "hubble.server.gen-certs" }}
+{{- $ca := .ca | default (genCA "hubble-ca.cilium.io" (.Values.hubble.tls.auto.certValidityDuration | int)) -}}
+{{- $_ := set . "ca" $ca -}}
+{{- $cn := list "*" (.Values.cluster.name | replace "." "-") "hubble-grpc.cilium.io" | join "." }}
+{{- $cert := genSignedCert $cn nil (list $cn) (.Values.hubble.tls.auto.certValidityDuration | int) $ca -}}
+ca.crt: {{ $ca.Cert | b64enc }}
+tls.crt: {{ $cert.Cert | b64enc }}
+tls.key: {{ $cert.Key | b64enc }}
+{{- end }}
+{{- define "hubble.relay.gen-certs" }}
+{{- $ca := .ca | default (genCA "hubble-ca.cilium.io" (.Values.hubble.tls.auto.certValidityDuration | int)) -}}
+{{- $_ := set . "ca" $ca -}}
+{{- $cert := genSignedCert "*.hubble-relay.cilium.io" nil (list "*.hubble-relay.cilium.io") (.Values.hubble.tls.auto.certValidityDuration | int) $ca -}}
+ca.crt: {{ $ca.Cert | b64enc }}
+tls.crt: {{ $cert.Cert | b64enc }}
+tls.key: {{ $cert.Key | b64enc }}
+{{- end }}
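+
+{{/*
+Illustrative (hypothetical) caller: a Secret template could render
+
+    data:
+      {{ include "hubble.server.gen-certs" . | nindent 4 }}
+
+and, because the CA is cached on the root context as described above, a
+later include of "hubble.relay.gen-certs" with the same context produces
+certificates signed by the same CA.
+*/}}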
+
+{{/* Generate CA "vmca" for clustermesh-apiserver in the global dict. */}}
+{{- define "clustermesh.apiserver.generate.ca" }}
+{{- $ca := .cmca | default (genCA "clustermesh-apiserver-ca.cilium.io" (.Values.clustermesh.apiserver.tls.auto.certValidityDuration | int)) -}}
+{{- $_ := set . "cmca" $ca -}}
+{{- end }}
+
+{{/* Generate CA certificate clustermesh-apiserver. */}}
+{{- define "clustermesh.apiserver.ca.gen-cert" }}
+{{- template "clustermesh.apiserver.generate.ca" . -}}
+ca.crt: {{ .cmca.Cert | b64enc }}
+ca.key: {{ .cmca.Key | b64enc }}
+{{- end }}
+
+{{/* Generate server certificate clustermesh-apiserver. */}}
+{{- define "clustermesh.apiserver.server.gen-cert" }}
+{{- template "clustermesh.apiserver.generate.ca" . }}
+{{- $CN := "clustermesh-apiserver.cilium.io" }}
+{{- $IPs := (list "127.0.0.1") }}
+{{- $SANs := (list $CN "*.mesh.cilium.io") }}
+{{- $cert := genSignedCert $CN $IPs $SANs (.Values.clustermesh.apiserver.tls.auto.certValidityDuration | int) .cmca -}}
+ca.crt: {{ .cmca.Cert | b64enc }}
+tls.crt: {{ $cert.Cert | b64enc }}
+tls.key: {{ $cert.Key | b64enc }}
+{{- end }}
+
+{{/* Generate admin certificate clustermesh-apiserver. */}}
+{{- define "clustermesh.apiserver.admin.gen-cert" }}
+{{- template "clustermesh.apiserver.generate.ca" . }}
+{{- $CN := "root" }}
+{{- $SANs := (list "localhost") }}
+{{- $cert := genSignedCert $CN nil $SANs (.Values.clustermesh.apiserver.tls.auto.certValidityDuration | int) .cmca -}}
+ca.crt: {{ .cmca.Cert | b64enc }}
+tls.crt: {{ $cert.Cert | b64enc }}
+tls.key: {{ $cert.Key | b64enc }}
+{{- end }}
+
+{{/* Generate client certificate clustermesh-apiserver. */}}
+{{- define "clustermesh.apiserver.client.gen-cert" }}
+{{- template "clustermesh.apiserver.generate.ca" . }}
+{{- $CN := "externalworkload" }}
+{{- $cert := genSignedCert $CN nil nil (.Values.clustermesh.apiserver.tls.auto.certValidityDuration | int) .cmca -}}
+ca.crt: {{ .cmca.Cert | b64enc }}
+tls.crt: {{ $cert.Cert | b64enc }}
+tls.key: {{ $cert.Key | b64enc }}
+{{- end }}
+
+{{/* Generate remote certificate clustermesh-apiserver. */}}
+{{- define "clustermesh.apiserver.remote.gen-cert" }}
+{{- template "clustermesh.apiserver.generate.ca" . }}
+{{- $CN := "remote" }}
+{{- $cert := genSignedCert $CN nil nil (.Values.clustermesh.apiserver.tls.auto.certValidityDuration | int) .cmca -}}
+ca.crt: {{ .cmca.Cert | b64enc }}
+tls.crt: {{ $cert.Cert | b64enc }}
+tls.key: {{ $cert.Key | b64enc }}
+{{- end }}
diff --git a/charts/cilium/templates/_hubble-generate-certs-job-spec.tpl b/charts/cilium/templates/_hubble-generate-certs-job-spec.tpl
new file mode 100644
index 0000000..e708488
--- /dev/null
+++ b/charts/cilium/templates/_hubble-generate-certs-job-spec.tpl
@@ -0,0 +1,68 @@
+{{- define "hubble-generate-certs.job.spec" }}
+{{- $certValiditySecondsStr := printf "%ds" (mul .Values.hubble.tls.auto.certValidityDuration 24 60 60) -}}
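+{{- /* Illustrative: a certValidityDuration of 365 (days) yields "31536000s". */ -}}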
+spec:
+  template:
+    metadata:
+      labels:
+        k8s-app: hubble-generate-certs
+        {{- with .Values.certgen.podLabels }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
+    spec:
+      serviceAccount: {{ .Values.serviceAccounts.hubblecertgen.name | quote }}
+      serviceAccountName: {{ .Values.serviceAccounts.hubblecertgen.name | quote }}
+      containers:
+        - name: certgen
+          image: {{ .Values.certgen.image.repository }}:{{ .Values.certgen.image.tag }}
+          imagePullPolicy: {{ .Values.certgen.image.pullPolicy }}
+          command:
+            - "/usr/bin/cilium-certgen"
+          # Because this is executed as a job, we pass the values as command
+          # line args instead of via config map. This allows users to inspect
+          # the values used in past runs by inspecting the completed pod.
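+          # Illustrative: on a completed pod, something like
+          #   kubectl -n <release-namespace> describe pod -l k8s-app=hubble-generate-certs
+          # would show the exact flags used (label per the template above).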
+          args:
+            - "--cilium-namespace={{ .Release.Namespace }}"
+            - "--hubble-ca-reuse-secret=true"
+            - "--hubble-ca-secret-name=hubble-ca-secret"
+            {{- if .Values.debug.enabled }}
+            - "--debug"
+            {{- end }}
+            {{- $hubbleCAProvided := and .Values.hubble.tls.ca.cert .Values.hubble.tls.ca.key -}}
+            {{- if $hubbleCAProvided }}
+            - "--hubble-ca-generate=false"
+            {{- else }}
+            - "--hubble-ca-generate=true"
+            - "--hubble-ca-validity-duration={{ $certValiditySecondsStr }}"
+            - "--hubble-ca-config-map-create=true"
+            - "--hubble-ca-config-map-name=hubble-ca-cert"
+            {{- end }}
+            {{- if and .Values.hubble.tls.server.cert .Values.hubble.tls.server.key $hubbleCAProvided }}
+            - "--hubble-server-cert-generate=false"
+            {{- else }}
+            - "--hubble-server-cert-generate=true"
+            - "--hubble-server-cert-common-name={{ list "*" (.Values.cluster.name | replace "." "-") "hubble-grpc.cilium.io" | join "." }}"
+            - "--hubble-server-cert-validity-duration={{ $certValiditySecondsStr }}"
+            - "--hubble-server-cert-secret-name=hubble-server-certs"
+            {{- end }}
+            {{- if and .Values.hubble.relay.tls.client.cert .Values.hubble.relay.tls.client.key $hubbleCAProvided }}
+            - "--hubble-relay-client-cert-generate=false"
+            {{- else }}
+            - "--hubble-relay-client-cert-generate=true"
+            - "--hubble-relay-client-cert-validity-duration={{ $certValiditySecondsStr }}"
+            - "--hubble-relay-client-cert-secret-name=hubble-relay-client-certs"
+            {{- end }}
+            {{- if or (and .Values.hubble.relay.tls.server.cert .Values.hubble.relay.tls.server.key) (not .Values.hubble.relay.tls.server.enabled) }}
+            - "--hubble-relay-server-cert-generate=false"
+            {{- else if .Values.hubble.relay.tls.server.enabled }}
+            - "--hubble-relay-server-cert-generate=true"
+            - "--hubble-relay-server-cert-validity-duration={{ $certValiditySecondsStr }}"
+            - "--hubble-relay-server-cert-secret-name=hubble-relay-server-certs"
+            {{- end }}
+      hostNetwork: true
+      {{- if .Values.imagePullSecrets }}
+      imagePullSecrets:
+      {{ toYaml .Values.imagePullSecrets | indent 6 }}
+      {{- end }}
+      restartPolicy: OnFailure
+  ttlSecondsAfterFinished: {{ .Values.certgen.ttlSecondsAfterFinished }}
+{{- end }}
diff --git a/charts/cilium/templates/cilium-agent-clusterrole.yaml b/charts/cilium/templates/cilium-agent-clusterrole.yaml
new file mode 100644
index 0000000..e17d5de
--- /dev/null
+++ b/charts/cilium/templates/cilium-agent-clusterrole.yaml
@@ -0,0 +1,110 @@
+{{- if and (.Values.agent) (not .Values.preflight.enabled) }}
+{{- /*
+Keep this file in sync with cilium-preflight-clusterrole.yaml
+*/ -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: cilium
+rules:
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - networkpolicies
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - discovery.k8s.io
+  resources:
+  - endpointslices
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - namespaces
+  - services
+  - nodes
+  - endpoints
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - pods/finalizers
+  verbs:
+  - get
+  - list
+  - watch
+  - update
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - get
+  - list
+  - watch
+  - update
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  - nodes/status
+  verbs:
+  - patch
+- apiGroups:
+  - apiextensions.k8s.io
+  resources:
+  - customresourcedefinitions
+  verbs:
+  # Deprecated for removal in v1.10
+  - create
+  - list
+  - watch
+  - update
+
+  # This is used when validating policies in preflight. It will need to stay
+  # until we figure out how to avoid "get" inside the preflight; ideally it
+  # should then be removed.
+  - get
+{{- if eq "k8s" .Values.tls.secretsBackend }}
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - get
+{{- end }}
+- apiGroups:
+  - cilium.io
+  resources:
+  - ciliumnetworkpolicies
+  - ciliumnetworkpolicies/status
+  - ciliumnetworkpolicies/finalizers
+  - ciliumclusterwidenetworkpolicies
+  - ciliumclusterwidenetworkpolicies/status
+  - ciliumclusterwidenetworkpolicies/finalizers
+  - ciliumendpoints
+  - ciliumendpoints/status
+  - ciliumendpoints/finalizers
+  - ciliumnodes
+  - ciliumnodes/status
+  - ciliumnodes/finalizers
+  - ciliumidentities
+  - ciliumidentities/finalizers
+  - ciliumlocalredirectpolicies
+  - ciliumlocalredirectpolicies/status
+  - ciliumlocalredirectpolicies/finalizers
+  - ciliumegressnatpolicies
+  verbs:
+  - '*'
+{{- end }}
diff --git a/charts/cilium/templates/cilium-agent-clusterrolebinding.yaml b/charts/cilium/templates/cilium-agent-clusterrolebinding.yaml
new file mode 100644
index 0000000..6a8b660
--- /dev/null
+++ b/charts/cilium/templates/cilium-agent-clusterrolebinding.yaml
@@ -0,0 +1,14 @@
+{{- if and (.Values.agent) (not .Values.preflight.enabled) .Values.serviceAccounts.cilium.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: cilium
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cilium
+subjects:
+- kind: ServiceAccount
+  name: {{ .Values.serviceAccounts.cilium.name | quote }}
+  namespace: {{ .Release.Namespace }}
+{{- end }}
diff --git a/charts/cilium/templates/cilium-agent-daemonset.yaml b/charts/cilium/templates/cilium-agent-daemonset.yaml
new file mode 100644
index 0000000..cc64ff9
--- /dev/null
+++ b/charts/cilium/templates/cilium-agent-daemonset.yaml
@@ -0,0 +1,620 @@
+{{- if and (.Values.agent) (not .Values.preflight.enabled) }}
+
+{{- /*  Default values with backwards compatibility */ -}}
+{{- $defaultKeepDeprecatedProbes := "true" -}}
+
+{{- /* Default values when 1.8 was initially deployed */ -}}
+{{- if semverCompare ">=1.8" (default "1.8" .Values.upgradeCompatibility) -}}
+{{- $defaultKeepDeprecatedProbes = "false" -}}
+{{- end -}}
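+{{- /* Illustrative: installing with --set upgradeCompatibility=1.7 keeps the deprecated exec probes; fresh 1.8+ installs default to the HTTP probes below. */ -}}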
+
+{{- /* Workaround so that we can set the minimal k8s version that we support */ -}}
+{{- $k8sVersion := .Capabilities.KubeVersion.Version -}}
+{{- $k8sMajor := .Capabilities.KubeVersion.Major -}}
+{{- $k8sMinor := .Capabilities.KubeVersion.Minor -}}
+
+{{- if .Values.Capabilities -}}
+{{- if .Values.Capabilities.KubeVersion -}}
+{{- if .Values.Capabilities.KubeVersion.Version -}}
+{{- $k8sVersion = .Values.Capabilities.KubeVersion.Version -}}
+{{- if .Values.Capabilities.KubeVersion.Major -}}
+{{- $k8sMajor = toString (.Values.Capabilities.KubeVersion.Major) -}}
+{{- if .Values.Capabilities.KubeVersion.Minor -}}
+{{- $k8sMinor = toString (.Values.Capabilities.KubeVersion.Minor) -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
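+{{- /* Illustrative override, e.g. when running "helm template" offline:
+     --set Capabilities.KubeVersion.Version=v1.18.6 \
+     --set Capabilities.KubeVersion.Major=1 \
+     --set Capabilities.KubeVersion.Minor=18 */ -}}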
+
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  labels:
+    k8s-app: cilium
+{{- if .Values.keepDeprecatedLabels }}
+    kubernetes.io/cluster-service: "true"
+{{- if and (eq .Release.Namespace "kube-system" ) .Values.gke.enabled }}
+{{- fail "Invalid configuration: Installing Cilium on GKE with 'kubernetes.io/cluster-service' labels on 'kube-system' namespace causes Cilium DaemonSet to be removed by GKE. Either install Cilium on a different Namespace or install with '--set keepDeprecatedLabels=false'"}}
+{{- end }}
+{{- end }}
+  name: cilium
+  namespace: {{ .Release.Namespace }}
+spec:
+  selector:
+    matchLabels:
+      k8s-app: cilium
+{{- if .Values.keepDeprecatedLabels }}
+      kubernetes.io/cluster-service: "true"
+{{- end }}
+{{- with .Values.updateStrategy }}
+  updateStrategy:
+    {{- toYaml . | trim | nindent 4 }}
+{{- end }}
+  template:
+    metadata:
+      annotations:
+{{- if and .Values.prometheus.enabled (not .Values.prometheus.serviceMonitor.enabled) }}
+        prometheus.io/port: "{{ .Values.prometheus.port }}"
+        prometheus.io/scrape: "true"
+{{- end }}
+{{- if .Values.rollOutCiliumPods }}
+        # ensure pods roll when configmap updates
+        cilium.io/cilium-configmap-checksum: {{ include (print $.Template.BasePath "/cilium-configmap.yaml") . | sha256sum | quote }}
+{{- end }}
+        # This annotation plus the CriticalAddonsOnly toleration marks
+        # cilium as a critical pod in the cluster, which ensures cilium
+        # gets priority scheduling.
+        # https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
+        scheduler.alpha.kubernetes.io/critical-pod: ""
+{{- with .Values.podAnnotations }}
+        {{- toYaml . | nindent 8 }}
+{{- end }}
+      labels:
+        k8s-app: cilium
+{{- if .Values.keepDeprecatedLabels }}
+        kubernetes.io/cluster-service: "true"
+{{- end }}
+{{- with .Values.podLabels }}
+        {{- toYaml . | nindent 8 }}
+{{- end }}
+    spec:
+{{- if .Values.affinity }}
+      affinity:
+{{ toYaml .Values.affinity | indent 8 }}
+{{- end }}
+{{- if .Values.imagePullSecrets }}
+      imagePullSecrets:
+{{ toYaml .Values.imagePullSecrets | indent 6 }}
+{{- end }}
+      containers:
+{{- if .Values.sleepAfterInit }}
+      - command: [ "/bin/bash", "-c", "--" ]
+        args: [ "while true; do sleep 30; done;" ]
+        livenessProbe:
+          exec:
+            command:
+            - "true"
+        readinessProbe:
+          exec:
+            command:
+            - "true"
+{{- else }}
+      - args:
+        - --config-dir=/tmp/cilium/config-map
+{{- with .Values.extraArgs }}
+        {{- toYaml . | trim | nindent 8 }}
+{{- end }}
+        command:
+        - cilium-agent
+{{- if semverCompare ">=1.20-0" $k8sVersion }}
+        startupProbe:
+          httpGet:
+{{- if .Values.ipv4.enabled }}
+            host: '127.0.0.1'
+{{- else }}
+            host: '::1'
+{{- end }}
+            path: /healthz
+            port: {{ .Values.healthPort }}
+            scheme: HTTP
+            httpHeaders:
+            - name: "brief"
+              value: "true"
+          failureThreshold: {{ .Values.startupProbe.failureThreshold }}
+          periodSeconds: {{ .Values.startupProbe.periodSeconds }}
+          successThreshold: 1
+{{- end }}
+        livenessProbe:
+{{- if or .Values.keepDeprecatedProbes (eq $defaultKeepDeprecatedProbes "true") }}
+          exec:
+            command:
+            - cilium
+            - status
+            - --brief
+{{- else }}
+          httpGet:
+{{- if .Values.ipv4.enabled }}
+            host: '127.0.0.1'
+{{- else }}
+            host: '::1'
+{{- end }}
+            path: /healthz
+            port: {{ .Values.healthPort }}
+            scheme: HTTP
+            httpHeaders:
+            - name: "brief"
+              value: "true"
+{{- end }}
+          failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
+{{- if semverCompare "<1.20-0" $k8sVersion }}
+          # The initial delay for the liveness probe is intentionally large to
+          # avoid an endless kill & restart cycle in the event that the initial
+          # bootstrapping takes longer than expected.
+          # Starting from Kubernetes 1.20, we are using startupProbe instead
+          # of this field.
+          initialDelaySeconds: 120
+{{- end }}
+          periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
+          successThreshold: 1
+          timeoutSeconds: 5
+        readinessProbe:
+{{- if or .Values.keepDeprecatedProbes (eq $defaultKeepDeprecatedProbes "true") }}
+          exec:
+            command:
+            - cilium
+            - status
+            - --brief
+{{- else }}
+          httpGet:
+{{- if .Values.ipv4.enabled }}
+            host: '127.0.0.1'
+{{- else }}
+            host: '::1'
+{{- end }}
+            path: /healthz
+            port: {{ .Values.healthPort }}
+            scheme: HTTP
+            httpHeaders:
+            - name: "brief"
+              value: "true"
+{{- end }}
+          failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
+{{- if semverCompare "<1.20-0" $k8sVersion }}
+          initialDelaySeconds: 5
+{{- end }}
+          periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
+          successThreshold: 1
+          timeoutSeconds: 5
+{{- end }}
+        env:
+        - name: K8S_NODE_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
+        - name: CILIUM_K8S_NAMESPACE
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.namespace
+        - name: CILIUM_CLUSTERMESH_CONFIG
+          value: /var/lib/cilium/clustermesh/
+        - name: CILIUM_CNI_CHAINING_MODE
+          valueFrom:
+            configMapKeyRef:
+              key: cni-chaining-mode
+              name: cilium-config
+              optional: true
+        - name: CILIUM_CUSTOM_CNI_CONF
+          valueFrom:
+            configMapKeyRef:
+              key: custom-cni-conf
+              name: cilium-config
+              optional: true
+{{- if .Values.k8sServiceHost }}
+        - name: KUBERNETES_SERVICE_HOST
+          value: {{ .Values.k8sServiceHost | quote }}
+{{- end }}
+{{- if .Values.k8sServicePort }}
+        - name: KUBERNETES_SERVICE_PORT
+          value: {{ .Values.k8sServicePort | quote }}
+{{- end }}
+{{- with .Values.extraEnv }}
+{{ toYaml . | trim | indent 8 }}
+{{- end }}
+        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}{{ if .Values.image.useDigest }}@{{ .Values.image.digest }}{{ end }}"
+        imagePullPolicy: {{ .Values.image.pullPolicy }}
+{{- if .Values.cni.install }}
+        lifecycle:
+          postStart:
+            exec:
+              command:
+              - "/cni-install.sh"
+              - "--enable-debug={{- if .Values.debug.enabled }}true{{- else }}false{{- end }}"
+              - "--cni-exclusive={{- if .Values.cni.exclusive }}true{{- else }}false{{- end }}"
+          preStop:
+            exec:
+              command:
+              - /cni-uninstall.sh
+{{- end }}
+{{- if .Values.resources }}
+        resources:
+          {{- toYaml .Values.resources | trim | nindent 10 }}
+{{- end }}
+        name: cilium-agent
+{{- if or .Values.prometheus.enabled .Values.hubble.metrics.enabled }}
+        ports:
+{{- if .Values.prometheus.enabled }}
+        - containerPort: {{ .Values.prometheus.port }}
+          hostPort: {{ .Values.prometheus.port }}
+          name: prometheus
+          protocol: TCP
+{{- if .Values.proxy.prometheus.enabled }}
+        - containerPort: {{ .Values.proxy.prometheus.port }}
+          hostPort: {{ .Values.proxy.prometheus.port }}
+          name: envoy-metrics
+          protocol: TCP
+{{- end }}
+{{- end }}
+{{- if .Values.hubble.metrics.enabled }}
+        - containerPort: {{ .Values.hubble.metrics.port }}
+          hostPort: {{ .Values.hubble.metrics.port }}
+          name: hubble-metrics
+          protocol: TCP
+{{- end }}
+{{- end }}
+        securityContext:
+          capabilities:
+            add:
+            - NET_ADMIN
+            - SYS_MODULE
+          privileged: true
+        volumeMounts:
+{{- /* CRI-O already mounts the BPF filesystem */ -}}
+{{- if not (eq .Values.containerRuntime.integration "crio") }}
+        - mountPath: /sys/fs/bpf
+          mountPropagation: Bidirectional
+          name: bpf-maps
+{{- end }}
+{{- if not (contains "/run/cilium/cgroupv2" .Values.cgroup.hostRoot) }}
+        # Avoid a duplicate mount when hostRoot already points at the
+        # Cilium-managed /run/cilium/cgroupv2 path.
+        - mountPath: {{ .Values.cgroup.hostRoot }}
+          name: cilium-cgroup
+{{- end}}
+        - mountPath: /var/run/cilium
+          name: cilium-run
+        - mountPath: /host/opt/cni/bin
+          name: cni-path
+        - mountPath: {{ .Values.cni.hostConfDirMountPath }}
+          name: etc-cni-netd
+{{- if .Values.etcd.enabled }}
+        - mountPath: /var/lib/etcd-config
+          name: etcd-config-path
+          readOnly: true
+{{- if or .Values.etcd.ssl .Values.etcd.managed }}
+        - mountPath: /var/lib/etcd-secrets
+          name: etcd-secrets
+          readOnly: true
+{{- end }}
+{{- end }}
+        - mountPath: /var/lib/cilium/clustermesh
+          name: clustermesh-secrets
+          readOnly: true
+        - mountPath: /tmp/cilium/config-map
+          name: cilium-config-path
+          readOnly: true
+{{- if .Values.ipMasqAgent.enabled }}
+        - mountPath: /etc/config
+          name: ip-masq-agent
+          readOnly: true
+{{- end }}
+{{- if .Values.cni.configMap }}
+        - mountPath: {{ .Values.cni.confFileMountPath }}
+          name: cni-configuration
+          readOnly: true
+{{- end }}
+          # Needed to be able to load kernel modules
+        - mountPath: /lib/modules
+          name: lib-modules
+          readOnly: true
+        - mountPath: /run/xtables.lock
+          name: xtables-lock
+{{- if and ( .Values.encryption.enabled ) ( eq .Values.encryption.type "ipsec" ) }}
+  {{- if .Values.encryption.ipsec.mountPath }}
+        - mountPath: {{ .Values.encryption.ipsec.mountPath }}
+  {{- else }}
+        - mountPath: {{ .Values.encryption.mountPath }}
+  {{- end }}
+          name: cilium-ipsec-secrets
+{{- end }}
+{{- if .Values.kubeConfigPath }}
+        - mountPath: {{ .Values.kubeConfigPath }}
+          name: kube-config
+          readOnly: true
+{{- end }}
+{{- if .Values.bgp.enabled }}
+        - mountPath: /var/lib/cilium/bgp
+          name: bgp-config-path
+          readOnly: true
+{{- end }}
+{{- if and (.Values.hubble.enabled) (hasKey .Values.hubble "listenAddress") (.Values.hubble.tls.enabled) }}
+        - mountPath: /var/lib/cilium/tls/hubble
+          name: hubble-tls
+          readOnly: true
+{{- end }}
+{{- range .Values.extraHostPathMounts }}
+        - mountPath: {{ .mountPath }}
+          name: {{ .name }}
+          readOnly: {{ .readOnly }}
+{{- if .mountPropagation }}
+          mountPropagation: {{ .mountPropagation }}
+{{- end }}
+{{- end }}
+{{- if .Values.monitor.enabled }}
+      - name: cilium-monitor
+        command: ["cilium"]
+        args:
+        - monitor
+{{- range $type := .Values.monitor.eventTypes }}
+        - --type={{ $type }}
+{{- end }}
+        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}{{ if .Values.image.useDigest }}@{{ .Values.image.digest }}{{ end }}"
+        imagePullPolicy: {{ .Values.image.pullPolicy }}
+        volumeMounts:
+        - mountPath: /var/run/cilium
+          name: cilium-run
+{{- if .Values.monitor.resources }}
+        resources:
+          {{- toYaml .Values.monitor.resources | trim | nindent 10 }}
+{{- end }}
+{{- end }}
+{{- if (and .Values.etcd.managed (not .Values.etcd.k8sService)) }}
+      # In managed etcd mode, Cilium must be able to resolve the DNS name of
+      # the etcd service
+      dnsPolicy: ClusterFirstWithHostNet
+{{- end }}
+      hostNetwork: true
+      initContainers:
+{{- if .Values.cgroup.autoMount.enabled }}
+      # Required to mount cgroup2 filesystem on the underlying Kubernetes node.
+      # We use nsenter command with host's cgroup and mount namespaces enabled.
+      - name: mount-cgroup
+        env:
+          - name: CGROUP_ROOT
+            value: {{ .Values.cgroup.hostRoot }}
+          - name: BIN_PATH
+            value: {{ .Values.cni.binPath }}
+        command:
+          - sh
+          - -c
+          # The statically linked Go program binary is invoked to avoid any
+          # dependency on utilities like sh and mount that can be missing on certain
+          # distros installed on the underlying host. Copy the binary to the
+          # same directory where we install cilium cni plugin so that exec permissions
+          # are available.
+          - 'cp /usr/bin/cilium-mount /hostbin/cilium-mount && nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; rm /hostbin/cilium-mount'
+        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}{{ if .Values.image.useDigest }}@{{ .Values.image.digest }}{{ end }}"
+        imagePullPolicy: {{ .Values.image.pullPolicy }}
+        volumeMounts:
+          - mountPath: /hostproc
+            name: hostproc
+          - mountPath: /hostbin
+            name: cni-path
+        securityContext:
+          privileged: true
+{{- end }}
+{{- if and .Values.nodeinit.enabled (not (eq .Values.nodeinit.bootstrapFile "")) }}
+      - name: wait-for-node-init
+        command: ['sh', '-c', 'until stat {{ .Values.nodeinit.bootstrapFile }} > /dev/null 2>&1; do echo "Waiting on node-init to run..."; sleep 1; done']
+        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}{{ if .Values.image.useDigest }}@{{ .Values.image.digest }}{{ end }}"
+        imagePullPolicy: {{ .Values.image.pullPolicy }}
+        volumeMounts:
+        - mountPath: {{ .Values.nodeinit.bootstrapFile }}
+          name: cilium-bootstrap-file
+{{- end }}
+      - command:
+        - /init-container.sh
+        env:
+        - name: CILIUM_ALL_STATE
+          valueFrom:
+            configMapKeyRef:
+              key: clean-cilium-state
+              name: cilium-config
+              optional: true
+        - name: CILIUM_BPF_STATE
+          valueFrom:
+            configMapKeyRef:
+              key: clean-cilium-bpf-state
+              name: cilium-config
+              optional: true
+{{- if .Values.k8sServiceHost }}
+        - name: KUBERNETES_SERVICE_HOST
+          value: {{ .Values.k8sServiceHost | quote }}
+{{- end }}
+{{- if .Values.k8sServicePort }}
+        - name: KUBERNETES_SERVICE_PORT
+          value: {{ .Values.k8sServicePort | quote }}
+{{- end }}
+{{- if .Values.extraEnv }}
+{{ toYaml .Values.extraEnv | indent 8 }}
+{{- end }}
+        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}{{ if .Values.image.useDigest }}@{{ .Values.image.digest }}{{ end }}"
+        imagePullPolicy: {{ .Values.image.pullPolicy }}
+        name: clean-cilium-state
+        securityContext:
+          capabilities:
+            add:
+            - NET_ADMIN
+          privileged: true
+        volumeMounts:
+{{- /* CRI-O already mounts the BPF filesystem */ -}}
+{{- if not (eq .Values.containerRuntime.integration "crio") }}
+        - mountPath: /sys/fs/bpf
+          name: bpf-maps
+{{- end }}
+          # Required to mount cgroup filesystem from the host to cilium agent pod
+        - mountPath: {{ .Values.cgroup.hostRoot }}
+          name: cilium-cgroup
+          mountPropagation: HostToContainer
+        - mountPath: /var/run/cilium
+          name: cilium-run
+{{- if .Values.nodeinit.resources }}
+        resources:
+          {{- toYaml .Values.nodeinit.resources | trim | nindent 10 }}
+{{- end }}
+      restartPolicy: Always
+{{- if and (or (and (eq .Release.Namespace "kube-system") (gt $k8sMinor "10")) (ge $k8sMinor "17") (gt $k8sMajor "1")) .Values.enableCriticalPriorityClass }}
+      priorityClassName: system-node-critical
+{{- end }}
+      serviceAccount: {{ .Values.serviceAccounts.cilium.name | quote }}
+      serviceAccountName: {{ .Values.serviceAccounts.cilium.name | quote }}
+      terminationGracePeriodSeconds: 1
+{{- with .Values.tolerations }}
+      tolerations:
+      {{- toYaml . | trim | nindent 6 }}
+{{- end }}
+      volumes:
+        # To keep state between restarts / upgrades
+      - hostPath:
+          path: {{ .Values.daemon.runPath }}
+          type: DirectoryOrCreate
+        name: cilium-run
+{{- /* CRI-O already mounts the BPF filesystem */ -}}
+{{- if not (eq .Values.containerRuntime.integration "crio") }}
+        # To keep state between restarts / upgrades for bpf maps
+      - hostPath:
+          path: /sys/fs/bpf
+          type: DirectoryOrCreate
+        name: bpf-maps
+{{- end }}
+{{- if .Values.cgroup.autoMount.enabled }}
+      # To mount cgroup2 filesystem on the host
+      - hostPath:
+          path: /proc
+          type: Directory
+        name: hostproc
+{{- end }}
+      # To keep state between restarts / upgrades for cgroup2 filesystem
+      - hostPath:
+          path: {{ .Values.cgroup.hostRoot}}
+          type: DirectoryOrCreate
+        name: cilium-cgroup
+      # To install cilium cni plugin in the host
+      - hostPath:
+          path: {{ .Values.cni.binPath }}
+          type: DirectoryOrCreate
+        name: cni-path
+        # To install cilium cni configuration in the host
+      - hostPath:
+          path: {{ .Values.cni.confPath }}
+          type: DirectoryOrCreate
+        name: etc-cni-netd
+        # To be able to load kernel modules
+      - hostPath:
+          path: /lib/modules
+        name: lib-modules
+        # To access iptables concurrently with other processes (e.g. kube-proxy)
+      - hostPath:
+          path: /run/xtables.lock
+          type: FileOrCreate
+        name: xtables-lock
+{{- if .Values.kubeConfigPath }}
+      - hostPath:
+          path: {{ .Values.kubeConfigPath }}
+          type: FileOrCreate
+        name: kube-config
+{{- end }}
+{{- if and .Values.nodeinit.enabled (not (eq .Values.nodeinit.bootstrapFile "")) }}
+      - hostPath:
+          path: {{ .Values.nodeinit.bootstrapFile }}
+          type: FileOrCreate
+        name: cilium-bootstrap-file
+{{- end }}
+{{- range .Values.extraHostPathMounts }}
+      - name: {{ .name }}
+        hostPath:
+          path: {{ .hostPath }}
+{{- if .hostPathType }}
+          type: {{ .hostPathType }}
+{{- end }}
+{{- end }}
+{{- if .Values.etcd.enabled }}
+        # To read the etcd config stored in config maps
+      - configMap:
+          defaultMode: 420
+          items:
+          - key: etcd-config
+            path: etcd.config
+          name: cilium-config
+        name: etcd-config-path
+        # To read the k8s etcd secrets in case the user might want to use TLS
+{{- if or .Values.etcd.ssl .Values.etcd.managed }}
+      - name: etcd-secrets
+        secret:
+          defaultMode: 420
+          optional: true
+          secretName: cilium-etcd-secrets
+{{- end }}
+{{- end }}
+        # To read the clustermesh configuration
+      - name: clustermesh-secrets
+        secret:
+          defaultMode: 420
+          optional: true
+          secretName: cilium-clustermesh
+        # To read the configuration from the config map
+      - configMap:
+          name: cilium-config
+        name: cilium-config-path
+{{- if and .Values.ipMasqAgent .Values.ipMasqAgent.enabled }}
+      - configMap:
+          name: ip-masq-agent
+          optional: true
+          items:
+          - key: config
+            path: ip-masq-agent
+        name: ip-masq-agent
+{{- end }}
+{{- if and ( .Values.encryption.enabled ) ( eq .Values.encryption.type "ipsec" ) }}
+      - name: cilium-ipsec-secrets
+        secret:
+  {{- if .Values.encryption.ipsec.secretName }}
+          secretName: {{ .Values.encryption.ipsec.secretName }}
+  {{- else }}
+          secretName: {{ .Values.encryption.secretName }}
+  {{- end }}
+{{- end }}
+{{- if .Values.cni.configMap }}
+      - name: cni-configuration
+        configMap:
+          name: {{ .Values.cni.configMap }}
+{{- end }}
+{{- if .Values.bgp.enabled }}
+      - configMap:
+          name: bgp-config
+        name: bgp-config-path
+{{- end }}
+{{- if and .Values.hubble.enabled .Values.hubble.tls.enabled (hasKey .Values.hubble "listenAddress") }}
+      - name: hubble-tls
+        projected:
+          sources:
+          - secret:
+              name: hubble-server-certs
+              items:
+                - key: ca.crt
+                  path: client-ca.crt
+                - key: tls.crt
+                  path: server.crt
+                - key: tls.key
+                  path: server.key
+              optional: true
+{{- end }}
+{{- end }}
diff --git a/charts/cilium/templates/cilium-agent-service.yaml b/charts/cilium/templates/cilium-agent-service.yaml
new file mode 100644
index 0000000..9f1555e
--- /dev/null
+++ b/charts/cilium/templates/cilium-agent-service.yaml
@@ -0,0 +1,67 @@
+{{- if and (.Values.agent) (not .Values.preflight.enabled) (.Values.prometheus.enabled) (.Values.prometheus.serviceMonitor.enabled) }}
+kind: Service
+apiVersion: v1
+metadata:
+  name: cilium-agent
+  namespace: {{ .Release.Namespace }}
+  labels:
+    k8s-app: cilium
+spec:
+  clusterIP: None
+  type: ClusterIP
+  ports:
+  - name: metrics
+    port: {{ .Values.prometheus.port }}
+    protocol: TCP
+    targetPort: prometheus
+  - name: envoy-metrics
+    port: {{ .Values.proxy.prometheus.port }}
+    protocol: TCP
+    targetPort: envoy-metrics
+  selector:
+    k8s-app: cilium
+{{- else if and (.Values.prometheus.enabled) (.Values.proxy.prometheus.enabled) }}
+kind: Service
+apiVersion: v1
+metadata:
+  name: cilium-agent
+  namespace: {{ .Release.Namespace }}
+  annotations:
+    prometheus.io/scrape: 'true'
+    prometheus.io/port: {{ .Values.proxy.prometheus.port | quote }}
+  labels:
+    k8s-app: cilium
+spec:
+  clusterIP: None
+  type: ClusterIP
+  ports:
+  - name: envoy-metrics
+    port: {{ .Values.proxy.prometheus.port }}
+    protocol: TCP
+    targetPort: envoy-metrics
+  selector:
+    k8s-app: cilium
+{{- end }}
+{{- if .Values.hubble.metrics.enabled }}
+---
+kind: Service
+apiVersion: v1
+metadata:
+  name: hubble-metrics
+  namespace: {{ .Release.Namespace }}
+  annotations:
+    prometheus.io/scrape: 'true'
+    prometheus.io/port: {{ .Values.hubble.metrics.port | quote }}
+  labels:
+    k8s-app: hubble
+spec:
+  clusterIP: None
+  type: ClusterIP
+  ports:
+  - name: hubble-metrics
+    port: {{ .Values.hubble.metrics.port }}
+    protocol: TCP
+    targetPort: hubble-metrics
+  selector:
+    k8s-app: cilium
+{{- end }}
diff --git a/charts/cilium/templates/cilium-agent-serviceaccount.yaml b/charts/cilium/templates/cilium-agent-serviceaccount.yaml
new file mode 100644
index 0000000..ba0141f
--- /dev/null
+++ b/charts/cilium/templates/cilium-agent-serviceaccount.yaml
@@ -0,0 +1,11 @@
+{{- if and (.Values.agent) (.Values.serviceAccounts.cilium.create) (not .Values.preflight.enabled) }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ .Values.serviceAccounts.cilium.name | quote }}
+  namespace: {{ .Release.Namespace }}
+  {{- if .Values.serviceAccounts.cilium.annotations }}
+  annotations:
+{{ toYaml .Values.serviceAccounts.cilium.annotations | indent 4 }}
+  {{- end }}
+{{- end }}
diff --git a/charts/cilium/templates/cilium-agent-servicemonitor.yaml b/charts/cilium/templates/cilium-agent-servicemonitor.yaml
new file mode 100644
index 0000000..b035e4e
--- /dev/null
+++ b/charts/cilium/templates/cilium-agent-servicemonitor.yaml
@@ -0,0 +1,63 @@
+{{- if and (.Values.agent) (not .Values.preflight.enabled) (.Values.prometheus.enabled) (.Values.prometheus.serviceMonitor.enabled) }}
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: cilium-agent
+  {{- if .Values.prometheus.serviceMonitor.namespace }}
+  namespace: {{ .Values.prometheus.serviceMonitor.namespace }}
+  {{- else }}
+  namespace: {{ .Release.Namespace }}
+  {{- end }}
+  labels:
+    {{- with .Values.prometheus.serviceMonitor.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+spec:
+  selector:
+    matchLabels:
+      k8s-app: cilium
+  namespaceSelector:
+    matchNames:
+    - {{ .Release.Namespace }}
+  endpoints:
+  - port: metrics
+    interval: 10s
+    honorLabels: true
+    path: /metrics
+  targetLabels:
+  - k8s-app
+{{- end }}
+{{- if and .Values.hubble.metrics.enabled (.Values.hubble.metrics.serviceMonitor.enabled) }}
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: hubble
+  {{- if .Values.prometheus.serviceMonitor.namespace }}
+  namespace: {{ .Values.prometheus.serviceMonitor.namespace }}
+  {{- else }}
+  namespace: {{ .Release.Namespace }}
+  {{- end }}
+  labels:
+    {{- with .Values.hubble.metrics.serviceMonitor.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+spec:
+  selector:
+    matchLabels:
+      k8s-app: hubble
+  namespaceSelector:
+    matchNames:
+    - {{ .Release.Namespace }}
+  endpoints:
+  - port: hubble-metrics
+    interval: 10s
+    honorLabels: true
+    path: /metrics
+    relabelings:
+    - replacement: ${1}
+      sourceLabels:
+      - __meta_kubernetes_pod_node_name
+      targetLabel: node
+{{- end }}
diff --git a/charts/cilium/templates/cilium-configmap.yaml b/charts/cilium/templates/cilium-configmap.yaml
new file mode 100644
index 0000000..3fd95e0
--- /dev/null
+++ b/charts/cilium/templates/cilium-configmap.yaml
@@ -0,0 +1,748 @@
+{{- if and (.Values.agent) (not .Values.preflight.enabled) }}
+{{- /*  Default values with backwards compatibility */ -}}
+{{- $defaultEnableCnpStatusUpdates := "true" -}}
+{{- $defaultBpfMapDynamicSizeRatio := 0.0 -}}
+{{- $defaultBpfMasquerade := "false" -}}
+{{- $defaultBpfClockProbe := "false" -}}
+{{- $defaultBpfTProxy := "false" -}}
+{{- $defaultIPAM := "cluster-pool" -}}
+{{- $defaultSessionAffinity := "false" -}}
+{{- $defaultOperatorApiServeAddr := "localhost:9234" -}}
+{{- $defaultBpfCtTcpMax := 524288 -}}
+{{- $defaultBpfCtAnyMax := 262144 -}}
+{{- $enableIdentityMark := "true" -}}
+{{- $fragmentTracking := "true" -}}
+{{- $crdWaitTimeout := "5m" -}}
+{{- $defaultKubeProxyReplacement := "probe" -}}
+
+{{- /* Default values when 1.8 was initially deployed */ -}}
+{{- if semverCompare ">=1.8" (default "1.8" .Values.upgradeCompatibility) -}}
+{{- $defaultEnableCnpStatusUpdates = "false" -}}
+{{- $defaultBpfMapDynamicSizeRatio = 0.0025 -}}
+{{- $defaultBpfMasquerade = "true" -}}
+{{- $defaultBpfClockProbe = "true" -}}
+{{- $defaultIPAM = "cluster-pool" -}}
+{{- $defaultSessionAffinity = "true" -}}
+{{- if .Values.ipv4.enabled }}
+{{- $defaultOperatorApiServeAddr = "127.0.0.1:9234" -}}
+{{- else -}}
+{{- $defaultOperatorApiServeAddr = "[::1]:9234" -}}
+{{- end }}
+{{- $defaultBpfCtTcpMax = 0 -}}
+{{- $defaultBpfCtAnyMax = 0 -}}
+{{- end -}}
+
+{{- /* Default values when 1.10 was initially deployed */ -}}
+{{- if semverCompare ">=1.10" (default "1.10" .Values.upgradeCompatibility) -}}
+  {{- $defaultKubeProxyReplacement = "disabled" -}}
+  {{- /* Needs to be explicitly disabled because it was enabled on all versions >=v1.8 above. */ -}}
+  {{- $defaultBpfMasquerade = "false" -}}
+{{- end -}}
+
+{{- $ipam := (coalesce .Values.ipam.mode $defaultIPAM) -}}
+{{- $bpfCtTcpMax := (coalesce .Values.bpf.ctTcpMax $defaultBpfCtTcpMax) -}}
+{{- $bpfCtAnyMax := (coalesce .Values.bpf.ctAnyMax $defaultBpfCtAnyMax) -}}
+{{- $kubeProxyReplacement := (coalesce .Values.kubeProxyReplacement $defaultKubeProxyReplacement) -}}
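+{{- /* coalesce returns its first non-empty argument, so an explicit user
+     setting always wins over the upgradeCompatibility-derived default;
+     e.g. --set ipam.mode=kubernetes overrides $defaultIPAM. */ -}}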
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: cilium-config
+  namespace: {{ .Release.Namespace }}
+data:
+{{- if .Values.etcd.enabled }}
+  # The kvstore configuration is used to enable use of a kvstore for state
+  # storage. This can either be provided with an external kvstore or with the
+  # help of cilium-etcd-operator which operates an etcd cluster automatically.
+  kvstore: etcd
+{{- if .Values.etcd.k8sService }}
+  kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config", "etcd.operator": "true"}'
+{{- else }}
+  kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}'
+{{- end }}
+
+  # This etcd-config contains the etcd endpoints of your cluster. If you use
+  # TLS please make sure you follow the tutorial in https://cilium.link/etcd-config
+  etcd-config: |-
+    ---
+    endpoints:
+{{- if .Values.etcd.managed }}
+      - https://cilium-etcd-client.{{ .Release.Namespace }}.svc:2379
+{{- else }}
+{{- range .Values.etcd.endpoints }}
+      - {{ . }}
+{{- end }}
+{{- end }}
+{{- if or .Values.etcd.ssl .Values.etcd.managed }}
+    trusted-ca-file: '/var/lib/etcd-secrets/etcd-client-ca.crt'
+    key-file: '/var/lib/etcd-secrets/etcd-client.key'
+    cert-file: '/var/lib/etcd-secrets/etcd-client.crt'
+{{- end }}
+{{- end }}
+
+{{- if hasKey .Values "conntrackGCInterval" }}
+  conntrack-gc-interval: {{ .Values.conntrackGCInterval | quote }}
+{{- end }}
+
+{{- if hasKey .Values "disableEnvoyVersionCheck" }}
+  disable-envoy-version-check: {{ .Values.disableEnvoyVersionCheck | quote }}
+{{- end }}
+
+  # Identity allocation mode selects how identities are shared between cilium
+  # nodes by setting how they are stored. The options are "crd" or "kvstore".
+  # - "crd" stores identities in kubernetes as CRDs (custom resource definition).
+  #   These can be queried with:
+  #     kubectl get ciliumid
+  # - "kvstore" stores identities in a kvstore, etcd or consul, that is
+  #   configured below. Cilium versions before 1.6 supported only the kvstore
+  #   backend. Upgrades from these older cilium versions should continue using
+  #   the kvstore by commenting out the identity-allocation-mode below, or
+  #   setting it to "kvstore".
+  identity-allocation-mode: {{ .Values.identityAllocationMode }}
+{{- if hasKey .Values "identityHeartbeatTimeout" }}
+  identity-heartbeat-timeout: "{{ .Values.identityHeartbeatTimeout }}"
+{{- end }}
+{{- if hasKey .Values "identityGCInterval" }}
+  identity-gc-interval: "{{ .Values.identityGCInterval }}"
+{{- end }}
+{{- if hasKey .Values.operator "endpointGCInterval" }}
+  cilium-endpoint-gc-interval: "{{ .Values.operator.endpointGCInterval }}"
+{{- end }}
+
+{{- if hasKey .Values "identityChangeGracePeriod" }}
+  # identity-change-grace-period is the grace period that needs to pass
+  # before an endpoint that has changed its identity will start using
+  # that new identity. During the grace period, the new identity has
+  # already been allocated and other nodes in the cluster have a chance
+  # to whitelist the new upcoming identity of the endpoint.
+  identity-change-grace-period: {{ default "5s" .Values.identityChangeGracePeriod | quote }}
+{{- end }}
+
+{{- if hasKey .Values "labels" }}
+  # To include or exclude matched resources from cilium identity evaluation
+  labels: {{ .Values.labels | quote }}
+{{- end }}
+
+  # If you want to run cilium in debug mode change this value to true
+  debug: {{ .Values.debug.enabled | quote }}
+
+{{- if hasKey .Values.debug "verbose" }}
+  debug-verbose: "{{ .Values.debug.verbose }}"
+{{- end }}
+
+{{- if ne (int .Values.healthPort) 9876 }}
+  # Set the TCP port for the agent health status API. This is not the port used
+  # for cilium-health.
+  agent-health-port: "{{ .Values.healthPort }}"
+{{- end }}
+{{- if hasKey .Values "policyEnforcementMode" }}
+  # The agent can be put into the following three policy enforcement modes
+  # default, always and never.
+  # https://docs.cilium.io/en/latest/policy/intro/#policy-enforcement-modes
+  enable-policy: "{{ lower .Values.policyEnforcementMode }}"
+{{- end }}
+
+{{- if .Values.prometheus.enabled }}
+  # If you want metrics enabled in all of your Cilium agents, set the port
+  # on which the Cilium agents will expose their metrics.
+  # This option deprecates the "prometheus-serve-addr" field in the
+  # "cilium-metrics-config" ConfigMap.
+  # NOTE that this will open the port on ALL nodes where Cilium pods are
+  # scheduled.
+  prometheus-serve-addr: ":{{ .Values.prometheus.port }}"
+  # Port to expose Envoy metrics (e.g. "9095"). Envoy metrics listener will be disabled if this
+  # field is not set.
+  {{- if .Values.proxy.prometheus.enabled }}
+  proxy-prometheus-port: "{{ .Values.proxy.prometheus.port }}"
+  {{- end }}
+  {{- if .Values.prometheus.metrics }}
+  # Metrics that should be enabled or disabled from the default metric
+  # list (+metric_foo to enable metric_foo, -metric_bar to disable
+  # metric_bar).
+  metrics: {{- range .Values.prometheus.metrics }}
+    {{ . }}
+  {{- end }}
+  {{- end }}
+{{- end }}
+
+{{- if .Values.operator.prometheus.enabled }}
+  # If you want metrics enabled in cilium-operator, set the port on
+  # which the Cilium operator will expose its metrics.
+  # NOTE that this will open the port on the nodes where the Cilium operator
+  # pod is scheduled.
+  operator-prometheus-serve-addr: ":{{ .Values.operator.prometheus.port }}"
+  enable-metrics: "true"
+{{- end }}
+
+{{- if .Values.operator.skipCRDCreation }}
+  skip-crd-creation: "true"
+{{- end }}
+
+  # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4
+  # address.
+  enable-ipv4: {{ .Values.ipv4.enabled | quote }}
+
+  # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6
+  # address.
+  enable-ipv6: {{ .Values.ipv6.enabled | quote }}
+
+{{- if .Values.cleanState }}
+  # If a serious issue occurs during Cilium startup, this
+  # invasive option may be set to true to remove all persistent
+  # state. Endpoints will not be restored using knowledge from a
+  # prior Cilium run, so they may receive new IP addresses upon
+  # restart. This also triggers clean-cilium-bpf-state.
+  clean-cilium-state: "true"
+{{- end }}
+
+{{- if .Values.cleanBpfState }}
+  # If you want to clean cilium BPF state, set this to true;
+  # Removes all BPF maps from the filesystem. Upon restart,
+  # endpoints are restored with the same IP addresses, however
+  # any ongoing connections may be disrupted briefly.
+  # Loadbalancing decisions will be reset, so any ongoing
+  # connections via a service may be loadbalanced to a different
+  # backend after restart.
+  clean-cilium-bpf-state: "true"
+{{- end }}
+
+{{- if hasKey .Values.cni "customConf" }}
+  # Users who wish to specify their own custom CNI configuration file must set
+  # custom-cni-conf to "true", otherwise Cilium may overwrite the configuration.
+  custom-cni-conf: "{{ .Values.cni.customConf }}"
+{{- end }}
+
+{{- if hasKey .Values "bpfClockProbe" }}
+  enable-bpf-clock-probe: {{ .Values.bpfClockProbe | quote }}
+{{- else if eq $defaultBpfClockProbe "true" }}
+  enable-bpf-clock-probe: {{ $defaultBpfClockProbe | quote }}
+{{- end }}
+
+{{- if hasKey .Values.bpf "tproxy" }}
+  enable-bpf-tproxy: {{ .Values.bpf.tproxy | quote }}
+{{- else if eq $defaultBpfTProxy "true" }}
+  enable-bpf-tproxy: {{ $defaultBpfTProxy | quote }}
+{{- end }}
+  # If you want cilium monitor to aggregate tracing for packets, set this level
+  # to "low", "medium", or "maximum". The higher the level, the less packets
+  # that will be seen in monitor output.
+  monitor-aggregation: {{ .Values.bpf.monitorAggregation }}
+
+  # The monitor aggregation interval governs the typical time between monitor
+  # notification events for each allowed connection.
+  #
+  # Only effective when monitor aggregation is set to "medium" or higher.
+  monitor-aggregation-interval: {{ .Values.bpf.monitorInterval }}
+
+  # The monitor aggregation flags determine which TCP flags, upon their
+  # first observation, cause monitor notifications to be generated.
+  #
+  # Only effective when monitor aggregation is set to "medium" or higher.
+  monitor-aggregation-flags: {{ .Values.bpf.monitorFlags }}
+
+{{- if hasKey .Values.bpf "mapDynamicSizeRatio" }}
+  # Specifies the ratio (0.0-1.0) of total system memory to use for dynamic
+  # sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
+  bpf-map-dynamic-size-ratio: {{ .Values.bpf.mapDynamicSizeRatio | quote }}
+{{- else if ne $defaultBpfMapDynamicSizeRatio 0.0 }}
+  # Specifies the ratio (0.0-1.0) of total system memory to use for dynamic
+  # sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
+  bpf-map-dynamic-size-ratio: {{ $defaultBpfMapDynamicSizeRatio | quote }}
+{{- end }}
+
+{{- if hasKey .Values.bpf "hostRouting" }}
+  enable-host-legacy-routing: {{ .Values.bpf.hostRouting | quote }}
+{{- end }}
+
+{{- if or $bpfCtTcpMax $bpfCtAnyMax }}
+  # bpf-ct-global-*-max specifies the maximum number of connections
+  # supported across all endpoints, split by protocol: tcp or other. One pair
+  # of maps uses these values for IPv4 connections, and another pair of maps
+  # uses these values for IPv6 connections.
+  #
+  # If these values are modified, then during the next Cilium startup the
+  # tracking of ongoing connections may be disrupted. As a result, reply
+  # packets may be dropped and the load-balancing decisions for established
+  # connections may change.
+  #
+  # For users upgrading from Cilium 1.2 or earlier, to minimize disruption
+  # during the upgrade process, set bpf-ct-global-tcp-max to 1000000.
+{{- if $bpfCtTcpMax }}
+  bpf-ct-global-tcp-max: {{ $bpfCtTcpMax | quote }}
+{{- end }}
+{{- if $bpfCtAnyMax }}
+  bpf-ct-global-any-max: {{ $bpfCtAnyMax | quote }}
+{{- end }}
+{{- end }}
+{{- if hasKey .Values.bpf "natMax" }}
+  # bpf-nat-global-max specifies the maximum number of entries in the
+  # BPF NAT table.
+  bpf-nat-global-max: "{{ .Values.bpf.natMax }}"
+{{- end }}
+{{- if hasKey .Values.bpf "neighMax" }}
+  # bpf-neigh-global-max specifies the maximum number of entries in the
+  # BPF neighbor table.
+  bpf-neigh-global-max: "{{ .Values.bpf.neighMax }}"
+{{- end }}
+{{- if hasKey .Values.bpf "policyMapMax" }}
+  # bpf-policy-map-max specifies the maximum number of entries in endpoint
+  # policy map (per endpoint)
+  bpf-policy-map-max: "{{ .Values.bpf.policyMapMax }}"
+{{- end }}
+{{- if hasKey .Values.bpf "lbMapMax" }}
+  # bpf-lb-map-max specifies the maximum number of entries in bpf lb service,
+  # backend and affinity maps.
+  bpf-lb-map-max: "{{ .Values.bpf.lbMapMax }}"
+{{- end }}
+  # bpf-lb-bypass-fib-lookup instructs Cilium to enable the FIB lookup bypass
+  # optimization for nodeport reverse NAT handling.
+{{- if hasKey .Values.bpf "lbBypassFIBLookup" }}
+  bpf-lb-bypass-fib-lookup: {{ .Values.bpf.lbBypassFIBLookup | quote }}
+{{- end }}
+{{- if hasKey .Values.bpf "lbExternalClusterIP" }}
+  bpf-lb-external-clusterip: {{ .Values.bpf.lbExternalClusterIP | quote }}
+{{- end }}
+
+  # Pre-allocation of map entries allows per-packet latency to be reduced, at
+  # the expense of up-front memory allocation for the entries in the maps. The
+  # default value below will minimize memory usage in the default installation;
+  # users who are sensitive to latency may consider setting this to "true".
+  #
+  # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
+  # this option and behave as though it is set to "true".
+  #
+  # If this value is modified, then during the next Cilium startup the restore
+  # of existing endpoints and tracking of ongoing connections may be disrupted.
+  # As a result, reply packets may be dropped and the load-balancing decisions
+  # for established connections may change.
+  #
+  # If this option is set to "false" during an upgrade from 1.3 or earlier to
+  # 1.4 or later, then it may cause one-time disruptions during the upgrade.
+  preallocate-bpf-maps: "{{ .Values.bpf.preallocateMaps }}"
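+  # As a sketch of how a latency-sensitive user might opt in at upgrade time
+  # (illustrative invocation; release name and chart reference are
+  # placeholders):
+  #
+  #   helm upgrade cilium cilium/cilium --reuse-values \
+  #     --set bpf.preallocateMaps=true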
+
+  # Regular expression matching compatible Istio sidecar istio-proxy
+  # container image names
+  sidecar-istio-proxy-image: "{{ .Values.proxy.sidecarImageRegex }}"
+
+  # Name of the cluster. Only relevant when building a mesh of clusters.
+  cluster-name: {{ .Values.cluster.name }}
+
+{{- if hasKey .Values.cluster "id" }}
+  # Unique ID of the cluster. Must be unique across all connected clusters
+  # and in the range of 1 to 255. Only relevant when building a mesh of
+  # clusters.
+  cluster-id: "{{ .Values.cluster.id }}"
+{{- end }}
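+  # An illustrative values.yaml fragment for one member of a cluster mesh
+  # (the name and ID below are examples only):
+  #
+  #   cluster:
+  #     name: cluster-1
+  #     id: 1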
+
+  # Encapsulation mode for communication between nodes
+  # Possible values:
+  #   - disabled
+  #   - vxlan (default)
+  #   - geneve
+{{- if .Values.gke.enabled }}
+  tunnel: "disabled"
+  enable-endpoint-routes: "true"
+  enable-local-node-route: "false"
+{{- else }}
+  tunnel: {{ .Values.tunnel }}
+{{- end }}
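+  # For example (sketch only; a complete native-routing setup involves more
+  # than these two values), encapsulation can be turned off in favour of
+  # native routing by combining:
+  #
+  #   tunnel: disabled
+  #   nativeRoutingCIDR: 10.0.0.0/8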
+
+{{- if .Values.eni.enabled }}
+  enable-endpoint-routes: "true"
+  auto-create-cilium-node-resource: "true"
+{{- if .Values.eni.updateEC2AdapterLimitViaAPI }}
+  update-ec2-adapter-limit-via-api: "true"
+{{- end }}
+{{- if .Values.eni.awsReleaseExcessIPs }}
+  aws-release-excess-ips: "true"
+{{- end }}
+  ec2-api-endpoint: {{ .Values.eni.ec2APIEndpoint | quote }}
+  eni-tags: {{ .Values.eni.eniTags | toRawJson | quote }}
+  subnet-ids-filter: {{ .Values.eni.subnetIDsFilter | quote }}
+  subnet-tags-filter: {{ .Values.eni.subnetTagsFilter | quote }}
+{{- end }}
+
+{{- if .Values.azure.enabled }}
+  enable-endpoint-routes: "true"
+  auto-create-cilium-node-resource: "true"
+  enable-local-node-route: "false"
+{{- if .Values.azure.userAssignedIdentityID }}
+  azure-user-assigned-identity-id: {{ .Values.azure.userAssignedIdentityID | quote }}
+{{- end }}
+{{- end }}
+
+{{- if .Values.alibabacloud.enabled }}
+  enable-endpoint-routes: "true"
+  auto-create-cilium-node-resource: "true"
+{{- end }}
+
+{{- if hasKey .Values "l7Proxy" }}
+  # Enables L7 proxy for L7 policy enforcement and visibility
+  enable-l7-proxy: {{ .Values.l7Proxy | quote }}
+{{- end }}
+
+{{- if ne .Values.cni.chainingMode "none" }}
+  # Enable chaining with another CNI plugin
+  #
+  # Supported modes:
+  #  - none
+  #  - aws-cni
+  #  - flannel
+  #  - portmap (Enables HostPort support for Cilium)
+  cni-chaining-mode: {{ .Values.cni.chainingMode }}
+
+{{- if hasKey .Values "enableIdentityMark" }}
+  enable-identity-mark: {{ .Values.enableIdentityMark | quote }}
+{{- else if (ne $enableIdentityMark "true") }}
+  enable-identity-mark: "false"
+{{- end }}
+{{- if ne .Values.cni.chainingMode "portmap" }}
+  # Disable the PodCIDR route to the cilium_host interface as it is not
+  # required. While chaining, it is the responsibility of the underlying plugin
+  # to enable routing.
+  enable-local-node-route: "false"
+{{- end }}
+{{- end }}
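+  # An illustrative values.yaml fragment enabling chaining on top of the
+  # AWS VPC CNI (sketch; real aws-cni deployments typically set further
+  # options not shown here):
+  #
+  #   cni:
+  #     chainingMode: aws-cni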
+
+  enable-ipv4-masquerade: {{ .Values.enableIPv4Masquerade | quote }}
+  enable-ipv6-masquerade: {{ .Values.enableIPv6Masquerade | quote }}
+
+{{- if hasKey .Values.bpf "masquerade" }}
+  enable-bpf-masquerade: {{ .Values.bpf.masquerade | quote }}
+{{- else if eq $defaultBpfMasquerade "true" }}
+  enable-bpf-masquerade: {{ $defaultBpfMasquerade | quote }}
+{{- end }}
+{{- if hasKey .Values "egressMasqueradeInterfaces" }}
+  egress-masquerade-interfaces: {{ .Values.egressMasqueradeInterfaces }}
+{{- end }}
+{{- if and .Values.ipMasqAgent .Values.ipMasqAgent.enabled }}
+  enable-ip-masq-agent: "true"
+{{- end }}
+
+{{- if .Values.encryption.enabled }}
+  {{- if eq .Values.encryption.type "ipsec" }}
+  enable-ipsec: {{ .Values.encryption.enabled | quote }}
+
+    {{- if and .Values.encryption.ipsec.mountPath .Values.encryption.ipsec.keyFile }}
+  ipsec-key-file: {{ .Values.encryption.ipsec.mountPath }}/{{ .Values.encryption.ipsec.keyFile }}
+    {{- else }}
+  ipsec-key-file: {{ .Values.encryption.mountPath }}/{{ .Values.encryption.keyFile }}
+    {{- end }}
+    {{- if .Values.encryption.ipsec.interface }}
+  encrypt-interface: {{ .Values.encryption.ipsec.interface }}
+    {{- else if .Values.encryption.interface }}
+  encrypt-interface: {{ .Values.encryption.interface }}
+    {{- end }}
+
+    {{- if .Values.encryption.nodeEncryption }}
+  encrypt-node: {{ .Values.encryption.nodeEncryption | quote }}
+    {{- end }}
+  {{- else if eq .Values.encryption.type "wireguard" }}
+  enable-wireguard: {{ .Values.encryption.enabled | quote }}
+  {{- end }}
+{{- end }}
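+  # A minimal illustrative values.yaml fragment for WireGuard encryption,
+  # matching the template branch above:
+  #
+  #   encryption:
+  #     enabled: true
+  #     type: wireguard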
+
+{{- if hasKey .Values "datapathMode" }}
+{{- if eq .Values.datapathMode "ipvlan" }}
+  datapath-mode: ipvlan
+  ipvlan-master-device: {{ .Values.ipvlan.masterDevice }}
+{{- end }}
+{{- end }}
+
+  enable-xt-socket-fallback: {{ .Values.enableXTSocketFallback | quote }}
+  install-iptables-rules: {{ .Values.installIptablesRules | quote }}
+{{- if or (.Values.azure.enabled) (.Values.eni.enabled) (.Values.gke.enabled) (ne .Values.cni.chainingMode "none") }}
+  install-no-conntrack-iptables-rules: "false"
+{{- else }}
+  install-no-conntrack-iptables-rules: {{ .Values.installNoConntrackIptablesRules | quote }}
+{{- end }}
+
+{{- if hasKey .Values "iptablesRandomFully" }}
+  iptables-random-fully: {{ .Values.iptablesRandomFully | quote }}
+{{- end }}
+
+{{- if hasKey .Values "iptablesLockTimeout" }}
+  iptables-lock-timeout: {{ .Values.iptablesLockTimeout | quote }}
+{{- end }}
+
+  auto-direct-node-routes: {{ .Values.autoDirectNodeRoutes | quote }}
+  enable-bandwidth-manager: {{ .Values.bandwidthManager | quote }}
+
+{{- if hasKey .Values "localRedirectPolicy" }}
+  enable-local-redirect-policy: {{ .Values.localRedirectPolicy | quote }}
+{{- end }}
+
+{{- if hasKey .Values "nativeRoutingCIDR" }}
+  native-routing-cidr: {{ .Values.nativeRoutingCIDR }}
+{{- end }}
+
+{{- if hasKey .Values "fragmentTracking" }}
+  enable-ipv4-fragment-tracking: {{ .Values.fragmentTracking | quote }}
+{{- else if (ne $fragmentTracking "true") }}
+  enable-ipv4-fragment-tracking: "false"
+{{- end }}
+
+{{- if .Values.hostFirewall }}
+  enable-host-firewall: {{ .Values.hostFirewall | quote }}
+{{- end }}
+
+{{- if hasKey .Values "devices" }}
+  # List of devices used to attach bpf_host.o (implements BPF NodePort,
+  # host-firewall and BPF masquerading)
+  devices: {{ join " " .Values.devices | quote }}
+{{- end }}
+
+  kube-proxy-replacement: {{ $kubeProxyReplacement | quote }}
+{{- if ne $kubeProxyReplacement "disabled" }}
+  kube-proxy-replacement-healthz-bind-address: {{ default "" .Values.kubeProxyReplacementHealthzBindAddr | quote }}
+{{- end }}
+
+{{- if hasKey .Values "hostServices" }}
+{{- if .Values.hostServices.enabled }}
+  enable-host-reachable-services: {{ .Values.hostServices.enabled | quote }}
+{{- end }}
+{{- if ne .Values.hostServices.protocols "tcp,udp" }}
+  host-reachable-services-protos: {{ .Values.hostServices.protocols }}
+{{- end }}
+{{- if hasKey .Values.hostServices "hostNamespaceOnly" }}
+  bpf-lb-sock-hostns-only: {{ .Values.hostServices.hostNamespaceOnly | quote }}
+{{- end }}
+{{- end }}
+{{- if hasKey .Values "hostPort" }}
+{{- if eq $kubeProxyReplacement "partial" }}
+  enable-host-port: {{ .Values.hostPort.enabled | quote }}
+{{- end }}
+{{- end }}
+{{- if hasKey .Values "externalIPs" }}
+{{- if eq $kubeProxyReplacement "partial" }}
+  enable-external-ips: {{ .Values.externalIPs.enabled | quote }}
+{{- end }}
+{{- end }}
+{{- if hasKey .Values "nodePort" }}
+{{- if eq $kubeProxyReplacement "partial" }}
+  enable-node-port: {{ .Values.nodePort.enabled | quote }}
+{{- end }}
+{{- if hasKey .Values.nodePort "range" }}
+  node-port-range: {{ .Values.nodePort.range | quote }}
+{{- end }}
+{{- if hasKey .Values.nodePort "directRoutingDevice" }}
+  direct-routing-device: {{ .Values.nodePort.directRoutingDevice | quote }}
+{{- end }}
+{{- if hasKey .Values.nodePort "enableHealthCheck" }}
+  enable-health-check-nodeport: {{ .Values.nodePort.enableHealthCheck | quote }}
+{{- end }}
+  node-port-bind-protection: {{ .Values.nodePort.bindProtection | quote }}
+  enable-auto-protect-node-port-range: {{ .Values.nodePort.autoProtectPortRange | quote }}
+{{- end }}
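+  # An illustrative values.yaml fragment enabling NodePort handling under
+  # partial kube-proxy replacement (the top-level kubeProxyReplacement key
+  # is an assumption; this template only sees the derived variable):
+  #
+  #   kubeProxyReplacement: partial
+  #   nodePort:
+  #     enabled: true
+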
+{{- if hasKey .Values "loadBalancer" }}
+{{- if .Values.loadBalancer.standalone }}
+  datapath-mode: lb-only
+{{- end }}
+{{- if hasKey .Values.loadBalancer "mode" }}
+  bpf-lb-mode: {{ .Values.loadBalancer.mode | quote }}
+{{- end }}
+{{- if hasKey .Values.loadBalancer "algorithm" }}
+  bpf-lb-algorithm: {{ .Values.loadBalancer.algorithm | quote }}
+{{- end }}
+{{- if hasKey .Values.loadBalancer "acceleration" }}
+  bpf-lb-acceleration: {{ .Values.loadBalancer.acceleration | quote }}
+{{- end }}
+{{- if hasKey .Values.loadBalancer "dsrDispatch" }}
+  bpf-lb-dsr-dispatch: {{ .Values.loadBalancer.dsrDispatch | quote }}
+{{- end }}
+
+{{- end }}
+{{- if hasKey .Values.maglev "tableSize" }}
+  bpf-lb-maglev-table-size: {{ .Values.maglev.tableSize | quote }}
+{{- end }}
+{{- if hasKey .Values.maglev "hashSeed" }}
+  bpf-lb-maglev-hash-seed: {{ .Values.maglev.hashSeed | quote }}
+{{- end }}
+{{- if .Values.sessionAffinity }}
+  enable-session-affinity: {{ .Values.sessionAffinity | quote }}
+{{- else if eq $defaultSessionAffinity "true" }}
+  enable-session-affinity: {{ $defaultSessionAffinity | quote }}
+{{- end }}
+{{- if .Values.svcSourceRangeCheck }}
+  enable-svc-source-range-check: {{ .Values.svcSourceRangeCheck | quote }}
+{{- end }}
+
+{{- if hasKey .Values "l2NeighDiscovery" }}
+{{- if hasKey .Values.l2NeighDiscovery "enabled" }}
+  enable-l2-neigh-discovery: {{ .Values.l2NeighDiscovery.enabled | quote }}
+{{- end }}
+{{- if hasKey .Values.l2NeighDiscovery "refreshPeriod" }}
+  arping-refresh-period: {{ .Values.l2NeighDiscovery.refreshPeriod | quote }}
+{{- end }}
+{{- end }}
+
+{{- if and .Values.pprof .Values.pprof.enabled }}
+  pprof: {{ .Values.pprof.enabled | quote }}
+{{- end }}
+{{- if .Values.logSystemLoad }}
+  log-system-load: {{ .Values.logSystemLoad | quote }}
+{{- end }}
+{{- if .Values.logOptions }}
+  log-opt: {{ .Values.logOptions | toJson | quote }}
+{{- end }}
+{{- if and .Values.sockops .Values.sockops.enabled }}
+  sockops-enable: {{ .Values.sockops.enabled | quote }}
+{{- end }}
+{{- if hasKey .Values.k8s "requireIPv4PodCIDR" }}
+  k8s-require-ipv4-pod-cidr: {{ .Values.k8s.requireIPv4PodCIDR | quote }}
+{{- end }}
+{{- if hasKey .Values.k8s "requireIPv6PodCIDR" }}
+  k8s-require-ipv6-pod-cidr: {{ .Values.k8s.requireIPv6PodCIDR | quote }}
+{{- end }}
+{{- if .Values.endpointStatus.enabled }}
+  endpoint-status: {{ required "endpointStatus.status required: policy, health, controllers, logs and/or state. For 2 or more options use a comma: \"policy, health\"" .Values.endpointStatus.status | quote }}
+{{- end }}
+{{- if and .Values.endpointRoutes .Values.endpointRoutes.enabled }}
+  enable-endpoint-routes: {{ .Values.endpointRoutes.enabled | quote }}
+{{- end }}
+{{- if .Values.cni.configMap }}
+  read-cni-conf: {{ .Values.cni.confFileMountPath }}/{{ .Values.cni.configMapKey }}
+  write-cni-conf-when-ready: {{ .Values.cni.hostConfDirMountPath }}/05-cilium.conflist
+{{- else if .Values.cni.readCniConf }}
+  read-cni-conf: {{ .Values.cni.readCniConf }}
+{{- end }}
+{{- if .Values.kubeConfigPath }}
+  k8s-kubeconfig-path: {{ .Values.kubeConfigPath | quote }}
+{{- end }}
+{{- if and ( .Values.endpointHealthChecking.enabled ) (or (eq .Values.cni.chainingMode "portmap") (eq .Values.cni.chainingMode "none")) }}
+  enable-endpoint-health-checking: "true"
+{{- else}}
+  # Disable health checking when chaining mode is not set to portmap or none
+  enable-endpoint-health-checking: "false"
+{{- end }}
+{{- if hasKey .Values "healthChecking" }}
+  enable-health-checking: {{ .Values.healthChecking | quote }}
+{{- end }}
+{{- if or .Values.wellKnownIdentities.enabled .Values.etcd.managed }}
+  enable-well-known-identities: "true"
+{{- else }}
+  enable-well-known-identities: "false"
+{{- end }}
+  enable-remote-node-identity: {{ .Values.remoteNodeIdentity | quote }}
+
+{{- if hasKey .Values "synchronizeK8sNodes" }}
+  synchronize-k8s-nodes: {{ .Values.synchronizeK8sNodes | quote }}
+{{- end }}
+
+{{- if hasKey .Values "policyAuditMode" }}
+  policy-audit-mode: {{ .Values.policyAuditMode | quote }}
+{{- end }}
+
+{{- if ne $defaultOperatorApiServeAddr "localhost:9234" }}
+  operator-api-serve-addr: {{ $defaultOperatorApiServeAddr | quote }}
+{{- end }}
+
+{{- if .Values.hubble.enabled }}
+  # Enable Hubble gRPC service.
+  enable-hubble: {{ .Values.hubble.enabled | quote }}
+  # UNIX domain socket for Hubble server to listen to.
+  hubble-socket-path: {{ .Values.hubble.socketPath | quote }}
+{{- if hasKey .Values.hubble "eventQueueSize" }}
+  # Buffer size of the channel for Hubble to receive monitor events. If this field is not set,
+  # the buffer size is set to the default monitor queue size.
+  hubble-event-queue-size: {{ .Values.hubble.eventQueueSize | quote }}
+{{- end }}
+{{- if hasKey .Values.hubble "flowBufferSize" }}
+  # DEPRECATED: this block should be removed in 1.11
+  hubble-flow-buffer-size: {{ .Values.hubble.flowBufferSize | quote }}
+{{- end }}
+{{- if hasKey .Values.hubble "eventBufferCapacity" }}
+  # Capacity of the buffer to store recent events.
+  hubble-event-buffer-capacity: {{ .Values.hubble.eventBufferCapacity | quote }}
+{{- end }}
+{{- if .Values.hubble.metrics.enabled }}
+  # Address to expose Hubble metrics (e.g. ":7070"). Metrics server will be disabled if this
+  # field is not set.
+  hubble-metrics-server: ":{{ .Values.hubble.metrics.port }}"
+  # A space-separated list of metrics to enable. See [0] for available metrics.
+  #
+  # [0]: https://github.com/cilium/hubble/blob/master/Documentation/metrics.md
+  hubble-metrics: {{- range .Values.hubble.metrics.enabled }}
+    {{.}}
+{{- end }}
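+  # An illustrative values.yaml fragment enabling a few common metrics
+  # (the metric names here are examples; see the list linked above):
+  #
+  #   hubble:
+  #     metrics:
+  #       enabled:
+  #         - dns
+  #         - drop
+  #         - tcp
+  #         - flow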
+{{- end }}
+{{- if hasKey .Values.hubble "listenAddress" }}
+  # An additional address for Hubble server to listen to (e.g. ":4244").
+  hubble-listen-address: {{ .Values.hubble.listenAddress | quote }}
+{{- if .Values.hubble.tls.enabled }}
+  hubble-disable-tls: "false"
+  hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt
+  hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key
+  hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt
+{{- else }}
+  hubble-disable-tls: "true"
+{{- end }}
+{{- end }}
+{{- end }}
+{{- if hasKey .Values "disableIptablesFeederRules" }}
+  # A space-separated list of iptables chains to disable when installing feeder rules.
+  disable-iptables-feeder-rules: {{ .Values.disableIptablesFeederRules | join " " | quote }}
+{{- end }}
+  ipam: {{ $ipam | quote }}
+
+{{- if eq $ipam "cluster-pool" }}
+{{- if .Values.ipv4.enabled }}
+  cluster-pool-ipv4-cidr: {{ .Values.ipam.operator.clusterPoolIPv4PodCIDR | quote }}
+  cluster-pool-ipv4-mask-size: {{ .Values.ipam.operator.clusterPoolIPv4MaskSize | quote }}
+{{- end }}
+{{- if .Values.ipv6.enabled }}
+  cluster-pool-ipv6-cidr: {{ .Values.ipam.operator.clusterPoolIPv6PodCIDR | quote }}
+  cluster-pool-ipv6-mask-size: {{ .Values.ipam.operator.clusterPoolIPv6MaskSize | quote }}
+{{- end }}
+{{- end }}
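+  # An illustrative values.yaml fragment for cluster-pool IPAM (the
+  # ipam.mode key name is an assumption; $ipam is derived elsewhere, and the
+  # CIDR and mask size below are examples only):
+  #
+  #   ipam:
+  #     mode: cluster-pool
+  #     operator:
+  #       clusterPoolIPv4PodCIDR: 10.0.0.0/8
+  #       clusterPoolIPv4MaskSize: 24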
+
+{{- if .Values.enableCnpStatusUpdates }}
+  disable-cnp-status-updates: {{ (not .Values.enableCnpStatusUpdates) | quote }}
+{{- else if (eq $defaultEnableCnpStatusUpdates "false") }}
+  disable-cnp-status-updates: "true"
+{{- end }}
+
+{{- if .Values.egressGateway.enabled }}
+  enable-egress-gateway: "true"
+{{- end }}
+
+{{- if .Values.enableK8sEventHandover }}
+  enable-k8s-event-handover: "true"
+{{- end }}
+
+{{- if hasKey .Values "crdWaitTimeout" }}
+  crd-wait-timeout: {{ .Values.crdWaitTimeout | quote }}
+{{- else if ( ne $crdWaitTimeout "5m" ) }}
+  crd-wait-timeout: {{ $crdWaitTimeout | quote }}
+{{- end }}
+
+{{- if .Values.enableK8sEndpointSlice }}
+  enable-k8s-endpoint-slice: {{ .Values.enableK8sEndpointSlice | quote }}
+{{- end }}
+
+{{- if hasKey .Values.k8s "serviceProxyName" }}
+  # Configure service proxy name for Cilium.
+  k8s-service-proxy-name: {{ .Values.k8s.serviceProxyName | quote }}
+{{- end }}
+
+{{- if and .Values.customCalls .Values.customCalls.enabled }}
+  # Enable tail call hooks for custom eBPF programs.
+  enable-custom-calls: {{ .Values.customCalls.enabled | quote }}
+{{- end }}
+
+{{- if and .Values.bgp.enabled (not .Values.bgp.announce.loadbalancerIP) }}
+{{ fail "BGP was enabled, but no announcements were enabled. Please enable one or more announcements." }}
+{{- else if and .Values.bgp.enabled .Values.bgp.announce.loadbalancerIP }}
+  bgp-announce-lb-ip: {{ .Values.bgp.announce.loadbalancerIP | quote }}
+{{- end }}
+
+{{- if hasKey .Values.cgroup "hostRoot" }}
+  cgroup-root: {{ .Values.cgroup.hostRoot | quote }}
+{{- end }}
+
+{{- if .Values.extraConfig }}
+{{ toYaml .Values.extraConfig | indent 2 }}
+{{- end }}
+{{- end }}
diff --git a/charts/cilium/templates/cilium-etcd-operator-clusterrole.yaml b/charts/cilium/templates/cilium-etcd-operator-clusterrole.yaml
new file mode 100644
index 0000000..d702793
--- /dev/null
+++ b/charts/cilium/templates/cilium-etcd-operator-clusterrole.yaml
@@ -0,0 +1,73 @@
+{{- if .Values.etcd.managed }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: cilium-etcd-operator
+rules:
+- apiGroups:
+  - etcd.database.coreos.com
+  resources:
+  - etcdclusters
+  verbs:
+  - get
+  - delete
+  - create
+  - update
+- apiGroups:
+  - apiextensions.k8s.io
+  resources:
+  - customresourcedefinitions
+  verbs:
+  - delete
+  - get
+  - create
+- apiGroups:
+  - ""
+  resources:
+  - deployments
+  verbs:
+  - delete
+  - create
+  - get
+  - update
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - list
+  - get
+  - delete
+- apiGroups:
+  - apps
+  resources:
+  - deployments
+  verbs:
+  - delete
+  - create
+  - get
+  - update
+- apiGroups:
+  - ""
+  resources:
+  - componentstatuses
+  verbs:
+  - get
+- apiGroups:
+  - extensions
+  resources:
+  - deployments
+  verbs:
+  - delete
+  - create
+  - get
+  - update
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - get
+  - create
+  - delete
+{{- end }}
diff --git a/charts/cilium/templates/cilium-etcd-operator-clusterrolebinding.yaml b/charts/cilium/templates/cilium-etcd-operator-clusterrolebinding.yaml
new file mode 100644
index 0000000..026df49
--- /dev/null
+++ b/charts/cilium/templates/cilium-etcd-operator-clusterrolebinding.yaml
@@ -0,0 +1,14 @@
+{{- if and .Values.etcd.managed .Values.serviceAccounts.etcd.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: cilium-etcd-operator
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cilium-etcd-operator
+subjects:
+- kind: ServiceAccount
+  name: {{ .Values.serviceAccounts.etcd.name | quote }}
+  namespace: {{ .Release.Namespace }}
+{{- end }}
diff --git a/charts/cilium/templates/cilium-etcd-operator-deployment.yaml b/charts/cilium/templates/cilium-etcd-operator-deployment.yaml
new file mode 100644
index 0000000..8665cf6
--- /dev/null
+++ b/charts/cilium/templates/cilium-etcd-operator-deployment.yaml
@@ -0,0 +1,112 @@
+{{- if .Values.etcd.managed }}
+
+{{- /* Workaround so that we can set the minimal k8s version that we support */ -}}
+{{- $k8sVersion := .Capabilities.KubeVersion.Version -}}
+{{- $k8sMajor := .Capabilities.KubeVersion.Major -}}
+{{- $k8sMinor := .Capabilities.KubeVersion.Minor -}}
+
+{{- if .Values.Capabilities -}}
+{{- if .Values.Capabilities.KubeVersion -}}
+{{- if .Values.Capabilities.KubeVersion.Version -}}
+{{- $k8sVersion = .Values.Capabilities.KubeVersion.Version -}}
+{{- if .Values.Capabilities.KubeVersion.Major -}}
+{{- $k8sMajor = toString (.Values.Capabilities.KubeVersion.Major) -}}
+{{- if .Values.Capabilities.KubeVersion.Minor -}}
+{{- $k8sMinor = toString (.Values.Capabilities.KubeVersion.Minor) -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
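+{{- /*
+For reference, a hypothetical values.yaml override exercising the fallback
+above (potentially useful when the built-in .Capabilities may not reflect the
+target cluster, e.g. when rendering offline with `helm template`):
+
+  Capabilities:
+    KubeVersion:
+      Version: v1.19.0
+      Major: "1"
+      Minor: "19"
+*/ -}}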
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    io.cilium/app: etcd-operator
+    name: cilium-etcd-operator
+  name: cilium-etcd-operator
+  namespace: {{ .Release.Namespace }}
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      io.cilium/app: etcd-operator
+      name: cilium-etcd-operator
+{{- with .Values.etcd.updateStrategy }}
+  strategy:
+    {{- toYaml . | trim | nindent 4 }}
+{{- end }}
+  template:
+    metadata:
+{{- with .Values.etcd.podAnnotations }}
+      annotations:
+        {{- toYaml . | nindent 8 }}
+{{- end }}
+      labels:
+        io.cilium/app: etcd-operator
+        name: cilium-etcd-operator
+{{- with .Values.etcd.podLabels }}
+        {{- toYaml . | nindent 8 }}
+{{- end }}
+    spec:
+{{- if .Values.etcd.affinity }}
+      affinity:
+{{ toYaml .Values.etcd.affinity | indent 8 }}
+{{- end }}
+{{- if .Values.imagePullSecrets }}
+      imagePullSecrets:
+        {{ toYaml .Values.imagePullSecrets | indent 8 }}
+{{- end }}
+      containers:
+      - args:
+{{- with .Values.etcd.extraArgs }}
+        {{- toYaml . | trim | nindent 8 }}
+{{- end }}
+        #- --etcd-node-selector=disktype=ssd,cputype=high
+        command:
+        - /usr/bin/cilium-etcd-operator
+        env:
+        - name: CILIUM_ETCD_OPERATOR_CLUSTER_DOMAIN
+          value: "{{ .Values.etcd.clusterDomain }}"
+        - name: CILIUM_ETCD_OPERATOR_ETCD_CLUSTER_SIZE
+          value: "{{ .Values.etcd.clusterSize }}"
+        - name: CILIUM_ETCD_OPERATOR_NAMESPACE
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.namespace
+        - name: CILIUM_ETCD_OPERATOR_POD_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.name
+        - name: CILIUM_ETCD_OPERATOR_POD_UID
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.uid
+        - name: CILIUM_ETCD_META_ETCD_AUTO_COMPACTION_MODE
+          value: "revision"
+        - name: CILIUM_ETCD_META_ETCD_AUTO_COMPACTION_RETENTION
+          value: "25000"
+        image: {{ .Values.etcd.image.repository }}:{{ .Values.etcd.image.tag }}
+        imagePullPolicy: {{ .Values.etcd.image.pullPolicy }}
+        name: cilium-etcd-operator
+      dnsPolicy: ClusterFirst
+      hostNetwork: true
+{{- if and (or (and (eq .Release.Namespace "kube-system") (gt $k8sMinor "10")) (ge $k8sMinor "17") (gt $k8sMajor "1")) .Values.enableCriticalPriorityClass }}
+      priorityClassName: system-cluster-critical
+{{- end }}
+      restartPolicy: Always
+      serviceAccount: {{ .Values.serviceAccounts.etcd.name | quote }}
+      serviceAccountName: {{ .Values.serviceAccounts.etcd.name | quote }}
+{{- with .Values.etcd.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | trim | nindent 8 }}
+{{- end }}
+{{- with .Values.tolerations }}
+      tolerations:
+      {{- toYaml . | trim | nindent 6 }}
+{{- end }}
+{{- end }}
diff --git a/charts/cilium/templates/cilium-etcd-operator-serviceaccount.yaml b/charts/cilium/templates/cilium-etcd-operator-serviceaccount.yaml
new file mode 100644
index 0000000..9bc0a3e
--- /dev/null
+++ b/charts/cilium/templates/cilium-etcd-operator-serviceaccount.yaml
@@ -0,0 +1,11 @@
+{{- if and .Values.etcd.managed .Values.serviceAccounts.etcd.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ .Values.serviceAccounts.etcd.name | quote }}
+  namespace: {{ .Release.Namespace }}
+  {{- if .Values.serviceAccounts.etcd.annotations }}
+  annotations:
+{{ toYaml .Values.serviceAccounts.etcd.annotations | indent 4 }}
+  {{- end }}
+{{- end }}
diff --git a/charts/cilium/templates/cilium-nodeinit-daemonset.yaml b/charts/cilium/templates/cilium-nodeinit-daemonset.yaml
new file mode 100644
index 0000000..3b1d6ed
--- /dev/null
+++ b/charts/cilium/templates/cilium-nodeinit-daemonset.yaml
@@ -0,0 +1,290 @@
+{{- if .Values.nodeinit.enabled }}
+
+{{- /* Workaround so that we can set the minimal k8s version that we support */ -}}
+{{- $k8sVersion := .Capabilities.KubeVersion.Version -}}
+{{- $k8sMajor := .Capabilities.KubeVersion.Major -}}
+{{- $k8sMinor := .Capabilities.KubeVersion.Minor -}}
+
+{{- if .Values.Capabilities -}}
+{{- if .Values.Capabilities.KubeVersion -}}
+{{- if .Values.Capabilities.KubeVersion.Version -}}
+{{- $k8sVersion = .Values.Capabilities.KubeVersion.Version -}}
+{{- if .Values.Capabilities.KubeVersion.Major -}}
+{{- $k8sMajor = toString (.Values.Capabilities.KubeVersion.Major) -}}
+{{- if .Values.Capabilities.KubeVersion.Minor -}}
+{{- $k8sMinor = toString (.Values.Capabilities.KubeVersion.Minor) -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+  name: cilium-node-init
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: cilium-node-init
+spec:
+  selector:
+    matchLabels:
+      app: cilium-node-init
+  template:
+    metadata:
+      annotations:
+{{- with .Values.nodeinit.podAnnotations }}
+        {{- toYaml . | nindent 8 }}
+{{- end }}
+      labels:
+        app: cilium-node-init
+{{- with .Values.nodeinit.podLabels }}
+        {{- toYaml . | nindent 8 }}
+{{- end }}
+    spec:
+{{- with .Values.tolerations }}
+      tolerations:
+      {{- toYaml . | trim | nindent 6 }}
+{{- end }}
+      hostPID: true
+      hostNetwork: true
+{{- if and (or (and (eq .Release.Namespace "kube-system") (gt $k8sMinor "10")) (ge $k8sMinor "17") (gt $k8sMajor "1")) .Values.enableCriticalPriorityClass }}
+      priorityClassName: system-node-critical
+{{- end }}
+{{- if .Values.imagePullSecrets }}
+      imagePullSecrets:
+        {{ toYaml .Values.imagePullSecrets | indent 6 }}
+{{- end }}
+      volumes:
+      # To access iptables concurrently with other processes (e.g. kube-proxy)
+      - hostPath:
+          path: /run/xtables.lock
+          type: FileOrCreate
+        name: xtables-lock
+      containers:
+        - name: node-init
+          image: {{ .Values.nodeinit.image.repository }}:{{ .Values.nodeinit.image.tag }}
+          imagePullPolicy: {{ .Values.nodeinit.image.pullPolicy }}
+          securityContext:
+            privileged: true
+          volumeMounts:
+            # To access iptables concurrently with other processes (e.g. kube-proxy)
+            - mountPath: /run/xtables.lock
+              name: xtables-lock
+          lifecycle:
+{{- if .Values.eni.enabled }}
+            postStart:
+              exec:
+                command:
+                  - "/bin/sh"
+                  - "-c"
+                  - |
+                    #!/bin/bash
+
+                    set -o errexit
+                    set -o pipefail
+                    set -o nounset
+
+                    # When running in AWS ENI mode, it's likely that 'aws-node' has
+                    # had a chance to install SNAT iptables rules. These can result
+                    # in dropped traffic, so we should attempt to remove them.
+                    # We do it using a 'postStart' hook since this may need to run
+                    # for nodes which might have already been init'ed but may still
+                    # have dangling rules. This is safe because there are no
+                    # dependencies on anything that is part of the startup script
+                    # itself, and can be safely run multiple times per node (e.g. in
+                    # case of a restart).
+                    if [[ "$(iptables-save | grep -c AWS-SNAT-CHAIN)" != "0" ]];
+                    then
+                      echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
+                      iptables-save | grep -v AWS-SNAT-CHAIN | iptables-restore
+                    fi
+                    echo 'Done!'
+{{- end }}
+{{- if .Values.nodeinit.revertReconfigureKubelet }}
+            preStop:
+              exec:
+                command:
+                  - "nsenter"
+                  - "-t"
+                  - "1"
+                  - "-m"
+                  - "--"
+                  - "/bin/sh"
+                  - "-c"
+                  - |
+                    #!/bin/bash
+
+                    set -o errexit
+                    set -o pipefail
+                    set -o nounset
+
+                    if stat /tmp/node-deinit.cilium.io > /dev/null 2>&1; then
+                      exit 0
+                    fi
+
+                    echo "Waiting on pods to stop..."
+                    if [ ! -f /etc/crictl.yaml ] || grep -q 'docker' /etc/crictl.yaml; then
+                      # Works for COS, ubuntu
+                      while docker ps | grep -v "node-init" | grep -q "POD_cilium"; do sleep 1; done
+                    else
+                      # COS-beta (with containerd). Some versions of COS have crictl in /home/kubernetes/bin.
+                      while PATH="${PATH}:/home/kubernetes/bin" crictl ps | grep -v "node-init" | grep -q "POD_cilium"; do sleep 1; done
+                    fi
+
+                    if ip link show cilium_host; then
+                      echo "Deleting cilium_host interface..."
+                      ip link del cilium_host
+                    fi
+
+{{- if not (eq .Values.nodeinit.bootstrapFile "") }}
+                    rm -f {{ .Values.nodeinit.bootstrapFile }}
+{{- end }}
+
+                    rm -f /tmp/node-init.cilium.io
+                    touch /tmp/node-deinit.cilium.io
+
+{{- if .Values.nodeinit.reconfigureKubelet }}
+                    echo "Changing kubelet configuration to --network-plugin=kubenet"
+                    sed -i "s:--network-plugin=cni\ --cni-bin-dir={{ .Values.cni.binPath }}:--network-plugin=kubenet:g" /etc/default/kubelet
+                    echo "Restarting kubelet..."
+                    systemctl restart kubelet
+{{- end }}
+
+{{- if (and .Values.gke.enabled (or .Values.masquerade .Values.gke.disableDefaultSnat)) }}
+                    # If the IP-MASQ chain exists, add back default jump rule from the GKE instance configure script
+                    if iptables -w -t nat -L IP-MASQ > /dev/null; then
+                      iptables -w -t nat -A POSTROUTING -m comment --comment "ip-masq: ensure nat POSTROUTING directs all non-LOCAL destination traffic to our custom IP-MASQ chain" -m addrtype ! --dst-type LOCAL -j IP-MASQ
+                    fi
+{{- end }}
+
+                    echo "Node de-initialization complete"
+{{- end }}
+          env:
+          - name: CHECKPOINT_PATH
+            value: /tmp/node-init.cilium.io
+          # STARTUP_SCRIPT is the script run on node bootstrap. Node
+          # bootstrapping can be customized in this script. This script is invoked
+          # using nsenter, so it runs in the host's network and mount namespace using
+          # the host's userland tools!
+          - name: STARTUP_SCRIPT
+            value: |
+              #!/bin/bash
+
+              set -o errexit
+              set -o pipefail
+              set -o nounset
+
+              echo "Link information:"
+              ip link
+
+              echo "Routing table:"
+              ip route
+
+              echo "Addressing:"
+              ip -4 a
+              ip -6 a
+
+{{- if .Values.nodeinit.removeCbrBridge }}
+              if ip link show cbr0; then
+                echo "Detected cbr0 bridge. Deleting interface..."
+                ip link del cbr0
+              fi
+{{- end }}
+
+{{- if .Values.nodeinit.reconfigureKubelet }}
+              # GKE: Alter the kubelet configuration to run in CNI mode
+              echo "Changing kubelet configuration to --network-plugin=cni --cni-bin-dir={{ .Values.cni.binPath }}"
+              mkdir -p {{ .Values.cni.binPath }}
+              sed -i "s:--network-plugin=kubenet:--network-plugin=cni\ --cni-bin-dir={{ .Values.cni.binPath }}:g" /etc/default/kubelet
+              echo "Restarting kubelet..."
+              systemctl restart kubelet
+{{- end }}
+
+{{- if (and .Values.gke.enabled (or .Values.masquerade .Values.gke.disableDefaultSnat)) }}
+              # If Cilium is configured to manage masquerading of traffic leaving the node,
+              # we need to disable the IP-MASQ chain because even if ip-masq-agent
+              # is not installed, the node init script installs some default rules into
+              # the IP-MASQ chain.
+              # If we remove the jump to that ip-masq chain, then we ensure the ip masquerade
+              # configuration is solely managed by Cilium.
+              # Also, if Cilium is installed, it may be expected that it would be solely responsible
+              # for the networking configuration on that node. So provide the same functionality
+              # as the --disable-snat-flag for existing GKE clusters.
+              iptables -w -t nat -D POSTROUTING -m comment --comment "ip-masq: ensure nat POSTROUTING directs all non-LOCAL destination traffic to our custom IP-MASQ chain" -m addrtype ! --dst-type LOCAL -j IP-MASQ || true
+{{- end }}
+
+{{- if not (eq .Values.nodeinit.bootstrapFile "") }}
+              date > {{ .Values.nodeinit.bootstrapFile }}
+{{- end }}
+
+{{- if .Values.nodeinit.restartPods }}
+              echo "Restarting kubenet managed pods"
+              if [ ! -f /etc/crictl.yaml ] || grep -q 'docker' /etc/crictl.yaml; then
+                # Works for COS, ubuntu
+                # Note the first line is the containerID with a trailing \r
+                for f in `find /var/lib/cni/networks/ -type f ! -name lock ! -name last_reserved_ip.0`; do docker rm -f "$(sed 's/\r//;1q' $f)" || true; done
+              elif [ -n "$(docker ps --format '{{ "{{" }}.Image{{ "}}" }}' | grep ^[0-9]*\.dkr\.ecr\.[a-z]*-[a-z]*-[0-9]*\.amazonaws\.com/amazon-k8s-cni)" ]; then
+                timeout=1
+                for i in $(seq 1 7); do
+                  echo "Checking introspection API"
+                  curl localhost:61679 && retry=false || retry=true
+                  if [ $retry == false ]; then break ; fi
+                  sleep "$timeout"
+                  timeout=$(($timeout * 2))
+                done
+
+                for pod in $(curl "localhost:61679/v1/pods" 2> /dev/null | jq -r '. | keys[]'); do
+                  container_id=$(echo "$pod" | awk -F_ ' { print $3 } ' | cut -c1-12)
+                  echo "Restarting ${container_id}"
+                  docker kill "${container_id}" || true
+                done
+              else
+                # COS-beta (with containerd). Some versions of COS have crictl in /home/kubernetes/bin.
+                for f in `find /var/lib/cni/networks/ -type f ! -name lock ! -name last_reserved_ip.0`; do PATH="${PATH}:/home/kubernetes/bin" crictl stopp "$(sed 's/\r//;1q' $f)" || true; done
+              fi
+{{- end }}
+
+              # AKS: If azure-vnet is installed on the node, and (still) configured in bridge mode,
+              # configure it as 'transparent' to be consistent with Cilium's CNI chaining config.
+              # If the azure-vnet CNI config is not removed, kubelet will execute CNI CHECK commands
+              # against it every 5 seconds and write 'bridge' to its state file, causing inconsistent
+              # behaviour when Pods are removed.
+              if [ -f /etc/cni/net.d/10-azure.conflist ]; then
+                echo "Ensuring azure-vnet is configured in 'transparent' mode..."
+                sed -i 's/"mode":\s*"bridge"/"mode":"transparent"/g' /etc/cni/net.d/10-azure.conflist
+              fi
+
+{{- if .Values.azure.enabled }}
+              # The azure0 interface being present means the node was booted with azure-vnet configured
+              # in bridge mode. This means there might be ebtables rules and neigh entries interfering
+              # with pod connectivity if we deploy with Azure IPAM.
+              if ip l show dev azure0 >/dev/null 2>&1; then
+
+                # In Azure IPAM mode, also remove the azure-vnet state file, otherwise ebtables rules get
+                # restored by the azure-vnet CNI plugin on every CNI CHECK, which can cause connectivity
+                # issues in Cilium-managed Pods. Since azure-vnet is no longer called on scheduling events,
+                # this file can be removed.
+                rm -f /var/run/azure-vnet.json
+
+                # This breaks connectivity for existing workload Pods when Cilium is scheduled, but we need
+                # to flush these to prevent Cilium-managed Pod IPs conflicting with Pod IPs previously allocated
+                # by azure-vnet. These ebtables DNAT rules contain fixed MACs that are no longer bound on the node,
+                # causing packets for these Pods to be redirected back out to the gateway, where they are dropped.
+                echo 'Flushing ebtables pre/postrouting rules in nat table.. (disconnecting non-Cilium Pods!)'
+                ebtables -t nat -F PREROUTING || true
+                ebtables -t nat -F POSTROUTING || true
+
+                # ip-masq-agent periodically injects PERM neigh entries towards the gateway
+                # for all other k8s nodes in the cluster. These are safe to flush, as ARP can
+                # resolve these nodes as usual. PERM entries will be automatically restored later.
+                echo 'Deleting all permanent neighbour entries on azure0...'
+                ip neigh show dev azure0 nud permanent | cut -d' ' -f1 | xargs -r -n1 ip neigh del dev azure0 to || true
+              fi
+{{- end }}
+
+{{- if .Values.nodeinit.revertReconfigureKubelet }}
+              rm -f /tmp/node-deinit.cilium.io
+{{- end }}
+              echo "Node initialization complete"
+{{- end }}
diff --git a/charts/cilium/templates/cilium-operator-clusterrole.yaml b/charts/cilium/templates/cilium-operator-clusterrole.yaml
new file mode 100644
index 0000000..33dcc8f
--- /dev/null
+++ b/charts/cilium/templates/cilium-operator-clusterrole.yaml
@@ -0,0 +1,120 @@
+{{- if .Values.operator.enabled }}
+
+{{- /* Workaround so that we can set the minimal k8s version that we support */ -}}
+{{- $k8sVersion := .Capabilities.KubeVersion.Version -}}
+{{- $k8sMajor := .Capabilities.KubeVersion.Major -}}
+{{- $k8sMinor := .Capabilities.KubeVersion.Minor -}}
+
+{{- if .Values.Capabilities -}}
+{{- if .Values.Capabilities.KubeVersion -}}
+{{- if .Values.Capabilities.KubeVersion.Version -}}
+{{- $k8sVersion = .Values.Capabilities.KubeVersion.Version -}}
+{{- if .Values.Capabilities.KubeVersion.Major -}}
+{{- $k8sMajor = toString (.Values.Capabilities.KubeVersion.Major) -}}
+{{- if .Values.Capabilities.KubeVersion.Minor -}}
+{{- $k8sMinor = toString (.Values.Capabilities.KubeVersion.Minor) -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: cilium-operator
+rules:
+- apiGroups:
+  - ""
+  resources:
+  # to automatically delete [core|kube]dns pods so that they start being
+  # managed by Cilium
+  - pods
+  verbs:
+  - get
+  - list
+  - watch
+  - delete
+- apiGroups:
+  - discovery.k8s.io
+  resources:
+  - endpointslices
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  # to perform LB IP allocation for BGP
+  - services/status
+  verbs:
+  - update
+- apiGroups:
+  - ""
+  resources:
+  # to perform the translation of a CNP that contains `ToGroup` to its endpoints
+  - services
+  - endpoints
+  # to check apiserver connectivity
+  - namespaces
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - cilium.io
+  resources:
+  - ciliumnetworkpolicies
+  - ciliumnetworkpolicies/status
+  - ciliumnetworkpolicies/finalizers
+  - ciliumclusterwidenetworkpolicies
+  - ciliumclusterwidenetworkpolicies/status
+  - ciliumclusterwidenetworkpolicies/finalizers
+  - ciliumendpoints
+  - ciliumendpoints/status
+  - ciliumendpoints/finalizers
+  - ciliumnodes
+  - ciliumnodes/status
+  - ciliumnodes/finalizers
+  - ciliumidentities
+  - ciliumidentities/status
+  - ciliumidentities/finalizers
+  - ciliumlocalredirectpolicies
+  - ciliumlocalredirectpolicies/status
+  - ciliumlocalredirectpolicies/finalizers
+  verbs:
+  - '*'
+- apiGroups:
+  - apiextensions.k8s.io
+  resources:
+  - customresourcedefinitions
+  verbs:
+  - create
+  - get
+  - list
+  - update
+  - watch
+# For cilium-operator running in HA mode.
+#
+# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election
+# between multiple running instances.
+# The preferred way of doing this is to use LeasesResourceLock, as edits to Leases are less
+# common and fewer objects in the cluster watch "all Leases".
+- apiGroups:
+  - coordination.k8s.io
+  resources:
+  - leases
+  verbs:
+  - create
+  - get
+  - update
+{{- end }}
diff --git a/charts/cilium/templates/cilium-operator-clusterrolebinding.yaml b/charts/cilium/templates/cilium-operator-clusterrolebinding.yaml
new file mode 100644
index 0000000..4eb6c82
--- /dev/null
+++ b/charts/cilium/templates/cilium-operator-clusterrolebinding.yaml
@@ -0,0 +1,14 @@
+{{- if and .Values.operator.enabled .Values.serviceAccounts.operator.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: cilium-operator
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cilium-operator
+subjects:
+- kind: ServiceAccount
+  name: {{ .Values.serviceAccounts.operator.name | quote }}
+  namespace: {{ .Release.Namespace }}
+{{- end }}
diff --git a/charts/cilium/templates/cilium-operator-deployment.yaml b/charts/cilium/templates/cilium-operator-deployment.yaml
new file mode 100644
index 0000000..ff114c9
--- /dev/null
+++ b/charts/cilium/templates/cilium-operator-deployment.yaml
@@ -0,0 +1,302 @@
+{{- if .Values.operator.enabled }}
+
+{{- /* Workaround so that we can set the minimal k8s version that we support */ -}}
+{{- $k8sVersion := .Capabilities.KubeVersion.Version -}}
+{{- $k8sMajor := .Capabilities.KubeVersion.Major -}}
+{{- $k8sMinor := .Capabilities.KubeVersion.Minor -}}
+
+{{- if .Values.Capabilities -}}
+{{- if .Values.Capabilities.KubeVersion -}}
+{{- if .Values.Capabilities.KubeVersion.Version -}}
+{{- $k8sVersion = .Values.Capabilities.KubeVersion.Version -}}
+{{- if .Values.Capabilities.KubeVersion.Major -}}
+{{- $k8sMajor = toString (.Values.Capabilities.KubeVersion.Major) -}}
+{{- if .Values.Capabilities.KubeVersion.Minor -}}
+{{- $k8sMinor = toString (.Values.Capabilities.KubeVersion.Minor) -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    io.cilium/app: operator
+    name: cilium-operator
+  name: cilium-operator
+  namespace: {{ .Release.Namespace }}
+spec:
+  # See docs on ServerCapabilities.LeasesResourceLock in file pkg/k8s/version/version.go
+  # for more details.
+  replicas: {{ .Values.operator.replicas }}
+  selector:
+    matchLabels:
+      io.cilium/app: operator
+      name: cilium-operator
+{{- with .Values.operator.updateStrategy }}
+  strategy:
+    {{- toYaml . | trim | nindent 4 }}
+{{- end }}
+  template:
+    metadata:
+      annotations:
+{{- if .Values.operator.rollOutPods }}
+        # ensure pods roll when configmap updates
+        cilium.io/cilium-configmap-checksum: {{ include (print $.Template.BasePath "/cilium-configmap.yaml") . | sha256sum | quote }}
+{{- end }}
+{{- if and .Values.operator.prometheus.enabled (not .Values.operator.prometheus.serviceMonitor.enabled) }}
+        prometheus.io/port: {{ .Values.operator.prometheus.port | quote }}
+        prometheus.io/scrape: "true"
+{{- end }}
+{{- with .Values.operator.podAnnotations }}
+        {{- toYaml . | nindent 8 }}
+{{- end }}
+      labels:
+        io.cilium/app: operator
+        name: cilium-operator
+{{- with .Values.operator.podLabels }}
+        {{- toYaml . | nindent 8 }}
+{{- end }}
+    spec:
+      # In HA mode, cilium-operator pods must not be scheduled on the same
+      # node as they will clash with each other.
+  {{- if .Values.operator.affinity }}
+      affinity:
+  {{- toYaml .Values.operator.affinity | trim | nindent 8 }}
+  {{- end }}
+{{- if .Values.imagePullSecrets }}
+      imagePullSecrets:
+{{ toYaml .Values.imagePullSecrets | indent 6 }}
+{{- end }}
+      containers:
+      - args:
+        - --config-dir=/tmp/cilium/config-map
+        - --debug=$(CILIUM_DEBUG)
+{{- with .Values.operator.extraArgs }}
+        {{- toYaml . | trim | nindent 8 }}
+{{- end }}
+        command:
+{{- if .Values.eni.enabled }}
+        - cilium-operator-aws
+{{- else if .Values.azure.enabled }}
+        - cilium-operator-azure
+{{- else if .Values.alibabacloud.enabled}}
+        - cilium-operator-alibabacloud
+{{- else }}
+        - cilium-operator-generic
+{{- end }}
+        env:
+        - name: K8S_NODE_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
+        - name: CILIUM_K8S_NAMESPACE
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.namespace
+        - name: CILIUM_DEBUG
+          valueFrom:
+            configMapKeyRef:
+              key: debug
+              name: cilium-config
+              optional: true
+{{- if (and .Values.eni.enabled (not .Values.eni.iamRole )) }}
+        - name: AWS_ACCESS_KEY_ID
+          valueFrom:
+            secretKeyRef:
+              key: AWS_ACCESS_KEY_ID
+              name: cilium-aws
+              optional: true
+        - name: AWS_SECRET_ACCESS_KEY
+          valueFrom:
+            secretKeyRef:
+              key: AWS_SECRET_ACCESS_KEY
+              name: cilium-aws
+              optional: true
+        - name: AWS_DEFAULT_REGION
+          valueFrom:
+            secretKeyRef:
+              key: AWS_DEFAULT_REGION
+              name: cilium-aws
+              optional: true
+{{- end }}
+{{- if .Values.alibabacloud.enabled }}
+        - name: ALIBABA_CLOUD_ACCESS_KEY_ID
+          valueFrom:
+            secretKeyRef:
+              key: ALIBABA_CLOUD_ACCESS_KEY_ID
+              name: cilium-alibabacloud
+              optional: true
+        - name: ALIBABA_CLOUD_ACCESS_KEY_SECRET
+          valueFrom:
+            secretKeyRef:
+              key: ALIBABA_CLOUD_ACCESS_KEY_SECRET
+              name: cilium-alibabacloud
+              optional: true
+{{- end }}
+{{- if .Values.k8sServiceHost }}
+        - name: KUBERNETES_SERVICE_HOST
+          value: {{ .Values.k8sServiceHost | quote }}
+{{- end }}
+{{- if .Values.k8sServicePort }}
+        - name: KUBERNETES_SERVICE_PORT
+          value: {{ .Values.k8sServicePort | quote }}
+{{- end }}
+{{- if .Values.azure.subscriptionID }}
+        - name: AZURE_SUBSCRIPTION_ID
+          value: {{ .Values.azure.subscriptionID }}
+{{- end }}
+{{- if .Values.azure.tenantID }}
+        - name: AZURE_TENANT_ID
+          value: {{ .Values.azure.tenantID }}
+{{- end }}
+{{- if .Values.azure.resourceGroup }}
+        - name: AZURE_RESOURCE_GROUP
+          value: {{ .Values.azure.resourceGroup }}
+{{- end }}
+{{- if .Values.azure.clientID }}
+        - name: AZURE_CLIENT_ID
+          value: {{ .Values.azure.clientID }}
+{{- end }}
+{{- if .Values.azure.clientSecret }}
+        - name: AZURE_CLIENT_SECRET
+          value: {{ .Values.azure.clientSecret }}
+{{- end }}
+{{- range $key, $value := .Values.operator.extraEnv }}
+        - name: {{ $key }}
+          value: {{ $value }}
+{{- end }}
+{{- if .Values.eni.enabled }}
+        image: "{{ .Values.operator.image.repository }}-aws{{ .Values.operator.image.suffix }}:{{ .Values.operator.image.tag }}{{ if .Values.operator.image.useDigest }}@{{ .Values.operator.image.awsDigest }}{{ end }}"
+{{- else if .Values.azure.enabled }}
+        image: "{{ .Values.operator.image.repository }}-azure{{ .Values.operator.image.suffix }}:{{ .Values.operator.image.tag }}{{ if .Values.operator.image.useDigest }}@{{ .Values.operator.image.azureDigest }}{{ end }}"
+{{- else if .Values.alibabacloud.enabled }}
+        image: "{{ .Values.operator.image.repository }}-alibabacloud{{ .Values.operator.image.suffix }}:{{ .Values.operator.image.tag }}{{ if .Values.operator.image.useDigest }}@{{ .Values.operator.image.alibabacloudDigest }}{{ end }}"
+{{- else }}
+        image: "{{ .Values.operator.image.repository }}-generic{{ .Values.operator.image.suffix }}:{{ .Values.operator.image.tag }}{{ if .Values.operator.image.useDigest }}@{{ .Values.operator.image.genericDigest }}{{ end }}"
+{{- end }}
+        imagePullPolicy: {{ .Values.operator.image.pullPolicy }}
+        name: cilium-operator
+{{- if .Values.operator.prometheus.enabled }}
+        ports:
+        - containerPort: {{ .Values.operator.prometheus.port }}
+          hostPort: {{ .Values.operator.prometheus.port }}
+          name: prometheus
+          protocol: TCP
+{{- end }}
+        livenessProbe:
+          httpGet:
+{{- if .Values.ipv4.enabled }}
+            host: '127.0.0.1'
+{{- else }}
+            host: '::1'
+{{- end }}
+            path: /healthz
+            port: 9234
+            scheme: HTTP
+          initialDelaySeconds: 60
+          periodSeconds: 10
+          timeoutSeconds: 3
+        volumeMounts:
+        - mountPath: /tmp/cilium/config-map
+          name: cilium-config-path
+          readOnly: true
+{{- if .Values.etcd.enabled }}
+        - mountPath: /var/lib/etcd-config
+          name: etcd-config-path
+          readOnly: true
+{{- if or .Values.etcd.ssl .Values.etcd.managed }}
+        - mountPath: /var/lib/etcd-secrets
+          name: etcd-secrets
+          readOnly: true
+{{- end }}
+{{- end }}
+{{- if .Values.kubeConfigPath }}
+        - mountPath: {{ .Values.kubeConfigPath }}
+          name: kube-config
+          readOnly: true
+{{- end }}
+{{- range .Values.operator.extraHostPathMounts }}
+        - mountPath: {{ .mountPath }}
+          name: {{ .name }}
+          readOnly: {{ .readOnly }}
+{{- if .mountPropagation }}
+          mountPropagation: {{ .mountPropagation }}
+{{- end }}
+{{- end }}
+{{- if .Values.bgp.enabled }}
+        - mountPath: /var/lib/cilium/bgp
+          name: bgp-config-path
+          readOnly: true
+{{- end }}
+{{- if .Values.operator.resources }}
+        resources:
+          {{- toYaml .Values.operator.resources | trim | nindent 10 }}
+{{- end }}
+      hostNetwork: true
+{{- if (and .Values.etcd.managed (not .Values.etcd.k8sService)) }}
+      # In managed etcd mode, Cilium must be able to resolve the DNS name of
+      # the etcd service
+      dnsPolicy: ClusterFirstWithHostNet
+{{- end }}
+      restartPolicy: Always
+{{- if and (or (and (eq .Release.Namespace "kube-system") (gt $k8sMinor "10")) (ge $k8sMinor "17") (gt $k8sMajor "1")) .Values.enableCriticalPriorityClass }}
+      priorityClassName: system-cluster-critical
+{{- end }}
+      serviceAccount: {{ .Values.serviceAccounts.operator.name | quote }}
+      serviceAccountName: {{ .Values.serviceAccounts.operator.name | quote }}
+{{- with .Values.operator.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | trim | nindent 8 }}
+{{- end }}
+{{- with .Values.tolerations }}
+      tolerations:
+      {{- toYaml . | trim | nindent 6 }}
+{{- end }}
+      volumes:
+        # To read the configuration from the config map
+      - configMap:
+          name: cilium-config
+        name: cilium-config-path
+{{- if .Values.etcd.enabled }}
+      # To read the etcd config stored in config maps
+      - configMap:
+          defaultMode: 420
+          items:
+          - key: etcd-config
+            path: etcd.config
+          name: cilium-config
+        name: etcd-config-path
+{{- if or .Values.etcd.ssl .Values.etcd.managed }}
+        # To read the k8s etcd secrets in case the user might want to use TLS
+      - name: etcd-secrets
+        secret:
+          defaultMode: 420
+          optional: true
+          secretName: cilium-etcd-secrets
+{{- end }}
+{{- end }}
+{{- if .Values.kubeConfigPath }}
+      - hostPath:
+          path: {{ .Values.kubeConfigPath }}
+          type: FileOrCreate
+        name: kube-config
+{{- end }}
+{{- range .Values.operator.extraHostPathMounts }}
+      - name: {{ .name }}
+        hostPath:
+          path: {{ .hostPath }}
+{{- if .hostPathType }}
+          type: {{ .hostPathType }}
+{{- end }}
+{{- end }}
+{{- if .Values.bgp.enabled }}
+      - configMap:
+          name: bgp-config
+        name: bgp-config-path
+{{- end }}
+{{- end }}
diff --git a/charts/cilium/templates/cilium-operator-service.yaml b/charts/cilium/templates/cilium-operator-service.yaml
new file mode 100644
index 0000000..eb1dadd
--- /dev/null
+++ b/charts/cilium/templates/cilium-operator-service.yaml
@@ -0,0 +1,21 @@
+{{- if and (.Values.operator.enabled) (.Values.operator.prometheus.enabled) (.Values.operator.prometheus.serviceMonitor.enabled) }}
+kind: Service
+apiVersion: v1
+metadata:
+  name: cilium-operator
+  namespace: {{ .Release.Namespace }}
+  labels:
+    io.cilium/app: operator
+    name: cilium-operator
+spec:
+  clusterIP: None
+  type: ClusterIP
+  ports:
+  - name: metrics
+    port: 6942
+    protocol: TCP
+    targetPort: prometheus
+  selector:
+    io.cilium/app: operator
+    name: cilium-operator
+{{- end }}
diff --git a/charts/cilium/templates/cilium-operator-serviceaccount.yaml b/charts/cilium/templates/cilium-operator-serviceaccount.yaml
new file mode 100644
index 0000000..0c579b8
--- /dev/null
+++ b/charts/cilium/templates/cilium-operator-serviceaccount.yaml
@@ -0,0 +1,14 @@
+{{- if and .Values.operator.enabled .Values.serviceAccounts.operator.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ .Values.serviceAccounts.operator.name | quote }}
+  namespace: {{ .Release.Namespace }}
+  {{- if and .Values.eni.enabled .Values.eni.iamRole }}
+  {{ $_ := set .Values.serviceAccounts.operator.annotations "eks.amazonaws.com/role-arn" .Values.eni.iamRole }}
+  {{- end }}
+  {{- if .Values.serviceAccounts.operator.annotations }}
+  annotations:
+{{ toYaml .Values.serviceAccounts.operator.annotations | indent 4 }}
+  {{- end }}
+{{- end }}
diff --git a/charts/cilium/templates/cilium-operator-servicemonitor.yaml b/charts/cilium/templates/cilium-operator-servicemonitor.yaml
new file mode 100644
index 0000000..39ce055
--- /dev/null
+++ b/charts/cilium/templates/cilium-operator-servicemonitor.yaml
@@ -0,0 +1,30 @@
+{{- if and (.Values.operator.enabled) (.Values.operator.prometheus.enabled) (.Values.operator.prometheus.serviceMonitor.enabled) }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: cilium-operator
+  {{- if .Values.operator.prometheus.serviceMonitor.namespace }}
+  namespace: {{ .Values.operator.prometheus.serviceMonitor.namespace }}
+  {{- else }}
+  namespace: {{ .Release.Namespace }}
+  {{- end }}
+  labels:
+    {{- with .Values.operator.prometheus.serviceMonitor.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+spec:
+  selector:
+    matchLabels:
+      io.cilium/app: operator
+      name: cilium-operator
+  namespaceSelector:
+    matchNames:
+    - {{ .Release.Namespace }}
+  endpoints:
+  - port: metrics
+    interval: 10s
+    honorLabels: true
+    path: /metrics
+  targetLabels:
+  - io.cilium/app
+{{- end }}
diff --git a/charts/cilium/templates/cilium-preflight-clusterrole.yaml b/charts/cilium/templates/cilium-preflight-clusterrole.yaml
new file mode 100644
index 0000000..a0fe42e
--- /dev/null
+++ b/charts/cilium/templates/cilium-preflight-clusterrole.yaml
@@ -0,0 +1,110 @@
+{{- if .Values.preflight.enabled }}
+{{- /*
+Keep this file in sync with cilium-agent-clusterrole.yaml
+*/ -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: cilium-pre-flight
+rules:
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - networkpolicies
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - discovery.k8s.io
+  resources:
+  - endpointslices
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - namespaces
+  - services
+  - nodes
+  - endpoints
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - pods/finalizers
+  verbs:
+  - get
+  - list
+  - watch
+  - update
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - get
+  - list
+  - watch
+  - update
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  - nodes/status
+  verbs:
+  - patch
+- apiGroups:
+  - apiextensions.k8s.io
+  resources:
+  - customresourcedefinitions
+  verbs:
+  # Deprecated for removal in v1.10
+  - create
+  - list
+  - watch
+  - update
+
+  # This is used when validating policies in preflight. It must stay until we
+  # figure out how to avoid "get" inside the preflight; ideally it should then
+  # be removed.
+  - get
+{{- if eq "k8s" .Values.tls.secretsBackend }}
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - get
+{{- end }}
+- apiGroups:
+  - cilium.io
+  resources:
+  - ciliumnetworkpolicies
+  - ciliumnetworkpolicies/status
+  - ciliumnetworkpolicies/finalizers
+  - ciliumclusterwidenetworkpolicies
+  - ciliumclusterwidenetworkpolicies/status
+  - ciliumclusterwidenetworkpolicies/finalizers
+  - ciliumendpoints
+  - ciliumendpoints/status
+  - ciliumendpoints/finalizers
+  - ciliumnodes
+  - ciliumnodes/status
+  - ciliumnodes/finalizers
+  - ciliumidentities
+  - ciliumidentities/finalizers
+  - ciliumlocalredirectpolicies
+  - ciliumlocalredirectpolicies/status
+  - ciliumlocalredirectpolicies/finalizers
+  - ciliumegressnatpolicies
+  verbs:
+  - '*'
+{{- end }}
diff --git a/charts/cilium/templates/cilium-preflight-clusterrolebinding.yaml b/charts/cilium/templates/cilium-preflight-clusterrolebinding.yaml
new file mode 100644
index 0000000..832dfb5
--- /dev/null
+++ b/charts/cilium/templates/cilium-preflight-clusterrolebinding.yaml
@@ -0,0 +1,14 @@
+{{- if and (.Values.preflight.enabled) (.Values.serviceAccounts.preflight.create) }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: cilium-pre-flight
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cilium-pre-flight
+subjects:
+- kind: ServiceAccount
+  name: {{ .Values.serviceAccounts.preflight.name | quote }}
+  namespace: {{ .Release.Namespace }}
+{{- end }}
diff --git a/charts/cilium/templates/cilium-preflight-daemonset.yaml b/charts/cilium/templates/cilium-preflight-daemonset.yaml
new file mode 100644
index 0000000..58d614b
--- /dev/null
+++ b/charts/cilium/templates/cilium-preflight-daemonset.yaml
@@ -0,0 +1,160 @@
+{{- if .Values.preflight.enabled }}
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: cilium-pre-flight-check
+  namespace: {{ .Release.Namespace }}
+spec:
+  selector:
+    matchLabels:
+      k8s-app: cilium-pre-flight-check
+      kubernetes.io/cluster-service: "true"
+  template:
+    metadata:
+{{- with .Values.preflight.podAnnotations }}
+      annotations:
+        {{- toYaml . | nindent 8 }}
+{{- end }}
+      labels:
+        k8s-app: cilium-pre-flight-check
+        kubernetes.io/cluster-service: "true"
+{{- with .Values.preflight.podLabels }}
+        {{- toYaml . | nindent 8 }}
+{{- end }}
+    spec:
+{{- if .Values.imagePullSecrets }}
+      imagePullSecrets:
+        {{ toYaml .Values.imagePullSecrets | indent 6 }}
+{{- end }}
+      initContainers:
+        - name: clean-cilium-state
+          image: "{{ .Values.preflight.image.repository }}:{{ .Values.preflight.image.tag }}{{ if .Values.preflight.image.useDigest }}@{{ .Values.preflight.image.digest }}{{ end }}"
+          imagePullPolicy: {{ .Values.preflight.image.pullPolicy }}
+          command: ["/bin/echo"]
+          args:
+          - "hello"
+      containers:
+        - name: cilium-pre-flight-check
+          image: "{{ .Values.preflight.image.repository }}:{{ .Values.preflight.image.tag }}{{ if .Values.preflight.image.useDigest }}@{{ .Values.preflight.image.digest }}{{ end }}"
+          imagePullPolicy: {{ .Values.preflight.image.pullPolicy }}
+          command: ["/bin/sh"]
+          args:
+          - -c
+          - "touch /tmp/ready; sleep 1h"
+          livenessProbe:
+            exec:
+              command:
+              - cat
+              - /tmp/ready
+            initialDelaySeconds: 5
+            periodSeconds: 5
+          readinessProbe:
+            exec:
+              command:
+              - cat
+              - /tmp/ready
+            initialDelaySeconds: 5
+            periodSeconds: 5
+          volumeMounts:
+          - mountPath: /var/run/cilium
+            name: cilium-run
+{{- if .Values.etcd.enabled }}
+          - mountPath: /var/lib/etcd-config
+            name: etcd-config-path
+            readOnly: true
+{{- if or .Values.etcd.ssl .Values.etcd.managed }}
+          - mountPath: /var/lib/etcd-secrets
+            name: etcd-secrets
+            readOnly: true
+{{- end }}
+{{- end }}
+
+{{- if ne .Values.preflight.tofqdnsPreCache "" }}
+        - name: cilium-pre-flight-fqdn-precache
+          image: "{{ .Values.preflight.image.repository }}:{{ .Values.preflight.image.tag }}{{ if .Values.preflight.image.useDigest }}@{{ .Values.preflight.image.digest }}{{ end }}"
+          imagePullPolicy: {{ .Values.preflight.image.pullPolicy }}
+          command: ["/bin/sh"]
+          args:
+          - -c
+          - "cilium preflight fqdn-poller --tofqdns-pre-cache {{ .Values.preflight.tofqdnsPreCache }} && touch /tmp/ready-tofqdns-precache"
+          livenessProbe:
+            exec:
+              command:
+              - cat
+              - /tmp/ready-tofqdns-precache
+            initialDelaySeconds: 5
+            periodSeconds: 5
+          readinessProbe:
+            exec:
+              command:
+              - cat
+              - /tmp/ready-tofqdns-precache
+            initialDelaySeconds: 5
+            periodSeconds: 5
+          env:
+{{- if .Values.k8sServiceHost }}
+          - name: KUBERNETES_SERVICE_HOST
+            value: {{ .Values.k8sServiceHost | quote }}
+{{- end }}
+{{- if .Values.k8sServicePort }}
+          - name: KUBERNETES_SERVICE_PORT
+            value: {{ .Values.k8sServicePort | quote }}
+{{- end }}
+          volumeMounts:
+          - mountPath: /var/run/cilium
+            name: cilium-run
+{{- if .Values.etcd.enabled }}
+          - mountPath: /var/lib/etcd-config
+            name: etcd-config-path
+            readOnly: true
+{{- if or .Values.etcd.ssl .Values.etcd.managed }}
+          - mountPath: /var/lib/etcd-secrets
+            name: etcd-secrets
+            readOnly: true
+{{- end }}
+{{- end }}
+{{- end }}
+      hostNetwork: true
+      # This is here to seamlessly allow migrate-identity to work with
+      # etcd-operator setups. The assumption is that other cases would also
+      # work since the cluster DNS would forward the request on.
+      # This differs from the cilium-agent daemonset, where this is only
+      # enabled when etcd.managed=true
+      dnsPolicy: ClusterFirstWithHostNet
+      restartPolicy: Always
+      serviceAccount: {{ .Values.serviceAccounts.preflight.name | quote }}
+      serviceAccountName: {{ .Values.serviceAccounts.preflight.name | quote }}
+      terminationGracePeriodSeconds: 1
+{{- with .Values.tolerations }}
+      tolerations:
+      {{- toYaml . | trim | nindent 6 }}
+{{- end }}
+      volumes:
+        # To keep state between restarts / upgrades
+      - hostPath:
+          path: /var/run/cilium
+          type: DirectoryOrCreate
+        name: cilium-run
+      - hostPath:
+          path: /sys/fs/bpf
+          type: DirectoryOrCreate
+        name: bpf-maps
+{{- if .Values.etcd.enabled }}
+        # To read the etcd config stored in config maps
+      - configMap:
+          defaultMode: 420
+          items:
+          - key: etcd-config
+            path: etcd.config
+          name: cilium-config
+        name: etcd-config-path
+        # To read the k8s etcd secrets in case the user might want to use TLS
+{{- if or .Values.etcd.ssl .Values.etcd.managed }}
+      - name: etcd-secrets
+        secret:
+          defaultMode: 420
+          optional: true
+          secretName: cilium-etcd-secrets
+{{- end }}
+{{- end }}
+{{- end }}
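
A minimal values sketch for this DaemonSet, including the optional FQDN pre-cache
container (the cache path is a hypothetical example; an empty string keeps the
container disabled):

    preflight:
      enabled: true
      tofqdnsPreCache: "/var/run/cilium/dns-precache.json"   # placeholder path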
diff --git a/charts/cilium/templates/cilium-preflight-deployment.yaml b/charts/cilium/templates/cilium-preflight-deployment.yaml
new file mode 100644
index 0000000..461a1a6
--- /dev/null
+++ b/charts/cilium/templates/cilium-preflight-deployment.yaml
@@ -0,0 +1,89 @@
+{{- if .Values.preflight.enabled }}
+{{- if .Values.preflight.validateCNPs }}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: cilium-pre-flight-check
+  namespace: {{ .Release.Namespace }}
+spec:
+  selector:
+    matchLabels:
+      k8s-app: cilium-pre-flight-check-deployment
+      kubernetes.io/cluster-service: "true"
+  template:
+    metadata:
+{{- with .Values.preflight.podAnnotations }}
+      annotations:
+        {{- toYaml . | nindent 8 }}
+{{- end }}
+      labels:
+        k8s-app: cilium-pre-flight-check-deployment
+        kubernetes.io/cluster-service: "true"
+{{- with .Values.preflight.podLabels }}
+        {{- toYaml . | nindent 8 }}
+{{- end }}
+    spec:
+      affinity:
+        podAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+              - key: "k8s-app"
+                operator: In
+                values:
+                - cilium
+            topologyKey: "kubernetes.io/hostname"
+{{- if .Values.imagePullSecrets }}
+      imagePullSecrets:
+        {{ toYaml .Values.imagePullSecrets | indent 8 }}
+{{- end }}
+      containers:
+{{- if .Values.preflight.validateCNPs }}
+        - name: cnp-validator
+          image: "{{ .Values.preflight.image.repository }}:{{ .Values.preflight.image.tag }}{{ if .Values.preflight.image.useDigest }}@{{ .Values.preflight.image.digest }}{{ end }}"
+          imagePullPolicy: {{ .Values.preflight.image.pullPolicy }}
+          command: ["/bin/sh"]
+          args:
+          - -c
+          - "cilium preflight validate-cnp && touch /tmp/ready-validate-cnp && sleep 1h"
+          livenessProbe:
+            exec:
+              command:
+              - cat
+              - /tmp/ready-validate-cnp
+            initialDelaySeconds: 5
+            periodSeconds: 5
+          readinessProbe:
+            exec:
+              command:
+              - cat
+              - /tmp/ready-validate-cnp
+            initialDelaySeconds: 5
+            periodSeconds: 5
+{{- if not (and (empty .Values.k8sServiceHost) (empty .Values.k8sServicePort)) }}
+          env:
+{{- if .Values.k8sServiceHost }}
+          - name: KUBERNETES_SERVICE_HOST
+            value: {{ .Values.k8sServiceHost | quote }}
+{{- end }}
+{{- if .Values.k8sServicePort }}
+          - name: KUBERNETES_SERVICE_PORT
+            value: {{ .Values.k8sServicePort | quote }}
+{{- end }}
+{{- end }}
+{{- end }}
+      hostNetwork: true
+      restartPolicy: Always
+      serviceAccount: {{ .Values.serviceAccounts.preflight.name | quote }}
+      serviceAccountName: {{ .Values.serviceAccounts.preflight.name | quote }}
+      terminationGracePeriodSeconds: 1
+{{- with .Values.preflight.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | trim | nindent 8 }}
+{{- end }}
+{{- with .Values.tolerations }}
+      tolerations:
+      {{- toYaml . | trim | nindent 6 }}
+{{- end }}
+{{- end }}
+{{- end }}
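
The Deployment above only renders when CNP validation is requested; the relevant
values, with names taken from the template's conditionals:

    preflight:
      enabled: true
      validateCNPs: true   # gates both the Deployment and the cnp-validator container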
diff --git a/charts/cilium/templates/cilium-preflight-serviceaccount.yaml b/charts/cilium/templates/cilium-preflight-serviceaccount.yaml
new file mode 100644
index 0000000..0d62e00
--- /dev/null
+++ b/charts/cilium/templates/cilium-preflight-serviceaccount.yaml
@@ -0,0 +1,11 @@
+{{- if and .Values.preflight.enabled .Values.serviceAccounts.preflight.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ .Values.serviceAccounts.preflight.name | quote }}
+  namespace: {{ .Release.Namespace }}
+  {{- if .Values.serviceAccounts.preflight.annotations }}
+  annotations:
+{{ toYaml .Values.serviceAccounts.preflight.annotations | indent 4 }}
+  {{- end }}
+{{- end }}
diff --git a/charts/cilium/templates/cilium-resource-quota.yaml b/charts/cilium/templates/cilium-resource-quota.yaml
new file mode 100644
index 0000000..a985696
--- /dev/null
+++ b/charts/cilium/templates/cilium-resource-quota.yaml
@@ -0,0 +1,35 @@
+{{- if or .Values.resourceQuotas.enabled (and (ne .Release.Namespace "kube-system") .Values.gke.enabled) }}
+{{- if .Values.agent }}
+apiVersion: v1
+kind: ResourceQuota
+metadata:
+  name: cilium-resource-quota
+  namespace: {{ .Release.Namespace }}
+spec:
+  hard:
+    pods: {{ .Values.resourceQuotas.cilium.hard.pods | quote }}
+  scopeSelector:
+    matchExpressions:
+    - operator: In
+      scopeName: PriorityClass
+      values:
+      - system-node-critical
+{{- end }}
+{{- if .Values.operator.enabled }}
+---
+apiVersion: v1
+kind: ResourceQuota
+metadata:
+  name: cilium-operator-resource-quota
+  namespace: {{ .Release.Namespace }}
+spec:
+  hard:
+    pods: {{ .Values.resourceQuotas.operator.hard.pods | quote }}
+  scopeSelector:
+    matchExpressions:
+    - operator: In
+      scopeName: PriorityClass
+      values:
+      - system-cluster-critical
+{{- end }}
+{{- end }}
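
A values sketch for the quota template (names from the conditionals above; the pod
counts are illustrative, not sizing recommendations):

    resourceQuotas:
      enabled: true
      cilium:
        hard:
          pods: "10k"   # illustrative ceiling for agent pods
      operator:
        hard:
          pods: "15"    # illustrative ceiling for operator pods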
diff --git a/charts/cilium/templates/clustermesh-apiserver-admin-secret.yaml b/charts/cilium/templates/clustermesh-apiserver-admin-secret.yaml
new file mode 100644
index 0000000..4b02306
--- /dev/null
+++ b/charts/cilium/templates/clustermesh-apiserver-admin-secret.yaml
@@ -0,0 +1,20 @@
+{{- if and .Values.agent (not .Values.preflight.enabled) (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) }}
+{{- $adminCertsProvided := and .Values.clustermesh.apiserver.tls.ca.cert .Values.clustermesh.apiserver.tls.admin.cert .Values.clustermesh.apiserver.tls.admin.key }}
+{{- $apiserverCertsGenerate := and .Values.clustermesh.apiserver.tls.auto.enabled (eq .Values.clustermesh.apiserver.tls.auto.method "helm") -}}
+{{- if or $adminCertsProvided $apiserverCertsGenerate }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: clustermesh-apiserver-admin-cert
+  namespace: {{ .Release.Namespace }}
+type: kubernetes.io/tls
+data:
+{{- if $apiserverCertsGenerate }}
+{{ include "clustermesh.apiserver.admin.gen-cert" . | indent 2 }}
+{{- else }}
+  ca.crt: {{ .Values.clustermesh.apiserver.tls.ca.cert }}
+  tls.crt: {{ .Values.clustermesh.apiserver.tls.admin.cert }}
+  tls.key: {{ .Values.clustermesh.apiserver.tls.admin.key }}
+{{- end }}
+{{- end }}
+{{- end }}
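
This secret is either generated by Helm or filled from user-supplied material; a
sketch of both modes (key names from this template, certificate contents elided):

    clustermesh:
      useAPIServer: true
      apiserver:
        tls:
          auto:
            enabled: true
            method: helm   # Helm generates the CA, server, admin, remote and client certs
          # or disable auto and provide base64-encoded PEM data instead, e.g.:
          # ca:    { cert: "...", key: "..." }
          # admin: { cert: "...", key: "..." }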
diff --git a/charts/cilium/templates/clustermesh-apiserver-ca-secret.yaml b/charts/cilium/templates/clustermesh-apiserver-ca-secret.yaml
new file mode 100644
index 0000000..129f8eb
--- /dev/null
+++ b/charts/cilium/templates/clustermesh-apiserver-ca-secret.yaml
@@ -0,0 +1,17 @@
+{{- if and .Values.agent (not .Values.preflight.enabled) (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) }}
+{{- if or (and .Values.clustermesh.apiserver.tls.auto.enabled (eq .Values.clustermesh.apiserver.tls.auto.method "helm")) (and .Values.clustermesh.apiserver.tls.ca.cert .Values.clustermesh.apiserver.tls.ca.key) }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: clustermesh-apiserver-ca-cert
+  namespace: {{ .Release.Namespace }}
+data:
+{{- if and .Values.clustermesh.apiserver.tls.auto.enabled (eq .Values.clustermesh.apiserver.tls.auto.method "helm") }}
+{{ include "clustermesh.apiserver.ca.gen-cert" . | indent 2 }}
+{{- else }}
+  ca.crt: {{ .Values.clustermesh.apiserver.tls.ca.cert }}
+  ca.key: {{ .Values.clustermesh.apiserver.tls.ca.key }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/charts/cilium/templates/clustermesh-apiserver-client-secret.yaml b/charts/cilium/templates/clustermesh-apiserver-client-secret.yaml
new file mode 100644
index 0000000..d155a86
--- /dev/null
+++ b/charts/cilium/templates/clustermesh-apiserver-client-secret.yaml
@@ -0,0 +1,20 @@
+{{- if and .Values.agent (not .Values.preflight.enabled) .Values.externalWorkloads.enabled }}
+{{- $clientCertsProvided := and .Values.clustermesh.apiserver.tls.ca.cert .Values.clustermesh.apiserver.tls.client.cert .Values.clustermesh.apiserver.tls.client.key }}
+{{- $apiserverCertsGenerate := and .Values.clustermesh.apiserver.tls.auto.enabled (eq .Values.clustermesh.apiserver.tls.auto.method "helm") -}}
+{{- if or $clientCertsProvided $apiserverCertsGenerate }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: clustermesh-apiserver-client-cert
+  namespace: {{ .Release.Namespace }}
+type: kubernetes.io/tls
+data:
+{{- if $apiserverCertsGenerate }}
+{{ include "clustermesh.apiserver.client.gen-cert" . | indent 2 }}
+{{- else }}
+  ca.crt: {{ .Values.clustermesh.apiserver.tls.ca.cert }}
+  tls.crt: {{ .Values.clustermesh.apiserver.tls.client.cert }}
+  tls.key: {{ .Values.clustermesh.apiserver.tls.client.key }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/charts/cilium/templates/clustermesh-apiserver-clusterrole.yaml b/charts/cilium/templates/clustermesh-apiserver-clusterrole.yaml
new file mode 100644
index 0000000..74bd961
--- /dev/null
+++ b/charts/cilium/templates/clustermesh-apiserver-clusterrole.yaml
@@ -0,0 +1,44 @@
+{{- if and (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) .Values.serviceAccounts.clustermeshApiserver.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: clustermesh-apiserver
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - endpoints
+  - namespaces
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - discovery.k8s.io
+  resources:
+  - endpointslices
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - apiextensions.k8s.io
+  resources:
+  - customresourcedefinitions
+  verbs:
+  - list
+- apiGroups:
+  - cilium.io
+  resources:
+  - ciliumnodes
+  - ciliumnodes/status
+  - ciliumexternalworkloads
+  - ciliumexternalworkloads/status
+  - ciliumidentities
+  - ciliumidentities/status
+  - ciliumendpoints
+  - ciliumendpoints/status
+  verbs:
+  - '*'
+{{- end }}
diff --git a/charts/cilium/templates/clustermesh-apiserver-clusterrolebinding.yaml b/charts/cilium/templates/clustermesh-apiserver-clusterrolebinding.yaml
new file mode 100644
index 0000000..a69bb6d
--- /dev/null
+++ b/charts/cilium/templates/clustermesh-apiserver-clusterrolebinding.yaml
@@ -0,0 +1,14 @@
+{{- if and (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) (.Values.serviceAccounts.clustermeshApiserver.create) }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: clustermesh-apiserver
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: clustermesh-apiserver
+subjects:
+- kind: ServiceAccount
+  name: {{ .Values.serviceAccounts.clustermeshApiserver.name | quote }}
+  namespace: {{ .Release.Namespace }}
+{{- end }}
diff --git a/charts/cilium/templates/clustermesh-apiserver-deployment.yaml b/charts/cilium/templates/clustermesh-apiserver-deployment.yaml
new file mode 100644
index 0000000..dfe50ac
--- /dev/null
+++ b/charts/cilium/templates/clustermesh-apiserver-deployment.yaml
@@ -0,0 +1,169 @@
+{{- if (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) }}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: clustermesh-apiserver
+  labels:
+    k8s-app: clustermesh-apiserver
+  namespace: {{ .Release.Namespace }}
+spec:
+  replicas: {{ .Values.clustermesh.apiserver.replicas }}
+  selector:
+    matchLabels:
+      k8s-app: clustermesh-apiserver
+{{- with .Values.clustermesh.apiserver.updateStrategy }}
+  strategy: {{- toYaml . | nindent 4 }}
+{{- end }}
+  template:
+    metadata:
+      annotations:
+{{- with .Values.clustermesh.apiserver.podAnnotations }}
+        {{- toYaml . | nindent 8 }}
+{{- end }}
+      labels:
+        k8s-app: clustermesh-apiserver
+{{- with .Values.clustermesh.apiserver.podLabels }}
+        {{- toYaml . | nindent 8 }}
+{{- end }}
+    spec:
+{{- with .Values.imagePullSecrets }}
+      imagePullSecrets: {{- toYaml . | nindent 8 }}
+{{- end }}
+      restartPolicy: Always
+      serviceAccount: {{ .Values.serviceAccounts.clustermeshApiserver.name | quote }}
+      serviceAccountName: {{ .Values.serviceAccounts.clustermeshApiserver.name | quote }}
+      initContainers:
+      - name: etcd-init
+        image: {{ .Values.clustermesh.apiserver.etcd.image.repository }}:{{ .Values.clustermesh.apiserver.etcd.image.tag }}
+        imagePullPolicy: {{ .Values.clustermesh.apiserver.etcd.image.pullPolicy }}
+        env:
+        - name: ETCDCTL_API
+          value: "3"
+        - name: HOSTNAME_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        command: ["/bin/sh", "-c"]
+        args:
+        - >
+          rm -rf /var/run/etcd/*;
+          export ETCDCTL_API=3;
+          /usr/local/bin/etcd --data-dir=/var/run/etcd --name=clustermesh-apiserver --listen-client-urls=http://127.0.0.1:2379 --advertise-client-urls=http://127.0.0.1:2379 --initial-cluster-token=clustermesh-apiserver --initial-cluster-state=new --auto-compaction-retention=1 &
+          export rootpw=`head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16`;
+          echo $rootpw | etcdctl --interactive=false user add root;
+          etcdctl user grant-role root root;
+          export vmpw=`head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16`;
+          echo $vmpw | etcdctl --interactive=false user add externalworkload;
+          etcdctl role add externalworkload;
+          etcdctl role grant-permission externalworkload --from-key read '';
+          etcdctl role grant-permission externalworkload readwrite --prefix cilium/state/noderegister/v1/;
+          etcdctl role grant-permission externalworkload readwrite --prefix cilium/.initlock/;
+          etcdctl user grant-role externalworkload externalworkload;
+          export remotepw=`head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16`;
+          echo $remotepw | etcdctl --interactive=false user add remote;
+          etcdctl role add remote;
+          etcdctl role grant-permission remote --from-key read '';
+          etcdctl user grant-role remote remote;
+          etcdctl auth enable;
+          exit
+        volumeMounts:
+        - mountPath: /var/run/etcd
+          name: etcd-data-dir
+      containers:
+      - name: etcd
+        image: {{ .Values.clustermesh.apiserver.etcd.image.repository }}:{{ .Values.clustermesh.apiserver.etcd.image.tag }}
+        imagePullPolicy: {{ .Values.clustermesh.apiserver.etcd.image.pullPolicy }}
+        env:
+        - name: ETCDCTL_API
+          value: "3"
+        - name: HOSTNAME_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        command:
+          - /usr/local/bin/etcd
+        args:
+          - --data-dir=/var/run/etcd
+          - --name=clustermesh-apiserver
+          - --client-cert-auth
+          - --trusted-ca-file=/var/lib/etcd-secrets/ca.crt
+          - --cert-file=/var/lib/etcd-secrets/tls.crt
+          - --key-file=/var/lib/etcd-secrets/tls.key
+          - --listen-client-urls=https://127.0.0.1:2379,https://$(HOSTNAME_IP):2379
+          - --advertise-client-urls=https://$(HOSTNAME_IP):2379
+          - --initial-cluster-token=clustermesh-apiserver
+          - --auto-compaction-retention=1
+        volumeMounts:
+        - mountPath: /var/lib/etcd-secrets
+          name: etcd-server-secrets
+          readOnly: true
+        - mountPath: /var/run/etcd
+          name: etcd-data-dir
+      - name: "apiserver"
+        image: "{{ .Values.clustermesh.apiserver.image.repository }}:{{ .Values.clustermesh.apiserver.image.tag }}{{ if .Values.clustermesh.apiserver.image.useDigest }}@{{ .Values.clustermesh.apiserver.image.digest }}{{ end }}"
+        imagePullPolicy: {{ .Values.clustermesh.apiserver.image.pullPolicy }}
+        command:
+          - /usr/bin/clustermesh-apiserver
+        args:
+{{- if .Values.debug.enabled }}
+          - --debug
+{{- end }}
+          - --cluster-name=$(CLUSTER_NAME)
+          - --kvstore-opt
+          - etcd.config=/var/lib/cilium/etcd-config.yaml
+        env:
+        - name: CLUSTER_NAME
+          valueFrom:
+            configMapKeyRef:
+              key: cluster-name
+              name: cilium-config
+        - name: CLUSTER_ID
+          valueFrom:
+            configMapKeyRef:
+              key: cluster-id
+              name: cilium-config
+              optional: true
+        - name: IDENTITY_ALLOCATION_MODE
+          valueFrom:
+            configMapKeyRef:
+              key: identity-allocation-mode
+              name: cilium-config
+{{- with .Values.clustermesh.apiserver.resources }}
+        resources: {{- toYaml . | nindent 10 }}
+{{- end }}
+        volumeMounts:
+        - mountPath: /var/lib/cilium/etcd-secrets
+          name: etcd-admin-client
+          readOnly: true
+      volumes:
+      - name: etcd-server-secrets
+        projected:
+          defaultMode: 420
+          sources:
+          - secret:
+              name: clustermesh-apiserver-ca-cert
+              items:
+              - key: ca.crt
+                path: ca.crt
+          - secret:
+              name: clustermesh-apiserver-server-cert
+      - name: etcd-admin-client
+        projected:
+          defaultMode: 420
+          sources:
+          - secret:
+              name: clustermesh-apiserver-ca-cert
+              items:
+              - key: ca.crt
+                path: ca.crt
+          - secret:
+              name: clustermesh-apiserver-admin-cert
+      - name: etcd-data-dir
+        emptyDir: {}
+{{- with .Values.clustermesh.apiserver.nodeSelector }}
+      nodeSelector: {{- toYaml . | nindent 8 }}
+{{- end }}
+{{- with .Values.clustermesh.apiserver.tolerations }}
+      tolerations: {{- toYaml . | nindent 8 }}
+{{- end }}
+{{- end }}
diff --git a/charts/cilium/templates/clustermesh-apiserver-generate-certs-cronjob.yaml b/charts/cilium/templates/clustermesh-apiserver-generate-certs-cronjob.yaml
new file mode 100644
index 0000000..a1962c3
--- /dev/null
+++ b/charts/cilium/templates/clustermesh-apiserver-generate-certs-cronjob.yaml
@@ -0,0 +1,14 @@
+{{- if and (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) .Values.clustermesh.apiserver.tls.auto.enabled (eq .Values.clustermesh.apiserver.tls.auto.method "cronJob") .Values.clustermesh.apiserver.tls.auto.schedule }}
+apiVersion: batch/v1beta1
+kind: CronJob
+metadata:
+  name: clustermesh-apiserver-generate-certs
+  namespace: {{ .Release.Namespace }}
+  labels:
+    k8s-app: clustermesh-apiserver-generate-certs
+spec:
+  schedule: {{ .Values.clustermesh.apiserver.tls.auto.schedule | quote }}
+  concurrencyPolicy: Forbid
+  jobTemplate:
+{{- include "clustermesh-apiserver-generate-certs.job.spec" . | nindent 4 }}
+{{- end }}
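
For the cronJob method, a values sketch (the schedule is illustrative; note that
batch/v1beta1 CronJobs require a cluster that still serves that deprecated API):

    clustermesh:
      useAPIServer: true
      apiserver:
        tls:
          auto:
            enabled: true
            method: cronJob
            schedule: "0 0 1 */4 *"   # illustrative: re-issue certificates every four months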
diff --git a/charts/cilium/templates/clustermesh-apiserver-generate-certs-job.yaml b/charts/cilium/templates/clustermesh-apiserver-generate-certs-job.yaml
new file mode 100644
index 0000000..c0e4f03
--- /dev/null
+++ b/charts/cilium/templates/clustermesh-apiserver-generate-certs-job.yaml
@@ -0,0 +1,10 @@
+{{- if and (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) (eq .Values.clustermesh.apiserver.tls.auto.method "cronJob") }}
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: clustermesh-apiserver-generate-certs
+  namespace: {{ .Release.Namespace }}
+  labels:
+    k8s-app: clustermesh-apiserver-generate-certs
+{{ include "clustermesh-apiserver-generate-certs.job.spec" . }}
+{{- end }}
diff --git a/charts/cilium/templates/clustermesh-apiserver-generate-certs-role.yaml b/charts/cilium/templates/clustermesh-apiserver-generate-certs-role.yaml
new file mode 100644
index 0000000..45e59c8
--- /dev/null
+++ b/charts/cilium/templates/clustermesh-apiserver-generate-certs-role.yaml
@@ -0,0 +1,34 @@
+{{- if and (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) .Values.clustermesh.apiserver.tls.auto.enabled (eq .Values.clustermesh.apiserver.tls.auto.method "cronJob") .Values.serviceAccounts.clustermeshcertgen.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: clustermesh-apiserver-generate-certs
+  namespace: {{ .Release.Namespace }}
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - secrets
+    verbs:
+      - create
+  - apiGroups:
+      - ""
+    resources:
+      - secrets
+    resourceNames:
+      - clustermesh-apiserver-ca-cert
+    verbs:
+      - get
+      - update
+  - apiGroups:
+      - ""
+    resources:
+      - secrets
+    resourceNames:
+      - clustermesh-apiserver-server-cert
+      - clustermesh-apiserver-admin-cert
+      - clustermesh-apiserver-remote-cert
+      - clustermesh-apiserver-client-cert
+    verbs:
+      - update
+{{- end }}
diff --git a/charts/cilium/templates/clustermesh-apiserver-generate-certs-rolebinding.yaml b/charts/cilium/templates/clustermesh-apiserver-generate-certs-rolebinding.yaml
new file mode 100644
index 0000000..d52e9c0
--- /dev/null
+++ b/charts/cilium/templates/clustermesh-apiserver-generate-certs-rolebinding.yaml
@@ -0,0 +1,15 @@
+{{- if and (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) .Values.clustermesh.apiserver.tls.auto.enabled (eq .Values.clustermesh.apiserver.tls.auto.method "cronJob") .Values.serviceAccounts.clustermeshcertgen.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: clustermesh-apiserver-generate-certs
+  namespace: {{ .Release.Namespace }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: clustermesh-apiserver-generate-certs
+subjects:
+- kind: ServiceAccount
+  name: {{ .Values.serviceAccounts.clustermeshcertgen.name | quote }}
+  namespace: {{ .Release.Namespace }}
+{{- end }}
diff --git a/charts/cilium/templates/clustermesh-apiserver-generate-certs-serviceaccount.yaml b/charts/cilium/templates/clustermesh-apiserver-generate-certs-serviceaccount.yaml
new file mode 100644
index 0000000..a7583fa
--- /dev/null
+++ b/charts/cilium/templates/clustermesh-apiserver-generate-certs-serviceaccount.yaml
@@ -0,0 +1,10 @@
+{{- if and (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) .Values.clustermesh.apiserver.tls.auto.enabled (eq .Values.clustermesh.apiserver.tls.auto.method "cronJob") .Values.serviceAccounts.clustermeshcertgen.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ .Values.serviceAccounts.clustermeshcertgen.name | quote }}
+  namespace: {{ .Release.Namespace }}
+{{- with .Values.serviceAccounts.clustermeshcertgen.annotations }}
+  annotations: {{- toYaml . | nindent 4 }}
+{{- end }}
+{{- end }}
diff --git a/charts/cilium/templates/clustermesh-apiserver-remote-secret.yaml b/charts/cilium/templates/clustermesh-apiserver-remote-secret.yaml
new file mode 100644
index 0000000..0545990
--- /dev/null
+++ b/charts/cilium/templates/clustermesh-apiserver-remote-secret.yaml
@@ -0,0 +1,20 @@
+{{- if and .Values.agent (not .Values.preflight.enabled) .Values.clustermesh.useAPIServer }}
+{{- $remoteCertsProvided := and .Values.clustermesh.apiserver.tls.ca.cert .Values.clustermesh.apiserver.tls.remote.cert .Values.clustermesh.apiserver.tls.remote.key }}
+{{- $apiserverCertsGenerate := and .Values.clustermesh.apiserver.tls.auto.enabled (eq .Values.clustermesh.apiserver.tls.auto.method "helm") -}}
+{{- if or $remoteCertsProvided $apiserverCertsGenerate }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: clustermesh-apiserver-remote-cert
+  namespace: {{ .Release.Namespace }}
+type: kubernetes.io/tls
+data:
+{{- if $apiserverCertsGenerate }}
+{{ include "clustermesh.apiserver.remote.gen-cert" . | indent 2 }}
+{{- else }}
+  ca.crt: {{ .Values.clustermesh.apiserver.tls.ca.cert }}
+  tls.crt: {{ .Values.clustermesh.apiserver.tls.remote.cert }}
+  tls.key: {{ .Values.clustermesh.apiserver.tls.remote.key }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/charts/cilium/templates/clustermesh-apiserver-server-secret.yaml b/charts/cilium/templates/clustermesh-apiserver-server-secret.yaml
new file mode 100644
index 0000000..4654e83
--- /dev/null
+++ b/charts/cilium/templates/clustermesh-apiserver-server-secret.yaml
@@ -0,0 +1,20 @@
+{{- if and .Values.agent (not .Values.preflight.enabled) (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) }}
+{{- $serverCertsProvided := and .Values.clustermesh.apiserver.tls.ca.cert .Values.clustermesh.apiserver.tls.server.cert .Values.clustermesh.apiserver.tls.server.key }}
+{{- $apiserverCertsGenerate := and .Values.clustermesh.apiserver.tls.auto.enabled (eq .Values.clustermesh.apiserver.tls.auto.method "helm") -}}
+{{- if or $serverCertsProvided $apiserverCertsGenerate }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: clustermesh-apiserver-server-cert
+  namespace: {{ .Release.Namespace }}
+type: kubernetes.io/tls
+data:
+{{- if $apiserverCertsGenerate }}
+{{ include "clustermesh.apiserver.server.gen-cert" . | indent 2 }}
+{{- else }}
+  ca.crt: {{ .Values.clustermesh.apiserver.tls.ca.cert }}
+  tls.crt: {{ .Values.clustermesh.apiserver.tls.server.cert }}
+  tls.key: {{ .Values.clustermesh.apiserver.tls.server.key }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/charts/cilium/templates/clustermesh-apiserver-service.yaml b/charts/cilium/templates/clustermesh-apiserver-service.yaml
new file mode 100644
index 0000000..16480bd
--- /dev/null
+++ b/charts/cilium/templates/clustermesh-apiserver-service.yaml
@@ -0,0 +1,24 @@
+{{- if (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: "clustermesh-apiserver"
+  namespace: {{ .Release.Namespace }}
+  labels:
+    k8s-app: clustermesh-apiserver
+{{- with .Values.clustermesh.apiserver.service.annotations }}
+  annotations: {{- toYaml . | nindent 4 }}
+{{- end }}
+spec:
+  type: {{ .Values.clustermesh.apiserver.service.type }}
+  selector:
+    k8s-app: clustermesh-apiserver
+  ports:
+  - port: 2379
+{{- if and (eq "NodePort" .Values.clustermesh.apiserver.service.type) .Values.clustermesh.apiserver.service.nodePort }}
+    nodePort: {{ .Values.clustermesh.apiserver.service.nodePort }}
+{{- end }}
+{{- if and (eq "LoadBalancer" .Values.clustermesh.apiserver.service.type) .Values.clustermesh.apiserver.service.loadBalancerIP }}
+  loadBalancerIP: {{ .Values.clustermesh.apiserver.service.loadBalancerIP }}
+{{- end }}
+{{- end }}
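
A sketch of exposing the apiserver through a NodePort (names from the conditionals
above; the port number is a placeholder and must fall inside the cluster's NodePort
range):

    clustermesh:
      useAPIServer: true
      apiserver:
        service:
          type: NodePort
          nodePort: 32379   # placeholder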
diff --git a/charts/cilium/templates/clustermesh-apiserver-serviceaccount.yaml b/charts/cilium/templates/clustermesh-apiserver-serviceaccount.yaml
new file mode 100644
index 0000000..715d5c2
--- /dev/null
+++ b/charts/cilium/templates/clustermesh-apiserver-serviceaccount.yaml
@@ -0,0 +1,10 @@
+{{- if and (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) .Values.serviceAccounts.clustermeshApiserver.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ .Values.serviceAccounts.clustermeshApiserver.name | quote }}
+  namespace: {{ .Release.Namespace }}
+{{- with .Values.serviceAccounts.clustermeshApiserver.annotations }}
+  annotations: {{- toYaml . | nindent 4 }}
+{{- end }}
+{{- end }}
diff --git a/charts/cilium/templates/etcd-operator-clusterrole.yaml b/charts/cilium/templates/etcd-operator-clusterrole.yaml
new file mode 100644
index 0000000..5a87497
--- /dev/null
+++ b/charts/cilium/templates/etcd-operator-clusterrole.yaml
@@ -0,0 +1,54 @@
+{{- if .Values.etcd.managed }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: etcd-operator
+rules:
+- apiGroups:
+  - etcd.database.coreos.com
+  resources:
+  - etcdclusters
+  - etcdbackups
+  - etcdrestores
+  verbs:
+  - '*'
+- apiGroups:
+  - apiextensions.k8s.io
+  resources:
+  - customresourcedefinitions
+  verbs:
+  - '*'
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - services
+  - endpoints
+  - persistentvolumeclaims
+  - events
+  - deployments
+  verbs:
+  - '*'
+- apiGroups:
+  - apps
+  resources:
+  - deployments
+  verbs:
+  - '*'
+- apiGroups:
+  - extensions
+  resources:
+  - deployments
+  verbs:
+  - create
+  - get
+  - list
+  - patch
+  - update
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - get
+{{- end }}
diff --git a/charts/cilium/templates/etcd-operator-clusterrolebinding.yaml b/charts/cilium/templates/etcd-operator-clusterrolebinding.yaml
new file mode 100644
index 0000000..f2f36e2
--- /dev/null
+++ b/charts/cilium/templates/etcd-operator-clusterrolebinding.yaml
@@ -0,0 +1,14 @@
+{{- if .Values.etcd.managed }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: etcd-operator
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: etcd-operator
+subjects:
+- kind: ServiceAccount
+  name: cilium-etcd-sa
+  namespace: {{ .Release.Namespace }}
+{{- end }}
diff --git a/charts/cilium/templates/etcd-operator-serviceaccount.yaml b/charts/cilium/templates/etcd-operator-serviceaccount.yaml
new file mode 100644
index 0000000..278d984
--- /dev/null
+++ b/charts/cilium/templates/etcd-operator-serviceaccount.yaml
@@ -0,0 +1,11 @@
+{{- if .Values.etcd.managed }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: cilium-etcd-sa
+  namespace: {{ .Release.Namespace }}
+  {{- if .Values.serviceAccounts.etcd.annotations }}
+  annotations:
+{{ toYaml .Values.serviceAccounts.etcd.annotations | indent 4 }}
+  {{- end }}
+{{- end }}
diff --git a/charts/cilium/templates/hubble-ca-configmap.yaml b/charts/cilium/templates/hubble-ca-configmap.yaml
new file mode 100644
index 0000000..50a05f2
--- /dev/null
+++ b/charts/cilium/templates/hubble-ca-configmap.yaml
@@ -0,0 +1,21 @@
+{{- if and (not .Values.preflight.enabled) .Values.agent .Values.hubble.enabled .Values.hubble.tls.enabled }}
+{{- $hubbleCAProvided := .Values.hubble.tls.ca.cert }}
+{{- $hubbleCAGenerate := and .Values.hubble.tls.auto.enabled (eq .Values.hubble.tls.auto.method "helm") .Values.hubble.relay.enabled -}}
+{{- if or $hubbleCAProvided $hubbleCAGenerate }}
+# NOTE: the hubble-ca-cert ConfigMap is deprecated and will be removed in v1.11
+# The Hubble CA certificate can be found in both the hubble-server-certs and
+# hubble-relay-client-certs Secrets under the ca.crt key.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: hubble-ca-cert
+  namespace: {{ .Release.Namespace }}
+data:
+{{- if $hubbleCAGenerate }}
+{{ include "hubble.ca.gen-cert-only" . | indent 2 }}
+{{- else }}
+  ca.crt: |-
+{{ .Values.hubble.tls.ca.cert | b64dec | indent 4 -}}
+{{- end }}
+{{- end }}
+{{- end }}
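
When the CA is supplied rather than generated, the value is expected base64-encoded,
since the template pipes it through b64dec before embedding it; a sketch:

    hubble:
      enabled: true
      tls:
        enabled: true
        auto:
          enabled: false
        ca:
          cert: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0t..."   # truncated placeholder, base64 PEM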
diff --git a/charts/cilium/templates/hubble-generate-certs-ca-secret.yaml b/charts/cilium/templates/hubble-generate-certs-ca-secret.yaml
new file mode 100644
index 0000000..de579be
--- /dev/null
+++ b/charts/cilium/templates/hubble-generate-certs-ca-secret.yaml
@@ -0,0 +1,11 @@
+{{- if and .Values.hubble.tls.auto.enabled (eq .Values.hubble.tls.auto.method "cronJob") .Values.hubble.tls.ca.cert .Values.hubble.tls.ca.key }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: hubble-ca-secret
+  namespace: {{ .Release.Namespace }}
+data:
+  ca.crt: {{ .Values.hubble.tls.ca.cert }}
+  ca.key: {{ .Values.hubble.tls.ca.key }}
+{{- end }}
diff --git a/charts/cilium/templates/hubble-generate-certs-clusterrole.yaml b/charts/cilium/templates/hubble-generate-certs-clusterrole.yaml
new file mode 100644
index 0000000..fcd2530
--- /dev/null
+++ b/charts/cilium/templates/hubble-generate-certs-clusterrole.yaml
@@ -0,0 +1,41 @@
+{{- if and .Values.hubble.tls.auto.enabled (eq .Values.hubble.tls.auto.method "cronJob") .Values.serviceAccounts.hubblecertgen.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: hubble-generate-certs
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - secrets
+      - configmaps
+    verbs:
+      - create
+  - apiGroups:
+      - ""
+    resources:
+      - secrets
+    resourceNames:
+      - hubble-server-certs
+      - hubble-relay-client-certs
+      - hubble-relay-server-certs
+    verbs:
+      - update
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+    resourceNames:
+      - hubble-ca-cert
+    verbs:
+      - update
+  - apiGroups:
+      - ""
+    resources:
+      - secrets
+    resourceNames:
+      - hubble-ca-secret
+    verbs:
+      - get
+      - update
+{{- end }}
diff --git a/charts/cilium/templates/hubble-generate-certs-clusterrolebinding.yaml b/charts/cilium/templates/hubble-generate-certs-clusterrolebinding.yaml
new file mode 100644
index 0000000..39e57c9
--- /dev/null
+++ b/charts/cilium/templates/hubble-generate-certs-clusterrolebinding.yaml
@@ -0,0 +1,14 @@
+{{- if and .Values.hubble.tls.auto.enabled (eq .Values.hubble.tls.auto.method "cronJob") .Values.serviceAccounts.hubblecertgen.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: hubble-generate-certs
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: hubble-generate-certs
+subjects:
+- kind: ServiceAccount
+  name: {{ .Values.serviceAccounts.hubblecertgen.name | quote }}
+  namespace: {{ .Release.Namespace }}
+{{- end }}
diff --git a/charts/cilium/templates/hubble-generate-certs-cronjob.yaml b/charts/cilium/templates/hubble-generate-certs-cronjob.yaml
new file mode 100644
index 0000000..d0c8385
--- /dev/null
+++ b/charts/cilium/templates/hubble-generate-certs-cronjob.yaml
@@ -0,0 +1,14 @@
+{{- if and .Values.hubble.tls.auto.enabled (eq .Values.hubble.tls.auto.method "cronJob") .Values.hubble.tls.auto.schedule }}
+apiVersion: batch/v1beta1
+kind: CronJob
+metadata:
+  name: hubble-generate-certs
+  namespace: {{ .Release.Namespace }}
+  labels:
+    k8s-app: hubble-generate-certs
+spec:
+  schedule: {{ .Values.hubble.tls.auto.schedule | quote }}
+  concurrencyPolicy: Forbid
+  jobTemplate:
+{{- include "hubble-generate-certs.job.spec" . | nindent 4 }}
+{{- end }}
diff --git a/charts/cilium/templates/hubble-generate-certs-job.yaml b/charts/cilium/templates/hubble-generate-certs-job.yaml
new file mode 100644
index 0000000..e6e9874
--- /dev/null
+++ b/charts/cilium/templates/hubble-generate-certs-job.yaml
@@ -0,0 +1,19 @@
+{{- if and .Values.hubble.tls.auto.enabled (eq .Values.hubble.tls.auto.method "cronJob") }}
+{{/*
+Because Kubernetes job specs are immutable, Helm will fail to patch this job
+if the spec changes between releases. To avoid breaking the upgrade path, we
+generate a name for the job based on the checksum of its spec. The job name
+therefore changes whenever its content changes, which in turn causes Helm to
+delete the old job and replace it with a new one.
+*/}}
+{{- $jobSpec := include "hubble-generate-certs.job.spec" . -}}
+{{- $checkSum := $jobSpec | sha256sum | trunc 10 -}}
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: hubble-generate-certs-{{$checkSum}}
+  namespace: {{ .Release.Namespace }}
+  labels:
+    k8s-app: hubble-generate-certs
+{{ $jobSpec }}
+{{- end }}
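
The checksum-suffix trick above generalizes to any immutable resource; a minimal
sketch of the same pattern for a hypothetical named template "my-job.spec":

    {{- $spec := include "my-job.spec" . -}}
    apiVersion: batch/v1
    kind: Job
    metadata:
      # the name changes whenever the spec changes, so Helm replaces instead of patching
      name: my-job-{{ $spec | sha256sum | trunc 10 }}
    {{ $spec }}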
diff --git a/charts/cilium/templates/hubble-generate-certs-serviceaccount.yaml b/charts/cilium/templates/hubble-generate-certs-serviceaccount.yaml
new file mode 100644
index 0000000..b9cd557
--- /dev/null
+++ b/charts/cilium/templates/hubble-generate-certs-serviceaccount.yaml
@@ -0,0 +1,11 @@
+{{- if and .Values.hubble.tls.auto.enabled (eq .Values.hubble.tls.auto.method "cronJob") .Values.serviceAccounts.hubblecertgen.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ .Values.serviceAccounts.hubblecertgen.name | quote }}
+  namespace: {{ .Release.Namespace }}
+{{- if .Values.serviceAccounts.hubblecertgen.annotations }}
+  annotations:
+{{ toYaml .Values.serviceAccounts.hubblecertgen.annotations | indent 4 }}
+{{- end }}
+{{- end }}
diff --git a/charts/cilium/templates/hubble-relay-client-tls-secret.yaml b/charts/cilium/templates/hubble-relay-client-tls-secret.yaml
new file mode 100644
index 0000000..2a9a80c
--- /dev/null
+++ b/charts/cilium/templates/hubble-relay-client-tls-secret.yaml
@@ -0,0 +1,20 @@
+{{- if and (.Values.hubble.relay.enabled) (.Values.hubble.tls.enabled) }}
+{{- $clientCertsProvided := and .Values.hubble.tls.ca.cert .Values.hubble.relay.tls.client.cert .Values.hubble.relay.tls.client.key }}
+{{- $hubbleCertsGenerate := and .Values.hubble.tls.auto.enabled (eq .Values.hubble.tls.auto.method "helm") -}}
+{{- if or $clientCertsProvided $hubbleCertsGenerate }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: hubble-relay-client-certs
+  namespace: {{ .Release.Namespace }}
+type: kubernetes.io/tls
+data:
+{{- if $hubbleCertsGenerate }}
+{{ include "hubble.relay.gen-certs" . | indent 2 }}
+{{- else }}
+  ca.crt: {{ .Values.hubble.tls.ca.cert }}
+  tls.crt: {{ .Values.hubble.relay.tls.client.cert }}
+  tls.key: {{ .Values.hubble.relay.tls.client.key }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/charts/cilium/templates/hubble-relay-configmap.yaml b/charts/cilium/templates/hubble-relay-configmap.yaml
new file mode 100644
index 0000000..0d281a0
--- /dev/null
+++ b/charts/cilium/templates/hubble-relay-configmap.yaml
@@ -0,0 +1,29 @@
+{{- if .Values.hubble.relay.enabled }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: hubble-relay-config
+  namespace: {{ .Release.Namespace }}
+data:
+  config.yaml: |
+    peer-service: unix://{{ .Values.hubble.socketPath }}
+    listen-address: {{ .Values.hubble.relay.listenHost }}:{{ .Values.hubble.relay.listenPort }}
+    dial-timeout: {{ .Values.hubble.relay.dialTimeout }}
+    retry-timeout: {{ .Values.hubble.relay.retryTimeout }}
+    sort-buffer-len-max: {{ .Values.hubble.relay.sortBufferLenMax }}
+    sort-buffer-drain-timeout: {{ .Values.hubble.relay.sortBufferDrainTimeout }}
+{{- if .Values.hubble.tls.enabled }}
+    tls-client-cert-file: /var/lib/hubble-relay/tls/client.crt
+    tls-client-key-file: /var/lib/hubble-relay/tls/client.key
+    tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt
+{{- else }}
+    disable-client-tls: true
+{{- end }}
+{{- if .Values.hubble.relay.tls.server.enabled }}
+    tls-server-cert-file: /var/lib/hubble-relay/tls/server.crt
+    tls-server-key-file: /var/lib/hubble-relay/tls/server.key
+{{- else }}
+    disable-server-tls: true
+{{- end }}
+{{- end }}
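
For orientation, roughly what config.yaml renders to with TLS disabled (the socket
path, listen port and timeout values below are assumptions about chart defaults,
not guarantees):

    # rendered config.yaml (sketch)
    peer-service: unix:///var/run/cilium/hubble.sock
    listen-address: :4245
    dial-timeout: 5s
    retry-timeout: 30s
    sort-buffer-len-max: 100
    sort-buffer-drain-timeout: 1s
    disable-client-tls: true
    disable-server-tls: true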
diff --git a/charts/cilium/templates/hubble-relay-deployment.yaml b/charts/cilium/templates/hubble-relay-deployment.yaml
new file mode 100644
index 0000000..e230859
--- /dev/null
+++ b/charts/cilium/templates/hubble-relay-deployment.yaml
@@ -0,0 +1,131 @@
+{{- if .Values.hubble.relay.enabled }}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: hubble-relay
+  labels:
+    k8s-app: hubble-relay
+  namespace: {{ .Release.Namespace }}
+spec:
+  replicas: {{ .Values.hubble.relay.replicas }}
+  selector:
+    matchLabels:
+      k8s-app: hubble-relay
+{{- with .Values.hubble.relay.updateStrategy }}
+  strategy:
+{{ toYaml .  | trim | indent 4 }}
+{{- end }}
+  template:
+    metadata:
+      annotations:
+{{- if .Values.hubble.relay.rollOutPods }}
+        # ensure pods roll when configmap updates
+        cilium.io/hubble-relay-configmap-checksum: {{ include (print $.Template.BasePath "/hubble-relay-configmap.yaml") . | sha256sum | quote }}
+{{- end }}
+{{- with .Values.hubble.relay.podAnnotations }}
+        {{- toYaml . | nindent 8 }}
+{{- end }}
+      labels:
+        k8s-app: hubble-relay
+{{- with .Values.hubble.relay.podLabels }}
+        {{- toYaml . | nindent 8 }}
+{{- end }}
+    spec:
+      affinity:
+        podAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+                - key: "k8s-app"
+                  operator: In
+                  values:
+                    - cilium
+            topologyKey: "kubernetes.io/hostname"
+{{- if .Values.imagePullSecrets }}
+      imagePullSecrets:
+{{ toYaml .Values.imagePullSecrets | indent 8 }}
+{{- end }}
+      containers:
+        - name: hubble-relay
+          image: "{{ .Values.hubble.relay.image.repository }}:{{ .Values.hubble.relay.image.tag }}{{ if .Values.hubble.relay.image.useDigest }}@{{ .Values.hubble.relay.image.digest }}{{ end }}"
+          imagePullPolicy: {{ .Values.hubble.relay.image.pullPolicy }}
+          command:
+            - hubble-relay
+          args:
+            - serve
+{{- if .Values.debug.enabled }}
+            - "--debug"
+{{- end }}
+          ports:
+            - name: grpc
+              containerPort: {{ .Values.hubble.relay.listenPort }}
+          readinessProbe:
+            tcpSocket:
+              port: grpc
+          livenessProbe:
+            tcpSocket:
+              port: grpc
+{{- with .Values.hubble.relay.resources }}
+          resources:
+            {{- toYaml . | trim | nindent 12 }}
+{{- end }}
+          volumeMounts:
+          - mountPath: {{ dir .Values.hubble.socketPath }}
+            name: hubble-sock-dir
+            readOnly: true
+          - mountPath: /etc/hubble-relay
+            name: config
+            readOnly: true
+{{- if .Values.hubble.tls.enabled }}
+          - mountPath: /var/lib/hubble-relay/tls
+            name: tls
+            readOnly: true
+{{- end }}
+      restartPolicy: Always
+      serviceAccount: {{ .Values.serviceAccounts.relay.name | quote }}
+      serviceAccountName: {{ .Values.serviceAccounts.relay.name | quote }}
+      automountServiceAccountToken: false
+      terminationGracePeriodSeconds: 0
+{{- with .Values.hubble.relay.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | trim | nindent 8 }}
+{{- end }}
+{{- with .Values.hubble.relay.tolerations }}
+      tolerations:
+      {{- toYaml . | trim | nindent 8 }}
+{{- end }}
+      volumes:
+      - configMap:
+          name: hubble-relay-config
+          items:
+          - key: config.yaml
+            path: config.yaml
+        name: config
+      - hostPath:
+          path: {{ dir .Values.hubble.socketPath }}
+          type: Directory
+        name: hubble-sock-dir
+{{- if .Values.hubble.tls.enabled }}
+      - projected:
+          sources:
+          - secret:
+              name: hubble-relay-client-certs
+              items:
+                - key: ca.crt
+                  path: hubble-server-ca.crt
+                - key: tls.crt
+                  path: client.crt
+                - key: tls.key
+                  path: client.key
+{{- if .Values.hubble.relay.tls.server.enabled }}
+          - secret:
+              name: hubble-relay-server-certs
+              items:
+                - key: tls.crt
+                  path: server.crt
+                - key: tls.key
+                  path: server.key
+{{- end }}
+        name: tls
+{{- end }}
+{{- end }}
diff --git a/charts/cilium/templates/hubble-relay-service.yaml b/charts/cilium/templates/hubble-relay-service.yaml
new file mode 100644
index 0000000..d50fa31
--- /dev/null
+++ b/charts/cilium/templates/hubble-relay-service.yaml
@@ -0,0 +1,23 @@
+{{- if .Values.hubble.relay.enabled }}
+kind: Service
+apiVersion: v1
+metadata:
+  name: hubble-relay
+  namespace: {{ .Release.Namespace }}
+  labels:
+    k8s-app: hubble-relay
+spec:
+  type: ClusterIP
+  selector:
+    k8s-app: hubble-relay
+  ports:
+  - protocol: TCP
+{{- if .Values.hubble.relay.servicePort }}
+    port: {{ .Values.hubble.relay.servicePort }}
+{{- else if .Values.hubble.relay.tls.server.enabled }}
+    port: 443
+{{- else }}
+    port: 80
+{{- end }}
+    targetPort: {{ .Values.hubble.relay.listenPort }}
+{{- end }}
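
The port selection above cascades: an explicit servicePort wins, otherwise the
Service listens on 443 when relay server TLS is enabled and on 80 when it is not.
A sketch pinning it explicitly:

    hubble:
      relay:
        enabled: true
        servicePort: 4245   # placeholder; bypasses the 443/80 fallback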
diff --git a/charts/cilium/templates/hubble-relay-serviceaccount.yaml b/charts/cilium/templates/hubble-relay-serviceaccount.yaml
new file mode 100644
index 0000000..25ea262
--- /dev/null
+++ b/charts/cilium/templates/hubble-relay-serviceaccount.yaml
@@ -0,0 +1,11 @@
+{{- if and (.Values.hubble.relay.enabled) (.Values.serviceAccounts.relay.create) -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ .Values.serviceAccounts.relay.name | quote }}
+  namespace: {{ .Release.Namespace }}
+  {{- with .Values.serviceAccounts.relay.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+{{- end }}
diff --git a/charts/cilium/templates/hubble-relay-tls-server-secret.yaml b/charts/cilium/templates/hubble-relay-tls-server-secret.yaml
new file mode 100644
index 0000000..e07d00a
--- /dev/null
+++ b/charts/cilium/templates/hubble-relay-tls-server-secret.yaml
@@ -0,0 +1,20 @@
+{{- if and (.Values.hubble.relay.enabled) (.Values.hubble.relay.tls.server.enabled) }}
+{{- $serverCertsProvided := and .Values.hubble.tls.ca.cert .Values.hubble.relay.tls.server.cert .Values.hubble.relay.tls.server.key -}}
+{{- $hubbleCertsGenerate := and .Values.hubble.tls.auto.enabled (eq .Values.hubble.tls.auto.method "helm") -}}
+{{- if or $serverCertsProvided $hubbleCertsGenerate }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: hubble-relay-server-certs
+  namespace: {{ .Release.Namespace }}
+type: kubernetes.io/tls
+data:
+{{- if $hubbleCertsGenerate }}
+{{ include "hubble.relay.gen-certs" . | indent 2 }}
+{{- else }}
+  ca.crt: {{ .Values.hubble.tls.ca.cert }}
+  tls.crt: {{ .Values.hubble.relay.tls.server.cert }}
+  tls.key: {{ .Values.hubble.relay.tls.server.key }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/charts/cilium/templates/hubble-server-secret.yaml b/charts/cilium/templates/hubble-server-secret.yaml
new file mode 100644
index 0000000..1cdb806
--- /dev/null
+++ b/charts/cilium/templates/hubble-server-secret.yaml
@@ -0,0 +1,20 @@
+{{- if and .Values.agent (not .Values.preflight.enabled) .Values.hubble.enabled .Values.hubble.tls.enabled }}
+{{- $hubbleCertsProvided := and .Values.hubble.tls.ca.cert .Values.hubble.tls.server.cert .Values.hubble.tls.server.key }}
+{{- $hubbleCertsGenerate := and .Values.hubble.tls.auto.enabled (eq .Values.hubble.tls.auto.method "helm") .Values.hubble.relay.enabled -}}
+{{- if or $hubbleCertsProvided $hubbleCertsGenerate }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: hubble-server-certs
+  namespace: {{ .Release.Namespace }}
+type: kubernetes.io/tls
+data:
+{{- if $hubbleCertsGenerate }}
+{{ include "hubble.server.gen-certs" . | indent 2 }}
+{{- else }}
+  ca.crt: {{ .Values.hubble.tls.ca.cert }}
+  tls.crt: {{ .Values.hubble.tls.server.cert }}
+  tls.key: {{ .Values.hubble.tls.server.key }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/charts/cilium/templates/hubble-ui-clusterrole.yaml b/charts/cilium/templates/hubble-ui-clusterrole.yaml
new file mode 100644
index 0000000..fc4aa2f
--- /dev/null
+++ b/charts/cilium/templates/hubble-ui-clusterrole.yaml
@@ -0,0 +1,44 @@
+{{- if and (.Values.hubble.ui.enabled) (.Values.serviceAccounts.ui.create) }}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: hubble-ui
+rules:
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - networkpolicies
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - ""
+    resources:
+      - componentstatuses
+      - endpoints
+      - namespaces
+      - nodes
+      - pods
+      - services
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - apiextensions.k8s.io
+    resources:
+      - customresourcedefinitions
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - cilium.io
+    resources:
+      - "*"
+    verbs:
+      - get
+      - list
+      - watch
+{{- end }}
diff --git a/charts/cilium/templates/hubble-ui-clusterrolebinding.yaml b/charts/cilium/templates/hubble-ui-clusterrolebinding.yaml
new file mode 100644
index 0000000..455a83d
--- /dev/null
+++ b/charts/cilium/templates/hubble-ui-clusterrolebinding.yaml
@@ -0,0 +1,14 @@
+{{- if and (.Values.hubble.ui.enabled) (.Values.serviceAccounts.ui.create) }}
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: hubble-ui
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: hubble-ui
+subjects:
+- kind: ServiceAccount
+  namespace: {{ .Release.Namespace }}
+  name: {{ .Values.serviceAccounts.ui.name | quote }}
+{{- end }}
diff --git a/charts/cilium/templates/hubble-ui-configmap.yaml b/charts/cilium/templates/hubble-ui-configmap.yaml
new file mode 100644
index 0000000..3739516
--- /dev/null
+++ b/charts/cilium/templates/hubble-ui-configmap.yaml
@@ -0,0 +1,10 @@
+{{- if .Values.hubble.ui.enabled }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: hubble-ui-envoy
+  namespace: {{ .Release.Namespace }}
+data:
+{{ (.Files.Glob "files/envoy/*").AsConfig | indent 2 }}
+{{- end }}
diff --git a/charts/cilium/templates/hubble-ui-deployment.yaml b/charts/cilium/templates/hubble-ui-deployment.yaml
new file mode 100644
index 0000000..967af00
--- /dev/null
+++ b/charts/cilium/templates/hubble-ui-deployment.yaml
@@ -0,0 +1,94 @@
+{{- if .Values.hubble.ui.enabled }}
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  namespace: {{ .Release.Namespace }}
+  labels:
+    k8s-app: hubble-ui
+  name: hubble-ui
+spec:
+  replicas: {{ .Values.hubble.ui.replicas }}
+  selector:
+    matchLabels:
+      k8s-app: hubble-ui
+  template:
+    metadata:
+{{- if or .Values.hubble.ui.rollOutPods .Values.hubble.ui.podAnnotations }}
+      annotations:
+{{- if .Values.hubble.ui.rollOutPods }}
+        # Ensure pods roll when the Envoy ConfigMap changes.
+        cilium.io/hubble-ui-envoy-configmap-checksum: {{ include (print $.Template.BasePath "/hubble-ui-configmap.yaml") . | sha256sum | quote }}
+{{- end }}
+{{- with .Values.hubble.ui.podAnnotations }}
+        {{- toYaml . | nindent 8 }}
+{{- end }}
+{{- end }}
+      labels:
+        k8s-app: hubble-ui
+{{- with .Values.hubble.ui.podLabels }}
+        {{- toYaml . | nindent 8 }}
+{{- end }}
+    spec:
+      {{- if .Values.hubble.ui.securityContext.enabled }}
+      securityContext:
+        runAsUser: 1001
+      {{- end }}
+      serviceAccount: {{ .Values.serviceAccounts.ui.name | quote }}
+      serviceAccountName: {{ .Values.serviceAccounts.ui.name | quote }}
+{{- with .Values.hubble.ui.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | trim | nindent 8 }}
+{{- end }}
+{{- with .Values.hubble.ui.tolerations }}
+      tolerations:
+      {{- toYaml . | trim | nindent 6 }}
+{{- end }}
+{{- if .Values.imagePullSecrets }}
+      imagePullSecrets:
+{{ toYaml .Values.imagePullSecrets | indent 6 }}
+{{- end }}
+      containers:
+        - name: frontend
+          image: "{{ .Values.hubble.ui.frontend.image.repository }}:{{ .Values.hubble.ui.frontend.image.tag }}"
+          imagePullPolicy: {{ .Values.hubble.ui.frontend.image.pullPolicy }}
+          ports:
+            - containerPort: 8080
+              name: http
+          resources:
+            {{- toYaml .Values.hubble.ui.frontend.resources | trim | nindent 12 }}
+        - name: backend
+          image: "{{ .Values.hubble.ui.backend.image.repository }}:{{ .Values.hubble.ui.backend.image.tag }}"
+          imagePullPolicy: {{ .Values.hubble.ui.backend.image.pullPolicy }}
+          env:
+            - name: EVENTS_SERVER_PORT
+              value: "8090"
+            - name: FLOWS_API_ADDR
+              value: "hubble-relay:80"
+          ports:
+            - containerPort: 8090
+              name: grpc
+          resources:
+            {{- toYaml .Values.hubble.ui.backend.resources | trim | nindent 12 }}
+        - name: proxy
+          image: "{{ .Values.hubble.ui.proxy.image.repository }}:{{ .Values.hubble.ui.proxy.image.tag }}"
+          imagePullPolicy: {{ .Values.hubble.ui.proxy.image.pullPolicy }}
+          ports:
+            - containerPort: 8081
+              name: http
+          resources:
+            {{- toYaml .Values.hubble.ui.proxy.resources | trim | nindent 12 }}
+          command: ["envoy"]
+          args:
+            [
+              "-c",
+              "/etc/envoy.yaml",
+              "-l",
+              "info"
+            ]
+          volumeMounts:
+            - name: hubble-ui-envoy-yaml
+              mountPath: /etc/envoy.yaml
+              subPath: envoy.yaml
+      volumes:
+        - name: hubble-ui-envoy-yaml
+          configMap:
+            name: hubble-ui-envoy
+{{- end }}
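+{{/*
+A minimal values sketch (assumed shape, using only keys referenced above)
+for tuning this deployment; the concrete numbers are illustrative:
+
+  hubble:
+    ui:
+      replicas: 2
+      rollOutPods: true
+      frontend:
+        resources:
+          limits:
+            memory: 128Mi
+*/}}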
diff --git a/charts/cilium/templates/hubble-ui-ingress.yaml b/charts/cilium/templates/hubble-ui-ingress.yaml
new file mode 100644
index 0000000..f67a423
--- /dev/null
+++ b/charts/cilium/templates/hubble-ui-ingress.yaml
@@ -0,0 +1,26 @@
+{{- if and (.Values.hubble.ui.enabled) (.Values.hubble.ui.ingress.enabled) -}}
+apiVersion: {{ template "ingress.apiVersion" . }}
+kind: Ingress
+metadata:
+  name: hubble-ui
+  namespace: {{ .Release.Namespace }}
+  labels:
+    k8s-app: hubble-ui
+{{- with .Values.hubble.ui.ingress.annotations }}
+  annotations:
+{{ toYaml . | indent 4 }}
+{{- end }}
+spec:
+{{- if .Values.hubble.ui.ingress.tls }}
+  tls:
+{{ toYaml .Values.hubble.ui.ingress.tls | indent 4 }}
+{{- end }}
+  rules:
+  {{- range .Values.hubble.ui.ingress.hosts }}
+    - host: {{ . }}
+      http:
+        paths:
+          - path: /
+{{ include "ingress.paths" $ | indent 12 }}
+  {{- end }}
+{{- end }}
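+{{/*
+Illustrative values for exposing the UI through this Ingress; the host and
+annotation below are placeholders, not recommendations:
+
+  hubble:
+    ui:
+      ingress:
+        enabled: true
+        hosts:
+          - hubble.example.com
+        annotations:
+          kubernetes.io/ingress.class: nginx
+*/}}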
diff --git a/charts/cilium/templates/hubble-ui-service.yaml b/charts/cilium/templates/hubble-ui-service.yaml
new file mode 100644
index 0000000..5aa701d
--- /dev/null
+++ b/charts/cilium/templates/hubble-ui-service.yaml
@@ -0,0 +1,17 @@
+{{- if .Values.hubble.ui.enabled }}
+kind: Service
+apiVersion: v1
+metadata:
+  name: hubble-ui
+  labels:
+    k8s-app: hubble-ui
+  namespace: {{ .Release.Namespace }}
+spec:
+  selector:
+    k8s-app: hubble-ui
+  ports:
+    - name: http
+      port: 80
+      targetPort: 8081
+  type: ClusterIP
+{{- end }}
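+{{/*
+The service is ClusterIP-only, so for ad-hoc access one option is port
+forwarding, e.g.:
+  kubectl port-forward -n <release-namespace> svc/hubble-ui 12000:80
+*/}}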
diff --git a/charts/cilium/templates/hubble-ui-serviceaccount.yaml b/charts/cilium/templates/hubble-ui-serviceaccount.yaml
new file mode 100644
index 0000000..0b4a85d
--- /dev/null
+++ b/charts/cilium/templates/hubble-ui-serviceaccount.yaml
@@ -0,0 +1,11 @@
+{{- if and (.Values.hubble.ui.enabled) (.Values.serviceAccounts.ui.create) -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ .Values.serviceAccounts.ui.name | quote }}
+  namespace: {{ .Release.Namespace }}
+  {{- with .Values.serviceAccounts.ui.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+{{- end }}
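+{{/*
+serviceAccounts.ui.annotations is passed through verbatim, so it can carry
+cloud IAM bindings. An assumed example (the role ARN is a placeholder):
+
+  serviceAccounts:
+    ui:
+      annotations:
+        eks.amazonaws.com/role-arn: arn:aws:iam::111122223333:role/hubble-ui
+*/}}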
diff --git a/charts/cilium/templates/validate.yaml b/charts/cilium/templates/validate.yaml
new file mode 100644
index 0000000..1e9cbb8
--- /dev/null
+++ b/charts/cilium/templates/validate.yaml
@@ -0,0 +1,18 @@
+{{/* Validate the Hubble configuration. */}}
+{{- if .Values.hubble.ui.enabled }}
+  {{- if not .Values.hubble.relay.enabled }}
+    {{ fail "Hubble UI requires .Values.hubble.relay.enabled=true" }}
+  {{- end }}
+{{- end }}
+{{- if .Values.hubble.relay.enabled }}
+  {{- if not .Values.hubble.enabled }}
+    {{ fail "Hubble Relay requires .Values.hubble.enabled=true" }}
+  {{- end }}
+{{- end }}
+
+{{/* Validate that the CRDs required for service monitoring are installed. */}}
+{{- if and (.Values.prometheus.enabled) (or (.Values.prometheus.serviceMonitor.enabled) (.Values.operator.prometheus.serviceMonitor.enabled)) }}
+  {{- if not (.Capabilities.APIVersions.Has "monitoring.coreos.com/v1") }}
+      {{ fail "Service Monitor requires monitoring.coreos.com/v1 CRDs. Please refer to https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml" }}
+  {{- end }}
+{{- end }}
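+{{/*
+These guards abort rendering at install/upgrade/template time. For example,
+a dry run such as:
+  helm template cilium ./charts/cilium --set hubble.ui.enabled=true
+fails with the first message above unless hubble.relay.enabled is also true.
+*/}}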