chore: Switch to new images (#830)
Co-authored-by: Oleksandr K <okozachenko@vexxhost.com>
Co-authored-by: okozachenko1203 <okozachenko1203@users.noreply.github.com>
diff --git a/images/magnum/Earthfile b/images/magnum/Earthfile
new file mode 100644
index 0000000..f7e189f
--- /dev/null
+++ b/images/magnum/Earthfile
@@ -0,0 +1,18 @@
+VERSION 0.7
+
+image:
+ ARG PROJECT=magnum
+ ARG RELEASE=zed
+ ARG REF=c671d8baf9d6f4705a1b832ae2d96980e5a58db6
+ FROM ../openstack-service+image \
+ --PROJECT ${PROJECT} \
+ --RELEASE ${RELEASE} \
+ --PROJECT_REF ${REF} \
+ --PIP_PACKAGES "magnum-cluster-api==0.13.3"
+ DO \
+ ../+APT_INSTALL \
+ --PACKAGES "haproxy"
+ DO ../+APPLY_PATCHES
+ SAVE IMAGE --push \
+ ghcr.io/vexxhost/atmosphere/${PROJECT}:${RELEASE} \
+ ghcr.io/vexxhost/atmosphere/${PROJECT}:${REF}
diff --git a/images/magnum/patches/0000-containerd-cni-plugin-path-in-coreos-35.patch b/images/magnum/patches/0000-containerd-cni-plugin-path-in-coreos-35.patch
new file mode 100644
index 0000000..8d7c411
--- /dev/null
+++ b/images/magnum/patches/0000-containerd-cni-plugin-path-in-coreos-35.patch
@@ -0,0 +1,35 @@
+From 7f9f804a766083b65389b4cc2870fbb1a951b29e Mon Sep 17 00:00:00 2001
+From: Mohammed Naser <mnaser@vexxhost.com>
+Date: Thu, 9 Mar 2023 09:45:43 +0100
+Subject: [PATCH] Containerd cni plugin path in CoreOS 35 (#1)
+
+Task: 45387
+Story: 2010041
+
+In Fedora CoreOS 35 default containerd cni bin_dir is set to
+/usr/libexec/cni. Since we're installing our own in /opt/cni/bin need to
+override in containerd config.toml otherwise pods get stuck in
+ContainerCreating state, looking for e.g. calico in the wrong path.
+
+Change-Id: I3242b718e32c92942ac471bc7e182a42e803005b
+(cherry picked from commit fbfd3ce9a30fed291c96179f409821b7e016d2ba)
+
+Co-authored-by: Jakub Darmach <jakub@stackhpc.com>
+---
+ .../common/templates/kubernetes/fragments/install-cri.sh | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/magnum/drivers/common/templates/kubernetes/fragments/install-cri.sh b/magnum/drivers/common/templates/kubernetes/fragments/install-cri.sh
+index f60efe47a8..61204fe47a 100644
+--- a/magnum/drivers/common/templates/kubernetes/fragments/install-cri.sh
++++ b/magnum/drivers/common/templates/kubernetes/fragments/install-cri.sh
+@@ -10,6 +10,9 @@ ssh_cmd="ssh -F /srv/magnum/.ssh/config root@localhost"
+ if [ "${CONTAINER_RUNTIME}" = "containerd" ] ; then
+ $ssh_cmd systemctl disable docker.service docker.socket
+ $ssh_cmd systemctl stop docker.service docker.socket
++ if $ssh_cmd [ -f /etc/containerd/config.toml ] ; then
++ $ssh_cmd sed -i 's/bin_dir.*$/bin_dir\ =\ \""\/opt\/cni\/bin\/"\"/' /etc/containerd/config.toml
++ fi
+ if [ -z "${CONTAINERD_TARBALL_URL}" ] ; then
+ CONTAINERD_TARBALL_URL="https://github.com/containerd/containerd/releases/download/v${CONTAINERD_VERSION}/cri-containerd-cni-${CONTAINERD_VERSION}-linux-amd64.tar.gz"
+ fi
diff --git a/images/magnum/patches/0001-update-chart-metadata-version-to-reflect-breaking-change-in-helm-v3-5-2.patch b/images/magnum/patches/0001-update-chart-metadata-version-to-reflect-breaking-change-in-helm-v3-5-2.patch
new file mode 100644
index 0000000..9bee808
--- /dev/null
+++ b/images/magnum/patches/0001-update-chart-metadata-version-to-reflect-breaking-change-in-helm-v3-5-2.patch
@@ -0,0 +1,28 @@
+From 61592d46e7fc5644c4b5148c7ca6bf767131e504 Mon Sep 17 00:00:00 2001
+From: okozachenko1203 <okozachenko1203@gmail.com>
+Date: Fri, 31 Mar 2023 23:41:43 +1100
+Subject: [PATCH] Update chart.metadata.version to reflect breaking change in
+ helm v3.5.2
+
+https://github.com/helm/helm/issues/9342
+Change-Id: I1dbe7b0b85380e713ebb5dcdd7ecbfc6a438b852
+(cherry picked from commit ebee3263b6b3d3fa213ea8f837911b89785a4700)
+---
+ .../templates/kubernetes/fragments/install-helm-modules.sh | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/magnum/drivers/common/templates/kubernetes/fragments/install-helm-modules.sh b/magnum/drivers/common/templates/kubernetes/fragments/install-helm-modules.sh
+index 475e8dbf6c..a0b3f4bc75 100644
+--- a/magnum/drivers/common/templates/kubernetes/fragments/install-helm-modules.sh
++++ b/magnum/drivers/common/templates/kubernetes/fragments/install-helm-modules.sh
+@@ -72,8 +72,8 @@ else
+ cat << EOF > Chart.yaml
+ apiVersion: v1
+ name: magnum
+-version: metachart
+-appVersion: metachart
++version: 1.0.0
++appVersion: v1.0.0
+ description: Magnum Helm Charts
+ EOF
+ sed -i '1i\dependencies:' requirements.yaml
diff --git a/images/magnum/patches/0002-support-k8s-1-24.patch b/images/magnum/patches/0002-support-k8s-1-24.patch
new file mode 100644
index 0000000..bc69c96
--- /dev/null
+++ b/images/magnum/patches/0002-support-k8s-1-24.patch
@@ -0,0 +1,75 @@
+From f25b5c0f89dcc16918d5d8636355831ce0dc4091 Mon Sep 17 00:00:00 2001
+From: Daniel Meyerholt <dxm523@gmail.com>
+Date: Sat, 28 May 2022 12:43:45 +0200
+Subject: [PATCH] Support K8s 1.24+
+
+Only specify dockershim options when container runtime is not containerd.
+Those options were ignored in the past when using containerd but since 1.24
+kubelet refuses to start.
+
+Task: 45282
+Story: 2010028
+
+Signed-off-by: Daniel Meyerholt <dxm523@gmail.com>
+Change-Id: Ib44cc30285c8bd4219d4a45dc956696505ddd570
+(cherry picked from commit f7cd2928d6a84e869c87c333b814de76cae9a920)
+---
+ .../kubernetes/fragments/configure-kubernetes-master.sh | 3 ++-
+ .../kubernetes/fragments/configure-kubernetes-minion.sh | 3 ++-
+ .../notes/support-dockershim-removal-cad104d069f1a50b.yaml | 5 +++++
+ 3 files changed, 9 insertions(+), 2 deletions(-)
+ create mode 100644 releasenotes/notes/support-dockershim-removal-cad104d069f1a50b.yaml
+
+diff --git a/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh b/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh
+index 42267404a1..61ca0a7a59 100644
+--- a/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh
++++ b/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh
+@@ -454,7 +454,6 @@ if [ -f /etc/sysconfig/docker ] ; then
+ sed -i -E 's/^OPTIONS=("|'"'"')/OPTIONS=\1'"${DOCKER_OPTIONS}"' /' /etc/sysconfig/docker
+ fi
+
+-KUBELET_ARGS="${KUBELET_ARGS} --network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
+ KUBELET_ARGS="${KUBELET_ARGS} --register-with-taints=node-role.kubernetes.io/master=:NoSchedule"
+ KUBELET_ARGS="${KUBELET_ARGS} --node-labels=magnum.openstack.org/role=${NODEGROUP_ROLE}"
+ KUBELET_ARGS="${KUBELET_ARGS} --node-labels=magnum.openstack.org/nodegroup=${NODEGROUP_NAME}"
+@@ -503,6 +502,8 @@ if [ ${CONTAINER_RUNTIME} = "containerd" ] ; then
+ KUBELET_ARGS="${KUBELET_ARGS} --container-runtime=remote"
+ KUBELET_ARGS="${KUBELET_ARGS} --runtime-request-timeout=15m"
+ KUBELET_ARGS="${KUBELET_ARGS} --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
++else
++ KUBELET_ARGS="${KUBELET_ARGS} --network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
+ fi
+
+ if [ -z "${KUBE_NODE_IP}" ]; then
+diff --git a/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh b/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh
+index 46055244ac..60fc1918bc 100644
+--- a/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh
++++ b/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh
+@@ -282,6 +282,8 @@ if [ ${CONTAINER_RUNTIME} = "containerd" ] ; then
+ KUBELET_ARGS="${KUBELET_ARGS} --container-runtime=remote"
+ KUBELET_ARGS="${KUBELET_ARGS} --runtime-request-timeout=15m"
+ KUBELET_ARGS="${KUBELET_ARGS} --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
++else
++ KUBELET_ARGS="${KUBELET_ARGS} --network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
+ fi
+
+ auto_healing_enabled=$(echo ${AUTO_HEALING_ENABLED} | tr '[:upper:]' '[:lower:]')
+@@ -290,7 +292,6 @@ if [[ "${auto_healing_enabled}" = "true" && "${autohealing_controller}" = "drain
+ KUBELET_ARGS="${KUBELET_ARGS} --node-labels=draino-enabled=true"
+ fi
+
+-KUBELET_ARGS="${KUBELET_ARGS} --network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
+
+ sed -i '
+ /^KUBELET_ADDRESS=/ s/=.*/="--address=0.0.0.0"/
+diff --git a/releasenotes/notes/support-dockershim-removal-cad104d069f1a50b.yaml b/releasenotes/notes/support-dockershim-removal-cad104d069f1a50b.yaml
+new file mode 100644
+index 0000000000..f228db6321
+--- /dev/null
++++ b/releasenotes/notes/support-dockershim-removal-cad104d069f1a50b.yaml
+@@ -0,0 +1,5 @@
++---
++fixes:
++ - |
++ Support K8s 1.24 which removed support of dockershim. Needs containerd as
++ container runtime.
diff --git a/images/magnum/patches/0003-fix-kubelet-for-fedora-coreos-36-to-provide-real-resolvconf-to-containers.patch b/images/magnum/patches/0003-fix-kubelet-for-fedora-coreos-36-to-provide-real-resolvconf-to-containers.patch
new file mode 100644
index 0000000..a79d935
--- /dev/null
+++ b/images/magnum/patches/0003-fix-kubelet-for-fedora-coreos-36-to-provide-real-resolvconf-to-containers.patch
@@ -0,0 +1,48 @@
+From 34564ae02c1e7bef3b69967c7497f201058c82a5 Mon Sep 17 00:00:00 2001
+From: Dale Smith <dale@catalystcloud.nz>
+Date: Thu, 22 Dec 2022 16:06:07 +1300
+Subject: [PATCH] Fix kubelet for Fedora CoreOS 36 to provide real resolvconf
+ to containers.
+
+In Fedora CoreOS 36 CoreDNS cannot start correctly due to a loopback issue
+where /etc/resolv.conf is mounted and points to localhost.
+
+Tested on Fedora CoreOS 35,36,37, with Docker and containerd.
+
+https://coredns.io/plugins/loop/#troubleshooting-loops-in-kubernetes-clusters
+https://fedoraproject.org/wiki/Changes/systemd-resolved#Detailed_Description
+
+Story: 2010519
+Depends-On: I3242b718e32c92942ac471bc7e182a42e803005b
+
+Change-Id: I8106324ce71d6c22fa99e1a84b5a09743315811a
+(cherry picked from commit 5061dc5bb5c9aaba8fcfb3cb06404ada084a1908)
+---
+ .../kubernetes/fragments/configure-kubernetes-master.sh | 1 +
+ .../kubernetes/fragments/configure-kubernetes-minion.sh | 1 +
+ 2 files changed, 2 insertions(+)
+
+diff --git a/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh b/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh
+index 61ca0a7a59..24d7e48f4f 100644
+--- a/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh
++++ b/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh
+@@ -435,6 +435,7 @@ $ssh_cmd mkdir -p /etc/kubernetes/manifests
+ KUBELET_ARGS="--register-node=true --pod-manifest-path=/etc/kubernetes/manifests --hostname-override=${INSTANCE_NAME}"
+ KUBELET_ARGS="${KUBELET_ARGS} --pod-infra-container-image=${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}pause:3.1"
+ KUBELET_ARGS="${KUBELET_ARGS} --cluster_dns=${DNS_SERVICE_IP} --cluster_domain=${DNS_CLUSTER_DOMAIN}"
++KUBELET_ARGS="${KUBELET_ARGS} --resolv-conf=/run/systemd/resolve/resolv.conf"
+ KUBELET_ARGS="${KUBELET_ARGS} --volume-plugin-dir=/var/lib/kubelet/volumeplugins"
+ KUBELET_ARGS="${KUBELET_ARGS} ${KUBELET_OPTIONS}"
+
+diff --git a/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh b/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh
+index 60fc1918bc..6508ac3ef0 100644
+--- a/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh
++++ b/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh
+@@ -250,6 +250,7 @@ mkdir -p /etc/kubernetes/manifests
+ KUBELET_ARGS="--pod-manifest-path=/etc/kubernetes/manifests --kubeconfig ${KUBELET_KUBECONFIG} --hostname-override=${INSTANCE_NAME}"
+ KUBELET_ARGS="${KUBELET_ARGS} --address=${KUBE_NODE_IP} --port=10250 --read-only-port=0 --anonymous-auth=false --authorization-mode=Webhook --authentication-token-webhook=true"
+ KUBELET_ARGS="${KUBELET_ARGS} --cluster_dns=${DNS_SERVICE_IP} --cluster_domain=${DNS_CLUSTER_DOMAIN}"
++KUBELET_ARGS="${KUBELET_ARGS} --resolv-conf=/run/systemd/resolve/resolv.conf"
+ KUBELET_ARGS="${KUBELET_ARGS} --volume-plugin-dir=/var/lib/kubelet/volumeplugins"
+ KUBELET_ARGS="${KUBELET_ARGS} --node-labels=magnum.openstack.org/role=${NODEGROUP_ROLE}"
+ KUBELET_ARGS="${KUBELET_ARGS} --node-labels=magnum.openstack.org/nodegroup=${NODEGROUP_NAME}"
diff --git a/images/magnum/patches/0004-adapt-cinder-csi-to-upstream-manifest.patch b/images/magnum/patches/0004-adapt-cinder-csi-to-upstream-manifest.patch
new file mode 100644
index 0000000..7d302cf
--- /dev/null
+++ b/images/magnum/patches/0004-adapt-cinder-csi-to-upstream-manifest.patch
@@ -0,0 +1,860 @@
+From b13335fc56d4938346619229bb2c23c128a1d58a Mon Sep 17 00:00:00 2001
+From: Michal Nasiadka <mnasiadka@gmail.com>
+Date: Fri, 11 Mar 2022 13:33:15 +0100
+Subject: [PATCH] Adapt Cinder CSI to upstream manifest
+
+- Bump also components to upstream manifest versions.
+- Add small tool to sync Cinder CSI manifests automatically
+
+Change-Id: Icd19b41d03b7aa200965a3357a8ddf8b4b40794a
+(cherry picked from commit ac5702c40653942634e259788434037e1e8c980a)
+---
+ doc/source/user/index.rst | 11 +
+ .../kubernetes/fragments/enable-cinder-csi.sh | 237 +++++++++---------
+ .../fragments/write-heat-params-master.sh | 1 +
+ .../drivers/heat/k8s_fedora_template_def.py | 1 +
+ .../templates/kubecluster.yaml | 19 +-
+ .../templates/kubemaster.yaml | 6 +
+ .../unit/drivers/test_template_definition.py | 6 +
+ tools/sync/cinder-csi | 162 ++++++++++++
+ 8 files changed, 322 insertions(+), 121 deletions(-)
+ create mode 100755 tools/sync/cinder-csi
+
+diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst
+index 20c56400f8..9d8d747204 100644
+--- a/doc/source/user/index.rst
++++ b/doc/source/user/index.rst
+@@ -1400,30 +1400,35 @@ _`cinder_csi_plugin_tag`
+ <https://hub.docker.com/r/k8scloudprovider/cinder-csi-plugin/tags>`_.
+ Train default: v1.16.0
+ Ussuri default: v1.18.0
++ Yoga default: v1.23.0
+
+ _`csi_attacher_tag`
+ This label allows users to override the default container tag for CSI attacher.
+ For additional tags, `refer to CSI attacher page
+ <https://quay.io/repository/k8scsi/csi-attacher?tab=tags>`_.
+ Ussuri-default: v2.0.0
++ Yoga-default: v3.3.0
+
+ _`csi_provisioner_tag`
+ This label allows users to override the default container tag for CSI provisioner.
+ For additional tags, `refer to CSI provisioner page
+ <https://quay.io/repository/k8scsi/csi-provisioner?tab=tags>`_.
+ Ussuri-default: v1.4.0
++ Yoga-default: v3.0.0
+
+ _`csi_snapshotter_tag`
+ This label allows users to override the default container tag for CSI snapshotter.
+ For additional tags, `refer to CSI snapshotter page
+ <https://quay.io/repository/k8scsi/csi-snapshotter?tab=tags>`_.
+ Ussuri-default: v1.2.2
++ Yoga-default: v4.2.1
+
+ _`csi_resizer_tag`
+ This label allows users to override the default container tag for CSI resizer.
+ For additional tags, `refer to CSI resizer page
+ <https://quay.io/repository/k8scsi/csi-resizer?tab=tags>`_.
+ Ussuri-default: v0.3.0
++ Yoga-default: v1.3.0
+
+ _`csi_node_driver_registrar_tag`
+ This label allows users to override the default container tag for CSI node
+@@ -1431,6 +1436,12 @@ _`csi_node_driver_registrar_tag`
+ page
+ <https://quay.io/repository/k8scsi/csi-node-driver-registrar?tab=tags>`_.
+ Ussuri-default: v1.1.0
++ Yoga-default: v2.4.0
++
++_`csi_liveness_probe_tag`
++ This label allows users to override the default container tag for CSI
++ liveness probe.
++ Yoga-default: v2.5.0
+
+ _`keystone_auth_enabled`
+ If this label is set to True, Kubernetes will support use Keystone for
+diff --git a/magnum/drivers/common/templates/kubernetes/fragments/enable-cinder-csi.sh b/magnum/drivers/common/templates/kubernetes/fragments/enable-cinder-csi.sh
+index b85258a5f3..524b5e98ed 100644
+--- a/magnum/drivers/common/templates/kubernetes/fragments/enable-cinder-csi.sh
++++ b/magnum/drivers/common/templates/kubernetes/fragments/enable-cinder-csi.sh
+@@ -12,15 +12,15 @@ if [ "${volume_driver}" = "cinder" ] && [ "${cinder_csi_enabled}" = "true" ]; th
+ echo "Writing File: $CINDER_CSI_DEPLOY"
+ mkdir -p $(dirname ${CINDER_CSI_DEPLOY})
+ cat << EOF > ${CINDER_CSI_DEPLOY}
+----
+ # This YAML file contains RBAC API objects,
+ # which are necessary to run csi controller plugin
+----
++
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: csi-cinder-controller-sa
+ namespace: kube-system
++
+ ---
+ # external attacher
+ kind: ClusterRole
+@@ -30,16 +30,20 @@ metadata:
+ rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+- verbs: ["get", "list", "watch", "update", "patch"]
+- - apiGroups: [""]
+- resources: ["nodes"]
++ verbs: ["get", "list", "watch", "patch"]
++ - apiGroups: ["storage.k8s.io"]
++ resources: ["csinodes"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["volumeattachments"]
+- verbs: ["get", "list", "watch", "update", "patch"]
++ verbs: ["get", "list", "watch", "patch"]
+ - apiGroups: ["storage.k8s.io"]
+- resources: ["csinodes"]
+- verbs: ["get", "list", "watch"]
++ resources: ["volumeattachments/status"]
++ verbs: ["patch"]
++ - apiGroups: ["coordination.k8s.io"]
++ resources: ["leases"]
++ verbs: ["get", "watch", "list", "delete", "update", "create"]
++
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+@@ -53,6 +57,7 @@ roleRef:
+ kind: ClusterRole
+ name: csi-attacher-role
+ apiGroup: rbac.authorization.k8s.io
++
+ ---
+ # external Provisioner
+ kind: ClusterRole
+@@ -84,6 +89,12 @@ rules:
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshotcontents"]
+ verbs: ["get", "list"]
++ - apiGroups: ["storage.k8s.io"]
++ resources: ["volumeattachments"]
++ verbs: ["get", "list", "watch"]
++ - apiGroups: ["coordination.k8s.io"]
++ resources: ["leases"]
++ verbs: ["get", "watch", "list", "delete", "update", "create"]
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+@@ -97,6 +108,7 @@ roleRef:
+ kind: ClusterRole
+ name: csi-provisioner-role
+ apiGroup: rbac.authorization.k8s.io
++
+ ---
+ # external snapshotter
+ kind: ClusterRole
+@@ -104,36 +116,28 @@ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: csi-snapshotter-role
+ rules:
+- - apiGroups: [""]
+- resources: ["persistentvolumes"]
+- verbs: ["get", "list", "watch"]
+- - apiGroups: [""]
+- resources: ["persistentvolumeclaims"]
+- verbs: ["get", "list", "watch"]
+- - apiGroups: ["storage.k8s.io"]
+- resources: ["storageclasses"]
+- verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["list", "watch", "create", "update", "patch"]
+- - apiGroups: [""]
+- resources: ["secrets"]
+- verbs: ["get", "list"]
++ # Secret permission is optional.
++ # Enable it if your driver needs secret.
++ # For example, `csi.storage.k8s.io/snapshotter-secret-name` is set in VolumeSnapshotClass.
++ # See https://kubernetes-csi.github.io/docs/secrets-and-credentials.html for more details.
++ # - apiGroups: [""]
++ # resources: ["secrets"]
++ # verbs: ["get", "list"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshotclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshotcontents"]
+- verbs: ["create", "get", "list", "watch", "update", "delete"]
++ verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+- resources: ["volumesnapshots"]
+- verbs: ["get", "list", "watch", "update"]
+- - apiGroups: ["snapshot.storage.k8s.io"]
+- resources: ["volumesnapshots/status"]
+- verbs: ["update"]
+- - apiGroups: ["apiextensions.k8s.io"]
+- resources: ["customresourcedefinitions"]
+- verbs: ["create", "list", "watch", "delete"]
++ resources: ["volumesnapshotcontents/status"]
++ verbs: ["update", "patch"]
++ - apiGroups: ["coordination.k8s.io"]
++ resources: ["leases"]
++ verbs: ["get", "watch", "list", "delete", "update", "create"]
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+@@ -148,6 +152,7 @@ roleRef:
+ name: csi-snapshotter-role
+ apiGroup: rbac.authorization.k8s.io
+ ---
++
+ # External Resizer
+ kind: ClusterRole
+ apiVersion: rbac.authorization.k8s.io/v1
+@@ -161,19 +166,22 @@ rules:
+ # verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+- verbs: ["get", "list", "watch", "update", "patch"]
++ verbs: ["get", "list", "watch", "patch"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+- resources: ["persistentvolumeclaims/status"]
+- verbs: ["update", "patch"]
+- - apiGroups: ["storage.k8s.io"]
+- resources: ["storageclasses"]
++ resources: ["pods"]
+ verbs: ["get", "list", "watch"]
++ - apiGroups: [""]
++ resources: ["persistentvolumeclaims/status"]
++ verbs: ["patch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["list", "watch", "create", "update", "patch"]
++ - apiGroups: ["coordination.k8s.io"]
++ resources: ["leases"]
++ verbs: ["get", "watch", "list", "delete", "update", "create"]
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+@@ -187,56 +195,24 @@ roleRef:
+ kind: ClusterRole
+ name: csi-resizer-role
+ apiGroup: rbac.authorization.k8s.io
+----
+-kind: Role
+-apiVersion: rbac.authorization.k8s.io/v1
+-metadata:
+- namespace: kube-system
+- name: external-resizer-cfg
+-rules:
+-- apiGroups: ["coordination.k8s.io"]
+- resources: ["leases"]
+- verbs: ["get", "watch", "list", "delete", "update", "create"]
+----
+-kind: RoleBinding
+-apiVersion: rbac.authorization.k8s.io/v1
+-metadata:
+- name: csi-resizer-role-cfg
+- namespace: kube-system
+-subjects:
+- - kind: ServiceAccount
+- name: csi-cinder-controller-sa
+- namespace: kube-system
+-roleRef:
+- kind: Role
+- name: external-resizer-cfg
+- apiGroup: rbac.authorization.k8s.io
++
+ ---
+ # This YAML file contains CSI Controller Plugin Sidecars
+ # external-attacher, external-provisioner, external-snapshotter
+----
+-kind: Service
+-apiVersion: v1
+-metadata:
+- name: csi-cinder-controller-service
+- namespace: kube-system
+- labels:
+- app: csi-cinder-controllerplugin
+-spec:
+- selector:
+- app: csi-cinder-controllerplugin
+- ports:
+- - name: dummy
+- port: 12345
+----
+-kind: StatefulSet
++# external-resize, liveness-probe
++
++kind: Deployment
+ apiVersion: apps/v1
+ metadata:
+ name: csi-cinder-controllerplugin
+ namespace: kube-system
+ spec:
+- serviceName: "csi-cinder-controller-service"
+ replicas: 1
++ strategy:
++ type: RollingUpdate
++ rollingUpdate:
++ maxUnavailable: 0
++ maxSurge: 1
+ selector:
+ matchLabels:
+ app: csi-cinder-controllerplugin
+@@ -246,6 +222,7 @@ spec:
+ app: csi-cinder-controllerplugin
+ spec:
+ serviceAccount: csi-cinder-controller-sa
++ hostNetwork: true
+ tolerations:
+ # Make sure the pod can be scheduled on master kubelet.
+ - effect: NoSchedule
+@@ -257,11 +234,11 @@ spec:
+ node-role.kubernetes.io/master: ""
+ containers:
+ - name: csi-attacher
+- image: ${CONTAINER_INFRA_PREFIX:-quay.io/k8scsi/}csi-attacher:${CSI_ATTACHER_TAG}
++ image: ${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/sig-storage/}csi-attacher:${CSI_ATTACHER_TAG}
+ args:
+- - "--v=5"
+ - "--csi-address=\$(ADDRESS)"
+ - "--timeout=3m"
++ - "--leader-election=true"
+ resources:
+ requests:
+ cpu: 20m
+@@ -273,10 +250,14 @@ spec:
+ - name: socket-dir
+ mountPath: /var/lib/csi/sockets/pluginproxy/
+ - name: csi-provisioner
+- image: ${CONTAINER_INFRA_PREFIX:-quay.io/k8scsi/}csi-provisioner:${CSI_PROVISIONER_TAG}
++ image: ${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/sig-storage/}csi-provisioner:${CSI_PROVISIONER_TAG}
+ args:
+ - "--csi-address=\$(ADDRESS)"
+ - "--timeout=3m"
++ - "--default-fstype=ext4"
++ - "--feature-gates=Topology=true"
++ - "--extra-create-metadata"
++ - "--leader-election=true"
+ resources:
+ requests:
+ cpu: 20m
+@@ -288,9 +269,12 @@ spec:
+ - name: socket-dir
+ mountPath: /var/lib/csi/sockets/pluginproxy/
+ - name: csi-snapshotter
+- image: ${CONTAINER_INFRA_PREFIX:-quay.io/k8scsi/}csi-snapshotter:${CSI_SNAPSHOTTER_TAG}
++ image: ${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/sig-storage/}csi-snapshotter:${CSI_SNAPSHOTTER_TAG}
+ args:
+ - "--csi-address=\$(ADDRESS)"
++ - "--timeout=3m"
++ - "--extra-create-metadata"
++ - "--leader-election=true"
+ resources:
+ requests:
+ cpu: 20m
+@@ -302,10 +286,12 @@ spec:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - name: csi-resizer
+- image: ${CONTAINER_INFRA_PREFIX:-quay.io/k8scsi/}csi-resizer:${CSI_RESIZER_TAG}
++ image: ${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/sig-storage/}csi-resizer:${CSI_RESIZER_TAG}
+ args:
+- - "--v=5"
+ - "--csi-address=\$(ADDRESS)"
++ - "--timeout=3m"
++ - "--handle-volume-inuse-error=false"
++ - "--leader-election=true"
+ resources:
+ requests:
+ cpu: 20m
+@@ -316,22 +302,27 @@ spec:
+ volumeMounts:
+ - name: socket-dir
+ mountPath: /var/lib/csi/sockets/pluginproxy/
++ - name: liveness-probe
++ image: ${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/sig-storage/}livenessprobe:${CSI_LIVENESS_PROBE_TAG}
++ args:
++ - "--csi-address=\$(ADDRESS)"
++ resources:
++ requests:
++ cpu: 20m
++ env:
++ - name: ADDRESS
++ value: /var/lib/csi/sockets/pluginproxy/csi.sock
++ volumeMounts:
++ - mountPath: /var/lib/csi/sockets/pluginproxy/
++ name: socket-dir
+ - name: cinder-csi-plugin
+ image: ${CONTAINER_INFRA_PREFIX:-docker.io/k8scloudprovider/}cinder-csi-plugin:${CINDER_CSI_PLUGIN_TAG}
+- args :
++ args:
+ - /bin/cinder-csi-plugin
+- - "--nodeid=\$(NODE_ID)"
+ - "--endpoint=\$(CSI_ENDPOINT)"
+ - "--cloud-config=\$(CLOUD_CONFIG)"
+ - "--cluster=\$(CLUSTER_NAME)"
+- resources:
+- requests:
+- cpu: 20m
+ env:
+- - name: NODE_ID
+- valueFrom:
+- fieldRef:
+- fieldPath: spec.nodeName
+ - name: CSI_ENDPOINT
+ value: unix://csi/csi.sock
+ - name: CLOUD_CONFIG
+@@ -339,6 +330,19 @@ spec:
+ - name: CLUSTER_NAME
+ value: kubernetes
+ imagePullPolicy: "IfNotPresent"
++ ports:
++ - containerPort: 9808
++ name: healthz
++ protocol: TCP
++ # The probe
++ livenessProbe:
++ failureThreshold: 5
++ httpGet:
++ path: /healthz
++ port: healthz
++ initialDelaySeconds: 10
++ timeoutSeconds: 10
++ periodSeconds: 60
+ volumeMounts:
+ - name: socket-dir
+ mountPath: /csi
+@@ -360,7 +364,7 @@ spec:
+ type: File
+ ---
+ # This YAML defines all API objects to create RBAC roles for csi node plugin.
+----
++
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+@@ -375,6 +379,7 @@ rules:
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["get", "list", "watch", "create", "update", "patch"]
++
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+@@ -391,7 +396,7 @@ roleRef:
+ ---
+ # This YAML file contains driver-registrar & csi driver nodeplugin API objects,
+ # which are necessary to run csi nodeplugin for cinder.
+----
++
+ kind: DaemonSet
+ apiVersion: apps/v1
+ metadata:
+@@ -412,17 +417,10 @@ spec:
+ hostNetwork: true
+ containers:
+ - name: node-driver-registrar
+- image: ${CONTAINER_INFRA_PREFIX:-quay.io/k8scsi/}csi-node-driver-registrar:${CSI_NODE_DRIVER_REGISTRAR_TAG}
++ image: ${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/sig-storage/}csi-node-driver-registrar:${CSI_NODE_DRIVER_REGISTRAR_TAG}
+ args:
+ - "--csi-address=\$(ADDRESS)"
+ - "--kubelet-registration-path=\$(DRIVER_REG_SOCK_PATH)"
+- resources:
+- requests:
+- cpu: 25m
+- lifecycle:
+- preStop:
+- exec:
+- command: ["/bin/sh", "-c", "rm -rf /registration/cinder.csi.openstack.org /registration/cinder.csi.openstack.org-reg.sock"]
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+@@ -438,6 +436,16 @@ spec:
+ mountPath: /csi
+ - name: registration-dir
+ mountPath: /registration
++ - name: liveness-probe
++ image: ${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/sig-storage/}livenessprobe:${CSI_LIVENESS_PROBE_TAG}
++ args:
++ - --csi-address=/csi/csi.sock
++ resources:
++ requests:
++ cpu: 20m
++ volumeMounts:
++ - name: socket-dir
++ mountPath: /csi
+ - name: cinder-csi-plugin
+ securityContext:
+ privileged: true
+@@ -445,33 +453,35 @@ spec:
+ add: ["SYS_ADMIN"]
+ allowPrivilegeEscalation: true
+ image: ${CONTAINER_INFRA_PREFIX:-docker.io/k8scloudprovider/}cinder-csi-plugin:${CINDER_CSI_PLUGIN_TAG}
+- args :
++ args:
+ - /bin/cinder-csi-plugin
+- - "--nodeid=\$(NODE_ID)"
+ - "--endpoint=\$(CSI_ENDPOINT)"
+ - "--cloud-config=\$(CLOUD_CONFIG)"
+- resources:
+- requests:
+- cpu: 25m
+ env:
+- - name: NODE_ID
+- valueFrom:
+- fieldRef:
+- fieldPath: spec.nodeName
+ - name: CSI_ENDPOINT
+ value: unix://csi/csi.sock
+ - name: CLOUD_CONFIG
+ value: /etc/config/cloud-config
+ imagePullPolicy: "IfNotPresent"
++ ports:
++ - containerPort: 9808
++ name: healthz
++ protocol: TCP
++ # The probe
++ livenessProbe:
++ failureThreshold: 5
++ httpGet:
++ path: /healthz
++ port: healthz
++ initialDelaySeconds: 10
++ timeoutSeconds: 3
++ periodSeconds: 10
+ volumeMounts:
+ - name: socket-dir
+ mountPath: /csi
+ - name: kubelet-dir
+ mountPath: /var/lib/kubelet
+ mountPropagation: "Bidirectional"
+- - name: pods-cloud-data
+- mountPath: /var/lib/cloud/data
+- readOnly: true
+ - name: pods-probe-dir
+ mountPath: /dev
+ mountPropagation: "HostToContainer"
+@@ -494,9 +504,6 @@ spec:
+ hostPath:
+ path: /var/lib/kubelet
+ type: Directory
+- - name: pods-cloud-data
+- hostPath:
+- path: /var/lib/cloud/data
+ - name: pods-probe-dir
+ hostPath:
+ path: /dev
+diff --git a/magnum/drivers/common/templates/kubernetes/fragments/write-heat-params-master.sh b/magnum/drivers/common/templates/kubernetes/fragments/write-heat-params-master.sh
+index a50b184558..0cd02bf95b 100644
+--- a/magnum/drivers/common/templates/kubernetes/fragments/write-heat-params-master.sh
++++ b/magnum/drivers/common/templates/kubernetes/fragments/write-heat-params-master.sh
+@@ -143,6 +143,7 @@ CSI_PROVISIONER_TAG="$CSI_PROVISIONER_TAG"
+ CSI_SNAPSHOTTER_TAG="$CSI_SNAPSHOTTER_TAG"
+ CSI_RESIZER_TAG="$CSI_RESIZER_TAG"
+ CSI_NODE_DRIVER_REGISTRAR_TAG="$CSI_NODE_DRIVER_REGISTRAR_TAG"
++CSI_LIVENESS_PROBE_TAG="$CSI_LIVENESS_PROBE_TAG"
+ DRAINO_TAG="$DRAINO_TAG"
+ MAGNUM_AUTO_HEALER_TAG="$MAGNUM_AUTO_HEALER_TAG"
+ AUTOSCALER_TAG="$AUTOSCALER_TAG"
+diff --git a/magnum/drivers/heat/k8s_fedora_template_def.py b/magnum/drivers/heat/k8s_fedora_template_def.py
+index 659069bc28..a4ec6250ab 100644
+--- a/magnum/drivers/heat/k8s_fedora_template_def.py
++++ b/magnum/drivers/heat/k8s_fedora_template_def.py
+@@ -90,6 +90,7 @@ def get_params(self, context, cluster_template, cluster, **kwargs):
+ 'csi_attacher_tag', 'csi_provisioner_tag',
+ 'csi_snapshotter_tag', 'csi_resizer_tag',
+ 'csi_node_driver_registrar_tag',
++ 'csi_liveness_probe_tag',
+ 'etcd_tag', 'flannel_tag', 'flannel_cni_tag',
+ 'cloud_provider_tag',
+ 'prometheus_tag', 'grafana_tag',
+diff --git a/magnum/drivers/k8s_fedora_coreos_v1/templates/kubecluster.yaml b/magnum/drivers/k8s_fedora_coreos_v1/templates/kubecluster.yaml
+index 35ca781d42..15bfd9af25 100644
+--- a/magnum/drivers/k8s_fedora_coreos_v1/templates/kubecluster.yaml
++++ b/magnum/drivers/k8s_fedora_coreos_v1/templates/kubecluster.yaml
+@@ -866,32 +866,38 @@ parameters:
+ description: tag of cinder csi plugin
+ tag of the k8scloudprovider/cinder-csi-plugin container
+ https://hub.docker.com/r/k8scloudprovider/cinder-csi-plugin/tags/
+- default: v1.18.0
++ default: v1.23.0
+
+ csi_attacher_tag:
+ type: string
+ description: tag of csi attacher
+- default: v2.0.0
++ default: v3.3.0
+
+ csi_provisioner_tag:
+ type: string
+ description: tag of csi provisioner
+- default: v1.4.0
++ default: v3.0.0
+
+ csi_snapshotter_tag:
+ type: string
+ description: tag of csi snapshotter
+- default: v1.2.2
++ default: v4.2.1
+
+ csi_resizer_tag:
+ type: string
+ description: tag of csi resizer
+- default: v0.3.0
++ default: v1.3.0
+
+ csi_node_driver_registrar_tag:
+ type: string
+ description: tag of csi node driver registrar
+- default: v1.1.0
++ default: v2.4.0
++
++ csi_liveness_probe_tag:
++ type: string
++ description: tag of cinder csi liveness probe
++ tag of the k8s.gcr.io/sig-storage/liveness-probe container
++ default: v2.5.0
+
+ node_problem_detector_tag:
+ type: string
+@@ -1384,6 +1390,7 @@ resources:
+ csi_snapshotter_tag: {get_param: csi_snapshotter_tag}
+ csi_resizer_tag: {get_param: csi_resizer_tag}
+ csi_node_driver_registrar_tag: {get_param: csi_node_driver_registrar_tag}
++ csi_liveness_probe_tag: {get_param: csi_liveness_probe_tag}
+ draino_tag: {get_param: draino_tag}
+ autoscaler_tag: {get_param: autoscaler_tag}
+ min_node_count: {get_param: min_node_count}
+diff --git a/magnum/drivers/k8s_fedora_coreos_v1/templates/kubemaster.yaml b/magnum/drivers/k8s_fedora_coreos_v1/templates/kubemaster.yaml
+index a038f144d0..917f010db8 100644
+--- a/magnum/drivers/k8s_fedora_coreos_v1/templates/kubemaster.yaml
++++ b/magnum/drivers/k8s_fedora_coreos_v1/templates/kubemaster.yaml
+@@ -621,6 +621,11 @@ parameters:
+ type: string
+ description: tag of csi node driver registrar
+
++ csi_liveness_probe_tag:
++ type: string
++ description: >
++ Tag of liveness-probe for cinder csi.
++
+ node_problem_detector_tag:
+ type: string
+ description: tag of the node problem detector container
+@@ -910,6 +915,7 @@ resources:
+ "$CSI_SNAPSHOTTER_TAG": {get_param: csi_snapshotter_tag}
+ "$CSI_RESIZER_TAG": {get_param: csi_resizer_tag}
+ "$CSI_NODE_DRIVER_REGISTRAR_TAG": {get_param: csi_node_driver_registrar_tag}
++ "$CSI_LIVENESS_PROBE_TAG": {get_param: csi_liveness_probe_tag}
+ "$DRAINO_TAG": {get_param: draino_tag}
+ "$AUTOSCALER_TAG": {get_param: autoscaler_tag}
+ "$MIN_NODE_COUNT": {get_param: min_node_count}
+diff --git a/magnum/tests/unit/drivers/test_template_definition.py b/magnum/tests/unit/drivers/test_template_definition.py
+index b523744597..7b08196bf1 100644
+--- a/magnum/tests/unit/drivers/test_template_definition.py
++++ b/magnum/tests/unit/drivers/test_template_definition.py
+@@ -600,6 +600,8 @@ def test_k8s_get_params(self, mock_generate_csr_and_key,
+ 'csi_resizer_tag')
+ csi_node_driver_registrar_tag = mock_cluster.labels.get(
+ 'csi_node_driver_registrar_tag')
++ csi_liveness_probe_tag = mock_cluster.labels.get(
++ 'csi_liveness_probe_tag')
+ draino_tag = mock_cluster.labels.get('draino_tag')
+ autoscaler_tag = mock_cluster.labels.get('autoscaler_tag')
+ min_node_count = mock_cluster.labels.get('min_node_count')
+@@ -725,6 +727,7 @@ def test_k8s_get_params(self, mock_generate_csr_and_key,
+ 'csi_snapshotter_tag': csi_snapshotter_tag,
+ 'csi_resizer_tag': csi_resizer_tag,
+ 'csi_node_driver_registrar_tag': csi_node_driver_registrar_tag,
++ 'csi_liveness_probe_tag': csi_liveness_probe_tag,
+ 'draino_tag': draino_tag,
+ 'autoscaler_tag': autoscaler_tag,
+ 'min_node_count': min_node_count,
+@@ -1161,6 +1164,8 @@ def test_k8s_get_params_insecure(self, mock_generate_csr_and_key,
+ 'csi_resizer_tag')
+ csi_node_driver_registrar_tag = mock_cluster.labels.get(
+ 'csi_node_driver_registrar_tag')
++ csi_liveness_probe_tag = mock_cluster.labels.get(
++ 'csi_liveness_probe_tag')
+ draino_tag = mock_cluster.labels.get('draino_tag')
+ autoscaler_tag = mock_cluster.labels.get('autoscaler_tag')
+ min_node_count = mock_cluster.labels.get('min_node_count')
+@@ -1290,6 +1295,7 @@ def test_k8s_get_params_insecure(self, mock_generate_csr_and_key,
+ 'csi_snapshotter_tag': csi_snapshotter_tag,
+ 'csi_resizer_tag': csi_resizer_tag,
+ 'csi_node_driver_registrar_tag': csi_node_driver_registrar_tag,
++ 'csi_liveness_probe_tag': csi_liveness_probe_tag,
+ 'draino_tag': draino_tag,
+ 'autoscaler_tag': autoscaler_tag,
+ 'min_node_count': min_node_count,
+diff --git a/tools/sync/cinder-csi b/tools/sync/cinder-csi
+new file mode 100755
+index 0000000000..5789631d52
+--- /dev/null
++++ b/tools/sync/cinder-csi
+@@ -0,0 +1,162 @@
++#!/usr/bin/env python3.9
++
++import requests
++
++manifest_data = []
++
++files = requests.get("https://api.github.com/repos/kubernetes/cloud-provider-openstack/contents/manifests/cinder-csi-plugin").json()
++for file in files:
++ if file['name'] == 'csi-secret-cinderplugin.yaml':
++ continue
++
++ r = requests.get(file['download_url'])
++ manifest_data.append(r.text)
++
++manifests = "---\n".join(manifest_data)
++
++# Clean-ups
++manifests = manifests.replace(
++"""
++ # - name: cacert
++ # mountPath: /etc/cacert
++ # readOnly: true
++""",
++"""
++ - name: cacert
++ mountPath: /etc/kubernetes/ca-bundle.crt
++ readOnly: true
++""").replace(
++"""
++ secretName: cloud-config
++ # - name: cacert
++ # hostPath:
++ # path: /etc/cacert
++""",
++"""
++ secretName: cinder-csi-cloud-config
++ - name: cacert
++ hostPath:
++ path: /etc/kubernetes/ca-bundle.crt
++ type: File
++""").replace(
++"""
++ serviceAccount: csi-cinder-controller-sa
++""",
++"""
++ serviceAccount: csi-cinder-controller-sa
++ hostNetwork: true
++ tolerations:
++ # Make sure the pod can be scheduled on master kubelet.
++ - effect: NoSchedule
++ operator: Exists
++ # Mark the pod as a critical add-on for rescheduling.
++ - key: CriticalAddonsOnly
++ operator: Exists
++ nodeSelector:
++ node-role.kubernetes.io/master: ""
++""").replace(
++"""
++ - --csi-address=/csi/csi.sock
++""",
++"""
++ - --csi-address=/csi/csi.sock
++ resources:
++ requests:
++ cpu: 20m
++""").replace(
++"""
++ env:
++ - name: ADDRESS
++ value: /var/lib/csi/sockets/pluginproxy/csi.sock
++""",
++"""
++ resources:
++ requests:
++ cpu: 20m
++ env:
++ - name: ADDRESS
++ value: /var/lib/csi/sockets/pluginproxy/csi.sock
++""").replace(
++ "$(",
++ "\$("
++).replace(
++ "k8s.gcr.io/sig-storage/",
++ "${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/sig-storage/}"
++).replace(
++ "docker.io/k8scloudprovider/",
++ "${CONTAINER_INFRA_PREFIX:-docker.io/k8scloudprovider/}",
++).replace(
++ "csi-attacher:v3.4.0",
++ "csi-attacher:${CSI_ATTACHER_TAG}",
++).replace(
++ "csi-provisioner:v3.1.0",
++ "csi-provisioner:${CSI_PROVISIONER_TAG}",
++).replace(
++ "csi-snapshotter:v6.0.1",
++ "csi-snapshotter:${CSI_SNAPSHOTTER_TAG}",
++).replace(
++ "csi-resizer:v1.4.0",
++ "csi-resizer:${CSI_RESIZER_TAG}",
++).replace(
++ "livenessprobe:v2.7.0",
++ "livenessprobe:${CSI_LIVENESS_PROBE_TAG}",
++).replace(
++ "cinder-csi-plugin:latest",
++ "cinder-csi-plugin:${CINDER_CSI_PLUGIN_TAG}",
++).replace(
++ "csi-node-driver-registrar:v2.5.1",
++ "csi-node-driver-registrar:${CSI_NODE_DRIVER_REGISTRAR_TAG}",
++).replace(
++ "/etc/config/cloud.conf",
++ "/etc/config/cloud-config"
++)
++
++template = f"""step="enable-cinder-csi"
++printf "Starting to run ${{step}}\\n"
++
++. /etc/sysconfig/heat-params
++
++volume_driver=$(echo "${{VOLUME_DRIVER}}" | tr '[:upper:]' '[:lower:]')
++cinder_csi_enabled=$(echo $CINDER_CSI_ENABLED | tr '[:upper:]' '[:lower:]')
++
++if [ "${{volume_driver}}" = "cinder" ] && [ "${{cinder_csi_enabled}}" = "true" ]; then
++ # Generate Cinder CSI manifest file
++ CINDER_CSI_DEPLOY=/srv/magnum/kubernetes/manifests/cinder-csi.yaml
++ echo "Writing File: $CINDER_CSI_DEPLOY"
++ mkdir -p $(dirname ${{CINDER_CSI_DEPLOY}})
++ cat << EOF > ${{CINDER_CSI_DEPLOY}}
++{manifests.strip()}
++EOF
++
++ echo "Waiting for Kubernetes API..."
++ until [ "ok" = "$(kubectl get --raw='/healthz')" ]
++ do
++ sleep 5
++ done
++
++ cat <<EOF | kubectl apply -f -
++---
++apiVersion: v1
++kind: Secret
++metadata:
++ name: cinder-csi-cloud-config
++ namespace: kube-system
++type: Opaque
++stringData:
++ cloud-config: |-
++ [Global]
++ auth-url=$AUTH_URL
++ user-id=$TRUSTEE_USER_ID
++ password=$TRUSTEE_PASSWORD
++ trust-id=$TRUST_ID
++ region=$REGION_NAME
++ ca-file=/etc/kubernetes/ca-bundle.crt
++EOF
++
++ kubectl apply -f ${{CINDER_CSI_DEPLOY}}
++fi
++printf "Finished running ${{step}}\\n"
++"""
++
++with open("magnum/drivers/common/templates/kubernetes/fragments/enable-cinder-csi.sh", "w") as fd:
++ fd.write(template)
diff --git a/images/magnum/patches/0005-secure-rbac.patch b/images/magnum/patches/0005-secure-rbac.patch
new file mode 100644
index 0000000..4f4ea6e
--- /dev/null
+++ b/images/magnum/patches/0005-secure-rbac.patch
@@ -0,0 +1,1969 @@
+From 7ffb23c87d04ea2c7f5b07a0af98573cb69379e0 Mon Sep 17 00:00:00 2001
+From: Rico Lin <ricolin@ricolky.com>
+Date: Tue, 11 Jul 2023 05:40:01 -0700
+Subject: [PATCH] Secure Rbac (#10)
+
+* Support enables rbac policies new defaults
+
+The Magnum service allows enabling the new policy (RBAC) defaults and scope
+checks. The default values of the config options ``[oslo_policy] enforce_scope``
+and ``[oslo_policy] enforce_new_defaults`` are both
+``False``, but will change to ``True`` in following cycles.
+
+To enable them, modify the below config option values in
+``magnum.conf`` file::
+
+ [oslo_policy]
+ enforce_new_defaults=True
+ enforce_scope=True
+
+reference tc goal for more detail:
+https://governance.openstack.org/tc/goals/selected/consistent-and-secure-rbac.html
+
+Related blueprint secure-rbac
+
+Change-Id: I249942a355577c4f1ef51b3988f0cc4979959d0b
+
+* Allow Admin to perform all API requests
+
+This proposed change is based on the same concerns as this bug in neutron
+https://bugs.launchpad.net/neutron/+bug/1997089
+
+This proposes to keep and make sure ADMIN can perform all API requests.
+
+Change-Id: I9a3003963bf13a591cc363fa04ec8e5719ae9114
+
+* Add policies unit tests (Part one)
+
+Add policies unit test base function
+and tests for federation, quotas and stats.
+
+Change-Id: I0eb12bf77e0e786652e674c787b2821415bd4506
+
+* Add policies unit tests (Part two)
+
+Add policies unit test base function
+and tests for certificate, and magnum service.
+
+Change-Id: Ib4047cb5a84647ff2848f06de71181673cc0627a
+
+* Add policies unit tests (Part three)
+
+Add policies unit test base function
+and tests for cluster, cluster template, and nodegroup.
+
+Change-Id: I0555e557725b02f3ec9812f0adf84d283f7389b0
+---
+ magnum/api/hooks.py | 8 +-
+ magnum/common/context.py | 12 +-
+ magnum/common/policies/base.py | 169 +++++++++++++++++-
+ magnum/common/policies/certificate.py | 11 +-
+ magnum/common/policies/cluster.py | 27 ++-
+ magnum/common/policies/cluster_template.py | 20 ++-
+ magnum/common/policies/federation.py | 18 +-
+ magnum/common/policies/nodegroup.py | 15 +-
+ magnum/common/policies/quota.py | 3 +-
+ magnum/common/policies/stats.py | 3 +-
+ magnum/common/policy.py | 12 +-
+ magnum/tests/fakes.py | 2 +-
+ magnum/tests/unit/api/base.py | 16 ++
+ .../tests/unit/api/controllers/test_root.py | 4 +-
+ .../api/controllers/v1/test_certificate.py | 23 ++-
+ .../unit/api/controllers/v1/test_cluster.py | 34 ++--
+ .../controllers/v1/test_cluster_actions.py | 48 +++--
+ .../unit/api/controllers/v1/test_nodegroup.py | 12 +-
+ .../unit/api/controllers/v1/test_quota.py | 2 +-
+ .../unit/api/controllers/v1/test_stats.py | 15 +-
+ magnum/tests/unit/api/test_hooks.py | 10 +-
+ magnum/tests/unit/common/policies/__init__.py | 0
+ magnum/tests/unit/common/policies/base.py | 37 ++++
+ .../policies/test_certificate_policy.py | 72 ++++++++
+ .../common/policies/test_cluster_policy.py | 65 +++++++
+ .../policies/test_cluster_template_policy.py | 74 ++++++++
+ .../common/policies/test_federation_policy.py | 67 +++++++
+ .../policies/test_magnum_service_policy.py | 26 +++
+ .../common/policies/test_nodegroup_policy.py | 74 ++++++++
+ .../unit/common/policies/test_quota_policy.py | 74 ++++++++
+ .../unit/common/policies/test_stats_policy.py | 33 ++++
+ magnum/tests/unit/common/test_context.py | 43 ++---
+ ...dmin_perform_acitons-cc988655bb72b3f3.yaml | 9 +
+ ...ope-and-new-defaults-7e6e503f74283071.yaml | 13 ++
+ 36 files changed, 943 insertions(+), 124 deletions(-)
+ create mode 100644 magnum/tests/unit/common/policies/__init__.py
+ create mode 100644 magnum/tests/unit/common/policies/base.py
+ create mode 100644 magnum/tests/unit/common/policies/test_certificate_policy.py
+ create mode 100644 magnum/tests/unit/common/policies/test_cluster_policy.py
+ create mode 100644 magnum/tests/unit/common/policies/test_cluster_template_policy.py
+ create mode 100644 magnum/tests/unit/common/policies/test_federation_policy.py
+ create mode 100644 magnum/tests/unit/common/policies/test_magnum_service_policy.py
+ create mode 100644 magnum/tests/unit/common/policies/test_nodegroup_policy.py
+ create mode 100644 magnum/tests/unit/common/policies/test_quota_policy.py
+ create mode 100644 magnum/tests/unit/common/policies/test_stats_policy.py
+ create mode 100644 releasenotes/notes/allow_admin_perform_acitons-cc988655bb72b3f3.yaml
+ create mode 100644 releasenotes/notes/enable-enforce-scope-and-new-defaults-7e6e503f74283071.yaml
+
+diff --git a/magnum/api/hooks.py b/magnum/api/hooks.py
+index e0d36a9a88..f5a9049795 100644
+--- a/magnum/api/hooks.py
++++ b/magnum/api/hooks.py
+@@ -52,8 +52,8 @@ def before(self, state):
+ user_id = headers.get('X-User-Id')
+ project = headers.get('X-Project-Name')
+ project_id = headers.get('X-Project-Id')
+- domain_id = headers.get('X-User-Domain-Id')
+- domain_name = headers.get('X-User-Domain-Name')
++ user_domain_id = headers.get('X-User-Domain-Id')
++ user_domain_name = headers.get('X-User-Domain-Name')
+ auth_token = headers.get('X-Auth-Token')
+ roles = headers.get('X-Roles', '').split(',')
+ auth_token_info = state.request.environ.get('keystone.token_info')
+@@ -72,8 +72,8 @@ def before(self, state):
+ user_id=user_id,
+ project_name=project,
+ project_id=project_id,
+- domain_id=domain_id,
+- domain_name=domain_name,
++ user_domain_id=user_domain_id,
++ user_domain_name=user_domain_name,
+ roles=roles)
+
+
+diff --git a/magnum/common/context.py b/magnum/common/context.py
+index 547c9cc9b4..c2c3be1e23 100644
+--- a/magnum/common/context.py
++++ b/magnum/common/context.py
+@@ -42,7 +42,7 @@ def __init__(self, auth_token=None, auth_url=None, domain_id=None,
+ """
+ super(RequestContext, self).__init__(auth_token=auth_token,
+ user_id=user_name,
+- project_id=project_name,
++ project_id=project_id,
+ is_admin=is_admin,
+ read_only=read_only,
+ show_deleted=show_deleted,
+@@ -53,8 +53,12 @@ def __init__(self, auth_token=None, auth_url=None, domain_id=None,
+ self.user_id = user_id
+ self.project_name = project_name
+ self.project_id = project_id
+- self.domain_id = domain_id
+- self.domain_name = domain_name
++ # (ricolin) Rmove domain_id because oslo_policy use this args to
++ # judge if this request is a domain scope or not. We might be consider
++ # bring this back only if that judge in oslo_policy is no longer affect
++ # project scope enforce.
++ # self.domain_id = domain_id
++ # self.domain_name = domain_name
+ self.user_domain_id = user_domain_id
+ self.user_domain_name = user_domain_name
+ self.auth_url = auth_url
+@@ -71,8 +75,6 @@ def to_dict(self):
+ value = super(RequestContext, self).to_dict()
+ value.update({'auth_token': self.auth_token,
+ 'auth_url': self.auth_url,
+- 'domain_id': self.domain_id,
+- 'domain_name': self.domain_name,
+ 'user_domain_id': self.user_domain_id,
+ 'user_domain_name': self.user_domain_name,
+ 'user_name': self.user_name,
+diff --git a/magnum/common/policies/base.py b/magnum/common/policies/base.py
+index 44c75b7daf..05ac11728b 100644
+--- a/magnum/common/policies/base.py
++++ b/magnum/common/policies/base.py
+@@ -13,12 +13,79 @@
+ # under the License.
+ from oslo_policy import policy
+
+-ROLE_ADMIN = 'rule:context_is_admin'
++
+ RULE_ADMIN_OR_OWNER = 'rule:admin_or_owner'
+-RULE_ADMIN_API = 'rule:admin_api'
++RULE_ADMIN_API = 'rule:context_is_admin'
+ RULE_ADMIN_OR_USER = 'rule:admin_or_user'
+ RULE_CLUSTER_USER = 'rule:cluster_user'
+ RULE_DENY_CLUSTER_USER = 'rule:deny_cluster_user'
++RULE_USER = "rule:is_user"
++# Generic check string for checking if a user is authorized on a particular
++# project, specifically with the member role.
++RULE_PROJECT_MEMBER = 'rule:project_member'
++# Generic check string for checking if a user is authorized on a particular
++# project but with read-only access. For example, this persona would be able to
++# list private images owned by a project but cannot make any writeable changes
++# to those images.
++RULE_PROJECT_READER = 'rule:project_reader'
++
++RULE_USER_OR_CLUSTER_USER = (
++ 'rule:user_or_cluster_user')
++RULE_ADMIN_OR_PROJECT_READER = (
++ 'rule:admin_or_project_reader')
++RULE_ADMIN_OR_PROJECT_MEMBER = (
++ 'rule:admin_or_project_member')
++RULE_ADMIN_OR_PROJECT_MEMBER_USER = (
++ 'rule:admin_or_project_member_user')
++RULE_ADMIN_OR_PROJECT_MEMBER_USER_OR_CLUSTER_USER = (
++ 'rule:admin_or_project_member_user_or_cluster_user')
++RULE_PROJECT_MEMBER_DENY_CLUSTER_USER = (
++ 'rule:project_member_deny_cluster_user')
++RULE_ADMIN_OR_PROJECT_MEMBER_DENY_CLUSTER_USER = (
++ 'rule:admin_or_project_member_deny_cluster_user')
++RULE_PROJECT_READER_DENY_CLUSTER_USER = (
++ 'rule:project_reader_deny_cluster_user')
++RULE_ADMIN_OR_PROJECT_READER_DENY_CLUSTER_USER = (
++ 'rule:admin_or_project_reader_deny_cluster_user')
++RULE_ADMIN_OR_PROJECT_READER_USER_OR_CLUSTER_USER = (
++ 'rule:admin_or_project_reader_user_or_cluster_user')
++
++# ==========================================================
++# Deprecated Since OpenStack 2023.2(Magnum 17.0.0) and should be removed in
++# The following cycle.
++
++DEPRECATED_REASON = """
++The Magnum API now enforces scoped tokens and default reader and member roles.
++"""
++
++DEPRECATED_SINCE = 'OpenStack 2023.2(Magnum 17.0.0)'
++
++
++DEPRECATED_DENY_CLUSTER_USER = policy.DeprecatedRule(
++ name=RULE_DENY_CLUSTER_USER,
++ check_str='not domain_id:%(trustee_domain_id)s',
++ deprecated_reason=DEPRECATED_REASON,
++ deprecated_since=DEPRECATED_SINCE
++)
++
++DEPRECATED_RULE_ADMIN_OR_OWNER = policy.DeprecatedRule(
++ name=RULE_ADMIN_OR_OWNER,
++ check_str='is_admin:True or project_id:%(project_id)s',
++ deprecated_reason=DEPRECATED_REASON,
++ deprecated_since=DEPRECATED_SINCE
++)
++
++# Only used for DEPRECATED_RULE_ADMIN_OR_USER_OR_CLUSTER_USER
++RULE_ADMIN_OR_USER_OR_CLUSTER_USER = (
++ 'rule:admin_or_user_or_cluster_user')
++
++DEPRECATED_RULE_ADMIN_OR_USER_OR_CLUSTER_USER = policy.DeprecatedRule(
++ name=RULE_ADMIN_OR_USER_OR_CLUSTER_USER,
++ check_str=f"(({RULE_ADMIN_API}) or ({RULE_USER_OR_CLUSTER_USER}))",
++ deprecated_reason=DEPRECATED_REASON,
++ deprecated_since=DEPRECATED_SINCE
++)
++# ==========================================================
+
+ rules = [
+ policy.RuleDefault(
+@@ -29,14 +96,14 @@
+ name='admin_or_owner',
+ check_str='is_admin:True or project_id:%(project_id)s'
+ ),
+- policy.RuleDefault(
+- name='admin_api',
+- check_str='rule:context_is_admin'
+- ),
+ policy.RuleDefault(
+ name='admin_or_user',
+ check_str='is_admin:True or user_id:%(user_id)s'
+ ),
++ policy.RuleDefault(
++ name='is_user',
++ check_str='user_id:%(user_id)s'
++ ),
+ policy.RuleDefault(
+ name='cluster_user',
+ check_str='user_id:%(trustee_user_id)s'
+@@ -44,7 +111,95 @@
+ policy.RuleDefault(
+ name='deny_cluster_user',
+ check_str='not domain_id:%(trustee_domain_id)s'
+- )
++ ),
++ policy.RuleDefault(
++ name='project_member',
++ check_str='role:member and project_id:%(project_id)s'
++ ),
++ policy.RuleDefault(
++ name='project_reader',
++ check_str='role:reader and project_id:%(project_id)s'
++ ),
++ policy.RuleDefault(
++ name='admin_or_project_reader',
++ check_str=f"({RULE_ADMIN_API}) or ({RULE_PROJECT_READER})",
++ deprecated_rule=DEPRECATED_RULE_ADMIN_OR_OWNER
++ ),
++ policy.RuleDefault(
++ name='admin_or_project_member',
++ check_str=f"({RULE_ADMIN_API}) or ({RULE_PROJECT_MEMBER})",
++ deprecated_rule=DEPRECATED_RULE_ADMIN_OR_OWNER
++ ),
++ policy.RuleDefault(
++ name='admin_or_project_member_user',
++ check_str=(
++ f"({RULE_ADMIN_API}) or (({RULE_PROJECT_MEMBER}) and "
++ f"({RULE_USER}))"
++ )
++ ),
++ policy.RuleDefault(
++ name='user_or_cluster_user',
++ check_str=(
++ f"(({RULE_USER}) or ({RULE_CLUSTER_USER}))"
++ )
++ ),
++ policy.RuleDefault(
++ name='admin_or_user_or_cluster_user',
++ check_str=(
++ f"(({RULE_ADMIN_API}) or ({RULE_USER_OR_CLUSTER_USER}))"
++ )
++ ),
++ policy.RuleDefault(
++ name='admin_or_project_member_cluster_user',
++ check_str=(
++ f"({RULE_ADMIN_API}) or (({RULE_PROJECT_MEMBER}) "
++ f"and ({RULE_CLUSTER_USER}))"
++ )
++ ),
++ policy.RuleDefault(
++ name='admin_or_project_member_user_or_cluster_user',
++ check_str=(
++ f"({RULE_ADMIN_API}) or (({RULE_PROJECT_MEMBER}) and "
++ f"({RULE_USER_OR_CLUSTER_USER}))"
++ ),
++ deprecated_rule=DEPRECATED_RULE_ADMIN_OR_USER_OR_CLUSTER_USER
++ ),
++ policy.RuleDefault(
++ name='project_member_deny_cluster_user',
++ check_str=(
++ f"(({RULE_PROJECT_MEMBER}) and ({RULE_DENY_CLUSTER_USER}))"
++ ),
++ deprecated_rule=DEPRECATED_DENY_CLUSTER_USER
++ ),
++ policy.RuleDefault(
++ name='admin_or_project_member_deny_cluster_user',
++ check_str=(
++ f"({RULE_ADMIN_API}) or ({RULE_PROJECT_MEMBER_DENY_CLUSTER_USER})"
++ ),
++ deprecated_rule=DEPRECATED_DENY_CLUSTER_USER
++ ),
++ policy.RuleDefault(
++ name='project_reader_deny_cluster_user',
++ check_str=(
++ f"(({RULE_PROJECT_READER}) and ({RULE_DENY_CLUSTER_USER}))"
++ ),
++ deprecated_rule=DEPRECATED_DENY_CLUSTER_USER
++ ),
++ policy.RuleDefault(
++ name='admin_or_project_reader_deny_cluster_user',
++ check_str=(
++ f"({RULE_ADMIN_API}) or ({RULE_PROJECT_READER_DENY_CLUSTER_USER})"
++ ),
++ deprecated_rule=DEPRECATED_DENY_CLUSTER_USER
++ ),
++ policy.RuleDefault(
++ name='admin_or_project_reader_user_or_cluster_user',
++ check_str=(
++ f"({RULE_ADMIN_API}) or (({RULE_PROJECT_READER}) and "
++ f"({RULE_USER_OR_CLUSTER_USER}))"
++ ),
++ deprecated_rule=DEPRECATED_RULE_ADMIN_OR_USER_OR_CLUSTER_USER
++ ),
+ ]
+
+
+diff --git a/magnum/common/policies/certificate.py b/magnum/common/policies/certificate.py
+index 5e96b64f5b..32a7047a4b 100644
+--- a/magnum/common/policies/certificate.py
++++ b/magnum/common/policies/certificate.py
+@@ -16,13 +16,12 @@
+ from magnum.common.policies import base
+
+ CERTIFICATE = 'certificate:%s'
+-RULE_ADMIN_OR_USER_OR_CLUSTER_USER = base.RULE_ADMIN_OR_USER + " or " + \
+- base.RULE_CLUSTER_USER
+
+ rules = [
+ policy.DocumentedRuleDefault(
+ name=CERTIFICATE % 'create',
+- check_str=RULE_ADMIN_OR_USER_OR_CLUSTER_USER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER_USER_OR_CLUSTER_USER,
++ scope_types=["project"],
+ description='Sign a new certificate by the CA.',
+ operations=[
+ {
+@@ -33,7 +32,8 @@
+ ),
+ policy.DocumentedRuleDefault(
+ name=CERTIFICATE % 'get',
+- check_str=RULE_ADMIN_OR_USER_OR_CLUSTER_USER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_READER_USER_OR_CLUSTER_USER,
++ scope_types=["project"],
+ description='Retrieve CA information about the given bay/cluster.',
+ operations=[
+ {
+@@ -44,7 +44,8 @@
+ ),
+ policy.DocumentedRuleDefault(
+ name=CERTIFICATE % 'rotate_ca',
+- check_str=base.RULE_ADMIN_OR_OWNER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER,
++ scope_types=["project"],
+ description='Rotate the CA certificate on the given bay/cluster.',
+ operations=[
+ {
+diff --git a/magnum/common/policies/cluster.py b/magnum/common/policies/cluster.py
+index 15b63226b2..5e1864c377 100644
+--- a/magnum/common/policies/cluster.py
++++ b/magnum/common/policies/cluster.py
+@@ -20,7 +20,8 @@
+ rules = [
+ policy.DocumentedRuleDefault(
+ name=CLUSTER % 'create',
+- check_str=base.RULE_DENY_CLUSTER_USER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER_DENY_CLUSTER_USER,
++ scope_types=["project"],
+ description='Create a new cluster.',
+ operations=[
+ {
+@@ -31,7 +32,8 @@
+ ),
+ policy.DocumentedRuleDefault(
+ name=CLUSTER % 'delete',
+- check_str=base.RULE_DENY_CLUSTER_USER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER_DENY_CLUSTER_USER,
++ scope_types=["project"],
+ description='Delete a cluster.',
+ operations=[
+ {
+@@ -53,7 +55,8 @@
+ ),
+ policy.DocumentedRuleDefault(
+ name=CLUSTER % 'detail',
+- check_str=base.RULE_DENY_CLUSTER_USER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_READER_DENY_CLUSTER_USER,
++ scope_types=["project"],
+ description='Retrieve a list of clusters with detail.',
+ operations=[
+ {
+@@ -75,7 +78,8 @@
+ ),
+ policy.DocumentedRuleDefault(
+ name=CLUSTER % 'get',
+- check_str=base.RULE_DENY_CLUSTER_USER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_READER_DENY_CLUSTER_USER,
++ scope_types=["project"],
+ description='Retrieve information about the given cluster.',
+ operations=[
+ {
+@@ -98,7 +102,8 @@
+ ),
+ policy.DocumentedRuleDefault(
+ name=CLUSTER % 'get_all',
+- check_str=base.RULE_DENY_CLUSTER_USER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_READER_DENY_CLUSTER_USER,
++ scope_types=["project"],
+ description='Retrieve a list of clusters.',
+ operations=[
+ {
+@@ -120,7 +125,8 @@
+ ),
+ policy.DocumentedRuleDefault(
+ name=CLUSTER % 'update',
+- check_str=base.RULE_DENY_CLUSTER_USER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER_DENY_CLUSTER_USER,
++ scope_types=["project"],
+ description='Update an existing cluster.',
+ operations=[
+ {
+@@ -131,7 +137,8 @@
+ ),
+ policy.DocumentedRuleDefault(
+ name=CLUSTER % 'update_health_status',
+- check_str=base.RULE_ADMIN_OR_USER + " or " + base.RULE_CLUSTER_USER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER_USER_OR_CLUSTER_USER,
++ scope_types=["project"],
+ description='Update the health status of an existing cluster.',
+ operations=[
+ {
+@@ -153,7 +160,8 @@
+ ),
+ policy.DocumentedRuleDefault(
+ name=CLUSTER % 'resize',
+- check_str=base.RULE_DENY_CLUSTER_USER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER_DENY_CLUSTER_USER,
++ scope_types=["project"],
+ description='Resize an existing cluster.',
+ operations=[
+ {
+@@ -164,7 +172,8 @@
+ ),
+ policy.DocumentedRuleDefault(
+ name=CLUSTER % 'upgrade',
+- check_str=base.RULE_DENY_CLUSTER_USER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER_DENY_CLUSTER_USER,
++ scope_types=["project"],
+ description='Upgrade an existing cluster.',
+ operations=[
+ {
+diff --git a/magnum/common/policies/cluster_template.py b/magnum/common/policies/cluster_template.py
+index d9b51737ad..c0d8337051 100644
+--- a/magnum/common/policies/cluster_template.py
++++ b/magnum/common/policies/cluster_template.py
+@@ -20,18 +20,20 @@
+ rules = [
+ policy.DocumentedRuleDefault(
+ name=CLUSTER_TEMPLATE % 'create',
+- check_str=base.RULE_DENY_CLUSTER_USER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER_DENY_CLUSTER_USER,
++ scope_types=["project"],
+ description='Create a new cluster template.',
+ operations=[
+ {
+ 'path': '/v1/clustertemplates',
+ 'method': 'POST'
+ }
+- ]
++ ],
+ ),
+ policy.DocumentedRuleDefault(
+ name=CLUSTER_TEMPLATE % 'delete',
+- check_str=base.RULE_ADMIN_OR_OWNER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER,
++ scope_types=["project"],
+ description='Delete a cluster template.',
+ operations=[
+ {
+@@ -65,7 +67,8 @@
+ ),
+ policy.DocumentedRuleDefault(
+ name=CLUSTER_TEMPLATE % 'detail',
+- check_str=base.RULE_DENY_CLUSTER_USER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_READER_DENY_CLUSTER_USER,
++ scope_types=["project"],
+ description='Retrieve a list of cluster templates with detail.',
+ operations=[
+ {
+@@ -76,7 +79,8 @@
+ ),
+ policy.DocumentedRuleDefault(
+ name=CLUSTER_TEMPLATE % 'get',
+- check_str=base.RULE_DENY_CLUSTER_USER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_READER_DENY_CLUSTER_USER,
++ scope_types=["project"],
+ description='Retrieve information about the given cluster template.',
+ operations=[
+ {
+@@ -99,7 +103,8 @@
+ ),
+ policy.DocumentedRuleDefault(
+ name=CLUSTER_TEMPLATE % 'get_all',
+- check_str=base.RULE_DENY_CLUSTER_USER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_READER_DENY_CLUSTER_USER,
++ scope_types=["project"],
+ description='Retrieve a list of cluster templates.',
+ operations=[
+ {
+@@ -121,7 +126,8 @@
+ ),
+ policy.DocumentedRuleDefault(
+ name=CLUSTER_TEMPLATE % 'update',
+- check_str=base.RULE_ADMIN_OR_OWNER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER,
++ scope_types=["project"],
+ description='Update an existing cluster template.',
+ operations=[
+ {
+diff --git a/magnum/common/policies/federation.py b/magnum/common/policies/federation.py
+index b78b1a1b1e..4c347993c3 100644
+--- a/magnum/common/policies/federation.py
++++ b/magnum/common/policies/federation.py
+@@ -20,7 +20,8 @@
+ rules = [
+ policy.DocumentedRuleDefault(
+ name=FEDERATION % 'create',
+- check_str=base.RULE_DENY_CLUSTER_USER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER_DENY_CLUSTER_USER,
++ scope_types=["project"],
+ description='Create a new federation.',
+ operations=[
+ {
+@@ -31,7 +32,8 @@
+ ),
+ policy.DocumentedRuleDefault(
+ name=FEDERATION % 'delete',
+- check_str=base.RULE_DENY_CLUSTER_USER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER_DENY_CLUSTER_USER,
++ scope_types=["project"],
+ description='Delete a federation.',
+ operations=[
+ {
+@@ -42,7 +44,8 @@
+ ),
+ policy.DocumentedRuleDefault(
+ name=FEDERATION % 'detail',
+- check_str=base.RULE_DENY_CLUSTER_USER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_READER_DENY_CLUSTER_USER,
++ scope_types=["project"],
+ description='Retrieve a list of federations with detail.',
+ operations=[
+ {
+@@ -53,7 +56,8 @@
+ ),
+ policy.DocumentedRuleDefault(
+ name=FEDERATION % 'get',
+- check_str=base.RULE_DENY_CLUSTER_USER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_READER_DENY_CLUSTER_USER,
++ scope_types=["project"],
+ description='Retrieve information about the given federation.',
+ operations=[
+ {
+@@ -64,7 +68,8 @@
+ ),
+ policy.DocumentedRuleDefault(
+ name=FEDERATION % 'get_all',
+- check_str=base.RULE_DENY_CLUSTER_USER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_READER_DENY_CLUSTER_USER,
++ scope_types=["project"],
+ description='Retrieve a list of federations.',
+ operations=[
+ {
+@@ -75,7 +80,8 @@
+ ),
+ policy.DocumentedRuleDefault(
+ name=FEDERATION % 'update',
+- check_str=base.RULE_DENY_CLUSTER_USER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER_DENY_CLUSTER_USER,
++ scope_types=["project"],
+ description='Update an existing federation.',
+ operations=[
+ {
+diff --git a/magnum/common/policies/nodegroup.py b/magnum/common/policies/nodegroup.py
+index 64b2d670ea..25bad88579 100644
+--- a/magnum/common/policies/nodegroup.py
++++ b/magnum/common/policies/nodegroup.py
+@@ -24,7 +24,8 @@
+ rules = [
+ policy.DocumentedRuleDefault(
+ name=NODEGROUP % 'get',
+- check_str=base.RULE_ADMIN_OR_OWNER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_READER,
++ scope_types=["project"],
+ description='Retrieve information about the given nodegroup.',
+ operations=[
+ {
+@@ -35,7 +36,8 @@
+ ),
+ policy.DocumentedRuleDefault(
+ name=NODEGROUP % 'get_all',
+- check_str=base.RULE_ADMIN_OR_OWNER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_READER,
++ scope_types=["project"],
+ description='Retrieve a list of nodegroups that belong to a cluster.',
+ operations=[
+ {
+@@ -68,7 +70,8 @@
+ ),
+ policy.DocumentedRuleDefault(
+ name=NODEGROUP % 'create',
+- check_str=base.RULE_ADMIN_OR_OWNER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER,
++ scope_types=["project"],
+ description='Create a new nodegroup.',
+ operations=[
+ {
+@@ -79,7 +82,8 @@
+ ),
+ policy.DocumentedRuleDefault(
+ name=NODEGROUP % 'delete',
+- check_str=base.RULE_ADMIN_OR_OWNER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER,
++ scope_types=["project"],
+ description='Delete a nodegroup.',
+ operations=[
+ {
+@@ -90,7 +94,8 @@
+ ),
+ policy.DocumentedRuleDefault(
+ name=NODEGROUP % 'update',
+- check_str=base.RULE_ADMIN_OR_OWNER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_MEMBER,
++ scope_types=["project"],
+ description='Update an existing nodegroup.',
+ operations=[
+ {
+diff --git a/magnum/common/policies/quota.py b/magnum/common/policies/quota.py
+index 4baecf7d84..574857b1a4 100644
+--- a/magnum/common/policies/quota.py
++++ b/magnum/common/policies/quota.py
+@@ -42,7 +42,8 @@
+ ),
+ policy.DocumentedRuleDefault(
+ name=QUOTA % 'get',
+- check_str=base.RULE_ADMIN_OR_OWNER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_READER,
++ scope_types=["project"],
+ description='Retrieve Quota information for the given project_id.',
+ operations=[
+ {
+diff --git a/magnum/common/policies/stats.py b/magnum/common/policies/stats.py
+index c37164094b..64996443b7 100644
+--- a/magnum/common/policies/stats.py
++++ b/magnum/common/policies/stats.py
+@@ -20,7 +20,8 @@
+ rules = [
+ policy.DocumentedRuleDefault(
+ name=STATS % 'get_all',
+- check_str=base.RULE_ADMIN_OR_OWNER,
++ check_str=base.RULE_ADMIN_OR_PROJECT_READER,
++ scope_types=["project"],
+ description='Retrieve magnum stats.',
+ operations=[
+ {
+diff --git a/magnum/common/policy.py b/magnum/common/policy.py
+index d4bfff77b5..989676efb1 100644
+--- a/magnum/common/policy.py
++++ b/magnum/common/policy.py
+@@ -17,6 +17,7 @@
+
+ import decorator
+ from oslo_config import cfg
++from oslo_log import log as logging
+ from oslo_policy import opts
+ from oslo_policy import policy
+ from oslo_utils import importutils
+@@ -27,6 +28,7 @@
+ from magnum.common import policies
+
+
++LOG = logging.getLogger(__name__)
+ _ENFORCER = None
+ CONF = cfg.CONF
+
+@@ -105,8 +107,14 @@ def enforce(context, rule=None, target=None,
+ target = {'project_id': context.project_id,
+ 'user_id': context.user_id}
+ add_policy_attributes(target)
+- return enforcer.enforce(rule, target, credentials,
+- do_raise=do_raise, exc=exc, *args, **kwargs)
++
++ try:
++ result = enforcer.enforce(rule, target, credentials,
++ do_raise=do_raise, exc=exc, *args, **kwargs)
++ except policy.InvalidScope as ex:
++ LOG.debug(f"Invalid scope while enforcing policy: {str(ex)}")
++ raise exc(action=rule)
++ return result
+
+
+ def add_policy_attributes(target):
+diff --git a/magnum/tests/fakes.py b/magnum/tests/fakes.py
+index 4407975306..3a64078ce8 100644
+--- a/magnum/tests/fakes.py
++++ b/magnum/tests/fakes.py
+@@ -25,7 +25,7 @@
+ 'X-Roles': 'role1,role2',
+ 'X-Auth-Url': 'fake_auth_url',
+ 'X-Identity-Status': 'Confirmed',
+- 'X-User-Domain-Name': 'domain',
++ 'X-User-Domain-Name': 'user_domain_name',
+ 'X-Project-Domain-Id': 'project_domain_id',
+ 'X-User-Domain-Id': 'user_domain_id',
+ 'OpenStack-API-Version': 'container-infra 1.0'
+diff --git a/magnum/tests/unit/api/base.py b/magnum/tests/unit/api/base.py
+index a4dd3fef63..ddf41277e4 100644
+--- a/magnum/tests/unit/api/base.py
++++ b/magnum/tests/unit/api/base.py
+@@ -128,6 +128,9 @@ def put_json(self, path, params, expect_errors=False, headers=None,
+ with the request
+ :param status: expected status code of response
+ """
++ # Provide member role for put request
++ if not headers:
++ headers = {"X-Roles": "member"}
+ return self._request_json(path=path, params=params,
+ expect_errors=expect_errors,
+ headers=headers, extra_environ=extra_environ,
+@@ -146,6 +149,9 @@ def post_json(self, path, params, expect_errors=False, headers=None,
+ with the request
+ :param status: expected status code of response
+ """
++ # Provide member role for post request
++ if not headers:
++ headers = {"X-Roles": "member"}
+ return self._request_json(path=path, params=params,
+ expect_errors=expect_errors,
+ headers=headers, extra_environ=extra_environ,
+@@ -164,6 +170,9 @@ def patch_json(self, path, params, expect_errors=False, headers=None,
+ with the request
+ :param status: expected status code of response
+ """
++ # Provide member role for patch request
++ if not headers:
++ headers = {"X-Roles": "member"}
+ return self._request_json(path=path, params=params,
+ expect_errors=expect_errors,
+ headers=headers, extra_environ=extra_environ,
+@@ -184,6 +193,9 @@ def delete(self, path, expect_errors=False, headers=None,
+ """
+ full_path = path_prefix + path
+ print('DELETE: %s' % (full_path))
++ # Provide member role for delete request
++ if not headers:
++ headers = {"X-Roles": "member"}
+ response = self.app.delete(str(full_path),
+ headers=headers,
+ status=status,
+@@ -215,6 +227,10 @@ def get_json(self, path, expect_errors=False, headers=None,
+ 'q.value': [],
+ 'q.op': [],
+ }
++
++ # Provide reader role for get request
++ if not headers:
++ headers = {"X-Roles": "reader"}
+ for query in q:
+ for name in ['field', 'op', 'value']:
+ query_params['q.%s' % name].append(query.get(name, ''))
+diff --git a/magnum/tests/unit/api/controllers/test_root.py b/magnum/tests/unit/api/controllers/test_root.py
+index e187715016..31700761fd 100644
+--- a/magnum/tests/unit/api/controllers/test_root.py
++++ b/magnum/tests/unit/api/controllers/test_root.py
+@@ -140,7 +140,9 @@ def test_noauth(self):
+ response = app.get('/v1/')
+ self.assertEqual(self.v1_expected, response.json)
+
+- response = app.get('/v1/clustertemplates')
++ response = app.get('/v1/clustertemplates',
++ headers={"X-Roles": "reader"}
++ )
+ self.assertEqual(200, response.status_int)
+
+ def test_auth_with_no_public_routes(self):
+diff --git a/magnum/tests/unit/api/controllers/v1/test_certificate.py b/magnum/tests/unit/api/controllers/v1/test_certificate.py
+index 02fcfb40a2..ecd14f0187 100644
+--- a/magnum/tests/unit/api/controllers/v1/test_certificate.py
++++ b/magnum/tests/unit/api/controllers/v1/test_certificate.py
+@@ -21,7 +21,14 @@
+ from magnum.tests.unit.objects import utils as obj_utils
+
+
+-HEADERS = {'OpenStack-API-Version': 'container-infra latest'}
++READER_HEADERS = {
++ 'OpenStack-API-Version': 'container-infra latest',
++ "X-Roles": "reader"
++}
++HEADERS = {
++ 'OpenStack-API-Version': 'container-infra latest',
++ "X-Roles": "member"
++}
+
+
+ class TestCertObject(base.TestCase):
+@@ -59,7 +66,7 @@ def test_get_one(self):
+ self.conductor_api.get_ca_certificate.return_value = mock_cert
+
+ response = self.get_json('/certificates/%s' % self.cluster.uuid,
+- headers=HEADERS)
++ headers=READER_HEADERS)
+
+ self.assertEqual(self.cluster.uuid, response['cluster_uuid'])
+ # check that bay is still valid as well
+@@ -74,7 +81,7 @@ def test_get_one_by_name(self):
+ self.conductor_api.get_ca_certificate.return_value = mock_cert
+
+ response = self.get_json('/certificates/%s' % self.cluster.name,
+- headers=HEADERS)
++ headers=READER_HEADERS)
+
+ self.assertEqual(self.cluster.uuid, response['cluster_uuid'])
+ # check that bay is still valid as well
+@@ -84,7 +91,8 @@ def test_get_one_by_name(self):
+
+ def test_get_one_by_name_not_found(self):
+ response = self.get_json('/certificates/not_found',
+- expect_errors=True, headers=HEADERS)
++ expect_errors=True,
++ headers=READER_HEADERS)
+
+ self.assertEqual(404, response.status_int)
+ self.assertEqual('application/json', response.content_type)
+@@ -97,7 +105,8 @@ def test_get_one_by_name_multiple_cluster(self):
+ uuid=uuidutils.generate_uuid())
+
+ response = self.get_json('/certificates/test_cluster',
+- expect_errors=True, headers=HEADERS)
++ expect_errors=True,
++ headers=READER_HEADERS)
+
+ self.assertEqual(409, response.status_int)
+ self.assertEqual('application/json', response.content_type)
+@@ -110,7 +119,7 @@ def test_links(self):
+ self.conductor_api.get_ca_certificate.return_value = mock_cert
+
+ response = self.get_json('/certificates/%s' % self.cluster.uuid,
+- headers=HEADERS)
++ headers=READER_HEADERS)
+
+ self.assertIn('links', response.keys())
+ self.assertEqual(2, len(response['links']))
+@@ -265,7 +274,7 @@ def test_policy_disallow_get_one(self):
+ self._common_policy_check(
+ "certificate:get", self.get_json,
+ '/certificates/%s' % cluster.uuid,
+- expect_errors=True, headers=HEADERS)
++ expect_errors=True, headers=READER_HEADERS)
+
+ def test_policy_disallow_create(self):
+ cluster = obj_utils.create_test_cluster(self.context)
+diff --git a/magnum/tests/unit/api/controllers/v1/test_cluster.py b/magnum/tests/unit/api/controllers/v1/test_cluster.py
+index 016f8cc173..9ff2439f36 100755
+--- a/magnum/tests/unit/api/controllers/v1/test_cluster.py
++++ b/magnum/tests/unit/api/controllers/v1/test_cluster.py
+@@ -494,7 +494,9 @@ def test_update_cluster_with_rollback_enabled(self):
+ '/clusters/%s/?rollback=True' % self.cluster_obj.uuid,
+ [{'path': '/node_count', 'value': node_count,
+ 'op': 'replace'}],
+- headers={'OpenStack-API-Version': 'container-infra 1.3'})
++ headers={'OpenStack-API-Version': 'container-infra 1.3',
++ "X-Roles": "member"
++ })
+
+ self.mock_cluster_update.assert_called_once_with(
+ mock.ANY, node_count, self.cluster_obj.health_status,
+@@ -507,7 +509,9 @@ def test_update_cluster_with_rollback_disabled(self):
+ '/clusters/%s/?rollback=False' % self.cluster_obj.uuid,
+ [{'path': '/node_count', 'value': node_count,
+ 'op': 'replace'}],
+- headers={'OpenStack-API-Version': 'container-infra 1.3'})
++ headers={'OpenStack-API-Version': 'container-infra 1.3',
++ "X-Roles": "member"
++ })
+
+ self.mock_cluster_update.assert_called_once_with(
+ mock.ANY, node_count, self.cluster_obj.health_status,
+@@ -520,7 +524,9 @@ def test_update_cluster_with_zero_node_count_fail(self):
+ '/clusters/%s' % self.cluster_obj.uuid,
+ [{'path': '/node_count', 'value': node_count,
+ 'op': 'replace'}],
+- headers={'OpenStack-API-Version': 'container-infra 1.9'},
++ headers={'OpenStack-API-Version': 'container-infra 1.9',
++ "X-Roles": "member"
++ },
+ expect_errors=True)
+
+ self.assertEqual(400, response.status_code)
+@@ -531,7 +537,9 @@ def test_update_cluster_with_zero_node_count(self):
+ '/clusters/%s' % self.cluster_obj.uuid,
+ [{'path': '/node_count', 'value': node_count,
+ 'op': 'replace'}],
+- headers={'OpenStack-API-Version': 'container-infra 1.10'})
++ headers={'OpenStack-API-Version': 'container-infra 1.10',
++ "X-Roles": "member"
++ })
+
+ self.mock_cluster_update.assert_called_once_with(
+ mock.ANY, node_count, self.cluster_obj.health_status,
+@@ -708,18 +716,24 @@ def test_create_cluster_with_cluster_template_name(self):
+ def test_create_cluster_with_zero_node_count_fail(self):
+ bdict = apiutils.cluster_post_data()
+ bdict['node_count'] = 0
+- response = self.post_json('/clusters', bdict, expect_errors=True,
+- headers={"Openstack-Api-Version":
+- "container-infra 1.9"})
++ response = self.post_json(
++ '/clusters', bdict, expect_errors=True,
++ headers={
++ "Openstack-Api-Version": "container-infra 1.9",
++ "X-Roles": "member"
++ })
+ self.assertEqual('application/json', response.content_type)
+ self.assertEqual(400, response.status_int)
+
+ def test_create_cluster_with_zero_node_count(self):
+ bdict = apiutils.cluster_post_data()
+ bdict['node_count'] = 0
+- response = self.post_json('/clusters', bdict,
+- headers={"Openstack-Api-Version":
+- "container-infra 1.10"})
++ response = self.post_json(
++ '/clusters', bdict,
++ headers={
++ "Openstack-Api-Version": "container-infra 1.10",
++ "X-Roles": "member"
++ })
+ self.assertEqual('application/json', response.content_type)
+ self.assertEqual(202, response.status_int)
+
+diff --git a/magnum/tests/unit/api/controllers/v1/test_cluster_actions.py b/magnum/tests/unit/api/controllers/v1/test_cluster_actions.py
+index ba9304fe1b..22baf556ce 100644
+--- a/magnum/tests/unit/api/controllers/v1/test_cluster_actions.py
++++ b/magnum/tests/unit/api/controllers/v1/test_cluster_actions.py
+@@ -46,7 +46,8 @@ def test_resize(self):
+ self.cluster_obj.uuid,
+ {"node_count": new_node_count},
+ headers={"Openstack-Api-Version":
+- "container-infra 1.7"})
++ "container-infra 1.7",
++ "X-Roles": "member"})
+ self.assertEqual(202, response.status_code)
+
+ response = self.get_json('/clusters/%s' % self.cluster_obj.uuid)
+@@ -69,7 +70,8 @@ def test_resize_with_nodegroup(self):
+ self.cluster_obj.uuid,
+ cluster_resize_req,
+ headers={"Openstack-Api-Version":
+- "container-infra 1.9"})
++ "container-infra 1.9",
++ "X-Roles": "member"})
+ self.assertEqual(202, response.status_code)
+
+ response = self.get_json('/clusters/%s' % self.cluster_obj.uuid)
+@@ -89,7 +91,8 @@ def test_resize_with_master_nodegroup(self):
+ self.cluster_obj.uuid,
+ cluster_resize_req,
+ headers={"Openstack-Api-Version":
+- "container-infra 1.9"},
++ "container-infra 1.9",
++ "X-Roles": "member"},
+ expect_errors=True)
+ self.assertEqual(400, response.status_code)
+
+@@ -106,7 +109,8 @@ def test_resize_with_node_count_greater_than_max(self):
+ self.cluster_obj.uuid,
+ cluster_resize_req,
+ headers={"Openstack-Api-Version":
+- "container-infra 1.9"},
++ "container-infra 1.9",
++ "X-Roles": "member"},
+ expect_errors=True)
+ self.assertEqual(400, response.status_code)
+
+@@ -123,7 +127,8 @@ def test_resize_with_node_count_less_than_min(self):
+ self.cluster_obj.uuid,
+ cluster_resize_req,
+ headers={"Openstack-Api-Version":
+- "container-infra 1.9"},
++ "container-infra 1.9",
++ "X-Roles": "member"},
+ expect_errors=True)
+ self.assertEqual(400, response.status_code)
+
+@@ -140,7 +145,8 @@ def test_resize_with_zero_node_count_fail(self):
+ self.cluster_obj.uuid,
+ cluster_resize_req,
+ headers={"Openstack-Api-Version":
+- "container-infra 1.9"},
++ "container-infra 1.9",
++ "X-Roles": "member"},
+ expect_errors=True)
+ self.assertEqual(400, response.status_code)
+
+@@ -157,7 +163,8 @@ def test_resize_with_zero_node_count(self):
+ self.cluster_obj.uuid,
+ cluster_resize_req,
+ headers={"Openstack-Api-Version":
+- "container-infra 1.10"})
++ "container-infra 1.10",
++ "X-Roles": "member"})
+ self.assertEqual(202, response.status_code)
+
+
+@@ -195,7 +202,8 @@ def test_upgrade(self):
+ self.cluster_obj.uuid,
+ cluster_upgrade_req,
+ headers={"Openstack-Api-Version":
+- "container-infra 1.8"})
++ "container-infra 1.8",
++ "X-Roles": "member"})
+ self.assertEqual(202, response.status_code)
+
+ def test_upgrade_cluster_as_admin(self):
+@@ -226,7 +234,8 @@ def test_upgrade_cluster_as_admin(self):
+ '/clusters/%s/actions/upgrade' %
+ cluster_uuid,
+ cluster_upgrade_req,
+- headers={"Openstack-Api-Version": "container-infra 1.8"})
++ headers={"Openstack-Api-Version": "container-infra 1.8",
++ "X-Roles": "member"})
+
+ self.assertEqual(202, response.status_int)
+
+@@ -239,7 +248,8 @@ def test_upgrade_default_worker(self):
+ self.cluster_obj.uuid,
+ cluster_upgrade_req,
+ headers={"Openstack-Api-Version":
+- "container-infra 1.9"})
++ "container-infra 1.9",
++ "X-Roles": "member"})
+ self.assertEqual(202, response.status_code)
+
+ def test_upgrade_default_master(self):
+@@ -251,7 +261,8 @@ def test_upgrade_default_master(self):
+ self.cluster_obj.uuid,
+ cluster_upgrade_req,
+ headers={"Openstack-Api-Version":
+- "container-infra 1.9"})
++ "container-infra 1.9",
++ "X-Roles": "member"})
+ self.assertEqual(202, response.status_code)
+
+ def test_upgrade_non_default_ng(self):
+@@ -263,7 +274,8 @@ def test_upgrade_non_default_ng(self):
+ self.cluster_obj.uuid,
+ cluster_upgrade_req,
+ headers={"Openstack-Api-Version":
+- "container-infra 1.9"})
++ "container-infra 1.9",
++ "X-Roles": "member"})
+ self.assertEqual(202, response.status_code)
+
+ def test_upgrade_cluster_not_found(self):
+@@ -273,7 +285,8 @@ def test_upgrade_cluster_not_found(self):
+ response = self.post_json('/clusters/not_there/actions/upgrade',
+ cluster_upgrade_req,
+ headers={"Openstack-Api-Version":
+- "container-infra 1.8"},
++ "container-infra 1.8",
++ "X-Roles": "member"},
+ expect_errors=True)
+ self.assertEqual(404, response.status_code)
+
+@@ -285,7 +298,8 @@ def test_upgrade_ct_not_found(self):
+ self.cluster_obj.uuid,
+ cluster_upgrade_req,
+ headers={"Openstack-Api-Version":
+- "container-infra 1.8"},
++ "container-infra 1.8",
++ "X-Roles": "member"},
+ expect_errors=True)
+ self.assertEqual(404, response.status_code)
+
+@@ -298,7 +312,8 @@ def test_upgrade_ng_not_found(self):
+ self.cluster_obj.uuid,
+ cluster_upgrade_req,
+ headers={"Openstack-Api-Version":
+- "container-infra 1.9"},
++ "container-infra 1.9",
++ "X-Roles": "member"},
+ expect_errors=True)
+ self.assertEqual(404, response.status_code)
+
+@@ -311,6 +326,7 @@ def test_upgrade_non_default_ng_invalid_ct(self):
+ self.cluster_obj.uuid,
+ cluster_upgrade_req,
+ headers={"Openstack-Api-Version":
+- "container-infra 1.9"},
++ "container-infra 1.9",
++ "X-Roles": "member"},
+ expect_errors=True)
+ self.assertEqual(409, response.status_code)
+diff --git a/magnum/tests/unit/api/controllers/v1/test_nodegroup.py b/magnum/tests/unit/api/controllers/v1/test_nodegroup.py
+index a6f73d54b2..68304a10f6 100644
+--- a/magnum/tests/unit/api/controllers/v1/test_nodegroup.py
++++ b/magnum/tests/unit/api/controllers/v1/test_nodegroup.py
+@@ -47,24 +47,26 @@ def test_nodegroup_init(self):
+ class NodeGroupControllerTest(api_base.FunctionalTest):
+ headers = {"Openstack-Api-Version": "container-infra latest"}
+
+- def _add_headers(self, kwargs):
++ def _add_headers(self, kwargs, roles=None):
+ if 'headers' not in kwargs:
+ kwargs['headers'] = self.headers
++ if roles:
++ kwargs['headers']['X-Roles'] = ",".join(roles)
+
+ def get_json(self, *args, **kwargs):
+- self._add_headers(kwargs)
++ self._add_headers(kwargs, roles=['reader'])
+ return super(NodeGroupControllerTest, self).get_json(*args, **kwargs)
+
+ def post_json(self, *args, **kwargs):
+- self._add_headers(kwargs)
++ self._add_headers(kwargs, roles=['member'])
+ return super(NodeGroupControllerTest, self).post_json(*args, **kwargs)
+
+ def delete(self, *args, **kwargs):
+- self._add_headers(kwargs)
++ self._add_headers(kwargs, roles=['member'])
+ return super(NodeGroupControllerTest, self).delete(*args, **kwargs)
+
+ def patch_json(self, *args, **kwargs):
+- self._add_headers(kwargs)
++ self._add_headers(kwargs, roles=['member'])
+ return super(NodeGroupControllerTest, self).patch_json(*args, **kwargs)
+
+
+diff --git a/magnum/tests/unit/api/controllers/v1/test_quota.py b/magnum/tests/unit/api/controllers/v1/test_quota.py
+index b6b47c481a..07e78857ed 100644
+--- a/magnum/tests/unit/api/controllers/v1/test_quota.py
++++ b/magnum/tests/unit/api/controllers/v1/test_quota.py
+@@ -207,7 +207,7 @@ def test_get_all_non_admin(self, mock_policy):
+ project_id="proj-id-"+str(i))
+ quota_list.append(quota)
+
+- headers = {'X-Project-Id': 'proj-id-2'}
++ headers = {'X-Project-Id': 'proj-id-2', "X-Roles": "member"}
+ response = self.get_json('/quotas', headers=headers)
+ self.assertEqual(1, len(response['quotas']))
+ self.assertEqual('proj-id-2', response['quotas'][0]['project_id'])
+diff --git a/magnum/tests/unit/api/controllers/v1/test_stats.py b/magnum/tests/unit/api/controllers/v1/test_stats.py
+index bb7aac28f4..2e41222d34 100644
+--- a/magnum/tests/unit/api/controllers/v1/test_stats.py
++++ b/magnum/tests/unit/api/controllers/v1/test_stats.py
+@@ -21,7 +21,14 @@
+ class TestStatsController(api_base.FunctionalTest):
+
+ def setUp(self):
+- self.base_headers = {'OpenStack-API-Version': 'container-infra 1.4'}
++ self.base_headers = {
++ "X-Roles": "reader",
++ "OpenStack-API-Version": "container-infra 1.4"
++ }
++ self.base_admin_headers = {
++ "X-Roles": "admin",
++ "OpenStack-API-Version": "container-infra 1.4"
++ }
+ super(TestStatsController, self).setUp()
+ obj_utils.create_test_cluster_template(self.context)
+
+@@ -39,7 +46,7 @@ def test_admin_get_all_stats(self, mock_context, mock_policy):
+ obj_utils.create_test_cluster(self.context,
+ project_id=234,
+ uuid='uuid2')
+- response = self.get_json('/stats', headers=self.base_headers)
++ response = self.get_json('/stats', headers=self.base_admin_headers)
+ expected = {u'clusters': 2, u'nodes': 12}
+ self.assertEqual(expected, response)
+
+@@ -54,7 +61,7 @@ def test_admin_get_tenant_stats(self, mock_context, mock_policy):
+ uuid='uuid2')
+ self.context.is_admin = True
+ response = self.get_json('/stats?project_id=234',
+- headers=self.base_headers)
++ headers=self.base_admin_headers)
+ expected = {u'clusters': 1, u'nodes': 6}
+ self.assertEqual(expected, response)
+
+@@ -69,7 +76,7 @@ def test_admin_get_invalid_tenant_stats(self, mock_context, mock_policy):
+ uuid='uuid2')
+ self.context.is_admin = True
+ response = self.get_json('/stats?project_id=34',
+- headers=self.base_headers)
++ headers=self.base_admin_headers)
+ expected = {u'clusters': 0, u'nodes': 0}
+ self.assertEqual(expected, response)
+
+diff --git a/magnum/tests/unit/api/test_hooks.py b/magnum/tests/unit/api/test_hooks.py
+index 9332c93120..3cbfde4363 100644
+--- a/magnum/tests/unit/api/test_hooks.py
++++ b/magnum/tests/unit/api/test_hooks.py
+@@ -34,7 +34,8 @@ def setUp(self):
+ super(TestContextHook, self).setUp()
+ self.app = fakes.FakeApp()
+
+- def test_context_hook_before_method(self):
++ @mock.patch("magnum.common.policy.check_is_admin")
++ def test_context_hook_before_method(self, m_c):
+ state = mock.Mock(request=fakes.FakePecanRequest())
+ hook = hooks.ContextHook()
+ hook.before(state)
+@@ -51,12 +52,13 @@ def test_context_hook_before_method(self):
+ self.assertEqual(fakes.fakeAuthTokenHeaders['X-Roles'],
+ ','.join(ctx.roles))
+ self.assertEqual(fakes.fakeAuthTokenHeaders['X-User-Domain-Name'],
+- ctx.domain_name)
++ ctx.user_domain_name)
+ self.assertEqual(fakes.fakeAuthTokenHeaders['X-User-Domain-Id'],
+- ctx.domain_id)
++ ctx.user_domain_id)
+ self.assertIsNone(ctx.auth_token_info)
+
+- def test_context_hook_before_method_auth_info(self):
++ @mock.patch("magnum.common.policy.check_is_admin")
++ def test_context_hook_before_method_auth_info(self, c_m):
+ state = mock.Mock(request=fakes.FakePecanRequest())
+ state.request.environ['keystone.token_info'] = 'assert_this'
+ hook = hooks.ContextHook()
+diff --git a/magnum/tests/unit/common/policies/__init__.py b/magnum/tests/unit/common/policies/__init__.py
+new file mode 100644
+index 0000000000..e69de29bb2
+diff --git a/magnum/tests/unit/common/policies/base.py b/magnum/tests/unit/common/policies/base.py
+new file mode 100644
+index 0000000000..22572c0a46
+--- /dev/null
++++ b/magnum/tests/unit/common/policies/base.py
+@@ -0,0 +1,37 @@
++# Licensed under the Apache License, Version 2.0 (the "License"); you may
++# not use this file except in compliance with the License. You may obtain
++# a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++# License for the specific language governing permissions and limitations
++# under the License.
++
++from oslo_config import cfg
++
++from magnum.tests.unit.api import base as api_base
++
++
++CONF = cfg.CONF
++
++
++class PolicyFunctionalTest(api_base.FunctionalTest):
++ def setUp(self):
++ super(PolicyFunctionalTest, self).setUp()
++ CONF.set_override('enforce_scope', True, group='oslo_policy')
++ CONF.set_override('enforce_new_defaults', True, group='oslo_policy')
++ self.reader_headers = {
++ "X-Roles": "reader",
++ }
++ self.member_headers = {
++ "X-Roles": "member",
++ }
++ self.admin_headers = {
++ "X-Roles": "admin",
++ }
++ self.foo_headers = {
++ "X-Roles": "foo",
++ }
+diff --git a/magnum/tests/unit/common/policies/test_certificate_policy.py b/magnum/tests/unit/common/policies/test_certificate_policy.py
+new file mode 100644
+index 0000000000..cc53a71645
+--- /dev/null
++++ b/magnum/tests/unit/common/policies/test_certificate_policy.py
+@@ -0,0 +1,72 @@
++# Licensed under the Apache License, Version 2.0 (the "License"); you may
++# not use this file except in compliance with the License. You may obtain
++# a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++# License for the specific language governing permissions and limitations
++# under the License.
++
++from unittest import mock
++from webtest.app import AppError
++
++from magnum.tests.unit.api import utils as apiutils
++from magnum.tests.unit.common.policies import base
++from magnum.tests.unit.objects import utils as obj_utils
++
++READER_HEADERS = {
++ 'OpenStack-API-Version': 'container-infra latest',
++ "X-Roles": "reader"
++}
++HEADERS = {
++ 'OpenStack-API-Version': 'container-infra latest',
++ "X-Roles": "member"
++}
++
++
++class TestCertificatePolicy(base.PolicyFunctionalTest):
++ def setUp(self):
++ super(TestCertificatePolicy, self).setUp()
++ self.cluster = obj_utils.create_test_cluster(self.context)
++
++ conductor_api_patcher = mock.patch('magnum.conductor.api.API')
++ self.conductor_api_class = conductor_api_patcher.start()
++ self.conductor_api = mock.MagicMock()
++ self.conductor_api_class.return_value = self.conductor_api
++ self.addCleanup(conductor_api_patcher.stop)
++
++ self.conductor_api.sign_certificate.side_effect = self._fake_sign
++
++ @staticmethod
++ def _fake_sign(cluster, cert):
++ cert.pem = 'fake-pem'
++ return cert
++
++ def test_get_no_permission(self):
++ exc = self.assertRaises(
++ AppError,
++ self.get_json,
++ f"/certificates/{self.cluster.uuid}",
++ headers=HEADERS)
++ self.assertIn("403 Forbidden", str(exc))
++
++ def test_create_no_permission(self):
++ new_cert = apiutils.cert_post_data(cluster_uuid=self.cluster.uuid)
++ del new_cert['pem']
++
++ exc = self.assertRaises(
++ AppError, self.post_json,
++ '/certificates', new_cert,
++ headers=READER_HEADERS)
++ self.assertIn("403 Forbidden", str(exc))
++
++ def test_update_no_permission(self):
++ exc = self.assertRaises(
++ AppError, self.patch_json,
++ f"/certificates/{self.cluster.uuid}", {},
++ headers=READER_HEADERS
++ )
++ self.assertIn("403 Forbidden", str(exc))
+diff --git a/magnum/tests/unit/common/policies/test_cluster_policy.py b/magnum/tests/unit/common/policies/test_cluster_policy.py
+new file mode 100644
+index 0000000000..01cfd25c5c
+--- /dev/null
++++ b/magnum/tests/unit/common/policies/test_cluster_policy.py
+@@ -0,0 +1,65 @@
++# Licensed under the Apache License, Version 2.0 (the "License"); you may
++# not use this file except in compliance with the License. You may obtain
++# a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++# License for the specific language governing permissions and limitations
++# under the License.
++
++from webtest.app import AppError
++
++from magnum.tests.unit.api import utils as apiutils
++from magnum.tests.unit.common.policies import base
++from magnum.tests.unit.objects import utils as obj_utils
++
++
++class TestClusterPolicy(base.PolicyFunctionalTest):
++ def setUp(self):
++ super(TestClusterPolicy, self).setUp()
++ self.cluster = obj_utils.create_test_cluster(
++ self.context, name='cluster_example_A', node_count=3
++ )
++
++ def test_get_all_no_permission(self):
++ exc = self.assertRaises(
++ AppError, self.get_json, '/clusters',
++ headers=self.member_headers)
++ self.assertIn("403 Forbidden", str(exc))
++
++ def test_get_no_permission(self):
++ exc = self.assertRaises(
++ AppError,
++ self.get_json,
++ f"/clusters/{self.cluster.uuid}",
++ headers=self.member_headers)
++ self.assertIn("403 Forbidden", str(exc))
++
++ def test_create_no_permission(self):
++ exc = self.assertRaises(
++ AppError, self.post_json,
++ '/clusters', apiutils.cluster_post_data(),
++ headers=self.reader_headers)
++ self.assertIn("403 Forbidden", str(exc))
++
++ def test_update_no_permission(self):
++ cluster_dict = [
++ {'path': '/node_count', 'value': 4, 'op': 'replace'}
++ ]
++ exc = self.assertRaises(
++ AppError, self.patch_json,
++ f"/clusters/{self.cluster.name}", cluster_dict,
++ headers=self.reader_headers
++ )
++ self.assertIn("403 Forbidden", str(exc))
++
++ def test_delete_no_permission(self):
++ # delete cluster
++ exc = self.assertRaises(
++ AppError, self.delete, f"/clusters/{self.cluster.uuid}",
++ headers=self.reader_headers
++ )
++ self.assertIn("403 Forbidden", str(exc))
+diff --git a/magnum/tests/unit/common/policies/test_cluster_template_policy.py b/magnum/tests/unit/common/policies/test_cluster_template_policy.py
+new file mode 100644
+index 0000000000..c6eb9b60a6
+--- /dev/null
++++ b/magnum/tests/unit/common/policies/test_cluster_template_policy.py
+@@ -0,0 +1,74 @@
++# Licensed under the Apache License, Version 2.0 (the "License"); you may
++# not use this file except in compliance with the License. You may obtain
++# a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++# License for the specific language governing permissions and limitations
++# under the License.
++
++from webtest.app import AppError
++
++from magnum.tests.unit.api import utils as apiutils
++from magnum.tests.unit.common.policies import base
++from magnum.tests.unit.objects import utils as obj_utils
++
++
++class TestClusterTemplatePolicy(base.PolicyFunctionalTest):
++ def setUp(self):
++ super(TestClusterTemplatePolicy, self).setUp()
++ self.clustertemplate = obj_utils.create_test_cluster_template(
++ self.context
++ )
++
++ def test_get_all_no_permission(self):
++ exc = self.assertRaises(
++ AppError, self.get_json, '/clustertemplates',
++ headers=self.member_headers)
++ self.assertIn("403 Forbidden", str(exc))
++
++ def test_get_detail_no_permission(self):
++ exc = self.assertRaises(
++ AppError, self.get_json,
++ '/clustertemplates/detail',
++ headers=self.member_headers)
++ self.assertIn("403 Forbidden", str(exc))
++
++ def test_get_no_permission(self):
++ exc = self.assertRaises(
++ AppError,
++ self.get_json,
++ f"/clustertemplates/{self.clustertemplate.uuid}",
++ headers=self.member_headers)
++ self.assertIn("403 Forbidden", str(exc))
++
++ def test_create_no_permission(self):
++ exc = self.assertRaises(
++ AppError, self.post_json,
++ '/clustertemplates',
++ apiutils.cluster_template_post_data(),
++ headers=self.reader_headers)
++ self.assertIn("403 Forbidden", str(exc))
++
++ def test_update_no_permission(self):
++ clustertemplate_data = [
++ {'path': '/dns_nameserver', 'op': 'remove'}]
++ exc = self.assertRaises(
++ AppError,
++ self.patch_json,
++ f"/clustertemplates/{self.clustertemplate.uuid}",
++ clustertemplate_data,
++ headers=self.reader_headers
++ )
++ self.assertIn("403 Forbidden", str(exc))
++
++ def test_delete_no_permission(self):
++ # delete clustertemplate
++ exc = self.assertRaises(
++ AppError, self.delete,
++ f"/clustertemplates/{self.clustertemplate.uuid}",
++ headers=self.reader_headers)
++ self.assertIn("403 Forbidden", str(exc))
+diff --git a/magnum/tests/unit/common/policies/test_federation_policy.py b/magnum/tests/unit/common/policies/test_federation_policy.py
+new file mode 100644
+index 0000000000..68eb1d6212
+--- /dev/null
++++ b/magnum/tests/unit/common/policies/test_federation_policy.py
+@@ -0,0 +1,67 @@
++# Licensed under the Apache License, Version 2.0 (the "License"); you may
++# not use this file except in compliance with the License. You may obtain
++# a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++# License for the specific language governing permissions and limitations
++# under the License.
++
++from oslo_utils import uuidutils
++from webtest.app import AppError
++
++from magnum.tests.unit.common.policies import base
++from magnum.tests.unit.objects import utils as obj_utils
++
++
++class TestFederationPolicy(base.PolicyFunctionalTest):
++ def setUp(self):
++ super(TestFederationPolicy, self).setUp()
++ self.create_federation()
++
++ def create_federation(self):
++ self.fake_uuid = uuidutils.generate_uuid()
++ self.federation = obj_utils.create_test_federation(
++ self.context, uuid=self.fake_uuid)
++
++ def test_get_no_permission(self):
++ exc = self.assertRaises(
++ AppError, self.get_json, '/federations',
++ headers=self.member_headers)
++ self.assertIn("403 Forbidden", str(exc))
++
++ def test_get_reader(self):
++ response = self.get_json('/federations')
++ self.assertEqual(self.fake_uuid, response['federations'][0]['uuid'])
++
++ def test_create_no_permission(self):
++ exc = self.assertRaises(
++ AppError, self.post_json, '/federations', {},
++ headers=self.reader_headers)
++ self.assertIn("403 Forbidden", str(exc))
++
++ def test_update_no_permission(self):
++ new_member = obj_utils.create_test_cluster(self.context)
++ exc = self.assertRaises(
++ AppError, self.patch_json, '/federations/%s' % self.fake_uuid,
++ [{'path': '/member_ids', 'value': new_member.uuid, 'op': 'add'}],
++ headers=self.reader_headers)
++ self.assertIn("403 Forbidden", str(exc))
++
++ def test_delete_no_permission(self):
++ exc = self.assertRaises(
++ AppError, self.delete,
++ '/federations/%s' % self.fake_uuid,
++ headers=self.reader_headers
++ )
++ self.assertIn("403 Forbidden", str(exc))
++
++ def test_detail_list_no_permission(self):
++ exc = self.assertRaises(
++ AppError, self.get_json,
++ '/federations/detail',
++ headers=self.member_headers)
++ self.assertIn("403 Forbidden", str(exc))
+diff --git a/magnum/tests/unit/common/policies/test_magnum_service_policy.py b/magnum/tests/unit/common/policies/test_magnum_service_policy.py
+new file mode 100644
+index 0000000000..9f8153d3a4
+--- /dev/null
++++ b/magnum/tests/unit/common/policies/test_magnum_service_policy.py
+@@ -0,0 +1,26 @@
++# Licensed under the Apache License, Version 2.0 (the "License"); you may
++# not use this file except in compliance with the License. You may obtain
++# a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++# License for the specific language governing permissions and limitations
++# under the License.
++
++from webtest.app import AppError
++
++from magnum.tests.unit.common.policies import base
++
++
++class TestMagnumServicePolicy(base.PolicyFunctionalTest):
++ def setUp(self):
++ super(TestMagnumServicePolicy, self).setUp()
++
++ def test_get_all_no_permission(self):
++ exc = self.assertRaises(AppError,
++ self.get_json, "/mservices",
++ headers=self.member_headers)
++ self.assertIn("403 Forbidden", str(exc))
+diff --git a/magnum/tests/unit/common/policies/test_nodegroup_policy.py b/magnum/tests/unit/common/policies/test_nodegroup_policy.py
+new file mode 100644
+index 0000000000..73f3e107e4
+--- /dev/null
++++ b/magnum/tests/unit/common/policies/test_nodegroup_policy.py
+@@ -0,0 +1,74 @@
++# Licensed under the Apache License, Version 2.0 (the "License"); you may
++# not use this file except in compliance with the License. You may obtain
++# a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++# License for the specific language governing permissions and limitations
++# under the License.
++
++from oslo_utils import uuidutils
++from webtest.app import AppError
++
++from magnum import objects
++from magnum.tests.unit.api import utils as apiutils
++from magnum.tests.unit.common.policies import base
++from magnum.tests.unit.objects import utils as obj_utils
++
++
++class TestNodeGroupPolicy(base.PolicyFunctionalTest):
++ def setUp(self):
++ super(TestNodeGroupPolicy, self).setUp()
++ obj_utils.create_test_cluster_template(self.context)
++ self.cluster_uuid = uuidutils.generate_uuid()
++ obj_utils.create_test_cluster(
++ self.context, uuid=self.cluster_uuid)
++ self.cluster = objects.Cluster.get_by_uuid(self.context,
++ self.cluster_uuid)
++ self.nodegroup = obj_utils.create_test_nodegroup(
++ self.context, cluster_id=self.cluster.uuid, is_default=False)
++ self.url = f"/clusters/{self.cluster.uuid}/nodegroups/"
++ self.member = {"Openstack-Api-Version": "container-infra latest"}
++ self.member.update(self.member_headers)
++ self.reader = {"Openstack-Api-Version": "container-infra latest"}
++ self.reader.update(self.reader_headers)
++
++ def test_get_all_no_permission(self):
++ exc = self.assertRaises(AppError,
++ self.get_json, self.url,
++ headers=self.member)
++ self.assertIn("403 Forbidden", str(exc))
++
++ def test_get_no_permission(self):
++ exc = self.assertRaises(
++ AppError,
++ self.get_json,
++ f"{self.url}foo",
++ headers=self.member)
++ self.assertIn("403 Forbidden", str(exc))
++
++ def test_create_no_permission(self):
++ exc = self.assertRaises(AppError,
++ self.post_json, self.url,
++ apiutils.nodegroup_post_data(),
++ headers=self.reader)
++ self.assertIn("403 Forbidden", str(exc))
++
++ def test_update_no_permission(self):
++ ng_dict = [
++ {'path': '/max_node_count', 'value': 4, 'op': 'replace'}]
++ exc = self.assertRaises(
++ AppError, self.patch_json,
++ self.url + self.nodegroup.uuid, ng_dict,
++ headers=self.reader)
++ self.assertIn("403 Forbidden", str(exc))
++
++ def test_delete_no_permission(self):
++ # delete cluster
++ exc = self.assertRaises(
++ AppError, self.delete, self.url + self.nodegroup.uuid,
++ headers=self.reader)
++ self.assertIn("403 Forbidden", str(exc))
+diff --git a/magnum/tests/unit/common/policies/test_quota_policy.py b/magnum/tests/unit/common/policies/test_quota_policy.py
+new file mode 100644
+index 0000000000..48d4a09c2c
+--- /dev/null
++++ b/magnum/tests/unit/common/policies/test_quota_policy.py
+@@ -0,0 +1,74 @@
++# Licensed under the Apache License, Version 2.0 (the "License"); you may
++# not use this file except in compliance with the License. You may obtain
++# a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++# License for the specific language governing permissions and limitations
++# under the License.
++
++from unittest import mock
++from webtest.app import AppError
++
++from magnum.common import clients
++from magnum.tests.unit.api import utils as apiutils
++from magnum.tests.unit.common.policies import base
++from magnum.tests.unit.objects import utils as obj_utils
++
++
++class TestQuotaPolicy(base.PolicyFunctionalTest):
++ def setUp(self):
++ super(TestQuotaPolicy, self).setUp()
++
++ def test_get_all_no_permission(self):
++ exc = self.assertRaises(
++ AppError, self.get_json, '/quotas',
++ headers=self.reader_headers)
++ self.assertIn("403 Forbidden", str(exc))
++
++ def test_get_no_permission(self):
++ quota = obj_utils.create_test_quota(self.context)
++ exc = self.assertRaises(
++ AppError,
++ self.get_json,
++ f"/quotas/{quota['project_id']}/{quota['resource']}",
++ headers=self.member_headers)
++ self.assertIn("403 Forbidden", str(exc))
++
++ @mock.patch.object(clients.OpenStackClients, 'keystone')
++ def test_create_no_permission(self, mock_keystone):
++ exc = self.assertRaises(
++ AppError, self.post_json,
++ '/quotas', apiutils.quota_post_data(),
++ headers=self.reader_headers)
++ self.assertIn("403 Forbidden", str(exc))
++
++ @mock.patch.object(clients.OpenStackClients, 'keystone')
++ def test_update_no_permission(self, mock_keystone):
++ with mock.patch("magnum.common.policy.enforce"):
++ quota_dict = apiutils.quota_post_data(hard_limit=5)
++ self.post_json('/quotas', quota_dict)
++ quota_dict['hard_limit'] = 20
++ exc = self.assertRaises(
++ AppError, self.patch_json, '/quotas', quota_dict,
++ headers=self.reader_headers)
++ self.assertIn("403 Forbidden", str(exc))
++
++ @mock.patch.object(clients.OpenStackClients, 'keystone')
++ def test_delete_no_permission(self, mock_keystone):
++ with mock.patch("magnum.common.policy.enforce"):
++ quota_dict = apiutils.quota_post_data()
++ response = self.post_json('/quotas', quota_dict)
++ self.assertEqual('application/json', response.content_type)
++ self.assertEqual(201, response.status_int)
++
++ project_id = quota_dict['project_id']
++ resource = quota_dict['resource']
++ # delete quota
++ exc = self.assertRaises(
++ AppError, self.delete, f"/quotas/{project_id}/{resource}",
++ headers=self.reader_headers)
++ self.assertIn("403 Forbidden", str(exc))
+diff --git a/magnum/tests/unit/common/policies/test_stats_policy.py b/magnum/tests/unit/common/policies/test_stats_policy.py
+new file mode 100644
+index 0000000000..20cf1bee5c
+--- /dev/null
++++ b/magnum/tests/unit/common/policies/test_stats_policy.py
+@@ -0,0 +1,33 @@
++# Licensed under the Apache License, Version 2.0 (the "License"); you may
++# not use this file except in compliance with the License. You may obtain
++# a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++# License for the specific language governing permissions and limitations
++# under the License.
++
++from webtest.app import AppError
++
++from magnum.tests.unit.common.policies import base
++
++
++class TestStatsPolicy(base.PolicyFunctionalTest):
++ def test_stat_reader(self):
++ response = self.get_json('/stats', headers=self.reader_headers)
++ expected = {u'clusters': 0, u'nodes': 0}
++ self.assertEqual(expected, response)
++
++ def test_stat_admin(self):
++ response = self.get_json('/stats', headers=self.admin_headers)
++ expected = {u'clusters': 0, u'nodes': 0}
++ self.assertEqual(expected, response)
++
++ def test_stat_no_permission(self):
++ exc = self.assertRaises(
++ AppError, self.get_json, '/stats',
++ headers=self.member_headers)
++ self.assertIn("403 Forbidden", str(exc))
+diff --git a/magnum/tests/unit/common/test_context.py b/magnum/tests/unit/common/test_context.py
+index c72c2c763d..aed4d33ebd 100644
+--- a/magnum/tests/unit/common/test_context.py
++++ b/magnum/tests/unit/common/test_context.py
+@@ -19,29 +19,30 @@
+ class ContextTestCase(base.TestCase):
+
+ def _create_context(self, roles=None):
+- return magnum_context.RequestContext(auth_token='auth_token1',
+- auth_url='auth_url1',
+- domain_id='domain_id1',
+- domain_name='domain_name1',
+- user_name='user1',
+- user_id='user-id1',
+- project_name='tenant1',
+- project_id='tenant-id1',
+- roles=roles,
+- is_admin=True,
+- read_only=True,
+- show_deleted=True,
+- request_id='request_id1',
+- trust_id='trust_id1',
+- auth_token_info='token_info1')
++ return magnum_context.RequestContext(
++ auth_token='auth_token1',
++ auth_url='auth_url1',
++ user_domain_id='user_domain_id1',
++ user_domain_name='user_domain_name1',
++ user_name='user1',
++ user_id='user-id1',
++ project_name='tenant1',
++ project_id='tenant-id1',
++ roles=roles,
++ is_admin=True,
++ read_only=True,
++ show_deleted=True,
++ request_id='request_id1',
++ trust_id='trust_id1',
++ auth_token_info='token_info1')
+
+ def test_context(self):
+ ctx = self._create_context()
+
+ self.assertEqual("auth_token1", ctx.auth_token)
+ self.assertEqual("auth_url1", ctx.auth_url)
+- self.assertEqual("domain_id1", ctx.domain_id)
+- self.assertEqual("domain_name1", ctx.domain_name)
++ self.assertEqual("user_domain_id1", ctx.user_domain_id)
++ self.assertEqual("user_domain_name1", ctx.user_domain_name)
+ self.assertEqual("user1", ctx.user_name)
+ self.assertEqual("user-id1", ctx.user_id)
+ self.assertEqual("tenant1", ctx.project_name)
+@@ -59,8 +60,8 @@ def test_context_with_roles(self):
+
+ self.assertEqual("auth_token1", ctx.auth_token)
+ self.assertEqual("auth_url1", ctx.auth_url)
+- self.assertEqual("domain_id1", ctx.domain_id)
+- self.assertEqual("domain_name1", ctx.domain_name)
++ self.assertEqual("user_domain_id1", ctx.user_domain_id)
++ self.assertEqual("user_domain_name1", ctx.user_domain_name)
+ self.assertEqual("user1", ctx.user_name)
+ self.assertEqual("user-id1", ctx.user_id)
+ self.assertEqual("tenant1", ctx.project_name)
+@@ -80,8 +81,8 @@ def test_to_dict_from_dict(self):
+
+ self.assertEqual(ctx.auth_token, ctx2.auth_token)
+ self.assertEqual(ctx.auth_url, ctx2.auth_url)
+- self.assertEqual(ctx.domain_id, ctx2.domain_id)
+- self.assertEqual(ctx.domain_name, ctx2.domain_name)
++ self.assertEqual(ctx.user_domain_id, ctx2.user_domain_id)
++ self.assertEqual(ctx.user_domain_name, ctx2.user_domain_name)
+ self.assertEqual(ctx.user_name, ctx2.user_name)
+ self.assertEqual(ctx.user_id, ctx2.user_id)
+ self.assertEqual(ctx.project_id, ctx2.project_id)
+diff --git a/releasenotes/notes/allow_admin_perform_acitons-cc988655bb72b3f3.yaml b/releasenotes/notes/allow_admin_perform_acitons-cc988655bb72b3f3.yaml
+new file mode 100644
+index 0000000000..6cb516451c
+--- /dev/null
++++ b/releasenotes/notes/allow_admin_perform_acitons-cc988655bb72b3f3.yaml
+@@ -0,0 +1,9 @@
++---
++upgrade:
++ - |
++ To ensure backward compatibility, we set a specific
++ rule that allows the admin role to perform all actions.
++ This will apply on part of APIs in
++ * Cluster
++ * Cluster Template
++ * Federation
+diff --git a/releasenotes/notes/enable-enforce-scope-and-new-defaults-7e6e503f74283071.yaml b/releasenotes/notes/enable-enforce-scope-and-new-defaults-7e6e503f74283071.yaml
+new file mode 100644
+index 0000000000..69b9fec5eb
+--- /dev/null
++++ b/releasenotes/notes/enable-enforce-scope-and-new-defaults-7e6e503f74283071.yaml
+@@ -0,0 +1,13 @@
++---
++upgrade:
++ - |
++ The Magnum service now allows enabling the new policy (RBAC) defaults
++ and scope checks. These are controlled by the following (default) config
++ options in ``magnum.conf`` file::
++
++ [oslo_policy]
++ enforce_new_defaults=False
++ enforce_scope=False
++
++ We will change the defaults to True in an upcoming cycle.
++ If you want to enable them now, set both values to True.