# Default values for rook-ceph-operator
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
# -- Image
repository: rook/ceph
# -- Image tag
# @default -- `master`
tag: v1.10.10
# -- Image pull policy
pullPolicy: IfNotPresent
crds:
# -- Whether the helm chart should create and update the CRDs. If false, the CRDs must be
# managed independently with deploy/examples/crds.yaml.
# **WARNING** Only set during first deployment. If disabled later, the cluster may be DESTROYED.
# If the CRDs are deleted in this case, see
# [the disaster recovery guide](https://rook.io/docs/rook/latest/Troubleshooting/disaster-recovery/#restoring-crds-after-deletion)
# to restore them.
enabled: true
# -- Pod resource requests & limits
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 100m
memory: 128Mi
# -- Kubernetes [`nodeSelector`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) to add to the Deployment.
nodeSelector: {}
# Constrain the rook-ceph-operator Deployment to nodes with the label `disktype: ssd`.
# For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
# disktype: ssd
# -- List of Kubernetes [`tolerations`](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) to add to the Deployment.
tolerations: []
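# For example, to tolerate a hypothetical `storage-node` taint, replace `[]` above with:
# - key: storage-node
#   operator: Exists
#   effect: NoSchedule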
# -- Delay (in seconds) to use for the `node.kubernetes.io/unreachable` pod failure toleration,
# overriding the Kubernetes default of 5 minutes
unreachableNodeTolerationSeconds: 5
# -- Whether the operator should watch for cluster CRs in its own namespace only
currentNamespaceOnly: false
# -- Pod annotations
annotations: {}
# -- Global log level for the operator.
# Options: `ERROR`, `WARNING`, `INFO`, `DEBUG`
logLevel: INFO
# -- If true, create & use RBAC resources
rbacEnable: true
# -- If true, create & use PSP resources
pspEnable: false
# -- Set the priority class for the rook operator deployment if desired
priorityClassName:
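# For example (assuming this priority class exists in the cluster):
# priorityClassName: system-cluster-critical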
# -- If true, loop devices are allowed to be used for OSDs in test clusters
allowLoopDevices: false
# Settings for whether to disable the drivers or other daemons if they are not
# needed
csi:
# -- Enable Ceph CSI RBD driver
enableRbdDriver: true
# -- Enable Ceph CSI CephFS driver
enableCephfsDriver: true
# -- Enable Ceph CSI GRPC Metrics
enableGrpcMetrics: false
# -- Enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
# in some network configurations where the SDN does not provide access to an external cluster or
# there is a significant drop in read/write performance
enableCSIHostNetwork: true
# -- Enable Snapshotter in CephFS provisioner pod
enableCephfsSnapshotter: true
# -- Enable Snapshotter in NFS provisioner pod
enableNFSSnapshotter: true
# -- Enable Snapshotter in RBD provisioner pod
enableRBDSnapshotter: true
# -- Enable Host mount for `/etc/selinux` directory for Ceph CSI nodeplugins
enablePluginSelinuxHostMount: false
# -- Enable Ceph CSI PVC encryption support
enableCSIEncryption: false
# -- PriorityClassName to be set on csi driver plugin pods
pluginPriorityClassName: system-node-critical
# -- PriorityClassName to be set on csi driver provisioner pods
provisionerPriorityClassName: system-cluster-critical
# -- Policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
# Supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
rbdFSGroupPolicy: "File"
# -- Policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
# Supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
cephFSFSGroupPolicy: "File"
# -- Policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
# Supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
nfsFSGroupPolicy: "File"
# -- The OMAP generator generates the omap mapping between the PV name and the RBD image,
# which helps CSI identify the RBD images for CSI operations.
# `CSI_ENABLE_OMAP_GENERATOR` must be enabled when the RBD mirroring feature is used.
# The OMAP generator is disabled by default; set this to true to deploy it as a
# sidecar with the CSI provisioner pod.
enableOMAPGenerator: false
# -- Enable adding volume metadata on the CephFS subvolumes and RBD images.
# Not all users want volume/snapshot details exposed as metadata on CephFS subvolumes and RBD images,
# so metadata is disabled by default.
enableMetadata: false
# -- Set replicas for csi provisioner deployment
provisionerReplicas: 2
# -- Cluster name identifier to set as metadata on the CephFS subvolumes and RBD images. This is useful
# when, for example, two container orchestrator clusters (Kubernetes/OCP) share a single Ceph cluster.
clusterName:
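# For example (hypothetical identifier):
# clusterName: my-prod-cluster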
# -- Set the logging level for the Ceph CSI containers maintained by Ceph CSI.
# Supported values are 0 to 5: 0 for general useful logs, 5 for trace-level verbosity.
logLevel: 0
# -- Set the logging level for the Kubernetes CSI sidecar containers.
# Supported values are 0 to 5: 0 for general useful logs (the default), 5 for trace-level verbosity.
# @default -- `0`
sidecarLogLevel:
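# For example, to get trace-level sidecar logs:
# sidecarLogLevel: 5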
# -- CSI RBD plugin daemonset update strategy. Supported values are OnDelete and RollingUpdate
# @default -- `RollingUpdate`
rbdPluginUpdateStrategy:
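# For example, to roll out plugin updates only when the pods are deleted
# (the same form applies to the CephFS and NFS update strategies below):
# rbdPluginUpdateStrategy: OnDelete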
# -- The maxUnavailable parameter of the CSI RBD plugin daemonset update strategy.
# @default -- `1`
rbdPluginUpdateStrategyMaxUnavailable:
# -- CSI CephFS plugin daemonset update strategy. Supported values are OnDelete and RollingUpdate
# @default -- `RollingUpdate`
cephFSPluginUpdateStrategy:
# -- CSI NFS plugin daemonset update strategy. Supported values are OnDelete and RollingUpdate
# @default -- `RollingUpdate`
nfsPluginUpdateStrategy:
# -- Set the GRPC timeout for CSI containers (in seconds). It should be >= 120. If this value is not set or is invalid, it defaults to 150
grpcTimeoutInSeconds: 150
# -- Allow starting an unsupported ceph-csi image
allowUnsupportedVersion: false
# -- The volume of the CephCSI RBD plugin DaemonSet
csiRBDPluginVolume:
# - name: lib-modules
# hostPath:
# path: /run/booted-system/kernel-modules/lib/modules/
# - name: host-nix
# hostPath:
# path: /nix
# -- The volume mounts of the CephCSI RBD plugin DaemonSet
csiRBDPluginVolumeMount:
# - name: host-nix
# mountPath: /nix
# readOnly: true
# -- The volume of the CephCSI CephFS plugin DaemonSet
csiCephFSPluginVolume:
# - name: lib-modules
# hostPath:
# path: /run/booted-system/kernel-modules/lib/modules/
# - name: host-nix
# hostPath:
# path: /nix
# -- The volume mounts of the CephCSI CephFS plugin DaemonSet
csiCephFSPluginVolumeMount:
# - name: host-nix
# mountPath: /nix
# readOnly: true
# -- Ceph CSI RBD provisioner resource requirement list.
# The csi-omap-generator resources are applied only if `enableOMAPGenerator` is set to `true`
# @default -- see values.yaml
csiRBDProvisionerResource: |
- name: csi-provisioner
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
cpu: 200m
- name: csi-resizer
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
cpu: 200m
- name: csi-attacher
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
cpu: 200m
- name: csi-snapshotter
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
cpu: 200m
- name: csi-rbdplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
cpu: 500m
- name: csi-omap-generator
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
cpu: 500m
- name: liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
cpu: 100m
# -- Ceph CSI RBD plugin resource requirement list
# @default -- see values.yaml
csiRBDPluginResource: |
- name: driver-registrar
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
cpu: 100m
- name: csi-rbdplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
cpu: 500m
- name: liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
cpu: 100m
# -- Ceph CSI CephFS provisioner resource requirement list
# @default -- see values.yaml
csiCephFSProvisionerResource: |
- name: csi-provisioner
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
cpu: 200m
- name: csi-resizer
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
cpu: 200m
- name: csi-attacher
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
cpu: 200m
- name: csi-snapshotter
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
cpu: 200m
- name: csi-cephfsplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
cpu: 500m
- name: liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
cpu: 100m
# -- Ceph CSI CephFS plugin resource requirement list
# @default -- see values.yaml
csiCephFSPluginResource: |
- name: driver-registrar
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
cpu: 100m
- name: csi-cephfsplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
cpu: 500m
- name: liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
cpu: 100m
# -- Ceph CSI NFS provisioner resource requirement list
# @default -- see values.yaml
csiNFSProvisionerResource: |
- name: csi-provisioner
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
cpu: 200m
- name: csi-nfsplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
cpu: 500m
# -- Ceph CSI NFS plugin resource requirement list
# @default -- see values.yaml
csiNFSPluginResource: |
- name: driver-registrar
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
cpu: 100m
- name: csi-nfsplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
cpu: 500m
# Set provisionerTolerations and provisionerNodeAffinity for the provisioner pod.
# It is best to start the CSI provisioner on the same nodes as the other Ceph daemons.
# -- Array of tolerations in YAML format which will be added to CSI provisioner deployment
provisionerTolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# -- The node labels for affinity of the CSI provisioner deployment [^1]
provisionerNodeAffinity: #key1=value1,value2; key2=value3
# Set pluginTolerations and pluginNodeAffinity for plugin daemonset pods.
# The CSI plugins need to be started on all the nodes where the clients need to mount the storage.
# -- Array of tolerations in YAML format which will be added to CephCSI plugin DaemonSet
pluginTolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# -- The node labels for affinity of the CephCSI RBD plugin DaemonSet [^1]
pluginNodeAffinity: # key1=value1,value2; key2=value3
# -- Enable Ceph CSI Liveness sidecar deployment
enableLiveness: false
# -- CSI CephFS driver GRPC metrics port
# @default -- `9091`
cephfsGrpcMetricsPort:
# -- CSI CephFS driver metrics port
# @default -- `9081`
cephfsLivenessMetricsPort:
# -- Ceph CSI RBD driver GRPC metrics port
# @default -- `9090`
rbdGrpcMetricsPort:
# -- CSI Addons server port
# @default -- `9070`
csiAddonsPort:
# -- Enable Ceph kernel clients on kernels < 4.17. If your kernel does not support quotas for CephFS,
# you may want to disable this setting. However, this will cause an issue during upgrades
# with the FUSE client. See the [upgrade guide](https://rook.io/docs/rook/v1.2/ceph-upgrade.html)
forceCephFSKernelClient: true
# -- Ceph CSI RBD driver metrics port
# @default -- `8080`
rbdLivenessMetricsPort:
# -- Kubelet root directory path (if the Kubelet uses a different path for the `--root-dir` flag)
# @default -- `/var/lib/kubelet`
kubeletDirPath:
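# For example (hypothetical non-default kubelet root directory):
# kubeletDirPath: /data/kubelet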
cephcsi:
# -- Ceph CSI image
# @default -- `quay.io/cephcsi/cephcsi:v3.7.2`
image:
registrar:
# -- Kubernetes CSI registrar image
# @default -- `registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0`
image:
provisioner:
# -- Kubernetes CSI provisioner image
# @default -- `registry.k8s.io/sig-storage/csi-provisioner:v3.4.0`
image:
snapshotter:
# -- Kubernetes CSI snapshotter image
# @default -- `registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1`
image:
attacher:
# -- Kubernetes CSI Attacher image
# @default -- `registry.k8s.io/sig-storage/csi-attacher:v4.1.0`
image:
resizer:
# -- Kubernetes CSI resizer image
# @default -- `registry.k8s.io/sig-storage/csi-resizer:v1.7.0`
image:
# -- Image pull policy
imagePullPolicy: IfNotPresent
# -- Labels to add to the CSI CephFS Deployments and DaemonSets Pods
cephfsPodLabels: #"key1=value1,key2=value2"
# -- Labels to add to the CSI NFS Deployments and DaemonSets Pods
nfsPodLabels: #"key1=value1,key2=value2"
# -- Labels to add to the CSI RBD Deployments and DaemonSets Pods
rbdPodLabels: #"key1=value1,key2=value2"
csiAddons:
# -- Enable CSIAddons
enabled: false
# -- CSIAddons Sidecar image
image: "quay.io/csiaddons/k8s-sidecar:v0.5.0"
nfs:
# -- Enable the nfs csi driver
enabled: false
topology:
# -- Enable topology based provisioning
enabled: false
# NOTE: the value here serves as an example and needs to be
# updated with node labels that define domains of interest
# -- domainLabels define which node labels to use as domains
# for CSI nodeplugins to advertise their domains
domainLabels:
# - kubernetes.io/hostname
# - topology.kubernetes.io/zone
# - topology.rook.io/rack
# -- Enable discovery daemon
enableDiscoveryDaemon: false
# -- The timeout for ceph commands in seconds
cephCommandsTimeoutSeconds: "15"
# -- If true, run the rook operator on the host network
useOperatorHostNetwork:
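# For example, to put the operator pod on the host network:
# useOperatorHostNetwork: true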
## Rook Discover configuration
## toleration: NoSchedule, PreferNoSchedule or NoExecute
## tolerationKey: Set this to the specific key of the taint to tolerate
## tolerations: Array of tolerations in YAML format which will be added to the discover deployment
## nodeAffinity: Set to labels of the node to match
discover:
# -- Toleration for the discover pods.
# Options: `NoSchedule`, `PreferNoSchedule` or `NoExecute`
toleration:
# -- The specific key of the taint to tolerate
tolerationKey:
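# For example (hypothetical taint key):
# tolerationKey: storage-node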
# -- Array of tolerations in YAML format which will be added to the discover deployment
tolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# -- The node labels for affinity of `discover-agent` [^1]
nodeAffinity: # key1=value1,value2; key2=value3
# -- Labels to add to the discover pods
podLabels: # "key1=value1,key2=value2"
# -- Add resources to discover daemon pods
resources:
# limits:
#   cpu: 500m
#   memory: 512Mi
# requests:
#   cpu: 100m
#   memory: 128Mi
# -- Whether to disable the admission controller
disableAdmissionController: true
# -- Runs Ceph Pods as privileged to be able to write to `hostPaths` in OpenShift with SELinux restrictions.
hostpathRequiresPrivileged: false
# -- Disable automatic orchestration when new devices are discovered.
disableDeviceHotplug: false
# -- Blacklist certain disks according to the regex provided.
discoverDaemonUdev:
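# For example (an illustrative pattern that skips device-mapper and network block devices):
# discoverDaemonUdev: "(?i)dm-[0-9]+,(?i)nbd[0-9]+"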
# -- The imagePullSecrets option allows pulling Docker images from a private Docker registry. The option is passed to all service accounts.
imagePullSecrets:
# - name: my-registry-secret
# -- Whether the OBC provisioner should watch the operator namespace or not; if not, the namespace of the cluster will be used
enableOBCWatchOperatorNamespace: true
# -- Set tolerations and nodeAffinity [^1] for the admission controller pod.
# It is best to start the admission controller on the same nodes as the other Ceph daemons.
admissionController:
# tolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# nodeAffinity: key1=value1,value2; key2=value3
# [^1]: `nodeAffinity` and `*NodeAffinity` options should have the format `"role=storage,rook; storage=ceph"` or `storage=;role=rook-example` or `storage=;` (_checks only for presence of key_)
monitoring:
# -- Enable monitoring. Requires Prometheus to be pre-installed.
# Enabling this will also create RBAC rules to allow the operator to create ServiceMonitors
enabled: false