# Default values for rook-ceph-operator
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# Operator container image. The tag must match a released rook/ceph version.
image:
  repository: rook/ceph
  tag: v1.10.3
  pullPolicy: IfNotPresent

crds:
  # Whether the helm chart should create and update the CRDs. If false, the CRDs must be
  # managed independently with deploy/examples/crds.yaml.
  # **WARNING** Only set during first deployment. If later disabled the cluster may be DESTROYED.
  # If the CRDs are deleted in this case, see the disaster recovery guide to restore them.
  # https://rook.io/docs/rook/latest/Troubleshooting/disaster-recovery/#restoring-crds-after-deletion
  enabled: true

# Pod resource requests/limits for the rook-ceph-operator Deployment.
resources:
  limits:
    cpu: 500m
    memory: 512Mi
  requests:
    cpu: 100m
    memory: 128Mi

nodeSelector: {}
# Constraint rook-ceph-operator Deployment to nodes with label `disktype: ssd`.
# For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
# disktype: ssd

# Tolerations for the rook-ceph-operator to allow it to run on nodes with particular taints
tolerations: []

# Delay to use in node.kubernetes.io/unreachable toleration
unreachableNodeTolerationSeconds: 5

# Whether rook watches its current namespace for CRDs or the entire cluster, defaults to false
currentNamespaceOnly: false

## Annotations to be added to pod
annotations: {}

## The logging level for the operator: ERROR | WARNING | INFO | DEBUG
logLevel: INFO

## If true, create & use RBAC resources
##
rbacEnable: true

## If true, create & use PSP resources
##
pspEnable: true

# Set the priority class for the rook operator deployment if desired
# priorityClassName: class

## Settings for whether to disable the drivers or other daemons if they are not
## needed
csi:
  enableRbdDriver: true
  enableCephfsDriver: true
  enableGrpcMetrics: false
  # Set to true to enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
  # in some network configurations where the SDN does not provide access to an external cluster or
  # there is significant drop in read/write performance.
  enableCSIHostNetwork: true
  # set to false to disable deployment of snapshotter container in CephFS provisioner pod.
  enableCephfsSnapshotter: true
  # set to false to disable deployment of snapshotter container in NFS provisioner pod.
  enableNFSSnapshotter: true
  # set to false to disable deployment of snapshotter container in RBD provisioner pod.
  enableRBDSnapshotter: true
  # set to false if the selinux is not enabled or unavailable in cluster nodes.
  enablePluginSelinuxHostMount: false
  # set to true to enable Ceph CSI pvc encryption support.
  enableCSIEncryption: false

  # (Optional) set user created priorityclassName for csi plugin pods.
  pluginPriorityClassName: system-node-critical

  # (Optional) set user created priorityclassName for csi provisioner pods.
  provisionerPriorityClassName: system-cluster-critical

  # (Optional) policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
  # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  rbdFSGroupPolicy: "File"

  # (Optional) policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
  # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  cephFSFSGroupPolicy: "File"

  # (Optional) policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
  # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  nfsFSGroupPolicy: "File"

  # OMAP generator generates the omap mapping between the PV name and the RBD image
  # which helps CSI to identify the rbd images for CSI operations.
  # CSI_ENABLE_OMAP_GENERATOR need to be enabled when we are using rbd mirroring feature.
  # By default OMAP generator is disabled and when enabled it will be deployed as a
  # sidecar with CSI provisioner pod, to enable set it to true.
  enableOMAPGenerator: false

103 # (Optional) set to true to enable adding volume metadata on the CephFS subvolumes and RBD images.
104 # Not all users might be interested in getting volume/snapshot details as metadata on CephFS subvolume and RBD images.
105 # Hence enable metadata is false by default.
106 enableMetadata: false
107
108 # Set replicas for csi provisioner deployment.
109 provisionerReplicas: 2
110
111 # (Optional) cluster name identifier to set as metadata on the CephFS subvolume and RBD images. This will be useful
112 # in cases like for example, when two container orchestrator clusters (Kubernetes/OCP) are using a single ceph cluster.
113 # clusterName: "my-prod-cluster"
114 # Set logging level for cephCSI containers maintained by the cephCSI.
115 # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
116 #logLevel: 0
117 # Set logging level for Kubernetes-csi sidecar containers.
118 # Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity.
119 #sidecarLogLevel: 0
120 # CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
121 # Default value is RollingUpdate.
122 #rbdPluginUpdateStrategy: OnDelete
123 # CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
124 # Default value is RollingUpdate.
125 #cephFSPluginUpdateStrategy: OnDelete
126 # CSI NFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
127 # Default value is RollingUpdate.
128 #nfsPluginUpdateStrategy: OnDelete
129 # The CSI GRPC timeout value (in seconds). It should be >= 120. If this variable is not set or is an invalid value, it's default to 150.
130 grpcTimeoutInSeconds: 150
131
132 # Allow starting unsupported ceph-csi image
133 allowUnsupportedVersion: false
134
135 # CephCSI RBD plugin Volumes
136 # csiRBDPluginVolumes: |
137 # - name: lib-modules
138 # hostPath:
139 # path: /run/current-system/kernel-modules/lib/modules/
140 # - name: host-nix
141 # hostPath:
142 # path: /nix
143
144 # CephCSI RBD plugin Volume mounts
145 # csiRBDPluginVolumeMounts: |
146 # - name: host-nix
147 # mountPath: /nix
148 # readOnly: true
149
150 # CephCSI CephFS plugin Volumes
151 # csiCephFSPluginVolumes: |
152 # - name: lib-modules
153 # hostPath:
154 # path: /run/current-system/kernel-modules/lib/modules/
155 # - name: host-nix
156 # hostPath:
157 # path: /nix
158
159 # CephCSI CephFS plugin Volume mounts
160 # csiCephFSPluginVolumeMounts: |
161 # - name: host-nix
162 # mountPath: /nix
163 # readOnly: true
164
165 # CEPH CSI RBD provisioner resource requirement list, Put here list of resource
166 # requests and limits you want to apply for provisioner pod
167 # csi-omap-generator resources will be applied only if enableOMAPGenerator is set to true
168 csiRBDProvisionerResource: |
169 - name : csi-provisioner
170 resource:
171 requests:
172 memory: 128Mi
173 cpu: 100m
174 limits:
175 memory: 256Mi
176 cpu: 200m
177 - name : csi-resizer
178 resource:
179 requests:
180 memory: 128Mi
181 cpu: 100m
182 limits:
183 memory: 256Mi
184 cpu: 200m
185 - name : csi-attacher
186 resource:
187 requests:
188 memory: 128Mi
189 cpu: 100m
190 limits:
191 memory: 256Mi
192 cpu: 200m
193 - name : csi-snapshotter
194 resource:
195 requests:
196 memory: 128Mi
197 cpu: 100m
198 limits:
199 memory: 256Mi
200 cpu: 200m
201 - name : csi-rbdplugin
202 resource:
203 requests:
204 memory: 512Mi
205 cpu: 250m
206 limits:
207 memory: 1Gi
208 cpu: 500m
209 - name : csi-omap-generator
210 resource:
211 requests:
212 memory: 512Mi
213 cpu: 250m
214 limits:
215 memory: 1Gi
216 cpu: 500m
217 - name : liveness-prometheus
218 resource:
219 requests:
220 memory: 128Mi
221 cpu: 50m
222 limits:
223 memory: 256Mi
224 cpu: 100m
225 # CEPH CSI RBD plugin resource requirement list, Put here list of resource
226 # requests and limits you want to apply for plugin pod
227 csiRBDPluginResource: |
228 - name : driver-registrar
229 resource:
230 requests:
231 memory: 128Mi
232 cpu: 50m
233 limits:
234 memory: 256Mi
235 cpu: 100m
236 - name : csi-rbdplugin
237 resource:
238 requests:
239 memory: 512Mi
240 cpu: 250m
241 limits:
242 memory: 1Gi
243 cpu: 500m
244 - name : liveness-prometheus
245 resource:
246 requests:
247 memory: 128Mi
248 cpu: 50m
249 limits:
250 memory: 256Mi
251 cpu: 100m
252 # CEPH CSI CephFS provisioner resource requirement list, Put here list of resource
253 # requests and limits you want to apply for provisioner pod
254 csiCephFSProvisionerResource: |
255 - name : csi-provisioner
256 resource:
257 requests:
258 memory: 128Mi
259 cpu: 100m
260 limits:
261 memory: 256Mi
262 cpu: 200m
263 - name : csi-resizer
264 resource:
265 requests:
266 memory: 128Mi
267 cpu: 100m
268 limits:
269 memory: 256Mi
270 cpu: 200m
271 - name : csi-attacher
272 resource:
273 requests:
274 memory: 128Mi
275 cpu: 100m
276 limits:
277 memory: 256Mi
278 cpu: 200m
279 - name : csi-snapshotter
280 resource:
281 requests:
282 memory: 128Mi
283 cpu: 100m
284 limits:
285 memory: 256Mi
286 cpu: 200m
287 - name : csi-cephfsplugin
288 resource:
289 requests:
290 memory: 512Mi
291 cpu: 250m
292 limits:
293 memory: 1Gi
294 cpu: 500m
295 - name : liveness-prometheus
296 resource:
297 requests:
298 memory: 128Mi
299 cpu: 50m
300 limits:
301 memory: 256Mi
302 cpu: 100m
303 # CEPH CSI CephFS plugin resource requirement list, Put here list of resource
304 # requests and limits you want to apply for plugin pod
305 csiCephFSPluginResource: |
306 - name : driver-registrar
307 resource:
308 requests:
309 memory: 128Mi
310 cpu: 50m
311 limits:
312 memory: 256Mi
313 cpu: 100m
314 - name : csi-cephfsplugin
315 resource:
316 requests:
317 memory: 512Mi
318 cpu: 250m
319 limits:
320 memory: 1Gi
321 cpu: 500m
322 - name : liveness-prometheus
323 resource:
324 requests:
325 memory: 128Mi
326 cpu: 50m
327 limits:
328 memory: 256Mi
329 cpu: 100m
330 # CEPH CSI NFS provisioner resource requirement list, Put here list of resource
331 # requests and limits you want to apply for provisioner pod
332 csiNFSProvisionerResource: |
333 - name : csi-provisioner
334 resource:
335 requests:
336 memory: 128Mi
337 cpu: 100m
338 limits:
339 memory: 256Mi
340 cpu: 200m
341 - name : csi-nfsplugin
342 resource:
343 requests:
344 memory: 512Mi
345 cpu: 250m
346 limits:
347 memory: 1Gi
348 cpu: 500m
349 # CEPH CSI NFS plugin resource requirement list, Put here list of resource
350 # requests and limits you want to apply for plugin pod
351 csiNFSPluginResource: |
352 - name : driver-registrar
353 resource:
354 requests:
355 memory: 128Mi
356 cpu: 50m
357 limits:
358 memory: 256Mi
359 cpu: 100m
360 - name : csi-nfsplugin
361 resource:
362 requests:
363 memory: 512Mi
364 cpu: 250m
365 limits:
366 memory: 1Gi
367 cpu: 500m
368
369 # Set provisionerTolerations and provisionerNodeAffinity for provisioner pod.
370 # The CSI provisioner would be best to start on the same nodes as other ceph daemons.
371 # provisionerTolerations:
372 # - key: key
373 # operator: Exists
374 # effect: NoSchedule
375 # provisionerNodeAffinity: key1=value1,value2; key2=value3
376 # Set pluginTolerations and pluginNodeAffinity for plugin daemonset pods.
377 # The CSI plugins need to be started on all the nodes where the clients need to mount the storage.
378 # pluginTolerations:
379 # - key: key
380 # operator: Exists
381 # effect: NoSchedule
382 # pluginNodeAffinity: key1=value1,value2; key2=value3
383 # Set to true to enable Ceph CSI liveness container.
384 enableLiveness: false
385 #cephfsGrpcMetricsPort: 9091
386 #cephfsLivenessMetricsPort: 9081
387 #rbdGrpcMetricsPort: 9090
388 #csiAddonsPort: 9070
389 # Enable Ceph Kernel clients on kernel < 4.17. If your kernel does not support quotas for CephFS
390 # you may want to disable this setting. However, this will cause an issue during upgrades
391 # with the FUSE client. See the upgrade guide: https://rook.io/docs/rook/v1.2/ceph-upgrade.html
392 forceCephFSKernelClient: true
393 #rbdLivenessMetricsPort: 9080
394 #kubeletDirPath: /var/lib/kubelet
395 #cephcsi:
396 # image: quay.io/cephcsi/cephcsi:v3.7.1
397 #registrar:
398 # image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1
399 #provisioner:
400 # image: registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
401 #snapshotter:
402 # image: registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
403 #attacher:
404 # image: registry.k8s.io/sig-storage/csi-attacher:v4.0.0
405 #resizer:
406 # image: registry.k8s.io/sig-storage/csi-resizer:v1.6.0
407 #imagePullPolicy: IfNotPresent
408 # Labels to add to the CSI CephFS Deployments and DaemonSets Pods.
409 #cephfsPodLabels: "key1=value1,key2=value2"
410 # Labels to add to the CSI NFS Deployments and DaemonSets Pods.
411 #nfsPodLabels: "key1=value1,key2=value2"
412 # Labels to add to the CSI RBD Deployments and DaemonSets Pods.
413 #rbdPodLabels: "key1=value1,key2=value2"
414 # Enable the CSIAddons sidecar.
415 csiAddons:
416 enabled: false
417 #image: "quay.io/csiaddons/k8s-sidecar:v0.5.0"
418 # Enable the nfs csi driver.
419 nfs:
420 enabled: false
421 # Enable topology based provisioning.
422 topology:
423 enabled: false
424 # domainLabels define which node labels to use as domains
425 # for CSI nodeplugins to advertise their domains
426 # NOTE: the value here serves as an example and needs to be
427 # updated with node labels that define domains of interest
428 # domainLabels:
429 # - kubernetes.io/hostname
430 # - topology.kubernetes.io/zone
431 # - topology.rook.io/rack
enableDiscoveryDaemon: false
# Quoted so the value stays a string; the operator parses it, not the YAML loader.
cephCommandsTimeoutSeconds: "15"

## if true, run rook operator on the host network
# useOperatorHostNetwork: true

## Rook Discover configuration
## toleration: NoSchedule, PreferNoSchedule or NoExecute
## tolerationKey: Set this to the specific key of the taint to tolerate
## tolerations: Array of tolerations in YAML format which will be added to agent deployment
## nodeAffinity: Set to labels of the node to match
# discover:
#   toleration: NoSchedule
#   tolerationKey: key
#   tolerations:
#   - key: key
#     operator: Exists
#     effect: NoSchedule
#   nodeAffinity: key1=value1,value2; key2=value3
#   podLabels: "key1=value1,key2=value2"

# In some situations SELinux relabelling breaks (times out) on large filesystems, and doesn't work with cephfs ReadWriteMany volumes (last relabel wins).
# Disable it here if you have similar issues.
# For more details see https://github.com/rook/rook/issues/2417
enableSelinuxRelabeling: true

disableAdmissionController: false

# Writing to the hostPath is required for the Ceph mon and osd pods. Given the restricted permissions in OpenShift with SELinux,
# the pod must be running privileged in order to write to the hostPath volume, this must be set to true then.
hostpathRequiresPrivileged: false

# Disable automatic orchestration when new devices are discovered.
disableDeviceHotplug: false

# Blacklist certain disks according to the regex provided.
# Left empty (null) by default — no devices are excluded.
discoverDaemonUdev:

# imagePullSecrets option allow to pull docker images from private docker registry. Option will be passed to all service accounts.
# imagePullSecrets:
# - name: my-registry-secret

# Whether the OBC provisioner should watch on the operator namespace or not, if not the namespace of the cluster will be used
enableOBCWatchOperatorNamespace: true

admissionController:
  # Set tolerations and nodeAffinity for admission controller pod.
  # The admission controller would be best to start on the same nodes as other ceph daemons.
  # tolerations:
  #    - key: key
  #      operator: Exists
  #      effect: NoSchedule
  # nodeAffinity: key1=value1,value2; key2=value3

monitoring:
  # requires Prometheus to be pre-installed
  # enabling will also create RBAC rules to allow Operator to create ServiceMonitors
  enabled: false