# Default values for a single rook-ceph cluster
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# -- Namespace of the main rook operator
operatorNamespace: rook-ceph

# -- The metadata.name of the CephCluster CR
# @default -- The same as the namespace
clusterName:
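# For example (illustrative only), to name the CephCluster CR explicitly instead of
# falling back to the release namespace:
# clusterName: rook-ceph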

# -- Optional override of the target kubernetes version
kubeVersion:

# -- Cluster ceph.conf override
configOverride:
# configOverride: |
#   [global]
#   mon_allow_pool_delete = true
#   osd_pool_default_size = 3
#   osd_pool_default_min_size = 2

# Installs a debugging toolbox deployment
toolbox:
  # -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md)
  enabled: false
  # -- Toolbox image, defaults to the image used by the Ceph cluster
  image: #quay.io/ceph/ceph:v18.2.2
  # -- Toolbox tolerations
  tolerations: []
  # -- Toolbox affinity
  affinity: {}
  # -- Toolbox container security context
  containerSecurityContext:
    runAsNonRoot: true
    runAsUser: 2016
    runAsGroup: 2016
    capabilities:
      drop: ["ALL"]
  # -- Toolbox resources
  resources:
    limits:
      memory: "1Gi"
    requests:
      cpu: "100m"
      memory: "128Mi"
  # -- Set the priority class for the toolbox if desired
  priorityClassName:
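  # For example (illustrative only), to use one of the built-in priority classes:
  # priorityClassName: system-cluster-critical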

monitoring:
  # -- Enable Prometheus integration. This will also create the necessary RBAC rules to allow the operator to create ServiceMonitors.
  # Monitoring requires Prometheus to be pre-installed.
  enabled: false
  # -- Whether to create the Prometheus rules for Ceph alerts
  createPrometheusRules: false
  # -- The namespace in which to create the Prometheus rules, if different from the Rook cluster namespace.
  # If you have multiple rook-ceph clusters in the same k8s cluster, set the same rulesNamespaceOverride for all of them
  # (ideally the namespace where Prometheus is deployed). Otherwise, you will get duplicate alerts with multiple alert definitions.
  rulesNamespaceOverride:
  # Monitoring settings for external clusters:
  # externalMgrEndpoints: <list of endpoints>
  # externalMgrPrometheusPort: <port>
  # Scrape interval for prometheus
  # interval: 10s
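  # For an external cluster the mgr endpoint settings might look like the following
  # (values are illustrative only; 9283 is the usual ceph-mgr Prometheus module port):
  # externalMgrEndpoints:
  #   - ip: "192.168.100.10"
  # externalMgrPrometheusPort: 9283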
  # allow adding custom labels and annotations to the prometheus rule
  prometheusRule:
    # -- Labels applied to PrometheusRule
    labels: {}
    # -- Annotations applied to PrometheusRule
    annotations: {}
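    # For example (illustrative only), a Prometheus instance whose ruleSelector matches on
    # labels like the following would pick up the generated rules:
    # labels:
    #   prometheus: k8s
    #   role: alert-rules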

# -- Create & use PSP resources. Set this to the same value as the rook-ceph chart.
pspEnable: false

# The imagePullSecrets option allows pulling docker images from a private docker registry.
# The option will be passed to all service accounts.
# imagePullSecrets:
# - name: my-registry-secret

# All values below are taken from the CephCluster CRD
# -- Cluster configuration.
# @default -- See [below](#ceph-cluster-spec)
cephClusterSpec:
  # This cluster spec example is for a converged cluster where all the Ceph daemons are running locally,
  # as in the host-based example (cluster.yaml). For a different configuration such as a
  # PVC-based cluster (cluster-on-pvc.yaml), external cluster (cluster-external.yaml),
  # or stretch cluster (cluster-stretched.yaml), replace this entire `cephClusterSpec`
  # with the specs from those examples.

  # For more details, check https://rook.io/docs/rook/v1.10/CRDs/Cluster/ceph-cluster-crd/
  cephVersion:
    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
    # v17 is Quincy, v18 is Reef.
    # RECOMMENDATION: In production, use a specific version tag instead of the general v18 flag, which pulls the latest release
    # and could result in different versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
    # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v18.2.2-20240311.
    # Such a tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities.
    image: quay.io/ceph/ceph:v18.2.2
    # Whether to allow unsupported versions of Ceph. Currently `quincy` and `reef` are supported.
    # Future versions such as `squid` (v19) would require this to be set to `true`.
    # Do not set to true in production.
    allowUnsupported: false

  # The path on the host where configuration files will be persisted. Must be specified.
  # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
  # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in a Minikube environment.
  dataDirHostPath: /var/lib/rook

  # Whether or not the upgrade should continue even if a check fails.
  # This means Ceph's status could be degraded and we don't recommend upgrading, but you might decide otherwise.
  # Use at your OWN risk.
  # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/v1.10/Upgrade/ceph-upgrade/
  skipUpgradeChecks: false

  # Whether or not to continue if PGs are not clean during an upgrade
  continueUpgradeAfterChecksEvenIfNotHealthy: false

  # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart.
  # If the timeout is exceeded and the OSD is not ok to stop, the operator skips the upgrade for the current OSD and proceeds with the next one
  # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, the operator would
  # continue with the upgrade of an OSD even if it is not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
  # The default wait timeout is 10 minutes.
  waitTimeoutForHealthyOSDInMinutes: 10

  # Whether or not the OSD upgrade requires PGs to be clean. If set to `true`, the OSD upgrade process won't start until PGs are healthy.
  # This configuration will be ignored if `skipUpgradeChecks` is `true`.
  # Default is false.
  upgradeOSDRequiresHealthyPGs: false

  mon:
    # Set the number of mons to be started. Generally recommended to be 3.
    # For highest availability, an odd number of mons should be specified.
    count: 3
    # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
    # Mons should only be allowed on the same node for test environments where data loss is acceptable.
    allowMultiplePerNode: false

  mgr:
    # When higher availability of the mgr is needed, increase the count to 2.
    # In that case, one mgr will be active and one in standby. When Ceph updates which
    # mgr is active, Rook will update the mgr services to match the active mgr.
    count: 2
    allowMultiplePerNode: false
    modules:
      # List of modules to optionally enable or disable.
      # Note the "dashboard" and "monitoring" modules are already configured by other settings in the cluster CR.
      # - name: rook
      #   enabled: true

  # enable the ceph dashboard for viewing cluster status
  dashboard:
    enabled: true
    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
    # urlPrefix: /ceph-dashboard
    # serve the dashboard at the given port.
    # port: 8443
    # Serve the dashboard using SSL (if using ingress to expose the dashboard and `ssl: true` you need to set
    # the corresponding "backend protocol" annotation(s) for your ingress controller of choice)
    ssl: true

  # Network configuration, see: https://github.com/rook/rook/blob/v1.14.2/Documentation/CRDs/ceph-cluster-crd.md#network-configuration-settings
  network:
    connections:
      # Whether to encrypt the data in transit across the wire to prevent eavesdropping on the data on the network.
      # The default is false. When encryption is enabled, all communication between clients and Ceph daemons, or between Ceph daemons, will be encrypted.
      # When encryption is not enabled, clients still establish a strong initial authentication and data integrity is still validated with a crc check.
      # IMPORTANT: Encryption requires the 5.11 kernel for the latest nbd and cephfs drivers. Alternatively, for testing only,
      # you can set "mounter: rbd-nbd" in the rbd storage class, or "mounter: fuse" in the cephfs storage class.
      # The nbd and fuse drivers are *not* recommended in production since restarting the csi driver pod will disconnect the volumes.
      encryption:
        enabled: false
      # Whether to compress the data in transit across the wire. The default is false.
      # Requires Ceph Quincy (v17) or newer. Also see the kernel requirements above for encryption.
      compression:
        enabled: false
      # Whether to require communication over msgr2. If true, the msgr v1 port (6789) will be disabled
      # and clients will be required to connect to the Ceph cluster with the v2 port (3300).
      # Requires a kernel that supports msgr v2 (kernel 5.11 or CentOS 8.4 or newer).
      requireMsgr2: false
    # # enable host networking
    # provider: host
    # # EXPERIMENTAL: enable the Multus network provider
    # provider: multus
    # selectors:
    #   # The selector keys are required to be `public` and `cluster`.
    #   # Based on the configuration, the operator will do the following:
    #   #   1. if only the `public` selector key is specified, both the public_network and cluster_network Ceph settings will listen on that interface
    #   #   2. if both `public` and `cluster` selector keys are specified, the first one will point to the 'public_network' flag and the second one to 'cluster_network'
    #   #
    #   # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus
    #   #
    #   # public: public-conf --> NetworkAttachmentDefinition object name in Multus
    #   # cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus
    # # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4
    # ipFamily: "IPv6"
    # # Ceph daemons to listen on both IPv4 and IPv6 networks
    # dualStack: false

  # enable the crash collector for ceph daemon crash collection
  crashCollector:
    disable: false
    # Uncomment daysToRetain to prune ceph crash entries older than the
    # specified number of days.
    # daysToRetain: 30

  # enable the log collector; daemons will log to files and rotate
  logCollector:
    enabled: true
    periodicity: daily # one of: hourly, daily, weekly, monthly
    maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.

  # automate the [data cleanup process](https://github.com/rook/rook/blob/v1.14.2/Documentation/Storage-Configuration/ceph-teardown.md#delete-the-data-on-hosts) on cluster destruction.
  cleanupPolicy:
    # Since cluster cleanup is destructive to data, confirmation is required.
    # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
    # This value should only be set when the cluster is about to be deleted. After the confirmation is set,
    # Rook will immediately stop configuring the cluster and only wait for the delete command.
    # If the empty string is set, Rook will not destroy any data on hosts during uninstall.
    confirmation: ""
    # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion
    sanitizeDisks:
      # method indicates if the entire disk should be sanitized or simply ceph's metadata
      # in both cases, re-installation is possible
      # possible choices are 'complete' or 'quick' (default)
      method: quick
      # dataSource indicates where to get random bytes from to write on the disk
      # possible choices are 'zero' (default) or 'random'
      # using random sources will consume entropy from the system and will take much more time than the zero source
      dataSource: zero
      # iteration overwrites N times instead of the default (1)
      # takes an integer value
      iteration: 1
    # allowUninstallWithVolumes defines how the uninstall should be performed
    # If set to true, cephCluster deletion does not wait for the PVs to be deleted.
    allowUninstallWithVolumes: false

  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
  # tolerate taints with a key of 'storage-node'.
  # placement:
  #   all:
  #     nodeAffinity:
  #       requiredDuringSchedulingIgnoredDuringExecution:
  #         nodeSelectorTerms:
  #           - matchExpressions:
  #               - key: role
  #                 operator: In
  #                 values:
  #                   - storage-node
  #     podAffinity:
  #     podAntiAffinity:
  #     topologySpreadConstraints:
  #     tolerations:
  #       - key: storage-node
  #         operator: Exists
  #   # The above placement information can also be specified for mon, osd, and mgr components
  #   mon:
  #     # Monitor deployments may contain an anti-affinity rule for avoiding monitor
  #     # collocation on the same node. This is a required rule when host network is used
  #     # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
  #     # preferred rule with weight: 50.
  #   osd:
  #   mgr:
  #   cleanup:

  # annotations:
  #   all:
  #   mon:
  #   osd:
  #   cleanup:
  #   prepareosd:
  #   # If no mgr annotations are set, prometheus scrape annotations will be set by default.
  #   mgr:

  # labels:
  #   all:
  #   mon:
  #   osd:
  #   cleanup:
  #   mgr:
  #   prepareosd:
  #   # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by the operator.
  #   # These labels can be passed as LabelSelector to Prometheus
  #   monitoring:

  resources:
    mgr:
      limits:
        memory: "1Gi"
      requests:
        cpu: "500m"
        memory: "512Mi"
    mon:
      limits:
        memory: "2Gi"
      requests:
        cpu: "1000m"
        memory: "1Gi"
    osd:
      limits:
        memory: "4Gi"
      requests:
        cpu: "1000m"
        memory: "4Gi"
    prepareosd:
      # limits: It is not recommended to set limits on the OSD prepare job
      #   since it's a one-time burst for memory that must be allowed to
      #   complete without an OOM kill. Note however that if a k8s
      #   limitRange guardrail is defined external to Rook, the lack of
      #   a limit here may result in a sync failure, in which case a
      #   limit should be added. 1200Mi may suffice for up to 15Ti
      #   OSDs; for larger devices 2Gi may be required.
      #   cf. https://github.com/rook/rook/pull/11103
      requests:
        cpu: "500m"
        memory: "50Mi"
    mgr-sidecar:
      limits:
        memory: "100Mi"
      requests:
        cpu: "100m"
        memory: "40Mi"
    crashcollector:
      limits:
        memory: "60Mi"
      requests:
        cpu: "100m"
        memory: "60Mi"
    logcollector:
      limits:
        memory: "1Gi"
      requests:
        cpu: "100m"
        memory: "100Mi"
    cleanup:
      limits:
        memory: "1Gi"
      requests:
        cpu: "500m"
        memory: "100Mi"
    exporter:
      limits:
        memory: "128Mi"
      requests:
        cpu: "50m"
        memory: "50Mi"

  # The option to automatically remove OSDs that are out and are safe to destroy.
  removeOSDsIfOutAndSafeToRemove: false

  # priority classes to apply to ceph resources
  priorityClassNames:
    mon: system-node-critical
    osd: system-node-critical
    mgr: system-cluster-critical

  storage: # cluster level storage configuration and selection
    useAllNodes: true
    useAllDevices: true
    # deviceFilter:
    # config:
    #   crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
    #   metadataDevice: "md0" # specify a non-rotational storage device so ceph-volume will use it as the block db device of bluestore
    #   databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
    #   osdsPerDevice: "1" # this value can be overridden at the node or device level
    #   encryptedDevice: "true" # the default value for this option is "false"
    # # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
    # # nodes below will be used as storage resources. Each node's 'name' field should match its 'kubernetes.io/hostname' label.
    # nodes:
    #   - name: "172.17.4.201"
    #     devices: # specific devices to use for storage can be specified for each node
    #       - name: "sdb"
    #       - name: "nvme01" # multiple osds can be created on high performance devices
    #         config:
    #           osdsPerDevice: "5"
    #       - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
    #     config: # configuration can be specified at the node level which overrides the cluster level config
    #   - name: "172.17.4.301"
    #     deviceFilter: "^sd."

  # The section for configuring management of daemon disruptions during upgrade or fencing.
  disruptionManagement:
    # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
    # via the strategy outlined in the [design](https://github.com/rook/rook/blob/v1.14.2/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
    # block eviction of OSDs by default and unblock them safely when drains are detected.
    managePodBudgets: true
    # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
    # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
    osdMaintenanceTimeout: 30
    # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up.
    # The operator will continue with the next drain if the timeout is exceeded. It only works if `managePodBudgets` is `true`.
    # No value or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
    pgHealthCheckTimeout: 0

  # Configure the healthcheck and liveness probes for ceph pods.
  # Valid values for daemons are 'mon', 'osd', 'status'
  healthCheck:
    daemonHealth:
      mon:
        disabled: false
        interval: 45s
      osd:
        disabled: false
        interval: 60s
      status:
        disabled: false
        interval: 60s
    # Change pod liveness probe; it works for all mon, mgr, and osd pods.
    livenessProbe:
      mon:
        disabled: false
      mgr:
        disabled: false
      osd:
        disabled: false
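      # A per-daemon probe override can also be supplied if the defaults do not fit
      # (field names follow the CephCluster CRD; timings below are illustrative only):
      # mon:
      #   disabled: false
      #   probe:
      #     initialDelaySeconds: 10
      #     timeoutSeconds: 5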

ingress:
  # -- Enable an ingress for the ceph-dashboard
  dashboard:
    {}
    # annotations:
    #   external-dns.alpha.kubernetes.io/hostname: dashboard.example.com
    #   nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2
    # If the dashboard has ssl: true, the following will make sure the NGINX Ingress controller can expose the dashboard correctly
    #   nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    #   nginx.ingress.kubernetes.io/server-snippet: |
    #     proxy_ssl_verify off;
    # host:
    #   name: dashboard.example.com
    #   path: "/ceph-dashboard(/|$)(.*)"
    # tls:
    # - hosts:
    #     - dashboard.example.com
    #   secretName: testsecret-tls
    ## Note: Only one of the ingress class annotation or the `ingressClassName:` field can be used at a time
    ## to set the ingress class
    # ingressClassName: nginx

# -- A list of CephBlockPool configurations to deploy
# @default -- See [below](#ceph-block-pools)
cephBlockPools:
  - name: ceph-blockpool
    # see https://github.com/rook/rook/blob/v1.14.2/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
    spec:
      failureDomain: host
      replicated:
        size: 3
      # Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false.
      # For reference: https://docs.ceph.com/docs/latest/mgr/prometheus/#rbd-io-statistics
      # enableRBDStats: true
    storageClass:
      enabled: true
      name: ceph-block
      isDefault: true
      reclaimPolicy: Delete
      allowVolumeExpansion: true
      volumeBindingMode: "Immediate"
      mountOptions: []
      # see https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies
      allowedTopologies: []
      # - matchLabelExpressions:
      #     - key: rook-ceph-role
      #       values:
      #         - storage-node
      # see https://github.com/rook/rook/blob/v1.14.2/Documentation/Storage-Configuration/Block-Storage-RBD/block-storage.md#provision-storage for available configuration
      parameters:
        # (optional) mapOptions is a comma-separated list of map options.
        # For krbd options refer
        # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
        # For nbd options refer
        # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
        # mapOptions: lock_on_read,queue_depth=1024

        # (optional) unmapOptions is a comma-separated list of unmap options.
        # For krbd options refer
        # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
        # For nbd options refer
        # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
        # unmapOptions: force

        # RBD image format. Defaults to "2".
        imageFormat: "2"

        # RBD image features, equivalent to OR'd bitfield value: 63
        # Available for imageFormat: "2". Older releases of CSI RBD
        # support only the `layering` feature. The Linux kernel (KRBD) supports the
        # full feature complement as of 5.4
        imageFeatures: layering

        # These secrets contain Ceph admin credentials.
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
        # Specify the filesystem type of the volume. If not specified, csi-provisioner
        # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
        # in hyperconverged settings where the volume is mounted on the same node as the osds.
        csi.storage.k8s.io/fstype: ext4

# -- A list of CephFileSystem configurations to deploy
# @default -- See [below](#ceph-file-systems)
cephFileSystems:
  - name: ceph-filesystem
    # see https://github.com/rook/rook/blob/v1.14.2/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration
    spec:
      metadataPool:
        replicated:
          size: 3
      dataPools:
        - failureDomain: host
          replicated:
            size: 3
          # Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/v1.14.2/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools
          name: data0
      metadataServer:
        activeCount: 1
        activeStandby: true
        resources:
          limits:
            memory: "4Gi"
          requests:
            cpu: "1000m"
            memory: "4Gi"
        priorityClassName: system-cluster-critical
    storageClass:
      enabled: true
      isDefault: false
      name: ceph-filesystem
      # (Optional) specify a data pool to use, must be the name of one of the data pools above, 'data0' by default
      pool: data0
      reclaimPolicy: Delete
      allowVolumeExpansion: true
      volumeBindingMode: "Immediate"
      mountOptions: []
      # see https://github.com/rook/rook/blob/v1.14.2/Documentation/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage.md#provision-storage for available configuration
      parameters:
        # The secrets contain Ceph admin credentials.
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
        # Specify the filesystem type of the volume. If not specified, csi-provisioner
        # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
        # in hyperconverged settings where the volume is mounted on the same node as the osds.
        csi.storage.k8s.io/fstype: ext4

# -- Settings for the filesystem snapshot class
# @default -- See [CephFS Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#cephfs-snapshots)
cephFileSystemVolumeSnapshotClass:
  enabled: false
  name: ceph-filesystem
  isDefault: true
  deletionPolicy: Delete
  annotations: {}
  labels: {}
  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#cephfs-snapshots for available configuration
  parameters: {}

# -- Settings for the block pool snapshot class
# @default -- See [RBD Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#rbd-snapshots)
cephBlockPoolsVolumeSnapshotClass:
  enabled: false
  name: ceph-block
  isDefault: false
  deletionPolicy: Delete
  annotations: {}
  labels: {}
  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#rbd-snapshots for available configuration
  parameters: {}

# -- A list of CephObjectStore configurations to deploy
# @default -- See [below](#ceph-object-stores)
cephObjectStores:
  - name: ceph-objectstore
    # see https://github.com/rook/rook/blob/v1.14.2/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md#object-store-settings for available configuration
    spec:
      metadataPool:
        failureDomain: host
        replicated:
          size: 3
      dataPool:
        failureDomain: host
        erasureCoded:
          dataChunks: 2
          codingChunks: 1
      preservePoolsOnDelete: true
      gateway:
        port: 80
        resources:
          limits:
            memory: "2Gi"
          requests:
            cpu: "1000m"
            memory: "1Gi"
        # securePort: 443
        # sslCertificateRef:
        instances: 1
        priorityClassName: system-cluster-critical
    storageClass:
      enabled: true
      name: ceph-bucket
      reclaimPolicy: Delete
      volumeBindingMode: "Immediate"
      # see https://github.com/rook/rook/blob/v1.14.2/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-claim.md#storageclass for available configuration
      parameters:
        # note: objectStoreNamespace and objectStoreName are configured by the chart
        region: us-east-1
    ingress:
      # Enable an ingress for the ceph-objectstore
      enabled: false
      # annotations: {}
      # host:
      #   name: objectstore.example.com
      #   path: /
      # tls:
      # - hosts:
      #     - objectstore.example.com
      #   secretName: ceph-objectstore-tls
      # ingressClassName: nginx
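
# Buckets can then be requested from the bucket storage class above with an
# ObjectBucketClaim. A minimal sketch (names are illustrative only):
#   apiVersion: objectbucket.io/v1alpha1
#   kind: ObjectBucketClaim
#   metadata:
#     name: my-bucket
#   spec:
#     generateBucketName: my-bucket
#     storageClassName: ceph-bucket
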
## cephECBlockPools are disabled by default; please remove the comments and set the desired values to enable them.
## For erasure coding, a replicated metadata pool is required.
## https://rook.io/docs/rook/latest/CRDs/Shared-Filesystem/ceph-filesystem-crd/#erasure-coded
#cephECBlockPools:
#  - name: ec-pool
#    spec:
#      metadataPool:
#        replicated:
#          size: 2
#      dataPool:
#        failureDomain: osd
#        erasureCoded:
#          dataChunks: 2
#          codingChunks: 1
#        deviceClass: hdd
#
#    parameters:
#      # clusterID is the namespace where the rook cluster is running
#      # If you change this namespace, also change the namespace below where the secret namespaces are defined
#      clusterID: rook-ceph # namespace:cluster
#      # (optional) mapOptions is a comma-separated list of map options.
#      # For krbd options refer
#      # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
#      # For nbd options refer
#      # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
#      # mapOptions: lock_on_read,queue_depth=1024
#
#      # (optional) unmapOptions is a comma-separated list of unmap options.
#      # For krbd options refer
#      # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
#      # For nbd options refer
#      # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
#      # unmapOptions: force
#
#      # RBD image format. Defaults to "2".
#      imageFormat: "2"
#
#      # RBD image features, equivalent to OR'd bitfield value: 63
#      # Available for imageFormat: "2". Older releases of CSI RBD
#      # support only the `layering` feature. The Linux kernel (KRBD) supports the
#      # full feature complement as of 5.4
#      # imageFeatures: layering,fast-diff,object-map,deep-flatten,exclusive-lock
#      imageFeatures: layering
#
#    storageClass:
#      provisioner: rook-ceph.rbd.csi.ceph.com # csi-provisioner-name
#      enabled: true
#      name: rook-ceph-block
#      isDefault: false
#      allowVolumeExpansion: true
#      reclaimPolicy: Delete

# -- CSI driver name prefix for cephfs, rbd and nfs.
# @default -- `namespace name where rook-ceph operator is deployed`
csiDriverNamePrefix:
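# For example (illustrative only), with the operator deployed in the rook-ceph namespace the
# CSI driver names default to rook-ceph.cephfs.csi.ceph.com, rook-ceph.rbd.csi.ceph.com and
# rook-ceph.nfs.csi.ceph.com; set this value only if you need a different prefix:
# csiDriverNamePrefix: rook-ceph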