# Default values for a single rook-ceph cluster
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# -- Namespace of the main rook operator
operatorNamespace: rook-ceph

# -- The metadata.name of the CephCluster CR
# @default -- The same as the namespace
clusterName:
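# A minimal illustrative override: give the CephCluster CR an explicit name
# instead of inheriting the namespace (the value below is only an example).
# clusterName: rook-ceph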

# -- Optional override of the target kubernetes version
kubeVersion:

# -- Cluster ceph.conf override
configOverride:
# configOverride: |
#   [global]
#   mon_allow_pool_delete = true
#   osd_pool_default_size = 3
#   osd_pool_default_min_size = 2

# Installs a debugging toolbox deployment
toolbox:
  # -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md)
  enabled: false
  # -- Toolbox image, defaults to the image used by the Ceph cluster
  image: #quay.io/ceph/ceph:v18.2.2
  # -- Toolbox tolerations
  tolerations: []
  # -- Toolbox affinity
  affinity: {}
  # -- Toolbox container security context
  containerSecurityContext:
    runAsNonRoot: true
    runAsUser: 2016
    runAsGroup: 2016
    capabilities:
      drop: ["ALL"]
  # -- Toolbox resources
  resources:
    limits:
      memory: "1Gi"
    requests:
      cpu: "100m"
      memory: "128Mi"
  # -- Set the priority class for the toolbox if desired
  priorityClassName:
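  # For example, to schedule the toolbox ahead of ordinary workloads you could
  # point this at an existing PriorityClass (the name below is illustrative):
  # priorityClassName: system-cluster-critical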

monitoring:
  # -- Enable Prometheus integration, will also create necessary RBAC rules to allow Operator to create ServiceMonitors.
  # Monitoring requires Prometheus to be pre-installed
  enabled: false
  # -- Whether to create the Prometheus rules for Ceph alerts
  createPrometheusRules: false
  # -- The namespace in which to create the prometheus rules, if different from the rook cluster namespace.
  # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
  # deployed) to set rulesNamespaceOverride for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
  rulesNamespaceOverride:
  # Monitoring settings for external clusters:
  # externalMgrEndpoints: <list of endpoints>
  # externalMgrPrometheusPort: <port>
  # Scrape interval for prometheus
  # interval: 10s
  # allow adding custom labels and annotations to the prometheus rule
  prometheusRule:
    # -- Labels applied to PrometheusRule
    labels: {}
    # -- Annotations applied to PrometheusRule
    annotations: {}
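    # As an example, labels like these let a pre-existing Prometheus rule selector
    # pick the rules up (the key/value pair is an assumption about your setup):
    # labels:
    #   role: alert-rules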

# -- Create & use PSP resources. Set this to the same value as the rook-ceph chart.
pspEnable: false

# The imagePullSecrets option allows pulling docker images from a private docker registry. The option will be passed to all service accounts.
# imagePullSecrets:
# - name: my-registry-secret

# All values below are taken from the CephCluster CRD
# -- Cluster configuration.
# @default -- See [below](#ceph-cluster-spec)
cephClusterSpec:
  # This cluster spec example is for a converged cluster where all the Ceph daemons are running locally,
  # as in the host-based example (cluster.yaml). For a different configuration such as a
  # PVC-based cluster (cluster-on-pvc.yaml), external cluster (cluster-external.yaml),
  # or stretch cluster (cluster-stretched.yaml), replace this entire `cephClusterSpec`
  # with the specs from those examples.

  # For more details, check https://rook.io/docs/rook/v1.10/CRDs/Cluster/ceph-cluster-crd/
  cephVersion:
    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
    # v17 is Quincy, v18 is Reef.
    # RECOMMENDATION: In production, use a specific version tag instead of the general v18 flag, which pulls the latest release and could result in different
    # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
    # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v18.2.2-20240311
    # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
    image: quay.io/ceph/ceph:v18.2.2
    # Whether to allow unsupported versions of Ceph. Currently `quincy` and `reef` are supported.
    # Future versions such as `squid` (v19) would require this to be set to `true`.
    # Do not set to true in production.
    allowUnsupported: false

  # The path on the host where configuration files will be persisted. Must be specified.
  # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
  # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
  dataDirHostPath: /var/lib/rook

  # Whether or not upgrade should continue even if a check fails
  # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise
  # Use at your OWN risk
  # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/v1.10/Upgrade/ceph-upgrade/
  skipUpgradeChecks: false

  # Whether or not to continue if PGs are not clean during an upgrade
  continueUpgradeAfterChecksEvenIfNotHealthy: false

  # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart.
  # If the timeout is exceeded and the OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one
  # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then the operator would
  # continue with the upgrade of an OSD even if it's not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
  # The default wait timeout is 10 minutes.
  waitTimeoutForHealthyOSDInMinutes: 10

  # Whether or not to require that PGs are clean before an OSD upgrade. If set to `true`, the OSD upgrade process won't start until PGs are healthy.
  # This configuration will be ignored if `skipUpgradeChecks` is `true`.
  # Default is false.
  upgradeOSDRequiresHealthyPGs: false

  mon:
    # Set the number of mons to be started. Generally recommended to be 3.
    # For highest availability, an odd number of mons should be specified.
    count: 3
    # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
    # Mons should only be allowed on the same node for test environments where data loss is acceptable.
    allowMultiplePerNode: false

  mgr:
    # When higher availability of the mgr is needed, increase the count to 2.
    # In that case, one mgr will be active and one in standby. When Ceph updates which
    # mgr is active, Rook will update the mgr services to match the active mgr.
    count: 2
    allowMultiplePerNode: false
    modules:
      # List of modules to optionally enable or disable.
      # Note the "dashboard" and "monitoring" modules are already configured by other settings in the cluster CR.
      # - name: rook
      #   enabled: true

  # enable the ceph dashboard for viewing cluster status
  dashboard:
    enabled: true
    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
    # urlPrefix: /ceph-dashboard
    # serve the dashboard at the given port.
    # port: 8443
    # Serve the dashboard using SSL (if using ingress to expose the dashboard and `ssl: true` you need to set
    # the corresponding "backend protocol" annotation(s) for your ingress controller of choice)
    ssl: true

  # Network configuration, see: https://github.com/rook/rook/blob/v1.14.5/Documentation/CRDs/Cluster/ceph-cluster-crd.md#network-configuration-settings
  network:
    connections:
      # Whether to encrypt the data in transit across the wire to prevent eavesdropping on the data on the network.
      # The default is false. When encryption is enabled, all communication between clients and Ceph daemons, or between Ceph daemons, will be encrypted.
      # When encryption is not enabled, clients still establish a strong initial authentication and data integrity is still validated with a crc check.
      # IMPORTANT: Encryption requires the 5.11 kernel for the latest nbd and cephfs drivers. Alternatively for testing only,
      # you can set the "mounter: rbd-nbd" in the rbd storage class, or "mounter: fuse" in the cephfs storage class.
      # The nbd and fuse drivers are *not* recommended in production since restarting the csi driver pod will disconnect the volumes.
      encryption:
        enabled: false
      # Whether to compress the data in transit across the wire. The default is false.
      # Requires Ceph Quincy (v17) or newer. Also see the kernel requirements above for encryption.
      compression:
        enabled: false
      # Whether to require communication over msgr2. If true, the msgr v1 port (6789) will be disabled
      # and clients will be required to connect to the Ceph cluster with the v2 port (3300).
      # Requires a kernel that supports msgr v2 (kernel 5.11 or CentOS 8.4 or newer).
      requireMsgr2: false
    # # enable host networking
    # provider: host
    # # EXPERIMENTAL: enable the Multus network provider
    # provider: multus
    # selectors:
    #   # The selector keys are required to be `public` and `cluster`.
    #   # Based on the configuration, the operator will do the following:
    #   #   1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface
    #   #   2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network'
    #   #
    #   # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus
    #   #
    #   # public: public-conf --> NetworkAttachmentDefinition object name in Multus
    #   # cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus
    # # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4
    # ipFamily: "IPv6"
    # # Ceph daemons to listen on both IPv4 and IPv6 networks
    # dualStack: false

  # enable the crash collector for ceph daemon crash collection
  crashCollector:
    disable: false
    # Uncomment daysToRetain to prune ceph crash entries older than the
    # specified number of days.
    # daysToRetain: 30

  # enable the log collector; daemons will log to files and rotate
  logCollector:
    enabled: true
    periodicity: daily # one of: hourly, daily, weekly, monthly
    maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.

  # automate [data cleanup process](https://github.com/rook/rook/blob/v1.14.5/Documentation/Storage-Configuration/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
  cleanupPolicy:
    # Since cluster cleanup is destructive to data, confirmation is required.
    # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
    # This value should only be set when the cluster is about to be deleted. After the confirmation is set,
    # Rook will immediately stop configuring the cluster and only wait for the delete command.
    # If the empty string is set, Rook will not destroy any data on hosts during uninstall.
    confirmation: ""
    # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion
    sanitizeDisks:
      # method indicates if the entire disk should be sanitized or simply ceph's metadata
      # in both cases, re-install is possible
      # possible choices are 'complete' or 'quick' (default)
      method: quick
      # dataSource indicates where to get random bytes from to write on the disk
      # possible choices are 'zero' (default) or 'random'
      # using random sources will consume entropy from the system and will take much more time than the zero source
      dataSource: zero
      # iteration overwrites N times instead of the default (1)
      # takes an integer value
      iteration: 1
    # allowUninstallWithVolumes defines how the uninstall should be performed
    # If set to true, cephCluster deletion does not wait for the PVs to be deleted.
    allowUninstallWithVolumes: false

  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
  # tolerate taints with a key of 'storage-node'.
  # placement:
  #   all:
  #     nodeAffinity:
  #       requiredDuringSchedulingIgnoredDuringExecution:
  #         nodeSelectorTerms:
  #           - matchExpressions:
  #             - key: role
  #               operator: In
  #               values:
  #                 - storage-node
  #     podAffinity:
  #     podAntiAffinity:
  #     topologySpreadConstraints:
  #     tolerations:
  #       - key: storage-node
  #         operator: Exists
  #   # The above placement information can also be specified for mon, osd, and mgr components
  #   mon:
  #   # Monitor deployments may contain an anti-affinity rule for avoiding monitor
  #   # collocation on the same node. This is a required rule when host network is used
  #   # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
  #   # preferred rule with weight: 50.
  #   osd:
  #   mgr:
  #   cleanup:

  # annotations:
  #   all:
  #   mon:
  #   osd:
  #   cleanup:
  #   prepareosd:
  #   # If no mgr annotations are set, prometheus scrape annotations will be set by default.
  #   mgr:
  #   dashboard:

  # labels:
  #   all:
  #   mon:
  #   osd:
  #   cleanup:
  #   mgr:
  #   prepareosd:
  #   # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator.
  #   # These labels can be passed as LabelSelector to Prometheus
  #   monitoring:
  #   dashboard:

  resources:
    mgr:
      limits:
        memory: "1Gi"
      requests:
        cpu: "500m"
        memory: "512Mi"
    mon:
      limits:
        memory: "2Gi"
      requests:
        cpu: "1000m"
        memory: "1Gi"
    osd:
      limits:
        memory: "4Gi"
      requests:
        cpu: "1000m"
        memory: "4Gi"
    prepareosd:
      # limits: It is not recommended to set limits on the OSD prepare job
      #         since it's a one-time burst for memory that must be allowed to
      #         complete without an OOM kill. Note however that if a k8s
      #         limitRange guardrail is defined external to Rook, the lack of
      #         a limit here may result in a sync failure, in which case a
      #         limit should be added. 1200Mi may suffice for up to 15Ti
      #         OSDs; for larger devices 2Gi may be required.
      #         cf. https://github.com/rook/rook/pull/11103
      requests:
        cpu: "500m"
        memory: "50Mi"
    mgr-sidecar:
      limits:
        memory: "100Mi"
      requests:
        cpu: "100m"
        memory: "40Mi"
    crashcollector:
      limits:
        memory: "60Mi"
      requests:
        cpu: "100m"
        memory: "60Mi"
    logcollector:
      limits:
        memory: "1Gi"
      requests:
        cpu: "100m"
        memory: "100Mi"
    cleanup:
      limits:
        memory: "1Gi"
      requests:
        cpu: "500m"
        memory: "100Mi"
    exporter:
      limits:
        memory: "128Mi"
      requests:
        cpu: "50m"
        memory: "50Mi"

  # The option to automatically remove OSDs that are out and are safe to destroy.
  removeOSDsIfOutAndSafeToRemove: false

  # priority classes to apply to ceph resources
  priorityClassNames:
    mon: system-node-critical
    osd: system-node-critical
    mgr: system-cluster-critical

  storage: # cluster level storage configuration and selection
    useAllNodes: true
    useAllDevices: true
    # deviceFilter:
    # config:
    #   crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
    #   metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
    #   databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
    #   osdsPerDevice: "1" # this value can be overridden at the node or device level
    #   encryptedDevice: "true" # the default value for this option is "false"
    # # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
    # # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
    # nodes:
    #   - name: "172.17.4.201"
    #     devices: # specific devices to use for storage can be specified for each node
    #       - name: "sdb"
    #       - name: "nvme01" # multiple osds can be created on high performance devices
    #         config:
    #           osdsPerDevice: "5"
    #       - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
    #     config: # configuration can be specified at the node level which overrides the cluster level config
    #   - name: "172.17.4.301"
    #     deviceFilter: "^sd."

  # The section for configuring management of daemon disruptions during upgrade or fencing.
  disruptionManagement:
    # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
    # via the strategy outlined in the [design](https://github.com/rook/rook/blob/v1.14.5/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
    # block eviction of OSDs by default and unblock them safely when drains are detected.
    managePodBudgets: true
    # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
    # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
    osdMaintenanceTimeout: 30
    # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up.
    # The operator will continue with the next drain if the timeout is exceeded. It only works if `managePodBudgets` is `true`.
    # No value or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
    pgHealthCheckTimeout: 0

  # Configure the healthcheck and liveness probes for ceph pods.
  # Valid values for daemons are 'mon', 'osd', 'status'
  healthCheck:
    daemonHealth:
      mon:
        disabled: false
        interval: 45s
      osd:
        disabled: false
        interval: 60s
      status:
        disabled: false
        interval: 60s
    # Change pod liveness probe; it works for all mon, mgr, and osd pods.
    livenessProbe:
      mon:
        disabled: false
      mgr:
        disabled: false
      osd:
        disabled: false

ingress:
  # -- Enable an ingress for the ceph-dashboard
  dashboard:
    {}
    # annotations:
    #   external-dns.alpha.kubernetes.io/hostname: dashboard.example.com
    #   nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2
    #   # If the dashboard has ssl: true the following will make sure the NGINX Ingress controller can expose the dashboard correctly
    #   nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    #   nginx.ingress.kubernetes.io/server-snippet: |
    #     proxy_ssl_verify off;
    # host:
    #   name: dashboard.example.com
    #   path: "/ceph-dashboard(/|$)(.*)"
    # tls:
    #   - hosts:
    #       - dashboard.example.com
    #     secretName: testsecret-tls
    ## Note: Only one of ingress class annotation or the `ingressClassName:` can be used at a time
    ## to set the ingress class
    # ingressClassName: nginx

# -- A list of CephBlockPool configurations to deploy
# @default -- See [below](#ceph-block-pools)
cephBlockPools:
  - name: ceph-blockpool
    # see https://github.com/rook/rook/blob/v1.14.5/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
    spec:
      failureDomain: host
      replicated:
        size: 3
      # Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false.
      # For reference: https://docs.ceph.com/docs/latest/mgr/prometheus/#rbd-io-statistics
      # enableRBDStats: true
    storageClass:
      enabled: true
      name: ceph-block
      isDefault: true
      reclaimPolicy: Delete
      allowVolumeExpansion: true
      volumeBindingMode: "Immediate"
      mountOptions: []
      # see https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies
      allowedTopologies: []
      # - matchLabelExpressions:
      #     - key: rook-ceph-role
      #       values:
      #         - storage-node
      # see https://github.com/rook/rook/blob/v1.14.5/Documentation/Storage-Configuration/Block-Storage-RBD/block-storage.md#provision-storage for available configuration
      parameters:
        # (optional) mapOptions is a comma-separated list of map options.
        # For krbd options refer
        # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
        # For nbd options refer
        # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
        # mapOptions: lock_on_read,queue_depth=1024

        # (optional) unmapOptions is a comma-separated list of unmap options.
        # For krbd options refer
        # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
        # For nbd options refer
        # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
        # unmapOptions: force

        # RBD image format. Defaults to "2".
        imageFormat: "2"

        # RBD image features, equivalent to OR'd bitfield value: 63
        # Available for imageFormat: "2". Older releases of CSI RBD
        # support only the `layering` feature. The Linux kernel (KRBD) supports the
        # full feature complement as of 5.4
        imageFeatures: layering

        # These secrets contain Ceph admin credentials.
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
        # Specify the filesystem type of the volume. If not specified, csi-provisioner
        # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
        # in hyperconverged settings where the volume is mounted on the same node as the osds.
        csi.storage.k8s.io/fstype: ext4

# -- A list of CephFileSystem configurations to deploy
# @default -- See [below](#ceph-file-systems)
cephFileSystems:
  - name: ceph-filesystem
    # see https://github.com/rook/rook/blob/v1.14.5/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration
    spec:
      metadataPool:
        replicated:
          size: 3
      dataPools:
        - failureDomain: host
          replicated:
            size: 3
          # Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/v1.14.5/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools
          name: data0
      metadataServer:
        activeCount: 1
        activeStandby: true
        resources:
          limits:
            memory: "4Gi"
          requests:
            cpu: "1000m"
            memory: "4Gi"
        priorityClassName: system-cluster-critical
    storageClass:
      enabled: true
      isDefault: false
      name: ceph-filesystem
      # (Optional) specify a data pool to use, must be the name of one of the data pools above, 'data0' by default
      pool: data0
      reclaimPolicy: Delete
      allowVolumeExpansion: true
      volumeBindingMode: "Immediate"
      mountOptions: []
      # see https://github.com/rook/rook/blob/v1.14.5/Documentation/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage.md#provision-storage for available configuration
      parameters:
        # The secrets contain Ceph admin credentials.
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
        # Specify the filesystem type of the volume. If not specified, csi-provisioner
        # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
        # in hyperconverged settings where the volume is mounted on the same node as the osds.
        csi.storage.k8s.io/fstype: ext4

# -- Settings for the filesystem snapshot class
# @default -- See [CephFS Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#cephfs-snapshots)
cephFileSystemVolumeSnapshotClass:
  enabled: false
  name: ceph-filesystem
  isDefault: true
  deletionPolicy: Delete
  annotations: {}
  labels: {}
  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#cephfs-snapshots for available configuration
  parameters: {}
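  # For example, the standard Kubernetes external-snapshotter secret parameters could be
  # set here if needed; the secret names below mirror the CephFS provisioner secrets used
  # above and are assumptions about your deployment:
  # parameters:
  #   csi.storage.k8s.io/snapshotter-secret-name: rook-csi-cephfs-provisioner
  #   csi.storage.k8s.io/snapshotter-secret-namespace: "{{ .Release.Namespace }}"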

# -- Settings for the block pool snapshot class
# @default -- See [RBD Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#rbd-snapshots)
cephBlockPoolsVolumeSnapshotClass:
  enabled: false
  name: ceph-block
  isDefault: false
  deletionPolicy: Delete
  annotations: {}
  labels: {}
  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#rbd-snapshots for available configuration
  parameters: {}

# -- A list of CephObjectStore configurations to deploy
# @default -- See [below](#ceph-object-stores)
cephObjectStores:
  - name: ceph-objectstore
    # see https://github.com/rook/rook/blob/v1.14.5/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md#object-store-settings for available configuration
    spec:
      metadataPool:
        failureDomain: host
        replicated:
          size: 3
      dataPool:
        failureDomain: host
        erasureCoded:
          dataChunks: 2
          codingChunks: 1
      preservePoolsOnDelete: true
      gateway:
        port: 80
        resources:
          limits:
            memory: "2Gi"
          requests:
            cpu: "1000m"
            memory: "1Gi"
        # securePort: 443
        # sslCertificateRef:
        instances: 1
        priorityClassName: system-cluster-critical
    storageClass:
      enabled: true
      name: ceph-bucket
      reclaimPolicy: Delete
      volumeBindingMode: "Immediate"
      # see https://github.com/rook/rook/blob/v1.14.5/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-claim.md#storageclass for available configuration
      parameters:
        # note: objectStoreNamespace and objectStoreName are configured by the chart
        region: us-east-1
    ingress:
      # Enable an ingress for the ceph-objectstore
      enabled: false
      # annotations: {}
      # host:
      #   name: objectstore.example.com
      #   path: /
      # tls:
      # - hosts:
      #     - objectstore.example.com
      #   secretName: ceph-objectstore-tls
      # ingressClassName: nginx
## cephECBlockPools are disabled by default, please remove the comments and set desired values to enable it
## For erasure coded pools, a replicated metadata pool is required.
## https://rook.io/docs/rook/latest/CRDs/Shared-Filesystem/ceph-filesystem-crd/#erasure-coded
#cephECBlockPools:
#  - name: ec-pool
#    spec:
#      metadataPool:
#        replicated:
#          size: 2
#      dataPool:
#        failureDomain: osd
#        erasureCoded:
#          dataChunks: 2
#          codingChunks: 1
#        deviceClass: hdd
#
#    parameters:
#      # clusterID is the namespace where the rook cluster is running
#      # If you change this namespace, also change the namespace below where the secret namespaces are defined
#      clusterID: rook-ceph # namespace:cluster
#      # (optional) mapOptions is a comma-separated list of map options.
#      # For krbd options refer
#      # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
#      # For nbd options refer
#      # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
#      # mapOptions: lock_on_read,queue_depth=1024
#
#      # (optional) unmapOptions is a comma-separated list of unmap options.
#      # For krbd options refer
#      # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
#      # For nbd options refer
#      # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
#      # unmapOptions: force
#
#      # RBD image format. Defaults to "2".
#      imageFormat: "2"
#
#      # RBD image features, equivalent to OR'd bitfield value: 63
#      # Available for imageFormat: "2". Older releases of CSI RBD
#      # support only the `layering` feature. The Linux kernel (KRBD) supports the
#      # full feature complement as of 5.4
#      # imageFeatures: layering,fast-diff,object-map,deep-flatten,exclusive-lock
#      imageFeatures: layering
#
#    storageClass:
#      provisioner: rook-ceph.rbd.csi.ceph.com # csi-provisioner-name
#      enabled: true
#      name: rook-ceph-block
#      isDefault: false
#      allowVolumeExpansion: true
#      reclaimPolicy: Delete

# -- CSI driver name prefix for cephfs, rbd and nfs.
# @default -- `namespace name where rook-ceph operator is deployed`
csiDriverNamePrefix:
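# For example, if the rook-ceph operator runs in the "rook-ceph" namespace, leaving this
# empty yields driver names such as rook-ceph.rbd.csi.ceph.com and
# rook-ceph.cephfs.csi.ceph.com; an explicit prefix could be set like this (illustrative):
# csiDriverNamePrefix: rook-ceph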