# Default values for a single rook-ceph cluster
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# -- Namespace of the main rook operator
operatorNamespace: rook-ceph

# -- The metadata.name of the CephCluster CR
# @default -- The same as the namespace
clusterName:

# -- Optional override of the target kubernetes version
kubeVersion:
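# Useful mainly when rendering templates offline (e.g. `helm template`) where the
# target version cannot be detected; the value below is illustrative only:
# kubeVersion: "v1.25.4"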

# -- Cluster ceph.conf override
configOverride:
# configOverride: |
#   [global]
#   mon_allow_pool_delete = true
#   osd_pool_default_size = 3
#   osd_pool_default_min_size = 2

# Installs a debugging toolbox deployment
toolbox:
  # -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md)
  enabled: false
  # -- Toolbox image, defaults to the image used by the Ceph cluster
  image: #quay.io/ceph/ceph:v17.2.3
  # -- Toolbox tolerations
  tolerations: []
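  # If the toolbox should also run on tainted storage nodes, a standard Kubernetes
  # toleration can be supplied here; the key below is only an illustrative placeholder:
  # tolerations:
  #   - key: storage-node
  #     operator: Exists
  #     effect: NoSchedule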
  # -- Toolbox affinity
  affinity: {}
  # -- Toolbox resources
  resources:
    limits:
      cpu: "500m"
      memory: "1Gi"
    requests:
      cpu: "100m"
      memory: "128Mi"
  # -- Set the priority class for the toolbox if desired
  priorityClassName:
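  # For example, to schedule the toolbox with one of the critical classes used for the
  # Ceph daemons further below (illustrative only):
  # priorityClassName: system-cluster-critical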

monitoring:
  # -- Enable Prometheus integration. This will also create the RBAC rules needed to allow the operator to create ServiceMonitors.
  # Monitoring requires Prometheus to be pre-installed.
  enabled: false
  # -- Whether to create the Prometheus rules for Ceph alerts
  createPrometheusRules: false
  # -- The namespace in which to create the prometheus rules, if different from the rook cluster namespace.
  # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, the namespace with prometheus
  # deployed) to set rulesNamespace for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
  rulesNamespaceOverride:
  # Monitoring settings for external clusters:
  # externalMgrEndpoints: <list of endpoints>
  # externalMgrPrometheusPort: <port>
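  # A minimal sketch of the external-cluster settings above, assuming the external
  # ceph-mgr exposes its prometheus module on the default port; the IP is a placeholder:
  # externalMgrEndpoints:
  #   - ip: "203.0.113.10"
  # externalMgrPrometheusPort: 9283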
  # allow adding custom labels and annotations to the prometheus rule
  prometheusRule:
    # -- Labels applied to PrometheusRule
    labels: {}
    # -- Annotations applied to PrometheusRule
    annotations: {}
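    # For example, if your Prometheus instance only selects rules carrying specific
    # labels, they can be set here (illustrative values, adjust to your setup):
    # labels:
    #   prometheus: k8s
    #   role: alert-rules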

# -- Create & use PSP resources. Set this to the same value as the rook-ceph chart.
pspEnable: false

# The imagePullSecrets option allows pulling docker images from a private docker registry. The option will be passed to all service accounts.
# imagePullSecrets:
# - name: my-registry-secret

# All values below are taken from the CephCluster CRD
# -- Cluster configuration.
# @default -- See [below](#ceph-cluster-spec)
cephClusterSpec:
  # For more details, check https://rook.io/docs/rook/v1.10/CRDs/Cluster/ceph-cluster-crd/
  cephVersion:
    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
    # v16 is Pacific, v17 is Quincy.
    # RECOMMENDATION: In production, use a specific version tag instead of a general tag such as v17, which pulls the latest release
    # and could result in different versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
    # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v15.2.11-20200419.
    # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities.
    image: quay.io/ceph/ceph:v17.2.5
    # Whether to allow unsupported versions of Ceph. Currently `pacific` and `quincy` are supported.
    # Future versions such as `reef` (v18) would require this to be set to `true`.
    # Do not set to true in production.
    allowUnsupported: false

  # The path on the host where configuration files will be persisted. Must be specified.
  # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
  # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in a Minikube environment.
  dataDirHostPath: /var/lib/rook

  # Whether or not the upgrade should continue even if a check fails.
  # This means Ceph's status could be degraded and we don't recommend upgrading, but you might decide otherwise.
  # Use at your OWN risk.
  # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/v1.10/Upgrade/ceph-upgrade/
  skipUpgradeChecks: false

  # Whether or not to continue if PGs are not clean during an upgrade
  continueUpgradeAfterChecksEvenIfNotHealthy: false

  # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart.
  # If the timeout is exceeded and the OSD is not ok to stop, the operator will skip the upgrade for the current OSD and proceed with the next one
  # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, the operator will
  # continue with the upgrade of an OSD even if it is not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
  # The default wait timeout is 10 minutes.
  waitTimeoutForHealthyOSDInMinutes: 10

  mon:
    # Set the number of mons to be started. Generally recommended to be 3.
    # For highest availability, an odd number of mons should be specified.
    count: 3
    # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
    # Mons should only be allowed on the same node for test environments where data loss is acceptable.
    allowMultiplePerNode: false

  mgr:
    # When higher availability of the mgr is needed, increase the count to 2.
    # In that case, one mgr will be active and one in standby. When Ceph updates which
    # mgr is active, Rook will update the mgr services to match the active mgr.
    count: 2
    allowMultiplePerNode: false
    modules:
      # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules
      # are already enabled by other settings in the cluster CR.
      - name: pg_autoscaler
        enabled: true

  # enable the ceph dashboard for viewing cluster status
  dashboard:
    enabled: true
    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
    # urlPrefix: /ceph-dashboard
    # serve the dashboard at the given port.
    # port: 8443
    # Serve the dashboard using SSL (if using ingress to expose the dashboard and `ssl: true` you need to set
    # the corresponding "backend protocol" annotation(s) for your ingress controller of choice)
    ssl: true

  # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/CRDs/ceph-cluster-crd.md#network-configuration-settings
  # network:
  #   # enable host networking
  #   provider: host
  #   # EXPERIMENTAL: enable the Multus network provider
  #   provider: multus
  #   selectors:
  #     # The selector keys are required to be `public` and `cluster`.
  #     # Based on the configuration, the operator will do the following:
  #     #   1. If only the `public` selector key is specified, both the public_network and cluster_network Ceph settings will listen on that interface.
  #     #   2. If both the `public` and `cluster` selector keys are specified, the first one will point to the 'public_network' flag and the second one to 'cluster_network'.
  #     #
  #     # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus.
  #     #
  #     # public: public-conf --> NetworkAttachmentDefinition object name in Multus
  #     # cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus
  #   # Provide the internet protocol version. IPv6, IPv4, or an empty string are valid options. An empty string means IPv4.
  #   ipFamily: "IPv6"
  #   # Ceph daemons to listen on both IPv4 and IPv6 networks
  #   dualStack: false

  # enable the crash collector for ceph daemon crash collection
  crashCollector:
    disable: false
    # Uncomment daysToRetain to prune ceph crash entries older than the
    # specified number of days.
    # daysToRetain: 30

  # enable the log collector; daemons will log to files and rotate them
  logCollector:
    enabled: true
    periodicity: daily # one of: hourly, daily, weekly, monthly
    maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.

  # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
  cleanupPolicy:
    # Since cluster cleanup is destructive to data, confirmation is required.
    # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
    # This value should only be set when the cluster is about to be deleted. After the confirmation is set,
    # Rook will immediately stop configuring the cluster and only wait for the delete command.
    # If the empty string is set, Rook will not destroy any data on hosts during uninstall.
    confirmation: ""
    # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion
    sanitizeDisks:
      # method indicates if the entire disk should be sanitized or simply ceph's metadata
      # in both cases, re-installation is possible
      # possible choices are 'complete' or 'quick' (default)
      method: quick
      # dataSource indicates where to get random bytes from to write to the disk
      # possible choices are 'zero' (default) or 'random'
      # using random sources will consume entropy from the system and will take much more time than the zero source
      dataSource: zero
      # iteration: overwrite N times instead of the default (1)
      # takes an integer value
      iteration: 1
    # allowUninstallWithVolumes defines how the uninstall should be performed
    # If set to true, cephCluster deletion does not wait for the PVs to be deleted.
    allowUninstallWithVolumes: false

  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
  # tolerate taints with a key of 'storage-node'.
  # placement:
  #   all:
  #     nodeAffinity:
  #       requiredDuringSchedulingIgnoredDuringExecution:
  #         nodeSelectorTerms:
  #           - matchExpressions:
  #               - key: role
  #                 operator: In
  #                 values:
  #                   - storage-node
  #     podAffinity:
  #     podAntiAffinity:
  #     topologySpreadConstraints:
  #     tolerations:
  #       - key: storage-node
  #         operator: Exists
  #   # The above placement information can also be specified for mon, osd, and mgr components
  #   mon:
  #     # Monitor deployments may contain an anti-affinity rule for avoiding monitor
  #     # collocation on the same node. This is a required rule when host network is used
  #     # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
  #     # preferred rule with weight: 50.
  #   osd:
  #   mgr:
  #   cleanup:

  # annotations:
  #   all:
  #   mon:
  #   osd:
  #   cleanup:
  #   prepareosd:
  #   # If no mgr annotations are set, prometheus scrape annotations will be set by default.
  #   mgr:

  # labels:
  #   all:
  #   mon:
  #   osd:
  #   cleanup:
  #   mgr:
  #   prepareosd:
  #   # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by the operator.
  #   # These labels can be passed as a LabelSelector to Prometheus.
  #   monitoring:

  resources:
    mgr:
      limits:
        cpu: "1000m"
        memory: "1Gi"
      requests:
        cpu: "500m"
        memory: "512Mi"
    mon:
      limits:
        cpu: "2000m"
        memory: "2Gi"
      requests:
        cpu: "1000m"
        memory: "1Gi"
    osd:
      limits:
        cpu: "2000m"
        memory: "4Gi"
      requests:
        cpu: "1000m"
        memory: "4Gi"
    prepareosd:
      # limits: It is not recommended to set limits on the OSD prepare job
      #         since it's a one-time burst for memory that must be allowed to
      #         complete without an OOM kill. Note however that if a k8s
      #         limitRange guardrail is defined external to Rook, the lack of
      #         a limit here may result in a sync failure, in which case a
      #         limit should be added. 1200Mi may suffice for up to 15Ti
      #         OSDs; for larger devices 2Gi may be required.
      #         cf. https://github.com/rook/rook/pull/11103
      requests:
        cpu: "500m"
        memory: "50Mi"
    mgr-sidecar:
      limits:
        cpu: "500m"
        memory: "100Mi"
      requests:
        cpu: "100m"
        memory: "40Mi"
    crashcollector:
      limits:
        cpu: "500m"
        memory: "60Mi"
      requests:
        cpu: "100m"
        memory: "60Mi"
    logcollector:
      limits:
        cpu: "500m"
        memory: "1Gi"
      requests:
        cpu: "100m"
        memory: "100Mi"
    cleanup:
      limits:
        cpu: "500m"
        memory: "1Gi"
      requests:
        cpu: "500m"
        memory: "100Mi"

  # The option to automatically remove OSDs that are out and are safe to destroy.
  removeOSDsIfOutAndSafeToRemove: false

  # priority classes to apply to ceph resources
  priorityClassNames:
    mon: system-node-critical
    osd: system-node-critical
    mgr: system-cluster-critical

  storage: # cluster level storage configuration and selection
    useAllNodes: true
    useAllDevices: true
    # deviceFilter:
    # config:
    #   crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
    #   metadataDevice: "md0" # specify a non-rotational storage device so ceph-volume will use it as the block db device of bluestore
    #   databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
    #   journalSizeMB: "1024" # uncomment if the disks are 20 GB or smaller
    #   osdsPerDevice: "1" # this value can be overridden at the node or device level
    #   encryptedDevice: "true" # the default value for this option is "false"
    # # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
    # # nodes below will be used as storage resources. Each node's 'name' field should match its 'kubernetes.io/hostname' label.
    # nodes:
    #   - name: "172.17.4.201"
    #     devices: # specific devices to use for storage can be specified for each node
    #       - name: "sdb"
    #       - name: "nvme01" # multiple osds can be created on high performance devices
    #         config:
    #           osdsPerDevice: "5"
    #       - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
    #     config: # configuration can be specified at the node level which overrides the cluster level config
    #   - name: "172.17.4.301"
    #     deviceFilter: "^sd."

  # The section for configuring management of daemon disruptions during upgrade or fencing.
  disruptionManagement:
    # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
    # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
    # block eviction of OSDs by default and unblock them safely when drains are detected.
    managePodBudgets: true
    # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
    # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
    osdMaintenanceTimeout: 30
    # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up.
    # The operator will continue with the next drain if the timeout is exceeded. It only works if `managePodBudgets` is `true`.
    # No value or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
    pgHealthCheckTimeout: 0
    # If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy.
    # Only available on OpenShift.
    manageMachineDisruptionBudgets: false
    # Namespace in which to watch for the MachineDisruptionBudgets.
    machineDisruptionBudgetNamespace: openshift-machine-api

  # Configure the healthcheck and liveness probes for ceph pods.
  # Valid values for daemons are 'mon', 'osd', 'status'
  healthCheck:
    daemonHealth:
      mon:
        disabled: false
        interval: 45s
      osd:
        disabled: false
        interval: 60s
      status:
        disabled: false
        interval: 60s
    # Change the pod liveness probe; it works for all mon, mgr, and osd pods.
    livenessProbe:
      mon:
        disabled: false
      mgr:
        disabled: false
      osd:
        disabled: false

ingress:
  # -- Enable an ingress for the ceph-dashboard
  dashboard:
    {}
    # annotations:
    #   external-dns.alpha.kubernetes.io/hostname: dashboard.example.com
    #   nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2
    #   kubernetes.io/ingress.class: nginx
    #   # If the dashboard has ssl: true, the following will make sure the NGINX Ingress controller can expose the dashboard correctly
    #   nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    #   nginx.ingress.kubernetes.io/server-snippet: |
    #     proxy_ssl_verify off;
    # host:
    #   name: dashboard.example.com
    #   path: "/ceph-dashboard(/|$)(.*)"
    # tls:
    #   - hosts:
    #       - dashboard.example.com
    #     secretName: testsecret-tls
    ## Note: Only one of the ingress class annotation or `ingressClassName:` can be used at a time
    ## to set the ingress class
    # ingressClassName: nginx

# -- A list of CephBlockPool configurations to deploy
# @default -- See [below](#ceph-block-pools)
cephBlockPools:
  - name: ceph-blockpool
    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
    spec:
      failureDomain: host
      replicated:
        size: 3
    storageClass:
      enabled: true
      name: ceph-block
      isDefault: true
      reclaimPolicy: Delete
      allowVolumeExpansion: true
      mountOptions: []
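      # Mount options are passed through to the mounted filesystem on the RBD volume;
      # for example, to enable discard/TRIM on the mapped device (illustrative, not required):
      # mountOptions:
      #   - discard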
      # see https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies
      allowedTopologies: []
      # - matchLabelExpressions:
      #     - key: rook-ceph-role
      #       values:
      #         - storage-node
      # see https://github.com/rook/rook/blob/master/Documentation/ceph-block.md#provision-storage for available configuration
      parameters:
        # (optional) mapOptions is a comma-separated list of map options.
        # For krbd options refer
        # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
        # For nbd options refer
        # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
        # mapOptions: lock_on_read,queue_depth=1024

        # (optional) unmapOptions is a comma-separated list of unmap options.
        # For krbd options refer
        # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
        # For nbd options refer
        # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
        # unmapOptions: force

        # RBD image format. Defaults to "2".
        imageFormat: "2"

        # RBD image features, equivalent to OR'd bitfield value: 63
        # Available for imageFormat: "2". Older releases of CSI RBD
        # support only the `layering` feature. The Linux kernel (KRBD) supports the
        # full feature complement as of 5.4
        imageFeatures: layering

        # These secrets contain Ceph admin credentials.
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
        # Specify the filesystem type of the volume. If not specified, csi-provisioner
        # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
        # in hyperconverged settings where the volume is mounted on the same node as the osds.
        csi.storage.k8s.io/fstype: ext4

# -- A list of CephFileSystem configurations to deploy
# @default -- See [below](#ceph-file-systems)
cephFileSystems:
  - name: ceph-filesystem
    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration
    spec:
      metadataPool:
        replicated:
          size: 3
      dataPools:
        - failureDomain: host
          replicated:
            size: 3
          # Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools
          name: data0
      metadataServer:
        activeCount: 1
        activeStandby: true
        resources:
          limits:
            cpu: "2000m"
            memory: "4Gi"
          requests:
            cpu: "1000m"
            memory: "4Gi"
        priorityClassName: system-cluster-critical
    storageClass:
      enabled: true
      isDefault: false
      name: ceph-filesystem
      # (Optional) specify a data pool to use, must be the name of one of the data pools above, 'data0' by default
      pool: data0
      reclaimPolicy: Delete
      allowVolumeExpansion: true
      mountOptions: []
      # see https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem.md#provision-storage for available configuration
      parameters:
        # The secrets contain Ceph admin credentials.
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
        # Specify the filesystem type of the volume. If not specified, csi-provisioner
        # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
        # in hyperconverged settings where the volume is mounted on the same node as the osds.
        csi.storage.k8s.io/fstype: ext4

# -- Settings for the filesystem snapshot class
# @default -- See [CephFS Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#cephfs-snapshots)
cephFileSystemVolumeSnapshotClass:
  enabled: false
  name: ceph-filesystem
  isDefault: true
  deletionPolicy: Delete
  annotations: {}
  labels: {}
  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#cephfs-snapshots for available configuration
  parameters: {}

# -- Settings for the block pool snapshot class
# @default -- See [RBD Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#rbd-snapshots)
cephBlockPoolsVolumeSnapshotClass:
  enabled: false
  name: ceph-block
  isDefault: false
  deletionPolicy: Delete
  annotations: {}
  labels: {}
  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#rbd-snapshots for available configuration
  parameters: {}

# -- A list of CephObjectStore configurations to deploy
# @default -- See [below](#ceph-object-stores)
cephObjectStores:
  - name: ceph-objectstore
    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md#object-store-settings for available configuration
    spec:
      metadataPool:
        failureDomain: host
        replicated:
          size: 3
      dataPool:
        failureDomain: host
        erasureCoded:
          dataChunks: 2
          codingChunks: 1
      preservePoolsOnDelete: true
      gateway:
        port: 80
        resources:
          limits:
            cpu: "2000m"
            memory: "2Gi"
          requests:
            cpu: "1000m"
            memory: "1Gi"
        # securePort: 443
        # sslCertificateRef:
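        # To terminate TLS at the RGW instead, sslCertificateRef above should name a
        # Kubernetes secret holding the certificate; the name below is only a placeholder:
        # sslCertificateRef: ceph-objectstore-tls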
        instances: 1
        priorityClassName: system-cluster-critical
      healthCheck:
        bucket:
          interval: 60s
    storageClass:
      enabled: true
      name: ceph-bucket
      reclaimPolicy: Delete
      # see https://github.com/rook/rook/blob/master/Documentation/ceph-object-bucket-claim.md#storageclass for available configuration
      parameters:
        # note: objectStoreNamespace and objectStoreName are configured by the chart
        region: us-east-1