# Licensed under the Apache License, Version 2.0 (the "License");
2# you may not use this file except in compliance with the License.
3# You may obtain a copy of the License at
4#
5# http://www.apache.org/licenses/LICENSE-2.0
6#
7# Unless required by applicable law or agreed to in writing, software
8# distributed under the License is distributed on an "AS IS" BASIS,
9# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10# See the License for the specific language governing permissions and
11# limitations under the License.
12
13# Default values for cinder.
14# This is a YAML-formatted file.
15# Declare name/value pairs to be passed into your templates.
16# name: value
17
18---
19storage: ceph
20
21labels:
22 api:
23 node_selector_key: openstack-control-plane
24 node_selector_value: enabled
25 backup:
26 node_selector_key: openstack-control-plane
27 node_selector_value: enabled
28 job:
29 node_selector_key: openstack-control-plane
30 node_selector_value: enabled
31 scheduler:
32 node_selector_key: openstack-control-plane
33 node_selector_value: enabled
34 test:
35 node_selector_key: openstack-control-plane
36 node_selector_value: enabled
37 volume:
38 node_selector_key: openstack-control-plane
39 node_selector_value: enabled
40
41release_group: null
42
43images:
44 tags:
45 test: docker.io/xrally/xrally-openstack:2.0.0
    db_init: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
47 cinder_db_sync: docker.io/openstackhelm/cinder:wallaby-ubuntu_focal
48 db_drop: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
    rabbit_init: docker.io/rabbitmq:3.7-management
    ks_user: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
51 ks_service: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
52 ks_endpoints: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
53 cinder_api: docker.io/openstackhelm/cinder:wallaby-ubuntu_focal
54 bootstrap: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
55 cinder_scheduler: docker.io/openstackhelm/cinder:wallaby-ubuntu_focal
56 cinder_volume: docker.io/openstackhelm/cinder:wallaby-ubuntu_focal
57 cinder_volume_usage_audit: docker.io/openstackhelm/cinder:wallaby-ubuntu_focal
    cinder_storage_init: docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_focal
    cinder_backup: docker.io/openstackhelm/cinder:wallaby-ubuntu_focal
    cinder_backup_storage_init: docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_focal
    dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
62 image_repo_sync: docker.io/docker:17.07.0
63 pull_policy: "IfNotPresent"
64 local_registry:
65 active: false
66 exclude:
67 - dep_check
68 - image_repo_sync
69
70jobs:
71 volume_usage_audit:
72 cron: "5 * * * *"
73 starting_deadline: 600
74 history:
75 success: 3
76 failed: 1
77
78pod:
79 security_context:
80 volume_usage_audit:
81 pod:
82 runAsUser: 42424
83 container:
84 cinder_volume_usage_audit:
85 readOnlyRootFilesystem: true
86 allowPrivilegeEscalation: false
87 cinder_api:
88 pod:
89 runAsUser: 42424
90 container:
91 ceph_coordination_volume_perms:
92 runAsUser: 0
93 readOnlyRootFilesystem: true
94 cinder_api:
95 readOnlyRootFilesystem: true
96 allowPrivilegeEscalation: false
97 cinder_backup:
98 pod:
99 runAsUser: 42424
100 container:
101 ceph_backup_keyring_placement:
102 runAsUser: 0
103 readOnlyRootFilesystem: true
104 ceph_keyring_placement:
105 runAsUser: 0
106 readOnlyRootFilesystem: true
107 ceph_backup_volume_perms:
108 runAsUser: 0
109 readOnlyRootFilesystem: true
110 ceph_coordination_volume_perms:
111 runAsUser: 0
112 readOnlyRootFilesystem: true
113 cinder_backup:
114 capabilities:
115 add:
116 - SYS_ADMIN
117 readOnlyRootFilesystem: true
118 runAsUser: 0
119 cinder_scheduler:
120 pod:
121 runAsUser: 42424
122 container:
123 ceph_coordination_volume_perms:
124 runAsUser: 0
125 readOnlyRootFilesystem: true
126 cinder_scheduler:
127 readOnlyRootFilesystem: true
128 allowPrivilegeEscalation: false
129 cinder_volume:
130 pod:
131 runAsUser: 42424
132 container:
133 ceph_keyring_placement:
134 runAsUser: 0
135 readOnlyRootFilesystem: true
136 ceph_coordination_volume_perms:
137 runAsUser: 0
138 readOnlyRootFilesystem: true
139 init_cinder_conf:
140 runAsUser: 0
141 readOnlyRootFilesystem: true
142 cinder_volume:
          capabilities:
            add:
              - SYS_ADMIN
          readOnlyRootFilesystem: true
147 storage_init:
148 pod:
149 runAsUser: 42424
150 container:
151 ceph_keyring_placement:
152 runAsUser: 0
153 readOnlyRootFilesystem: true
154 cinder_backup_storage_init:
155 readOnlyRootFilesystem: true
156 allowPrivilegeEscalation: false
157 clean:
158 pod:
159 runAsUser: 42424
160 container:
161 cinder_volume_rbd_secret_clean:
162 readOnlyRootFilesystem: true
163 allowPrivilegeEscalation: false
164 create_internal_tenant:
165 pod:
166 runAsUser: 42424
167 container:
168 create_internal_tenant:
169 readOnlyRootFilesystem: true
170 allowPrivilegeEscalation: false
171 affinity:
172 anti:
173 type:
174 default: preferredDuringSchedulingIgnoredDuringExecution
175 topologyKey:
176 default: kubernetes.io/hostname
177 weight:
178 default: 10
179 tolerations:
180 cinder:
181 enabled: false
182 tolerations:
183 - key: node-role.kubernetes.io/master
184 operator: Exists
185 effect: NoSchedule
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
          effect: NoSchedule
  useHostNetwork:
190 volume: false
191 backup: false
192 mounts:
193 cinder_api:
194 init_container: null
195 cinder_api:
196 volumeMounts:
197 volumes:
198 cinder_scheduler:
199 init_container: null
200 cinder_scheduler:
201 volumeMounts:
202 volumes:
203 cinder_volume:
204 init_container: null
205 cinder_volume:
206 volumeMounts:
207 volumes:
208 cinder_volume_usage_audit:
209 init_container: null
210 cinder_volume_usage_audit:
211 volumeMounts:
212 volumes:
213 cinder_backup:
214 init_container: null
215 cinder_backup:
216 volumeMounts:
217 volumes:
218 cinder_tests:
219 init_container: null
220 cinder_tests:
221 volumeMounts:
222 volumes:
223 cinder_db_sync:
224 cinder_db_sync:
225 volumeMounts:
226 volumes:
227 replicas:
228 api: 1
229 volume: 1
230 scheduler: 1
231 backup: 1
232 lifecycle:
233 upgrades:
234 deployments:
235 revision_history: 3
236 pod_replacement_strategy: RollingUpdate
237 rolling_update:
238 max_unavailable: 1
239 max_surge: 3
240 disruption_budget:
241 api:
242 min_available: 0
243 termination_grace_period:
244 api:
245 timeout: 30
246 resources:
247 enabled: false
248 api:
249 requests:
250 memory: "128Mi"
251 cpu: "100m"
252 limits:
253 memory: "1024Mi"
254 cpu: "2000m"
255 scheduler:
256 requests:
257 memory: "128Mi"
258 cpu: "100m"
259 limits:
260 memory: "1024Mi"
261 cpu: "2000m"
262 volume:
263 requests:
264 memory: "128Mi"
265 cpu: "100m"
266 limits:
267 memory: "1024Mi"
268 cpu: "2000m"
269 jobs:
270 volume_usage_audit:
271 requests:
272 memory: "128Mi"
273 cpu: "100m"
274 limits:
275 memory: "1024Mi"
276 cpu: "2000m"
277 bootstrap:
278 requests:
279 memory: "128Mi"
280 cpu: "100m"
281 limits:
282 memory: "1024Mi"
283 cpu: "2000m"
284 rabbit_init:
285 requests:
286 memory: "128Mi"
287 cpu: "100m"
288 limits:
289 memory: "1024Mi"
290 cpu: "2000m"
291 db_init:
292 requests:
293 memory: "128Mi"
294 cpu: "100m"
295 limits:
296 memory: "1024Mi"
297 cpu: "2000m"
298 db_sync:
299 requests:
300 memory: "128Mi"
301 cpu: "100m"
302 limits:
303 memory: "1024Mi"
304 cpu: "2000m"
305 db_drop:
306 requests:
307 memory: "128Mi"
308 cpu: "100m"
309 limits:
310 memory: "1024Mi"
311 cpu: "2000m"
312 clean:
313 requests:
314 memory: "128Mi"
315 cpu: "100m"
316 limits:
317 memory: "1024Mi"
318 cpu: "2000m"
319 backup_storage_init:
320 requests:
321 memory: "128Mi"
322 cpu: "100m"
323 limits:
324 memory: "1024Mi"
325 cpu: "2000m"
326 storage_init:
327 requests:
328 memory: "128Mi"
329 cpu: "100m"
330 limits:
331 memory: "1024Mi"
332 cpu: "2000m"
333 ks_endpoints:
334 requests:
335 memory: "128Mi"
336 cpu: "100m"
337 limits:
338 memory: "1024Mi"
339 cpu: "2000m"
340 ks_service:
341 requests:
342 memory: "128Mi"
343 cpu: "100m"
344 limits:
345 memory: "1024Mi"
346 cpu: "2000m"
347 ks_user:
348 requests:
349 memory: "128Mi"
350 cpu: "100m"
351 limits:
352 memory: "1024Mi"
353 cpu: "2000m"
354 tests:
355 requests:
356 memory: "128Mi"
357 cpu: "100m"
358 limits:
359 memory: "1024Mi"
360 cpu: "2000m"
361 image_repo_sync:
362 requests:
363 memory: "128Mi"
364 cpu: "100m"
365 limits:
366 memory: "1024Mi"
367 cpu: "2000m"
368
369bootstrap:
370 enabled: true
371 ks_user: admin
372 bootstrap_conf_backends: true
373 volume_types:
374 name:
375 group:
376 volume_backend_name:
377 # access_type: "private"
378 # If you set up access_type to private, only the creator
379 # will get an access to the volume type. You can extend
380 # the access to your volume type by providing a list of
381 # domain names and projects as shown below
382 # grant_access:
383 # <domain name 1>:
384 # - <project name 1>
385 # - <project name 2>
386 # <...>
387 # <domain name 2>:
388 # - <project name 1>
389 # <...>
390 # Volume QoS if any. By default, None QoS is created.
391 # Below values with a number at the end need to be replaced
392 # with real names.
393 # volume_qos:
394 # qos_name_1:
395 # consumer: front-end
396 # properties:
397 # key_1: value_1
398 # key_2: value_2
399 # associates:
400 # - volume_type_1
401 # - volume_type_2
402
403network:
404 api:
405 ingress:
406 public: true
407 classes:
408 namespace: "nginx"
409 cluster: "nginx-cluster"
410 annotations:
411 nginx.ingress.kubernetes.io/rewrite-target: /
412 external_policy_local: false
413 node_port:
414 enabled: false
415 port: 30877
416
417ceph_client:
  # enable this when there is a need to create a second ceph backend pointing
419 # to external ceph cluster
420 enable_external_ceph_backend: false
  # change this in case the first ceph backend name pointing to the internal
  # ceph cluster is different
423 internal_ceph_backend: rbd1
424 configmap: ceph-etc
425 user_secret_name: pvc-ceph-client-key
426 external_ceph:
427 # Only when enable_external_ceph_backend is true and rbd_user is NOT null
428 # secret for external ceph keyring will be created.
429 rbd_user: null
430 rbd_user_keyring: null
    configmap: null
    conf:
      global: null
      osd: null
conf:
  paste: {}
  policy: {}
  api_audit_map:
439 DEFAULT:
440 target_endpoint_type: None
441 custom_actions:
442 associate: update/associate
443 disassociate: update/disassociate_all
444 disassociate_all: update/disassociate_all
445 associations: read/list/associations
446 path_keywords:
447 defaults: None
448 detail: None
449 limits: None
450 os-quota-specs: project
451 qos-specs: qos-spec
452 snapshots: snapshot
453 types: type
454 volumes: volume
455 service_endpoints:
      volume: service/storage/block
      volumev2: service/storage/block
      volumev3: service/storage/block
459 cinder_sudoers: |
460 # This sudoers file supports rootwrap for both Kolla and LOCI Images.
461 Defaults !requiretty
462 Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin"
463 cinder ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/cinder-rootwrap /etc/cinder/rootwrap.conf *, /var/lib/openstack/bin/cinder-rootwrap /etc/cinder/rootwrap.conf *
464 rootwrap: |
465 # Configuration for cinder-rootwrap
466 # This file should be owned by (and only-writeable by) the root user
467
468 [DEFAULT]
469 # List of directories to load filter definitions from (separated by ',').
470 # These directories MUST all be only writeable by root !
471 filters_path=/etc/cinder/rootwrap.d
472
473 # List of directories to search executables in, in case filters do not
474 # explicitely specify a full path (separated by ',')
475 # If not specified, defaults to system PATH environment variable.
476 # These directories MUST all be only writeable by root !
477 exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin
478
479 # Enable logging to syslog
480 # Default value is False
481 use_syslog=False
482
483 # Which syslog facility to use.
484 # Valid values include auth, authpriv, syslog, local0, local1...
485 # Default value is 'syslog'
486 syslog_log_facility=syslog
487
488 # Which messages to log.
489 # INFO means log all usage
490 # ERROR means only log unsuccessful attempts
491 syslog_log_level=ERROR
492 rootwrap_filters:
493 volume:
494 pods:
495 - volume
496 content: |
497 # cinder-rootwrap command filters for volume nodes
498 # This file should be owned by (and only-writeable by) the root user
499
500 [Filters]
501 # cinder/volume/iscsi.py: iscsi_helper '--op' ...
502 ietadm: CommandFilter, ietadm, root
503 tgtadm: CommandFilter, tgtadm, root
504 iscsictl: CommandFilter, iscsictl, root
505 tgt-admin: CommandFilter, tgt-admin, root
506 cinder-rtstool: CommandFilter, cinder-rtstool, root
507 scstadmin: CommandFilter, scstadmin, root
508
509 # LVM related show commands
510 pvs: EnvFilter, env, root, LC_ALL=C, pvs
511 vgs: EnvFilter, env, root, LC_ALL=C, vgs
512 lvs: EnvFilter, env, root, LC_ALL=C, lvs
513 lvdisplay: EnvFilter, env, root, LC_ALL=C, lvdisplay
514
515 # -LVM related show commands with suppress fd warnings
516 pvs_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, pvs
517 vgs_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, vgs
518 lvs_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvs
519 lvdisplay_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvdisplay
520
521
522 # -LVM related show commands conf var
523 pvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, pvs
524 vgs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, vgs
525 lvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvs
526 lvdisplay_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvdisplay
527
528 # -LVM conf var with suppress fd_warnings
529 pvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, pvs
530 vgs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, vgs
531 lvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvs
532 lvdisplay_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvdisplay
533
534 # os-brick library commands
535 # os_brick.privileged.run_as_root oslo.privsep context
536 # This line ties the superuser privs with the config files, context name,
537 # and (implicitly) the actual python code invoked.
538 privsep-rootwrap: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.*
539 # The following and any cinder/brick/* entries should all be obsoleted
540 # by privsep, and may be removed once the os-brick version requirement
541 # is updated appropriately.
542 scsi_id: CommandFilter, /lib/udev/scsi_id, root
543 drbdadm: CommandFilter, drbdadm, root
544
545 # cinder/brick/local_dev/lvm.py: 'vgcreate', vg_name, pv_list
546 vgcreate: CommandFilter, vgcreate, root
547
548 # cinder/brick/local_dev/lvm.py: 'lvcreate', '-L', sizestr, '-n', volume_name,..
549 # cinder/brick/local_dev/lvm.py: 'lvcreate', '-L', ...
550 lvcreate: EnvFilter, env, root, LC_ALL=C, lvcreate
551 lvcreate_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvcreate
552 lvcreate_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvcreate
553 lvcreate_lvmconf_fdwarn: EnvFilter, env, root, LVM_SYSTEM_DIR=, LVM_SUPPRESS_FD_WARNINGS=, LC_ALL=C, lvcreate
554
555 # cinder/volume/driver.py: 'dd', 'if=%s' % srcstr, 'of=%s' % deststr,...
556 dd: CommandFilter, dd, root
557
558 # cinder/volume/driver.py: 'lvremove', '-f', %s/%s % ...
559 lvremove: CommandFilter, lvremove, root
560
561 # cinder/volume/driver.py: 'lvrename', '%(vg)s', '%(orig)s' '(new)s'...
562 lvrename: CommandFilter, lvrename, root
563
564 # cinder/brick/local_dev/lvm.py: 'lvextend', '-L' '%(new_size)s', '%(lv_name)s' ...
565 # cinder/brick/local_dev/lvm.py: 'lvextend', '-L' '%(new_size)s', '%(thin_pool)s' ...
566 lvextend: EnvFilter, env, root, LC_ALL=C, lvextend
567 lvextend_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvextend
568 lvextend_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvextend
569 lvextend_lvmconf_fdwarn: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvextend
570
571 # cinder/brick/local_dev/lvm.py: 'lvchange -a y -K <lv>'
572 lvchange: CommandFilter, lvchange, root
573
574 # cinder/brick/local_dev/lvm.py: 'lvconvert', '--merge', snapshot_name
575 lvconvert: CommandFilter, lvconvert, root
576
577 # cinder/volume/driver.py: 'iscsiadm', '-m', 'discovery', '-t',...
578 # cinder/volume/driver.py: 'iscsiadm', '-m', 'node', '-T', ...
579 iscsiadm: CommandFilter, iscsiadm, root
580
581 # cinder/volume/utils.py: utils.temporary_chown(path, 0)
582 chown: CommandFilter, chown, root
583
584 # cinder/volume/utils.py: copy_volume(..., ionice='...')
585 ionice_1: ChainingRegExpFilter, ionice, root, ionice, -c[0-3], -n[0-7]
586 ionice_2: ChainingRegExpFilter, ionice, root, ionice, -c[0-3]
587
588 # cinder/volume/utils.py: setup_blkio_cgroup()
589 cgcreate: CommandFilter, cgcreate, root
590 cgset: CommandFilter, cgset, root
591 cgexec: ChainingRegExpFilter, cgexec, root, cgexec, -g, blkio:\S+
592
593 # cinder/volume/driver.py
594 dmsetup: CommandFilter, dmsetup, root
595 ln: CommandFilter, ln, root
596
597 # cinder/image/image_utils.py
598 qemu-img: EnvFilter, env, root, LC_ALL=C, qemu-img
599 qemu-img_convert: CommandFilter, qemu-img, root
600
601 udevadm: CommandFilter, udevadm, root
602
603 # cinder/volume/driver.py: utils.read_file_as_root()
604 cat: CommandFilter, cat, root
605
606 # cinder/volume/nfs.py
607 stat: CommandFilter, stat, root
608 mount: CommandFilter, mount, root
609 df: CommandFilter, df, root
610 du: CommandFilter, du, root
611 truncate: CommandFilter, truncate, root
612 chmod: CommandFilter, chmod, root
613 rm: CommandFilter, rm, root
614
615 # cinder/volume/drivers/remotefs.py
616 mkdir: CommandFilter, mkdir, root
617
618 # cinder/volume/drivers/netapp/nfs.py:
619 netapp_nfs_find: RegExpFilter, find, root, find, ^[/]*([^/\0]+(/+)?)*$, -maxdepth, \d+, -name, img-cache.*, -amin, \+\d+
620
621 # cinder/volume/drivers/glusterfs.py
622 chgrp: CommandFilter, chgrp, root
623 umount: CommandFilter, umount, root
624 fallocate: CommandFilter, fallocate, root
625
626 # cinder/volumes/drivers/hds/hds.py:
627 hus-cmd: CommandFilter, hus-cmd, root
628 hus-cmd_local: CommandFilter, /usr/local/bin/hus-cmd, root
629
630 # cinder/volumes/drivers/hds/hnas_backend.py
631 ssc: CommandFilter, ssc, root
632
633 # cinder/brick/initiator/connector.py:
634 ls: CommandFilter, ls, root
635 tee: CommandFilter, tee, root
636 multipath: CommandFilter, multipath, root
637 multipathd: CommandFilter, multipathd, root
638 systool: CommandFilter, systool, root
639
640 # cinder/volume/drivers/block_device.py
641 blockdev: CommandFilter, blockdev, root
642
643 # cinder/volume/drivers/ibm/gpfs.py
644 # cinder/volume/drivers/tintri.py
645 mv: CommandFilter, mv, root
646
647 # cinder/volume/drivers/ibm/gpfs.py
648 cp: CommandFilter, cp, root
649 mmgetstate: CommandFilter, /usr/lpp/mmfs/bin/mmgetstate, root
650 mmclone: CommandFilter, /usr/lpp/mmfs/bin/mmclone, root
651 mmlsattr: CommandFilter, /usr/lpp/mmfs/bin/mmlsattr, root
652 mmchattr: CommandFilter, /usr/lpp/mmfs/bin/mmchattr, root
653 mmlsconfig: CommandFilter, /usr/lpp/mmfs/bin/mmlsconfig, root
654 mmlsfs: CommandFilter, /usr/lpp/mmfs/bin/mmlsfs, root
655 mmlspool: CommandFilter, /usr/lpp/mmfs/bin/mmlspool, root
656 mkfs: CommandFilter, mkfs, root
657 mmcrfileset: CommandFilter, /usr/lpp/mmfs/bin/mmcrfileset, root
658 mmlinkfileset: CommandFilter, /usr/lpp/mmfs/bin/mmlinkfileset, root
659 mmunlinkfileset: CommandFilter, /usr/lpp/mmfs/bin/mmunlinkfileset, root
660 mmdelfileset: CommandFilter, /usr/lpp/mmfs/bin/mmdelfileset, root
661 mmcrsnapshot: CommandFilter, /usr/lpp/mmfs/bin/mmcrsnapshot, root
662 mmdelsnapshot: CommandFilter, /usr/lpp/mmfs/bin/mmdelsnapshot, root
663
664 # cinder/volume/drivers/ibm/gpfs.py
665 # cinder/volume/drivers/ibm/ibmnas.py
666 find_maxdepth_inum: RegExpFilter, find, root, find, ^[/]*([^/\0]+(/+)?)*$, -maxdepth, \d+, -ignore_readdir_race, -inum, \d+, -print0, -quit
667
668 # cinder/brick/initiator/connector.py:
669 aoe-revalidate: CommandFilter, aoe-revalidate, root
670 aoe-discover: CommandFilter, aoe-discover, root
671 aoe-flush: CommandFilter, aoe-flush, root
672
673 # cinder/brick/initiator/linuxscsi.py:
674 sg_scan: CommandFilter, sg_scan, root
675
676 #cinder/backup/services/tsm.py
        dsmc: CommandFilter, /usr/bin/dsmc, root
678
679 # cinder/volume/drivers/hitachi/hbsd_horcm.py
680 raidqry: CommandFilter, raidqry, root
681 raidcom: CommandFilter, raidcom, root
682 pairsplit: CommandFilter, pairsplit, root
683 paircreate: CommandFilter, paircreate, root
684 pairdisplay: CommandFilter, pairdisplay, root
685 pairevtwait: CommandFilter, pairevtwait, root
686 horcmstart.sh: CommandFilter, horcmstart.sh, root
687 horcmshutdown.sh: CommandFilter, horcmshutdown.sh, root
688 horcmgr: EnvFilter, env, root, HORCMINST=, /etc/horcmgr
689
690 # cinder/volume/drivers/hitachi/hbsd_snm2.py
691 auman: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auman
692 auluref: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluref
693 auhgdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgdef
694 aufibre1: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aufibre1
695 auhgwwn: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgwwn
696 auhgmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgmap
697 autargetmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetmap
698 aureplicationvvol: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationvvol
699 auluadd: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluadd
700 auludel: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auludel
701 auluchgsize: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluchgsize
702 auchapuser: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auchapuser
703 autargetdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetdef
704 autargetopt: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetopt
705 autargetini: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetini
706 auiscsi: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auiscsi
707 audppool: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/audppool
708 aureplicationlocal: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationlocal
709 aureplicationmon: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationmon
710
711 # cinder/volume/drivers/hgst.py
712 vgc-cluster: CommandFilter, vgc-cluster, root
713
714 # cinder/volume/drivers/vzstorage.py
715 pstorage-mount: CommandFilter, pstorage-mount, root
716 pstorage: CommandFilter, pstorage, root
717 ploop: CommandFilter, ploop, root
718
719 # initiator/connector.py:
720 drv_cfg: CommandFilter, /opt/emc/scaleio/sdc/bin/drv_cfg, root, /opt/emc/scaleio/sdc/bin/drv_cfg, --query_guid
721 ceph:
722 override:
723 append:
724 monitors: []
725 admin_keyring: null
726 pools:
727 backup:
728 replication: 3
729 crush_rule: replicated_rule
730 chunk_size: 8
731 app_name: cinder-backup
732 cinder.volumes:
733 replication: 3
734 crush_rule: replicated_rule
735 chunk_size: 8
736 app_name: cinder-volume
737 cinder:
738 DEFAULT:
739 volume_usage_audit_period: hour
740 resource_query_filters_file: /etc/cinder/resource_filters.json
741 log_config_append: /etc/cinder/logging.conf
742 use_syslog: false
743 use_stderr: true
744 enable_v1_api: false
      enable_v2_api: false
      volume_name_template: "%s"
747 osapi_volume_workers: 1
748 glance_api_version: 2
749 os_region_name: RegionOne
750 host: cinder-volume-worker
751 # NOTE(portdirect): the bind port should not be defined, and is manipulated
752 # via the endpoints section.
753 osapi_volume_listen_port: null
754 enabled_backends: "rbd1"
755 default_volume_type: "rbd1"
756 # NOTE(portdirect): "cinder.backup.drivers.ceph" and
757 # "cinder.backup.drivers.posix" also supported
758 # NOTE(rchurch): As of Stein, drivers by class name are required
759 # - cinder.backup.drivers.swift.SwiftBackupDriver
760 # - cinder.backup.drivers.ceph.CephBackupDriver
761 # - cinder.backup.drivers.posix.PosixBackupDriver
762 backup_driver: "cinder.backup.drivers.swift.SwiftBackupDriver"
763 # Backup: Ceph RBD options
764 backup_ceph_conf: "/etc/ceph/ceph.conf"
765 backup_ceph_user: cinderbackup
766 backup_ceph_pool: cinder.backups
767 # Backup: Posix options
768 backup_posix_path: /var/lib/cinder/backup
769 auth_strategy: keystone
770 # Internal tenant id
771 internal_project_name: internal_cinder
772 internal_user_name: internal_cinder
773 database:
774 max_retries: -1
775 keystone_authtoken:
776 auth_version: v3
777 auth_type: password
778 memcache_security_strategy: ENCRYPT
      service_type: volumev3
    nova:
781 auth_type: password
782 auth_version: v3
783 interface: internal
    oslo_policy:
785 policy_file: /etc/cinder/policy.yaml
786 oslo_concurrency:
787 lock_path: "/var/lib/cinder/tmp"
788 oslo_messaging_notifications:
789 driver: messagingv2
790 oslo_middleware:
791 enable_proxy_headers_parsing: true
792 oslo_messaging_rabbit:
793 rabbit_ha_queues: true
794 coordination:
795 backend_url: file:///var/lib/cinder/coordination
796 service_user:
797 auth_type: password
798 send_service_user_token: false
799 logging:
800 loggers:
801 keys:
802 - root
803 - cinder
804 handlers:
805 keys:
806 - stdout
807 - stderr
808 - "null"
809 formatters:
810 keys:
811 - context
812 - default
813 logger_root:
814 level: WARNING
815 handlers: 'null'
816 logger_cinder:
817 level: INFO
818 handlers:
819 - stdout
820 qualname: cinder
821 logger_amqp:
822 level: WARNING
823 handlers: stderr
824 qualname: amqp
825 logger_amqplib:
826 level: WARNING
827 handlers: stderr
828 qualname: amqplib
829 logger_eventletwsgi:
830 level: WARNING
831 handlers: stderr
832 qualname: eventlet.wsgi.server
833 logger_sqlalchemy:
834 level: WARNING
835 handlers: stderr
836 qualname: sqlalchemy
837 logger_boto:
838 level: WARNING
839 handlers: stderr
840 qualname: boto
841 handler_null:
842 class: logging.NullHandler
843 formatter: default
844 args: ()
845 handler_stdout:
846 class: StreamHandler
847 args: (sys.stdout,)
848 formatter: context
849 handler_stderr:
850 class: StreamHandler
851 args: (sys.stderr,)
852 formatter: context
853 formatter_context:
854 class: oslo_log.formatters.ContextFormatter
855 datefmt: "%Y-%m-%d %H:%M:%S"
856 formatter_default:
857 format: "%(message)s"
858 datefmt: "%Y-%m-%d %H:%M:%S"
859 rabbitmq:
860 # NOTE(rk760n): adding rmq policy to mirror messages from notification queues and set expiration time for the ones
861 policies:
862 - vhost: "cinder"
863 name: "ha_ttl_cinder"
864 definition:
          # mirror messages to other nodes in rmq cluster
866 ha-mode: "all"
867 ha-sync-mode: "automatic"
868 # 70s
869 message-ttl: 70000
870 priority: 0
871 apply-to: all
872 pattern: '^(?!(amq\.|reply_)).*'
873
874 backends:
875 # Those options will be written to backends.conf as-is.
876 rbd1:
877 volume_driver: cinder.volume.drivers.rbd.RBDDriver
878 volume_backend_name: rbd1
879 rbd_pool: cinder.volumes
880 rbd_ceph_conf: "/etc/ceph/ceph.conf"
881 rbd_flatten_volume_from_snapshot: false
882 report_discard_supported: true
883 rbd_max_clone_depth: 5
884 rbd_store_chunk_size: 4
885 rados_connect_timeout: -1
886 rbd_user: cinder
887 rbd_secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
      image_volume_cache_enabled: true
      image_volume_cache_max_size_gb: 200
890 image_volume_cache_max_count: 50
891 rally_tests:
892 run_tempest: false
893 clean_up: |
894 VOLUMES=$(openstack volume list -f value | grep -e "^s_rally_" | awk '{ print $1 }')
895 if [ -n "$VOLUMES" ]; then
896 echo $VOLUMES | xargs openstack volume delete
897 fi
898 tests:
899 CinderVolumes.create_and_delete_volume:
900 - args:
901 size: 1
902 runner:
903 concurrency: 1
904 times: 1
905 type: constant
906 sla:
907 failure_rate:
908 max: 0
909 - args:
910 size:
911 max: 5
912 min: 1
913 runner:
914 concurrency: 1
915 times: 1
916 type: constant
917 sla:
918 failure_rate:
919 max: 0
920 resource_filters:
921 volume:
922 - name
923 - status
924 - metadata
925 - bootable
926 - migration_status
927 - availability_zone
928 - group_id
929 backup:
930 - name
931 - status
932 - volume_id
933 snapshot:
934 - name
935 - status
936 - volume_id
937 - metadata
938 - availability_zone
939 group: []
940 group_snapshot:
941 - status
942 - group_id
943 attachment:
944 - volume_id
945 - status
946 - instance_id
947 - attach_status
948 message:
949 - resource_uuid
950 - resource_type
951 - event_id
952 - request_id
953 - message_level
954 pool:
955 - name
956 - volume_type
957 volume_type: []
958 enable_iscsi: false
959backup:
960 external_ceph_rbd:
961 enabled: false
962 admin_keyring: null
    configmap: null
    conf:
      global: null
      osd: null
967 posix:
968 volume:
969 class_name: general
970 size: 10Gi
971
972dependencies:
973 dynamic:
974 common:
975 local_image_registry:
976 jobs:
977 - cinder-image-repo-sync
978 services:
979 - endpoint: node
980 service: local_image_registry
981 static:
982 api:
983 jobs:
984 - cinder-db-sync
985 - cinder-ks-user
986 - cinder-ks-endpoints
987 - cinder-rabbit-init
988 - cinder-storage-init
989 services:
990 - endpoint: internal
991 service: oslo_db
992 - endpoint: internal
993 service: identity
994 backup:
995 jobs:
996 - cinder-db-sync
997 - cinder-ks-user
998 - cinder-ks-endpoints
999 - cinder-rabbit-init
1000 - cinder-storage-init
1001 - cinder-backup-storage-init
1002 services:
1003 - endpoint: internal
1004 service: identity
1005 - endpoint: internal
          service: volume
    backup_storage_init:
1008 jobs: null
1009 bootstrap:
1010 services:
1011 - endpoint: internal
1012 service: identity
1013 - endpoint: internal
          service: volume
      pod:
1016 - requireSameNode: false
1017 labels:
1018 application: cinder
1019 component: volume
1020 clean:
1021 jobs: null
1022 db_drop:
1023 services:
1024 - endpoint: internal
1025 service: oslo_db
1026 db_init:
1027 services:
1028 - endpoint: internal
1029 service: oslo_db
1030 db_sync:
1031 jobs:
1032 - cinder-db-init
1033 services:
1034 - endpoint: internal
1035 service: oslo_db
1036 ks_endpoints:
1037 jobs:
1038 - cinder-ks-service
1039 services:
1040 - endpoint: internal
1041 service: identity
1042 ks_service:
1043 services:
1044 - endpoint: internal
1045 service: identity
1046 ks_user:
1047 services:
1048 - endpoint: internal
1049 service: identity
1050 rabbit_init:
1051 services:
1052 - service: oslo_messaging
1053 endpoint: internal
1054 scheduler:
1055 jobs:
1056 - cinder-db-sync
1057 - cinder-ks-user
1058 - cinder-ks-endpoints
1059 - cinder-rabbit-init
1060 - cinder-storage-init
1061 services:
1062 - endpoint: internal
1063 service: identity
1064 - endpoint: internal
          service: volume
    storage_init:
1067 jobs: null
1068 tests:
1069 services:
1070 - endpoint: internal
1071 service: identity
1072 - endpoint: internal
          service: volume
    volume:
1075 jobs:
1076 - cinder-db-sync
1077 - cinder-ks-user
1078 - cinder-ks-endpoints
1079 - cinder-rabbit-init
1080 - cinder-storage-init
1081 services:
1082 - endpoint: internal
1083 service: identity
1084 - endpoint: internal
          service: volume
    volume_usage_audit:
1087 jobs:
1088 - cinder-db-sync
1089 - cinder-ks-user
1090 - cinder-ks-endpoints
1091 - cinder-rabbit-init
1092 - cinder-storage-init
1093 services:
1094 - endpoint: internal
1095 service: identity
1096 - endpoint: internal
          service: volume
    image_repo_sync:
1099 services:
1100 - endpoint: internal
1101 service: local_image_registry
1102 create_internal_tenant:
1103 services:
1104 - endpoint: internal
1105 service: identity
1106
1107# Names of secrets used by bootstrap and environmental checks
1108secrets:
1109 identity:
1110 admin: cinder-keystone-admin
1111 cinder: cinder-keystone-user
1112 test: cinder-keystone-test
1113 oslo_db:
1114 admin: cinder-db-admin
1115 cinder: cinder-db-user
1116 rbd:
1117 backup: cinder-backup-rbd-keyring
1118 volume: cinder-volume-rbd-keyring
1119 volume_external: cinder-volume-external-rbd-keyring
1120 oslo_messaging:
1121 admin: cinder-rabbitmq-admin
1122 cinder: cinder-rabbitmq-user
1123 tls:
    volume:
      api:
1126 public: cinder-tls-public
1127 internal: cinder-tls-api
1128 oci_image_registry:
1129 cinder: cinder-oci-image-registry
1130
1131# We use a different layout of the endpoints here to account for versioning
1132# this swaps the service name and type, and should be rolled out to other
1133# services.
1134endpoints:
1135 cluster_domain_suffix: cluster.local
1136 local_image_registry:
1137 name: docker-registry
1138 namespace: docker-registry
1139 hosts:
1140 default: localhost
1141 internal: docker-registry
1142 node: localhost
1143 host_fqdn_override:
1144 default: null
1145 port:
1146 registry:
1147 node: 5000
1148 oci_image_registry:
1149 name: oci-image-registry
1150 namespace: oci-image-registry
1151 auth:
1152 enabled: false
1153 cinder:
1154 username: cinder
1155 password: password
1156 hosts:
1157 default: localhost
1158 host_fqdn_override:
1159 default: null
1160 port:
1161 registry:
1162 default: null
1163 identity:
1164 name: keystone
1165 auth:
1166 admin:
1167 region_name: RegionOne
1168 username: admin
1169 password: password
1170 project_name: admin
1171 user_domain_name: default
1172 project_domain_name: default
1173 cinder:
1174 role: admin
1175 region_name: RegionOne
1176 username: cinder
1177 password: password
1178 project_name: service
1179 user_domain_name: service
1180 project_domain_name: service
1181 test:
1182 role: admin
1183 region_name: RegionOne
1184 username: cinder-test
1185 password: password
1186 project_name: test
1187 user_domain_name: service
1188 project_domain_name: service
1189 hosts:
1190 default: keystone
1191 internal: keystone-api
1192 host_fqdn_override:
1193 default: null
1194 path:
1195 default: /v3
1196 scheme:
1197 default: http
1198 port:
1199 api:
1200 default: 80
1201 internal: 5000
1202 image:
1203 name: glance
1204 hosts:
1205 default: glance-api
1206 public: glance
1207 host_fqdn_override:
1208 default: null
1209 path:
1210 default: null
1211 scheme:
1212 default: http
1213 port:
1214 api:
1215 default: 9292
1216 public: 80
  volume:
    name: cinder
1219 hosts:
1220 default: cinder-api
1221 public: cinder
1222 host_fqdn_override:
1223 default: null
1224 # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public
1225 # endpoints using the following format:
1226 # public:
1227 # host: null
1228 # tls:
1229 # crt: null
1230 # key: null
1231 path:
      default: '/v1/%(tenant_id)s'
    scheme:
1234 default: 'http'
1235 port:
1236 api:
1237 default: 8776
1238 public: 80
1239 volumev2:
1240 name: cinderv2
1241 hosts:
1242 default: cinder-api
1243 public: cinder
1244 host_fqdn_override:
1245 default: null
1246 # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public
1247 # endpoints using the following format:
1248 # public:
1249 # host: null
1250 # tls:
1251 # crt: null
1252 # key: null
1253 path:
1254 default: '/v2/%(tenant_id)s'
1255 scheme:
1256 default: 'http'
1257 port:
1258 api:
1259 default: 8776
1260 public: 80
1261 volumev3:
1262 name: cinderv3
1263 hosts:
1264 default: cinder-api
1265 public: cinder
1266 host_fqdn_override:
1267 default: null
1268 # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public
1269 # endpoints using the following format:
1270 # public:
1271 # host: null
1272 # tls:
1273 # crt: null
1274 # key: null
1275 path:
      default: '/v3/%(tenant_id)s'
1277 scheme:
1278 default: 'http'
1279 port:
1280 api:
1281 default: 8776
1282 public: 80
1283 oslo_db:
1284 auth:
1285 admin:
1286 username: root
1287 password: password
1288 secret:
1289 tls:
1290 internal: mariadb-tls-direct
1291 cinder:
1292 username: cinder
1293 password: password
1294 hosts:
1295 default: mariadb
1296 host_fqdn_override:
1297 default: null
1298 path: /cinder
1299 scheme: mysql+pymysql
1300 port:
1301 mysql:
1302 default: 3306
1303 oslo_messaging:
1304 auth:
1305 admin:
1306 username: rabbitmq
1307 password: password
1308 secret:
1309 tls:
1310 internal: rabbitmq-tls-direct
1311 cinder:
1312 username: cinder
1313 password: password
1314 statefulset:
1315 replicas: 2
1316 name: rabbitmq-rabbitmq
1317 hosts:
1318 default: rabbitmq
1319 host_fqdn_override:
1320 default: null
1321 path: /cinder
1322 scheme: rabbit
1323 port:
1324 amqp:
1325 default: 5672
1326 http:
1327 default: 15672
1328 oslo_cache:
1329 auth:
1330 # NOTE(portdirect): this is used to define the value for keystone
1331 # authtoken cache encryption key, if not set it will be populated
1332 # automatically with a random value, but to take advantage of
1333 # this feature all services should be set to use the same key,
1334 # and memcache service.
1335 memcache_secret_key: null
1336 hosts:
1337 default: memcached
1338 host_fqdn_override:
1339 default: null
1340 port:
1341 memcache:
1342 default: 11211
1343 fluentd:
1344 namespace: null
1345 name: fluentd
1346 hosts:
1347 default: fluentd-logging
1348 host_fqdn_override:
1349 default: null
1350 path:
1351 default: null
1352 scheme: 'http'
1353 port:
1354 service:
1355 default: 24224
1356 metrics:
1357 default: 24220
1358 kube_dns:
1359 namespace: kube-system
1360 name: kubernetes-dns
1361 hosts:
1362 default: kube-dns
1363 host_fqdn_override:
1364 default: null
1365 path:
1366 default: null
1367 scheme: http
1368 port:
1369 dns:
1370 default: 53
1371 protocol: UDP
1372 ingress:
1373 namespace: null
1374 name: ingress
1375 hosts:
1376 default: ingress
1377 port:
1378 ingress:
1379 default: 80
1380
1381network_policy:
1382 cinder:
1383 ingress:
1384 - {}
1385 egress:
1386 - {}
1387
1388# NOTE(helm_hook): helm_hook might break for helm2 binary.
1389# set helm3_hook: false when using the helm2 binary.
1390helm3_hook: true
1391
tls:
1393 identity: false
1394 oslo_messaging: false
1395 oslo_db: false
1396
manifests:
1398 certificates: false
1399 configmap_bin: true
1400 configmap_etc: true
1401 cron_volume_usage_audit: true
1402 deployment_api: true
1403 deployment_backup: true
1404 deployment_scheduler: true
1405 deployment_volume: true
1406 ingress_api: true
1407 job_backup_storage_init: true
1408 job_bootstrap: true
1409 job_clean: true
1410 job_create_internal_tenant: true
1411 job_db_init: true
1412 job_image_repo_sync: true
1413 job_rabbit_init: true
1414 job_db_sync: true
1415 job_db_drop: false
1416 job_ks_endpoints: true
1417 job_ks_service: true
1418 job_ks_user: true
1419 job_storage_init: true
1420 pdb_api: true
1421 pod_rally_test: true
1422 pvc_backup: true
1423 network_policy: false
1424 secret_db: true
1425 secret_ingress_tls: true
1426 secret_keystone: true
1427 secret_rabbitmq: true
1428 secret_registry: true
1429 service_api: true
1430 service_ingress_api: true
1431...