# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Default values for cinder.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value

---
storage: ceph

labels:
  api:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  backup:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  scheduler:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  test:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  volume:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled

release_group: null

images:
  tags:
    test: docker.io/xrally/xrally-openstack:2.0.0
    db_init: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
    cinder_db_sync: docker.io/openstackhelm/cinder:wallaby-ubuntu_focal
    db_drop: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
    rabbit_init: docker.io/rabbitmq:3.7-management
    ks_user: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
    ks_service: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
    ks_endpoints: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
    cinder_api: docker.io/openstackhelm/cinder:wallaby-ubuntu_focal
    bootstrap: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
    cinder_scheduler: docker.io/openstackhelm/cinder:wallaby-ubuntu_focal
    cinder_volume: docker.io/openstackhelm/cinder:wallaby-ubuntu_focal
    cinder_volume_usage_audit: docker.io/openstackhelm/cinder:wallaby-ubuntu_focal
    cinder_storage_init: docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_bionic
    cinder_backup: docker.io/openstackhelm/cinder:wallaby-ubuntu_focal
    cinder_backup_storage_init: docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_bionic
    dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
    image_repo_sync: docker.io/docker:17.07.0
  pull_policy: "IfNotPresent"
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync

jobs:
  volume_usage_audit:
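    # NOTE: "5 * * * *" runs the volume usage audit at minute 5 of every hour.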
    cron: "5 * * * *"
    starting_deadline: 600
    history:
      success: 3
      failed: 1

pod:
  security_context:
    volume_usage_audit:
      pod:
        runAsUser: 42424
      container:
        cinder_volume_usage_audit:
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
    cinder_api:
      pod:
        runAsUser: 42424
      container:
        ceph_coordination_volume_perms:
          runAsUser: 0
          readOnlyRootFilesystem: true
        cinder_api:
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
    cinder_backup:
      pod:
        runAsUser: 42424
      container:
        ceph_backup_keyring_placement:
          runAsUser: 0
          readOnlyRootFilesystem: true
        ceph_keyring_placement:
          runAsUser: 0
          readOnlyRootFilesystem: true
        ceph_backup_volume_perms:
          runAsUser: 0
          readOnlyRootFilesystem: true
        ceph_coordination_volume_perms:
          runAsUser: 0
          readOnlyRootFilesystem: true
        cinder_backup:
          capabilities:
            add:
              - SYS_ADMIN
          readOnlyRootFilesystem: true
          runAsUser: 0
    cinder_scheduler:
      pod:
        runAsUser: 42424
      container:
        ceph_coordination_volume_perms:
          runAsUser: 0
          readOnlyRootFilesystem: true
        cinder_scheduler:
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
    cinder_volume:
      pod:
        runAsUser: 42424
      container:
        ceph_keyring_placement:
          runAsUser: 0
          readOnlyRootFilesystem: true
        ceph_coordination_volume_perms:
          runAsUser: 0
          readOnlyRootFilesystem: true
        init_cinder_conf:
          runAsUser: 0
          readOnlyRootFilesystem: true
        cinder_volume:
          capabilities:
            add:
              - SYS_ADMIN
          readOnlyRootFilesystem: true
    storage_init:
      pod:
        runAsUser: 42424
      container:
        ceph_keyring_placement:
          runAsUser: 0
          readOnlyRootFilesystem: true
        cinder_backup_storage_init:
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
    clean:
      pod:
        runAsUser: 42424
      container:
        cinder_volume_rbd_secret_clean:
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
    create_internal_tenant:
      pod:
        runAsUser: 42424
      container:
        create_internal_tenant:
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
      weight:
        default: 10
  tolerations:
    cinder:
      enabled: false
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
  useHostNetwork:
    volume: false
    backup: false
  mounts:
    cinder_api:
      init_container: null
      cinder_api:
        volumeMounts:
        volumes:
    cinder_scheduler:
      init_container: null
      cinder_scheduler:
        volumeMounts:
        volumes:
    cinder_volume:
      init_container: null
      cinder_volume:
        volumeMounts:
        volumes:
    cinder_volume_usage_audit:
      init_container: null
      cinder_volume_usage_audit:
        volumeMounts:
        volumes:
    cinder_backup:
      init_container: null
      cinder_backup:
        volumeMounts:
        volumes:
    cinder_tests:
      init_container: null
      cinder_tests:
        volumeMounts:
        volumes:
    cinder_db_sync:
      cinder_db_sync:
        volumeMounts:
        volumes:
  replicas:
    api: 1
    volume: 1
    scheduler: 1
    backup: 1
  lifecycle:
    upgrades:
      deployments:
        revision_history: 3
        pod_replacement_strategy: RollingUpdate
        rolling_update:
          max_unavailable: 1
          max_surge: 3
    disruption_budget:
      api:
        min_available: 0
    termination_grace_period:
      api:
        timeout: 30
  resources:
    enabled: false
    api:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    scheduler:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    volume:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    jobs:
      volume_usage_audit:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      bootstrap:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      rabbit_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_drop:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      clean:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      backup_storage_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      storage_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_endpoints:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_service:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_user:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      tests:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      image_repo_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"

bootstrap:
  enabled: true
  ks_user: admin
  bootstrap_conf_backends: true
  volume_types:
    name:
      group:
        volume_backend_name:
        # access_type: "private"
        # If you set access_type to private, only the creator gets
        # access to the volume type. You can extend access to your
        # volume type by providing a list of domain names and
        # projects as shown below.
        # grant_access:
        #   <domain name 1>:
        #     - <project name 1>
        #     - <project name 2>
        #     <...>
        #   <domain name 2>:
        #     - <project name 1>
        #     <...>
        # Volume QoS, if any. By default, no QoS is created.
        # Below, values with a number at the end need to be replaced
        # with real names.
        # volume_qos:
        #   qos_name_1:
        #     consumer: front-end
        #     properties:
        #       key_1: value_1
        #       key_2: value_2
        #     associates:
        #       - volume_type_1
        #       - volume_type_2
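        # A filled-in illustration of the template above (all names and
        # values are hypothetical, not defaults):
        # access_type: "private"
        # grant_access:
        #   default:
        #     - demo-project-1
        #     - demo-project-2
        # volume_qos:
        #   high_iops_qos:
        #     consumer: front-end
        #     properties:
        #       total_iops_sec: "1000"
        #     associates:
        #       - volume_type_1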

network:
  api:
    ingress:
      public: true
      classes:
        namespace: "nginx"
        cluster: "nginx-cluster"
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
    external_policy_local: false
    node_port:
      enabled: false
      port: 30877

ceph_client:
  # Enable this when there is a need to create a second ceph backend
  # pointing to an external ceph cluster.
  enable_external_ceph_backend: false
  # Change this if the name of the first ceph backend, pointing to the
  # internal ceph cluster, is different.
  internal_ceph_backend: rbd1
  configmap: ceph-etc
  user_secret_name: pvc-ceph-client-key
  external_ceph:
    # A secret for the external ceph keyring is created only when
    # enable_external_ceph_backend is true and rbd_user is NOT null.
    rbd_user: null
    rbd_user_keyring: null
    configmap: null
    conf:
      global: null
      osd: null
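  # A minimal illustrative override for enabling the external backend
  # (all values below are placeholders, not defaults):
  # enable_external_ceph_backend: true
  # external_ceph:
  #   rbd_user: cinder
  #   rbd_user_keyring: <keyring of the external cluster's cinder user>
  #   conf:
  #     global:
  #       mon_host: <external cluster monitor address(es)>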
conf:
  paste:
    composite:osapi_volume:
      use: call:cinder.api:root_app_factory
      /: apiversions
      /v3: openstack_volume_api_v3
    composite:openstack_volume_api_v3:
      use: call:cinder.api.middleware.auth:pipeline_factory
      noauth: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv3
      keystone: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv3
      keystone_nolimit: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv3
    filter:request_id:
      paste.filter_factory: oslo_middleware.request_id:RequestId.factory
    filter:http_proxy_to_wsgi:
      paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
    filter:cors:
      paste.filter_factory: oslo_middleware.cors:filter_factory
      oslo_config_project: cinder
    filter:faultwrap:
      paste.filter_factory: cinder.api.middleware.fault:FaultWrapper.factory
    filter:osprofiler:
      paste.filter_factory: osprofiler.web:WsgiMiddleware.factory
    filter:noauth:
      paste.filter_factory: cinder.api.middleware.auth:NoAuthMiddleware.factory
    filter:sizelimit:
      paste.filter_factory: oslo_middleware.sizelimit:RequestBodySizeLimiter.factory
    app:apiv3:
      paste.app_factory: cinder.api.v3.router:APIRouter.factory
    pipeline:apiversions:
      pipeline: cors http_proxy_to_wsgi faultwrap osvolumeversionapp
    app:osvolumeversionapp:
      paste.app_factory: cinder.api.versions:Versions.factory
    filter:keystonecontext:
      paste.filter_factory: cinder.api.middleware.auth:CinderKeystoneContext.factory
    filter:authtoken:
      paste.filter_factory: keystonemiddleware.auth_token:filter_factory
    filter:audit:
      paste.filter_factory: keystonemiddleware.audit:filter_factory
      audit_map_file: /etc/cinder/api_audit_map.conf
  policy: {}
  api_audit_map:
    DEFAULT:
      target_endpoint_type: None
    custom_actions:
      associate: update/associate
      disassociate: update/disassociate_all
      disassociate_all: update/disassociate_all
      associations: read/list/associations
    path_keywords:
      defaults: None
      detail: None
      limits: None
      os-quota-specs: project
      qos-specs: qos-spec
      snapshots: snapshot
      types: type
      volumes: volume
    service_endpoints:
      volumev3: service/storage/block
  cinder_sudoers: |
    # This sudoers file supports rootwrap for both Kolla and LOCI Images.
    Defaults !requiretty
    Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin"
    cinder ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/cinder-rootwrap /etc/cinder/rootwrap.conf *, /var/lib/openstack/bin/cinder-rootwrap /etc/cinder/rootwrap.conf *
  rootwrap: |
    # Configuration for cinder-rootwrap
    # This file should be owned by (and only writeable by) the root user

    [DEFAULT]
    # List of directories to load filter definitions from (separated by ',').
    # These directories MUST all be only writeable by root !
    filters_path=/etc/cinder/rootwrap.d

    # List of directories to search executables in, in case filters do not
    # explicitly specify a full path (separated by ',')
    # If not specified, defaults to system PATH environment variable.
    # These directories MUST all be only writeable by root !
    exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin

    # Enable logging to syslog
    # Default value is False
    use_syslog=False

    # Which syslog facility to use.
    # Valid values include auth, authpriv, syslog, local0, local1...
    # Default value is 'syslog'
    syslog_log_facility=syslog

    # Which messages to log.
    # INFO means log all usage
    # ERROR means only log unsuccessful attempts
    syslog_log_level=ERROR
  rootwrap_filters:
    volume:
      pods:
        - volume
      content: |
        # cinder-rootwrap command filters for volume nodes
        # This file should be owned by (and only writeable by) the root user

        [Filters]
        # cinder/volume/iscsi.py: iscsi_helper '--op' ...
        ietadm: CommandFilter, ietadm, root
        tgtadm: CommandFilter, tgtadm, root
        iscsictl: CommandFilter, iscsictl, root
        tgt-admin: CommandFilter, tgt-admin, root
        cinder-rtstool: CommandFilter, cinder-rtstool, root
        scstadmin: CommandFilter, scstadmin, root

        # LVM related show commands
        pvs: EnvFilter, env, root, LC_ALL=C, pvs
        vgs: EnvFilter, env, root, LC_ALL=C, vgs
        lvs: EnvFilter, env, root, LC_ALL=C, lvs
        lvdisplay: EnvFilter, env, root, LC_ALL=C, lvdisplay

        # LVM related show commands with fd warnings suppressed
        pvs_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, pvs
        vgs_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, vgs
        lvs_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvs
        lvdisplay_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvdisplay

        # LVM related show commands with an LVM conf var
        pvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, pvs
        vgs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, vgs
        lvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvs
        lvdisplay_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvdisplay

        # LVM conf var with fd warnings suppressed
        pvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, pvs
        vgs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, vgs
        lvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvs
        lvdisplay_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvdisplay

        # os-brick library commands
        # os_brick.privileged.run_as_root oslo.privsep context
        # This line ties the superuser privs with the config files, context name,
        # and (implicitly) the actual python code invoked.
        privsep-rootwrap: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.*
        # The following and any cinder/brick/* entries should all be obsoleted
        # by privsep, and may be removed once the os-brick version requirement
        # is updated appropriately.
        scsi_id: CommandFilter, /lib/udev/scsi_id, root
        drbdadm: CommandFilter, drbdadm, root

        # cinder/brick/local_dev/lvm.py: 'vgcreate', vg_name, pv_list
        vgcreate: CommandFilter, vgcreate, root

        # cinder/brick/local_dev/lvm.py: 'lvcreate', '-L', sizestr, '-n', volume_name,..
        # cinder/brick/local_dev/lvm.py: 'lvcreate', '-L', ...
        lvcreate: EnvFilter, env, root, LC_ALL=C, lvcreate
        lvcreate_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvcreate
        lvcreate_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvcreate
        lvcreate_lvmconf_fdwarn: EnvFilter, env, root, LVM_SYSTEM_DIR=, LVM_SUPPRESS_FD_WARNINGS=, LC_ALL=C, lvcreate

        # cinder/volume/driver.py: 'dd', 'if=%s' % srcstr, 'of=%s' % deststr,...
        dd: CommandFilter, dd, root

        # cinder/volume/driver.py: 'lvremove', '-f', %s/%s % ...
        lvremove: CommandFilter, lvremove, root

        # cinder/volume/driver.py: 'lvrename', '%(vg)s', '%(orig)s' '(new)s'...
        lvrename: CommandFilter, lvrename, root

        # cinder/brick/local_dev/lvm.py: 'lvextend', '-L' '%(new_size)s', '%(lv_name)s' ...
        # cinder/brick/local_dev/lvm.py: 'lvextend', '-L' '%(new_size)s', '%(thin_pool)s' ...
        lvextend: EnvFilter, env, root, LC_ALL=C, lvextend
        lvextend_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvextend
        lvextend_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvextend
        lvextend_lvmconf_fdwarn: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvextend

        # cinder/brick/local_dev/lvm.py: 'lvchange -a y -K <lv>'
        lvchange: CommandFilter, lvchange, root

        # cinder/brick/local_dev/lvm.py: 'lvconvert', '--merge', snapshot_name
        lvconvert: CommandFilter, lvconvert, root

        # cinder/volume/driver.py: 'iscsiadm', '-m', 'discovery', '-t',...
        # cinder/volume/driver.py: 'iscsiadm', '-m', 'node', '-T', ...
        iscsiadm: CommandFilter, iscsiadm, root

        # cinder/volume/utils.py: utils.temporary_chown(path, 0)
        chown: CommandFilter, chown, root

        # cinder/volume/utils.py: copy_volume(..., ionice='...')
        ionice_1: ChainingRegExpFilter, ionice, root, ionice, -c[0-3], -n[0-7]
        ionice_2: ChainingRegExpFilter, ionice, root, ionice, -c[0-3]

        # cinder/volume/utils.py: setup_blkio_cgroup()
        cgcreate: CommandFilter, cgcreate, root
        cgset: CommandFilter, cgset, root
        cgexec: ChainingRegExpFilter, cgexec, root, cgexec, -g, blkio:\S+

        # cinder/volume/driver.py
        dmsetup: CommandFilter, dmsetup, root
        ln: CommandFilter, ln, root

        # cinder/image/image_utils.py
        qemu-img: EnvFilter, env, root, LC_ALL=C, qemu-img
        qemu-img_convert: CommandFilter, qemu-img, root

        udevadm: CommandFilter, udevadm, root

        # cinder/volume/driver.py: utils.read_file_as_root()
        cat: CommandFilter, cat, root

        # cinder/volume/nfs.py
        stat: CommandFilter, stat, root
        mount: CommandFilter, mount, root
        df: CommandFilter, df, root
        du: CommandFilter, du, root
        truncate: CommandFilter, truncate, root
        chmod: CommandFilter, chmod, root
        rm: CommandFilter, rm, root

        # cinder/volume/drivers/remotefs.py
        mkdir: CommandFilter, mkdir, root

        # cinder/volume/drivers/netapp/nfs.py:
        netapp_nfs_find: RegExpFilter, find, root, find, ^[/]*([^/\0]+(/+)?)*$, -maxdepth, \d+, -name, img-cache.*, -amin, \+\d+

        # cinder/volume/drivers/glusterfs.py
        chgrp: CommandFilter, chgrp, root
        umount: CommandFilter, umount, root
        fallocate: CommandFilter, fallocate, root

        # cinder/volumes/drivers/hds/hds.py:
        hus-cmd: CommandFilter, hus-cmd, root
        hus-cmd_local: CommandFilter, /usr/local/bin/hus-cmd, root

        # cinder/volumes/drivers/hds/hnas_backend.py
        ssc: CommandFilter, ssc, root

        # cinder/brick/initiator/connector.py:
        ls: CommandFilter, ls, root
        tee: CommandFilter, tee, root
        multipath: CommandFilter, multipath, root
        multipathd: CommandFilter, multipathd, root
        systool: CommandFilter, systool, root

        # cinder/volume/drivers/block_device.py
        blockdev: CommandFilter, blockdev, root

        # cinder/volume/drivers/ibm/gpfs.py
        # cinder/volume/drivers/tintri.py
        mv: CommandFilter, mv, root

        # cinder/volume/drivers/ibm/gpfs.py
        cp: CommandFilter, cp, root
        mmgetstate: CommandFilter, /usr/lpp/mmfs/bin/mmgetstate, root
        mmclone: CommandFilter, /usr/lpp/mmfs/bin/mmclone, root
        mmlsattr: CommandFilter, /usr/lpp/mmfs/bin/mmlsattr, root
        mmchattr: CommandFilter, /usr/lpp/mmfs/bin/mmchattr, root
        mmlsconfig: CommandFilter, /usr/lpp/mmfs/bin/mmlsconfig, root
        mmlsfs: CommandFilter, /usr/lpp/mmfs/bin/mmlsfs, root
        mmlspool: CommandFilter, /usr/lpp/mmfs/bin/mmlspool, root
        mkfs: CommandFilter, mkfs, root
        mmcrfileset: CommandFilter, /usr/lpp/mmfs/bin/mmcrfileset, root
        mmlinkfileset: CommandFilter, /usr/lpp/mmfs/bin/mmlinkfileset, root
        mmunlinkfileset: CommandFilter, /usr/lpp/mmfs/bin/mmunlinkfileset, root
        mmdelfileset: CommandFilter, /usr/lpp/mmfs/bin/mmdelfileset, root
        mmcrsnapshot: CommandFilter, /usr/lpp/mmfs/bin/mmcrsnapshot, root
        mmdelsnapshot: CommandFilter, /usr/lpp/mmfs/bin/mmdelsnapshot, root

        # cinder/volume/drivers/ibm/gpfs.py
        # cinder/volume/drivers/ibm/ibmnas.py
        find_maxdepth_inum: RegExpFilter, find, root, find, ^[/]*([^/\0]+(/+)?)*$, -maxdepth, \d+, -ignore_readdir_race, -inum, \d+, -print0, -quit

        # cinder/brick/initiator/connector.py:
        aoe-revalidate: CommandFilter, aoe-revalidate, root
        aoe-discover: CommandFilter, aoe-discover, root
        aoe-flush: CommandFilter, aoe-flush, root

        # cinder/brick/initiator/linuxscsi.py:
        sg_scan: CommandFilter, sg_scan, root

        # cinder/backup/services/tsm.py
        dsmc: CommandFilter, /usr/bin/dsmc, root

        # cinder/volume/drivers/hitachi/hbsd_horcm.py
        raidqry: CommandFilter, raidqry, root
        raidcom: CommandFilter, raidcom, root
        pairsplit: CommandFilter, pairsplit, root
        paircreate: CommandFilter, paircreate, root
        pairdisplay: CommandFilter, pairdisplay, root
        pairevtwait: CommandFilter, pairevtwait, root
        horcmstart.sh: CommandFilter, horcmstart.sh, root
        horcmshutdown.sh: CommandFilter, horcmshutdown.sh, root
        horcmgr: EnvFilter, env, root, HORCMINST=, /etc/horcmgr

        # cinder/volume/drivers/hitachi/hbsd_snm2.py
        auman: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auman
        auluref: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluref
        auhgdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgdef
        aufibre1: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aufibre1
        auhgwwn: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgwwn
        auhgmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgmap
        autargetmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetmap
        aureplicationvvol: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationvvol
        auluadd: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluadd
        auludel: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auludel
        auluchgsize: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluchgsize
        auchapuser: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auchapuser
        autargetdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetdef
        autargetopt: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetopt
        autargetini: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetini
        auiscsi: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auiscsi
        audppool: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/audppool
        aureplicationlocal: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationlocal
        aureplicationmon: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationmon

        # cinder/volume/drivers/hgst.py
        vgc-cluster: CommandFilter, vgc-cluster, root

        # cinder/volume/drivers/vzstorage.py
        pstorage-mount: CommandFilter, pstorage-mount, root
        pstorage: CommandFilter, pstorage, root
        ploop: CommandFilter, ploop, root

        # initiator/connector.py:
        drv_cfg: CommandFilter, /opt/emc/scaleio/sdc/bin/drv_cfg, root, /opt/emc/scaleio/sdc/bin/drv_cfg, --query_guid
  ceph:
    override:
    append:
    monitors: []
    admin_keyring: null
    pools:
      backup:
        replication: 3
        crush_rule: replicated_rule
        chunk_size: 8
        app_name: cinder-backup
      cinder.volumes:
        replication: 3
        crush_rule: replicated_rule
        chunk_size: 8
        app_name: cinder-volume
  cinder:
    DEFAULT:
      volume_usage_audit_period: hour
      resource_query_filters_file: /etc/cinder/resource_filters.json
      log_config_append: /etc/cinder/logging.conf
      use_syslog: false
      use_stderr: true
      enable_v1_api: false
      volume_name_template: "%s"
      osapi_volume_workers: 1
      glance_api_version: 2
      os_region_name: RegionOne
      host: cinder-volume-worker
      # NOTE(portdirect): the bind port should not be defined, and is manipulated
      # via the endpoints section.
      osapi_volume_listen_port: null
      enabled_backends: "rbd1"
      default_volume_type: "rbd1"
      # NOTE(portdirect): "cinder.backup.drivers.ceph" and
      # "cinder.backup.drivers.posix" are also supported.
      # NOTE(rchurch): As of Stein, drivers by class name are required:
      #   - cinder.backup.drivers.swift.SwiftBackupDriver
      #   - cinder.backup.drivers.ceph.CephBackupDriver
      #   - cinder.backup.drivers.posix.PosixBackupDriver
      backup_driver: "cinder.backup.drivers.swift.SwiftBackupDriver"
      # Backup: Ceph RBD options
      backup_ceph_conf: "/etc/ceph/ceph.conf"
      backup_ceph_user: cinderbackup
      backup_ceph_pool: cinder.backups
      # Backup: Posix options
      backup_posix_path: /var/lib/cinder/backup
      auth_strategy: keystone
      # Internal tenant id
      internal_project_name: internal_cinder
      internal_user_name: internal_cinder
    database:
      max_retries: -1
    keystone_authtoken:
      auth_version: v3
      auth_type: password
      memcache_security_strategy: ENCRYPT
    oslo_policy:
      policy_file: /etc/cinder/policy.yaml
    oslo_concurrency:
      lock_path: "/var/lib/cinder/tmp"
    oslo_messaging_notifications:
      driver: messagingv2
    oslo_middleware:
      enable_proxy_headers_parsing: true
    oslo_messaging_rabbit:
      rabbit_ha_queues: true
    coordination:
      backend_url: file:///var/lib/cinder/coordination
    service_user:
      auth_type: password
      send_service_user_token: false
  logging:
    loggers:
      keys:
        - root
        - cinder
    handlers:
      keys:
        - stdout
        - stderr
        - "null"
    formatters:
      keys:
        - context
        - default
    logger_root:
      level: WARNING
      handlers: 'null'
    logger_cinder:
      level: INFO
      handlers:
        - stdout
      qualname: cinder
    logger_amqp:
      level: WARNING
      handlers: stderr
      qualname: amqp
    logger_amqplib:
      level: WARNING
      handlers: stderr
      qualname: amqplib
    logger_eventletwsgi:
      level: WARNING
      handlers: stderr
      qualname: eventlet.wsgi.server
    logger_sqlalchemy:
      level: WARNING
      handlers: stderr
      qualname: sqlalchemy
    logger_boto:
      level: WARNING
      handlers: stderr
      qualname: boto
    handler_null:
      class: logging.NullHandler
      formatter: default
      args: ()
    handler_stdout:
      class: StreamHandler
      args: (sys.stdout,)
      formatter: context
    handler_stderr:
      class: StreamHandler
      args: (sys.stderr,)
      formatter: context
    formatter_context:
      class: oslo_log.formatters.ContextFormatter
      datefmt: "%Y-%m-%d %H:%M:%S"
    formatter_default:
      format: "%(message)s"
      datefmt: "%Y-%m-%d %H:%M:%S"
  rabbitmq:
    # NOTE(rk760n): add an rmq policy to mirror messages from notification
    # queues and set an expiration time for them
    policies:
      - vhost: "cinder"
        name: "ha_ttl_cinder"
        definition:
          # mirror messages to other nodes in the rmq cluster
          ha-mode: "all"
          ha-sync-mode: "automatic"
          # 70s
          message-ttl: 70000
        priority: 0
        apply-to: all
        pattern: '^(?!(amq\.|reply_)).*'

  backends:
    # Those options will be written to backends.conf as-is.
    rbd1:
      volume_driver: cinder.volume.drivers.rbd.RBDDriver
      volume_backend_name: rbd1
      rbd_pool: cinder.volumes
      rbd_ceph_conf: "/etc/ceph/ceph.conf"
      rbd_flatten_volume_from_snapshot: false
      report_discard_supported: true
      rbd_max_clone_depth: 5
      rbd_store_chunk_size: 4
      rados_connect_timeout: -1
      rbd_user: cinder
      rbd_secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
      image_volume_cache_enabled: True
      image_volume_cache_max_size_gb: 200
      image_volume_cache_max_count: 50
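    # A second backend can be added alongside rbd1; each stanza here is
    # rendered into backends.conf verbatim. Illustrative sketch only
    # (pool, user, and secret values are placeholders):
    # rbd2:
    #   volume_driver: cinder.volume.drivers.rbd.RBDDriver
    #   volume_backend_name: rbd2
    #   rbd_pool: cinder.volumes.gold
    #   rbd_ceph_conf: "/etc/ceph/ceph.conf"
    #   rbd_user: cinder
    #   rbd_secret_uuid: <uuid of the libvirt secret for this user>
    # Remember to add the new name to conf.cinder.DEFAULT.enabled_backends
    # (a comma-separated list).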
  rally_tests:
    run_tempest: false
    clean_up: |
      VOLUMES=$(openstack volume list -f value | grep -e "^s_rally_" | awk '{ print $1 }')
      if [ -n "$VOLUMES" ]; then
        echo $VOLUMES | xargs openstack volume delete
      fi
    tests:
      CinderVolumes.create_and_delete_volume:
        - args:
            size: 1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
        - args:
            size:
              max: 5
              min: 1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
  resource_filters:
    volume:
      - name
      - status
      - metadata
      - bootable
      - migration_status
      - availability_zone
      - group_id
    backup:
      - name
      - status
      - volume_id
    snapshot:
      - name
      - status
      - volume_id
      - metadata
      - availability_zone
    group: []
    group_snapshot:
      - status
      - group_id
    attachment:
      - volume_id
      - status
      - instance_id
      - attach_status
    message:
      - resource_uuid
      - resource_type
      - event_id
      - request_id
      - message_level
    pool:
      - name
      - volume_type
    volume_type: []
  enable_iscsi: false
backup:
  external_ceph_rbd:
    enabled: false
    admin_keyring: null
    configmap: null
    conf:
      global: null
      osd: null
  posix:
    volume:
      class_name: general
      size: 10Gi

dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
          - cinder-image-repo-sync
        services:
          - endpoint: node
            service: local_image_registry
  static:
    api:
      jobs:
        - cinder-db-sync
        - cinder-ks-user
        - cinder-ks-endpoints
        - cinder-rabbit-init
        - cinder-storage-init
      services:
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: identity
    backup:
      jobs:
        - cinder-db-sync
        - cinder-ks-user
        - cinder-ks-endpoints
        - cinder-rabbit-init
        - cinder-storage-init
        - cinder-backup-storage-init
      services:
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: volumev3
    backup_storage_init:
      jobs: null
    bootstrap:
      services:
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: volumev3
      pod:
        - requireSameNode: false
          labels:
            application: cinder
            component: volume
    clean:
      jobs: null
    db_drop:
      services:
        - endpoint: internal
          service: oslo_db
    db_init:
      services:
        - endpoint: internal
          service: oslo_db
    db_sync:
      jobs:
        - cinder-db-init
      services:
        - endpoint: internal
          service: oslo_db
    ks_endpoints:
      jobs:
        - cinder-ks-service
      services:
        - endpoint: internal
          service: identity
    ks_service:
      services:
        - endpoint: internal
          service: identity
    ks_user:
      services:
        - endpoint: internal
          service: identity
    rabbit_init:
      services:
        - service: oslo_messaging
          endpoint: internal
    scheduler:
      jobs:
        - cinder-db-sync
        - cinder-ks-user
        - cinder-ks-endpoints
        - cinder-rabbit-init
        - cinder-storage-init
      services:
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: volumev3
    storage_init:
      jobs: null
    tests:
      services:
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: volumev3
    volume:
      jobs:
        - cinder-db-sync
        - cinder-ks-user
        - cinder-ks-endpoints
        - cinder-rabbit-init
        - cinder-storage-init
      services:
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: volumev3
    volume_usage_audit:
      jobs:
        - cinder-db-sync
        - cinder-ks-user
        - cinder-ks-endpoints
        - cinder-rabbit-init
        - cinder-storage-init
      services:
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: volumev3
    image_repo_sync:
      services:
        - endpoint: internal
          service: local_image_registry
    create_internal_tenant:
      services:
        - endpoint: internal
          service: identity
# Names of secrets used by bootstrap and environmental checks
secrets:
  identity:
    admin: cinder-keystone-admin
    cinder: cinder-keystone-user
    test: cinder-keystone-test
  oslo_db:
    admin: cinder-db-admin
    cinder: cinder-db-user
  rbd:
    backup: cinder-backup-rbd-keyring
    volume: cinder-volume-rbd-keyring
    volume_external: cinder-volume-external-rbd-keyring
  oslo_messaging:
    admin: cinder-rabbitmq-admin
    cinder: cinder-rabbitmq-user
  tls:
    volumev3:
      api:
        public: cinder-tls-public
        internal: cinder-tls-api
  oci_image_registry:
    cinder: cinder-oci-image-registry

# We use a different layout of the endpoints here to account for versioning.
# This swaps the service name and type, and should be rolled out to other
# services.
endpoints:
  cluster_domain_suffix: cluster.local
  local_image_registry:
    name: docker-registry
    namespace: docker-registry
    hosts:
      default: localhost
      internal: docker-registry
      node: localhost
    host_fqdn_override:
      default: null
    port:
      registry:
        node: 5000
  oci_image_registry:
    name: oci-image-registry
    namespace: oci-image-registry
    auth:
      enabled: false
      cinder:
        username: cinder
        password: password
    hosts:
      default: localhost
    host_fqdn_override:
      default: null
    port:
      registry:
        default: null
  identity:
    name: keystone
    auth:
      admin:
        region_name: RegionOne
        username: admin
        password: password
        project_name: admin
        user_domain_name: default
        project_domain_name: default
      cinder:
        role: admin
        region_name: RegionOne
        username: cinder
        password: password
        project_name: service
        user_domain_name: service
        project_domain_name: service
      test:
        role: admin
        region_name: RegionOne
        username: cinder-test
        password: password
        project_name: test
        user_domain_name: service
        project_domain_name: service
    hosts:
      default: keystone
      internal: keystone-api
    host_fqdn_override:
      default: null
    path:
      default: /v3
    scheme:
      default: http
    port:
      api:
        default: 80
        internal: 5000
  image:
    name: glance
    hosts:
      default: glance-api
      public: glance
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme:
      default: http
    port:
      api:
        default: 9292
        public: 80
  volumev3:
    name: cinder
    hosts:
      default: cinder-api
      public: cinder
    host_fqdn_override:
      default: null
      # NOTE(portdirect): this chart supports TLS for fqdn-overridden public
      # endpoints using the following format:
      # public:
      #   host: null
      #   tls:
      #     crt: null
      #     key: null
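      # For example (illustrative hostname only), terminating TLS for a
      # public endpoint at cinder.example.com would look like:
      # public:
      #   host: cinder.example.com
      #   tls:
      #     crt: |
      #       <PEM-encoded certificate>
      #     key: |
      #       <PEM-encoded private key>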
    path:
      default: '/v3/%(tenant_id)s'
    scheme:
      default: 'http'
    port:
      api:
        default: 8776
        public: 80
  oslo_db:
    auth:
      admin:
        username: root
        password: password
        secret:
          tls:
            internal: mariadb-tls-direct
      cinder:
        username: cinder
        password: password
    hosts:
      default: mariadb
    host_fqdn_override:
      default: null
    path: /cinder
    scheme: mysql+pymysql
    port:
      mysql:
        default: 3306
  oslo_messaging:
    auth:
      admin:
        username: rabbitmq
        password: password
        secret:
          tls:
            internal: rabbitmq-tls-direct
      cinder:
        username: cinder
        password: password
    statefulset:
      replicas: 2
      name: rabbitmq-rabbitmq
    hosts:
      default: rabbitmq
    host_fqdn_override:
      default: null
    path: /cinder
    scheme: rabbit
    port:
      amqp:
        default: 5672
      http:
        default: 15672
  oslo_cache:
    auth:
      # NOTE(portdirect): this is used to define the value for the keystone
      # authtoken cache encryption key. If not set, it will be populated
      # automatically with a random value, but to take advantage of this
      # feature all services should be set to use the same key and
      # memcached service.
      memcache_secret_key: null
    hosts:
      default: memcached
    host_fqdn_override:
      default: null
    port:
      memcache:
        default: 11211
  fluentd:
    namespace: null
    name: fluentd
    hosts:
      default: fluentd-logging
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme: 'http'
    port:
      service:
        default: 24224
      metrics:
        default: 24220
  kube_dns:
    namespace: kube-system
    name: kubernetes-dns
    hosts:
      default: kube-dns
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme: http
    port:
      dns:
        default: 53
        protocol: UDP
  ingress:
    namespace: null
    name: ingress
    hosts:
      default: ingress
    port:
      ingress:
        default: 80

network_policy:
  cinder:
    ingress:
      - {}
    egress:
      - {}

# NOTE(helm_hook): helm_hook might break with the helm2 binary.
# Set helm3_hook to false when using the helm2 binary.
helm3_hook: true

tls:
  identity: false
  oslo_messaging: false
  oslo_db: false

manifests:
  certificates: false
  configmap_bin: true
  configmap_etc: true
  cron_volume_usage_audit: true
  deployment_api: true
  deployment_backup: true
  deployment_scheduler: true
  deployment_volume: true
  ingress_api: true
  job_backup_storage_init: true
  job_bootstrap: true
  job_clean: true
  job_create_internal_tenant: true
  job_db_init: true
  job_image_repo_sync: true
  job_rabbit_init: true
  job_db_sync: true
  job_db_drop: false
  job_ks_endpoints: true
  job_ks_service: true
  job_ks_user: true
  job_storage_init: true
  pdb_api: true
  pod_rally_test: true
  pvc_backup: true
  network_policy: false
  secret_db: true
  secret_ingress_tls: true
  secret_keystone: true
  secret_rabbitmq: true
  secret_registry: true
  service_api: true
  service_ingress_api: true
...