# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Default values for cinder.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
#   name: value

---
storage: ceph

labels:
  api:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  backup:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  scheduler:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  test:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  volume:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled

release_group: null

images:
  tags:
    test: docker.io/xrally/xrally-openstack:2.0.0
    db_init: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
    cinder_db_sync: docker.io/openstackhelm/cinder:wallaby-ubuntu_focal
    db_drop: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
    rabbit_init: docker.io/rabbitmq:3.7-management
    ks_user: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
    ks_service: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
    ks_endpoints: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
    cinder_api: docker.io/openstackhelm/cinder:wallaby-ubuntu_focal
    bootstrap: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
    cinder_scheduler: docker.io/openstackhelm/cinder:wallaby-ubuntu_focal
    cinder_volume: docker.io/openstackhelm/cinder:wallaby-ubuntu_focal
    cinder_volume_usage_audit: docker.io/openstackhelm/cinder:wallaby-ubuntu_focal
    cinder_storage_init: docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_focal
    cinder_backup: docker.io/openstackhelm/cinder:wallaby-ubuntu_focal
    cinder_backup_storage_init: docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_focal
    dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
    image_repo_sync: docker.io/docker:17.07.0
  pull_policy: "IfNotPresent"
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync

jobs:
  volume_usage_audit:
    cron: "5 * * * *"
    starting_deadline: 600
    history:
      success: 3
      failed: 1

pod:
  security_context:
    volume_usage_audit:
      pod:
        runAsUser: 42424
      container:
        cinder_volume_usage_audit:
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
    cinder_api:
      pod:
        runAsUser: 42424
      container:
        ceph_coordination_volume_perms:
          runAsUser: 0
          readOnlyRootFilesystem: true
        cinder_api:
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
    cinder_backup:
      pod:
        runAsUser: 42424
      container:
        ceph_backup_keyring_placement:
          runAsUser: 0
          readOnlyRootFilesystem: true
        ceph_keyring_placement:
          runAsUser: 0
          readOnlyRootFilesystem: true
        ceph_backup_volume_perms:
          runAsUser: 0
          readOnlyRootFilesystem: true
        ceph_coordination_volume_perms:
          runAsUser: 0
          readOnlyRootFilesystem: true
        cinder_backup:
          capabilities:
            add:
              - SYS_ADMIN
          readOnlyRootFilesystem: true
          runAsUser: 0
    cinder_scheduler:
      pod:
        runAsUser: 42424
      container:
        ceph_coordination_volume_perms:
          runAsUser: 0
          readOnlyRootFilesystem: true
        cinder_scheduler:
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
    cinder_volume:
      pod:
        runAsUser: 42424
      container:
        ceph_keyring_placement:
          runAsUser: 0
          readOnlyRootFilesystem: true
        ceph_coordination_volume_perms:
          runAsUser: 0
          readOnlyRootFilesystem: true
        init_cinder_conf:
          runAsUser: 0
          readOnlyRootFilesystem: true
        cinder_volume:
          capabilities:
            add:
              - SYS_ADMIN
          readOnlyRootFilesystem: true
    storage_init:
      pod:
        runAsUser: 42424
      container:
        ceph_keyring_placement:
          runAsUser: 0
          readOnlyRootFilesystem: true
        cinder_backup_storage_init:
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
    clean:
      pod:
        runAsUser: 42424
      container:
        cinder_volume_rbd_secret_clean:
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
    create_internal_tenant:
      pod:
        runAsUser: 42424
      container:
        create_internal_tenant:
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
      weight:
        default: 10
  tolerations:
    cinder:
      enabled: false
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
          effect: NoSchedule
  useHostNetwork:
    volume: false
    backup: false
  mounts:
    cinder_api:
      init_container: null
      cinder_api:
        volumeMounts:
        volumes:
    cinder_scheduler:
      init_container: null
      cinder_scheduler:
        volumeMounts:
        volumes:
    cinder_volume:
      init_container: null
      cinder_volume:
        volumeMounts:
        volumes:
    cinder_volume_usage_audit:
      init_container: null
      cinder_volume_usage_audit:
        volumeMounts:
        volumes:
    cinder_backup:
      init_container: null
      cinder_backup:
        volumeMounts:
        volumes:
    cinder_tests:
      init_container: null
      cinder_tests:
        volumeMounts:
        volumes:
    cinder_db_sync:
      cinder_db_sync:
        volumeMounts:
        volumes:
  replicas:
    api: 1
    volume: 1
    scheduler: 1
    backup: 1
  lifecycle:
    upgrades:
      deployments:
        revision_history: 3
        pod_replacement_strategy: RollingUpdate
        rolling_update:
          max_unavailable: 1
          max_surge: 3
    disruption_budget:
      api:
        min_available: 0
    termination_grace_period:
      api:
        timeout: 30
  resources:
    enabled: false
    api:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    scheduler:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    volume:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    jobs:
      volume_usage_audit:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      bootstrap:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      rabbit_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_drop:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      clean:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      backup_storage_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      storage_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_endpoints:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_service:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_user:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      tests:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      image_repo_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"

bootstrap:
  enabled: true
  ks_user: admin
  bootstrap_conf_backends: true
  volume_types:
    name:
      group:
        volume_backend_name:
        # access_type: "private"
        # If you set access_type to private, only the creator will get
        # access to the volume type. You can extend access to your volume
        # type by providing a list of domain names and projects as shown
        # below.
        # grant_access:
        #   <domain name 1>:
        #     - <project name 1>
        #     - <project name 2>
        #     <...>
        #   <domain name 2>:
        #     - <project name 1>
        #     <...>
  # Volume QoS, if any. By default, no QoS is created.
  # Below, values with a number at the end need to be replaced
  # with real names.
  # volume_qos:
  #   qos_name_1:
  #     consumer: front-end
  #     properties:
  #       key_1: value_1
  #       key_2: value_2
  #     associates:
  #       - volume_type_1
  #       - volume_type_2

network:
  api:
    ingress:
      public: true
      classes:
        namespace: "nginx"
        cluster: "nginx-cluster"
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
    external_policy_local: false
    node_port:
      enabled: false
      port: 30877

ceph_client:
  # Enable this when a second ceph backend pointing to an external ceph
  # cluster needs to be created.
  enable_external_ceph_backend: false
  # Change this in case the name of the first ceph backend, which points
  # to the internal ceph cluster, is different.
  internal_ceph_backend: rbd1
  configmap: ceph-etc
  user_secret_name: pvc-ceph-client-key
  external_ceph:
    # The secret for the external ceph keyring is created only when
    # enable_external_ceph_backend is true and rbd_user is NOT null.
    rbd_user: null
    rbd_user_keyring: null
    configmap: null
    conf:
      global: null
      osd: null
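  # A rough, commented-out sketch of what enabling this might look like; the
  # user name, keyring, and monitor values below are hypothetical placeholders,
  # and a matching backend entry would still need to be defined under
  # conf.backends.
  # enable_external_ceph_backend: true
  # external_ceph:
  #   rbd_user: cinder-external
  #   rbd_user_keyring: <keyring of the external cluster's cinder user>
  #   conf:
  #     global:
  #       mon_host: <external cluster monitor addresses>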
conf:
  paste:
    composite:osapi_volume:
      use: call:cinder.api:root_app_factory
      /: apiversions
      /v1: openstack_volume_api_v1
      /v2: openstack_volume_api_v2
      /v3: openstack_volume_api_v3
    composite:openstack_volume_api_v1:
      use: call:cinder.api.middleware.auth:pipeline_factory
      noauth: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv1
      keystone: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv1
      keystone_nolimit: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv1
    composite:openstack_volume_api_v2:
      use: call:cinder.api.middleware.auth:pipeline_factory
      noauth: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv2
      keystone: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv2
      keystone_nolimit: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv2
    composite:openstack_volume_api_v3:
      use: call:cinder.api.middleware.auth:pipeline_factory
      noauth: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv3
      keystone: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv3
      keystone_nolimit: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv3
    filter:request_id:
      paste.filter_factory: oslo_middleware.request_id:RequestId.factory
    filter:http_proxy_to_wsgi:
      paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
    filter:cors:
      paste.filter_factory: oslo_middleware.cors:filter_factory
      oslo_config_project: cinder
    filter:faultwrap:
      paste.filter_factory: cinder.api.middleware.fault:FaultWrapper.factory
    filter:osprofiler:
      paste.filter_factory: osprofiler.web:WsgiMiddleware.factory
    filter:noauth:
      paste.filter_factory: cinder.api.middleware.auth:NoAuthMiddleware.factory
    filter:sizelimit:
      paste.filter_factory: oslo_middleware.sizelimit:RequestBodySizeLimiter.factory
    app:apiv1:
      paste.app_factory: cinder.api.v1.router:APIRouter.factory
    app:apiv2:
      paste.app_factory: cinder.api.v2.router:APIRouter.factory
    app:apiv3:
      paste.app_factory: cinder.api.v3.router:APIRouter.factory
    pipeline:apiversions:
      pipeline: cors http_proxy_to_wsgi faultwrap osvolumeversionapp
    app:osvolumeversionapp:
      paste.app_factory: cinder.api.versions:Versions.factory
    filter:keystonecontext:
      paste.filter_factory: cinder.api.middleware.auth:CinderKeystoneContext.factory
    filter:authtoken:
      paste.filter_factory: keystonemiddleware.auth_token:filter_factory
    filter:audit:
      paste.filter_factory: keystonemiddleware.audit:filter_factory
      audit_map_file: /etc/cinder/api_audit_map.conf
  policy: {}
  api_audit_map:
    DEFAULT:
      target_endpoint_type: None
    custom_actions:
      associate: update/associate
      disassociate: update/disassociate_all
      disassociate_all: update/disassociate_all
      associations: read/list/associations
    path_keywords:
      defaults: None
      detail: None
      limits: None
      os-quota-specs: project
      qos-specs: qos-spec
      snapshots: snapshot
      types: type
      volumes: volume
    service_endpoints:
      volume: service/storage/block
      volumev2: service/storage/block
      volumev3: service/storage/block
  cinder_sudoers: |
    # This sudoers file supports rootwrap for both Kolla and LOCI Images.
    Defaults !requiretty
    Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin"
    cinder ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/cinder-rootwrap /etc/cinder/rootwrap.conf *, /var/lib/openstack/bin/cinder-rootwrap /etc/cinder/rootwrap.conf *
  rootwrap: |
    # Configuration for cinder-rootwrap
    # This file should be owned by (and only-writeable by) the root user

    [DEFAULT]
    # List of directories to load filter definitions from (separated by ',').
    # These directories MUST all be only writeable by root !
    filters_path=/etc/cinder/rootwrap.d

    # List of directories to search executables in, in case filters do not
    # explicitly specify a full path (separated by ',')
    # If not specified, defaults to system PATH environment variable.
    # These directories MUST all be only writeable by root !
    exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin

    # Enable logging to syslog
    # Default value is False
    use_syslog=False

    # Which syslog facility to use.
    # Valid values include auth, authpriv, syslog, local0, local1...
    # Default value is 'syslog'
    syslog_log_facility=syslog

    # Which messages to log.
    # INFO means log all usage
    # ERROR means only log unsuccessful attempts
    syslog_log_level=ERROR
  rootwrap_filters:
    volume:
      pods:
        - volume
      content: |
        # cinder-rootwrap command filters for volume nodes
        # This file should be owned by (and only-writeable by) the root user

        [Filters]
        # cinder/volume/iscsi.py: iscsi_helper '--op' ...
        ietadm: CommandFilter, ietadm, root
        tgtadm: CommandFilter, tgtadm, root
        iscsictl: CommandFilter, iscsictl, root
        tgt-admin: CommandFilter, tgt-admin, root
        cinder-rtstool: CommandFilter, cinder-rtstool, root
        scstadmin: CommandFilter, scstadmin, root

        # LVM related show commands
        pvs: EnvFilter, env, root, LC_ALL=C, pvs
        vgs: EnvFilter, env, root, LC_ALL=C, vgs
        lvs: EnvFilter, env, root, LC_ALL=C, lvs
        lvdisplay: EnvFilter, env, root, LC_ALL=C, lvdisplay

        # -LVM related show commands with suppress fd warnings
        pvs_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, pvs
        vgs_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, vgs
        lvs_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvs
        lvdisplay_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvdisplay


        # -LVM related show commands conf var
        pvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, pvs
        vgs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, vgs
        lvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvs
        lvdisplay_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvdisplay

        # -LVM conf var with suppress fd_warnings
        pvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, pvs
        vgs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, vgs
        lvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvs
        lvdisplay_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvdisplay

        # os-brick library commands
        # os_brick.privileged.run_as_root oslo.privsep context
        # This line ties the superuser privs with the config files, context name,
        # and (implicitly) the actual python code invoked.
        privsep-rootwrap: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.*
        # The following and any cinder/brick/* entries should all be obsoleted
        # by privsep, and may be removed once the os-brick version requirement
        # is updated appropriately.
        scsi_id: CommandFilter, /lib/udev/scsi_id, root
        drbdadm: CommandFilter, drbdadm, root

        # cinder/brick/local_dev/lvm.py: 'vgcreate', vg_name, pv_list
        vgcreate: CommandFilter, vgcreate, root

        # cinder/brick/local_dev/lvm.py: 'lvcreate', '-L', sizestr, '-n', volume_name,..
        # cinder/brick/local_dev/lvm.py: 'lvcreate', '-L', ...
        lvcreate: EnvFilter, env, root, LC_ALL=C, lvcreate
        lvcreate_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvcreate
        lvcreate_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvcreate
        lvcreate_lvmconf_fdwarn: EnvFilter, env, root, LVM_SYSTEM_DIR=, LVM_SUPPRESS_FD_WARNINGS=, LC_ALL=C, lvcreate

        # cinder/volume/driver.py: 'dd', 'if=%s' % srcstr, 'of=%s' % deststr,...
        dd: CommandFilter, dd, root

        # cinder/volume/driver.py: 'lvremove', '-f', %s/%s % ...
        lvremove: CommandFilter, lvremove, root

        # cinder/volume/driver.py: 'lvrename', '%(vg)s', '%(orig)s' '(new)s'...
        lvrename: CommandFilter, lvrename, root

        # cinder/brick/local_dev/lvm.py: 'lvextend', '-L' '%(new_size)s', '%(lv_name)s' ...
        # cinder/brick/local_dev/lvm.py: 'lvextend', '-L' '%(new_size)s', '%(thin_pool)s' ...
        lvextend: EnvFilter, env, root, LC_ALL=C, lvextend
        lvextend_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvextend
        lvextend_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvextend
        lvextend_lvmconf_fdwarn: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvextend

        # cinder/brick/local_dev/lvm.py: 'lvchange -a y -K <lv>'
        lvchange: CommandFilter, lvchange, root

        # cinder/brick/local_dev/lvm.py: 'lvconvert', '--merge', snapshot_name
        lvconvert: CommandFilter, lvconvert, root

        # cinder/volume/driver.py: 'iscsiadm', '-m', 'discovery', '-t',...
        # cinder/volume/driver.py: 'iscsiadm', '-m', 'node', '-T', ...
        iscsiadm: CommandFilter, iscsiadm, root

        # cinder/volume/utils.py: utils.temporary_chown(path, 0)
        chown: CommandFilter, chown, root

        # cinder/volume/utils.py: copy_volume(..., ionice='...')
        ionice_1: ChainingRegExpFilter, ionice, root, ionice, -c[0-3], -n[0-7]
        ionice_2: ChainingRegExpFilter, ionice, root, ionice, -c[0-3]

        # cinder/volume/utils.py: setup_blkio_cgroup()
        cgcreate: CommandFilter, cgcreate, root
        cgset: CommandFilter, cgset, root
        cgexec: ChainingRegExpFilter, cgexec, root, cgexec, -g, blkio:\S+

        # cinder/volume/driver.py
        dmsetup: CommandFilter, dmsetup, root
        ln: CommandFilter, ln, root

        # cinder/image/image_utils.py
        qemu-img: EnvFilter, env, root, LC_ALL=C, qemu-img
        qemu-img_convert: CommandFilter, qemu-img, root

        udevadm: CommandFilter, udevadm, root

        # cinder/volume/driver.py: utils.read_file_as_root()
        cat: CommandFilter, cat, root

        # cinder/volume/nfs.py
        stat: CommandFilter, stat, root
        mount: CommandFilter, mount, root
        df: CommandFilter, df, root
        du: CommandFilter, du, root
        truncate: CommandFilter, truncate, root
        chmod: CommandFilter, chmod, root
        rm: CommandFilter, rm, root

        # cinder/volume/drivers/remotefs.py
        mkdir: CommandFilter, mkdir, root

        # cinder/volume/drivers/netapp/nfs.py:
        netapp_nfs_find: RegExpFilter, find, root, find, ^[/]*([^/\0]+(/+)?)*$, -maxdepth, \d+, -name, img-cache.*, -amin, \+\d+

        # cinder/volume/drivers/glusterfs.py
        chgrp: CommandFilter, chgrp, root
        umount: CommandFilter, umount, root
        fallocate: CommandFilter, fallocate, root

        # cinder/volumes/drivers/hds/hds.py:
        hus-cmd: CommandFilter, hus-cmd, root
        hus-cmd_local: CommandFilter, /usr/local/bin/hus-cmd, root

        # cinder/volumes/drivers/hds/hnas_backend.py
        ssc: CommandFilter, ssc, root

        # cinder/brick/initiator/connector.py:
        ls: CommandFilter, ls, root
        tee: CommandFilter, tee, root
        multipath: CommandFilter, multipath, root
        multipathd: CommandFilter, multipathd, root
        systool: CommandFilter, systool, root

        # cinder/volume/drivers/block_device.py
        blockdev: CommandFilter, blockdev, root

        # cinder/volume/drivers/ibm/gpfs.py
        # cinder/volume/drivers/tintri.py
        mv: CommandFilter, mv, root

        # cinder/volume/drivers/ibm/gpfs.py
        cp: CommandFilter, cp, root
        mmgetstate: CommandFilter, /usr/lpp/mmfs/bin/mmgetstate, root
        mmclone: CommandFilter, /usr/lpp/mmfs/bin/mmclone, root
        mmlsattr: CommandFilter, /usr/lpp/mmfs/bin/mmlsattr, root
        mmchattr: CommandFilter, /usr/lpp/mmfs/bin/mmchattr, root
        mmlsconfig: CommandFilter, /usr/lpp/mmfs/bin/mmlsconfig, root
        mmlsfs: CommandFilter, /usr/lpp/mmfs/bin/mmlsfs, root
        mmlspool: CommandFilter, /usr/lpp/mmfs/bin/mmlspool, root
        mkfs: CommandFilter, mkfs, root
        mmcrfileset: CommandFilter, /usr/lpp/mmfs/bin/mmcrfileset, root
        mmlinkfileset: CommandFilter, /usr/lpp/mmfs/bin/mmlinkfileset, root
        mmunlinkfileset: CommandFilter, /usr/lpp/mmfs/bin/mmunlinkfileset, root
        mmdelfileset: CommandFilter, /usr/lpp/mmfs/bin/mmdelfileset, root
        mmcrsnapshot: CommandFilter, /usr/lpp/mmfs/bin/mmcrsnapshot, root
        mmdelsnapshot: CommandFilter, /usr/lpp/mmfs/bin/mmdelsnapshot, root

        # cinder/volume/drivers/ibm/gpfs.py
        # cinder/volume/drivers/ibm/ibmnas.py
        find_maxdepth_inum: RegExpFilter, find, root, find, ^[/]*([^/\0]+(/+)?)*$, -maxdepth, \d+, -ignore_readdir_race, -inum, \d+, -print0, -quit

        # cinder/brick/initiator/connector.py:
        aoe-revalidate: CommandFilter, aoe-revalidate, root
        aoe-discover: CommandFilter, aoe-discover, root
        aoe-flush: CommandFilter, aoe-flush, root

        # cinder/brick/initiator/linuxscsi.py:
        sg_scan: CommandFilter, sg_scan, root

        # cinder/backup/services/tsm.py
        dsmc: CommandFilter, /usr/bin/dsmc, root

        # cinder/volume/drivers/hitachi/hbsd_horcm.py
        raidqry: CommandFilter, raidqry, root
        raidcom: CommandFilter, raidcom, root
        pairsplit: CommandFilter, pairsplit, root
        paircreate: CommandFilter, paircreate, root
        pairdisplay: CommandFilter, pairdisplay, root
        pairevtwait: CommandFilter, pairevtwait, root
        horcmstart.sh: CommandFilter, horcmstart.sh, root
        horcmshutdown.sh: CommandFilter, horcmshutdown.sh, root
        horcmgr: EnvFilter, env, root, HORCMINST=, /etc/horcmgr

        # cinder/volume/drivers/hitachi/hbsd_snm2.py
        auman: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auman
        auluref: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluref
        auhgdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgdef
        aufibre1: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aufibre1
        auhgwwn: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgwwn
        auhgmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgmap
        autargetmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetmap
        aureplicationvvol: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationvvol
        auluadd: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluadd
        auludel: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auludel
        auluchgsize: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluchgsize
        auchapuser: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auchapuser
        autargetdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetdef
        autargetopt: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetopt
        autargetini: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetini
        auiscsi: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auiscsi
        audppool: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/audppool
        aureplicationlocal: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationlocal
        aureplicationmon: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationmon

        # cinder/volume/drivers/hgst.py
        vgc-cluster: CommandFilter, vgc-cluster, root

        # cinder/volume/drivers/vzstorage.py
        pstorage-mount: CommandFilter, pstorage-mount, root
        pstorage: CommandFilter, pstorage, root
        ploop: CommandFilter, ploop, root

        # initiator/connector.py:
        drv_cfg: CommandFilter, /opt/emc/scaleio/sdc/bin/drv_cfg, root, /opt/emc/scaleio/sdc/bin/drv_cfg, --query_guid
  ceph:
    override:
    append:
    monitors: []
    admin_keyring: null
    pools:
      backup:
        replication: 3
        crush_rule: replicated_rule
        chunk_size: 8
        app_name: cinder-backup
      cinder.volumes:
        replication: 3
        crush_rule: replicated_rule
        chunk_size: 8
        app_name: cinder-volume
  cinder:
    DEFAULT:
      volume_usage_audit_period: hour
      resource_query_filters_file: /etc/cinder/resource_filters.json
      log_config_append: /etc/cinder/logging.conf
      use_syslog: false
      use_stderr: true
      enable_v1_api: false
      enable_v2_api: false
      volume_name_template: "%s"
      osapi_volume_workers: 1
      glance_api_version: 2
      os_region_name: RegionOne
      host: cinder-volume-worker
      # NOTE(portdirect): the bind port should not be defined, and is manipulated
      # via the endpoints section.
      osapi_volume_listen_port: null
      enabled_backends: "rbd1"
      default_volume_type: "rbd1"
      # NOTE(portdirect): "cinder.backup.drivers.ceph" and
      # "cinder.backup.drivers.posix" also supported
      # NOTE(rchurch): As of Stein, drivers by class name are required
      # - cinder.backup.drivers.swift.SwiftBackupDriver
      # - cinder.backup.drivers.ceph.CephBackupDriver
      # - cinder.backup.drivers.posix.PosixBackupDriver
      backup_driver: "cinder.backup.drivers.swift.SwiftBackupDriver"
      # Backup: Ceph RBD options
      backup_ceph_conf: "/etc/ceph/ceph.conf"
      backup_ceph_user: cinderbackup
      backup_ceph_pool: cinder.backups
      # Backup: Posix options
      backup_posix_path: /var/lib/cinder/backup
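      # As an illustrative, commented-out example only: switching the backup
      # target to Ceph would mean selecting the Ceph driver class listed above,
      # which then uses the backup_ceph_* options already defined here.
      # backup_driver: "cinder.backup.drivers.ceph.CephBackupDriver"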
      auth_strategy: keystone
      # Internal tenant id
      internal_project_name: internal_cinder
      internal_user_name: internal_cinder
    database:
      max_retries: -1
    keystone_authtoken:
      service_token_roles: service
      service_token_roles_required: true
      auth_version: v3
      auth_type: password
      memcache_security_strategy: ENCRYPT
      service_type: volumev3
    nova:
      auth_type: password
      auth_version: v3
      interface: internal
    oslo_policy:
      policy_file: /etc/cinder/policy.yaml
    oslo_concurrency:
      lock_path: "/var/lib/cinder/tmp"
    oslo_messaging_notifications:
      driver: messagingv2
    oslo_middleware:
      enable_proxy_headers_parsing: true
    oslo_messaging_rabbit:
      rabbit_ha_queues: true
    coordination:
      backend_url: file:///var/lib/cinder/coordination
    service_user:
      auth_type: password
      send_service_user_token: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -0500854 logging:
855 loggers:
856 keys:
857 - root
858 - cinder
859 handlers:
860 keys:
861 - stdout
862 - stderr
863 - "null"
864 formatters:
865 keys:
866 - context
867 - default
868 logger_root:
869 level: WARNING
870 handlers: 'null'
871 logger_cinder:
872 level: INFO
873 handlers:
874 - stdout
875 qualname: cinder
876 logger_amqp:
877 level: WARNING
878 handlers: stderr
879 qualname: amqp
880 logger_amqplib:
881 level: WARNING
882 handlers: stderr
883 qualname: amqplib
884 logger_eventletwsgi:
885 level: WARNING
886 handlers: stderr
887 qualname: eventlet.wsgi.server
888 logger_sqlalchemy:
889 level: WARNING
890 handlers: stderr
891 qualname: sqlalchemy
892 logger_boto:
893 level: WARNING
894 handlers: stderr
895 qualname: boto
896 handler_null:
897 class: logging.NullHandler
898 formatter: default
899 args: ()
900 handler_stdout:
901 class: StreamHandler
902 args: (sys.stdout,)
903 formatter: context
904 handler_stderr:
905 class: StreamHandler
906 args: (sys.stderr,)
907 formatter: context
908 formatter_context:
909 class: oslo_log.formatters.ContextFormatter
910 datefmt: "%Y-%m-%d %H:%M:%S"
911 formatter_default:
912 format: "%(message)s"
913 datefmt: "%Y-%m-%d %H:%M:%S"
  rabbitmq:
    # NOTE(rk760n): add an rmq policy to mirror messages from notification
    # queues and set an expiration time for them
    policies:
      - vhost: "cinder"
        name: "ha_ttl_cinder"
        definition:
          # mirror messages to other nodes in the rmq cluster
          ha-mode: "all"
          ha-sync-mode: "automatic"
          # 70s
          message-ttl: 70000
        priority: 0
        apply-to: all
        pattern: '^(?!(amq\.|reply_)).*'

  backends:
    # Those options will be written to backends.conf as-is.
    rbd1:
      volume_driver: cinder.volume.drivers.rbd.RBDDriver
      volume_backend_name: rbd1
      rbd_pool: cinder.volumes
      rbd_ceph_conf: "/etc/ceph/ceph.conf"
      rbd_flatten_volume_from_snapshot: false
      report_discard_supported: true
      rbd_max_clone_depth: 5
      rbd_store_chunk_size: 4
      rados_connect_timeout: -1
      rbd_user: cinder
      rbd_secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
      image_volume_cache_enabled: true
      image_volume_cache_max_size_gb: 200
      image_volume_cache_max_count: 50
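    # A rough sketch of what an additional backend stanza might look like; the
    # backend name "rbd2" and every value below are hypothetical placeholders,
    # and the new name would also have to be appended to
    # conf.cinder.DEFAULT.enabled_backends before it takes effect.
    # rbd2:
    #   volume_driver: cinder.volume.drivers.rbd.RBDDriver
    #   volume_backend_name: rbd2
    #   rbd_pool: cinder.volumes.external
    #   rbd_ceph_conf: "/etc/ceph/ceph-external.conf"
    #   rbd_user: cinder-external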
  rally_tests:
    run_tempest: false
    clean_up: |
      VOLUMES=$(openstack volume list -f value | grep -e "^s_rally_" | awk '{ print $1 }')
      if [ -n "$VOLUMES" ]; then
        echo $VOLUMES | xargs openstack volume delete
      fi
    tests:
      CinderVolumes.create_and_delete_volume:
        - args:
            size: 1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
        - args:
            size:
              max: 5
              min: 1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
  resource_filters:
    volume:
      - name
      - status
      - metadata
      - bootable
      - migration_status
      - availability_zone
      - group_id
    backup:
      - name
      - status
      - volume_id
    snapshot:
      - name
      - status
      - volume_id
      - metadata
      - availability_zone
    group: []
    group_snapshot:
      - status
      - group_id
    attachment:
      - volume_id
      - status
      - instance_id
      - attach_status
    message:
      - resource_uuid
      - resource_type
      - event_id
      - request_id
      - message_level
    pool:
      - name
      - volume_type
    volume_type: []
  enable_iscsi: false
  cinder_api_uwsgi:
    uwsgi:
      add-header: "Connection: close"
      buffer-size: 65535
      chunked-input-limit: "4096000"
      die-on-term: true
      enable-threads: true
      exit-on-reload: false
      hook-master-start: unix_signal:15 gracefully_kill_them_all
      http-auto-chunked: true
      http-raw-body: true
      lazy-apps: true
      log-x-forwarded-for: true
      master: true
      need-app: true
      procname-prefix-spaced: "cinder-api:"
      route-user-agent: '^kube-probe.* donotlog:'
      socket-timeout: 10
      thunder-lock: true
      worker-reload-mercy: 80
      wsgi-file: /var/lib/openstack/bin/cinder-wsgi

backup:
  external_ceph_rbd:
    enabled: false
    admin_keyring: null
    configmap: null
    conf:
      global: null
      osd: null
  posix:
    volume:
      class_name: general
      size: 10Gi

dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
          - cinder-image-repo-sync
        services:
          - endpoint: node
            service: local_image_registry
  static:
    api:
      jobs:
        - cinder-db-sync
        - cinder-ks-user
        - cinder-ks-endpoints
        - cinder-rabbit-init
        - cinder-storage-init
      services:
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: identity
    backup:
      jobs:
        - cinder-db-sync
        - cinder-ks-user
        - cinder-ks-endpoints
        - cinder-rabbit-init
        - cinder-storage-init
        - cinder-backup-storage-init
      services:
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: volume
    backup_storage_init:
      jobs: null
    bootstrap:
      services:
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: volume
      pod:
        - requireSameNode: false
          labels:
            application: cinder
            component: volume
    clean:
      jobs: null
    db_drop:
      services:
        - endpoint: internal
          service: oslo_db
    db_init:
      services:
        - endpoint: internal
          service: oslo_db
    db_sync:
      jobs:
        - cinder-db-init
      services:
        - endpoint: internal
          service: oslo_db
    ks_endpoints:
      jobs:
        - cinder-ks-service
      services:
        - endpoint: internal
          service: identity
    ks_service:
      services:
        - endpoint: internal
          service: identity
    ks_user:
      services:
        - endpoint: internal
          service: identity
    rabbit_init:
      services:
        - service: oslo_messaging
          endpoint: internal
    scheduler:
      jobs:
        - cinder-db-sync
        - cinder-ks-user
        - cinder-ks-endpoints
        - cinder-rabbit-init
        - cinder-storage-init
      services:
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: volume
    storage_init:
      jobs: null
    tests:
      services:
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: volume
    volume:
      jobs:
        - cinder-db-sync
        - cinder-ks-user
        - cinder-ks-endpoints
        - cinder-rabbit-init
        - cinder-storage-init
      services:
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: volume
    volume_usage_audit:
      jobs:
        - cinder-db-sync
        - cinder-ks-user
        - cinder-ks-endpoints
        - cinder-rabbit-init
        - cinder-storage-init
      services:
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: volume
    image_repo_sync:
      services:
        - endpoint: internal
          service: local_image_registry
    create_internal_tenant:
      services:
        - endpoint: internal
          service: identity

# Names of secrets used by bootstrap and environmental checks
secrets:
  identity:
    admin: cinder-keystone-admin
    cinder: cinder-keystone-user
    test: cinder-keystone-test
  oslo_db:
    admin: cinder-db-admin
    cinder: cinder-db-user
  rbd:
    backup: cinder-backup-rbd-keyring
    volume: cinder-volume-rbd-keyring
    volume_external: cinder-volume-external-rbd-keyring
  oslo_messaging:
    admin: cinder-rabbitmq-admin
    cinder: cinder-rabbitmq-user
  tls:
    volume:
      api:
        public: cinder-tls-public
        internal: cinder-tls-api
  oci_image_registry:
    cinder: cinder-oci-image-registry

# We use a different layout of the endpoints here to account for versioning;
# this swaps the service name and type, and should be rolled out to other
# services.
endpoints:
  cluster_domain_suffix: cluster.local
  local_image_registry:
    name: docker-registry
    namespace: docker-registry
    hosts:
      default: localhost
      internal: docker-registry
      node: localhost
    host_fqdn_override:
      default: null
    port:
      registry:
        node: 5000
  oci_image_registry:
    name: oci-image-registry
    namespace: oci-image-registry
    auth:
      enabled: false
      cinder:
        username: cinder
        password: password
    hosts:
      default: localhost
    host_fqdn_override:
      default: null
    port:
      registry:
        default: null
  identity:
    name: keystone
    auth:
      admin:
        region_name: RegionOne
        username: admin
        password: password
        project_name: admin
        user_domain_name: default
        project_domain_name: default
      cinder:
        role: admin,service
        region_name: RegionOne
        username: cinder
        password: password
        project_name: service
        user_domain_name: service
        project_domain_name: service
      test:
        role: admin
        region_name: RegionOne
        username: cinder-test
        password: password
        project_name: test
        user_domain_name: service
        project_domain_name: service
    hosts:
      default: keystone
      internal: keystone-api
    host_fqdn_override:
      default: null
    path:
      default: /v3
    scheme:
      default: http
    port:
      api:
        default: 80
        internal: 5000
  image:
    name: glance
    hosts:
      default: glance-api
      public: glance
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme:
      default: http
    port:
      api:
        default: 9292
        public: 80
  volume:
    name: cinder
    hosts:
      default: cinder-api
      public: cinder
    host_fqdn_override:
      default: null
      # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public
      # endpoints using the following format:
      # public:
      #   host: null
      #   tls:
      #     crt: null
      #     key: null
    path:
      default: '/v1/%(tenant_id)s'
    scheme:
      default: 'http'
    port:
      api:
        default: 8776
        public: 80
  volumev2:
    name: cinderv2
    hosts:
      default: cinder-api
      public: cinder
    host_fqdn_override:
      default: null
      # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public
      # endpoints using the following format:
      # public:
      #   host: null
      #   tls:
      #     crt: null
      #     key: null
    path:
      default: '/v2/%(tenant_id)s'
    scheme:
      default: 'http'
    port:
      api:
        default: 8776
        public: 80
  volumev3:
    name: cinderv3
    hosts:
      default: cinder-api
      public: cinder
    host_fqdn_override:
      default: null
      # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public
      # endpoints using the following format:
      # public:
      #   host: null
      #   tls:
      #     crt: null
      #     key: null
    path:
      default: '/v3/%(tenant_id)s'
    scheme:
      default: 'http'
    port:
      api:
        default: 8776
        public: 80
  oslo_db:
    auth:
      admin:
        username: root
        password: password
        secret:
          tls:
            internal: mariadb-tls-direct
      cinder:
        username: cinder
        password: password
    hosts:
      default: mariadb
    host_fqdn_override:
      default: null
    path: /cinder
    scheme: mysql+pymysql
    port:
      mysql:
        default: 3306
  oslo_messaging:
    auth:
      admin:
        username: rabbitmq
        password: password
        secret:
          tls:
            internal: rabbitmq-tls-direct
      cinder:
        username: cinder
        password: password
    statefulset:
      replicas: 2
      name: rabbitmq-rabbitmq
    hosts:
      default: rabbitmq
    host_fqdn_override:
      default: null
    path: /cinder
    scheme: rabbit
    port:
      amqp:
        default: 5672
      http:
        default: 15672
  oslo_cache:
    auth:
      # NOTE(portdirect): this is used to define the value for the keystone
      # authtoken cache encryption key; if not set, it will be populated
      # automatically with a random value, but to take advantage of this
      # feature all services should be set to use the same key and memcache
      # service.
      memcache_secret_key: null
    hosts:
      default: memcached
    host_fqdn_override:
      default: null
    port:
      memcache:
        default: 11211
  fluentd:
    namespace: null
    name: fluentd
    hosts:
      default: fluentd-logging
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme: 'http'
    port:
      service:
        default: 24224
      metrics:
        default: 24220
  kube_dns:
    namespace: kube-system
    name: kubernetes-dns
    hosts:
      default: kube-dns
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme: http
    port:
      dns:
        default: 53
        protocol: UDP
  ingress:
    namespace: null
    name: ingress
    hosts:
      default: ingress
    port:
      ingress:
        default: 80

network_policy:
  cinder:
    ingress:
      - {}
    egress:
      - {}

# NOTE(helm_hook): helm_hook might break for the helm2 binary.
# Set helm3_hook: false when using the helm2 binary.
helm3_hook: true

tls:
  identity: false
  oslo_messaging: false
  oslo_db: false

manifests:
  certificates: false
  configmap_bin: true
  configmap_etc: true
  cron_volume_usage_audit: true
  deployment_api: true
  deployment_backup: true
  deployment_scheduler: true
  deployment_volume: true
  ingress_api: true
  job_backup_storage_init: true
  job_bootstrap: true
  job_clean: true
  job_create_internal_tenant: true
  job_db_init: true
  job_image_repo_sync: true
  job_rabbit_init: true
  job_db_sync: true
  job_db_drop: false
  job_ks_endpoints: true
  job_ks_service: true
  job_ks_user: true
  job_storage_init: true
  pdb_api: true
  pod_rally_test: true
  pvc_backup: true
  network_policy: false
  secret_db: true
  secret_ingress_tls: true
  secret_keystone: true
  secret_rabbitmq: true
  secret_registry: true
  service_api: true
  service_ingress_api: true
...