blob: 1a0da3e5f78d531501bcbc0ac3ea532b7af77424 [file] [log] [blame]
1# Licensed under the Apache License, Version 2.0 (the "License");
2# you may not use this file except in compliance with the License.
3# You may obtain a copy of the License at
4#
5# http://www.apache.org/licenses/LICENSE-2.0
6#
7# Unless required by applicable law or agreed to in writing, software
8# distributed under the License is distributed on an "AS IS" BASIS,
9# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10# See the License for the specific language governing permissions and
11# limitations under the License.
12
13# Default values for nova.
14# This is a YAML-formatted file.
15# Declare name/value pairs to be passed into your templates.
16# name: value
17
18---
19release_group: null
20
21labels:
22 agent:
23 compute:
24 node_selector_key: openstack-compute-node
25 node_selector_value: enabled
26 compute_ironic:
27 node_selector_key: openstack-compute-node
28 node_selector_value: enabled
29 api_metadata:
30 node_selector_key: openstack-control-plane
31 node_selector_value: enabled
32 conductor:
33 node_selector_key: openstack-control-plane
34 node_selector_value: enabled
35 job:
36 node_selector_key: openstack-control-plane
37 node_selector_value: enabled
38 novncproxy:
39 node_selector_key: openstack-control-plane
40 node_selector_value: enabled
41 osapi:
42 node_selector_key: openstack-control-plane
43 node_selector_value: enabled
44 scheduler:
45 node_selector_key: openstack-control-plane
46 node_selector_value: enabled
47 spiceproxy:
48 node_selector_key: openstack-control-plane
49 node_selector_value: enabled
50 test:
51 node_selector_key: openstack-control-plane
52 node_selector_value: enabled
53
54images:
55 pull_policy: IfNotPresent
56 tags:
57 bootstrap: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
58 db_drop: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
59 db_init: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
60 dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
61 rabbit_init: docker.io/rabbitmq:3.7-management
62 ks_user: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
63 ks_service: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
64 nova_archive_deleted_rows: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
65 ks_endpoints: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
66 nova_api: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
67 nova_cell_setup: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
68 nova_cell_setup_init: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
69 nova_compute: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
70 nova_compute_ironic: 'docker.io/kolla/ubuntu-source-nova-compute-ironic:wallaby'
71 nova_compute_ssh: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
72 nova_conductor: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
73 nova_db_sync: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
74 nova_novncproxy: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
75 nova_novncproxy_assets: 'docker.io/kolla/ubuntu-source-nova-novncproxy:wallaby'
76 nova_scheduler: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
77 # NOTE(portdirect): we simply use the ceph config helper here,
78 # as it has both oscli and jq.
79 nova_service_cleaner: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_focal'
80 nova_spiceproxy: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
81 nova_spiceproxy_assets: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
82 test: docker.io/xrally/xrally-openstack:2.0.0
83 image_repo_sync: docker.io/docker:17.07.0
84 nova_wait_for_computes_init: gcr.io/google_containers/hyperkube-amd64:v1.11.6
85 local_registry:
86 active: false
87 exclude:
88 - dep_check
89 - image_repo_sync
90
91jobs:
92 # NOTE(portdirect): When using cells new nodes will be added to the cell on the hour by default.
93 # TODO(portdirect): Add a post-start action to nova compute pods that registers themselves.
94 cell_setup:
95 cron: "0 */1 * * *"
96 starting_deadline: 600
97 history:
98 success: 3
99 failed: 1
100 extended_wait:
101 enabled: false
102 iteration: 3
103 duration: 5
104 service_cleaner:
105 cron: "0 */1 * * *"
106 starting_deadline: 600
107 history:
108 success: 3
109 failed: 1
110 sleep_time: 60
111 archive_deleted_rows:
112 cron: "0 */1 * * *"
113 starting_deadline: 600
114 history:
115 success: 3
116 failed: 1
117
118bootstrap:
119 enabled: true
120 ks_user: admin
121 script: null
122 structured:
123 flavors:
124 enabled: true
125 options:
126 m1_tiny:
127 name: "m1.tiny"
128 ram: 512
129 disk: 1
130 vcpus: 1
131 m1_small:
132 name: "m1.small"
133 ram: 2048
134 disk: 20
135 vcpus: 1
136 m1_medium:
137 name: "m1.medium"
138 ram: 4096
139 disk: 40
140 vcpus: 2
141 m1_large:
142 name: "m1.large"
143 ram: 8192
144 disk: 80
145 vcpus: 4
146 m1_xlarge:
147 name: "m1.xlarge"
148 ram: 16384
149 disk: 160
150 vcpus: 8
151 wait_for_computes:
152 enabled: false
153 # Wait percentage is the minimum percentage of compute hypervisors which
154 # must be available before the remainder of the bootstrap script can be run.
155 wait_percentage: 70
156 # Once the wait_percentage above is achieved, the remaining_wait is the
157 # amount of time in seconds to wait before executing the remainder of the
158 # boostrap script.
159 remaining_wait: 300
160 scripts:
161 init_script: |
162 # This runs in a bootstrap init container. It counts the number of compute nodes.
163 COMPUTE_NODES=$(kubectl get nodes -o custom-columns=NAME:.metadata.name -l openstack-compute-node=enabled --no-headers | sort)
164 /bin/echo $COMPUTE_NODES > /tmp/compute_nodes.txt
165 wait_script: |
166 # This script runs in the main bootstrap container just before the
167 # bootstrap.script is called.
168 COMPUTE_HOSTS=`cat /tmp/compute_nodes.txt | wc -w`
169 if [[ $COMPUTE_HOSTS == 0 ]]; then
170 echo "There are no compute hosts found!"
171 exit 1
172 fi
173
174 # Wait for all hypervisors to come up before moving on with the deployment
175 HYPERVISOR_WAIT=true
176 WAIT_AFTER_READY=0
177 SLEEP=5
178 while [[ $HYPERVISOR_WAIT == true ]]; do
179 # Its possible that openstack command may fail due to not being able to
180 # reach the compute service
181 set +e
182 HYPERVISORS=$(openstack hypervisor list -f value -c 'Hypervisor Hostname' | wc -w)
183 set -e
184
185 PERCENT_READY=$(( $HYPERVISORS * 100 / $COMPUTE_HOSTS ))
186 if [[ $PERCENT_READY -ge $WAIT_PERCENTAGE ]]; then
187 echo "Hypervisor ready percentage is $PERCENT_READY"
188 if [[ $PERCENT_READY == 100 ]]; then
189 HYPERVISOR_WAIT=false
190 echo "All hypervisors are ready."
191 elif [[ WAIT_AFTER_READY -ge $REMAINING_WAIT ]]; then
192 HYPERVISOR_WAIT=false
193 echo "Waited the configured time -- $HYPERVISORS out of $COMPUTE_HOSTS hypervisor(s) ready -- proceeding with the bootstrap."
194 else
195 sleep $SLEEP
196 WAIT_AFTER_READY=$(( $WAIT_AFTER_READY + $SLEEP ))
197 fi
198 else
199 echo "Waiting $SLEEP seconds for enough hypervisors to be discovered..."
200 sleep $SLEEP
201 fi
202 done
203
204network:
205 # provide what type of network wiring will be used
206 # possible options: openvswitch, linuxbridge, sriov
207 backend:
208 - openvswitch
209 osapi:
210 port: 8774
211 ingress:
212 public: true
213 classes:
214 namespace: "nginx"
215 cluster: "nginx-cluster"
216 annotations:
217 nginx.ingress.kubernetes.io/rewrite-target: /
218 external_policy_local: false
219 node_port:
220 enabled: false
221 port: 30774
222 metadata:
223 port: 8775
224 ingress:
225 public: true
226 classes:
227 namespace: "nginx"
228 cluster: "nginx-cluster"
229 annotations:
230 nginx.ingress.kubernetes.io/rewrite-target: /
231 external_policy_local: false
232 node_port:
233 enabled: false
234 port: 30775
235 novncproxy:
236 ingress:
237 public: true
238 classes:
239 namespace: "nginx"
240 cluster: "nginx-cluster"
241 annotations:
242 nginx.ingress.kubernetes.io/rewrite-target: /
243 node_port:
244 enabled: false
245 port: 30680
246 spiceproxy:
247 node_port:
248 enabled: false
249 port: 30682
250 ssh:
251 enabled: false
252 port: 8022
253 from_subnet: 0.0.0.0/0
254 key_types:
255 - rsa
256 - dsa
257 - ecdsa
258 - ed25519
259 private_key: 'null'
260 public_key: 'null'
261
262dependencies:
263 dynamic:
264 common:
265 local_image_registry:
266 jobs:
267 - nova-image-repo-sync
268 services:
269 - endpoint: node
270 service: local_image_registry
271 targeted:
272 openvswitch:
273 compute:
274 pod:
275 - requireSameNode: true
276 labels:
277 application: neutron
278 component: neutron-ovs-agent
279 linuxbridge:
280 compute:
281 pod:
282 - requireSameNode: true
283 labels:
284 application: neutron
285 component: neutron-lb-agent
286 sriov:
287 compute:
288 pod:
289 - requireSameNode: true
290 labels:
291 application: neutron
292 component: neutron-sriov-agent
293 static:
294 api:
295 jobs:
296 - nova-db-sync
297 - nova-ks-user
298 - nova-ks-endpoints
299 - nova-rabbit-init
300 services:
301 - endpoint: internal
302 service: oslo_messaging
303 - endpoint: internal
304 service: oslo_db
305 - endpoint: internal
306 service: identity
307 api_metadata:
308 jobs:
309 - nova-db-sync
310 - nova-ks-user
311 - nova-ks-endpoints
312 - nova-rabbit-init
313 services:
314 - endpoint: internal
315 service: oslo_messaging
316 - endpoint: internal
317 service: oslo_db
318 - endpoint: internal
319 service: identity
320 bootstrap:
321 services:
322 - endpoint: internal
323 service: identity
324 - endpoint: internal
325 service: compute
326 cell_setup:
327 jobs:
328 - nova-db-sync
329 - nova-rabbit-init
330 services:
331 - endpoint: internal
332 service: oslo_messaging
333 - endpoint: internal
334 service: oslo_db
335 - endpoint: internal
336 service: identity
337 - endpoint: internal
338 service: compute
339 pod:
340 - requireSameNode: false
341 labels:
342 application: nova
343 component: compute
344 service_cleaner:
345 jobs:
346 - nova-db-sync
347 - nova-rabbit-init
348 services:
349 - endpoint: internal
350 service: oslo_messaging
351 - endpoint: internal
352 service: oslo_db
353 - endpoint: internal
354 service: identity
355 - endpoint: internal
356 service: compute
357 compute:
358 pod:
359 - requireSameNode: true
360 labels:
361 application: libvirt
362 component: libvirt
363 jobs:
364 - nova-db-sync
365 - nova-rabbit-init
366 services:
367 - endpoint: internal
368 service: oslo_messaging
369 - endpoint: internal
370 service: image
371 - endpoint: internal
372 service: compute
373 - endpoint: internal
374 service: network
375 - endpoint: internal
376 service: compute_metadata
377 compute_ironic:
378 jobs:
379 - nova-db-sync
380 - nova-rabbit-init
381 services:
382 - endpoint: internal
383 service: oslo_messaging
384 - endpoint: internal
385 service: image
386 - endpoint: internal
387 service: compute
388 - endpoint: internal
389 service: network
390 - endpoint: internal
391 service: baremetal
392 conductor:
393 jobs:
394 - nova-db-sync
395 - nova-rabbit-init
396 services:
397 - endpoint: internal
398 service: oslo_messaging
399 - endpoint: internal
400 service: oslo_db
401 - endpoint: internal
402 service: identity
403 - endpoint: internal
404 service: compute
405 db_drop:
406 services:
407 - endpoint: internal
408 service: oslo_db
409 archive_deleted_rows:
410 jobs:
411 - nova-db-init
412 - nova-db-sync
413 db_init:
414 services:
415 - endpoint: internal
416 service: oslo_db
417 db_sync:
418 jobs:
419 - nova-db-init
420 services:
421 - endpoint: internal
422 service: oslo_db
423 ks_endpoints:
424 jobs:
425 - nova-ks-service
426 services:
427 - endpoint: internal
428 service: identity
429 ks_service:
430 services:
431 - endpoint: internal
432 service: identity
433 ks_user:
434 services:
435 - endpoint: internal
436 service: identity
437 rabbit_init:
438 services:
439 - service: oslo_messaging
440 endpoint: internal
441 novncproxy:
442 jobs:
443 - nova-db-sync
444 services:
445 - endpoint: internal
446 service: oslo_db
447 spiceproxy:
448 jobs:
449 - nova-db-sync
450 services:
451 - endpoint: internal
452 service: oslo_db
453 scheduler:
454 jobs:
455 - nova-db-sync
456 - nova-rabbit-init
457 services:
458 - endpoint: internal
459 service: oslo_messaging
460 - endpoint: internal
461 service: oslo_db
462 - endpoint: internal
463 service: identity
464 - endpoint: internal
465 service: compute
466 tests:
467 services:
468 - endpoint: internal
469 service: image
470 - endpoint: internal
471 service: compute
472 - endpoint: internal
473 service: network
474 - endpoint: internal
475 service: compute_metadata
476 image_repo_sync:
477 services:
478 - endpoint: internal
479 service: local_image_registry
480
481console:
482 # serial | spice | novnc | none
483 console_kind: novnc
484 serial:
485 spice:
486 compute:
487 # IF blank, search default routing interface
488 server_proxyclient_interface:
489 proxy:
490 # IF blank, search default routing interface
491 server_proxyclient_interface:
492 novnc:
493 compute:
494 # IF blank, search default routing interface
495 vncserver_proxyclient_interface:
496 vncproxy:
497 # IF blank, search default routing interface
498 vncserver_proxyclient_interface:
499
500ceph_client:
501 configmap: ceph-etc
502 user_secret_name: pvc-ceph-client-key
503
504conf:
505 security: |
506 #
507 # Disable access to the entire file system except for the directories that
508 # are explicitly allowed later.
509 #
510 # This currently breaks the configurations that come with some web application
511 # Debian packages.
512 #
513 #<Directory />
514 # AllowOverride None
515 # Require all denied
516 #</Directory>
517
518 # Changing the following options will not really affect the security of the
519 # server, but might make attacks slightly more difficult in some cases.
520
521 #
522 # ServerTokens
523 # This directive configures what you return as the Server HTTP response
524 # Header. The default is 'Full' which sends information about the OS-Type
525 # and compiled in modules.
526 # Set to one of: Full | OS | Minimal | Minor | Major | Prod
527 # where Full conveys the most information, and Prod the least.
528 ServerTokens Prod
529
530 #
531 # Optionally add a line containing the server version and virtual host
532 # name to server-generated pages (internal error documents, FTP directory
533 # listings, mod_status and mod_info output etc., but not CGI generated
534 # documents or custom error documents).
535 # Set to "EMail" to also include a mailto: link to the ServerAdmin.
536 # Set to one of: On | Off | EMail
537 ServerSignature Off
538
539 #
540 # Allow TRACE method
541 #
542 # Set to "extended" to also reflect the request body (only for testing and
543 # diagnostic purposes).
544 #
545 # Set to one of: On | Off | extended
546 TraceEnable Off
547
548 #
549 # Forbid access to version control directories
550 #
551 # If you use version control systems in your document root, you should
552 # probably deny access to their directories. For example, for subversion:
553 #
554 #<DirectoryMatch "/\.svn">
555 # Require all denied
556 #</DirectoryMatch>
557
558 #
559 # Setting this header will prevent MSIE from interpreting files as something
560 # else than declared by the content type in the HTTP headers.
561 # Requires mod_headers to be enabled.
562 #
563 #Header set X-Content-Type-Options: "nosniff"
564
565 #
566 # Setting this header will prevent other sites from embedding pages from this
567 # site as frames. This defends against clickjacking attacks.
568 # Requires mod_headers to be enabled.
569 #
570 #Header set X-Frame-Options: "sameorigin"
571 software:
572 apache2:
573 binary: apache2
574 start_parameters: -DFOREGROUND
575 conf_dir: /etc/apache2/conf-enabled
576 site_dir: /etc/apache2/sites-enable
577 mods_dir: /etc/apache2/mods-available
578 a2enmod: null
579 a2dismod: null
580 ceph:
581 enabled: true
582 admin_keyring: null
583 cinder:
584 user: "cinder"
585 keyring: null
586 secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
587 rally_tests:
588 run_tempest: false
589 clean_up: |
590 FLAVORS=$(openstack flavor list -f value --all | awk '$2 ~ /^s_rally_/ { print $1 }')
591 if [ -n "$FLAVORS" ]; then
592 echo $FLAVORS | xargs openstack flavor delete
593 fi
594 SERVERS=$(openstack server list -f value --all | awk '$2 ~ /^s_rally_/ { print $1 }')
595 if [ -n "$SERVERS" ]; then
596 echo $SERVERS | xargs openstack server delete
597 fi
598 IMAGES=$(openstack image list -f value | awk '$2 ~ /^c_rally_/ { print $1 }')
599 if [ -n "$IMAGES" ]; then
600 echo $IMAGES | xargs openstack image delete
601 fi
602 tests:
603 NovaAggregates.create_and_get_aggregate_details:
604 - args:
605 availability_zone: nova
606 runner:
607 concurrency: 1
608 times: 1
609 type: constant
610 sla:
611 failure_rate:
612 max: 0
613 NovaAggregates.create_and_update_aggregate:
614 - args:
615 availability_zone: nova
616 runner:
617 concurrency: 1
618 times: 1
619 type: constant
620 sla:
621 failure_rate:
622 max: 0
623 NovaAggregates.list_aggregates:
624 - runner:
625 concurrency: 1
626 times: 1
627 type: constant
628 sla:
629 failure_rate:
630 max: 0
631 NovaAvailabilityZones.list_availability_zones:
632 - args:
633 detailed: true
634 runner:
635 concurrency: 1
636 times: 1
637 type: constant
638 sla:
639 failure_rate:
640 max: 0
641 NovaFlavors.create_and_delete_flavor:
642 - args:
643 disk: 1
644 ram: 500
645 vcpus: 1
646 runner:
647 concurrency: 1
648 times: 1
649 type: constant
650 sla:
651 failure_rate:
652 max: 0
653 NovaFlavors.create_and_list_flavor_access:
654 - args:
655 disk: 1
656 ram: 500
657 vcpus: 1
658 runner:
659 concurrency: 1
660 times: 1
661 type: constant
662 sla:
663 failure_rate:
664 max: 0
665 NovaFlavors.create_flavor:
666 - args:
667 disk: 1
668 ram: 500
669 vcpus: 1
670 runner:
671 concurrency: 1
672 times: 1
673 type: constant
674 sla:
675 failure_rate:
676 max: 0
677 NovaFlavors.create_flavor_and_add_tenant_access:
678 - args:
679 disk: 1
680 ram: 500
681 vcpus: 1
682 runner:
683 concurrency: 1
684 times: 1
685 type: constant
686 sla:
687 failure_rate:
688 max: 0
689 NovaFlavors.create_flavor_and_set_keys:
690 - args:
691 disk: 1
692 extra_specs:
693 'quota:disk_read_bytes_sec': 10240
694 ram: 500
695 vcpus: 1
696 runner:
697 concurrency: 1
698 times: 1
699 type: constant
700 sla:
701 failure_rate:
702 max: 0
703 NovaFlavors.list_flavors:
704 - args:
705 detailed: true
706 runner:
707 concurrency: 1
708 times: 1
709 type: constant
710 sla:
711 failure_rate:
712 max: 0
713 NovaHypervisors.list_and_get_hypervisors:
714 - args:
715 detailed: true
716 runner:
717 concurrency: 1
718 times: 1
719 type: constant
720 sla:
721 failure_rate:
722 max: 0
723 NovaHypervisors.list_and_get_uptime_hypervisors:
724 - args:
725 detailed: true
726 runner:
727 concurrency: 1
728 times: 1
729 type: constant
730 sla:
731 failure_rate:
732 max: 0
733 NovaHypervisors.list_and_search_hypervisors:
734 - args:
735 detailed: true
736 runner:
737 concurrency: 1
738 times: 1
739 type: constant
740 sla:
741 failure_rate:
742 max: 0
743 NovaHypervisors.list_hypervisors:
744 - args:
745 detailed: true
746 runner:
747 concurrency: 1
748 times: 1
749 type: constant
750 sla:
751 failure_rate:
752 max: 0
753 NovaHypervisors.statistics_hypervisors:
754 - args: {}
755 runner:
756 concurrency: 1
757 times: 1
758 type: constant
759 sla:
760 failure_rate:
761 max: 0
762 NovaKeypair.create_and_delete_keypair:
763 - runner:
764 concurrency: 1
765 times: 1
766 type: constant
767 sla:
768 failure_rate:
769 max: 0
770 NovaKeypair.create_and_list_keypairs:
771 - runner:
772 concurrency: 1
773 times: 1
774 type: constant
775 sla:
776 failure_rate:
777 max: 0
778 NovaServerGroups.create_and_list_server_groups:
779 - args:
780 all_projects: false
781 kwargs:
782 policies:
783 - affinity
784 runner:
785 concurrency: 1
786 times: 1
787 type: constant
788 sla:
789 failure_rate:
790 max: 0
791 NovaServices.list_services:
792 - runner:
793 concurrency: 1
794 times: 1
795 type: constant
796 sla:
797 failure_rate:
798 max: 0
799 paste:
800 composite:metadata:
801 use: egg:Paste#urlmap
802 /: meta
803 pipeline:meta:
804 pipeline: cors metaapp
805 app:metaapp:
806 paste.app_factory: nova.api.metadata.handler:MetadataRequestHandler.factory
807 composite:osapi_compute:
808 use: call:nova.api.openstack.urlmap:urlmap_factory
809 /: oscomputeversions
810 /v2: openstack_compute_api_v21_legacy_v2_compatible
811 /v2.1: openstack_compute_api_v21
812 composite:openstack_compute_api_v21:
813 use: call:nova.api.auth:pipeline_factory_v21
814 noauth2: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit noauth2 osapi_compute_app_v21
815 keystone: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit authtoken audit keystonecontext osapi_compute_app_v21
816 composite:openstack_compute_api_v21_legacy_v2_compatible:
817 use: call:nova.api.auth:pipeline_factory_v21
818 noauth2: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit noauth2 legacy_v2_compatible osapi_compute_app_v21
819 keystone: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit authtoken audit keystonecontext legacy_v2_compatible osapi_compute_app_v21
820 filter:request_id:
821 paste.filter_factory: oslo_middleware:RequestId.factory
822 filter:compute_req_id:
823 paste.filter_factory: nova.api.compute_req_id:ComputeReqIdMiddleware.factory
824 filter:faultwrap:
825 paste.filter_factory: nova.api.openstack:FaultWrapper.factory
826 filter:noauth2:
827 paste.filter_factory: nova.api.openstack.auth:NoAuthMiddleware.factory
828 filter:sizelimit:
829 paste.filter_factory: oslo_middleware:RequestBodySizeLimiter.factory
830 filter:http_proxy_to_wsgi:
831 paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
832 filter:legacy_v2_compatible:
833 paste.filter_factory: nova.api.openstack:LegacyV2CompatibleWrapper.factory
834 app:osapi_compute_app_v21:
835 paste.app_factory: nova.api.openstack.compute:APIRouterV21.factory
836 pipeline:oscomputeversions:
837 pipeline: faultwrap http_proxy_to_wsgi oscomputeversionapp
838 app:oscomputeversionapp:
839 paste.app_factory: nova.api.openstack.compute.versions:Versions.factory
840 filter:cors:
841 paste.filter_factory: oslo_middleware.cors:filter_factory
842 oslo_config_project: nova
843 filter:keystonecontext:
844 paste.filter_factory: nova.api.auth:NovaKeystoneContext.factory
845 filter:authtoken:
846 paste.filter_factory: keystonemiddleware.auth_token:filter_factory
847 filter:audit:
848 paste.filter_factory: keystonemiddleware.audit:filter_factory
849 audit_map_file: /etc/nova/api_audit_map.conf
850 policy: {}
851 nova_sudoers: |
852 # This sudoers file supports rootwrap for both Kolla and LOCI Images.
853 Defaults !requiretty
854 Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin"
855 nova ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/nova-rootwrap /etc/nova/rootwrap.conf *, /var/lib/openstack/bin/nova-rootwrap /etc/nova/rootwrap.conf *
856 api_audit_map:
857 DEFAULT:
858 target_endpoint_type: None
859 custom_actions:
860 enable: enable
861 disable: disable
862 delete: delete
863 startup: start/startup
864 shutdown: stop/shutdown
865 reboot: start/reboot
866 os-migrations/get: read
867 os-server-password/post: update
868 path_keywords:
869 add: None
870 action: None
871 enable: None
872 disable: None
873 configure-project: None
874 defaults: None
875 delete: None
876 detail: None
877 diagnostics: None
878 entries: entry
879 extensions: alias
880 flavors: flavor
881 images: image
882 ips: label
883 limits: None
884 metadata: key
885 os-agents: os-agent
886 os-aggregates: os-aggregate
887 os-availability-zone: None
888 os-certificates: None
889 os-cloudpipe: None
890 os-fixed-ips: ip
891 os-extra_specs: key
892 os-flavor-access: None
893 os-floating-ip-dns: domain
894 os-floating-ips-bulk: host
895 os-floating-ip-pools: None
896 os-floating-ips: floating-ip
897 os-hosts: host
898 os-hypervisors: hypervisor
899 os-instance-actions: instance-action
900 os-keypairs: keypair
901 os-migrations: None
902 os-networks: network
903 os-quota-sets: tenant
904 os-security-groups: security_group
905 os-security-group-rules: rule
906 os-server-password: None
907 os-services: None
908 os-simple-tenant-usage: tenant
909 os-virtual-interfaces: None
910 os-volume_attachments: attachment
911 os-volumes_boot: None
912 os-volumes: volume
913 os-volume-types: volume-type
914 os-snapshots: snapshot
915 reboot: None
916 servers: server
917 shutdown: None
918 startup: None
919 statistics: None
920 service_endpoints:
921 compute: service/compute
922 rootwrap: |
923 # Configuration for nova-rootwrap
924 # This file should be owned by (and only-writeable by) the root user
925
926 [DEFAULT]
927 # List of directories to load filter definitions from (separated by ',').
928 # These directories MUST all be only writeable by root !
929 filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap
930
931 # List of directories to search executables in, in case filters do not
932 # explicitely specify a full path (separated by ',')
933 # If not specified, defaults to system PATH environment variable.
934 # These directories MUST all be only writeable by root !
935 exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin
936
937 # Enable logging to syslog
938 # Default value is False
939 use_syslog=False
940
941 # Which syslog facility to use.
942 # Valid values include auth, authpriv, syslog, local0, local1...
943 # Default value is 'syslog'
944 syslog_log_facility=syslog
945
946 # Which messages to log.
947 # INFO means log all usage
948 # ERROR means only log unsuccessful attempts
949 syslog_log_level=ERROR
950 rootwrap_filters:
951 api_metadata:
952 pods:
953 - metadata
954 content: |
955 # nova-rootwrap command filters for api-metadata nodes
956 # This is needed on nova-api hosts running with "metadata" in enabled_apis
957 # or when running nova-api-metadata
958 # This file should be owned by (and only-writeable by) the root user
959
960 [Filters]
961 # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
962 iptables-save: CommandFilter, iptables-save, root
963 ip6tables-save: CommandFilter, ip6tables-save, root
964
965 # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
966 iptables-restore: CommandFilter, iptables-restore, root
967 ip6tables-restore: CommandFilter, ip6tables-restore, root
968 compute:
969 pods:
970 - compute
971 content: |
972 # nova-rootwrap command filters for compute nodes
973 # This file should be owned by (and only-writeable by) the root user
974
975 [Filters]
976 # nova/virt/disk/mount/api.py: 'kpartx', '-a', device
977 # nova/virt/disk/mount/api.py: 'kpartx', '-d', device
978 kpartx: CommandFilter, kpartx, root
979
980 # nova/virt/xenapi/vm_utils.py: tune2fs, -O ^has_journal, part_path
981 # nova/virt/xenapi/vm_utils.py: tune2fs, -j, partition_path
982 tune2fs: CommandFilter, tune2fs, root
983
984 # nova/virt/disk/mount/api.py: 'mount', mapped_device
985 # nova/virt/disk/api.py: 'mount', '-o', 'bind', src, target
986 # nova/virt/xenapi/vm_utils.py: 'mount', '-t', 'ext2,ext3,ext4,reiserfs'..
987 # nova/virt/configdrive.py: 'mount', device, mountdir
988 # nova/virt/libvirt/volume.py: 'mount', '-t', 'sofs' ...
989 mount: CommandFilter, mount, root
990
991 # nova/virt/disk/mount/api.py: 'umount', mapped_device
992 # nova/virt/disk/api.py: 'umount' target
993 # nova/virt/xenapi/vm_utils.py: 'umount', dev_path
994 # nova/virt/configdrive.py: 'umount', mountdir
995 umount: CommandFilter, umount, root
996
997 # nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-c', device, image
998 # nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-d', device
999 qemu-nbd: CommandFilter, qemu-nbd, root
1000
1001 # nova/virt/disk/mount/loop.py: 'losetup', '--find', '--show', image
1002 # nova/virt/disk/mount/loop.py: 'losetup', '--detach', device
1003 losetup: CommandFilter, losetup, root
1004
1005 # nova/virt/disk/vfs/localfs.py: 'blkid', '-o', 'value', '-s', 'TYPE', device
1006 blkid: CommandFilter, blkid, root
1007
1008 # nova/virt/libvirt/utils.py: 'blockdev', '--getsize64', path
1009 # nova/virt/disk/mount/nbd.py: 'blockdev', '--flushbufs', device
1010 blockdev: RegExpFilter, blockdev, root, blockdev, (--getsize64|--flushbufs), /dev/.*
1011
1012 # nova/virt/disk/vfs/localfs.py: 'tee', canonpath
1013 tee: CommandFilter, tee, root
1014
1015 # nova/virt/disk/vfs/localfs.py: 'mkdir', canonpath
1016 mkdir: CommandFilter, mkdir, root
1017
1018 # nova/virt/disk/vfs/localfs.py: 'chown'
1019 # nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log
1020 # nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log
1021 # nova/virt/libvirt/connection.py: 'chown', 'root', basepath('disk')
1022 chown: CommandFilter, chown, root
1023
1024 # nova/virt/disk/vfs/localfs.py: 'chmod'
1025 chmod: CommandFilter, chmod, root
1026
1027 # nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap'
1028 # nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up'
1029 # nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev
1030 # nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i..
1031 # nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'..
1032 # nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',..
1033 # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',..
1034 # nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev)
1035 # nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1]
1036 # nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge
1037 # nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', ..
1038 # nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',..
1039 # nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ...
1040 # nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,..
1041 # nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up'
1042 # nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up'
1043 # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, ..
1044 # nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, ..
1045 # nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up'
1046 # nova/network/linux_net.py: 'ip', 'route', 'add', ..
1047 # nova/network/linux_net.py: 'ip', 'route', 'del', .
1048 # nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev
1049 ip: CommandFilter, ip, root
1050
1051 # nova/virt/libvirt/vif.py: 'tunctl', '-b', '-t', dev
1052 # nova/network/linux_net.py: 'tunctl', '-b', '-t', dev
1053 tunctl: CommandFilter, tunctl, root
1054
1055 # nova/virt/libvirt/vif.py: 'ovs-vsctl', ...
1056 # nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ...
1057 # nova/network/linux_net.py: 'ovs-vsctl', ....
1058 ovs-vsctl: CommandFilter, ovs-vsctl, root
1059
1060 # nova/virt/libvirt/vif.py: 'vrouter-port-control', ...
1061 vrouter-port-control: CommandFilter, vrouter-port-control, root
1062
1063 # nova/virt/libvirt/vif.py: 'ebrctl', ...
1064 ebrctl: CommandFilter, ebrctl, root
1065
1066 # nova/virt/libvirt/vif.py: 'mm-ctl', ...
1067 mm-ctl: CommandFilter, mm-ctl, root
1068
1069 # nova/network/linux_net.py: 'ovs-ofctl', ....
1070 ovs-ofctl: CommandFilter, ovs-ofctl, root
1071
1072 # nova/virt/libvirt/connection.py: 'dd', if=%s % virsh_output, ...
1073 dd: CommandFilter, dd, root
1074
1075 # nova/virt/xenapi/volume_utils.py: 'iscsiadm', '-m', ...
1076 iscsiadm: CommandFilter, iscsiadm, root
1077
1078 # nova/virt/libvirt/volume/aoe.py: 'aoe-revalidate', aoedev
1079 # nova/virt/libvirt/volume/aoe.py: 'aoe-discover'
1080 aoe-revalidate: CommandFilter, aoe-revalidate, root
1081 aoe-discover: CommandFilter, aoe-discover, root
1082
1083 # nova/virt/xenapi/vm_utils.py: parted, --script, ...
1084 # nova/virt/xenapi/vm_utils.py: 'parted', '--script', dev_path, ..*.
1085 parted: CommandFilter, parted, root
1086
1087 # nova/virt/xenapi/vm_utils.py: 'pygrub', '-qn', dev_path
1088 pygrub: CommandFilter, pygrub, root
1089
1090 # nova/virt/xenapi/vm_utils.py: fdisk %(dev_path)s
1091 fdisk: CommandFilter, fdisk, root
1092
1093 # nova/virt/xenapi/vm_utils.py: e2fsck, -f, -p, partition_path
1094 # nova/virt/disk/api.py: e2fsck, -f, -p, image
1095 e2fsck: CommandFilter, e2fsck, root
1096
1097 # nova/virt/xenapi/vm_utils.py: resize2fs, partition_path
1098 # nova/virt/disk/api.py: resize2fs, image
1099 resize2fs: CommandFilter, resize2fs, root
1100
1101 # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
1102 iptables-save: CommandFilter, iptables-save, root
1103 ip6tables-save: CommandFilter, ip6tables-save, root
1104
1105 # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
1106 iptables-restore: CommandFilter, iptables-restore, root
1107 ip6tables-restore: CommandFilter, ip6tables-restore, root
1108
1109 # nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...
1110 # nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..
1111 arping: CommandFilter, arping, root
1112
1113 # nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
1114 dhcp_release: CommandFilter, dhcp_release, root
1115
1116 # nova/network/linux_net.py: 'kill', '-9', pid
1117 # nova/network/linux_net.py: 'kill', '-HUP', pid
1118 kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP
1119
1120 # nova/network/linux_net.py: 'kill', pid
1121 kill_radvd: KillFilter, root, /usr/sbin/radvd
1122
1123 # nova/network/linux_net.py: dnsmasq call
1124 dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq
1125
1126 # nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'..
1127 radvd: CommandFilter, radvd, root
1128
1129 # nova/network/linux_net.py: 'brctl', 'addbr', bridge
1130 # nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0
1131 # nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'
1132 # nova/network/linux_net.py: 'brctl', 'addif', bridge, interface
1133 brctl: CommandFilter, brctl, root
1134
1135 # nova/virt/libvirt/utils.py: 'mkswap'
1136 # nova/virt/xenapi/vm_utils.py: 'mkswap'
1137 mkswap: CommandFilter, mkswap, root
1138
1139 # nova/virt/libvirt/utils.py: 'nova-idmapshift'
1140 nova-idmapshift: CommandFilter, nova-idmapshift, root
1141
1142 # nova/virt/xenapi/vm_utils.py: 'mkfs'
1143 # nova/utils.py: 'mkfs', fs, path, label
1144 mkfs: CommandFilter, mkfs, root
1145
1146 # nova/virt/libvirt/utils.py: 'qemu-img'
1147 qemu-img: CommandFilter, qemu-img, root
1148
1149 # nova/virt/disk/vfs/localfs.py: 'readlink', '-e'
1150 readlink: CommandFilter, readlink, root
1151
1152 # nova/virt/disk/api.py:
1153 mkfs.ext3: CommandFilter, mkfs.ext3, root
1154 mkfs.ext4: CommandFilter, mkfs.ext4, root
1155 mkfs.ntfs: CommandFilter, mkfs.ntfs, root
1156
1157 # nova/virt/libvirt/connection.py:
1158 lvremove: CommandFilter, lvremove, root
1159
1160 # nova/virt/libvirt/utils.py:
1161 lvcreate: CommandFilter, lvcreate, root
1162
1163 # nova/virt/libvirt/utils.py:
1164 lvs: CommandFilter, lvs, root
1165
1166 # nova/virt/libvirt/utils.py:
1167 vgs: CommandFilter, vgs, root
1168
1169 # nova/utils.py:read_file_as_root: 'cat', file_path
1170 # (called from nova/virt/disk/vfs/localfs.py:VFSLocalFS.read_file)
1171 read_passwd: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/passwd
1172 read_shadow: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/shadow
1173
1174 # os-brick needed commands
1175 read_initiator: ReadFileFilter, /etc/iscsi/initiatorname.iscsi
1176 multipath: CommandFilter, multipath, root
1177 # multipathd show status
1178 multipathd: CommandFilter, multipathd, root
1179 systool: CommandFilter, systool, root
1180 vgc-cluster: CommandFilter, vgc-cluster, root
1181 # os_brick/initiator/connector.py
1182 drv_cfg: CommandFilter, /opt/emc/scaleio/sdc/bin/drv_cfg, root, /opt/emc/scaleio/sdc/bin/drv_cfg, --query_guid
1183
1184 # TODO(smcginnis) Temporary fix.
1185 # Need to pull in os-brick os-brick.filters file instead and clean
1186 # out stale brick values from this file.
1187 scsi_id: CommandFilter, /lib/udev/scsi_id, root
1188 # os_brick.privileged.default oslo.privsep context
1189 # This line ties the superuser privs with the config files, context name,
1190 # and (implicitly) the actual python code invoked.
1191 privsep-rootwrap: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.*
1192
1193 # nova/storage/linuxscsi.py: sg_scan device
1194 sg_scan: CommandFilter, sg_scan, root
1195
1196 # nova/volume/encryptors/cryptsetup.py:
1197 # nova/volume/encryptors/luks.py:
1198 ln: RegExpFilter, ln, root, ln, --symbolic, --force, /dev/mapper/crypt-.+, .+
1199
1200 # nova/volume/encryptors.py:
1201 # nova/virt/libvirt/dmcrypt.py:
1202 cryptsetup: CommandFilter, cryptsetup, root
1203
1204 # nova/virt/xenapi/vm_utils.py:
1205 xenstore-read: CommandFilter, xenstore-read, root
1206
1207 # nova/virt/libvirt/utils.py:
1208 rbd: CommandFilter, rbd, root
1209
1210 # nova/virt/libvirt/utils.py: 'shred', '-n3', '-s%d' % volume_size, path
1211 shred: CommandFilter, shred, root
1212
1213 # nova/virt/libvirt/volume.py: 'cp', '/dev/stdin', delete_control..
1214 cp: CommandFilter, cp, root
1215
1216 # nova/virt/xenapi/vm_utils.py:
1217 sync: CommandFilter, sync, root
1218
1219 # nova/virt/libvirt/imagebackend.py:
1220 ploop: RegExpFilter, ploop, root, ploop, restore-descriptor, .*
1221 prl_disk_tool: RegExpFilter, prl_disk_tool, root, prl_disk_tool, resize, --size, .*M$, --resize_partition, --hdd, .*
1222
1223 # nova/virt/libvirt/utils.py: 'xend', 'status'
1224 xend: CommandFilter, xend, root
1225
1226 # nova/virt/libvirt/utils.py:
1227 touch: CommandFilter, touch, root
1228
1229 # nova/virt/libvirt/volume/vzstorage.py
1230 pstorage-mount: CommandFilter, pstorage-mount, root
1231 network:
1232 pods:
1233 - compute
1234 content: |
1235 # nova-rootwrap command filters for network nodes
1236 # This file should be owned by (and only-writeable by) the root user
1237
1238 [Filters]
1239 # nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap'
1240 # nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up'
1241 # nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev
1242 # nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i..
1243 # nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'..
1244 # nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',..
1245 # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',..
1246 # nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev)
1247 # nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1]
1248 # nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge
1249 # nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', ..
1250 # nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',..
1251 # nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ...
1252 # nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,..
1253 # nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up'
1254 # nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up'
1255 # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, ..
1256 # nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, ..
1257 # nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up'
1258 # nova/network/linux_net.py: 'ip', 'route', 'add', ..
1259 # nova/network/linux_net.py: 'ip', 'route', 'del', .
1260 # nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev
1261 ip: CommandFilter, ip, root
1262
1263 # nova/virt/libvirt/vif.py: 'ovs-vsctl', ...
1264 # nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ...
1265 # nova/network/linux_net.py: 'ovs-vsctl', ....
1266 ovs-vsctl: CommandFilter, ovs-vsctl, root
1267
1268 # nova/network/linux_net.py: 'ovs-ofctl', ....
1269 ovs-ofctl: CommandFilter, ovs-ofctl, root
1270
1271 # nova/virt/libvirt/vif.py: 'ivs-ctl', ...
1272 # nova/virt/libvirt/vif.py: 'ivs-ctl', 'del-port', ...
1273 # nova/network/linux_net.py: 'ivs-ctl', ....
1274 ivs-ctl: CommandFilter, ivs-ctl, root
1275
1276 # nova/virt/libvirt/vif.py: 'ifc_ctl', ...
1277 ifc_ctl: CommandFilter, /opt/pg/bin/ifc_ctl, root
1278
1279 # nova/network/linux_net.py: 'ebtables', '-D' ...
1280 # nova/network/linux_net.py: 'ebtables', '-I' ...
1281 ebtables: CommandFilter, ebtables, root
1282 ebtables_usr: CommandFilter, ebtables, root
1283
1284 # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
1285 iptables-save: CommandFilter, iptables-save, root
1286 ip6tables-save: CommandFilter, ip6tables-save, root
1287
1288 # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
1289 iptables-restore: CommandFilter, iptables-restore, root
1290 ip6tables-restore: CommandFilter, ip6tables-restore, root
1291
1292 # nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...
1293 # nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..
1294 arping: CommandFilter, arping, root
1295
1296 # nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
1297 dhcp_release: CommandFilter, dhcp_release, root
1298
1299 # nova/network/linux_net.py: 'kill', '-9', pid
1300 # nova/network/linux_net.py: 'kill', '-HUP', pid
1301 kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP
1302
1303 # nova/network/linux_net.py: 'kill', pid
1304 kill_radvd: KillFilter, root, /usr/sbin/radvd
1305
1306 # nova/network/linux_net.py: dnsmasq call
1307 dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq
1308
1309 # nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'..
1310 radvd: CommandFilter, radvd, root
1311
1312 # nova/network/linux_net.py: 'brctl', 'addbr', bridge
1313 # nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0
1314 # nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'
1315 # nova/network/linux_net.py: 'brctl', 'addif', bridge, interface
1316 brctl: CommandFilter, brctl, root
1317
1318 # nova/network/linux_net.py: 'sysctl', ....
1319 sysctl: CommandFilter, sysctl, root
1320
1321 # nova/network/linux_net.py: 'conntrack'
1322 conntrack: CommandFilter, conntrack, root
1323
1324 # nova/network/linux_net.py: 'fp-vdev'
1325 fp-vdev: CommandFilter, fp-vdev, root
1326 nova_ironic:
1327 DEFAULT:
1328 scheduler_host_manager: ironic_host_manager
1329 compute_driver: ironic.IronicDriver
1330 ram_allocation_ratio: 1.0
1331 cpu_allocation_ratio: 1.0
1332 reserved_host_memory_mb: 0
1333 libvirt:
1334 # Get the IP address to be used as the target for live migration traffic using interface name.
1335 # If this option is set to None, the hostname of the migration target compute node will be used.
1336 live_migration_interface:
1337 hypervisor:
1338 # my_ip can be set automatically through this interface name.
1339 host_interface:
1340 # This list is the keys to exclude from the config file ingested by nova-compute
1341 nova_compute_redactions:
1342 - database
1343 - api_database
1344 - cell0_database
1345 nova:
1346 DEFAULT:
1347 log_config_append: /etc/nova/logging.conf
1348 default_ephemeral_format: ext4
1349 ram_allocation_ratio: 1.0
1350 disk_allocation_ratio: 1.0
1351 cpu_allocation_ratio: 3.0
1352 state_path: /var/lib/nova
1353 osapi_compute_listen: 0.0.0.0
1354 # NOTE(portdirect): the bind port should not be defined, and is manipulated
1355 # via the endpoints section.
1356 osapi_compute_listen_port: null
1357 osapi_compute_workers: 1
1358 metadata_workers: 1
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001359 compute_driver: libvirt.LibvirtDriver
1360 my_ip: 0.0.0.0
1361 instance_usage_audit: True
1362 instance_usage_audit_period: hour
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001363 resume_guests_state_on_host_boot: True
1364 vnc:
1365 novncproxy_host: 0.0.0.0
1366 vncserver_listen: 0.0.0.0
1367 # This would be set by each compute node's IP
1368 # server_proxyclient_address: 127.0.0.1
1369 spice:
1370 html5proxy_host: 0.0.0.0
1371 server_listen: 0.0.0.0
1372 # This would be set by each compute node's IP
1373 # server_proxyclient_address: 127.0.0.1
1374 conductor:
1375 workers: 1
1376 oslo_policy:
1377 policy_file: /etc/nova/policy.yaml
1378 oslo_concurrency:
1379 lock_path: /var/lib/nova/tmp
1380 oslo_middleware:
1381 enable_proxy_headers_parsing: true
1382 glance:
1383 num_retries: 3
1384 ironic:
1385 api_endpoint: null
1386 auth_url: null
1387 neutron:
1388 metadata_proxy_shared_secret: "password"
1389 service_metadata_proxy: True
1390 auth_type: password
1391 auth_version: v3
1392 database:
1393 max_retries: -1
1394 api_database:
1395 max_retries: -1
1396 cell0_database:
1397 max_retries: -1
1398 keystone_authtoken:
1399 auth_type: password
1400 auth_version: v3
1401 memcache_security_strategy: ENCRYPT
Mohammed Naser0a13cee2023-03-02 11:28:29 +01001402 notifications:
1403 notify_on_state_change: vm_and_task_state
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001404 service_user:
1405 auth_type: password
1406 send_service_user_token: false
1407 libvirt:
1408 connection_uri: "qemu+unix:///system?socket=/run/libvirt/libvirt-sock"
1409 images_type: qcow2
1410 images_rbd_pool: vms
1411 images_rbd_ceph_conf: /etc/ceph/ceph.conf
1412 rbd_user: cinder
1413 rbd_secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
1414 disk_cachemodes: "network=writeback"
1415 hw_disk_discard: unmap
1416 upgrade_levels:
1417 compute: auto
1418 cache:
1419 enabled: true
1420 backend: dogpile.cache.memcached
1421 wsgi:
1422 api_paste_config: /etc/nova/api-paste.ini
1423 oslo_messaging_notifications:
1424 driver: messagingv2
1425 oslo_messaging_rabbit:
1426 rabbit_ha_queues: true
1427 placement:
1428 auth_type: password
1429 auth_version: v3
1430 logging:
1431 loggers:
1432 keys:
1433 - root
1434 - nova
1435 - os.brick
1436 handlers:
1437 keys:
1438 - stdout
1439 - stderr
1440 - "null"
1441 formatters:
1442 keys:
1443 - context
1444 - default
1445 logger_root:
1446 level: WARNING
1447 handlers: 'null'
1448 logger_nova:
1449 level: INFO
1450 handlers:
1451 - stdout
1452 qualname: nova
1453 logger_os.brick:
1454 level: INFO
1455 handlers:
1456 - stdout
1457 qualname: os.brick
1458 logger_amqp:
1459 level: WARNING
1460 handlers: stderr
1461 qualname: amqp
1462 logger_amqplib:
1463 level: WARNING
1464 handlers: stderr
1465 qualname: amqplib
1466 logger_eventletwsgi:
1467 level: WARNING
1468 handlers: stderr
1469 qualname: eventlet.wsgi.server
1470 logger_sqlalchemy:
1471 level: WARNING
1472 handlers: stderr
1473 qualname: sqlalchemy
1474 logger_boto:
1475 level: WARNING
1476 handlers: stderr
1477 qualname: boto
1478 handler_null:
1479 class: logging.NullHandler
1480 formatter: default
1481 args: ()
1482 handler_stdout:
1483 class: StreamHandler
1484 args: (sys.stdout,)
1485 formatter: context
1486 handler_stderr:
1487 class: StreamHandler
1488 args: (sys.stderr,)
1489 formatter: context
1490 formatter_context:
1491 class: oslo_log.formatters.ContextFormatter
1492 datefmt: "%Y-%m-%d %H:%M:%S"
1493 formatter_default:
1494 format: "%(message)s"
1495 datefmt: "%Y-%m-%d %H:%M:%S"
1496 rabbitmq:
1497 # NOTE(rk760n): adding rmq policy to mirror messages from notification queues and set an expiration time for them
1498 policies:
1499 - vhost: "nova"
1500 name: "ha_ttl_nova"
1501 definition:
1502 # mirror messages to other nodes in the rmq cluster
1503 ha-mode: "all"
1504 ha-sync-mode: "automatic"
1505 # 70s
1506 message-ttl: 70000
1507 priority: 0
1508 apply-to: all
1509 pattern: '^(?!(amq\.|reply_)).*'
1510 enable_iscsi: false
1511 archive_deleted_rows:
1512 purge_deleted_rows: false
1513 until_completion: true
1514 all_cells: false
1515 max_rows:
1516 enabled: False
1517 rows: 1000
1518 before:
1519 enabled: false
1520 date: 'nil'
1521
1522# Names of secrets used by bootstrap and environmental checks
1523secrets:
1524 identity:
1525 admin: nova-keystone-admin
1526 nova: nova-keystone-user
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001527 test: nova-keystone-test
1528 oslo_db:
1529 admin: nova-db-admin
1530 nova: nova-db-user
1531 oslo_db_api:
1532 admin: nova-db-api-admin
1533 nova: nova-db-api-user
1534 oslo_db_cell0:
1535 admin: nova-db-cell0-admin
1536 nova: nova-db-cell0-user
1537 oslo_messaging:
1538 admin: nova-rabbitmq-admin
1539 nova: nova-rabbitmq-user
1540 tls:
1541 compute:
1542 osapi:
1543 public: nova-tls-public
1544 internal: nova-tls-api
1545 compute_novnc_proxy:
1546 novncproxy:
1547 public: nova-novncproxy-tls-public
1548 internal: nova-novncproxy-tls-proxy
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001549 compute_metadata:
1550 metadata:
1551 public: metadata-tls-public
1552 internal: metadata-tls-metadata
1553 compute_spice_proxy:
1554 spiceproxy:
1555 internal: nova-tls-spiceproxy
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01001556 oci_image_registry:
1557 nova: nova-oci-image-registry
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001558
1559# typically overridden by environmental
1560# values, but should include all endpoints
1561# required by this chart
1562endpoints:
1563 cluster_domain_suffix: cluster.local
1564 local_image_registry:
1565 name: docker-registry
1566 namespace: docker-registry
1567 hosts:
1568 default: localhost
1569 internal: docker-registry
1570 node: localhost
1571 host_fqdn_override:
1572 default: null
1573 port:
1574 registry:
1575 node: 5000
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01001576 oci_image_registry:
1577 name: oci-image-registry
1578 namespace: oci-image-registry
1579 auth:
1580 enabled: false
1581 nova:
1582 username: nova
1583 password: password
1584 hosts:
1585 default: localhost
1586 host_fqdn_override:
1587 default: null
1588 port:
1589 registry:
1590 default: null
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001591 oslo_db:
1592 auth:
1593 admin:
1594 username: root
1595 password: password
1596 secret:
1597 tls:
1598 internal: mariadb-tls-direct
1599 nova:
1600 username: nova
1601 password: password
1602 hosts:
1603 default: mariadb
1604 host_fqdn_override:
1605 default: null
1606 path: /nova
1607 scheme: mysql+pymysql
1608 port:
1609 mysql:
1610 default: 3306
1611 oslo_db_api:
1612 auth:
1613 admin:
1614 username: root
1615 password: password
1616 nova:
1617 username: nova
1618 password: password
1619 hosts:
1620 default: mariadb
1621 host_fqdn_override:
1622 default: null
1623 path: /nova_api
1624 scheme: mysql+pymysql
1625 port:
1626 mysql:
1627 default: 3306
1628 oslo_db_cell0:
1629 auth:
1630 admin:
1631 username: root
1632 password: password
1633 nova:
1634 username: nova
1635 password: password
1636 hosts:
1637 default: mariadb
1638 host_fqdn_override:
1639 default: null
1640 path: /nova_cell0
1641 scheme: mysql+pymysql
1642 port:
1643 mysql:
1644 default: 3306
1645 oslo_messaging:
1646 auth:
1647 admin:
1648 username: rabbitmq
1649 password: password
1650 secret:
1651 tls:
1652 internal: rabbitmq-tls-direct
1653 nova:
1654 username: nova
1655 password: password
1656 statefulset:
1657 replicas: 2
1658 name: rabbitmq-rabbitmq
1659 hosts:
1660 default: rabbitmq
1661 host_fqdn_override:
1662 default: null
1663 path: /nova
1664 scheme: rabbit
1665 port:
1666 amqp:
1667 default: 5672
1668 http:
1669 default: 15672
1670 oslo_cache:
1671 auth:
1672 # NOTE(portdirect): this is used to define the value for keystone
1673 # authtoken cache encryption key, if not set it will be populated
1674 # automatically with a random value, but to take advantage of
1675 # this feature all services should be set to use the same key,
1676 # and memcache service.
1677 memcache_secret_key: null
1678 hosts:
1679 default: memcached
1680 host_fqdn_override:
1681 default: null
1682 port:
1683 memcache:
1684 default: 11211
1685 identity:
1686 name: keystone
1687 auth:
1688 admin:
1689 region_name: RegionOne
1690 username: admin
1691 password: password
1692 project_name: admin
1693 user_domain_name: default
1694 project_domain_name: default
1695 nova:
1696 role: admin
1697 region_name: RegionOne
1698 username: nova
1699 password: password
1700 project_name: service
1701 user_domain_name: service
1702 project_domain_name: service
1703 # NOTE(portdirect): the neutron user is not managed by the nova chart
1704 # these values should match those set in the neutron chart.
1705 neutron:
1706 region_name: RegionOne
1707 project_name: service
1708 user_domain_name: service
1709 project_domain_name: service
1710 username: neutron
1711 password: password
1712 # NOTE(portdirect): the ironic user is not managed by the nova chart
1713 # these values should match those set in the ironic chart.
1714 ironic:
1715 auth_type: password
1716 auth_version: v3
1717 region_name: RegionOne
1718 project_name: service
1719 user_domain_name: service
1720 project_domain_name: service
1721 username: ironic
1722 password: password
1723 placement:
1724 role: admin
1725 region_name: RegionOne
1726 username: placement
1727 password: password
1728 project_name: service
1729 user_domain_name: service
1730 project_domain_name: service
1731 test:
1732 role: admin
1733 region_name: RegionOne
1734 username: nova-test
1735 password: password
1736 project_name: test
1737 user_domain_name: service
1738 project_domain_name: service
1739 hosts:
1740 default: keystone
1741 internal: keystone-api
1742 host_fqdn_override:
1743 default: null
1744 path:
1745 default: /v3
1746 scheme:
1747 default: http
1748 port:
1749 api:
1750 default: 80
1751 internal: 5000
1752 image:
1753 name: glance
1754 hosts:
1755 default: glance-api
1756 public: glance
1757 host_fqdn_override:
1758 default: null
1759 path:
1760 default: null
1761 scheme:
1762 default: http
1763 port:
1764 api:
1765 default: 9292
1766 public: 80
1767 compute:
1768 name: nova
1769 hosts:
1770 default: nova-api
1771 public: nova
1772 host_fqdn_override:
1773 default: null
1774 # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public
1775 # endpoints using the following format:
1776 # public:
1777 # host: null
1778 # tls:
1779 # crt: null
1780 # key: null
1781 path:
1782 default: "/v2.1/%(tenant_id)s"
1783 scheme:
1784 default: 'http'
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01001785 service: 'http'
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001786 port:
1787 api:
1788 default: 8774
1789 public: 80
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01001790 service: 8774
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001791 novncproxy:
1792 default: 6080
1793 compute_metadata:
1794 name: nova
1795 ip:
1796 # If blank, set clusterIP and metadata_host dynamically
1797 ingress: null
1798 hosts:
1799 default: nova-metadata
1800 public: metadata
1801 host_fqdn_override:
1802 default: null
1803 path:
1804 default: /
1805 scheme:
1806 default: 'http'
1807 port:
1808 metadata:
1809 default: 8775
1810 public: 80
1811 compute_novnc_proxy:
1812 name: nova
1813 hosts:
1814 default: nova-novncproxy
1815 public: novncproxy
1816 host_fqdn_override:
1817 default: null
1818 # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public
1819 # endpoints using the following format:
1820 # public:
1821 # host: null
1822 # tls:
1823 # crt: null
1824 # key: null
1825 path:
1826 default: /vnc_auto.html
1827 scheme:
1828 default: 'http'
1829 port:
1830 novnc_proxy:
1831 default: 6080
1832 public: 80
1833 compute_spice_proxy:
1834 name: nova
1835 hosts:
1836 default: nova-spiceproxy
1837 public: placement # NOTE(review): 'placement' looks copy/pasted from the placement endpoint; confirm the intended spiceproxy public host
1838 host_fqdn_override:
1839 default: null
1840 path:
1841 default: /spice_auto.html
1842 scheme:
1843 default: 'http'
1844 port:
1845 spice_proxy:
1846 default: 6082
1847 placement:
1848 name: placement
1849 hosts:
1850 default: placement-api
1851 public: placement
1852 host_fqdn_override:
1853 default: null
1854 path:
1855 default: /
1856 scheme:
1857 default: 'http'
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01001858 service: 'http'
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001859 port:
1860 api:
1861 default: 8778
1862 public: 80
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01001863 service: 8778
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001864 network:
1865 name: neutron
1866 hosts:
1867 default: neutron-server
1868 public: neutron
1869 host_fqdn_override:
1870 default: null
1871 path:
1872 default: null
1873 scheme:
1874 default: 'http'
1875 port:
1876 api:
1877 default: 9696
1878 public: 80
1879 baremetal:
1880 name: ironic
1881 hosts:
1882 default: ironic-api
1883 public: ironic
1884 host_fqdn_override:
1885 default: null
1886 path:
1887 default: null
1888 scheme:
1889 default: http
1890 port:
1891 api:
1892 default: 6385
1893 public: 80
1894 fluentd:
1895 namespace: null
1896 name: fluentd
1897 hosts:
1898 default: fluentd-logging
1899 host_fqdn_override:
1900 default: null
1901 path:
1902 default: null
1903 scheme: 'http'
1904 port:
1905 service:
1906 default: 24224
1907 metrics:
1908 default: 24220
1909 # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress
1910 # They are used to enable the Egress K8s network policy.
1911 kube_dns:
1912 namespace: kube-system
1913 name: kubernetes-dns
1914 hosts:
1915 default: kube-dns
1916 host_fqdn_override:
1917 default: null
1918 path:
1919 default: null
1920 scheme: http
1921 port:
1922 dns:
1923 default: 53
1924 protocol: UDP
1925 ingress:
1926 namespace: null
1927 name: ingress
1928 hosts:
1929 default: ingress
1930 port:
1931 ingress:
1932 default: 80
1933
1934pod:
1935 probes:
1936 rpc_timeout: 60
1937 rpc_retries: 2
1938 compute:
1939 default:
1940 liveness:
1941 enabled: True
1942 params:
1943 initialDelaySeconds: 120
1944 periodSeconds: 90
1945 timeoutSeconds: 70
1946 readiness:
1947 enabled: True
1948 params:
1949 initialDelaySeconds: 80
1950 periodSeconds: 90
1951 timeoutSeconds: 70
1952 api-metadata:
1953 default:
1954 liveness:
1955 enabled: True
1956 params:
1957 initialDelaySeconds: 30
1958 periodSeconds: 60
1959 timeoutSeconds: 15
1960 readiness:
1961 enabled: True
1962 params:
1963 initialDelaySeconds: 30
1964 periodSeconds: 60
1965 timeoutSeconds: 15
1966 api-osapi:
1967 default:
1968 liveness:
1969 enabled: True
1970 params:
1971 initialDelaySeconds: 30
1972 periodSeconds: 60
1973 timeoutSeconds: 15
1974 readiness:
1975 enabled: True
1976 params:
1977 initialDelaySeconds: 30
1978 periodSeconds: 60
1979 timeoutSeconds: 15
1980 conductor:
1981 default:
1982 liveness:
1983 enabled: True
1984 params:
1985 initialDelaySeconds: 120
1986 periodSeconds: 90
1987 timeoutSeconds: 70
1988 readiness:
1989 enabled: True
1990 params:
1991 initialDelaySeconds: 80
1992 periodSeconds: 90
1993 timeoutSeconds: 70
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001994 novncproxy:
1995 default:
1996 liveness:
1997 enabled: True
1998 params:
1999 initialDelaySeconds: 30
2000 periodSeconds: 60
2001 timeoutSeconds: 15
2002 readiness:
2003 enabled: True
2004 params:
2005 initialDelaySeconds: 30
2006 periodSeconds: 60
2007 timeoutSeconds: 15
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002008 scheduler:
2009 default:
2010 liveness:
2011 enabled: True
2012 params:
2013 initialDelaySeconds: 120
2014 periodSeconds: 90
2015 timeoutSeconds: 70
2016 readiness:
2017 enabled: True
2018 params:
2019 initialDelaySeconds: 80
2020 periodSeconds: 90
2021 timeoutSeconds: 70
2022 compute-spice-proxy:
2023 default:
2024 liveness:
2025 enabled: True
2026 params:
2027 initialDelaySeconds: 30
2028 periodSeconds: 60
2029 timeoutSeconds: 15
2030 readiness:
2031 enabled: True
2032 params:
2033 initialDelaySeconds: 30
2034 periodSeconds: 60
2035 timeoutSeconds: 15
2036 security_context:
2037 nova:
2038 pod:
2039 runAsUser: 42424
2040 container:
2041 nova_compute_init:
2042 readOnlyRootFilesystem: true
2043 runAsUser: 0
2044 tungstenfabric_compute_init:
2045 readOnlyRootFilesystem: true
2046 allowPrivilegeEscalation: false
2047 ceph_perms:
2048 readOnlyRootFilesystem: true
2049 runAsUser: 0
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002050 nova_compute_vnc_init:
2051 readOnlyRootFilesystem: true
2052 allowPrivilegeEscalation: false
2053 nova_compute_spice_init:
2054 readOnlyRootFilesystem: true
2055 allowPrivilegeEscalation: false
2056 nova_compute:
2057 readOnlyRootFilesystem: true
2058 privileged: true
2059 nova_compute_ssh:
2060 privileged: true
2061 runAsUser: 0
2062 nova_compute_ssh_init:
2063 runAsUser: 0
2064 nova_api_metadata_init:
2065 readOnlyRootFilesystem: true
2066 allowPrivilegeEscalation: false
2067 nova_api:
2068 readOnlyRootFilesystem: true
2069 allowPrivilegeEscalation: false
2070 nova_osapi:
2071 readOnlyRootFilesystem: true
2072 allowPrivilegeEscalation: false
2073 nova_conductor:
2074 readOnlyRootFilesystem: true
2075 allowPrivilegeEscalation: false
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002076 nova_novncproxy_init:
2077 readOnlyRootFilesystem: true
2078 allowPrivilegeEscalation: false
2079 nova_novncproxy_init_assests:
2080 readOnlyRootFilesystem: true
2081 allowPrivilegeEscalation: false
2082 nova_novncproxy:
2083 readOnlyRootFilesystem: true
2084 allowPrivilegeEscalation: false
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002085 nova_scheduler:
2086 readOnlyRootFilesystem: true
2087 allowPrivilegeEscalation: false
2088 nova_spiceproxy_init:
2089 readOnlyRootFilesystem: true
2090 allowPrivilegeEscalation: false
2091 nova_spiceproxy_init_assets:
2092 readOnlyRootFilesystem: true
2093 allowPrivilegeEscalation: false
2094 nova_spiceproxy:
2095 readOnlyRootFilesystem: true
2096 allowPrivilegeEscalation: false
2097 bootstrap:
2098 pod:
2099 runAsUser: 42424
2100 container:
2101 nova_wait_for_computes_init:
2102 readOnlyRootFilesystem: true
2103 allowPrivilegeEscalation: false
2104 bootstrap:
2105 readOnlyRootFilesystem: true
2106 allowPrivilegeEscalation: false
2107 nova_cell_setup:
2108 pod:
2109 runAsUser: 42424
2110 container:
2111 nova_wait_for_computes_init:
2112 readOnlyRootFilesystem: true
2113 allowPrivilegeEscalation: false
2114 nova_cell_setup_init:
2115 readOnlyRootFilesystem: true
2116 allowPrivilegeEscalation: false
2117 nova_cell_setup:
2118 readOnlyRootFilesystem: true
2119 allowPrivilegeEscalation: false
2120 archive_deleted_rows:
2121 pod:
2122 runAsUser: 42424
2123 container:
2124 nova_archive_deleted_rows_init:
2125 readOnlyRootFilesystem: true
2126 allowPrivilegeEscalation: false
2127 nova_archive_deleted_rows:
2128 readOnlyRootFilesystem: true
2129 allowPrivilegeEscalation: false
2130 cell_setup:
2131 pod:
2132 runAsUser: 42424
2133 container:
2134 nova_cell_setup:
2135 readOnlyRootFilesystem: true
2136 allowPrivilegeEscalation: false
2137 service_cleaner:
2138 pod:
2139 runAsUser: 42424
2140 container:
2141 nova_service_cleaner:
2142 readOnlyRootFilesystem: true
2143 allowPrivilegeEscalation: false
2144 use_fqdn:
2145 # NOTE: If the option "host" is not specified in nova.conf, the host name
2146 # shown in the hypervisor host is defaulted to the short name of the host.
2147 # Setting the option here to true will cause $(hostname --fqdn) to be used as the
2148 # host name by default. If the short name is desired $(hostname --short),
2149 # set the option to false. Specifying a host in the nova.conf via the conf:
2150 # section will supersede the value of this option.
2151 compute: true
2152 affinity:
2153 anti:
2154 type:
2155 default: preferredDuringSchedulingIgnoredDuringExecution
2156 topologyKey:
2157 default: kubernetes.io/hostname
2158 weight:
2159 default: 10
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01002160 tolerations:
2161 nova:
2162 enabled: false
2163 tolerations:
2164 - key: node-role.kubernetes.io/master
2165 operator: Exists
2166 effect: NoSchedule
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002167 mounts:
2168 nova_compute:
2169 init_container: null
2170 nova_compute:
2171 volumeMounts:
2172 volumes:
2173 nova_compute_ironic:
2174 init_container: null
2175 nova_compute_ironic:
2176 volumeMounts:
2177 volumes:
2178 nova_api_metadata:
2179 init_container: null
2180 nova_api_metadata:
2181 volumeMounts:
2182 volumes:
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002183 nova_api_osapi:
2184 init_container: null
2185 nova_api_osapi:
2186 volumeMounts:
2187 volumes:
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002188 nova_conductor:
2189 init_container: null
2190 nova_conductor:
2191 volumeMounts:
2192 volumes:
2193 nova_scheduler:
2194 init_container: null
2195 nova_scheduler:
2196 volumeMounts:
2197 volumes:
2198 nova_bootstrap:
2199 init_container: null
2200 nova_bootstrap:
2201 volumeMounts:
2202 volumes:
2203 nova_tests:
2204 init_container: null
2205 nova_tests:
2206 volumeMounts:
2207 volumes:
2208 nova_novncproxy:
2209 init_novncproxy: null
2210 nova_novncproxy:
2211 volumeMounts:
2212 volumes:
2213 nova_spiceproxy:
2214 init_spiceproxy: null
2215 nova_spiceproxy:
2216 volumeMounts:
2217 volumes:
2218 nova_db_sync:
2219 nova_db_sync:
2220 volumeMounts:
2221 volumes:
  # Components that run with host networking enabled.
  useHostNetwork:
    novncproxy: true
  # Default replica count for each control-plane deployment.
  replicas:
    api_metadata: 1
    compute_ironic: 1
    osapi: 1
    conductor: 1
    scheduler: 1
    novncproxy: 1
    spiceproxy: 1
  # Rollout, disruption-budget and termination settings.
  lifecycle:
    upgrades:
      deployments:
        # Number of old ReplicaSets retained for rollback.
        revision_history: 3
        pod_replacement_strategy: RollingUpdate
        rolling_update:
          max_unavailable: 1
          max_surge: 3
      daemonsets:
        pod_replacement_strategy: RollingUpdate
        compute:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
    # PodDisruptionBudget minimums for the API services.
    disruption_budget:
      metadata:
        min_available: 0
      osapi:
        min_available: 0
    # terminationGracePeriodSeconds for the API services.
    termination_grace_period:
      metadata:
        timeout: 30
      osapi:
        timeout: 30
  # Per-component CPU/memory requests and limits.
  # NOTE(review): with enabled: false these values are presumably not
  # rendered into the pod specs — confirm against the chart templates
  # before relying on them.
  resources:
    enabled: false
    compute:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    compute_ironic:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    api_metadata:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    api:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    conductor:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    scheduler:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    ssh:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    novncproxy:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    spiceproxy:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    # Resource settings for one-shot and cron jobs.
    jobs:
      bootstrap:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      rabbit_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      archive_deleted_rows:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_drop:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_endpoints:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_service:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_user:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      tests:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      cell_setup:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      service_cleaner:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      image_repo_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
2413
# NetworkPolicy definitions (rendered only when manifests.network_policy
# is true). Empty rules ({}) allow all traffic in each direction.
network_policy:
  nova:
    # TODO(lamt): Need to tighten this ingress for security.
    ingress:
      - {}
    egress:
      - {}
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002421
# NOTE(helm_hook): the Helm hook annotations may break when deploying with
# the helm v2 binary; set helm3_hook to false when using helm v2.
helm3_hook: true
2425
# Log verbosity for the health-probe scripts.
health_probe:
  logging:
    level: ERROR
2429
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01002430tls:
2431 identity: false
2432 oslo_messaging: false
2433 oslo_db: false
2434
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002435manifests:
2436 certificates: false
2437 configmap_bin: true
2438 configmap_etc: true
2439 cron_job_cell_setup: true
2440 cron_job_service_cleaner: true
2441 cron_job_archive_deleted_rows: false
2442 daemonset_compute: true
2443 deployment_api_metadata: true
2444 deployment_api_osapi: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002445 deployment_conductor: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002446 deployment_novncproxy: true
2447 deployment_spiceproxy: true
2448 deployment_scheduler: true
2449 ingress_metadata: true
2450 ingress_novncproxy: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002451 ingress_osapi: true
2452 job_bootstrap: true
2453 job_db_init: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002454 job_db_sync: true
2455 job_db_drop: false
2456 job_image_repo_sync: true
2457 job_rabbit_init: true
2458 job_ks_endpoints: true
2459 job_ks_service: true
2460 job_ks_user: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002461 job_cell_setup: true
2462 pdb_metadata: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002463 pdb_osapi: true
2464 pod_rally_test: true
2465 network_policy: false
2466 secret_db_api: true
2467 secret_db_cell0: true
2468 secret_db: true
2469 secret_ingress_tls: true
2470 secret_keystone: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002471 secret_rabbitmq: true
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01002472 secret_registry: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002473 service_ingress_metadata: true
2474 service_ingress_novncproxy: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002475 service_ingress_osapi: true
2476 service_metadata: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002477 service_novncproxy: true
2478 service_spiceproxy: true
2479 service_osapi: true
2480 statefulset_compute_ironic: false
2481...