# Licensed under the Apache License, Version 2.0 (the "License");
2# you may not use this file except in compliance with the License.
3# You may obtain a copy of the License at
4#
5# http://www.apache.org/licenses/LICENSE-2.0
6#
7# Unless required by applicable law or agreed to in writing, software
8# distributed under the License is distributed on an "AS IS" BASIS,
9# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10# See the License for the specific language governing permissions and
11# limitations under the License.
12
13# Default values for nova.
14# This is a YAML-formatted file.
15# Declare name/value pairs to be passed into your templates.
16# name: value
17
18---
19release_group: null
20
21labels:
22 agent:
23 compute:
24 node_selector_key: openstack-compute-node
25 node_selector_value: enabled
26 compute_ironic:
27 node_selector_key: openstack-compute-node
28 node_selector_value: enabled
29 api_metadata:
30 node_selector_key: openstack-control-plane
31 node_selector_value: enabled
32 conductor:
33 node_selector_key: openstack-control-plane
34 node_selector_value: enabled
  job:
36 node_selector_key: openstack-control-plane
37 node_selector_value: enabled
38 novncproxy:
39 node_selector_key: openstack-control-plane
40 node_selector_value: enabled
41 osapi:
42 node_selector_key: openstack-control-plane
43 node_selector_value: enabled
  scheduler:
45 node_selector_key: openstack-control-plane
46 node_selector_value: enabled
47 spiceproxy:
48 node_selector_key: openstack-control-plane
49 node_selector_value: enabled
50 test:
51 node_selector_key: openstack-control-plane
52 node_selector_value: enabled
53
54images:
55 pull_policy: IfNotPresent
56 tags:
    bootstrap: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
    db_drop: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
    db_init: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
    dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
    rabbit_init: docker.io/rabbitmq:3.7-management
    ks_user: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
63 ks_service: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
64 nova_archive_deleted_rows: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
65 ks_endpoints: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
66 nova_api: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
67 nova_cell_setup: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
68 nova_cell_setup_init: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
69 nova_compute: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
70 nova_compute_ironic: 'docker.io/kolla/ubuntu-source-nova-compute-ironic:wallaby'
71 nova_compute_ssh: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
72 nova_conductor: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
73 nova_db_sync: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
74 nova_novncproxy: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
75 nova_novncproxy_assets: 'docker.io/kolla/ubuntu-source-nova-novncproxy:wallaby'
76 nova_scheduler: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
    # NOTE(portdirect): we simply use the ceph config helper here,
    # as it has both oscli and jq.
    nova_service_cleaner: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_focal'
    nova_spiceproxy: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
    nova_spiceproxy_assets: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
    test: docker.io/xrally/xrally-openstack:2.0.0
83 image_repo_sync: docker.io/docker:17.07.0
84 nova_wait_for_computes_init: gcr.io/google_containers/hyperkube-amd64:v1.11.6
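    # Helm deep-merges override files into this map, so a single image can be
    # pinned without restating the rest. A minimal override sketch (the tag
    # shown is a placeholder, not a recommendation):
    #   images:
    #     tags:
    #       nova_api: docker.io/openstackhelm/nova:some-other-tag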
85 local_registry:
86 active: false
87 exclude:
88 - dep_check
89 - image_repo_sync
90
91jobs:
  # NOTE(portdirect): When using cells, new nodes will be added to the cell on the hour by default.
  # TODO(portdirect): Add a post-start action to nova compute pods so that they register themselves.
94 cell_setup:
95 cron: "0 */1 * * *"
96 starting_deadline: 600
97 history:
98 success: 3
99 failed: 1
100 extended_wait:
101 enabled: false
102 iteration: 3
103 duration: 5
104 service_cleaner:
105 cron: "0 */1 * * *"
106 starting_deadline: 600
107 history:
108 success: 3
109 failed: 1
110 sleep_time: 60
111 archive_deleted_rows:
112 cron: "0 */1 * * *"
113 starting_deadline: 600
114 history:
115 success: 3
116 failed: 1
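  # The cron fields above use standard five-field crontab syntax; "0 */1 * * *"
  # fires at minute 0 of every hour, and starting_deadline is assumed here to
  # feed the CronJob's startingDeadlineSeconds (how late a missed run may still
  # start). Illustrative override to archive once a day at 01:00 instead:
  #   jobs:
  #     archive_deleted_rows:
  #       cron: "0 1 * * *"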
117
118bootstrap:
119 enabled: true
120 ks_user: admin
121 script: null
122 structured:
123 flavors:
124 enabled: true
125 options:
126 m1_tiny:
127 name: "m1.tiny"
128 ram: 512
129 disk: 1
130 vcpus: 1
131 m1_small:
132 name: "m1.small"
133 ram: 2048
134 disk: 20
135 vcpus: 1
136 m1_medium:
137 name: "m1.medium"
138 ram: 4096
139 disk: 40
140 vcpus: 2
141 m1_large:
142 name: "m1.large"
143 ram: 8192
144 disk: 80
145 vcpus: 4
146 m1_xlarge:
147 name: "m1.xlarge"
148 ram: 16384
149 disk: 160
150 vcpus: 8
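      # Additional flavors can be declared the same way from an override file.
      # Illustrative sketch (the key, name and sizes are arbitrary examples):
      #   bootstrap:
      #     structured:
      #       flavors:
      #         options:
      #           m1_custom:
      #             name: "m1.custom"
      #             ram: 1024
      #             disk: 10
      #             vcpus: 2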
151 wait_for_computes:
152 enabled: false
153 # Wait percentage is the minimum percentage of compute hypervisors which
154 # must be available before the remainder of the bootstrap script can be run.
155 wait_percentage: 70
156 # Once the wait_percentage above is achieved, the remaining_wait is the
157 # amount of time in seconds to wait before executing the remainder of the
      # bootstrap script.
159 remaining_wait: 300
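      # Worked example of the two values above, assuming 10 labelled compute
      # nodes: with wait_percentage: 70 the wait_script below proceeds once 7
      # hypervisors are registered, waiting up to a further remaining_wait
      # (300) seconds for the rest and finishing early if all 10 report in,
      # polling every 5 seconds.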
160 scripts:
161 init_script: |
162 # This runs in a bootstrap init container. It counts the number of compute nodes.
163 COMPUTE_NODES=$(kubectl get nodes -o custom-columns=NAME:.metadata.name -l openstack-compute-node=enabled --no-headers | sort)
164 /bin/echo $COMPUTE_NODES > /tmp/compute_nodes.txt
165 wait_script: |
166 # This script runs in the main bootstrap container just before the
167 # bootstrap.script is called.
168 COMPUTE_HOSTS=`cat /tmp/compute_nodes.txt | wc -w`
169 if [[ $COMPUTE_HOSTS == 0 ]]; then
170 echo "There are no compute hosts found!"
171 exit 1
172 fi
173
174 # Wait for all hypervisors to come up before moving on with the deployment
175 HYPERVISOR_WAIT=true
176 WAIT_AFTER_READY=0
177 SLEEP=5
178 while [[ $HYPERVISOR_WAIT == true ]]; do
            # It's possible that the openstack command may fail due to not being able to
            # reach the compute service
181 set +e
182 HYPERVISORS=$(openstack hypervisor list -f value -c 'Hypervisor Hostname' | wc -w)
183 set -e
184
185 PERCENT_READY=$(( $HYPERVISORS * 100 / $COMPUTE_HOSTS ))
186 if [[ $PERCENT_READY -ge $WAIT_PERCENTAGE ]]; then
187 echo "Hypervisor ready percentage is $PERCENT_READY"
188 if [[ $PERCENT_READY == 100 ]]; then
189 HYPERVISOR_WAIT=false
190 echo "All hypervisors are ready."
191 elif [[ WAIT_AFTER_READY -ge $REMAINING_WAIT ]]; then
192 HYPERVISOR_WAIT=false
193 echo "Waited the configured time -- $HYPERVISORS out of $COMPUTE_HOSTS hypervisor(s) ready -- proceeding with the bootstrap."
194 else
195 sleep $SLEEP
196 WAIT_AFTER_READY=$(( $WAIT_AFTER_READY + $SLEEP ))
197 fi
198 else
199 echo "Waiting $SLEEP seconds for enough hypervisors to be discovered..."
200 sleep $SLEEP
201 fi
202 done
203
204network:
205 # provide what type of network wiring will be used
206 # possible options: openvswitch, linuxbridge, sriov
207 backend:
208 - openvswitch
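  # The backend named here is also used to select the matching block under
  # dependencies.dynamic.targeted below, so compute pods wait for the
  # corresponding agent on the same node. Illustrative override:
  #   network:
  #     backend:
  #       - linuxbridge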
209 osapi:
210 port: 8774
211 ingress:
212 public: true
213 classes:
214 namespace: "nginx"
215 cluster: "nginx-cluster"
216 annotations:
217 nginx.ingress.kubernetes.io/rewrite-target: /
218 external_policy_local: false
219 node_port:
220 enabled: false
221 port: 30774
222 metadata:
223 port: 8775
224 ingress:
225 public: true
226 classes:
227 namespace: "nginx"
228 cluster: "nginx-cluster"
229 annotations:
230 nginx.ingress.kubernetes.io/rewrite-target: /
231 external_policy_local: false
232 node_port:
233 enabled: false
234 port: 30775
  novncproxy:
236 ingress:
237 public: true
238 classes:
239 namespace: "nginx"
240 cluster: "nginx-cluster"
241 annotations:
242 nginx.ingress.kubernetes.io/rewrite-target: /
243 node_port:
244 enabled: false
245 port: 30680
246 spiceproxy:
247 node_port:
248 enabled: false
249 port: 30682
250 ssh:
251 enabled: false
252 port: 8022
253 from_subnet: 0.0.0.0/0
254 key_types:
255 - rsa
256 - dsa
257 - ecdsa
258 - ed25519
259 private_key: 'null'
260 public_key: 'null'
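  # Illustrative override: expose the compute API on a NodePort instead of
  # relying solely on ingress (the port value is just an example):
  #   network:
  #     osapi:
  #       node_port:
  #         enabled: true
  #         port: 30774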
261
262dependencies:
263 dynamic:
264 common:
265 local_image_registry:
266 jobs:
267 - nova-image-repo-sync
268 services:
269 - endpoint: node
270 service: local_image_registry
271 targeted:
      ovn:
        compute:
          pod:
275 - requireSameNode: true
276 labels:
277 application: ovn
278 component: ovn-controller
      openvswitch:
280 compute:
281 pod:
282 - requireSameNode: true
283 labels:
284 application: neutron
285 component: neutron-ovs-agent
286 linuxbridge:
287 compute:
288 pod:
289 - requireSameNode: true
290 labels:
291 application: neutron
292 component: neutron-lb-agent
293 sriov:
294 compute:
295 pod:
296 - requireSameNode: true
297 labels:
298 application: neutron
299 component: neutron-sriov-agent
300 static:
301 api:
302 jobs:
303 - nova-db-sync
304 - nova-ks-user
305 - nova-ks-endpoints
306 - nova-rabbit-init
307 services:
308 - endpoint: internal
309 service: oslo_messaging
310 - endpoint: internal
311 service: oslo_db
312 - endpoint: internal
313 service: identity
314 api_metadata:
315 jobs:
316 - nova-db-sync
317 - nova-ks-user
318 - nova-ks-endpoints
319 - nova-rabbit-init
320 services:
321 - endpoint: internal
322 service: oslo_messaging
323 - endpoint: internal
324 service: oslo_db
325 - endpoint: internal
326 service: identity
327 bootstrap:
328 services:
329 - endpoint: internal
330 service: identity
331 - endpoint: internal
332 service: compute
333 cell_setup:
334 jobs:
335 - nova-db-sync
336 - nova-rabbit-init
337 services:
338 - endpoint: internal
339 service: oslo_messaging
340 - endpoint: internal
341 service: oslo_db
342 - endpoint: internal
343 service: identity
344 - endpoint: internal
345 service: compute
346 pod:
347 - requireSameNode: false
348 labels:
349 application: nova
350 component: compute
351 service_cleaner:
352 jobs:
353 - nova-db-sync
354 - nova-rabbit-init
355 services:
356 - endpoint: internal
357 service: oslo_messaging
358 - endpoint: internal
359 service: oslo_db
360 - endpoint: internal
361 service: identity
362 - endpoint: internal
363 service: compute
364 compute:
365 pod:
366 - requireSameNode: true
367 labels:
368 application: libvirt
369 component: libvirt
370 jobs:
371 - nova-db-sync
372 - nova-rabbit-init
      services:
374 - endpoint: internal
375 service: oslo_messaging
376 - endpoint: internal
377 service: image
378 - endpoint: internal
379 service: compute
380 - endpoint: internal
381 service: network
382 - endpoint: internal
383 service: compute_metadata
384 compute_ironic:
385 jobs:
386 - nova-db-sync
387 - nova-rabbit-init
388 services:
389 - endpoint: internal
390 service: oslo_messaging
391 - endpoint: internal
392 service: image
393 - endpoint: internal
394 service: compute
395 - endpoint: internal
396 service: network
397 - endpoint: internal
398 service: baremetal
399 conductor:
400 jobs:
401 - nova-db-sync
402 - nova-rabbit-init
      services:
404 - endpoint: internal
405 service: oslo_messaging
406 - endpoint: internal
407 service: oslo_db
408 - endpoint: internal
409 service: identity
410 - endpoint: internal
411 service: compute
412 db_drop:
413 services:
414 - endpoint: internal
415 service: oslo_db
416 archive_deleted_rows:
417 jobs:
418 - nova-db-init
419 - nova-db-sync
420 db_init:
421 services:
422 - endpoint: internal
423 service: oslo_db
424 db_sync:
425 jobs:
426 - nova-db-init
427 services:
428 - endpoint: internal
429 service: oslo_db
430 ks_endpoints:
431 jobs:
432 - nova-ks-service
433 services:
434 - endpoint: internal
435 service: identity
436 ks_service:
437 services:
438 - endpoint: internal
439 service: identity
440 ks_user:
441 services:
442 - endpoint: internal
443 service: identity
444 rabbit_init:
445 services:
446 - service: oslo_messaging
447 endpoint: internal
448 novncproxy:
449 jobs:
450 - nova-db-sync
451 services:
452 - endpoint: internal
453 service: oslo_db
454 spiceproxy:
455 jobs:
456 - nova-db-sync
457 services:
458 - endpoint: internal
459 service: oslo_db
460 scheduler:
461 jobs:
462 - nova-db-sync
463 - nova-rabbit-init
      services:
465 - endpoint: internal
466 service: oslo_messaging
467 - endpoint: internal
468 service: oslo_db
469 - endpoint: internal
470 service: identity
471 - endpoint: internal
472 service: compute
473 tests:
474 services:
475 - endpoint: internal
476 service: image
477 - endpoint: internal
478 service: compute
479 - endpoint: internal
480 service: network
481 - endpoint: internal
482 service: compute_metadata
483 image_repo_sync:
484 services:
485 - endpoint: internal
486 service: local_image_registry
487
488console:
489 # serial | spice | novnc | none
490 console_kind: novnc
491 serial:
492 spice:
493 compute:
494 # IF blank, search default routing interface
495 server_proxyclient_interface:
496 proxy:
497 # IF blank, search default routing interface
498 server_proxyclient_interface:
499 novnc:
500 compute:
501 # IF blank, search default routing interface
502 vncserver_proxyclient_interface:
503 vncproxy:
504 # IF blank, search default routing interface
505 vncserver_proxyclient_interface:
  address_search_enabled: true
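  # Illustrative override: switch the console to SPICE and pin the interface
  # used to reach the compute host (ens3 is a placeholder interface name):
  #   console:
  #     console_kind: spice
  #     spice:
  #       compute:
  #         server_proxyclient_interface: ens3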
508ceph_client:
509 configmap: ceph-etc
510 user_secret_name: pvc-ceph-client-key
511
512conf:
513 security: |
514 #
515 # Disable access to the entire file system except for the directories that
516 # are explicitly allowed later.
517 #
518 # This currently breaks the configurations that come with some web application
519 # Debian packages.
520 #
521 #<Directory />
522 # AllowOverride None
523 # Require all denied
524 #</Directory>
525
526 # Changing the following options will not really affect the security of the
527 # server, but might make attacks slightly more difficult in some cases.
528
529 #
530 # ServerTokens
531 # This directive configures what you return as the Server HTTP response
532 # Header. The default is 'Full' which sends information about the OS-Type
533 # and compiled in modules.
534 # Set to one of: Full | OS | Minimal | Minor | Major | Prod
535 # where Full conveys the most information, and Prod the least.
536 ServerTokens Prod
537
538 #
539 # Optionally add a line containing the server version and virtual host
540 # name to server-generated pages (internal error documents, FTP directory
541 # listings, mod_status and mod_info output etc., but not CGI generated
542 # documents or custom error documents).
543 # Set to "EMail" to also include a mailto: link to the ServerAdmin.
544 # Set to one of: On | Off | EMail
545 ServerSignature Off
546
547 #
548 # Allow TRACE method
549 #
550 # Set to "extended" to also reflect the request body (only for testing and
551 # diagnostic purposes).
552 #
553 # Set to one of: On | Off | extended
554 TraceEnable Off
555
556 #
557 # Forbid access to version control directories
558 #
559 # If you use version control systems in your document root, you should
560 # probably deny access to their directories. For example, for subversion:
561 #
562 #<DirectoryMatch "/\.svn">
563 # Require all denied
564 #</DirectoryMatch>
565
566 #
567 # Setting this header will prevent MSIE from interpreting files as something
568 # else than declared by the content type in the HTTP headers.
569 # Requires mod_headers to be enabled.
570 #
571 #Header set X-Content-Type-Options: "nosniff"
572
573 #
574 # Setting this header will prevent other sites from embedding pages from this
575 # site as frames. This defends against clickjacking attacks.
576 # Requires mod_headers to be enabled.
577 #
578 #Header set X-Frame-Options: "sameorigin"
579 software:
580 apache2:
581 binary: apache2
582 start_parameters: -DFOREGROUND
583 conf_dir: /etc/apache2/conf-enabled
584 site_dir: /etc/apache2/sites-enable
585 mods_dir: /etc/apache2/mods-available
586 a2enmod: null
587 a2dismod: null
588 ceph:
589 enabled: true
590 admin_keyring: null
591 cinder:
592 user: "cinder"
593 keyring: null
594 secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
595 rally_tests:
596 run_tempest: false
597 clean_up: |
598 FLAVORS=$(openstack flavor list -f value --all | awk '$2 ~ /^s_rally_/ { print $1 }')
599 if [ -n "$FLAVORS" ]; then
600 echo $FLAVORS | xargs openstack flavor delete
601 fi
602 SERVERS=$(openstack server list -f value --all | awk '$2 ~ /^s_rally_/ { print $1 }')
603 if [ -n "$SERVERS" ]; then
604 echo $SERVERS | xargs openstack server delete
605 fi
606 IMAGES=$(openstack image list -f value | awk '$2 ~ /^c_rally_/ { print $1 }')
607 if [ -n "$IMAGES" ]; then
608 echo $IMAGES | xargs openstack image delete
609 fi
610 tests:
      NovaAggregates.create_and_get_aggregate_details:
612 - args:
613 availability_zone: nova
614 runner:
615 concurrency: 1
616 times: 1
617 type: constant
618 sla:
619 failure_rate:
620 max: 0
621 NovaAggregates.create_and_update_aggregate:
622 - args:
623 availability_zone: nova
624 runner:
625 concurrency: 1
626 times: 1
627 type: constant
628 sla:
629 failure_rate:
630 max: 0
631 NovaAggregates.list_aggregates:
632 - runner:
633 concurrency: 1
634 times: 1
635 type: constant
636 sla:
637 failure_rate:
638 max: 0
639 NovaAvailabilityZones.list_availability_zones:
640 - args:
641 detailed: true
642 runner:
643 concurrency: 1
644 times: 1
645 type: constant
646 sla:
647 failure_rate:
648 max: 0
649 NovaFlavors.create_and_delete_flavor:
650 - args:
651 disk: 1
652 ram: 500
653 vcpus: 1
654 runner:
655 concurrency: 1
656 times: 1
657 type: constant
658 sla:
659 failure_rate:
660 max: 0
661 NovaFlavors.create_and_list_flavor_access:
662 - args:
663 disk: 1
664 ram: 500
665 vcpus: 1
666 runner:
667 concurrency: 1
668 times: 1
669 type: constant
670 sla:
671 failure_rate:
672 max: 0
673 NovaFlavors.create_flavor:
674 - args:
675 disk: 1
676 ram: 500
677 vcpus: 1
678 runner:
679 concurrency: 1
680 times: 1
681 type: constant
682 sla:
683 failure_rate:
684 max: 0
685 NovaFlavors.create_flavor_and_add_tenant_access:
686 - args:
687 disk: 1
688 ram: 500
689 vcpus: 1
690 runner:
691 concurrency: 1
692 times: 1
693 type: constant
694 sla:
695 failure_rate:
696 max: 0
697 NovaFlavors.create_flavor_and_set_keys:
698 - args:
699 disk: 1
700 extra_specs:
701 'quota:disk_read_bytes_sec': 10240
702 ram: 500
703 vcpus: 1
704 runner:
705 concurrency: 1
706 times: 1
707 type: constant
708 sla:
709 failure_rate:
710 max: 0
711 NovaFlavors.list_flavors:
712 - args:
713 detailed: true
714 runner:
715 concurrency: 1
716 times: 1
717 type: constant
718 sla:
719 failure_rate:
720 max: 0
721 NovaHypervisors.list_and_get_hypervisors:
722 - args:
723 detailed: true
724 runner:
725 concurrency: 1
726 times: 1
727 type: constant
728 sla:
729 failure_rate:
730 max: 0
731 NovaHypervisors.list_and_get_uptime_hypervisors:
732 - args:
733 detailed: true
734 runner:
735 concurrency: 1
736 times: 1
737 type: constant
738 sla:
739 failure_rate:
740 max: 0
741 NovaHypervisors.list_and_search_hypervisors:
742 - args:
743 detailed: true
744 runner:
745 concurrency: 1
746 times: 1
747 type: constant
748 sla:
749 failure_rate:
750 max: 0
751 NovaHypervisors.list_hypervisors:
752 - args:
753 detailed: true
754 runner:
755 concurrency: 1
756 times: 1
757 type: constant
758 sla:
759 failure_rate:
760 max: 0
761 NovaHypervisors.statistics_hypervisors:
762 - args: {}
763 runner:
764 concurrency: 1
765 times: 1
766 type: constant
767 sla:
768 failure_rate:
769 max: 0
770 NovaKeypair.create_and_delete_keypair:
771 - runner:
772 concurrency: 1
773 times: 1
774 type: constant
775 sla:
776 failure_rate:
777 max: 0
778 NovaKeypair.create_and_list_keypairs:
779 - runner:
780 concurrency: 1
781 times: 1
782 type: constant
783 sla:
784 failure_rate:
785 max: 0
786 NovaServerGroups.create_and_list_server_groups:
787 - args:
788 all_projects: false
789 kwargs:
790 policies:
791 - affinity
792 runner:
793 concurrency: 1
794 times: 1
795 type: constant
796 sla:
797 failure_rate:
798 max: 0
799 NovaServices.list_services:
800 - runner:
801 concurrency: 1
802 times: 1
803 type: constant
804 sla:
805 failure_rate:
806 max: 0
  paste:
808 composite:metadata:
809 use: egg:Paste#urlmap
810 /: meta
811 pipeline:meta:
812 pipeline: cors metaapp
813 app:metaapp:
814 paste.app_factory: nova.api.metadata.handler:MetadataRequestHandler.factory
815 composite:osapi_compute:
816 use: call:nova.api.openstack.urlmap:urlmap_factory
817 /: oscomputeversions
818 /v2: openstack_compute_api_v21_legacy_v2_compatible
819 /v2.1: openstack_compute_api_v21
820 composite:openstack_compute_api_v21:
821 use: call:nova.api.auth:pipeline_factory_v21
822 noauth2: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit noauth2 osapi_compute_app_v21
823 keystone: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit authtoken audit keystonecontext osapi_compute_app_v21
824 composite:openstack_compute_api_v21_legacy_v2_compatible:
825 use: call:nova.api.auth:pipeline_factory_v21
826 noauth2: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit noauth2 legacy_v2_compatible osapi_compute_app_v21
827 keystone: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit authtoken audit keystonecontext legacy_v2_compatible osapi_compute_app_v21
828 filter:request_id:
829 paste.filter_factory: oslo_middleware:RequestId.factory
830 filter:compute_req_id:
831 paste.filter_factory: nova.api.compute_req_id:ComputeReqIdMiddleware.factory
832 filter:faultwrap:
833 paste.filter_factory: nova.api.openstack:FaultWrapper.factory
834 filter:noauth2:
835 paste.filter_factory: nova.api.openstack.auth:NoAuthMiddleware.factory
836 filter:sizelimit:
837 paste.filter_factory: oslo_middleware:RequestBodySizeLimiter.factory
838 filter:http_proxy_to_wsgi:
839 paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
840 filter:legacy_v2_compatible:
841 paste.filter_factory: nova.api.openstack:LegacyV2CompatibleWrapper.factory
842 app:osapi_compute_app_v21:
843 paste.app_factory: nova.api.openstack.compute:APIRouterV21.factory
844 pipeline:oscomputeversions:
845 pipeline: faultwrap http_proxy_to_wsgi oscomputeversionapp
846 app:oscomputeversionapp:
847 paste.app_factory: nova.api.openstack.compute.versions:Versions.factory
848 filter:cors:
849 paste.filter_factory: oslo_middleware.cors:filter_factory
850 oslo_config_project: nova
851 filter:keystonecontext:
852 paste.filter_factory: nova.api.auth:NovaKeystoneContext.factory
853 filter:authtoken:
854 paste.filter_factory: keystonemiddleware.auth_token:filter_factory
855 filter:audit:
856 paste.filter_factory: keystonemiddleware.audit:filter_factory
857 audit_map_file: /etc/nova/api_audit_map.conf
  policy: {}
859 nova_sudoers: |
860 # This sudoers file supports rootwrap for both Kolla and LOCI Images.
861 Defaults !requiretty
862 Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin"
863 nova ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/nova-rootwrap /etc/nova/rootwrap.conf *, /var/lib/openstack/bin/nova-rootwrap /etc/nova/rootwrap.conf *
864 api_audit_map:
865 DEFAULT:
866 target_endpoint_type: None
867 custom_actions:
868 enable: enable
869 disable: disable
870 delete: delete
871 startup: start/startup
872 shutdown: stop/shutdown
873 reboot: start/reboot
874 os-migrations/get: read
875 os-server-password/post: update
876 path_keywords:
877 add: None
878 action: None
879 enable: None
880 disable: None
881 configure-project: None
882 defaults: None
883 delete: None
884 detail: None
885 diagnostics: None
886 entries: entry
887 extensions: alias
888 flavors: flavor
889 images: image
890 ips: label
891 limits: None
892 metadata: key
893 os-agents: os-agent
894 os-aggregates: os-aggregate
895 os-availability-zone: None
896 os-certificates: None
897 os-cloudpipe: None
898 os-fixed-ips: ip
899 os-extra_specs: key
900 os-flavor-access: None
901 os-floating-ip-dns: domain
902 os-floating-ips-bulk: host
903 os-floating-ip-pools: None
904 os-floating-ips: floating-ip
905 os-hosts: host
906 os-hypervisors: hypervisor
907 os-instance-actions: instance-action
908 os-keypairs: keypair
909 os-migrations: None
910 os-networks: network
911 os-quota-sets: tenant
912 os-security-groups: security_group
913 os-security-group-rules: rule
914 os-server-password: None
915 os-services: None
916 os-simple-tenant-usage: tenant
917 os-virtual-interfaces: None
918 os-volume_attachments: attachment
919 os-volumes_boot: None
920 os-volumes: volume
921 os-volume-types: volume-type
922 os-snapshots: snapshot
923 reboot: None
924 servers: server
925 shutdown: None
926 startup: None
927 statistics: None
928 service_endpoints:
929 compute: service/compute
930 rootwrap: |
931 # Configuration for nova-rootwrap
932 # This file should be owned by (and only-writeable by) the root user
933
934 [DEFAULT]
935 # List of directories to load filter definitions from (separated by ',').
936 # These directories MUST all be only writeable by root !
937 filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap
938
939 # List of directories to search executables in, in case filters do not
    # explicitly specify a full path (separated by ',')
941 # If not specified, defaults to system PATH environment variable.
942 # These directories MUST all be only writeable by root !
943 exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin
944
945 # Enable logging to syslog
946 # Default value is False
947 use_syslog=False
948
949 # Which syslog facility to use.
950 # Valid values include auth, authpriv, syslog, local0, local1...
951 # Default value is 'syslog'
952 syslog_log_facility=syslog
953
954 # Which messages to log.
955 # INFO means log all usage
956 # ERROR means only log unsuccessful attempts
957 syslog_log_level=ERROR
  rootwrap_filters:
959 api_metadata:
960 pods:
961 - metadata
962 content: |
963 # nova-rootwrap command filters for api-metadata nodes
964 # This is needed on nova-api hosts running with "metadata" in enabled_apis
965 # or when running nova-api-metadata
966 # This file should be owned by (and only-writeable by) the root user
967
968 [Filters]
969 # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
970 iptables-save: CommandFilter, iptables-save, root
971 ip6tables-save: CommandFilter, ip6tables-save, root
972
973 # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
974 iptables-restore: CommandFilter, iptables-restore, root
975 ip6tables-restore: CommandFilter, ip6tables-restore, root
976 compute:
977 pods:
978 - compute
979 content: |
980 # nova-rootwrap command filters for compute nodes
981 # This file should be owned by (and only-writeable by) the root user
982
983 [Filters]
984 # nova/virt/disk/mount/api.py: 'kpartx', '-a', device
985 # nova/virt/disk/mount/api.py: 'kpartx', '-d', device
986 kpartx: CommandFilter, kpartx, root
987
988 # nova/virt/xenapi/vm_utils.py: tune2fs, -O ^has_journal, part_path
989 # nova/virt/xenapi/vm_utils.py: tune2fs, -j, partition_path
990 tune2fs: CommandFilter, tune2fs, root
991
992 # nova/virt/disk/mount/api.py: 'mount', mapped_device
993 # nova/virt/disk/api.py: 'mount', '-o', 'bind', src, target
994 # nova/virt/xenapi/vm_utils.py: 'mount', '-t', 'ext2,ext3,ext4,reiserfs'..
995 # nova/virt/configdrive.py: 'mount', device, mountdir
996 # nova/virt/libvirt/volume.py: 'mount', '-t', 'sofs' ...
997 mount: CommandFilter, mount, root
998
999 # nova/virt/disk/mount/api.py: 'umount', mapped_device
1000 # nova/virt/disk/api.py: 'umount' target
1001 # nova/virt/xenapi/vm_utils.py: 'umount', dev_path
1002 # nova/virt/configdrive.py: 'umount', mountdir
1003 umount: CommandFilter, umount, root
1004
1005 # nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-c', device, image
1006 # nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-d', device
1007 qemu-nbd: CommandFilter, qemu-nbd, root
1008
1009 # nova/virt/disk/mount/loop.py: 'losetup', '--find', '--show', image
1010 # nova/virt/disk/mount/loop.py: 'losetup', '--detach', device
1011 losetup: CommandFilter, losetup, root
1012
1013 # nova/virt/disk/vfs/localfs.py: 'blkid', '-o', 'value', '-s', 'TYPE', device
1014 blkid: CommandFilter, blkid, root
1015
1016 # nova/virt/libvirt/utils.py: 'blockdev', '--getsize64', path
1017 # nova/virt/disk/mount/nbd.py: 'blockdev', '--flushbufs', device
1018 blockdev: RegExpFilter, blockdev, root, blockdev, (--getsize64|--flushbufs), /dev/.*
1019
1020 # nova/virt/disk/vfs/localfs.py: 'tee', canonpath
1021 tee: CommandFilter, tee, root
1022
1023 # nova/virt/disk/vfs/localfs.py: 'mkdir', canonpath
1024 mkdir: CommandFilter, mkdir, root
1025
1026 # nova/virt/disk/vfs/localfs.py: 'chown'
1027 # nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log
1028 # nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log
1029 # nova/virt/libvirt/connection.py: 'chown', 'root', basepath('disk')
1030 chown: CommandFilter, chown, root
1031
1032 # nova/virt/disk/vfs/localfs.py: 'chmod'
1033 chmod: CommandFilter, chmod, root
1034
1035 # nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap'
1036 # nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up'
1037 # nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev
1038 # nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i..
1039 # nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'..
1040 # nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',..
1041 # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',..
1042 # nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev)
1043 # nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1]
1044 # nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge
1045 # nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', ..
1046 # nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',..
1047 # nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ...
1048 # nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,..
1049 # nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up'
1050 # nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up'
1051 # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, ..
1052 # nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, ..
1053 # nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up'
1054 # nova/network/linux_net.py: 'ip', 'route', 'add', ..
1055 # nova/network/linux_net.py: 'ip', 'route', 'del', .
1056 # nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev
1057 ip: CommandFilter, ip, root
1058
1059 # nova/virt/libvirt/vif.py: 'tunctl', '-b', '-t', dev
1060 # nova/network/linux_net.py: 'tunctl', '-b', '-t', dev
1061 tunctl: CommandFilter, tunctl, root
1062
1063 # nova/virt/libvirt/vif.py: 'ovs-vsctl', ...
1064 # nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ...
1065 # nova/network/linux_net.py: 'ovs-vsctl', ....
1066 ovs-vsctl: CommandFilter, ovs-vsctl, root
1067
1068 # nova/virt/libvirt/vif.py: 'vrouter-port-control', ...
1069 vrouter-port-control: CommandFilter, vrouter-port-control, root
1070
1071 # nova/virt/libvirt/vif.py: 'ebrctl', ...
1072 ebrctl: CommandFilter, ebrctl, root
1073
1074 # nova/virt/libvirt/vif.py: 'mm-ctl', ...
1075 mm-ctl: CommandFilter, mm-ctl, root
1076
1077 # nova/network/linux_net.py: 'ovs-ofctl', ....
1078 ovs-ofctl: CommandFilter, ovs-ofctl, root
1079
1080 # nova/virt/libvirt/connection.py: 'dd', if=%s % virsh_output, ...
1081 dd: CommandFilter, dd, root
1082
1083 # nova/virt/xenapi/volume_utils.py: 'iscsiadm', '-m', ...
1084 iscsiadm: CommandFilter, iscsiadm, root
1085
1086 # nova/virt/libvirt/volume/aoe.py: 'aoe-revalidate', aoedev
1087 # nova/virt/libvirt/volume/aoe.py: 'aoe-discover'
1088 aoe-revalidate: CommandFilter, aoe-revalidate, root
1089 aoe-discover: CommandFilter, aoe-discover, root
1090
1091 # nova/virt/xenapi/vm_utils.py: parted, --script, ...
1092 # nova/virt/xenapi/vm_utils.py: 'parted', '--script', dev_path, ..*.
1093 parted: CommandFilter, parted, root
1094
1095 # nova/virt/xenapi/vm_utils.py: 'pygrub', '-qn', dev_path
1096 pygrub: CommandFilter, pygrub, root
1097
1098 # nova/virt/xenapi/vm_utils.py: fdisk %(dev_path)s
1099 fdisk: CommandFilter, fdisk, root
1100
1101 # nova/virt/xenapi/vm_utils.py: e2fsck, -f, -p, partition_path
1102 # nova/virt/disk/api.py: e2fsck, -f, -p, image
1103 e2fsck: CommandFilter, e2fsck, root
1104
1105 # nova/virt/xenapi/vm_utils.py: resize2fs, partition_path
1106 # nova/virt/disk/api.py: resize2fs, image
1107 resize2fs: CommandFilter, resize2fs, root
1108
1109 # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
1110 iptables-save: CommandFilter, iptables-save, root
1111 ip6tables-save: CommandFilter, ip6tables-save, root
1112
1113 # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
1114 iptables-restore: CommandFilter, iptables-restore, root
1115 ip6tables-restore: CommandFilter, ip6tables-restore, root
1116
1117 # nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...
1118 # nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..
1119 arping: CommandFilter, arping, root
1120
1121 # nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
1122 dhcp_release: CommandFilter, dhcp_release, root
1123
1124 # nova/network/linux_net.py: 'kill', '-9', pid
1125 # nova/network/linux_net.py: 'kill', '-HUP', pid
1126 kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP
1127
1128 # nova/network/linux_net.py: 'kill', pid
1129 kill_radvd: KillFilter, root, /usr/sbin/radvd
1130
1131 # nova/network/linux_net.py: dnsmasq call
1132 dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq
1133
1134 # nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'..
1135 radvd: CommandFilter, radvd, root
1136
1137 # nova/network/linux_net.py: 'brctl', 'addbr', bridge
1138 # nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0
1139 # nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'
1140 # nova/network/linux_net.py: 'brctl', 'addif', bridge, interface
1141 brctl: CommandFilter, brctl, root
1142
1143 # nova/virt/libvirt/utils.py: 'mkswap'
1144 # nova/virt/xenapi/vm_utils.py: 'mkswap'
1145 mkswap: CommandFilter, mkswap, root
1146
1147 # nova/virt/libvirt/utils.py: 'nova-idmapshift'
1148 nova-idmapshift: CommandFilter, nova-idmapshift, root
1149
1150 # nova/virt/xenapi/vm_utils.py: 'mkfs'
1151 # nova/utils.py: 'mkfs', fs, path, label
1152 mkfs: CommandFilter, mkfs, root
1153
1154 # nova/virt/libvirt/utils.py: 'qemu-img'
1155 qemu-img: CommandFilter, qemu-img, root
1156
1157 # nova/virt/disk/vfs/localfs.py: 'readlink', '-e'
1158 readlink: CommandFilter, readlink, root
1159
1160 # nova/virt/disk/api.py:
1161 mkfs.ext3: CommandFilter, mkfs.ext3, root
1162 mkfs.ext4: CommandFilter, mkfs.ext4, root
1163 mkfs.ntfs: CommandFilter, mkfs.ntfs, root
1164
1165 # nova/virt/libvirt/connection.py:
1166 lvremove: CommandFilter, lvremove, root
1167
1168 # nova/virt/libvirt/utils.py:
1169 lvcreate: CommandFilter, lvcreate, root
1170
1171 # nova/virt/libvirt/utils.py:
1172 lvs: CommandFilter, lvs, root
1173
1174 # nova/virt/libvirt/utils.py:
1175 vgs: CommandFilter, vgs, root
1176
1177 # nova/utils.py:read_file_as_root: 'cat', file_path
1178 # (called from nova/virt/disk/vfs/localfs.py:VFSLocalFS.read_file)
1179 read_passwd: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/passwd
1180 read_shadow: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/shadow
1181
1182 # os-brick needed commands
1183 read_initiator: ReadFileFilter, /etc/iscsi/initiatorname.iscsi
1184 multipath: CommandFilter, multipath, root
1185 # multipathd show status
1186 multipathd: CommandFilter, multipathd, root
1187 systool: CommandFilter, systool, root
1188 vgc-cluster: CommandFilter, vgc-cluster, root
1189 # os_brick/initiator/connector.py
1190 drv_cfg: CommandFilter, /opt/emc/scaleio/sdc/bin/drv_cfg, root, /opt/emc/scaleio/sdc/bin/drv_cfg, --query_guid
1191
1192 # TODO(smcginnis) Temporary fix.
1193 # Need to pull in os-brick os-brick.filters file instead and clean
1194 # out stale brick values from this file.
1195 scsi_id: CommandFilter, /lib/udev/scsi_id, root
1196 # os_brick.privileged.default oslo.privsep context
1197 # This line ties the superuser privs with the config files, context name,
1198 # and (implicitly) the actual python code invoked.
1199 privsep-rootwrap: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.*
1200
1201 # nova/storage/linuxscsi.py: sg_scan device
1202 sg_scan: CommandFilter, sg_scan, root
1203
1204 # nova/volume/encryptors/cryptsetup.py:
1205 # nova/volume/encryptors/luks.py:
1206 ln: RegExpFilter, ln, root, ln, --symbolic, --force, /dev/mapper/crypt-.+, .+
1207
1208 # nova/volume/encryptors.py:
1209 # nova/virt/libvirt/dmcrypt.py:
1210 cryptsetup: CommandFilter, cryptsetup, root
1211
1212 # nova/virt/xenapi/vm_utils.py:
1213 xenstore-read: CommandFilter, xenstore-read, root
1214
1215 # nova/virt/libvirt/utils.py:
1216 rbd: CommandFilter, rbd, root
1217
1218 # nova/virt/libvirt/utils.py: 'shred', '-n3', '-s%d' % volume_size, path
1219 shred: CommandFilter, shred, root
1220
1221 # nova/virt/libvirt/volume.py: 'cp', '/dev/stdin', delete_control..
1222 cp: CommandFilter, cp, root
1223
1224 # nova/virt/xenapi/vm_utils.py:
1225 sync: CommandFilter, sync, root
1226
1227 # nova/virt/libvirt/imagebackend.py:
1228 ploop: RegExpFilter, ploop, root, ploop, restore-descriptor, .*
1229 prl_disk_tool: RegExpFilter, prl_disk_tool, root, prl_disk_tool, resize, --size, .*M$, --resize_partition, --hdd, .*
1230
1231 # nova/virt/libvirt/utils.py: 'xend', 'status'
1232 xend: CommandFilter, xend, root
1233
1234 # nova/virt/libvirt/utils.py:
1235 touch: CommandFilter, touch, root
1236
1237 # nova/virt/libvirt/volume/vzstorage.py
1238 pstorage-mount: CommandFilter, pstorage-mount, root
1239 network:
1240 pods:
1241 - compute
1242 content: |
1243 # nova-rootwrap command filters for network nodes
1244 # This file should be owned by (and only-writeable by) the root user
1245
1246 [Filters]
1247 # nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap'
1248 # nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up'
1249 # nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev
1250 # nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i..
1251 # nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'..
1252 # nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',..
1253 # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',..
1254 # nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev)
1255 # nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1]
1256 # nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge
1257 # nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', ..
1258 # nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',..
1259 # nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ...
1260 # nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,..
1261 # nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up'
1262 # nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up'
1263 # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, ..
1264 # nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, ..
1265 # nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up'
1266 # nova/network/linux_net.py: 'ip', 'route', 'add', ..
1267 # nova/network/linux_net.py: 'ip', 'route', 'del', .
1268 # nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev
1269 ip: CommandFilter, ip, root
1270
1271 # nova/virt/libvirt/vif.py: 'ovs-vsctl', ...
1272 # nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ...
1273 # nova/network/linux_net.py: 'ovs-vsctl', ....
1274 ovs-vsctl: CommandFilter, ovs-vsctl, root
1275
1276 # nova/network/linux_net.py: 'ovs-ofctl', ....
1277 ovs-ofctl: CommandFilter, ovs-ofctl, root
1278
1279 # nova/virt/libvirt/vif.py: 'ivs-ctl', ...
1280 # nova/virt/libvirt/vif.py: 'ivs-ctl', 'del-port', ...
1281 # nova/network/linux_net.py: 'ivs-ctl', ....
1282 ivs-ctl: CommandFilter, ivs-ctl, root
1283
1284 # nova/virt/libvirt/vif.py: 'ifc_ctl', ...
1285 ifc_ctl: CommandFilter, /opt/pg/bin/ifc_ctl, root
1286
1287 # nova/network/linux_net.py: 'ebtables', '-D' ...
1288 # nova/network/linux_net.py: 'ebtables', '-I' ...
1289 ebtables: CommandFilter, ebtables, root
1290 ebtables_usr: CommandFilter, ebtables, root
1291
1292 # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
1293 iptables-save: CommandFilter, iptables-save, root
1294 ip6tables-save: CommandFilter, ip6tables-save, root
1295
1296 # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
1297 iptables-restore: CommandFilter, iptables-restore, root
1298 ip6tables-restore: CommandFilter, ip6tables-restore, root
1299
1300 # nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...
1301 # nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..
1302 arping: CommandFilter, arping, root
1303
1304 # nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
1305 dhcp_release: CommandFilter, dhcp_release, root
1306
1307 # nova/network/linux_net.py: 'kill', '-9', pid
1308 # nova/network/linux_net.py: 'kill', '-HUP', pid
1309 kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP
1310
1311 # nova/network/linux_net.py: 'kill', pid
1312 kill_radvd: KillFilter, root, /usr/sbin/radvd
1313
1314 # nova/network/linux_net.py: dnsmasq call
1315 dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq
1316
1317 # nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'..
1318 radvd: CommandFilter, radvd, root
1319
1320 # nova/network/linux_net.py: 'brctl', 'addbr', bridge
1321 # nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0
1322 # nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'
1323 # nova/network/linux_net.py: 'brctl', 'addif', bridge, interface
1324 brctl: CommandFilter, brctl, root
1325
1326 # nova/network/linux_net.py: 'sysctl', ....
1327 sysctl: CommandFilter, sysctl, root
1328
1329 # nova/network/linux_net.py: 'conntrack'
1330 conntrack: CommandFilter, conntrack, root
1331
1332 # nova/network/linux_net.py: 'fp-vdev'
1333 fp-vdev: CommandFilter, fp-vdev, root
1334 nova_ironic:
1335 DEFAULT:
1336 scheduler_host_manager: ironic_host_manager
1337 compute_driver: ironic.IronicDriver
1338 ram_allocation_ratio: 1.0
1339 cpu_allocation_ratio: 1.0
1340 reserved_host_memory_mb: 0
1341 libvirt:
    address_search_enabled: true
    # When address_search_enabled is true, the IP address used as the target for live-migration
    # traffic is looked up from the interface named below.
    # If this option is set to None, the hostname of the migration target compute node will be used.
1346 live_migration_interface:
1347 hypervisor:
    address_search_enabled: true
    # my_ip can be set automatically through this interface name.
1350 host_interface:
  # These keys are excluded from the config file ingested by nova-compute
1352 nova_compute_redactions:
1353 - database
1354 - api_database
1355 - cell0_database
1356 nova:
1357 DEFAULT:
1358 log_config_append: /etc/nova/logging.conf
1359 default_ephemeral_format: ext4
1360 ram_allocation_ratio: 1.0
1361 disk_allocation_ratio: 1.0
1362 cpu_allocation_ratio: 3.0
1363 state_path: /var/lib/nova
1364 osapi_compute_listen: 0.0.0.0
1365 # NOTE(portdirect): the bind port should not be defined, and is manipulated
1366 # via the endpoints section.
1367 osapi_compute_listen_port: null
1368 osapi_compute_workers: 1
1369 metadata_workers: 1
      compute_driver: libvirt.LibvirtDriver
      my_ip: 0.0.0.0
      instance_usage_audit: True
      instance_usage_audit_period: hour
      resume_guests_state_on_host_boot: True
    vnc:
      auth_schemes: none
      novncproxy_host: 0.0.0.0
      server_listen: 0.0.0.0
      # This would be set by each compute node's IP
      # server_proxyclient_address: 127.0.0.1
    spice:
      html5proxy_host: 0.0.0.0
      server_listen: 0.0.0.0
      # This would be set by each compute node's IP
      # server_proxyclient_address: 127.0.0.1
1386 conductor:
1387 workers: 1
1388 oslo_policy:
1389 policy_file: /etc/nova/policy.yaml
1390 oslo_concurrency:
1391 lock_path: /var/lib/nova/tmp
1392 oslo_middleware:
1393 enable_proxy_headers_parsing: true
1394 glance:
1395 num_retries: 3
1396 ironic:
1397 api_endpoint: null
1398 auth_url: null
1399 neutron:
1400 metadata_proxy_shared_secret: "password"
1401 service_metadata_proxy: True
1402 auth_type: password
1403 auth_version: v3
    cinder:
      catalog_info: volumev3::internalURL
    database:
      max_retries: -1
    api_database:
      max_retries: -1
    cell0_database:
      max_retries: -1
    keystone_authtoken:
      service_token_roles: service
      service_token_roles_required: true
      auth_type: password
      auth_version: v3
      memcache_security_strategy: ENCRYPT
      service_type: compute
    notifications:
      notify_on_state_change: vm_and_task_state
    service_user:
      auth_type: password
      send_service_user_token: true
    libvirt:
1425 connection_uri: "qemu+unix:///system?socket=/run/libvirt/libvirt-sock"
1426 images_type: qcow2
1427 images_rbd_pool: vms
1428 images_rbd_ceph_conf: /etc/ceph/ceph.conf
1429 rbd_user: cinder
1430 rbd_secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
1431 disk_cachemodes: "network=writeback"
1432 hw_disk_discard: unmap
1433 upgrade_levels:
1434 compute: auto
1435 cache:
1436 enabled: true
1437 backend: dogpile.cache.memcached
1438 wsgi:
1439 api_paste_config: /etc/nova/api-paste.ini
1440 oslo_messaging_notifications:
1441 driver: messagingv2
1442 oslo_messaging_rabbit:
1443 rabbit_ha_queues: true
1444 placement:
1445 auth_type: password
1446 auth_version: v3
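  # Everything under conf.nova above is rendered into /etc/nova/nova.conf as
  # oslo.config-style INI, one section per second-level key. For example, the
  # defaults above correspond roughly to:
  #   [DEFAULT]
  #   cpu_allocation_ratio = 3.0
  #   [libvirt]
  #   images_type = qcow2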
1447 logging:
1448 loggers:
1449 keys:
1450 - root
1451 - nova
1452 - os.brick
1453 handlers:
1454 keys:
1455 - stdout
1456 - stderr
1457 - "null"
1458 formatters:
1459 keys:
1460 - context
1461 - default
1462 logger_root:
1463 level: WARNING
1464 handlers: 'null'
1465 logger_nova:
1466 level: INFO
1467 handlers:
1468 - stdout
1469 qualname: nova
1470 logger_os.brick:
1471 level: INFO
1472 handlers:
1473 - stdout
1474 qualname: os.brick
1475 logger_amqp:
1476 level: WARNING
1477 handlers: stderr
1478 qualname: amqp
1479 logger_amqplib:
1480 level: WARNING
1481 handlers: stderr
1482 qualname: amqplib
1483 logger_eventletwsgi:
1484 level: WARNING
1485 handlers: stderr
1486 qualname: eventlet.wsgi.server
1487 logger_sqlalchemy:
1488 level: WARNING
1489 handlers: stderr
1490 qualname: sqlalchemy
1491 logger_boto:
1492 level: WARNING
1493 handlers: stderr
1494 qualname: boto
1495 handler_null:
1496 class: logging.NullHandler
1497 formatter: default
1498 args: ()
1499 handler_stdout:
1500 class: StreamHandler
1501 args: (sys.stdout,)
1502 formatter: context
1503 handler_stderr:
1504 class: StreamHandler
1505 args: (sys.stderr,)
1506 formatter: context
1507 formatter_context:
1508 class: oslo_log.formatters.ContextFormatter
1509 datefmt: "%Y-%m-%d %H:%M:%S"
1510 formatter_default:
1511 format: "%(message)s"
1512 datefmt: "%Y-%m-%d %H:%M:%S"
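  # Illustrative override: raise nova's own logger to DEBUG while leaving the
  # root logger untouched:
  #   conf:
  #     logging:
  #       logger_nova:
  #         level: DEBUG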
1513 rabbitmq:
    # NOTE(rk760n): add a RabbitMQ policy to mirror messages from notification queues and set an expiration time for them
1515 policies:
1516 - vhost: "nova"
1517 name: "ha_ttl_nova"
1518 definition:
          # mirror messages to other nodes in the RabbitMQ cluster
1520 ha-mode: "all"
1521 ha-sync-mode: "automatic"
1522 # 70s
1523 message-ttl: 70000
1524 priority: 0
1525 apply-to: all
1526 pattern: '^(?!(amq\.|reply_)).*'
1527 enable_iscsi: false
1528 archive_deleted_rows:
1529 purge_deleted_rows: false
1530 until_completion: true
1531 all_cells: false
1532 max_rows:
1533 enabled: False
1534 rows: 1000
1535 before:
1536 enabled: false
1537 date: 'nil'
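  # Illustrative override (the row count is an arbitrary example): purge rows
  # after archiving them, in batches:
  #   conf:
  #     archive_deleted_rows:
  #       purge_deleted_rows: true
  #       max_rows:
  #         enabled: true
  #         rows: 5000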
  nova_api_uwsgi:
    uwsgi:
      add-header: "Connection: close"
      buffer-size: 65535
      chunked-input-limit: "4096000"
      die-on-term: true
      enable-threads: true
      exit-on-reload: false
      hook-master-start: unix_signal:15 gracefully_kill_them_all
      http-auto-chunked: true
      http-raw-body: true
      lazy-apps: true
      log-x-forwarded-for: true
      master: true
      need-app: true
      procname-prefix-spaced: "nova-api:"
      route-user-agent: '^kube-probe.* donotlog:'
      socket-timeout: 10
      thunder-lock: true
      worker-reload-mercy: 80
      wsgi-file: /var/lib/openstack/bin/nova-api-wsgi
  nova_metadata_uwsgi:
    uwsgi:
      add-header: "Connection: close"
      buffer-size: 65535
      chunked-input-limit: 4096000
      die-on-term: true
      enable-threads: true
      exit-on-reload: false
      hook-master-start: unix_signal:15 gracefully_kill_them_all
      http-auto-chunked: true
      http-raw-body: true
      lazy-apps: true
      log-x-forwarded-for: true
      master: true
      need-app: true
      procname-prefix-spaced: "nova-metadata:"
      route-user-agent: '^kube-probe.* donotlog:'
      socket-timeout: 10
      thunder-lock: true
      worker-reload-mercy: 80
      wsgi-file: /var/lib/openstack/bin/nova-metadata-wsgi
1581# Names of secrets used by bootstrap and environmental checks
1582secrets:
1583 identity:
1584 admin: nova-keystone-admin
1585 nova: nova-keystone-user
    test: nova-keystone-test
1587 oslo_db:
1588 admin: nova-db-admin
1589 nova: nova-db-user
1590 oslo_db_api:
1591 admin: nova-db-api-admin
1592 nova: nova-db-api-user
1593 oslo_db_cell0:
1594 admin: nova-db-cell0-admin
1595 nova: nova-db-cell0-user
1596 oslo_messaging:
1597 admin: nova-rabbitmq-admin
1598 nova: nova-rabbitmq-user
1599 tls:
1600 compute:
1601 osapi:
1602 public: nova-tls-public
1603 internal: nova-tls-api
1604 compute_novnc_proxy:
1605 novncproxy:
1606 public: nova-novncproxy-tls-public
1607 internal: nova-novncproxy-tls-proxy
      vencrypt:
        internal: nova-novncproxy-vencrypt
    compute_metadata:
1611 metadata:
1612 public: metadata-tls-public
1613 internal: metadata-tls-metadata
1614 compute_spice_proxy:
1615 spiceproxy:
1616 internal: nova-tls-spiceproxy
  oci_image_registry:
    nova: nova-oci-image-registry
1620# typically overridden by environmental
1621# values, but should include all endpoints
1622# required by this chart
1623endpoints:
1624 cluster_domain_suffix: cluster.local
1625 local_image_registry:
1626 name: docker-registry
1627 namespace: docker-registry
1628 hosts:
1629 default: localhost
1630 internal: docker-registry
1631 node: localhost
1632 host_fqdn_override:
1633 default: null
1634 port:
1635 registry:
1636 node: 5000
  oci_image_registry:
1638 name: oci-image-registry
1639 namespace: oci-image-registry
1640 auth:
1641 enabled: false
1642 nova:
1643 username: nova
1644 password: password
1645 hosts:
1646 default: localhost
1647 host_fqdn_override:
1648 default: null
1649 port:
1650 registry:
1651 default: null
  oslo_db:
1653 auth:
1654 admin:
1655 username: root
1656 password: password
1657 secret:
1658 tls:
1659 internal: mariadb-tls-direct
1660 nova:
1661 username: nova
1662 password: password
1663 hosts:
1664 default: mariadb
1665 host_fqdn_override:
1666 default: null
1667 path: /nova
1668 scheme: mysql+pymysql
1669 port:
1670 mysql:
1671 default: 3306
1672 oslo_db_api:
1673 auth:
1674 admin:
1675 username: root
1676 password: password
1677 nova:
1678 username: nova
1679 password: password
1680 hosts:
1681 default: mariadb
1682 host_fqdn_override:
1683 default: null
1684 path: /nova_api
1685 scheme: mysql+pymysql
1686 port:
1687 mysql:
1688 default: 3306
1689 oslo_db_cell0:
1690 auth:
1691 admin:
1692 username: root
1693 password: password
1694 nova:
1695 username: nova
1696 password: password
1697 hosts:
1698 default: mariadb
1699 host_fqdn_override:
1700 default: null
1701 path: /nova_cell0
1702 scheme: mysql+pymysql
1703 port:
1704 mysql:
1705 default: 3306
1706 oslo_messaging:
1707 auth:
1708 admin:
1709 username: rabbitmq
1710 password: password
1711 secret:
1712 tls:
1713 internal: rabbitmq-tls-direct
1714 nova:
1715 username: nova
1716 password: password
1717 statefulset:
1718 replicas: 2
1719 name: rabbitmq-rabbitmq
1720 hosts:
1721 default: rabbitmq
1722 host_fqdn_override:
1723 default: null
1724 path: /nova
1725 scheme: rabbit
1726 port:
1727 amqp:
1728 default: 5672
1729 http:
1730 default: 15672
1731 oslo_cache:
1732 auth:
      # NOTE(portdirect): this is used to define the value for the keystone
      # authtoken cache encryption key. If not set, it will be populated
      # automatically with a random value, but to take advantage of this
      # feature all services should be set to use the same key and the same
      # memcached service.
1738 memcache_secret_key: null
1739 hosts:
1740 default: memcached
1741 host_fqdn_override:
1742 default: null
1743 port:
1744 memcache:
1745 default: 11211
1746 identity:
1747 name: keystone
1748 auth:
1749 admin:
1750 region_name: RegionOne
1751 username: admin
1752 password: password
1753 project_name: admin
1754 user_domain_name: default
1755 project_domain_name: default
1756 nova:
        role: admin,service
        region_name: RegionOne
1759 username: nova
1760 password: password
1761 project_name: service
1762 user_domain_name: service
1763 project_domain_name: service
1764 # NOTE(portdirect): the neutron user is not managed by the nova chart
1765 # these values should match those set in the neutron chart.
1766 neutron:
1767 region_name: RegionOne
1768 project_name: service
1769 user_domain_name: service
1770 project_domain_name: service
1771 username: neutron
1772 password: password
1773      # NOTE(portdirect): the ironic user is not managed by the nova chart;
1774      # these values should match those set in the ironic chart.
1775 ironic:
1776 auth_type: password
1777 auth_version: v3
1778 region_name: RegionOne
1779 project_name: service
1780 user_domain_name: service
1781 project_domain_name: service
1782 username: ironic
1783 password: password
1784 placement:
1785 role: admin
1786 region_name: RegionOne
1787 username: placement
1788 password: password
1789 project_name: service
1790 user_domain_name: service
1791 project_domain_name: service
okozachenko1203567fc082023-08-21 22:50:02 +10001792 cinder:
1793 role: admin,service
1794 region_name: RegionOne
1795 username: cinder
1796 password: password
1797 project_name: service
1798 user_domain_name: service
1799 project_domain_name: service
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001800 test:
1801 role: admin
1802 region_name: RegionOne
1803 username: nova-test
1804 password: password
1805 project_name: test
1806 user_domain_name: service
1807 project_domain_name: service
1808 hosts:
1809 default: keystone
1810 internal: keystone-api
1811 host_fqdn_override:
1812 default: null
1813 path:
1814 default: /v3
1815 scheme:
1816 default: http
1817 port:
1818 api:
1819 default: 80
1820 internal: 5000
1821 image:
1822 name: glance
1823 hosts:
1824 default: glance-api
1825 public: glance
1826 host_fqdn_override:
1827 default: null
1828 path:
1829 default: null
1830 scheme:
1831 default: http
1832 port:
1833 api:
1834 default: 9292
1835 public: 80
1836 compute:
1837 name: nova
1838 hosts:
1839 default: nova-api
1840 public: nova
1841 host_fqdn_override:
1842 default: null
1843      # NOTE(portdirect): this chart supports TLS for fqdn-overridden public
1844      # endpoints using the following format:
1845 # public:
1846 # host: null
1847 # tls:
1848 # crt: null
1849 # key: null
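      # A filled-in sketch of the format above (hypothetical hostname;
      # certificate material elided):
      #   host_fqdn_override:
      #     public:
      #       host: nova.example.com
      #       tls:
      #         crt: <PEM-encoded certificate>
      #         key: <PEM-encoded private key>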
1850 path:
1851 default: "/v2.1/%(tenant_id)s"
1852 scheme:
1853 default: 'http'
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01001854 service: 'http'
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001855 port:
1856 api:
1857 default: 8774
1858 public: 80
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01001859 service: 8774
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001860 novncproxy:
1861 default: 6080
1862 compute_metadata:
1863 name: nova
1864 ip:
1865      # If blank, clusterIP and metadata_host are set dynamically
1866 ingress: null
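      # For example, to pin the metadata address rather than resolve it
      # dynamically (hypothetical address):
      #   ip:
      #     ingress: 10.0.0.10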
1867 hosts:
1868 default: nova-metadata
1869 public: metadata
1870 host_fqdn_override:
1871 default: null
1872 path:
1873 default: /
1874 scheme:
1875 default: 'http'
1876 port:
1877 metadata:
1878 default: 8775
1879 public: 80
1880 compute_novnc_proxy:
1881 name: nova
1882 hosts:
1883 default: nova-novncproxy
1884 public: novncproxy
1885 host_fqdn_override:
1886 default: null
1887      # NOTE(portdirect): this chart supports TLS for fqdn-overridden public
1888      # endpoints using the following format:
1889 # public:
1890 # host: null
1891 # tls:
1892 # crt: null
1893 # key: null
1894 path:
1895 default: /vnc_auto.html
1896 scheme:
1897 default: 'http'
1898 port:
1899 novnc_proxy:
1900 default: 6080
1901 public: 80
okozachenko1203ea639e72023-08-30 23:25:38 +10001902  # This endpoint exists only to allow configuring the cert used for
1903  # vencrypt: the same CA/issuer needs to be used to sign both this
1904  # cert and the libvirt/qemu certs.
1905 compute_novnc_vencrypt:
1906 hosts:
1907 default: nova-novncproxy
1908 host_fqdn_override:
1909 default:
1910 commonName: nova-novncproxy
1911 usages:
1912 - client auth
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001913 compute_spice_proxy:
1914 name: nova
1915 hosts:
1916 default: nova-spiceproxy
1917 public: placement
1918 host_fqdn_override:
1919 default: null
1920 path:
1921 default: /spice_auto.html
1922 scheme:
1923 default: 'http'
1924 port:
1925 spice_proxy:
1926 default: 6082
1927 placement:
1928 name: placement
1929 hosts:
1930 default: placement-api
1931 public: placement
1932 host_fqdn_override:
1933 default: null
1934 path:
1935 default: /
1936 scheme:
1937 default: 'http'
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01001938 service: 'http'
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001939 port:
1940 api:
1941 default: 8778
1942 public: 80
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01001943 service: 8778
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001944 network:
1945 name: neutron
1946 hosts:
1947 default: neutron-server
1948 public: neutron
1949 host_fqdn_override:
1950 default: null
1951 path:
1952 default: null
1953 scheme:
1954 default: 'http'
1955 port:
1956 api:
1957 default: 9696
1958 public: 80
1959 baremetal:
1960 name: ironic
1961 hosts:
1962 default: ironic-api
1963 public: ironic
1964 host_fqdn_override:
1965 default: null
1966 path:
1967 default: null
1968 scheme:
1969 default: http
1970 port:
1971 api:
1972 default: 6385
1973 public: 80
1974 fluentd:
1975 namespace: null
1976 name: fluentd
1977 hosts:
1978 default: fluentd-logging
1979 host_fqdn_override:
1980 default: null
1981 path:
1982 default: null
1983 scheme: 'http'
1984 port:
1985 service:
1986 default: 24224
1987 metrics:
1988 default: 24220
1989  # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress.
1990  # They are used to enable the egress K8s network policy.
1991 kube_dns:
1992 namespace: kube-system
1993 name: kubernetes-dns
1994 hosts:
1995 default: kube-dns
1996 host_fqdn_override:
1997 default: null
1998 path:
1999 default: null
2000 scheme: http
2001 port:
2002 dns:
2003 default: 53
2004 protocol: UDP
2005 ingress:
2006 namespace: null
2007 name: ingress
2008 hosts:
2009 default: ingress
2010 port:
2011 ingress:
2012 default: 80
2013
2014pod:
2015 probes:
2016 rpc_timeout: 60
2017 rpc_retries: 2
2018 compute:
2019 default:
2020 liveness:
2021 enabled: True
2022 params:
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002023 periodSeconds: 90
2024 timeoutSeconds: 70
2025 readiness:
2026 enabled: True
2027 params:
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002028 periodSeconds: 90
2029 timeoutSeconds: 70
Oleksandr K177a7ae2023-12-28 14:22:03 +01002030 startup:
2031 enabled: True
2032 params:
2033 failureThreshold: 120
2034 periodSeconds: 10
2035 successThreshold: 1
2036 timeoutSeconds: 70
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002037 api-metadata:
2038 default:
2039 liveness:
2040 enabled: True
2041 params:
Mohammed Naser69247252023-09-26 22:23:46 -04002042 initialDelaySeconds: 5
2043 periodSeconds: 10
2044 timeoutSeconds: 5
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002045 readiness:
2046 enabled: True
2047 params:
Mohammed Naser69247252023-09-26 22:23:46 -04002048 initialDelaySeconds: 5
2049 periodSeconds: 10
2050 timeoutSeconds: 5
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002051 api-osapi:
2052 default:
2053 liveness:
2054 enabled: True
2055 params:
Mohammed Naser69247252023-09-26 22:23:46 -04002056 initialDelaySeconds: 5
2057 periodSeconds: 10
2058 timeoutSeconds: 5
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002059 readiness:
2060 enabled: True
2061 params:
Mohammed Naser69247252023-09-26 22:23:46 -04002062 initialDelaySeconds: 5
2063 periodSeconds: 10
2064 timeoutSeconds: 5
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002065 conductor:
2066 default:
2067 liveness:
2068 enabled: True
2069 params:
2070 initialDelaySeconds: 120
2071 periodSeconds: 90
2072 timeoutSeconds: 70
2073 readiness:
2074 enabled: True
2075 params:
2076 initialDelaySeconds: 80
2077 periodSeconds: 90
2078 timeoutSeconds: 70
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002079 novncproxy:
2080 default:
2081 liveness:
2082 enabled: True
2083 params:
2084 initialDelaySeconds: 30
2085 periodSeconds: 60
2086 timeoutSeconds: 15
2087 readiness:
2088 enabled: True
2089 params:
2090 initialDelaySeconds: 30
2091 periodSeconds: 60
2092 timeoutSeconds: 15
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002093 scheduler:
2094 default:
2095 liveness:
2096 enabled: True
2097 params:
2098 initialDelaySeconds: 120
2099 periodSeconds: 90
2100 timeoutSeconds: 70
2101 readiness:
2102 enabled: True
2103 params:
2104 initialDelaySeconds: 80
2105 periodSeconds: 90
2106 timeoutSeconds: 70
2107 compute-spice-proxy:
2108 default:
2109 liveness:
2110 enabled: True
2111 params:
2112 initialDelaySeconds: 30
2113 periodSeconds: 60
2114 timeoutSeconds: 15
2115 readiness:
2116 enabled: True
2117 params:
2118 initialDelaySeconds: 30
2119 periodSeconds: 60
2120 timeoutSeconds: 15
2121 security_context:
2122 nova:
2123 pod:
2124 runAsUser: 42424
2125 container:
2126 nova_compute_init:
2127 readOnlyRootFilesystem: true
2128 runAsUser: 0
2129 tungstenfabric_compute_init:
2130 readOnlyRootFilesystem: true
2131 allowPrivilegeEscalation: false
2132 ceph_perms:
2133 readOnlyRootFilesystem: true
2134 runAsUser: 0
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002135 nova_compute_vnc_init:
2136 readOnlyRootFilesystem: true
2137 allowPrivilegeEscalation: false
2138 nova_compute_spice_init:
2139 readOnlyRootFilesystem: true
2140 allowPrivilegeEscalation: false
2141 nova_compute:
2142 readOnlyRootFilesystem: true
2143 privileged: true
2144 nova_compute_ssh:
2145 privileged: true
2146 runAsUser: 0
2147 nova_compute_ssh_init:
2148 runAsUser: 0
2149 nova_api_metadata_init:
2150 readOnlyRootFilesystem: true
2151 allowPrivilegeEscalation: false
2152 nova_api:
2153 readOnlyRootFilesystem: true
2154 allowPrivilegeEscalation: false
2155 nova_osapi:
2156 readOnlyRootFilesystem: true
2157 allowPrivilegeEscalation: false
2158 nova_conductor:
2159 readOnlyRootFilesystem: true
2160 allowPrivilegeEscalation: false
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002161 nova_novncproxy_init:
2162 readOnlyRootFilesystem: true
2163 allowPrivilegeEscalation: false
2164 nova_novncproxy_init_assests:
2165 readOnlyRootFilesystem: true
2166 allowPrivilegeEscalation: false
2167 nova_novncproxy:
2168 readOnlyRootFilesystem: true
2169 allowPrivilegeEscalation: false
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002170 nova_scheduler:
2171 readOnlyRootFilesystem: true
2172 allowPrivilegeEscalation: false
2173 nova_spiceproxy_init:
2174 readOnlyRootFilesystem: true
2175 allowPrivilegeEscalation: false
2176 nova_spiceproxy_init_assets:
2177 readOnlyRootFilesystem: true
2178 allowPrivilegeEscalation: false
2179 nova_spiceproxy:
2180 readOnlyRootFilesystem: true
2181 allowPrivilegeEscalation: false
2182 bootstrap:
2183 pod:
2184 runAsUser: 42424
2185 container:
2186 nova_wait_for_computes_init:
2187 readOnlyRootFilesystem: true
2188 allowPrivilegeEscalation: false
2189 bootstrap:
2190 readOnlyRootFilesystem: true
2191 allowPrivilegeEscalation: false
2192 nova_cell_setup:
2193 pod:
2194 runAsUser: 42424
2195 container:
2196 nova_wait_for_computes_init:
2197 readOnlyRootFilesystem: true
2198 allowPrivilegeEscalation: false
2199 nova_cell_setup_init:
2200 readOnlyRootFilesystem: true
2201 allowPrivilegeEscalation: false
2202 nova_cell_setup:
2203 readOnlyRootFilesystem: true
2204 allowPrivilegeEscalation: false
2205 archive_deleted_rows:
2206 pod:
2207 runAsUser: 42424
2208 container:
2209 nova_archive_deleted_rows_init:
2210 readOnlyRootFilesystem: true
2211 allowPrivilegeEscalation: false
2212 nova_archive_deleted_rows:
2213 readOnlyRootFilesystem: true
2214 allowPrivilegeEscalation: false
2215 cell_setup:
2216 pod:
2217 runAsUser: 42424
2218 container:
2219 nova_cell_setup:
2220 readOnlyRootFilesystem: true
2221 allowPrivilegeEscalation: false
2222 service_cleaner:
2223 pod:
2224 runAsUser: 42424
2225 container:
2226 nova_service_cleaner:
2227 readOnlyRootFilesystem: true
2228 allowPrivilegeEscalation: false
2229 use_fqdn:
2230    # NOTE: If the option "host" is not specified in nova.conf, the host name
2231    # shown for the hypervisor defaults to the short name of the host. Setting
2232    # this option to true will cause $(hostname --fqdn) to be used as the host
2233    # name by default; if the short name, $(hostname --short), is desired, set
2234    # the option to false. Specifying a host in nova.conf via the conf: section
2235    # will supersede the value of this option (see the sketch below).
2236 compute: true
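    # A sketch of superseding this flag via the conf: section (hypothetical
    # FQDN; assumes the chart's conf.nova.DEFAULT layout):
    #   conf:
    #     nova:
    #       DEFAULT:
    #         host: compute-01.example.com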
2237 affinity:
2238 anti:
2239 type:
2240 default: preferredDuringSchedulingIgnoredDuringExecution
2241 topologyKey:
2242 default: kubernetes.io/hostname
2243 weight:
2244 default: 10
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01002245 tolerations:
2246 nova:
2247 enabled: false
2248 tolerations:
2249 - key: node-role.kubernetes.io/master
2250 operator: Exists
2251 effect: NoSchedule
Oleksandr Kozachenkoc0022be2023-05-23 20:36:21 +02002252 - key: node-role.kubernetes.io/control-plane
2253 operator: Exists
2254 effect: NoSchedule
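  # To apply the tolerations above (e.g. to allow nova control-plane pods to
  # schedule onto control-plane nodes), a minimal override sketch:
  #   pod:
  #     tolerations:
  #       nova:
  #         enabled: true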
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002255 mounts:
2256 nova_compute:
2257 init_container: null
2258 nova_compute:
2259 volumeMounts:
2260 volumes:
2261 nova_compute_ironic:
2262 init_container: null
2263 nova_compute_ironic:
2264 volumeMounts:
2265 volumes:
2266 nova_api_metadata:
2267 init_container: null
2268 nova_api_metadata:
2269 volumeMounts:
2270 volumes:
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002271 nova_api_osapi:
2272 init_container: null
2273 nova_api_osapi:
2274 volumeMounts:
2275 volumes:
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002276 nova_conductor:
2277 init_container: null
2278 nova_conductor:
2279 volumeMounts:
2280 volumes:
2281 nova_scheduler:
2282 init_container: null
2283 nova_scheduler:
2284 volumeMounts:
2285 volumes:
2286 nova_bootstrap:
2287 init_container: null
2288 nova_bootstrap:
2289 volumeMounts:
2290 volumes:
2291 nova_tests:
2292 init_container: null
2293 nova_tests:
2294 volumeMounts:
2295 volumes:
2296 nova_novncproxy:
2297 init_novncproxy: null
2298 nova_novncproxy:
2299 volumeMounts:
2300 volumes:
2301 nova_spiceproxy:
2302 init_spiceproxy: null
2303 nova_spiceproxy:
2304 volumeMounts:
2305 volumes:
2306 nova_db_sync:
2307 nova_db_sync:
2308 volumeMounts:
2309 volumes:
2310 useHostNetwork:
2311 novncproxy: true
2312 replicas:
2313 api_metadata: 1
2314 compute_ironic: 1
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002315 osapi: 1
2316 conductor: 1
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002317 scheduler: 1
2318 novncproxy: 1
2319 spiceproxy: 1
2320 lifecycle:
2321 upgrades:
2322 deployments:
2323 revision_history: 3
2324 pod_replacement_strategy: RollingUpdate
2325 rolling_update:
2326 max_unavailable: 1
2327 max_surge: 3
2328 daemonsets:
2329 pod_replacement_strategy: RollingUpdate
2330 compute:
2331 enabled: true
2332 min_ready_seconds: 0
2333 max_unavailable: 1
2334 disruption_budget:
2335 metadata:
2336 min_available: 0
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002337 osapi:
2338 min_available: 0
2339 termination_grace_period:
2340 metadata:
2341 timeout: 30
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002342 osapi:
2343 timeout: 30
2344 resources:
2345 enabled: false
2346 compute:
2347 requests:
2348 memory: "128Mi"
2349 cpu: "100m"
2350 limits:
2351 memory: "1024Mi"
2352 cpu: "2000m"
2353 compute_ironic:
2354 requests:
2355 memory: "128Mi"
2356 cpu: "100m"
2357 limits:
2358 memory: "1024Mi"
2359 cpu: "2000m"
2360 api_metadata:
2361 requests:
2362 memory: "128Mi"
2363 cpu: "100m"
2364 limits:
2365 memory: "1024Mi"
2366 cpu: "2000m"
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002367 api:
2368 requests:
2369 memory: "128Mi"
2370 cpu: "100m"
2371 limits:
2372 memory: "1024Mi"
2373 cpu: "2000m"
2374 conductor:
2375 requests:
2376 memory: "128Mi"
2377 cpu: "100m"
2378 limits:
2379 memory: "1024Mi"
2380 cpu: "2000m"
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002381 scheduler:
2382 requests:
2383 memory: "128Mi"
2384 cpu: "100m"
2385 limits:
2386 memory: "1024Mi"
2387 cpu: "2000m"
2388 ssh:
2389 requests:
2390 memory: "128Mi"
2391 cpu: "100m"
2392 limits:
2393 memory: "1024Mi"
2394 cpu: "2000m"
2395 novncproxy:
2396 requests:
2397 memory: "128Mi"
2398 cpu: "100m"
2399 limits:
2400 memory: "1024Mi"
2401 cpu: "2000m"
2402 spiceproxy:
2403 requests:
2404 memory: "128Mi"
2405 cpu: "100m"
2406 limits:
2407 memory: "1024Mi"
2408 cpu: "2000m"
2409 jobs:
2410 bootstrap:
2411 requests:
2412 memory: "128Mi"
2413 cpu: "100m"
2414 limits:
2415 memory: "1024Mi"
2416 cpu: "2000m"
2417 db_init:
2418 requests:
2419 memory: "128Mi"
2420 cpu: "100m"
2421 limits:
2422 memory: "1024Mi"
2423 cpu: "2000m"
2424 rabbit_init:
2425 requests:
2426 memory: "128Mi"
2427 cpu: "100m"
2428 limits:
2429 memory: "1024Mi"
2430 cpu: "2000m"
2431 db_sync:
2432 requests:
2433 memory: "128Mi"
2434 cpu: "100m"
2435 limits:
2436 memory: "1024Mi"
2437 cpu: "2000m"
2438 archive_deleted_rows:
2439 requests:
2440 memory: "128Mi"
2441 cpu: "100m"
2442 limits:
2443 memory: "1024Mi"
2444 cpu: "2000m"
2445 db_drop:
2446 requests:
2447 memory: "128Mi"
2448 cpu: "100m"
2449 limits:
2450 memory: "1024Mi"
2451 cpu: "2000m"
2452 ks_endpoints:
2453 requests:
2454 memory: "128Mi"
2455 cpu: "100m"
2456 limits:
2457 memory: "1024Mi"
2458 cpu: "2000m"
2459 ks_service:
2460 requests:
2461 memory: "128Mi"
2462 cpu: "100m"
2463 limits:
2464 memory: "1024Mi"
2465 cpu: "2000m"
2466 ks_user:
2467 requests:
2468 memory: "128Mi"
2469 cpu: "100m"
2470 limits:
2471 memory: "1024Mi"
2472 cpu: "2000m"
2473 tests:
2474 requests:
2475 memory: "128Mi"
2476 cpu: "100m"
2477 limits:
2478 memory: "1024Mi"
2479 cpu: "2000m"
2480 cell_setup:
2481 requests:
2482 memory: "128Mi"
2483 cpu: "100m"
2484 limits:
2485 memory: "1024Mi"
2486 cpu: "2000m"
2487 service_cleaner:
2488 requests:
2489 memory: "128Mi"
2490 cpu: "100m"
2491 limits:
2492 memory: "1024Mi"
2493 cpu: "2000m"
2494 image_repo_sync:
2495 requests:
2496 memory: "128Mi"
2497 cpu: "100m"
2498 limits:
2499 memory: "1024Mi"
2500 cpu: "2000m"
2501
2502network_policy:
2503 nova:
2504 # TODO(lamt): Need to tighten this ingress for security.
2505 ingress:
2506 - {}
2507 egress:
2508 - {}
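  # A tighter ingress sketch for the TODO above (hypothetical selectors and
  # port; adapt to the deployment's labels before use):
  #   nova:
  #     ingress:
  #       - from:
  #           - podSelector:
  #               matchLabels:
  #                 application: neutron
  #         ports:
  #           - protocol: TCP
  #             port: 8774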
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002509
2510# NOTE(helm_hook): helm_hook might break with the helm2 binary;
2511# set helm3_hook: false when using the helm2 binary.
2512helm3_hook: true
2513
2514health_probe:
2515 logging:
2516 level: ERROR
2517
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01002518tls:
2519 identity: false
2520 oslo_messaging: false
2521 oslo_db: false
2522
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002523manifests:
2524 certificates: false
2525 configmap_bin: true
2526 configmap_etc: true
2527 cron_job_cell_setup: true
2528 cron_job_service_cleaner: true
2529 cron_job_archive_deleted_rows: false
2530 daemonset_compute: true
2531 deployment_api_metadata: true
2532 deployment_api_osapi: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002533 deployment_conductor: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002534 deployment_novncproxy: true
2535 deployment_spiceproxy: true
2536 deployment_scheduler: true
2537 ingress_metadata: true
2538 ingress_novncproxy: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002539 ingress_osapi: true
2540 job_bootstrap: true
2541 job_db_init: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002542 job_db_sync: true
2543 job_db_drop: false
2544 job_image_repo_sync: true
2545 job_rabbit_init: true
2546 job_ks_endpoints: true
2547 job_ks_service: true
2548 job_ks_user: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002549 job_cell_setup: true
2550 pdb_metadata: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002551 pdb_osapi: true
2552 pod_rally_test: true
2553 network_policy: false
2554 secret_db_api: true
2555 secret_db_cell0: true
2556 secret_db: true
2557 secret_ingress_tls: true
2558 secret_keystone: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002559 secret_rabbitmq: true
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01002560 secret_registry: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002561 service_ingress_metadata: true
2562 service_ingress_novncproxy: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002563 service_ingress_osapi: true
2564 service_metadata: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002565 service_novncproxy: true
2566 service_spiceproxy: true
2567 service_osapi: true
2568 statefulset_compute_ironic: false
2569...