# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Default values for nova.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value

---
release_group: null

# Node-selection labels: which Kubernetes nodes each nova component may
# schedule onto. Compute agents target compute nodes; everything else
# targets the control plane.
labels:
  agent:
    compute:
      node_selector_key: openstack-compute-node
      node_selector_value: enabled
    compute_ironic:
      node_selector_key: openstack-compute-node
      node_selector_value: enabled
  api_metadata:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  conductor:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  novncproxy:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  osapi:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  scheduler:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  spiceproxy:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  test:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled

# Container images used by the chart, keyed by role. Tags containing a
# colon (repo:tag) are plain scalars or quoted — both parse identically;
# quoting is kept where the original had it.
images:
  pull_policy: IfNotPresent
  tags:
    bootstrap: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
    db_drop: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
    db_init: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
    dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
    rabbit_init: docker.io/rabbitmq:3.7-management
    ks_user: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
    ks_service: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
    nova_archive_deleted_rows: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
    ks_endpoints: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
    nova_api: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
    nova_cell_setup: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
    nova_cell_setup_init: docker.io/openstackhelm/heat:wallaby-ubuntu_focal
    nova_compute: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
    nova_compute_ironic: 'docker.io/kolla/ubuntu-source-nova-compute-ironic:wallaby'
    nova_compute_ssh: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
    nova_conductor: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
    nova_db_sync: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
    nova_novncproxy: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
    nova_novncproxy_assets: 'docker.io/kolla/ubuntu-source-nova-novncproxy:wallaby'
    nova_scheduler: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
    # NOTE(portdirect): we simply use the ceph config helper here,
    # as it has both oscli and jq.
    nova_service_cleaner: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_focal'
    nova_spiceproxy: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
    nova_spiceproxy_assets: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
    test: docker.io/xrally/xrally-openstack:2.0.0
    image_repo_sync: docker.io/docker:17.07.0
    nova_wait_for_computes_init: gcr.io/google_containers/hyperkube-amd64:v1.11.6
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync

# CronJob schedules and retention for the periodic nova maintenance jobs.
jobs:
  # NOTE(portdirect): When using cells new nodes will be added to the cell on the hour by default.
  # TODO(portdirect): Add a post-start action to nova compute pods that registers themselves.
  cell_setup:
    cron: "0 */1 * * *"
    starting_deadline: 600
    history:
      success: 3
      failed: 1
    extended_wait:
      enabled: false
      iteration: 3
      duration: 5
  service_cleaner:
    cron: "0 */1 * * *"
    starting_deadline: 600
    history:
      success: 3
      failed: 1
    sleep_time: 60
  archive_deleted_rows:
    cron: "0 */1 * * *"
    starting_deadline: 600
    history:
      success: 3
      failed: 1

# Post-install bootstrap: default flavor creation plus an optional gate
# that waits for a percentage of compute hypervisors to register before
# the bootstrap script runs.
bootstrap:
  enabled: true
  ks_user: admin
  script: null
  structured:
    flavors:
      enabled: true
      options:
        m1_tiny:
          name: "m1.tiny"
          ram: 512
          disk: 1
          vcpus: 1
        m1_small:
          name: "m1.small"
          ram: 2048
          disk: 20
          vcpus: 1
        m1_medium:
          name: "m1.medium"
          ram: 4096
          disk: 40
          vcpus: 2
        m1_large:
          name: "m1.large"
          ram: 8192
          disk: 80
          vcpus: 4
        m1_xlarge:
          name: "m1.xlarge"
          ram: 16384
          disk: 160
          vcpus: 8
    wait_for_computes:
      enabled: false
      # Wait percentage is the minimum percentage of compute hypervisors which
      # must be available before the remainder of the bootstrap script can be run.
      wait_percentage: 70
      # Once the wait_percentage above is achieved, the remaining_wait is the
      # amount of time in seconds to wait before executing the remainder of the
      # bootstrap script.
      remaining_wait: 300
      scripts:
        init_script: |
          # This runs in a bootstrap init container. It counts the number of compute nodes.
          COMPUTE_NODES=$(kubectl get nodes -o custom-columns=NAME:.metadata.name -l openstack-compute-node=enabled --no-headers | sort)
          /bin/echo $COMPUTE_NODES > /tmp/compute_nodes.txt
        wait_script: |
          # This script runs in the main bootstrap container just before the
          # bootstrap.script is called.
          COMPUTE_HOSTS=`cat /tmp/compute_nodes.txt | wc -w`
          if [[ $COMPUTE_HOSTS == 0 ]]; then
            echo "There are no compute hosts found!"
            exit 1
          fi

          # Wait for all hypervisors to come up before moving on with the deployment
          HYPERVISOR_WAIT=true
          WAIT_AFTER_READY=0
          SLEEP=5
          while [[ $HYPERVISOR_WAIT == true ]]; do
            # Its possible that openstack command may fail due to not being able to
            # reach the compute service
            set +e
            HYPERVISORS=$(openstack hypervisor list -f value -c 'Hypervisor Hostname' | wc -w)
            set -e

            PERCENT_READY=$(( $HYPERVISORS * 100 / $COMPUTE_HOSTS ))
            if [[ $PERCENT_READY -ge $WAIT_PERCENTAGE ]]; then
              echo "Hypervisor ready percentage is $PERCENT_READY"
              if [[ $PERCENT_READY == 100 ]]; then
                HYPERVISOR_WAIT=false
                echo "All hypervisors are ready."
              elif [[ $WAIT_AFTER_READY -ge $REMAINING_WAIT ]]; then
                HYPERVISOR_WAIT=false
                echo "Waited the configured time -- $HYPERVISORS out of $COMPUTE_HOSTS hypervisor(s) ready -- proceeding with the bootstrap."
              else
                sleep $SLEEP
                WAIT_AFTER_READY=$(( $WAIT_AFTER_READY + $SLEEP ))
              fi
            else
              echo "Waiting $SLEEP seconds for enough hypervisors to be discovered..."
              sleep $SLEEP
            fi
          done

# Service networking: wiring backend, per-endpoint ports, ingress classes
# and annotations, and optional NodePort / compute-SSH settings.
network:
  # provide what type of network wiring will be used
  # possible options: openvswitch, linuxbridge, sriov
  backend:
    - openvswitch
  osapi:
    port: 8774
    ingress:
      public: true
      classes:
        namespace: "nginx"
        cluster: "nginx-cluster"
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
    external_policy_local: false
    node_port:
      enabled: false
      port: 30774
  metadata:
    port: 8775
    ingress:
      public: true
      classes:
        namespace: "nginx"
        cluster: "nginx-cluster"
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
    external_policy_local: false
    node_port:
      enabled: false
      port: 30775
  novncproxy:
    ingress:
      public: true
      classes:
        namespace: "nginx"
        cluster: "nginx-cluster"
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
    node_port:
      enabled: false
      port: 30680
  spiceproxy:
    node_port:
      enabled: false
      port: 30682
  ssh:
    enabled: false
    port: 8022
    from_subnet: 0.0.0.0/0
    key_types:
      - rsa
      - dsa
      - ecdsa
      - ed25519
    # NOTE: the string 'null' (quoted) is intentional here — it is a
    # sentinel consumed by the chart templates, not a YAML null.
    private_key: 'null'
    public_key: 'null'

# Dependency graph consumed by helm-toolkit's kubernetes-entrypoint:
# jobs/services/pods each component waits on before starting. The
# "targeted" section is selected by the network.backend in use.
dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
          - nova-image-repo-sync
        services:
          - endpoint: node
            service: local_image_registry
    targeted:
      ovn:
        compute:
          pod:
            - requireSameNode: true
              labels:
                application: ovn
                component: ovn-controller
      openvswitch:
        compute:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-ovs-agent
      linuxbridge:
        compute:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-lb-agent
      sriov:
        compute:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-sriov-agent
  static:
    api:
      jobs:
        - nova-db-sync
        - nova-ks-user
        - nova-ks-endpoints
        - nova-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: identity
    api_metadata:
      jobs:
        - nova-db-sync
        - nova-ks-user
        - nova-ks-endpoints
        - nova-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: identity
    bootstrap:
      services:
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: compute
    cell_setup:
      jobs:
        - nova-db-sync
        - nova-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: compute
      pod:
        - requireSameNode: false
          labels:
            application: nova
            component: compute
    service_cleaner:
      jobs:
        - nova-db-sync
        - nova-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: compute
    compute:
      pod:
        - requireSameNode: true
          labels:
            application: libvirt
            component: libvirt
      jobs:
        - nova-db-sync
        - nova-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: image
        - endpoint: internal
          service: compute
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute_metadata
    compute_ironic:
      jobs:
        - nova-db-sync
        - nova-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: image
        - endpoint: internal
          service: compute
        - endpoint: internal
          service: network
        - endpoint: internal
          service: baremetal
    conductor:
      jobs:
        - nova-db-sync
        - nova-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: compute
    db_drop:
      services:
        - endpoint: internal
          service: oslo_db
    archive_deleted_rows:
      jobs:
        - nova-db-init
        - nova-db-sync
    db_init:
      services:
        - endpoint: internal
          service: oslo_db
    db_sync:
      jobs:
        - nova-db-init
      services:
        - endpoint: internal
          service: oslo_db
    ks_endpoints:
      jobs:
        - nova-ks-service
      services:
        - endpoint: internal
          service: identity
    ks_service:
      services:
        - endpoint: internal
          service: identity
    ks_user:
      services:
        - endpoint: internal
          service: identity
    rabbit_init:
      services:
        - service: oslo_messaging
          endpoint: internal
    novncproxy:
      jobs:
        - nova-db-sync
      services:
        - endpoint: internal
          service: oslo_db
    spiceproxy:
      jobs:
        - nova-db-sync
      services:
        - endpoint: internal
          service: oslo_db
    scheduler:
      jobs:
        - nova-db-sync
        - nova-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: compute
    tests:
      services:
        - endpoint: internal
          service: image
        - endpoint: internal
          service: compute
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute_metadata
    image_repo_sync:
      services:
        - endpoint: internal
          service: local_image_registry

# Console configuration. Empty interface values are deliberate: a blank
# value makes the chart fall back to the default routing interface.
console:
  # serial | spice | novnc | none
  console_kind: novnc
  serial:
  spice:
    compute:
      # IF blank, search default routing interface
      server_proxyclient_interface:
    proxy:
      # IF blank, search default routing interface
      server_proxyclient_interface:
  novnc:
    compute:
      # IF blank, search default routing interface
      vncserver_proxyclient_interface:
    vncproxy:
      # IF blank, search default routing interface
      vncserver_proxyclient_interface:

# Ceph client wiring: the ConfigMap holding ceph.conf and the secret
# holding the client keyring.
ceph_client:
  configmap: ceph-etc
  user_secret_name: pvc-ceph-client-key

511conf:
512 security: |
513 #
514 # Disable access to the entire file system except for the directories that
515 # are explicitly allowed later.
516 #
517 # This currently breaks the configurations that come with some web application
518 # Debian packages.
519 #
520 #<Directory />
521 # AllowOverride None
522 # Require all denied
523 #</Directory>
524
525 # Changing the following options will not really affect the security of the
526 # server, but might make attacks slightly more difficult in some cases.
527
528 #
529 # ServerTokens
530 # This directive configures what you return as the Server HTTP response
531 # Header. The default is 'Full' which sends information about the OS-Type
532 # and compiled in modules.
533 # Set to one of: Full | OS | Minimal | Minor | Major | Prod
534 # where Full conveys the most information, and Prod the least.
535 ServerTokens Prod
536
537 #
538 # Optionally add a line containing the server version and virtual host
539 # name to server-generated pages (internal error documents, FTP directory
540 # listings, mod_status and mod_info output etc., but not CGI generated
541 # documents or custom error documents).
542 # Set to "EMail" to also include a mailto: link to the ServerAdmin.
543 # Set to one of: On | Off | EMail
544 ServerSignature Off
545
546 #
547 # Allow TRACE method
548 #
549 # Set to "extended" to also reflect the request body (only for testing and
550 # diagnostic purposes).
551 #
552 # Set to one of: On | Off | extended
553 TraceEnable Off
554
555 #
556 # Forbid access to version control directories
557 #
558 # If you use version control systems in your document root, you should
559 # probably deny access to their directories. For example, for subversion:
560 #
561 #<DirectoryMatch "/\.svn">
562 # Require all denied
563 #</DirectoryMatch>
564
565 #
566 # Setting this header will prevent MSIE from interpreting files as something
567 # else than declared by the content type in the HTTP headers.
568 # Requires mod_headers to be enabled.
569 #
570 #Header set X-Content-Type-Options: "nosniff"
571
572 #
573 # Setting this header will prevent other sites from embedding pages from this
574 # site as frames. This defends against clickjacking attacks.
575 # Requires mod_headers to be enabled.
576 #
577 #Header set X-Frame-Options: "sameorigin"
578 software:
579 apache2:
580 binary: apache2
581 start_parameters: -DFOREGROUND
582 conf_dir: /etc/apache2/conf-enabled
583 site_dir: /etc/apache2/sites-enable
584 mods_dir: /etc/apache2/mods-available
585 a2enmod: null
586 a2dismod: null
587 ceph:
588 enabled: true
589 admin_keyring: null
590 cinder:
591 user: "cinder"
592 keyring: null
593 secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
594 rally_tests:
595 run_tempest: false
596 clean_up: |
597 FLAVORS=$(openstack flavor list -f value --all | awk '$2 ~ /^s_rally_/ { print $1 }')
598 if [ -n "$FLAVORS" ]; then
599 echo $FLAVORS | xargs openstack flavor delete
600 fi
601 SERVERS=$(openstack server list -f value --all | awk '$2 ~ /^s_rally_/ { print $1 }')
602 if [ -n "$SERVERS" ]; then
603 echo $SERVERS | xargs openstack server delete
604 fi
605 IMAGES=$(openstack image list -f value | awk '$2 ~ /^c_rally_/ { print $1 }')
606 if [ -n "$IMAGES" ]; then
607 echo $IMAGES | xargs openstack image delete
608 fi
609 tests:
Mohammed Naserf3f59a72023-01-15 21:02:04 -0500610 NovaAggregates.create_and_get_aggregate_details:
611 - args:
612 availability_zone: nova
613 runner:
614 concurrency: 1
615 times: 1
616 type: constant
617 sla:
618 failure_rate:
619 max: 0
620 NovaAggregates.create_and_update_aggregate:
621 - args:
622 availability_zone: nova
623 runner:
624 concurrency: 1
625 times: 1
626 type: constant
627 sla:
628 failure_rate:
629 max: 0
630 NovaAggregates.list_aggregates:
631 - runner:
632 concurrency: 1
633 times: 1
634 type: constant
635 sla:
636 failure_rate:
637 max: 0
638 NovaAvailabilityZones.list_availability_zones:
639 - args:
640 detailed: true
641 runner:
642 concurrency: 1
643 times: 1
644 type: constant
645 sla:
646 failure_rate:
647 max: 0
648 NovaFlavors.create_and_delete_flavor:
649 - args:
650 disk: 1
651 ram: 500
652 vcpus: 1
653 runner:
654 concurrency: 1
655 times: 1
656 type: constant
657 sla:
658 failure_rate:
659 max: 0
660 NovaFlavors.create_and_list_flavor_access:
661 - args:
662 disk: 1
663 ram: 500
664 vcpus: 1
665 runner:
666 concurrency: 1
667 times: 1
668 type: constant
669 sla:
670 failure_rate:
671 max: 0
672 NovaFlavors.create_flavor:
673 - args:
674 disk: 1
675 ram: 500
676 vcpus: 1
677 runner:
678 concurrency: 1
679 times: 1
680 type: constant
681 sla:
682 failure_rate:
683 max: 0
684 NovaFlavors.create_flavor_and_add_tenant_access:
685 - args:
686 disk: 1
687 ram: 500
688 vcpus: 1
689 runner:
690 concurrency: 1
691 times: 1
692 type: constant
693 sla:
694 failure_rate:
695 max: 0
696 NovaFlavors.create_flavor_and_set_keys:
697 - args:
698 disk: 1
699 extra_specs:
700 'quota:disk_read_bytes_sec': 10240
701 ram: 500
702 vcpus: 1
703 runner:
704 concurrency: 1
705 times: 1
706 type: constant
707 sla:
708 failure_rate:
709 max: 0
710 NovaFlavors.list_flavors:
711 - args:
712 detailed: true
713 runner:
714 concurrency: 1
715 times: 1
716 type: constant
717 sla:
718 failure_rate:
719 max: 0
720 NovaHypervisors.list_and_get_hypervisors:
721 - args:
722 detailed: true
723 runner:
724 concurrency: 1
725 times: 1
726 type: constant
727 sla:
728 failure_rate:
729 max: 0
730 NovaHypervisors.list_and_get_uptime_hypervisors:
731 - args:
732 detailed: true
733 runner:
734 concurrency: 1
735 times: 1
736 type: constant
737 sla:
738 failure_rate:
739 max: 0
740 NovaHypervisors.list_and_search_hypervisors:
741 - args:
742 detailed: true
743 runner:
744 concurrency: 1
745 times: 1
746 type: constant
747 sla:
748 failure_rate:
749 max: 0
750 NovaHypervisors.list_hypervisors:
751 - args:
752 detailed: true
753 runner:
754 concurrency: 1
755 times: 1
756 type: constant
757 sla:
758 failure_rate:
759 max: 0
760 NovaHypervisors.statistics_hypervisors:
761 - args: {}
762 runner:
763 concurrency: 1
764 times: 1
765 type: constant
766 sla:
767 failure_rate:
768 max: 0
769 NovaKeypair.create_and_delete_keypair:
770 - runner:
771 concurrency: 1
772 times: 1
773 type: constant
774 sla:
775 failure_rate:
776 max: 0
777 NovaKeypair.create_and_list_keypairs:
778 - runner:
779 concurrency: 1
780 times: 1
781 type: constant
782 sla:
783 failure_rate:
784 max: 0
785 NovaServerGroups.create_and_list_server_groups:
786 - args:
787 all_projects: false
788 kwargs:
789 policies:
790 - affinity
791 runner:
792 concurrency: 1
793 times: 1
794 type: constant
795 sla:
796 failure_rate:
797 max: 0
798 NovaServices.list_services:
799 - runner:
800 concurrency: 1
801 times: 1
802 type: constant
803 sla:
804 failure_rate:
805 max: 0
okozachenko120317930d42023-09-06 00:24:05 +1000806 paste:
807 composite:metadata:
808 use: egg:Paste#urlmap
809 /: meta
810 pipeline:meta:
811 pipeline: cors metaapp
812 app:metaapp:
813 paste.app_factory: nova.api.metadata.handler:MetadataRequestHandler.factory
814 composite:osapi_compute:
815 use: call:nova.api.openstack.urlmap:urlmap_factory
816 /: oscomputeversions
817 /v2: openstack_compute_api_v21_legacy_v2_compatible
818 /v2.1: openstack_compute_api_v21
819 composite:openstack_compute_api_v21:
820 use: call:nova.api.auth:pipeline_factory_v21
821 noauth2: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit noauth2 osapi_compute_app_v21
822 keystone: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit authtoken audit keystonecontext osapi_compute_app_v21
823 composite:openstack_compute_api_v21_legacy_v2_compatible:
824 use: call:nova.api.auth:pipeline_factory_v21
825 noauth2: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit noauth2 legacy_v2_compatible osapi_compute_app_v21
826 keystone: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit authtoken audit keystonecontext legacy_v2_compatible osapi_compute_app_v21
827 filter:request_id:
828 paste.filter_factory: oslo_middleware:RequestId.factory
829 filter:compute_req_id:
830 paste.filter_factory: nova.api.compute_req_id:ComputeReqIdMiddleware.factory
831 filter:faultwrap:
832 paste.filter_factory: nova.api.openstack:FaultWrapper.factory
833 filter:noauth2:
834 paste.filter_factory: nova.api.openstack.auth:NoAuthMiddleware.factory
835 filter:sizelimit:
836 paste.filter_factory: oslo_middleware:RequestBodySizeLimiter.factory
837 filter:http_proxy_to_wsgi:
838 paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
839 filter:legacy_v2_compatible:
840 paste.filter_factory: nova.api.openstack:LegacyV2CompatibleWrapper.factory
841 app:osapi_compute_app_v21:
842 paste.app_factory: nova.api.openstack.compute:APIRouterV21.factory
843 pipeline:oscomputeversions:
844 pipeline: faultwrap http_proxy_to_wsgi oscomputeversionapp
845 app:oscomputeversionapp:
846 paste.app_factory: nova.api.openstack.compute.versions:Versions.factory
847 filter:cors:
848 paste.filter_factory: oslo_middleware.cors:filter_factory
849 oslo_config_project: nova
850 filter:keystonecontext:
851 paste.filter_factory: nova.api.auth:NovaKeystoneContext.factory
852 filter:authtoken:
853 paste.filter_factory: keystonemiddleware.auth_token:filter_factory
854 filter:audit:
855 paste.filter_factory: keystonemiddleware.audit:filter_factory
856 audit_map_file: /etc/nova/api_audit_map.conf
Mohammed Naserf3f59a72023-01-15 21:02:04 -0500857 policy: {}
858 nova_sudoers: |
859 # This sudoers file supports rootwrap for both Kolla and LOCI Images.
860 Defaults !requiretty
861 Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin"
862 nova ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/nova-rootwrap /etc/nova/rootwrap.conf *, /var/lib/openstack/bin/nova-rootwrap /etc/nova/rootwrap.conf *
863 api_audit_map:
864 DEFAULT:
865 target_endpoint_type: None
866 custom_actions:
867 enable: enable
868 disable: disable
869 delete: delete
870 startup: start/startup
871 shutdown: stop/shutdown
872 reboot: start/reboot
873 os-migrations/get: read
874 os-server-password/post: update
875 path_keywords:
876 add: None
877 action: None
878 enable: None
879 disable: None
880 configure-project: None
881 defaults: None
882 delete: None
883 detail: None
884 diagnostics: None
885 entries: entry
886 extensions: alias
887 flavors: flavor
888 images: image
889 ips: label
890 limits: None
891 metadata: key
892 os-agents: os-agent
893 os-aggregates: os-aggregate
894 os-availability-zone: None
895 os-certificates: None
896 os-cloudpipe: None
897 os-fixed-ips: ip
898 os-extra_specs: key
899 os-flavor-access: None
900 os-floating-ip-dns: domain
901 os-floating-ips-bulk: host
902 os-floating-ip-pools: None
903 os-floating-ips: floating-ip
904 os-hosts: host
905 os-hypervisors: hypervisor
906 os-instance-actions: instance-action
907 os-keypairs: keypair
908 os-migrations: None
909 os-networks: network
910 os-quota-sets: tenant
911 os-security-groups: security_group
912 os-security-group-rules: rule
913 os-server-password: None
914 os-services: None
915 os-simple-tenant-usage: tenant
916 os-virtual-interfaces: None
917 os-volume_attachments: attachment
918 os-volumes_boot: None
919 os-volumes: volume
920 os-volume-types: volume-type
921 os-snapshots: snapshot
922 reboot: None
923 servers: server
924 shutdown: None
925 startup: None
926 statistics: None
927 service_endpoints:
928 compute: service/compute
929 rootwrap: |
930 # Configuration for nova-rootwrap
931 # This file should be owned by (and only-writeable by) the root user
932
933 [DEFAULT]
934 # List of directories to load filter definitions from (separated by ',').
935 # These directories MUST all be only writeable by root !
936 filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap
937
938 # List of directories to search executables in, in case filters do not
939 # explicitely specify a full path (separated by ',')
940 # If not specified, defaults to system PATH environment variable.
941 # These directories MUST all be only writeable by root !
942 exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin
943
944 # Enable logging to syslog
945 # Default value is False
946 use_syslog=False
947
948 # Which syslog facility to use.
949 # Valid values include auth, authpriv, syslog, local0, local1...
950 # Default value is 'syslog'
951 syslog_log_facility=syslog
952
953 # Which messages to log.
954 # INFO means log all usage
955 # ERROR means only log unsuccessful attempts
956 syslog_log_level=ERROR
Mohammed Naserf3f59a72023-01-15 21:02:04 -0500957 rootwrap_filters:
958 api_metadata:
959 pods:
960 - metadata
961 content: |
962 # nova-rootwrap command filters for api-metadata nodes
963 # This is needed on nova-api hosts running with "metadata" in enabled_apis
964 # or when running nova-api-metadata
965 # This file should be owned by (and only-writeable by) the root user
966
967 [Filters]
968 # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
969 iptables-save: CommandFilter, iptables-save, root
970 ip6tables-save: CommandFilter, ip6tables-save, root
971
972 # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
973 iptables-restore: CommandFilter, iptables-restore, root
974 ip6tables-restore: CommandFilter, ip6tables-restore, root
975 compute:
976 pods:
977 - compute
978 content: |
979 # nova-rootwrap command filters for compute nodes
980 # This file should be owned by (and only-writeable by) the root user
981
982 [Filters]
983 # nova/virt/disk/mount/api.py: 'kpartx', '-a', device
984 # nova/virt/disk/mount/api.py: 'kpartx', '-d', device
985 kpartx: CommandFilter, kpartx, root
986
987 # nova/virt/xenapi/vm_utils.py: tune2fs, -O ^has_journal, part_path
988 # nova/virt/xenapi/vm_utils.py: tune2fs, -j, partition_path
989 tune2fs: CommandFilter, tune2fs, root
990
991 # nova/virt/disk/mount/api.py: 'mount', mapped_device
992 # nova/virt/disk/api.py: 'mount', '-o', 'bind', src, target
993 # nova/virt/xenapi/vm_utils.py: 'mount', '-t', 'ext2,ext3,ext4,reiserfs'..
994 # nova/virt/configdrive.py: 'mount', device, mountdir
995 # nova/virt/libvirt/volume.py: 'mount', '-t', 'sofs' ...
996 mount: CommandFilter, mount, root
997
998 # nova/virt/disk/mount/api.py: 'umount', mapped_device
999 # nova/virt/disk/api.py: 'umount' target
1000 # nova/virt/xenapi/vm_utils.py: 'umount', dev_path
1001 # nova/virt/configdrive.py: 'umount', mountdir
1002 umount: CommandFilter, umount, root
1003
1004 # nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-c', device, image
1005 # nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-d', device
1006 qemu-nbd: CommandFilter, qemu-nbd, root
1007
1008 # nova/virt/disk/mount/loop.py: 'losetup', '--find', '--show', image
1009 # nova/virt/disk/mount/loop.py: 'losetup', '--detach', device
1010 losetup: CommandFilter, losetup, root
1011
1012 # nova/virt/disk/vfs/localfs.py: 'blkid', '-o', 'value', '-s', 'TYPE', device
1013 blkid: CommandFilter, blkid, root
1014
1015 # nova/virt/libvirt/utils.py: 'blockdev', '--getsize64', path
1016 # nova/virt/disk/mount/nbd.py: 'blockdev', '--flushbufs', device
1017 blockdev: RegExpFilter, blockdev, root, blockdev, (--getsize64|--flushbufs), /dev/.*
1018
1019 # nova/virt/disk/vfs/localfs.py: 'tee', canonpath
1020 tee: CommandFilter, tee, root
1021
1022 # nova/virt/disk/vfs/localfs.py: 'mkdir', canonpath
1023 mkdir: CommandFilter, mkdir, root
1024
1025 # nova/virt/disk/vfs/localfs.py: 'chown'
1026 # nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log
1027 # nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log
1028 # nova/virt/libvirt/connection.py: 'chown', 'root', basepath('disk')
1029 chown: CommandFilter, chown, root
1030
1031 # nova/virt/disk/vfs/localfs.py: 'chmod'
1032 chmod: CommandFilter, chmod, root
1033
1034 # nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap'
1035 # nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up'
1036 # nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev
1037 # nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i..
1038 # nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'..
1039 # nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',..
1040 # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',..
1041 # nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev)
1042 # nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1]
1043 # nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge
1044 # nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', ..
1045 # nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',..
1046 # nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ...
1047 # nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,..
1048 # nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up'
1049 # nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up'
1050 # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, ..
1051 # nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, ..
1052 # nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up'
1053 # nova/network/linux_net.py: 'ip', 'route', 'add', ..
1054 # nova/network/linux_net.py: 'ip', 'route', 'del', .
1055 # nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev
1056 ip: CommandFilter, ip, root
1057
1058 # nova/virt/libvirt/vif.py: 'tunctl', '-b', '-t', dev
1059 # nova/network/linux_net.py: 'tunctl', '-b', '-t', dev
1060 tunctl: CommandFilter, tunctl, root
1061
1062 # nova/virt/libvirt/vif.py: 'ovs-vsctl', ...
1063 # nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ...
1064 # nova/network/linux_net.py: 'ovs-vsctl', ....
1065 ovs-vsctl: CommandFilter, ovs-vsctl, root
1066
1067 # nova/virt/libvirt/vif.py: 'vrouter-port-control', ...
1068 vrouter-port-control: CommandFilter, vrouter-port-control, root
1069
1070 # nova/virt/libvirt/vif.py: 'ebrctl', ...
1071 ebrctl: CommandFilter, ebrctl, root
1072
1073 # nova/virt/libvirt/vif.py: 'mm-ctl', ...
1074 mm-ctl: CommandFilter, mm-ctl, root
1075
1076 # nova/network/linux_net.py: 'ovs-ofctl', ....
1077 ovs-ofctl: CommandFilter, ovs-ofctl, root
1078
1079 # nova/virt/libvirt/connection.py: 'dd', if=%s % virsh_output, ...
1080 dd: CommandFilter, dd, root
1081
1082 # nova/virt/xenapi/volume_utils.py: 'iscsiadm', '-m', ...
1083 iscsiadm: CommandFilter, iscsiadm, root
1084
1085 # nova/virt/libvirt/volume/aoe.py: 'aoe-revalidate', aoedev
1086 # nova/virt/libvirt/volume/aoe.py: 'aoe-discover'
1087 aoe-revalidate: CommandFilter, aoe-revalidate, root
1088 aoe-discover: CommandFilter, aoe-discover, root
1089
1090 # nova/virt/xenapi/vm_utils.py: parted, --script, ...
1091 # nova/virt/xenapi/vm_utils.py: 'parted', '--script', dev_path, ..*.
1092 parted: CommandFilter, parted, root
1093
1094 # nova/virt/xenapi/vm_utils.py: 'pygrub', '-qn', dev_path
1095 pygrub: CommandFilter, pygrub, root
1096
1097 # nova/virt/xenapi/vm_utils.py: fdisk %(dev_path)s
1098 fdisk: CommandFilter, fdisk, root
1099
1100 # nova/virt/xenapi/vm_utils.py: e2fsck, -f, -p, partition_path
1101 # nova/virt/disk/api.py: e2fsck, -f, -p, image
1102 e2fsck: CommandFilter, e2fsck, root
1103
1104 # nova/virt/xenapi/vm_utils.py: resize2fs, partition_path
1105 # nova/virt/disk/api.py: resize2fs, image
1106 resize2fs: CommandFilter, resize2fs, root
1107
1108 # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
1109 iptables-save: CommandFilter, iptables-save, root
1110 ip6tables-save: CommandFilter, ip6tables-save, root
1111
1112 # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
1113 iptables-restore: CommandFilter, iptables-restore, root
1114 ip6tables-restore: CommandFilter, ip6tables-restore, root
1115
1116 # nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...
1117 # nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..
1118 arping: CommandFilter, arping, root
1119
1120 # nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
1121 dhcp_release: CommandFilter, dhcp_release, root
1122
1123 # nova/network/linux_net.py: 'kill', '-9', pid
1124 # nova/network/linux_net.py: 'kill', '-HUP', pid
1125 kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP
1126
1127 # nova/network/linux_net.py: 'kill', pid
1128 kill_radvd: KillFilter, root, /usr/sbin/radvd
1129
1130 # nova/network/linux_net.py: dnsmasq call
1131 dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq
1132
1133 # nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'..
1134 radvd: CommandFilter, radvd, root
1135
1136 # nova/network/linux_net.py: 'brctl', 'addbr', bridge
1137 # nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0
1138 # nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'
1139 # nova/network/linux_net.py: 'brctl', 'addif', bridge, interface
1140 brctl: CommandFilter, brctl, root
1141
1142 # nova/virt/libvirt/utils.py: 'mkswap'
1143 # nova/virt/xenapi/vm_utils.py: 'mkswap'
1144 mkswap: CommandFilter, mkswap, root
1145
1146 # nova/virt/libvirt/utils.py: 'nova-idmapshift'
1147 nova-idmapshift: CommandFilter, nova-idmapshift, root
1148
1149 # nova/virt/xenapi/vm_utils.py: 'mkfs'
1150 # nova/utils.py: 'mkfs', fs, path, label
1151 mkfs: CommandFilter, mkfs, root
1152
1153 # nova/virt/libvirt/utils.py: 'qemu-img'
1154 qemu-img: CommandFilter, qemu-img, root
1155
1156 # nova/virt/disk/vfs/localfs.py: 'readlink', '-e'
1157 readlink: CommandFilter, readlink, root
1158
1159 # nova/virt/disk/api.py:
1160 mkfs.ext3: CommandFilter, mkfs.ext3, root
1161 mkfs.ext4: CommandFilter, mkfs.ext4, root
1162 mkfs.ntfs: CommandFilter, mkfs.ntfs, root
1163
1164 # nova/virt/libvirt/connection.py:
1165 lvremove: CommandFilter, lvremove, root
1166
1167 # nova/virt/libvirt/utils.py:
1168 lvcreate: CommandFilter, lvcreate, root
1169
1170 # nova/virt/libvirt/utils.py:
1171 lvs: CommandFilter, lvs, root
1172
1173 # nova/virt/libvirt/utils.py:
1174 vgs: CommandFilter, vgs, root
1175
1176 # nova/utils.py:read_file_as_root: 'cat', file_path
1177 # (called from nova/virt/disk/vfs/localfs.py:VFSLocalFS.read_file)
1178 read_passwd: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/passwd
1179 read_shadow: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/shadow
1180
1181 # os-brick needed commands
1182 read_initiator: ReadFileFilter, /etc/iscsi/initiatorname.iscsi
1183 multipath: CommandFilter, multipath, root
1184 # multipathd show status
1185 multipathd: CommandFilter, multipathd, root
1186 systool: CommandFilter, systool, root
1187 vgc-cluster: CommandFilter, vgc-cluster, root
1188 # os_brick/initiator/connector.py
1189 drv_cfg: CommandFilter, /opt/emc/scaleio/sdc/bin/drv_cfg, root, /opt/emc/scaleio/sdc/bin/drv_cfg, --query_guid
1190
1191 # TODO(smcginnis) Temporary fix.
1192 # Need to pull in os-brick os-brick.filters file instead and clean
1193 # out stale brick values from this file.
1194 scsi_id: CommandFilter, /lib/udev/scsi_id, root
1195 # os_brick.privileged.default oslo.privsep context
1196 # This line ties the superuser privs with the config files, context name,
1197 # and (implicitly) the actual python code invoked.
1198 privsep-rootwrap: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.*
1199
1200 # nova/storage/linuxscsi.py: sg_scan device
1201 sg_scan: CommandFilter, sg_scan, root
1202
1203 # nova/volume/encryptors/cryptsetup.py:
1204 # nova/volume/encryptors/luks.py:
1205 ln: RegExpFilter, ln, root, ln, --symbolic, --force, /dev/mapper/crypt-.+, .+
1206
1207 # nova/volume/encryptors.py:
1208 # nova/virt/libvirt/dmcrypt.py:
1209 cryptsetup: CommandFilter, cryptsetup, root
1210
1211 # nova/virt/xenapi/vm_utils.py:
1212 xenstore-read: CommandFilter, xenstore-read, root
1213
1214 # nova/virt/libvirt/utils.py:
1215 rbd: CommandFilter, rbd, root
1216
1217 # nova/virt/libvirt/utils.py: 'shred', '-n3', '-s%d' % volume_size, path
1218 shred: CommandFilter, shred, root
1219
1220 # nova/virt/libvirt/volume.py: 'cp', '/dev/stdin', delete_control..
1221 cp: CommandFilter, cp, root
1222
1223 # nova/virt/xenapi/vm_utils.py:
1224 sync: CommandFilter, sync, root
1225
1226 # nova/virt/libvirt/imagebackend.py:
1227 ploop: RegExpFilter, ploop, root, ploop, restore-descriptor, .*
1228 prl_disk_tool: RegExpFilter, prl_disk_tool, root, prl_disk_tool, resize, --size, .*M$, --resize_partition, --hdd, .*
1229
1230 # nova/virt/libvirt/utils.py: 'xend', 'status'
1231 xend: CommandFilter, xend, root
1232
1233 # nova/virt/libvirt/utils.py:
1234 touch: CommandFilter, touch, root
1235
1236 # nova/virt/libvirt/volume/vzstorage.py
1237 pstorage-mount: CommandFilter, pstorage-mount, root
1238 network:
1239 pods:
1240 - compute
1241 content: |
1242 # nova-rootwrap command filters for network nodes
1243 # This file should be owned by (and only-writeable by) the root user
1244
1245 [Filters]
1246 # nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap'
1247 # nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up'
1248 # nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev
1249 # nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i..
1250 # nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'..
1251 # nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',..
1252 # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',..
1253 # nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev)
1254 # nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1]
1255 # nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge
1256 # nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', ..
1257 # nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',..
1258 # nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ...
1259 # nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,..
1260 # nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up'
1261 # nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up'
1262 # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, ..
1263 # nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, ..
1264 # nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up'
1265 # nova/network/linux_net.py: 'ip', 'route', 'add', ..
1266 # nova/network/linux_net.py: 'ip', 'route', 'del', .
1267 # nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev
1268 ip: CommandFilter, ip, root
1269
1270 # nova/virt/libvirt/vif.py: 'ovs-vsctl', ...
1271 # nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ...
1272 # nova/network/linux_net.py: 'ovs-vsctl', ....
1273 ovs-vsctl: CommandFilter, ovs-vsctl, root
1274
1275 # nova/network/linux_net.py: 'ovs-ofctl', ....
1276 ovs-ofctl: CommandFilter, ovs-ofctl, root
1277
1278 # nova/virt/libvirt/vif.py: 'ivs-ctl', ...
1279 # nova/virt/libvirt/vif.py: 'ivs-ctl', 'del-port', ...
1280 # nova/network/linux_net.py: 'ivs-ctl', ....
1281 ivs-ctl: CommandFilter, ivs-ctl, root
1282
1283 # nova/virt/libvirt/vif.py: 'ifc_ctl', ...
1284 ifc_ctl: CommandFilter, /opt/pg/bin/ifc_ctl, root
1285
1286 # nova/network/linux_net.py: 'ebtables', '-D' ...
1287 # nova/network/linux_net.py: 'ebtables', '-I' ...
1288 ebtables: CommandFilter, ebtables, root
1289 ebtables_usr: CommandFilter, ebtables, root
1290
1291 # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
1292 iptables-save: CommandFilter, iptables-save, root
1293 ip6tables-save: CommandFilter, ip6tables-save, root
1294
1295 # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
1296 iptables-restore: CommandFilter, iptables-restore, root
1297 ip6tables-restore: CommandFilter, ip6tables-restore, root
1298
1299 # nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...
1300 # nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..
1301 arping: CommandFilter, arping, root
1302
1303 # nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
1304 dhcp_release: CommandFilter, dhcp_release, root
1305
1306 # nova/network/linux_net.py: 'kill', '-9', pid
1307 # nova/network/linux_net.py: 'kill', '-HUP', pid
1308 kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP
1309
1310 # nova/network/linux_net.py: 'kill', pid
1311 kill_radvd: KillFilter, root, /usr/sbin/radvd
1312
1313 # nova/network/linux_net.py: dnsmasq call
1314 dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq
1315
1316 # nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'..
1317 radvd: CommandFilter, radvd, root
1318
1319 # nova/network/linux_net.py: 'brctl', 'addbr', bridge
1320 # nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0
1321 # nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'
1322 # nova/network/linux_net.py: 'brctl', 'addif', bridge, interface
1323 brctl: CommandFilter, brctl, root
1324
1325 # nova/network/linux_net.py: 'sysctl', ....
1326 sysctl: CommandFilter, sysctl, root
1327
1328 # nova/network/linux_net.py: 'conntrack'
1329 conntrack: CommandFilter, conntrack, root
1330
1331 # nova/network/linux_net.py: 'fp-vdev'
1332 fp-vdev: CommandFilter, fp-vdev, root
1333 nova_ironic:
1334 DEFAULT:
1335 scheduler_host_manager: ironic_host_manager
1336 compute_driver: ironic.IronicDriver
1337 ram_allocation_ratio: 1.0
1338 cpu_allocation_ratio: 1.0
1339 reserved_host_memory_mb: 0
1340 libvirt:
1341 # Get the IP address to be used as the target for live migration traffic using interface name.
1342 # If this option is set to None, the hostname of the migration target compute node will be used.
1343 live_migration_interface:
1344 hypervisor:
1345 # my_ip can be set automatically through this interface name.
1346 host_interface:
1347 # This list is the keys to exclude from the config file ingested by nova-compute
1348 nova_compute_redactions:
1349 - database
1350 - api_database
1351 - cell0_database
1352 nova:
1353 DEFAULT:
1354 log_config_append: /etc/nova/logging.conf
1355 default_ephemeral_format: ext4
1356 ram_allocation_ratio: 1.0
1357 disk_allocation_ratio: 1.0
1358 cpu_allocation_ratio: 3.0
1359 state_path: /var/lib/nova
1360 osapi_compute_listen: 0.0.0.0
1361 # NOTE(portdirect): the bind port should not be defined, and is manipulated
1362 # via the endpoints section.
1363 osapi_compute_listen_port: null
1364 osapi_compute_workers: 1
1365 metadata_workers: 1
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001366 compute_driver: libvirt.LibvirtDriver
1367 my_ip: 0.0.0.0
1368 instance_usage_audit: True
1369 instance_usage_audit_period: hour
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001370 resume_guests_state_on_host_boot: True
1371 vnc:
Mohammed Naser56484d72023-07-10 17:08:26 -04001372 auth_schemes: none
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001373 novncproxy_host: 0.0.0.0
Mohammed Nasere4c14ad2023-03-24 19:50:39 +00001374 server_listen: 0.0.0.0
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001375      # This would be set by each compute node's ip
1376 # server_proxyclient_address: 127.0.0.1
1377 spice:
1378 html5proxy_host: 0.0.0.0
1379 server_listen: 0.0.0.0
1380      # This would be set by each compute node's ip
1381 # server_proxyclient_address: 127.0.0.1
1382 conductor:
1383 workers: 1
1384 oslo_policy:
1385 policy_file: /etc/nova/policy.yaml
1386 oslo_concurrency:
1387 lock_path: /var/lib/nova/tmp
1388 oslo_middleware:
1389 enable_proxy_headers_parsing: true
1390 glance:
1391 num_retries: 3
1392 ironic:
1393 api_endpoint: null
1394 auth_url: null
1395 neutron:
1396 metadata_proxy_shared_secret: "password"
1397 service_metadata_proxy: True
1398 auth_type: password
1399 auth_version: v3
okozachenko1203567fc082023-08-21 22:50:02 +10001400 cinder:
1401 catalog_info: volumev3::internalURL
1402 auth_url: null
1403 auth_type: password
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001404 database:
1405 max_retries: -1
1406 api_database:
1407 max_retries: -1
1408 cell0_database:
1409 max_retries: -1
1410 keystone_authtoken:
okozachenko1203567fc082023-08-21 22:50:02 +10001411 service_token_roles: service
1412 service_token_roles_required: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001413 auth_type: password
1414 auth_version: v3
1415 memcache_security_strategy: ENCRYPT
Oleksandr Kozachenkoc0022be2023-05-23 20:36:21 +02001416 service_type: compute
Mohammed Naser0a13cee2023-03-02 11:28:29 +01001417 notifications:
1418 notify_on_state_change: vm_and_task_state
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001419 service_user:
1420 auth_type: password
okozachenko1203567fc082023-08-21 22:50:02 +10001421 send_service_user_token: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001422 libvirt:
1423 connection_uri: "qemu+unix:///system?socket=/run/libvirt/libvirt-sock"
1424 images_type: qcow2
1425 images_rbd_pool: vms
1426 images_rbd_ceph_conf: /etc/ceph/ceph.conf
1427 rbd_user: cinder
1428 rbd_secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
1429 disk_cachemodes: "network=writeback"
1430 hw_disk_discard: unmap
1431 upgrade_levels:
1432 compute: auto
1433 cache:
1434 enabled: true
1435 backend: dogpile.cache.memcached
1436 wsgi:
1437 api_paste_config: /etc/nova/api-paste.ini
1438 oslo_messaging_notifications:
1439 driver: messagingv2
1440 oslo_messaging_rabbit:
1441 rabbit_ha_queues: true
1442 placement:
1443 auth_type: password
1444 auth_version: v3
1445 logging:
1446 loggers:
1447 keys:
1448 - root
1449 - nova
1450 - os.brick
1451 handlers:
1452 keys:
1453 - stdout
1454 - stderr
1455 - "null"
1456 formatters:
1457 keys:
1458 - context
1459 - default
1460 logger_root:
1461 level: WARNING
1462 handlers: 'null'
1463 logger_nova:
1464 level: INFO
1465 handlers:
1466 - stdout
1467 qualname: nova
1468 logger_os.brick:
1469 level: INFO
1470 handlers:
1471 - stdout
1472 qualname: os.brick
1473 logger_amqp:
1474 level: WARNING
1475 handlers: stderr
1476 qualname: amqp
1477 logger_amqplib:
1478 level: WARNING
1479 handlers: stderr
1480 qualname: amqplib
1481 logger_eventletwsgi:
1482 level: WARNING
1483 handlers: stderr
1484 qualname: eventlet.wsgi.server
1485 logger_sqlalchemy:
1486 level: WARNING
1487 handlers: stderr
1488 qualname: sqlalchemy
1489 logger_boto:
1490 level: WARNING
1491 handlers: stderr
1492 qualname: boto
1493 handler_null:
1494 class: logging.NullHandler
1495 formatter: default
1496 args: ()
1497 handler_stdout:
1498 class: StreamHandler
1499 args: (sys.stdout,)
1500 formatter: context
1501 handler_stderr:
1502 class: StreamHandler
1503 args: (sys.stderr,)
1504 formatter: context
1505 formatter_context:
1506 class: oslo_log.formatters.ContextFormatter
1507 datefmt: "%Y-%m-%d %H:%M:%S"
1508 formatter_default:
1509 format: "%(message)s"
1510 datefmt: "%Y-%m-%d %H:%M:%S"
1511 rabbitmq:
1512    # NOTE(rk760n): adding rmq policy to mirror messages from notification queues and set expiration time for them
1513 policies:
1514 - vhost: "nova"
1515 name: "ha_ttl_nova"
1516 definition:
1517          # mirror messages to other nodes in rmq cluster
1518 ha-mode: "all"
1519 ha-sync-mode: "automatic"
1520 # 70s
1521 message-ttl: 70000
1522 priority: 0
1523 apply-to: all
1524 pattern: '^(?!(amq\.|reply_)).*'
1525 enable_iscsi: false
1526 archive_deleted_rows:
1527 purge_deleted_rows: false
1528 until_completion: true
1529 all_cells: false
1530 max_rows:
1531      enabled: false
1532 rows: 1000
1533 before:
1534 enabled: false
1535 date: 'nil'
Mohammed Naser69247252023-09-26 22:23:46 -04001536 nova_api_uwsgi:
1537 uwsgi:
1538 add-header: "Connection: close"
1539 buffer-size: 65535
1540 die-on-term: true
1541 enable-threads: true
1542 exit-on-reload: false
1543 hook-master-start: unix_signal:15 gracefully_kill_them_all
1544 lazy-apps: true
1545 log-x-forwarded-for: true
1546 master: true
1547 procname-prefix-spaced: "nova-api:"
1548 route-user-agent: '^kube-probe.* donotlog:'
1549 thunder-lock: true
1550 worker-reload-mercy: 80
1551 wsgi-file: /var/lib/openstack/bin/nova-api-wsgi
1552 nova_metadata_uwsgi:
1553 uwsgi:
1554 add-header: "Connection: close"
1555 buffer-size: 65535
1556 die-on-term: true
1557 enable-threads: true
1558 exit-on-reload: false
1559 hook-master-start: unix_signal:15 gracefully_kill_them_all
1560 lazy-apps: true
1561 log-x-forwarded-for: true
1562 master: true
1563 procname-prefix-spaced: "nova-metadata:"
1564 route-user-agent: '^kube-probe.* donotlog:'
1565 thunder-lock: true
1566 worker-reload-mercy: 80
1567 wsgi-file: /var/lib/openstack/bin/nova-metadata-wsgi
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001568
1569# Names of secrets used by bootstrap and environmental checks
1570secrets:
1571 identity:
1572 admin: nova-keystone-admin
1573 nova: nova-keystone-user
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001574 test: nova-keystone-test
1575 oslo_db:
1576 admin: nova-db-admin
1577 nova: nova-db-user
1578 oslo_db_api:
1579 admin: nova-db-api-admin
1580 nova: nova-db-api-user
1581 oslo_db_cell0:
1582 admin: nova-db-cell0-admin
1583 nova: nova-db-cell0-user
1584 oslo_messaging:
1585 admin: nova-rabbitmq-admin
1586 nova: nova-rabbitmq-user
1587 tls:
1588 compute:
1589 osapi:
1590 public: nova-tls-public
1591 internal: nova-tls-api
1592 compute_novnc_proxy:
1593 novncproxy:
1594 public: nova-novncproxy-tls-public
1595 internal: nova-novncproxy-tls-proxy
okozachenko1203ea639e72023-08-30 23:25:38 +10001596 vencrypt:
1597 internal: nova-novncproxy-vencrypt
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001598 compute_metadata:
1599 metadata:
1600 public: metadata-tls-public
1601 internal: metadata-tls-metadata
1602 compute_spice_proxy:
1603 spiceproxy:
1604 internal: nova-tls-spiceproxy
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01001605 oci_image_registry:
1606 nova: nova-oci-image-registry
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001607
1608# typically overridden by environmental
1609# values, but should include all endpoints
1610# required by this chart
1611endpoints:
1612 cluster_domain_suffix: cluster.local
1613 local_image_registry:
1614 name: docker-registry
1615 namespace: docker-registry
1616 hosts:
1617 default: localhost
1618 internal: docker-registry
1619 node: localhost
1620 host_fqdn_override:
1621 default: null
1622 port:
1623 registry:
1624 node: 5000
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01001625 oci_image_registry:
1626 name: oci-image-registry
1627 namespace: oci-image-registry
1628 auth:
1629 enabled: false
1630 nova:
1631 username: nova
1632 password: password
1633 hosts:
1634 default: localhost
1635 host_fqdn_override:
1636 default: null
1637 port:
1638 registry:
1639 default: null
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001640 oslo_db:
1641 auth:
1642 admin:
1643 username: root
1644 password: password
1645 secret:
1646 tls:
1647 internal: mariadb-tls-direct
1648 nova:
1649 username: nova
1650 password: password
1651 hosts:
1652 default: mariadb
1653 host_fqdn_override:
1654 default: null
1655 path: /nova
1656 scheme: mysql+pymysql
1657 port:
1658 mysql:
1659 default: 3306
1660 oslo_db_api:
1661 auth:
1662 admin:
1663 username: root
1664 password: password
1665 nova:
1666 username: nova
1667 password: password
1668 hosts:
1669 default: mariadb
1670 host_fqdn_override:
1671 default: null
1672 path: /nova_api
1673 scheme: mysql+pymysql
1674 port:
1675 mysql:
1676 default: 3306
1677 oslo_db_cell0:
1678 auth:
1679 admin:
1680 username: root
1681 password: password
1682 nova:
1683 username: nova
1684 password: password
1685 hosts:
1686 default: mariadb
1687 host_fqdn_override:
1688 default: null
1689 path: /nova_cell0
1690 scheme: mysql+pymysql
1691 port:
1692 mysql:
1693 default: 3306
1694 oslo_messaging:
1695 auth:
1696 admin:
1697 username: rabbitmq
1698 password: password
1699 secret:
1700 tls:
1701 internal: rabbitmq-tls-direct
1702 nova:
1703 username: nova
1704 password: password
1705 statefulset:
1706 replicas: 2
1707 name: rabbitmq-rabbitmq
1708 hosts:
1709 default: rabbitmq
1710 host_fqdn_override:
1711 default: null
1712 path: /nova
1713 scheme: rabbit
1714 port:
1715 amqp:
1716 default: 5672
1717 http:
1718 default: 15672
1719 oslo_cache:
1720 auth:
1721 # NOTE(portdirect): this is used to define the value for keystone
1722 # authtoken cache encryption key, if not set it will be populated
1723 # automatically with a random value, but to take advantage of
1724 # this feature all services should be set to use the same key,
1725 # and memcache service.
1726 memcache_secret_key: null
1727 hosts:
1728 default: memcached
1729 host_fqdn_override:
1730 default: null
1731 port:
1732 memcache:
1733 default: 11211
1734 identity:
1735 name: keystone
1736 auth:
1737 admin:
1738 region_name: RegionOne
1739 username: admin
1740 password: password
1741 project_name: admin
1742 user_domain_name: default
1743 project_domain_name: default
1744 nova:
okozachenko1203567fc082023-08-21 22:50:02 +10001745 role: admin,service
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001746 region_name: RegionOne
1747 username: nova
1748 password: password
1749 project_name: service
1750 user_domain_name: service
1751 project_domain_name: service
1752 # NOTE(portdirect): the neutron user is not managed by the nova chart
1753 # these values should match those set in the neutron chart.
1754 neutron:
1755 region_name: RegionOne
1756 project_name: service
1757 user_domain_name: service
1758 project_domain_name: service
1759 username: neutron
1760 password: password
1761 # NOTE(portdirect): the ironic user is not managed by the nova chart
1762 # these values should match those set in the ironic chart.
1763 ironic:
1764 auth_type: password
1765 auth_version: v3
1766 region_name: RegionOne
1767 project_name: service
1768 user_domain_name: service
1769 project_domain_name: service
1770 username: ironic
1771 password: password
1772 placement:
1773 role: admin
1774 region_name: RegionOne
1775 username: placement
1776 password: password
1777 project_name: service
1778 user_domain_name: service
1779 project_domain_name: service
okozachenko1203567fc082023-08-21 22:50:02 +10001780 cinder:
1781 role: admin,service
1782 region_name: RegionOne
1783 username: cinder
1784 password: password
1785 project_name: service
1786 user_domain_name: service
1787 project_domain_name: service
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001788 test:
1789 role: admin
1790 region_name: RegionOne
1791 username: nova-test
1792 password: password
1793 project_name: test
1794 user_domain_name: service
1795 project_domain_name: service
1796 hosts:
1797 default: keystone
1798 internal: keystone-api
1799 host_fqdn_override:
1800 default: null
1801 path:
1802 default: /v3
1803 scheme:
1804 default: http
1805 port:
1806 api:
1807 default: 80
1808 internal: 5000
1809 image:
1810 name: glance
1811 hosts:
1812 default: glance-api
1813 public: glance
1814 host_fqdn_override:
1815 default: null
1816 path:
1817 default: null
1818 scheme:
1819 default: http
1820 port:
1821 api:
1822 default: 9292
1823 public: 80
1824 compute:
1825 name: nova
1826 hosts:
1827 default: nova-api
1828 public: nova
1829 host_fqdn_override:
1830 default: null
1831 # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public
1832 # endpoints using the following format:
1833 # public:
1834 # host: null
1835 # tls:
1836 # crt: null
1837 # key: null
1838 path:
1839 default: "/v2.1/%(tenant_id)s"
1840 scheme:
1841 default: 'http'
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01001842 service: 'http'
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001843 port:
1844 api:
1845 default: 8774
1846 public: 80
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01001847 service: 8774
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001848 novncproxy:
1849 default: 6080
1850 compute_metadata:
1851 name: nova
1852 ip:
1853      # If blank, set clusterIP and metadata_host dynamically
1854 ingress: null
1855 hosts:
1856 default: nova-metadata
1857 public: metadata
1858 host_fqdn_override:
1859 default: null
1860 path:
1861 default: /
1862 scheme:
1863 default: 'http'
1864 port:
1865 metadata:
1866 default: 8775
1867 public: 80
1868 compute_novnc_proxy:
1869 name: nova
1870 hosts:
1871 default: nova-novncproxy
1872 public: novncproxy
1873 host_fqdn_override:
1874 default: null
1875 # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public
1876 # endpoints using the following format:
1877 # public:
1878 # host: null
1879 # tls:
1880 # crt: null
1881 # key: null
1882 path:
1883 default: /vnc_auto.html
1884 scheme:
1885 default: 'http'
1886 port:
1887 novnc_proxy:
1888 default: 6080
1889 public: 80
okozachenko1203ea639e72023-08-30 23:25:38 +10001890 # This endpoint is only to allow configuring the cert used specifically for
1891 # vencrypt. Specifically, the same CA/issuer needs to be used to sign both
1892 # this cert, and the libvirt/qemu certs.
1893 compute_novnc_vencrypt:
1894 hosts:
1895 default: nova-novncproxy
1896 host_fqdn_override:
1897 default:
1898 commonName: nova-novncproxy
1899 usages:
1900 - client auth
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001901 compute_spice_proxy:
1902 name: nova
1903 hosts:
1904 default: nova-spiceproxy
1905 public: placement
1906 host_fqdn_override:
1907 default: null
1908 path:
1909 default: /spice_auto.html
1910 scheme:
1911 default: 'http'
1912 port:
1913 spice_proxy:
1914 default: 6082
1915 placement:
1916 name: placement
1917 hosts:
1918 default: placement-api
1919 public: placement
1920 host_fqdn_override:
1921 default: null
1922 path:
1923 default: /
1924 scheme:
1925 default: 'http'
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01001926 service: 'http'
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001927 port:
1928 api:
1929 default: 8778
1930 public: 80
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01001931 service: 8778
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001932 network:
1933 name: neutron
1934 hosts:
1935 default: neutron-server
1936 public: neutron
1937 host_fqdn_override:
1938 default: null
1939 path:
1940 default: null
1941 scheme:
1942 default: 'http'
1943 port:
1944 api:
1945 default: 9696
1946 public: 80
1947 baremetal:
1948 name: ironic
1949 hosts:
1950 default: ironic-api
1951 public: ironic
1952 host_fqdn_override:
1953 default: null
1954 path:
1955 default: null
1956 scheme:
1957 default: http
1958 port:
1959 api:
1960 default: 6385
1961 public: 80
1962 fluentd:
1963 namespace: null
1964 name: fluentd
1965 hosts:
1966 default: fluentd-logging
1967 host_fqdn_override:
1968 default: null
1969 path:
1970 default: null
1971 scheme: 'http'
1972 port:
1973 service:
1974 default: 24224
1975 metrics:
1976 default: 24220
1977 # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress
1978 # They are using to enable the Egress K8s network policy.
1979 kube_dns:
1980 namespace: kube-system
1981 name: kubernetes-dns
1982 hosts:
1983 default: kube-dns
1984 host_fqdn_override:
1985 default: null
1986 path:
1987 default: null
1988 scheme: http
1989 port:
1990 dns:
1991 default: 53
1992 protocol: UDP
1993 ingress:
1994 namespace: null
1995 name: ingress
1996 hosts:
1997 default: ingress
1998 port:
1999 ingress:
2000 default: 80
2001
2002pod:
2003 probes:
2004 rpc_timeout: 60
2005 rpc_retries: 2
2006 compute:
2007 default:
2008 liveness:
2009 enabled: True
2010 params:
2011 initialDelaySeconds: 120
2012 periodSeconds: 90
2013 timeoutSeconds: 70
2014 readiness:
2015 enabled: True
2016 params:
2017 initialDelaySeconds: 80
2018 periodSeconds: 90
2019 timeoutSeconds: 70
2020 api-metadata:
2021 default:
2022 liveness:
2023 enabled: True
2024 params:
Mohammed Naser69247252023-09-26 22:23:46 -04002025 initialDelaySeconds: 5
2026 periodSeconds: 10
2027 timeoutSeconds: 5
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002028 readiness:
2029 enabled: True
2030 params:
Mohammed Naser69247252023-09-26 22:23:46 -04002031 initialDelaySeconds: 5
2032 periodSeconds: 10
2033 timeoutSeconds: 5
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002034 api-osapi:
2035 default:
2036 liveness:
2037 enabled: True
2038 params:
Mohammed Naser69247252023-09-26 22:23:46 -04002039 initialDelaySeconds: 5
2040 periodSeconds: 10
2041 timeoutSeconds: 5
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002042 readiness:
2043 enabled: True
2044 params:
Mohammed Naser69247252023-09-26 22:23:46 -04002045 initialDelaySeconds: 5
2046 periodSeconds: 10
2047 timeoutSeconds: 5
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002048 conductor:
2049 default:
2050 liveness:
2051 enabled: True
2052 params:
2053 initialDelaySeconds: 120
2054 periodSeconds: 90
2055 timeoutSeconds: 70
2056 readiness:
2057 enabled: True
2058 params:
2059 initialDelaySeconds: 80
2060 periodSeconds: 90
2061 timeoutSeconds: 70
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002062 novncproxy:
2063 default:
2064 liveness:
2065 enabled: True
2066 params:
2067 initialDelaySeconds: 30
2068 periodSeconds: 60
2069 timeoutSeconds: 15
2070 readiness:
2071 enabled: True
2072 params:
2073 initialDelaySeconds: 30
2074 periodSeconds: 60
2075 timeoutSeconds: 15
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002076 scheduler:
2077 default:
2078 liveness:
2079 enabled: True
2080 params:
2081 initialDelaySeconds: 120
2082 periodSeconds: 90
2083 timeoutSeconds: 70
2084 readiness:
2085 enabled: True
2086 params:
2087 initialDelaySeconds: 80
2088 periodSeconds: 90
2089 timeoutSeconds: 70
2090 compute-spice-proxy:
2091 default:
2092 liveness:
2093 enabled: True
2094 params:
2095 initialDelaySeconds: 30
2096 periodSeconds: 60
2097 timeoutSeconds: 15
2098 readiness:
2099 enabled: True
2100 params:
2101 initialDelaySeconds: 30
2102 periodSeconds: 60
2103 timeoutSeconds: 15
2104 security_context:
2105 nova:
2106 pod:
2107 runAsUser: 42424
2108 container:
2109 nova_compute_init:
2110 readOnlyRootFilesystem: true
2111 runAsUser: 0
2112 tungstenfabric_compute_init:
2113 readOnlyRootFilesystem: true
2114 allowPrivilegeEscalation: false
2115 ceph_perms:
2116 readOnlyRootFilesystem: true
2117 runAsUser: 0
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002118 nova_compute_vnc_init:
2119 readOnlyRootFilesystem: true
2120 allowPrivilegeEscalation: false
2121 nova_compute_spice_init:
2122 readOnlyRootFilesystem: true
2123 allowPrivilegeEscalation: false
2124 nova_compute:
2125 readOnlyRootFilesystem: true
2126 privileged: true
2127 nova_compute_ssh:
2128 privileged: true
2129 runAsUser: 0
2130 nova_compute_ssh_init:
2131 runAsUser: 0
2132 nova_api_metadata_init:
2133 readOnlyRootFilesystem: true
2134 allowPrivilegeEscalation: false
2135 nova_api:
2136 readOnlyRootFilesystem: true
2137 allowPrivilegeEscalation: false
2138 nova_osapi:
2139 readOnlyRootFilesystem: true
2140 allowPrivilegeEscalation: false
2141 nova_conductor:
2142 readOnlyRootFilesystem: true
2143 allowPrivilegeEscalation: false
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002144 nova_novncproxy_init:
2145 readOnlyRootFilesystem: true
2146 allowPrivilegeEscalation: false
2147 nova_novncproxy_init_assests:
2148 readOnlyRootFilesystem: true
2149 allowPrivilegeEscalation: false
2150 nova_novncproxy:
2151 readOnlyRootFilesystem: true
2152 allowPrivilegeEscalation: false
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002153 nova_scheduler:
2154 readOnlyRootFilesystem: true
2155 allowPrivilegeEscalation: false
2156 nova_spiceproxy_init:
2157 readOnlyRootFilesystem: true
2158 allowPrivilegeEscalation: false
2159 nova_spiceproxy_init_assets:
2160 readOnlyRootFilesystem: true
2161 allowPrivilegeEscalation: false
2162 nova_spiceproxy:
2163 readOnlyRootFilesystem: true
2164 allowPrivilegeEscalation: false
2165 bootstrap:
2166 pod:
2167 runAsUser: 42424
2168 container:
2169 nova_wait_for_computes_init:
2170 readOnlyRootFilesystem: true
2171 allowPrivilegeEscalation: false
2172 bootstrap:
2173 readOnlyRootFilesystem: true
2174 allowPrivilegeEscalation: false
2175 nova_cell_setup:
2176 pod:
2177 runAsUser: 42424
2178 container:
2179 nova_wait_for_computes_init:
2180 readOnlyRootFilesystem: true
2181 allowPrivilegeEscalation: false
2182 nova_cell_setup_init:
2183 readOnlyRootFilesystem: true
2184 allowPrivilegeEscalation: false
2185 nova_cell_setup:
2186 readOnlyRootFilesystem: true
2187 allowPrivilegeEscalation: false
2188 archive_deleted_rows:
2189 pod:
2190 runAsUser: 42424
2191 container:
2192 nova_archive_deleted_rows_init:
2193 readOnlyRootFilesystem: true
2194 allowPrivilegeEscalation: false
2195 nova_archive_deleted_rows:
2196 readOnlyRootFilesystem: true
2197 allowPrivilegeEscalation: false
2198 cell_setup:
2199 pod:
2200 runAsUser: 42424
2201 container:
2202 nova_cell_setup:
2203 readOnlyRootFilesystem: true
2204 allowPrivilegeEscalation: false
2205 service_cleaner:
2206 pod:
2207 runAsUser: 42424
2208 container:
2209 nova_service_cleaner:
2210 readOnlyRootFilesystem: true
2211 allowPrivilegeEscalation: false
2212 use_fqdn:
2213 # NOTE: If the option "host" is not specified in nova.conf, the host name
2214 # shown in the hypervisor host is defaulted to the short name of the host.
2215 # Setting the option here to true will cause $(hostname --fqdn) to be used
2216 # as the host name by default. If the short name $(hostname --short) is
2217 # desired, set the option to false. Specifying a host in nova.conf via the conf:
2218 # section will supersede the value of this option.
2219 compute: true
2220 affinity:
2221 anti:
2222 type:
2223 default: preferredDuringSchedulingIgnoredDuringExecution
2224 topologyKey:
2225 default: kubernetes.io/hostname
2226 weight:
2227 default: 10
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01002228 tolerations:
2229 nova:
2230 enabled: false
2231 tolerations:
2232 - key: node-role.kubernetes.io/master
2233 operator: Exists
2234 effect: NoSchedule
Oleksandr Kozachenkoc0022be2023-05-23 20:36:21 +02002235 - key: node-role.kubernetes.io/control-plane
2236 operator: Exists
2237 effect: NoSchedule
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002238 mounts:
2239 nova_compute:
2240 init_container: null
2241 nova_compute:
2242 volumeMounts:
2243 volumes:
2244 nova_compute_ironic:
2245 init_container: null
2246 nova_compute_ironic:
2247 volumeMounts:
2248 volumes:
2249 nova_api_metadata:
2250 init_container: null
2251 nova_api_metadata:
2252 volumeMounts:
2253 volumes:
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002254 nova_api_osapi:
2255 init_container: null
2256 nova_api_osapi:
2257 volumeMounts:
2258 volumes:
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002259 nova_conductor:
2260 init_container: null
2261 nova_conductor:
2262 volumeMounts:
2263 volumes:
2264 nova_scheduler:
2265 init_container: null
2266 nova_scheduler:
2267 volumeMounts:
2268 volumes:
2269 nova_bootstrap:
2270 init_container: null
2271 nova_bootstrap:
2272 volumeMounts:
2273 volumes:
2274 nova_tests:
2275 init_container: null
2276 nova_tests:
2277 volumeMounts:
2278 volumes:
2279 nova_novncproxy:
2280 init_novncproxy: null
2281 nova_novncproxy:
2282 volumeMounts:
2283 volumes:
2284 nova_spiceproxy:
2285 init_spiceproxy: null
2286 nova_spiceproxy:
2287 volumeMounts:
2288 volumes:
2289 nova_db_sync:
2290 nova_db_sync:
2291 volumeMounts:
2292 volumes:
2293 useHostNetwork:
2294 novncproxy: true
2295 replicas:
2296 api_metadata: 1
2297 compute_ironic: 1
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002298 osapi: 1
2299 conductor: 1
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002300 scheduler: 1
2301 novncproxy: 1
2302 spiceproxy: 1
2303 lifecycle:
2304 upgrades:
2305 deployments:
2306 revision_history: 3
2307 pod_replacement_strategy: RollingUpdate
2308 rolling_update:
2309 max_unavailable: 1
2310 max_surge: 3
2311 daemonsets:
2312 pod_replacement_strategy: RollingUpdate
2313 compute:
2314 enabled: true
2315 min_ready_seconds: 0
2316 max_unavailable: 1
2317 disruption_budget:
2318 metadata:
2319 min_available: 0
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002320 osapi:
2321 min_available: 0
2322 termination_grace_period:
2323 metadata:
2324 timeout: 30
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002325 osapi:
2326 timeout: 30
2327 resources:
2328 enabled: false
2329 compute:
2330 requests:
2331 memory: "128Mi"
2332 cpu: "100m"
2333 limits:
2334 memory: "1024Mi"
2335 cpu: "2000m"
2336 compute_ironic:
2337 requests:
2338 memory: "128Mi"
2339 cpu: "100m"
2340 limits:
2341 memory: "1024Mi"
2342 cpu: "2000m"
2343 api_metadata:
2344 requests:
2345 memory: "128Mi"
2346 cpu: "100m"
2347 limits:
2348 memory: "1024Mi"
2349 cpu: "2000m"
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002350 api:
2351 requests:
2352 memory: "128Mi"
2353 cpu: "100m"
2354 limits:
2355 memory: "1024Mi"
2356 cpu: "2000m"
2357 conductor:
2358 requests:
2359 memory: "128Mi"
2360 cpu: "100m"
2361 limits:
2362 memory: "1024Mi"
2363 cpu: "2000m"
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002364 scheduler:
2365 requests:
2366 memory: "128Mi"
2367 cpu: "100m"
2368 limits:
2369 memory: "1024Mi"
2370 cpu: "2000m"
2371 ssh:
2372 requests:
2373 memory: "128Mi"
2374 cpu: "100m"
2375 limits:
2376 memory: "1024Mi"
2377 cpu: "2000m"
2378 novncproxy:
2379 requests:
2380 memory: "128Mi"
2381 cpu: "100m"
2382 limits:
2383 memory: "1024Mi"
2384 cpu: "2000m"
2385 spiceproxy:
2386 requests:
2387 memory: "128Mi"
2388 cpu: "100m"
2389 limits:
2390 memory: "1024Mi"
2391 cpu: "2000m"
2392 jobs:
2393 bootstrap:
2394 requests:
2395 memory: "128Mi"
2396 cpu: "100m"
2397 limits:
2398 memory: "1024Mi"
2399 cpu: "2000m"
2400 db_init:
2401 requests:
2402 memory: "128Mi"
2403 cpu: "100m"
2404 limits:
2405 memory: "1024Mi"
2406 cpu: "2000m"
2407 rabbit_init:
2408 requests:
2409 memory: "128Mi"
2410 cpu: "100m"
2411 limits:
2412 memory: "1024Mi"
2413 cpu: "2000m"
2414 db_sync:
2415 requests:
2416 memory: "128Mi"
2417 cpu: "100m"
2418 limits:
2419 memory: "1024Mi"
2420 cpu: "2000m"
2421 archive_deleted_rows:
2422 requests:
2423 memory: "128Mi"
2424 cpu: "100m"
2425 limits:
2426 memory: "1024Mi"
2427 cpu: "2000m"
2428 db_drop:
2429 requests:
2430 memory: "128Mi"
2431 cpu: "100m"
2432 limits:
2433 memory: "1024Mi"
2434 cpu: "2000m"
2435 ks_endpoints:
2436 requests:
2437 memory: "128Mi"
2438 cpu: "100m"
2439 limits:
2440 memory: "1024Mi"
2441 cpu: "2000m"
2442 ks_service:
2443 requests:
2444 memory: "128Mi"
2445 cpu: "100m"
2446 limits:
2447 memory: "1024Mi"
2448 cpu: "2000m"
2449 ks_user:
2450 requests:
2451 memory: "128Mi"
2452 cpu: "100m"
2453 limits:
2454 memory: "1024Mi"
2455 cpu: "2000m"
2456 tests:
2457 requests:
2458 memory: "128Mi"
2459 cpu: "100m"
2460 limits:
2461 memory: "1024Mi"
2462 cpu: "2000m"
2463 cell_setup:
2464 requests:
2465 memory: "128Mi"
2466 cpu: "100m"
2467 limits:
2468 memory: "1024Mi"
2469 cpu: "2000m"
2470 service_cleaner:
2471 requests:
2472 memory: "128Mi"
2473 cpu: "100m"
2474 limits:
2475 memory: "1024Mi"
2476 cpu: "2000m"
2477 image_repo_sync:
2478 requests:
2479 memory: "128Mi"
2480 cpu: "100m"
2481 limits:
2482 memory: "1024Mi"
2483 cpu: "2000m"
2484
2485network_policy:
2486 nova:
2487 # TODO(lamt): Need to tighten this ingress for security.
2488 ingress:
2489 - {}
2490 egress:
2491 - {}
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002492
2493# NOTE(helm_hook): Helm hooks might break with the helm2 binary;
2494# set helm3_hook: false when using the helm2 binary.
2495helm3_hook: true
2496
2497health_probe:
2498 logging:
2499 level: ERROR
2500
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01002501tls:
2502 identity: false
2503 oslo_messaging: false
2504 oslo_db: false
2505
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002506manifests:
2507 certificates: false
2508 configmap_bin: true
2509 configmap_etc: true
2510 cron_job_cell_setup: true
2511 cron_job_service_cleaner: true
2512 cron_job_archive_deleted_rows: false
2513 daemonset_compute: true
2514 deployment_api_metadata: true
2515 deployment_api_osapi: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002516 deployment_conductor: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002517 deployment_novncproxy: true
2518 deployment_spiceproxy: true
2519 deployment_scheduler: true
2520 ingress_metadata: true
2521 ingress_novncproxy: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002522 ingress_osapi: true
2523 job_bootstrap: true
2524 job_db_init: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002525 job_db_sync: true
2526 job_db_drop: false
2527 job_image_repo_sync: true
2528 job_rabbit_init: true
2529 job_ks_endpoints: true
2530 job_ks_service: true
2531 job_ks_user: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002532 job_cell_setup: true
2533 pdb_metadata: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002534 pdb_osapi: true
2535 pod_rally_test: true
2536 network_policy: false
2537 secret_db_api: true
2538 secret_db_cell0: true
2539 secret_db: true
2540 secret_ingress_tls: true
2541 secret_keystone: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002542 secret_rabbitmq: true
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01002543 secret_registry: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002544 service_ingress_metadata: true
2545 service_ingress_novncproxy: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002546 service_ingress_osapi: true
2547 service_metadata: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002548 service_novncproxy: true
2549 service_spiceproxy: true
2550 service_osapi: true
2551 statefulset_compute_ironic: false
2552...