# Licensed under the Apache License, Version 2.0 (the "License");
2# you may not use this file except in compliance with the License.
3# You may obtain a copy of the License at
4#
5# http://www.apache.org/licenses/LICENSE-2.0
6#
7# Unless required by applicable law or agreed to in writing, software
8# distributed under the License is distributed on an "AS IS" BASIS,
9# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10# See the License for the specific language governing permissions and
11# limitations under the License.
12
13# Default values for nova.
14# This is a YAML-formatted file.
15# Declare name/value pairs to be passed into your templates.
16# name: value
17
18---
19release_group: null
20
21labels:
22 agent:
23 compute:
24 node_selector_key: openstack-compute-node
25 node_selector_value: enabled
26 compute_ironic:
27 node_selector_key: openstack-compute-node
28 node_selector_value: enabled
29 api_metadata:
30 node_selector_key: openstack-control-plane
31 node_selector_value: enabled
32 conductor:
33 node_selector_key: openstack-control-plane
34 node_selector_value: enabled
35 consoleauth:
36 node_selector_key: openstack-control-plane
37 node_selector_value: enabled
38 job:
39 node_selector_key: openstack-control-plane
40 node_selector_value: enabled
41 novncproxy:
42 node_selector_key: openstack-control-plane
43 node_selector_value: enabled
44 osapi:
45 node_selector_key: openstack-control-plane
46 node_selector_value: enabled
47 placement:
48 node_selector_key: openstack-control-plane
49 node_selector_value: enabled
50 scheduler:
51 node_selector_key: openstack-control-plane
52 node_selector_value: enabled
53 spiceproxy:
54 node_selector_key: openstack-control-plane
55 node_selector_value: enabled
56 test:
57 node_selector_key: openstack-control-plane
58 node_selector_value: enabled
59
60images:
61 pull_policy: IfNotPresent
62 tags:
63 bootstrap: docker.io/openstackhelm/heat:stein-ubuntu_bionic
64 db_drop: docker.io/openstackhelm/heat:stein-ubuntu_bionic
65 db_init: docker.io/openstackhelm/heat:stein-ubuntu_bionic
66 dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
67 rabbit_init: docker.io/rabbitmq:3.7-management
68 ks_user: docker.io/openstackhelm/heat:stein-ubuntu_bionic
69 ks_service: docker.io/openstackhelm/heat:stein-ubuntu_bionic
70 nova_archive_deleted_rows: docker.io/openstackhelm/nova:stein-ubuntu_bionic
71 ks_endpoints: docker.io/openstackhelm/heat:stein-ubuntu_bionic
72 nova_api: docker.io/openstackhelm/nova:stein-ubuntu_bionic
73 nova_cell_setup: docker.io/openstackhelm/nova:stein-ubuntu_bionic
74 nova_cell_setup_init: docker.io/openstackhelm/heat:stein-ubuntu_bionic
75 nova_compute: docker.io/openstackhelm/nova:stein-ubuntu_bionic
76 nova_compute_ironic: 'docker.io/kolla/ubuntu-source-nova-compute-ironic:ocata'
77 nova_compute_ssh: docker.io/openstackhelm/nova:stein-ubuntu_bionic
78 nova_conductor: docker.io/openstackhelm/nova:stein-ubuntu_bionic
79 nova_consoleauth: docker.io/openstackhelm/nova:stein-ubuntu_bionic
80 nova_db_sync: docker.io/openstackhelm/nova:stein-ubuntu_bionic
81 nova_novncproxy: docker.io/openstackhelm/nova:stein-ubuntu_bionic
82 nova_novncproxy_assets: 'docker.io/kolla/ubuntu-source-nova-novncproxy:ocata'
83 nova_placement: docker.io/openstackhelm/nova:stein-ubuntu_bionic
84 nova_scheduler: docker.io/openstackhelm/nova:stein-ubuntu_bionic
85 # NOTE(portdirect): we simply use the ceph config helper here,
86 # as it has both oscli and jq.
87 nova_service_cleaner: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_xenial'
88 nova_spiceproxy: docker.io/openstackhelm/nova:stein-ubuntu_bionic
89 nova_spiceproxy_assets: docker.io/openstackhelm/nova:stein-ubuntu_bionic
90 test: docker.io/xrally/xrally-openstack:2.0.0
91 image_repo_sync: docker.io/docker:17.07.0
92 nova_wait_for_computes_init: gcr.io/google_containers/hyperkube-amd64:v1.11.6
93 local_registry:
94 active: false
95 exclude:
96 - dep_check
97 - image_repo_sync
98
99jobs:
100 # NOTE(portdirect): When using cells new nodes will be added to the cell on the hour by default.
101 # TODO(portdirect): Add a post-start action to nova compute pods that registers themselves.
102 cell_setup:
103 cron: "0 */1 * * *"
104 starting_deadline: 600
105 history:
106 success: 3
107 failed: 1
108 extended_wait:
109 enabled: false
110 iteration: 3
111 duration: 5
112 service_cleaner:
113 cron: "0 */1 * * *"
114 starting_deadline: 600
115 history:
116 success: 3
117 failed: 1
118 sleep_time: 60
119 archive_deleted_rows:
120 cron: "0 */1 * * *"
121 starting_deadline: 600
122 history:
123 success: 3
124 failed: 1
125
126bootstrap:
127 enabled: true
128 ks_user: admin
129 script: null
130 structured:
131 flavors:
132 enabled: true
133 options:
134 m1_tiny:
135 name: "m1.tiny"
136 ram: 512
137 disk: 1
138 vcpus: 1
139 m1_small:
140 name: "m1.small"
141 ram: 2048
142 disk: 20
143 vcpus: 1
144 m1_medium:
145 name: "m1.medium"
146 ram: 4096
147 disk: 40
148 vcpus: 2
149 m1_large:
150 name: "m1.large"
151 ram: 8192
152 disk: 80
153 vcpus: 4
154 m1_xlarge:
155 name: "m1.xlarge"
156 ram: 16384
157 disk: 160
158 vcpus: 8
159 wait_for_computes:
160 enabled: false
161 # Wait percentage is the minimum percentage of compute hypervisors which
162 # must be available before the remainder of the bootstrap script can be run.
163 wait_percentage: 70
164 # Once the wait_percentage above is achieved, the remaining_wait is the
165 # amount of time in seconds to wait before executing the remainder of the
166 # boostrap script.
167 remaining_wait: 300
168 scripts:
169 init_script: |
170 # This runs in a bootstrap init container. It counts the number of compute nodes.
171 COMPUTE_NODES=$(kubectl get nodes -o custom-columns=NAME:.metadata.name -l openstack-compute-node=enabled --no-headers | sort)
172 /bin/echo $COMPUTE_NODES > /tmp/compute_nodes.txt
173 wait_script: |
174 # This script runs in the main bootstrap container just before the
175 # bootstrap.script is called.
176 COMPUTE_HOSTS=`cat /tmp/compute_nodes.txt | wc -w`
177 if [[ $COMPUTE_HOSTS == 0 ]]; then
178 echo "There are no compute hosts found!"
179 exit 1
180 fi
181
182 # Wait for all hypervisors to come up before moving on with the deployment
183 HYPERVISOR_WAIT=true
184 WAIT_AFTER_READY=0
185 SLEEP=5
186 while [[ $HYPERVISOR_WAIT == true ]]; do
187 # Its possible that openstack command may fail due to not being able to
188 # reach the compute service
189 set +e
190 HYPERVISORS=$(openstack hypervisor list -f value -c 'Hypervisor Hostname' | wc -w)
191 set -e
192
193 PERCENT_READY=$(( $HYPERVISORS * 100 / $COMPUTE_HOSTS ))
194 if [[ $PERCENT_READY -ge $WAIT_PERCENTAGE ]]; then
195 echo "Hypervisor ready percentage is $PERCENT_READY"
196 if [[ $PERCENT_READY == 100 ]]; then
197 HYPERVISOR_WAIT=false
198 echo "All hypervisors are ready."
199 elif [[ WAIT_AFTER_READY -ge $REMAINING_WAIT ]]; then
200 HYPERVISOR_WAIT=false
201 echo "Waited the configured time -- $HYPERVISORS out of $COMPUTE_HOSTS hypervisor(s) ready -- proceeding with the bootstrap."
202 else
203 sleep $SLEEP
204 WAIT_AFTER_READY=$(( $WAIT_AFTER_READY + $SLEEP ))
205 fi
206 else
207 echo "Waiting $SLEEP seconds for enough hypervisors to be discovered..."
208 sleep $SLEEP
209 fi
210 done
211
212network:
213 # provide what type of network wiring will be used
214 # possible options: openvswitch, linuxbridge, sriov
215 backend:
216 - openvswitch
217 osapi:
218 port: 8774
219 ingress:
220 public: true
221 classes:
222 namespace: "nginx"
223 cluster: "nginx-cluster"
224 annotations:
225 nginx.ingress.kubernetes.io/rewrite-target: /
226 external_policy_local: false
227 node_port:
228 enabled: false
229 port: 30774
230 metadata:
231 port: 8775
232 ingress:
233 public: true
234 classes:
235 namespace: "nginx"
236 cluster: "nginx-cluster"
237 annotations:
238 nginx.ingress.kubernetes.io/rewrite-target: /
239 external_policy_local: false
240 node_port:
241 enabled: false
242 port: 30775
243 placement:
244 port: 8778
245 ingress:
246 public: true
247 classes:
248 namespace: "nginx"
249 cluster: "nginx-cluster"
250 annotations:
251 nginx.ingress.kubernetes.io/rewrite-target: /
252 node_port:
253 enabled: false
254 port: 30778
255 novncproxy:
256 ingress:
257 public: true
258 classes:
259 namespace: "nginx"
260 cluster: "nginx-cluster"
261 annotations:
262 nginx.ingress.kubernetes.io/rewrite-target: /
263 node_port:
264 enabled: false
265 port: 30680
266 spiceproxy:
267 node_port:
268 enabled: false
269 port: 30682
270 ssh:
271 enabled: false
272 port: 8022
273 from_subnet: 0.0.0.0/0
274 key_types:
275 - rsa
276 - dsa
277 - ecdsa
278 - ed25519
279 private_key: 'null'
280 public_key: 'null'
281
282dependencies:
283 dynamic:
284 common:
285 local_image_registry:
286 jobs:
287 - nova-image-repo-sync
288 services:
289 - endpoint: node
290 service: local_image_registry
291 targeted:
292 openvswitch:
293 compute:
294 pod:
295 - requireSameNode: true
296 labels:
297 application: neutron
298 component: neutron-ovs-agent
299 linuxbridge:
300 compute:
301 pod:
302 - requireSameNode: true
303 labels:
304 application: neutron
305 component: neutron-lb-agent
306 sriov:
307 compute:
308 pod:
309 - requireSameNode: true
310 labels:
311 application: neutron
312 component: neutron-sriov-agent
313 static:
314 api:
315 jobs:
316 - nova-db-sync
317 - nova-ks-user
318 - nova-ks-endpoints
319 - nova-rabbit-init
320 services:
321 - endpoint: internal
322 service: oslo_messaging
323 - endpoint: internal
324 service: oslo_db
325 - endpoint: internal
326 service: identity
327 api_metadata:
328 jobs:
329 - nova-db-sync
330 - nova-ks-user
331 - nova-ks-endpoints
332 - nova-rabbit-init
333 services:
334 - endpoint: internal
335 service: oslo_messaging
336 - endpoint: internal
337 service: oslo_db
338 - endpoint: internal
339 service: identity
340 bootstrap:
341 services:
342 - endpoint: internal
343 service: identity
344 - endpoint: internal
345 service: compute
346 cell_setup:
347 jobs:
348 - nova-db-sync
349 - nova-rabbit-init
350 services:
351 - endpoint: internal
352 service: oslo_messaging
353 - endpoint: internal
354 service: oslo_db
355 - endpoint: internal
356 service: identity
357 - endpoint: internal
358 service: compute
359 pod:
360 - requireSameNode: false
361 labels:
362 application: nova
363 component: compute
364 service_cleaner:
365 jobs:
366 - nova-db-sync
367 - nova-rabbit-init
368 services:
369 - endpoint: internal
370 service: oslo_messaging
371 - endpoint: internal
372 service: oslo_db
373 - endpoint: internal
374 service: identity
375 - endpoint: internal
376 service: compute
377 compute:
378 pod:
379 - requireSameNode: true
380 labels:
381 application: libvirt
382 component: libvirt
383 jobs:
384 - nova-db-sync
385 - nova-rabbit-init
386 - placement-ks-endpoints
387 services:
388 - endpoint: internal
389 service: oslo_messaging
390 - endpoint: internal
391 service: image
392 - endpoint: internal
393 service: compute
394 - endpoint: internal
395 service: network
396 - endpoint: internal
397 service: compute_metadata
398 compute_ironic:
399 jobs:
400 - nova-db-sync
401 - nova-rabbit-init
402 services:
403 - endpoint: internal
404 service: oslo_messaging
405 - endpoint: internal
406 service: image
407 - endpoint: internal
408 service: compute
409 - endpoint: internal
410 service: network
411 - endpoint: internal
412 service: baremetal
413 conductor:
414 jobs:
415 - nova-db-sync
416 - nova-rabbit-init
417 - placement-ks-endpoints
418 services:
419 - endpoint: internal
420 service: oslo_messaging
421 - endpoint: internal
422 service: oslo_db
423 - endpoint: internal
424 service: identity
425 - endpoint: internal
426 service: compute
427 consoleauth:
428 jobs:
429 - nova-db-sync
430 - nova-rabbit-init
431 services:
432 - endpoint: internal
433 service: oslo_messaging
434 - endpoint: internal
435 service: oslo_db
436 - endpoint: internal
437 service: identity
438 - endpoint: internal
439 service: compute
440 db_drop:
441 services:
442 - endpoint: internal
443 service: oslo_db
444 archive_deleted_rows:
445 jobs:
446 - nova-db-init
447 - nova-db-sync
448 db_init:
449 services:
450 - endpoint: internal
451 service: oslo_db
452 db_sync:
453 jobs:
454 - nova-db-init
455 services:
456 - endpoint: internal
457 service: oslo_db
458 ks_endpoints:
459 jobs:
460 - nova-ks-service
461 services:
462 - endpoint: internal
463 service: identity
464 ks_service:
465 services:
466 - endpoint: internal
467 service: identity
468 ks_user:
469 services:
470 - endpoint: internal
471 service: identity
472 rabbit_init:
473 services:
474 - service: oslo_messaging
475 endpoint: internal
476 novncproxy:
477 jobs:
478 - nova-db-sync
479 services:
480 - endpoint: internal
481 service: oslo_db
482 spiceproxy:
483 jobs:
484 - nova-db-sync
485 services:
486 - endpoint: internal
487 service: oslo_db
488 scheduler:
489 jobs:
490 - nova-db-sync
491 - nova-rabbit-init
492 - placement-ks-endpoints
493 services:
494 - endpoint: internal
495 service: oslo_messaging
496 - endpoint: internal
497 service: oslo_db
498 - endpoint: internal
499 service: identity
500 - endpoint: internal
501 service: compute
502 tests:
503 services:
504 - endpoint: internal
505 service: image
506 - endpoint: internal
507 service: compute
508 - endpoint: internal
509 service: network
510 - endpoint: internal
511 service: compute_metadata
512 image_repo_sync:
513 services:
514 - endpoint: internal
515 service: local_image_registry
516
517console:
518 # serial | spice | novnc | none
519 console_kind: novnc
520 serial:
521 spice:
522 compute:
523 # IF blank, search default routing interface
524 server_proxyclient_interface:
525 proxy:
526 # IF blank, search default routing interface
527 server_proxyclient_interface:
528 novnc:
529 compute:
530 # IF blank, search default routing interface
531 vncserver_proxyclient_interface:
532 vncproxy:
533 # IF blank, search default routing interface
534 vncserver_proxyclient_interface:
535
536ceph_client:
537 configmap: ceph-etc
538 user_secret_name: pvc-ceph-client-key
539
540conf:
541 security: |
542 #
543 # Disable access to the entire file system except for the directories that
544 # are explicitly allowed later.
545 #
546 # This currently breaks the configurations that come with some web application
547 # Debian packages.
548 #
549 #<Directory />
550 # AllowOverride None
551 # Require all denied
552 #</Directory>
553
554 # Changing the following options will not really affect the security of the
555 # server, but might make attacks slightly more difficult in some cases.
556
557 #
558 # ServerTokens
559 # This directive configures what you return as the Server HTTP response
560 # Header. The default is 'Full' which sends information about the OS-Type
561 # and compiled in modules.
562 # Set to one of: Full | OS | Minimal | Minor | Major | Prod
563 # where Full conveys the most information, and Prod the least.
564 ServerTokens Prod
565
566 #
567 # Optionally add a line containing the server version and virtual host
568 # name to server-generated pages (internal error documents, FTP directory
569 # listings, mod_status and mod_info output etc., but not CGI generated
570 # documents or custom error documents).
571 # Set to "EMail" to also include a mailto: link to the ServerAdmin.
572 # Set to one of: On | Off | EMail
573 ServerSignature Off
574
575 #
576 # Allow TRACE method
577 #
578 # Set to "extended" to also reflect the request body (only for testing and
579 # diagnostic purposes).
580 #
581 # Set to one of: On | Off | extended
582 TraceEnable Off
583
584 #
585 # Forbid access to version control directories
586 #
587 # If you use version control systems in your document root, you should
588 # probably deny access to their directories. For example, for subversion:
589 #
590 #<DirectoryMatch "/\.svn">
591 # Require all denied
592 #</DirectoryMatch>
593
594 #
595 # Setting this header will prevent MSIE from interpreting files as something
596 # else than declared by the content type in the HTTP headers.
597 # Requires mod_headers to be enabled.
598 #
599 #Header set X-Content-Type-Options: "nosniff"
600
601 #
602 # Setting this header will prevent other sites from embedding pages from this
603 # site as frames. This defends against clickjacking attacks.
604 # Requires mod_headers to be enabled.
605 #
606 #Header set X-Frame-Options: "sameorigin"
607 software:
608 apache2:
609 binary: apache2
610 start_parameters: -DFOREGROUND
611 conf_dir: /etc/apache2/conf-enabled
612 site_dir: /etc/apache2/sites-enable
613 mods_dir: /etc/apache2/mods-available
614 a2enmod: null
615 a2dismod: null
616 ceph:
617 enabled: true
618 admin_keyring: null
619 cinder:
620 user: "cinder"
621 keyring: null
622 secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
623 rally_tests:
624 run_tempest: false
625 clean_up: |
626 FLAVORS=$(openstack flavor list -f value --all | awk '$2 ~ /^s_rally_/ { print $1 }')
627 if [ -n "$FLAVORS" ]; then
628 echo $FLAVORS | xargs openstack flavor delete
629 fi
630 SERVERS=$(openstack server list -f value --all | awk '$2 ~ /^s_rally_/ { print $1 }')
631 if [ -n "$SERVERS" ]; then
632 echo $SERVERS | xargs openstack server delete
633 fi
634 IMAGES=$(openstack image list -f value | awk '$2 ~ /^c_rally_/ { print $1 }')
635 if [ -n "$IMAGES" ]; then
636 echo $IMAGES | xargs openstack image delete
637 fi
638 tests:
639 NovaAgents.list_agents:
640 - runner:
641 concurrency: 1
642 times: 1
643 type: constant
644 sla:
645 failure_rate:
646 max: 0
647 NovaAggregates.create_and_get_aggregate_details:
648 - args:
649 availability_zone: nova
650 runner:
651 concurrency: 1
652 times: 1
653 type: constant
654 sla:
655 failure_rate:
656 max: 0
657 NovaAggregates.create_and_update_aggregate:
658 - args:
659 availability_zone: nova
660 runner:
661 concurrency: 1
662 times: 1
663 type: constant
664 sla:
665 failure_rate:
666 max: 0
667 NovaAggregates.list_aggregates:
668 - runner:
669 concurrency: 1
670 times: 1
671 type: constant
672 sla:
673 failure_rate:
674 max: 0
675 NovaAvailabilityZones.list_availability_zones:
676 - args:
677 detailed: true
678 runner:
679 concurrency: 1
680 times: 1
681 type: constant
682 sla:
683 failure_rate:
684 max: 0
685 NovaFlavors.create_and_delete_flavor:
686 - args:
687 disk: 1
688 ram: 500
689 vcpus: 1
690 runner:
691 concurrency: 1
692 times: 1
693 type: constant
694 sla:
695 failure_rate:
696 max: 0
697 NovaFlavors.create_and_list_flavor_access:
698 - args:
699 disk: 1
700 ram: 500
701 vcpus: 1
702 runner:
703 concurrency: 1
704 times: 1
705 type: constant
706 sla:
707 failure_rate:
708 max: 0
709 NovaFlavors.create_flavor:
710 - args:
711 disk: 1
712 ram: 500
713 vcpus: 1
714 runner:
715 concurrency: 1
716 times: 1
717 type: constant
718 sla:
719 failure_rate:
720 max: 0
721 NovaFlavors.create_flavor_and_add_tenant_access:
722 - args:
723 disk: 1
724 ram: 500
725 vcpus: 1
726 runner:
727 concurrency: 1
728 times: 1
729 type: constant
730 sla:
731 failure_rate:
732 max: 0
733 NovaFlavors.create_flavor_and_set_keys:
734 - args:
735 disk: 1
736 extra_specs:
737 'quota:disk_read_bytes_sec': 10240
738 ram: 500
739 vcpus: 1
740 runner:
741 concurrency: 1
742 times: 1
743 type: constant
744 sla:
745 failure_rate:
746 max: 0
747 NovaFlavors.list_flavors:
748 - args:
749 detailed: true
750 runner:
751 concurrency: 1
752 times: 1
753 type: constant
754 sla:
755 failure_rate:
756 max: 0
757 NovaHypervisors.list_and_get_hypervisors:
758 - args:
759 detailed: true
760 runner:
761 concurrency: 1
762 times: 1
763 type: constant
764 sla:
765 failure_rate:
766 max: 0
767 NovaHypervisors.list_and_get_uptime_hypervisors:
768 - args:
769 detailed: true
770 runner:
771 concurrency: 1
772 times: 1
773 type: constant
774 sla:
775 failure_rate:
776 max: 0
777 NovaHypervisors.list_and_search_hypervisors:
778 - args:
779 detailed: true
780 runner:
781 concurrency: 1
782 times: 1
783 type: constant
784 sla:
785 failure_rate:
786 max: 0
787 NovaHypervisors.list_hypervisors:
788 - args:
789 detailed: true
790 runner:
791 concurrency: 1
792 times: 1
793 type: constant
794 sla:
795 failure_rate:
796 max: 0
797 NovaHypervisors.statistics_hypervisors:
798 - args: {}
799 runner:
800 concurrency: 1
801 times: 1
802 type: constant
803 sla:
804 failure_rate:
805 max: 0
806 NovaKeypair.create_and_delete_keypair:
807 - runner:
808 concurrency: 1
809 times: 1
810 type: constant
811 sla:
812 failure_rate:
813 max: 0
814 NovaKeypair.create_and_list_keypairs:
815 - runner:
816 concurrency: 1
817 times: 1
818 type: constant
819 sla:
820 failure_rate:
821 max: 0
822 NovaServerGroups.create_and_list_server_groups:
823 - args:
824 all_projects: false
825 kwargs:
826 policies:
827 - affinity
828 runner:
829 concurrency: 1
830 times: 1
831 type: constant
832 sla:
833 failure_rate:
834 max: 0
835 NovaServices.list_services:
836 - runner:
837 concurrency: 1
838 times: 1
839 type: constant
840 sla:
841 failure_rate:
842 max: 0
843 paste:
844 composite:metadata:
845 use: egg:Paste#urlmap
846 /: meta
847 pipeline:meta:
848 pipeline: cors metaapp
849 app:metaapp:
850 paste.app_factory: nova.api.metadata.handler:MetadataRequestHandler.factory
851 composite:osapi_compute:
852 use: call:nova.api.openstack.urlmap:urlmap_factory
853 /: oscomputeversions
854 /v2: openstack_compute_api_v21_legacy_v2_compatible
855 /v2.1: openstack_compute_api_v21
856 composite:openstack_compute_api_v21:
857 use: call:nova.api.auth:pipeline_factory_v21
858 noauth2: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit noauth2 osapi_compute_app_v21
859 keystone: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit authtoken audit keystonecontext osapi_compute_app_v21
860 composite:openstack_compute_api_v21_legacy_v2_compatible:
861 use: call:nova.api.auth:pipeline_factory_v21
862 noauth2: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit noauth2 legacy_v2_compatible osapi_compute_app_v21
863 keystone: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit authtoken audit keystonecontext legacy_v2_compatible osapi_compute_app_v21
864 filter:request_id:
865 paste.filter_factory: oslo_middleware:RequestId.factory
866 filter:compute_req_id:
867 paste.filter_factory: nova.api.compute_req_id:ComputeReqIdMiddleware.factory
868 filter:faultwrap:
869 paste.filter_factory: nova.api.openstack:FaultWrapper.factory
870 filter:noauth2:
871 paste.filter_factory: nova.api.openstack.auth:NoAuthMiddleware.factory
872 filter:sizelimit:
873 paste.filter_factory: oslo_middleware:RequestBodySizeLimiter.factory
874 filter:http_proxy_to_wsgi:
875 paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
876 filter:legacy_v2_compatible:
877 paste.filter_factory: nova.api.openstack:LegacyV2CompatibleWrapper.factory
878 app:osapi_compute_app_v21:
879 paste.app_factory: nova.api.openstack.compute:APIRouterV21.factory
880 pipeline:oscomputeversions:
881 pipeline: faultwrap http_proxy_to_wsgi oscomputeversionapp
882 app:oscomputeversionapp:
883 paste.app_factory: nova.api.openstack.compute.versions:Versions.factory
884 filter:cors:
885 paste.filter_factory: oslo_middleware.cors:filter_factory
886 oslo_config_project: nova
887 filter:keystonecontext:
888 paste.filter_factory: nova.api.auth:NovaKeystoneContext.factory
889 filter:authtoken:
890 paste.filter_factory: keystonemiddleware.auth_token:filter_factory
891 filter:audit:
892 paste.filter_factory: keystonemiddleware.audit:filter_factory
893 audit_map_file: /etc/nova/api_audit_map.conf
894 policy: {}
895 nova_sudoers: |
896 # This sudoers file supports rootwrap for both Kolla and LOCI Images.
897 Defaults !requiretty
898 Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin"
899 nova ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/nova-rootwrap /etc/nova/rootwrap.conf *, /var/lib/openstack/bin/nova-rootwrap /etc/nova/rootwrap.conf *
900 api_audit_map:
901 DEFAULT:
902 target_endpoint_type: None
903 custom_actions:
904 enable: enable
905 disable: disable
906 delete: delete
907 startup: start/startup
908 shutdown: stop/shutdown
909 reboot: start/reboot
910 os-migrations/get: read
911 os-server-password/post: update
912 path_keywords:
913 add: None
914 action: None
915 enable: None
916 disable: None
917 configure-project: None
918 defaults: None
919 delete: None
920 detail: None
921 diagnostics: None
922 entries: entry
923 extensions: alias
924 flavors: flavor
925 images: image
926 ips: label
927 limits: None
928 metadata: key
929 os-agents: os-agent
930 os-aggregates: os-aggregate
931 os-availability-zone: None
932 os-certificates: None
933 os-cloudpipe: None
934 os-fixed-ips: ip
935 os-extra_specs: key
936 os-flavor-access: None
937 os-floating-ip-dns: domain
938 os-floating-ips-bulk: host
939 os-floating-ip-pools: None
940 os-floating-ips: floating-ip
941 os-hosts: host
942 os-hypervisors: hypervisor
943 os-instance-actions: instance-action
944 os-keypairs: keypair
945 os-migrations: None
946 os-networks: network
947 os-quota-sets: tenant
948 os-security-groups: security_group
949 os-security-group-rules: rule
950 os-server-password: None
951 os-services: None
952 os-simple-tenant-usage: tenant
953 os-virtual-interfaces: None
954 os-volume_attachments: attachment
955 os-volumes_boot: None
956 os-volumes: volume
957 os-volume-types: volume-type
958 os-snapshots: snapshot
959 reboot: None
960 servers: server
961 shutdown: None
962 startup: None
963 statistics: None
964 service_endpoints:
965 compute: service/compute
966 rootwrap: |
967 # Configuration for nova-rootwrap
968 # This file should be owned by (and only-writeable by) the root user
969
970 [DEFAULT]
971 # List of directories to load filter definitions from (separated by ',').
972 # These directories MUST all be only writeable by root !
973 filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap
974
975 # List of directories to search executables in, in case filters do not
976 # explicitely specify a full path (separated by ',')
977 # If not specified, defaults to system PATH environment variable.
978 # These directories MUST all be only writeable by root !
979 exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin
980
981 # Enable logging to syslog
982 # Default value is False
983 use_syslog=False
984
985 # Which syslog facility to use.
986 # Valid values include auth, authpriv, syslog, local0, local1...
987 # Default value is 'syslog'
988 syslog_log_facility=syslog
989
990 # Which messages to log.
991 # INFO means log all usage
992 # ERROR means only log unsuccessful attempts
993 syslog_log_level=ERROR
994 wsgi_placement: |
995 Listen 0.0.0.0:{{ tuple "placement" "internal" "api" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
996
997 LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
998 LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" proxy
999
1000 SetEnvIf X-Forwarded-For "^.*\..*\..*\..*" forwarded
1001 CustomLog /dev/stdout combined env=!forwarded
1002 CustomLog /dev/stdout proxy env=forwarded
1003
1004 <VirtualHost *:{{ tuple "placement" "internal" "api" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}>
1005 WSGIDaemonProcess placement-api processes=4 threads=1 user=nova group=nova display-name=%{GROUP}
1006 WSGIProcessGroup placement-api
1007 WSGIScriptAlias / /var/www/cgi-bin/nova/nova-placement-api
1008 WSGIApplicationGroup %{GLOBAL}
1009 WSGIPassAuthorization On
1010 <IfVersion >= 2.4>
1011 ErrorLogFormat "%{cu}t %M"
1012 </IfVersion>
1013 ErrorLog /dev/stdout
1014
1015 SetEnvIf X-Forwarded-For "^.*\..*\..*\..*" forwarded
1016 CustomLog /dev/stdout combined env=!forwarded
1017 CustomLog /dev/stdout proxy env=forwarded
1018 </VirtualHost>
1019
1020 Alias /placement /var/www/cgi-bin/nova/nova-placement-api
1021 <Location /placement>
1022 SetHandler wsgi-script
1023 Options +ExecCGI
1024
1025 WSGIProcessGroup placement-api
1026 WSGIApplicationGroup %{GLOBAL}
1027 WSGIPassAuthorization On
1028 </Location>
1029 rootwrap_filters:
1030 api_metadata:
1031 pods:
1032 - metadata
1033 content: |
1034 # nova-rootwrap command filters for api-metadata nodes
1035 # This is needed on nova-api hosts running with "metadata" in enabled_apis
1036 # or when running nova-api-metadata
1037 # This file should be owned by (and only-writeable by) the root user
1038
1039 [Filters]
1040 # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
1041 iptables-save: CommandFilter, iptables-save, root
1042 ip6tables-save: CommandFilter, ip6tables-save, root
1043
1044 # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
1045 iptables-restore: CommandFilter, iptables-restore, root
1046 ip6tables-restore: CommandFilter, ip6tables-restore, root
1047 compute:
1048 pods:
1049 - compute
1050 content: |
1051 # nova-rootwrap command filters for compute nodes
1052 # This file should be owned by (and only-writeable by) the root user
1053
1054 [Filters]
1055 # nova/virt/disk/mount/api.py: 'kpartx', '-a', device
1056 # nova/virt/disk/mount/api.py: 'kpartx', '-d', device
1057 kpartx: CommandFilter, kpartx, root
1058
1059 # nova/virt/xenapi/vm_utils.py: tune2fs, -O ^has_journal, part_path
1060 # nova/virt/xenapi/vm_utils.py: tune2fs, -j, partition_path
1061 tune2fs: CommandFilter, tune2fs, root
1062
1063 # nova/virt/disk/mount/api.py: 'mount', mapped_device
1064 # nova/virt/disk/api.py: 'mount', '-o', 'bind', src, target
1065 # nova/virt/xenapi/vm_utils.py: 'mount', '-t', 'ext2,ext3,ext4,reiserfs'..
1066 # nova/virt/configdrive.py: 'mount', device, mountdir
1067 # nova/virt/libvirt/volume.py: 'mount', '-t', 'sofs' ...
1068 mount: CommandFilter, mount, root
1069
1070 # nova/virt/disk/mount/api.py: 'umount', mapped_device
1071 # nova/virt/disk/api.py: 'umount' target
1072 # nova/virt/xenapi/vm_utils.py: 'umount', dev_path
1073 # nova/virt/configdrive.py: 'umount', mountdir
1074 umount: CommandFilter, umount, root
1075
1076 # nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-c', device, image
1077 # nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-d', device
1078 qemu-nbd: CommandFilter, qemu-nbd, root
1079
1080 # nova/virt/disk/mount/loop.py: 'losetup', '--find', '--show', image
1081 # nova/virt/disk/mount/loop.py: 'losetup', '--detach', device
1082 losetup: CommandFilter, losetup, root
1083
1084 # nova/virt/disk/vfs/localfs.py: 'blkid', '-o', 'value', '-s', 'TYPE', device
1085 blkid: CommandFilter, blkid, root
1086
1087 # nova/virt/libvirt/utils.py: 'blockdev', '--getsize64', path
1088 # nova/virt/disk/mount/nbd.py: 'blockdev', '--flushbufs', device
1089 blockdev: RegExpFilter, blockdev, root, blockdev, (--getsize64|--flushbufs), /dev/.*
1090
1091 # nova/virt/disk/vfs/localfs.py: 'tee', canonpath
1092 tee: CommandFilter, tee, root
1093
1094 # nova/virt/disk/vfs/localfs.py: 'mkdir', canonpath
1095 mkdir: CommandFilter, mkdir, root
1096
1097 # nova/virt/disk/vfs/localfs.py: 'chown'
1098 # nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log
1099 # nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log
1100 # nova/virt/libvirt/connection.py: 'chown', 'root', basepath('disk')
1101 chown: CommandFilter, chown, root
1102
1103 # nova/virt/disk/vfs/localfs.py: 'chmod'
1104 chmod: CommandFilter, chmod, root
1105
1106 # nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap'
1107 # nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up'
1108 # nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev
1109 # nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i..
1110 # nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'..
1111 # nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',..
1112 # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',..
1113 # nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev)
1114 # nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1]
1115 # nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge
1116 # nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', ..
1117 # nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',..
1118 # nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ...
1119 # nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,..
1120 # nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up'
1121 # nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up'
1122 # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, ..
1123 # nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, ..
1124 # nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up'
1125 # nova/network/linux_net.py: 'ip', 'route', 'add', ..
1126 # nova/network/linux_net.py: 'ip', 'route', 'del', .
1127 # nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev
1128 ip: CommandFilter, ip, root
1129
1130 # nova/virt/libvirt/vif.py: 'tunctl', '-b', '-t', dev
1131 # nova/network/linux_net.py: 'tunctl', '-b', '-t', dev
1132 tunctl: CommandFilter, tunctl, root
1133
1134 # nova/virt/libvirt/vif.py: 'ovs-vsctl', ...
1135 # nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ...
1136 # nova/network/linux_net.py: 'ovs-vsctl', ....
1137 ovs-vsctl: CommandFilter, ovs-vsctl, root
1138
1139 # nova/virt/libvirt/vif.py: 'vrouter-port-control', ...
1140 vrouter-port-control: CommandFilter, vrouter-port-control, root
1141
1142 # nova/virt/libvirt/vif.py: 'ebrctl', ...
1143 ebrctl: CommandFilter, ebrctl, root
1144
1145 # nova/virt/libvirt/vif.py: 'mm-ctl', ...
1146 mm-ctl: CommandFilter, mm-ctl, root
1147
1148 # nova/network/linux_net.py: 'ovs-ofctl', ....
1149 ovs-ofctl: CommandFilter, ovs-ofctl, root
1150
1151 # nova/virt/libvirt/connection.py: 'dd', if=%s % virsh_output, ...
1152 dd: CommandFilter, dd, root
1153
1154 # nova/virt/xenapi/volume_utils.py: 'iscsiadm', '-m', ...
1155 iscsiadm: CommandFilter, iscsiadm, root
1156
1157 # nova/virt/libvirt/volume/aoe.py: 'aoe-revalidate', aoedev
1158 # nova/virt/libvirt/volume/aoe.py: 'aoe-discover'
1159 aoe-revalidate: CommandFilter, aoe-revalidate, root
1160 aoe-discover: CommandFilter, aoe-discover, root
1161
1162 # nova/virt/xenapi/vm_utils.py: parted, --script, ...
1163 # nova/virt/xenapi/vm_utils.py: 'parted', '--script', dev_path, ..*.
1164 parted: CommandFilter, parted, root
1165
1166 # nova/virt/xenapi/vm_utils.py: 'pygrub', '-qn', dev_path
1167 pygrub: CommandFilter, pygrub, root
1168
1169 # nova/virt/xenapi/vm_utils.py: fdisk %(dev_path)s
1170 fdisk: CommandFilter, fdisk, root
1171
1172 # nova/virt/xenapi/vm_utils.py: e2fsck, -f, -p, partition_path
1173 # nova/virt/disk/api.py: e2fsck, -f, -p, image
1174 e2fsck: CommandFilter, e2fsck, root
1175
1176 # nova/virt/xenapi/vm_utils.py: resize2fs, partition_path
1177 # nova/virt/disk/api.py: resize2fs, image
1178 resize2fs: CommandFilter, resize2fs, root
1179
1180 # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
1181 iptables-save: CommandFilter, iptables-save, root
1182 ip6tables-save: CommandFilter, ip6tables-save, root
1183
1184 # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
1185 iptables-restore: CommandFilter, iptables-restore, root
1186 ip6tables-restore: CommandFilter, ip6tables-restore, root
1187
1188 # nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...
1189 # nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..
1190 arping: CommandFilter, arping, root
1191
1192 # nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
1193 dhcp_release: CommandFilter, dhcp_release, root
1194
1195 # nova/network/linux_net.py: 'kill', '-9', pid
1196 # nova/network/linux_net.py: 'kill', '-HUP', pid
1197 kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP
1198
1199 # nova/network/linux_net.py: 'kill', pid
1200 kill_radvd: KillFilter, root, /usr/sbin/radvd
1201
1202 # nova/network/linux_net.py: dnsmasq call
1203 dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq
1204
1205 # nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'..
1206 radvd: CommandFilter, radvd, root
1207
1208 # nova/network/linux_net.py: 'brctl', 'addbr', bridge
1209 # nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0
1210 # nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'
1211 # nova/network/linux_net.py: 'brctl', 'addif', bridge, interface
1212 brctl: CommandFilter, brctl, root
1213
1214 # nova/virt/libvirt/utils.py: 'mkswap'
1215 # nova/virt/xenapi/vm_utils.py: 'mkswap'
1216 mkswap: CommandFilter, mkswap, root
1217
1218 # nova/virt/libvirt/utils.py: 'nova-idmapshift'
1219 nova-idmapshift: CommandFilter, nova-idmapshift, root
1220
1221 # nova/virt/xenapi/vm_utils.py: 'mkfs'
1222 # nova/utils.py: 'mkfs', fs, path, label
1223 mkfs: CommandFilter, mkfs, root
1224
1225 # nova/virt/libvirt/utils.py: 'qemu-img'
1226 qemu-img: CommandFilter, qemu-img, root
1227
1228 # nova/virt/disk/vfs/localfs.py: 'readlink', '-e'
1229 readlink: CommandFilter, readlink, root
1230
1231 # nova/virt/disk/api.py:
1232 mkfs.ext3: CommandFilter, mkfs.ext3, root
1233 mkfs.ext4: CommandFilter, mkfs.ext4, root
1234 mkfs.ntfs: CommandFilter, mkfs.ntfs, root
1235
1236 # nova/virt/libvirt/connection.py:
1237 lvremove: CommandFilter, lvremove, root
1238
1239 # nova/virt/libvirt/utils.py:
1240 lvcreate: CommandFilter, lvcreate, root
1241
1242 # nova/virt/libvirt/utils.py:
1243 lvs: CommandFilter, lvs, root
1244
1245 # nova/virt/libvirt/utils.py:
1246 vgs: CommandFilter, vgs, root
1247
1248 # nova/utils.py:read_file_as_root: 'cat', file_path
1249 # (called from nova/virt/disk/vfs/localfs.py:VFSLocalFS.read_file)
1250 read_passwd: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/passwd
1251 read_shadow: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/shadow
1252
1253 # os-brick needed commands
1254 read_initiator: ReadFileFilter, /etc/iscsi/initiatorname.iscsi
1255 multipath: CommandFilter, multipath, root
1256 # multipathd show status
1257 multipathd: CommandFilter, multipathd, root
1258 systool: CommandFilter, systool, root
1259 vgc-cluster: CommandFilter, vgc-cluster, root
1260 # os_brick/initiator/connector.py
1261 drv_cfg: CommandFilter, /opt/emc/scaleio/sdc/bin/drv_cfg, root, /opt/emc/scaleio/sdc/bin/drv_cfg, --query_guid
1262
1263 # TODO(smcginnis) Temporary fix.
1264 # Need to pull in os-brick os-brick.filters file instead and clean
1265 # out stale brick values from this file.
1266 scsi_id: CommandFilter, /lib/udev/scsi_id, root
1267 # os_brick.privileged.default oslo.privsep context
1268 # This line ties the superuser privs with the config files, context name,
1269 # and (implicitly) the actual python code invoked.
1270 privsep-rootwrap: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.*
1271
1272 # nova/storage/linuxscsi.py: sg_scan device
1273 sg_scan: CommandFilter, sg_scan, root
1274
1275 # nova/volume/encryptors/cryptsetup.py:
1276 # nova/volume/encryptors/luks.py:
1277 ln: RegExpFilter, ln, root, ln, --symbolic, --force, /dev/mapper/crypt-.+, .+
1278
1279 # nova/volume/encryptors.py:
1280 # nova/virt/libvirt/dmcrypt.py:
1281 cryptsetup: CommandFilter, cryptsetup, root
1282
1283 # nova/virt/xenapi/vm_utils.py:
1284 xenstore-read: CommandFilter, xenstore-read, root
1285
1286 # nova/virt/libvirt/utils.py:
1287 rbd: CommandFilter, rbd, root
1288
1289 # nova/virt/libvirt/utils.py: 'shred', '-n3', '-s%d' % volume_size, path
1290 shred: CommandFilter, shred, root
1291
1292 # nova/virt/libvirt/volume.py: 'cp', '/dev/stdin', delete_control..
1293 cp: CommandFilter, cp, root
1294
1295 # nova/virt/xenapi/vm_utils.py:
1296 sync: CommandFilter, sync, root
1297
1298 # nova/virt/libvirt/imagebackend.py:
1299 ploop: RegExpFilter, ploop, root, ploop, restore-descriptor, .*
1300 prl_disk_tool: RegExpFilter, prl_disk_tool, root, prl_disk_tool, resize, --size, .*M$, --resize_partition, --hdd, .*
1301
1302 # nova/virt/libvirt/utils.py: 'xend', 'status'
1303 xend: CommandFilter, xend, root
1304
1305 # nova/virt/libvirt/utils.py:
1306 touch: CommandFilter, touch, root
1307
1308 # nova/virt/libvirt/volume/vzstorage.py
1309 pstorage-mount: CommandFilter, pstorage-mount, root
1310 network:
1311 pods:
1312 - compute
1313 content: |
1314 # nova-rootwrap command filters for network nodes
1315 # This file should be owned by (and only-writeable by) the root user
1316
1317 [Filters]
1318 # nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap'
1319 # nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up'
1320 # nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev
1321 # nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i..
1322 # nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'..
1323 # nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',..
1324 # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',..
1325 # nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev)
1326 # nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1]
1327 # nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge
1328 # nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', ..
1329 # nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',..
1330 # nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ...
1331 # nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,..
1332 # nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up'
1333 # nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up'
1334 # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, ..
1335 # nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, ..
1336 # nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up'
1337 # nova/network/linux_net.py: 'ip', 'route', 'add', ..
1338 # nova/network/linux_net.py: 'ip', 'route', 'del', .
1339 # nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev
1340 ip: CommandFilter, ip, root
1341
1342 # nova/virt/libvirt/vif.py: 'ovs-vsctl', ...
1343 # nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ...
1344 # nova/network/linux_net.py: 'ovs-vsctl', ....
1345 ovs-vsctl: CommandFilter, ovs-vsctl, root
1346
1347 # nova/network/linux_net.py: 'ovs-ofctl', ....
1348 ovs-ofctl: CommandFilter, ovs-ofctl, root
1349
1350 # nova/virt/libvirt/vif.py: 'ivs-ctl', ...
1351 # nova/virt/libvirt/vif.py: 'ivs-ctl', 'del-port', ...
1352 # nova/network/linux_net.py: 'ivs-ctl', ....
1353 ivs-ctl: CommandFilter, ivs-ctl, root
1354
1355 # nova/virt/libvirt/vif.py: 'ifc_ctl', ...
1356 ifc_ctl: CommandFilter, /opt/pg/bin/ifc_ctl, root
1357
1358 # nova/network/linux_net.py: 'ebtables', '-D' ...
1359 # nova/network/linux_net.py: 'ebtables', '-I' ...
1360 ebtables: CommandFilter, ebtables, root
1361 ebtables_usr: CommandFilter, ebtables, root
1362
1363 # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
1364 iptables-save: CommandFilter, iptables-save, root
1365 ip6tables-save: CommandFilter, ip6tables-save, root
1366
1367 # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
1368 iptables-restore: CommandFilter, iptables-restore, root
1369 ip6tables-restore: CommandFilter, ip6tables-restore, root
1370
1371 # nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...
1372 # nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..
1373 arping: CommandFilter, arping, root
1374
1375 # nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
1376 dhcp_release: CommandFilter, dhcp_release, root
1377
1378 # nova/network/linux_net.py: 'kill', '-9', pid
1379 # nova/network/linux_net.py: 'kill', '-HUP', pid
1380 kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP
1381
1382 # nova/network/linux_net.py: 'kill', pid
1383 kill_radvd: KillFilter, root, /usr/sbin/radvd
1384
1385 # nova/network/linux_net.py: dnsmasq call
1386 dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq
1387
1388 # nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'..
1389 radvd: CommandFilter, radvd, root
1390
1391 # nova/network/linux_net.py: 'brctl', 'addbr', bridge
1392 # nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0
1393 # nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'
1394 # nova/network/linux_net.py: 'brctl', 'addif', bridge, interface
1395 brctl: CommandFilter, brctl, root
1396
1397 # nova/network/linux_net.py: 'sysctl', ....
1398 sysctl: CommandFilter, sysctl, root
1399
1400 # nova/network/linux_net.py: 'conntrack'
1401 conntrack: CommandFilter, conntrack, root
1402
1403 # nova/network/linux_net.py: 'fp-vdev'
1404 fp-vdev: CommandFilter, fp-vdev, root
1405 nova_ironic:
1406 DEFAULT:
1407 scheduler_host_manager: ironic_host_manager
1408 compute_driver: ironic.IronicDriver
1409 ram_allocation_ratio: 1.0
1410 cpu_allocation_ratio: 1.0
1411 reserved_host_memory_mb: 0
1412 libvirt:
1413 # Get the IP address to be used as the target for live migration traffic using interface name.
1414 # If this option is set to None, the hostname of the migration target compute node will be used.
1415 live_migration_interface:
1416 hypervisor:
1417 # my_ip can be set automatically through this interface name.
1418 host_interface:
1419 # This list is the keys to exclude from the config file ingested by nova-compute
1420 nova_compute_redactions:
1421 - database
1422 - api_database
1423 - cell0_database
1424 nova:
1425 DEFAULT:
1426 log_config_append: /etc/nova/logging.conf
1427 default_ephemeral_format: ext4
1428 ram_allocation_ratio: 1.0
1429 disk_allocation_ratio: 1.0
1430 cpu_allocation_ratio: 3.0
1431 state_path: /var/lib/nova
1432 osapi_compute_listen: 0.0.0.0
1433 # NOTE(portdirect): the bind port should not be defined, and is manipulated
1434 # via the endpoints section.
1435 osapi_compute_listen_port: null
1436 osapi_compute_workers: 1
1437 metadata_workers: 1
1438 use_neutron: true
1439 firewall_driver: nova.virt.firewall.NoopFirewallDriver
1440 linuxnet_interface_driver: openvswitch
1441 compute_driver: libvirt.LibvirtDriver
1442 my_ip: 0.0.0.0
1443 instance_usage_audit: True
1444 instance_usage_audit_period: hour
1445 notify_on_state_change: vm_and_task_state
1446 resume_guests_state_on_host_boot: True
1447 vnc:
1448 novncproxy_host: 0.0.0.0
1449 vncserver_listen: 0.0.0.0
      # This would be set to each compute node's IP
1451 # server_proxyclient_address: 127.0.0.1
1452 spice:
1453 html5proxy_host: 0.0.0.0
1454 server_listen: 0.0.0.0
      # This would be set to each compute node's IP
1456 # server_proxyclient_address: 127.0.0.1
1457 conductor:
1458 workers: 1
1459 oslo_policy:
1460 policy_file: /etc/nova/policy.yaml
1461 oslo_concurrency:
1462 lock_path: /var/lib/nova/tmp
1463 oslo_middleware:
1464 enable_proxy_headers_parsing: true
1465 glance:
1466 num_retries: 3
1467 ironic:
1468 api_endpoint: null
1469 auth_url: null
1470 neutron:
1471 metadata_proxy_shared_secret: "password"
1472 service_metadata_proxy: True
1473 auth_type: password
1474 auth_version: v3
1475 database:
1476 max_retries: -1
1477 api_database:
1478 max_retries: -1
1479 cell0_database:
1480 max_retries: -1
1481 keystone_authtoken:
1482 auth_type: password
1483 auth_version: v3
1484 memcache_security_strategy: ENCRYPT
1485 service_user:
1486 auth_type: password
1487 send_service_user_token: false
1488 libvirt:
1489 connection_uri: "qemu+unix:///system?socket=/run/libvirt/libvirt-sock"
1490 images_type: qcow2
1491 images_rbd_pool: vms
1492 images_rbd_ceph_conf: /etc/ceph/ceph.conf
1493 rbd_user: cinder
1494 rbd_secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
1495 disk_cachemodes: "network=writeback"
1496 hw_disk_discard: unmap
1497 upgrade_levels:
1498 compute: auto
1499 cache:
1500 enabled: true
1501 backend: dogpile.cache.memcached
1502 wsgi:
1503 api_paste_config: /etc/nova/api-paste.ini
1504 oslo_messaging_notifications:
1505 driver: messagingv2
1506 oslo_messaging_rabbit:
1507 rabbit_ha_queues: true
1508 placement:
1509 auth_type: password
1510 auth_version: v3
1511 logging:
1512 loggers:
1513 keys:
1514 - root
1515 - nova
1516 - os.brick
1517 handlers:
1518 keys:
1519 - stdout
1520 - stderr
1521 - "null"
1522 formatters:
1523 keys:
1524 - context
1525 - default
1526 logger_root:
1527 level: WARNING
1528 handlers: 'null'
1529 logger_nova:
1530 level: INFO
1531 handlers:
1532 - stdout
1533 qualname: nova
1534 logger_os.brick:
1535 level: INFO
1536 handlers:
1537 - stdout
1538 qualname: os.brick
1539 logger_amqp:
1540 level: WARNING
1541 handlers: stderr
1542 qualname: amqp
1543 logger_amqplib:
1544 level: WARNING
1545 handlers: stderr
1546 qualname: amqplib
1547 logger_eventletwsgi:
1548 level: WARNING
1549 handlers: stderr
1550 qualname: eventlet.wsgi.server
1551 logger_sqlalchemy:
1552 level: WARNING
1553 handlers: stderr
1554 qualname: sqlalchemy
1555 logger_boto:
1556 level: WARNING
1557 handlers: stderr
1558 qualname: boto
1559 handler_null:
1560 class: logging.NullHandler
1561 formatter: default
1562 args: ()
1563 handler_stdout:
1564 class: StreamHandler
1565 args: (sys.stdout,)
1566 formatter: context
1567 handler_stderr:
1568 class: StreamHandler
1569 args: (sys.stderr,)
1570 formatter: context
1571 formatter_context:
1572 class: oslo_log.formatters.ContextFormatter
1573 datefmt: "%Y-%m-%d %H:%M:%S"
1574 formatter_default:
1575 format: "%(message)s"
1576 datefmt: "%Y-%m-%d %H:%M:%S"
1577 rabbitmq:
1578 # NOTE(rk760n): adding rmq policy to mirror messages from notification queues and set expiration time for the ones
1579 policies:
1580 - vhost: "nova"
1581 name: "ha_ttl_nova"
1582 definition:
          # mirror messages to other nodes in rmq cluster
1584 ha-mode: "all"
1585 ha-sync-mode: "automatic"
1586 # 70s
1587 message-ttl: 70000
1588 priority: 0
1589 apply-to: all
1590 pattern: '^(?!(amq\.|reply_)).*'
1591 enable_iscsi: false
1592 archive_deleted_rows:
1593 purge_deleted_rows: false
1594 until_completion: true
1595 all_cells: false
1596 max_rows:
1597 enabled: False
1598 rows: 1000
1599 before:
1600 enabled: false
1601 date: 'nil'
1602
1603# Names of secrets used by bootstrap and environmental checks
1604secrets:
1605 identity:
1606 admin: nova-keystone-admin
1607 nova: nova-keystone-user
1608 placement: nova-keystone-placement
1609 test: nova-keystone-test
1610 oslo_db:
1611 admin: nova-db-admin
1612 nova: nova-db-user
1613 oslo_db_api:
1614 admin: nova-db-api-admin
1615 nova: nova-db-api-user
1616 oslo_db_cell0:
1617 admin: nova-db-cell0-admin
1618 nova: nova-db-cell0-user
1619 oslo_messaging:
1620 admin: nova-rabbitmq-admin
1621 nova: nova-rabbitmq-user
1622 tls:
1623 compute:
1624 osapi:
1625 public: nova-tls-public
1626 internal: nova-tls-api
1627 compute_novnc_proxy:
1628 novncproxy:
1629 public: nova-novncproxy-tls-public
1630 internal: nova-novncproxy-tls-proxy
1631 placement:
1632 placement:
1633 public: placement-tls-public
1634 internal: placement-tls-api
1635 compute_metadata:
1636 metadata:
1637 public: metadata-tls-public
1638 internal: metadata-tls-metadata
1639 compute_spice_proxy:
1640 spiceproxy:
1641 internal: nova-tls-spiceproxy
1642
1643# typically overridden by environmental
1644# values, but should include all endpoints
1645# required by this chart
1646endpoints:
1647 cluster_domain_suffix: cluster.local
1648 local_image_registry:
1649 name: docker-registry
1650 namespace: docker-registry
1651 hosts:
1652 default: localhost
1653 internal: docker-registry
1654 node: localhost
1655 host_fqdn_override:
1656 default: null
1657 port:
1658 registry:
1659 node: 5000
1660 oslo_db:
1661 auth:
1662 admin:
1663 username: root
1664 password: password
1665 secret:
1666 tls:
1667 internal: mariadb-tls-direct
1668 nova:
1669 username: nova
1670 password: password
1671 hosts:
1672 default: mariadb
1673 host_fqdn_override:
1674 default: null
1675 path: /nova
1676 scheme: mysql+pymysql
1677 port:
1678 mysql:
1679 default: 3306
1680 oslo_db_api:
1681 auth:
1682 admin:
1683 username: root
1684 password: password
1685 nova:
1686 username: nova
1687 password: password
1688 hosts:
1689 default: mariadb
1690 host_fqdn_override:
1691 default: null
1692 path: /nova_api
1693 scheme: mysql+pymysql
1694 port:
1695 mysql:
1696 default: 3306
1697 oslo_db_cell0:
1698 auth:
1699 admin:
1700 username: root
1701 password: password
1702 nova:
1703 username: nova
1704 password: password
1705 hosts:
1706 default: mariadb
1707 host_fqdn_override:
1708 default: null
1709 path: /nova_cell0
1710 scheme: mysql+pymysql
1711 port:
1712 mysql:
1713 default: 3306
1714 oslo_messaging:
1715 auth:
1716 admin:
1717 username: rabbitmq
1718 password: password
1719 secret:
1720 tls:
1721 internal: rabbitmq-tls-direct
1722 nova:
1723 username: nova
1724 password: password
1725 statefulset:
1726 replicas: 2
1727 name: rabbitmq-rabbitmq
1728 hosts:
1729 default: rabbitmq
1730 host_fqdn_override:
1731 default: null
1732 path: /nova
1733 scheme: rabbit
1734 port:
1735 amqp:
1736 default: 5672
1737 http:
1738 default: 15672
1739 oslo_cache:
1740 auth:
1741 # NOTE(portdirect): this is used to define the value for keystone
1742 # authtoken cache encryption key, if not set it will be populated
1743 # automatically with a random value, but to take advantage of
1744 # this feature all services should be set to use the same key,
1745 # and memcache service.
1746 memcache_secret_key: null
1747 hosts:
1748 default: memcached
1749 host_fqdn_override:
1750 default: null
1751 port:
1752 memcache:
1753 default: 11211
1754 identity:
1755 name: keystone
1756 auth:
1757 admin:
1758 region_name: RegionOne
1759 username: admin
1760 password: password
1761 project_name: admin
1762 user_domain_name: default
1763 project_domain_name: default
1764 nova:
1765 role: admin
1766 region_name: RegionOne
1767 username: nova
1768 password: password
1769 project_name: service
1770 user_domain_name: service
1771 project_domain_name: service
1772 # NOTE(portdirect): the neutron user is not managed by the nova chart
1773 # these values should match those set in the neutron chart.
1774 neutron:
1775 region_name: RegionOne
1776 project_name: service
1777 user_domain_name: service
1778 project_domain_name: service
1779 username: neutron
1780 password: password
1781 # NOTE(portdirect): the ironic user is not managed by the nova chart
1782 # these values should match those set in the ironic chart.
1783 ironic:
1784 auth_type: password
1785 auth_version: v3
1786 region_name: RegionOne
1787 project_name: service
1788 user_domain_name: service
1789 project_domain_name: service
1790 username: ironic
1791 password: password
1792 placement:
1793 role: admin
1794 region_name: RegionOne
1795 username: placement
1796 password: password
1797 project_name: service
1798 user_domain_name: service
1799 project_domain_name: service
1800 test:
1801 role: admin
1802 region_name: RegionOne
1803 username: nova-test
1804 password: password
1805 project_name: test
1806 user_domain_name: service
1807 project_domain_name: service
1808 hosts:
1809 default: keystone
1810 internal: keystone-api
1811 host_fqdn_override:
1812 default: null
1813 path:
1814 default: /v3
1815 scheme:
1816 default: http
1817 port:
1818 api:
1819 default: 80
1820 internal: 5000
1821 image:
1822 name: glance
1823 hosts:
1824 default: glance-api
1825 public: glance
1826 host_fqdn_override:
1827 default: null
1828 path:
1829 default: null
1830 scheme:
1831 default: http
1832 port:
1833 api:
1834 default: 9292
1835 public: 80
1836 compute:
1837 name: nova
1838 hosts:
1839 default: nova-api
1840 public: nova
1841 host_fqdn_override:
1842 default: null
1843 # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public
1844 # endpoints using the following format:
1845 # public:
1846 # host: null
1847 # tls:
1848 # crt: null
1849 # key: null
1850 path:
1851 default: "/v2.1/%(tenant_id)s"
1852 scheme:
1853 default: 'http'
1854 port:
1855 api:
1856 default: 8774
1857 public: 80
1858 novncproxy:
1859 default: 6080
1860 compute_metadata:
1861 name: nova
1862 ip:
      # If blank, set clusterIP and metadata_host dynamically
1864 ingress: null
1865 hosts:
1866 default: nova-metadata
1867 public: metadata
1868 host_fqdn_override:
1869 default: null
1870 path:
1871 default: /
1872 scheme:
1873 default: 'http'
1874 port:
1875 metadata:
1876 default: 8775
1877 public: 80
1878 compute_novnc_proxy:
1879 name: nova
1880 hosts:
1881 default: nova-novncproxy
1882 public: novncproxy
1883 host_fqdn_override:
1884 default: null
1885 # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public
1886 # endpoints using the following format:
1887 # public:
1888 # host: null
1889 # tls:
1890 # crt: null
1891 # key: null
1892 path:
1893 default: /vnc_auto.html
1894 scheme:
1895 default: 'http'
1896 port:
1897 novnc_proxy:
1898 default: 6080
1899 public: 80
1900 compute_spice_proxy:
1901 name: nova
1902 hosts:
1903 default: nova-spiceproxy
      # NOTE(review): "placement" here looks like a copy-paste from the
      # placement endpoint below — confirm the intended public host for the
      # SPICE proxy (e.g. "spiceproxy") before changing, as ingress/service
      # templates consume this value.
      public: placement
1905 host_fqdn_override:
1906 default: null
1907 path:
1908 default: /spice_auto.html
1909 scheme:
1910 default: 'http'
1911 port:
1912 spice_proxy:
1913 default: 6082
1914 placement:
1915 name: placement
1916 hosts:
1917 default: placement-api
1918 public: placement
1919 host_fqdn_override:
1920 default: null
1921 path:
1922 default: /
1923 scheme:
1924 default: 'http'
1925 port:
1926 api:
1927 default: 8778
1928 public: 80
1929 network:
1930 name: neutron
1931 hosts:
1932 default: neutron-server
1933 public: neutron
1934 host_fqdn_override:
1935 default: null
1936 path:
1937 default: null
1938 scheme:
1939 default: 'http'
1940 port:
1941 api:
1942 default: 9696
1943 public: 80
1944 baremetal:
1945 name: ironic
1946 hosts:
1947 default: ironic-api
1948 public: ironic
1949 host_fqdn_override:
1950 default: null
1951 path:
1952 default: null
1953 scheme:
1954 default: http
1955 port:
1956 api:
1957 default: 6385
1958 public: 80
1959 fluentd:
1960 namespace: null
1961 name: fluentd
1962 hosts:
1963 default: fluentd-logging
1964 host_fqdn_override:
1965 default: null
1966 path:
1967 default: null
1968 scheme: 'http'
1969 port:
1970 service:
1971 default: 24224
1972 metrics:
1973 default: 24220
  # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress
  # They are used to enable the Egress K8s network policy.
1976 kube_dns:
1977 namespace: kube-system
1978 name: kubernetes-dns
1979 hosts:
1980 default: kube-dns
1981 host_fqdn_override:
1982 default: null
1983 path:
1984 default: null
1985 scheme: http
1986 port:
1987 dns:
1988 default: 53
1989 protocol: UDP
1990 ingress:
1991 namespace: null
1992 name: ingress
1993 hosts:
1994 default: ingress
1995 port:
1996 ingress:
1997 default: 80
1998
1999pod:
2000 probes:
2001 rpc_timeout: 60
2002 rpc_retries: 2
2003 compute:
2004 default:
2005 liveness:
2006 enabled: True
2007 params:
2008 initialDelaySeconds: 120
2009 periodSeconds: 90
2010 timeoutSeconds: 70
2011 readiness:
2012 enabled: True
2013 params:
2014 initialDelaySeconds: 80
2015 periodSeconds: 90
2016 timeoutSeconds: 70
2017 api-metadata:
2018 default:
2019 liveness:
2020 enabled: True
2021 params:
2022 initialDelaySeconds: 30
2023 periodSeconds: 60
2024 timeoutSeconds: 15
2025 readiness:
2026 enabled: True
2027 params:
2028 initialDelaySeconds: 30
2029 periodSeconds: 60
2030 timeoutSeconds: 15
2031 api-osapi:
2032 default:
2033 liveness:
2034 enabled: True
2035 params:
2036 initialDelaySeconds: 30
2037 periodSeconds: 60
2038 timeoutSeconds: 15
2039 readiness:
2040 enabled: True
2041 params:
2042 initialDelaySeconds: 30
2043 periodSeconds: 60
2044 timeoutSeconds: 15
2045 conductor:
2046 default:
2047 liveness:
2048 enabled: True
2049 params:
2050 initialDelaySeconds: 120
2051 periodSeconds: 90
2052 timeoutSeconds: 70
2053 readiness:
2054 enabled: True
2055 params:
2056 initialDelaySeconds: 80
2057 periodSeconds: 90
2058 timeoutSeconds: 70
2059 consoleauth:
2060 default:
2061 liveness:
2062 enabled: True
2063 params:
2064 initialDelaySeconds: 120
2065 periodSeconds: 90
2066 timeoutSeconds: 70
2067 readiness:
2068 enabled: True
2069 params:
2070 initialDelaySeconds: 80
2071 periodSeconds: 90
2072 timeoutSeconds: 70
2073 novncproxy:
2074 default:
2075 liveness:
2076 enabled: True
2077 params:
2078 initialDelaySeconds: 30
2079 periodSeconds: 60
2080 timeoutSeconds: 15
2081 readiness:
2082 enabled: True
2083 params:
2084 initialDelaySeconds: 30
2085 periodSeconds: 60
2086 timeoutSeconds: 15
2087 placement:
2088 default:
2089 liveness:
2090 enabled: True
2091 params:
2092 initialDelaySeconds: 50
2093 periodSeconds: 30
2094 timeoutSeconds: 10
2095 readiness:
2096 enabled: True
2097 params:
2098 initialDelaySeconds: 15
2099 periodSeconds: 30
2100 timeoutSeconds: 10
2101 scheduler:
2102 default:
2103 liveness:
2104 enabled: True
2105 params:
2106 initialDelaySeconds: 120
2107 periodSeconds: 90
2108 timeoutSeconds: 70
2109 readiness:
2110 enabled: True
2111 params:
2112 initialDelaySeconds: 80
2113 periodSeconds: 90
2114 timeoutSeconds: 70
2115 compute-spice-proxy:
2116 default:
2117 liveness:
2118 enabled: True
2119 params:
2120 initialDelaySeconds: 30
2121 periodSeconds: 60
2122 timeoutSeconds: 15
2123 readiness:
2124 enabled: True
2125 params:
2126 initialDelaySeconds: 30
2127 periodSeconds: 60
2128 timeoutSeconds: 15
2129 security_context:
2130 nova:
2131 pod:
2132 runAsUser: 42424
2133 container:
2134 nova_compute_init:
2135 readOnlyRootFilesystem: true
2136 runAsUser: 0
2137 tungstenfabric_compute_init:
2138 readOnlyRootFilesystem: true
2139 allowPrivilegeEscalation: false
2140 ceph_perms:
2141 readOnlyRootFilesystem: true
2142 runAsUser: 0
2143 ceph_admin_keyring_placement:
2144 readOnlyRootFilesystem: true
2145 ceph_keyring_placement:
2146 readOnlyRootFilesystem: true
2147 allowPrivilegeEscalation: false
2148 nova_compute_vnc_init:
2149 readOnlyRootFilesystem: true
2150 allowPrivilegeEscalation: false
2151 nova_compute_spice_init:
2152 readOnlyRootFilesystem: true
2153 allowPrivilegeEscalation: false
2154 nova_compute:
2155 readOnlyRootFilesystem: true
2156 privileged: true
2157 nova_compute_ssh:
2158 privileged: true
2159 runAsUser: 0
2160 nova_compute_ssh_init:
2161 runAsUser: 0
2162 nova_api_metadata_init:
2163 readOnlyRootFilesystem: true
2164 allowPrivilegeEscalation: false
2165 nova_api:
2166 readOnlyRootFilesystem: true
2167 allowPrivilegeEscalation: false
2168 nova_osapi:
2169 readOnlyRootFilesystem: true
2170 allowPrivilegeEscalation: false
2171 nova_conductor:
2172 readOnlyRootFilesystem: true
2173 allowPrivilegeEscalation: false
2174 nova_consoleauth:
2175 readOnlyRootFilesystem: true
2176 allowPrivilegeEscalation: false
2177 nova_novncproxy_init:
2178 readOnlyRootFilesystem: true
2179 allowPrivilegeEscalation: false
        # NOTE(review): key name "assests" (sic) is likely referenced verbatim
        # by the chart templates; do not rename without updating them together.
        nova_novncproxy_init_assests:
2181 readOnlyRootFilesystem: true
2182 allowPrivilegeEscalation: false
2183 nova_novncproxy:
2184 readOnlyRootFilesystem: true
2185 allowPrivilegeEscalation: false
2186 nova_placement_api:
2187 readOnlyRootFilesystem: false
2188 allowPrivilegeEscalation: false
2189 nova_scheduler:
2190 readOnlyRootFilesystem: true
2191 allowPrivilegeEscalation: false
2192 nova_spiceproxy_init:
2193 readOnlyRootFilesystem: true
2194 allowPrivilegeEscalation: false
2195 nova_spiceproxy_init_assets:
2196 readOnlyRootFilesystem: true
2197 allowPrivilegeEscalation: false
2198 nova_spiceproxy:
2199 readOnlyRootFilesystem: true
2200 allowPrivilegeEscalation: false
2201 bootstrap:
2202 pod:
2203 runAsUser: 42424
2204 container:
2205 nova_wait_for_computes_init:
2206 readOnlyRootFilesystem: true
2207 allowPrivilegeEscalation: false
2208 bootstrap:
2209 readOnlyRootFilesystem: true
2210 allowPrivilegeEscalation: false
2211 nova_cell_setup:
2212 pod:
2213 runAsUser: 42424
2214 container:
2215 nova_wait_for_computes_init:
2216 readOnlyRootFilesystem: true
2217 allowPrivilegeEscalation: false
2218 nova_cell_setup_init:
2219 readOnlyRootFilesystem: true
2220 allowPrivilegeEscalation: false
2221 nova_cell_setup:
2222 readOnlyRootFilesystem: true
2223 allowPrivilegeEscalation: false
2224 archive_deleted_rows:
2225 pod:
2226 runAsUser: 42424
2227 container:
2228 nova_archive_deleted_rows_init:
2229 readOnlyRootFilesystem: true
2230 allowPrivilegeEscalation: false
2231 nova_archive_deleted_rows:
2232 readOnlyRootFilesystem: true
2233 allowPrivilegeEscalation: false
2234 cell_setup:
2235 pod:
2236 runAsUser: 42424
2237 container:
2238 nova_cell_setup:
2239 readOnlyRootFilesystem: true
2240 allowPrivilegeEscalation: false
2241 service_cleaner:
2242 pod:
2243 runAsUser: 42424
2244 container:
2245 nova_service_cleaner:
2246 readOnlyRootFilesystem: true
2247 allowPrivilegeEscalation: false
  use_fqdn:
    # NOTE: If the option "host" is not specified in nova.conf, the host name
    # shown for the hypervisor defaults to the short name of the host.
    # Setting this option to true causes $(hostname --fqdn) to be used as the
    # host name by default. If the short name $(hostname --short) is desired,
    # set the option to false. Specifying a host in nova.conf via the conf:
    # section supersedes the value of this option.
    compute: true
2256 affinity:
2257 anti:
2258 type:
2259 default: preferredDuringSchedulingIgnoredDuringExecution
2260 topologyKey:
2261 default: kubernetes.io/hostname
2262 weight:
2263 default: 10
2264 mounts:
2265 nova_compute:
2266 init_container: null
2267 nova_compute:
2268 volumeMounts:
2269 volumes:
2270 nova_compute_ironic:
2271 init_container: null
2272 nova_compute_ironic:
2273 volumeMounts:
2274 volumes:
2275 nova_api_metadata:
2276 init_container: null
2277 nova_api_metadata:
2278 volumeMounts:
2279 volumes:
2280 nova_placement:
2281 init_container: null
2282 nova_placement:
2283 volumeMounts:
2284 volumes:
2285 nova_api_osapi:
2286 init_container: null
2287 nova_api_osapi:
2288 volumeMounts:
2289 volumes:
2290 nova_consoleauth:
2291 init_container: null
2292 nova_consoleauth:
2293 volumeMounts:
2294 volumes:
2295 nova_conductor:
2296 init_container: null
2297 nova_conductor:
2298 volumeMounts:
2299 volumes:
2300 nova_scheduler:
2301 init_container: null
2302 nova_scheduler:
2303 volumeMounts:
2304 volumes:
2305 nova_bootstrap:
2306 init_container: null
2307 nova_bootstrap:
2308 volumeMounts:
2309 volumes:
2310 nova_tests:
2311 init_container: null
2312 nova_tests:
2313 volumeMounts:
2314 volumes:
    nova_novncproxy:
      # NOTE(review): named "init_novncproxy" while every other mounts entry
      # in this section uses "init_container". The chart templates look this
      # key up by name, so it must not be normalized here without changing
      # the template side as well.
      init_novncproxy: null
      nova_novncproxy:
        volumeMounts:
        volumes:
    nova_spiceproxy:
      # NOTE(review): same naming inconsistency as nova_novncproxy above
      # ("init_spiceproxy" instead of "init_container"); kept as-is for
      # template compatibility.
      init_spiceproxy: null
      nova_spiceproxy:
        volumeMounts:
        volumes:
2325 nova_db_sync:
2326 nova_db_sync:
2327 volumeMounts:
2328 volumes:
2329 useHostNetwork:
2330 novncproxy: true
2331 replicas:
2332 api_metadata: 1
2333 compute_ironic: 1
2334 placement: 1
2335 osapi: 1
2336 conductor: 1
2337 consoleauth: 1
2338 scheduler: 1
2339 novncproxy: 1
2340 spiceproxy: 1
2341 lifecycle:
2342 upgrades:
2343 deployments:
2344 revision_history: 3
2345 pod_replacement_strategy: RollingUpdate
2346 rolling_update:
2347 max_unavailable: 1
2348 max_surge: 3
2349 daemonsets:
2350 pod_replacement_strategy: RollingUpdate
2351 compute:
2352 enabled: true
2353 min_ready_seconds: 0
2354 max_unavailable: 1
2355 disruption_budget:
2356 metadata:
2357 min_available: 0
2358 placement:
2359 min_available: 0
2360 osapi:
2361 min_available: 0
2362 termination_grace_period:
2363 metadata:
2364 timeout: 30
2365 placement:
2366 timeout: 30
2367 osapi:
2368 timeout: 30
2369 resources:
2370 enabled: false
2371 compute:
2372 requests:
2373 memory: "128Mi"
2374 cpu: "100m"
2375 limits:
2376 memory: "1024Mi"
2377 cpu: "2000m"
2378 compute_ironic:
2379 requests:
2380 memory: "128Mi"
2381 cpu: "100m"
2382 limits:
2383 memory: "1024Mi"
2384 cpu: "2000m"
2385 api_metadata:
2386 requests:
2387 memory: "128Mi"
2388 cpu: "100m"
2389 limits:
2390 memory: "1024Mi"
2391 cpu: "2000m"
2392 placement:
2393 requests:
2394 memory: "128Mi"
2395 cpu: "100m"
2396 limits:
2397 memory: "1024Mi"
2398 cpu: "2000m"
2399 api:
2400 requests:
2401 memory: "128Mi"
2402 cpu: "100m"
2403 limits:
2404 memory: "1024Mi"
2405 cpu: "2000m"
2406 conductor:
2407 requests:
2408 memory: "128Mi"
2409 cpu: "100m"
2410 limits:
2411 memory: "1024Mi"
2412 cpu: "2000m"
2413 consoleauth:
2414 requests:
2415 memory: "128Mi"
2416 cpu: "100m"
2417 limits:
2418 memory: "1024Mi"
2419 cpu: "2000m"
2420 scheduler:
2421 requests:
2422 memory: "128Mi"
2423 cpu: "100m"
2424 limits:
2425 memory: "1024Mi"
2426 cpu: "2000m"
2427 ssh:
2428 requests:
2429 memory: "128Mi"
2430 cpu: "100m"
2431 limits:
2432 memory: "1024Mi"
2433 cpu: "2000m"
2434 novncproxy:
2435 requests:
2436 memory: "128Mi"
2437 cpu: "100m"
2438 limits:
2439 memory: "1024Mi"
2440 cpu: "2000m"
2441 spiceproxy:
2442 requests:
2443 memory: "128Mi"
2444 cpu: "100m"
2445 limits:
2446 memory: "1024Mi"
2447 cpu: "2000m"
2448 jobs:
2449 bootstrap:
2450 requests:
2451 memory: "128Mi"
2452 cpu: "100m"
2453 limits:
2454 memory: "1024Mi"
2455 cpu: "2000m"
2456 db_init:
2457 requests:
2458 memory: "128Mi"
2459 cpu: "100m"
2460 limits:
2461 memory: "1024Mi"
2462 cpu: "2000m"
2463 rabbit_init:
2464 requests:
2465 memory: "128Mi"
2466 cpu: "100m"
2467 limits:
2468 memory: "1024Mi"
2469 cpu: "2000m"
2470 db_sync:
2471 requests:
2472 memory: "128Mi"
2473 cpu: "100m"
2474 limits:
2475 memory: "1024Mi"
2476 cpu: "2000m"
2477 archive_deleted_rows:
2478 requests:
2479 memory: "128Mi"
2480 cpu: "100m"
2481 limits:
2482 memory: "1024Mi"
2483 cpu: "2000m"
2484 db_drop:
2485 requests:
2486 memory: "128Mi"
2487 cpu: "100m"
2488 limits:
2489 memory: "1024Mi"
2490 cpu: "2000m"
2491 ks_endpoints:
2492 requests:
2493 memory: "128Mi"
2494 cpu: "100m"
2495 limits:
2496 memory: "1024Mi"
2497 cpu: "2000m"
2498 ks_service:
2499 requests:
2500 memory: "128Mi"
2501 cpu: "100m"
2502 limits:
2503 memory: "1024Mi"
2504 cpu: "2000m"
2505 ks_user:
2506 requests:
2507 memory: "128Mi"
2508 cpu: "100m"
2509 limits:
2510 memory: "1024Mi"
2511 cpu: "2000m"
2512 tests:
2513 requests:
2514 memory: "128Mi"
2515 cpu: "100m"
2516 limits:
2517 memory: "1024Mi"
2518 cpu: "2000m"
2519 cell_setup:
2520 requests:
2521 memory: "128Mi"
2522 cpu: "100m"
2523 limits:
2524 memory: "1024Mi"
2525 cpu: "2000m"
2526 service_cleaner:
2527 requests:
2528 memory: "128Mi"
2529 cpu: "100m"
2530 limits:
2531 memory: "1024Mi"
2532 cpu: "2000m"
2533 image_repo_sync:
2534 requests:
2535 memory: "128Mi"
2536 cpu: "100m"
2537 limits:
2538 memory: "1024Mi"
2539 cpu: "2000m"
2540
2541network_policy:
2542 nova:
2543 # TODO(lamt): Need to tighten this ingress for security.
2544 ingress:
2545 - {}
2546 egress:
2547 - {}
2548 placement:
2549 # TODO(lamt): Need to tighten this ingress for security.
2550 ingress:
2551 - {}
2552 egress:
2553 - {}
2554
2555# NOTE(helm_hook): helm_hook might break for helm2 binary.
2556# set helm3_hook: false when using the helm2 binary.
2557helm3_hook: true
2558
2559health_probe:
2560 logging:
2561 level: ERROR
2562
2563manifests:
2564 certificates: false
2565 configmap_bin: true
2566 configmap_etc: true
2567 cron_job_cell_setup: true
2568 cron_job_service_cleaner: true
2569 cron_job_archive_deleted_rows: false
2570 daemonset_compute: true
2571 deployment_api_metadata: true
2572 deployment_api_osapi: true
2573 deployment_placement: true
2574 deployment_conductor: true
2575 deployment_consoleauth: true
2576 deployment_novncproxy: true
2577 deployment_spiceproxy: true
2578 deployment_scheduler: true
2579 ingress_metadata: true
2580 ingress_novncproxy: true
2581 ingress_placement: true
2582 ingress_osapi: true
2583 job_bootstrap: true
2584 job_db_init: true
2585 job_db_init_placement: true
2586 job_db_sync: true
2587 job_db_drop: false
2588 job_image_repo_sync: true
2589 job_rabbit_init: true
2590 job_ks_endpoints: true
2591 job_ks_service: true
2592 job_ks_user: true
2593 job_ks_placement_endpoints: true
2594 job_ks_placement_service: true
2595 job_ks_placement_user: true
2596 job_cell_setup: true
2597 pdb_metadata: true
2598 pdb_placement: true
2599 pdb_osapi: true
2600 pod_rally_test: true
2601 network_policy: false
2602 secret_db_api: true
2603 secret_db_cell0: true
2604 secret_db: true
2605 secret_ingress_tls: true
2606 secret_keystone: true
2607 secret_keystone_placement: true
2608 secret_rabbitmq: true
2609 service_ingress_metadata: true
2610 service_ingress_novncproxy: true
2611 service_ingress_placement: true
2612 service_ingress_osapi: true
2613 service_metadata: true
2614 service_placement: true
2615 service_novncproxy: true
2616 service_spiceproxy: true
2617 service_osapi: true
2618 statefulset_compute_ironic: false
2619...