# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Default values for neutron.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value

---
release_group: null

images:
  tags:
    bootstrap: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    test: docker.io/xrally/xrally-openstack:2.0.0
    purge_test: docker.io/openstackhelm/ospurge:latest
    db_init: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    neutron_db_sync: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    db_drop: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    rabbit_init: docker.io/rabbitmq:3.7-management
    ks_user: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    ks_service: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    ks_endpoints: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    neutron_server: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_dhcp: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_metadata: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_l3: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_l2gw: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_openvswitch_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_linuxbridge_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_sriov_agent: docker.io/openstackhelm/neutron:stein-18.04-sriov
    neutron_sriov_agent_init: docker.io/openstackhelm/neutron:stein-18.04-sriov
    neutron_bagpipe_bgp: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_ironic_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_netns_cleanup_cron: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
    image_repo_sync: docker.io/docker:17.07.0
  pull_policy: "IfNotPresent"
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync

labels:
  agent:
    dhcp:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
    l3:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
    metadata:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
    l2gw:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  lb:
    node_selector_key: linuxbridge
    node_selector_value: enabled
  # openvswitch is a special case: it requires a label that can apply to
  # both control hosts and compute hosts, until we get more sophisticated
  # with our daemonset scheduling
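  # As an illustrative sketch (the node name is hypothetical), the label is
  # applied with plain kubectl on every host that should run the OVS agent:
  #   kubectl label nodes <node-name> openvswitch=enabled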
  ovs:
    node_selector_key: openvswitch
    node_selector_value: enabled
  sriov:
    node_selector_key: sriov
    node_selector_value: enabled
  bagpipe_bgp:
    node_selector_key: openstack-compute-node
    node_selector_value: enabled
  server:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  ironic_agent:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  netns_cleanup_cron:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  test:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled

network:
  # provide the type of network wiring that will be used
  backend:
    - openvswitch
  # NOTE(Portdirect): Share network namespaces with the host,
  # allowing agents to be restarted without packet loss and simpler
  # debugging. This feature requires mount propagation support.
  share_namespaces: true
  interface:
    # Tunnel interface will be used for VXLAN tunneling.
    tunnel: null
    # If tunnel is null, a fallback mechanism searches for the interface
    # whose route matches tunnel_network_cidr.
    tunnel_network_cidr: "0/0"
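    # For example (the interface name and CIDR are illustrative), VXLAN
    # traffic can be pinned to a known interface instead of relying on the
    # route lookup:
    # tunnel: ens3
    # or the fallback search can be narrowed to the actual tunnel subnet:
    # tunnel_network_cidr: "10.0.0.0/24"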
    # To perform setup of network interfaces using the SR-IOV init
    # container you can use a section similar to:
    # sriov:
    #   - device: ${DEV}
    #     num_vfs: 8
    #     mtu: 9214
    #     promisc: false
    #     qos:
    #       - vf_num: 0
    #         share: 10
    #     queues_per_vf:
    #       - num_queues: 16
    #         exclude_vf: 0,11,21
  server:
    ingress:
      public: true
      classes:
        namespace: "nginx"
        cluster: "nginx-cluster"
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
    external_policy_local: false
    node_port:
      enabled: false
      port: 30096

bootstrap:
  enabled: false
  ks_user: neutron
  script: |
    openstack token issue
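  # A more useful bootstrap is often substituted per environment; a minimal
  # sketch (the network name and provider settings are illustrative):
  # script: |
  #   openstack network create --share --external \
  #     --provider-network-type flat --provider-physical-network external public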

dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
          - neutron-image-repo-sync
        services:
          - endpoint: node
            service: local_image_registry
    targeted:
      sriov: {}
      l2gateway: {}
      bagpipe_bgp: {}
      openvswitch:
        dhcp:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-ovs-agent
        l3:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-ovs-agent
        metadata:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-ovs-agent
      linuxbridge:
        dhcp:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-lb-agent
        l3:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-lb-agent
        metadata:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-lb-agent
        lb_agent:
          pod: null
  static:
    bootstrap:
      services:
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute
    db_drop:
      services:
        - endpoint: internal
          service: oslo_db
    db_init:
      services:
        - endpoint: internal
          service: oslo_db
    db_sync:
      jobs:
        - neutron-db-init
      services:
        - endpoint: internal
          service: oslo_db
    dhcp:
      pod: null
      jobs:
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute
    ks_endpoints:
      jobs:
        - neutron-ks-service
      services:
        - endpoint: internal
          service: identity
    ks_service:
      services:
        - endpoint: internal
          service: identity
    ks_user:
      services:
        - endpoint: internal
          service: identity
    rabbit_init:
      services:
        - service: oslo_messaging
          endpoint: internal
    l3:
      pod: null
      jobs:
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute
    lb_agent:
      pod: null
      jobs:
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: network
    metadata:
      pod: null
      jobs:
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute
        - endpoint: public
          service: compute_metadata
    ovs_agent:
      jobs:
        - neutron-rabbit-init
      pod:
        - requireSameNode: true
          labels:
            application: openvswitch
            component: server
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: network
    server:
      jobs:
        - neutron-db-sync
        - neutron-ks-user
        - neutron-ks-endpoints
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: oslo_cache
        - endpoint: internal
          service: identity
    ironic_agent:
      jobs:
        - neutron-db-sync
        - neutron-ks-user
        - neutron-ks-endpoints
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: oslo_cache
        - endpoint: internal
          service: identity
    tests:
      services:
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute
    image_repo_sync:
      services:
        - endpoint: internal
          service: local_image_registry

pod:
  use_fqdn:
    neutron_agent: true
  probes:
    rpc_timeout: 60
    rpc_retries: 2
    dhcp_agent:
      dhcp_agent:
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 190
            timeoutSeconds: 185
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 600
            timeoutSeconds: 580
    l3_agent:
      l3_agent:
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 190
            timeoutSeconds: 185
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 600
            timeoutSeconds: 580
    lb_agent:
      lb_agent:
        readiness:
          enabled: true
    metadata_agent:
      metadata_agent:
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 190
            timeoutSeconds: 185
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 600
            timeoutSeconds: 580
    ovs_agent:
      ovs_agent:
        readiness:
          enabled: true
          params:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 600
            timeoutSeconds: 580
    sriov_agent:
      sriov_agent:
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 190
            timeoutSeconds: 185
    bagpipe_bgp:
      bagpipe_bgp:
        readiness:
          enabled: true
          params:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 60
    l2gw_agent:
      l2gw_agent:
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 15
            timeoutSeconds: 65
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 90
            timeoutSeconds: 70
    server:
      server:
        readiness:
          enabled: true
          params:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 60
  security_context:
    neutron_dhcp_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_dhcp_agent:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_l2gw_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_l2gw_agent:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_bagpipe_bgp:
      pod:
        runAsUser: 42424
      container:
        neutron_bagpipe_bgp:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_l3_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_l3_agent:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_lb_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_lb_agent_kernel_modules:
          capabilities:
            add:
              - SYS_MODULE
              - SYS_CHROOT
          runAsUser: 0
          readOnlyRootFilesystem: true
        neutron_lb_agent_init:
          privileged: true
          runAsUser: 0
          readOnlyRootFilesystem: true
        neutron_lb_agent:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_metadata_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_metadata_agent_init:
          runAsUser: 0
          readOnlyRootFilesystem: true
    neutron_ovs_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_openvswitch_agent_kernel_modules:
          capabilities:
            add:
              - SYS_MODULE
              - SYS_CHROOT
          runAsUser: 0
          readOnlyRootFilesystem: true
        neutron_ovs_agent_init:
          privileged: true
          runAsUser: 0
          readOnlyRootFilesystem: true
        neutron_ovs_agent:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_server:
      pod:
        runAsUser: 42424
      container:
        nginx:
          runAsUser: 0
          readOnlyRootFilesystem: false
        neutron_server:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
    neutron_sriov_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_sriov_agent_init:
          privileged: true
          runAsUser: 0
          readOnlyRootFilesystem: false
        neutron_sriov_agent:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_ironic_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_ironic_agent:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
    neutron_netns_cleanup_cron:
      pod:
        runAsUser: 42424
      container:
        neutron_netns_cleanup_cron:
          readOnlyRootFilesystem: true
          privileged: true
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
      weight:
        default: 10
  tolerations:
    neutron:
      enabled: false
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
  mounts:
    neutron_server:
      init_container: null
      neutron_server:
        volumeMounts:
        volumes:
    neutron_dhcp_agent:
      init_container: null
      neutron_dhcp_agent:
        volumeMounts:
        volumes:
    neutron_l3_agent:
      init_container: null
      neutron_l3_agent:
        volumeMounts:
        volumes:
    neutron_lb_agent:
      init_container: null
      neutron_lb_agent:
        volumeMounts:
        volumes:
    neutron_metadata_agent:
      init_container: null
      neutron_metadata_agent:
        volumeMounts:
        volumes:
    neutron_ovs_agent:
      init_container: null
      neutron_ovs_agent:
        volumeMounts:
        volumes:
    neutron_sriov_agent:
      init_container: null
      neutron_sriov_agent:
        volumeMounts:
        volumes:
    neutron_l2gw_agent:
      init_container: null
      neutron_l2gw_agent:
        volumeMounts:
        volumes:
    bagpipe_bgp:
      init_container: null
      bagpipe_bgp:
        volumeMounts:
        volumes:
    neutron_ironic_agent:
      init_container: null
      neutron_ironic_agent:
        volumeMounts:
        volumes:
    neutron_netns_cleanup_cron:
      init_container: null
      neutron_netns_cleanup_cron:
        volumeMounts:
        volumes:
    neutron_tests:
      init_container: null
      neutron_tests:
        volumeMounts:
        volumes:
    neutron_bootstrap:
      init_container: null
      neutron_bootstrap:
        volumeMounts:
        volumes:
    neutron_db_sync:
      neutron_db_sync:
        volumeMounts:
          - name: db-sync-conf
            mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini
            subPath: ml2_conf.ini
            readOnly: true
        volumes:
  replicas:
    server: 1
    ironic_agent: 1
  lifecycle:
    upgrades:
      deployments:
        revision_history: 3
        pod_replacement_strategy: RollingUpdate
        rolling_update:
          max_unavailable: 1
          max_surge: 3
      daemonsets:
        pod_replacement_strategy: RollingUpdate
        dhcp_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        l3_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        lb_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        metadata_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        ovs_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        sriov_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        netns_cleanup_cron:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
    disruption_budget:
      server:
        min_available: 0
    termination_grace_period:
      server:
        timeout: 30
      ironic_agent:
        timeout: 30
  resources:
    enabled: false
    agent:
      dhcp:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      l3:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      lb:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      metadata:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ovs:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      sriov:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      l2gw:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      bagpipe_bgp:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
    server:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    ironic_agent:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    netns_cleanup_cron:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    jobs:
      bootstrap:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      rabbit_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_drop:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_endpoints:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_service:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_user:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      tests:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      image_repo_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"

conf:
  rally_tests:
    force_project_purge: false
    run_tempest: false
    clean_up: |
      # NOTE: We will make a best effort to clean up rally-generated networks
      # and routers, but this should not block further automated deployment.
      set +e
      PATTERN="^[sc]_rally_"

      ROUTERS=$(openstack router list --format=value -c Name | grep -e $PATTERN | sort | tr -d '\r')
      NETWORKS=$(openstack network list --format=value -c Name | grep -e $PATTERN | sort | tr -d '\r')

      for ROUTER in $ROUTERS
      do
        openstack router unset --external-gateway $ROUTER
        openstack router set --disable --no-ha $ROUTER

        SUBNS=$(openstack router show $ROUTER -c interfaces_info --format=value | python -m json.tool | grep -oP '(?<="subnet_id": ")[a-f0-9\-]{36}(?=")' | sort | uniq)
        for SUBN in $SUBNS
        do
          openstack router remove subnet $ROUTER $SUBN
        done

        for PORT in $(openstack port list --router $ROUTER --format=value -c ID | tr -d '\r')
        do
          openstack router remove port $ROUTER $PORT
        done

        openstack router delete $ROUTER
      done

      for NETWORK in $NETWORKS
      do
        for PORT in $(openstack port list --network $NETWORK --format=value -c ID | tr -d '\r')
        do
          openstack port delete $PORT
        done
        openstack network delete $NETWORK
      done
      set -e
    tests:
      NeutronNetworks.create_and_delete_networks:
        - args:
            network_create_args: {}
          context:
            quotas:
              neutron:
                network: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_delete_ports:
        - args:
            network_create_args: {}
            port_create_args: {}
            ports_per_network: 10
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                port: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_delete_routers:
        - args:
            network_create_args: {}
            router_create_args: {}
            subnet_cidr_start: 1.1.0.0/30
            subnet_create_args: {}
            subnets_per_network: 2
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                router: -1
                subnet: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_delete_subnets:
        - args:
            network_create_args: {}
            subnet_cidr_start: 1.1.0.0/30
            subnet_create_args: {}
            subnets_per_network: 2
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                subnet: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_list_routers:
        - args:
            network_create_args: {}
            router_create_args: {}
            subnet_cidr_start: 1.1.0.0/30
            subnet_create_args: {}
            subnets_per_network: 2
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                router: -1
                subnet: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_list_subnets:
        - args:
            network_create_args: {}
            subnet_cidr_start: 1.1.0.0/30
            subnet_create_args: {}
            subnets_per_network: 2
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                subnet: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_show_network:
        - args:
            network_create_args: {}
          context:
            quotas:
              neutron:
                network: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_update_networks:
        - args:
            network_create_args: {}
            network_update_args:
              admin_state_up: false
          context:
            quotas:
              neutron:
                network: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_update_ports:
        - args:
            network_create_args: {}
            port_create_args: {}
            port_update_args:
              admin_state_up: false
              device_id: dummy_id
              device_owner: dummy_owner
            ports_per_network: 5
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                port: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_update_routers:
        - args:
            network_create_args: {}
            router_create_args: {}
            router_update_args:
              admin_state_up: false
            subnet_cidr_start: 1.1.0.0/30
            subnet_create_args: {}
            subnets_per_network: 2
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                router: -1
                subnet: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_update_subnets:
        - args:
            network_create_args: {}
            subnet_cidr_start: 1.4.0.0/16
            subnet_create_args: {}
            subnet_update_args:
              enable_dhcp: false
            subnets_per_network: 2
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                subnet: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.list_agents:
        - args:
            agent_args: {}
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronSecurityGroup.create_and_list_security_groups:
        - args:
            security_group_create_args: {}
          context:
            quotas:
              neutron:
                security_group: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronSecurityGroup.create_and_update_security_groups:
        - args:
            security_group_create_args: {}
            security_group_update_args: {}
          context:
            quotas:
              neutron:
                security_group: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
  paste:
    composite:neutron:
      use: egg:Paste#urlmap
      /: neutronversions_composite
      /v2.0: neutronapi_v2_0
    composite:neutronapi_v2_0:
      use: call:neutron.auth:pipeline_factory
      noauth: cors http_proxy_to_wsgi request_id catch_errors extensions neutronapiapp_v2_0
      keystone: cors http_proxy_to_wsgi request_id catch_errors authtoken audit keystonecontext extensions neutronapiapp_v2_0
    composite:neutronversions_composite:
      use: call:neutron.auth:pipeline_factory
      noauth: cors http_proxy_to_wsgi neutronversions
      keystone: cors http_proxy_to_wsgi neutronversions
    filter:request_id:
      paste.filter_factory: oslo_middleware:RequestId.factory
    filter:catch_errors:
      paste.filter_factory: oslo_middleware:CatchErrors.factory
    filter:cors:
      paste.filter_factory: oslo_middleware.cors:filter_factory
      oslo_config_project: neutron
    filter:http_proxy_to_wsgi:
      paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
    filter:keystonecontext:
      paste.filter_factory: neutron.auth:NeutronKeystoneContext.factory
    filter:authtoken:
      paste.filter_factory: keystonemiddleware.auth_token:filter_factory
    filter:audit:
      paste.filter_factory: keystonemiddleware.audit:filter_factory
      audit_map_file: /etc/neutron/api_audit_map.conf
    filter:extensions:
      paste.filter_factory: neutron.api.extensions:plugin_aware_extension_middleware_factory
    app:neutronversions:
      paste.app_factory: neutron.pecan_wsgi.app:versions_factory
    app:neutronapiapp_v2_0:
      paste.app_factory: neutron.api.v2.router:APIRouter.factory
    filter:osprofiler:
      paste.filter_factory: osprofiler.web:WsgiMiddleware.factory
  policy: {}
  api_audit_map:
    DEFAULT:
      target_endpoint_type: None
    custom_actions:
      add_router_interface: update/add
      remove_router_interface: update/remove
    path_keywords:
      floatingips: ip
      healthmonitors: healthmonitor
      health_monitors: health_monitor
      lb: None
      members: member
      metering-labels: label
      metering-label-rules: rule
      networks: network
      pools: pool
      ports: port
      routers: router
      quotas: quota
      security-groups: security-group
      security-group-rules: rule
      subnets: subnet
      vips: vip
    service_endpoints:
      network: service/network
  neutron_sudoers: |
    # This sudoers file supports rootwrap for both Kolla and LOCI Images.
    Defaults !requiretty
    Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin"
    neutron ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *, /var/lib/openstack/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *
    neutron ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf, /var/lib/openstack/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
  rootwrap: |
    # Configuration for neutron-rootwrap
    # This file should be owned by (and only-writeable by) the root user

    [DEFAULT]
    # List of directories to load filter definitions from (separated by ',').
    # These directories MUST all be only writeable by root !
    filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap,/var/lib/openstack/etc/neutron/rootwrap.d

    # List of directories to search executables in, in case filters do not
    # explicitly specify a full path (separated by ',')
    # If not specified, defaults to system PATH environment variable.
    # These directories MUST all be only writeable by root !
    exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin

    # Enable logging to syslog
    # Default value is False
    use_syslog=False

    # Which syslog facility to use.
    # Valid values include auth, authpriv, syslog, local0, local1...
    # Default value is 'syslog'
    syslog_log_facility=syslog

    # Which messages to log.
    # INFO means log all usage
    # ERROR means only log unsuccessful attempts
    syslog_log_level=ERROR

    [xenapi]
    # XenAPI configuration is only required by the L2 agent if it is to
    # target a XenServer/XCP compute host's dom0.
    xenapi_connection_url=<None>
    xenapi_connection_username=root
    xenapi_connection_password=<None>
  rootwrap_filters:
    debug:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # This is needed because we should ping
        # from inside a namespace which requires root
        # _alt variants allow to match -c and -w in any order
        # (used by NeutronDebugAgent.ping_all)
        ping: RegExpFilter, ping, root, ping, -w, \d+, -c, \d+, [0-9\.]+
        ping_alt: RegExpFilter, ping, root, ping, -c, \d+, -w, \d+, [0-9\.]+
        ping6: RegExpFilter, ping6, root, ping6, -w, \d+, -c, \d+, [0-9A-Fa-f:]+
        ping6_alt: RegExpFilter, ping6, root, ping6, -c, \d+, -w, \d+, [0-9A-Fa-f:]+
    dibbler:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # Filters for the dibbler-based reference implementation of the pluggable
        # Prefix Delegation driver. Other implementations using an alternative agent
        # should include a similar filter in this folder.

        # prefix_delegation_agent
        dibbler-client: CommandFilter, dibbler-client, root
    ipset_firewall:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]
        # neutron/agent/linux/iptables_firewall.py
        # "ipset", "-A", ...
        ipset: CommandFilter, ipset, root
    l3:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # arping
        arping: CommandFilter, arping, root

        # l3_agent
        sysctl: CommandFilter, sysctl, root
        route: CommandFilter, route, root
        radvd: CommandFilter, radvd, root

        # haproxy
        haproxy: RegExpFilter, haproxy, root, haproxy, -f, .*
        kill_haproxy: KillFilter, root, haproxy, -15, -9, -HUP

        # metadata proxy
        metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
        # RHEL invocation of the metadata proxy will report /usr/bin/python
        kill_metadata: KillFilter, root, python, -15, -9
        kill_metadata2: KillFilter, root, python2, -15, -9
        kill_metadata7: KillFilter, root, python2.7, -15, -9
        kill_metadata3: KillFilter, root, python3, -15, -9
        kill_metadata35: KillFilter, root, python3.5, -15, -9
        kill_metadata36: KillFilter, root, python3.6, -15, -9
        kill_metadata37: KillFilter, root, python3.7, -15, -9
        kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -15, -9, -HUP
        kill_radvd: KillFilter, root, /sbin/radvd, -15, -9, -HUP

        # ip_lib
        ip: IpFilter, ip, root
        find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
        ip_exec: IpNetnsExecFilter, ip, root

        # l3_tc_lib
        l3_tc_show_qdisc: RegExpFilter, tc, root, tc, qdisc, show, dev, .+
        l3_tc_add_qdisc_ingress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, ingress
        l3_tc_add_qdisc_egress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, root, handle, 1:, htb
        l3_tc_show_filters: RegExpFilter, tc, root, tc, -p, -s, -d, filter, show, dev, .+, parent, .+, prio, 1
        l3_tc_delete_filters: RegExpFilter, tc, root, tc, filter, del, dev, .+, parent, .+, prio, 1, handle, .+, u32
        l3_tc_add_filter_ingress: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, ip, prio, 1, u32, match, ip, dst, .+, police, rate, .+, burst, .+, drop, flowid, :1
        l3_tc_add_filter_egress: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, ip, prio, 1, u32, match, ip, src, .+, police, rate, .+, burst, .+, drop, flowid, :1

        # For ip monitor
        kill_ip_monitor: KillFilter, root, ip, -9

        # ovs_lib (if OVSInterfaceDriver is used)
        ovs-vsctl: CommandFilter, ovs-vsctl, root

        # iptables_manager
        iptables-save: CommandFilter, iptables-save, root
        iptables-restore: CommandFilter, iptables-restore, root
        ip6tables-save: CommandFilter, ip6tables-save, root
        ip6tables-restore: CommandFilter, ip6tables-restore, root

        # Keepalived
        keepalived: CommandFilter, keepalived, root
        kill_keepalived: KillFilter, root, keepalived, -HUP, -15, -9

        # l3 agent to delete floatingip's conntrack state
        conntrack: CommandFilter, conntrack, root

        # keepalived state change monitor
        keepalived_state_change: CommandFilter, neutron-keepalived-state-change, root
        # The following filters are used to kill the keepalived state change monitor.
        # Since the monitor runs as a Python script, the system reports that the
        # command of the process to be killed is python.
        # TODO(mlavalle) These kill filters will be updated once we come up with a
        # mechanism to kill using the name of the script being executed by Python
        kill_keepalived_monitor_py: KillFilter, root, python, -15
        kill_keepalived_monitor_py27: KillFilter, root, python2.7, -15
        kill_keepalived_monitor_py3: KillFilter, root, python3, -15
        kill_keepalived_monitor_py35: KillFilter, root, python3.5, -15
        kill_keepalived_monitor_py36: KillFilter, root, python3.6, -15
        kill_keepalived_monitor_py37: KillFilter, root, python3.7, -15
    netns_cleanup:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
        - netns_cleanup_cron
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # netns-cleanup
        netstat: CommandFilter, netstat, root
    dhcp:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
        - netns_cleanup_cron
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # dhcp-agent
        dnsmasq: CommandFilter, dnsmasq, root
        # dhcp-agent uses kill as well, that's handled by the generic KillFilter
        # it looks like these are the only signals needed, per
        # neutron/agent/linux/dhcp.py
        kill_dnsmasq: KillFilter, root, /sbin/dnsmasq, -9, -HUP, -15
        kill_dnsmasq_usr: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP, -15

        ovs-vsctl: CommandFilter, ovs-vsctl, root
        ivs-ctl: CommandFilter, ivs-ctl, root
        mm-ctl: CommandFilter, mm-ctl, root
        dhcp_release: CommandFilter, dhcp_release, root
        dhcp_release6: CommandFilter, dhcp_release6, root

        # metadata proxy
        metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
        # RHEL invocation of the metadata proxy will report /usr/bin/python
        kill_metadata: KillFilter, root, python, -9
        kill_metadata2: KillFilter, root, python2, -9
        kill_metadata7: KillFilter, root, python2.7, -9
        kill_metadata3: KillFilter, root, python3, -9
        kill_metadata35: KillFilter, root, python3.5, -9
        kill_metadata36: KillFilter, root, python3.6, -9
        kill_metadata37: KillFilter, root, python3.7, -9

        # ip_lib
        ip: IpFilter, ip, root
        find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
        ip_exec: IpNetnsExecFilter, ip, root
    ebtables:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        ebtables: CommandFilter, ebtables, root
    iptables_firewall:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # neutron/agent/linux/iptables_firewall.py
        # "iptables-save", ...
        iptables-save: CommandFilter, iptables-save, root
        iptables-restore: CommandFilter, iptables-restore, root
        ip6tables-save: CommandFilter, ip6tables-save, root
        ip6tables-restore: CommandFilter, ip6tables-restore, root

        # neutron/agent/linux/iptables_firewall.py
        # "iptables", "-A", ...
        iptables: CommandFilter, iptables, root
        ip6tables: CommandFilter, ip6tables, root

        # neutron/agent/linux/iptables_firewall.py
        sysctl: CommandFilter, sysctl, root

        # neutron/agent/linux/ip_conntrack.py
        conntrack: CommandFilter, conntrack, root
    linuxbridge_plugin:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # linuxbridge-agent
        # unclear whether both variants are necessary, but I'm transliterating
        # from the old mechanism
        brctl: CommandFilter, brctl, root
        bridge: CommandFilter, bridge, root

        # ip_lib
        ip: IpFilter, ip, root
        find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
        ip_exec: IpNetnsExecFilter, ip, root

        # tc commands needed for QoS support
        tc_replace_tbf: RegExpFilter, tc, root, tc, qdisc, replace, dev, .+, root, tbf, rate, .+, latency, .+, burst, .+
        tc_add_ingress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, ingress, handle, .+
        tc_delete: RegExpFilter, tc, root, tc, qdisc, del, dev, .+, .+
        tc_show_qdisc: RegExpFilter, tc, root, tc, qdisc, show, dev, .+
        tc_show_filters: RegExpFilter, tc, root, tc, filter, show, dev, .+, parent, .+
        tc_add_filter: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, all, prio, .+, basic, police, rate, .+, burst, .+, mtu, .+, drop
    openvswitch_plugin:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # openvswitch-agent
        # unclear whether both variants are necessary, but I'm transliterating
        # from the old mechanism
        ovs-vsctl: CommandFilter, ovs-vsctl, root
        # NOTE(yamamoto): of_interface=native doesn't use ovs-ofctl
        ovs-ofctl: CommandFilter, ovs-ofctl, root
        ovs-appctl: CommandFilter, ovs-appctl, root
        kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9
        ovsdb-client: CommandFilter, ovsdb-client, root
        xe: CommandFilter, xe, root

        # ip_lib
        ip: IpFilter, ip, root
        find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
        ip_exec: IpNetnsExecFilter, ip, root

        # needed for FDB extension
        bridge: CommandFilter, bridge, root
    privsep:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
        - netns_cleanup_cron
      content: |
        # Command filters to allow privsep daemon to be started via rootwrap.
        #
        # This file should be owned by (and only-writeable by) the root user

        [Filters]

        # By installing the following, the local admin is asserting that:
        #
        # 1. The python module load path used by privsep-helper
        #    command as root (as started by sudo/rootwrap) is trusted.
        # 2. Any oslo.config files matching the --config-file
        #    arguments below are trusted.
        # 3. Users allowed to run sudo/rootwrap with this configuration(*) are
        #    also allowed to invoke python "entrypoint" functions from
        #    --privsep_context with the additional (possibly root) privileges
        #    configured for that context.
        #
        # (*) ie: the user is allowed by /etc/sudoers to run rootwrap as root
        #
        # In particular, the oslo.config and python module path must not
        # be writeable by the unprivileged user.

        # oslo.privsep default neutron context
        privsep: PathFilter, privsep-helper, root,
          --config-file, /etc,
          --privsep_context, neutron.privileged.default,
          --privsep_sock_path, /

        # NOTE: A second `--config-file` arg can also be added above. Since
        # many neutron components are installed like that (eg: by devstack).
        # Adjust to suit local requirements.
    linux_vxlan:
      pods:
        - bagpipe_bgp
      content: |
        # bagpipe-bgp-rootwrap command filters for nodes on which bagpipe-bgp is
        # expected to control VXLAN Linux Bridge dataplane
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        #
        modprobe: CommandFilter, modprobe, root

        #
        brctl: CommandFilter, brctl, root
        bridge: CommandFilter, bridge, root

        # ip_lib
        ip: IpFilter, ip, root
        ip_exec: IpNetnsExecFilter, ip, root

        # shell (for piped commands)
        sh: CommandFilter, sh, root
    mpls_ovs_dataplane:
      pods:
        - bagpipe_bgp
      content: |
        # bagpipe-bgp-rootwrap command filters for nodes on which bagpipe-bgp is
        # expected to control MPLS OpenVSwitch dataplane
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # openvswitch
        ovs-vsctl: CommandFilter, ovs-vsctl, root
        ovs-ofctl: CommandFilter, ovs-ofctl, root

        # ip_lib
        ip: IpFilter, ip, root
        ip_exec: IpNetnsExecFilter, ip, root

        # shell (for piped commands)
        sh: CommandFilter, sh, root
  neutron:
    DEFAULT:
      metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy
      log_config_append: /etc/neutron/logging.conf
      # NOTE(portdirect): the bind port should not be defined, and is manipulated
      # via the endpoints section.
      bind_port: null
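      # For example (assuming the chart's usual endpoint layout), the listening
      # port would instead be changed with an override along the lines of:
      # endpoints:
      #   network:
      #     port:
      #       api:
      #         default: 9696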
      default_availability_zones: nova
      api_workers: 1
      rpc_workers: 4
      allow_overlapping_ips: True
      state_path: /var/lib/neutron
      # core_plugin can be: ml2, calico
      core_plugin: ml2
      # service_plugin can be: router, odl-router, empty for calico,
      # networking_ovn.l3.l3_ovn.OVNL3RouterPlugin for OVN
      service_plugins: router
      allow_automatic_l3agent_failover: True
      l3_ha: True
      max_l3_agents_per_router: 2
      l3_ha_network_type: vxlan
      network_auto_schedule: True
      router_auto_schedule: True
      # (NOTE)portdirect: if unset this is populated dynamically from the value in
      # 'network.backend' to sane defaults.
      interface_driver: null
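      # As a sketch of how that resolves: with the default openvswitch backend
      # this effectively becomes 'interface_driver: openvswitch', while a
      # linuxbridge backend would yield 'interface_driver: linuxbridge'.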
    oslo_concurrency:
      lock_path: /var/lib/neutron/tmp
    database:
      max_retries: -1
    agent:
      root_helper: sudo /var/lib/openstack/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
      root_helper_daemon: sudo /var/lib/openstack/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
    oslo_messaging_notifications:
      driver: messagingv2
    oslo_messaging_rabbit:
      rabbit_ha_queues: true
    oslo_middleware:
      enable_proxy_headers_parsing: true
    oslo_policy:
      policy_file: /etc/neutron/policy.yaml
    nova:
      auth_type: password
      auth_version: v3
      endpoint_type: internal
    placement:
      auth_type: password
      auth_version: v3
      endpoint_type: internal
    designate:
      auth_type: password
      auth_version: v3
      endpoint_type: internal
      allow_reverse_dns_lookup: true
    ironic:
      endpoint_type: internal
    keystone_authtoken:
      memcache_security_strategy: ENCRYPT
      auth_type: password
      auth_version: v3
    octavia:
      request_poll_timeout: 3000
  logging:
    loggers:
      keys:
        - root
        - neutron
        - neutron_taas
    handlers:
      keys:
        - stdout
        - stderr
        - "null"
    formatters:
      keys:
        - context
        - default
    logger_root:
      level: WARNING
      handlers: 'null'
    logger_neutron:
      level: INFO
      handlers:
        - stdout
      qualname: neutron
    logger_neutron_taas:
      level: INFO
      handlers:
        - stdout
      qualname: neutron_taas
    logger_amqp:
      level: WARNING
      handlers: stderr
      qualname: amqp
    logger_amqplib:
      level: WARNING
      handlers: stderr
      qualname: amqplib
    logger_eventletwsgi:
      level: WARNING
      handlers: stderr
      qualname: eventlet.wsgi.server
    logger_sqlalchemy:
      level: WARNING
      handlers: stderr
      qualname: sqlalchemy
    logger_boto:
      level: WARNING
      handlers: stderr
      qualname: boto
    handler_null:
      class: logging.NullHandler
      formatter: default
      args: ()
    handler_stdout:
      class: StreamHandler
      args: (sys.stdout,)
      formatter: context
    handler_stderr:
      class: StreamHandler
      args: (sys.stderr,)
      formatter: context
    formatter_context:
      class: oslo_log.formatters.ContextFormatter
      datefmt: "%Y-%m-%d %H:%M:%S"
    formatter_default:
      format: "%(message)s"
      datefmt: "%Y-%m-%d %H:%M:%S"
  plugins:
    ml2_conf:
      ml2:
        extension_drivers: port_security
        # (NOTE)portdirect: if unset this is populated dynamically from the value
        # in 'network.backend' to sane defaults.
        mechanism_drivers: null
        type_drivers: flat,vlan,vxlan
        tenant_network_types: vxlan
      ml2_type_vxlan:
        vni_ranges: 1:1000
        vxlan_group: 239.1.1.1
      ml2_type_flat:
        flat_networks: "*"
      # If you want to use the external network as a tagged provider network,
      # a range should be specified including the intended VLAN target
      # using ml2_type_vlan.network_vlan_ranges:
      # ml2_type_vlan:
      #   network_vlan_ranges: "external:1100:1110"
      agent:
        extensions: ""
    ml2_conf_sriov: null
    taas:
      taas:
        enabled: False
    openvswitch_agent:
      agent:
        tunnel_types: vxlan
        l2_population: True
        arp_responder: True
      ovs:
        bridge_mappings: "external:br-ex"
      securitygroup:
        firewall_driver: neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
    linuxbridge_agent:
      linux_bridge:
        # To define flat and VLAN connections, in LB we can assign a
        # specific interface to the flat/VLAN network name using:
        # physical_interface_mappings: "external:eth3"
        # Or we can set the mapping between the network and bridge:
        bridge_mappings: "external:br-ex"
        # The two options above are exclusive; do not use both of them at once.
      securitygroup:
        firewall_driver: iptables
      vxlan:
        l2_population: True
        arp_responder: True
    macvtap_agent: null
    sriov_agent:
      securitygroup:
        firewall_driver: neutron.agent.firewall.NoopFirewallDriver
      sriov_nic:
        physical_device_mappings: physnet2:enp3s0f1
        # NOTE: do not use null here, use an empty string
        exclude_devices: ""
  dhcp_agent:
    DEFAULT:
      # (NOTE)portdirect: if unset this is populated dynamically from the value in
      # 'network.backend' to sane defaults.
      interface_driver: null
      dnsmasq_config_file: /etc/neutron/dnsmasq.conf
      force_metadata: True
  dnsmasq: |
    #no-hosts
    #port=5353
    #cache-size=500
    #no-negcache
    #dns-forward-max=100
    #resolve-file=
    #strict-order
    #bind-interface
    #bind-dynamic
    #domain=
    #dhcp-range=10.10.10.10,10.10.10.100,24h
    #dhcp-lease-max=150
    #dhcp-host=11:22:33:44:55:66,ignore
    #dhcp-option=3,10.10.10.1
    #dhcp-option-force=26,1450

  l3_agent:
    DEFAULT:
      # (NOTE)portdirect: if unset this is populated dynamically from the value in
      # 'network.backend' to sane defaults.
      interface_driver: null
      agent_mode: legacy
  metering_agent: null
  metadata_agent:
    DEFAULT:
      # we cannot change the proxy socket path as it is declared
      # as a hostPath volume from agent daemonsets
      metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy
      metadata_proxy_shared_secret: "password"
    cache:
      enabled: true
      backend: dogpile.cache.memcached
  bagpipe_bgp: {}

  rabbitmq:
    # NOTE(rk760n): add an rmq policy to mirror messages from notification
    # queues and set an expiration time for them
    policies:
      - vhost: "neutron"
        name: "ha_ttl_neutron"
        definition:
          # mirror messages to other nodes in rmq cluster
          ha-mode: "all"
          ha-sync-mode: "automatic"
          # 70s
          message-ttl: 70000
        priority: 0
        apply-to: all
        pattern: '^(?!(amq\.|reply_)).*'
  ## NOTE: "besteffort" is meant for dev env with mixed compute type only.
  ## This helps prevent the sriov init script from failing due to a mismatched NIC.
  ## For prod env, the target NIC should match, and the init script should fail otherwise.
  ## sriov_init:
  ##   - besteffort
  sriov_init:
    -
  # auto_bridge_add is a table of "bridge: interface" pairs.
  # To automatically add physical interfaces to specific bridges,
  # for example eth3 to bridge br-physnet1, if0 to br0 and iface_two
  # to br1, do something like:
  #
  # auto_bridge_add:
  #   br-physnet1: eth3
  #   br0: if0
  #   br1: iface_two
  # br-ex will be added by default
  auto_bridge_add:
    br-ex: null

  # configuration of OVS DPDK bridges and NICs
  # this is a separate section and not part of the auto_bridge_add section
  # because additional parameters are needed
  ovs_dpdk:
    enabled: false
    # setting update_dpdk_bond_config to true will have default behavior,
    # which may cause disruptions in ovs dpdk traffic in case of neutron
    # ovs agent restart or when dpdk nic/bond configurations are changed.
    # Setting this to false will configure dpdk in the first run and
    # disable nic/bond config on event of restart or config update.
    update_dpdk_bond_config: true
    driver: uio_pci_generic
    # In case bonds are configured, the nics which are part of those bonds
    # must NOT be provided here.
    nics:
      - name: dpdk0
        pci_id: '0000:05:00.0'
        # Set VF Index in case some particular VF(s) need to be
        # used with ovs-dpdk.
        # vf_index: 0
        bridge: br-phy
        migrate_ip: true
        n_rxq: 2
        n_txq: 2
        pmd_rxq_affinity: "0:3,1:27"
        ofport_request: 1
        # optional parameters for tuning the OVS DPDK config
        # in alignment with the available hardware resources
        # mtu: 2000
        # n_rxq_size: 1024
        # n_txq_size: 1024
        # vhost-iommu-support: true
    bridges:
      - name: br-phy
        # optional parameter, in case tunnel traffic needs to be transported over a vlan underlay
        # - tunnel_underlay_vlan: 45
      # Optional parameter for configuring bonding in OVS-DPDK
      # - name: br-phy-bond0
    # bonds:
    #   - name: dpdkbond0
    #     bridge: br-phy-bond0
    #     # The IP from the first nic in nics list shall be used
    #     migrate_ip: true
    #     mtu: 2000
    #     # Please note that n_rxq is set for each NIC individually
    #     # rather than denoting the total number of rx queues for
    #     # the bond as a whole. So setting n_rxq = 2 below for ex.
    #     # would be 4 rx queues in total for the bond.
    #     # Same for n_txq
    #     n_rxq: 2
    #     n_txq: 2
    #     ofport_request: 1
    #     n_rxq_size: 1024
    #     n_txq_size: 1024
    #     vhost-iommu-support: true
    #     ovs_options: "bond_mode=active-backup"
    #     nics:
    #       - name: dpdk_b0s0
    #         pci_id: '0000:06:00.0'
    #         pmd_rxq_affinity: "0:3,1:27"
    #         # Set VF Index in case some particular VF(s) need to be
    #         # used with ovs-dpdk. In which case pci_id of PF must be
    #         # provided above.
    #         # vf_index: 0
    #       - name: dpdk_b0s1
    #         pci_id: '0000:07:00.0'
    #         pmd_rxq_affinity: "0:3,1:27"
    #         # Set VF Index in case some particular VF(s) need to be
    #         # used with ovs-dpdk. In which case pci_id of PF must be
    #         # provided above.
    #         # vf_index: 0
    #
    # Set the log level for each target module (default level is always dbg)
    # Supported log levels are: off, emer, err, warn, info, dbg
    #
    # modules:
    #   - name: dpdk
    #     log_level: info

# Names of secrets used by bootstrap and environmental checks
secrets:
  identity:
    admin: neutron-keystone-admin
    neutron: neutron-keystone-user
    test: neutron-keystone-test
  oslo_db:
    admin: neutron-db-admin
    neutron: neutron-db-user
  oslo_messaging:
    admin: neutron-rabbitmq-admin
    neutron: neutron-rabbitmq-user
  tls:
    compute_metadata:
      metadata:
        internal: metadata-tls-metadata
    network:
      server:
        public: neutron-tls-public
        internal: neutron-tls-server
  oci_image_registry:
    neutron: neutron-oci-image-registry

# typically overridden by environmental
# values, but should include all endpoints
# required by this chart
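# For example (the hostname is illustrative), a public FQDN for the neutron
# API is typically supplied per environment with an override such as:
# endpoints:
#   network:
#     host_fqdn_override:
#       public: neutron.openstack.example.com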
2050endpoints:
2051 cluster_domain_suffix: cluster.local
2052 local_image_registry:
2053 name: docker-registry
2054 namespace: docker-registry
2055 hosts:
2056 default: localhost
2057 internal: docker-registry
2058 node: localhost
2059 host_fqdn_override:
2060 default: null
2061 port:
2062 registry:
2063 node: 5000
2064 oci_image_registry:
2065 name: oci-image-registry
2066 namespace: oci-image-registry
2067 auth:
2068 enabled: false
2069 neutron:
2070 username: neutron
2071 password: password
2072 hosts:
2073 default: localhost
2074 host_fqdn_override:
2075 default: null
2076 port:
2077 registry:
2078 default: null
2079 oslo_db:
2080 auth:
2081 admin:
2082 username: root
2083 password: password
2084 secret:
2085 tls:
2086 internal: mariadb-tls-direct
2087 neutron:
2088 username: neutron
2089 password: password
2090 hosts:
2091 default: mariadb
2092 host_fqdn_override:
2093 default: null
2094 path: /neutron
2095 scheme: mysql+pymysql
2096 port:
2097 mysql:
2098 default: 3306
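# NOTE: the auth, hosts, path, scheme, and port values above are
# typically assembled into a database connection string of roughly
# this shape (illustrative):
#   mysql+pymysql://neutron:password@mariadb:3306/neutron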
2099 oslo_messaging:
2100 auth:
2101 admin:
2102 username: rabbitmq
2103 password: password
2104 secret:
2105 tls:
2106 internal: rabbitmq-tls-direct
2107 neutron:
2108 username: neutron
2109 password: password
2110 statefulset:
2111 replicas: 2
2112 name: rabbitmq-rabbitmq
2113 hosts:
2114 default: rabbitmq
2115 host_fqdn_override:
2116 default: null
2117 path: /neutron
2118 scheme: rabbit
2119 port:
2120 amqp:
2121 default: 5672
2122 http:
2123 default: 15672
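# NOTE: similarly, these values are typically assembled into an
# oslo.messaging transport URL of roughly this shape (illustrative):
#   rabbit://neutron:password@rabbitmq:5672/neutron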
2124 oslo_cache:
2125 auth:
2126 # NOTE(portdirect): this is used to define the value for the keystone
2127 # authtoken cache encryption key. If not set, it will be populated
2128 # automatically with a random value, but to take advantage of
2129 # this feature all services should be configured to use the same
2130 # key and the same memcached service (see the sketch below).
2131 memcache_secret_key: null
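# A sketch of an override (key value is hypothetical); the same value
# would also be set in the other services' oslo_cache sections:
# memcache_secret_key: 0b235a1005e5471288e17b7707aa3a70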
2132 hosts:
2133 default: memcached
2134 host_fqdn_override:
2135 default: null
2136 port:
2137 memcache:
2138 default: 11211
2139 compute:
2140 name: nova
2141 hosts:
2142 default: nova-api
2143 public: nova
2144 host_fqdn_override:
2145 default: null
2146 path:
2147 default: "/v2.1/%(tenant_id)s"
2148 scheme:
2149 default: 'http'
2150 port:
2151 api:
2152 default: 8774
2153 public: 80
2154 novncproxy:
2155 default: 6080
2156 compute_metadata:
2157 name: nova
2158 hosts:
2159 default: nova-metadata
2160 public: metadata
2161 host_fqdn_override:
2162 default: null
2163 path:
2164 default: /
2165 scheme:
2166 default: 'http'
2167 port:
2168 metadata:
2169 default: 8775
2170 public: 80
2171 identity:
2172 name: keystone
2173 auth:
2174 admin:
2175 region_name: RegionOne
2176 username: admin
2177 password: password
2178 project_name: admin
2179 user_domain_name: default
2180 project_domain_name: default
2181 neutron:
2182 role: admin
2183 region_name: RegionOne
2184 username: neutron
2185 password: password
2186 project_name: service
2187 user_domain_name: service
2188 project_domain_name: service
2189 nova:
2190 region_name: RegionOne
2191 project_name: service
2192 username: nova
2193 password: password
2194 user_domain_name: service
2195 project_domain_name: service
2196 placement:
2197 region_name: RegionOne
2198 project_name: service
2199 username: placement
2200 password: password
2201 user_domain_name: service
2202 project_domain_name: service
2203 designate:
2204 region_name: RegionOne
2205 project_name: service
2206 username: designate
2207 password: password
2208 user_domain_name: service
2209 project_domain_name: service
2210 ironic:
2211 region_name: RegionOne
2212 project_name: service
2213 username: ironic
2214 password: password
2215 user_domain_name: service
2216 project_domain_name: service
2217 test:
2218 role: admin
2219 region_name: RegionOne
2220 username: neutron-test
2221 password: password
2222 # NOTE: this project will be purged and reset if
2223 # conf.rally_tests.force_project_purge is set to true,
2224 # which may be required upon test failure. Be aware that this will
2225 # expunge all openstack objects, so if it is used, a separate project
2226 # should be used for each helm test, and it should be ensured
2227 # that this project is not in use by other tenants.
2228 project_name: test
2229 user_domain_name: service
2230 project_domain_name: service
2231 hosts:
2232 default: keystone
2233 internal: keystone-api
2234 host_fqdn_override:
2235 default: null
2236 path:
2237 default: /v3
2238 scheme:
2239 default: http
2240 port:
2241 api:
2242 default: 80
2243 internal: 5000
2244 network:
2245 name: neutron
2246 hosts:
2247 default: neutron-server
2248 public: neutron
2249 host_fqdn_override:
2250 default: null
2251 # NOTE(portdirect): this chart supports TLS for FQDN-overridden public
2252 # endpoints using the following format:
2253 # public:
2254 # host: null
2255 # tls:
2256 # crt: null
2257 # key: null
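# A filled-in sketch, assuming a hypothetical public FQDN and a
# pre-provisioned certificate pair:
# public:
#   host: neutron.example.com
#   tls:
#     crt: <PEM-encoded certificate>
#     key: <PEM-encoded private key>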
2258 path:
2259 default: null
2260 scheme:
2261 default: 'http'
2262 service: 'http'
2263 port:
2264 api:
2265 default: 9696
2266 public: 80
2267 service: 9696
2268 load_balancer:
2269 name: octavia
2270 hosts:
2271 default: octavia-api
2272 public: octavia
2273 host_fqdn_override:
2274 default: null
2275 path:
2276 default: null
2277 scheme:
2278 default: http
2279 port:
2280 api:
2281 default: 9876
2282 public: 80
2283 fluentd:
2284 namespace: osh-infra
2285 name: fluentd
2286 hosts:
2287 default: fluentd-logging
2288 host_fqdn_override:
2289 default: null
2290 path:
2291 default: null
2292 scheme: 'http'
2293 port:
2294 service:
2295 default: 24224
2296 metrics:
2297 default: 24220
2298 dns:
2299 name: designate
2300 hosts:
2301 default: designate-api
2302 public: designate
2303 host_fqdn_override:
2304 default: null
2305 path:
2306 default: /
2307 scheme:
2308 default: 'http'
2309 port:
2310 api:
2311 default: 9001
2312 public: 80
2313 baremetal:
2314 name: ironic
2315 hosts:
2316 default: ironic-api
2317 public: ironic
2318 host_fqdn_override:
2319 default: null
2320 path:
2321 default: null
2322 scheme:
2323 default: 'http'
2324 port:
2325 api:
2326 default: 6385
2327 public: 80
2328 # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress.
2329 # They are used to enable the egress K8s network policy.
2330 kube_dns:
2331 namespace: kube-system
2332 name: kubernetes-dns
2333 hosts:
2334 default: kube-dns
2335 host_fqdn_override:
2336 default: null
2337 path:
2338 default: null
2339 scheme: http
2340 port:
2341 dns:
2342 default: 53
2343 protocol: UDP
2344 ingress:
2345 namespace: null
2346 name: ingress
2347 hosts:
2348 default: ingress
2349 port:
2350 ingress:
2351 default: 80
2352
2353network_policy:
2354 neutron:
2355 # TODO(lamt): Need to tighten this ingress for security.
2356 ingress:
2357 - {}
2358 egress:
2359 - {}
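# A sketch of a tightened ingress rule, following the Kubernetes
# NetworkPolicy ingress schema (the namespace label and port are
# assumptions for illustration):
# neutron:
#   ingress:
#     - from:
#         - namespaceSelector:
#             matchLabels:
#               name: openstack
#       ports:
#         - protocol: TCP
#           port: 9696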
2360
2361helm3_hook: true
2362
2363health_probe:
2364 logging:
2365 level: ERROR
2366
2367tls:
2368 identity: false
2369 oslo_messaging: false
2370 oslo_db: false
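# NOTE: enabling any of the flags above generally also requires
# manifests.certificates: true below, plus TLS-enabled backing
# services; a sketch for TLS to the database (illustrative):
# tls:
#   oslo_db: true
# manifests:
#   certificates: true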
2371
2372manifests:
2373 certificates: false
2374 configmap_bin: true
2375 configmap_etc: true
2376 daemonset_dhcp_agent: true
2377 daemonset_l3_agent: true
2378 daemonset_lb_agent: true
2379 daemonset_metadata_agent: true
2380 daemonset_ovs_agent: true
2381 daemonset_sriov_agent: true
2382 daemonset_l2gw_agent: false
2383 daemonset_bagpipe_bgp: false
2384 daemonset_netns_cleanup_cron: true
2385 deployment_ironic_agent: false
2386 deployment_server: true
2387 ingress_server: true
2388 job_bootstrap: true
2389 job_db_init: true
2390 job_db_sync: true
2391 job_db_drop: false
2392 job_image_repo_sync: true
2393 job_ks_endpoints: true
2394 job_ks_service: true
2395 job_ks_user: true
2396 job_rabbit_init: true
2397 pdb_server: true
2398 pod_rally_test: true
2399 network_policy: false
2400 secret_db: true
2401 secret_ingress_tls: true
2402 secret_keystone: true
2403 secret_rabbitmq: true
2404 secret_registry: true
2405 service_ingress_server: true
2406 service_server: true
2407...