# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Default values for neutron.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value

---
release_group: null

images:
  tags:
    bootstrap: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    test: docker.io/xrally/xrally-openstack:2.0.0
    purge_test: docker.io/openstackhelm/ospurge:latest
    db_init: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    neutron_db_sync: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    db_drop: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    rabbit_init: docker.io/rabbitmq:3.7-management
    ks_user: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    ks_service: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    ks_endpoints: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    neutron_server: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_dhcp: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_metadata: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_l3: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_l2gw: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_openvswitch_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_linuxbridge_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_sriov_agent: docker.io/openstackhelm/neutron:stein-18.04-sriov
    neutron_sriov_agent_init: docker.io/openstackhelm/neutron:stein-18.04-sriov
    neutron_bagpipe_bgp: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_ironic_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_netns_cleanup_cron: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
    image_repo_sync: docker.io/docker:17.07.0
  pull_policy: "IfNotPresent"
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync

labels:
  agent:
    dhcp:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
    l3:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
    metadata:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
    l2gw:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  lb:
    node_selector_key: linuxbridge
    node_selector_value: enabled
  # openvswitch is a special case, requiring a special
  # label that can apply to both control hosts
  # and compute hosts, until we get more sophisticated
  # with our daemonset scheduling
  ovs:
    node_selector_key: openvswitch
    node_selector_value: enabled
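  # For example, assuming the default selector above, a node can be made
  # eligible to run the OVS agent with:
  #   kubectl label node <node-name> openvswitch=enabled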
  sriov:
    node_selector_key: sriov
    node_selector_value: enabled
  bagpipe_bgp:
    node_selector_key: openstack-compute-node
    node_selector_value: enabled
  server:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  ironic_agent:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  netns_cleanup_cron:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  test:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled

network:
  # Provide the type of network wiring that will be used
  # (see the commented example below)
  backend:
    - openvswitch
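  # For example, to wire with the Linux bridge agent instead (only one
  # backend should be active at a time):
  # backend:
  #   - linuxbridge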
  # NOTE(Portdirect): Share network namespaces with the host,
  # allowing agents to be restarted without packet loss and simpler
  # debugging. This feature requires mount propagation support.
  share_namespaces: true
  interface:
    # Tunnel interface will be used for VXLAN tunneling.
    tunnel: null
    # If tunnel is null, a fallback mechanism searches for an interface
    # whose route matches the tunnel network CIDR below.
    tunnel_network_cidr: "0/0"
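    # For example, to pin VXLAN tunnel traffic to a specific device
    # (interface name illustrative):
    # tunnel: bond0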
  # To perform setup of network interfaces using the SR-IOV init
  # container you can use a section similar to:
  # sriov:
  #   - device: ${DEV}
  #     num_vfs: 8
  #     mtu: 9214
  #     promisc: false
  #     qos:
  #       - vf_num: 0
  #         share: 10
  #         queues_per_vf:
  #           - num_queues: 16
  #             exclude_vf: 0,11,21
  server:
    ingress:
      public: true
      classes:
        namespace: "nginx"
        cluster: "nginx-cluster"
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
    external_policy_local: false
    node_port:
      enabled: false
      port: 30096

bootstrap:
  enabled: false
  ks_user: neutron
  script: |
    openstack token issue

dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
          - neutron-image-repo-sync
        services:
          - endpoint: node
            service: local_image_registry
    targeted:
      sriov: {}
      l2gateway: {}
      bagpipe_bgp: {}
      openvswitch:
        dhcp:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-ovs-agent
        l3:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-ovs-agent
        metadata:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-ovs-agent
      linuxbridge:
        dhcp:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-lb-agent
        l3:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-lb-agent
        metadata:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-lb-agent
        lb_agent:
          pod: null
  static:
    bootstrap:
      services:
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute
    db_drop:
      services:
        - endpoint: internal
          service: oslo_db
    db_init:
      services:
        - endpoint: internal
          service: oslo_db
    db_sync:
      jobs:
        - neutron-db-init
      services:
        - endpoint: internal
          service: oslo_db
    dhcp:
      pod: null
      jobs:
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute
    ks_endpoints:
      jobs:
        - neutron-ks-service
      services:
        - endpoint: internal
          service: identity
    ks_service:
      services:
        - endpoint: internal
          service: identity
    ks_user:
      services:
        - endpoint: internal
          service: identity
    rabbit_init:
      services:
        - service: oslo_messaging
          endpoint: internal
    l3:
      pod: null
      jobs:
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute
    lb_agent:
      pod: null
      jobs:
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: network
    metadata:
      pod: null
      jobs:
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute
        - endpoint: public
          service: compute_metadata
    ovs_agent:
      jobs:
        - neutron-rabbit-init
      pod:
        - requireSameNode: true
          labels:
            application: openvswitch
            component: server
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: network
    server:
      jobs:
        - neutron-db-sync
        - neutron-ks-user
        - neutron-ks-endpoints
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: oslo_cache
        - endpoint: internal
          service: identity
    ironic_agent:
      jobs:
        - neutron-db-sync
        - neutron-ks-user
        - neutron-ks-endpoints
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: oslo_cache
        - endpoint: internal
          service: identity
    tests:
      services:
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute
    image_repo_sync:
      services:
        - endpoint: internal
          service: local_image_registry

pod:
  use_fqdn:
    neutron_agent: true
  probes:
    rpc_timeout: 60
    rpc_retries: 2
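    # A hedged example of tuning a single probe via a values override (the
    # path mirrors the structure below; the value is illustrative):
    # pod:
    #   probes:
    #     dhcp_agent:
    #       dhcp_agent:
    #         readiness:
    #           params:
    #             initialDelaySeconds: 60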
    dhcp_agent:
      dhcp_agent:
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 190
            timeoutSeconds: 185
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 600
            timeoutSeconds: 580
    l3_agent:
      l3_agent:
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 190
            timeoutSeconds: 185
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 600
            timeoutSeconds: 580
    lb_agent:
      lb_agent:
        readiness:
          enabled: true
    metadata_agent:
      metadata_agent:
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 190
            timeoutSeconds: 185
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 600
            timeoutSeconds: 580
    ovs_agent:
      ovs_agent:
        readiness:
          enabled: true
          params:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 600
            timeoutSeconds: 580
    sriov_agent:
      sriov_agent:
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 190
            timeoutSeconds: 185
    bagpipe_bgp:
      bagpipe_bgp:
        readiness:
          enabled: true
          params:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 60
    l2gw_agent:
      l2gw_agent:
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 15
            timeoutSeconds: 65
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 90
            timeoutSeconds: 70
    server:
      server:
        readiness:
          enabled: true
          params:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 60
  security_context:
    neutron_dhcp_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_dhcp_agent:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_l2gw_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_l2gw_agent:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_bagpipe_bgp:
      pod:
        runAsUser: 42424
      container:
        neutron_bagpipe_bgp:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_l3_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_l3_agent:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_lb_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_lb_agent_kernel_modules:
          capabilities:
            add:
              - SYS_MODULE
              - SYS_CHROOT
          runAsUser: 0
          readOnlyRootFilesystem: true
        neutron_lb_agent_init:
          privileged: true
          runAsUser: 0
          readOnlyRootFilesystem: true
        neutron_lb_agent:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_metadata_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_metadata_agent_init:
          runAsUser: 0
          readOnlyRootFilesystem: true
    neutron_ovs_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_openvswitch_agent_kernel_modules:
          capabilities:
            add:
              - SYS_MODULE
              - SYS_CHROOT
          runAsUser: 0
          readOnlyRootFilesystem: true
        neutron_ovs_agent_init:
          privileged: true
          runAsUser: 0
          readOnlyRootFilesystem: true
        neutron_ovs_agent:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_server:
      pod:
        runAsUser: 42424
      container:
        nginx:
          runAsUser: 0
          readOnlyRootFilesystem: false
        neutron_server:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
    neutron_sriov_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_sriov_agent_init:
          privileged: true
          runAsUser: 0
          readOnlyRootFilesystem: false
        neutron_sriov_agent:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_ironic_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_ironic_agent:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
    neutron_netns_cleanup_cron:
      pod:
        runAsUser: 42424
      container:
        neutron_netns_cleanup_cron:
          readOnlyRootFilesystem: true
          privileged: true
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
      weight:
        default: 10
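  # A sketch of enforcing hard anti-affinity instead of the soft default,
  # assuming replicas should never be co-located on one node:
  # affinity:
  #   anti:
  #     type:
  #       default: requiredDuringSchedulingIgnoredDuringExecution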
  tolerations:
    neutron:
      enabled: false
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
  mounts:
    neutron_server:
      init_container: null
      neutron_server:
        volumeMounts:
        volumes:
    neutron_dhcp_agent:
      init_container: null
      neutron_dhcp_agent:
        volumeMounts:
        volumes:
    neutron_l3_agent:
      init_container: null
      neutron_l3_agent:
        volumeMounts:
        volumes:
    neutron_lb_agent:
      init_container: null
      neutron_lb_agent:
        volumeMounts:
        volumes:
    neutron_metadata_agent:
      init_container: null
      neutron_metadata_agent:
        volumeMounts:
        volumes:
    neutron_ovs_agent:
      init_container: null
      neutron_ovs_agent:
        volumeMounts:
        volumes:
    neutron_sriov_agent:
      init_container: null
      neutron_sriov_agent:
        volumeMounts:
        volumes:
    neutron_l2gw_agent:
      init_container: null
      neutron_l2gw_agent:
        volumeMounts:
        volumes:
    bagpipe_bgp:
      init_container: null
      bagpipe_bgp:
        volumeMounts:
        volumes:
    neutron_ironic_agent:
      init_container: null
      neutron_ironic_agent:
        volumeMounts:
        volumes:
    neutron_netns_cleanup_cron:
      init_container: null
      neutron_netns_cleanup_cron:
        volumeMounts:
        volumes:
    neutron_tests:
      init_container: null
      neutron_tests:
        volumeMounts:
        volumes:
    neutron_bootstrap:
      init_container: null
      neutron_bootstrap:
        volumeMounts:
        volumes:
    neutron_db_sync:
      neutron_db_sync:
        volumeMounts:
          - name: db-sync-conf
            mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini
            subPath: ml2_conf.ini
            readOnly: true
        volumes:
  replicas:
    server: 1
    ironic_agent: 1
  lifecycle:
    upgrades:
      deployments:
        revision_history: 3
        pod_replacement_strategy: RollingUpdate
        rolling_update:
          max_unavailable: 1
          max_surge: 3
      daemonsets:
        pod_replacement_strategy: RollingUpdate
        dhcp_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        l3_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        lb_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        metadata_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        ovs_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        sriov_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        netns_cleanup_cron:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
    disruption_budget:
      server:
        min_available: 0
    termination_grace_period:
      server:
        timeout: 30
      ironic_agent:
        timeout: 30
  resources:
    enabled: false
    agent:
      dhcp:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      l3:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      lb:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      metadata:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ovs:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      sriov:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      l2gw:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      bagpipe_bgp:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
    server:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    ironic_agent:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    netns_cleanup_cron:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    jobs:
      bootstrap:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      rabbit_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_drop:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_endpoints:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_service:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_user:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      tests:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      image_repo_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"

conf:
  rally_tests:
    force_project_purge: false
    run_tempest: false
    clean_up: |
      # NOTE: We will make the best effort to clean up rally-generated networks and routers,
      # but this should not block further automated deployment.
      set +e
      PATTERN="^[sc]_rally_"

      ROUTERS=$(openstack router list --format=value -c Name | grep -e $PATTERN | sort | tr -d '\r')
      NETWORKS=$(openstack network list --format=value -c Name | grep -e $PATTERN | sort | tr -d '\r')

      for ROUTER in $ROUTERS
      do
        openstack router unset --external-gateway $ROUTER
        openstack router set --disable --no-ha $ROUTER

        SUBNS=$(openstack router show $ROUTER -c interfaces_info --format=value | python -m json.tool | grep -oP '(?<="subnet_id": ")[a-f0-9\-]{36}(?=")' | sort | uniq)
        for SUBN in $SUBNS
        do
          openstack router remove subnet $ROUTER $SUBN
        done

        for PORT in $(openstack port list --router $ROUTER --format=value -c ID | tr -d '\r')
        do
          openstack router remove port $ROUTER $PORT
        done

        openstack router delete $ROUTER
      done

      for NETWORK in $NETWORKS
      do
        for PORT in $(openstack port list --network $NETWORK --format=value -c ID | tr -d '\r')
        do
          openstack port delete $PORT
        done
        openstack network delete $NETWORK
      done
      set -e
    tests:
      NeutronNetworks.create_and_delete_networks:
        - args:
            network_create_args: {}
          context:
            quotas:
              neutron:
                network: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_delete_ports:
        - args:
            network_create_args: {}
            port_create_args: {}
            ports_per_network: 10
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                port: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_delete_routers:
        - args:
            network_create_args: {}
            router_create_args: {}
            subnet_cidr_start: 1.1.0.0/30
            subnet_create_args: {}
            subnets_per_network: 2
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                router: -1
                subnet: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_delete_subnets:
        - args:
            network_create_args: {}
            subnet_cidr_start: 1.1.0.0/30
            subnet_create_args: {}
            subnets_per_network: 2
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                subnet: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_list_routers:
        - args:
            network_create_args: {}
            router_create_args: {}
            subnet_cidr_start: 1.1.0.0/30
            subnet_create_args: {}
            subnets_per_network: 2
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                router: -1
                subnet: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_list_subnets:
        - args:
            network_create_args: {}
            subnet_cidr_start: 1.1.0.0/30
            subnet_create_args: {}
            subnets_per_network: 2
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                subnet: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_show_network:
        - args:
            network_create_args: {}
          context:
            quotas:
              neutron:
                network: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_update_networks:
        - args:
            network_create_args: {}
            network_update_args:
              admin_state_up: false
          context:
            quotas:
              neutron:
                network: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_update_ports:
        - args:
            network_create_args: {}
            port_create_args: {}
            port_update_args:
              admin_state_up: false
              device_id: dummy_id
              device_owner: dummy_owner
            ports_per_network: 5
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                port: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_update_routers:
        - args:
            network_create_args: {}
            router_create_args: {}
            router_update_args:
              admin_state_up: false
            subnet_cidr_start: 1.1.0.0/30
            subnet_create_args: {}
            subnets_per_network: 2
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                router: -1
                subnet: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_update_subnets:
        - args:
            network_create_args: {}
            subnet_cidr_start: 1.4.0.0/16
            subnet_create_args: {}
            subnet_update_args:
              enable_dhcp: false
            subnets_per_network: 2
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                subnet: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.list_agents:
        - args:
            agent_args: {}
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronSecurityGroup.create_and_list_security_groups:
        - args:
            security_group_create_args: {}
          context:
            quotas:
              neutron:
                security_group: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronSecurityGroup.create_and_update_security_groups:
        - args:
            security_group_create_args: {}
            security_group_update_args: {}
          context:
            quotas:
              neutron:
                security_group: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
  paste:
    composite:neutron:
      use: egg:Paste#urlmap
      /: neutronversions_composite
      /v2.0: neutronapi_v2_0
    composite:neutronapi_v2_0:
      use: call:neutron.auth:pipeline_factory
      noauth: cors http_proxy_to_wsgi request_id catch_errors extensions neutronapiapp_v2_0
      keystone: cors http_proxy_to_wsgi request_id catch_errors authtoken audit keystonecontext extensions neutronapiapp_v2_0
    composite:neutronversions_composite:
      use: call:neutron.auth:pipeline_factory
      noauth: cors http_proxy_to_wsgi neutronversions
      keystone: cors http_proxy_to_wsgi neutronversions
    filter:request_id:
      paste.filter_factory: oslo_middleware:RequestId.factory
    filter:catch_errors:
      paste.filter_factory: oslo_middleware:CatchErrors.factory
    filter:cors:
      paste.filter_factory: oslo_middleware.cors:filter_factory
      oslo_config_project: neutron
    filter:http_proxy_to_wsgi:
      paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
    filter:keystonecontext:
      paste.filter_factory: neutron.auth:NeutronKeystoneContext.factory
    filter:authtoken:
      paste.filter_factory: keystonemiddleware.auth_token:filter_factory
    filter:audit:
      paste.filter_factory: keystonemiddleware.audit:filter_factory
      audit_map_file: /etc/neutron/api_audit_map.conf
    filter:extensions:
      paste.filter_factory: neutron.api.extensions:plugin_aware_extension_middleware_factory
    app:neutronversions:
      paste.app_factory: neutron.pecan_wsgi.app:versions_factory
    app:neutronapiapp_v2_0:
      paste.app_factory: neutron.api.v2.router:APIRouter.factory
    filter:osprofiler:
      paste.filter_factory: osprofiler.web:WsgiMiddleware.factory
  policy: {}
  api_audit_map:
    DEFAULT:
      target_endpoint_type: None
    custom_actions:
      add_router_interface: update/add
      remove_router_interface: update/remove
    path_keywords:
      floatingips: ip
      healthmonitors: healthmonitor
      health_monitors: health_monitor
      lb: None
      members: member
      metering-labels: label
      metering-label-rules: rule
      networks: network
      pools: pool
      ports: port
      routers: router
      quotas: quota
      security-groups: security-group
      security-group-rules: rule
      subnets: subnet
      vips: vip
    service_endpoints:
      network: service/network
  neutron_sudoers: |
    # This sudoers file supports rootwrap for both Kolla and LOCI Images.
    Defaults !requiretty
    Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin"
    neutron ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *, /var/lib/openstack/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *
    neutron ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf, /var/lib/openstack/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
  rootwrap: |
    # Configuration for neutron-rootwrap
    # This file should be owned by (and only-writeable by) the root user

    [DEFAULT]
    # List of directories to load filter definitions from (separated by ',').
    # These directories MUST all be only writeable by root !
    filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap,/var/lib/openstack/etc/neutron/rootwrap.d

    # List of directories to search executables in, in case filters do not
    # explicitly specify a full path (separated by ',')
    # If not specified, defaults to system PATH environment variable.
    # These directories MUST all be only writeable by root !
    exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin

    # Enable logging to syslog
    # Default value is False
    use_syslog=False

    # Which syslog facility to use.
    # Valid values include auth, authpriv, syslog, local0, local1...
    # Default value is 'syslog'
    syslog_log_facility=syslog

    # Which messages to log.
    # INFO means log all usage
    # ERROR means only log unsuccessful attempts
    syslog_log_level=ERROR

    [xenapi]
    # XenAPI configuration is only required by the L2 agent if it is to
    # target a XenServer/XCP compute host's dom0.
    xenapi_connection_url=<None>
    xenapi_connection_username=root
    xenapi_connection_password=<None>
  rootwrap_filters:
    debug:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # This is needed because we should ping
        # from inside a namespace which requires root
        # _alt variants allow to match -c and -w in any order
        # (used by NeutronDebugAgent.ping_all)
        ping: RegExpFilter, ping, root, ping, -w, \d+, -c, \d+, [0-9\.]+
        ping_alt: RegExpFilter, ping, root, ping, -c, \d+, -w, \d+, [0-9\.]+
        ping6: RegExpFilter, ping6, root, ping6, -w, \d+, -c, \d+, [0-9A-Fa-f:]+
        ping6_alt: RegExpFilter, ping6, root, ping6, -c, \d+, -w, \d+, [0-9A-Fa-f:]+
    dibbler:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # Filters for the dibbler-based reference implementation of the pluggable
        # Prefix Delegation driver. Other implementations using an alternative agent
        # should include a similar filter in this folder.

        # prefix_delegation_agent
        dibbler-client: CommandFilter, dibbler-client, root
    ipset_firewall:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]
        # neutron/agent/linux/iptables_firewall.py
        # "ipset", "-A", ...
        ipset: CommandFilter, ipset, root
    l3:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # arping
        arping: CommandFilter, arping, root

        # l3_agent
        sysctl: CommandFilter, sysctl, root
        route: CommandFilter, route, root
        radvd: CommandFilter, radvd, root

        # haproxy
        haproxy: RegExpFilter, haproxy, root, haproxy, -f, .*
        kill_haproxy: KillFilter, root, haproxy, -15, -9, -HUP

        # metadata proxy
        metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
        # RHEL invocation of the metadata proxy will report /usr/bin/python
        kill_metadata: KillFilter, root, python, -15, -9
        kill_metadata2: KillFilter, root, python2, -15, -9
        kill_metadata7: KillFilter, root, python2.7, -15, -9
        kill_metadata3: KillFilter, root, python3, -15, -9
        kill_metadata35: KillFilter, root, python3.5, -15, -9
        kill_metadata36: KillFilter, root, python3.6, -15, -9
        kill_metadata37: KillFilter, root, python3.7, -15, -9
        kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -15, -9, -HUP
        kill_radvd: KillFilter, root, /sbin/radvd, -15, -9, -HUP

        # ip_lib
        ip: IpFilter, ip, root
        find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
        ip_exec: IpNetnsExecFilter, ip, root

        # l3_tc_lib
        l3_tc_show_qdisc: RegExpFilter, tc, root, tc, qdisc, show, dev, .+
        l3_tc_add_qdisc_ingress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, ingress
        l3_tc_add_qdisc_egress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, root, handle, 1:, htb
        l3_tc_show_filters: RegExpFilter, tc, root, tc, -p, -s, -d, filter, show, dev, .+, parent, .+, prio, 1
        l3_tc_delete_filters: RegExpFilter, tc, root, tc, filter, del, dev, .+, parent, .+, prio, 1, handle, .+, u32
        l3_tc_add_filter_ingress: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, ip, prio, 1, u32, match, ip, dst, .+, police, rate, .+, burst, .+, drop, flowid, :1
        l3_tc_add_filter_egress: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, ip, prio, 1, u32, match, ip, src, .+, police, rate, .+, burst, .+, drop, flowid, :1

        # For ip monitor
        kill_ip_monitor: KillFilter, root, ip, -9

        # ovs_lib (if OVSInterfaceDriver is used)
        ovs-vsctl: CommandFilter, ovs-vsctl, root

        # iptables_manager
        iptables-save: CommandFilter, iptables-save, root
        iptables-restore: CommandFilter, iptables-restore, root
        ip6tables-save: CommandFilter, ip6tables-save, root
        ip6tables-restore: CommandFilter, ip6tables-restore, root

        # Keepalived
        keepalived: CommandFilter, keepalived, root
        kill_keepalived: KillFilter, root, keepalived, -HUP, -15, -9

        # l3 agent to delete floatingip's conntrack state
        conntrack: CommandFilter, conntrack, root

        # keepalived state change monitor
        keepalived_state_change: CommandFilter, neutron-keepalived-state-change, root
        # The following filters are used to kill the keepalived state change monitor.
        # Since the monitor runs as a Python script, the system reports that the
        # command of the process to be killed is python.
        # TODO(mlavalle) These kill filters will be updated once we come up with a
        # mechanism to kill using the name of the script being executed by Python
        kill_keepalived_monitor_py: KillFilter, root, python, -15
        kill_keepalived_monitor_py27: KillFilter, root, python2.7, -15
        kill_keepalived_monitor_py3: KillFilter, root, python3, -15
        kill_keepalived_monitor_py35: KillFilter, root, python3.5, -15
        kill_keepalived_monitor_py36: KillFilter, root, python3.6, -15
        kill_keepalived_monitor_py37: KillFilter, root, python3.7, -15
    netns_cleanup:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
        - netns_cleanup_cron
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # netns-cleanup
        netstat: CommandFilter, netstat, root
    dhcp:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
        - netns_cleanup_cron
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # dhcp-agent
        dnsmasq: CommandFilter, dnsmasq, root
        # dhcp-agent uses kill as well, that's handled by the generic KillFilter
        # it looks like these are the only signals needed, per
        # neutron/agent/linux/dhcp.py
        kill_dnsmasq: KillFilter, root, /sbin/dnsmasq, -9, -HUP, -15
        kill_dnsmasq_usr: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP, -15

        ovs-vsctl: CommandFilter, ovs-vsctl, root
        ivs-ctl: CommandFilter, ivs-ctl, root
        mm-ctl: CommandFilter, mm-ctl, root
        dhcp_release: CommandFilter, dhcp_release, root
        dhcp_release6: CommandFilter, dhcp_release6, root

        # metadata proxy
        metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
        # RHEL invocation of the metadata proxy will report /usr/bin/python
        kill_metadata: KillFilter, root, python, -9
        kill_metadata2: KillFilter, root, python2, -9
        kill_metadata7: KillFilter, root, python2.7, -9
        kill_metadata3: KillFilter, root, python3, -9
        kill_metadata35: KillFilter, root, python3.5, -9
        kill_metadata36: KillFilter, root, python3.6, -9
        kill_metadata37: KillFilter, root, python3.7, -9

        # ip_lib
        ip: IpFilter, ip, root
        find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
        ip_exec: IpNetnsExecFilter, ip, root
    ebtables:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        ebtables: CommandFilter, ebtables, root
    iptables_firewall:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # neutron/agent/linux/iptables_firewall.py
        # "iptables-save", ...
        iptables-save: CommandFilter, iptables-save, root
        iptables-restore: CommandFilter, iptables-restore, root
        ip6tables-save: CommandFilter, ip6tables-save, root
        ip6tables-restore: CommandFilter, ip6tables-restore, root

        # neutron/agent/linux/iptables_firewall.py
        # "iptables", "-A", ...
        iptables: CommandFilter, iptables, root
        ip6tables: CommandFilter, ip6tables, root

        # neutron/agent/linux/iptables_firewall.py
        sysctl: CommandFilter, sysctl, root

        # neutron/agent/linux/ip_conntrack.py
        conntrack: CommandFilter, conntrack, root
    linuxbridge_plugin:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # linuxbridge-agent
        # unclear whether both variants are necessary, but I'm transliterating
        # from the old mechanism
        brctl: CommandFilter, brctl, root
        bridge: CommandFilter, bridge, root

        # ip_lib
        ip: IpFilter, ip, root
        find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
        ip_exec: IpNetnsExecFilter, ip, root

        # tc commands needed for QoS support
        tc_replace_tbf: RegExpFilter, tc, root, tc, qdisc, replace, dev, .+, root, tbf, rate, .+, latency, .+, burst, .+
        tc_add_ingress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, ingress, handle, .+
        tc_delete: RegExpFilter, tc, root, tc, qdisc, del, dev, .+, .+
        tc_show_qdisc: RegExpFilter, tc, root, tc, qdisc, show, dev, .+
        tc_show_filters: RegExpFilter, tc, root, tc, filter, show, dev, .+, parent, .+
        tc_add_filter: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, all, prio, .+, basic, police, rate, .+, burst, .+, mtu, .+, drop
    openvswitch_plugin:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # openvswitch-agent
        # unclear whether both variants are necessary, but I'm transliterating
        # from the old mechanism
        ovs-vsctl: CommandFilter, ovs-vsctl, root
        # NOTE(yamamoto): of_interface=native doesn't use ovs-ofctl
        ovs-ofctl: CommandFilter, ovs-ofctl, root
        ovs-appctl: CommandFilter, ovs-appctl, root
        kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9
        ovsdb-client: CommandFilter, ovsdb-client, root
        xe: CommandFilter, xe, root

        # ip_lib
        ip: IpFilter, ip, root
        find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
        ip_exec: IpNetnsExecFilter, ip, root

        # needed for FDB extension
        bridge: CommandFilter, bridge, root
    privsep:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
        - netns_cleanup_cron
      content: |
        # Command filters to allow privsep daemon to be started via rootwrap.
        #
        # This file should be owned by (and only-writeable by) the root user

        [Filters]

        # By installing the following, the local admin is asserting that:
        #
        # 1. The python module load path used by privsep-helper
        #    command as root (as started by sudo/rootwrap) is trusted.
        # 2. Any oslo.config files matching the --config-file
        #    arguments below are trusted.
        # 3. Users allowed to run sudo/rootwrap with this configuration(*) are
        #    also allowed to invoke python "entrypoint" functions from
        #    --privsep_context with the additional (possibly root) privileges
        #    configured for that context.
        #
        # (*) ie: the user is allowed by /etc/sudoers to run rootwrap as root
        #
        # In particular, the oslo.config and python module path must not
        # be writeable by the unprivileged user.

        # oslo.privsep default neutron context
        privsep: PathFilter, privsep-helper, root,
          --config-file, /etc,
          --privsep_context, neutron.privileged.default,
          --privsep_sock_path, /

        # NOTE: A second `--config-file` arg can also be added above. Since
        # many neutron components are installed like that (eg: by devstack).
        # Adjust to suit local requirements.
    linux_vxlan:
      pods:
        - bagpipe_bgp
      content: |
        # bagpipe-bgp-rootwrap command filters for nodes on which bagpipe-bgp is
        # expected to control VXLAN Linux Bridge dataplane
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        #
        modprobe: CommandFilter, modprobe, root

        #
        brctl: CommandFilter, brctl, root
        bridge: CommandFilter, bridge, root

        # ip_lib
        ip: IpFilter, ip, root
        ip_exec: IpNetnsExecFilter, ip, root

        # shell (for piped commands)
        sh: CommandFilter, sh, root
    mpls_ovs_dataplane:
      pods:
        - bagpipe_bgp
      content: |
        # bagpipe-bgp-rootwrap command filters for nodes on which bagpipe-bgp is
        # expected to control MPLS OpenVSwitch dataplane
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # openvswitch
        ovs-vsctl: CommandFilter, ovs-vsctl, root
        ovs-ofctl: CommandFilter, ovs-ofctl, root

        # ip_lib
        ip: IpFilter, ip, root
        ip_exec: IpNetnsExecFilter, ip, root

        # shell (for piped commands)
        sh: CommandFilter, sh, root
  neutron:
    DEFAULT:
      metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy
      log_config_append: /etc/neutron/logging.conf
      # NOTE(portdirect): the bind port should not be defined, and is manipulated
      # via the endpoints section.
      bind_port: null
      default_availability_zones: nova
      api_workers: 1
      rpc_workers: 4
      allow_overlapping_ips: True
      state_path: /var/lib/neutron
      # core_plugin can be: ml2, calico
      core_plugin: ml2
      # service_plugin can be: router, odl-router, empty for calico,
      # networking_ovn.l3.l3_ovn.OVNL3RouterPlugin for OVN
      service_plugins: router
      allow_automatic_l3agent_failover: True
      l3_ha: True
      max_l3_agents_per_router: 2
      l3_ha_network_type: vxlan
      network_auto_schedule: True
      router_auto_schedule: True
      # (NOTE)portdirect: if unset this is populated dynamically from the value in
      # 'network.backend' to sane defaults.
      interface_driver: null
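      # For example, to bypass the dynamic default and pin the driver
      # explicitly (a sketch; 'openvswitch' and 'linuxbridge' are aliases
      # the upstream agents accept):
      # interface_driver: openvswitch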
    oslo_concurrency:
      lock_path: /var/lib/neutron/tmp
    database:
      max_retries: -1
    agent:
      root_helper: sudo /var/lib/openstack/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
      root_helper_daemon: sudo /var/lib/openstack/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
    oslo_messaging_notifications:
      driver: messagingv2
    oslo_messaging_rabbit:
      rabbit_ha_queues: true
    oslo_middleware:
      enable_proxy_headers_parsing: true
    oslo_policy:
      policy_file: /etc/neutron/policy.yaml
    nova:
      auth_type: password
      auth_version: v3
      endpoint_type: internal
    designate:
      auth_type: password
      auth_version: v3
      endpoint_type: internal
      allow_reverse_dns_lookup: true
    ironic:
      endpoint_type: internal
    keystone_authtoken:
      memcache_security_strategy: ENCRYPT
      auth_type: password
      auth_version: v3
    octavia:
      request_poll_timeout: 3000
  logging:
    loggers:
      keys:
        - root
        - neutron
        - neutron_taas
    handlers:
      keys:
        - stdout
        - stderr
        - "null"
    formatters:
      keys:
        - context
        - default
    logger_root:
      level: WARNING
      handlers: 'null'
    logger_neutron:
      level: INFO
      handlers:
        - stdout
      qualname: neutron
    logger_neutron_taas:
      level: INFO
      handlers:
        - stdout
      qualname: neutron_taas
    logger_amqp:
      level: WARNING
      handlers: stderr
      qualname: amqp
    logger_amqplib:
      level: WARNING
      handlers: stderr
      qualname: amqplib
    logger_eventletwsgi:
      level: WARNING
      handlers: stderr
      qualname: eventlet.wsgi.server
    logger_sqlalchemy:
      level: WARNING
      handlers: stderr
      qualname: sqlalchemy
    logger_boto:
      level: WARNING
      handlers: stderr
      qualname: boto
    handler_null:
      class: logging.NullHandler
      formatter: default
      args: ()
    handler_stdout:
      class: StreamHandler
      args: (sys.stdout,)
      formatter: context
    handler_stderr:
      class: StreamHandler
      args: (sys.stderr,)
      formatter: context
    formatter_context:
      class: oslo_log.formatters.ContextFormatter
      datefmt: "%Y-%m-%d %H:%M:%S"
    formatter_default:
      format: "%(message)s"
      datefmt: "%Y-%m-%d %H:%M:%S"
  plugins:
    ml2_conf:
      ml2:
        extension_drivers: port_security
        # (NOTE)portdirect: if unset this is populated dynamically from the value
        # in 'network.backend' to sane defaults.
        mechanism_drivers: null
        type_drivers: flat,vlan,vxlan
        tenant_network_types: vxlan
      ml2_type_vxlan:
        vni_ranges: 1:1000
        vxlan_group: 239.1.1.1
      ml2_type_flat:
        flat_networks: "*"
      # If you want to use the external network as a tagged provider network,
      # a range should be specified including the intended VLAN target
      # using ml2_type_vlan.network_vlan_ranges:
      # ml2_type_vlan:
      #   network_vlan_ranges: "external:1100:1110"
      agent:
        extensions: ""
    ml2_conf_sriov: null
    taas:
      taas:
        enabled: False
    openvswitch_agent:
      agent:
        tunnel_types: vxlan
        l2_population: True
        arp_responder: True
      ovs:
        bridge_mappings: "external:br-ex"
      securitygroup:
        firewall_driver: neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
    linuxbridge_agent:
      linux_bridge:
        # To define flat and VLAN connections, the Linux bridge agent can
        # assign a specific interface to the flat/VLAN network name using:
        # physical_interface_mappings: "external:eth3"
        # Or the mapping between the network and a bridge can be set:
        bridge_mappings: "external:br-ex"
        # The two options above are mutually exclusive; do not use both at once
      securitygroup:
        firewall_driver: iptables
      vxlan:
        l2_population: True
        arp_responder: True
    macvtap_agent: null
    sriov_agent:
      securitygroup:
        firewall_driver: neutron.agent.firewall.NoopFirewallDriver
      sriov_nic:
        physical_device_mappings: physnet2:enp3s0f1
        # NOTE: do not use null here, use an empty string
        exclude_devices: ""
  dhcp_agent:
    DEFAULT:
      # (NOTE)portdirect: if unset this is populated dynamically from the value in
      # 'network.backend' to sane defaults.
      interface_driver: null
      dnsmasq_config_file: /etc/neutron/dnsmasq.conf
      force_metadata: True
  dnsmasq: |
    #no-hosts
    #port=5353
    #cache-size=500
    #no-negcache
    #dns-forward-max=100
    #resolve-file=
    #strict-order
    #bind-interface
    #bind-dynamic
    #domain=
    #dhcp-range=10.10.10.10,10.10.10.100,24h
    #dhcp-lease-max=150
    #dhcp-host=11:22:33:44:55:66,ignore
    #dhcp-option=3,10.10.10.1
    #dhcp-option-force=26,1450

  l3_agent:
    DEFAULT:
      # (NOTE)portdirect: if unset this is populated dynamically from the value in
      # 'network.backend' to sane defaults.
      interface_driver: null
      agent_mode: legacy
  metering_agent: null
  metadata_agent:
    DEFAULT:
      # we cannot change the proxy socket path as it is declared
      # as a hostPath volume from agent daemonsets
      metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy
      metadata_proxy_shared_secret: "password"
    cache:
      enabled: true
      backend: dogpile.cache.memcached
  bagpipe_bgp: {}

  rabbitmq:
    # NOTE(rk760n): add an rmq policy to mirror messages from notification queues and set an expiration time for them
    policies:
      - vhost: "neutron"
        name: "ha_ttl_neutron"
        definition:
          # mirror messages to other nodes in rmq cluster
          ha-mode: "all"
          ha-sync-mode: "automatic"
          # 70s
          message-ttl: 70000
        priority: 0
        apply-to: all
        pattern: '^(?!(amq\.|reply_)).*'
  ## NOTE: "besteffort" is meant for dev environments with mixed compute types only.
  ## It helps prevent the sriov init script from failing due to a mismatched NIC.
  ## For prod environments, the target NIC should match, and the init script should fail otherwise.
  ## sriov_init:
  ##   - besteffort
  sriov_init:
    -
  # auto_bridge_add is a table of "bridge: interface" pairs
  # To automatically add physical interfaces to specific bridges,
  # for example eth3 to bridge br-physnet1, if0 to br0 and iface_two
  # to br1, do something like:
  #
  # auto_bridge_add:
  #   br-physnet1: eth3
  #   br0: if0
  #   br1: iface_two
  # br-ex will be added by default
  auto_bridge_add:
    br-ex: null

  # Configuration of OVS DPDK bridges and NICs.
  # This is a separate section and not part of the auto_bridge_add section
  # because additional parameters are needed.
  ovs_dpdk:
    enabled: false
    # Setting update_dpdk_bond_config to true gives the default behavior,
    # which may cause disruptions in OVS DPDK traffic in case of a neutron
    # OVS agent restart or when DPDK NIC/bond configurations are changed.
    # Setting this to false will configure DPDK in the first run and
    # disable NIC/bond config on the event of a restart or config update.
    update_dpdk_bond_config: true
    driver: uio_pci_generic
    # In case bonds are configured, the NICs which are part of those bonds
    # must NOT be provided here.
    nics:
      - name: dpdk0
        pci_id: '0000:05:00.0'
        # Set VF Index in case some particular VF(s) need to be
        # used with ovs-dpdk.
        # vf_index: 0
        bridge: br-phy
        migrate_ip: true
        n_rxq: 2
        n_txq: 2
        pmd_rxq_affinity: "0:3,1:27"
        ofport_request: 1
        # optional parameters for tuning the OVS DPDK config
        # in alignment with the available hardware resources
        # mtu: 2000
        # n_rxq_size: 1024
        # n_txq_size: 1024
        # vhost-iommu-support: true
    bridges:
      - name: br-phy
      # optional parameter, in case tunnel traffic needs to be transported over a vlan underlay
      # - tunnel_underlay_vlan: 45
    # Optional parameter for configuring bonding in OVS-DPDK
    # - name: br-phy-bond0
    # bonds:
    #   - name: dpdkbond0
    #     bridge: br-phy-bond0
    #     # The IP from the first nic in nics list shall be used
    #     migrate_ip: true
    #     mtu: 2000
    #     # Please note that n_rxq is set for each NIC individually
    #     # rather than denoting the total number of rx queues for
    #     # the bond as a whole. So setting n_rxq = 2 below for ex.
    #     # would be 4 rx queues in total for the bond.
    #     # Same for n_txq
    #     n_rxq: 2
    #     n_txq: 2
    #     ofport_request: 1
    #     n_rxq_size: 1024
    #     n_txq_size: 1024
    #     vhost-iommu-support: true
    #     ovs_options: "bond_mode=active-backup"
    #     nics:
    #       - name: dpdk_b0s0
    #         pci_id: '0000:06:00.0'
    #         pmd_rxq_affinity: "0:3,1:27"
    #         # Set VF Index in case some particular VF(s) need to be
    #         # used with ovs-dpdk. In which case pci_id of PF must be
    #         # provided above.
    #         # vf_index: 0
    #       - name: dpdk_b0s1
    #         pci_id: '0000:07:00.0'
    #         pmd_rxq_affinity: "0:3,1:27"
    #         # Set VF Index in case some particular VF(s) need to be
    #         # used with ovs-dpdk. In which case pci_id of PF must be
    #         # provided above.
    #         # vf_index: 0
    #
    # Set the log level for each target module (default level is always dbg)
    # Supported log levels are: off, emer, err, warn, info, dbg
    #
    # modules:
    #   - name: dpdk
    #     log_level: info

# Names of secrets used by bootstrap and environmental checks
secrets:
  identity:
    admin: neutron-keystone-admin
    neutron: neutron-keystone-user
    test: neutron-keystone-test
  oslo_db:
    admin: neutron-db-admin
    neutron: neutron-db-user
  oslo_messaging:
    admin: neutron-rabbitmq-admin
    neutron: neutron-rabbitmq-user
  tls:
    compute_metadata:
      metadata:
        internal: metadata-tls-metadata
    network:
      server:
        public: neutron-tls-public
        internal: neutron-tls-server
  oci_image_registry:
    neutron: neutron-oci-image-registry

# typically overridden by environmental
# values, but should include all endpoints
# required by this chart
endpoints:
  cluster_domain_suffix: cluster.local
  local_image_registry:
    name: docker-registry
    namespace: docker-registry
    hosts:
      default: localhost
      internal: docker-registry
      node: localhost
    host_fqdn_override:
      default: null
    port:
      registry:
        node: 5000
  oci_image_registry:
    name: oci-image-registry
    namespace: oci-image-registry
    auth:
      enabled: false
      neutron:
        username: neutron
        password: password
    hosts:
      default: localhost
    host_fqdn_override:
      default: null
    port:
      registry:
        default: null
  oslo_db:
    auth:
      admin:
        username: root
        password: password
        secret:
          tls:
            internal: mariadb-tls-direct
      neutron:
        username: neutron
        password: password
    hosts:
      default: mariadb
    host_fqdn_override:
      default: null
    path: /neutron
    scheme: mysql+pymysql
    port:
      mysql:
        default: 3306
  oslo_messaging:
    auth:
      admin:
        username: rabbitmq
        password: password
        secret:
          tls:
            internal: rabbitmq-tls-direct
      neutron:
        username: neutron
        password: password
    statefulset:
      replicas: 2
      name: rabbitmq-rabbitmq
    hosts:
      default: rabbitmq
    host_fqdn_override:
      default: null
    path: /neutron
    scheme: rabbit
    port:
      amqp:
        default: 5672
      http:
        default: 15672
  oslo_cache:
    auth:
      # NOTE(portdirect): this is used to define the value for the keystone
      # authtoken cache encryption key; if not set, it will be populated
      # automatically with a random value. To take advantage of this
      # feature, all services should be set to use the same key and
      # memcache service.
      memcache_secret_key: null
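      # For example (value illustrative; set the same key in every chart
      # that shares the memcached service):
      # memcache_secret_key: "a-shared-secret"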
    hosts:
      default: memcached
    host_fqdn_override:
      default: null
    port:
      memcache:
        default: 11211
  compute:
    name: nova
    hosts:
      default: nova-api
      public: nova
    host_fqdn_override:
      default: null
    path:
      default: "/v2.1/%(tenant_id)s"
    scheme:
      default: 'http'
    port:
      api:
        default: 8774
        public: 80
      novncproxy:
        default: 6080
  compute_metadata:
    name: nova
    hosts:
      default: nova-metadata
      public: metadata
    host_fqdn_override:
      default: null
    path:
      default: /
    scheme:
      default: 'http'
    port:
      metadata:
        default: 8775
        public: 80
  identity:
    name: keystone
    auth:
      admin:
        region_name: RegionOne
        username: admin
        password: password
        project_name: admin
        user_domain_name: default
        project_domain_name: default
      neutron:
        role: admin
        region_name: RegionOne
        username: neutron
        password: password
        project_name: service
        user_domain_name: service
        project_domain_name: service
      nova:
        region_name: RegionOne
        project_name: service
        username: nova
        password: password
        user_domain_name: service
        project_domain_name: service
      designate:
        region_name: RegionOne
        project_name: service
        username: designate
        password: password
        user_domain_name: service
        project_domain_name: service
      ironic:
        region_name: RegionOne
        project_name: service
        username: ironic
        password: password
        user_domain_name: service
        project_domain_name: service
      test:
        role: admin
        region_name: RegionOne
        username: neutron-test
        password: password
        # NOTE: this project will be purged and reset if
        # conf.rally_tests.force_project_purge is set to true,
        # which may be required upon test failure, but be aware that this will
        # expunge all openstack objects, so if this is used a separate project
        # should be used for each helm test, and it should also be ensured
        # that this project is not in use by other tenants
        project_name: test
        user_domain_name: service
        project_domain_name: service
    hosts:
      default: keystone
      internal: keystone-api
    host_fqdn_override:
      default: null
    path:
      default: /v3
    scheme:
      default: http
    port:
      api:
        default: 80
        internal: 5000
  network:
    name: neutron
    hosts:
      default: neutron-server
      public: neutron
    host_fqdn_override:
      default: null
      # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public
      # endpoints using the following format:
      # public:
      #   host: null
      #   tls:
      #     crt: null
      #     key: null
    path:
      default: null
    scheme:
      default: 'http'
      service: 'http'
    port:
      api:
        default: 9696
        public: 80
        service: 9696
  load_balancer:
    name: octavia
    hosts:
      default: octavia-api
      public: octavia
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme:
      default: http
    port:
      api:
        default: 9876
        public: 80
  fluentd:
    namespace: osh-infra
    name: fluentd
    hosts:
      default: fluentd-logging
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme: 'http'
    port:
      service:
        default: 24224
      metrics:
        default: 24220
  dns:
    name: designate
    hosts:
      default: designate-api
      public: designate
    host_fqdn_override:
      default: null
    path:
      default: /
    scheme:
      default: 'http'
    port:
      api:
        default: 9001
        public: 80
  baremetal:
    name: ironic
    hosts:
      default: ironic-api
      public: ironic
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme:
      default: 'http'
    port:
      api:
        default: 6385
        public: 80
  # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress.
  # They are used to enable the Egress K8s network policy.
  kube_dns:
    namespace: kube-system
    name: kubernetes-dns
    hosts:
      default: kube-dns
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme: http
    port:
      dns:
        default: 53
        protocol: UDP
  ingress:
    namespace: null
    name: ingress
    hosts:
      default: ingress
    port:
      ingress:
        default: 80

network_policy:
  neutron:
    # TODO(lamt): Need to tighten this ingress for security.
    ingress:
      - {}
    egress:
      - {}

helm3_hook: true

health_probe:
  logging:
    level: ERROR

tls:
  identity: false
  oslo_messaging: false
  oslo_db: false

manifests:
  certificates: false
  configmap_bin: true
  configmap_etc: true
  daemonset_dhcp_agent: true
  daemonset_l3_agent: true
  daemonset_lb_agent: true
  daemonset_metadata_agent: true
  daemonset_ovs_agent: true
  daemonset_sriov_agent: true
  daemonset_l2gw_agent: false
  daemonset_bagpipe_bgp: false
  daemonset_netns_cleanup_cron: true
  deployment_ironic_agent: false
  deployment_server: true
  ingress_server: true
  job_bootstrap: true
  job_db_init: true
  job_db_sync: true
  job_db_drop: false
  job_image_repo_sync: true
  job_ks_endpoints: true
  job_ks_service: true
  job_ks_user: true
  job_rabbit_init: true
  pdb_server: true
  pod_rally_test: true
  network_policy: false
  secret_db: true
  secret_ingress_tls: true
  secret_keystone: true
  secret_rabbitmq: true
  secret_registry: true
  service_ingress_server: true
  service_server: true
...