1# Licensed under the Apache License, Version 2.0 (the "License");
2# you may not use this file except in compliance with the License.
3# You may obtain a copy of the License at
4#
5# http://www.apache.org/licenses/LICENSE-2.0
6#
7# Unless required by applicable law or agreed to in writing, software
8# distributed under the License is distributed on an "AS IS" BASIS,
9# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10# See the License for the specific language governing permissions and
11# limitations under the License.
12
13# Default values for neutron.
14# This is a YAML-formatted file.
15# Declare name/value pairs to be passed into your templates.
16# name: value
17
18---
19release_group: null
20
21images:
22 tags:
23 bootstrap: docker.io/openstackhelm/heat:stein-ubuntu_bionic
24 test: docker.io/xrally/xrally-openstack:2.0.0
25 purge_test: docker.io/openstackhelm/ospurge:latest
26 db_init: docker.io/openstackhelm/heat:stein-ubuntu_bionic
27 neutron_db_sync: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
28 db_drop: docker.io/openstackhelm/heat:stein-ubuntu_bionic
29 rabbit_init: docker.io/rabbitmq:3.7-management
30 ks_user: docker.io/openstackhelm/heat:stein-ubuntu_bionic
31 ks_service: docker.io/openstackhelm/heat:stein-ubuntu_bionic
32 ks_endpoints: docker.io/openstackhelm/heat:stein-ubuntu_bionic
33 netoffload: ghcr.io/vexxhost/netoffload:v1.0.1
34 neutron_server: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
35 neutron_dhcp: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
36 neutron_metadata: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
37 neutron_ovn_metadata: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
38 neutron_l3: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
39 neutron_l2gw: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
40 neutron_openvswitch_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
41 neutron_linuxbridge_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
42 neutron_sriov_agent: docker.io/openstackhelm/neutron:stein-18.04-sriov
43 neutron_sriov_agent_init: docker.io/openstackhelm/neutron:stein-18.04-sriov
44 neutron_bagpipe_bgp: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
45 neutron_ironic_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
46 neutron_netns_cleanup_cron: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
47 dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
48 image_repo_sync: docker.io/docker:17.07.0
49 pull_policy: "IfNotPresent"
50 local_registry:
51 active: false
52 exclude:
53 - dep_check
54 - image_repo_sync
55
56labels:
57 agent:
58 dhcp:
59 node_selector_key: openstack-control-plane
60 node_selector_value: enabled
61 l3:
62 node_selector_key: openstack-control-plane
63 node_selector_value: enabled
64 metadata:
65 node_selector_key: openstack-control-plane
66 node_selector_value: enabled
67 l2gw:
68 node_selector_key: openstack-control-plane
69 node_selector_value: enabled
70 job:
71 node_selector_key: openstack-control-plane
72 node_selector_value: enabled
73 lb:
74 node_selector_key: linuxbridge
75 node_selector_value: enabled
76 # openvswitch is a special case, requiring a special
77 # label that can apply to both control hosts
78 # and compute hosts, until we get more sophisticated
79 # with our daemonset scheduling
80 ovs:
81 node_selector_key: openvswitch
82 node_selector_value: enabled
83 sriov:
84 node_selector_key: sriov
85 node_selector_value: enabled
86 bagpipe_bgp:
87 node_selector_key: openstack-compute-node
88 node_selector_value: enabled
89 server:
90 node_selector_key: openstack-control-plane
91 node_selector_value: enabled
92 ironic_agent:
93 node_selector_key: openstack-control-plane
94 node_selector_value: enabled
95 netns_cleanup_cron:
96 node_selector_key: openstack-control-plane
97 node_selector_value: enabled
98 test:
99 node_selector_key: openstack-control-plane
100 node_selector_value: enabled
101
102network:
103 # provide the type of network wiring to be used (an alternative backend example is sketched below)
104 backend:
105 - openvswitch
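 # A hedged sketch of selecting a different backend (linuxbridge agents are
 # configured elsewhere in this chart); this is not the chart default:
 # backend:
 #   - linuxbridge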
106 # NOTE(Portdirect): Share network namespaces with the host,
107 # allowing agents to be restarted without packet loss and simplifying
108 # debugging. This feature requires mount propagation support.
109 share_namespaces: true
110 interface:
111 # Tunnel interface will be used for VXLAN tunneling.
112 tunnel: null
113 # If tunnel is null, a fallback mechanism searches for an interface
114 # with a route in the tunnel network CIDR (see the example below).
115 tunnel_network_cidr: "0/0"
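 # For example (illustrative values, not chart defaults), the tunnel interface
 # can be pinned explicitly or discovered from a routed CIDR:
 # tunnel: ens3
 # tunnel_network_cidr: "10.0.0.0/24"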
116 # To perform setup of network interfaces using the SR-IOV init
117 # container you can use a section similar to:
118 # sriov:
119 # - device: ${DEV}
120 # num_vfs: 8
121 # mtu: 9214
122 # promisc: false
123 # qos:
124 # - vf_num: 0
125 # share: 10
126 # queues_per_vf:
127 # - num_queues: 16
128 # exclude_vf: 0,11,21
129 server:
130 ingress:
131 public: true
132 classes:
133 namespace: "nginx"
134 cluster: "nginx-cluster"
135 annotations:
136 nginx.ingress.kubernetes.io/rewrite-target: /
137 external_policy_local: false
138 node_port:
139 enabled: false
140 port: 30096
141
142bootstrap:
143 enabled: false
144 ks_user: neutron
145 script: |
146 openstack token issue
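 # As a sketch, the bootstrap script could also pre-create a shared provider
 # network (illustrative names/values, not part of the chart defaults):
 # openstack network create --share --external \
 #   --provider-network-type flat --provider-physical-network external public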
147
148dependencies:
149 dynamic:
150 common:
151 local_image_registry:
152 jobs:
153 - neutron-image-repo-sync
154 services:
155 - endpoint: node
156 service: local_image_registry
157 targeted:
158 sriov: {}
159 l2gateway: {}
160 bagpipe_bgp: {}
161 ovn:
162 server:
163 pod: null
164 openvswitch:
165 dhcp:
166 pod:
167 - requireSameNode: true
168 labels:
169 application: neutron
170 component: neutron-ovs-agent
171 l3:
172 pod:
173 - requireSameNode: true
174 labels:
175 application: neutron
176 component: neutron-ovs-agent
177 metadata:
178 pod:
179 - requireSameNode: true
180 labels:
181 application: neutron
182 component: neutron-ovs-agent
183 linuxbridge:
184 dhcp:
185 pod:
186 - requireSameNode: true
187 labels:
188 application: neutron
189 component: neutron-lb-agent
190 l3:
191 pod:
192 - requireSameNode: true
193 labels:
194 application: neutron
195 component: neutron-lb-agent
196 metadata:
197 pod:
198 - requireSameNode: true
199 labels:
200 application: neutron
201 component: neutron-lb-agent
202 lb_agent:
203 pod: null
204 static:
205 bootstrap:
206 services:
207 - endpoint: internal
208 service: network
209 - endpoint: internal
210 service: compute
211 db_drop:
212 services:
213 - endpoint: internal
214 service: oslo_db
215 db_init:
216 services:
217 - endpoint: internal
218 service: oslo_db
219 db_sync:
220 jobs:
221 - neutron-db-init
222 services:
223 - endpoint: internal
224 service: oslo_db
225 dhcp:
226 pod: null
227 jobs:
228 - neutron-rabbit-init
229 services:
230 - endpoint: internal
231 service: oslo_messaging
232 - endpoint: internal
233 service: network
234 - endpoint: internal
235 service: compute
236 ks_endpoints:
237 jobs:
238 - neutron-ks-service
239 services:
240 - endpoint: internal
241 service: identity
242 ks_service:
243 services:
244 - endpoint: internal
245 service: identity
246 ks_user:
247 services:
248 - endpoint: internal
249 service: identity
250 rabbit_init:
251 services:
252 - service: oslo_messaging
253 endpoint: internal
254 l3:
255 pod: null
256 jobs:
257 - neutron-rabbit-init
258 services:
259 - endpoint: internal
260 service: oslo_messaging
261 - endpoint: internal
262 service: network
263 - endpoint: internal
264 service: compute
265 lb_agent:
266 pod: null
267 jobs:
268 - neutron-rabbit-init
269 services:
270 - endpoint: internal
271 service: oslo_messaging
272 - endpoint: internal
273 service: network
274 metadata:
275 pod: null
276 jobs:
277 - neutron-rabbit-init
278 services:
279 - endpoint: internal
280 service: oslo_messaging
281 - endpoint: internal
282 service: network
283 - endpoint: internal
284 service: compute
285 - endpoint: public
286 service: compute_metadata
287 ovn_metadata:
288 pod:
289 - requireSameNode: true
290 labels:
291 application: ovn
292 component: ovn-controller
293 services:
294 - endpoint: internal
295 service: compute_metadata
296 - endpoint: internal
297 service: network
298 ovs_agent:
299 jobs:
300 - neutron-rabbit-init
301 pod:
302 - requireSameNode: true
303 labels:
304 application: openvswitch
305 component: server
306 services:
307 - endpoint: internal
308 service: oslo_messaging
309 - endpoint: internal
310 service: network
311 server:
312 jobs:
313 - neutron-db-sync
314 - neutron-ks-user
315 - neutron-ks-endpoints
316 - neutron-rabbit-init
317 services:
318 - endpoint: internal
319 service: oslo_db
320 - endpoint: internal
321 service: oslo_messaging
322 - endpoint: internal
323 service: oslo_cache
324 - endpoint: internal
325 service: identity
326 ironic_agent:
327 jobs:
328 - neutron-db-sync
329 - neutron-ks-user
330 - neutron-ks-endpoints
331 - neutron-rabbit-init
332 services:
333 - endpoint: internal
334 service: oslo_db
335 - endpoint: internal
336 service: oslo_messaging
337 - endpoint: internal
338 service: oslo_cache
339 - endpoint: internal
340 service: identity
341 tests:
342 services:
343 - endpoint: internal
344 service: network
345 - endpoint: internal
346 service: compute
347 image_repo_sync:
348 services:
349 - endpoint: internal
350 service: local_image_registry
351
352pod:
353 use_fqdn:
354 neutron_agent: true
355 probes:
356 rpc_timeout: 60
357 rpc_retries: 2
358 dhcp_agent:
359 dhcp_agent:
360 readiness:
361 enabled: true
362 params:
363 initialDelaySeconds: 30
364 periodSeconds: 190
365 timeoutSeconds: 185
366 liveness:
367 enabled: true
368 params:
369 initialDelaySeconds: 120
370 periodSeconds: 600
371 timeoutSeconds: 580
372 l3_agent:
373 l3_agent:
374 readiness:
375 enabled: true
376 params:
377 initialDelaySeconds: 30
378 periodSeconds: 190
379 timeoutSeconds: 185
380 liveness:
381 enabled: true
382 params:
383 initialDelaySeconds: 120
384 periodSeconds: 600
385 timeoutSeconds: 580
386 lb_agent:
387 lb_agent:
388 readiness:
389 enabled: true
390 metadata_agent:
391 metadata_agent:
392 readiness:
393 enabled: true
394 params:
395 initialDelaySeconds: 30
396 periodSeconds: 190
397 timeoutSeconds: 185
398 liveness:
399 enabled: true
400 params:
401 initialDelaySeconds: 120
402 periodSeconds: 600
403 timeoutSeconds: 580
404 ovn_metadata_agent:
405 ovn_metadata_agent:
406 readiness:
407 enabled: true
408 params:
409 initialDelaySeconds: 30
410 periodSeconds: 190
411 timeoutSeconds: 185
412 liveness:
413 enabled: true
414 params:
415 initialDelaySeconds: 120
416 periodSeconds: 600
417 timeoutSeconds: 580
418 ovs_agent:
419 ovs_agent:
420 readiness:
421 enabled: true
422 params:
423 timeoutSeconds: 10
424 liveness:
425 enabled: true
426 params:
427 initialDelaySeconds: 120
428 periodSeconds: 600
429 timeoutSeconds: 580
430 sriov_agent:
431 sriov_agent:
432 readiness:
433 enabled: true
434 params:
435 initialDelaySeconds: 30
436 periodSeconds: 190
437 timeoutSeconds: 185
438 bagpipe_bgp:
439 bagpipe_bgp:
440 readiness:
441 enabled: true
442 params:
443 liveness:
444 enabled: true
445 params:
446 initialDelaySeconds: 60
447 l2gw_agent:
448 l2gw_agent:
449 readiness:
450 enabled: true
451 params:
452 initialDelaySeconds: 30
453 periodSeconds: 15
454 timeoutSeconds: 65
455 liveness:
456 enabled: true
457 params:
458 initialDelaySeconds: 120
459 periodSeconds: 90
460 timeoutSeconds: 70
461 server:
462 server:
463 readiness:
464 enabled: true
465 params:
466 periodSeconds: 15
467 timeoutSeconds: 10
468 liveness:
469 enabled: true
470 params:
471 initialDelaySeconds: 60
472 periodSeconds: 15
473 timeoutSeconds: 10
474 security_context:
475 neutron_dhcp_agent:
476 pod:
477 runAsUser: 42424
478 container:
479 neutron_dhcp_agent:
480 readOnlyRootFilesystem: true
481 privileged: true
482 neutron_l2gw_agent:
483 pod:
484 runAsUser: 42424
485 container:
486 neutron_l2gw_agent:
487 readOnlyRootFilesystem: true
488 privileged: true
489 neutron_bagpipe_bgp:
490 pod:
491 runAsUser: 42424
492 container:
493 neutron_bagpipe_bgp:
494 readOnlyRootFilesystem: true
495 privileged: true
496 neutron_l3_agent:
497 pod:
498 runAsUser: 42424
499 container:
500 neutron_l3_agent:
501 readOnlyRootFilesystem: true
502 privileged: true
503 neutron_lb_agent:
504 pod:
505 runAsUser: 42424
506 container:
507 neutron_lb_agent_kernel_modules:
508 capabilities:
509 add:
510 - SYS_MODULE
511 - SYS_CHROOT
512 runAsUser: 0
513 readOnlyRootFilesystem: true
514 neutron_lb_agent_init:
515 privileged: true
516 runAsUser: 0
517 readOnlyRootFilesystem: true
518 neutron_lb_agent:
519 readOnlyRootFilesystem: true
520 privileged: true
521 neutron_metadata_agent:
522 pod:
523 runAsUser: 42424
524 container:
525 neutron_metadata_agent_init:
526 runAsUser: 0
527 readOnlyRootFilesystem: true
528 neutron_ovn_metadata_agent:
529 pod:
530 runAsUser: 42424
531 container:
532 neutron_ovn_metadata_agent_init:
533 runAsUser: 0
534 readOnlyRootFilesystem: true
535 neutron_ovs_agent:
536 pod:
537 runAsUser: 42424
538 container:
539 neutron_openvswitch_agent_kernel_modules:
540 capabilities:
541 add:
542 - SYS_MODULE
543 - SYS_CHROOT
544 runAsUser: 0
545 readOnlyRootFilesystem: true
546 netoffload:
547 privileged: true
548 runAsUser: 0
549 readOnlyRootFilesystem: true
550 neutron_ovs_agent_init:
551 privileged: true
552 runAsUser: 0
553 readOnlyRootFilesystem: true
554 neutron_ovs_agent:
555 readOnlyRootFilesystem: true
556 privileged: true
557 neutron_server:
558 pod:
559 runAsUser: 42424
560 container:
561 nginx:
562 runAsUser: 0
563 readOnlyRootFilesystem: false
564 neutron_server:
565 allowPrivilegeEscalation: false
566 readOnlyRootFilesystem: true
567 neutron_sriov_agent:
568 pod:
569 runAsUser: 42424
570 container:
571 neutron_sriov_agent_init:
572 privileged: true
573 runAsUser: 0
574 readOnlyRootFilesystem: false
575 neutron_sriov_agent:
576 readOnlyRootFilesystem: true
577 privileged: true
578 neutron_ironic_agent:
579 pod:
580 runAsUser: 42424
581 container:
582 neutron_ironic_agent:
583 allowPrivilegeEscalation: false
584 readOnlyRootFilesystem: true
585 neutron_netns_cleanup_cron:
586 pod:
587 runAsUser: 42424
588 container:
589 neutron_netns_cleanup_cron:
590 readOnlyRootFilesystem: true
591 privileged: true
592 affinity:
593 anti:
594 type:
595 default: preferredDuringSchedulingIgnoredDuringExecution
596 topologyKey:
597 default: kubernetes.io/hostname
598 weight:
599 default: 10
600 tolerations:
601 neutron:
602 enabled: false
603 tolerations:
604 - key: node-role.kubernetes.io/master
605 operator: Exists
606 effect: NoSchedule
607 - key: node-role.kubernetes.io/control-plane
608 operator: Exists
609 effect: NoSchedule
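 # Setting neutron.enabled to true above applies these tolerations to the
 # neutron pods, letting them schedule onto tainted master/control-plane nodes.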
610 mounts:
611 neutron_server:
612 init_container: null
613 neutron_server:
614 volumeMounts:
615 volumes:
616 neutron_dhcp_agent:
617 init_container: null
618 neutron_dhcp_agent:
619 volumeMounts:
620 volumes:
621 neutron_l3_agent:
622 init_container: null
623 neutron_l3_agent:
624 volumeMounts:
625 volumes:
626 neutron_lb_agent:
627 init_container: null
628 neutron_lb_agent:
629 volumeMounts:
630 volumes:
631 neutron_metadata_agent:
632 init_container: null
633 neutron_metadata_agent:
634 volumeMounts:
635 volumes:
636 neutron_ovn_metadata_agent:
637 init_container: null
638 neutron_ovn_metadata_agent:
639 volumeMounts:
640 volumes:
641 neutron_ovs_agent:
642 init_container: null
643 neutron_ovs_agent:
644 volumeMounts:
645 volumes:
646 neutron_sriov_agent:
647 init_container: null
648 neutron_sriov_agent:
649 volumeMounts:
650 volumes:
651 neutron_l2gw_agent:
652 init_container: null
653 neutron_l2gw_agent:
654 volumeMounts:
655 volumes:
656 bagpipe_bgp:
657 init_container: null
658 bagpipe_bgp:
659 volumeMounts:
660 volumes:
661 neutron_ironic_agent:
662 init_container: null
663 neutron_ironic_agent:
664 volumeMounts:
665 volumes:
666 neutron_netns_cleanup_cron:
667 init_container: null
668 neutron_netns_cleanup_cron:
669 volumeMounts:
670 volumes:
671 neutron_tests:
672 init_container: null
673 neutron_tests:
674 volumeMounts:
675 volumes:
676 neutron_bootstrap:
677 init_container: null
678 neutron_bootstrap:
679 volumeMounts:
680 volumes:
681 neutron_db_sync:
682 neutron_db_sync:
683 volumeMounts:
684 - name: db-sync-conf
685 mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini
686 subPath: ml2_conf.ini
687 readOnly: true
688 volumes:
689 replicas:
690 server: 1
691 ironic_agent: 1
692 lifecycle:
693 upgrades:
694 deployments:
695 revision_history: 3
696 pod_replacement_strategy: RollingUpdate
697 rolling_update:
698 max_unavailable: 1
699 max_surge: 3
700 daemonsets:
701 pod_replacement_strategy: RollingUpdate
702 dhcp_agent:
703 enabled: true
704 min_ready_seconds: 0
705 max_unavailable: 1
706 l3_agent:
707 enabled: true
708 min_ready_seconds: 0
709 max_unavailable: 1
710 lb_agent:
711 enabled: true
712 min_ready_seconds: 0
713 max_unavailable: 1
714 metadata_agent:
715 enabled: true
716 min_ready_seconds: 0
717 max_unavailable: 1
718 ovn_metadata_agent:
719 enabled: true
720 min_ready_seconds: 0
721 max_unavailable: 1
722 ovs_agent:
723 enabled: true
724 min_ready_seconds: 0
725 max_unavailable: 1
726 sriov_agent:
727 enabled: true
728 min_ready_seconds: 0
729 max_unavailable: 1
730 netns_cleanup_cron:
731 enabled: true
732 min_ready_seconds: 0
733 max_unavailable: 1
734 disruption_budget:
735 server:
736 min_available: 0
737 termination_grace_period:
738 server:
739 timeout: 30
740 ironic_agent:
741 timeout: 30
742 resources:
743 enabled: false
744 agent:
745 dhcp:
746 requests:
747 memory: "128Mi"
748 cpu: "100m"
749 limits:
750 memory: "1024Mi"
751 cpu: "2000m"
752 l3:
753 requests:
754 memory: "128Mi"
755 cpu: "100m"
756 limits:
757 memory: "1024Mi"
758 cpu: "2000m"
759 lb:
760 requests:
761 memory: "128Mi"
762 cpu: "100m"
763 limits:
764 memory: "1024Mi"
765 cpu: "2000m"
766 metadata:
767 requests:
768 memory: "128Mi"
769 cpu: "100m"
770 limits:
771 memory: "1024Mi"
772 cpu: "2000m"
773 ovn_metadata:
774 requests:
775 memory: "128Mi"
776 cpu: "100m"
777 limits:
778 memory: "1024Mi"
779 cpu: "2000m"
780 ovs:
781 requests:
782 memory: "128Mi"
783 cpu: "100m"
784 limits:
785 memory: "1024Mi"
786 cpu: "2000m"
787 sriov:
788 requests:
789 memory: "128Mi"
790 cpu: "100m"
791 limits:
792 memory: "1024Mi"
793 cpu: "2000m"
794 l2gw:
795 requests:
796 memory: "128Mi"
797 cpu: "100m"
798 limits:
799 memory: "1024Mi"
800 cpu: "2000m"
801 bagpipe_bgp:
802 requests:
803 memory: "128Mi"
804 cpu: "100m"
805 limits:
806 memory: "1024Mi"
807 cpu: "2000m"
808 server:
809 requests:
810 memory: "128Mi"
811 cpu: "100m"
812 limits:
813 memory: "1024Mi"
814 cpu: "2000m"
815 ironic_agent:
816 requests:
817 memory: "128Mi"
818 cpu: "100m"
819 limits:
820 memory: "1024Mi"
821 cpu: "2000m"
822 netns_cleanup_cron:
823 requests:
824 memory: "128Mi"
825 cpu: "100m"
826 limits:
827 memory: "1024Mi"
828 cpu: "2000m"
829 jobs:
830 bootstrap:
831 requests:
832 memory: "128Mi"
833 cpu: "100m"
834 limits:
835 memory: "1024Mi"
836 cpu: "2000m"
837 db_init:
838 requests:
839 memory: "128Mi"
840 cpu: "100m"
841 limits:
842 memory: "1024Mi"
843 cpu: "2000m"
844 rabbit_init:
845 requests:
846 memory: "128Mi"
847 cpu: "100m"
848 limits:
849 memory: "1024Mi"
850 cpu: "2000m"
851 db_sync:
852 requests:
853 memory: "128Mi"
854 cpu: "100m"
855 limits:
856 memory: "1024Mi"
857 cpu: "2000m"
858 db_drop:
859 requests:
860 memory: "128Mi"
861 cpu: "100m"
862 limits:
863 memory: "1024Mi"
864 cpu: "2000m"
865 ks_endpoints:
866 requests:
867 memory: "128Mi"
868 cpu: "100m"
869 limits:
870 memory: "1024Mi"
871 cpu: "2000m"
872 ks_service:
873 requests:
874 memory: "128Mi"
875 cpu: "100m"
876 limits:
877 memory: "1024Mi"
878 cpu: "2000m"
879 ks_user:
880 requests:
881 memory: "128Mi"
882 cpu: "100m"
883 limits:
884 memory: "1024Mi"
885 cpu: "2000m"
886 tests:
887 requests:
888 memory: "128Mi"
889 cpu: "100m"
890 limits:
891 memory: "1024Mi"
892 cpu: "2000m"
893 image_repo_sync:
894 requests:
895 memory: "128Mi"
896 cpu: "100m"
897 limits:
898 memory: "1024Mi"
899 cpu: "2000m"
900
901conf:
902 rally_tests:
903 force_project_purge: false
904 run_tempest: false
905 clean_up: |
906 # NOTE: We make a best effort to clean up rally-generated networks and routers,
907 # but failures here should not block further automated deployment.
908 set +e
909 PATTERN="^[sc]_rally_"
910
911 ROUTERS=$(openstack router list --format=value -c Name | grep -e $PATTERN | sort | tr -d '\r')
912 NETWORKS=$(openstack network list --format=value -c Name | grep -e $PATTERN | sort | tr -d '\r')
913
914 for ROUTER in $ROUTERS
915 do
916 openstack router unset --external-gateway $ROUTER
917 openstack router set --disable --no-ha $ROUTER
918
919 SUBNS=$(openstack router show $ROUTER -c interfaces_info --format=value | python -m json.tool | grep -oP '(?<="subnet_id": ")[a-f0-9\-]{36}(?=")' | sort | uniq)
920 for SUBN in $SUBNS
921 do
922 openstack router remove subnet $ROUTER $SUBN
923 done
924
925 for PORT in $(openstack port list --router $ROUTER --format=value -c ID | tr -d '\r')
926 do
927 openstack router remove port $ROUTER $PORT
928 done
929
930 openstack router delete $ROUTER
931 done
932
933 for NETWORK in $NETWORKS
934 do
935 for PORT in $(openstack port list --network $NETWORK --format=value -c ID | tr -d '\r')
936 do
937 openstack port delete $PORT
938 done
939 openstack network delete $NETWORK
940 done
941 set -e
942 tests:
943 NeutronNetworks.create_and_delete_networks:
944 - args:
945 network_create_args: {}
946 context:
947 quotas:
948 neutron:
949 network: -1
950 runner:
951 concurrency: 1
952 times: 1
953 type: constant
954 sla:
955 failure_rate:
956 max: 0
957 NeutronNetworks.create_and_delete_ports:
958 - args:
959 network_create_args: {}
960 port_create_args: {}
961 ports_per_network: 10
962 context:
963 network: {}
964 quotas:
965 neutron:
966 network: -1
967 port: -1
968 runner:
969 concurrency: 1
970 times: 1
971 type: constant
972 sla:
973 failure_rate:
974 max: 0
975 NeutronNetworks.create_and_delete_routers:
976 - args:
977 network_create_args: {}
978 router_create_args: {}
979 subnet_cidr_start: 1.1.0.0/30
980 subnet_create_args: {}
981 subnets_per_network: 2
982 context:
983 network: {}
984 quotas:
985 neutron:
986 network: -1
987 router: -1
988 subnet: -1
989 runner:
990 concurrency: 1
991 times: 1
992 type: constant
993 sla:
994 failure_rate:
995 max: 0
996 NeutronNetworks.create_and_delete_subnets:
997 - args:
998 network_create_args: {}
999 subnet_cidr_start: 1.1.0.0/30
1000 subnet_create_args: {}
1001 subnets_per_network: 2
1002 context:
1003 network: {}
1004 quotas:
1005 neutron:
1006 network: -1
1007 subnet: -1
1008 runner:
1009 concurrency: 1
1010 times: 1
1011 type: constant
1012 sla:
1013 failure_rate:
1014 max: 0
1015 NeutronNetworks.create_and_list_routers:
1016 - args:
1017 network_create_args: {}
1018 router_create_args: {}
1019 subnet_cidr_start: 1.1.0.0/30
1020 subnet_create_args: {}
1021 subnets_per_network: 2
1022 context:
1023 network: {}
1024 quotas:
1025 neutron:
1026 network: -1
1027 router: -1
1028 subnet: -1
1029 runner:
1030 concurrency: 1
1031 times: 1
1032 type: constant
1033 sla:
1034 failure_rate:
1035 max: 0
1036 NeutronNetworks.create_and_list_subnets:
1037 - args:
1038 network_create_args: {}
1039 subnet_cidr_start: 1.1.0.0/30
1040 subnet_create_args: {}
1041 subnets_per_network: 2
1042 context:
1043 network: {}
1044 quotas:
1045 neutron:
1046 network: -1
1047 subnet: -1
1048 runner:
1049 concurrency: 1
1050 times: 1
1051 type: constant
1052 sla:
1053 failure_rate:
1054 max: 0
1055 NeutronNetworks.create_and_show_network:
1056 - args:
1057 network_create_args: {}
1058 context:
1059 quotas:
1060 neutron:
1061 network: -1
1062 runner:
1063 concurrency: 1
1064 times: 1
1065 type: constant
1066 sla:
1067 failure_rate:
1068 max: 0
1069 NeutronNetworks.create_and_update_networks:
1070 - args:
1071 network_create_args: {}
1072 network_update_args:
1073 admin_state_up: false
1074 context:
1075 quotas:
1076 neutron:
1077 network: -1
1078 runner:
1079 concurrency: 1
1080 times: 1
1081 type: constant
1082 sla:
1083 failure_rate:
1084 max: 0
1085 NeutronNetworks.create_and_update_ports:
1086 - args:
1087 network_create_args: {}
1088 port_create_args: {}
1089 port_update_args:
1090 admin_state_up: false
1091 device_id: dummy_id
1092 device_owner: dummy_owner
1093 ports_per_network: 5
1094 context:
1095 network: {}
1096 quotas:
1097 neutron:
1098 network: -1
1099 port: -1
1100 runner:
1101 concurrency: 1
1102 times: 1
1103 type: constant
1104 sla:
1105 failure_rate:
1106 max: 0
1107 NeutronNetworks.create_and_update_routers:
1108 - args:
1109 network_create_args: {}
1110 router_create_args: {}
1111 router_update_args:
1112 admin_state_up: false
1113 subnet_cidr_start: 1.1.0.0/30
1114 subnet_create_args: {}
1115 subnets_per_network: 2
1116 context:
1117 network: {}
1118 quotas:
1119 neutron:
1120 network: -1
1121 router: -1
1122 subnet: -1
1123 runner:
1124 concurrency: 1
1125 times: 1
1126 type: constant
1127 sla:
1128 failure_rate:
1129 max: 0
1130 NeutronNetworks.create_and_update_subnets:
1131 - args:
1132 network_create_args: {}
1133 subnet_cidr_start: 1.4.0.0/16
1134 subnet_create_args: {}
1135 subnet_update_args:
1136 enable_dhcp: false
1137 subnets_per_network: 2
1138 context:
1139 network: {}
1140 quotas:
1141 neutron:
1142 network: -1
1143 subnet: -1
1144 runner:
1145 concurrency: 1
1146 times: 1
1147 type: constant
1148 sla:
1149 failure_rate:
1150 max: 0
1151 NeutronNetworks.list_agents:
1152 - args:
1153 agent_args: {}
1154 runner:
1155 concurrency: 1
1156 times: 1
1157 type: constant
1158 sla:
1159 failure_rate:
1160 max: 0
1161 NeutronSecurityGroup.create_and_list_security_groups:
1162 - args:
1163 security_group_create_args: {}
1164 context:
1165 quotas:
1166 neutron:
1167 security_group: -1
1168 runner:
1169 concurrency: 1
1170 times: 1
1171 type: constant
1172 sla:
1173 failure_rate:
1174 max: 0
1175 NeutronSecurityGroup.create_and_update_security_groups:
1176 - args:
1177 security_group_create_args: {}
1178 security_group_update_args: {}
1179 context:
1180 quotas:
1181 neutron:
1182 security_group: -1
1183 runner:
1184 concurrency: 1
1185 times: 1
1186 type: constant
1187 sla:
1188 failure_rate:
1189 max: 0
1190 paste:
1191 composite:neutron:
1192 use: egg:Paste#urlmap
1193 /: neutronversions_composite
1194 /v2.0: neutronapi_v2_0
1195 composite:neutronapi_v2_0:
1196 use: call:neutron.auth:pipeline_factory
1197 noauth: cors http_proxy_to_wsgi request_id catch_errors extensions neutronapiapp_v2_0
1198 keystone: cors http_proxy_to_wsgi request_id catch_errors authtoken audit keystonecontext extensions neutronapiapp_v2_0
1199 composite:neutronversions_composite:
1200 use: call:neutron.auth:pipeline_factory
1201 noauth: cors http_proxy_to_wsgi neutronversions
1202 keystone: cors http_proxy_to_wsgi neutronversions
1203 filter:request_id:
1204 paste.filter_factory: oslo_middleware:RequestId.factory
1205 filter:catch_errors:
1206 paste.filter_factory: oslo_middleware:CatchErrors.factory
1207 filter:cors:
1208 paste.filter_factory: oslo_middleware.cors:filter_factory
1209 oslo_config_project: neutron
1210 filter:http_proxy_to_wsgi:
1211 paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
1212 filter:keystonecontext:
1213 paste.filter_factory: neutron.auth:NeutronKeystoneContext.factory
1214 filter:authtoken:
1215 paste.filter_factory: keystonemiddleware.auth_token:filter_factory
1216 filter:audit:
1217 paste.filter_factory: keystonemiddleware.audit:filter_factory
1218 audit_map_file: /etc/neutron/api_audit_map.conf
1219 filter:extensions:
1220 paste.filter_factory: neutron.api.extensions:plugin_aware_extension_middleware_factory
1221 app:neutronversions:
1222 paste.app_factory: neutron.pecan_wsgi.app:versions_factory
1223 app:neutronapiapp_v2_0:
1224 paste.app_factory: neutron.api.v2.router:APIRouter.factory
1225 filter:osprofiler:
1226 paste.filter_factory: osprofiler.web:WsgiMiddleware.factory
1227 policy: {}
1228 api_audit_map:
1229 DEFAULT:
1230 target_endpoint_type: None
1231 custom_actions:
1232 add_router_interface: update/add
1233 remove_router_interface: update/remove
1234 path_keywords:
1235 floatingips: ip
1236 healthmonitors: healthmonitor
1237 health_monitors: health_monitor
1238 lb: None
1239 members: member
1240 metering-labels: label
1241 metering-label-rules: rule
1242 networks: network
1243 pools: pool
1244 ports: port
1245 routers: router
1246 quotas: quota
1247 security-groups: security-group
1248 security-group-rules: rule
1249 subnets: subnet
1250 vips: vip
1251 service_endpoints:
1252 network: service/network
1253 neutron_sudoers: |
1254 # This sudoers file supports rootwrap for both Kolla and LOCI Images.
1255 Defaults !requiretty
1256 Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin"
1257 neutron ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *, /var/lib/openstack/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *
1258 neutron ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf, /var/lib/openstack/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
1259 rootwrap: |
1260 # Configuration for neutron-rootwrap
1261 # This file should be owned by (and only-writeable by) the root user
1262
1263 [DEFAULT]
1264 # List of directories to load filter definitions from (separated by ',').
1265 # These directories MUST all be only writeable by root !
1266 filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap,/var/lib/openstack/etc/neutron/rootwrap.d
1267
1268 # List of directories to search executables in, in case filters do not
1269 # explicitly specify a full path (separated by ',')
1270 # If not specified, defaults to system PATH environment variable.
1271 # These directories MUST all be only writeable by root !
1272 exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin
1273
1274 # Enable logging to syslog
1275 # Default value is False
1276 use_syslog=False
1277
1278 # Which syslog facility to use.
1279 # Valid values include auth, authpriv, syslog, local0, local1...
1280 # Default value is 'syslog'
1281 syslog_log_facility=syslog
1282
1283 # Which messages to log.
1284 # INFO means log all usage
1285 # ERROR means only log unsuccessful attempts
1286 syslog_log_level=ERROR
1287
1288 [xenapi]
1289 # XenAPI configuration is only required by the L2 agent if it is to
1290 # target a XenServer/XCP compute host's dom0.
1291 xenapi_connection_url=<None>
1292 xenapi_connection_username=root
1293 xenapi_connection_password=<None>
1294 rootwrap_filters:
1295 debug:
1296 pods:
1297 - dhcp_agent
1298 - l3_agent
1299 - lb_agent
1300 - metadata_agent
1301 - ovn_metadata_agent
1302 - ovs_agent
1303 - sriov_agent
1304 content: |
1305 # neutron-rootwrap command filters for nodes on which neutron is
1306 # expected to control network
1307 #
1308 # This file should be owned by (and only-writeable by) the root user
1309
1310 # format seems to be
1311 # cmd-name: filter-name, raw-command, user, args
1312
1313 [Filters]
1314
1315 # This is needed because we should ping
1316 # from inside a namespace which requires root
1317 # _alt variants allow to match -c and -w in any order
1318 # (used by NeutronDebugAgent.ping_all)
1319 ping: RegExpFilter, ping, root, ping, -w, \d+, -c, \d+, [0-9\.]+
1320 ping_alt: RegExpFilter, ping, root, ping, -c, \d+, -w, \d+, [0-9\.]+
1321 ping6: RegExpFilter, ping6, root, ping6, -w, \d+, -c, \d+, [0-9A-Fa-f:]+
1322 ping6_alt: RegExpFilter, ping6, root, ping6, -c, \d+, -w, \d+, [0-9A-Fa-f:]+
1323 dibbler:
1324 pods:
1325 - dhcp_agent
1326 - l3_agent
1327 - lb_agent
1328 - metadata_agent
1329 - ovn_metadata_agent
1330 - ovs_agent
1331 - sriov_agent
1332 content: |
1333 # neutron-rootwrap command filters for nodes on which neutron is
1334 # expected to control network
1335 #
1336 # This file should be owned by (and only-writeable by) the root user
1337
1338 # format seems to be
1339 # cmd-name: filter-name, raw-command, user, args
1340
1341 [Filters]
1342
1343 # Filters for the dibbler-based reference implementation of the pluggable
1344 # Prefix Delegation driver. Other implementations using an alternative agent
1345 # should include a similar filter in this folder.
1346
1347 # prefix_delegation_agent
1348 dibbler-client: CommandFilter, dibbler-client, root
1349 ipset_firewall:
1350 pods:
1351 - dhcp_agent
1352 - l3_agent
1353 - lb_agent
1354 - metadata_agent
1355 - ovn_metadata_agent
1356 - ovs_agent
1357 - sriov_agent
1358 content: |
1359 # neutron-rootwrap command filters for nodes on which neutron is
1360 # expected to control network
1361 #
1362 # This file should be owned by (and only-writeable by) the root user
1363
1364 # format seems to be
1365 # cmd-name: filter-name, raw-command, user, args
1366
1367 [Filters]
1368 # neutron/agent/linux/iptables_firewall.py
1369 # "ipset", "-A", ...
1370 ipset: CommandFilter, ipset, root
1371 l3:
1372 pods:
1373 - dhcp_agent
1374 - l3_agent
1375 - lb_agent
1376 - metadata_agent
1377 - ovn_metadata_agent
1378 - ovs_agent
1379 - sriov_agent
1380 content: |
1381 # neutron-rootwrap command filters for nodes on which neutron is
1382 # expected to control network
1383 #
1384 # This file should be owned by (and only-writeable by) the root user
1385
1386 # format seems to be
1387 # cmd-name: filter-name, raw-command, user, args
1388
1389 [Filters]
1390
1391 # arping
1392 arping: CommandFilter, arping, root
1393
1394 # l3_agent
1395 sysctl: CommandFilter, sysctl, root
1396 route: CommandFilter, route, root
1397 radvd: CommandFilter, radvd, root
1398
1399 # haproxy
1400 haproxy: RegExpFilter, haproxy, root, haproxy, -f, .*
1401 kill_haproxy: KillFilter, root, haproxy, -15, -9, -HUP
1402
1403 # metadata proxy
1404 metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
1405 # RHEL invocation of the metadata proxy will report /usr/bin/python
1406 kill_metadata: KillFilter, root, python, -15, -9
1407 kill_metadata2: KillFilter, root, python2, -15, -9
1408 kill_metadata7: KillFilter, root, python2.7, -15, -9
1409 kill_metadata3: KillFilter, root, python3, -15, -9
1410 kill_metadata35: KillFilter, root, python3.5, -15, -9
1411 kill_metadata36: KillFilter, root, python3.6, -15, -9
1412 kill_metadata37: KillFilter, root, python3.7, -15, -9
1413 kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -15, -9, -HUP
1414 kill_radvd: KillFilter, root, /sbin/radvd, -15, -9, -HUP
1415
1416 # ip_lib
1417 ip: IpFilter, ip, root
1418 find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
1419 ip_exec: IpNetnsExecFilter, ip, root
1420
1421 # l3_tc_lib
1422 l3_tc_show_qdisc: RegExpFilter, tc, root, tc, qdisc, show, dev, .+
1423 l3_tc_add_qdisc_ingress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, ingress
1424 l3_tc_add_qdisc_egress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, root, handle, 1:, htb
1425 l3_tc_show_filters: RegExpFilter, tc, root, tc, -p, -s, -d, filter, show, dev, .+, parent, .+, prio, 1
1426 l3_tc_delete_filters: RegExpFilter, tc, root, tc, filter, del, dev, .+, parent, .+, prio, 1, handle, .+, u32
1427 l3_tc_add_filter_ingress: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, ip, prio, 1, u32, match, ip, dst, .+, police, rate, .+, burst, .+, drop, flowid, :1
1428 l3_tc_add_filter_egress: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, ip, prio, 1, u32, match, ip, src, .+, police, rate, .+, burst, .+, drop, flowid, :1
1429
1430 # For ip monitor
1431 kill_ip_monitor: KillFilter, root, ip, -9
1432
1433 # ovs_lib (if OVSInterfaceDriver is used)
1434 ovs-vsctl: CommandFilter, ovs-vsctl, root
1435
1436 # iptables_manager
1437 iptables-save: CommandFilter, iptables-save, root
1438 iptables-restore: CommandFilter, iptables-restore, root
1439 ip6tables-save: CommandFilter, ip6tables-save, root
1440 ip6tables-restore: CommandFilter, ip6tables-restore, root
1441
1442 # Keepalived
1443 keepalived: CommandFilter, keepalived, root
1444 kill_keepalived: KillFilter, root, keepalived, -HUP, -15, -9
1445
1446 # l3 agent to delete floatingip's conntrack state
1447 conntrack: CommandFilter, conntrack, root
1448
1449 # keepalived state change monitor
1450 keepalived_state_change: CommandFilter, neutron-keepalived-state-change, root
1451 # The following filters are used to kill the keepalived state change monitor.
1452 # Since the monitor runs as a Python script, the system reports that the
1453 # command of the process to be killed is python.
1454 # TODO(mlavalle) These kill filters will be updated once we come up with a
1455 # mechanism to kill using the name of the script being executed by Python
1456 kill_keepalived_monitor_py: KillFilter, root, python, -15
1457 kill_keepalived_monitor_py27: KillFilter, root, python2.7, -15
1458 kill_keepalived_monitor_py3: KillFilter, root, python3, -15
1459 kill_keepalived_monitor_py35: KillFilter, root, python3.5, -15
1460 kill_keepalived_monitor_py36: KillFilter, root, python3.6, -15
1461 kill_keepalived_monitor_py37: KillFilter, root, python3.7, -15
1462 netns_cleanup:
1463 pods:
1464 - dhcp_agent
1465 - l3_agent
1466 - lb_agent
1467 - metadata_agent
1468 - ovn_metadata_agent
1469 - ovs_agent
1470 - sriov_agent
1471 - netns_cleanup_cron
1472 content: |
1473 # neutron-rootwrap command filters for nodes on which neutron is
1474 # expected to control network
1475 #
1476 # This file should be owned by (and only-writeable by) the root user
1477
1478 # format seems to be
1479 # cmd-name: filter-name, raw-command, user, args
1480
1481 [Filters]
1482
1483 # netns-cleanup
1484 netstat: CommandFilter, netstat, root
1485 dhcp:
1486 pods:
1487 - dhcp_agent
1488 - l3_agent
1489 - lb_agent
1490 - metadata_agent
1491 - ovn_metadata_agent
1492 - ovs_agent
1493 - sriov_agent
1494 - netns_cleanup_cron
1495 content: |
1496 # neutron-rootwrap command filters for nodes on which neutron is
1497 # expected to control network
1498 #
1499 # This file should be owned by (and only-writeable by) the root user
1500
1501 # format seems to be
1502 # cmd-name: filter-name, raw-command, user, args
1503
1504 [Filters]
1505
1506 # dhcp-agent
1507 dnsmasq: CommandFilter, dnsmasq, root
1508 # dhcp-agent uses kill as well, that's handled by the generic KillFilter
1509 # it looks like these are the only signals needed, per
1510 # neutron/agent/linux/dhcp.py
1511 kill_dnsmasq: KillFilter, root, /sbin/dnsmasq, -9, -HUP, -15
1512 kill_dnsmasq_usr: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP, -15
1513
1514 ovs-vsctl: CommandFilter, ovs-vsctl, root
1515 ivs-ctl: CommandFilter, ivs-ctl, root
1516 mm-ctl: CommandFilter, mm-ctl, root
1517 dhcp_release: CommandFilter, dhcp_release, root
1518 dhcp_release6: CommandFilter, dhcp_release6, root
1519
1520 # metadata proxy
1521 metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
1522 # RHEL invocation of the metadata proxy will report /usr/bin/python
1523 kill_metadata: KillFilter, root, python, -9
1524 kill_metadata2: KillFilter, root, python2, -9
1525 kill_metadata7: KillFilter, root, python2.7, -9
1526 kill_metadata3: KillFilter, root, python3, -9
1527 kill_metadata35: KillFilter, root, python3.5, -9
1528 kill_metadata36: KillFilter, root, python3.6, -9
1529 kill_metadata37: KillFilter, root, python3.7, -9
1530
1531 # ip_lib
1532 ip: IpFilter, ip, root
1533 find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
1534 ip_exec: IpNetnsExecFilter, ip, root
1535 ebtables:
1536 pods:
1537 - dhcp_agent
1538 - l3_agent
1539 - lb_agent
1540 - metadata_agent
1541 - ovn_metadata_agent
1542 - ovs_agent
1543 - sriov_agent
1544 content: |
1545 # neutron-rootwrap command filters for nodes on which neutron is
1546 # expected to control network
1547 #
1548 # This file should be owned by (and only-writeable by) the root user
1549
1550 # format seems to be
1551 # cmd-name: filter-name, raw-command, user, args
1552
1553 [Filters]
1554
1555 ebtables: CommandFilter, ebtables, root
1556 iptables_firewall:
1557 pods:
1558 - dhcp_agent
1559 - l3_agent
1560 - lb_agent
1561 - metadata_agent
1562 - ovn_metadata_agent
1563 - ovs_agent
1564 - sriov_agent
1565 content: |
1566 # neutron-rootwrap command filters for nodes on which neutron is
1567 # expected to control network
1568 #
1569 # This file should be owned by (and only-writeable by) the root user
1570
1571 # format seems to be
1572 # cmd-name: filter-name, raw-command, user, args
1573
1574 [Filters]
1575
1576 # neutron/agent/linux/iptables_firewall.py
1577 # "iptables-save", ...
1578 iptables-save: CommandFilter, iptables-save, root
1579 iptables-restore: CommandFilter, iptables-restore, root
1580 ip6tables-save: CommandFilter, ip6tables-save, root
1581 ip6tables-restore: CommandFilter, ip6tables-restore, root
1582
1583 # neutron/agent/linux/iptables_firewall.py
1584 # "iptables", "-A", ...
1585 iptables: CommandFilter, iptables, root
1586 ip6tables: CommandFilter, ip6tables, root
1587
1588 # neutron/agent/linux/iptables_firewall.py
1589 sysctl: CommandFilter, sysctl, root
1590
1591 # neutron/agent/linux/ip_conntrack.py
1592 conntrack: CommandFilter, conntrack, root
1593 linuxbridge_plugin:
1594 pods:
1595 - dhcp_agent
1596 - l3_agent
1597 - lb_agent
1598 - metadata_agent
1599 - ovn_metadata_agent
1600 - ovs_agent
1601 - sriov_agent
1602 content: |
1603 # neutron-rootwrap command filters for nodes on which neutron is
1604 # expected to control network
1605 #
1606 # This file should be owned by (and only-writeable by) the root user
1607
1608 # format seems to be
1609 # cmd-name: filter-name, raw-command, user, args
1610
1611 [Filters]
1612
1613 # linuxbridge-agent
1614 # unclear whether both variants are necessary, but I'm transliterating
1615 # from the old mechanism
1616 brctl: CommandFilter, brctl, root
1617 bridge: CommandFilter, bridge, root
1618
1619 # ip_lib
1620 ip: IpFilter, ip, root
1621 find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
1622 ip_exec: IpNetnsExecFilter, ip, root
1623
1624 # tc commands needed for QoS support
1625 tc_replace_tbf: RegExpFilter, tc, root, tc, qdisc, replace, dev, .+, root, tbf, rate, .+, latency, .+, burst, .+
1626 tc_add_ingress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, ingress, handle, .+
1627 tc_delete: RegExpFilter, tc, root, tc, qdisc, del, dev, .+, .+
1628 tc_show_qdisc: RegExpFilter, tc, root, tc, qdisc, show, dev, .+
1629 tc_show_filters: RegExpFilter, tc, root, tc, filter, show, dev, .+, parent, .+
1630 tc_add_filter: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, all, prio, .+, basic, police, rate, .+, burst, .+, mtu, .+, drop
1631 openvswitch_plugin:
1632 pods:
1633 - dhcp_agent
1634 - l3_agent
1635 - lb_agent
1636 - metadata_agent
1637 - ovn_metadata_agent
1638 - ovs_agent
1639 - sriov_agent
1640 content: |
1641 # neutron-rootwrap command filters for nodes on which neutron is
1642 # expected to control network
1643 #
1644 # This file should be owned by (and only-writeable by) the root user
1645
1646 # format seems to be
1647 # cmd-name: filter-name, raw-command, user, args
1648
1649 [Filters]
1650
1651 # openvswitch-agent
1652 # unclear whether both variants are necessary, but I'm transliterating
1653 # from the old mechanism
1654 ovs-vsctl: CommandFilter, ovs-vsctl, root
1655 # NOTE(yamamoto): of_interface=native doesn't use ovs-ofctl
1656 ovs-ofctl: CommandFilter, ovs-ofctl, root
1657 ovs-appctl: CommandFilter, ovs-appctl, root
1658 kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9
1659 ovsdb-client: CommandFilter, ovsdb-client, root
1660 xe: CommandFilter, xe, root
1661
1662 # ip_lib
1663 ip: IpFilter, ip, root
1664 find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
1665 ip_exec: IpNetnsExecFilter, ip, root
1666
1667 # needed for FDB extension
1668 bridge: CommandFilter, bridge, root
1669 privsep:
1670 pods:
1671 - dhcp_agent
1672 - l3_agent
1673 - lb_agent
1674 - metadata_agent
1675 - ovn_metadata_agent
1676 - ovs_agent
1677 - sriov_agent
1678 - netns_cleanup_cron
1679 content: |
1680 # Command filters to allow privsep daemon to be started via rootwrap.
1681 #
1682 # This file should be owned by (and only-writeable by) the root user
1683
1684 [Filters]
1685
1686 # By installing the following, the local admin is asserting that:
1687 #
1688 # 1. The python module load path used by privsep-helper
1689 # command as root (as started by sudo/rootwrap) is trusted.
1690 # 2. Any oslo.config files matching the --config-file
1691 # arguments below are trusted.
1692 # 3. Users allowed to run sudo/rootwrap with this configuration(*) are
1693 # also allowed to invoke python "entrypoint" functions from
1694 # --privsep_context with the additional (possibly root) privileges
1695 # configured for that context.
1696 #
1697 # (*) ie: the user is allowed by /etc/sudoers to run rootwrap as root
1698 #
1699 # In particular, the oslo.config and python module path must not
1700 # be writeable by the unprivileged user.
1701
1702 # oslo.privsep default neutron context
1703 privsep: PathFilter, privsep-helper, root,
1704 --config-file, /etc,
1705 --privsep_context, neutron.privileged.default,
1706 --privsep_sock_path, /
1707
1708 # NOTE: A second `--config-file` arg can also be added above. Since
1709 # many neutron components are installed like that (eg: by devstack).
1710 # Adjust to suit local requirements.
1711 linux_vxlan:
1712 pods:
1713 - bagpipe_bgp
1714 content: |
1715 # bagpipe-bgp-rootwrap command filters for nodes on which bagpipe-bgp is
1716 # expected to control VXLAN Linux Bridge dataplane
1717 #
1718 # This file should be owned by (and only-writeable by) the root user
1719
1720 # format seems to be
1721 # cmd-name: filter-name, raw-command, user, args
1722
1723 [Filters]
1724
1725 #
1726 modprobe: CommandFilter, modprobe, root
1727
1728 #
1729 brctl: CommandFilter, brctl, root
1730 bridge: CommandFilter, bridge, root
1731
1732 # ip_lib
1733 ip: IpFilter, ip, root
1734 ip_exec: IpNetnsExecFilter, ip, root
1735
1736 # shell (for piped commands)
1737 sh: CommandFilter, sh, root
1738 mpls_ovs_dataplane:
1739 pods:
1740 - bagpipe_bgp
1741 content: |
1742 # bagpipe-bgp-rootwrap command filters for nodes on which bagpipe-bgp is
1743 # expected to control MPLS OpenVSwitch dataplane
1744 #
1745 # This file should be owned by (and only-writeable by) the root user
1746
1747 # format seems to be
1748 # cmd-name: filter-name, raw-command, user, args
1749
1750 [Filters]
1751
1752 # openvswitch
1753 ovs-vsctl: CommandFilter, ovs-vsctl, root
1754 ovs-ofctl: CommandFilter, ovs-ofctl, root
1755
1756 # ip_lib
1757 ip: IpFilter, ip, root
1758 ip_exec: IpNetnsExecFilter, ip, root
1759
1760 # shell (for piped commands)
1761 sh: CommandFilter, sh, root
1762 neutron:
1763 DEFAULT:
1764 metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy
1765 log_config_append: /etc/neutron/logging.conf
1766 # NOTE(portdirect): the bind port should not be defined, and is manipulated
1767 # via the endpoints section.
1768 bind_port: null
1769 default_availability_zones: nova
1770 api_workers: 1
1771 rpc_workers: 4
1772 allow_overlapping_ips: True
1773 state_path: /var/lib/neutron
1774 # core_plugin can be: ml2, calico
1775 core_plugin: ml2
1776 # service_plugin can be: router, odl-router, empty for calico,
1777 # networking_ovn.l3.l3_ovn.OVNL3RouterPlugin for OVN
1778 service_plugins: router
1779 allow_automatic_l3agent_failover: True
1780 l3_ha: True
1781 max_l3_agents_per_router: 2
1782 l3_ha_network_type: vxlan
1783 network_auto_schedule: True
1784 router_auto_schedule: True
1785 # (NOTE)portdirect: if unset this is populated dynamically from the value in
1786 # 'network.backend' to sane defaults.
1787 interface_driver: null
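 # As an illustration, with the default openvswitch backend this typically
 # resolves to something like (assumption, not set explicitly here):
 # interface_driver: openvswitch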
1788 oslo_concurrency:
1789 lock_path: /var/lib/neutron/tmp
1790 database:
1791 max_retries: -1
1792 agent:
1793 root_helper: sudo /var/lib/openstack/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
1794 root_helper_daemon: sudo /var/lib/openstack/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
1795 oslo_messaging_notifications:
1796 driver: messagingv2
1797 oslo_messaging_rabbit:
1798 rabbit_ha_queues: true
1799 oslo_middleware:
1800 enable_proxy_headers_parsing: true
1801 oslo_policy:
1802 policy_file: /etc/neutron/policy.yaml
1803 ovn:
1804 enable_distributed_floating_ip: true
1805 ovn_metadata_enabled: true
1806 nova:
1807 auth_type: password
1808 auth_version: v3
1809 endpoint_type: internal
1810 placement:
1811 auth_type: password
1812 auth_version: v3
1813 endpoint_type: internal
1814 designate:
1815 auth_type: password
1816 auth_version: v3
1817 endpoint_type: internal
1818 allow_reverse_dns_lookup: true
1819 ironic:
1820 endpoint_type: internal
1821 keystone_authtoken:
1822 service_token_roles: service
1823 service_token_roles_required: true
1824 memcache_security_strategy: ENCRYPT
1825 auth_type: password
1826 auth_version: v3
1827 service_type: network
1828 octavia:
1829 request_poll_timeout: 3000
1830 logging:
1831 loggers:
1832 keys:
1833 - root
1834 - neutron
1835 - neutron_taas
1836 handlers:
1837 keys:
1838 - stdout
1839 - stderr
1840 - "null"
1841 formatters:
1842 keys:
1843 - context
1844 - default
1845 logger_root:
1846 level: WARNING
1847 handlers: 'null'
1848 logger_neutron:
1849 level: INFO
1850 handlers:
1851 - stdout
1852 qualname: neutron
1853 logger_neutron_taas:
1854 level: INFO
1855 handlers:
1856 - stdout
1857 qualname: neutron_taas
1858 logger_amqp:
1859 level: WARNING
1860 handlers: stderr
1861 qualname: amqp
1862 logger_amqplib:
1863 level: WARNING
1864 handlers: stderr
1865 qualname: amqplib
1866 logger_eventletwsgi:
1867 level: WARNING
1868 handlers: stderr
1869 qualname: eventlet.wsgi.server
1870 logger_sqlalchemy:
1871 level: WARNING
1872 handlers: stderr
1873 qualname: sqlalchemy
1874 logger_boto:
1875 level: WARNING
1876 handlers: stderr
1877 qualname: boto
1878 handler_null:
1879 class: logging.NullHandler
1880 formatter: default
1881 args: ()
1882 handler_stdout:
1883 class: StreamHandler
1884 args: (sys.stdout,)
1885 formatter: context
1886 handler_stderr:
1887 class: StreamHandler
1888 args: (sys.stderr,)
1889 formatter: context
1890 formatter_context:
1891 class: oslo_log.formatters.ContextFormatter
1892 datefmt: "%Y-%m-%d %H:%M:%S"
1893 formatter_default:
1894 format: "%(message)s"
1895 datefmt: "%Y-%m-%d %H:%M:%S"
1896 plugins:
1897 ml2_conf:
1898 ml2:
1899 extension_drivers: port_security
1900 # (NOTE)portdirect: if unset this is populated dynamically from the value
1901 # in 'network.backend' to sane defaults.
1902 mechanism_drivers: null
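 # As an illustration (assumption, not a value set here), an openvswitch
 # deployment would commonly resolve to something like:
 # mechanism_drivers: openvswitch,l2population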
1903 type_drivers: flat,vlan,vxlan,local
1904 tenant_network_types: vxlan
1905 ml2_type_vxlan:
1906 vni_ranges: 1:1000
1907 vxlan_group: 239.1.1.1
1908 ml2_type_flat:
1909 flat_networks: "*"
1910 # If you want to use the external network as a tagged provider network,
1911 # a range should be specified including the intended VLAN target
1912 # using ml2_type_vlan.network_vlan_ranges:
1913 # ml2_type_vlan:
1914 # network_vlan_ranges: "external:1100:1110"
1915 ml2_type_geneve:
1916 vni_ranges: 1:65536
1917 max_header_size: 38
1918 agent:
1919 extensions: ""
1920 ml2_conf_sriov: null
1921 taas:
1922 taas:
1923 enabled: False
1924 openvswitch_agent:
1925 agent:
1926 tunnel_types: vxlan
1927 l2_population: True
1928 arp_responder: True
1929 ovs:
1930 bridge_mappings: "external:br-ex"
1931 securitygroup:
1932 firewall_driver: neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
1933 linuxbridge_agent:
1934 linux_bridge:
1935 # To define Flat and VLAN connections, in LB we can assign
1936 # specific interface to the flat/vlan network name using:
1937 # physical_interface_mappings: "external:eth3"
1938 # Or we can set the mapping between the network and bridge:
1939 bridge_mappings: "external:br-ex"
1940 # The two above options are exclusive, do not use both of them at once
1941 securitygroup:
1942 firewall_driver: iptables
1943 vxlan:
1944 l2_population: True
1945 arp_responder: True
1946 macvtap_agent: null
1947 sriov_agent:
1948 securitygroup:
1949 firewall_driver: neutron.agent.firewall.NoopFirewallDriver
1950 sriov_nic:
1951 physical_device_mappings: physnet2:enp3s0f1
1952 # NOTE: do not use null here, use an empty string
1953 exclude_devices: ""
1954 dhcp_agent:
1955 DEFAULT:
1956 # (NOTE)portdirect: if unset this is populated dynamically from the value in
1957 # 'network.backend' to sane defaults.
1958 interface_driver: null
1959 dnsmasq_config_file: /etc/neutron/dnsmasq.conf
1960 force_metadata: True
1961 dnsmasq: |
1962 #no-hosts
1963 #port=5353
1964 #cache-size=500
1965 #no-negcache
1966 #dns-forward-max=100
1967 #resolve-file=
1968 #strict-order
1969 #bind-interface
1970 #bind-dynamic
1971 #domain=
1972 #dhcp-range=10.10.10.10,10.10.10.100,24h
1973 #dhcp-lease-max=150
1974 #dhcp-host=11:22:33:44:55:66,ignore
1975 #dhcp-option=3,10.10.10.1
1976 #dhcp-option-force=26,1450
1977
1978 l3_agent:
1979 DEFAULT:
1980 # (NOTE)portdirect: if unset this is populated dynamically from the value in
1981 # 'network.backend' to sane defaults.
1982 interface_driver: null
1983 agent_mode: legacy
1984 metering_agent: null
1985 metadata_agent:
1986 DEFAULT:
1987 # we cannot change the proxy socket path as it is declared
1988 # as a hostPath volume from agent daemonsets
1989 metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy
1990 metadata_proxy_shared_secret: "password"
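 # NOTE: this shared secret is assumed to need to match the secret configured
 # for the nova metadata API; replace the placeholder value in real deployments.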
1991 cache:
1992 enabled: true
1993 backend: dogpile.cache.memcached
1994 bagpipe_bgp: {}
1995 ovn_metadata_agent:
1996 DEFAULT:
1997 # we cannot change the proxy socket path as it is declared
1998 # as a hostPath volume from agent daemonsets
1999 metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy
2000 metadata_proxy_shared_secret: "password"
2001 metadata_workers: 2
2002 cache:
2003 enabled: true
2004 backend: dogpile.cache.memcached
2005 ovs:
2006 ovsdb_connection: unix:/run/openvswitch/db.sock
2007
2008 rabbitmq:
2009 # NOTE(rk760n): add a RabbitMQ policy to mirror messages from notification queues and set an expiration time for them
2010 policies:
2011 - vhost: "neutron"
2012 name: "ha_ttl_neutron"
2013 definition:
2014 # mirror messages to other nodes in the rmq cluster
2015 ha-mode: "all"
2016 ha-sync-mode: "automatic"
2017 # 70s
2018 message-ttl: 70000
2019 priority: 0
2020 apply-to: all
2021 pattern: '^(?!(amq\.|reply_)).*'
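# the pattern above applies the policy to every queue except those whose
# names start with "amq." or "reply_"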
2022 ## NOTE: "besteffort" is meant only for dev environments with mixed compute types.
2023 ## It helps prevent the sriov init script from failing due to a mismatched NIC.
2024 ## In prod environments the target NIC should match, and the init script should fail otherwise.
2025 ## sriov_init:
2026 ## - besteffort
2027 sriov_init:
2028 -
2029 # auto_bridge_add is a table of "bridge: interface" pairs
2030 # To automatically add physical interfaces to specific bridges,
2031 # for example eth3 to bridge br-physnet1, if0 to br0 and iface_two
2032 # to br1 do something like:
2033 #
2034 # auto_bridge_add:
2035 # br-physnet1: eth3
2036 # br0: if0
2037 # br1: iface_two
2038 # br-ex will be added by default
2039 auto_bridge_add:
2040 br-ex: null
2041
2042 # Network off-loading configuration
2043 netoffload:
2044 enabled: false
2045 asap2:
2046 # - dev: enp97s0f0
2047 # vfs: 16
2048
2049 # configuration of OVS DPDK bridges and NICs
2050 # this is a separate section and not part of the auto_bridge_add section
2051 # because additional parameters are needed
2052 ovs_dpdk:
2053 enabled: false
2054 # Setting update_dpdk_bond_config to true keeps the default behaviour,
2055 # which may disrupt OVS DPDK traffic when the neutron OVS agent restarts
2056 # or when DPDK NIC/bond configurations are changed.
2057 # Setting it to false configures DPDK on the first run only and skips
2058 # NIC/bond configuration on subsequent restarts or config updates.
2059 update_dpdk_bond_config: true
2060 driver: uio_pci_generic
2061 # In case bonds are configured, the nics which are part of those bonds
2062 # must NOT be provided here.
2063 nics:
2064 - name: dpdk0
2065 pci_id: '0000:05:00.0'
2066 # Set VF Index in case some particular VF(s) need to be
2067 # used with ovs-dpdk.
2068 # vf_index: 0
2069 bridge: br-phy
2070 migrate_ip: true
2071 n_rxq: 2
2072 n_txq: 2
2073 pmd_rxq_affinity: "0:3,1:27"
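# (pmd_rxq_affinity format: <queue_id>:<core_id> pairs, comma separated;
# the example above pins rx queue 0 to core 3 and rx queue 1 to core 27)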
2074 ofport_request: 1
2075 # optional parameters for tuning the OVS DPDK config
2076 # in alignment with the available hardware resources
2077 # mtu: 2000
2078 # n_rxq_size: 1024
2079 # n_txq_size: 1024
2080 # vhost-iommu-support: true
2081 bridges:
2082 - name: br-phy
2083 # optional parameter, in case tunnel traffic needs to be transported over a vlan underlay
2084 # - tunnel_underlay_vlan: 45
2085 # Optional parameter for configuring bonding in OVS-DPDK
2086 # - name: br-phy-bond0
2087 # bonds:
2088 # - name: dpdkbond0
2089 # bridge: br-phy-bond0
2090 # # The IP from the first nic in nics list shall be used
2091 # migrate_ip: true
2092 # mtu: 2000
2093 # # Please note that n_rxq is set for each NIC individually
2094 # # rather than denoting the total number of rx queues for
2095 # # the bond as a whole. So setting n_rxq = 2 below for ex.
2096 # # would be 4 rx queues in total for the bond.
2097 # # Same for n_txq
2098 # n_rxq: 2
2099 # n_txq: 2
2100 # ofport_request: 1
2101 # n_rxq_size: 1024
2102 # n_txq_size: 1024
2103 # vhost-iommu-support: true
2104 # ovs_options: "bond_mode=active-backup"
2105 # nics:
2106 # - name: dpdk_b0s0
2107 # pci_id: '0000:06:00.0'
2108 # pmd_rxq_affinity: "0:3,1:27"
2109 # # Set VF Index in case some particular VF(s) need to be
2110 # # used with ovs-dpdk. In which case pci_id of PF must be
2111 # # provided above.
2112 # # vf_index: 0
2113 # - name: dpdk_b0s1
2114 # pci_id: '0000:07:00.0'
2115 # pmd_rxq_affinity: "0:3,1:27"
2116 # # Set VF Index in case some particular VF(s) need to be
2117 # # used with ovs-dpdk. In which case pci_id of PF must be
2118 # # provided above.
2119 # # vf_index: 0
2120 #
2121 # Set the log level for each target module (default level is always dbg)
2122 # Supported log levels are: off, emer, err, warn, info, dbg
2123 #
2124 # modules:
2125 # - name: dpdk
2126 # log_level: info
2127
2128# Names of secrets used by bootstrap and environmental checks
2129secrets:
2130 identity:
2131 admin: neutron-keystone-admin
2132 neutron: neutron-keystone-user
2133 test: neutron-keystone-test
2134 oslo_db:
2135 admin: neutron-db-admin
2136 neutron: neutron-db-user
2137 oslo_messaging:
2138 admin: neutron-rabbitmq-admin
2139 neutron: neutron-rabbitmq-user
2140 tls:
2141 compute_metadata:
2142 metadata:
2143 internal: metadata-tls-metadata
2144 network:
2145 server:
2146 public: neutron-tls-public
2147 internal: neutron-tls-server
2148 oci_image_registry:
2149 neutron: neutron-oci-image-registry
2150
2151# typically overridden by environmental
2152# values, but should include all endpoints
2153# required by this chart
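# a minimal sketch of such an override (hypothetical file name and values),
# applied with something like:
#   helm upgrade --install neutron ./neutron --namespace openstack -f values-site.yaml
# where values-site.yaml contains, for example:
#   endpoints:
#     identity:
#       auth:
#         admin:
#           password: "site-specific-secret"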
2154endpoints:
2155 cluster_domain_suffix: cluster.local
2156 local_image_registry:
2157 name: docker-registry
2158 namespace: docker-registry
2159 hosts:
2160 default: localhost
2161 internal: docker-registry
2162 node: localhost
2163 host_fqdn_override:
2164 default: null
2165 port:
2166 registry:
2167 node: 5000
2168 oci_image_registry:
2169 name: oci-image-registry
2170 namespace: oci-image-registry
2171 auth:
2172 enabled: false
2173 neutron:
2174 username: neutron
2175 password: password
2176 hosts:
2177 default: localhost
2178 host_fqdn_override:
2179 default: null
2180 port:
2181 registry:
2182 default: null
2183 oslo_db:
2184 auth:
2185 admin:
2186 username: root
2187 password: password
2188 secret:
2189 tls:
2190 internal: mariadb-tls-direct
2191 neutron:
2192 username: neutron
2193 password: password
2194 hosts:
2195 default: mariadb
2196 host_fqdn_override:
2197 default: null
2198 path: /neutron
2199 scheme: mysql+pymysql
2200 port:
2201 mysql:
2202 default: 3306
2203 oslo_messaging:
2204 auth:
2205 admin:
2206 username: rabbitmq
2207 password: password
2208 secret:
2209 tls:
2210 internal: rabbitmq-tls-direct
2211 neutron:
2212 username: neutron
2213 password: password
2214 statefulset:
2215 replicas: 2
2216 name: rabbitmq-rabbitmq
2217 hosts:
2218 default: rabbitmq
2219 host_fqdn_override:
2220 default: null
2221 path: /neutron
2222 scheme: rabbit
2223 port:
2224 amqp:
2225 default: 5672
2226 http:
2227 default: 15672
2228 oslo_cache:
2229 auth:
2230 # NOTE(portdirect): this is used to define the value for the keystone
2231 # authtoken cache encryption key. If not set, it will be populated
2232 # automatically with a random value, but to take advantage of
2233 # this feature all services should be configured to use the same
2234 # key and memcached service.
2235 memcache_secret_key: null
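# a hedged example: set an identical key in each chart's values, e.g.
#   endpoints:
#     oslo_cache:
#       auth:
#         memcache_secret_key: "shared-example-key"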
2236 hosts:
2237 default: memcached
2238 host_fqdn_override:
2239 default: null
2240 port:
2241 memcache:
2242 default: 11211
2243 compute:
2244 name: nova
2245 hosts:
2246 default: nova-api
2247 public: nova
2248 host_fqdn_override:
2249 default: null
2250 path:
2251 default: "/v2.1/%(tenant_id)s"
2252 scheme:
2253 default: 'http'
2254 port:
2255 api:
2256 default: 8774
2257 public: 80
2258 novncproxy:
2259 default: 6080
2260 compute_metadata:
2261 name: nova
2262 hosts:
2263 default: nova-metadata
2264 public: metadata
2265 host_fqdn_override:
2266 default: null
2267 path:
2268 default: /
2269 scheme:
2270 default: 'http'
2271 port:
2272 metadata:
2273 default: 8775
2274 public: 80
2275 identity:
2276 name: keystone
2277 auth:
2278 admin:
2279 region_name: RegionOne
2280 username: admin
2281 password: password
2282 project_name: admin
2283 user_domain_name: default
2284 project_domain_name: default
2285 neutron:
2286 role: admin
2287 region_name: RegionOne
2288 username: neutron
2289 password: password
2290 project_name: service
2291 user_domain_name: service
2292 project_domain_name: service
2293 nova:
2294 region_name: RegionOne
2295 project_name: service
2296 username: nova
2297 password: password
2298 user_domain_name: service
2299 project_domain_name: service
2300 placement:
2301 region_name: RegionOne
2302 project_name: service
2303 username: placement
2304 password: password
2305 user_domain_name: service
2306 project_domain_name: service
2307 designate:
2308 region_name: RegionOne
2309 project_name: service
2310 username: designate
2311 password: password
2312 user_domain_name: service
2313 project_domain_name: service
2314 ironic:
2315 region_name: RegionOne
2316 project_name: service
2317 username: ironic
2318 password: password
2319 user_domain_name: service
2320 project_domain_name: service
2321 test:
2322 role: admin
2323 region_name: RegionOne
2324 username: neutron-test
2325 password: password
2326 # NOTE: this project will be purged and reset if
2327 # conf.rally_tests.force_project_purge is set to true
2328 # which may be required upon test failure. Be aware that this will
2329 # expunge all OpenStack objects, so if this option is used a separate
2330 # project should be used for each helm test, and it should be ensured
2331 # that the project is not in use by other tenants.
2332 project_name: test
2333 user_domain_name: service
2334 project_domain_name: service
2335 hosts:
2336 default: keystone
2337 internal: keystone-api
2338 host_fqdn_override:
2339 default: null
2340 path:
2341 default: /v3
2342 scheme:
2343 default: http
2344 port:
2345 api:
2346 default: 80
2347 internal: 5000
2348 network:
2349 name: neutron
2350 hosts:
2351 default: neutron-server
2352 public: neutron
2353 host_fqdn_override:
2354 default: null
2355 # NOTE(portdirect): this chart supports TLS for FQDN-overridden public
2356 # endpoints using the following format:
2357 # public:
2358 # host: null
2359 # tls:
2360 # crt: null
2361 # key: null
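# a filled-in sketch of the above (hypothetical hostname and certificate):
#   host_fqdn_override:
#     public:
#       host: neutron.example.com
#       tls:
#         crt: <PEM-encoded certificate>
#         key: <PEM-encoded private key>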
2362 path:
2363 default: null
2364 scheme:
2365 default: 'http'
2366 service: 'http'
2367 port:
2368 api:
2369 default: 9696
2370 public: 80
2371 service: 9696
2372 load_balancer:
2373 name: octavia
2374 hosts:
2375 default: octavia-api
2376 public: octavia
2377 host_fqdn_override:
2378 default: null
2379 path:
2380 default: null
2381 scheme:
2382 default: http
2383 port:
2384 api:
2385 default: 9876
2386 public: 80
2387 fluentd:
2388 namespace: osh-infra
2389 name: fluentd
2390 hosts:
2391 default: fluentd-logging
2392 host_fqdn_override:
2393 default: null
2394 path:
2395 default: null
2396 scheme: 'http'
2397 port:
2398 service:
2399 default: 24224
2400 metrics:
2401 default: 24220
2402 dns:
2403 name: designate
2404 hosts:
2405 default: designate-api
2406 public: designate
2407 host_fqdn_override:
2408 default: null
2409 path:
2410 default: /
2411 scheme:
2412 default: 'http'
2413 port:
2414 api:
2415 default: 9001
2416 public: 80
2417 baremetal:
2418 name: ironic
2419 hosts:
2420 default: ironic-api
2421 public: ironic
2422 host_fqdn_override:
2423 default: null
2424 path:
2425 default: null
2426 scheme:
2427 default: 'http'
2428 port:
2429 api:
2430 default: 6385
2431 public: 80
2432 # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress.
2433 # They are used to enable the egress K8s network policy.
2434 kube_dns:
2435 namespace: kube-system
2436 name: kubernetes-dns
2437 hosts:
2438 default: kube-dns
2439 host_fqdn_override:
2440 default: null
2441 path:
2442 default: null
2443 scheme: http
2444 port:
2445 dns:
2446 default: 53
2447 protocol: UDP
2448 ingress:
2449 namespace: null
2450 name: ingress
2451 hosts:
2452 default: ingress
2453 port:
2454 ingress:
2455 default: 80
2456
2457network_policy:
2458 neutron:
2459 # TODO(lamt): Need to tighten this ingress for security.
2460 ingress:
2461 - {}
2462 egress:
2463 - {}
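# a more restrictive sketch (assumes pods carry the "application" label used
# elsewhere in openstack-helm; adjust selectors and ports to the deployment):
#  neutron:
#    ingress:
#      - from:
#          - podSelector:
#              matchLabels:
#                application: nova
#        ports:
#          - protocol: TCP
#            port: 9696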
2464
2465helm3_hook: true
2466
2467health_probe:
2468 logging:
2469 level: ERROR
2470
2471tls:
2472 identity: false
2473 oslo_messaging: false
2474 oslo_db: false
2475
2476manifests:
2477 certificates: false
2478 configmap_bin: true
2479 configmap_etc: true
2480 daemonset_dhcp_agent: true
2481 daemonset_l3_agent: true
2482 daemonset_lb_agent: true
2483 daemonset_metadata_agent: true
2484 daemonset_ovs_agent: true
2485 daemonset_sriov_agent: true
2486 daemonset_l2gw_agent: false
2487 daemonset_bagpipe_bgp: false
2488 daemonset_netns_cleanup_cron: true
2489 deployment_ironic_agent: false
2490 deployment_server: true
2491 ingress_server: true
2492 job_bootstrap: true
2493 job_db_init: true
2494 job_db_sync: true
2495 job_db_drop: false
2496 job_image_repo_sync: true
2497 job_ks_endpoints: true
2498 job_ks_service: true
2499 job_ks_user: true
2500 job_rabbit_init: true
2501 pdb_server: true
2502 pod_rally_test: true
2503 network_policy: false
2504 secret_db: true
2505 secret_ingress_tls: true
2506 secret_keystone: true
2507 secret_rabbitmq: true
2508 secret_registry: true
2509 service_ingress_server: true
2510 service_server: true
2511...