1# Licensed under the Apache License, Version 2.0 (the "License");
2# you may not use this file except in compliance with the License.
3# You may obtain a copy of the License at
4#
5# http://www.apache.org/licenses/LICENSE-2.0
6#
7# Unless required by applicable law or agreed to in writing, software
8# distributed under the License is distributed on an "AS IS" BASIS,
9# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10# See the License for the specific language governing permissions and
11# limitations under the License.
12
13# Default values for neutron.
14# This is a YAML-formatted file.
15# Declare name/value pairs to be passed into your templates.
16# name: value
17
18---
19release_group: null
20
21images:
22 tags:
23 bootstrap: docker.io/openstackhelm/heat:stein-ubuntu_bionic
24 test: docker.io/xrally/xrally-openstack:2.0.0
25 purge_test: docker.io/openstackhelm/ospurge:latest
26 db_init: docker.io/openstackhelm/heat:stein-ubuntu_bionic
27 neutron_db_sync: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
28 db_drop: docker.io/openstackhelm/heat:stein-ubuntu_bionic
29 rabbit_init: docker.io/rabbitmq:3.7-management
30 ks_user: docker.io/openstackhelm/heat:stein-ubuntu_bionic
31 ks_service: docker.io/openstackhelm/heat:stein-ubuntu_bionic
32 ks_endpoints: docker.io/openstackhelm/heat:stein-ubuntu_bionic
33 netoffload: ghcr.io/vexxhost/netoffload:v1.0.1
34 neutron_server: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
35 neutron_dhcp: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
36 neutron_metadata: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
37 neutron_ovn_metadata: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
38 neutron_l3: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
39 neutron_l2gw: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
40 neutron_openvswitch_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
41 neutron_linuxbridge_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
42 neutron_sriov_agent: docker.io/openstackhelm/neutron:stein-18.04-sriov
43 neutron_sriov_agent_init: docker.io/openstackhelm/neutron:stein-18.04-sriov
44 neutron_bagpipe_bgp: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
45 neutron_ironic_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
46 neutron_netns_cleanup_cron: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
47 dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
48 image_repo_sync: docker.io/docker:17.07.0
49 pull_policy: "IfNotPresent"
50 local_registry:
51 active: false
52 exclude:
53 - dep_check
54 - image_repo_sync
55
56labels:
57 agent:
58 dhcp:
59 node_selector_key: openstack-control-plane
60 node_selector_value: enabled
61 l3:
62 node_selector_key: openstack-control-plane
63 node_selector_value: enabled
64 metadata:
65 node_selector_key: openstack-control-plane
66 node_selector_value: enabled
67 l2gw:
68 node_selector_key: openstack-control-plane
69 node_selector_value: enabled
70 job:
71 node_selector_key: openstack-control-plane
72 node_selector_value: enabled
73 lb:
74 node_selector_key: linuxbridge
75 node_selector_value: enabled
76 # openvswitch is a special case, requiring a special
77 # label that can apply to both control hosts
78 # and compute hosts, until we get more sophisticated
79 # with our daemonset scheduling
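 # As an illustration only (not something the chart applies for you), such a
 # label could be added to a node with kubectl, e.g.:
 #   kubectl label node <node-name> openvswitch=enabled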
80 ovs:
81 node_selector_key: openvswitch
82 node_selector_value: enabled
83 sriov:
84 node_selector_key: sriov
85 node_selector_value: enabled
86 bagpipe_bgp:
87 node_selector_key: openstack-compute-node
88 node_selector_value: enabled
89 server:
90 node_selector_key: openstack-control-plane
91 node_selector_value: enabled
92 ironic_agent:
93 node_selector_key: openstack-control-plane
94 node_selector_value: enabled
95 netns_cleanup_cron:
96 node_selector_key: openstack-control-plane
97 node_selector_value: enabled
98 test:
99 node_selector_key: openstack-control-plane
100 node_selector_value: enabled
101
102network:
103 # Provide the type of network wiring that will be used.
104 backend:
105 - openvswitch
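 # Illustrative alternative (an assumption for this example, only valid when the
 # matching agents are enabled elsewhere in these values):
 # backend:
 #   - linuxbridge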
106 # NOTE(Portdirect): Share network namespaces with the host,
107 # allowing agents to be restarted without packet loss and simpler
108 # debugging. This feature requires mount propagation support.
109 share_namespaces: true
110 interface:
111 # Tunnel interface will be used for VXLAN tunneling.
112 tunnel: null
113 # If tunnel is null, a fallback mechanism searches for an interface
114 # that has a route within the tunnel_network_cidr range.
115 tunnel_network_cidr: "0/0"
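 # Illustrative examples (interface name and CIDR below are assumptions, not
 # defaults): either pin the tunnel interface explicitly, e.g.
 #   tunnel: ens3
 # or leave tunnel null and narrow the fallback search, e.g.
 #   tunnel_network_cidr: "10.0.0.0/24"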
116 # To perform setup of network interfaces using the SR-IOV init
117 # container you can use a section similar to:
118 # sriov:
119 # - device: ${DEV}
120 # num_vfs: 8
121 # mtu: 9214
122 # promisc: false
123 # qos:
124 # - vf_num: 0
125 # share: 10
126 # queues_per_vf:
127 # - num_queues: 16
128 # exclude_vf: 0,11,21
129 server:
130 ingress:
131 public: true
132 classes:
133 namespace: "nginx"
134 cluster: "nginx-cluster"
135 annotations:
136 nginx.ingress.kubernetes.io/rewrite-target: /
137 external_policy_local: false
138 node_port:
139 enabled: false
140 port: 30096
141
142bootstrap:
143 enabled: false
144 ks_user: neutron
145 script: |
146 openstack token issue
147
148dependencies:
149 dynamic:
150 common:
151 local_image_registry:
152 jobs:
153 - neutron-image-repo-sync
154 services:
155 - endpoint: node
156 service: local_image_registry
157 targeted:
158 sriov: {}
159 l2gateway: {}
160 bagpipe_bgp: {}
161 ovn:
162 server:
163 pod: null
164 openvswitch:
165 dhcp:
166 pod:
167 - requireSameNode: true
168 labels:
169 application: neutron
170 component: neutron-ovs-agent
171 l3:
172 pod:
173 - requireSameNode: true
174 labels:
175 application: neutron
176 component: neutron-ovs-agent
177 metadata:
178 pod:
179 - requireSameNode: true
180 labels:
181 application: neutron
182 component: neutron-ovs-agent
183 linuxbridge:
184 dhcp:
185 pod:
186 - requireSameNode: true
187 labels:
188 application: neutron
189 component: neutron-lb-agent
190 l3:
191 pod:
192 - requireSameNode: true
193 labels:
194 application: neutron
195 component: neutron-lb-agent
196 metadata:
197 pod:
198 - requireSameNode: true
199 labels:
200 application: neutron
201 component: neutron-lb-agent
202 lb_agent:
203 pod: null
204 static:
205 bootstrap:
206 services:
207 - endpoint: internal
208 service: network
209 - endpoint: internal
210 service: compute
211 db_drop:
212 services:
213 - endpoint: internal
214 service: oslo_db
215 db_init:
216 services:
217 - endpoint: internal
218 service: oslo_db
219 db_sync:
220 jobs:
221 - neutron-db-init
222 services:
223 - endpoint: internal
224 service: oslo_db
225 dhcp:
226 pod: null
227 jobs:
228 - neutron-rabbit-init
229 services:
230 - endpoint: internal
231 service: oslo_messaging
232 - endpoint: internal
233 service: network
234 - endpoint: internal
235 service: compute
236 ks_endpoints:
237 jobs:
238 - neutron-ks-service
239 services:
240 - endpoint: internal
241 service: identity
242 ks_service:
243 services:
244 - endpoint: internal
245 service: identity
246 ks_user:
247 services:
248 - endpoint: internal
249 service: identity
250 rabbit_init:
251 services:
252 - service: oslo_messaging
253 endpoint: internal
254 l3:
255 pod: null
256 jobs:
257 - neutron-rabbit-init
258 services:
259 - endpoint: internal
260 service: oslo_messaging
261 - endpoint: internal
262 service: network
263 - endpoint: internal
264 service: compute
265 lb_agent:
266 pod: null
267 jobs:
268 - neutron-rabbit-init
269 services:
270 - endpoint: internal
271 service: oslo_messaging
272 - endpoint: internal
273 service: network
274 metadata:
275 pod: null
276 jobs:
277 - neutron-rabbit-init
278 services:
279 - endpoint: internal
280 service: oslo_messaging
281 - endpoint: internal
282 service: network
283 - endpoint: internal
284 service: compute
285 - endpoint: public
286 service: compute_metadata
287 ovn_metadata:
288 pod:
289 - requireSameNode: true
290 labels:
291 application: ovn
292 component: ovn-controller
293 services:
294 - endpoint: internal
295 service: compute_metadata
296 - endpoint: internal
297 service: network
298 ovs_agent:
299 jobs:
300 - neutron-rabbit-init
301 pod:
302 - requireSameNode: true
303 labels:
304 application: openvswitch
305 component: server
306 services:
307 - endpoint: internal
308 service: oslo_messaging
309 - endpoint: internal
310 service: network
311 server:
312 jobs:
313 - neutron-db-sync
314 - neutron-ks-user
315 - neutron-ks-endpoints
316 - neutron-rabbit-init
317 services:
318 - endpoint: internal
319 service: oslo_db
320 - endpoint: internal
321 service: oslo_messaging
322 - endpoint: internal
323 service: oslo_cache
324 - endpoint: internal
325 service: identity
326 ironic_agent:
327 jobs:
328 - neutron-db-sync
329 - neutron-ks-user
330 - neutron-ks-endpoints
331 - neutron-rabbit-init
332 services:
333 - endpoint: internal
334 service: oslo_db
335 - endpoint: internal
336 service: oslo_messaging
337 - endpoint: internal
338 service: oslo_cache
339 - endpoint: internal
340 service: identity
341 tests:
342 services:
343 - endpoint: internal
344 service: network
345 - endpoint: internal
346 service: compute
347 image_repo_sync:
348 services:
349 - endpoint: internal
350 service: local_image_registry
351
352pod:
353 use_fqdn:
354 neutron_agent: true
355 probes:
356 rpc_timeout: 60
357 rpc_retries: 2
358 dhcp_agent:
359 dhcp_agent:
360 readiness:
361 enabled: true
362 params:
363 initialDelaySeconds: 30
364 periodSeconds: 190
365 timeoutSeconds: 185
366 liveness:
367 enabled: true
368 params:
369 initialDelaySeconds: 120
370 periodSeconds: 600
371 timeoutSeconds: 580
372 l3_agent:
373 l3_agent:
374 readiness:
375 enabled: true
376 params:
377 initialDelaySeconds: 30
378 periodSeconds: 190
379 timeoutSeconds: 185
380 liveness:
381 enabled: true
382 params:
383 initialDelaySeconds: 120
384 periodSeconds: 600
385 timeoutSeconds: 580
386 lb_agent:
387 lb_agent:
388 readiness:
389 enabled: true
390 metadata_agent:
391 metadata_agent:
392 readiness:
393 enabled: true
394 params:
395 initialDelaySeconds: 30
396 periodSeconds: 190
397 timeoutSeconds: 185
398 liveness:
399 enabled: true
400 params:
401 initialDelaySeconds: 120
402 periodSeconds: 600
403 timeoutSeconds: 580
404 ovn_metadata_agent:
405 ovn_metadata_agent:
406 readiness:
407 enabled: true
408 params:
409 initialDelaySeconds: 30
410 periodSeconds: 190
411 timeoutSeconds: 185
412 liveness:
413 enabled: true
414 params:
415 initialDelaySeconds: 120
416 periodSeconds: 600
417 timeoutSeconds: 580
418 ovs_agent:
419 ovs_agent:
420 readiness:
421 enabled: true
422 params:
423 timeoutSeconds: 10
424 liveness:
425 enabled: true
426 params:
427 initialDelaySeconds: 120
428 periodSeconds: 600
429 timeoutSeconds: 580
430 sriov_agent:
431 sriov_agent:
432 readiness:
433 enabled: true
434 params:
435 initialDelaySeconds: 30
436 periodSeconds: 190
437 timeoutSeconds: 185
438 bagpipe_bgp:
439 bagpipe_bgp:
440 readiness:
441 enabled: true
442 params:
443 liveness:
444 enabled: true
445 params:
446 initialDelaySeconds: 60
447 l2gw_agent:
448 l2gw_agent:
449 readiness:
450 enabled: true
451 params:
452 initialDelaySeconds: 30
453 periodSeconds: 15
454 timeoutSeconds: 65
455 liveness:
456 enabled: true
457 params:
458 initialDelaySeconds: 120
459 periodSeconds: 90
460 timeoutSeconds: 70
461 server:
462 server:
463 readiness:
464 enabled: true
465 params:
466 periodSeconds: 15
467 timeoutSeconds: 10
468 liveness:
469 enabled: true
470 params:
471 initialDelaySeconds: 60
472 periodSeconds: 15
473 timeoutSeconds: 10
474 security_context:
475 neutron_dhcp_agent:
476 pod:
477 runAsUser: 42424
478 container:
479 neutron_dhcp_agent:
480 readOnlyRootFilesystem: true
481 privileged: true
482 neutron_l2gw_agent:
483 pod:
484 runAsUser: 42424
485 container:
486 neutron_l2gw_agent:
487 readOnlyRootFilesystem: true
488 privileged: true
489 neutron_bagpipe_bgp:
490 pod:
491 runAsUser: 42424
492 container:
493 neutron_bagpipe_bgp:
494 readOnlyRootFilesystem: true
495 privileged: true
496 neutron_l3_agent:
497 pod:
498 runAsUser: 42424
499 container:
500 neutron_l3_agent:
501 readOnlyRootFilesystem: true
502 privileged: true
503 neutron_lb_agent:
504 pod:
505 runAsUser: 42424
506 container:
507 neutron_lb_agent_kernel_modules:
508 capabilities:
509 add:
510 - SYS_MODULE
511 - SYS_CHROOT
512 runAsUser: 0
513 readOnlyRootFilesystem: true
514 neutron_lb_agent_init:
515 privileged: true
516 runAsUser: 0
517 readOnlyRootFilesystem: true
518 neutron_lb_agent:
519 readOnlyRootFilesystem: true
520 privileged: true
521 neutron_metadata_agent:
522 pod:
523 runAsUser: 42424
524 container:
525 neutron_metadata_agent_init:
526 runAsUser: 0
527 readOnlyRootFilesystem: true
528 neutron_ovn_metadata_agent:
529 pod:
530 runAsUser: 42424
531 container:
532 neutron_ovn_metadata_agent_init:
533 runAsUser: 0
534 readOnlyRootFilesystem: true
535 neutron_ovs_agent:
536 pod:
537 runAsUser: 42424
538 container:
539 neutron_openvswitch_agent_kernel_modules:
540 capabilities:
541 add:
542 - SYS_MODULE
543 - SYS_CHROOT
544 runAsUser: 0
545 readOnlyRootFilesystem: true
546 netoffload:
547 privileged: true
548 runAsUser: 0
549 readOnlyRootFilesystem: true
550 neutron_ovs_agent_init:
551 privileged: true
552 runAsUser: 0
553 readOnlyRootFilesystem: true
554 neutron_ovs_agent:
555 readOnlyRootFilesystem: true
556 privileged: true
557 neutron_server:
558 pod:
559 runAsUser: 42424
560 container:
561 nginx:
562 runAsUser: 0
563 readOnlyRootFilesystem: false
564 neutron_server:
565 allowPrivilegeEscalation: false
566 readOnlyRootFilesystem: true
567 neutron_sriov_agent:
568 pod:
569 runAsUser: 42424
570 container:
571 neutron_sriov_agent_init:
572 privileged: true
573 runAsUser: 0
574 readOnlyRootFilesystem: false
575 neutron_sriov_agent:
576 readOnlyRootFilesystem: true
577 privileged: true
578 neutron_ironic_agent:
579 pod:
580 runAsUser: 42424
581 container:
582 neutron_ironic_agent:
583 allowPrivilegeEscalation: false
584 readOnlyRootFilesystem: true
585 neutron_netns_cleanup_cron:
586 pod:
587 runAsUser: 42424
588 container:
589 neutron_netns_cleanup_cron:
590 readOnlyRootFilesystem: true
591 privileged: true
592 affinity:
593 anti:
594 type:
595 default: preferredDuringSchedulingIgnoredDuringExecution
596 topologyKey:
597 default: kubernetes.io/hostname
598 weight:
599 default: 10
600 tolerations:
601 neutron:
602 enabled: false
603 tolerations:
604 - key: node-role.kubernetes.io/master
605 operator: Exists
606 effect: NoSchedule
607 - key: node-role.kubernetes.io/control-plane
608 operator: Exists
609 effect: NoSchedule
610 mounts:
611 neutron_server:
612 init_container: null
613 neutron_server:
614 volumeMounts:
615 volumes:
616 neutron_dhcp_agent:
617 init_container: null
618 neutron_dhcp_agent:
619 volumeMounts:
620 volumes:
621 neutron_l3_agent:
622 init_container: null
623 neutron_l3_agent:
624 volumeMounts:
625 volumes:
626 neutron_lb_agent:
627 init_container: null
628 neutron_lb_agent:
629 volumeMounts:
630 volumes:
631 neutron_metadata_agent:
632 init_container: null
633 neutron_metadata_agent:
634 volumeMounts:
635 volumes:
636 neutron_ovn_metadata_agent:
637 init_container: null
638 neutron_ovn_metadata_agent:
639 volumeMounts:
640 volumes:
641 neutron_ovs_agent:
642 init_container: null
643 neutron_ovs_agent:
644 volumeMounts:
645 volumes:
646 neutron_sriov_agent:
647 init_container: null
648 neutron_sriov_agent:
649 volumeMounts:
650 volumes:
651 neutron_l2gw_agent:
652 init_container: null
653 neutron_l2gw_agent:
654 volumeMounts:
655 volumes:
656 bagpipe_bgp:
657 init_container: null
658 bagpipe_bgp:
659 volumeMounts:
660 volumes:
661 neutron_ironic_agent:
662 init_container: null
663 neutron_ironic_agent:
664 volumeMounts:
665 volumes:
666 neutron_netns_cleanup_cron:
667 init_container: null
668 neutron_netns_cleanup_cron:
669 volumeMounts:
670 volumes:
671 neutron_tests:
672 init_container: null
673 neutron_tests:
674 volumeMounts:
675 volumes:
676 neutron_bootstrap:
677 init_container: null
678 neutron_bootstrap:
679 volumeMounts:
680 volumes:
681 neutron_db_sync:
682 neutron_db_sync:
683 volumeMounts:
684 - name: db-sync-conf
685 mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini
686 subPath: ml2_conf.ini
687 readOnly: true
688 volumes:
689 replicas:
690 server: 1
691 ironic_agent: 1
692 lifecycle:
693 upgrades:
694 deployments:
695 revision_history: 3
696 pod_replacement_strategy: RollingUpdate
697 rolling_update:
698 max_unavailable: 1
699 max_surge: 3
700 daemonsets:
701 pod_replacement_strategy: RollingUpdate
702 dhcp_agent:
703 enabled: true
704 min_ready_seconds: 0
705 max_unavailable: 1
706 l3_agent:
707 enabled: true
708 min_ready_seconds: 0
709 max_unavailable: 1
710 lb_agent:
711 enabled: true
712 min_ready_seconds: 0
713 max_unavailable: 1
714 metadata_agent:
715 enabled: true
716 min_ready_seconds: 0
717 max_unavailable: 1
718 ovn_metadata_agent:
719 enabled: true
720 min_ready_seconds: 0
721 max_unavailable: 1
722 ovs_agent:
723 enabled: true
724 min_ready_seconds: 0
725 max_unavailable: 1
726 sriov_agent:
727 enabled: true
728 min_ready_seconds: 0
729 max_unavailable: 1
730 netns_cleanup_cron:
731 enabled: true
732 min_ready_seconds: 0
733 max_unavailable: 1
734 disruption_budget:
735 server:
736 min_available: 0
737 termination_grace_period:
738 server:
739 timeout: 30
740 ironic_agent:
741 timeout: 30
742 resources:
743 enabled: false
744 agent:
745 dhcp:
746 requests:
747 memory: "128Mi"
748 cpu: "100m"
749 limits:
750 memory: "1024Mi"
751 cpu: "2000m"
752 l3:
753 requests:
754 memory: "128Mi"
755 cpu: "100m"
756 limits:
757 memory: "1024Mi"
758 cpu: "2000m"
759 lb:
760 requests:
761 memory: "128Mi"
762 cpu: "100m"
763 limits:
764 memory: "1024Mi"
765 cpu: "2000m"
766 metadata:
767 requests:
768 memory: "128Mi"
769 cpu: "100m"
770 limits:
771 memory: "1024Mi"
772 cpu: "2000m"
773 ovn_metadata:
774 requests:
775 memory: "128Mi"
776 cpu: "100m"
777 limits:
778 memory: "1024Mi"
779 cpu: "2000m"
780 ovs:
781 requests:
782 memory: "128Mi"
783 cpu: "100m"
784 limits:
785 memory: "1024Mi"
786 cpu: "2000m"
787 sriov:
788 requests:
789 memory: "128Mi"
790 cpu: "100m"
791 limits:
792 memory: "1024Mi"
793 cpu: "2000m"
794 l2gw:
795 requests:
796 memory: "128Mi"
797 cpu: "100m"
798 limits:
799 memory: "1024Mi"
800 cpu: "2000m"
801 bagpipe_bgp:
802 requests:
803 memory: "128Mi"
804 cpu: "100m"
805 limits:
806 memory: "1024Mi"
807 cpu: "2000m"
808 server:
809 requests:
810 memory: "128Mi"
811 cpu: "100m"
812 limits:
813 memory: "1024Mi"
814 cpu: "2000m"
815 ironic_agent:
816 requests:
817 memory: "128Mi"
818 cpu: "100m"
819 limits:
820 memory: "1024Mi"
821 cpu: "2000m"
822 netns_cleanup_cron:
823 requests:
824 memory: "128Mi"
825 cpu: "100m"
826 limits:
827 memory: "1024Mi"
828 cpu: "2000m"
829 jobs:
830 bootstrap:
831 requests:
832 memory: "128Mi"
833 cpu: "100m"
834 limits:
835 memory: "1024Mi"
836 cpu: "2000m"
837 db_init:
838 requests:
839 memory: "128Mi"
840 cpu: "100m"
841 limits:
842 memory: "1024Mi"
843 cpu: "2000m"
844 rabbit_init:
845 requests:
846 memory: "128Mi"
847 cpu: "100m"
848 limits:
849 memory: "1024Mi"
850 cpu: "2000m"
851 db_sync:
852 requests:
853 memory: "128Mi"
854 cpu: "100m"
855 limits:
856 memory: "1024Mi"
857 cpu: "2000m"
858 db_drop:
859 requests:
860 memory: "128Mi"
861 cpu: "100m"
862 limits:
863 memory: "1024Mi"
864 cpu: "2000m"
865 ks_endpoints:
866 requests:
867 memory: "128Mi"
868 cpu: "100m"
869 limits:
870 memory: "1024Mi"
871 cpu: "2000m"
872 ks_service:
873 requests:
874 memory: "128Mi"
875 cpu: "100m"
876 limits:
877 memory: "1024Mi"
878 cpu: "2000m"
879 ks_user:
880 requests:
881 memory: "128Mi"
882 cpu: "100m"
883 limits:
884 memory: "1024Mi"
885 cpu: "2000m"
886 tests:
887 requests:
888 memory: "128Mi"
889 cpu: "100m"
890 limits:
891 memory: "1024Mi"
892 cpu: "2000m"
893 image_repo_sync:
894 requests:
895 memory: "128Mi"
896 cpu: "100m"
897 limits:
898 memory: "1024Mi"
899 cpu: "2000m"
900
901conf:
902 rally_tests:
903 force_project_purge: false
904 run_tempest: false
905 clean_up: |
906 # NOTE: We will make the best effort to clean up rally generated networks and routers,
907 # but should not block further automated deployment.
908 set +e
909 PATTERN="^[sc]_rally_"
910
911 ROUTERS=$(openstack router list --format=value -c Name | grep -e $PATTERN | sort | tr -d '\r')
912 NETWORKS=$(openstack network list --format=value -c Name | grep -e $PATTERN | sort | tr -d '\r')
913
914 for ROUTER in $ROUTERS
915 do
916 openstack router unset --external-gateway $ROUTER
917 openstack router set --disable --no-ha $ROUTER
918
919 SUBNS=$(openstack router show $ROUTER -c interfaces_info --format=value | python -m json.tool | grep -oP '(?<="subnet_id": ")[a-f0-9\-]{36}(?=")' | sort | uniq)
920 for SUBN in $SUBNS
921 do
922 openstack router remove subnet $ROUTER $SUBN
923 done
924
925 for PORT in $(openstack port list --router $ROUTER --format=value -c ID | tr -d '\r')
926 do
927 openstack router remove port $ROUTER $PORT
928 done
929
930 openstack router delete $ROUTER
931 done
932
933 for NETWORK in $NETWORKS
934 do
935 for PORT in $(openstack port list --network $NETWORK --format=value -c ID | tr -d '\r')
936 do
937 openstack port delete $PORT
938 done
939 openstack network delete $NETWORK
940 done
941 set -e
942 tests:
943 NeutronNetworks.create_and_delete_networks:
944 - args:
945 network_create_args: {}
946 context:
947 quotas:
948 neutron:
949 network: -1
950 runner:
951 concurrency: 1
952 times: 1
953 type: constant
954 sla:
955 failure_rate:
956 max: 0
957 NeutronNetworks.create_and_delete_ports:
958 - args:
959 network_create_args: {}
960 port_create_args: {}
961 ports_per_network: 10
962 context:
963 network: {}
964 quotas:
965 neutron:
966 network: -1
967 port: -1
968 runner:
969 concurrency: 1
970 times: 1
971 type: constant
972 sla:
973 failure_rate:
974 max: 0
975 NeutronNetworks.create_and_delete_routers:
976 - args:
977 network_create_args: {}
978 router_create_args: {}
979 subnet_cidr_start: 1.1.0.0/30
980 subnet_create_args: {}
981 subnets_per_network: 2
982 context:
983 network: {}
984 quotas:
985 neutron:
986 network: -1
987 router: -1
988 subnet: -1
989 runner:
990 concurrency: 1
991 times: 1
992 type: constant
993 sla:
994 failure_rate:
995 max: 0
996 NeutronNetworks.create_and_delete_subnets:
997 - args:
998 network_create_args: {}
999 subnet_cidr_start: 1.1.0.0/30
1000 subnet_create_args: {}
1001 subnets_per_network: 2
1002 context:
1003 network: {}
1004 quotas:
1005 neutron:
1006 network: -1
1007 subnet: -1
1008 runner:
1009 concurrency: 1
1010 times: 1
1011 type: constant
1012 sla:
1013 failure_rate:
1014 max: 0
1015 NeutronNetworks.create_and_list_routers:
1016 - args:
1017 network_create_args: {}
1018 router_create_args: {}
1019 subnet_cidr_start: 1.1.0.0/30
1020 subnet_create_args: {}
1021 subnets_per_network: 2
1022 context:
1023 network: {}
1024 quotas:
1025 neutron:
1026 network: -1
1027 router: -1
1028 subnet: -1
1029 runner:
1030 concurrency: 1
1031 times: 1
1032 type: constant
1033 sla:
1034 failure_rate:
1035 max: 0
1036 NeutronNetworks.create_and_list_subnets:
1037 - args:
1038 network_create_args: {}
1039 subnet_cidr_start: 1.1.0.0/30
1040 subnet_create_args: {}
1041 subnets_per_network: 2
1042 context:
1043 network: {}
1044 quotas:
1045 neutron:
1046 network: -1
1047 subnet: -1
1048 runner:
1049 concurrency: 1
1050 times: 1
1051 type: constant
1052 sla:
1053 failure_rate:
1054 max: 0
1055 NeutronNetworks.create_and_show_network:
1056 - args:
1057 network_create_args: {}
1058 context:
1059 quotas:
1060 neutron:
1061 network: -1
1062 runner:
1063 concurrency: 1
1064 times: 1
1065 type: constant
1066 sla:
1067 failure_rate:
1068 max: 0
1069 NeutronNetworks.create_and_update_networks:
1070 - args:
1071 network_create_args: {}
1072 network_update_args:
1073 admin_state_up: false
1074 context:
1075 quotas:
1076 neutron:
1077 network: -1
1078 runner:
1079 concurrency: 1
1080 times: 1
1081 type: constant
1082 sla:
1083 failure_rate:
1084 max: 0
1085 NeutronNetworks.create_and_update_ports:
1086 - args:
1087 network_create_args: {}
1088 port_create_args: {}
1089 port_update_args:
1090 admin_state_up: false
1091 device_id: dummy_id
1092 device_owner: dummy_owner
1093 ports_per_network: 5
1094 context:
1095 network: {}
1096 quotas:
1097 neutron:
1098 network: -1
1099 port: -1
1100 runner:
1101 concurrency: 1
1102 times: 1
1103 type: constant
1104 sla:
1105 failure_rate:
1106 max: 0
1107 NeutronNetworks.create_and_update_routers:
1108 - args:
1109 network_create_args: {}
1110 router_create_args: {}
1111 router_update_args:
1112 admin_state_up: false
1113 subnet_cidr_start: 1.1.0.0/30
1114 subnet_create_args: {}
1115 subnets_per_network: 2
1116 context:
1117 network: {}
1118 quotas:
1119 neutron:
1120 network: -1
1121 router: -1
1122 subnet: -1
1123 runner:
1124 concurrency: 1
1125 times: 1
1126 type: constant
1127 sla:
1128 failure_rate:
1129 max: 0
1130 NeutronNetworks.create_and_update_subnets:
1131 - args:
1132 network_create_args: {}
1133 subnet_cidr_start: 1.4.0.0/16
1134 subnet_create_args: {}
1135 subnet_update_args:
1136 enable_dhcp: false
1137 subnets_per_network: 2
1138 context:
1139 network: {}
1140 quotas:
1141 neutron:
1142 network: -1
1143 subnet: -1
1144 runner:
1145 concurrency: 1
1146 times: 1
1147 type: constant
1148 sla:
1149 failure_rate:
1150 max: 0
1151 NeutronNetworks.list_agents:
1152 - args:
1153 agent_args: {}
1154 runner:
1155 concurrency: 1
1156 times: 1
1157 type: constant
1158 sla:
1159 failure_rate:
1160 max: 0
1161 NeutronSecurityGroup.create_and_list_security_groups:
1162 - args:
1163 security_group_create_args: {}
1164 context:
1165 quotas:
1166 neutron:
1167 security_group: -1
1168 runner:
1169 concurrency: 1
1170 times: 1
1171 type: constant
1172 sla:
1173 failure_rate:
1174 max: 0
1175 NeutronSecurityGroup.create_and_update_security_groups:
1176 - args:
1177 security_group_create_args: {}
1178 security_group_update_args: {}
1179 context:
1180 quotas:
1181 neutron:
1182 security_group: -1
1183 runner:
1184 concurrency: 1
1185 times: 1
1186 type: constant
1187 sla:
1188 failure_rate:
1189 max: 0
1190 paste:
1191 composite:neutron:
1192 use: egg:Paste#urlmap
1193 /: neutronversions_composite
1194 /v2.0: neutronapi_v2_0
1195 composite:neutronapi_v2_0:
1196 use: call:neutron.auth:pipeline_factory
1197 noauth: cors http_proxy_to_wsgi request_id catch_errors extensions neutronapiapp_v2_0
1198 keystone: cors http_proxy_to_wsgi request_id catch_errors authtoken audit keystonecontext extensions neutronapiapp_v2_0
1199 composite:neutronversions_composite:
1200 use: call:neutron.auth:pipeline_factory
1201 noauth: cors http_proxy_to_wsgi neutronversions
1202 keystone: cors http_proxy_to_wsgi neutronversions
1203 filter:request_id:
1204 paste.filter_factory: oslo_middleware:RequestId.factory
1205 filter:catch_errors:
1206 paste.filter_factory: oslo_middleware:CatchErrors.factory
1207 filter:cors:
1208 paste.filter_factory: oslo_middleware.cors:filter_factory
1209 oslo_config_project: neutron
1210 filter:http_proxy_to_wsgi:
1211 paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
1212 filter:keystonecontext:
1213 paste.filter_factory: neutron.auth:NeutronKeystoneContext.factory
1214 filter:authtoken:
1215 paste.filter_factory: keystonemiddleware.auth_token:filter_factory
1216 filter:audit:
1217 paste.filter_factory: keystonemiddleware.audit:filter_factory
1218 audit_map_file: /etc/neutron/api_audit_map.conf
1219 filter:extensions:
1220 paste.filter_factory: neutron.api.extensions:plugin_aware_extension_middleware_factory
1221 app:neutronversions:
1222 paste.app_factory: neutron.pecan_wsgi.app:versions_factory
1223 app:neutronapiapp_v2_0:
1224 paste.app_factory: neutron.api.v2.router:APIRouter.factory
1225 filter:osprofiler:
1226 paste.filter_factory: osprofiler.web:WsgiMiddleware.factory
1227 policy: {}
1228 api_audit_map:
1229 DEFAULT:
1230 target_endpoint_type: None
1231 custom_actions:
1232 add_router_interface: update/add
1233 remove_router_interface: update/remove
1234 path_keywords:
1235 floatingips: ip
1236 healthmonitors: healthmonitor
1237 health_monitors: health_monitor
1238 lb: None
1239 members: member
1240 metering-labels: label
1241 metering-label-rules: rule
1242 networks: network
1243 pools: pool
1244 ports: port
1245 routers: router
1246 quotas: quota
1247 security-groups: security-group
1248 security-group-rules: rule
1249 subnets: subnet
1250 vips: vip
1251 service_endpoints:
1252 network: service/network
1253 neutron_sudoers: |
1254 # This sudoers file supports rootwrap for both Kolla and LOCI Images.
1255 Defaults !requiretty
1256 Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin"
1257 neutron ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *, /var/lib/openstack/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *
1258 neutron ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf, /var/lib/openstack/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
1259 rootwrap: |
1260 # Configuration for neutron-rootwrap
1261 # This file should be owned by (and only-writeable by) the root user
1262
1263 [DEFAULT]
1264 # List of directories to load filter definitions from (separated by ',').
1265 # These directories MUST all be only writeable by root !
1266 filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap,/var/lib/openstack/etc/neutron/rootwrap.d
1267
1268 # List of directories to search executables in, in case filters do not
1269 # explicitly specify a full path (separated by ',')
1270 # If not specified, defaults to system PATH environment variable.
1271 # These directories MUST all be only writeable by root !
1272 exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin
1273
1274 # Enable logging to syslog
1275 # Default value is False
1276 use_syslog=False
1277
1278 # Which syslog facility to use.
1279 # Valid values include auth, authpriv, syslog, local0, local1...
1280 # Default value is 'syslog'
1281 syslog_log_facility=syslog
1282
1283 # Which messages to log.
1284 # INFO means log all usage
1285 # ERROR means only log unsuccessful attempts
1286 syslog_log_level=ERROR
1287
1288 [xenapi]
1289 # XenAPI configuration is only required by the L2 agent if it is to
1290 # target a XenServer/XCP compute host's dom0.
1291 xenapi_connection_url=<None>
1292 xenapi_connection_username=root
1293 xenapi_connection_password=<None>
1294 rootwrap_filters:
1295 debug:
1296 pods:
1297 - dhcp_agent
1298 - l3_agent
1299 - lb_agent
1300 - metadata_agent
1301 - ovn_metadata_agent
1302 - ovs_agent
1303 - sriov_agent
1304 content: |
1305 # neutron-rootwrap command filters for nodes on which neutron is
1306 # expected to control network
1307 #
1308 # This file should be owned by (and only-writeable by) the root user
1309
1310 # format seems to be
1311 # cmd-name: filter-name, raw-command, user, args
1312
1313 [Filters]
1314
1315 # This is needed because we should ping
1316 # from inside a namespace which requires root
1317 # _alt variants allow to match -c and -w in any order
1318 # (used by NeutronDebugAgent.ping_all)
1319 ping: RegExpFilter, ping, root, ping, -w, \d+, -c, \d+, [0-9\.]+
1320 ping_alt: RegExpFilter, ping, root, ping, -c, \d+, -w, \d+, [0-9\.]+
1321 ping6: RegExpFilter, ping6, root, ping6, -w, \d+, -c, \d+, [0-9A-Fa-f:]+
1322 ping6_alt: RegExpFilter, ping6, root, ping6, -c, \d+, -w, \d+, [0-9A-Fa-f:]+
1323 dibbler:
1324 pods:
1325 - dhcp_agent
1326 - l3_agent
1327 - lb_agent
1328 - metadata_agent
1329 - ovn_metadata_agent
1330 - ovs_agent
1331 - sriov_agent
1332 content: |
1333 # neutron-rootwrap command filters for nodes on which neutron is
1334 # expected to control network
1335 #
1336 # This file should be owned by (and only-writeable by) the root user
1337
1338 # format seems to be
1339 # cmd-name: filter-name, raw-command, user, args
1340
1341 [Filters]
1342
1343 # Filters for the dibbler-based reference implementation of the pluggable
1344 # Prefix Delegation driver. Other implementations using an alternative agent
1345 # should include a similar filter in this folder.
1346
1347 # prefix_delegation_agent
1348 dibbler-client: CommandFilter, dibbler-client, root
1349 ipset_firewall:
1350 pods:
1351 - dhcp_agent
1352 - l3_agent
1353 - lb_agent
1354 - metadata_agent
1355 - ovn_metadata_agent
1356 - ovs_agent
1357 - sriov_agent
1358 content: |
1359 # neutron-rootwrap command filters for nodes on which neutron is
1360 # expected to control network
1361 #
1362 # This file should be owned by (and only-writeable by) the root user
1363
1364 # format seems to be
1365 # cmd-name: filter-name, raw-command, user, args
1366
1367 [Filters]
1368 # neutron/agent/linux/iptables_firewall.py
1369 # "ipset", "-A", ...
1370 ipset: CommandFilter, ipset, root
1371 l3:
1372 pods:
1373 - dhcp_agent
1374 - l3_agent
1375 - lb_agent
1376 - metadata_agent
1377 - ovn_metadata_agent
1378 - ovs_agent
1379 - sriov_agent
1380 content: |
1381 # neutron-rootwrap command filters for nodes on which neutron is
1382 # expected to control network
1383 #
1384 # This file should be owned by (and only-writeable by) the root user
1385
1386 # format seems to be
1387 # cmd-name: filter-name, raw-command, user, args
1388
1389 [Filters]
1390
1391 # arping
1392 arping: CommandFilter, arping, root
1393
1394 # l3_agent
1395 sysctl: CommandFilter, sysctl, root
1396 route: CommandFilter, route, root
1397 radvd: CommandFilter, radvd, root
1398
1399 # haproxy
1400 haproxy: RegExpFilter, haproxy, root, haproxy, -f, .*
1401 kill_haproxy: KillFilter, root, haproxy, -15, -9, -HUP
1402
1403 # metadata proxy
1404 metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
1405 # RHEL invocation of the metadata proxy will report /usr/bin/python
1406 kill_metadata: KillFilter, root, python, -15, -9
1407 kill_metadata2: KillFilter, root, python2, -15, -9
1408 kill_metadata7: KillFilter, root, python2.7, -15, -9
1409 kill_metadata3: KillFilter, root, python3, -15, -9
1410 kill_metadata35: KillFilter, root, python3.5, -15, -9
1411 kill_metadata36: KillFilter, root, python3.6, -15, -9
1412 kill_metadata37: KillFilter, root, python3.7, -15, -9
1413 kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -15, -9, -HUP
1414 kill_radvd: KillFilter, root, /sbin/radvd, -15, -9, -HUP
1415
1416 # ip_lib
1417 ip: IpFilter, ip, root
1418 find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
1419 ip_exec: IpNetnsExecFilter, ip, root
1420
1421 # l3_tc_lib
1422 l3_tc_show_qdisc: RegExpFilter, tc, root, tc, qdisc, show, dev, .+
1423 l3_tc_add_qdisc_ingress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, ingress
1424 l3_tc_add_qdisc_egress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, root, handle, 1:, htb
1425 l3_tc_show_filters: RegExpFilter, tc, root, tc, -p, -s, -d, filter, show, dev, .+, parent, .+, prio, 1
1426 l3_tc_delete_filters: RegExpFilter, tc, root, tc, filter, del, dev, .+, parent, .+, prio, 1, handle, .+, u32
1427 l3_tc_add_filter_ingress: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, ip, prio, 1, u32, match, ip, dst, .+, police, rate, .+, burst, .+, drop, flowid, :1
1428 l3_tc_add_filter_egress: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, ip, prio, 1, u32, match, ip, src, .+, police, rate, .+, burst, .+, drop, flowid, :1
1429
1430 # For ip monitor
1431 kill_ip_monitor: KillFilter, root, ip, -9
1432
1433 # ovs_lib (if OVSInterfaceDriver is used)
1434 ovs-vsctl: CommandFilter, ovs-vsctl, root
1435
1436 # iptables_manager
1437 iptables-save: CommandFilter, iptables-save, root
1438 iptables-restore: CommandFilter, iptables-restore, root
1439 ip6tables-save: CommandFilter, ip6tables-save, root
1440 ip6tables-restore: CommandFilter, ip6tables-restore, root
1441
1442 # Keepalived
1443 keepalived: CommandFilter, keepalived, root
1444 kill_keepalived: KillFilter, root, keepalived, -HUP, -15, -9
1445
1446 # l3 agent to delete floatingip's conntrack state
1447 conntrack: CommandFilter, conntrack, root
1448
1449 # keepalived state change monitor
1450 keepalived_state_change: CommandFilter, neutron-keepalived-state-change, root
1451 # The following filters are used to kill the keepalived state change monitor.
1452 # Since the monitor runs as a Python script, the system reports that the
1453 # command of the process to be killed is python.
1454 # TODO(mlavalle) These kill filters will be updated once we come up with a
1455 # mechanism to kill using the name of the script being executed by Python
1456 kill_keepalived_monitor_py: KillFilter, root, python, -15
1457 kill_keepalived_monitor_py27: KillFilter, root, python2.7, -15
1458 kill_keepalived_monitor_py3: KillFilter, root, python3, -15
1459 kill_keepalived_monitor_py35: KillFilter, root, python3.5, -15
1460 kill_keepalived_monitor_py36: KillFilter, root, python3.6, -15
1461 kill_keepalived_monitor_py37: KillFilter, root, python3.7, -15
1462 netns_cleanup:
1463 pods:
1464 - dhcp_agent
1465 - l3_agent
1466 - lb_agent
1467 - metadata_agent
1468 - ovn_metadata_agent
1469 - ovs_agent
1470 - sriov_agent
1471 - netns_cleanup_cron
1472 content: |
1473 # neutron-rootwrap command filters for nodes on which neutron is
1474 # expected to control network
1475 #
1476 # This file should be owned by (and only-writeable by) the root user
1477
1478 # format seems to be
1479 # cmd-name: filter-name, raw-command, user, args
1480
1481 [Filters]
1482
1483 # netns-cleanup
1484 netstat: CommandFilter, netstat, root
1485 dhcp:
1486 pods:
1487 - dhcp_agent
1488 - l3_agent
1489 - lb_agent
1490 - metadata_agent
1491 - ovn_metadata_agent
1492 - ovs_agent
1493 - sriov_agent
1494 - netns_cleanup_cron
1495 content: |
1496 # neutron-rootwrap command filters for nodes on which neutron is
1497 # expected to control network
1498 #
1499 # This file should be owned by (and only-writeable by) the root user
1500
1501 # format seems to be
1502 # cmd-name: filter-name, raw-command, user, args
1503
1504 [Filters]
1505
1506 # dhcp-agent
1507 dnsmasq: CommandFilter, dnsmasq, root
1508 # dhcp-agent uses kill as well, that's handled by the generic KillFilter
1509 # it looks like these are the only signals needed, per
1510 # neutron/agent/linux/dhcp.py
1511 kill_dnsmasq: KillFilter, root, /sbin/dnsmasq, -9, -HUP, -15
1512 kill_dnsmasq_usr: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP, -15
1513
1514 ovs-vsctl: CommandFilter, ovs-vsctl, root
1515 ivs-ctl: CommandFilter, ivs-ctl, root
1516 mm-ctl: CommandFilter, mm-ctl, root
1517 dhcp_release: CommandFilter, dhcp_release, root
1518 dhcp_release6: CommandFilter, dhcp_release6, root
1519
1520 # metadata proxy
1521 metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
1522 # RHEL invocation of the metadata proxy will report /usr/bin/python
1523 kill_metadata: KillFilter, root, python, -9
1524 kill_metadata2: KillFilter, root, python2, -9
1525 kill_metadata7: KillFilter, root, python2.7, -9
1526 kill_metadata3: KillFilter, root, python3, -9
1527 kill_metadata35: KillFilter, root, python3.5, -9
1528 kill_metadata36: KillFilter, root, python3.6, -9
1529 kill_metadata37: KillFilter, root, python3.7, -9
1530
1531 # ip_lib
1532 ip: IpFilter, ip, root
1533 find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
1534 ip_exec: IpNetnsExecFilter, ip, root
1535 ebtables:
1536 pods:
1537 - dhcp_agent
1538 - l3_agent
1539 - lb_agent
1540 - metadata_agent
1541 - ovn_metadata_agent
1542 - ovs_agent
1543 - sriov_agent
1544 content: |
1545 # neutron-rootwrap command filters for nodes on which neutron is
1546 # expected to control network
1547 #
1548 # This file should be owned by (and only-writeable by) the root user
1549
1550 # format seems to be
1551 # cmd-name: filter-name, raw-command, user, args
1552
1553 [Filters]
1554
1555 ebtables: CommandFilter, ebtables, root
1556 iptables_firewall:
1557 pods:
1558 - dhcp_agent
1559 - l3_agent
1560 - lb_agent
1561 - metadata_agent
1562 - ovn_metadata_agent
1563 - ovs_agent
1564 - sriov_agent
1565 content: |
1566 # neutron-rootwrap command filters for nodes on which neutron is
1567 # expected to control network
1568 #
1569 # This file should be owned by (and only-writeable by) the root user
1570
1571 # format seems to be
1572 # cmd-name: filter-name, raw-command, user, args
1573
1574 [Filters]
1575
1576 # neutron/agent/linux/iptables_firewall.py
1577 # "iptables-save", ...
1578 iptables-save: CommandFilter, iptables-save, root
1579 iptables-restore: CommandFilter, iptables-restore, root
1580 ip6tables-save: CommandFilter, ip6tables-save, root
1581 ip6tables-restore: CommandFilter, ip6tables-restore, root
1582
1583 # neutron/agent/linux/iptables_firewall.py
1584 # "iptables", "-A", ...
1585 iptables: CommandFilter, iptables, root
1586 ip6tables: CommandFilter, ip6tables, root
1587
1588 # neutron/agent/linux/iptables_firewall.py
1589 sysctl: CommandFilter, sysctl, root
1590
1591 # neutron/agent/linux/ip_conntrack.py
1592 conntrack: CommandFilter, conntrack, root
1593 linuxbridge_plugin:
1594 pods:
1595 - dhcp_agent
1596 - l3_agent
1597 - lb_agent
1598 - metadata_agent
1599 - ovn_metadata_agent
1600 - ovs_agent
1601 - sriov_agent
1602 content: |
1603 # neutron-rootwrap command filters for nodes on which neutron is
1604 # expected to control network
1605 #
1606 # This file should be owned by (and only-writeable by) the root user
1607
1608 # format seems to be
1609 # cmd-name: filter-name, raw-command, user, args
1610
1611 [Filters]
1612
1613 # linuxbridge-agent
1614 # unclear whether both variants are necessary, but I'm transliterating
1615 # from the old mechanism
1616 brctl: CommandFilter, brctl, root
1617 bridge: CommandFilter, bridge, root
1618
1619 # ip_lib
1620 ip: IpFilter, ip, root
1621 find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
1622 ip_exec: IpNetnsExecFilter, ip, root
1623
1624 # tc commands needed for QoS support
1625 tc_replace_tbf: RegExpFilter, tc, root, tc, qdisc, replace, dev, .+, root, tbf, rate, .+, latency, .+, burst, .+
1626 tc_add_ingress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, ingress, handle, .+
1627 tc_delete: RegExpFilter, tc, root, tc, qdisc, del, dev, .+, .+
1628 tc_show_qdisc: RegExpFilter, tc, root, tc, qdisc, show, dev, .+
1629 tc_show_filters: RegExpFilter, tc, root, tc, filter, show, dev, .+, parent, .+
1630 tc_add_filter: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, all, prio, .+, basic, police, rate, .+, burst, .+, mtu, .+, drop
1631 openvswitch_plugin:
1632 pods:
1633 - dhcp_agent
1634 - l3_agent
1635 - lb_agent
1636 - metadata_agent
1637 - ovn_metadata_agent
1638 - ovs_agent
1639 - sriov_agent
1640 content: |
1641 # neutron-rootwrap command filters for nodes on which neutron is
1642 # expected to control network
1643 #
1644 # This file should be owned by (and only-writeable by) the root user
1645
1646 # format seems to be
1647 # cmd-name: filter-name, raw-command, user, args
1648
1649 [Filters]
1650
1651 # openvswitch-agent
1652 # unclear whether both variants are necessary, but I'm transliterating
1653 # from the old mechanism
1654 ovs-vsctl: CommandFilter, ovs-vsctl, root
1655 # NOTE(yamamoto): of_interface=native doesn't use ovs-ofctl
1656 ovs-ofctl: CommandFilter, ovs-ofctl, root
1657 ovs-appctl: CommandFilter, ovs-appctl, root
1658 kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9
1659 ovsdb-client: CommandFilter, ovsdb-client, root
1660 xe: CommandFilter, xe, root
1661
1662 # ip_lib
1663 ip: IpFilter, ip, root
1664 find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
1665 ip_exec: IpNetnsExecFilter, ip, root
1666
1667 # needed for FDB extension
1668 bridge: CommandFilter, bridge, root
1669 privsep:
1670 pods:
1671 - dhcp_agent
1672 - l3_agent
1673 - lb_agent
1674 - metadata_agent
1675 - ovn_metadata_agent
1676 - ovs_agent
1677 - sriov_agent
1678 - netns_cleanup_cron
1679 content: |
1680 # Command filters to allow privsep daemon to be started via rootwrap.
1681 #
1682 # This file should be owned by (and only-writeable by) the root user
1683
1684 [Filters]
1685
1686 # By installing the following, the local admin is asserting that:
1687 #
1688 # 1. The python module load path used by privsep-helper
1689 # command as root (as started by sudo/rootwrap) is trusted.
1690 # 2. Any oslo.config files matching the --config-file
1691 # arguments below are trusted.
1692 # 3. Users allowed to run sudo/rootwrap with this configuration(*) are
1693 # also allowed to invoke python "entrypoint" functions from
1694 # --privsep_context with the additional (possibly root) privileges
1695 # configured for that context.
1696 #
1697 # (*) ie: the user is allowed by /etc/sudoers to run rootwrap as root
1698 #
1699 # In particular, the oslo.config and python module path must not
1700 # be writeable by the unprivileged user.
1701
1702 # oslo.privsep default neutron context
1703 privsep: PathFilter, privsep-helper, root,
1704 --config-file, /etc,
1705 --privsep_context, neutron.privileged.default,
1706 --privsep_sock_path, /
1707
1708 # NOTE: A second `--config-file` arg can also be added above. Since
1709 # many neutron components are installed like that (eg: by devstack).
1710 # Adjust to suit local requirements.
1711 linux_vxlan:
1712 pods:
1713 - bagpipe_bgp
1714 content: |
1715 # bagpipe-bgp-rootwrap command filters for nodes on which bagpipe-bgp is
1716 # expected to control VXLAN Linux Bridge dataplane
1717 #
1718 # This file should be owned by (and only-writeable by) the root user
1719
1720 # format seems to be
1721 # cmd-name: filter-name, raw-command, user, args
1722
1723 [Filters]
1724
1725 #
1726 modprobe: CommandFilter, modprobe, root
1727
1728 #
1729 brctl: CommandFilter, brctl, root
1730 bridge: CommandFilter, bridge, root
1731
1732 # ip_lib
1733 ip: IpFilter, ip, root
1734 ip_exec: IpNetnsExecFilter, ip, root
1735
1736 # shell (for piped commands)
1737 sh: CommandFilter, sh, root
1738 mpls_ovs_dataplane:
1739 pods:
1740 - bagpipe_bgp
1741 content: |
1742 # bagpipe-bgp-rootwrap command filters for nodes on which bagpipe-bgp is
1743 # expected to control MPLS OpenVSwitch dataplane
1744 #
1745 # This file should be owned by (and only-writeable by) the root user
1746
1747 # format seems to be
1748 # cmd-name: filter-name, raw-command, user, args
1749
1750 [Filters]
1751
1752 # openvswitch
1753 ovs-vsctl: CommandFilter, ovs-vsctl, root
1754 ovs-ofctl: CommandFilter, ovs-ofctl, root
1755
1756 # ip_lib
1757 ip: IpFilter, ip, root
1758 ip_exec: IpNetnsExecFilter, ip, root
1759
1760 # shell (for piped commands)
1761 sh: CommandFilter, sh, root
1762 neutron:
1763 DEFAULT:
1764 metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy
1765 log_config_append: /etc/neutron/logging.conf
1766 # NOTE(portdirect): the bind port should not be defined, and is manipulated
1767 # via the endpoints section.
1768 bind_port: null
1769 default_availability_zones: nova
1770 api_workers: 1
1771 rpc_workers: 4
1772 allow_overlapping_ips: True
1773 state_path: /var/lib/neutron
1774 # core_plugin can be: ml2, calico
1775 core_plugin: ml2
1776 # service_plugin can be: router, odl-router, empty for calico,
1777 # networking_ovn.l3.l3_ovn.OVNL3RouterPlugin for OVN
1778 service_plugins: router
1779 allow_automatic_l3agent_failover: True
1780 l3_ha: True
1781 max_l3_agents_per_router: 2
1782 l3_ha_network_type: vxlan
1783 network_auto_schedule: True
1784 router_auto_schedule: True
1785 # (NOTE)portdirect: if unset this is populated dynamically from the value in
1786 # 'network.backend' to sane defaults.
1787 interface_driver: null
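 # For illustration only: with an Open vSwitch backend this would typically
 # resolve to something like
 #   interface_driver: openvswitch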
1788 oslo_concurrency:
1789 lock_path: /var/lib/neutron/tmp
1790 database:
1791 max_retries: -1
1792 agent:
1793 root_helper: sudo /var/lib/openstack/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
1794 root_helper_daemon: sudo /var/lib/openstack/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
1795 oslo_messaging_notifications:
1796 driver: messagingv2
1797 oslo_messaging_rabbit:
1798 rabbit_ha_queues: true
1799 oslo_middleware:
1800 enable_proxy_headers_parsing: true
1801 oslo_policy:
1802 policy_file: /etc/neutron/policy.yaml
1803 ovn:
1804 ovn_metadata_enabled: true
1805 nova:
1806 auth_type: password
1807 auth_version: v3
1808 endpoint_type: internal
1809 placement:
1810 auth_type: password
1811 auth_version: v3
1812 endpoint_type: internal
1813 designate:
1814 auth_type: password
1815 auth_version: v3
1816 endpoint_type: internal
1817 allow_reverse_dns_lookup: true
1818 ironic:
1819 endpoint_type: internal
1820 keystone_authtoken:
1821 service_token_roles: service
1822 service_token_roles_required: true
1823 memcache_security_strategy: ENCRYPT
1824 auth_type: password
1825 auth_version: v3
1826 service_type: network
1827 octavia:
1828 request_poll_timeout: 3000
1829 logging:
1830 loggers:
1831 keys:
1832 - root
1833 - neutron
1834 - neutron_taas
1835 handlers:
1836 keys:
1837 - stdout
1838 - stderr
1839 - "null"
1840 formatters:
1841 keys:
1842 - context
1843 - default
1844 logger_root:
1845 level: WARNING
1846 handlers: 'null'
1847 logger_neutron:
1848 level: INFO
1849 handlers:
1850 - stdout
1851 qualname: neutron
1852 logger_neutron_taas:
1853 level: INFO
1854 handlers:
1855 - stdout
1856 qualname: neutron_taas
1857 logger_amqp:
1858 level: WARNING
1859 handlers: stderr
1860 qualname: amqp
1861 logger_amqplib:
1862 level: WARNING
1863 handlers: stderr
1864 qualname: amqplib
1865 logger_eventletwsgi:
1866 level: WARNING
1867 handlers: stderr
1868 qualname: eventlet.wsgi.server
1869 logger_sqlalchemy:
1870 level: WARNING
1871 handlers: stderr
1872 qualname: sqlalchemy
1873 logger_boto:
1874 level: WARNING
1875 handlers: stderr
1876 qualname: boto
1877 handler_null:
1878 class: logging.NullHandler
1879 formatter: default
1880 args: ()
1881 handler_stdout:
1882 class: StreamHandler
1883 args: (sys.stdout,)
1884 formatter: context
1885 handler_stderr:
1886 class: StreamHandler
1887 args: (sys.stderr,)
1888 formatter: context
1889 formatter_context:
1890 class: oslo_log.formatters.ContextFormatter
1891 datefmt: "%Y-%m-%d %H:%M:%S"
1892 formatter_default:
1893 format: "%(message)s"
1894 datefmt: "%Y-%m-%d %H:%M:%S"
1895 plugins:
1896 ml2_conf:
1897 ml2:
1898 extension_drivers: port_security
1899 # (NOTE)portdirect: if unset this is populated dynamically from the value
1900 # in 'network.backend' to sane defaults.
1901 mechanism_drivers: null
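 # For illustration only: an Open vSwitch based deployment would commonly
 # end up with something like
 #   mechanism_drivers: openvswitch,l2population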
1902 type_drivers: flat,vlan,vxlan,local
1903 tenant_network_types: vxlan
1904 ml2_type_vxlan:
1905 vni_ranges: 1:1000
1906 vxlan_group: 239.1.1.1
1907 ml2_type_flat:
1908 flat_networks: "*"
1909 # If you want to use the external network as a tagged provider network,
1910 # a range should be specified including the intended VLAN target
1911 # using ml2_type_vlan.network_vlan_ranges:
1912 # ml2_type_vlan:
1913 # network_vlan_ranges: "external:1100:1110"
1914 ml2_type_geneve:
1915 vni_ranges: 1:65536
1916 max_header_size: 38
1917 agent:
1918 extensions: ""
1919 ml2_conf_sriov: null
1920 taas:
1921 taas:
1922 enabled: False
1923 openvswitch_agent:
1924 agent:
1925 tunnel_types: vxlan
1926 l2_population: True
1927 arp_responder: True
1928 ovs:
1929 bridge_mappings: "external:br-ex"
1930 securitygroup:
1931 firewall_driver: neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
1932 linuxbridge_agent:
1933 linux_bridge:
1934 # To define Flat and VLAN connections, in LB we can assign
1935 # specific interface to the flat/vlan network name using:
1936 # physical_interface_mappings: "external:eth3"
1937 # Or we can set the mapping between the network and bridge:
1938 bridge_mappings: "external:br-ex"
1939 # The two above options are exclusive, do not use both of them at once
1940 securitygroup:
1941 firewall_driver: iptables
1942 vxlan:
1943 l2_population: True
1944 arp_responder: True
1945 macvtap_agent: null
1946 sriov_agent:
1947 securitygroup:
1948 firewall_driver: neutron.agent.firewall.NoopFirewallDriver
1949 sriov_nic:
1950 physical_device_mappings: physnet2:enp3s0f1
1951 # NOTE: do not use null here, use an empty string
1952 exclude_devices: ""
1953 dhcp_agent:
1954 DEFAULT:
1955 # (NOTE)portdirect: if unset this is populated dynamically from the value in
1956 # 'network.backend' to sane defaults.
1957 interface_driver: null
1958 dnsmasq_config_file: /etc/neutron/dnsmasq.conf
1959 force_metadata: True
1960 dnsmasq: |
1961 #no-hosts
1962 #port=5353
1963 #cache-size=500
1964 #no-negcache
1965 #dns-forward-max=100
1966 #resolve-file=
1967 #strict-order
1968 #bind-interface
1969 #bind-dynamic
1970 #domain=
1971 #dhcp-range=10.10.10.10,10.10.10.100,24h
1972 #dhcp-lease-max=150
1973 #dhcp-host=11:22:33:44:55:66,ignore
1974 #dhcp-option=3,10.10.10.1
1975 #dhcp-option-force=26,1450
1976
1977 l3_agent:
1978 DEFAULT:
1979 # (NOTE)portdirect: if unset this is populated dynamically from the value in
1980 # 'network.backend' to sane defaults.
1981 interface_driver: null
1982 agent_mode: legacy
1983 metering_agent: null
1984 metadata_agent:
1985 DEFAULT:
1986 # we cannot change the proxy socket path as it is declared
1987 # as a hostPath volume from agent daemonsets
1988 metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy
1989 metadata_proxy_shared_secret: "password"
1990 cache:
1991 enabled: true
1992 backend: dogpile.cache.memcached
1993 bagpipe_bgp: {}
1994 ovn_metadata_agent:
1995 DEFAULT:
1996 # we cannot change the proxy socket path as it is declared
1997 # as a hostPath volume from agent daemonsets
1998 metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy
1999 metadata_proxy_shared_secret: "password"
2000 metadata_workers: 2
2001 cache:
2002 enabled: true
2003 backend: dogpile.cache.memcached
2004 ovs:
2005 ovsdb_connection: unix:/run/openvswitch/db.sock
2006
2007 rabbitmq:
2008 # NOTE(rk760n): add an rmq policy to mirror messages from notification queues and set an expiration time for them
2009 policies:
2010 - vhost: "neutron"
2011 name: "ha_ttl_neutron"
2012 definition:
2013 # mirror messages to other nodes in the rmq cluster
2014 ha-mode: "all"
2015 ha-sync-mode: "automatic"
2016 # 70s
2017 message-ttl: 70000
2018 priority: 0
2019 apply-to: all
2020 pattern: '^(?!(amq\.|reply_)).*'
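 # the negative lookahead in the pattern above applies the policy to all
 # queues except those whose names begin with 'amq.' or 'reply_'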
2021 ## NOTE: "besteffort" is meant for dev env with mixed compute type only.
2022 ## This helps prevent the sriov init script from failing due to a mismatched NIC
2023 ## For prod env, target NIC should match and init script should fail otherwise.
2024 ## sriov_init:
2025 ## - besteffort
2026 sriov_init:
2027 -
2028 # auto_bridge_add is a table of "bridge: interface" pairs
2029 # To automatically add physical interfaces to specific bridges,
2030 # for example eth3 to bridge br-physnet1, if0 to br0 and iface_two
2031 # to br1 do something like:
2032 #
2033 # auto_bridge_add:
2034 # br-physnet1: eth3
2035 # br0: if0
2036 # br1: iface_two
2037 # br-ex will be added by default
2038 auto_bridge_add:
2039 br-ex: null
2040
2041 # Network off-loading configuration
2042 netoffload:
2043 enabled: false
2044 asap2:
2045 # - dev: enp97s0f0
2046 # vfs: 16
2047
2048 # configuration of OVS DPDK bridges and NICs
2049 # this is a separate section and not part of the auto_bridge_add section
2050 # because additional parameters are needed
2051 ovs_dpdk:
2052 enabled: false
2053 # Setting update_dpdk_bond_config to true keeps the default behavior,
2054 # which may disrupt ovs dpdk traffic when the neutron ovs agent
2055 # restarts or when dpdk nic/bond configurations are changed.
2056 # Setting this to false will configure dpdk on the first run only and
2057 # skip nic/bond configuration on restart or config update.
2058 update_dpdk_bond_config: true
2059 driver: uio_pci_generic
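 # kernel driver the DPDK NICs are bound to; vfio-pci is a common alternative
 # where an IOMMU is available (the right choice is hardware dependent)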
2060 # In case bonds are configured, the nics which are part of those bonds
2061 # must NOT be provided here.
2062 nics:
2063 - name: dpdk0
2064 pci_id: '0000:05:00.0'
2065 # Set VF Index in case some particular VF(s) need to be
2066 # used with ovs-dpdk.
2067 # vf_index: 0
2068 bridge: br-phy
2069 migrate_ip: true
2070 n_rxq: 2
2071 n_txq: 2
2072 pmd_rxq_affinity: "0:3,1:27"
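 # assumption: this maps to OVS's pmd-rxq-affinity setting, i.e. "0:3,1:27"
 # pins rx queue 0 to PMD core 3 and rx queue 1 to PMD core 27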
2073 ofport_request: 1
2074 # optional parameters for tuning the OVS DPDK config
2075 # in alignment with the available hardware resources
2076 # mtu: 2000
2077 # n_rxq_size: 1024
2078 # n_txq_size: 1024
2079 # vhost-iommu-support: true
2080 bridges:
2081 - name: br-phy
2082 # optional parameter, in case tunnel traffic needs to be transported over a vlan underlay
2083 # - tunnel_underlay_vlan: 45
2084 # Optional parameter for configuring bonding in OVS-DPDK
2085 # - name: br-phy-bond0
2086 # bonds:
2087 # - name: dpdkbond0
2088 # bridge: br-phy-bond0
2089 # # The IP from the first nic in nics list shall be used
2090 # migrate_ip: true
2091 # mtu: 2000
2092 # # Please note that n_rxq is set for each NIC individually
2093 # # rather than denoting the total number of rx queues for
2094 # # the bond as a whole. For example, setting n_rxq = 2 below
2095 # # would give 4 rx queues in total for the bond.
2096 # # Same for n_txq
2097 # n_rxq: 2
2098 # n_txq: 2
2099 # ofport_request: 1
2100 # n_rxq_size: 1024
2101 # n_txq_size: 1024
2102 # vhost-iommu-support: true
2103 # ovs_options: "bond_mode=active-backup"
2104 # nics:
2105 # - name: dpdk_b0s0
2106 # pci_id: '0000:06:00.0'
2107 # pmd_rxq_affinity: "0:3,1:27"
2108 # # Set VF Index in case some particular VF(s) need to be
2109 # # used with ovs-dpdk, in which case the pci_id of the PF must be
2110 # # provided above.
2111 # # vf_index: 0
2112 # - name: dpdk_b0s1
2113 # pci_id: '0000:07:00.0'
2114 # pmd_rxq_affinity: "0:3,1:27"
2115 # # Set VF Index in case some particular VF(s) need to be
2116 # # used with ovs-dpdk, in which case the pci_id of the PF must be
2117 # # provided above.
2118 # # vf_index: 0
2119 #
2120 # Set the log level for each target module (default level is always dbg)
2121 # Supported log levels are: off, emer, err, warn, info, dbg
2122 #
2123 # modules:
2124 # - name: dpdk
2125 # log_level: info
2126
2127# Names of secrets used by bootstrap and environmental checks
2128secrets:
2129 identity:
2130 admin: neutron-keystone-admin
2131 neutron: neutron-keystone-user
2132 test: neutron-keystone-test
2133 oslo_db:
2134 admin: neutron-db-admin
2135 neutron: neutron-db-user
2136 oslo_messaging:
2137 admin: neutron-rabbitmq-admin
2138 neutron: neutron-rabbitmq-user
2139 tls:
2140 compute_metadata:
2141 metadata:
2142 internal: metadata-tls-metadata
2143 network:
2144 server:
2145 public: neutron-tls-public
2146 internal: neutron-tls-server
2147 oci_image_registry:
2148 neutron: neutron-oci-image-registry
2149
2150 # typically overridden by environment-specific
2151# values, but should include all endpoints
2152# required by this chart
2153endpoints:
2154 cluster_domain_suffix: cluster.local
2155 local_image_registry:
2156 name: docker-registry
2157 namespace: docker-registry
2158 hosts:
2159 default: localhost
2160 internal: docker-registry
2161 node: localhost
2162 host_fqdn_override:
2163 default: null
2164 port:
2165 registry:
2166 node: 5000
2167 oci_image_registry:
2168 name: oci-image-registry
2169 namespace: oci-image-registry
2170 auth:
2171 enabled: false
2172 neutron:
2173 username: neutron
2174 password: password
2175 hosts:
2176 default: localhost
2177 host_fqdn_override:
2178 default: null
2179 port:
2180 registry:
2181 default: null
2182 oslo_db:
2183 auth:
2184 admin:
2185 username: root
2186 password: password
2187 secret:
2188 tls:
2189 internal: mariadb-tls-direct
2190 neutron:
2191 username: neutron
2192 password: password
2193 hosts:
2194 default: mariadb
2195 host_fqdn_override:
2196 default: null
2197 path: /neutron
2198 scheme: mysql+pymysql
2199 port:
2200 mysql:
2201 default: 3306
2202 oslo_messaging:
2203 auth:
2204 admin:
2205 username: rabbitmq
2206 password: password
2207 secret:
2208 tls:
2209 internal: rabbitmq-tls-direct
2210 neutron:
2211 username: neutron
2212 password: password
2213 statefulset:
2214 replicas: 2
2215 name: rabbitmq-rabbitmq
2216 hosts:
2217 default: rabbitmq
2218 host_fqdn_override:
2219 default: null
2220 path: /neutron
2221 scheme: rabbit
2222 port:
2223 amqp:
2224 default: 5672
2225 http:
2226 default: 15672
2227 oslo_cache:
2228 auth:
2229 # NOTE(portdirect): this is used to define the keystone authtoken
2230 # cache encryption key; if not set, it will be populated
2231 # automatically with a random value, but to take advantage of
2232 # this feature all services should be set to use the same key
2233 # and the same memcached service.
2234 memcache_secret_key: null
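 # For example, set the same value in every chart's values override (the
 # string below is illustrative only):
 # memcache_secret_key: "shared-memcache-secret"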
2235 hosts:
2236 default: memcached
2237 host_fqdn_override:
2238 default: null
2239 port:
2240 memcache:
2241 default: 11211
2242 compute:
2243 name: nova
2244 hosts:
2245 default: nova-api
2246 public: nova
2247 host_fqdn_override:
2248 default: null
2249 path:
2250 default: "/v2.1/%(tenant_id)s"
2251 scheme:
2252 default: 'http'
2253 port:
2254 api:
2255 default: 8774
2256 public: 80
2257 novncproxy:
2258 default: 6080
2259 compute_metadata:
2260 name: nova
2261 hosts:
2262 default: nova-metadata
2263 public: metadata
2264 host_fqdn_override:
2265 default: null
2266 path:
2267 default: /
2268 scheme:
2269 default: 'http'
2270 port:
2271 metadata:
2272 default: 8775
2273 public: 80
2274 identity:
2275 name: keystone
2276 auth:
2277 admin:
2278 region_name: RegionOne
2279 username: admin
2280 password: password
2281 project_name: admin
2282 user_domain_name: default
2283 project_domain_name: default
2284 neutron:
2285 role: admin
2286 region_name: RegionOne
2287 username: neutron
2288 password: password
2289 project_name: service
2290 user_domain_name: service
2291 project_domain_name: service
2292 nova:
2293 region_name: RegionOne
2294 project_name: service
2295 username: nova
2296 password: password
2297 user_domain_name: service
2298 project_domain_name: service
2299 placement:
2300 region_name: RegionOne
2301 project_name: service
2302 username: placement
2303 password: password
2304 user_domain_name: service
2305 project_domain_name: service
2306 designate:
2307 region_name: RegionOne
2308 project_name: service
2309 username: designate
2310 password: password
2311 user_domain_name: service
2312 project_domain_name: service
2313 ironic:
2314 region_name: RegionOne
2315 project_name: service
2316 username: ironic
2317 password: password
2318 user_domain_name: service
2319 project_domain_name: service
2320 test:
2321 role: admin
2322 region_name: RegionOne
2323 username: neutron-test
2324 password: password
2325 # NOTE: this project will be purged and reset if
2326 # conf.rally_tests.force_project_purge is set to true
2327 # which may be required upon test failure, but be aware that this will
2328 # expunge all openstack objects, so if this is used a separate project
2329 # should be used for each helm test, and it should be ensured
2330 # that this project is not in use by other tenants
2331 project_name: test
2332 user_domain_name: service
2333 project_domain_name: service
2334 hosts:
2335 default: keystone
2336 internal: keystone-api
2337 host_fqdn_override:
2338 default: null
2339 path:
2340 default: /v3
2341 scheme:
2342 default: http
2343 port:
2344 api:
2345 default: 80
2346 internal: 5000
2347 network:
2348 name: neutron
2349 hosts:
2350 default: neutron-server
2351 public: neutron
2352 host_fqdn_override:
2353 default: null
2354 # NOTE(portdirect): this chart supports TLS for fqdn overridden public
2355 # endpoints using the following format:
2356 # public:
2357 # host: null
2358 # tls:
2359 # crt: null
2360 # key: null
2361 path:
2362 default: null
2363 scheme:
2364 default: 'http'
2365 service: 'http'
2366 port:
2367 api:
2368 default: 9696
2369 public: 80
2370 service: 9696
2371 load_balancer:
2372 name: octavia
2373 hosts:
2374 default: octavia-api
2375 public: octavia
2376 host_fqdn_override:
2377 default: null
2378 path:
2379 default: null
2380 scheme:
2381 default: http
2382 port:
2383 api:
2384 default: 9876
2385 public: 80
2386 fluentd:
2387 namespace: osh-infra
2388 name: fluentd
2389 hosts:
2390 default: fluentd-logging
2391 host_fqdn_override:
2392 default: null
2393 path:
2394 default: null
2395 scheme: 'http'
2396 port:
2397 service:
2398 default: 24224
2399 metrics:
2400 default: 24220
2401 dns:
2402 name: designate
2403 hosts:
2404 default: designate-api
2405 public: designate
2406 host_fqdn_override:
2407 default: null
2408 path:
2409 default: /
2410 scheme:
2411 default: 'http'
2412 port:
2413 api:
2414 default: 9001
2415 public: 80
2416 baremetal:
2417 name: ironic
2418 hosts:
2419 default: ironic-api
2420 public: ironic
2421 host_fqdn_override:
2422 default: null
2423 path:
2424 default: null
2425 scheme:
2426 default: 'http'
2427 port:
2428 api:
2429 default: 6385
2430 public: 80
2431 # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress
2432 # They are used to enable the Egress K8s network policy.
2433 kube_dns:
2434 namespace: kube-system
2435 name: kubernetes-dns
2436 hosts:
2437 default: kube-dns
2438 host_fqdn_override:
2439 default: null
2440 path:
2441 default: null
2442 scheme: http
2443 port:
2444 dns:
2445 default: 53
2446 protocol: UDP
2447 ingress:
2448 namespace: null
2449 name: ingress
2450 hosts:
2451 default: ingress
2452 port:
2453 ingress:
2454 default: 80
2455
2456network_policy:
2457 neutron:
2458 # TODO(lamt): Need to tighten this ingress for security.
2459 ingress:
2460 - {}
2461 egress:
2462 - {}
2463
2464helm3_hook: true
2465
2466health_probe:
2467 logging:
2468 level: ERROR
2469
2470tls:
2471 identity: false
2472 oslo_messaging: false
2473 oslo_db: false
2474
2475manifests:
2476 certificates: false
2477 configmap_bin: true
2478 configmap_etc: true
2479 daemonset_dhcp_agent: true
2480 daemonset_l3_agent: true
2481 daemonset_lb_agent: true
2482 daemonset_metadata_agent: true
2483 daemonset_ovs_agent: true
2484 daemonset_sriov_agent: true
2485 daemonset_l2gw_agent: false
2486 daemonset_bagpipe_bgp: false
2487 daemonset_netns_cleanup_cron: true
2488 deployment_ironic_agent: false
2489 deployment_server: true
2490 ingress_server: true
2491 job_bootstrap: true
2492 job_db_init: true
2493 job_db_sync: true
2494 job_db_drop: false
2495 job_image_repo_sync: true
2496 job_ks_endpoints: true
2497 job_ks_service: true
2498 job_ks_user: true
2499 job_rabbit_init: true
2500 pdb_server: true
2501 pod_rally_test: true
2502 network_policy: false
2503 secret_db: true
2504 secret_ingress_tls: true
2505 secret_keystone: true
2506 secret_rabbitmq: true
2507 secret_registry: true
2508 service_ingress_server: true
2509 service_server: true
2510...