1# Licensed under the Apache License, Version 2.0 (the "License");
2# you may not use this file except in compliance with the License.
3# You may obtain a copy of the License at
4#
5# http://www.apache.org/licenses/LICENSE-2.0
6#
7# Unless required by applicable law or agreed to in writing, software
8# distributed under the License is distributed on an "AS IS" BASIS,
9# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10# See the License for the specific language governing permissions and
11# limitations under the License.
12
13# Default values for neutron.
14# This is a YAML-formatted file.
15# Declare name/value pairs to be passed into your templates.
16# name: value
17
18---
19release_group: null
20
21images:
22 tags:
23 bootstrap: docker.io/openstackhelm/heat:stein-ubuntu_bionic
24 test: docker.io/xrally/xrally-openstack:2.0.0
25 purge_test: docker.io/openstackhelm/ospurge:latest
26 db_init: docker.io/openstackhelm/heat:stein-ubuntu_bionic
27 neutron_db_sync: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
28 db_drop: docker.io/openstackhelm/heat:stein-ubuntu_bionic
29 rabbit_init: docker.io/rabbitmq:3.7-management
30 ks_user: docker.io/openstackhelm/heat:stein-ubuntu_bionic
31 ks_service: docker.io/openstackhelm/heat:stein-ubuntu_bionic
32 ks_endpoints: docker.io/openstackhelm/heat:stein-ubuntu_bionic
33 netoffload: ghcr.io/vexxhost/netoffload:v1.0.1
34 neutron_server: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
35 neutron_dhcp: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
36 neutron_metadata: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
37 neutron_ovn_metadata: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
38 neutron_l3: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
39 neutron_l2gw: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
40 neutron_openvswitch_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
41 neutron_linuxbridge_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
42 neutron_sriov_agent: docker.io/openstackhelm/neutron:stein-18.04-sriov
43 neutron_sriov_agent_init: docker.io/openstackhelm/neutron:stein-18.04-sriov
44 neutron_bagpipe_bgp: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
45 neutron_ironic_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
46 neutron_netns_cleanup_cron: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
47 dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
48 image_repo_sync: docker.io/docker:17.07.0
49 pull_policy: "IfNotPresent"
50 local_registry:
51 active: false
52 exclude:
53 - dep_check
54 - image_repo_sync
55
56labels:
57 agent:
58 dhcp:
59 node_selector_key: openstack-control-plane
60 node_selector_value: enabled
61 l3:
62 node_selector_key: openstack-control-plane
63 node_selector_value: enabled
64 metadata:
65 node_selector_key: openstack-control-plane
66 node_selector_value: enabled
67 l2gw:
68 node_selector_key: openstack-control-plane
69 node_selector_value: enabled
70 job:
71 node_selector_key: openstack-control-plane
72 node_selector_value: enabled
73 lb:
74 node_selector_key: linuxbridge
75 node_selector_value: enabled
76 # openvswitch is a special case, requiring a special
77 # label that can apply to both control hosts
78 # and compute hosts, until we get more sophisticated
79 # with our daemonset scheduling
80 ovs:
81 node_selector_key: openvswitch
82 node_selector_value: enabled
83 sriov:
84 node_selector_key: sriov
85 node_selector_value: enabled
86 bagpipe_bgp:
87 node_selector_key: openstack-compute-node
88 node_selector_value: enabled
89 server:
90 node_selector_key: openstack-control-plane
91 node_selector_value: enabled
92 ironic_agent:
93 node_selector_key: openstack-control-plane
94 node_selector_value: enabled
95 netns_cleanup_cron:
96 node_selector_key: openstack-control-plane
97 node_selector_value: enabled
98 test:
99 node_selector_key: openstack-control-plane
100 node_selector_value: enabled
101
102network:
103 # Provide the type of network wiring that will be used.
104 backend:
105 - openvswitch
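    # For example, a linuxbridge-wired deployment would list that agent here
    # instead (illustrative only, not a default):
    # backend:
    #   - linuxbridge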
106 # NOTE(Portdirect): Share network namespaces with the host,
107 # allowing agents to be restarted without packet loss and simpler
108 # debugging. This feature requires mount propagation support.
109 share_namespaces: true
110 interface:
111 # Tunnel interface will be used for VXLAN tunneling.
112 tunnel: null
113 # If tunnel is null, a fallback mechanism searches for an interface
114 # whose route matches tunnel_network_cidr.
115 tunnel_network_cidr: "0/0"
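    # To pin VXLAN tunnel traffic to a specific interface rather than rely on
    # the CIDR-based fallback, set tunnel explicitly (the interface name below
    # is only an example):
    # tunnel: bond0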
116 # To perform setup of network interfaces using the SR-IOV init
117 # container you can use a section similar to:
118 # sriov:
119 # - device: ${DEV}
120 # num_vfs: 8
121 # mtu: 9214
122 # promisc: false
123 # qos:
124 # - vf_num: 0
125 # share: 10
126 # queues_per_vf:
127 # - num_queues: 16
128 # exclude_vf: 0,11,21
129 server:
130 ingress:
131 public: true
132 classes:
133 namespace: "nginx"
134 cluster: "nginx-cluster"
135 annotations:
136 nginx.ingress.kubernetes.io/rewrite-target: /
137 external_policy_local: false
138 node_port:
139 enabled: false
140 port: 30096
141
142bootstrap:
143 enabled: false
144 ks_user: neutron
145 script: |
146 openstack token issue
147
148dependencies:
149 dynamic:
150 common:
151 local_image_registry:
152 jobs:
153 - neutron-image-repo-sync
154 services:
155 - endpoint: node
156 service: local_image_registry
157 targeted:
158 sriov: {}
159 l2gateway: {}
160 bagpipe_bgp: {}
161 ovn: {}
162 openvswitch:
163 dhcp:
164 pod:
165 - requireSameNode: true
166 labels:
167 application: neutron
168 component: neutron-ovs-agent
169 l3:
170 pod:
171 - requireSameNode: true
172 labels:
173 application: neutron
174 component: neutron-ovs-agent
175 metadata:
176 pod:
177 - requireSameNode: true
178 labels:
179 application: neutron
180 component: neutron-ovs-agent
181 linuxbridge:
182 dhcp:
183 pod:
184 - requireSameNode: true
185 labels:
186 application: neutron
187 component: neutron-lb-agent
188 l3:
189 pod:
190 - requireSameNode: true
191 labels:
192 application: neutron
193 component: neutron-lb-agent
194 metadata:
195 pod:
196 - requireSameNode: true
197 labels:
198 application: neutron
199 component: neutron-lb-agent
200 lb_agent:
201 pod: null
202 static:
203 bootstrap:
204 services:
205 - endpoint: internal
206 service: network
207 - endpoint: internal
208 service: compute
209 db_drop:
210 services:
211 - endpoint: internal
212 service: oslo_db
213 db_init:
214 services:
215 - endpoint: internal
216 service: oslo_db
217 db_sync:
218 jobs:
219 - neutron-db-init
220 services:
221 - endpoint: internal
222 service: oslo_db
223 dhcp:
224 pod: null
225 jobs:
226 - neutron-rabbit-init
227 services:
228 - endpoint: internal
229 service: oslo_messaging
230 - endpoint: internal
231 service: network
232 - endpoint: internal
233 service: compute
234 ks_endpoints:
235 jobs:
236 - neutron-ks-service
237 services:
238 - endpoint: internal
239 service: identity
240 ks_service:
241 services:
242 - endpoint: internal
243 service: identity
244 ks_user:
245 services:
246 - endpoint: internal
247 service: identity
248 rabbit_init:
249 services:
250 - service: oslo_messaging
251 endpoint: internal
252 l3:
253 pod: null
254 jobs:
255 - neutron-rabbit-init
256 services:
257 - endpoint: internal
258 service: oslo_messaging
259 - endpoint: internal
260 service: network
261 - endpoint: internal
262 service: compute
263 lb_agent:
264 pod: null
265 jobs:
266 - neutron-rabbit-init
267 services:
268 - endpoint: internal
269 service: oslo_messaging
270 - endpoint: internal
271 service: network
272 metadata:
273 pod: null
274 jobs:
275 - neutron-rabbit-init
276 services:
277 - endpoint: internal
278 service: oslo_messaging
279 - endpoint: internal
280 service: network
281 - endpoint: internal
282 service: compute
283 - endpoint: public
284 service: compute_metadata
285 ovn_metadata:
286 pod:
287 - requireSameNode: true
288 labels:
289 application: ovn
290 component: ovn-controller
291 services:
292 - endpoint: internal
293 service: compute_metadata
294 ovs_agent:
295 jobs:
296 - neutron-rabbit-init
297 pod:
298 - requireSameNode: true
299 labels:
300 application: openvswitch
301 component: server
302 services:
303 - endpoint: internal
304 service: oslo_messaging
305 - endpoint: internal
306 service: network
307 server:
308 jobs:
309 - neutron-db-sync
310 - neutron-ks-user
311 - neutron-ks-endpoints
312 - neutron-rabbit-init
313 services:
314 - endpoint: internal
315 service: oslo_db
316 - endpoint: internal
317 service: oslo_messaging
318 - endpoint: internal
319 service: oslo_cache
320 - endpoint: internal
321 service: identity
322 ironic_agent:
323 jobs:
324 - neutron-db-sync
325 - neutron-ks-user
326 - neutron-ks-endpoints
327 - neutron-rabbit-init
328 services:
329 - endpoint: internal
330 service: oslo_db
331 - endpoint: internal
332 service: oslo_messaging
333 - endpoint: internal
334 service: oslo_cache
335 - endpoint: internal
336 service: identity
337 tests:
338 services:
339 - endpoint: internal
340 service: network
341 - endpoint: internal
342 service: compute
343 image_repo_sync:
344 services:
345 - endpoint: internal
346 service: local_image_registry
347
348pod:
349 use_fqdn:
350 neutron_agent: true
351 probes:
352 rpc_timeout: 60
353 rpc_retries: 2
354 dhcp_agent:
355 dhcp_agent:
356 readiness:
357 enabled: true
358 params:
359 initialDelaySeconds: 30
360 periodSeconds: 190
361 timeoutSeconds: 185
362 liveness:
363 enabled: true
364 params:
365 initialDelaySeconds: 120
366 periodSeconds: 600
367 timeoutSeconds: 580
368 l3_agent:
369 l3_agent:
370 readiness:
371 enabled: true
372 params:
373 initialDelaySeconds: 30
374 periodSeconds: 190
375 timeoutSeconds: 185
376 liveness:
377 enabled: true
378 params:
379 initialDelaySeconds: 120
380 periodSeconds: 600
381 timeoutSeconds: 580
382 lb_agent:
383 lb_agent:
384 readiness:
385 enabled: true
386 metadata_agent:
387 metadata_agent:
388 readiness:
389 enabled: true
390 params:
391 initialDelaySeconds: 30
392 periodSeconds: 190
393 timeoutSeconds: 185
394 liveness:
395 enabled: true
396 params:
397 initialDelaySeconds: 120
398 periodSeconds: 600
399 timeoutSeconds: 580
400 ovn_metadata_agent:
401 ovn_metadata_agent:
402 readiness:
403 enabled: true
404 params:
405 initialDelaySeconds: 30
406 periodSeconds: 190
407 timeoutSeconds: 185
408 liveness:
409 enabled: true
410 params:
411 initialDelaySeconds: 120
412 periodSeconds: 600
413 timeoutSeconds: 580
414 ovs_agent:
415 ovs_agent:
416 readiness:
417 enabled: true
418 params:
419 timeoutSeconds: 10
420 liveness:
421 enabled: true
422 params:
423 initialDelaySeconds: 120
424 periodSeconds: 600
425 timeoutSeconds: 580
426 sriov_agent:
427 sriov_agent:
428 readiness:
429 enabled: true
430 params:
431 initialDelaySeconds: 30
432 periodSeconds: 190
433 timeoutSeconds: 185
434 bagpipe_bgp:
435 bagpipe_bgp:
436 readiness:
437 enabled: true
438 params:
439 liveness:
440 enabled: true
441 params:
442 initialDelaySeconds: 60
443 l2gw_agent:
444 l2gw_agent:
445 readiness:
446 enabled: true
447 params:
448 initialDelaySeconds: 30
449 periodSeconds: 15
450 timeoutSeconds: 65
451 liveness:
452 enabled: true
453 params:
454 initialDelaySeconds: 120
455 periodSeconds: 90
456 timeoutSeconds: 70
457 server:
458 server:
459 readiness:
460 enabled: true
461 params:
462 periodSeconds: 15
463 timeoutSeconds: 10
464 liveness:
465 enabled: true
466 params:
467 initialDelaySeconds: 60
468 periodSeconds: 15
469 timeoutSeconds: 10
470 security_context:
471 neutron_dhcp_agent:
472 pod:
473 runAsUser: 42424
474 container:
475 neutron_dhcp_agent:
476 readOnlyRootFilesystem: true
477 privileged: true
478 neutron_l2gw_agent:
479 pod:
480 runAsUser: 42424
481 container:
482 neutron_l2gw_agent:
483 readOnlyRootFilesystem: true
484 privileged: true
485 neutron_bagpipe_bgp:
486 pod:
487 runAsUser: 42424
488 container:
489 neutron_bagpipe_bgp:
490 readOnlyRootFilesystem: true
491 privileged: true
492 neutron_l3_agent:
493 pod:
494 runAsUser: 42424
495 container:
496 neutron_l3_agent:
497 readOnlyRootFilesystem: true
498 privileged: true
499 neutron_lb_agent:
500 pod:
501 runAsUser: 42424
502 container:
503 neutron_lb_agent_kernel_modules:
504 capabilities:
505 add:
506 - SYS_MODULE
507 - SYS_CHROOT
508 runAsUser: 0
509 readOnlyRootFilesystem: true
510 neutron_lb_agent_init:
511 privileged: true
512 runAsUser: 0
513 readOnlyRootFilesystem: true
514 neutron_lb_agent:
515 readOnlyRootFilesystem: true
516 privileged: true
517 neutron_metadata_agent:
518 pod:
519 runAsUser: 42424
520 container:
521 neutron_metadata_agent_init:
522 runAsUser: 0
523 readOnlyRootFilesystem: true
524 neutron_ovn_metadata_agent:
525 pod:
526 runAsUser: 42424
527 container:
528 neutron_ovn_metadata_agent_init:
529 runAsUser: 0
530 readOnlyRootFilesystem: true
531 neutron_ovs_agent:
532 pod:
533 runAsUser: 42424
534 container:
535 neutron_openvswitch_agent_kernel_modules:
536 capabilities:
537 add:
538 - SYS_MODULE
539 - SYS_CHROOT
540 runAsUser: 0
541 readOnlyRootFilesystem: true
542 netoffload:
543 privileged: true
544 runAsUser: 0
545 readOnlyRootFilesystem: true
546 neutron_ovs_agent_init:
547 privileged: true
548 runAsUser: 0
549 readOnlyRootFilesystem: true
550 neutron_ovs_agent:
551 readOnlyRootFilesystem: true
552 privileged: true
553 neutron_server:
554 pod:
555 runAsUser: 42424
556 container:
557 nginx:
558 runAsUser: 0
559 readOnlyRootFilesystem: false
560 neutron_server:
561 allowPrivilegeEscalation: false
562 readOnlyRootFilesystem: true
563 neutron_sriov_agent:
564 pod:
565 runAsUser: 42424
566 container:
567 neutron_sriov_agent_init:
568 privileged: true
569 runAsUser: 0
570 readOnlyRootFilesystem: false
571 neutron_sriov_agent:
572 readOnlyRootFilesystem: true
573 privileged: true
574 neutron_ironic_agent:
575 pod:
576 runAsUser: 42424
577 container:
578 neutron_ironic_agent:
579 allowPrivilegeEscalation: false
580 readOnlyRootFilesystem: true
581 neutron_netns_cleanup_cron:
582 pod:
583 runAsUser: 42424
584 container:
585 neutron_netns_cleanup_cron:
586 readOnlyRootFilesystem: true
587 privileged: true
588 affinity:
589 anti:
590 type:
591 default: preferredDuringSchedulingIgnoredDuringExecution
592 topologyKey:
593 default: kubernetes.io/hostname
594 weight:
595 default: 10
596 tolerations:
597 neutron:
598 enabled: false
599 tolerations:
600 - key: node-role.kubernetes.io/master
601 operator: Exists
602 effect: NoSchedule
603 - key: node-role.kubernetes.io/control-plane
604 operator: Exists
605 effect: NoSchedule
606 mounts:
607 neutron_server:
608 init_container: null
609 neutron_server:
610 volumeMounts:
611 volumes:
612 neutron_dhcp_agent:
613 init_container: null
614 neutron_dhcp_agent:
615 volumeMounts:
616 volumes:
617 neutron_l3_agent:
618 init_container: null
619 neutron_l3_agent:
620 volumeMounts:
621 volumes:
622 neutron_lb_agent:
623 init_container: null
624 neutron_lb_agent:
625 volumeMounts:
626 volumes:
627 neutron_metadata_agent:
628 init_container: null
629 neutron_metadata_agent:
630 volumeMounts:
631 volumes:
632 neutron_ovn_metadata_agent:
633 init_container: null
634 neutron_ovn_metadata_agent:
635 volumeMounts:
636 volumes:
637 neutron_ovs_agent:
638 init_container: null
639 neutron_ovs_agent:
640 volumeMounts:
641 volumes:
642 neutron_sriov_agent:
643 init_container: null
644 neutron_sriov_agent:
645 volumeMounts:
646 volumes:
647 neutron_l2gw_agent:
648 init_container: null
649 neutron_l2gw_agent:
650 volumeMounts:
651 volumes:
652 bagpipe_bgp:
653 init_container: null
654 bagpipe_bgp:
655 volumeMounts:
656 volumes:
657 neutron_ironic_agent:
658 init_container: null
659 neutron_ironic_agent:
660 volumeMounts:
661 volumes:
662 neutron_netns_cleanup_cron:
663 init_container: null
664 neutron_netns_cleanup_cron:
665 volumeMounts:
666 volumes:
667 neutron_tests:
668 init_container: null
669 neutron_tests:
670 volumeMounts:
671 volumes:
672 neutron_bootstrap:
673 init_container: null
674 neutron_bootstrap:
675 volumeMounts:
676 volumes:
677 neutron_db_sync:
678 neutron_db_sync:
679 volumeMounts:
680 - name: db-sync-conf
681 mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini
682 subPath: ml2_conf.ini
683 readOnly: true
684 volumes:
685 replicas:
686 server: 1
687 ironic_agent: 1
688 lifecycle:
689 upgrades:
690 deployments:
691 revision_history: 3
692 pod_replacement_strategy: RollingUpdate
693 rolling_update:
694 max_unavailable: 1
695 max_surge: 3
696 daemonsets:
697 pod_replacement_strategy: RollingUpdate
698 dhcp_agent:
699 enabled: true
700 min_ready_seconds: 0
701 max_unavailable: 1
702 l3_agent:
703 enabled: true
704 min_ready_seconds: 0
705 max_unavailable: 1
706 lb_agent:
707 enabled: true
708 min_ready_seconds: 0
709 max_unavailable: 1
710 metadata_agent:
711 enabled: true
712 min_ready_seconds: 0
713 max_unavailable: 1
714 ovn_metadata_agent:
715 enabled: true
716 min_ready_seconds: 0
717 max_unavailable: 1
718 ovs_agent:
719 enabled: true
720 min_ready_seconds: 0
721 max_unavailable: 1
722 sriov_agent:
723 enabled: true
724 min_ready_seconds: 0
725 max_unavailable: 1
726 netns_cleanup_cron:
727 enabled: true
728 min_ready_seconds: 0
729 max_unavailable: 1
730 disruption_budget:
731 server:
732 min_available: 0
733 termination_grace_period:
734 server:
735 timeout: 30
736 ironic_agent:
737 timeout: 30
738 resources:
739 enabled: false
740 agent:
741 dhcp:
742 requests:
743 memory: "128Mi"
744 cpu: "100m"
745 limits:
746 memory: "1024Mi"
747 cpu: "2000m"
748 l3:
749 requests:
750 memory: "128Mi"
751 cpu: "100m"
752 limits:
753 memory: "1024Mi"
754 cpu: "2000m"
755 lb:
756 requests:
757 memory: "128Mi"
758 cpu: "100m"
759 limits:
760 memory: "1024Mi"
761 cpu: "2000m"
762 metadata:
763 requests:
764 memory: "128Mi"
765 cpu: "100m"
766 limits:
767 memory: "1024Mi"
768 cpu: "2000m"
769 ovn_metadata:
770 requests:
771 memory: "128Mi"
772 cpu: "100m"
773 limits:
774 memory: "1024Mi"
775 cpu: "2000m"
776 ovs:
777 requests:
778 memory: "128Mi"
779 cpu: "100m"
780 limits:
781 memory: "1024Mi"
782 cpu: "2000m"
783 sriov:
784 requests:
785 memory: "128Mi"
786 cpu: "100m"
787 limits:
788 memory: "1024Mi"
789 cpu: "2000m"
790 l2gw:
791 requests:
792 memory: "128Mi"
793 cpu: "100m"
794 limits:
795 memory: "1024Mi"
796 cpu: "2000m"
797 bagpipe_bgp:
798 requests:
799 memory: "128Mi"
800 cpu: "100m"
801 limits:
802 memory: "1024Mi"
803 cpu: "2000m"
804 server:
805 requests:
806 memory: "128Mi"
807 cpu: "100m"
808 limits:
809 memory: "1024Mi"
810 cpu: "2000m"
811 ironic_agent:
812 requests:
813 memory: "128Mi"
814 cpu: "100m"
815 limits:
816 memory: "1024Mi"
817 cpu: "2000m"
818 netns_cleanup_cron:
819 requests:
820 memory: "128Mi"
821 cpu: "100m"
822 limits:
823 memory: "1024Mi"
824 cpu: "2000m"
825 jobs:
826 bootstrap:
827 requests:
828 memory: "128Mi"
829 cpu: "100m"
830 limits:
831 memory: "1024Mi"
832 cpu: "2000m"
833 db_init:
834 requests:
835 memory: "128Mi"
836 cpu: "100m"
837 limits:
838 memory: "1024Mi"
839 cpu: "2000m"
840 rabbit_init:
841 requests:
842 memory: "128Mi"
843 cpu: "100m"
844 limits:
845 memory: "1024Mi"
846 cpu: "2000m"
847 db_sync:
848 requests:
849 memory: "128Mi"
850 cpu: "100m"
851 limits:
852 memory: "1024Mi"
853 cpu: "2000m"
854 db_drop:
855 requests:
856 memory: "128Mi"
857 cpu: "100m"
858 limits:
859 memory: "1024Mi"
860 cpu: "2000m"
861 ks_endpoints:
862 requests:
863 memory: "128Mi"
864 cpu: "100m"
865 limits:
866 memory: "1024Mi"
867 cpu: "2000m"
868 ks_service:
869 requests:
870 memory: "128Mi"
871 cpu: "100m"
872 limits:
873 memory: "1024Mi"
874 cpu: "2000m"
875 ks_user:
876 requests:
877 memory: "128Mi"
878 cpu: "100m"
879 limits:
880 memory: "1024Mi"
881 cpu: "2000m"
882 tests:
883 requests:
884 memory: "128Mi"
885 cpu: "100m"
886 limits:
887 memory: "1024Mi"
888 cpu: "2000m"
889 image_repo_sync:
890 requests:
891 memory: "128Mi"
892 cpu: "100m"
893 limits:
894 memory: "1024Mi"
895 cpu: "2000m"
896
897conf:
898 rally_tests:
899 force_project_purge: false
900 run_tempest: false
901 clean_up: |
902 # NOTE: We make a best effort to clean up rally-generated networks and routers,
903 # but this should not block further automated deployment.
904 set +e
905 PATTERN="^[sc]_rally_"
906
907 ROUTERS=$(openstack router list --format=value -c Name | grep -e $PATTERN | sort | tr -d '\r')
908 NETWORKS=$(openstack network list --format=value -c Name | grep -e $PATTERN | sort | tr -d '\r')
909
910 for ROUTER in $ROUTERS
911 do
912 openstack router unset --external-gateway $ROUTER
913 openstack router set --disable --no-ha $ROUTER
914
915 SUBNS=$(openstack router show $ROUTER -c interfaces_info --format=value | python -m json.tool | grep -oP '(?<="subnet_id": ")[a-f0-9\-]{36}(?=")' | sort | uniq)
916 for SUBN in $SUBNS
917 do
918 openstack router remove subnet $ROUTER $SUBN
919 done
920
921 for PORT in $(openstack port list --router $ROUTER --format=value -c ID | tr -d '\r')
922 do
923 openstack router remove port $ROUTER $PORT
924 done
925
926 openstack router delete $ROUTER
927 done
928
929 for NETWORK in $NETWORKS
930 do
931 for PORT in $(openstack port list --network $NETWORK --format=value -c ID | tr -d '\r')
932 do
933 openstack port delete $PORT
934 done
935 openstack network delete $NETWORK
936 done
937 set -e
938 tests:
939 NeutronNetworks.create_and_delete_networks:
940 - args:
941 network_create_args: {}
942 context:
943 quotas:
944 neutron:
945 network: -1
946 runner:
947 concurrency: 1
948 times: 1
949 type: constant
950 sla:
951 failure_rate:
952 max: 0
953 NeutronNetworks.create_and_delete_ports:
954 - args:
955 network_create_args: {}
956 port_create_args: {}
957 ports_per_network: 10
958 context:
959 network: {}
960 quotas:
961 neutron:
962 network: -1
963 port: -1
964 runner:
965 concurrency: 1
966 times: 1
967 type: constant
968 sla:
969 failure_rate:
970 max: 0
971 NeutronNetworks.create_and_delete_routers:
972 - args:
973 network_create_args: {}
974 router_create_args: {}
975 subnet_cidr_start: 1.1.0.0/30
976 subnet_create_args: {}
977 subnets_per_network: 2
978 context:
979 network: {}
980 quotas:
981 neutron:
982 network: -1
983 router: -1
984 subnet: -1
985 runner:
986 concurrency: 1
987 times: 1
988 type: constant
989 sla:
990 failure_rate:
991 max: 0
992 NeutronNetworks.create_and_delete_subnets:
993 - args:
994 network_create_args: {}
995 subnet_cidr_start: 1.1.0.0/30
996 subnet_create_args: {}
997 subnets_per_network: 2
998 context:
999 network: {}
1000 quotas:
1001 neutron:
1002 network: -1
1003 subnet: -1
1004 runner:
1005 concurrency: 1
1006 times: 1
1007 type: constant
1008 sla:
1009 failure_rate:
1010 max: 0
1011 NeutronNetworks.create_and_list_routers:
1012 - args:
1013 network_create_args: {}
1014 router_create_args: {}
1015 subnet_cidr_start: 1.1.0.0/30
1016 subnet_create_args: {}
1017 subnets_per_network: 2
1018 context:
1019 network: {}
1020 quotas:
1021 neutron:
1022 network: -1
1023 router: -1
1024 subnet: -1
1025 runner:
1026 concurrency: 1
1027 times: 1
1028 type: constant
1029 sla:
1030 failure_rate:
1031 max: 0
1032 NeutronNetworks.create_and_list_subnets:
1033 - args:
1034 network_create_args: {}
1035 subnet_cidr_start: 1.1.0.0/30
1036 subnet_create_args: {}
1037 subnets_per_network: 2
1038 context:
1039 network: {}
1040 quotas:
1041 neutron:
1042 network: -1
1043 subnet: -1
1044 runner:
1045 concurrency: 1
1046 times: 1
1047 type: constant
1048 sla:
1049 failure_rate:
1050 max: 0
1051 NeutronNetworks.create_and_show_network:
1052 - args:
1053 network_create_args: {}
1054 context:
1055 quotas:
1056 neutron:
1057 network: -1
1058 runner:
1059 concurrency: 1
1060 times: 1
1061 type: constant
1062 sla:
1063 failure_rate:
1064 max: 0
1065 NeutronNetworks.create_and_update_networks:
1066 - args:
1067 network_create_args: {}
1068 network_update_args:
1069 admin_state_up: false
1070 context:
1071 quotas:
1072 neutron:
1073 network: -1
1074 runner:
1075 concurrency: 1
1076 times: 1
1077 type: constant
1078 sla:
1079 failure_rate:
1080 max: 0
1081 NeutronNetworks.create_and_update_ports:
1082 - args:
1083 network_create_args: {}
1084 port_create_args: {}
1085 port_update_args:
1086 admin_state_up: false
1087 device_id: dummy_id
1088 device_owner: dummy_owner
1089 ports_per_network: 5
1090 context:
1091 network: {}
1092 quotas:
1093 neutron:
1094 network: -1
1095 port: -1
1096 runner:
1097 concurrency: 1
1098 times: 1
1099 type: constant
1100 sla:
1101 failure_rate:
1102 max: 0
1103 NeutronNetworks.create_and_update_routers:
1104 - args:
1105 network_create_args: {}
1106 router_create_args: {}
1107 router_update_args:
1108 admin_state_up: false
1109 subnet_cidr_start: 1.1.0.0/30
1110 subnet_create_args: {}
1111 subnets_per_network: 2
1112 context:
1113 network: {}
1114 quotas:
1115 neutron:
1116 network: -1
1117 router: -1
1118 subnet: -1
1119 runner:
1120 concurrency: 1
1121 times: 1
1122 type: constant
1123 sla:
1124 failure_rate:
1125 max: 0
1126 NeutronNetworks.create_and_update_subnets:
1127 - args:
1128 network_create_args: {}
1129 subnet_cidr_start: 1.4.0.0/16
1130 subnet_create_args: {}
1131 subnet_update_args:
1132 enable_dhcp: false
1133 subnets_per_network: 2
1134 context:
1135 network: {}
1136 quotas:
1137 neutron:
1138 network: -1
1139 subnet: -1
1140 runner:
1141 concurrency: 1
1142 times: 1
1143 type: constant
1144 sla:
1145 failure_rate:
1146 max: 0
1147 NeutronNetworks.list_agents:
1148 - args:
1149 agent_args: {}
1150 runner:
1151 concurrency: 1
1152 times: 1
1153 type: constant
1154 sla:
1155 failure_rate:
1156 max: 0
1157 NeutronSecurityGroup.create_and_list_security_groups:
1158 - args:
1159 security_group_create_args: {}
1160 context:
1161 quotas:
1162 neutron:
1163 security_group: -1
1164 runner:
1165 concurrency: 1
1166 times: 1
1167 type: constant
1168 sla:
1169 failure_rate:
1170 max: 0
1171 NeutronSecurityGroup.create_and_update_security_groups:
1172 - args:
1173 security_group_create_args: {}
1174 security_group_update_args: {}
1175 context:
1176 quotas:
1177 neutron:
1178 security_group: -1
1179 runner:
1180 concurrency: 1
1181 times: 1
1182 type: constant
1183 sla:
1184 failure_rate:
1185 max: 0
1186 paste:
1187 composite:neutron:
1188 use: egg:Paste#urlmap
1189 /: neutronversions_composite
1190 /v2.0: neutronapi_v2_0
1191 composite:neutronapi_v2_0:
1192 use: call:neutron.auth:pipeline_factory
1193 noauth: cors http_proxy_to_wsgi request_id catch_errors extensions neutronapiapp_v2_0
1194 keystone: cors http_proxy_to_wsgi request_id catch_errors authtoken audit keystonecontext extensions neutronapiapp_v2_0
1195 composite:neutronversions_composite:
1196 use: call:neutron.auth:pipeline_factory
1197 noauth: cors http_proxy_to_wsgi neutronversions
1198 keystone: cors http_proxy_to_wsgi neutronversions
1199 filter:request_id:
1200 paste.filter_factory: oslo_middleware:RequestId.factory
1201 filter:catch_errors:
1202 paste.filter_factory: oslo_middleware:CatchErrors.factory
1203 filter:cors:
1204 paste.filter_factory: oslo_middleware.cors:filter_factory
1205 oslo_config_project: neutron
1206 filter:http_proxy_to_wsgi:
1207 paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
1208 filter:keystonecontext:
1209 paste.filter_factory: neutron.auth:NeutronKeystoneContext.factory
1210 filter:authtoken:
1211 paste.filter_factory: keystonemiddleware.auth_token:filter_factory
1212 filter:audit:
1213 paste.filter_factory: keystonemiddleware.audit:filter_factory
1214 audit_map_file: /etc/neutron/api_audit_map.conf
1215 filter:extensions:
1216 paste.filter_factory: neutron.api.extensions:plugin_aware_extension_middleware_factory
1217 app:neutronversions:
1218 paste.app_factory: neutron.pecan_wsgi.app:versions_factory
1219 app:neutronapiapp_v2_0:
1220 paste.app_factory: neutron.api.v2.router:APIRouter.factory
1221 filter:osprofiler:
1222 paste.filter_factory: osprofiler.web:WsgiMiddleware.factory
1223 policy: {}
1224 api_audit_map:
1225 DEFAULT:
1226 target_endpoint_type: None
1227 custom_actions:
1228 add_router_interface: update/add
1229 remove_router_interface: update/remove
1230 path_keywords:
1231 floatingips: ip
1232 healthmonitors: healthmonitor
1233 health_monitors: health_monitor
1234 lb: None
1235 members: member
1236 metering-labels: label
1237 metering-label-rules: rule
1238 networks: network
1239 pools: pool
1240 ports: port
1241 routers: router
1242 quotas: quota
1243 security-groups: security-group
1244 security-group-rules: rule
1245 subnets: subnet
1246 vips: vip
1247 service_endpoints:
1248 network: service/network
1249 neutron_sudoers: |
1250 # This sudoers file supports rootwrap for both Kolla and LOCI Images.
1251 Defaults !requiretty
1252 Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin"
1253 neutron ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *, /var/lib/openstack/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *
1254 neutron ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf, /var/lib/openstack/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
1255 rootwrap: |
1256 # Configuration for neutron-rootwrap
1257 # This file should be owned by (and only-writeable by) the root user
1258
1259 [DEFAULT]
1260 # List of directories to load filter definitions from (separated by ',').
1261 # These directories MUST all be only writeable by root !
1262 filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap,/var/lib/openstack/etc/neutron/rootwrap.d
1263
1264 # List of directories to search executables in, in case filters do not
1265 # explicitly specify a full path (separated by ',')
1266 # If not specified, defaults to system PATH environment variable.
1267 # These directories MUST all be only writeable by root !
1268 exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin
1269
1270 # Enable logging to syslog
1271 # Default value is False
1272 use_syslog=False
1273
1274 # Which syslog facility to use.
1275 # Valid values include auth, authpriv, syslog, local0, local1...
1276 # Default value is 'syslog'
1277 syslog_log_facility=syslog
1278
1279 # Which messages to log.
1280 # INFO means log all usage
1281 # ERROR means only log unsuccessful attempts
1282 syslog_log_level=ERROR
1283
1284 [xenapi]
1285 # XenAPI configuration is only required by the L2 agent if it is to
1286 # target a XenServer/XCP compute host's dom0.
1287 xenapi_connection_url=<None>
1288 xenapi_connection_username=root
1289 xenapi_connection_password=<None>
1290 rootwrap_filters:
1291 debug:
1292 pods:
1293 - dhcp_agent
1294 - l3_agent
1295 - lb_agent
1296 - metadata_agent
1297 - ovn_metadata_agent
1298 - ovs_agent
1299 - sriov_agent
1300 content: |
1301 # neutron-rootwrap command filters for nodes on which neutron is
1302 # expected to control network
1303 #
1304 # This file should be owned by (and only-writeable by) the root user
1305
1306 # format seems to be
1307 # cmd-name: filter-name, raw-command, user, args
1308
1309 [Filters]
1310
1311 # This is needed because we should ping
1312 # from inside a namespace which requires root
1313 # _alt variants allow to match -c and -w in any order
1314 # (used by NeutronDebugAgent.ping_all)
1315 ping: RegExpFilter, ping, root, ping, -w, \d+, -c, \d+, [0-9\.]+
1316 ping_alt: RegExpFilter, ping, root, ping, -c, \d+, -w, \d+, [0-9\.]+
1317 ping6: RegExpFilter, ping6, root, ping6, -w, \d+, -c, \d+, [0-9A-Fa-f:]+
1318 ping6_alt: RegExpFilter, ping6, root, ping6, -c, \d+, -w, \d+, [0-9A-Fa-f:]+
1319 dibbler:
1320 pods:
1321 - dhcp_agent
1322 - l3_agent
1323 - lb_agent
1324 - metadata_agent
1325 - ovn_metadata_agent
1326 - ovs_agent
1327 - sriov_agent
1328 content: |
1329 # neutron-rootwrap command filters for nodes on which neutron is
1330 # expected to control network
1331 #
1332 # This file should be owned by (and only-writeable by) the root user
1333
1334 # format seems to be
1335 # cmd-name: filter-name, raw-command, user, args
1336
1337 [Filters]
1338
1339 # Filters for the dibbler-based reference implementation of the pluggable
1340 # Prefix Delegation driver. Other implementations using an alternative agent
1341 # should include a similar filter in this folder.
1342
1343 # prefix_delegation_agent
1344 dibbler-client: CommandFilter, dibbler-client, root
1345 ipset_firewall:
1346 pods:
1347 - dhcp_agent
1348 - l3_agent
1349 - lb_agent
1350 - metadata_agent
1351 - ovn_metadata_agent
1352 - ovs_agent
1353 - sriov_agent
1354 content: |
1355 # neutron-rootwrap command filters for nodes on which neutron is
1356 # expected to control network
1357 #
1358 # This file should be owned by (and only-writeable by) the root user
1359
1360 # format seems to be
1361 # cmd-name: filter-name, raw-command, user, args
1362
1363 [Filters]
1364 # neutron/agent/linux/iptables_firewall.py
1365 # "ipset", "-A", ...
1366 ipset: CommandFilter, ipset, root
1367 l3:
1368 pods:
1369 - dhcp_agent
1370 - l3_agent
1371 - lb_agent
1372 - metadata_agent
1373 - ovn_metadata_agent
1374 - ovs_agent
1375 - sriov_agent
1376 content: |
1377 # neutron-rootwrap command filters for nodes on which neutron is
1378 # expected to control network
1379 #
1380 # This file should be owned by (and only-writeable by) the root user
1381
1382 # format seems to be
1383 # cmd-name: filter-name, raw-command, user, args
1384
1385 [Filters]
1386
1387 # arping
1388 arping: CommandFilter, arping, root
1389
1390 # l3_agent
1391 sysctl: CommandFilter, sysctl, root
1392 route: CommandFilter, route, root
1393 radvd: CommandFilter, radvd, root
1394
1395 # haproxy
1396 haproxy: RegExpFilter, haproxy, root, haproxy, -f, .*
1397 kill_haproxy: KillFilter, root, haproxy, -15, -9, -HUP
1398
1399 # metadata proxy
1400 metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
1401 # RHEL invocation of the metadata proxy will report /usr/bin/python
1402 kill_metadata: KillFilter, root, python, -15, -9
1403 kill_metadata2: KillFilter, root, python2, -15, -9
1404 kill_metadata7: KillFilter, root, python2.7, -15, -9
1405 kill_metadata3: KillFilter, root, python3, -15, -9
1406 kill_metadata35: KillFilter, root, python3.5, -15, -9
1407 kill_metadata36: KillFilter, root, python3.6, -15, -9
1408 kill_metadata37: KillFilter, root, python3.7, -15, -9
1409 kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -15, -9, -HUP
1410 kill_radvd: KillFilter, root, /sbin/radvd, -15, -9, -HUP
1411
1412 # ip_lib
1413 ip: IpFilter, ip, root
1414 find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
1415 ip_exec: IpNetnsExecFilter, ip, root
1416
1417 # l3_tc_lib
1418 l3_tc_show_qdisc: RegExpFilter, tc, root, tc, qdisc, show, dev, .+
1419 l3_tc_add_qdisc_ingress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, ingress
1420 l3_tc_add_qdisc_egress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, root, handle, 1:, htb
1421 l3_tc_show_filters: RegExpFilter, tc, root, tc, -p, -s, -d, filter, show, dev, .+, parent, .+, prio, 1
1422 l3_tc_delete_filters: RegExpFilter, tc, root, tc, filter, del, dev, .+, parent, .+, prio, 1, handle, .+, u32
1423 l3_tc_add_filter_ingress: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, ip, prio, 1, u32, match, ip, dst, .+, police, rate, .+, burst, .+, drop, flowid, :1
1424 l3_tc_add_filter_egress: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, ip, prio, 1, u32, match, ip, src, .+, police, rate, .+, burst, .+, drop, flowid, :1
1425
1426 # For ip monitor
1427 kill_ip_monitor: KillFilter, root, ip, -9
1428
1429 # ovs_lib (if OVSInterfaceDriver is used)
1430 ovs-vsctl: CommandFilter, ovs-vsctl, root
1431
1432 # iptables_manager
1433 iptables-save: CommandFilter, iptables-save, root
1434 iptables-restore: CommandFilter, iptables-restore, root
1435 ip6tables-save: CommandFilter, ip6tables-save, root
1436 ip6tables-restore: CommandFilter, ip6tables-restore, root
1437
1438 # Keepalived
1439 keepalived: CommandFilter, keepalived, root
1440 kill_keepalived: KillFilter, root, keepalived, -HUP, -15, -9
1441
1442 # l3 agent to delete floatingip's conntrack state
1443 conntrack: CommandFilter, conntrack, root
1444
1445 # keepalived state change monitor
1446 keepalived_state_change: CommandFilter, neutron-keepalived-state-change, root
1447 # The following filters are used to kill the keepalived state change monitor.
1448 # Since the monitor runs as a Python script, the system reports that the
1449 # command of the process to be killed is python.
1450 # TODO(mlavalle) These kill filters will be updated once we come up with a
1451 # mechanism to kill using the name of the script being executed by Python
1452 kill_keepalived_monitor_py: KillFilter, root, python, -15
1453 kill_keepalived_monitor_py27: KillFilter, root, python2.7, -15
1454 kill_keepalived_monitor_py3: KillFilter, root, python3, -15
1455 kill_keepalived_monitor_py35: KillFilter, root, python3.5, -15
1456 kill_keepalived_monitor_py36: KillFilter, root, python3.6, -15
1457 kill_keepalived_monitor_py37: KillFilter, root, python3.7, -15
1458 netns_cleanup:
1459 pods:
1460 - dhcp_agent
1461 - l3_agent
1462 - lb_agent
1463 - metadata_agent
1464 - ovn_metadata_agent
1465 - ovs_agent
1466 - sriov_agent
1467 - netns_cleanup_cron
1468 content: |
1469 # neutron-rootwrap command filters for nodes on which neutron is
1470 # expected to control network
1471 #
1472 # This file should be owned by (and only-writeable by) the root user
1473
1474 # format seems to be
1475 # cmd-name: filter-name, raw-command, user, args
1476
1477 [Filters]
1478
1479 # netns-cleanup
1480 netstat: CommandFilter, netstat, root
1481 dhcp:
1482 pods:
1483 - dhcp_agent
1484 - l3_agent
1485 - lb_agent
1486 - metadata_agent
1487 - ovn_metadata_agent
1488 - ovs_agent
1489 - sriov_agent
1490 - netns_cleanup_cron
1491 content: |
1492 # neutron-rootwrap command filters for nodes on which neutron is
1493 # expected to control network
1494 #
1495 # This file should be owned by (and only-writeable by) the root user
1496
1497 # format seems to be
1498 # cmd-name: filter-name, raw-command, user, args
1499
1500 [Filters]
1501
1502 # dhcp-agent
1503 dnsmasq: CommandFilter, dnsmasq, root
1504 # dhcp-agent uses kill as well, that's handled by the generic KillFilter
1505 # it looks like these are the only signals needed, per
1506 # neutron/agent/linux/dhcp.py
1507 kill_dnsmasq: KillFilter, root, /sbin/dnsmasq, -9, -HUP, -15
1508 kill_dnsmasq_usr: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP, -15
1509
1510 ovs-vsctl: CommandFilter, ovs-vsctl, root
1511 ivs-ctl: CommandFilter, ivs-ctl, root
1512 mm-ctl: CommandFilter, mm-ctl, root
1513 dhcp_release: CommandFilter, dhcp_release, root
1514 dhcp_release6: CommandFilter, dhcp_release6, root
1515
1516 # metadata proxy
1517 metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
1518 # RHEL invocation of the metadata proxy will report /usr/bin/python
1519 kill_metadata: KillFilter, root, python, -9
1520 kill_metadata2: KillFilter, root, python2, -9
1521 kill_metadata7: KillFilter, root, python2.7, -9
1522 kill_metadata3: KillFilter, root, python3, -9
1523 kill_metadata35: KillFilter, root, python3.5, -9
1524 kill_metadata36: KillFilter, root, python3.6, -9
1525 kill_metadata37: KillFilter, root, python3.7, -9
1526
1527 # ip_lib
1528 ip: IpFilter, ip, root
1529 find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
1530 ip_exec: IpNetnsExecFilter, ip, root
1531 ebtables:
1532 pods:
1533 - dhcp_agent
1534 - l3_agent
1535 - lb_agent
1536 - metadata_agent
1537 - ovn_metadata_agent
1538 - ovs_agent
1539 - sriov_agent
1540 content: |
1541 # neutron-rootwrap command filters for nodes on which neutron is
1542 # expected to control network
1543 #
1544 # This file should be owned by (and only-writeable by) the root user
1545
1546 # format seems to be
1547 # cmd-name: filter-name, raw-command, user, args
1548
1549 [Filters]
1550
1551 ebtables: CommandFilter, ebtables, root
1552 iptables_firewall:
1553 pods:
1554 - dhcp_agent
1555 - l3_agent
1556 - lb_agent
1557 - metadata_agent
1558 - ovn_metadata_agent
1559 - ovs_agent
1560 - sriov_agent
1561 content: |
1562 # neutron-rootwrap command filters for nodes on which neutron is
1563 # expected to control network
1564 #
1565 # This file should be owned by (and only-writeable by) the root user
1566
1567 # format seems to be
1568 # cmd-name: filter-name, raw-command, user, args
1569
1570 [Filters]
1571
1572 # neutron/agent/linux/iptables_firewall.py
1573 # "iptables-save", ...
1574 iptables-save: CommandFilter, iptables-save, root
1575 iptables-restore: CommandFilter, iptables-restore, root
1576 ip6tables-save: CommandFilter, ip6tables-save, root
1577 ip6tables-restore: CommandFilter, ip6tables-restore, root
1578
1579 # neutron/agent/linux/iptables_firewall.py
1580 # "iptables", "-A", ...
1581 iptables: CommandFilter, iptables, root
1582 ip6tables: CommandFilter, ip6tables, root
1583
1584 # neutron/agent/linux/iptables_firewall.py
1585 sysctl: CommandFilter, sysctl, root
1586
1587 # neutron/agent/linux/ip_conntrack.py
1588 conntrack: CommandFilter, conntrack, root
1589 linuxbridge_plugin:
1590 pods:
1591 - dhcp_agent
1592 - l3_agent
1593 - lb_agent
1594 - metadata_agent
1595 - ovn_metadata_agent
1596 - ovs_agent
1597 - sriov_agent
1598 content: |
1599 # neutron-rootwrap command filters for nodes on which neutron is
1600 # expected to control network
1601 #
1602 # This file should be owned by (and only-writeable by) the root user
1603
1604 # format seems to be
1605 # cmd-name: filter-name, raw-command, user, args
1606
1607 [Filters]
1608
1609 # linuxbridge-agent
1610 # unclear whether both variants are necessary, but I'm transliterating
1611 # from the old mechanism
1612 brctl: CommandFilter, brctl, root
1613 bridge: CommandFilter, bridge, root
1614
1615 # ip_lib
1616 ip: IpFilter, ip, root
1617 find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
1618 ip_exec: IpNetnsExecFilter, ip, root
1619
1620 # tc commands needed for QoS support
1621 tc_replace_tbf: RegExpFilter, tc, root, tc, qdisc, replace, dev, .+, root, tbf, rate, .+, latency, .+, burst, .+
1622 tc_add_ingress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, ingress, handle, .+
1623 tc_delete: RegExpFilter, tc, root, tc, qdisc, del, dev, .+, .+
1624 tc_show_qdisc: RegExpFilter, tc, root, tc, qdisc, show, dev, .+
1625 tc_show_filters: RegExpFilter, tc, root, tc, filter, show, dev, .+, parent, .+
1626 tc_add_filter: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, all, prio, .+, basic, police, rate, .+, burst, .+, mtu, .+, drop
1627 openvswitch_plugin:
1628 pods:
1629 - dhcp_agent
1630 - l3_agent
1631 - lb_agent
1632 - metadata_agent
1633 - ovn_metadata_agent
1634 - ovs_agent
1635 - sriov_agent
1636 content: |
1637 # neutron-rootwrap command filters for nodes on which neutron is
1638 # expected to control network
1639 #
1640 # This file should be owned by (and only-writeable by) the root user
1641
1642 # format seems to be
1643 # cmd-name: filter-name, raw-command, user, args
1644
1645 [Filters]
1646
1647 # openvswitch-agent
1648 # unclear whether both variants are necessary, but I'm transliterating
1649 # from the old mechanism
1650 ovs-vsctl: CommandFilter, ovs-vsctl, root
1651 # NOTE(yamamoto): of_interface=native doesn't use ovs-ofctl
1652 ovs-ofctl: CommandFilter, ovs-ofctl, root
1653 ovs-appctl: CommandFilter, ovs-appctl, root
1654 kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9
1655 ovsdb-client: CommandFilter, ovsdb-client, root
1656 xe: CommandFilter, xe, root
1657
1658 # ip_lib
1659 ip: IpFilter, ip, root
1660 find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
1661 ip_exec: IpNetnsExecFilter, ip, root
1662
1663 # needed for FDB extension
1664 bridge: CommandFilter, bridge, root
1665 privsep:
1666 pods:
1667 - dhcp_agent
1668 - l3_agent
1669 - lb_agent
1670 - metadata_agent
1671 - ovn_metadata_agent
1672 - ovs_agent
1673 - sriov_agent
1674 - netns_cleanup_cron
1675 content: |
1676 # Command filters to allow privsep daemon to be started via rootwrap.
1677 #
1678 # This file should be owned by (and only-writeable by) the root user
1679
1680 [Filters]
1681
1682 # By installing the following, the local admin is asserting that:
1683 #
1684 # 1. The python module load path used by privsep-helper
1685 # command as root (as started by sudo/rootwrap) is trusted.
1686 # 2. Any oslo.config files matching the --config-file
1687 # arguments below are trusted.
1688 # 3. Users allowed to run sudo/rootwrap with this configuration(*) are
1689 # also allowed to invoke python "entrypoint" functions from
1690 # --privsep_context with the additional (possibly root) privileges
1691 # configured for that context.
1692 #
1693 # (*) ie: the user is allowed by /etc/sudoers to run rootwrap as root
1694 #
1695 # In particular, the oslo.config and python module path must not
1696 # be writeable by the unprivileged user.
1697
1698 # oslo.privsep default neutron context
1699 privsep: PathFilter, privsep-helper, root,
1700 --config-file, /etc,
1701 --privsep_context, neutron.privileged.default,
1702 --privsep_sock_path, /
1703
1704 # NOTE: A second `--config-file` arg can also be added above. Since
1705 # many neutron components are installed like that (eg: by devstack).
1706 # Adjust to suit local requirements.
1707 linux_vxlan:
1708 pods:
1709 - bagpipe_bgp
1710 content: |
1711 # bagpipe-bgp-rootwrap command filters for nodes on which bagpipe-bgp is
1712 # expected to control VXLAN Linux Bridge dataplane
1713 #
1714 # This file should be owned by (and only-writeable by) the root user
1715
1716 # format seems to be
1717 # cmd-name: filter-name, raw-command, user, args
1718
1719 [Filters]
1720
1721 #
1722 modprobe: CommandFilter, modprobe, root
1723
1724 #
1725 brctl: CommandFilter, brctl, root
1726 bridge: CommandFilter, bridge, root
1727
1728 # ip_lib
1729 ip: IpFilter, ip, root
1730 ip_exec: IpNetnsExecFilter, ip, root
1731
1732 # shell (for piped commands)
1733 sh: CommandFilter, sh, root
1734 mpls_ovs_dataplane:
1735 pods:
1736 - bagpipe_bgp
1737 content: |
1738 # bagpipe-bgp-rootwrap command filters for nodes on which bagpipe-bgp is
1739 # expected to control MPLS OpenVSwitch dataplane
1740 #
1741 # This file should be owned by (and only-writeable by) the root user
1742
1743 # format seems to be
1744 # cmd-name: filter-name, raw-command, user, args
1745
1746 [Filters]
1747
1748 # openvswitch
1749 ovs-vsctl: CommandFilter, ovs-vsctl, root
1750 ovs-ofctl: CommandFilter, ovs-ofctl, root
1751
1752 # ip_lib
1753 ip: IpFilter, ip, root
1754 ip_exec: IpNetnsExecFilter, ip, root
1755
1756 # shell (for piped commands)
1757 sh: CommandFilter, sh, root
1758 neutron:
1759 DEFAULT:
1760 metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy
1761 log_config_append: /etc/neutron/logging.conf
1762 # NOTE(portdirect): the bind port should not be defined, and is manipulated
1763 # via the endpoints section.
1764 bind_port: null
1765 default_availability_zones: nova
1766 api_workers: 1
1767 rpc_workers: 4
1768 allow_overlapping_ips: True
1769 state_path: /var/lib/neutron
1770 # core_plugin can be: ml2, calico
1771 core_plugin: ml2
1772 # service_plugin can be: router, odl-router, empty for calico,
1773 # networking_ovn.l3.l3_ovn.OVNL3RouterPlugin for OVN
1774 service_plugins: router
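     # A minimal sketch for an OVN-backed deployment, using the plugin named in
     # the note above; newer neutron releases also accept the shorter
     # "ovn-router" alias (shown for illustration only):
     # service_plugins: networking_ovn.l3.l3_ovn.OVNL3RouterPlugin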
1775 allow_automatic_l3agent_failover: True
1776 l3_ha: True
1777 max_l3_agents_per_router: 2
1778 l3_ha_network_type: vxlan
1779 network_auto_schedule: True
1780 router_auto_schedule: True
1781 # (NOTE)portdirect: if unset this is populated dynamically from the value in
1782 # 'network.backend' to sane defaults.
1783 interface_driver: null
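     # A sketch of what this typically resolves to with the default openvswitch
     # backend (illustrative only; the chart fills this in automatically):
     # interface_driver: openvswitch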
1784 oslo_concurrency:
1785 lock_path: /var/lib/neutron/tmp
1786 database:
1787 max_retries: -1
1788 agent:
1789 root_helper: sudo /var/lib/openstack/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
1790 root_helper_daemon: sudo /var/lib/openstack/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
1791 oslo_messaging_notifications:
1792 driver: messagingv2
1793 oslo_messaging_rabbit:
1794 rabbit_ha_queues: true
1795 oslo_middleware:
1796 enable_proxy_headers_parsing: true
1797 oslo_policy:
1798 policy_file: /etc/neutron/policy.yaml
1799 ovn:
1800 enable_distributed_floating_ip: true
1801 ovn_metadata_enabled: true
1802 nova:
1803 auth_type: password
1804 auth_version: v3
1805 endpoint_type: internal
1806 placement:
1807 auth_type: password
1808 auth_version: v3
1809 endpoint_type: internal
1810 designate:
1811 auth_type: password
1812 auth_version: v3
1813 endpoint_type: internal
1814 allow_reverse_dns_lookup: true
1815 ironic:
1816 endpoint_type: internal
1817 keystone_authtoken:
1818 service_token_roles: service
1819 service_token_roles_required: true
1820 memcache_security_strategy: ENCRYPT
1821 auth_type: password
1822 auth_version: v3
1823 service_type: network
1824 octavia:
1825 request_poll_timeout: 3000
1826 logging:
1827 loggers:
1828 keys:
1829 - root
1830 - neutron
1831 - neutron_taas
1832 handlers:
1833 keys:
1834 - stdout
1835 - stderr
1836 - "null"
1837 formatters:
1838 keys:
1839 - context
1840 - default
1841 logger_root:
1842 level: WARNING
1843 handlers: 'null'
1844 logger_neutron:
1845 level: INFO
1846 handlers:
1847 - stdout
1848 qualname: neutron
1849 logger_neutron_taas:
1850 level: INFO
1851 handlers:
1852 - stdout
1853 qualname: neutron_taas
1854 logger_amqp:
1855 level: WARNING
1856 handlers: stderr
1857 qualname: amqp
1858 logger_amqplib:
1859 level: WARNING
1860 handlers: stderr
1861 qualname: amqplib
1862 logger_eventletwsgi:
1863 level: WARNING
1864 handlers: stderr
1865 qualname: eventlet.wsgi.server
1866 logger_sqlalchemy:
1867 level: WARNING
1868 handlers: stderr
1869 qualname: sqlalchemy
1870 logger_boto:
1871 level: WARNING
1872 handlers: stderr
1873 qualname: boto
1874 handler_null:
1875 class: logging.NullHandler
1876 formatter: default
1877 args: ()
1878 handler_stdout:
1879 class: StreamHandler
1880 args: (sys.stdout,)
1881 formatter: context
1882 handler_stderr:
1883 class: StreamHandler
1884 args: (sys.stderr,)
1885 formatter: context
1886 formatter_context:
1887 class: oslo_log.formatters.ContextFormatter
1888 datefmt: "%Y-%m-%d %H:%M:%S"
1889 formatter_default:
1890 format: "%(message)s"
1891 datefmt: "%Y-%m-%d %H:%M:%S"
1892 plugins:
1893 ml2_conf:
1894 ml2:
1895 extension_drivers: port_security
1896 # (NOTE)portdirect: if unset this is populated dynamically from the value
1897 # in 'network.backend' to sane defaults.
1898 mechanism_drivers: null
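     # A sketch of what this typically resolves to with the default openvswitch
     # backend (illustrative only; the chart fills this in automatically):
     # mechanism_drivers: l2population,openvswitch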
1899 type_drivers: flat,vlan,vxlan,local
1900 tenant_network_types: vxlan
1901 ml2_type_vxlan:
1902 vni_ranges: 1:1000
1903 vxlan_group: 239.1.1.1
1904 ml2_type_flat:
1905 flat_networks: "*"
1906 # If you want to use the external network as a tagged provider network,
1907 # a range should be specified including the intended VLAN target
1908 # using ml2_type_vlan.network_vlan_ranges:
1909 # ml2_type_vlan:
1910 # network_vlan_ranges: "external:1100:1110"
1911 ml2_type_geneve:
1912 vni_ranges: 1:65536
1913 max_header_size: 38
1914 agent:
1915 extensions: ""
1916 ml2_conf_sriov: null
1917 taas:
1918 taas:
1919 enabled: False
1920 openvswitch_agent:
1921 agent:
1922 tunnel_types: vxlan
1923 l2_population: True
1924 arp_responder: True
1925 ovs:
1926 bridge_mappings: "external:br-ex"
1927 securitygroup:
1928 firewall_driver: neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
1929 linuxbridge_agent:
1930 linux_bridge:
1931 # To define Flat and VLAN connections, in LB we can assign
1932 # specific interface to the flat/vlan network name using:
1933 # physical_interface_mappings: "external:eth3"
1934 # Or we can set the mapping between the network and bridge:
1935 bridge_mappings: "external:br-ex"
1936 # The two above options are exclusive, do not use both of them at once
1937 securitygroup:
1938 firewall_driver: iptables
1939 vxlan:
1940 l2_population: True
1941 arp_responder: True
1942 macvtap_agent: null
1943 sriov_agent:
1944 securitygroup:
1945 firewall_driver: neutron.agent.firewall.NoopFirewallDriver
1946 sriov_nic:
1947 physical_device_mappings: physnet2:enp3s0f1
1948 # NOTE: do not use null here, use an empty string
1949 exclude_devices: ""
1950 dhcp_agent:
1951 DEFAULT:
1952 # (NOTE)portdirect: if unset this is populated dynamically from the value in
1953 # 'network.backend' to sane defaults.
1954 interface_driver: null
1955 dnsmasq_config_file: /etc/neutron/dnsmasq.conf
1956 force_metadata: True
1957 dnsmasq: |
1958 #no-hosts
1959 #port=5353
1960 #cache-size=500
1961 #no-negcache
1962 #dns-forward-max=100
1963 #resolve-file=
1964 #strict-order
1965 #bind-interface
1966 #bind-dynamic
1967 #domain=
1968 #dhcp-range=10.10.10.10,10.10.10.100,24h
1969 #dhcp-lease-max=150
1970 #dhcp-host=11:22:33:44:55:66,ignore
1971 #dhcp-option=3,10.10.10.1
1972 #dhcp-option-force=26,1450
1973
1974 l3_agent:
1975 DEFAULT:
1976 # (NOTE)portdirect: if unset this is populated dynamically from the value in
1977 # 'network.backend' to sane defaults.
1978 interface_driver: null
1979 agent_mode: legacy
1980 metering_agent: null
1981 metadata_agent:
1982 DEFAULT:
1983 # we cannot change the proxy socket path as it is declared
1984 # as a hostPath volume from agent daemonsets
1985 metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy
1986 metadata_proxy_shared_secret: "password"
1987 cache:
1988 enabled: true
1989 backend: dogpile.cache.memcached
1990 bagpipe_bgp: {}
1991 ovn_metadata_agent:
1992 DEFAULT:
1993 # we cannot change the proxy socket path as it is declared
1994 # as a hostPath volume from agent daemonsets
1995 metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy
1996 metadata_proxy_shared_secret: "password"
1997 metadata_workers: 2
1998 cache:
1999 enabled: true
2000 backend: dogpile.cache.memcached
2001 ovs:
2002 ovsdb_connection: unix:/run/openvswitch/db.sock
2003
2004 rabbitmq:
2005    # NOTE(rk760n): adding rmq policy to mirror messages from notification queues and set an expiration time for them
2006 policies:
2007 - vhost: "neutron"
2008 name: "ha_ttl_neutron"
2009 definition:
2010        # mirror messages to other nodes in the rmq cluster
2011 ha-mode: "all"
2012 ha-sync-mode: "automatic"
2013 # 70s
2014 message-ttl: 70000
2015 priority: 0
2016 apply-to: all
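        # match every queue except those starting with "amq." or "reply_"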
2017 pattern: '^(?!(amq\.|reply_)).*'
2018  ## NOTE: "besteffort" is meant for dev envs with mixed compute types only.
2019  ## It helps prevent the sriov init script from failing due to mismatched NICs.
2020  ## For prod envs, the target NICs should match, and the init script should fail otherwise.
2021 ## sriov_init:
2022 ## - besteffort
2023 sriov_init:
2024 -
2025 # auto_bridge_add is a table of "bridge: interface" pairs
2026  # To automatically add physical interfaces to specific bridges,
2027  # for example eth3 to bridge br-physnet1, if0 to br0 and iface_two
2028  # to br1, do something like:
2029 #
2030 # auto_bridge_add:
2031 # br-physnet1: eth3
2032 # br0: if0
2033 # br1: iface_two
2034 # br-ex will be added by default
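  # (a null value is assumed to create the bridge without attaching any
  # physical interface to it)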
2035 auto_bridge_add:
2036 br-ex: null
2037
2038  # Network off-loading configuration
2039 netoffload:
2040    enabled: false
2041    asap2:
2042 # - dev: enp97s0f0
2043 # vfs: 16
2044
2045  # configuration of OVS DPDK bridges and NICs
2046 # this is a separate section and not part of the auto_bridge_add section
2047 # because additional parameters are needed
2048 ovs_dpdk:
2049 enabled: false
2050    # Setting update_dpdk_bond_config to true keeps the default behavior,
2051    # which may cause disruptions in ovs dpdk traffic when the neutron
2052    # ovs agent restarts or when dpdk nic/bond configurations are changed.
2053    # Setting it to false configures dpdk on the first run and disables
2054    # nic/bond reconfiguration on agent restart or config update.
2055 update_dpdk_bond_config: true
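    # uio_pci_generic is the default below; vfio-pci is another commonly used
    # userspace driver, depending on IOMMU and hardware support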
2056 driver: uio_pci_generic
2057 # In case bonds are configured, the nics which are part of those bonds
2058 # must NOT be provided here.
2059 nics:
2060 - name: dpdk0
2061 pci_id: '0000:05:00.0'
2062 # Set VF Index in case some particular VF(s) need to be
2063 # used with ovs-dpdk.
2064 # vf_index: 0
2065 bridge: br-phy
2066 migrate_ip: true
2067 n_rxq: 2
2068 n_txq: 2
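      # pmd_rxq_affinity pins rx queues to PMD cores as "<queue>:<core>" pairs,
      # e.g. "0:3,1:27" pins queue 0 to core 3 and queue 1 to core 27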
2069 pmd_rxq_affinity: "0:3,1:27"
2070 ofport_request: 1
2071 # optional parameters for tuning the OVS DPDK config
2072 # in alignment with the available hardware resources
2073 # mtu: 2000
2074 # n_rxq_size: 1024
2075 # n_txq_size: 1024
2076 # vhost-iommu-support: true
2077 bridges:
2078 - name: br-phy
2079 # optional parameter, in case tunnel traffic needs to be transported over a vlan underlay
2080 # - tunnel_underlay_vlan: 45
2081 # Optional parameter for configuring bonding in OVS-DPDK
2082 # - name: br-phy-bond0
2083 # bonds:
2084 # - name: dpdkbond0
2085 # bridge: br-phy-bond0
2086 # # The IP from the first nic in nics list shall be used
2087 # migrate_ip: true
2088 # mtu: 2000
2089 # # Please note that n_rxq is set for each NIC individually
2090 # # rather than denoting the total number of rx queues for
2091 # # the bond as a whole. So setting n_rxq = 2 below for ex.
2092 # # would be 4 rx queues in total for the bond.
2093 # # Same for n_txq
2094 # n_rxq: 2
2095 # n_txq: 2
2096 # ofport_request: 1
2097 # n_rxq_size: 1024
2098 # n_txq_size: 1024
2099 # vhost-iommu-support: true
2100 # ovs_options: "bond_mode=active-backup"
2101 # nics:
2102 # - name: dpdk_b0s0
2103 # pci_id: '0000:06:00.0'
2104 # pmd_rxq_affinity: "0:3,1:27"
2105 # # Set VF Index in case some particular VF(s) need to be
2106 # # used with ovs-dpdk. In which case pci_id of PF must be
2107 # # provided above.
2108 # # vf_index: 0
2109 # - name: dpdk_b0s1
2110 # pci_id: '0000:07:00.0'
2111 # pmd_rxq_affinity: "0:3,1:27"
2112 # # Set VF Index in case some particular VF(s) need to be
2113 # # used with ovs-dpdk. In which case pci_id of PF must be
2114 # # provided above.
2115 # # vf_index: 0
2116 #
2117 # Set the log level for each target module (default level is always dbg)
2118 # Supported log levels are: off, emer, err, warn, info, dbg
2119 #
2120 # modules:
2121 # - name: dpdk
2122 # log_level: info
2123
2124# Names of secrets used by bootstrap and environmental checks
2125secrets:
2126 identity:
2127 admin: neutron-keystone-admin
2128 neutron: neutron-keystone-user
2129 test: neutron-keystone-test
2130 oslo_db:
2131 admin: neutron-db-admin
2132 neutron: neutron-db-user
2133 oslo_messaging:
2134 admin: neutron-rabbitmq-admin
2135 neutron: neutron-rabbitmq-user
2136 tls:
2137 compute_metadata:
2138 metadata:
2139 internal: metadata-tls-metadata
2140 network:
2141 server:
2142 public: neutron-tls-public
2143 internal: neutron-tls-server
2144 oci_image_registry:
2145 neutron: neutron-oci-image-registry
2146
2147# typically overridden by environmental
2148# values, but should include all endpoints
2149# required by this chart
2150endpoints:
2151 cluster_domain_suffix: cluster.local
2152 local_image_registry:
2153 name: docker-registry
2154 namespace: docker-registry
2155 hosts:
2156 default: localhost
2157 internal: docker-registry
2158 node: localhost
2159 host_fqdn_override:
2160 default: null
2161 port:
2162 registry:
2163 node: 5000
2164 oci_image_registry:
2165 name: oci-image-registry
2166 namespace: oci-image-registry
2167 auth:
2168 enabled: false
2169 neutron:
2170 username: neutron
2171 password: password
2172 hosts:
2173 default: localhost
2174 host_fqdn_override:
2175 default: null
2176 port:
2177 registry:
2178 default: null
2179 oslo_db:
2180 auth:
2181 admin:
2182 username: root
2183 password: password
2184 secret:
2185 tls:
2186 internal: mariadb-tls-direct
2187 neutron:
2188 username: neutron
2189 password: password
2190 hosts:
2191 default: mariadb
2192 host_fqdn_override:
2193 default: null
2194 path: /neutron
2195 scheme: mysql+pymysql
2196 port:
2197 mysql:
2198 default: 3306
2199 oslo_messaging:
2200 auth:
2201 admin:
2202 username: rabbitmq
2203 password: password
2204 secret:
2205 tls:
2206 internal: rabbitmq-tls-direct
2207 neutron:
2208 username: neutron
2209 password: password
2210 statefulset:
2211 replicas: 2
2212 name: rabbitmq-rabbitmq
2213 hosts:
2214 default: rabbitmq
2215 host_fqdn_override:
2216 default: null
2217 path: /neutron
2218 scheme: rabbit
2219 port:
2220 amqp:
2221 default: 5672
2222 http:
2223 default: 15672
2224 oslo_cache:
2225 auth:
2226      # NOTE(portdirect): this is used to define the value for the keystone
2227      # authtoken cache encryption key; if not set, it will be populated
2228      # automatically with a random value, but to take advantage of
2229      # this feature all services should be set to use the same key
2230      # and memcached service.
2231 memcache_secret_key: null
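      # e.g. a shared override applied to all charts (value is illustrative only):
      # memcache_secret_key: "change-me-shared-cache-key"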
2232 hosts:
2233 default: memcached
2234 host_fqdn_override:
2235 default: null
2236 port:
2237 memcache:
2238 default: 11211
2239 compute:
2240 name: nova
2241 hosts:
2242 default: nova-api
2243 public: nova
2244 host_fqdn_override:
2245 default: null
2246 path:
2247 default: "/v2.1/%(tenant_id)s"
2248 scheme:
2249 default: 'http'
2250 port:
2251 api:
2252 default: 8774
2253 public: 80
2254 novncproxy:
2255 default: 6080
2256 compute_metadata:
2257 name: nova
2258 hosts:
2259 default: nova-metadata
2260 public: metadata
2261 host_fqdn_override:
2262 default: null
2263 path:
2264 default: /
2265 scheme:
2266 default: 'http'
2267 port:
2268 metadata:
2269 default: 8775
2270 public: 80
2271 identity:
2272 name: keystone
2273 auth:
2274 admin:
2275 region_name: RegionOne
2276 username: admin
2277 password: password
2278 project_name: admin
2279 user_domain_name: default
2280 project_domain_name: default
2281 neutron:
2282 role: admin
2283 region_name: RegionOne
2284 username: neutron
2285 password: password
2286 project_name: service
2287 user_domain_name: service
2288 project_domain_name: service
2289 nova:
2290 region_name: RegionOne
2291 project_name: service
2292 username: nova
2293 password: password
2294 user_domain_name: service
2295 project_domain_name: service
2296      placement:
2297 region_name: RegionOne
2298 project_name: service
2299 username: placement
2300 password: password
2301 user_domain_name: service
2302 project_domain_name: service
2303      designate:
2304 region_name: RegionOne
2305 project_name: service
2306 username: designate
2307 password: password
2308 user_domain_name: service
2309 project_domain_name: service
2310 ironic:
2311 region_name: RegionOne
2312 project_name: service
2313 username: ironic
2314 password: password
2315 user_domain_name: service
2316 project_domain_name: service
2317 test:
2318 role: admin
2319 region_name: RegionOne
2320 username: neutron-test
2321 password: password
2322 # NOTE: this project will be purged and reset if
2323 # conf.rally_tests.force_project_purge is set to true
2324 # which may be required upon test failure, but be aware that this will
2325        # expunge all openstack objects, so if this is used a separate project
2326        # should be used for each helm test, and it should be ensured
2327 # that this project is not in use by other tenants
2328 project_name: test
2329 user_domain_name: service
2330 project_domain_name: service
2331 hosts:
2332 default: keystone
2333 internal: keystone-api
2334 host_fqdn_override:
2335 default: null
2336 path:
2337 default: /v3
2338 scheme:
2339 default: http
2340 port:
2341 api:
2342 default: 80
2343 internal: 5000
2344 network:
2345 name: neutron
2346 hosts:
2347 default: neutron-server
2348 public: neutron
2349 host_fqdn_override:
2350 default: null
2351      # NOTE(portdirect): this chart supports TLS for FQDN-overridden public
2352 # endpoints using the following format:
2353 # public:
2354 # host: null
2355 # tls:
2356 # crt: null
2357 # key: null
2358 path:
2359 default: null
2360 scheme:
2361 default: 'http'
2362 service: 'http'
2363 port:
2364 api:
2365 default: 9696
2366 public: 80
2367 service: 9696
2368 load_balancer:
2369 name: octavia
2370 hosts:
2371 default: octavia-api
2372 public: octavia
2373 host_fqdn_override:
2374 default: null
2375 path:
2376 default: null
2377 scheme:
2378 default: http
2379 port:
2380 api:
2381 default: 9876
2382 public: 80
2383 fluentd:
2384 namespace: osh-infra
2385 name: fluentd
2386 hosts:
2387 default: fluentd-logging
2388 host_fqdn_override:
2389 default: null
2390 path:
2391 default: null
2392 scheme: 'http'
2393 port:
2394 service:
2395 default: 24224
2396 metrics:
2397 default: 24220
2398 dns:
2399 name: designate
2400 hosts:
2401 default: designate-api
2402 public: designate
2403 host_fqdn_override:
2404 default: null
2405 path:
2406 default: /
2407 scheme:
2408 default: 'http'
2409 port:
2410 api:
2411 default: 9001
2412 public: 80
2413 baremetal:
2414 name: ironic
2415 hosts:
2416 default: ironic-api
2417 public: ironic
2418 host_fqdn_override:
2419 default: null
2420 path:
2421 default: null
2422 scheme:
2423 default: 'http'
2424 port:
2425 api:
2426 default: 6385
2427 public: 80
2428 # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress
2429  # They are used to enable the Egress K8s network policy.
2430 kube_dns:
2431 namespace: kube-system
2432 name: kubernetes-dns
2433 hosts:
2434 default: kube-dns
2435 host_fqdn_override:
2436 default: null
2437 path:
2438 default: null
2439 scheme: http
2440 port:
2441 dns:
2442 default: 53
2443 protocol: UDP
2444 ingress:
2445 namespace: null
2446 name: ingress
2447 hosts:
2448 default: ingress
2449 port:
2450 ingress:
2451 default: 80
2452
2453network_policy:
2454 neutron:
2455 # TODO(lamt): Need to tighten this ingress for security.
2456 ingress:
2457 - {}
2458 egress:
2459 - {}
2460
2461helm3_hook: true
2462
2463health_probe:
2464 logging:
2465 level: ERROR
2466
2467tls:
2468 identity: false
2469 oslo_messaging: false
2470 oslo_db: false
2471
2472manifests:
2473 certificates: false
2474 configmap_bin: true
2475 configmap_etc: true
2476 daemonset_dhcp_agent: true
2477 daemonset_l3_agent: true
2478 daemonset_lb_agent: true
2479 daemonset_metadata_agent: true
2480 daemonset_ovs_agent: true
2481 daemonset_sriov_agent: true
2482 daemonset_l2gw_agent: false
2483 daemonset_bagpipe_bgp: false
2484 daemonset_netns_cleanup_cron: true
2485 deployment_ironic_agent: false
2486 deployment_server: true
2487 ingress_server: true
2488 job_bootstrap: true
2489 job_db_init: true
2490 job_db_sync: true
2491 job_db_drop: false
2492 job_image_repo_sync: true
2493 job_ks_endpoints: true
2494 job_ks_service: true
2495 job_ks_user: true
2496 job_rabbit_init: true
2497 pdb_server: true
2498 pod_rally_test: true
2499 network_policy: false
2500 secret_db: true
2501 secret_ingress_tls: true
2502 secret_keystone: true
2503 secret_rabbitmq: true
2504 secret_registry: true
2505 service_ingress_server: true
2506 service_server: true
2507...