# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Default values for neutron.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value

---
# Overrides the release group label applied to all resources; null
# falls back to the Helm release name.
release_group: null

# Container images used by every job, agent and server component of the
# chart, plus registry-related behaviour (pull policy, local registry sync).
images:
  tags:
    bootstrap: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    test: docker.io/xrally/xrally-openstack:2.0.0
    purge_test: docker.io/openstackhelm/ospurge:latest
    db_init: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    neutron_db_sync: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    db_drop: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    rabbit_init: docker.io/rabbitmq:3.7-management
    ks_user: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    ks_service: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    ks_endpoints: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    netoffload: ghcr.io/vexxhost/netoffload:v1.0.1
    neutron_server: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_dhcp: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_metadata: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_ovn_metadata: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_l3: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_l2gw: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_openvswitch_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_linuxbridge_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_sriov_agent: docker.io/openstackhelm/neutron:stein-18.04-sriov
    neutron_sriov_agent_init: docker.io/openstackhelm/neutron:stein-18.04-sriov
    neutron_bagpipe_bgp: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_ironic_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_netns_cleanup_cron: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
    image_repo_sync: docker.io/docker:17.07.0
  pull_policy: "IfNotPresent"
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync

# Node-selector labels controlling where each daemonset/deployment is
# scheduled.
labels:
  agent:
    dhcp:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
    l3:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
    metadata:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
    ovn_metadata:
      node_selector_key: openstack-compute-node
      node_selector_value: enabled
    l2gw:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  lb:
    node_selector_key: linuxbridge
    node_selector_value: enabled
  # openvswitch is a special case, requiring a special
  # label that can apply to both control hosts
  # and compute hosts, until we get more sophisticated
  # with our daemonset scheduling
  ovs:
    node_selector_key: openvswitch
    node_selector_value: enabled
  sriov:
    node_selector_key: sriov
    node_selector_value: enabled
  bagpipe_bgp:
    node_selector_key: openstack-compute-node
    node_selector_value: enabled
  server:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  ironic_agent:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  netns_cleanup_cron:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  test:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled

network:
  # provide what type of network wiring will be used
  backend:
    - openvswitch
  # NOTE(Portdirect): Share network namespaces with the host,
  # allowing agents to be restarted without packet loss and simpler
  # debugging. This feature requires mount propagation support.
  share_namespaces: true
  interface:
    # Tunnel interface will be used for VXLAN tunneling.
    tunnel: null
    # If tunnel is null there is a fallback mechanism to search
    # for interface with routing using tunnel network cidr.
    tunnel_network_cidr: "0/0"
    # To perform setup of network interfaces using the SR-IOV init
    # container you can use a section similar to:
    # sriov:
    #   - device: ${DEV}
    #     num_vfs: 8
    #     mtu: 9214
    #     promisc: false
    #     qos:
    #       - vf_num: 0
    #         share: 10
    #         queues_per_vf:
    #           - num_queues: 16
    #             exclude_vf: 0,11,21
  server:
    ingress:
      public: true
      classes:
        namespace: "nginx"
        cluster: "nginx-cluster"
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
    external_policy_local: false
    node_port:
      enabled: false
      port: 30096

# Optional post-install bootstrap job; disabled by default.
bootstrap:
  enabled: false
  ks_user: neutron
  script: |
    openstack token issue

# kubernetes-entrypoint dependency graph: which jobs/services/pods each
# component waits for before starting.
dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
          - neutron-image-repo-sync
        services:
          - endpoint: node
            service: local_image_registry
    targeted:
      sriov: {}
      l2gateway: {}
      bagpipe_bgp: {}
      ovn: {}
      openvswitch:
        dhcp:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-ovs-agent
        l3:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-ovs-agent
        metadata:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-ovs-agent
      linuxbridge:
        dhcp:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-lb-agent
        l3:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-lb-agent
        metadata:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-lb-agent
        lb_agent:
          pod: null
  static:
    bootstrap:
      services:
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute
    db_drop:
      services:
        - endpoint: internal
          service: oslo_db
    db_init:
      services:
        - endpoint: internal
          service: oslo_db
    db_sync:
      jobs:
        - neutron-db-init
      services:
        - endpoint: internal
          service: oslo_db
    dhcp:
      pod: null
      jobs:
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute
    ks_endpoints:
      jobs:
        - neutron-ks-service
      services:
        - endpoint: internal
          service: identity
    ks_service:
      services:
        - endpoint: internal
          service: identity
    ks_user:
      services:
        - endpoint: internal
          service: identity
    rabbit_init:
      services:
        - service: oslo_messaging
          endpoint: internal
    l3:
      pod: null
      jobs:
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute
    lb_agent:
      pod: null
      jobs:
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: network
    metadata:
      pod: null
      jobs:
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute
        - endpoint: public
          service: compute_metadata
    ovn_metadata:
      pod:
        - requireSameNode: true
          labels:
            application: ovn
            component: ovn-controller
      services:
        - endpoint: internal
          service: compute_metadata
    ovs_agent:
      jobs:
        - neutron-rabbit-init
      pod:
        - requireSameNode: true
          labels:
            application: openvswitch
            component: server
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: network
    server:
      jobs:
        - neutron-db-sync
        - neutron-ks-user
        - neutron-ks-endpoints
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: oslo_cache
        - endpoint: internal
          service: identity
    ironic_agent:
      jobs:
        - neutron-db-sync
        - neutron-ks-user
        - neutron-ks-endpoints
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: oslo_cache
        - endpoint: internal
          service: identity
    tests:
      services:
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute
    image_repo_sync:
      services:
        - endpoint: internal
          service: local_image_registry

# Pod-level tuning: probes, security contexts, affinity, tolerations,
# extra mounts, replica counts, upgrade strategy and resource requests.
pod:
  use_fqdn:
    neutron_agent: true
  probes:
    rpc_timeout: 60
    rpc_retries: 2
    dhcp_agent:
      dhcp_agent:
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 190
            timeoutSeconds: 185
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 600
            timeoutSeconds: 580
    l3_agent:
      l3_agent:
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 190
            timeoutSeconds: 185
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 600
            timeoutSeconds: 580
    lb_agent:
      lb_agent:
        readiness:
          enabled: true
    metadata_agent:
      metadata_agent:
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 190
            timeoutSeconds: 185
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 600
            timeoutSeconds: 580
    ovn_metadata_agent:
      ovn_metadata_agent:
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 190
            timeoutSeconds: 185
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 600
            timeoutSeconds: 580
    ovs_agent:
      ovs_agent:
        readiness:
          enabled: true
          params:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 600
            timeoutSeconds: 580
    sriov_agent:
      sriov_agent:
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 190
            timeoutSeconds: 185
    bagpipe_bgp:
      bagpipe_bgp:
        readiness:
          enabled: true
          params:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 60
    l2gw_agent:
      l2gw_agent:
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 15
            timeoutSeconds: 65
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 90
            timeoutSeconds: 70
    server:
      server:
        readiness:
          enabled: true
          params:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 60
  security_context:
    neutron_dhcp_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_dhcp_agent:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_l2gw_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_l2gw_agent:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_bagpipe_bgp:
      pod:
        runAsUser: 42424
      container:
        neutron_bagpipe_bgp:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_l3_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_l3_agent:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_lb_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_lb_agent_kernel_modules:
          capabilities:
            add:
              - SYS_MODULE
              - SYS_CHROOT
          runAsUser: 0
          readOnlyRootFilesystem: true
        neutron_lb_agent_init:
          privileged: true
          runAsUser: 0
          readOnlyRootFilesystem: true
        neutron_lb_agent:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_metadata_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_metadata_agent_init:
          runAsUser: 0
          readOnlyRootFilesystem: true
    neutron_ovn_metadata_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_ovn_metadata_agent_init:
          runAsUser: 0
          readOnlyRootFilesystem: true
    neutron_ovs_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_openvswitch_agent_kernel_modules:
          capabilities:
            add:
              - SYS_MODULE
              - SYS_CHROOT
          runAsUser: 0
          readOnlyRootFilesystem: true
        netoffload:
          privileged: true
          runAsUser: 0
          readOnlyRootFilesystem: true
        neutron_ovs_agent_init:
          privileged: true
          runAsUser: 0
          readOnlyRootFilesystem: true
        neutron_ovs_agent:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_server:
      pod:
        runAsUser: 42424
      container:
        nginx:
          runAsUser: 0
          readOnlyRootFilesystem: false
        neutron_server:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
    neutron_sriov_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_sriov_agent_init:
          privileged: true
          runAsUser: 0
          readOnlyRootFilesystem: false
        neutron_sriov_agent:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_ironic_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_ironic_agent:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
    neutron_netns_cleanup_cron:
      pod:
        runAsUser: 42424
      container:
        neutron_netns_cleanup_cron:
          readOnlyRootFilesystem: true
          privileged: true
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
      weight:
        default: 10
  tolerations:
    neutron:
      enabled: false
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
          effect: NoSchedule
  mounts:
    neutron_server:
      init_container: null
      neutron_server:
        volumeMounts:
        volumes:
    neutron_dhcp_agent:
      init_container: null
      neutron_dhcp_agent:
        volumeMounts:
        volumes:
    neutron_l3_agent:
      init_container: null
      neutron_l3_agent:
        volumeMounts:
        volumes:
    neutron_lb_agent:
      init_container: null
      neutron_lb_agent:
        volumeMounts:
        volumes:
    neutron_metadata_agent:
      init_container: null
      neutron_metadata_agent:
        volumeMounts:
        volumes:
    neutron_ovn_metadata_agent:
      init_container: null
      neutron_ovn_metadata_agent:
        volumeMounts:
        volumes:
    neutron_ovs_agent:
      init_container: null
      neutron_ovs_agent:
        volumeMounts:
        volumes:
    neutron_sriov_agent:
      init_container: null
      neutron_sriov_agent:
        volumeMounts:
        volumes:
    neutron_l2gw_agent:
      init_container: null
      neutron_l2gw_agent:
        volumeMounts:
        volumes:
    bagpipe_bgp:
      init_container: null
      bagpipe_bgp:
        volumeMounts:
        volumes:
    neutron_ironic_agent:
      init_container: null
      neutron_ironic_agent:
        volumeMounts:
        volumes:
    neutron_netns_cleanup_cron:
      init_container: null
      neutron_netns_cleanup_cron:
        volumeMounts:
        volumes:
    neutron_tests:
      init_container: null
      neutron_tests:
        volumeMounts:
        volumes:
    neutron_bootstrap:
      init_container: null
      neutron_bootstrap:
        volumeMounts:
        volumes:
    neutron_db_sync:
      neutron_db_sync:
        volumeMounts:
          - name: db-sync-conf
            mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini
            subPath: ml2_conf.ini
            readOnly: true
        volumes:
  replicas:
    server: 1
    ironic_agent: 1
  lifecycle:
    upgrades:
      deployments:
        revision_history: 3
        pod_replacement_strategy: RollingUpdate
        rolling_update:
          max_unavailable: 1
          max_surge: 3
      daemonsets:
        pod_replacement_strategy: RollingUpdate
        dhcp_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        l3_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        lb_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        metadata_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        ovn_metadata_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        ovs_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        sriov_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        netns_cleanup_cron:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
    disruption_budget:
      server:
        min_available: 0
    termination_grace_period:
      server:
        timeout: 30
      ironic_agent:
        timeout: 30
  resources:
    enabled: false
    agent:
      dhcp:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      l3:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      lb:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      metadata:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ovn_metadata:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ovs:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      sriov:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      l2gw:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      bagpipe_bgp:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
    server:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    ironic_agent:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    netns_cleanup_cron:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    jobs:
      bootstrap:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      rabbit_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_drop:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_endpoints:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_service:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_user:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      tests:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      image_repo_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"

895conf:
896 rally_tests:
897 force_project_purge: false
898 run_tempest: false
899 clean_up: |
900 # NOTE: We will make the best effort to clean up rally generated networks and routers,
901 # but should not block further automated deployment.
902 set +e
903 PATTERN="^[sc]_rally_"
904
905 ROUTERS=$(openstack router list --format=value -c Name | grep -e $PATTERN | sort | tr -d '\r')
906 NETWORKS=$(openstack network list --format=value -c Name | grep -e $PATTERN | sort | tr -d '\r')
907
908 for ROUTER in $ROUTERS
909 do
910 openstack router unset --external-gateway $ROUTER
911 openstack router set --disable --no-ha $ROUTER
912
913 SUBNS=$(openstack router show $ROUTER -c interfaces_info --format=value | python -m json.tool | grep -oP '(?<="subnet_id": ")[a-f0-9\-]{36}(?=")' | sort | uniq)
914 for SUBN in $SUBNS
915 do
916 openstack router remove subnet $ROUTER $SUBN
917 done
918
919 for PORT in $(openstack port list --router $ROUTER --format=value -c ID | tr -d '\r')
920 do
921 openstack router remove port $ROUTER $PORT
922 done
923
924 openstack router delete $ROUTER
925 done
926
927 for NETWORK in $NETWORKS
928 do
929 for PORT in $(openstack port list --network $NETWORK --format=value -c ID | tr -d '\r')
930 do
931 openstack port delete $PORT
932 done
933 openstack network delete $NETWORK
934 done
935 set -e
936 tests:
937 NeutronNetworks.create_and_delete_networks:
938 - args:
939 network_create_args: {}
940 context:
941 quotas:
942 neutron:
943 network: -1
944 runner:
945 concurrency: 1
946 times: 1
947 type: constant
948 sla:
949 failure_rate:
950 max: 0
951 NeutronNetworks.create_and_delete_ports:
952 - args:
953 network_create_args: {}
954 port_create_args: {}
955 ports_per_network: 10
956 context:
957 network: {}
958 quotas:
959 neutron:
960 network: -1
961 port: -1
962 runner:
963 concurrency: 1
964 times: 1
965 type: constant
966 sla:
967 failure_rate:
968 max: 0
969 NeutronNetworks.create_and_delete_routers:
970 - args:
971 network_create_args: {}
972 router_create_args: {}
973 subnet_cidr_start: 1.1.0.0/30
974 subnet_create_args: {}
975 subnets_per_network: 2
976 context:
977 network: {}
978 quotas:
979 neutron:
980 network: -1
981 router: -1
982 subnet: -1
983 runner:
984 concurrency: 1
985 times: 1
986 type: constant
987 sla:
988 failure_rate:
989 max: 0
990 NeutronNetworks.create_and_delete_subnets:
991 - args:
992 network_create_args: {}
993 subnet_cidr_start: 1.1.0.0/30
994 subnet_create_args: {}
995 subnets_per_network: 2
996 context:
997 network: {}
998 quotas:
999 neutron:
1000 network: -1
1001 subnet: -1
1002 runner:
1003 concurrency: 1
1004 times: 1
1005 type: constant
1006 sla:
1007 failure_rate:
1008 max: 0
1009 NeutronNetworks.create_and_list_routers:
1010 - args:
1011 network_create_args: {}
1012 router_create_args: {}
1013 subnet_cidr_start: 1.1.0.0/30
1014 subnet_create_args: {}
1015 subnets_per_network: 2
1016 context:
1017 network: {}
1018 quotas:
1019 neutron:
1020 network: -1
1021 router: -1
1022 subnet: -1
1023 runner:
1024 concurrency: 1
1025 times: 1
1026 type: constant
1027 sla:
1028 failure_rate:
1029 max: 0
1030 NeutronNetworks.create_and_list_subnets:
1031 - args:
1032 network_create_args: {}
1033 subnet_cidr_start: 1.1.0.0/30
1034 subnet_create_args: {}
1035 subnets_per_network: 2
1036 context:
1037 network: {}
1038 quotas:
1039 neutron:
1040 network: -1
1041 subnet: -1
1042 runner:
1043 concurrency: 1
1044 times: 1
1045 type: constant
1046 sla:
1047 failure_rate:
1048 max: 0
1049 NeutronNetworks.create_and_show_network:
1050 - args:
1051 network_create_args: {}
1052 context:
1053 quotas:
1054 neutron:
1055 network: -1
1056 runner:
1057 concurrency: 1
1058 times: 1
1059 type: constant
1060 sla:
1061 failure_rate:
1062 max: 0
1063 NeutronNetworks.create_and_update_networks:
1064 - args:
1065 network_create_args: {}
1066 network_update_args:
1067 admin_state_up: false
1068 context:
1069 quotas:
1070 neutron:
1071 network: -1
1072 runner:
1073 concurrency: 1
1074 times: 1
1075 type: constant
1076 sla:
1077 failure_rate:
1078 max: 0
1079 NeutronNetworks.create_and_update_ports:
1080 - args:
1081 network_create_args: {}
1082 port_create_args: {}
1083 port_update_args:
1084 admin_state_up: false
1085 device_id: dummy_id
1086 device_owner: dummy_owner
1087 ports_per_network: 5
1088 context:
1089 network: {}
1090 quotas:
1091 neutron:
1092 network: -1
1093 port: -1
1094 runner:
1095 concurrency: 1
1096 times: 1
1097 type: constant
1098 sla:
1099 failure_rate:
1100 max: 0
1101 NeutronNetworks.create_and_update_routers:
1102 - args:
1103 network_create_args: {}
1104 router_create_args: {}
1105 router_update_args:
1106 admin_state_up: false
1107 subnet_cidr_start: 1.1.0.0/30
1108 subnet_create_args: {}
1109 subnets_per_network: 2
1110 context:
1111 network: {}
1112 quotas:
1113 neutron:
1114 network: -1
1115 router: -1
1116 subnet: -1
1117 runner:
1118 concurrency: 1
1119 times: 1
1120 type: constant
1121 sla:
1122 failure_rate:
1123 max: 0
1124 NeutronNetworks.create_and_update_subnets:
1125 - args:
1126 network_create_args: {}
1127 subnet_cidr_start: 1.4.0.0/16
1128 subnet_create_args: {}
1129 subnet_update_args:
1130 enable_dhcp: false
1131 subnets_per_network: 2
1132 context:
1133 network: {}
1134 quotas:
1135 neutron:
1136 network: -1
1137 subnet: -1
1138 runner:
1139 concurrency: 1
1140 times: 1
1141 type: constant
1142 sla:
1143 failure_rate:
1144 max: 0
1145 NeutronNetworks.list_agents:
1146 - args:
1147 agent_args: {}
1148 runner:
1149 concurrency: 1
1150 times: 1
1151 type: constant
1152 sla:
1153 failure_rate:
1154 max: 0
1155 NeutronSecurityGroup.create_and_list_security_groups:
1156 - args:
1157 security_group_create_args: {}
1158 context:
1159 quotas:
1160 neutron:
1161 security_group: -1
1162 runner:
1163 concurrency: 1
1164 times: 1
1165 type: constant
1166 sla:
1167 failure_rate:
1168 max: 0
1169 NeutronSecurityGroup.create_and_update_security_groups:
1170 - args:
1171 security_group_create_args: {}
1172 security_group_update_args: {}
1173 context:
1174 quotas:
1175 neutron:
1176 security_group: -1
1177 runner:
1178 concurrency: 1
1179 times: 1
1180 type: constant
1181 sla:
1182 failure_rate:
1183 max: 0
Oleksandr Kozachenkoc0022be2023-05-23 20:36:21 +02001184 paste: {}
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001185 policy: {}
1186 api_audit_map:
1187 DEFAULT:
1188 target_endpoint_type: None
1189 custom_actions:
1190 add_router_interface: update/add
1191 remove_router_interface: update/remove
1192 path_keywords:
1193 floatingips: ip
1194 healthmonitors: healthmonitor
1195 health_monitors: health_monitor
1196 lb: None
1197 members: member
1198 metering-labels: label
1199 metering-label-rules: rule
1200 networks: network
1201 pools: pool
1202 ports: port
1203 routers: router
1204 quotas: quota
1205 security-groups: security-group
1206 security-group-rules: rule
1207 subnets: subnet
1208 vips: vip
1209 service_endpoints:
1210 network: service/network
1211 neutron_sudoers: |
1212 # This sudoers file supports rootwrap for both Kolla and LOCI Images.
1213 Defaults !requiretty
1214 Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin"
1215 neutron ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *, /var/lib/openstack/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *
1216 neutron ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf, /var/lib/openstack/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
1217 rootwrap: |
1218 # Configuration for neutron-rootwrap
1219 # This file should be owned by (and only-writeable by) the root user
1220
1221 [DEFAULT]
1222 # List of directories to load filter definitions from (separated by ',').
1223 # These directories MUST all be only writeable by root !
1224 filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap,/var/lib/openstack/etc/neutron/rootwrap.d
1225
1226 # List of directories to search executables in, in case filters do not
1227 # explicitely specify a full path (separated by ',')
1228 # If not specified, defaults to system PATH environment variable.
1229 # These directories MUST all be only writeable by root !
1230 exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin
1231
1232 # Enable logging to syslog
1233 # Default value is False
1234 use_syslog=False
1235
1236 # Which syslog facility to use.
1237 # Valid values include auth, authpriv, syslog, local0, local1...
1238 # Default value is 'syslog'
1239 syslog_log_facility=syslog
1240
1241 # Which messages to log.
1242 # INFO means log all usage
1243 # ERROR means only log unsuccessful attempts
1244 syslog_log_level=ERROR
1245
1246 [xenapi]
1247 # XenAPI configuration is only required by the L2 agent if it is to
1248 # target a XenServer/XCP compute host's dom0.
1249 xenapi_connection_url=<None>
1250 xenapi_connection_username=root
1251 xenapi_connection_password=<None>
1252 rootwrap_filters:
1253 debug:
1254 pods:
1255 - dhcp_agent
1256 - l3_agent
1257 - lb_agent
1258 - metadata_agent
Oleksandr Kozachenkoc0022be2023-05-23 20:36:21 +02001259 - ovn_metadata_agent
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001260 - ovs_agent
1261 - sriov_agent
1262 content: |
1263 # neutron-rootwrap command filters for nodes on which neutron is
1264 # expected to control network
1265 #
1266 # This file should be owned by (and only-writeable by) the root user
1267
1268 # format seems to be
1269 # cmd-name: filter-name, raw-command, user, args
1270
1271 [Filters]
1272
1273 # This is needed because we should ping
1274 # from inside a namespace which requires root
1275 # _alt variants allow to match -c and -w in any order
1276 # (used by NeutronDebugAgent.ping_all)
1277 ping: RegExpFilter, ping, root, ping, -w, \d+, -c, \d+, [0-9\.]+
1278 ping_alt: RegExpFilter, ping, root, ping, -c, \d+, -w, \d+, [0-9\.]+
1279 ping6: RegExpFilter, ping6, root, ping6, -w, \d+, -c, \d+, [0-9A-Fa-f:]+
1280 ping6_alt: RegExpFilter, ping6, root, ping6, -c, \d+, -w, \d+, [0-9A-Fa-f:]+
1281 dibbler:
1282 pods:
1283 - dhcp_agent
1284 - l3_agent
1285 - lb_agent
1286 - metadata_agent
Oleksandr Kozachenkoc0022be2023-05-23 20:36:21 +02001287 - ovn_metadata_agent
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001288 - ovs_agent
1289 - sriov_agent
1290 content: |
1291 # neutron-rootwrap command filters for nodes on which neutron is
1292 # expected to control network
1293 #
1294 # This file should be owned by (and only-writeable by) the root user
1295
1296 # format seems to be
1297 # cmd-name: filter-name, raw-command, user, args
1298
1299 [Filters]
1300
1301 # Filters for the dibbler-based reference implementation of the pluggable
1302 # Prefix Delegation driver. Other implementations using an alternative agent
1303 # should include a similar filter in this folder.
1304
1305 # prefix_delegation_agent
1306 dibbler-client: CommandFilter, dibbler-client, root
1307 ipset_firewall:
1308 pods:
1309 - dhcp_agent
1310 - l3_agent
1311 - lb_agent
1312 - metadata_agent
Oleksandr Kozachenkoc0022be2023-05-23 20:36:21 +02001313 - ovn_metadata_agent
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001314 - ovs_agent
1315 - sriov_agent
1316 content: |
1317 # neutron-rootwrap command filters for nodes on which neutron is
1318 # expected to control network
1319 #
1320 # This file should be owned by (and only-writeable by) the root user
1321
1322 # format seems to be
1323 # cmd-name: filter-name, raw-command, user, args
1324
1325 [Filters]
1326 # neutron/agent/linux/iptables_firewall.py
1327 # "ipset", "-A", ...
1328 ipset: CommandFilter, ipset, root
1329 l3:
1330 pods:
1331 - dhcp_agent
1332 - l3_agent
1333 - lb_agent
1334 - metadata_agent
Oleksandr Kozachenkoc0022be2023-05-23 20:36:21 +02001335 - ovn_metadata_agent
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001336 - ovs_agent
1337 - sriov_agent
1338 content: |
1339 # neutron-rootwrap command filters for nodes on which neutron is
1340 # expected to control network
1341 #
1342 # This file should be owned by (and only-writeable by) the root user
1343
1344 # format seems to be
1345 # cmd-name: filter-name, raw-command, user, args
1346
1347 [Filters]
1348
1349 # arping
1350 arping: CommandFilter, arping, root
1351
1352 # l3_agent
1353 sysctl: CommandFilter, sysctl, root
1354 route: CommandFilter, route, root
1355 radvd: CommandFilter, radvd, root
1356
1357 # haproxy
1358 haproxy: RegExpFilter, haproxy, root, haproxy, -f, .*
1359 kill_haproxy: KillFilter, root, haproxy, -15, -9, -HUP
1360
1361 # metadata proxy
1362 metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
1363 # RHEL invocation of the metadata proxy will report /usr/bin/python
1364 kill_metadata: KillFilter, root, python, -15, -9
1365 kill_metadata2: KillFilter, root, python2, -15, -9
1366 kill_metadata7: KillFilter, root, python2.7, -15, -9
1367 kill_metadata3: KillFilter, root, python3, -15, -9
1368 kill_metadata35: KillFilter, root, python3.5, -15, -9
1369 kill_metadata36: KillFilter, root, python3.6, -15, -9
1370 kill_metadata37: KillFilter, root, python3.7, -15, -9
1371 kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -15, -9, -HUP
1372 kill_radvd: KillFilter, root, /sbin/radvd, -15, -9, -HUP
1373
1374 # ip_lib
1375 ip: IpFilter, ip, root
1376 find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
1377 ip_exec: IpNetnsExecFilter, ip, root
1378
1379 # l3_tc_lib
1380 l3_tc_show_qdisc: RegExpFilter, tc, root, tc, qdisc, show, dev, .+
1381 l3_tc_add_qdisc_ingress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, ingress
1382 l3_tc_add_qdisc_egress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, root, handle, 1:, htb
1383 l3_tc_show_filters: RegExpFilter, tc, root, tc, -p, -s, -d, filter, show, dev, .+, parent, .+, prio, 1
1384 l3_tc_delete_filters: RegExpFilter, tc, root, tc, filter, del, dev, .+, parent, .+, prio, 1, handle, .+, u32
1385 l3_tc_add_filter_ingress: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, ip, prio, 1, u32, match, ip, dst, .+, police, rate, .+, burst, .+, drop, flowid, :1
1386 l3_tc_add_filter_egress: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, ip, prio, 1, u32, match, ip, src, .+, police, rate, .+, burst, .+, drop, flowid, :1
1387
1388 # For ip monitor
1389 kill_ip_monitor: KillFilter, root, ip, -9
1390
1391 # ovs_lib (if OVSInterfaceDriver is used)
1392 ovs-vsctl: CommandFilter, ovs-vsctl, root
1393
1394 # iptables_manager
1395 iptables-save: CommandFilter, iptables-save, root
1396 iptables-restore: CommandFilter, iptables-restore, root
1397 ip6tables-save: CommandFilter, ip6tables-save, root
1398 ip6tables-restore: CommandFilter, ip6tables-restore, root
1399
1400 # Keepalived
1401 keepalived: CommandFilter, keepalived, root
1402 kill_keepalived: KillFilter, root, keepalived, -HUP, -15, -9
1403
1404 # l3 agent to delete floatingip's conntrack state
1405 conntrack: CommandFilter, conntrack, root
1406
1407 # keepalived state change monitor
1408 keepalived_state_change: CommandFilter, neutron-keepalived-state-change, root
1409 # The following filters are used to kill the keepalived state change monitor.
1410 # Since the monitor runs as a Python script, the system reports that the
1411 # command of the process to be killed is python.
1412 # TODO(mlavalle) These kill filters will be updated once we come up with a
1413 # mechanism to kill using the name of the script being executed by Python
1414 kill_keepalived_monitor_py: KillFilter, root, python, -15
1415 kill_keepalived_monitor_py27: KillFilter, root, python2.7, -15
1416 kill_keepalived_monitor_py3: KillFilter, root, python3, -15
1417 kill_keepalived_monitor_py35: KillFilter, root, python3.5, -15
1418 kill_keepalived_monitor_py36: KillFilter, root, python3.6, -15
1419 kill_keepalived_monitor_py37: KillFilter, root, python3.7, -15
1420 netns_cleanup:
1421 pods:
1422 - dhcp_agent
1423 - l3_agent
1424 - lb_agent
1425 - metadata_agent
Oleksandr Kozachenkoc0022be2023-05-23 20:36:21 +02001426 - ovn_metadata_agent
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001427 - ovs_agent
1428 - sriov_agent
1429 - netns_cleanup_cron
1430 content: |
1431 # neutron-rootwrap command filters for nodes on which neutron is
1432 # expected to control network
1433 #
1434 # This file should be owned by (and only-writeable by) the root user
1435
1436 # format seems to be
1437 # cmd-name: filter-name, raw-command, user, args
1438
1439 [Filters]
1440
1441 # netns-cleanup
1442 netstat: CommandFilter, netstat, root
1443 dhcp:
1444 pods:
1445 - dhcp_agent
1446 - l3_agent
1447 - lb_agent
1448 - metadata_agent
Oleksandr Kozachenkoc0022be2023-05-23 20:36:21 +02001449 - ovn_metadata_agent
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001450 - ovs_agent
1451 - sriov_agent
1452 - netns_cleanup_cron
1453 content: |
1454 # neutron-rootwrap command filters for nodes on which neutron is
1455 # expected to control network
1456 #
1457 # This file should be owned by (and only-writeable by) the root user
1458
1459 # format seems to be
1460 # cmd-name: filter-name, raw-command, user, args
1461
1462 [Filters]
1463
1464 # dhcp-agent
1465 dnsmasq: CommandFilter, dnsmasq, root
1466 # dhcp-agent uses kill as well, that's handled by the generic KillFilter
1467 # it looks like these are the only signals needed, per
1468 # neutron/agent/linux/dhcp.py
1469 kill_dnsmasq: KillFilter, root, /sbin/dnsmasq, -9, -HUP, -15
1470 kill_dnsmasq_usr: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP, -15
1471
1472 ovs-vsctl: CommandFilter, ovs-vsctl, root
1473 ivs-ctl: CommandFilter, ivs-ctl, root
1474 mm-ctl: CommandFilter, mm-ctl, root
1475 dhcp_release: CommandFilter, dhcp_release, root
1476 dhcp_release6: CommandFilter, dhcp_release6, root
1477
1478 # metadata proxy
1479 metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
1480 # RHEL invocation of the metadata proxy will report /usr/bin/python
1481 kill_metadata: KillFilter, root, python, -9
1482 kill_metadata2: KillFilter, root, python2, -9
1483 kill_metadata7: KillFilter, root, python2.7, -9
1484 kill_metadata3: KillFilter, root, python3, -9
1485 kill_metadata35: KillFilter, root, python3.5, -9
1486 kill_metadata36: KillFilter, root, python3.6, -9
1487 kill_metadata37: KillFilter, root, python3.7, -9
1488
1489 # ip_lib
1490 ip: IpFilter, ip, root
1491 find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
1492 ip_exec: IpNetnsExecFilter, ip, root
1493 ebtables:
1494 pods:
1495 - dhcp_agent
1496 - l3_agent
1497 - lb_agent
1498 - metadata_agent
Oleksandr Kozachenkoc0022be2023-05-23 20:36:21 +02001499 - ovn_metadata_agent
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001500 - ovs_agent
1501 - sriov_agent
1502 content: |
1503 # neutron-rootwrap command filters for nodes on which neutron is
1504 # expected to control network
1505 #
1506 # This file should be owned by (and only-writeable by) the root user
1507
1508 # format seems to be
1509 # cmd-name: filter-name, raw-command, user, args
1510
1511 [Filters]
1512
1513 ebtables: CommandFilter, ebtables, root
1514 iptables_firewall:
1515 pods:
1516 - dhcp_agent
1517 - l3_agent
1518 - lb_agent
1519 - metadata_agent
Oleksandr Kozachenkoc0022be2023-05-23 20:36:21 +02001520 - ovn_metadata_agent
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001521 - ovs_agent
1522 - sriov_agent
1523 content: |
1524 # neutron-rootwrap command filters for nodes on which neutron is
1525 # expected to control network
1526 #
1527 # This file should be owned by (and only-writeable by) the root user
1528
1529 # format seems to be
1530 # cmd-name: filter-name, raw-command, user, args
1531
1532 [Filters]
1533
1534 # neutron/agent/linux/iptables_firewall.py
1535 # "iptables-save", ...
1536 iptables-save: CommandFilter, iptables-save, root
1537 iptables-restore: CommandFilter, iptables-restore, root
1538 ip6tables-save: CommandFilter, ip6tables-save, root
1539 ip6tables-restore: CommandFilter, ip6tables-restore, root
1540
1541 # neutron/agent/linux/iptables_firewall.py
1542 # "iptables", "-A", ...
1543 iptables: CommandFilter, iptables, root
1544 ip6tables: CommandFilter, ip6tables, root
1545
1546 # neutron/agent/linux/iptables_firewall.py
1547 sysctl: CommandFilter, sysctl, root
1548
1549 # neutron/agent/linux/ip_conntrack.py
1550 conntrack: CommandFilter, conntrack, root
1551 linuxbridge_plugin:
1552 pods:
1553 - dhcp_agent
1554 - l3_agent
1555 - lb_agent
1556 - metadata_agent
Oleksandr Kozachenkoc0022be2023-05-23 20:36:21 +02001557 - ovn_metadata_agent
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001558 - ovs_agent
1559 - sriov_agent
1560 content: |
1561 # neutron-rootwrap command filters for nodes on which neutron is
1562 # expected to control network
1563 #
1564 # This file should be owned by (and only-writeable by) the root user
1565
1566 # format seems to be
1567 # cmd-name: filter-name, raw-command, user, args
1568
1569 [Filters]
1570
1571 # linuxbridge-agent
1572 # unclear whether both variants are necessary, but I'm transliterating
1573 # from the old mechanism
1574 brctl: CommandFilter, brctl, root
1575 bridge: CommandFilter, bridge, root
1576
1577 # ip_lib
1578 ip: IpFilter, ip, root
1579 find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
1580 ip_exec: IpNetnsExecFilter, ip, root
1581
1582 # tc commands needed for QoS support
1583 tc_replace_tbf: RegExpFilter, tc, root, tc, qdisc, replace, dev, .+, root, tbf, rate, .+, latency, .+, burst, .+
1584 tc_add_ingress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, ingress, handle, .+
1585 tc_delete: RegExpFilter, tc, root, tc, qdisc, del, dev, .+, .+
1586 tc_show_qdisc: RegExpFilter, tc, root, tc, qdisc, show, dev, .+
1587 tc_show_filters: RegExpFilter, tc, root, tc, filter, show, dev, .+, parent, .+
1588 tc_add_filter: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, all, prio, .+, basic, police, rate, .+, burst, .+, mtu, .+, drop
1589 openvswitch_plugin:
1590 pods:
1591 - dhcp_agent
1592 - l3_agent
1593 - lb_agent
1594 - metadata_agent
Oleksandr Kozachenkoc0022be2023-05-23 20:36:21 +02001595 - ovn_metadata_agent
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001596 - ovs_agent
1597 - sriov_agent
1598 content: |
1599 # neutron-rootwrap command filters for nodes on which neutron is
1600 # expected to control network
1601 #
1602 # This file should be owned by (and only-writeable by) the root user
1603
1604 # format seems to be
1605 # cmd-name: filter-name, raw-command, user, args
1606
1607 [Filters]
1608
1609 # openvswitch-agent
1610 # unclear whether both variants are necessary, but I'm transliterating
1611 # from the old mechanism
1612 ovs-vsctl: CommandFilter, ovs-vsctl, root
1613 # NOTE(yamamoto): of_interface=native doesn't use ovs-ofctl
1614 ovs-ofctl: CommandFilter, ovs-ofctl, root
1615 ovs-appctl: CommandFilter, ovs-appctl, root
1616 kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9
1617 ovsdb-client: CommandFilter, ovsdb-client, root
1618 xe: CommandFilter, xe, root
1619
1620 # ip_lib
1621 ip: IpFilter, ip, root
1622 find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
1623 ip_exec: IpNetnsExecFilter, ip, root
1624
1625 # needed for FDB extension
1626 bridge: CommandFilter, bridge, root
1627 privsep:
1628 pods:
1629 - dhcp_agent
1630 - l3_agent
1631 - lb_agent
1632 - metadata_agent
Oleksandr Kozachenkoc0022be2023-05-23 20:36:21 +02001633 - ovn_metadata_agent
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001634 - ovs_agent
1635 - sriov_agent
1636 - netns_cleanup_cron
1637 content: |
1638 # Command filters to allow privsep daemon to be started via rootwrap.
1639 #
1640 # This file should be owned by (and only-writeable by) the root user
1641
1642 [Filters]
1643
1644 # By installing the following, the local admin is asserting that:
1645 #
1646 # 1. The python module load path used by privsep-helper
1647 # command as root (as started by sudo/rootwrap) is trusted.
1648 # 2. Any oslo.config files matching the --config-file
1649 # arguments below are trusted.
1650 # 3. Users allowed to run sudo/rootwrap with this configuration(*) are
1651 # also allowed to invoke python "entrypoint" functions from
1652 # --privsep_context with the additional (possibly root) privileges
1653 # configured for that context.
1654 #
1655 # (*) ie: the user is allowed by /etc/sudoers to run rootwrap as root
1656 #
1657 # In particular, the oslo.config and python module path must not
1658 # be writeable by the unprivileged user.
1659
1660 # oslo.privsep default neutron context
1661 privsep: PathFilter, privsep-helper, root,
1662 --config-file, /etc,
1663 --privsep_context, neutron.privileged.default,
1664 --privsep_sock_path, /
1665
1666 # NOTE: A second `--config-file` arg can also be added above. Since
1667 # many neutron components are installed like that (eg: by devstack).
1668 # Adjust to suit local requirements.
1669 linux_vxlan:
1670 pods:
1671 - bagpipe_bgp
1672 content: |
1673 # bagpipe-bgp-rootwrap command filters for nodes on which bagpipe-bgp is
1674 # expected to control VXLAN Linux Bridge dataplane
1675 #
1676 # This file should be owned by (and only-writeable by) the root user
1677
1678 # format seems to be
1679 # cmd-name: filter-name, raw-command, user, args
1680
1681 [Filters]
1682
1683 #
1684 modprobe: CommandFilter, modprobe, root
1685
1686 #
1687 brctl: CommandFilter, brctl, root
1688 bridge: CommandFilter, bridge, root
1689
1690 # ip_lib
1691 ip: IpFilter, ip, root
1692 ip_exec: IpNetnsExecFilter, ip, root
1693
1694 # shell (for piped commands)
1695 sh: CommandFilter, sh, root
1696 mpls_ovs_dataplane:
1697 pods:
1698 - bagpipe_bgp
1699 content: |
1700 # bagpipe-bgp-rootwrap command filters for nodes on which bagpipe-bgp is
1701 # expected to control MPLS OpenVSwitch dataplane
1702 #
1703 # This file should be owned by (and only-writeable by) the root user
1704
1705 # format seems to be
1706 # cmd-name: filter-name, raw-command, user, args
1707
1708 [Filters]
1709
1710 # openvswitch
1711 ovs-vsctl: CommandFilter, ovs-vsctl, root
1712 ovs-ofctl: CommandFilter, ovs-ofctl, root
1713
1714 # ip_lib
1715 ip: IpFilter, ip, root
1716 ip_exec: IpNetnsExecFilter, ip, root
1717
1718 # shell (for piped commands)
1719 sh: CommandFilter, sh, root
1720 neutron:
1721 DEFAULT:
1722 metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy
1723 log_config_append: /etc/neutron/logging.conf
1724 # NOTE(portdirect): the bind port should not be defined, and is manipulated
1725 # via the endpoints section.
1726 bind_port: null
1727 default_availability_zones: nova
1728 api_workers: 1
1729 rpc_workers: 4
1730 allow_overlapping_ips: True
1731 state_path: /var/lib/neutron
1732 # core_plugin can be: ml2, calico
1733 core_plugin: ml2
1734 # service_plugin can be: router, odl-router, empty for calico,
1735 # networking_ovn.l3.l3_ovn.OVNL3RouterPlugin for OVN
1736 service_plugins: router
1737 allow_automatic_l3agent_failover: True
1738 l3_ha: True
1739 max_l3_agents_per_router: 2
1740 l3_ha_network_type: vxlan
1741 network_auto_schedule: True
1742 router_auto_schedule: True
1743 # (NOTE)portdirect: if unset this is populated dynamically from the value in
1744 # 'network.backend' to sane defaults.
1745 interface_driver: null
1746 oslo_concurrency:
1747 lock_path: /var/lib/neutron/tmp
1748 database:
1749 max_retries: -1
1750 agent:
1751 root_helper: sudo /var/lib/openstack/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
1752 root_helper_daemon: sudo /var/lib/openstack/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
1753 oslo_messaging_notifications:
1754 driver: messagingv2
1755 oslo_messaging_rabbit:
1756 rabbit_ha_queues: true
1757 oslo_middleware:
1758 enable_proxy_headers_parsing: true
1759 oslo_policy:
1760 policy_file: /etc/neutron/policy.yaml
Mohammed Naser593ec012023-07-23 09:20:05 +00001761 ovn:
1762 enable_distributed_floating_ip: true
1763 ovn_metadata_enabled: true
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001764 nova:
1765 auth_type: password
1766 auth_version: v3
1767 endpoint_type: internal
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01001768 placement:
1769 auth_type: password
1770 auth_version: v3
1771 endpoint_type: internal
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001772 designate:
1773 auth_type: password
1774 auth_version: v3
1775 endpoint_type: internal
1776 allow_reverse_dns_lookup: true
1777 ironic:
1778 endpoint_type: internal
1779 keystone_authtoken:
1780 memcache_security_strategy: ENCRYPT
1781 auth_type: password
1782 auth_version: v3
Oleksandr Kozachenkoc0022be2023-05-23 20:36:21 +02001783 service_type: network
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001784 octavia:
1785 request_poll_timeout: 3000
1786 logging:
1787 loggers:
1788 keys:
1789 - root
1790 - neutron
1791 - neutron_taas
1792 handlers:
1793 keys:
1794 - stdout
1795 - stderr
1796 - "null"
1797 formatters:
1798 keys:
1799 - context
1800 - default
1801 logger_root:
1802 level: WARNING
1803 handlers: 'null'
1804 logger_neutron:
1805 level: INFO
1806 handlers:
1807 - stdout
1808 qualname: neutron
1809 logger_neutron_taas:
1810 level: INFO
1811 handlers:
1812 - stdout
1813 qualname: neutron_taas
1814 logger_amqp:
1815 level: WARNING
1816 handlers: stderr
1817 qualname: amqp
1818 logger_amqplib:
1819 level: WARNING
1820 handlers: stderr
1821 qualname: amqplib
1822 logger_eventletwsgi:
1823 level: WARNING
1824 handlers: stderr
1825 qualname: eventlet.wsgi.server
1826 logger_sqlalchemy:
1827 level: WARNING
1828 handlers: stderr
1829 qualname: sqlalchemy
1830 logger_boto:
1831 level: WARNING
1832 handlers: stderr
1833 qualname: boto
1834 handler_null:
1835 class: logging.NullHandler
1836 formatter: default
1837 args: ()
1838 handler_stdout:
1839 class: StreamHandler
1840 args: (sys.stdout,)
1841 formatter: context
1842 handler_stderr:
1843 class: StreamHandler
1844 args: (sys.stderr,)
1845 formatter: context
1846 formatter_context:
1847 class: oslo_log.formatters.ContextFormatter
1848 datefmt: "%Y-%m-%d %H:%M:%S"
1849 formatter_default:
1850 format: "%(message)s"
1851 datefmt: "%Y-%m-%d %H:%M:%S"
1852 plugins:
1853 ml2_conf:
1854 ml2:
1855 extension_drivers: port_security
1856 # (NOTE)portdirect: if unset this is populated dynamically from the value
1857 # in 'network.backend' to sane defaults.
1858 mechanism_drivers: null
Oleksandr Kozachenkoc0022be2023-05-23 20:36:21 +02001859 type_drivers: flat,vlan,vxlan,local
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001860 tenant_network_types: vxlan
1861 ml2_type_vxlan:
1862 vni_ranges: 1:1000
1863 vxlan_group: 239.1.1.1
1864 ml2_type_flat:
1865 flat_networks: "*"
1866 # If you want to use the external network as a tagged provider network,
1867 # a range should be specified including the intended VLAN target
1868 # using ml2_type_vlan.network_vlan_ranges:
1869 # ml2_type_vlan:
1870 # network_vlan_ranges: "external:1100:1110"
Mohammed Naser593ec012023-07-23 09:20:05 +00001871 ml2_type_geneve:
1872 vni_ranges: 1:65536
1873 max_header_size: 38
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001874 agent:
1875 extensions: ""
1876 ml2_conf_sriov: null
1877 taas:
1878 taas:
1879 enabled: False
1880 openvswitch_agent:
1881 agent:
1882 tunnel_types: vxlan
1883 l2_population: True
1884 arp_responder: True
1885 ovs:
1886 bridge_mappings: "external:br-ex"
1887 securitygroup:
1888 firewall_driver: neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
1889 linuxbridge_agent:
1890 linux_bridge:
1891 # To define Flat and VLAN connections, in LB we can assign
1892 # specific interface to the flat/vlan network name using:
1893 # physical_interface_mappings: "external:eth3"
1894 # Or we can set the mapping between the network and bridge:
1895 bridge_mappings: "external:br-ex"
1896 # The two above options are exclusive, do not use both of them at once
1897 securitygroup:
1898 firewall_driver: iptables
1899 vxlan:
1900 l2_population: True
1901 arp_responder: True
1902 macvtap_agent: null
1903 sriov_agent:
1904 securitygroup:
1905 firewall_driver: neutron.agent.firewall.NoopFirewallDriver
1906 sriov_nic:
1907 physical_device_mappings: physnet2:enp3s0f1
1908 # NOTE: do not use null here, use an empty string
1909 exclude_devices: ""
1910 dhcp_agent:
1911 DEFAULT:
1912 # (NOTE)portdirect: if unset this is populated dynamically from the value in
1913 # 'network.backend' to sane defaults.
1914 interface_driver: null
1915 dnsmasq_config_file: /etc/neutron/dnsmasq.conf
1916 force_metadata: True
1917 dnsmasq: |
1918 #no-hosts
1919 #port=5353
1920 #cache-size=500
1921 #no-negcache
1922 #dns-forward-max=100
1923 #resolve-file=
1924 #strict-order
1925 #bind-interface
1926 #bind-dynamic
1927 #domain=
1928 #dhcp-range=10.10.10.10,10.10.10.100,24h
1929 #dhcp-lease-max=150
1930 #dhcp-host=11:22:33:44:55:66,ignore
1931 #dhcp-option=3,10.10.10.1
1932 #dhcp-option-force=26,1450
1933
1934 l3_agent:
1935 DEFAULT:
1936 # (NOTE)portdirect: if unset this is populated dynamically from the value in
1937 # 'network.backend' to sane defaults.
1938 interface_driver: null
1939 agent_mode: legacy
1940 metering_agent: null
1941 metadata_agent:
1942 DEFAULT:
1943 # we cannot change the proxy socket path as it is declared
1944 # as a hostPath volume from agent daemonsets
1945 metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy
1946 metadata_proxy_shared_secret: "password"
1947 cache:
1948 enabled: true
1949 backend: dogpile.cache.memcached
1950 bagpipe_bgp: {}
Mohammed Naser593ec012023-07-23 09:20:05 +00001951 ovn_metadata_agent:
1952 DEFAULT:
1953 # we cannot change the proxy socket path as it is declared
1954 # as a hostPath volume from agent daemonsets
1955 metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy
1956 metadata_proxy_shared_secret: "password"
1957 metadata_workers: 2
1958 cache:
1959 enabled: true
1960 backend: dogpile.cache.memcached
1961 ovs:
1962 ovsdb_connection: unix:/run/openvswitch/db.sock
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001963
1964 rabbitmq:
1965 # NOTE(rk760n): adding rmq policy to mirror messages from notification queues and set expiration time for the ones
1966 policies:
1967 - vhost: "neutron"
1968 name: "ha_ttl_neutron"
1969 definition:
1970 # mirror messages to other nodes in rmq cluster
1971 ha-mode: "all"
1972 ha-sync-mode: "automatic"
1973 # 70s
1974 message-ttl: 70000
1975 priority: 0
1976 apply-to: all
1977 pattern: '^(?!(amq\.|reply_)).*'
1978 ## NOTE: "besteffort" is meant for dev env with mixed compute type only.
1979 ## This helps prevent sriov init script from failing due to mis-matched NIC
1980 ## For prod env, target NIC should match and init script should fail otherwise.
1981 ## sriov_init:
1982 ## - besteffort
1983 sriov_init:
1984 -
1985 # auto_bridge_add is a table of "bridge: interface" pairs
1986 # To automatically add a physical interfaces to a specific bridges,
1987 # for example eth3 to bridge br-physnet1, if0 to br0 and iface_two
1988 # to br1 do something like:
1989 #
1990 # auto_bridge_add:
1991 # br-physnet1: eth3
1992 # br0: if0
1993 # br1: iface_two
1994 # br-ex will be added by default
1995 auto_bridge_add:
1996 br-ex: null
1997
Mohammed Nasera720f882023-06-30 23:48:02 -04001998 # Network off-loading configuration
1999 netoffload:
ricolin18e6fd32023-07-17 06:17:15 +00002000 enabled: false
Mohammed Nasera720f882023-06-30 23:48:02 -04002001 asap2:
2002 # - dev: enp97s0f0
2003 # vfs: 16
2004
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002005 # configuration of OVS DPDK bridges and NICs
2006 # this is a separate section and not part of the auto_bridge_add section
2007 # because additional parameters are needed
2008 ovs_dpdk:
2009 enabled: false
2010 # setting update_dpdk_bond_config to true will have default behavior,
2011 # which may cause disruptions in ovs dpdk traffic in case of neutron
2012 # ovs agent restart or when dpdk nic/bond configurations are changed.
2013 # Setting this to false will configure dpdk in the first run and
2014 # disable nic/bond config on event of restart or config update.
2015 update_dpdk_bond_config: true
2016 driver: uio_pci_generic
2017 # In case bonds are configured, the nics which are part of those bonds
2018 # must NOT be provided here.
2019 nics:
2020 - name: dpdk0
2021 pci_id: '0000:05:00.0'
2022 # Set VF Index in case some particular VF(s) need to be
2023 # used with ovs-dpdk.
2024 # vf_index: 0
2025 bridge: br-phy
2026 migrate_ip: true
2027 n_rxq: 2
2028 n_txq: 2
2029 pmd_rxq_affinity: "0:3,1:27"
2030 ofport_request: 1
2031 # optional parameters for tuning the OVS DPDK config
2032 # in alignment with the available hardware resources
2033 # mtu: 2000
2034 # n_rxq_size: 1024
2035 # n_txq_size: 1024
2036 # vhost-iommu-support: true
2037 bridges:
2038 - name: br-phy
2039 # optional parameter, in case tunnel traffic needs to be transported over a vlan underlay
2040 # - tunnel_underlay_vlan: 45
2041 # Optional parameter for configuring bonding in OVS-DPDK
2042 # - name: br-phy-bond0
2043 # bonds:
2044 # - name: dpdkbond0
2045 # bridge: br-phy-bond0
2046 # # The IP from the first nic in nics list shall be used
2047 # migrate_ip: true
2048 # mtu: 2000
2049 # # Please note that n_rxq is set for each NIC individually
2050 # # rather than denoting the total number of rx queues for
2051 # # the bond as a whole. So setting n_rxq = 2 below for ex.
2052 # # would be 4 rx queues in total for the bond.
2053 # # Same for n_txq
2054 # n_rxq: 2
2055 # n_txq: 2
2056 # ofport_request: 1
2057 # n_rxq_size: 1024
2058 # n_txq_size: 1024
2059 # vhost-iommu-support: true
2060 # ovs_options: "bond_mode=active-backup"
2061 # nics:
2062 # - name: dpdk_b0s0
2063 # pci_id: '0000:06:00.0'
2064 # pmd_rxq_affinity: "0:3,1:27"
2065 # # Set VF Index in case some particular VF(s) need to be
2066 # # used with ovs-dpdk. In which case pci_id of PF must be
2067 # # provided above.
2068 # # vf_index: 0
2069 # - name: dpdk_b0s1
2070 # pci_id: '0000:07:00.0'
2071 # pmd_rxq_affinity: "0:3,1:27"
2072 # # Set VF Index in case some particular VF(s) need to be
2073 # # used with ovs-dpdk. In which case pci_id of PF must be
2074 # # provided above.
2075 # # vf_index: 0
2076 #
2077 # Set the log level for each target module (default level is always dbg)
2078 # Supported log levels are: off, emer, err, warn, info, dbg
2079 #
2080 # modules:
2081 # - name: dpdk
2082 # log_level: info
2083
2084# Names of secrets used by bootstrap and environmental checks
2085secrets:
2086 identity:
2087 admin: neutron-keystone-admin
2088 neutron: neutron-keystone-user
2089 test: neutron-keystone-test
2090 oslo_db:
2091 admin: neutron-db-admin
2092 neutron: neutron-db-user
2093 oslo_messaging:
2094 admin: neutron-rabbitmq-admin
2095 neutron: neutron-rabbitmq-user
2096 tls:
2097 compute_metadata:
2098 metadata:
2099 internal: metadata-tls-metadata
2100 network:
2101 server:
2102 public: neutron-tls-public
2103 internal: neutron-tls-server
2104 oci_image_registry:
2105 neutron: neutron-oci-image-registry
2106
2107# typically overridden by environmental
2108# values, but should include all endpoints
2109# required by this chart
2110endpoints:
2111 cluster_domain_suffix: cluster.local
2112 local_image_registry:
2113 name: docker-registry
2114 namespace: docker-registry
2115 hosts:
2116 default: localhost
2117 internal: docker-registry
2118 node: localhost
2119 host_fqdn_override:
2120 default: null
2121 port:
2122 registry:
2123 node: 5000
2124 oci_image_registry:
2125 name: oci-image-registry
2126 namespace: oci-image-registry
2127 auth:
2128 enabled: false
2129 neutron:
2130 username: neutron
2131 password: password
2132 hosts:
2133 default: localhost
2134 host_fqdn_override:
2135 default: null
2136 port:
2137 registry:
2138 default: null
2139 oslo_db:
2140 auth:
2141 admin:
2142 username: root
2143 password: password
2144 secret:
2145 tls:
2146 internal: mariadb-tls-direct
2147 neutron:
2148 username: neutron
2149 password: password
2150 hosts:
2151 default: mariadb
2152 host_fqdn_override:
2153 default: null
2154 path: /neutron
2155 scheme: mysql+pymysql
2156 port:
2157 mysql:
2158 default: 3306
2159 oslo_messaging:
2160 auth:
2161 admin:
2162 username: rabbitmq
2163 password: password
2164 secret:
2165 tls:
2166 internal: rabbitmq-tls-direct
2167 neutron:
2168 username: neutron
2169 password: password
2170 statefulset:
2171 replicas: 2
2172 name: rabbitmq-rabbitmq
2173 hosts:
2174 default: rabbitmq
2175 host_fqdn_override:
2176 default: null
2177 path: /neutron
2178 scheme: rabbit
2179 port:
2180 amqp:
2181 default: 5672
2182 http:
2183 default: 15672
2184 oslo_cache:
2185 auth:
2186 # NOTE(portdirect): this is used to define the value for keystone
2187 # authtoken cache encryption key, if not set it will be populated
2188 # automatically with a random value, but to take advantage of
2189 # this feature all services should be set to use the same key,
2190 # and memcache service.
2191 memcache_secret_key: null
2192 hosts:
2193 default: memcached
2194 host_fqdn_override:
2195 default: null
2196 port:
2197 memcache:
2198 default: 11211
2199 compute:
2200 name: nova
2201 hosts:
2202 default: nova-api
2203 public: nova
2204 host_fqdn_override:
2205 default: null
2206 path:
2207 default: "/v2.1/%(tenant_id)s"
2208 scheme:
2209 default: 'http'
2210 port:
2211 api:
2212 default: 8774
2213 public: 80
2214 novncproxy:
2215 default: 6080
2216 compute_metadata:
2217 name: nova
2218 hosts:
2219 default: nova-metadata
2220 public: metadata
2221 host_fqdn_override:
2222 default: null
2223 path:
2224 default: /
2225 scheme:
2226 default: 'http'
2227 port:
2228 metadata:
2229 default: 8775
2230 public: 80
2231 identity:
2232 name: keystone
2233 auth:
2234 admin:
2235 region_name: RegionOne
2236 username: admin
2237 password: password
2238 project_name: admin
2239 user_domain_name: default
2240 project_domain_name: default
2241 neutron:
2242 role: admin
2243 region_name: RegionOne
2244 username: neutron
2245 password: password
2246 project_name: service
2247 user_domain_name: service
2248 project_domain_name: service
2249 nova:
2250 region_name: RegionOne
2251 project_name: service
2252 username: nova
2253 password: password
2254 user_domain_name: service
2255 project_domain_name: service
Oleksandr Kozachenkoa10d7852023-02-02 22:01:16 +01002256 placement:
2257 region_name: RegionOne
2258 project_name: service
2259 username: placement
2260 password: password
2261 user_domain_name: service
2262 project_domain_name: service
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002263 designate:
2264 region_name: RegionOne
2265 project_name: service
2266 username: designate
2267 password: password
2268 user_domain_name: service
2269 project_domain_name: service
2270 ironic:
2271 region_name: RegionOne
2272 project_name: service
2273 username: ironic
2274 password: password
2275 user_domain_name: service
2276 project_domain_name: service
2277 test:
2278 role: admin
2279 region_name: RegionOne
2280 username: neutron-test
2281 password: password
2282 # NOTE: this project will be purged and reset if
2283 # conf.rally_tests.force_project_purge is set to true
2284 # which may be required upon test failure, but be aware that this will
2285 # expunge all openstack objects, so if this is used a seperate project
2286 # should be used for each helm test, and also it should be ensured
2287 # that this project is not in use by other tenants
2288 project_name: test
2289 user_domain_name: service
2290 project_domain_name: service
2291 hosts:
2292 default: keystone
2293 internal: keystone-api
2294 host_fqdn_override:
2295 default: null
2296 path:
2297 default: /v3
2298 scheme:
2299 default: http
2300 port:
2301 api:
2302 default: 80
2303 internal: 5000
  # The neutron API endpoint exposed by this chart.
  network:
    name: neutron
    hosts:
      default: neutron-server
      public: neutron
    host_fqdn_override:
      default: null
      # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public
      # endpoints using the following format:
      # public:
      #   host: null
      #   tls:
      #     crt: null
      #     key: null
    path:
      default: null
    scheme:
      default: 'http'
      service: 'http'
    port:
      api:
        default: 9696
        public: 80
        service: 9696
  # Octavia (load balancer) API endpoint.
  load_balancer:
    name: octavia
    hosts:
      default: octavia-api
      public: octavia
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme:
      default: http
    port:
      api:
        default: 9876
        public: 80
  # Fluentd logging endpoint (lives in the osh-infra namespace).
  fluentd:
    namespace: osh-infra
    name: fluentd
    hosts:
      default: fluentd-logging
    host_fqdn_override:
      default: null
    path:
      default: null
    # NOTE(review): scheme is a plain scalar here rather than a mapping with
    # a `default` key as in the sibling entries — confirm the endpoint lookup
    # helpers accept both forms before normalizing.
    scheme: 'http'
    port:
      service:
        default: 24224
      metrics:
        default: 24220
  # Designate (DNS) API endpoint.
  dns:
    name: designate
    hosts:
      default: designate-api
      public: designate
    host_fqdn_override:
      default: null
    path:
      default: /
    scheme:
      default: 'http'
    port:
      api:
        default: 9001
        public: 80
  # Ironic (bare metal) API endpoint.
  baremetal:
    name: ironic
    hosts:
      default: ironic-api
      public: ironic
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme:
      default: 'http'
    port:
      api:
        default: 6385
        public: 80
  # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress
  # They are used to enable the Egress K8s network policy.
  kube_dns:
    namespace: kube-system
    name: kubernetes-dns
    hosts:
      default: kube-dns
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme: http
    port:
      dns:
        default: 53
        protocol: UDP
  # Cluster ingress controller endpoint.
  ingress:
    namespace: null
    name: ingress
    hosts:
      default: ingress
    port:
      ingress:
        default: 80
2412
# Pod network policy rules; rendered only when manifests.network_policy
# is enabled. Empty rule objects ({}) allow all traffic.
network_policy:
  neutron:
    # TODO(lamt): Need to tighten this ingress for security.
    ingress:
      - {}
    egress:
      - {}
2420
# NOTE(review): appears to toggle Helm v3-style hook annotations on the
# chart's jobs — confirm against the job templates before changing.
helm3_hook: true
2422
# Log level used by the health probe scripts.
health_probe:
  logging:
    level: ERROR
2426
# TLS toggles for identity, messaging and database communication.
tls:
  identity: false
  oslo_messaging: false
  oslo_db: false
2431
# Toggles for the Kubernetes resource templates this chart renders;
# each key enables/disables the correspondingly named manifest.
manifests:
  certificates: false
  configmap_bin: true
  configmap_etc: true
  daemonset_dhcp_agent: true
  daemonset_l3_agent: true
  daemonset_lb_agent: true
  daemonset_metadata_agent: true
  daemonset_ovs_agent: true
  daemonset_sriov_agent: true
  daemonset_l2gw_agent: false
  daemonset_bagpipe_bgp: false
  daemonset_netns_cleanup_cron: true
  deployment_ironic_agent: false
  deployment_server: true
  ingress_server: true
  job_bootstrap: true
  job_db_init: true
  job_db_sync: true
  job_db_drop: false
  job_image_repo_sync: true
  job_ks_endpoints: true
  job_ks_service: true
  job_ks_user: true
  job_rabbit_init: true
  pdb_server: true
  pod_rally_test: true
  network_policy: false
  secret_db: true
  secret_ingress_tls: true
  secret_keystone: true
  secret_rabbitmq: true
  secret_registry: true
  service_ingress_server: true
  service_server: true
...