1# Licensed under the Apache License, Version 2.0 (the "License");
2# you may not use this file except in compliance with the License.
3# You may obtain a copy of the License at
4#
5# http://www.apache.org/licenses/LICENSE-2.0
6#
7# Unless required by applicable law or agreed to in writing, software
8# distributed under the License is distributed on an "AS IS" BASIS,
9# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10# See the License for the specific language governing permissions and
11# limitations under the License.
12
13# Default values for neutron.
14# This is a YAML-formatted file.
15# Declare name/value pairs to be passed into your templates.
16# name: value
17
18---
19release_group: null
20
21images:
22 tags:
23 bootstrap: docker.io/openstackhelm/heat:stein-ubuntu_bionic
24 test: docker.io/xrally/xrally-openstack:2.0.0
25 purge_test: docker.io/openstackhelm/ospurge:latest
26 db_init: docker.io/openstackhelm/heat:stein-ubuntu_bionic
27 neutron_db_sync: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
28 db_drop: docker.io/openstackhelm/heat:stein-ubuntu_bionic
29 rabbit_init: docker.io/rabbitmq:3.7-management
30 ks_user: docker.io/openstackhelm/heat:stein-ubuntu_bionic
31 ks_service: docker.io/openstackhelm/heat:stein-ubuntu_bionic
32 ks_endpoints: docker.io/openstackhelm/heat:stein-ubuntu_bionic
33 netoffload: ghcr.io/vexxhost/netoffload:v1.0.1
34 neutron_server: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
35 neutron_dhcp: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
36 neutron_metadata: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
37 neutron_ovn_metadata: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
38 neutron_l3: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
39 neutron_l2gw: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
40 neutron_openvswitch_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
41 neutron_linuxbridge_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
42 neutron_sriov_agent: docker.io/openstackhelm/neutron:stein-18.04-sriov
43 neutron_sriov_agent_init: docker.io/openstackhelm/neutron:stein-18.04-sriov
44 neutron_bagpipe_bgp: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
45 neutron_ironic_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
46 neutron_netns_cleanup_cron: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
47 dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
48 image_repo_sync: docker.io/docker:17.07.0
49 pull_policy: "IfNotPresent"
50 local_registry:
51 active: false
52 exclude:
53 - dep_check
54 - image_repo_sync
55
56labels:
57 agent:
58 dhcp:
59 node_selector_key: openstack-control-plane
60 node_selector_value: enabled
61 l3:
62 node_selector_key: openstack-control-plane
63 node_selector_value: enabled
64 metadata:
65 node_selector_key: openstack-control-plane
66 node_selector_value: enabled
67 ovn_metadata:
68 node_selector_key: openstack-compute-node
69 node_selector_value: enabled
70 l2gw:
71 node_selector_key: openstack-control-plane
72 node_selector_value: enabled
73 job:
74 node_selector_key: openstack-control-plane
75 node_selector_value: enabled
76 lb:
77 node_selector_key: linuxbridge
78 node_selector_value: enabled
79 # openvswitch is a special case, requiring a special
80 # label that can apply to both control hosts
81 # and compute hosts, until we get more sophisticated
82 # with our daemonset scheduling
83 ovs:
84 node_selector_key: openvswitch
85 node_selector_value: enabled
86 sriov:
87 node_selector_key: sriov
88 node_selector_value: enabled
89 bagpipe_bgp:
90 node_selector_key: openstack-compute-node
91 node_selector_value: enabled
92 server:
93 node_selector_key: openstack-control-plane
94 node_selector_value: enabled
95 ironic_agent:
96 node_selector_key: openstack-control-plane
97 node_selector_value: enabled
98 netns_cleanup_cron:
99 node_selector_key: openstack-control-plane
100 node_selector_value: enabled
101 test:
102 node_selector_key: openstack-control-plane
103 node_selector_value: enabled
104
105network:
106 # specify the type of network wiring that will be used
107 backend:
108 - openvswitch
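  # Example (illustrative only, assuming the corresponding agents are enabled
  # for your deployment): the chart also ships linuxbridge wiring, which can
  # be selected by overriding this list, e.g.:
  # backend:
  #   - linuxbridge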
109 # NOTE(Portdirect): Share network namespaces with the host, allowing
110 # agents to be restarted without packet loss and simplifying
111 # debugging. This feature requires mount propagation support.
112 share_namespaces: true
113 interface:
114 # Tunnel interface will be used for VXLAN tunneling.
115 tunnel: null
116 # If tunnel is null, a fallback mechanism searches for an interface
117 # with a route matching tunnel_network_cidr.
118 tunnel_network_cidr: "0/0"
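  # Example (illustrative; interface name and CIDR are placeholders): pin the
  # VXLAN tunnel endpoint to a specific interface instead of relying on the
  # CIDR fallback, e.g.:
  # tunnel: ens3
  # or narrow the fallback search to a management network:
  # tunnel_network_cidr: "192.168.0.0/24"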
119 # To perform setup of network interfaces using the SR-IOV init
120 # container you can use a section similar to:
121 # sriov:
122 # - device: ${DEV}
123 # num_vfs: 8
124 # mtu: 9214
125 # promisc: false
126 # qos:
127 # - vf_num: 0
128 # share: 10
129 # queues_per_vf:
130 # - num_queues: 16
131 # exclude_vf: 0,11,21
132 server:
133 ingress:
134 public: true
135 classes:
136 namespace: "nginx"
137 cluster: "nginx-cluster"
138 annotations:
139 nginx.ingress.kubernetes.io/rewrite-target: /
140 external_policy_local: false
141 node_port:
142 enabled: false
143 port: 30096
144
145bootstrap:
146 enabled: false
147 ks_user: neutron
148 script: |
149 openstack token issue
150
151dependencies:
152 dynamic:
153 common:
154 local_image_registry:
155 jobs:
156 - neutron-image-repo-sync
157 services:
158 - endpoint: node
159 service: local_image_registry
160 targeted:
161 sriov: {}
162 l2gateway: {}
163 bagpipe_bgp: {}
164 ovn:
165 libvirt:
166 pod:
167 - requireSameNode: true
168 labels:
169 application: ovn
170 component: ovn-controller
171 openvswitch:
172 dhcp:
173 pod:
174 - requireSameNode: true
175 labels:
176 application: neutron
177 component: neutron-ovs-agent
178 l3:
179 pod:
180 - requireSameNode: true
181 labels:
182 application: neutron
183 component: neutron-ovs-agent
184 metadata:
185 pod:
186 - requireSameNode: true
187 labels:
188 application: neutron
189 component: neutron-ovs-agent
190 linuxbridge:
191 dhcp:
192 pod:
193 - requireSameNode: true
194 labels:
195 application: neutron
196 component: neutron-lb-agent
197 l3:
198 pod:
199 - requireSameNode: true
200 labels:
201 application: neutron
202 component: neutron-lb-agent
203 metadata:
204 pod:
205 - requireSameNode: true
206 labels:
207 application: neutron
208 component: neutron-lb-agent
209 lb_agent:
210 pod: null
211 static:
212 bootstrap:
213 services:
214 - endpoint: internal
215 service: network
216 - endpoint: internal
217 service: compute
218 db_drop:
219 services:
220 - endpoint: internal
221 service: oslo_db
222 db_init:
223 services:
224 - endpoint: internal
225 service: oslo_db
226 db_sync:
227 jobs:
228 - neutron-db-init
229 services:
230 - endpoint: internal
231 service: oslo_db
232 dhcp:
233 pod: null
234 jobs:
235 - neutron-rabbit-init
236 services:
237 - endpoint: internal
238 service: oslo_messaging
239 - endpoint: internal
240 service: network
241 - endpoint: internal
242 service: compute
243 ks_endpoints:
244 jobs:
245 - neutron-ks-service
246 services:
247 - endpoint: internal
248 service: identity
249 ks_service:
250 services:
251 - endpoint: internal
252 service: identity
253 ks_user:
254 services:
255 - endpoint: internal
256 service: identity
257 rabbit_init:
258 services:
259 - service: oslo_messaging
260 endpoint: internal
261 l3:
262 pod: null
263 jobs:
264 - neutron-rabbit-init
265 services:
266 - endpoint: internal
267 service: oslo_messaging
268 - endpoint: internal
269 service: network
270 - endpoint: internal
271 service: compute
272 lb_agent:
273 pod: null
274 jobs:
275 - neutron-rabbit-init
276 services:
277 - endpoint: internal
278 service: oslo_messaging
279 - endpoint: internal
280 service: network
281 metadata:
282 pod: null
283 jobs:
284 - neutron-rabbit-init
285 services:
286 - endpoint: internal
287 service: oslo_messaging
288 - endpoint: internal
289 service: network
290 - endpoint: internal
291 service: compute
292 - endpoint: public
293 service: compute_metadata
294 ovn_metadata:
295 pod:
296 - requireSameNode: true
297 labels:
298 application: ovn
299 component: ovn-controller
300 services:
301 - endpoint: internal
302 service: compute_metadata
303 ovs_agent:
304 jobs:
305 - neutron-rabbit-init
306 pod:
307 - requireSameNode: true
308 labels:
309 application: openvswitch
310 component: server
311 services:
312 - endpoint: internal
313 service: oslo_messaging
314 - endpoint: internal
315 service: network
316 server:
317 jobs:
318 - neutron-db-sync
319 - neutron-ks-user
320 - neutron-ks-endpoints
321 - neutron-rabbit-init
322 services:
323 - endpoint: internal
324 service: oslo_db
325 - endpoint: internal
326 service: oslo_messaging
327 - endpoint: internal
328 service: oslo_cache
329 - endpoint: internal
330 service: identity
331 ironic_agent:
332 jobs:
333 - neutron-db-sync
334 - neutron-ks-user
335 - neutron-ks-endpoints
336 - neutron-rabbit-init
337 services:
338 - endpoint: internal
339 service: oslo_db
340 - endpoint: internal
341 service: oslo_messaging
342 - endpoint: internal
343 service: oslo_cache
344 - endpoint: internal
345 service: identity
346 tests:
347 services:
348 - endpoint: internal
349 service: network
350 - endpoint: internal
351 service: compute
352 image_repo_sync:
353 services:
354 - endpoint: internal
355 service: local_image_registry
356
357pod:
358 use_fqdn:
359 neutron_agent: true
360 probes:
361 rpc_timeout: 60
362 rpc_retries: 2
363 dhcp_agent:
364 dhcp_agent:
365 readiness:
366 enabled: true
367 params:
368 initialDelaySeconds: 30
369 periodSeconds: 190
370 timeoutSeconds: 185
371 liveness:
372 enabled: true
373 params:
374 initialDelaySeconds: 120
375 periodSeconds: 600
376 timeoutSeconds: 580
377 l3_agent:
378 l3_agent:
379 readiness:
380 enabled: true
381 params:
382 initialDelaySeconds: 30
383 periodSeconds: 190
384 timeoutSeconds: 185
385 liveness:
386 enabled: true
387 params:
388 initialDelaySeconds: 120
389 periodSeconds: 600
390 timeoutSeconds: 580
391 lb_agent:
392 lb_agent:
393 readiness:
394 enabled: true
395 metadata_agent:
396 metadata_agent:
397 readiness:
398 enabled: true
399 params:
400 initialDelaySeconds: 30
401 periodSeconds: 190
402 timeoutSeconds: 185
403 liveness:
404 enabled: true
405 params:
406 initialDelaySeconds: 120
407 periodSeconds: 600
408 timeoutSeconds: 580
409 ovn_metadata_agent:
410 ovn_metadata_agent:
411 readiness:
412 enabled: true
413 params:
414 initialDelaySeconds: 30
415 periodSeconds: 190
416 timeoutSeconds: 185
417 liveness:
418 enabled: true
419 params:
420 initialDelaySeconds: 120
421 periodSeconds: 600
422 timeoutSeconds: 580
423 ovs_agent:
424 ovs_agent:
425 readiness:
426 enabled: true
427 params:
428 liveness:
429 enabled: true
430 params:
431 initialDelaySeconds: 120
432 periodSeconds: 600
433 timeoutSeconds: 580
434 sriov_agent:
435 sriov_agent:
436 readiness:
437 enabled: true
438 params:
439 initialDelaySeconds: 30
440 periodSeconds: 190
441 timeoutSeconds: 185
442 bagpipe_bgp:
443 bagpipe_bgp:
444 readiness:
445 enabled: true
446 params:
447 liveness:
448 enabled: true
449 params:
450 initialDelaySeconds: 60
451 l2gw_agent:
452 l2gw_agent:
453 readiness:
454 enabled: true
455 params:
456 initialDelaySeconds: 30
457 periodSeconds: 15
458 timeoutSeconds: 65
459 liveness:
460 enabled: true
461 params:
462 initialDelaySeconds: 120
463 periodSeconds: 90
464 timeoutSeconds: 70
465 server:
466 server:
467 readiness:
468 enabled: true
469 params:
470 liveness:
471 enabled: true
472 params:
473 initialDelaySeconds: 60
474 security_context:
475 neutron_dhcp_agent:
476 pod:
477 runAsUser: 42424
478 container:
479 neutron_dhcp_agent:
480 readOnlyRootFilesystem: true
481 privileged: true
482 neutron_l2gw_agent:
483 pod:
484 runAsUser: 42424
485 container:
486 neutron_l2gw_agent:
487 readOnlyRootFilesystem: true
488 privileged: true
489 neutron_bagpipe_bgp:
490 pod:
491 runAsUser: 42424
492 container:
493 neutron_bagpipe_bgp:
494 readOnlyRootFilesystem: true
495 privileged: true
496 neutron_l3_agent:
497 pod:
498 runAsUser: 42424
499 container:
500 neutron_l3_agent:
501 readOnlyRootFilesystem: true
502 privileged: true
503 neutron_lb_agent:
504 pod:
505 runAsUser: 42424
506 container:
507 neutron_lb_agent_kernel_modules:
508 capabilities:
509 add:
510 - SYS_MODULE
511 - SYS_CHROOT
512 runAsUser: 0
513 readOnlyRootFilesystem: true
514 neutron_lb_agent_init:
515 privileged: true
516 runAsUser: 0
517 readOnlyRootFilesystem: true
518 neutron_lb_agent:
519 readOnlyRootFilesystem: true
520 privileged: true
521 neutron_metadata_agent:
522 pod:
523 runAsUser: 42424
524 container:
525 neutron_metadata_agent_init:
526 runAsUser: 0
527 readOnlyRootFilesystem: true
528 neutron_ovn_metadata_agent:
529 pod:
530 runAsUser: 42424
531 container:
532 neutron_ovn_metadata_agent_init:
533 runAsUser: 0
534 readOnlyRootFilesystem: true
535 neutron_ovs_agent:
536 pod:
537 runAsUser: 42424
538 container:
539 neutron_openvswitch_agent_kernel_modules:
540 capabilities:
541 add:
542 - SYS_MODULE
543 - SYS_CHROOT
544 runAsUser: 0
545 readOnlyRootFilesystem: true
546 netoffload:
547 privileged: true
548 runAsUser: 0
549 readOnlyRootFilesystem: true
550 neutron_ovs_agent_init:
551 privileged: true
552 runAsUser: 0
553 readOnlyRootFilesystem: true
554 neutron_ovs_agent:
555 readOnlyRootFilesystem: true
556 privileged: true
557 neutron_server:
558 pod:
559 runAsUser: 42424
560 container:
561 nginx:
562 runAsUser: 0
563 readOnlyRootFilesystem: false
564 neutron_server:
565 allowPrivilegeEscalation: false
566 readOnlyRootFilesystem: true
567 neutron_sriov_agent:
568 pod:
569 runAsUser: 42424
570 container:
571 neutron_sriov_agent_init:
572 privileged: true
573 runAsUser: 0
574 readOnlyRootFilesystem: false
575 neutron_sriov_agent:
576 readOnlyRootFilesystem: true
577 privileged: true
578 neutron_ironic_agent:
579 pod:
580 runAsUser: 42424
581 container:
582 neutron_ironic_agent:
583 allowPrivilegeEscalation: false
584 readOnlyRootFilesystem: true
585 neutron_netns_cleanup_cron:
586 pod:
587 runAsUser: 42424
588 container:
589 neutron_netns_cleanup_cron:
590 readOnlyRootFilesystem: true
591 privileged: true
592 affinity:
593 anti:
594 type:
595 default: preferredDuringSchedulingIgnoredDuringExecution
596 topologyKey:
597 default: kubernetes.io/hostname
598 weight:
599 default: 10
600 tolerations:
601 neutron:
602 enabled: false
603 tolerations:
604 - key: node-role.kubernetes.io/master
605 operator: Exists
606 effect: NoSchedule
607 - key: node-role.kubernetes.io/control-plane
608 operator: Exists
609 effect: NoSchedule
610 mounts:
611 neutron_server:
612 init_container: null
613 neutron_server:
614 volumeMounts:
615 volumes:
616 neutron_dhcp_agent:
617 init_container: null
618 neutron_dhcp_agent:
619 volumeMounts:
620 volumes:
621 neutron_l3_agent:
622 init_container: null
623 neutron_l3_agent:
624 volumeMounts:
625 volumes:
626 neutron_lb_agent:
627 init_container: null
628 neutron_lb_agent:
629 volumeMounts:
630 volumes:
631 neutron_metadata_agent:
632 init_container: null
633 neutron_metadata_agent:
634 volumeMounts:
635 volumes:
636 neutron_ovn_metadata_agent:
637 init_container: null
638 neutron_ovn_metadata_agent:
639 volumeMounts:
640 volumes:
641 neutron_ovs_agent:
642 init_container: null
643 neutron_ovs_agent:
644 volumeMounts:
645 volumes:
646 neutron_sriov_agent:
647 init_container: null
648 neutron_sriov_agent:
649 volumeMounts:
650 volumes:
651 neutron_l2gw_agent:
652 init_container: null
653 neutron_l2gw_agent:
654 volumeMounts:
655 volumes:
656 bagpipe_bgp:
657 init_container: null
658 bagpipe_bgp:
659 volumeMounts:
660 volumes:
661 neutron_ironic_agent:
662 init_container: null
663 neutron_ironic_agent:
664 volumeMounts:
665 volumes:
666 neutron_netns_cleanup_cron:
667 init_container: null
668 neutron_netns_cleanup_cron:
669 volumeMounts:
670 volumes:
671 neutron_tests:
672 init_container: null
673 neutron_tests:
674 volumeMounts:
675 volumes:
676 neutron_bootstrap:
677 init_container: null
678 neutron_bootstrap:
679 volumeMounts:
680 volumes:
681 neutron_db_sync:
682 neutron_db_sync:
683 volumeMounts:
684 - name: db-sync-conf
685 mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini
686 subPath: ml2_conf.ini
687 readOnly: true
688 volumes:
689 replicas:
690 server: 1
691 ironic_agent: 1
692 lifecycle:
693 upgrades:
694 deployments:
695 revision_history: 3
696 pod_replacement_strategy: RollingUpdate
697 rolling_update:
698 max_unavailable: 1
699 max_surge: 3
700 daemonsets:
701 pod_replacement_strategy: RollingUpdate
702 dhcp_agent:
703 enabled: true
704 min_ready_seconds: 0
705 max_unavailable: 1
706 l3_agent:
707 enabled: true
708 min_ready_seconds: 0
709 max_unavailable: 1
710 lb_agent:
711 enabled: true
712 min_ready_seconds: 0
713 max_unavailable: 1
714 metadata_agent:
715 enabled: true
716 min_ready_seconds: 0
717 max_unavailable: 1
718 ovn_metadata_agent:
719 enabled: true
720 min_ready_seconds: 0
721 max_unavailable: 1
722 ovs_agent:
723 enabled: true
724 min_ready_seconds: 0
725 max_unavailable: 1
726 sriov_agent:
727 enabled: true
728 min_ready_seconds: 0
729 max_unavailable: 1
730 netns_cleanup_cron:
731 enabled: true
732 min_ready_seconds: 0
733 max_unavailable: 1
734 disruption_budget:
735 server:
736 min_available: 0
737 termination_grace_period:
738 server:
739 timeout: 30
740 ironic_agent:
741 timeout: 30
742 resources:
743 enabled: false
744 agent:
745 dhcp:
746 requests:
747 memory: "128Mi"
748 cpu: "100m"
749 limits:
750 memory: "1024Mi"
751 cpu: "2000m"
752 l3:
753 requests:
754 memory: "128Mi"
755 cpu: "100m"
756 limits:
757 memory: "1024Mi"
758 cpu: "2000m"
759 lb:
760 requests:
761 memory: "128Mi"
762 cpu: "100m"
763 limits:
764 memory: "1024Mi"
765 cpu: "2000m"
766 metadata:
767 requests:
768 memory: "128Mi"
769 cpu: "100m"
770 limits:
771 memory: "1024Mi"
772 cpu: "2000m"
773 ovn_metadata:
774 requests:
775 memory: "128Mi"
776 cpu: "100m"
777 limits:
778 memory: "1024Mi"
779 cpu: "2000m"
780 ovs:
781 requests:
782 memory: "128Mi"
783 cpu: "100m"
784 limits:
785 memory: "1024Mi"
786 cpu: "2000m"
787 sriov:
788 requests:
789 memory: "128Mi"
790 cpu: "100m"
791 limits:
792 memory: "1024Mi"
793 cpu: "2000m"
794 l2gw:
795 requests:
796 memory: "128Mi"
797 cpu: "100m"
798 limits:
799 memory: "1024Mi"
800 cpu: "2000m"
801 bagpipe_bgp:
802 requests:
803 memory: "128Mi"
804 cpu: "100m"
805 limits:
806 memory: "1024Mi"
807 cpu: "2000m"
808 server:
809 requests:
810 memory: "128Mi"
811 cpu: "100m"
812 limits:
813 memory: "1024Mi"
814 cpu: "2000m"
815 ironic_agent:
816 requests:
817 memory: "128Mi"
818 cpu: "100m"
819 limits:
820 memory: "1024Mi"
821 cpu: "2000m"
822 netns_cleanup_cron:
823 requests:
824 memory: "128Mi"
825 cpu: "100m"
826 limits:
827 memory: "1024Mi"
828 cpu: "2000m"
829 jobs:
830 bootstrap:
831 requests:
832 memory: "128Mi"
833 cpu: "100m"
834 limits:
835 memory: "1024Mi"
836 cpu: "2000m"
837 db_init:
838 requests:
839 memory: "128Mi"
840 cpu: "100m"
841 limits:
842 memory: "1024Mi"
843 cpu: "2000m"
844 rabbit_init:
845 requests:
846 memory: "128Mi"
847 cpu: "100m"
848 limits:
849 memory: "1024Mi"
850 cpu: "2000m"
851 db_sync:
852 requests:
853 memory: "128Mi"
854 cpu: "100m"
855 limits:
856 memory: "1024Mi"
857 cpu: "2000m"
858 db_drop:
859 requests:
860 memory: "128Mi"
861 cpu: "100m"
862 limits:
863 memory: "1024Mi"
864 cpu: "2000m"
865 ks_endpoints:
866 requests:
867 memory: "128Mi"
868 cpu: "100m"
869 limits:
870 memory: "1024Mi"
871 cpu: "2000m"
872 ks_service:
873 requests:
874 memory: "128Mi"
875 cpu: "100m"
876 limits:
877 memory: "1024Mi"
878 cpu: "2000m"
879 ks_user:
880 requests:
881 memory: "128Mi"
882 cpu: "100m"
883 limits:
884 memory: "1024Mi"
885 cpu: "2000m"
886 tests:
887 requests:
888 memory: "128Mi"
889 cpu: "100m"
890 limits:
891 memory: "1024Mi"
892 cpu: "2000m"
893 image_repo_sync:
894 requests:
895 memory: "128Mi"
896 cpu: "100m"
897 limits:
898 memory: "1024Mi"
899 cpu: "2000m"
900
901conf:
902 rally_tests:
903 force_project_purge: false
904 run_tempest: false
905 clean_up: |
906 # NOTE: We make a best effort to clean up Rally-generated networks and routers,
907 # but this should not block further automated deployment.
908 set +e
909 PATTERN="^[sc]_rally_"
910
911 ROUTERS=$(openstack router list --format=value -c Name | grep -e $PATTERN | sort | tr -d '\r')
912 NETWORKS=$(openstack network list --format=value -c Name | grep -e $PATTERN | sort | tr -d '\r')
913
914 for ROUTER in $ROUTERS
915 do
916 openstack router unset --external-gateway $ROUTER
917 openstack router set --disable --no-ha $ROUTER
918
919 SUBNS=$(openstack router show $ROUTER -c interfaces_info --format=value | python -m json.tool | grep -oP '(?<="subnet_id": ")[a-f0-9\-]{36}(?=")' | sort | uniq)
920 for SUBN in $SUBNS
921 do
922 openstack router remove subnet $ROUTER $SUBN
923 done
924
925 for PORT in $(openstack port list --router $ROUTER --format=value -c ID | tr -d '\r')
926 do
927 openstack router remove port $ROUTER $PORT
928 done
929
930 openstack router delete $ROUTER
931 done
932
933 for NETWORK in $NETWORKS
934 do
935 for PORT in $(openstack port list --network $NETWORK --format=value -c ID | tr -d '\r')
936 do
937 openstack port delete $PORT
938 done
939 openstack network delete $NETWORK
940 done
941 set -e
942 tests:
943 NeutronNetworks.create_and_delete_networks:
944 - args:
945 network_create_args: {}
946 context:
947 quotas:
948 neutron:
949 network: -1
950 runner:
951 concurrency: 1
952 times: 1
953 type: constant
954 sla:
955 failure_rate:
956 max: 0
957 NeutronNetworks.create_and_delete_ports:
958 - args:
959 network_create_args: {}
960 port_create_args: {}
961 ports_per_network: 10
962 context:
963 network: {}
964 quotas:
965 neutron:
966 network: -1
967 port: -1
968 runner:
969 concurrency: 1
970 times: 1
971 type: constant
972 sla:
973 failure_rate:
974 max: 0
975 NeutronNetworks.create_and_delete_routers:
976 - args:
977 network_create_args: {}
978 router_create_args: {}
979 subnet_cidr_start: 1.1.0.0/30
980 subnet_create_args: {}
981 subnets_per_network: 2
982 context:
983 network: {}
984 quotas:
985 neutron:
986 network: -1
987 router: -1
988 subnet: -1
989 runner:
990 concurrency: 1
991 times: 1
992 type: constant
993 sla:
994 failure_rate:
995 max: 0
996 NeutronNetworks.create_and_delete_subnets:
997 - args:
998 network_create_args: {}
999 subnet_cidr_start: 1.1.0.0/30
1000 subnet_create_args: {}
1001 subnets_per_network: 2
1002 context:
1003 network: {}
1004 quotas:
1005 neutron:
1006 network: -1
1007 subnet: -1
1008 runner:
1009 concurrency: 1
1010 times: 1
1011 type: constant
1012 sla:
1013 failure_rate:
1014 max: 0
1015 NeutronNetworks.create_and_list_routers:
1016 - args:
1017 network_create_args: {}
1018 router_create_args: {}
1019 subnet_cidr_start: 1.1.0.0/30
1020 subnet_create_args: {}
1021 subnets_per_network: 2
1022 context:
1023 network: {}
1024 quotas:
1025 neutron:
1026 network: -1
1027 router: -1
1028 subnet: -1
1029 runner:
1030 concurrency: 1
1031 times: 1
1032 type: constant
1033 sla:
1034 failure_rate:
1035 max: 0
1036 NeutronNetworks.create_and_list_subnets:
1037 - args:
1038 network_create_args: {}
1039 subnet_cidr_start: 1.1.0.0/30
1040 subnet_create_args: {}
1041 subnets_per_network: 2
1042 context:
1043 network: {}
1044 quotas:
1045 neutron:
1046 network: -1
1047 subnet: -1
1048 runner:
1049 concurrency: 1
1050 times: 1
1051 type: constant
1052 sla:
1053 failure_rate:
1054 max: 0
1055 NeutronNetworks.create_and_show_network:
1056 - args:
1057 network_create_args: {}
1058 context:
1059 quotas:
1060 neutron:
1061 network: -1
1062 runner:
1063 concurrency: 1
1064 times: 1
1065 type: constant
1066 sla:
1067 failure_rate:
1068 max: 0
1069 NeutronNetworks.create_and_update_networks:
1070 - args:
1071 network_create_args: {}
1072 network_update_args:
1073 admin_state_up: false
1074 context:
1075 quotas:
1076 neutron:
1077 network: -1
1078 runner:
1079 concurrency: 1
1080 times: 1
1081 type: constant
1082 sla:
1083 failure_rate:
1084 max: 0
1085 NeutronNetworks.create_and_update_ports:
1086 - args:
1087 network_create_args: {}
1088 port_create_args: {}
1089 port_update_args:
1090 admin_state_up: false
1091 device_id: dummy_id
1092 device_owner: dummy_owner
1093 ports_per_network: 5
1094 context:
1095 network: {}
1096 quotas:
1097 neutron:
1098 network: -1
1099 port: -1
1100 runner:
1101 concurrency: 1
1102 times: 1
1103 type: constant
1104 sla:
1105 failure_rate:
1106 max: 0
1107 NeutronNetworks.create_and_update_routers:
1108 - args:
1109 network_create_args: {}
1110 router_create_args: {}
1111 router_update_args:
1112 admin_state_up: false
1113 subnet_cidr_start: 1.1.0.0/30
1114 subnet_create_args: {}
1115 subnets_per_network: 2
1116 context:
1117 network: {}
1118 quotas:
1119 neutron:
1120 network: -1
1121 router: -1
1122 subnet: -1
1123 runner:
1124 concurrency: 1
1125 times: 1
1126 type: constant
1127 sla:
1128 failure_rate:
1129 max: 0
1130 NeutronNetworks.create_and_update_subnets:
1131 - args:
1132 network_create_args: {}
1133 subnet_cidr_start: 1.4.0.0/16
1134 subnet_create_args: {}
1135 subnet_update_args:
1136 enable_dhcp: false
1137 subnets_per_network: 2
1138 context:
1139 network: {}
1140 quotas:
1141 neutron:
1142 network: -1
1143 subnet: -1
1144 runner:
1145 concurrency: 1
1146 times: 1
1147 type: constant
1148 sla:
1149 failure_rate:
1150 max: 0
1151 NeutronNetworks.list_agents:
1152 - args:
1153 agent_args: {}
1154 runner:
1155 concurrency: 1
1156 times: 1
1157 type: constant
1158 sla:
1159 failure_rate:
1160 max: 0
1161 NeutronSecurityGroup.create_and_list_security_groups:
1162 - args:
1163 security_group_create_args: {}
1164 context:
1165 quotas:
1166 neutron:
1167 security_group: -1
1168 runner:
1169 concurrency: 1
1170 times: 1
1171 type: constant
1172 sla:
1173 failure_rate:
1174 max: 0
1175 NeutronSecurityGroup.create_and_update_security_groups:
1176 - args:
1177 security_group_create_args: {}
1178 security_group_update_args: {}
1179 context:
1180 quotas:
1181 neutron:
1182 security_group: -1
1183 runner:
1184 concurrency: 1
1185 times: 1
1186 type: constant
1187 sla:
1188 failure_rate:
1189 max: 0
1190 paste: {}
1191 policy: {}
1192 api_audit_map:
1193 DEFAULT:
1194 target_endpoint_type: None
1195 custom_actions:
1196 add_router_interface: update/add
1197 remove_router_interface: update/remove
1198 path_keywords:
1199 floatingips: ip
1200 healthmonitors: healthmonitor
1201 health_monitors: health_monitor
1202 lb: None
1203 members: member
1204 metering-labels: label
1205 metering-label-rules: rule
1206 networks: network
1207 pools: pool
1208 ports: port
1209 routers: router
1210 quotas: quota
1211 security-groups: security-group
1212 security-group-rules: rule
1213 subnets: subnet
1214 vips: vip
1215 service_endpoints:
1216 network: service/network
1217 neutron_sudoers: |
1218 # This sudoers file supports rootwrap for both Kolla and LOCI Images.
1219 Defaults !requiretty
1220 Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin"
1221 neutron ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *, /var/lib/openstack/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *
1222 neutron ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf, /var/lib/openstack/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
1223 rootwrap: |
1224 # Configuration for neutron-rootwrap
1225 # This file should be owned by (and only-writeable by) the root user
1226
1227 [DEFAULT]
1228 # List of directories to load filter definitions from (separated by ',').
1229 # These directories MUST all be only writeable by root !
1230 filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap,/var/lib/openstack/etc/neutron/rootwrap.d
1231
1232 # List of directories to search executables in, in case filters do not
1233 # explicitly specify a full path (separated by ',')
1234 # If not specified, defaults to system PATH environment variable.
1235 # These directories MUST all be only writeable by root !
1236 exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin
1237
1238 # Enable logging to syslog
1239 # Default value is False
1240 use_syslog=False
1241
1242 # Which syslog facility to use.
1243 # Valid values include auth, authpriv, syslog, local0, local1...
1244 # Default value is 'syslog'
1245 syslog_log_facility=syslog
1246
1247 # Which messages to log.
1248 # INFO means log all usage
1249 # ERROR means only log unsuccessful attempts
1250 syslog_log_level=ERROR
1251
1252 [xenapi]
1253 # XenAPI configuration is only required by the L2 agent if it is to
1254 # target a XenServer/XCP compute host's dom0.
1255 xenapi_connection_url=<None>
1256 xenapi_connection_username=root
1257 xenapi_connection_password=<None>
1258 rootwrap_filters:
1259 debug:
1260 pods:
1261 - dhcp_agent
1262 - l3_agent
1263 - lb_agent
1264 - metadata_agent
1265 - ovn_metadata_agent
1266 - ovs_agent
1267 - sriov_agent
1268 content: |
1269 # neutron-rootwrap command filters for nodes on which neutron is
1270 # expected to control network
1271 #
1272 # This file should be owned by (and only-writeable by) the root user
1273
1274 # format seems to be
1275 # cmd-name: filter-name, raw-command, user, args
1276
1277 [Filters]
1278
1279 # This is needed because we should ping
1280 # from inside a namespace which requires root
1281 # _alt variants allow to match -c and -w in any order
1282 # (used by NeutronDebugAgent.ping_all)
1283 ping: RegExpFilter, ping, root, ping, -w, \d+, -c, \d+, [0-9\.]+
1284 ping_alt: RegExpFilter, ping, root, ping, -c, \d+, -w, \d+, [0-9\.]+
1285 ping6: RegExpFilter, ping6, root, ping6, -w, \d+, -c, \d+, [0-9A-Fa-f:]+
1286 ping6_alt: RegExpFilter, ping6, root, ping6, -c, \d+, -w, \d+, [0-9A-Fa-f:]+
1287 dibbler:
1288 pods:
1289 - dhcp_agent
1290 - l3_agent
1291 - lb_agent
1292 - metadata_agent
1293 - ovn_metadata_agent
1294 - ovs_agent
1295 - sriov_agent
1296 content: |
1297 # neutron-rootwrap command filters for nodes on which neutron is
1298 # expected to control network
1299 #
1300 # This file should be owned by (and only-writeable by) the root user
1301
1302 # format seems to be
1303 # cmd-name: filter-name, raw-command, user, args
1304
1305 [Filters]
1306
1307 # Filters for the dibbler-based reference implementation of the pluggable
1308 # Prefix Delegation driver. Other implementations using an alternative agent
1309 # should include a similar filter in this folder.
1310
1311 # prefix_delegation_agent
1312 dibbler-client: CommandFilter, dibbler-client, root
1313 ipset_firewall:
1314 pods:
1315 - dhcp_agent
1316 - l3_agent
1317 - lb_agent
1318 - metadata_agent
1319 - ovn_metadata_agent
1320 - ovs_agent
1321 - sriov_agent
1322 content: |
1323 # neutron-rootwrap command filters for nodes on which neutron is
1324 # expected to control network
1325 #
1326 # This file should be owned by (and only-writeable by) the root user
1327
1328 # format seems to be
1329 # cmd-name: filter-name, raw-command, user, args
1330
1331 [Filters]
1332 # neutron/agent/linux/iptables_firewall.py
1333 # "ipset", "-A", ...
1334 ipset: CommandFilter, ipset, root
1335 l3:
1336 pods:
1337 - dhcp_agent
1338 - l3_agent
1339 - lb_agent
1340 - metadata_agent
1341 - ovn_metadata_agent
1342 - ovs_agent
1343 - sriov_agent
1344 content: |
1345 # neutron-rootwrap command filters for nodes on which neutron is
1346 # expected to control network
1347 #
1348 # This file should be owned by (and only-writeable by) the root user
1349
1350 # format seems to be
1351 # cmd-name: filter-name, raw-command, user, args
1352
1353 [Filters]
1354
1355 # arping
1356 arping: CommandFilter, arping, root
1357
1358 # l3_agent
1359 sysctl: CommandFilter, sysctl, root
1360 route: CommandFilter, route, root
1361 radvd: CommandFilter, radvd, root
1362
1363 # haproxy
1364 haproxy: RegExpFilter, haproxy, root, haproxy, -f, .*
1365 kill_haproxy: KillFilter, root, haproxy, -15, -9, -HUP
1366
1367 # metadata proxy
1368 metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
1369 # RHEL invocation of the metadata proxy will report /usr/bin/python
1370 kill_metadata: KillFilter, root, python, -15, -9
1371 kill_metadata2: KillFilter, root, python2, -15, -9
1372 kill_metadata7: KillFilter, root, python2.7, -15, -9
1373 kill_metadata3: KillFilter, root, python3, -15, -9
1374 kill_metadata35: KillFilter, root, python3.5, -15, -9
1375 kill_metadata36: KillFilter, root, python3.6, -15, -9
1376 kill_metadata37: KillFilter, root, python3.7, -15, -9
1377 kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -15, -9, -HUP
1378 kill_radvd: KillFilter, root, /sbin/radvd, -15, -9, -HUP
1379
1380 # ip_lib
1381 ip: IpFilter, ip, root
1382 find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
1383 ip_exec: IpNetnsExecFilter, ip, root
1384
1385 # l3_tc_lib
1386 l3_tc_show_qdisc: RegExpFilter, tc, root, tc, qdisc, show, dev, .+
1387 l3_tc_add_qdisc_ingress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, ingress
1388 l3_tc_add_qdisc_egress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, root, handle, 1:, htb
1389 l3_tc_show_filters: RegExpFilter, tc, root, tc, -p, -s, -d, filter, show, dev, .+, parent, .+, prio, 1
1390 l3_tc_delete_filters: RegExpFilter, tc, root, tc, filter, del, dev, .+, parent, .+, prio, 1, handle, .+, u32
1391 l3_tc_add_filter_ingress: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, ip, prio, 1, u32, match, ip, dst, .+, police, rate, .+, burst, .+, drop, flowid, :1
1392 l3_tc_add_filter_egress: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, ip, prio, 1, u32, match, ip, src, .+, police, rate, .+, burst, .+, drop, flowid, :1
1393
1394 # For ip monitor
1395 kill_ip_monitor: KillFilter, root, ip, -9
1396
1397 # ovs_lib (if OVSInterfaceDriver is used)
1398 ovs-vsctl: CommandFilter, ovs-vsctl, root
1399
1400 # iptables_manager
1401 iptables-save: CommandFilter, iptables-save, root
1402 iptables-restore: CommandFilter, iptables-restore, root
1403 ip6tables-save: CommandFilter, ip6tables-save, root
1404 ip6tables-restore: CommandFilter, ip6tables-restore, root
1405
1406 # Keepalived
1407 keepalived: CommandFilter, keepalived, root
1408 kill_keepalived: KillFilter, root, keepalived, -HUP, -15, -9
1409
1410 # l3 agent to delete floatingip's conntrack state
1411 conntrack: CommandFilter, conntrack, root
1412
1413 # keepalived state change monitor
1414 keepalived_state_change: CommandFilter, neutron-keepalived-state-change, root
1415 # The following filters are used to kill the keepalived state change monitor.
1416 # Since the monitor runs as a Python script, the system reports that the
1417 # command of the process to be killed is python.
1418 # TODO(mlavalle) These kill filters will be updated once we come up with a
1419 # mechanism to kill using the name of the script being executed by Python
1420 kill_keepalived_monitor_py: KillFilter, root, python, -15
1421 kill_keepalived_monitor_py27: KillFilter, root, python2.7, -15
1422 kill_keepalived_monitor_py3: KillFilter, root, python3, -15
1423 kill_keepalived_monitor_py35: KillFilter, root, python3.5, -15
1424 kill_keepalived_monitor_py36: KillFilter, root, python3.6, -15
1425 kill_keepalived_monitor_py37: KillFilter, root, python3.7, -15
1426 netns_cleanup:
1427 pods:
1428 - dhcp_agent
1429 - l3_agent
1430 - lb_agent
1431 - metadata_agent
1432 - ovn_metadata_agent
1433 - ovs_agent
1434 - sriov_agent
1435 - netns_cleanup_cron
1436 content: |
1437 # neutron-rootwrap command filters for nodes on which neutron is
1438 # expected to control network
1439 #
1440 # This file should be owned by (and only-writeable by) the root user
1441
1442 # format seems to be
1443 # cmd-name: filter-name, raw-command, user, args
1444
1445 [Filters]
1446
1447 # netns-cleanup
1448 netstat: CommandFilter, netstat, root
1449 dhcp:
1450 pods:
1451 - dhcp_agent
1452 - l3_agent
1453 - lb_agent
1454 - metadata_agent
1455 - ovn_metadata_agent
1456 - ovs_agent
1457 - sriov_agent
1458 - netns_cleanup_cron
1459 content: |
1460 # neutron-rootwrap command filters for nodes on which neutron is
1461 # expected to control network
1462 #
1463 # This file should be owned by (and only-writeable by) the root user
1464
1465 # format seems to be
1466 # cmd-name: filter-name, raw-command, user, args
1467
1468 [Filters]
1469
1470 # dhcp-agent
1471 dnsmasq: CommandFilter, dnsmasq, root
1472 # dhcp-agent uses kill as well, that's handled by the generic KillFilter
1473 # it looks like these are the only signals needed, per
1474 # neutron/agent/linux/dhcp.py
1475 kill_dnsmasq: KillFilter, root, /sbin/dnsmasq, -9, -HUP, -15
1476 kill_dnsmasq_usr: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP, -15
1477
1478 ovs-vsctl: CommandFilter, ovs-vsctl, root
1479 ivs-ctl: CommandFilter, ivs-ctl, root
1480 mm-ctl: CommandFilter, mm-ctl, root
1481 dhcp_release: CommandFilter, dhcp_release, root
1482 dhcp_release6: CommandFilter, dhcp_release6, root
1483
1484 # metadata proxy
1485 metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
1486 # RHEL invocation of the metadata proxy will report /usr/bin/python
1487 kill_metadata: KillFilter, root, python, -9
1488 kill_metadata2: KillFilter, root, python2, -9
1489 kill_metadata7: KillFilter, root, python2.7, -9
1490 kill_metadata3: KillFilter, root, python3, -9
1491 kill_metadata35: KillFilter, root, python3.5, -9
1492 kill_metadata36: KillFilter, root, python3.6, -9
1493 kill_metadata37: KillFilter, root, python3.7, -9
1494
1495 # ip_lib
1496 ip: IpFilter, ip, root
1497 find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
1498 ip_exec: IpNetnsExecFilter, ip, root
1499 ebtables:
1500 pods:
1501 - dhcp_agent
1502 - l3_agent
1503 - lb_agent
1504 - metadata_agent
1505 - ovn_metadata_agent
1506 - ovs_agent
1507 - sriov_agent
1508 content: |
1509 # neutron-rootwrap command filters for nodes on which neutron is
1510 # expected to control network
1511 #
1512 # This file should be owned by (and only-writeable by) the root user
1513
1514 # format seems to be
1515 # cmd-name: filter-name, raw-command, user, args
1516
1517 [Filters]
1518
1519 ebtables: CommandFilter, ebtables, root
1520 iptables_firewall:
1521 pods:
1522 - dhcp_agent
1523 - l3_agent
1524 - lb_agent
1525 - metadata_agent
1526 - ovn_metadata_agent
1527 - ovs_agent
1528 - sriov_agent
1529 content: |
1530 # neutron-rootwrap command filters for nodes on which neutron is
1531 # expected to control network
1532 #
1533 # This file should be owned by (and only-writeable by) the root user
1534
1535 # format seems to be
1536 # cmd-name: filter-name, raw-command, user, args
1537
1538 [Filters]
1539
1540 # neutron/agent/linux/iptables_firewall.py
1541 # "iptables-save", ...
1542 iptables-save: CommandFilter, iptables-save, root
1543 iptables-restore: CommandFilter, iptables-restore, root
1544 ip6tables-save: CommandFilter, ip6tables-save, root
1545 ip6tables-restore: CommandFilter, ip6tables-restore, root
1546
1547 # neutron/agent/linux/iptables_firewall.py
1548 # "iptables", "-A", ...
1549 iptables: CommandFilter, iptables, root
1550 ip6tables: CommandFilter, ip6tables, root
1551
1552 # neutron/agent/linux/iptables_firewall.py
1553 sysctl: CommandFilter, sysctl, root
1554
1555 # neutron/agent/linux/ip_conntrack.py
1556 conntrack: CommandFilter, conntrack, root
1557 linuxbridge_plugin:
1558 pods:
1559 - dhcp_agent
1560 - l3_agent
1561 - lb_agent
1562 - metadata_agent
1563 - ovn_metadata_agent
1564 - ovs_agent
1565 - sriov_agent
1566 content: |
1567 # neutron-rootwrap command filters for nodes on which neutron is
1568 # expected to control network
1569 #
1570 # This file should be owned by (and only-writeable by) the root user
1571
1572 # format seems to be
1573 # cmd-name: filter-name, raw-command, user, args
1574
1575 [Filters]
1576
1577 # linuxbridge-agent
1578 # unclear whether both variants are necessary, but I'm transliterating
1579 # from the old mechanism
1580 brctl: CommandFilter, brctl, root
1581 bridge: CommandFilter, bridge, root
1582
1583 # ip_lib
1584 ip: IpFilter, ip, root
1585 find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
1586 ip_exec: IpNetnsExecFilter, ip, root
1587
1588 # tc commands needed for QoS support
1589 tc_replace_tbf: RegExpFilter, tc, root, tc, qdisc, replace, dev, .+, root, tbf, rate, .+, latency, .+, burst, .+
1590 tc_add_ingress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, ingress, handle, .+
1591 tc_delete: RegExpFilter, tc, root, tc, qdisc, del, dev, .+, .+
1592 tc_show_qdisc: RegExpFilter, tc, root, tc, qdisc, show, dev, .+
1593 tc_show_filters: RegExpFilter, tc, root, tc, filter, show, dev, .+, parent, .+
1594 tc_add_filter: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, all, prio, .+, basic, police, rate, .+, burst, .+, mtu, .+, drop
1595 openvswitch_plugin:
1596 pods:
1597 - dhcp_agent
1598 - l3_agent
1599 - lb_agent
1600 - metadata_agent
1601 - ovn_metadata_agent
1602 - ovs_agent
1603 - sriov_agent
1604 content: |
1605 # neutron-rootwrap command filters for nodes on which neutron is
1606 # expected to control network
1607 #
1608 # This file should be owned by (and only-writeable by) the root user
1609
1610 # format seems to be
1611 # cmd-name: filter-name, raw-command, user, args
1612
1613 [Filters]
1614
1615 # openvswitch-agent
1616 # unclear whether both variants are necessary, but I'm transliterating
1617 # from the old mechanism
1618 ovs-vsctl: CommandFilter, ovs-vsctl, root
1619 # NOTE(yamamoto): of_interface=native doesn't use ovs-ofctl
1620 ovs-ofctl: CommandFilter, ovs-ofctl, root
1621 ovs-appctl: CommandFilter, ovs-appctl, root
1622 kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9
1623 ovsdb-client: CommandFilter, ovsdb-client, root
1624 xe: CommandFilter, xe, root
1625
1626 # ip_lib
1627 ip: IpFilter, ip, root
1628 find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
1629 ip_exec: IpNetnsExecFilter, ip, root
1630
1631 # needed for FDB extension
1632 bridge: CommandFilter, bridge, root
1633 privsep:
1634 pods:
1635 - dhcp_agent
1636 - l3_agent
1637 - lb_agent
1638 - metadata_agent
1639 - ovn_metadata_agent
1640 - ovs_agent
1641 - sriov_agent
1642 - netns_cleanup_cron
1643 content: |
1644 # Command filters to allow privsep daemon to be started via rootwrap.
1645 #
1646 # This file should be owned by (and only-writeable by) the root user
1647
1648 [Filters]
1649
1650 # By installing the following, the local admin is asserting that:
1651 #
1652 # 1. The python module load path used by privsep-helper
1653 # command as root (as started by sudo/rootwrap) is trusted.
1654 # 2. Any oslo.config files matching the --config-file
1655 # arguments below are trusted.
1656 # 3. Users allowed to run sudo/rootwrap with this configuration(*) are
1657 # also allowed to invoke python "entrypoint" functions from
1658 # --privsep_context with the additional (possibly root) privileges
1659 # configured for that context.
1660 #
1661 # (*) ie: the user is allowed by /etc/sudoers to run rootwrap as root
1662 #
1663 # In particular, the oslo.config and python module path must not
1664 # be writeable by the unprivileged user.
1665
1666 # oslo.privsep default neutron context
1667 privsep: PathFilter, privsep-helper, root,
1668 --config-file, /etc,
1669 --privsep_context, neutron.privileged.default,
1670 --privsep_sock_path, /
1671
1672 # NOTE: A second `--config-file` arg can also be added above. Since
1673 # many neutron components are installed like that (eg: by devstack).
1674 # Adjust to suit local requirements.
1675 linux_vxlan:
1676 pods:
1677 - bagpipe_bgp
1678 content: |
1679 # bagpipe-bgp-rootwrap command filters for nodes on which bagpipe-bgp is
1680 # expected to control VXLAN Linux Bridge dataplane
1681 #
1682 # This file should be owned by (and only-writeable by) the root user
1683
1684 # format seems to be
1685 # cmd-name: filter-name, raw-command, user, args
1686
1687 [Filters]
1688
1689 #
1690 modprobe: CommandFilter, modprobe, root
1691
1692 #
1693 brctl: CommandFilter, brctl, root
1694 bridge: CommandFilter, bridge, root
1695
1696 # ip_lib
1697 ip: IpFilter, ip, root
1698 ip_exec: IpNetnsExecFilter, ip, root
1699
1700 # shell (for piped commands)
1701 sh: CommandFilter, sh, root
1702 mpls_ovs_dataplane:
1703 pods:
1704 - bagpipe_bgp
1705 content: |
1706 # bagpipe-bgp-rootwrap command filters for nodes on which bagpipe-bgp is
1707 # expected to control MPLS OpenVSwitch dataplane
1708 #
1709 # This file should be owned by (and only-writeable by) the root user
1710
1711 # format seems to be
1712 # cmd-name: filter-name, raw-command, user, args
1713
1714 [Filters]
1715
1716 # openvswitch
1717 ovs-vsctl: CommandFilter, ovs-vsctl, root
1718 ovs-ofctl: CommandFilter, ovs-ofctl, root
1719
1720 # ip_lib
1721 ip: IpFilter, ip, root
1722 ip_exec: IpNetnsExecFilter, ip, root
1723
1724 # shell (for piped commands)
1725 sh: CommandFilter, sh, root
1726 neutron:
1727 DEFAULT:
1728 metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy
1729 log_config_append: /etc/neutron/logging.conf
1730 # NOTE(portdirect): the bind port should not be defined, and is manipulated
1731 # via the endpoints section.
1732 bind_port: null
1733 default_availability_zones: nova
1734 api_workers: 1
1735 rpc_workers: 4
1736 allow_overlapping_ips: True
1737 state_path: /var/lib/neutron
1738 # core_plugin can be: ml2, calico
1739 core_plugin: ml2
1740 # service_plugin can be: router, odl-router, empty for calico,
1741 # networking_ovn.l3.l3_ovn.OVNL3RouterPlugin for OVN
1742 service_plugins: router
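      # Example (illustrative, taken from the note above): when L3 is provided
      # by OVN, this value could instead be overridden to:
      # service_plugins: networking_ovn.l3.l3_ovn.OVNL3RouterPlugin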
1743 allow_automatic_l3agent_failover: True
1744 l3_ha: True
1745 max_l3_agents_per_router: 2
1746 l3_ha_network_type: vxlan
1747 network_auto_schedule: True
1748 router_auto_schedule: True
1749 # (NOTE)portdirect: if unset this is populated dynamically from the value in
1750 # 'network.backend' to sane defaults.
1751 interface_driver: null
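      # Example (illustrative): to pin the driver explicitly for an
      # openvswitch-wired deployment rather than relying on the dynamic
      # population from 'network.backend':
      # interface_driver: openvswitch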
1752 oslo_concurrency:
1753 lock_path: /var/lib/neutron/tmp
1754 database:
1755 max_retries: -1
1756 agent:
1757 root_helper: sudo /var/lib/openstack/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
1758 root_helper_daemon: sudo /var/lib/openstack/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
1759 oslo_messaging_notifications:
1760 driver: messagingv2
1761 oslo_messaging_rabbit:
1762 rabbit_ha_queues: true
1763 oslo_middleware:
1764 enable_proxy_headers_parsing: true
1765 oslo_policy:
1766 policy_file: /etc/neutron/policy.yaml
1767 ovn:
1768 enable_distributed_floating_ip: true
1769 ovn_metadata_enabled: true
1770 nova:
1771 auth_type: password
1772 auth_version: v3
1773 endpoint_type: internal
1774 placement:
1775 auth_type: password
1776 auth_version: v3
1777 endpoint_type: internal
1778 designate:
1779 auth_type: password
1780 auth_version: v3
1781 endpoint_type: internal
1782 allow_reverse_dns_lookup: true
1783 ironic:
1784 endpoint_type: internal
1785 keystone_authtoken:
1786 memcache_security_strategy: ENCRYPT
1787 auth_type: password
1788 auth_version: v3
1789 service_type: network
1790 octavia:
1791 request_poll_timeout: 3000
1792 logging:
1793 loggers:
1794 keys:
1795 - root
1796 - neutron
1797 - neutron_taas
1798 handlers:
1799 keys:
1800 - stdout
1801 - stderr
1802 - "null"
1803 formatters:
1804 keys:
1805 - context
1806 - default
1807 logger_root:
1808 level: WARNING
1809 handlers: 'null'
1810 logger_neutron:
1811 level: INFO
1812 handlers:
1813 - stdout
1814 qualname: neutron
1815 logger_neutron_taas:
1816 level: INFO
1817 handlers:
1818 - stdout
1819 qualname: neutron_taas
1820 logger_amqp:
1821 level: WARNING
1822 handlers: stderr
1823 qualname: amqp
1824 logger_amqplib:
1825 level: WARNING
1826 handlers: stderr
1827 qualname: amqplib
1828 logger_eventletwsgi:
1829 level: WARNING
1830 handlers: stderr
1831 qualname: eventlet.wsgi.server
1832 logger_sqlalchemy:
1833 level: WARNING
1834 handlers: stderr
1835 qualname: sqlalchemy
1836 logger_boto:
1837 level: WARNING
1838 handlers: stderr
1839 qualname: boto
1840 handler_null:
1841 class: logging.NullHandler
1842 formatter: default
1843 args: ()
1844 handler_stdout:
1845 class: StreamHandler
1846 args: (sys.stdout,)
1847 formatter: context
1848 handler_stderr:
1849 class: StreamHandler
1850 args: (sys.stderr,)
1851 formatter: context
1852 formatter_context:
1853 class: oslo_log.formatters.ContextFormatter
1854 datefmt: "%Y-%m-%d %H:%M:%S"
1855 formatter_default:
1856 format: "%(message)s"
1857 datefmt: "%Y-%m-%d %H:%M:%S"
1858 plugins:
1859 ml2_conf:
1860 ml2:
1861 extension_drivers: port_security
1862 # (NOTE)portdirect: if unset this is populated dynamically from the value
1863 # in 'network.backend' to sane defaults.
1864 mechanism_drivers: null
1865 type_drivers: flat,vlan,vxlan,local
1866 tenant_network_types: vxlan
1867 ml2_type_vxlan:
1868 vni_ranges: 1:1000
1869 vxlan_group: 239.1.1.1
1870 ml2_type_flat:
1871 flat_networks: "*"
1872 # If you want to use the external network as a tagged provider network,
1873 # a range should be specified including the intended VLAN target
1874 # using ml2_type_vlan.network_vlan_ranges:
1875 # ml2_type_vlan:
1876 # network_vlan_ranges: "external:1100:1110"
1877 ml2_type_geneve:
1878 vni_ranges: 1:65536
1879 max_header_size: 38
1880 agent:
1881 extensions: ""
1882 ml2_conf_sriov: null
1883 taas:
1884 taas:
1885 enabled: False
1886 openvswitch_agent:
1887 agent:
1888 tunnel_types: vxlan
1889 l2_population: True
1890 arp_responder: True
1891 ovs:
1892 bridge_mappings: "external:br-ex"
1893 securitygroup:
1894 firewall_driver: neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
1895 linuxbridge_agent:
1896 linux_bridge:
1897 # To define Flat and VLAN connections, in LB we can assign
1898 # specific interface to the flat/vlan network name using:
1899 # physical_interface_mappings: "external:eth3"
1900 # Or we can set the mapping between the network and bridge:
1901 bridge_mappings: "external:br-ex"
1902 # The two above options are exclusive, do not use both of them at once
1903 securitygroup:
1904 firewall_driver: iptables
1905 vxlan:
1906 l2_population: True
1907 arp_responder: True
1908 macvtap_agent: null
1909 sriov_agent:
1910 securitygroup:
1911 firewall_driver: neutron.agent.firewall.NoopFirewallDriver
1912 sriov_nic:
1913 physical_device_mappings: physnet2:enp3s0f1
1914 # NOTE: do not use null here, use an empty string
1915 exclude_devices: ""
1916 dhcp_agent:
1917 DEFAULT:
1918 # (NOTE)portdirect: if unset this is populated dynamically from the value in
1919 # 'network.backend' to sane defaults.
1920 interface_driver: null
1921 dnsmasq_config_file: /etc/neutron/dnsmasq.conf
1922 force_metadata: True
1923 dnsmasq: |
1924 #no-hosts
1925 #port=5353
1926 #cache-size=500
1927 #no-negcache
1928 #dns-forward-max=100
1929 #resolve-file=
1930 #strict-order
1931 #bind-interface
1932 #bind-dynamic
1933 #domain=
1934 #dhcp-range=10.10.10.10,10.10.10.100,24h
1935 #dhcp-lease-max=150
1936 #dhcp-host=11:22:33:44:55:66,ignore
1937 #dhcp-option=3,10.10.10.1
1938 #dhcp-option-force=26,1450
1939
1940 l3_agent:
1941 DEFAULT:
1942 # (NOTE)portdirect: if unset this is populated dynamically from the value in
1943 # 'network.backend' to sane defaults.
1944 interface_driver: null
1945 agent_mode: legacy
1946 metering_agent: null
1947 metadata_agent:
1948 DEFAULT:
1949 # we cannot change the proxy socket path as it is declared
1950 # as a hostPath volume from agent daemonsets
1951 metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy
1952 metadata_proxy_shared_secret: "password"
1953 cache:
1954 enabled: true
1955 backend: dogpile.cache.memcached
1956 bagpipe_bgp: {}
1957 ovn_metadata_agent:
1958 DEFAULT:
1959 # we cannot change the proxy socket path as it is declared
1960 # as a hostPath volume from agent daemonsets
1961 metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy
1962 metadata_proxy_shared_secret: "password"
1963 metadata_workers: 2
1964 cache:
1965 enabled: true
1966 backend: dogpile.cache.memcached
1967 ovs:
1968 ovsdb_connection: unix:/run/openvswitch/db.sock
1969
1970 rabbitmq:
1971 # NOTE(rk760n): adding rmq policy to mirror messages from notification queues and set expiration time for the ones
1972 policies:
1973 - vhost: "neutron"
1974 name: "ha_ttl_neutron"
1975 definition:
1976 # mirror messages to other nodes in the rmq cluster
1977 ha-mode: "all"
1978 ha-sync-mode: "automatic"
1979 # 70s
1980 message-ttl: 70000
1981 priority: 0
1982 apply-to: all
1983 pattern: '^(?!(amq\.|reply_)).*'
1984 ## NOTE: "besteffort" is meant only for dev envs with mixed compute types.
1985 ## It helps prevent the sriov init script from failing due to a mismatched NIC.
1986 ## For prod envs, the target NIC should match, and the init script should fail otherwise.
1987 ## sriov_init:
1988 ## - besteffort
1989 sriov_init:
1990 -
1991 # auto_bridge_add is a table of "bridge: interface" pairs
1992 # To automatically add physical interfaces to specific bridges,
1993 # for example eth3 to bridge br-physnet1, if0 to br0, and iface_two
1994 # to br1, do something like:
1995 #
1996 # auto_bridge_add:
1997 # br-physnet1: eth3
1998 # br0: if0
1999 # br1: iface_two
2000 # br-ex will be added by default
2001 auto_bridge_add:
2002 br-ex: null
2003
2004 # Network off-loading configuration
2005 netoffload:
2006 enabled: false
2007 asap2:
2008 # - dev: enp97s0f0
2009 # vfs: 16
2010
2011 # configuration of OVS DPDK bridges and NICs
2012 # this is a separate section and not part of the auto_bridge_add section
2013 # because additional parameters are needed
2014 ovs_dpdk:
2015 enabled: false
2016 # Setting update_dpdk_bond_config to true keeps the default behavior,
2017 # which may disrupt OVS DPDK traffic when the neutron OVS agent
2018 # restarts or when DPDK NIC/bond configurations are changed.
2019 # Setting it to false configures DPDK only on the first run and
2020 # skips NIC/bond reconfiguration on restart or config update.
2021 update_dpdk_bond_config: true
2022 driver: uio_pci_generic
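 # uio_pci_generic is one of several drivers DPDK can bind NICs with;
 # vfio-pci is a common alternative on hosts with a working IOMMU, e.g.:
 # driver: vfio-pci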
2023 # In case bonds are configured, the nics which are part of those bonds
2024 # must NOT be provided here.
2025 nics:
2026 - name: dpdk0
2027 pci_id: '0000:05:00.0'
2028 # Set VF Index in case some particular VF(s) need to be
2029 # used with ovs-dpdk.
2030 # vf_index: 0
2031 bridge: br-phy
2032 migrate_ip: true
2033 n_rxq: 2
2034 n_txq: 2
2035 pmd_rxq_affinity: "0:3,1:27"
2036 ofport_request: 1
2037 # optional parameters for tuning the OVS DPDK config
2038 # in alignment with the available hardware resources
2039 # mtu: 2000
2040 # n_rxq_size: 1024
2041 # n_txq_size: 1024
2042 # vhost-iommu-support: true
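 # pmd_rxq_affinity above follows the OVS "<rxq-id>:<core-id>" convention,
 # so "0:3,1:27" pins rx queue 0 to PMD core 3 and rx queue 1 to core 27;
 # the core numbers are examples and should match the local CPU layout.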
2043 bridges:
2044 - name: br-phy
2045 # optional parameter, in case tunnel traffic needs to be transported over a vlan underlay
2046 # - tunnel_underlay_vlan: 45
2047 # Optional parameter for configuring bonding in OVS-DPDK
2048 # - name: br-phy-bond0
2049 # bonds:
2050 # - name: dpdkbond0
2051 # bridge: br-phy-bond0
2052 # # The IP from the first nic in nics list shall be used
2053 # migrate_ip: true
2054 # mtu: 2000
2055 # # Please note that n_rxq is set for each NIC individually
2056 # # rather than denoting the total number of rx queues for
2057 # # the bond as a whole. So setting n_rxq = 2 below for ex.
2058 # # would be 4 rx queues in total for the bond.
2059 # # Same for n_txq
2060 # n_rxq: 2
2061 # n_txq: 2
2062 # ofport_request: 1
2063 # n_rxq_size: 1024
2064 # n_txq_size: 1024
2065 # vhost-iommu-support: true
2066 # ovs_options: "bond_mode=active-backup"
2067 # nics:
2068 # - name: dpdk_b0s0
2069 # pci_id: '0000:06:00.0'
2070 # pmd_rxq_affinity: "0:3,1:27"
2071 # # Set VF Index in case some particular VF(s) need to be
2072 # # used with ovs-dpdk. In which case pci_id of PF must be
2073 # # provided above.
2074 # # vf_index: 0
2075 # - name: dpdk_b0s1
2076 # pci_id: '0000:07:00.0'
2077 # pmd_rxq_affinity: "0:3,1:27"
2078 # # Set VF Index in case some particular VF(s) need to be
2079 # # used with ovs-dpdk. In which case pci_id of PF must be
2080 # # provided above.
2081 # # vf_index: 0
2082 #
2083 # Set the log level for each target module (default level is always dbg)
2084 # Supported log levels are: off, emer, err, warn, info, dbg
2085 #
2086 # modules:
2087 # - name: dpdk
2088 # log_level: info
2089
2090# Names of secrets used by bootstrap and environmental checks
2091secrets:
2092 identity:
2093 admin: neutron-keystone-admin
2094 neutron: neutron-keystone-user
2095 test: neutron-keystone-test
2096 oslo_db:
2097 admin: neutron-db-admin
2098 neutron: neutron-db-user
2099 oslo_messaging:
2100 admin: neutron-rabbitmq-admin
2101 neutron: neutron-rabbitmq-user
2102 tls:
2103 compute_metadata:
2104 metadata:
2105 internal: metadata-tls-metadata
2106 network:
2107 server:
2108 public: neutron-tls-public
2109 internal: neutron-tls-server
2110 oci_image_registry:
2111 neutron: neutron-oci-image-registry
2112
2113# typically overridden by environmental
2114# values, but should include all endpoints
2115# required by this chart
2116endpoints:
2117 cluster_domain_suffix: cluster.local
2118 local_image_registry:
2119 name: docker-registry
2120 namespace: docker-registry
2121 hosts:
2122 default: localhost
2123 internal: docker-registry
2124 node: localhost
2125 host_fqdn_override:
2126 default: null
2127 port:
2128 registry:
2129 node: 5000
2130 oci_image_registry:
2131 name: oci-image-registry
2132 namespace: oci-image-registry
2133 auth:
2134 enabled: false
2135 neutron:
2136 username: neutron
2137 password: password
2138 hosts:
2139 default: localhost
2140 host_fqdn_override:
2141 default: null
2142 port:
2143 registry:
2144 default: null
2145 oslo_db:
2146 auth:
2147 admin:
2148 username: root
2149 password: password
2150 secret:
2151 tls:
2152 internal: mariadb-tls-direct
2153 neutron:
2154 username: neutron
2155 password: password
2156 hosts:
2157 default: mariadb
2158 host_fqdn_override:
2159 default: null
2160 path: /neutron
2161 scheme: mysql+pymysql
2162 port:
2163 mysql:
2164 default: 3306
2165 oslo_messaging:
2166 auth:
2167 admin:
2168 username: rabbitmq
2169 password: password
2170 secret:
2171 tls:
2172 internal: rabbitmq-tls-direct
2173 neutron:
2174 username: neutron
2175 password: password
2176 statefulset:
2177 replicas: 2
2178 name: rabbitmq-rabbitmq
2179 hosts:
2180 default: rabbitmq
2181 host_fqdn_override:
2182 default: null
2183 path: /neutron
2184 scheme: rabbit
2185 port:
2186 amqp:
2187 default: 5672
2188 http:
2189 default: 15672
2190 oslo_cache:
2191 auth:
2192 # NOTE(portdirect): this is used to define the value for the keystone
2193 # authtoken cache encryption key; if not set, it will be populated
2194 # automatically with a random value, but to take advantage of
2195 # this feature all services should be configured to use the same
2196 # key and the same memcached service.
2197 memcache_secret_key: null
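 # An illustrative override (the value is a placeholder) that pins the
 # same key for every service sharing the memcached instance:
 # endpoints:
 #   oslo_cache:
 #     auth:
 #       memcache_secret_key: "replace-with-a-long-random-string"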
2198 hosts:
2199 default: memcached
2200 host_fqdn_override:
2201 default: null
2202 port:
2203 memcache:
2204 default: 11211
2205 compute:
2206 name: nova
2207 hosts:
2208 default: nova-api
2209 public: nova
2210 host_fqdn_override:
2211 default: null
2212 path:
2213 default: "/v2.1/%(tenant_id)s"
2214 scheme:
2215 default: 'http'
2216 port:
2217 api:
2218 default: 8774
2219 public: 80
2220 novncproxy:
2221 default: 6080
2222 compute_metadata:
2223 name: nova
2224 hosts:
2225 default: nova-metadata
2226 public: metadata
2227 host_fqdn_override:
2228 default: null
2229 path:
2230 default: /
2231 scheme:
2232 default: 'http'
2233 port:
2234 metadata:
2235 default: 8775
2236 public: 80
2237 identity:
2238 name: keystone
2239 auth:
2240 admin:
2241 region_name: RegionOne
2242 username: admin
2243 password: password
2244 project_name: admin
2245 user_domain_name: default
2246 project_domain_name: default
2247 neutron:
2248 role: admin
2249 region_name: RegionOne
2250 username: neutron
2251 password: password
2252 project_name: service
2253 user_domain_name: service
2254 project_domain_name: service
2255 nova:
2256 region_name: RegionOne
2257 project_name: service
2258 username: nova
2259 password: password
2260 user_domain_name: service
2261 project_domain_name: service
2262 placement:
2263 region_name: RegionOne
2264 project_name: service
2265 username: placement
2266 password: password
2267 user_domain_name: service
2268 project_domain_name: service
2269 designate:
2270 region_name: RegionOne
2271 project_name: service
2272 username: designate
2273 password: password
2274 user_domain_name: service
2275 project_domain_name: service
2276 ironic:
2277 region_name: RegionOne
2278 project_name: service
2279 username: ironic
2280 password: password
2281 user_domain_name: service
2282 project_domain_name: service
2283 test:
2284 role: admin
2285 region_name: RegionOne
2286 username: neutron-test
2287 password: password
2288 # NOTE: this project will be purged and reset if
2289 # conf.rally_tests.force_project_purge is set to true,
2290 # which may be required upon test failure. Be aware that this will
2291 # expunge all OpenStack objects, so if this is used, a separate project
2292 # should be used for each helm test, and it should be ensured
2293 # that this project is not in use by other tenants
2294 project_name: test
2295 user_domain_name: service
2296 project_domain_name: service
2297 hosts:
2298 default: keystone
2299 internal: keystone-api
2300 host_fqdn_override:
2301 default: null
2302 path:
2303 default: /v3
2304 scheme:
2305 default: http
2306 port:
2307 api:
2308 default: 80
2309 internal: 5000
2310 network:
2311 name: neutron
2312 hosts:
2313 default: neutron-server
2314 public: neutron
2315 host_fqdn_override:
2316 default: null
2317 # NOTE(portdirect): this chart supports TLS for FQDN-overridden public
2318 # endpoints using the following format:
2319 # public:
2320 # host: null
2321 # tls:
2322 # crt: null
2323 # key: null
2324 path:
2325 default: null
2326 scheme:
2327 default: 'http'
2328 service: 'http'
2329 port:
2330 api:
2331 default: 9696
2332 public: 80
2333 service: 9696
2334 load_balancer:
2335 name: octavia
2336 hosts:
2337 default: octavia-api
2338 public: octavia
2339 host_fqdn_override:
2340 default: null
2341 path:
2342 default: null
2343 scheme:
2344 default: http
2345 port:
2346 api:
2347 default: 9876
2348 public: 80
2349 fluentd:
2350 namespace: osh-infra
2351 name: fluentd
2352 hosts:
2353 default: fluentd-logging
2354 host_fqdn_override:
2355 default: null
2356 path:
2357 default: null
2358 scheme: 'http'
2359 port:
2360 service:
2361 default: 24224
2362 metrics:
2363 default: 24220
2364 dns:
2365 name: designate
2366 hosts:
2367 default: designate-api
2368 public: designate
2369 host_fqdn_override:
2370 default: null
2371 path:
2372 default: /
2373 scheme:
2374 default: 'http'
2375 port:
2376 api:
2377 default: 9001
2378 public: 80
2379 baremetal:
2380 name: ironic
2381 hosts:
2382 default: ironic-api
2383 public: ironic
2384 host_fqdn_override:
2385 default: null
2386 path:
2387 default: null
2388 scheme:
2389 default: 'http'
2390 port:
2391 api:
2392 default: 6385
2393 public: 80
2394 # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress.
2395 # They are used to enable the egress K8s network policy.
2396 kube_dns:
2397 namespace: kube-system
2398 name: kubernetes-dns
2399 hosts:
2400 default: kube-dns
2401 host_fqdn_override:
2402 default: null
2403 path:
2404 default: null
2405 scheme: http
2406 port:
2407 dns:
2408 default: 53
2409 protocol: UDP
2410 ingress:
2411 namespace: null
2412 name: ingress
2413 hosts:
2414 default: ingress
2415 port:
2416 ingress:
2417 default: 80
2418
2419network_policy:
2420 neutron:
2421 # TODO(lamt): Need to tighten this ingress for security.
2422 ingress:
2423 - {}
2424 egress:
2425 - {}
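 # These entries appear to be rendered directly into the NetworkPolicy
 # spec, so a tighter ingress rule could look like the sketch below
 # (selector labels are hypothetical):
 # neutron:
 #   ingress:
 #     - from:
 #         - podSelector:
 #             matchLabels:
 #               application: nova
 #       ports:
 #         - protocol: TCP
 #           port: 9696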
2426
2427helm3_hook: true
2428
2429health_probe:
2430 logging:
2431 level: ERROR
2432
2433tls:
2434 identity: false
2435 oslo_messaging: false
2436 oslo_db: false
2437
2438manifests:
2439 certificates: false
2440 configmap_bin: true
2441 configmap_etc: true
2442 daemonset_dhcp_agent: true
2443 daemonset_l3_agent: true
2444 daemonset_lb_agent: true
2445 daemonset_metadata_agent: true
2446 daemonset_ovs_agent: true
2447 daemonset_sriov_agent: true
2448 daemonset_l2gw_agent: false
2449 daemonset_bagpipe_bgp: false
2450 daemonset_netns_cleanup_cron: true
2451 deployment_ironic_agent: false
2452 deployment_server: true
2453 ingress_server: true
2454 job_bootstrap: true
2455 job_db_init: true
2456 job_db_sync: true
2457 job_db_drop: false
2458 job_image_repo_sync: true
2459 job_ks_endpoints: true
2460 job_ks_service: true
2461 job_ks_user: true
2462 job_rabbit_init: true
2463 pdb_server: true
2464 pod_rally_test: true
2465 network_policy: false
2466 secret_db: true
2467 secret_ingress_tls: true
2468 secret_keystone: true
2469 secret_rabbitmq: true
2470 secret_registry: true
2471 service_ingress_server: true
2472 service_server: true
2473...