# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Default values for neutron.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value

---
release_group: null

images:
  tags:
    bootstrap: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    test: docker.io/xrally/xrally-openstack:2.0.0
    purge_test: docker.io/openstackhelm/ospurge:latest
    db_init: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    neutron_db_sync: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    db_drop: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    rabbit_init: docker.io/rabbitmq:3.7-management
    ks_user: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    ks_service: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    ks_endpoints: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    netoffload: ghcr.io/vexxhost/netoffload:v1.0.1
    neutron_server: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_dhcp: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_metadata: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_ovn_metadata: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_l3: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_l2gw: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_openvswitch_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_linuxbridge_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_sriov_agent: docker.io/openstackhelm/neutron:stein-18.04-sriov
    neutron_sriov_agent_init: docker.io/openstackhelm/neutron:stein-18.04-sriov
    neutron_bagpipe_bgp: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_ironic_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    neutron_netns_cleanup_cron: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
    dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
    image_repo_sync: docker.io/docker:17.07.0
  pull_policy: "IfNotPresent"
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync

labels:
  agent:
    dhcp:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
    l3:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
    metadata:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
    l2gw:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  lb:
    node_selector_key: linuxbridge
    node_selector_value: enabled
  # openvswitch is a special case, requiring a special
  # label that can apply to both control hosts
  # and compute hosts, until we get more sophisticated
  # with our daemonset scheduling
  ovs:
    node_selector_key: openvswitch
    node_selector_value: enabled
  sriov:
    node_selector_key: sriov
    node_selector_value: enabled
  bagpipe_bgp:
    node_selector_key: openstack-compute-node
    node_selector_value: enabled
  server:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  ironic_agent:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  netns_cleanup_cron:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  test:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled

network:
  # provide what type of network wiring will be used
  backend:
    - openvswitch
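  # For example, to use a different wiring backend, this list can be
  # overridden (a sketch; the value must match the agents you deploy,
  # e.g. linuxbridge or ovn):
  # backend:
  #   - linuxbridge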
  # NOTE(Portdirect): Share network namespaces with the host,
  # allowing agents to be restarted without packet loss and simpler
  # debugging. This feature requires mount propagation support.
  share_namespaces: true
  interface:
    # Tunnel interface will be used for VXLAN tunneling.
    tunnel: null
    # If tunnel is null there is a fallback mechanism to search
    # for interface with routing using tunnel network cidr.
    tunnel_network_cidr: "0/0"
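    # For example, to pin VXLAN tunneling to a specific host interface rather
    # than relying on the CIDR-based lookup (interface name is illustrative):
    # tunnel: ens3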
    # To perform setup of network interfaces using the SR-IOV init
    # container you can use a section similar to:
    # sriov:
    #   - device: ${DEV}
    #     num_vfs: 8
    #     mtu: 9214
    #     promisc: false
    #     qos:
    #       - vf_num: 0
    #         share: 10
    #         queues_per_vf:
    #           - num_queues: 16
    #             exclude_vf: 0,11,21
  server:
    ingress:
      public: true
      classes:
        namespace: "nginx"
        cluster: "nginx-cluster"
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
    external_policy_local: false
    node_port:
      enabled: false
      port: 30096

bootstrap:
  enabled: false
  ks_user: neutron
  script: |
    openstack token issue
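    # Example (hypothetical): additional bootstrap commands could be appended
    # here, e.g. creating a shared external provider network:
    # openstack network create --share --external \
    #   --provider-network-type flat \
    #   --provider-physical-network external public-net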

dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
          - neutron-image-repo-sync
        services:
          - endpoint: node
            service: local_image_registry
    targeted:
      sriov: {}
      l2gateway: {}
      bagpipe_bgp: {}
      ovn: {}
      openvswitch:
        dhcp:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-ovs-agent
        l3:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-ovs-agent
        metadata:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-ovs-agent
      linuxbridge:
        dhcp:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-lb-agent
        l3:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-lb-agent
        metadata:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-lb-agent
        lb_agent:
          pod: null
  static:
    bootstrap:
      services:
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute
    db_drop:
      services:
        - endpoint: internal
          service: oslo_db
    db_init:
      services:
        - endpoint: internal
          service: oslo_db
    db_sync:
      jobs:
        - neutron-db-init
      services:
        - endpoint: internal
          service: oslo_db
    dhcp:
      pod: null
      jobs:
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute
    ks_endpoints:
      jobs:
        - neutron-ks-service
      services:
        - endpoint: internal
          service: identity
    ks_service:
      services:
        - endpoint: internal
          service: identity
    ks_user:
      services:
        - endpoint: internal
          service: identity
    rabbit_init:
      services:
        - service: oslo_messaging
          endpoint: internal
    l3:
      pod: null
      jobs:
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute
    lb_agent:
      pod: null
      jobs:
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: network
    metadata:
      pod: null
      jobs:
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute
        - endpoint: public
          service: compute_metadata
    ovn_metadata:
      pod:
        - requireSameNode: true
          labels:
            application: ovn
            component: ovn-controller
      services:
        - endpoint: internal
          service: compute_metadata
    ovs_agent:
      jobs:
        - neutron-rabbit-init
      pod:
        - requireSameNode: true
          labels:
            application: openvswitch
            component: server
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: network
    server:
      jobs:
        - neutron-db-sync
        - neutron-ks-user
        - neutron-ks-endpoints
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: oslo_cache
        - endpoint: internal
          service: identity
    ironic_agent:
      jobs:
        - neutron-db-sync
        - neutron-ks-user
        - neutron-ks-endpoints
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: oslo_cache
        - endpoint: internal
          service: identity
    tests:
      services:
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute
    image_repo_sync:
      services:
        - endpoint: internal
          service: local_image_registry

pod:
  use_fqdn:
    neutron_agent: true
  probes:
    rpc_timeout: 60
    rpc_retries: 2
    dhcp_agent:
      dhcp_agent:
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 190
            timeoutSeconds: 185
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 600
            timeoutSeconds: 580
    l3_agent:
      l3_agent:
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 190
            timeoutSeconds: 185
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 600
            timeoutSeconds: 580
    lb_agent:
      lb_agent:
        readiness:
          enabled: true
    metadata_agent:
      metadata_agent:
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 190
            timeoutSeconds: 185
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 600
            timeoutSeconds: 580
    ovn_metadata_agent:
      ovn_metadata_agent:
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 190
            timeoutSeconds: 185
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 600
            timeoutSeconds: 580
    ovs_agent:
      ovs_agent:
        readiness:
          enabled: true
          params:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 600
            timeoutSeconds: 580
    sriov_agent:
      sriov_agent:
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 190
            timeoutSeconds: 185
    bagpipe_bgp:
      bagpipe_bgp:
        readiness:
          enabled: true
          params:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 60
    l2gw_agent:
      l2gw_agent:
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 15
            timeoutSeconds: 65
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 90
            timeoutSeconds: 70
    server:
      server:
        readiness:
          enabled: true
          params:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 60
  security_context:
    neutron_dhcp_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_dhcp_agent:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_l2gw_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_l2gw_agent:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_bagpipe_bgp:
      pod:
        runAsUser: 42424
      container:
        neutron_bagpipe_bgp:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_l3_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_l3_agent:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_lb_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_lb_agent_kernel_modules:
          capabilities:
            add:
              - SYS_MODULE
              - SYS_CHROOT
          runAsUser: 0
          readOnlyRootFilesystem: true
        neutron_lb_agent_init:
          privileged: true
          runAsUser: 0
          readOnlyRootFilesystem: true
        neutron_lb_agent:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_metadata_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_metadata_agent_init:
          runAsUser: 0
          readOnlyRootFilesystem: true
    neutron_ovn_metadata_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_ovn_metadata_agent_init:
          runAsUser: 0
          readOnlyRootFilesystem: true
    neutron_ovs_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_openvswitch_agent_kernel_modules:
          capabilities:
            add:
              - SYS_MODULE
              - SYS_CHROOT
          runAsUser: 0
          readOnlyRootFilesystem: true
        netoffload:
          privileged: true
          runAsUser: 0
          readOnlyRootFilesystem: true
        neutron_ovs_agent_init:
          privileged: true
          runAsUser: 0
          readOnlyRootFilesystem: true
        neutron_ovs_agent:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_server:
      pod:
        runAsUser: 42424
      container:
        nginx:
          runAsUser: 0
          readOnlyRootFilesystem: false
        neutron_server:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
    neutron_sriov_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_sriov_agent_init:
          privileged: true
          runAsUser: 0
          readOnlyRootFilesystem: false
        neutron_sriov_agent:
          readOnlyRootFilesystem: true
          privileged: true
    neutron_ironic_agent:
      pod:
        runAsUser: 42424
      container:
        neutron_ironic_agent:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
    neutron_netns_cleanup_cron:
      pod:
        runAsUser: 42424
      container:
        neutron_netns_cleanup_cron:
          readOnlyRootFilesystem: true
          privileged: true
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
      weight:
        default: 10
  tolerations:
    neutron:
      enabled: false
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
          effect: NoSchedule
  mounts:
    neutron_server:
      init_container: null
      neutron_server:
        volumeMounts:
        volumes:
    neutron_dhcp_agent:
      init_container: null
      neutron_dhcp_agent:
        volumeMounts:
        volumes:
    neutron_l3_agent:
      init_container: null
      neutron_l3_agent:
        volumeMounts:
        volumes:
    neutron_lb_agent:
      init_container: null
      neutron_lb_agent:
        volumeMounts:
        volumes:
    neutron_metadata_agent:
      init_container: null
      neutron_metadata_agent:
        volumeMounts:
        volumes:
    neutron_ovn_metadata_agent:
      init_container: null
      neutron_ovn_metadata_agent:
        volumeMounts:
        volumes:
    neutron_ovs_agent:
      init_container: null
      neutron_ovs_agent:
        volumeMounts:
        volumes:
    neutron_sriov_agent:
      init_container: null
      neutron_sriov_agent:
        volumeMounts:
        volumes:
    neutron_l2gw_agent:
      init_container: null
      neutron_l2gw_agent:
        volumeMounts:
        volumes:
    bagpipe_bgp:
      init_container: null
      bagpipe_bgp:
        volumeMounts:
        volumes:
    neutron_ironic_agent:
      init_container: null
      neutron_ironic_agent:
        volumeMounts:
        volumes:
    neutron_netns_cleanup_cron:
      init_container: null
      neutron_netns_cleanup_cron:
        volumeMounts:
        volumes:
    neutron_tests:
      init_container: null
      neutron_tests:
        volumeMounts:
        volumes:
    neutron_bootstrap:
      init_container: null
      neutron_bootstrap:
        volumeMounts:
        volumes:
    neutron_db_sync:
      neutron_db_sync:
        volumeMounts:
          - name: db-sync-conf
            mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini
            subPath: ml2_conf.ini
            readOnly: true
        volumes:
  replicas:
    server: 1
    ironic_agent: 1
  lifecycle:
    upgrades:
      deployments:
        revision_history: 3
        pod_replacement_strategy: RollingUpdate
        rolling_update:
          max_unavailable: 1
          max_surge: 3
      daemonsets:
        pod_replacement_strategy: RollingUpdate
        dhcp_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        l3_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        lb_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        metadata_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        ovn_metadata_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        ovs_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        sriov_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        netns_cleanup_cron:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
    disruption_budget:
      server:
        min_available: 0
    termination_grace_period:
      server:
        timeout: 30
      ironic_agent:
        timeout: 30
  resources:
    enabled: false
    agent:
      dhcp:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      l3:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      lb:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      metadata:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ovn_metadata:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ovs:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      sriov:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      l2gw:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      bagpipe_bgp:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
    server:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    ironic_agent:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    netns_cleanup_cron:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    jobs:
      bootstrap:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      rabbit_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_drop:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_endpoints:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_service:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_user:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      tests:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      image_repo_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"

conf:
  rally_tests:
    force_project_purge: false
    run_tempest: false
    clean_up: |
      # NOTE: We will make the best effort to clean up rally generated networks and routers,
      # but should not block further automated deployment.
      set +e
      PATTERN="^[sc]_rally_"

      ROUTERS=$(openstack router list --format=value -c Name | grep -e $PATTERN | sort | tr -d '\r')
      NETWORKS=$(openstack network list --format=value -c Name | grep -e $PATTERN | sort | tr -d '\r')

      for ROUTER in $ROUTERS
      do
        openstack router unset --external-gateway $ROUTER
        openstack router set --disable --no-ha $ROUTER

        SUBNS=$(openstack router show $ROUTER -c interfaces_info --format=value | python -m json.tool | grep -oP '(?<="subnet_id": ")[a-f0-9\-]{36}(?=")' | sort | uniq)
        for SUBN in $SUBNS
        do
          openstack router remove subnet $ROUTER $SUBN
        done

        for PORT in $(openstack port list --router $ROUTER --format=value -c ID | tr -d '\r')
        do
          openstack router remove port $ROUTER $PORT
        done

        openstack router delete $ROUTER
      done

      for NETWORK in $NETWORKS
      do
        for PORT in $(openstack port list --network $NETWORK --format=value -c ID | tr -d '\r')
        do
          openstack port delete $PORT
        done
        openstack network delete $NETWORK
      done
      set -e
    tests:
      NeutronNetworks.create_and_delete_networks:
        - args:
            network_create_args: {}
          context:
            quotas:
              neutron:
                network: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_delete_ports:
        - args:
            network_create_args: {}
            port_create_args: {}
            ports_per_network: 10
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                port: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_delete_routers:
        - args:
            network_create_args: {}
            router_create_args: {}
            subnet_cidr_start: 1.1.0.0/30
            subnet_create_args: {}
            subnets_per_network: 2
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                router: -1
                subnet: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_delete_subnets:
        - args:
            network_create_args: {}
            subnet_cidr_start: 1.1.0.0/30
            subnet_create_args: {}
            subnets_per_network: 2
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                subnet: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_list_routers:
        - args:
            network_create_args: {}
            router_create_args: {}
            subnet_cidr_start: 1.1.0.0/30
            subnet_create_args: {}
            subnets_per_network: 2
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                router: -1
                subnet: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_list_subnets:
        - args:
            network_create_args: {}
            subnet_cidr_start: 1.1.0.0/30
            subnet_create_args: {}
            subnets_per_network: 2
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                subnet: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_show_network:
        - args:
            network_create_args: {}
          context:
            quotas:
              neutron:
                network: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_update_networks:
        - args:
            network_create_args: {}
            network_update_args:
              admin_state_up: false
          context:
            quotas:
              neutron:
                network: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_update_ports:
        - args:
            network_create_args: {}
            port_create_args: {}
            port_update_args:
              admin_state_up: false
              device_id: dummy_id
              device_owner: dummy_owner
            ports_per_network: 5
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                port: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_update_routers:
        - args:
            network_create_args: {}
            router_create_args: {}
            router_update_args:
              admin_state_up: false
            subnet_cidr_start: 1.1.0.0/30
            subnet_create_args: {}
            subnets_per_network: 2
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                router: -1
                subnet: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_update_subnets:
        - args:
            network_create_args: {}
            subnet_cidr_start: 1.4.0.0/16
            subnet_create_args: {}
            subnet_update_args:
              enable_dhcp: false
            subnets_per_network: 2
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                subnet: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.list_agents:
        - args:
            agent_args: {}
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronSecurityGroup.create_and_list_security_groups:
        - args:
            security_group_create_args: {}
          context:
            quotas:
              neutron:
                security_group: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronSecurityGroup.create_and_update_security_groups:
        - args:
            security_group_create_args: {}
            security_group_update_args: {}
          context:
            quotas:
              neutron:
                security_group: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
  paste: {}
  policy: {}
  api_audit_map:
    DEFAULT:
      target_endpoint_type: None
    custom_actions:
      add_router_interface: update/add
      remove_router_interface: update/remove
    path_keywords:
      floatingips: ip
      healthmonitors: healthmonitor
      health_monitors: health_monitor
      lb: None
      members: member
      metering-labels: label
      metering-label-rules: rule
      networks: network
      pools: pool
      ports: port
      routers: router
      quotas: quota
      security-groups: security-group
      security-group-rules: rule
      subnets: subnet
      vips: vip
    service_endpoints:
      network: service/network
  neutron_sudoers: |
    # This sudoers file supports rootwrap for both Kolla and LOCI Images.
    Defaults !requiretty
    Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin"
    neutron ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *, /var/lib/openstack/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *
    neutron ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf, /var/lib/openstack/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
  rootwrap: |
    # Configuration for neutron-rootwrap
    # This file should be owned by (and only-writeable by) the root user

    [DEFAULT]
    # List of directories to load filter definitions from (separated by ',').
    # These directories MUST all be only writeable by root !
    filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap,/var/lib/openstack/etc/neutron/rootwrap.d

    # List of directories to search executables in, in case filters do not
    # explicitly specify a full path (separated by ',')
    # If not specified, defaults to system PATH environment variable.
    # These directories MUST all be only writeable by root !
    exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin

    # Enable logging to syslog
    # Default value is False
    use_syslog=False

    # Which syslog facility to use.
    # Valid values include auth, authpriv, syslog, local0, local1...
    # Default value is 'syslog'
    syslog_log_facility=syslog

    # Which messages to log.
    # INFO means log all usage
    # ERROR means only log unsuccessful attempts
    syslog_log_level=ERROR

    [xenapi]
    # XenAPI configuration is only required by the L2 agent if it is to
    # target a XenServer/XCP compute host's dom0.
    xenapi_connection_url=<None>
    xenapi_connection_username=root
    xenapi_connection_password=<None>
  rootwrap_filters:
    debug:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovn_metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # This is needed because we should ping
        # from inside a namespace which requires root
        # _alt variants allow to match -c and -w in any order
        # (used by NeutronDebugAgent.ping_all)
        ping: RegExpFilter, ping, root, ping, -w, \d+, -c, \d+, [0-9\.]+
        ping_alt: RegExpFilter, ping, root, ping, -c, \d+, -w, \d+, [0-9\.]+
        ping6: RegExpFilter, ping6, root, ping6, -w, \d+, -c, \d+, [0-9A-Fa-f:]+
        ping6_alt: RegExpFilter, ping6, root, ping6, -c, \d+, -w, \d+, [0-9A-Fa-f:]+
    dibbler:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovn_metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # Filters for the dibbler-based reference implementation of the pluggable
        # Prefix Delegation driver. Other implementations using an alternative agent
        # should include a similar filter in this folder.

        # prefix_delegation_agent
        dibbler-client: CommandFilter, dibbler-client, root
    ipset_firewall:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovn_metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]
        # neutron/agent/linux/iptables_firewall.py
        # "ipset", "-A", ...
        ipset: CommandFilter, ipset, root
    l3:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovn_metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # arping
        arping: CommandFilter, arping, root

        # l3_agent
        sysctl: CommandFilter, sysctl, root
        route: CommandFilter, route, root
        radvd: CommandFilter, radvd, root

        # haproxy
        haproxy: RegExpFilter, haproxy, root, haproxy, -f, .*
        kill_haproxy: KillFilter, root, haproxy, -15, -9, -HUP

        # metadata proxy
        metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
        # RHEL invocation of the metadata proxy will report /usr/bin/python
        kill_metadata: KillFilter, root, python, -15, -9
        kill_metadata2: KillFilter, root, python2, -15, -9
        kill_metadata7: KillFilter, root, python2.7, -15, -9
        kill_metadata3: KillFilter, root, python3, -15, -9
        kill_metadata35: KillFilter, root, python3.5, -15, -9
        kill_metadata36: KillFilter, root, python3.6, -15, -9
        kill_metadata37: KillFilter, root, python3.7, -15, -9
        kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -15, -9, -HUP
        kill_radvd: KillFilter, root, /sbin/radvd, -15, -9, -HUP

        # ip_lib
        ip: IpFilter, ip, root
        find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
        ip_exec: IpNetnsExecFilter, ip, root

        # l3_tc_lib
        l3_tc_show_qdisc: RegExpFilter, tc, root, tc, qdisc, show, dev, .+
        l3_tc_add_qdisc_ingress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, ingress
        l3_tc_add_qdisc_egress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, root, handle, 1:, htb
        l3_tc_show_filters: RegExpFilter, tc, root, tc, -p, -s, -d, filter, show, dev, .+, parent, .+, prio, 1
        l3_tc_delete_filters: RegExpFilter, tc, root, tc, filter, del, dev, .+, parent, .+, prio, 1, handle, .+, u32
        l3_tc_add_filter_ingress: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, ip, prio, 1, u32, match, ip, dst, .+, police, rate, .+, burst, .+, drop, flowid, :1
        l3_tc_add_filter_egress: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, ip, prio, 1, u32, match, ip, src, .+, police, rate, .+, burst, .+, drop, flowid, :1

        # For ip monitor
        kill_ip_monitor: KillFilter, root, ip, -9

        # ovs_lib (if OVSInterfaceDriver is used)
        ovs-vsctl: CommandFilter, ovs-vsctl, root

        # iptables_manager
        iptables-save: CommandFilter, iptables-save, root
        iptables-restore: CommandFilter, iptables-restore, root
        ip6tables-save: CommandFilter, ip6tables-save, root
        ip6tables-restore: CommandFilter, ip6tables-restore, root

        # Keepalived
        keepalived: CommandFilter, keepalived, root
        kill_keepalived: KillFilter, root, keepalived, -HUP, -15, -9

        # l3 agent to delete floatingip's conntrack state
        conntrack: CommandFilter, conntrack, root

        # keepalived state change monitor
        keepalived_state_change: CommandFilter, neutron-keepalived-state-change, root
        # The following filters are used to kill the keepalived state change monitor.
        # Since the monitor runs as a Python script, the system reports that the
        # command of the process to be killed is python.
        # TODO(mlavalle) These kill filters will be updated once we come up with a
        # mechanism to kill using the name of the script being executed by Python
        kill_keepalived_monitor_py: KillFilter, root, python, -15
        kill_keepalived_monitor_py27: KillFilter, root, python2.7, -15
        kill_keepalived_monitor_py3: KillFilter, root, python3, -15
        kill_keepalived_monitor_py35: KillFilter, root, python3.5, -15
        kill_keepalived_monitor_py36: KillFilter, root, python3.6, -15
        kill_keepalived_monitor_py37: KillFilter, root, python3.7, -15
    netns_cleanup:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovn_metadata_agent
        - ovs_agent
        - sriov_agent
        - netns_cleanup_cron
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # netns-cleanup
        netstat: CommandFilter, netstat, root
    dhcp:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovn_metadata_agent
        - ovs_agent
        - sriov_agent
        - netns_cleanup_cron
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # dhcp-agent
        dnsmasq: CommandFilter, dnsmasq, root
        # dhcp-agent uses kill as well, that's handled by the generic KillFilter
        # it looks like these are the only signals needed, per
        # neutron/agent/linux/dhcp.py
        kill_dnsmasq: KillFilter, root, /sbin/dnsmasq, -9, -HUP, -15
        kill_dnsmasq_usr: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP, -15

        ovs-vsctl: CommandFilter, ovs-vsctl, root
        ivs-ctl: CommandFilter, ivs-ctl, root
        mm-ctl: CommandFilter, mm-ctl, root
        dhcp_release: CommandFilter, dhcp_release, root
        dhcp_release6: CommandFilter, dhcp_release6, root

        # metadata proxy
        metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
        # RHEL invocation of the metadata proxy will report /usr/bin/python
        kill_metadata: KillFilter, root, python, -9
        kill_metadata2: KillFilter, root, python2, -9
        kill_metadata7: KillFilter, root, python2.7, -9
        kill_metadata3: KillFilter, root, python3, -9
        kill_metadata35: KillFilter, root, python3.5, -9
        kill_metadata36: KillFilter, root, python3.6, -9
        kill_metadata37: KillFilter, root, python3.7, -9

        # ip_lib
        ip: IpFilter, ip, root
        find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
        ip_exec: IpNetnsExecFilter, ip, root
    ebtables:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovn_metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        ebtables: CommandFilter, ebtables, root
    iptables_firewall:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovn_metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # neutron/agent/linux/iptables_firewall.py
        # "iptables-save", ...
        iptables-save: CommandFilter, iptables-save, root
        iptables-restore: CommandFilter, iptables-restore, root
        ip6tables-save: CommandFilter, ip6tables-save, root
        ip6tables-restore: CommandFilter, ip6tables-restore, root

        # neutron/agent/linux/iptables_firewall.py
        # "iptables", "-A", ...
        iptables: CommandFilter, iptables, root
        ip6tables: CommandFilter, ip6tables, root

        # neutron/agent/linux/iptables_firewall.py
        sysctl: CommandFilter, sysctl, root

        # neutron/agent/linux/ip_conntrack.py
        conntrack: CommandFilter, conntrack, root
    linuxbridge_plugin:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovn_metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # linuxbridge-agent
        # unclear whether both variants are necessary, but I'm transliterating
        # from the old mechanism
        brctl: CommandFilter, brctl, root
        bridge: CommandFilter, bridge, root

        # ip_lib
        ip: IpFilter, ip, root
        find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
        ip_exec: IpNetnsExecFilter, ip, root

        # tc commands needed for QoS support
        tc_replace_tbf: RegExpFilter, tc, root, tc, qdisc, replace, dev, .+, root, tbf, rate, .+, latency, .+, burst, .+
        tc_add_ingress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, ingress, handle, .+
        tc_delete: RegExpFilter, tc, root, tc, qdisc, del, dev, .+, .+
        tc_show_qdisc: RegExpFilter, tc, root, tc, qdisc, show, dev, .+
        tc_show_filters: RegExpFilter, tc, root, tc, filter, show, dev, .+, parent, .+
        tc_add_filter: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, all, prio, .+, basic, police, rate, .+, burst, .+, mtu, .+, drop
    openvswitch_plugin:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovn_metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # openvswitch-agent
        # unclear whether both variants are necessary, but I'm transliterating
        # from the old mechanism
        ovs-vsctl: CommandFilter, ovs-vsctl, root
        # NOTE(yamamoto): of_interface=native doesn't use ovs-ofctl
        ovs-ofctl: CommandFilter, ovs-ofctl, root
        ovs-appctl: CommandFilter, ovs-appctl, root
        kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9
        ovsdb-client: CommandFilter, ovsdb-client, root
        xe: CommandFilter, xe, root

        # ip_lib
        ip: IpFilter, ip, root
        find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
        ip_exec: IpNetnsExecFilter, ip, root

        # needed for FDB extension
        bridge: CommandFilter, bridge, root
    privsep:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovn_metadata_agent
        - ovs_agent
        - sriov_agent
        - netns_cleanup_cron
      content: |
        # Command filters to allow privsep daemon to be started via rootwrap.
        #
        # This file should be owned by (and only-writeable by) the root user

        [Filters]

        # By installing the following, the local admin is asserting that:
        #
        # 1. The python module load path used by privsep-helper
        #    command as root (as started by sudo/rootwrap) is trusted.
        # 2. Any oslo.config files matching the --config-file
        #    arguments below are trusted.
        # 3. Users allowed to run sudo/rootwrap with this configuration(*) are
        #    also allowed to invoke python "entrypoint" functions from
        #    --privsep_context with the additional (possibly root) privileges
        #    configured for that context.
        #
        # (*) ie: the user is allowed by /etc/sudoers to run rootwrap as root
        #
        # In particular, the oslo.config and python module path must not
        # be writeable by the unprivileged user.

        # oslo.privsep default neutron context
        privsep: PathFilter, privsep-helper, root,
          --config-file, /etc,
          --privsep_context, neutron.privileged.default,
          --privsep_sock_path, /

        # NOTE: A second `--config-file` arg can also be added above. Since
        # many neutron components are installed like that (eg: by devstack).
        # Adjust to suit local requirements.
    linux_vxlan:
      pods:
        - bagpipe_bgp
      content: |
        # bagpipe-bgp-rootwrap command filters for nodes on which bagpipe-bgp is
        # expected to control VXLAN Linux Bridge dataplane
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        #
        modprobe: CommandFilter, modprobe, root

        #
        brctl: CommandFilter, brctl, root
        bridge: CommandFilter, bridge, root

        # ip_lib
        ip: IpFilter, ip, root
        ip_exec: IpNetnsExecFilter, ip, root

        # shell (for piped commands)
        sh: CommandFilter, sh, root
    mpls_ovs_dataplane:
      pods:
        - bagpipe_bgp
      content: |
        # bagpipe-bgp-rootwrap command filters for nodes on which bagpipe-bgp is
        # expected to control MPLS OpenVSwitch dataplane
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # openvswitch
        ovs-vsctl: CommandFilter, ovs-vsctl, root
        ovs-ofctl: CommandFilter, ovs-ofctl, root

        # ip_lib
        ip: IpFilter, ip, root
        ip_exec: IpNetnsExecFilter, ip, root

        # shell (for piped commands)
        sh: CommandFilter, sh, root
  neutron:
    DEFAULT:
      metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy
      log_config_append: /etc/neutron/logging.conf
      # NOTE(portdirect): the bind port should not be defined, and is manipulated
      # via the endpoints section.
      bind_port: null
      default_availability_zones: nova
      api_workers: 1
      rpc_workers: 4
      allow_overlapping_ips: True
      state_path: /var/lib/neutron
      # core_plugin can be: ml2, calico
      core_plugin: ml2
      # service_plugin can be: router, odl-router, empty for calico,
      # networking_ovn.l3.l3_ovn.OVNL3RouterPlugin for OVN
      service_plugins: router
      allow_automatic_l3agent_failover: True
      l3_ha: True
      max_l3_agents_per_router: 2
      l3_ha_network_type: vxlan
      network_auto_schedule: True
      router_auto_schedule: True
      # (NOTE)portdirect: if unset this is populated dynamically from the value in
      # 'network.backend' to sane defaults.
      interface_driver: null
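      # For example, to set the driver explicitly instead of deriving it from
      # 'network.backend' (value shown is illustrative):
      # interface_driver: openvswitch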
    oslo_concurrency:
      lock_path: /var/lib/neutron/tmp
    database:
      max_retries: -1
    agent:
      root_helper: sudo /var/lib/openstack/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
      root_helper_daemon: sudo /var/lib/openstack/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
    oslo_messaging_notifications:
      driver: messagingv2
    oslo_messaging_rabbit:
      rabbit_ha_queues: true
    oslo_middleware:
      enable_proxy_headers_parsing: true
    oslo_policy:
      policy_file: /etc/neutron/policy.yaml
    ovn:
      enable_distributed_floating_ip: true
      ovn_metadata_enabled: true
    nova:
      auth_type: password
      auth_version: v3
      endpoint_type: internal
    placement:
      auth_type: password
      auth_version: v3
      endpoint_type: internal
    designate:
      auth_type: password
      auth_version: v3
      endpoint_type: internal
      allow_reverse_dns_lookup: true
    ironic:
      endpoint_type: internal
    keystone_authtoken:
      memcache_security_strategy: ENCRYPT
      auth_type: password
      auth_version: v3
      service_type: network
    octavia:
      request_poll_timeout: 3000
  logging:
    loggers:
      keys:
        - root
        - neutron
        - neutron_taas
    handlers:
      keys:
        - stdout
        - stderr
        - "null"
    formatters:
      keys:
        - context
        - default
    logger_root:
      level: WARNING
      handlers: 'null'
    logger_neutron:
      level: INFO
      handlers:
        - stdout
      qualname: neutron
    logger_neutron_taas:
      level: INFO
      handlers:
        - stdout
      qualname: neutron_taas
    logger_amqp:
      level: WARNING
      handlers: stderr
      qualname: amqp
    logger_amqplib:
      level: WARNING
      handlers: stderr
      qualname: amqplib
    logger_eventletwsgi:
      level: WARNING
      handlers: stderr
      qualname: eventlet.wsgi.server
    logger_sqlalchemy:
      level: WARNING
      handlers: stderr
      qualname: sqlalchemy
    logger_boto:
      level: WARNING
      handlers: stderr
      qualname: boto
    handler_null:
      class: logging.NullHandler
      formatter: default
      args: ()
    handler_stdout:
      class: StreamHandler
      args: (sys.stdout,)
      formatter: context
    handler_stderr:
      class: StreamHandler
      args: (sys.stderr,)
      formatter: context
    formatter_context:
      class: oslo_log.formatters.ContextFormatter
      datefmt: "%Y-%m-%d %H:%M:%S"
    formatter_default:
      format: "%(message)s"
      datefmt: "%Y-%m-%d %H:%M:%S"
  plugins:
    ml2_conf:
      ml2:
        extension_drivers: port_security
        # (NOTE)portdirect: if unset this is populated dynamically from the value
        # in 'network.backend' to sane defaults.
        mechanism_drivers: null
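        # For example, to set the ML2 mechanism drivers explicitly rather than
        # deriving them from 'network.backend' (values are illustrative):
        # mechanism_drivers: openvswitch,l2population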
        type_drivers: flat,vlan,vxlan,local
        tenant_network_types: vxlan
      ml2_type_vxlan:
        vni_ranges: 1:1000
        vxlan_group: 239.1.1.1
      ml2_type_flat:
        flat_networks: "*"
      # If you want to use the external network as a tagged provider network,
      # a range should be specified including the intended VLAN target
      # using ml2_type_vlan.network_vlan_ranges:
      # ml2_type_vlan:
      #   network_vlan_ranges: "external:1100:1110"
      ml2_type_geneve:
        vni_ranges: 1:65536
        max_header_size: 38
      agent:
        extensions: ""
    ml2_conf_sriov: null
    taas:
      taas:
        enabled: False
    openvswitch_agent:
      agent:
        tunnel_types: vxlan
        l2_population: True
        arp_responder: True
      ovs:
        bridge_mappings: "external:br-ex"
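        # Multiple provider mappings can be given as a comma-separated list,
        # for example (bridge and physnet names are illustrative):
        # bridge_mappings: "external:br-ex,physnet1:br-physnet1"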
1884 securitygroup:
1885 firewall_driver: neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
1886 linuxbridge_agent:
1887 linux_bridge:
1888 # To define Flat and VLAN connections, in LB we can assign
1889 # specific interface to the flat/vlan network name using:
1890 # physical_interface_mappings: "external:eth3"
1891 # Or we can set the mapping between the network and bridge:
1892 bridge_mappings: "external:br-ex"
1893 # The two above options are exclusive, do not use both of them at once
1894 securitygroup:
1895 firewall_driver: iptables
1896 vxlan:
1897 l2_population: True
1898 arp_responder: True
1899 macvtap_agent: null
1900 sriov_agent:
1901 securitygroup:
1902 firewall_driver: neutron.agent.firewall.NoopFirewallDriver
1903 sriov_nic:
1904 physical_device_mappings: physnet2:enp3s0f1
1905 # NOTE: do not use null here, use an empty string
1906 exclude_devices: ""
1907 dhcp_agent:
1908 DEFAULT:
1909 # (NOTE)portdirect: if unset this is populated dyanmicly from the value in
1910 # 'network.backend' to sane defaults.
1911 interface_driver: null
1912 dnsmasq_config_file: /etc/neutron/dnsmasq.conf
1913 force_metadata: True
1914 dnsmasq: |
1915 #no-hosts
1916 #port=5353
1917 #cache-size=500
1918 #no-negcache
1919 #dns-forward-max=100
1920 #resolve-file=
1921 #strict-order
1922 #bind-interface
1923 #bind-dynamic
1924 #domain=
1925 #dhcp-range=10.10.10.10,10.10.10.100,24h
1926 #dhcp-lease-max=150
1927 #dhcp-host=11:22:33:44:55:66,ignore
1928 #dhcp-option=3,10.10.10.1
1929 #dhcp-option-force=26,1450
1930
1931 l3_agent:
1932 DEFAULT:
1933 # (NOTE)portdirect: if unset this is populated dyanmicly from the value in
1934 # 'network.backend' to sane defaults.
1935 interface_driver: null
1936 agent_mode: legacy
1937 metering_agent: null
1938 metadata_agent:
1939 DEFAULT:
1940 # we cannot change the proxy socket path as it is declared
1941 # as a hostPath volume from agent daemonsets
1942 metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy
1943 metadata_proxy_shared_secret: "password"
1944 cache:
1945 enabled: true
1946 backend: dogpile.cache.memcached
1947 bagpipe_bgp: {}
Mohammed Naser593ec012023-07-23 09:20:05 +00001948 ovn_metadata_agent:
1949 DEFAULT:
1950 # we cannot change the proxy socket path as it is declared
1951 # as a hostPath volume from agent daemonsets
1952 metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy
1953 metadata_proxy_shared_secret: "password"
1954 metadata_workers: 2
1955 cache:
1956 enabled: true
1957 backend: dogpile.cache.memcached
1958 ovs:
1959 ovsdb_connection: unix:/run/openvswitch/db.sock
Mohammed Naserf3f59a72023-01-15 21:02:04 -05001960
1961 rabbitmq:
1962 # NOTE(rk760n): adding rmq policy to mirror messages from notification queues and set expiration time for the ones
1963 policies:
1964 - vhost: "neutron"
1965 name: "ha_ttl_neutron"
1966 definition:
1967 # mirror messges to other nodes in rmq cluster
1968 ha-mode: "all"
1969 ha-sync-mode: "automatic"
1970 # 70s
1971 message-ttl: 70000
1972 priority: 0
1973 apply-to: all
1974 pattern: '^(?!(amq\.|reply_)).*'
1975 ## NOTE: "besteffort" is meant for dev env with mixed compute type only.
1976 ## This helps prevent sriov init script from failing due to mis-matched NIC
1977 ## For prod env, target NIC should match and init script should fail otherwise.
1978 ## sriov_init:
1979 ## - besteffort
1980 sriov_init:
1981 -
1982 # auto_bridge_add is a table of "bridge: interface" pairs
1983 # To automatically add a physical interfaces to a specific bridges,
1984 # for example eth3 to bridge br-physnet1, if0 to br0 and iface_two
1985 # to br1 do something like:
1986 #
1987 # auto_bridge_add:
1988 # br-physnet1: eth3
1989 # br0: if0
1990 # br1: iface_two
1991 # br-ex will be added by default
1992 auto_bridge_add:
1993 br-ex: null
1994
Mohammed Nasera720f882023-06-30 23:48:02 -04001995 # Network off-loading configuration
1996 netoffload:
ricolin18e6fd32023-07-17 06:17:15 +00001997 enabled: false
Mohammed Nasera720f882023-06-30 23:48:02 -04001998 asap2:
1999 # - dev: enp97s0f0
2000 # vfs: 16
2001
Mohammed Naserf3f59a72023-01-15 21:02:04 -05002002 # configuration of OVS DPDK bridges and NICs
2003 # this is a separate section and not part of the auto_bridge_add section
2004 # because additional parameters are needed
2005 ovs_dpdk:
2006 enabled: false
2007    # Setting update_dpdk_bond_config to true keeps the default behavior,
2008    # which may disrupt OVS DPDK traffic when the neutron OVS agent is
2009    # restarted or when DPDK NIC/bond configurations are changed.
2010    # Setting it to false configures DPDK on the first run only and skips
2011    # NIC/bond reconfiguration on agent restart or config update.
2012 update_dpdk_bond_config: true
2013 driver: uio_pci_generic
2014 # In case bonds are configured, the nics which are part of those bonds
2015 # must NOT be provided here.
2016 nics:
2017 - name: dpdk0
2018 pci_id: '0000:05:00.0'
2019 # Set VF Index in case some particular VF(s) need to be
2020 # used with ovs-dpdk.
2021 # vf_index: 0
2022 bridge: br-phy
2023 migrate_ip: true
2024 n_rxq: 2
2025 n_txq: 2
2026 pmd_rxq_affinity: "0:3,1:27"
2027 ofport_request: 1
2028 # optional parameters for tuning the OVS DPDK config
2029 # in alignment with the available hardware resources
2030 # mtu: 2000
2031 # n_rxq_size: 1024
2032 # n_txq_size: 1024
2033 # vhost-iommu-support: true
2034 bridges:
2035 - name: br-phy
2036 # optional parameter, in case tunnel traffic needs to be transported over a vlan underlay
2037 # - tunnel_underlay_vlan: 45
2038 # Optional parameter for configuring bonding in OVS-DPDK
2039 # - name: br-phy-bond0
2040 # bonds:
2041 # - name: dpdkbond0
2042 # bridge: br-phy-bond0
2043 # # The IP from the first nic in nics list shall be used
2044 # migrate_ip: true
2045 # mtu: 2000
2046 # # Please note that n_rxq is set for each NIC individually
2047 # # rather than denoting the total number of rx queues for
2048 # # the bond as a whole. So setting n_rxq = 2 below for ex.
2049 # # would be 4 rx queues in total for the bond.
2050 # # Same for n_txq
2051 # n_rxq: 2
2052 # n_txq: 2
2053 # ofport_request: 1
2054 # n_rxq_size: 1024
2055 # n_txq_size: 1024
2056 # vhost-iommu-support: true
2057 # ovs_options: "bond_mode=active-backup"
2058 # nics:
2059 # - name: dpdk_b0s0
2060 # pci_id: '0000:06:00.0'
2061 # pmd_rxq_affinity: "0:3,1:27"
2062 # # Set VF Index in case some particular VF(s) need to be
2063 # # used with ovs-dpdk. In which case pci_id of PF must be
2064 # # provided above.
2065 # # vf_index: 0
2066 # - name: dpdk_b0s1
2067 # pci_id: '0000:07:00.0'
2068 # pmd_rxq_affinity: "0:3,1:27"
2069 # # Set VF Index in case some particular VF(s) need to be
2070 # # used with ovs-dpdk. In which case pci_id of PF must be
2071 # # provided above.
2072 # # vf_index: 0
2073 #
2074 # Set the log level for each target module (default level is always dbg)
2075 # Supported log levels are: off, emer, err, warn, info, dbg
2076 #
2077 # modules:
2078 # - name: dpdk
2079 # log_level: info
2080
2081# Names of secrets used by bootstrap and environmental checks
2082secrets:
2083 identity:
2084 admin: neutron-keystone-admin
2085 neutron: neutron-keystone-user
2086 test: neutron-keystone-test
2087 oslo_db:
2088 admin: neutron-db-admin
2089 neutron: neutron-db-user
2090 oslo_messaging:
2091 admin: neutron-rabbitmq-admin
2092 neutron: neutron-rabbitmq-user
2093 tls:
2094 compute_metadata:
2095 metadata:
2096 internal: metadata-tls-metadata
2097 network:
2098 server:
2099 public: neutron-tls-public
2100 internal: neutron-tls-server
2101 oci_image_registry:
2102 neutron: neutron-oci-image-registry
2103
2104# typically overridden by environmental
2105# values, but should include all endpoints
2106# required by this chart
2107endpoints:
2108 cluster_domain_suffix: cluster.local
2109 local_image_registry:
2110 name: docker-registry
2111 namespace: docker-registry
2112 hosts:
2113 default: localhost
2114 internal: docker-registry
2115 node: localhost
2116 host_fqdn_override:
2117 default: null
2118 port:
2119 registry:
2120 node: 5000
2121 oci_image_registry:
2122 name: oci-image-registry
2123 namespace: oci-image-registry
2124 auth:
2125 enabled: false
2126 neutron:
2127 username: neutron
2128 password: password
2129 hosts:
2130 default: localhost
2131 host_fqdn_override:
2132 default: null
2133 port:
2134 registry:
2135 default: null
2136 oslo_db:
2137 auth:
2138 admin:
2139 username: root
2140 password: password
2141 secret:
2142 tls:
2143 internal: mariadb-tls-direct
2144 neutron:
2145 username: neutron
2146 password: password
2147 hosts:
2148 default: mariadb
2149 host_fqdn_override:
2150 default: null
2151 path: /neutron
2152 scheme: mysql+pymysql
2153 port:
2154 mysql:
2155 default: 3306
2156 oslo_messaging:
2157 auth:
2158 admin:
2159 username: rabbitmq
2160 password: password
2161 secret:
2162 tls:
2163 internal: rabbitmq-tls-direct
2164 neutron:
2165 username: neutron
2166 password: password
2167 statefulset:
2168 replicas: 2
2169 name: rabbitmq-rabbitmq
2170 hosts:
2171 default: rabbitmq
2172 host_fqdn_override:
2173 default: null
2174 path: /neutron
2175 scheme: rabbit
2176 port:
2177 amqp:
2178 default: 5672
2179 http:
2180 default: 15672
2181 oslo_cache:
2182 auth:
2183      # NOTE(portdirect): this is used to define the value for the keystone
2184      # authtoken cache encryption key. If not set, it will be populated
2185      # automatically with a random value, but to take advantage of this
2186      # feature all services should be set to use the same key and the
2187      # same memcached service.
2188 memcache_secret_key: null
2189 hosts:
2190 default: memcached
2191 host_fqdn_override:
2192 default: null
2193 port:
2194 memcache:
2195 default: 11211
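  # An illustrative override pinning the cache key so all services share it
  # (the same value would also need to be set in the other charts' values):
  #
  # endpoints:
  #   oslo_cache:
  #     auth:
  #       memcache_secret_key: "<shared-cache-secret>"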
2196 compute:
2197 name: nova
2198 hosts:
2199 default: nova-api
2200 public: nova
2201 host_fqdn_override:
2202 default: null
2203 path:
2204 default: "/v2.1/%(tenant_id)s"
2205 scheme:
2206 default: 'http'
2207 port:
2208 api:
2209 default: 8774
2210 public: 80
2211 novncproxy:
2212 default: 6080
2213 compute_metadata:
2214 name: nova
2215 hosts:
2216 default: nova-metadata
2217 public: metadata
2218 host_fqdn_override:
2219 default: null
2220 path:
2221 default: /
2222 scheme:
2223 default: 'http'
2224 port:
2225 metadata:
2226 default: 8775
2227 public: 80
2228 identity:
2229 name: keystone
2230 auth:
2231 admin:
2232 region_name: RegionOne
2233 username: admin
2234 password: password
2235 project_name: admin
2236 user_domain_name: default
2237 project_domain_name: default
2238 neutron:
2239 role: admin
2240 region_name: RegionOne
2241 username: neutron
2242 password: password
2243 project_name: service
2244 user_domain_name: service
2245 project_domain_name: service
2246 nova:
2247 region_name: RegionOne
2248 project_name: service
2249 username: nova
2250 password: password
2251 user_domain_name: service
2252 project_domain_name: service
2253      placement:
2254 region_name: RegionOne
2255 project_name: service
2256 username: placement
2257 password: password
2258 user_domain_name: service
2259 project_domain_name: service
2260      designate:
2261 region_name: RegionOne
2262 project_name: service
2263 username: designate
2264 password: password
2265 user_domain_name: service
2266 project_domain_name: service
2267 ironic:
2268 region_name: RegionOne
2269 project_name: service
2270 username: ironic
2271 password: password
2272 user_domain_name: service
2273 project_domain_name: service
2274 test:
2275 role: admin
2276 region_name: RegionOne
2277 username: neutron-test
2278 password: password
2279        # NOTE: this project will be purged and reset if
2280        # conf.rally_tests.force_project_purge is set to true,
2281        # which may be required upon test failure. Be aware that this will
2282        # expunge all OpenStack objects, so if this is used, a separate
2283        # project should be used for each helm test, and it should be
2284        # ensured that this project is not in use by other tenants.
2285 project_name: test
2286 user_domain_name: service
2287 project_domain_name: service
2288 hosts:
2289 default: keystone
2290 internal: keystone-api
2291 host_fqdn_override:
2292 default: null
2293 path:
2294 default: /v3
2295 scheme:
2296 default: http
2297 port:
2298 api:
2299 default: 80
2300 internal: 5000
2301 network:
2302 name: neutron
2303 hosts:
2304 default: neutron-server
2305 public: neutron
2306 host_fqdn_override:
2307 default: null
2308      # NOTE(portdirect): this chart supports TLS for FQDN-overridden public
2309 # endpoints using the following format:
2310 # public:
2311 # host: null
2312 # tls:
2313 # crt: null
2314 # key: null
2315 path:
2316 default: null
2317 scheme:
2318 default: 'http'
2319 service: 'http'
2320 port:
2321 api:
2322 default: 9696
2323 public: 80
2324 service: 9696
2325 load_balancer:
2326 name: octavia
2327 hosts:
2328 default: octavia-api
2329 public: octavia
2330 host_fqdn_override:
2331 default: null
2332 path:
2333 default: null
2334 scheme:
2335 default: http
2336 port:
2337 api:
2338 default: 9876
2339 public: 80
2340 fluentd:
2341 namespace: osh-infra
2342 name: fluentd
2343 hosts:
2344 default: fluentd-logging
2345 host_fqdn_override:
2346 default: null
2347 path:
2348 default: null
2349 scheme: 'http'
2350 port:
2351 service:
2352 default: 24224
2353 metrics:
2354 default: 24220
2355 dns:
2356 name: designate
2357 hosts:
2358 default: designate-api
2359 public: designate
2360 host_fqdn_override:
2361 default: null
2362 path:
2363 default: /
2364 scheme:
2365 default: 'http'
2366 port:
2367 api:
2368 default: 9001
2369 public: 80
2370 baremetal:
2371 name: ironic
2372 hosts:
2373 default: ironic-api
2374 public: ironic
2375 host_fqdn_override:
2376 default: null
2377 path:
2378 default: null
2379 scheme:
2380 default: 'http'
2381 port:
2382 api:
2383 default: 6385
2384 public: 80
2385  # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress.
2386  # They are used to enable the Egress K8s network policy.
2387 kube_dns:
2388 namespace: kube-system
2389 name: kubernetes-dns
2390 hosts:
2391 default: kube-dns
2392 host_fqdn_override:
2393 default: null
2394 path:
2395 default: null
2396 scheme: http
2397 port:
2398 dns:
2399 default: 53
2400 protocol: UDP
2401 ingress:
2402 namespace: null
2403 name: ingress
2404 hosts:
2405 default: ingress
2406 port:
2407 ingress:
2408 default: 80
2409
2410network_policy:
2411 neutron:
2412 # TODO(lamt): Need to tighten this ingress for security.
2413 ingress:
2414 - {}
2415 egress:
2416 - {}
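  # A sketch of a tightened ingress rule, assuming these lists are rendered
  # as Kubernetes NetworkPolicy ingress/egress rules; the selector labels
  # below are assumptions and must match the deployment:
  #
  # network_policy:
  #   neutron:
  #     ingress:
  #       - from:
  #           - podSelector:
  #               matchLabels:
  #                 application: nova
  #         ports:
  #           - protocol: TCP
  #             port: 9696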
2417
2418helm3_hook: true
2419
2420health_probe:
2421 logging:
2422 level: ERROR
2423
2424tls:
2425 identity: false
2426 oslo_messaging: false
2427 oslo_db: false
2428
2429manifests:
2430 certificates: false
2431 configmap_bin: true
2432 configmap_etc: true
2433 daemonset_dhcp_agent: true
2434 daemonset_l3_agent: true
2435 daemonset_lb_agent: true
2436 daemonset_metadata_agent: true
2437 daemonset_ovs_agent: true
2438 daemonset_sriov_agent: true
2439 daemonset_l2gw_agent: false
2440 daemonset_bagpipe_bgp: false
2441 daemonset_netns_cleanup_cron: true
2442 deployment_ironic_agent: false
2443 deployment_server: true
2444 ingress_server: true
2445 job_bootstrap: true
2446 job_db_init: true
2447 job_db_sync: true
2448 job_db_drop: false
2449 job_image_repo_sync: true
2450 job_ks_endpoints: true
2451 job_ks_service: true
2452 job_ks_user: true
2453 job_rabbit_init: true
2454 pdb_server: true
2455 pod_rally_test: true
2456 network_policy: false
2457 secret_db: true
2458 secret_ingress_tls: true
2459 secret_keystone: true
2460 secret_rabbitmq: true
2461 secret_registry: true
2462 service_ingress_server: true
2463 service_server: true
2464...