Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 1 | # Licensed under the Apache License, Version 2.0 (the "License"); |
| 2 | # you may not use this file except in compliance with the License. |
| 3 | # You may obtain a copy of the License at |
| 4 | # |
| 5 | # http://www.apache.org/licenses/LICENSE-2.0 |
| 6 | # |
| 7 | # Unless required by applicable law or agreed to in writing, software |
| 8 | # distributed under the License is distributed on an "AS IS" BASIS, |
| 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 10 | # See the License for the specific language governing permissions and |
| 11 | # limitations under the License. |
| 12 | |
| 13 | # Default values for nova. |
| 14 | # This is a YAML-formatted file. |
| 15 | # Declare name/value pairs to be passed into your templates. |
| 16 | # name: value |
| 17 | |
| 18 | --- |
| 19 | release_group: null |
| 20 | |
| 21 | labels: |
| 22 | agent: |
| 23 | compute: |
| 24 | node_selector_key: openstack-compute-node |
| 25 | node_selector_value: enabled |
| 26 | compute_ironic: |
| 27 | node_selector_key: openstack-compute-node |
| 28 | node_selector_value: enabled |
| 29 | api_metadata: |
| 30 | node_selector_key: openstack-control-plane |
| 31 | node_selector_value: enabled |
| 32 | conductor: |
| 33 | node_selector_key: openstack-control-plane |
| 34 | node_selector_value: enabled |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 35 | job: |
| 36 | node_selector_key: openstack-control-plane |
| 37 | node_selector_value: enabled |
| 38 | novncproxy: |
| 39 | node_selector_key: openstack-control-plane |
| 40 | node_selector_value: enabled |
| 41 | osapi: |
| 42 | node_selector_key: openstack-control-plane |
| 43 | node_selector_value: enabled |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 44 | scheduler: |
| 45 | node_selector_key: openstack-control-plane |
| 46 | node_selector_value: enabled |
| 47 | spiceproxy: |
| 48 | node_selector_key: openstack-control-plane |
| 49 | node_selector_value: enabled |
| 50 | test: |
| 51 | node_selector_key: openstack-control-plane |
| 52 | node_selector_value: enabled |
| 53 | |
| 54 | images: |
| 55 | pull_policy: IfNotPresent |
| 56 | tags: |
Oleksandr Kozachenko | a10d785 | 2023-02-02 22:01:16 +0100 | [diff] [blame] | 57 | bootstrap: docker.io/openstackhelm/heat:wallaby-ubuntu_focal |
| 58 | db_drop: docker.io/openstackhelm/heat:wallaby-ubuntu_focal |
| 59 | db_init: docker.io/openstackhelm/heat:wallaby-ubuntu_focal |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 60 | dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0' |
| 61 | rabbit_init: docker.io/rabbitmq:3.7-management |
Oleksandr Kozachenko | a10d785 | 2023-02-02 22:01:16 +0100 | [diff] [blame] | 62 | ks_user: docker.io/openstackhelm/heat:wallaby-ubuntu_focal |
| 63 | ks_service: docker.io/openstackhelm/heat:wallaby-ubuntu_focal |
| 64 | nova_archive_deleted_rows: docker.io/openstackhelm/nova:wallaby-ubuntu_focal |
| 65 | ks_endpoints: docker.io/openstackhelm/heat:wallaby-ubuntu_focal |
| 66 | nova_api: docker.io/openstackhelm/nova:wallaby-ubuntu_focal |
| 67 | nova_cell_setup: docker.io/openstackhelm/nova:wallaby-ubuntu_focal |
| 68 | nova_cell_setup_init: docker.io/openstackhelm/heat:wallaby-ubuntu_focal |
| 69 | nova_compute: docker.io/openstackhelm/nova:wallaby-ubuntu_focal |
| 70 | nova_compute_ironic: 'docker.io/kolla/ubuntu-source-nova-compute-ironic:wallaby' |
| 71 | nova_compute_ssh: docker.io/openstackhelm/nova:wallaby-ubuntu_focal |
| 72 | nova_conductor: docker.io/openstackhelm/nova:wallaby-ubuntu_focal |
| 73 | nova_db_sync: docker.io/openstackhelm/nova:wallaby-ubuntu_focal |
| 74 | nova_novncproxy: docker.io/openstackhelm/nova:wallaby-ubuntu_focal |
| 75 | nova_novncproxy_assets: 'docker.io/kolla/ubuntu-source-nova-novncproxy:wallaby' |
| 76 | nova_scheduler: docker.io/openstackhelm/nova:wallaby-ubuntu_focal |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 77 | # NOTE(portdirect): we simply use the ceph config helper here, |
| 78 | # as it has both oscli and jq. |
Oleksandr Kozachenko | a10d785 | 2023-02-02 22:01:16 +0100 | [diff] [blame] | 79 | nova_service_cleaner: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_focal' |
| 80 | nova_spiceproxy: docker.io/openstackhelm/nova:wallaby-ubuntu_focal |
| 81 | nova_spiceproxy_assets: docker.io/openstackhelm/nova:wallaby-ubuntu_focal |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 82 | test: docker.io/xrally/xrally-openstack:2.0.0 |
| 83 | image_repo_sync: docker.io/docker:17.07.0 |
| 84 | nova_wait_for_computes_init: gcr.io/google_containers/hyperkube-amd64:v1.11.6 |
| 85 | local_registry: |
| 86 | active: false |
| 87 | exclude: |
| 88 | - dep_check |
| 89 | - image_repo_sync |
| 90 | |
| 91 | jobs: |
| 92 | # NOTE(portdirect): When using cells new nodes will be added to the cell on the hour by default. |
| 93 | # TODO(portdirect): Add a post-start action to nova compute pods that registers themselves. |
| 94 | cell_setup: |
| 95 | cron: "0 */1 * * *" |
| 96 | starting_deadline: 600 |
| 97 | history: |
| 98 | success: 3 |
| 99 | failed: 1 |
| 100 | extended_wait: |
| 101 | enabled: false |
| 102 | iteration: 3 |
| 103 | duration: 5 |
| 104 | service_cleaner: |
| 105 | cron: "0 */1 * * *" |
| 106 | starting_deadline: 600 |
| 107 | history: |
| 108 | success: 3 |
| 109 | failed: 1 |
| 110 | sleep_time: 60 |
| 111 | archive_deleted_rows: |
| 112 | cron: "0 */1 * * *" |
| 113 | starting_deadline: 600 |
| 114 | history: |
| 115 | success: 3 |
| 116 | failed: 1 |
| 117 | |
| 118 | bootstrap: |
| 119 | enabled: true |
| 120 | ks_user: admin |
| 121 | script: null |
| 122 | structured: |
| 123 | flavors: |
| 124 | enabled: true |
| 125 | options: |
| 126 | m1_tiny: |
| 127 | name: "m1.tiny" |
| 128 | ram: 512 |
| 129 | disk: 1 |
| 130 | vcpus: 1 |
| 131 | m1_small: |
| 132 | name: "m1.small" |
| 133 | ram: 2048 |
| 134 | disk: 20 |
| 135 | vcpus: 1 |
| 136 | m1_medium: |
| 137 | name: "m1.medium" |
| 138 | ram: 4096 |
| 139 | disk: 40 |
| 140 | vcpus: 2 |
| 141 | m1_large: |
| 142 | name: "m1.large" |
| 143 | ram: 8192 |
| 144 | disk: 80 |
| 145 | vcpus: 4 |
| 146 | m1_xlarge: |
| 147 | name: "m1.xlarge" |
| 148 | ram: 16384 |
| 149 | disk: 160 |
| 150 | vcpus: 8 |
| 151 | wait_for_computes: |
| 152 | enabled: false |
| 153 | # Wait percentage is the minimum percentage of compute hypervisors which |
| 154 | # must be available before the remainder of the bootstrap script can be run. |
| 155 | wait_percentage: 70 |
| 156 | # Once the wait_percentage above is achieved, the remaining_wait is the |
| 157 | # amount of time in seconds to wait before executing the remainder of the |
| 158 | # bootstrap script. |
| 159 | remaining_wait: 300 |
| 160 | scripts: |
| 161 | init_script: | |
| 162 | # This runs in a bootstrap init container. It counts the number of compute nodes. |
| 163 | COMPUTE_NODES=$(kubectl get nodes -o custom-columns=NAME:.metadata.name -l openstack-compute-node=enabled --no-headers | sort) |
| 164 | /bin/echo $COMPUTE_NODES > /tmp/compute_nodes.txt |
| 165 | wait_script: | |
| 166 | # This script runs in the main bootstrap container just before the |
| 167 | # bootstrap.script is called. |
| 168 | COMPUTE_HOSTS=`cat /tmp/compute_nodes.txt | wc -w` |
| 169 | if [[ $COMPUTE_HOSTS == 0 ]]; then |
| 170 | echo "There are no compute hosts found!" |
| 171 | exit 1 |
| 172 | fi |
| 173 | |
| 174 | # Wait for all hypervisors to come up before moving on with the deployment |
| 175 | HYPERVISOR_WAIT=true |
| 176 | WAIT_AFTER_READY=0 |
| 177 | SLEEP=5 |
| 178 | while [[ $HYPERVISOR_WAIT == true ]]; do |
| 179 | # It's possible that the openstack command may fail due to not being able to |
| 180 | # reach the compute service |
| 181 | set +e |
| 182 | HYPERVISORS=$(openstack hypervisor list -f value -c 'Hypervisor Hostname' | wc -w) |
| 183 | set -e |
| 184 | |
| 185 | PERCENT_READY=$(( $HYPERVISORS * 100 / $COMPUTE_HOSTS )) |
| 186 | if [[ $PERCENT_READY -ge $WAIT_PERCENTAGE ]]; then |
| 187 | echo "Hypervisor ready percentage is $PERCENT_READY" |
| 188 | if [[ $PERCENT_READY == 100 ]]; then |
| 189 | HYPERVISOR_WAIT=false |
| 190 | echo "All hypervisors are ready." |
| 191 | elif [[ WAIT_AFTER_READY -ge $REMAINING_WAIT ]]; then |
| 192 | HYPERVISOR_WAIT=false |
| 193 | echo "Waited the configured time -- $HYPERVISORS out of $COMPUTE_HOSTS hypervisor(s) ready -- proceeding with the bootstrap." |
| 194 | else |
| 195 | sleep $SLEEP |
| 196 | WAIT_AFTER_READY=$(( $WAIT_AFTER_READY + $SLEEP )) |
| 197 | fi |
| 198 | else |
| 199 | echo "Waiting $SLEEP seconds for enough hypervisors to be discovered..." |
| 200 | sleep $SLEEP |
| 201 | fi |
| 202 | done |
| 203 | |
| 204 | network: |
| 205 | # provide what type of network wiring will be used |
| 206 | # possible options: openvswitch, linuxbridge, sriov |
| 207 | backend: |
| 208 | - openvswitch |
| 209 | osapi: |
| 210 | port: 8774 |
| 211 | ingress: |
| 212 | public: true |
| 213 | classes: |
| 214 | namespace: "nginx" |
| 215 | cluster: "nginx-cluster" |
| 216 | annotations: |
| 217 | nginx.ingress.kubernetes.io/rewrite-target: / |
| 218 | external_policy_local: false |
| 219 | node_port: |
| 220 | enabled: false |
| 221 | port: 30774 |
| 222 | metadata: |
| 223 | port: 8775 |
| 224 | ingress: |
| 225 | public: true |
| 226 | classes: |
| 227 | namespace: "nginx" |
| 228 | cluster: "nginx-cluster" |
| 229 | annotations: |
| 230 | nginx.ingress.kubernetes.io/rewrite-target: / |
| 231 | external_policy_local: false |
| 232 | node_port: |
| 233 | enabled: false |
| 234 | port: 30775 |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 235 | novncproxy: |
| 236 | ingress: |
| 237 | public: true |
| 238 | classes: |
| 239 | namespace: "nginx" |
| 240 | cluster: "nginx-cluster" |
| 241 | annotations: |
| 242 | nginx.ingress.kubernetes.io/rewrite-target: / |
| 243 | node_port: |
| 244 | enabled: false |
| 245 | port: 30680 |
| 246 | spiceproxy: |
| 247 | node_port: |
| 248 | enabled: false |
| 249 | port: 30682 |
| 250 | ssh: |
| 251 | enabled: false |
| 252 | port: 8022 |
| 253 | from_subnet: 0.0.0.0/0 |
| 254 | key_types: |
| 255 | - rsa |
| 256 | - dsa |
| 257 | - ecdsa |
| 258 | - ed25519 |
| 259 | private_key: 'null' |
| 260 | public_key: 'null' |
| 261 | |
| 262 | dependencies: |
| 263 | dynamic: |
| 264 | common: |
| 265 | local_image_registry: |
| 266 | jobs: |
| 267 | - nova-image-repo-sync |
| 268 | services: |
| 269 | - endpoint: node |
| 270 | service: local_image_registry |
| 271 | targeted: |
| 272 | openvswitch: |
| 273 | compute: |
| 274 | pod: |
| 275 | - requireSameNode: true |
| 276 | labels: |
| 277 | application: neutron |
| 278 | component: neutron-ovs-agent |
| 279 | linuxbridge: |
| 280 | compute: |
| 281 | pod: |
| 282 | - requireSameNode: true |
| 283 | labels: |
| 284 | application: neutron |
| 285 | component: neutron-lb-agent |
| 286 | sriov: |
| 287 | compute: |
| 288 | pod: |
| 289 | - requireSameNode: true |
| 290 | labels: |
| 291 | application: neutron |
| 292 | component: neutron-sriov-agent |
| 293 | static: |
| 294 | api: |
| 295 | jobs: |
| 296 | - nova-db-sync |
| 297 | - nova-ks-user |
| 298 | - nova-ks-endpoints |
| 299 | - nova-rabbit-init |
| 300 | services: |
| 301 | - endpoint: internal |
| 302 | service: oslo_messaging |
| 303 | - endpoint: internal |
| 304 | service: oslo_db |
| 305 | - endpoint: internal |
| 306 | service: identity |
| 307 | api_metadata: |
| 308 | jobs: |
| 309 | - nova-db-sync |
| 310 | - nova-ks-user |
| 311 | - nova-ks-endpoints |
| 312 | - nova-rabbit-init |
| 313 | services: |
| 314 | - endpoint: internal |
| 315 | service: oslo_messaging |
| 316 | - endpoint: internal |
| 317 | service: oslo_db |
| 318 | - endpoint: internal |
| 319 | service: identity |
| 320 | bootstrap: |
| 321 | services: |
| 322 | - endpoint: internal |
| 323 | service: identity |
| 324 | - endpoint: internal |
| 325 | service: compute |
| 326 | cell_setup: |
| 327 | jobs: |
| 328 | - nova-db-sync |
| 329 | - nova-rabbit-init |
| 330 | services: |
| 331 | - endpoint: internal |
| 332 | service: oslo_messaging |
| 333 | - endpoint: internal |
| 334 | service: oslo_db |
| 335 | - endpoint: internal |
| 336 | service: identity |
| 337 | - endpoint: internal |
| 338 | service: compute |
| 339 | pod: |
| 340 | - requireSameNode: false |
| 341 | labels: |
| 342 | application: nova |
| 343 | component: compute |
| 344 | service_cleaner: |
| 345 | jobs: |
| 346 | - nova-db-sync |
| 347 | - nova-rabbit-init |
| 348 | services: |
| 349 | - endpoint: internal |
| 350 | service: oslo_messaging |
| 351 | - endpoint: internal |
| 352 | service: oslo_db |
| 353 | - endpoint: internal |
| 354 | service: identity |
| 355 | - endpoint: internal |
| 356 | service: compute |
| 357 | compute: |
| 358 | pod: |
| 359 | - requireSameNode: true |
| 360 | labels: |
| 361 | application: libvirt |
| 362 | component: libvirt |
| 363 | jobs: |
| 364 | - nova-db-sync |
| 365 | - nova-rabbit-init |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 366 | services: |
| 367 | - endpoint: internal |
| 368 | service: oslo_messaging |
| 369 | - endpoint: internal |
| 370 | service: image |
| 371 | - endpoint: internal |
| 372 | service: compute |
| 373 | - endpoint: internal |
| 374 | service: network |
| 375 | - endpoint: internal |
| 376 | service: compute_metadata |
| 377 | compute_ironic: |
| 378 | jobs: |
| 379 | - nova-db-sync |
| 380 | - nova-rabbit-init |
| 381 | services: |
| 382 | - endpoint: internal |
| 383 | service: oslo_messaging |
| 384 | - endpoint: internal |
| 385 | service: image |
| 386 | - endpoint: internal |
| 387 | service: compute |
| 388 | - endpoint: internal |
| 389 | service: network |
| 390 | - endpoint: internal |
| 391 | service: baremetal |
| 392 | conductor: |
| 393 | jobs: |
| 394 | - nova-db-sync |
| 395 | - nova-rabbit-init |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 396 | services: |
| 397 | - endpoint: internal |
| 398 | service: oslo_messaging |
| 399 | - endpoint: internal |
| 400 | service: oslo_db |
| 401 | - endpoint: internal |
| 402 | service: identity |
| 403 | - endpoint: internal |
| 404 | service: compute |
| 405 | db_drop: |
| 406 | services: |
| 407 | - endpoint: internal |
| 408 | service: oslo_db |
| 409 | archive_deleted_rows: |
| 410 | jobs: |
| 411 | - nova-db-init |
| 412 | - nova-db-sync |
| 413 | db_init: |
| 414 | services: |
| 415 | - endpoint: internal |
| 416 | service: oslo_db |
| 417 | db_sync: |
| 418 | jobs: |
| 419 | - nova-db-init |
| 420 | services: |
| 421 | - endpoint: internal |
| 422 | service: oslo_db |
| 423 | ks_endpoints: |
| 424 | jobs: |
| 425 | - nova-ks-service |
| 426 | services: |
| 427 | - endpoint: internal |
| 428 | service: identity |
| 429 | ks_service: |
| 430 | services: |
| 431 | - endpoint: internal |
| 432 | service: identity |
| 433 | ks_user: |
| 434 | services: |
| 435 | - endpoint: internal |
| 436 | service: identity |
| 437 | rabbit_init: |
| 438 | services: |
| 439 | - service: oslo_messaging |
| 440 | endpoint: internal |
| 441 | novncproxy: |
| 442 | jobs: |
| 443 | - nova-db-sync |
| 444 | services: |
| 445 | - endpoint: internal |
| 446 | service: oslo_db |
| 447 | spiceproxy: |
| 448 | jobs: |
| 449 | - nova-db-sync |
| 450 | services: |
| 451 | - endpoint: internal |
| 452 | service: oslo_db |
| 453 | scheduler: |
| 454 | jobs: |
| 455 | - nova-db-sync |
| 456 | - nova-rabbit-init |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 457 | services: |
| 458 | - endpoint: internal |
| 459 | service: oslo_messaging |
| 460 | - endpoint: internal |
| 461 | service: oslo_db |
| 462 | - endpoint: internal |
| 463 | service: identity |
| 464 | - endpoint: internal |
| 465 | service: compute |
| 466 | tests: |
| 467 | services: |
| 468 | - endpoint: internal |
| 469 | service: image |
| 470 | - endpoint: internal |
| 471 | service: compute |
| 472 | - endpoint: internal |
| 473 | service: network |
| 474 | - endpoint: internal |
| 475 | service: compute_metadata |
| 476 | image_repo_sync: |
| 477 | services: |
| 478 | - endpoint: internal |
| 479 | service: local_image_registry |
| 480 | |
| 481 | console: |
| 482 | # serial | spice | novnc | none |
| 483 | console_kind: novnc |
| 484 | serial: |
| 485 | spice: |
| 486 | compute: |
| 487 | # IF blank, search default routing interface |
| 488 | server_proxyclient_interface: |
| 489 | proxy: |
| 490 | # IF blank, search default routing interface |
| 491 | server_proxyclient_interface: |
| 492 | novnc: |
| 493 | compute: |
| 494 | # IF blank, search default routing interface |
| 495 | vncserver_proxyclient_interface: |
| 496 | vncproxy: |
| 497 | # IF blank, search default routing interface |
| 498 | vncserver_proxyclient_interface: |
| 499 | |
| 500 | ceph_client: |
| 501 | configmap: ceph-etc |
| 502 | user_secret_name: pvc-ceph-client-key |
| 503 | |
| 504 | conf: |
| 505 | security: | |
| 506 | # |
| 507 | # Disable access to the entire file system except for the directories that |
| 508 | # are explicitly allowed later. |
| 509 | # |
| 510 | # This currently breaks the configurations that come with some web application |
| 511 | # Debian packages. |
| 512 | # |
| 513 | #<Directory /> |
| 514 | # AllowOverride None |
| 515 | # Require all denied |
| 516 | #</Directory> |
| 517 | |
| 518 | # Changing the following options will not really affect the security of the |
| 519 | # server, but might make attacks slightly more difficult in some cases. |
| 520 | |
| 521 | # |
| 522 | # ServerTokens |
| 523 | # This directive configures what you return as the Server HTTP response |
| 524 | # Header. The default is 'Full' which sends information about the OS-Type |
| 525 | # and compiled in modules. |
| 526 | # Set to one of: Full | OS | Minimal | Minor | Major | Prod |
| 527 | # where Full conveys the most information, and Prod the least. |
| 528 | ServerTokens Prod |
| 529 | |
| 530 | # |
| 531 | # Optionally add a line containing the server version and virtual host |
| 532 | # name to server-generated pages (internal error documents, FTP directory |
| 533 | # listings, mod_status and mod_info output etc., but not CGI generated |
| 534 | # documents or custom error documents). |
| 535 | # Set to "EMail" to also include a mailto: link to the ServerAdmin. |
| 536 | # Set to one of: On | Off | EMail |
| 537 | ServerSignature Off |
| 538 | |
| 539 | # |
| 540 | # Allow TRACE method |
| 541 | # |
| 542 | # Set to "extended" to also reflect the request body (only for testing and |
| 543 | # diagnostic purposes). |
| 544 | # |
| 545 | # Set to one of: On | Off | extended |
| 546 | TraceEnable Off |
| 547 | |
| 548 | # |
| 549 | # Forbid access to version control directories |
| 550 | # |
| 551 | # If you use version control systems in your document root, you should |
| 552 | # probably deny access to their directories. For example, for subversion: |
| 553 | # |
| 554 | #<DirectoryMatch "/\.svn"> |
| 555 | # Require all denied |
| 556 | #</DirectoryMatch> |
| 557 | |
| 558 | # |
| 559 | # Setting this header will prevent MSIE from interpreting files as something |
| 560 | # else than declared by the content type in the HTTP headers. |
| 561 | # Requires mod_headers to be enabled. |
| 562 | # |
| 563 | #Header set X-Content-Type-Options: "nosniff" |
| 564 | |
| 565 | # |
| 566 | # Setting this header will prevent other sites from embedding pages from this |
| 567 | # site as frames. This defends against clickjacking attacks. |
| 568 | # Requires mod_headers to be enabled. |
| 569 | # |
| 570 | #Header set X-Frame-Options: "sameorigin" |
| 571 | software: |
| 572 | apache2: |
| 573 | binary: apache2 |
| 574 | start_parameters: -DFOREGROUND |
| 575 | conf_dir: /etc/apache2/conf-enabled |
| 576 | site_dir: /etc/apache2/sites-enabled |
| 577 | mods_dir: /etc/apache2/mods-available |
| 578 | a2enmod: null |
| 579 | a2dismod: null |
| 580 | ceph: |
| 581 | enabled: true |
| 582 | admin_keyring: null |
| 583 | cinder: |
| 584 | user: "cinder" |
| 585 | keyring: null |
| 586 | secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337 |
| 587 | rally_tests: |
| 588 | run_tempest: false |
| 589 | clean_up: | |
| 590 | FLAVORS=$(openstack flavor list -f value --all | awk '$2 ~ /^s_rally_/ { print $1 }') |
| 591 | if [ -n "$FLAVORS" ]; then |
| 592 | echo $FLAVORS | xargs openstack flavor delete |
| 593 | fi |
| 594 | SERVERS=$(openstack server list -f value --all | awk '$2 ~ /^s_rally_/ { print $1 }') |
| 595 | if [ -n "$SERVERS" ]; then |
| 596 | echo $SERVERS | xargs openstack server delete |
| 597 | fi |
| 598 | IMAGES=$(openstack image list -f value | awk '$2 ~ /^c_rally_/ { print $1 }') |
| 599 | if [ -n "$IMAGES" ]; then |
| 600 | echo $IMAGES | xargs openstack image delete |
| 601 | fi |
| 602 | tests: |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 603 | NovaAggregates.create_and_get_aggregate_details: |
| 604 | - args: |
| 605 | availability_zone: nova |
| 606 | runner: |
| 607 | concurrency: 1 |
| 608 | times: 1 |
| 609 | type: constant |
| 610 | sla: |
| 611 | failure_rate: |
| 612 | max: 0 |
| 613 | NovaAggregates.create_and_update_aggregate: |
| 614 | - args: |
| 615 | availability_zone: nova |
| 616 | runner: |
| 617 | concurrency: 1 |
| 618 | times: 1 |
| 619 | type: constant |
| 620 | sla: |
| 621 | failure_rate: |
| 622 | max: 0 |
| 623 | NovaAggregates.list_aggregates: |
| 624 | - runner: |
| 625 | concurrency: 1 |
| 626 | times: 1 |
| 627 | type: constant |
| 628 | sla: |
| 629 | failure_rate: |
| 630 | max: 0 |
| 631 | NovaAvailabilityZones.list_availability_zones: |
| 632 | - args: |
| 633 | detailed: true |
| 634 | runner: |
| 635 | concurrency: 1 |
| 636 | times: 1 |
| 637 | type: constant |
| 638 | sla: |
| 639 | failure_rate: |
| 640 | max: 0 |
| 641 | NovaFlavors.create_and_delete_flavor: |
| 642 | - args: |
| 643 | disk: 1 |
| 644 | ram: 500 |
| 645 | vcpus: 1 |
| 646 | runner: |
| 647 | concurrency: 1 |
| 648 | times: 1 |
| 649 | type: constant |
| 650 | sla: |
| 651 | failure_rate: |
| 652 | max: 0 |
| 653 | NovaFlavors.create_and_list_flavor_access: |
| 654 | - args: |
| 655 | disk: 1 |
| 656 | ram: 500 |
| 657 | vcpus: 1 |
| 658 | runner: |
| 659 | concurrency: 1 |
| 660 | times: 1 |
| 661 | type: constant |
| 662 | sla: |
| 663 | failure_rate: |
| 664 | max: 0 |
| 665 | NovaFlavors.create_flavor: |
| 666 | - args: |
| 667 | disk: 1 |
| 668 | ram: 500 |
| 669 | vcpus: 1 |
| 670 | runner: |
| 671 | concurrency: 1 |
| 672 | times: 1 |
| 673 | type: constant |
| 674 | sla: |
| 675 | failure_rate: |
| 676 | max: 0 |
| 677 | NovaFlavors.create_flavor_and_add_tenant_access: |
| 678 | - args: |
| 679 | disk: 1 |
| 680 | ram: 500 |
| 681 | vcpus: 1 |
| 682 | runner: |
| 683 | concurrency: 1 |
| 684 | times: 1 |
| 685 | type: constant |
| 686 | sla: |
| 687 | failure_rate: |
| 688 | max: 0 |
| 689 | NovaFlavors.create_flavor_and_set_keys: |
| 690 | - args: |
| 691 | disk: 1 |
| 692 | extra_specs: |
| 693 | 'quota:disk_read_bytes_sec': 10240 |
| 694 | ram: 500 |
| 695 | vcpus: 1 |
| 696 | runner: |
| 697 | concurrency: 1 |
| 698 | times: 1 |
| 699 | type: constant |
| 700 | sla: |
| 701 | failure_rate: |
| 702 | max: 0 |
| 703 | NovaFlavors.list_flavors: |
| 704 | - args: |
| 705 | detailed: true |
| 706 | runner: |
| 707 | concurrency: 1 |
| 708 | times: 1 |
| 709 | type: constant |
| 710 | sla: |
| 711 | failure_rate: |
| 712 | max: 0 |
| 713 | NovaHypervisors.list_and_get_hypervisors: |
| 714 | - args: |
| 715 | detailed: true |
| 716 | runner: |
| 717 | concurrency: 1 |
| 718 | times: 1 |
| 719 | type: constant |
| 720 | sla: |
| 721 | failure_rate: |
| 722 | max: 0 |
| 723 | NovaHypervisors.list_and_get_uptime_hypervisors: |
| 724 | - args: |
| 725 | detailed: true |
| 726 | runner: |
| 727 | concurrency: 1 |
| 728 | times: 1 |
| 729 | type: constant |
| 730 | sla: |
| 731 | failure_rate: |
| 732 | max: 0 |
| 733 | NovaHypervisors.list_and_search_hypervisors: |
| 734 | - args: |
| 735 | detailed: true |
| 736 | runner: |
| 737 | concurrency: 1 |
| 738 | times: 1 |
| 739 | type: constant |
| 740 | sla: |
| 741 | failure_rate: |
| 742 | max: 0 |
| 743 | NovaHypervisors.list_hypervisors: |
| 744 | - args: |
| 745 | detailed: true |
| 746 | runner: |
| 747 | concurrency: 1 |
| 748 | times: 1 |
| 749 | type: constant |
| 750 | sla: |
| 751 | failure_rate: |
| 752 | max: 0 |
| 753 | NovaHypervisors.statistics_hypervisors: |
| 754 | - args: {} |
| 755 | runner: |
| 756 | concurrency: 1 |
| 757 | times: 1 |
| 758 | type: constant |
| 759 | sla: |
| 760 | failure_rate: |
| 761 | max: 0 |
| 762 | NovaKeypair.create_and_delete_keypair: |
| 763 | - runner: |
| 764 | concurrency: 1 |
| 765 | times: 1 |
| 766 | type: constant |
| 767 | sla: |
| 768 | failure_rate: |
| 769 | max: 0 |
| 770 | NovaKeypair.create_and_list_keypairs: |
| 771 | - runner: |
| 772 | concurrency: 1 |
| 773 | times: 1 |
| 774 | type: constant |
| 775 | sla: |
| 776 | failure_rate: |
| 777 | max: 0 |
| 778 | NovaServerGroups.create_and_list_server_groups: |
| 779 | - args: |
| 780 | all_projects: false |
| 781 | kwargs: |
| 782 | policies: |
| 783 | - affinity |
| 784 | runner: |
| 785 | concurrency: 1 |
| 786 | times: 1 |
| 787 | type: constant |
| 788 | sla: |
| 789 | failure_rate: |
| 790 | max: 0 |
| 791 | NovaServices.list_services: |
| 792 | - runner: |
| 793 | concurrency: 1 |
| 794 | times: 1 |
| 795 | type: constant |
| 796 | sla: |
| 797 | failure_rate: |
| 798 | max: 0 |
| 799 | paste: |
| 800 | composite:metadata: |
| 801 | use: egg:Paste#urlmap |
| 802 | /: meta |
| 803 | pipeline:meta: |
| 804 | pipeline: cors metaapp |
| 805 | app:metaapp: |
| 806 | paste.app_factory: nova.api.metadata.handler:MetadataRequestHandler.factory |
| 807 | composite:osapi_compute: |
| 808 | use: call:nova.api.openstack.urlmap:urlmap_factory |
| 809 | /: oscomputeversions |
| 810 | /v2: openstack_compute_api_v21_legacy_v2_compatible |
| 811 | /v2.1: openstack_compute_api_v21 |
| 812 | composite:openstack_compute_api_v21: |
| 813 | use: call:nova.api.auth:pipeline_factory_v21 |
| 814 | noauth2: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit noauth2 osapi_compute_app_v21 |
| 815 | keystone: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit authtoken audit keystonecontext osapi_compute_app_v21 |
| 816 | composite:openstack_compute_api_v21_legacy_v2_compatible: |
| 817 | use: call:nova.api.auth:pipeline_factory_v21 |
| 818 | noauth2: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit noauth2 legacy_v2_compatible osapi_compute_app_v21 |
| 819 | keystone: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit authtoken audit keystonecontext legacy_v2_compatible osapi_compute_app_v21 |
| 820 | filter:request_id: |
| 821 | paste.filter_factory: oslo_middleware:RequestId.factory |
| 822 | filter:compute_req_id: |
| 823 | paste.filter_factory: nova.api.compute_req_id:ComputeReqIdMiddleware.factory |
| 824 | filter:faultwrap: |
| 825 | paste.filter_factory: nova.api.openstack:FaultWrapper.factory |
| 826 | filter:noauth2: |
| 827 | paste.filter_factory: nova.api.openstack.auth:NoAuthMiddleware.factory |
| 828 | filter:sizelimit: |
| 829 | paste.filter_factory: oslo_middleware:RequestBodySizeLimiter.factory |
| 830 | filter:http_proxy_to_wsgi: |
| 831 | paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory |
| 832 | filter:legacy_v2_compatible: |
| 833 | paste.filter_factory: nova.api.openstack:LegacyV2CompatibleWrapper.factory |
| 834 | app:osapi_compute_app_v21: |
| 835 | paste.app_factory: nova.api.openstack.compute:APIRouterV21.factory |
| 836 | pipeline:oscomputeversions: |
| 837 | pipeline: faultwrap http_proxy_to_wsgi oscomputeversionapp |
| 838 | app:oscomputeversionapp: |
| 839 | paste.app_factory: nova.api.openstack.compute.versions:Versions.factory |
| 840 | filter:cors: |
| 841 | paste.filter_factory: oslo_middleware.cors:filter_factory |
| 842 | oslo_config_project: nova |
| 843 | filter:keystonecontext: |
| 844 | paste.filter_factory: nova.api.auth:NovaKeystoneContext.factory |
| 845 | filter:authtoken: |
| 846 | paste.filter_factory: keystonemiddleware.auth_token:filter_factory |
| 847 | filter:audit: |
| 848 | paste.filter_factory: keystonemiddleware.audit:filter_factory |
| 849 | audit_map_file: /etc/nova/api_audit_map.conf |
| 850 | policy: {} |
| 851 | nova_sudoers: | |
| 852 | # This sudoers file supports rootwrap for both Kolla and LOCI Images. |
| 853 | Defaults !requiretty |
| 854 | Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin" |
| 855 | nova ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/nova-rootwrap /etc/nova/rootwrap.conf *, /var/lib/openstack/bin/nova-rootwrap /etc/nova/rootwrap.conf * |
| 856 | api_audit_map: |
| 857 | DEFAULT: |
| 858 | target_endpoint_type: None |
| 859 | custom_actions: |
| 860 | enable: enable |
| 861 | disable: disable |
| 862 | delete: delete |
| 863 | startup: start/startup |
| 864 | shutdown: stop/shutdown |
| 865 | reboot: start/reboot |
| 866 | os-migrations/get: read |
| 867 | os-server-password/post: update |
| 868 | path_keywords: |
| 869 | add: None |
| 870 | action: None |
| 871 | enable: None |
| 872 | disable: None |
| 873 | configure-project: None |
| 874 | defaults: None |
| 875 | delete: None |
| 876 | detail: None |
| 877 | diagnostics: None |
| 878 | entries: entry |
| 879 | extensions: alias |
| 880 | flavors: flavor |
| 881 | images: image |
| 882 | ips: label |
| 883 | limits: None |
| 884 | metadata: key |
| 885 | os-agents: os-agent |
| 886 | os-aggregates: os-aggregate |
| 887 | os-availability-zone: None |
| 888 | os-certificates: None |
| 889 | os-cloudpipe: None |
| 890 | os-fixed-ips: ip |
| 891 | os-extra_specs: key |
| 892 | os-flavor-access: None |
| 893 | os-floating-ip-dns: domain |
| 894 | os-floating-ips-bulk: host |
| 895 | os-floating-ip-pools: None |
| 896 | os-floating-ips: floating-ip |
| 897 | os-hosts: host |
| 898 | os-hypervisors: hypervisor |
| 899 | os-instance-actions: instance-action |
| 900 | os-keypairs: keypair |
| 901 | os-migrations: None |
| 902 | os-networks: network |
| 903 | os-quota-sets: tenant |
| 904 | os-security-groups: security_group |
| 905 | os-security-group-rules: rule |
| 906 | os-server-password: None |
| 907 | os-services: None |
| 908 | os-simple-tenant-usage: tenant |
| 909 | os-virtual-interfaces: None |
| 910 | os-volume_attachments: attachment |
| 911 | os-volumes_boot: None |
| 912 | os-volumes: volume |
| 913 | os-volume-types: volume-type |
| 914 | os-snapshots: snapshot |
| 915 | reboot: None |
| 916 | servers: server |
| 917 | shutdown: None |
| 918 | startup: None |
| 919 | statistics: None |
| 920 | service_endpoints: |
| 921 | compute: service/compute |
| 922 | rootwrap: | |
| 923 | # Configuration for nova-rootwrap |
| 924 | # This file should be owned by (and only-writeable by) the root user |
| 925 | |
| 926 | [DEFAULT] |
| 927 | # List of directories to load filter definitions from (separated by ','). |
| 928 | # These directories MUST all be only writeable by root ! |
| 929 | filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap |
| 930 | |
| 931 | # List of directories to search executables in, in case filters do not |
| 932 | # explicitly specify a full path (separated by ',') |
| 933 | # If not specified, defaults to system PATH environment variable. |
| 934 | # These directories MUST all be only writeable by root ! |
| 935 | exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin |
| 936 | |
| 937 | # Enable logging to syslog |
| 938 | # Default value is False |
| 939 | use_syslog=False |
| 940 | |
| 941 | # Which syslog facility to use. |
| 942 | # Valid values include auth, authpriv, syslog, local0, local1... |
| 943 | # Default value is 'syslog' |
| 944 | syslog_log_facility=syslog |
| 945 | |
| 946 | # Which messages to log. |
| 947 | # INFO means log all usage |
| 948 | # ERROR means only log unsuccessful attempts |
| 949 | syslog_log_level=ERROR |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 950 | rootwrap_filters: |
| 951 | api_metadata: |
| 952 | pods: |
| 953 | - metadata |
| 954 | content: | |
| 955 | # nova-rootwrap command filters for api-metadata nodes |
| 956 | # This is needed on nova-api hosts running with "metadata" in enabled_apis |
| 957 | # or when running nova-api-metadata |
| 958 | # This file should be owned by (and only-writeable by) the root user |
| 959 | |
| 960 | [Filters] |
| 961 | # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ... |
| 962 | iptables-save: CommandFilter, iptables-save, root |
| 963 | ip6tables-save: CommandFilter, ip6tables-save, root |
| 964 | |
| 965 | # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,) |
| 966 | iptables-restore: CommandFilter, iptables-restore, root |
| 967 | ip6tables-restore: CommandFilter, ip6tables-restore, root |
| 968 | compute: |
| 969 | pods: |
| 970 | - compute |
| 971 | content: | |
| 972 | # nova-rootwrap command filters for compute nodes |
| 973 | # This file should be owned by (and only-writeable by) the root user |
| 974 | |
| 975 | [Filters] |
| 976 | # nova/virt/disk/mount/api.py: 'kpartx', '-a', device |
| 977 | # nova/virt/disk/mount/api.py: 'kpartx', '-d', device |
| 978 | kpartx: CommandFilter, kpartx, root |
| 979 | |
| 980 | # nova/virt/xenapi/vm_utils.py: tune2fs, -O ^has_journal, part_path |
| 981 | # nova/virt/xenapi/vm_utils.py: tune2fs, -j, partition_path |
| 982 | tune2fs: CommandFilter, tune2fs, root |
| 983 | |
| 984 | # nova/virt/disk/mount/api.py: 'mount', mapped_device |
| 985 | # nova/virt/disk/api.py: 'mount', '-o', 'bind', src, target |
| 986 | # nova/virt/xenapi/vm_utils.py: 'mount', '-t', 'ext2,ext3,ext4,reiserfs'.. |
| 987 | # nova/virt/configdrive.py: 'mount', device, mountdir |
| 988 | # nova/virt/libvirt/volume.py: 'mount', '-t', 'sofs' ... |
| 989 | mount: CommandFilter, mount, root |
| 990 | |
| 991 | # nova/virt/disk/mount/api.py: 'umount', mapped_device |
| 992 | # nova/virt/disk/api.py: 'umount' target |
| 993 | # nova/virt/xenapi/vm_utils.py: 'umount', dev_path |
| 994 | # nova/virt/configdrive.py: 'umount', mountdir |
| 995 | umount: CommandFilter, umount, root |
| 996 | |
| 997 | # nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-c', device, image |
| 998 | # nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-d', device |
| 999 | qemu-nbd: CommandFilter, qemu-nbd, root |
| 1000 | |
| 1001 | # nova/virt/disk/mount/loop.py: 'losetup', '--find', '--show', image |
| 1002 | # nova/virt/disk/mount/loop.py: 'losetup', '--detach', device |
| 1003 | losetup: CommandFilter, losetup, root |
| 1004 | |
| 1005 | # nova/virt/disk/vfs/localfs.py: 'blkid', '-o', 'value', '-s', 'TYPE', device |
| 1006 | blkid: CommandFilter, blkid, root |
| 1007 | |
| 1008 | # nova/virt/libvirt/utils.py: 'blockdev', '--getsize64', path |
| 1009 | # nova/virt/disk/mount/nbd.py: 'blockdev', '--flushbufs', device |
| 1010 | blockdev: RegExpFilter, blockdev, root, blockdev, (--getsize64|--flushbufs), /dev/.* |
| 1011 | |
| 1012 | # nova/virt/disk/vfs/localfs.py: 'tee', canonpath |
| 1013 | tee: CommandFilter, tee, root |
| 1014 | |
| 1015 | # nova/virt/disk/vfs/localfs.py: 'mkdir', canonpath |
| 1016 | mkdir: CommandFilter, mkdir, root |
| 1017 | |
| 1018 | # nova/virt/disk/vfs/localfs.py: 'chown' |
| 1019 | # nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log |
| 1020 | # nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log |
| 1021 | # nova/virt/libvirt/connection.py: 'chown', 'root', basepath('disk') |
| 1022 | chown: CommandFilter, chown, root |
| 1023 | |
| 1024 | # nova/virt/disk/vfs/localfs.py: 'chmod' |
| 1025 | chmod: CommandFilter, chmod, root |
| 1026 | |
| 1027 | # nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap' |
| 1028 | # nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up' |
| 1029 | # nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev |
| 1030 | # nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i.. |
| 1031 | # nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'.. |
| 1032 | # nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',.. |
| 1033 | # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',.. |
| 1034 | # nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev) |
| 1035 | # nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1] |
| 1036 | # nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge |
| 1037 | # nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', .. |
| 1038 | # nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',.. |
| 1039 | # nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ... |
| 1040 | # nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,.. |
| 1041 | # nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up' |
| 1042 | # nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up' |
| 1043 | # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, .. |
| 1044 | # nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, .. |
| 1045 | # nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up' |
| 1046 | # nova/network/linux_net.py: 'ip', 'route', 'add', .. |
| 1047 | # nova/network/linux_net.py: 'ip', 'route', 'del', . |
| 1048 | # nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev |
| 1049 | ip: CommandFilter, ip, root |
| 1050 | |
| 1051 | # nova/virt/libvirt/vif.py: 'tunctl', '-b', '-t', dev |
| 1052 | # nova/network/linux_net.py: 'tunctl', '-b', '-t', dev |
| 1053 | tunctl: CommandFilter, tunctl, root |
| 1054 | |
| 1055 | # nova/virt/libvirt/vif.py: 'ovs-vsctl', ... |
| 1056 | # nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ... |
| 1057 | # nova/network/linux_net.py: 'ovs-vsctl', .... |
| 1058 | ovs-vsctl: CommandFilter, ovs-vsctl, root |
| 1059 | |
| 1060 | # nova/virt/libvirt/vif.py: 'vrouter-port-control', ... |
| 1061 | vrouter-port-control: CommandFilter, vrouter-port-control, root |
| 1062 | |
| 1063 | # nova/virt/libvirt/vif.py: 'ebrctl', ... |
| 1064 | ebrctl: CommandFilter, ebrctl, root |
| 1065 | |
| 1066 | # nova/virt/libvirt/vif.py: 'mm-ctl', ... |
| 1067 | mm-ctl: CommandFilter, mm-ctl, root |
| 1068 | |
| 1069 | # nova/network/linux_net.py: 'ovs-ofctl', .... |
| 1070 | ovs-ofctl: CommandFilter, ovs-ofctl, root |
| 1071 | |
| 1072 | # nova/virt/libvirt/connection.py: 'dd', if=%s % virsh_output, ... |
| 1073 | dd: CommandFilter, dd, root |
| 1074 | |
| 1075 | # nova/virt/xenapi/volume_utils.py: 'iscsiadm', '-m', ... |
| 1076 | iscsiadm: CommandFilter, iscsiadm, root |
| 1077 | |
| 1078 | # nova/virt/libvirt/volume/aoe.py: 'aoe-revalidate', aoedev |
| 1079 | # nova/virt/libvirt/volume/aoe.py: 'aoe-discover' |
| 1080 | aoe-revalidate: CommandFilter, aoe-revalidate, root |
| 1081 | aoe-discover: CommandFilter, aoe-discover, root |
| 1082 | |
| 1083 | # nova/virt/xenapi/vm_utils.py: parted, --script, ... |
| 1084 | # nova/virt/xenapi/vm_utils.py: 'parted', '--script', dev_path, ..*. |
| 1085 | parted: CommandFilter, parted, root |
| 1086 | |
| 1087 | # nova/virt/xenapi/vm_utils.py: 'pygrub', '-qn', dev_path |
| 1088 | pygrub: CommandFilter, pygrub, root |
| 1089 | |
| 1090 | # nova/virt/xenapi/vm_utils.py: fdisk %(dev_path)s |
| 1091 | fdisk: CommandFilter, fdisk, root |
| 1092 | |
| 1093 | # nova/virt/xenapi/vm_utils.py: e2fsck, -f, -p, partition_path |
| 1094 | # nova/virt/disk/api.py: e2fsck, -f, -p, image |
| 1095 | e2fsck: CommandFilter, e2fsck, root |
| 1096 | |
| 1097 | # nova/virt/xenapi/vm_utils.py: resize2fs, partition_path |
| 1098 | # nova/virt/disk/api.py: resize2fs, image |
| 1099 | resize2fs: CommandFilter, resize2fs, root |
| 1100 | |
| 1101 | # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ... |
| 1102 | iptables-save: CommandFilter, iptables-save, root |
| 1103 | ip6tables-save: CommandFilter, ip6tables-save, root |
| 1104 | |
| 1105 | # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,) |
| 1106 | iptables-restore: CommandFilter, iptables-restore, root |
| 1107 | ip6tables-restore: CommandFilter, ip6tables-restore, root |
| 1108 | |
| 1109 | # nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ... |
| 1110 | # nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],.. |
| 1111 | arping: CommandFilter, arping, root |
| 1112 | |
| 1113 | # nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address |
| 1114 | dhcp_release: CommandFilter, dhcp_release, root |
| 1115 | |
| 1116 | # nova/network/linux_net.py: 'kill', '-9', pid |
| 1117 | # nova/network/linux_net.py: 'kill', '-HUP', pid |
| 1118 | kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP |
| 1119 | |
| 1120 | # nova/network/linux_net.py: 'kill', pid |
| 1121 | kill_radvd: KillFilter, root, /usr/sbin/radvd |
| 1122 | |
| 1123 | # nova/network/linux_net.py: dnsmasq call |
| 1124 | dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq |
| 1125 | |
| 1126 | # nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'.. |
| 1127 | radvd: CommandFilter, radvd, root |
| 1128 | |
| 1129 | # nova/network/linux_net.py: 'brctl', 'addbr', bridge |
| 1130 | # nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0 |
| 1131 | # nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off' |
| 1132 | # nova/network/linux_net.py: 'brctl', 'addif', bridge, interface |
| 1133 | brctl: CommandFilter, brctl, root |
| 1134 | |
| 1135 | # nova/virt/libvirt/utils.py: 'mkswap' |
| 1136 | # nova/virt/xenapi/vm_utils.py: 'mkswap' |
| 1137 | mkswap: CommandFilter, mkswap, root |
| 1138 | |
| 1139 | # nova/virt/libvirt/utils.py: 'nova-idmapshift' |
| 1140 | nova-idmapshift: CommandFilter, nova-idmapshift, root |
| 1141 | |
| 1142 | # nova/virt/xenapi/vm_utils.py: 'mkfs' |
| 1143 | # nova/utils.py: 'mkfs', fs, path, label |
| 1144 | mkfs: CommandFilter, mkfs, root |
| 1145 | |
| 1146 | # nova/virt/libvirt/utils.py: 'qemu-img' |
| 1147 | qemu-img: CommandFilter, qemu-img, root |
| 1148 | |
| 1149 | # nova/virt/disk/vfs/localfs.py: 'readlink', '-e' |
| 1150 | readlink: CommandFilter, readlink, root |
| 1151 | |
| 1152 | # nova/virt/disk/api.py: |
| 1153 | mkfs.ext3: CommandFilter, mkfs.ext3, root |
| 1154 | mkfs.ext4: CommandFilter, mkfs.ext4, root |
| 1155 | mkfs.ntfs: CommandFilter, mkfs.ntfs, root |
| 1156 | |
| 1157 | # nova/virt/libvirt/connection.py: |
| 1158 | lvremove: CommandFilter, lvremove, root |
| 1159 | |
| 1160 | # nova/virt/libvirt/utils.py: |
| 1161 | lvcreate: CommandFilter, lvcreate, root |
| 1162 | |
| 1163 | # nova/virt/libvirt/utils.py: |
| 1164 | lvs: CommandFilter, lvs, root |
| 1165 | |
| 1166 | # nova/virt/libvirt/utils.py: |
| 1167 | vgs: CommandFilter, vgs, root |
| 1168 | |
| 1169 | # nova/utils.py:read_file_as_root: 'cat', file_path |
| 1170 | # (called from nova/virt/disk/vfs/localfs.py:VFSLocalFS.read_file) |
| 1171 | read_passwd: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/passwd |
| 1172 | read_shadow: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/shadow |
| 1173 | |
| 1174 | # os-brick needed commands |
| 1175 | read_initiator: ReadFileFilter, /etc/iscsi/initiatorname.iscsi |
| 1176 | multipath: CommandFilter, multipath, root |
| 1177 | # multipathd show status |
| 1178 | multipathd: CommandFilter, multipathd, root |
| 1179 | systool: CommandFilter, systool, root |
| 1180 | vgc-cluster: CommandFilter, vgc-cluster, root |
| 1181 | # os_brick/initiator/connector.py |
| 1182 | drv_cfg: CommandFilter, /opt/emc/scaleio/sdc/bin/drv_cfg, root, /opt/emc/scaleio/sdc/bin/drv_cfg, --query_guid |
| 1183 | |
| 1184 | # TODO(smcginnis) Temporary fix. |
| 1185 | # Need to pull in os-brick os-brick.filters file instead and clean |
| 1186 | # out stale brick values from this file. |
| 1187 | scsi_id: CommandFilter, /lib/udev/scsi_id, root |
| 1188 | # os_brick.privileged.default oslo.privsep context |
| 1189 | # This line ties the superuser privs with the config files, context name, |
| 1190 | # and (implicitly) the actual python code invoked. |
| 1191 | privsep-rootwrap: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.* |
| 1192 | |
| 1193 | # nova/storage/linuxscsi.py: sg_scan device |
| 1194 | sg_scan: CommandFilter, sg_scan, root |
| 1195 | |
| 1196 | # nova/volume/encryptors/cryptsetup.py: |
| 1197 | # nova/volume/encryptors/luks.py: |
| 1198 | ln: RegExpFilter, ln, root, ln, --symbolic, --force, /dev/mapper/crypt-.+, .+ |
| 1199 | |
| 1200 | # nova/volume/encryptors.py: |
| 1201 | # nova/virt/libvirt/dmcrypt.py: |
| 1202 | cryptsetup: CommandFilter, cryptsetup, root |
| 1203 | |
| 1204 | # nova/virt/xenapi/vm_utils.py: |
| 1205 | xenstore-read: CommandFilter, xenstore-read, root |
| 1206 | |
| 1207 | # nova/virt/libvirt/utils.py: |
| 1208 | rbd: CommandFilter, rbd, root |
| 1209 | |
| 1210 | # nova/virt/libvirt/utils.py: 'shred', '-n3', '-s%d' % volume_size, path |
| 1211 | shred: CommandFilter, shred, root |
| 1212 | |
| 1213 | # nova/virt/libvirt/volume.py: 'cp', '/dev/stdin', delete_control.. |
| 1214 | cp: CommandFilter, cp, root |
| 1215 | |
| 1216 | # nova/virt/xenapi/vm_utils.py: |
| 1217 | sync: CommandFilter, sync, root |
| 1218 | |
| 1219 | # nova/virt/libvirt/imagebackend.py: |
| 1220 | ploop: RegExpFilter, ploop, root, ploop, restore-descriptor, .* |
| 1221 | prl_disk_tool: RegExpFilter, prl_disk_tool, root, prl_disk_tool, resize, --size, .*M$, --resize_partition, --hdd, .* |
| 1222 | |
| 1223 | # nova/virt/libvirt/utils.py: 'xend', 'status' |
| 1224 | xend: CommandFilter, xend, root |
| 1225 | |
| 1226 | # nova/virt/libvirt/utils.py: |
| 1227 | touch: CommandFilter, touch, root |
| 1228 | |
| 1229 | # nova/virt/libvirt/volume/vzstorage.py |
| 1230 | pstorage-mount: CommandFilter, pstorage-mount, root |
| 1231 | network: |
| 1232 | pods: |
| 1233 | - compute |
| 1234 | content: | |
| 1235 | # nova-rootwrap command filters for network nodes |
| 1236 | # This file should be owned by (and only-writeable by) the root user |
| 1237 | |
| 1238 | [Filters] |
| 1239 | # nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap' |
| 1240 | # nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up' |
| 1241 | # nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev |
| 1242 | # nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i.. |
| 1243 | # nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'.. |
| 1244 | # nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',.. |
| 1245 | # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',.. |
| 1246 | # nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev) |
| 1247 | # nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1] |
| 1248 | # nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge |
| 1249 | # nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', .. |
| 1250 | # nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',.. |
| 1251 | # nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ... |
| 1252 | # nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,.. |
| 1253 | # nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up' |
| 1254 | # nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up' |
| 1255 | # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, .. |
| 1256 | # nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, .. |
| 1257 | # nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up' |
| 1258 | # nova/network/linux_net.py: 'ip', 'route', 'add', .. |
| 1259 | # nova/network/linux_net.py: 'ip', 'route', 'del', . |
| 1260 | # nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev |
| 1261 | ip: CommandFilter, ip, root |
| 1262 | |
| 1263 | # nova/virt/libvirt/vif.py: 'ovs-vsctl', ... |
| 1264 | # nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ... |
| 1265 | # nova/network/linux_net.py: 'ovs-vsctl', .... |
| 1266 | ovs-vsctl: CommandFilter, ovs-vsctl, root |
| 1267 | |
| 1268 | # nova/network/linux_net.py: 'ovs-ofctl', .... |
| 1269 | ovs-ofctl: CommandFilter, ovs-ofctl, root |
| 1270 | |
| 1271 | # nova/virt/libvirt/vif.py: 'ivs-ctl', ... |
| 1272 | # nova/virt/libvirt/vif.py: 'ivs-ctl', 'del-port', ... |
| 1273 | # nova/network/linux_net.py: 'ivs-ctl', .... |
| 1274 | ivs-ctl: CommandFilter, ivs-ctl, root |
| 1275 | |
| 1276 | # nova/virt/libvirt/vif.py: 'ifc_ctl', ... |
| 1277 | ifc_ctl: CommandFilter, /opt/pg/bin/ifc_ctl, root |
| 1278 | |
| 1279 | # nova/network/linux_net.py: 'ebtables', '-D' ... |
| 1280 | # nova/network/linux_net.py: 'ebtables', '-I' ... |
| 1281 | ebtables: CommandFilter, ebtables, root |
| 1282 | ebtables_usr: CommandFilter, ebtables, root |
| 1283 | |
| 1284 | # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ... |
| 1285 | iptables-save: CommandFilter, iptables-save, root |
| 1286 | ip6tables-save: CommandFilter, ip6tables-save, root |
| 1287 | |
| 1288 | # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,) |
| 1289 | iptables-restore: CommandFilter, iptables-restore, root |
| 1290 | ip6tables-restore: CommandFilter, ip6tables-restore, root |
| 1291 | |
| 1292 | # nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ... |
| 1293 | # nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],.. |
| 1294 | arping: CommandFilter, arping, root |
| 1295 | |
| 1296 | # nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address |
| 1297 | dhcp_release: CommandFilter, dhcp_release, root |
| 1298 | |
| 1299 | # nova/network/linux_net.py: 'kill', '-9', pid |
| 1300 | # nova/network/linux_net.py: 'kill', '-HUP', pid |
| 1301 | kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP |
| 1302 | |
| 1303 | # nova/network/linux_net.py: 'kill', pid |
| 1304 | kill_radvd: KillFilter, root, /usr/sbin/radvd |
| 1305 | |
| 1306 | # nova/network/linux_net.py: dnsmasq call |
| 1307 | dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq |
| 1308 | |
| 1309 | # nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'.. |
| 1310 | radvd: CommandFilter, radvd, root |
| 1311 | |
| 1312 | # nova/network/linux_net.py: 'brctl', 'addbr', bridge |
| 1313 | # nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0 |
| 1314 | # nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off' |
| 1315 | # nova/network/linux_net.py: 'brctl', 'addif', bridge, interface |
| 1316 | brctl: CommandFilter, brctl, root |
| 1317 | |
| 1318 | # nova/network/linux_net.py: 'sysctl', .... |
| 1319 | sysctl: CommandFilter, sysctl, root |
| 1320 | |
| 1321 | # nova/network/linux_net.py: 'conntrack' |
| 1322 | conntrack: CommandFilter, conntrack, root |
| 1323 | |
| 1324 | # nova/network/linux_net.py: 'fp-vdev' |
| 1325 | fp-vdev: CommandFilter, fp-vdev, root |
| 1326 | nova_ironic: |
| 1327 | DEFAULT: |
| 1328 | scheduler_host_manager: ironic_host_manager |
| 1329 | compute_driver: ironic.IronicDriver |
| 1330 | ram_allocation_ratio: 1.0 |
| 1331 | cpu_allocation_ratio: 1.0 |
| 1332 | reserved_host_memory_mb: 0 |
| 1333 | libvirt: |
| 1334 | # Get the IP address to be used as the target for live migration traffic using interface name. |
| 1335 | # If this option is set to None, the hostname of the migration target compute node will be used. |
| 1336 | live_migration_interface: |
| 1337 | hypervisor: |
| 1338 | # my_ip can be set automatically through this interface name. |
| 1339 | host_interface: |
| 1340 | # This list is the keys to exclude from the config file ingested by nova-compute |
| 1341 | nova_compute_redactions: |
| 1342 | - database |
| 1343 | - api_database |
| 1344 | - cell0_database |
| 1345 | nova: |
| 1346 | DEFAULT: |
| 1347 | log_config_append: /etc/nova/logging.conf |
| 1348 | default_ephemeral_format: ext4 |
| 1349 | ram_allocation_ratio: 1.0 |
| 1350 | disk_allocation_ratio: 1.0 |
| 1351 | cpu_allocation_ratio: 3.0 |
| 1352 | state_path: /var/lib/nova |
| 1353 | osapi_compute_listen: 0.0.0.0 |
| 1354 | # NOTE(portdirect): the bind port should not be defined, and is manipulated |
| 1355 | # via the endpoints section. |
| 1356 | osapi_compute_listen_port: null |
| 1357 | osapi_compute_workers: 1 |
| 1358 | metadata_workers: 1 |
| 1359 | use_neutron: true |
| 1360 | firewall_driver: nova.virt.firewall.NoopFirewallDriver |
| 1361 | linuxnet_interface_driver: openvswitch |
| 1362 | compute_driver: libvirt.LibvirtDriver |
| 1363 | my_ip: 0.0.0.0 |
| 1364 | instance_usage_audit: True |
| 1365 | instance_usage_audit_period: hour |
| 1366 | notify_on_state_change: vm_and_task_state |
| 1367 | resume_guests_state_on_host_boot: True |
| 1368 | vnc: |
| 1369 | novncproxy_host: 0.0.0.0 |
| 1370 | vncserver_listen: 0.0.0.0 |
      # This would be set to each compute node's IP
| 1372 | # server_proxyclient_address: 127.0.0.1 |
| 1373 | spice: |
| 1374 | html5proxy_host: 0.0.0.0 |
| 1375 | server_listen: 0.0.0.0 |
      # This would be set to each compute node's IP
| 1377 | # server_proxyclient_address: 127.0.0.1 |
| 1378 | conductor: |
| 1379 | workers: 1 |
| 1380 | oslo_policy: |
| 1381 | policy_file: /etc/nova/policy.yaml |
| 1382 | oslo_concurrency: |
| 1383 | lock_path: /var/lib/nova/tmp |
| 1384 | oslo_middleware: |
| 1385 | enable_proxy_headers_parsing: true |
| 1386 | glance: |
| 1387 | num_retries: 3 |
| 1388 | ironic: |
| 1389 | api_endpoint: null |
| 1390 | auth_url: null |
| 1391 | neutron: |
| 1392 | metadata_proxy_shared_secret: "password" |
| 1393 | service_metadata_proxy: True |
| 1394 | auth_type: password |
| 1395 | auth_version: v3 |
| 1396 | database: |
| 1397 | max_retries: -1 |
| 1398 | api_database: |
| 1399 | max_retries: -1 |
| 1400 | cell0_database: |
| 1401 | max_retries: -1 |
| 1402 | keystone_authtoken: |
| 1403 | auth_type: password |
| 1404 | auth_version: v3 |
| 1405 | memcache_security_strategy: ENCRYPT |
| 1406 | service_user: |
| 1407 | auth_type: password |
| 1408 | send_service_user_token: false |
| 1409 | libvirt: |
| 1410 | connection_uri: "qemu+unix:///system?socket=/run/libvirt/libvirt-sock" |
| 1411 | images_type: qcow2 |
| 1412 | images_rbd_pool: vms |
| 1413 | images_rbd_ceph_conf: /etc/ceph/ceph.conf |
| 1414 | rbd_user: cinder |
| 1415 | rbd_secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337 |
| 1416 | disk_cachemodes: "network=writeback" |
| 1417 | hw_disk_discard: unmap |
| 1418 | upgrade_levels: |
| 1419 | compute: auto |
| 1420 | cache: |
| 1421 | enabled: true |
| 1422 | backend: dogpile.cache.memcached |
| 1423 | wsgi: |
| 1424 | api_paste_config: /etc/nova/api-paste.ini |
| 1425 | oslo_messaging_notifications: |
| 1426 | driver: messagingv2 |
| 1427 | oslo_messaging_rabbit: |
| 1428 | rabbit_ha_queues: true |
| 1429 | placement: |
| 1430 | auth_type: password |
| 1431 | auth_version: v3 |
| 1432 | logging: |
| 1433 | loggers: |
| 1434 | keys: |
| 1435 | - root |
| 1436 | - nova |
| 1437 | - os.brick |
| 1438 | handlers: |
| 1439 | keys: |
| 1440 | - stdout |
| 1441 | - stderr |
| 1442 | - "null" |
| 1443 | formatters: |
| 1444 | keys: |
| 1445 | - context |
| 1446 | - default |
| 1447 | logger_root: |
| 1448 | level: WARNING |
| 1449 | handlers: 'null' |
| 1450 | logger_nova: |
| 1451 | level: INFO |
| 1452 | handlers: |
| 1453 | - stdout |
| 1454 | qualname: nova |
| 1455 | logger_os.brick: |
| 1456 | level: INFO |
| 1457 | handlers: |
| 1458 | - stdout |
| 1459 | qualname: os.brick |
| 1460 | logger_amqp: |
| 1461 | level: WARNING |
| 1462 | handlers: stderr |
| 1463 | qualname: amqp |
| 1464 | logger_amqplib: |
| 1465 | level: WARNING |
| 1466 | handlers: stderr |
| 1467 | qualname: amqplib |
| 1468 | logger_eventletwsgi: |
| 1469 | level: WARNING |
| 1470 | handlers: stderr |
| 1471 | qualname: eventlet.wsgi.server |
| 1472 | logger_sqlalchemy: |
| 1473 | level: WARNING |
| 1474 | handlers: stderr |
| 1475 | qualname: sqlalchemy |
| 1476 | logger_boto: |
| 1477 | level: WARNING |
| 1478 | handlers: stderr |
| 1479 | qualname: boto |
| 1480 | handler_null: |
| 1481 | class: logging.NullHandler |
| 1482 | formatter: default |
| 1483 | args: () |
| 1484 | handler_stdout: |
| 1485 | class: StreamHandler |
| 1486 | args: (sys.stdout,) |
| 1487 | formatter: context |
| 1488 | handler_stderr: |
| 1489 | class: StreamHandler |
| 1490 | args: (sys.stderr,) |
| 1491 | formatter: context |
| 1492 | formatter_context: |
| 1493 | class: oslo_log.formatters.ContextFormatter |
| 1494 | datefmt: "%Y-%m-%d %H:%M:%S" |
| 1495 | formatter_default: |
| 1496 | format: "%(message)s" |
| 1497 | datefmt: "%Y-%m-%d %H:%M:%S" |
| 1498 | rabbitmq: |
| 1499 | # NOTE(rk760n): adding rmq policy to mirror messages from notification queues and set expiration time for the ones |
| 1500 | policies: |
| 1501 | - vhost: "nova" |
| 1502 | name: "ha_ttl_nova" |
| 1503 | definition: |
          # mirror messages to other nodes in rmq cluster
| 1505 | ha-mode: "all" |
| 1506 | ha-sync-mode: "automatic" |
| 1507 | # 70s |
| 1508 | message-ttl: 70000 |
| 1509 | priority: 0 |
| 1510 | apply-to: all |
| 1511 | pattern: '^(?!(amq\.|reply_)).*' |
| 1512 | enable_iscsi: false |
| 1513 | archive_deleted_rows: |
| 1514 | purge_deleted_rows: false |
| 1515 | until_completion: true |
| 1516 | all_cells: false |
| 1517 | max_rows: |
| 1518 | enabled: False |
| 1519 | rows: 1000 |
| 1520 | before: |
| 1521 | enabled: false |
| 1522 | date: 'nil' |
| 1523 | |
| 1524 | # Names of secrets used by bootstrap and environmental checks |
| 1525 | secrets: |
| 1526 | identity: |
| 1527 | admin: nova-keystone-admin |
| 1528 | nova: nova-keystone-user |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 1529 | test: nova-keystone-test |
| 1530 | oslo_db: |
| 1531 | admin: nova-db-admin |
| 1532 | nova: nova-db-user |
| 1533 | oslo_db_api: |
| 1534 | admin: nova-db-api-admin |
| 1535 | nova: nova-db-api-user |
| 1536 | oslo_db_cell0: |
| 1537 | admin: nova-db-cell0-admin |
| 1538 | nova: nova-db-cell0-user |
| 1539 | oslo_messaging: |
| 1540 | admin: nova-rabbitmq-admin |
| 1541 | nova: nova-rabbitmq-user |
| 1542 | tls: |
| 1543 | compute: |
| 1544 | osapi: |
| 1545 | public: nova-tls-public |
| 1546 | internal: nova-tls-api |
| 1547 | compute_novnc_proxy: |
| 1548 | novncproxy: |
| 1549 | public: nova-novncproxy-tls-public |
| 1550 | internal: nova-novncproxy-tls-proxy |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 1551 | compute_metadata: |
| 1552 | metadata: |
| 1553 | public: metadata-tls-public |
| 1554 | internal: metadata-tls-metadata |
| 1555 | compute_spice_proxy: |
| 1556 | spiceproxy: |
| 1557 | internal: nova-tls-spiceproxy |
Oleksandr Kozachenko | a10d785 | 2023-02-02 22:01:16 +0100 | [diff] [blame] | 1558 | oci_image_registry: |
| 1559 | nova: nova-oci-image-registry |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 1560 | |
| 1561 | # typically overridden by environmental |
| 1562 | # values, but should include all endpoints |
| 1563 | # required by this chart |
| 1564 | endpoints: |
| 1565 | cluster_domain_suffix: cluster.local |
| 1566 | local_image_registry: |
| 1567 | name: docker-registry |
| 1568 | namespace: docker-registry |
| 1569 | hosts: |
| 1570 | default: localhost |
| 1571 | internal: docker-registry |
| 1572 | node: localhost |
| 1573 | host_fqdn_override: |
| 1574 | default: null |
| 1575 | port: |
| 1576 | registry: |
| 1577 | node: 5000 |
Oleksandr Kozachenko | a10d785 | 2023-02-02 22:01:16 +0100 | [diff] [blame] | 1578 | oci_image_registry: |
| 1579 | name: oci-image-registry |
| 1580 | namespace: oci-image-registry |
| 1581 | auth: |
| 1582 | enabled: false |
| 1583 | nova: |
| 1584 | username: nova |
| 1585 | password: password |
| 1586 | hosts: |
| 1587 | default: localhost |
| 1588 | host_fqdn_override: |
| 1589 | default: null |
| 1590 | port: |
| 1591 | registry: |
| 1592 | default: null |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 1593 | oslo_db: |
| 1594 | auth: |
| 1595 | admin: |
| 1596 | username: root |
| 1597 | password: password |
| 1598 | secret: |
| 1599 | tls: |
| 1600 | internal: mariadb-tls-direct |
| 1601 | nova: |
| 1602 | username: nova |
| 1603 | password: password |
| 1604 | hosts: |
| 1605 | default: mariadb |
| 1606 | host_fqdn_override: |
| 1607 | default: null |
| 1608 | path: /nova |
| 1609 | scheme: mysql+pymysql |
| 1610 | port: |
| 1611 | mysql: |
| 1612 | default: 3306 |
| 1613 | oslo_db_api: |
| 1614 | auth: |
| 1615 | admin: |
| 1616 | username: root |
| 1617 | password: password |
| 1618 | nova: |
| 1619 | username: nova |
| 1620 | password: password |
| 1621 | hosts: |
| 1622 | default: mariadb |
| 1623 | host_fqdn_override: |
| 1624 | default: null |
| 1625 | path: /nova_api |
| 1626 | scheme: mysql+pymysql |
| 1627 | port: |
| 1628 | mysql: |
| 1629 | default: 3306 |
| 1630 | oslo_db_cell0: |
| 1631 | auth: |
| 1632 | admin: |
| 1633 | username: root |
| 1634 | password: password |
| 1635 | nova: |
| 1636 | username: nova |
| 1637 | password: password |
| 1638 | hosts: |
| 1639 | default: mariadb |
| 1640 | host_fqdn_override: |
| 1641 | default: null |
| 1642 | path: /nova_cell0 |
| 1643 | scheme: mysql+pymysql |
| 1644 | port: |
| 1645 | mysql: |
| 1646 | default: 3306 |
| 1647 | oslo_messaging: |
| 1648 | auth: |
| 1649 | admin: |
| 1650 | username: rabbitmq |
| 1651 | password: password |
| 1652 | secret: |
| 1653 | tls: |
| 1654 | internal: rabbitmq-tls-direct |
| 1655 | nova: |
| 1656 | username: nova |
| 1657 | password: password |
| 1658 | statefulset: |
| 1659 | replicas: 2 |
| 1660 | name: rabbitmq-rabbitmq |
| 1661 | hosts: |
| 1662 | default: rabbitmq |
| 1663 | host_fqdn_override: |
| 1664 | default: null |
| 1665 | path: /nova |
| 1666 | scheme: rabbit |
| 1667 | port: |
| 1668 | amqp: |
| 1669 | default: 5672 |
| 1670 | http: |
| 1671 | default: 15672 |
| 1672 | oslo_cache: |
| 1673 | auth: |
| 1674 | # NOTE(portdirect): this is used to define the value for keystone |
| 1675 | # authtoken cache encryption key, if not set it will be populated |
| 1676 | # automatically with a random value, but to take advantage of |
| 1677 | # this feature all services should be set to use the same key, |
| 1678 | # and memcache service. |
| 1679 | memcache_secret_key: null |
| 1680 | hosts: |
| 1681 | default: memcached |
| 1682 | host_fqdn_override: |
| 1683 | default: null |
| 1684 | port: |
| 1685 | memcache: |
| 1686 | default: 11211 |
| 1687 | identity: |
| 1688 | name: keystone |
| 1689 | auth: |
| 1690 | admin: |
| 1691 | region_name: RegionOne |
| 1692 | username: admin |
| 1693 | password: password |
| 1694 | project_name: admin |
| 1695 | user_domain_name: default |
| 1696 | project_domain_name: default |
| 1697 | nova: |
| 1698 | role: admin |
| 1699 | region_name: RegionOne |
| 1700 | username: nova |
| 1701 | password: password |
| 1702 | project_name: service |
| 1703 | user_domain_name: service |
| 1704 | project_domain_name: service |
| 1705 | # NOTE(portdirect): the neutron user is not managed by the nova chart |
| 1706 | # these values should match those set in the neutron chart. |
| 1707 | neutron: |
| 1708 | region_name: RegionOne |
| 1709 | project_name: service |
| 1710 | user_domain_name: service |
| 1711 | project_domain_name: service |
| 1712 | username: neutron |
| 1713 | password: password |
| 1714 | # NOTE(portdirect): the ironic user is not managed by the nova chart |
| 1715 | # these values should match those set in the ironic chart. |
| 1716 | ironic: |
| 1717 | auth_type: password |
| 1718 | auth_version: v3 |
| 1719 | region_name: RegionOne |
| 1720 | project_name: service |
| 1721 | user_domain_name: service |
| 1722 | project_domain_name: service |
| 1723 | username: ironic |
| 1724 | password: password |
| 1725 | placement: |
| 1726 | role: admin |
| 1727 | region_name: RegionOne |
| 1728 | username: placement |
| 1729 | password: password |
| 1730 | project_name: service |
| 1731 | user_domain_name: service |
| 1732 | project_domain_name: service |
| 1733 | test: |
| 1734 | role: admin |
| 1735 | region_name: RegionOne |
| 1736 | username: nova-test |
| 1737 | password: password |
| 1738 | project_name: test |
| 1739 | user_domain_name: service |
| 1740 | project_domain_name: service |
| 1741 | hosts: |
| 1742 | default: keystone |
| 1743 | internal: keystone-api |
| 1744 | host_fqdn_override: |
| 1745 | default: null |
| 1746 | path: |
| 1747 | default: /v3 |
| 1748 | scheme: |
| 1749 | default: http |
| 1750 | port: |
| 1751 | api: |
| 1752 | default: 80 |
| 1753 | internal: 5000 |
| 1754 | image: |
| 1755 | name: glance |
| 1756 | hosts: |
| 1757 | default: glance-api |
| 1758 | public: glance |
| 1759 | host_fqdn_override: |
| 1760 | default: null |
| 1761 | path: |
| 1762 | default: null |
| 1763 | scheme: |
| 1764 | default: http |
| 1765 | port: |
| 1766 | api: |
| 1767 | default: 9292 |
| 1768 | public: 80 |
| 1769 | compute: |
| 1770 | name: nova |
| 1771 | hosts: |
| 1772 | default: nova-api |
| 1773 | public: nova |
| 1774 | host_fqdn_override: |
| 1775 | default: null |
| 1776 | # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public |
| 1777 | # endpoints using the following format: |
| 1778 | # public: |
| 1779 | # host: null |
| 1780 | # tls: |
| 1781 | # crt: null |
| 1782 | # key: null |
| 1783 | path: |
| 1784 | default: "/v2.1/%(tenant_id)s" |
| 1785 | scheme: |
| 1786 | default: 'http' |
Oleksandr Kozachenko | a10d785 | 2023-02-02 22:01:16 +0100 | [diff] [blame] | 1787 | service: 'http' |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 1788 | port: |
| 1789 | api: |
| 1790 | default: 8774 |
| 1791 | public: 80 |
Oleksandr Kozachenko | a10d785 | 2023-02-02 22:01:16 +0100 | [diff] [blame] | 1792 | service: 8774 |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 1793 | novncproxy: |
| 1794 | default: 6080 |
| 1795 | compute_metadata: |
| 1796 | name: nova |
| 1797 | ip: |
      # If blank, the clusterIP and metadata_host are set dynamically
| 1799 | ingress: null |
| 1800 | hosts: |
| 1801 | default: nova-metadata |
| 1802 | public: metadata |
| 1803 | host_fqdn_override: |
| 1804 | default: null |
| 1805 | path: |
| 1806 | default: / |
| 1807 | scheme: |
| 1808 | default: 'http' |
| 1809 | port: |
| 1810 | metadata: |
| 1811 | default: 8775 |
| 1812 | public: 80 |
| 1813 | compute_novnc_proxy: |
| 1814 | name: nova |
| 1815 | hosts: |
| 1816 | default: nova-novncproxy |
| 1817 | public: novncproxy |
| 1818 | host_fqdn_override: |
| 1819 | default: null |
| 1820 | # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public |
| 1821 | # endpoints using the following format: |
| 1822 | # public: |
| 1823 | # host: null |
| 1824 | # tls: |
| 1825 | # crt: null |
| 1826 | # key: null |
| 1827 | path: |
| 1828 | default: /vnc_auto.html |
| 1829 | scheme: |
| 1830 | default: 'http' |
| 1831 | port: |
| 1832 | novnc_proxy: |
| 1833 | default: 6080 |
| 1834 | public: 80 |
| 1835 | compute_spice_proxy: |
| 1836 | name: nova |
| 1837 | hosts: |
| 1838 | default: nova-spiceproxy |
      # NOTE(review): "placement" here appears to be a copy/paste error from
      # the placement endpoint below (a spiceproxy-specific name would be
      # expected); value left unchanged for backward compatibility -- confirm
      # against existing ingress/FQDN overrides before changing.
      public: placement
| 1840 | host_fqdn_override: |
| 1841 | default: null |
| 1842 | path: |
| 1843 | default: /spice_auto.html |
| 1844 | scheme: |
| 1845 | default: 'http' |
| 1846 | port: |
| 1847 | spice_proxy: |
| 1848 | default: 6082 |
| 1849 | placement: |
| 1850 | name: placement |
| 1851 | hosts: |
| 1852 | default: placement-api |
| 1853 | public: placement |
| 1854 | host_fqdn_override: |
| 1855 | default: null |
| 1856 | path: |
| 1857 | default: / |
| 1858 | scheme: |
| 1859 | default: 'http' |
Oleksandr Kozachenko | a10d785 | 2023-02-02 22:01:16 +0100 | [diff] [blame] | 1860 | service: 'http' |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 1861 | port: |
| 1862 | api: |
| 1863 | default: 8778 |
| 1864 | public: 80 |
Oleksandr Kozachenko | a10d785 | 2023-02-02 22:01:16 +0100 | [diff] [blame] | 1865 | service: 8778 |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 1866 | network: |
| 1867 | name: neutron |
| 1868 | hosts: |
| 1869 | default: neutron-server |
| 1870 | public: neutron |
| 1871 | host_fqdn_override: |
| 1872 | default: null |
| 1873 | path: |
| 1874 | default: null |
| 1875 | scheme: |
| 1876 | default: 'http' |
| 1877 | port: |
| 1878 | api: |
| 1879 | default: 9696 |
| 1880 | public: 80 |
| 1881 | baremetal: |
| 1882 | name: ironic |
| 1883 | hosts: |
| 1884 | default: ironic-api |
| 1885 | public: ironic |
| 1886 | host_fqdn_override: |
| 1887 | default: null |
| 1888 | path: |
| 1889 | default: null |
| 1890 | scheme: |
| 1891 | default: http |
| 1892 | port: |
| 1893 | api: |
| 1894 | default: 6385 |
| 1895 | public: 80 |
| 1896 | fluentd: |
| 1897 | namespace: null |
| 1898 | name: fluentd |
| 1899 | hosts: |
| 1900 | default: fluentd-logging |
| 1901 | host_fqdn_override: |
| 1902 | default: null |
| 1903 | path: |
| 1904 | default: null |
| 1905 | scheme: 'http' |
| 1906 | port: |
| 1907 | service: |
| 1908 | default: 24224 |
| 1909 | metrics: |
| 1910 | default: 24220 |
  # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress.
  # They are used to enable the Egress K8s network policy.
| 1913 | kube_dns: |
| 1914 | namespace: kube-system |
| 1915 | name: kubernetes-dns |
| 1916 | hosts: |
| 1917 | default: kube-dns |
| 1918 | host_fqdn_override: |
| 1919 | default: null |
| 1920 | path: |
| 1921 | default: null |
| 1922 | scheme: http |
| 1923 | port: |
| 1924 | dns: |
| 1925 | default: 53 |
| 1926 | protocol: UDP |
| 1927 | ingress: |
| 1928 | namespace: null |
| 1929 | name: ingress |
| 1930 | hosts: |
| 1931 | default: ingress |
| 1932 | port: |
| 1933 | ingress: |
| 1934 | default: 80 |
| 1935 | |
| 1936 | pod: |
| 1937 | probes: |
| 1938 | rpc_timeout: 60 |
| 1939 | rpc_retries: 2 |
| 1940 | compute: |
| 1941 | default: |
| 1942 | liveness: |
| 1943 | enabled: True |
| 1944 | params: |
| 1945 | initialDelaySeconds: 120 |
| 1946 | periodSeconds: 90 |
| 1947 | timeoutSeconds: 70 |
| 1948 | readiness: |
| 1949 | enabled: True |
| 1950 | params: |
| 1951 | initialDelaySeconds: 80 |
| 1952 | periodSeconds: 90 |
| 1953 | timeoutSeconds: 70 |
| 1954 | api-metadata: |
| 1955 | default: |
| 1956 | liveness: |
| 1957 | enabled: True |
| 1958 | params: |
| 1959 | initialDelaySeconds: 30 |
| 1960 | periodSeconds: 60 |
| 1961 | timeoutSeconds: 15 |
| 1962 | readiness: |
| 1963 | enabled: True |
| 1964 | params: |
| 1965 | initialDelaySeconds: 30 |
| 1966 | periodSeconds: 60 |
| 1967 | timeoutSeconds: 15 |
| 1968 | api-osapi: |
| 1969 | default: |
| 1970 | liveness: |
| 1971 | enabled: True |
| 1972 | params: |
| 1973 | initialDelaySeconds: 30 |
| 1974 | periodSeconds: 60 |
| 1975 | timeoutSeconds: 15 |
| 1976 | readiness: |
| 1977 | enabled: True |
| 1978 | params: |
| 1979 | initialDelaySeconds: 30 |
| 1980 | periodSeconds: 60 |
| 1981 | timeoutSeconds: 15 |
| 1982 | conductor: |
| 1983 | default: |
| 1984 | liveness: |
| 1985 | enabled: True |
| 1986 | params: |
| 1987 | initialDelaySeconds: 120 |
| 1988 | periodSeconds: 90 |
| 1989 | timeoutSeconds: 70 |
| 1990 | readiness: |
| 1991 | enabled: True |
| 1992 | params: |
| 1993 | initialDelaySeconds: 80 |
| 1994 | periodSeconds: 90 |
| 1995 | timeoutSeconds: 70 |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 1996 | novncproxy: |
| 1997 | default: |
| 1998 | liveness: |
| 1999 | enabled: True |
| 2000 | params: |
| 2001 | initialDelaySeconds: 30 |
| 2002 | periodSeconds: 60 |
| 2003 | timeoutSeconds: 15 |
| 2004 | readiness: |
| 2005 | enabled: True |
| 2006 | params: |
| 2007 | initialDelaySeconds: 30 |
| 2008 | periodSeconds: 60 |
| 2009 | timeoutSeconds: 15 |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 2010 | scheduler: |
| 2011 | default: |
| 2012 | liveness: |
| 2013 | enabled: True |
| 2014 | params: |
| 2015 | initialDelaySeconds: 120 |
| 2016 | periodSeconds: 90 |
| 2017 | timeoutSeconds: 70 |
| 2018 | readiness: |
| 2019 | enabled: True |
| 2020 | params: |
| 2021 | initialDelaySeconds: 80 |
| 2022 | periodSeconds: 90 |
| 2023 | timeoutSeconds: 70 |
| 2024 | compute-spice-proxy: |
| 2025 | default: |
| 2026 | liveness: |
| 2027 | enabled: True |
| 2028 | params: |
| 2029 | initialDelaySeconds: 30 |
| 2030 | periodSeconds: 60 |
| 2031 | timeoutSeconds: 15 |
| 2032 | readiness: |
| 2033 | enabled: True |
| 2034 | params: |
| 2035 | initialDelaySeconds: 30 |
| 2036 | periodSeconds: 60 |
| 2037 | timeoutSeconds: 15 |
| 2038 | security_context: |
| 2039 | nova: |
| 2040 | pod: |
| 2041 | runAsUser: 42424 |
| 2042 | container: |
| 2043 | nova_compute_init: |
| 2044 | readOnlyRootFilesystem: true |
| 2045 | runAsUser: 0 |
| 2046 | tungstenfabric_compute_init: |
| 2047 | readOnlyRootFilesystem: true |
| 2048 | allowPrivilegeEscalation: false |
| 2049 | ceph_perms: |
| 2050 | readOnlyRootFilesystem: true |
| 2051 | runAsUser: 0 |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 2052 | nova_compute_vnc_init: |
| 2053 | readOnlyRootFilesystem: true |
| 2054 | allowPrivilegeEscalation: false |
| 2055 | nova_compute_spice_init: |
| 2056 | readOnlyRootFilesystem: true |
| 2057 | allowPrivilegeEscalation: false |
| 2058 | nova_compute: |
| 2059 | readOnlyRootFilesystem: true |
| 2060 | privileged: true |
| 2061 | nova_compute_ssh: |
| 2062 | privileged: true |
| 2063 | runAsUser: 0 |
| 2064 | nova_compute_ssh_init: |
| 2065 | runAsUser: 0 |
| 2066 | nova_api_metadata_init: |
| 2067 | readOnlyRootFilesystem: true |
| 2068 | allowPrivilegeEscalation: false |
| 2069 | nova_api: |
| 2070 | readOnlyRootFilesystem: true |
| 2071 | allowPrivilegeEscalation: false |
| 2072 | nova_osapi: |
| 2073 | readOnlyRootFilesystem: true |
| 2074 | allowPrivilegeEscalation: false |
| 2075 | nova_conductor: |
| 2076 | readOnlyRootFilesystem: true |
| 2077 | allowPrivilegeEscalation: false |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 2078 | nova_novncproxy_init: |
| 2079 | readOnlyRootFilesystem: true |
| 2080 | allowPrivilegeEscalation: false |
        # NOTE(review): "assests" is a typo for "assets", but this key name
        # presumably must match the template that consumes it -- verify the
        # chart templates before renaming.
        nova_novncproxy_init_assests:
| 2082 | readOnlyRootFilesystem: true |
| 2083 | allowPrivilegeEscalation: false |
| 2084 | nova_novncproxy: |
| 2085 | readOnlyRootFilesystem: true |
| 2086 | allowPrivilegeEscalation: false |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 2087 | nova_scheduler: |
| 2088 | readOnlyRootFilesystem: true |
| 2089 | allowPrivilegeEscalation: false |
| 2090 | nova_spiceproxy_init: |
| 2091 | readOnlyRootFilesystem: true |
| 2092 | allowPrivilegeEscalation: false |
| 2093 | nova_spiceproxy_init_assets: |
| 2094 | readOnlyRootFilesystem: true |
| 2095 | allowPrivilegeEscalation: false |
| 2096 | nova_spiceproxy: |
| 2097 | readOnlyRootFilesystem: true |
| 2098 | allowPrivilegeEscalation: false |
| 2099 | bootstrap: |
| 2100 | pod: |
| 2101 | runAsUser: 42424 |
| 2102 | container: |
| 2103 | nova_wait_for_computes_init: |
| 2104 | readOnlyRootFilesystem: true |
| 2105 | allowPrivilegeEscalation: false |
| 2106 | bootstrap: |
| 2107 | readOnlyRootFilesystem: true |
| 2108 | allowPrivilegeEscalation: false |
| 2109 | nova_cell_setup: |
| 2110 | pod: |
| 2111 | runAsUser: 42424 |
| 2112 | container: |
| 2113 | nova_wait_for_computes_init: |
| 2114 | readOnlyRootFilesystem: true |
| 2115 | allowPrivilegeEscalation: false |
| 2116 | nova_cell_setup_init: |
| 2117 | readOnlyRootFilesystem: true |
| 2118 | allowPrivilegeEscalation: false |
| 2119 | nova_cell_setup: |
| 2120 | readOnlyRootFilesystem: true |
| 2121 | allowPrivilegeEscalation: false |
| 2122 | archive_deleted_rows: |
| 2123 | pod: |
| 2124 | runAsUser: 42424 |
| 2125 | container: |
| 2126 | nova_archive_deleted_rows_init: |
| 2127 | readOnlyRootFilesystem: true |
| 2128 | allowPrivilegeEscalation: false |
| 2129 | nova_archive_deleted_rows: |
| 2130 | readOnlyRootFilesystem: true |
| 2131 | allowPrivilegeEscalation: false |
| 2132 | cell_setup: |
| 2133 | pod: |
| 2134 | runAsUser: 42424 |
| 2135 | container: |
| 2136 | nova_cell_setup: |
| 2137 | readOnlyRootFilesystem: true |
| 2138 | allowPrivilegeEscalation: false |
| 2139 | service_cleaner: |
| 2140 | pod: |
| 2141 | runAsUser: 42424 |
| 2142 | container: |
| 2143 | nova_service_cleaner: |
| 2144 | readOnlyRootFilesystem: true |
| 2145 | allowPrivilegeEscalation: false |
| 2146 | use_fqdn: |
    # NOTE: If the option "host" is not specified in nova.conf, the host name
    # shown for the hypervisor defaults to the short name of the host.
    # Setting this option to true causes $(hostname --fqdn) to be used as the
    # host name by default. If the short name $(hostname --short) is desired,
    # set the option to false. Specifying a host in nova.conf via the conf:
    # section will supersede the value of this option.
| 2153 | compute: true |
| 2154 | affinity: |
| 2155 | anti: |
| 2156 | type: |
| 2157 | default: preferredDuringSchedulingIgnoredDuringExecution |
| 2158 | topologyKey: |
| 2159 | default: kubernetes.io/hostname |
| 2160 | weight: |
| 2161 | default: 10 |
Oleksandr Kozachenko | a10d785 | 2023-02-02 22:01:16 +0100 | [diff] [blame] | 2162 | tolerations: |
| 2163 | nova: |
| 2164 | enabled: false |
| 2165 | tolerations: |
| 2166 | - key: node-role.kubernetes.io/master |
| 2167 | operator: Exists |
| 2168 | effect: NoSchedule |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 2169 | mounts: |
| 2170 | nova_compute: |
| 2171 | init_container: null |
| 2172 | nova_compute: |
| 2173 | volumeMounts: |
| 2174 | volumes: |
| 2175 | nova_compute_ironic: |
| 2176 | init_container: null |
| 2177 | nova_compute_ironic: |
| 2178 | volumeMounts: |
| 2179 | volumes: |
| 2180 | nova_api_metadata: |
| 2181 | init_container: null |
| 2182 | nova_api_metadata: |
| 2183 | volumeMounts: |
| 2184 | volumes: |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 2185 | nova_api_osapi: |
| 2186 | init_container: null |
| 2187 | nova_api_osapi: |
| 2188 | volumeMounts: |
| 2189 | volumes: |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 2190 | nova_conductor: |
| 2191 | init_container: null |
| 2192 | nova_conductor: |
| 2193 | volumeMounts: |
| 2194 | volumes: |
| 2195 | nova_scheduler: |
| 2196 | init_container: null |
| 2197 | nova_scheduler: |
| 2198 | volumeMounts: |
| 2199 | volumes: |
| 2200 | nova_bootstrap: |
| 2201 | init_container: null |
| 2202 | nova_bootstrap: |
| 2203 | volumeMounts: |
| 2204 | volumes: |
| 2205 | nova_tests: |
| 2206 | init_container: null |
| 2207 | nova_tests: |
| 2208 | volumeMounts: |
| 2209 | volumes: |
| 2210 | nova_novncproxy: |
| 2211 | init_novncproxy: null |
| 2212 | nova_novncproxy: |
| 2213 | volumeMounts: |
| 2214 | volumes: |
| 2215 | nova_spiceproxy: |
| 2216 | init_spiceproxy: null |
| 2217 | nova_spiceproxy: |
| 2218 | volumeMounts: |
| 2219 | volumes: |
| 2220 | nova_db_sync: |
| 2221 | nova_db_sync: |
| 2222 | volumeMounts: |
| 2223 | volumes: |
| 2224 | useHostNetwork: |
| 2225 | novncproxy: true |
| 2226 | replicas: |
| 2227 | api_metadata: 1 |
| 2228 | compute_ironic: 1 |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 2229 | osapi: 1 |
| 2230 | conductor: 1 |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 2231 | scheduler: 1 |
| 2232 | novncproxy: 1 |
| 2233 | spiceproxy: 1 |
| 2234 | lifecycle: |
| 2235 | upgrades: |
| 2236 | deployments: |
| 2237 | revision_history: 3 |
| 2238 | pod_replacement_strategy: RollingUpdate |
| 2239 | rolling_update: |
| 2240 | max_unavailable: 1 |
| 2241 | max_surge: 3 |
| 2242 | daemonsets: |
| 2243 | pod_replacement_strategy: RollingUpdate |
| 2244 | compute: |
| 2245 | enabled: true |
| 2246 | min_ready_seconds: 0 |
| 2247 | max_unavailable: 1 |
| 2248 | disruption_budget: |
| 2249 | metadata: |
| 2250 | min_available: 0 |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 2251 | osapi: |
| 2252 | min_available: 0 |
| 2253 | termination_grace_period: |
| 2254 | metadata: |
| 2255 | timeout: 30 |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 2256 | osapi: |
| 2257 | timeout: 30 |
| 2258 | resources: |
| 2259 | enabled: false |
| 2260 | compute: |
| 2261 | requests: |
| 2262 | memory: "128Mi" |
| 2263 | cpu: "100m" |
| 2264 | limits: |
| 2265 | memory: "1024Mi" |
| 2266 | cpu: "2000m" |
| 2267 | compute_ironic: |
| 2268 | requests: |
| 2269 | memory: "128Mi" |
| 2270 | cpu: "100m" |
| 2271 | limits: |
| 2272 | memory: "1024Mi" |
| 2273 | cpu: "2000m" |
| 2274 | api_metadata: |
| 2275 | requests: |
| 2276 | memory: "128Mi" |
| 2277 | cpu: "100m" |
| 2278 | limits: |
| 2279 | memory: "1024Mi" |
| 2280 | cpu: "2000m" |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 2281 | api: |
| 2282 | requests: |
| 2283 | memory: "128Mi" |
| 2284 | cpu: "100m" |
| 2285 | limits: |
| 2286 | memory: "1024Mi" |
| 2287 | cpu: "2000m" |
| 2288 | conductor: |
| 2289 | requests: |
| 2290 | memory: "128Mi" |
| 2291 | cpu: "100m" |
| 2292 | limits: |
| 2293 | memory: "1024Mi" |
| 2294 | cpu: "2000m" |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 2295 | scheduler: |
| 2296 | requests: |
| 2297 | memory: "128Mi" |
| 2298 | cpu: "100m" |
| 2299 | limits: |
| 2300 | memory: "1024Mi" |
| 2301 | cpu: "2000m" |
| 2302 | ssh: |
| 2303 | requests: |
| 2304 | memory: "128Mi" |
| 2305 | cpu: "100m" |
| 2306 | limits: |
| 2307 | memory: "1024Mi" |
| 2308 | cpu: "2000m" |
| 2309 | novncproxy: |
| 2310 | requests: |
| 2311 | memory: "128Mi" |
| 2312 | cpu: "100m" |
| 2313 | limits: |
| 2314 | memory: "1024Mi" |
| 2315 | cpu: "2000m" |
| 2316 | spiceproxy: |
| 2317 | requests: |
| 2318 | memory: "128Mi" |
| 2319 | cpu: "100m" |
| 2320 | limits: |
| 2321 | memory: "1024Mi" |
| 2322 | cpu: "2000m" |
| 2323 | jobs: |
| 2324 | bootstrap: |
| 2325 | requests: |
| 2326 | memory: "128Mi" |
| 2327 | cpu: "100m" |
| 2328 | limits: |
| 2329 | memory: "1024Mi" |
| 2330 | cpu: "2000m" |
| 2331 | db_init: |
| 2332 | requests: |
| 2333 | memory: "128Mi" |
| 2334 | cpu: "100m" |
| 2335 | limits: |
| 2336 | memory: "1024Mi" |
| 2337 | cpu: "2000m" |
| 2338 | rabbit_init: |
| 2339 | requests: |
| 2340 | memory: "128Mi" |
| 2341 | cpu: "100m" |
| 2342 | limits: |
| 2343 | memory: "1024Mi" |
| 2344 | cpu: "2000m" |
| 2345 | db_sync: |
| 2346 | requests: |
| 2347 | memory: "128Mi" |
| 2348 | cpu: "100m" |
| 2349 | limits: |
| 2350 | memory: "1024Mi" |
| 2351 | cpu: "2000m" |
| 2352 | archive_deleted_rows: |
| 2353 | requests: |
| 2354 | memory: "128Mi" |
| 2355 | cpu: "100m" |
| 2356 | limits: |
| 2357 | memory: "1024Mi" |
| 2358 | cpu: "2000m" |
| 2359 | db_drop: |
| 2360 | requests: |
| 2361 | memory: "128Mi" |
| 2362 | cpu: "100m" |
| 2363 | limits: |
| 2364 | memory: "1024Mi" |
| 2365 | cpu: "2000m" |
| 2366 | ks_endpoints: |
| 2367 | requests: |
| 2368 | memory: "128Mi" |
| 2369 | cpu: "100m" |
| 2370 | limits: |
| 2371 | memory: "1024Mi" |
| 2372 | cpu: "2000m" |
| 2373 | ks_service: |
| 2374 | requests: |
| 2375 | memory: "128Mi" |
| 2376 | cpu: "100m" |
| 2377 | limits: |
| 2378 | memory: "1024Mi" |
| 2379 | cpu: "2000m" |
| 2380 | ks_user: |
| 2381 | requests: |
| 2382 | memory: "128Mi" |
| 2383 | cpu: "100m" |
| 2384 | limits: |
| 2385 | memory: "1024Mi" |
| 2386 | cpu: "2000m" |
| 2387 | tests: |
| 2388 | requests: |
| 2389 | memory: "128Mi" |
| 2390 | cpu: "100m" |
| 2391 | limits: |
| 2392 | memory: "1024Mi" |
| 2393 | cpu: "2000m" |
| 2394 | cell_setup: |
| 2395 | requests: |
| 2396 | memory: "128Mi" |
| 2397 | cpu: "100m" |
| 2398 | limits: |
| 2399 | memory: "1024Mi" |
| 2400 | cpu: "2000m" |
| 2401 | service_cleaner: |
| 2402 | requests: |
| 2403 | memory: "128Mi" |
| 2404 | cpu: "100m" |
| 2405 | limits: |
| 2406 | memory: "1024Mi" |
| 2407 | cpu: "2000m" |
| 2408 | image_repo_sync: |
| 2409 | requests: |
| 2410 | memory: "128Mi" |
| 2411 | cpu: "100m" |
| 2412 | limits: |
| 2413 | memory: "1024Mi" |
| 2414 | cpu: "2000m" |
| 2415 | |
| 2416 | network_policy: |
| 2417 | nova: |
| 2418 | # TODO(lamt): Need to tighten this ingress for security. |
| 2419 | ingress: |
| 2420 | - {} |
| 2421 | egress: |
| 2422 | - {} |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 2423 | |
| 2424 | # NOTE(helm_hook): helm_hook might break for helm2 binary. |
| 2425 | # set helm3_hook: false when using the helm2 binary. |
| 2426 | helm3_hook: true |
| 2427 | |
| 2428 | health_probe: |
| 2429 | logging: |
| 2430 | level: ERROR |
| 2431 | |
Oleksandr Kozachenko | a10d785 | 2023-02-02 22:01:16 +0100 | [diff] [blame] | 2432 | tls: |
| 2433 | identity: false |
| 2434 | oslo_messaging: false |
| 2435 | oslo_db: false |
| 2436 | |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 2437 | manifests: |
| 2438 | certificates: false |
| 2439 | configmap_bin: true |
| 2440 | configmap_etc: true |
| 2441 | cron_job_cell_setup: true |
| 2442 | cron_job_service_cleaner: true |
| 2443 | cron_job_archive_deleted_rows: false |
| 2444 | daemonset_compute: true |
| 2445 | deployment_api_metadata: true |
| 2446 | deployment_api_osapi: true |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 2447 | deployment_conductor: true |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 2448 | deployment_novncproxy: true |
| 2449 | deployment_spiceproxy: true |
| 2450 | deployment_scheduler: true |
| 2451 | ingress_metadata: true |
| 2452 | ingress_novncproxy: true |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 2453 | ingress_osapi: true |
| 2454 | job_bootstrap: true |
| 2455 | job_db_init: true |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 2456 | job_db_sync: true |
| 2457 | job_db_drop: false |
| 2458 | job_image_repo_sync: true |
| 2459 | job_rabbit_init: true |
| 2460 | job_ks_endpoints: true |
| 2461 | job_ks_service: true |
| 2462 | job_ks_user: true |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 2463 | job_cell_setup: true |
| 2464 | pdb_metadata: true |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 2465 | pdb_osapi: true |
| 2466 | pod_rally_test: true |
| 2467 | network_policy: false |
| 2468 | secret_db_api: true |
| 2469 | secret_db_cell0: true |
| 2470 | secret_db: true |
| 2471 | secret_ingress_tls: true |
| 2472 | secret_keystone: true |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 2473 | secret_rabbitmq: true |
Oleksandr Kozachenko | a10d785 | 2023-02-02 22:01:16 +0100 | [diff] [blame] | 2474 | secret_registry: true |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 2475 | service_ingress_metadata: true |
| 2476 | service_ingress_novncproxy: true |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 2477 | service_ingress_osapi: true |
| 2478 | service_metadata: true |
Mohammed Naser | f3f59a7 | 2023-01-15 21:02:04 -0500 | [diff] [blame] | 2479 | service_novncproxy: true |
| 2480 | service_spiceproxy: true |
| 2481 | service_osapi: true |
| 2482 | statefulset_compute_ironic: false |
| 2483 | ... |