chore: add more pre-commit hooks
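
Add black and isort for Python formatting and import ordering, plus the
standard end-of-file-fixer and trailing-whitespace hooks, and apply them
across the repository.

To reproduce the formatting locally (a minimal sketch, assuming pre-commit
is already installed):

    pre-commit install            # optional: set up the git hook
    pre-commit run --all-files    # apply all configured hooks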
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 54088b0..7e90a18 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -6,7 +6,7 @@
build-prometheus-ethtool-exporter:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3.0.2
+ - uses: actions/checkout@v3.0.2
with:
fetch-depth: 0
- uses: tj-actions/changed-files@v29.0.7
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index cc39e47..b10d6c8 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -3,4 +3,21 @@
rev: v2.0.0
hooks:
- id: conventional-pre-commit
- stages: [commit-msg]
+ stages:
+ - commit-msg
+
+ - repo: https://github.com/psf/black
+ rev: 22.8.0
+ hooks:
+ - id: black
+
+ - repo: https://github.com/pycqa/isort
+ rev: 5.10.1
+ hooks:
+ - id: isort
+
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.3.0
+ hooks:
+ - id: end-of-file-fixer
+ - id: trailing-whitespace
diff --git a/doc/requirements.txt b/doc/requirements.txt
index e796768..8e38ac0 100644
--- a/doc/requirements.txt
+++ b/doc/requirements.txt
@@ -3,4 +3,4 @@
sphinx_rtd_theme
reno[sphinx]
https://github.com/ypid/yaml4rst/archive/master.tar.gz
-https://github.com/debops/yaml2rst/archive/master.tar.gz
\ No newline at end of file
+https://github.com/debops/yaml2rst/archive/master.tar.gz
diff --git a/doc/source/_templates/yaml4rst/defaults_header.j2 b/doc/source/_templates/yaml4rst/defaults_header.j2
index b15a2e1..5634148 100644
--- a/doc/source/_templates/yaml4rst/defaults_header.j2
+++ b/doc/source/_templates/yaml4rst/defaults_header.j2
@@ -8,4 +8,4 @@
# =================
# .. contents:: Sections
-# :local:
\ No newline at end of file
+# :local:
diff --git a/doc/source/conf.py b/doc/source/conf.py
index ce1d48d..d846a1b 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -22,38 +22,40 @@
# import sys
# sys.path.insert(0, os.path.abspath('.'))
+import glob
+
# -- yaml2rst ----------------------------------------------------------------
import os
-import glob
+import pathlib
+
import yaml2rst
from yaml4rst.reformatter import YamlRstReformatter
-import pathlib
for defaults_file in glob.glob("../../roles/*/defaults/main.yml"):
role_name = defaults_file.split("/")[-3]
YamlRstReformatter._HEADER_END_LINES = {
- 'yaml4rst': [
- '# Default variables',
- '# :local:',
- '# .. contents:: Sections',
- '# .. include:: includes/all.rst',
- '# .. include:: includes/role.rst',
- '# .. include:: ../../../includes/global.rst',
- '# -----------------',
+ "yaml4rst": [
+ "# Default variables",
+ "# :local:",
+ "# .. contents:: Sections",
+ "# .. include:: includes/all.rst",
+ "# .. include:: includes/role.rst",
+ "# .. include:: ../../../includes/global.rst",
+ "# -----------------",
],
}
reformatter = YamlRstReformatter(
- preset='yaml4rst',
+ preset="yaml4rst",
template_path=os.path.join(
os.path.abspath(os.path.dirname(__file__)),
- '_templates',
+ "_templates",
),
config={
- 'ansible_full_role_name': f"vexxhost.atmosphere.{role_name}",
- 'ansible_role_name': role_name,
- }
+ "ansible_full_role_name": f"vexxhost.atmosphere.{role_name}",
+ "ansible_role_name": role_name,
+ },
)
reformatter.read_file(defaults_file)
reformatter.reformat()
@@ -67,16 +69,16 @@
rst_content = yaml2rst.convert_file(
defaults_file,
f"roles/{role_name}/defaults/main.rst",
- strip_regex=r'\s*(:?\[{3}|\]{3})\d?$',
- yaml_strip_regex=r'^\s{66,67}#\s\]{3}\d?$',
+ strip_regex=r"\s*(:?\[{3}|\]{3})\d?$",
+ yaml_strip_regex=r"^\s{66,67}#\s\]{3}\d?$",
)
# -- Project information -----------------------------------------------------
-project = 'Atmosphere'
-copyright = '2022, VEXXHOST, Inc.'
-author = 'VEXXHOST, Inc.'
+project = "Atmosphere"
+copyright = "2022, VEXXHOST, Inc."
+author = "VEXXHOST, Inc."
# -- General configuration ---------------------------------------------------
@@ -85,11 +87,11 @@
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
- 'reno.sphinxext',
+ "reno.sphinxext",
]
# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
@@ -102,9 +104,9 @@
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
-html_theme = 'sphinx_rtd_theme'
+html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
diff --git a/doc/source/releasenotes.rst b/doc/source/releasenotes.rst
index f182f00..dbd6f82 100644
--- a/doc/source/releasenotes.rst
+++ b/doc/source/releasenotes.rst
@@ -1,4 +1,4 @@
Release Notes
=============
-.. release-notes::
\ No newline at end of file
+.. release-notes::
diff --git a/doc/source/roles/build_openstack_requirements/index.rst b/doc/source/roles/build_openstack_requirements/index.rst
index bcdfa7e..0398a5c 100644
--- a/doc/source/roles/build_openstack_requirements/index.rst
+++ b/doc/source/roles/build_openstack_requirements/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/ceph_csi_rbd/index.rst b/doc/source/roles/ceph_csi_rbd/index.rst
index 4148d09..08ae6ff 100644
--- a/doc/source/roles/ceph_csi_rbd/index.rst
+++ b/doc/source/roles/ceph_csi_rbd/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/ceph_mon/index.rst b/doc/source/roles/ceph_mon/index.rst
index 3cdec24..e960778 100644
--- a/doc/source/roles/ceph_mon/index.rst
+++ b/doc/source/roles/ceph_mon/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/ceph_osd/index.rst b/doc/source/roles/ceph_osd/index.rst
index 3591ee1..566f156 100644
--- a/doc/source/roles/ceph_osd/index.rst
+++ b/doc/source/roles/ceph_osd/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/ceph_repository/index.rst b/doc/source/roles/ceph_repository/index.rst
index 090527f..2f4a3ea 100644
--- a/doc/source/roles/ceph_repository/index.rst
+++ b/doc/source/roles/ceph_repository/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/cert_manager/index.rst b/doc/source/roles/cert_manager/index.rst
index 81a18b8..43f12e5 100644
--- a/doc/source/roles/cert_manager/index.rst
+++ b/doc/source/roles/cert_manager/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/containerd/index.rst b/doc/source/roles/containerd/index.rst
index 0ebf780..18efcb9 100644
--- a/doc/source/roles/containerd/index.rst
+++ b/doc/source/roles/containerd/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/csi/index.rst b/doc/source/roles/csi/index.rst
index 3cf76af..ddf620b 100644
--- a/doc/source/roles/csi/index.rst
+++ b/doc/source/roles/csi/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/helm/index.rst b/doc/source/roles/helm/index.rst
index 6c8de12..fee6c74 100644
--- a/doc/source/roles/helm/index.rst
+++ b/doc/source/roles/helm/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/ipmi_exporter/index.rst b/doc/source/roles/ipmi_exporter/index.rst
index 28dbfba..cfd9ef8 100644
--- a/doc/source/roles/ipmi_exporter/index.rst
+++ b/doc/source/roles/ipmi_exporter/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/keepalived/index.rst b/doc/source/roles/keepalived/index.rst
index de5844e..f358c35 100644
--- a/doc/source/roles/keepalived/index.rst
+++ b/doc/source/roles/keepalived/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/kube_prometheus_stack/index.rst b/doc/source/roles/kube_prometheus_stack/index.rst
index 3ed41da..5a32ad9 100644
--- a/doc/source/roles/kube_prometheus_stack/index.rst
+++ b/doc/source/roles/kube_prometheus_stack/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/kubernetes/index.rst b/doc/source/roles/kubernetes/index.rst
index a8b9bcc..68f6ec7 100644
--- a/doc/source/roles/kubernetes/index.rst
+++ b/doc/source/roles/kubernetes/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/openstack_cli/index.rst b/doc/source/roles/openstack_cli/index.rst
index 246af3b..d98260e 100644
--- a/doc/source/roles/openstack_cli/index.rst
+++ b/doc/source/roles/openstack_cli/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/openstack_exporter/index.rst b/doc/source/roles/openstack_exporter/index.rst
index 9cedd84..296257d 100644
--- a/doc/source/roles/openstack_exporter/index.rst
+++ b/doc/source/roles/openstack_exporter/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/openstack_helm_barbican/index.rst b/doc/source/roles/openstack_helm_barbican/index.rst
index 7a449d8..5a26c99 100644
--- a/doc/source/roles/openstack_helm_barbican/index.rst
+++ b/doc/source/roles/openstack_helm_barbican/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/openstack_helm_cinder/index.rst b/doc/source/roles/openstack_helm_cinder/index.rst
index f9fb91c..5bc6b01 100644
--- a/doc/source/roles/openstack_helm_cinder/index.rst
+++ b/doc/source/roles/openstack_helm_cinder/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/openstack_helm_endpoints/index.rst b/doc/source/roles/openstack_helm_endpoints/index.rst
index aad51b7..a2796b8 100644
--- a/doc/source/roles/openstack_helm_endpoints/index.rst
+++ b/doc/source/roles/openstack_helm_endpoints/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/openstack_helm_glance/index.rst b/doc/source/roles/openstack_helm_glance/index.rst
index a9868c6..c16251f 100644
--- a/doc/source/roles/openstack_helm_glance/index.rst
+++ b/doc/source/roles/openstack_helm_glance/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/openstack_helm_heat/index.rst b/doc/source/roles/openstack_helm_heat/index.rst
index c77c442..b2f4435 100644
--- a/doc/source/roles/openstack_helm_heat/index.rst
+++ b/doc/source/roles/openstack_helm_heat/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/openstack_helm_horizon/index.rst b/doc/source/roles/openstack_helm_horizon/index.rst
index c017ea4..71079cf 100644
--- a/doc/source/roles/openstack_helm_horizon/index.rst
+++ b/doc/source/roles/openstack_helm_horizon/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/openstack_helm_infra_ceph_provisioners/index.rst b/doc/source/roles/openstack_helm_infra_ceph_provisioners/index.rst
index 83a0d64..7e6f0ca 100644
--- a/doc/source/roles/openstack_helm_infra_ceph_provisioners/index.rst
+++ b/doc/source/roles/openstack_helm_infra_ceph_provisioners/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/openstack_helm_infra_libvirt/index.rst b/doc/source/roles/openstack_helm_infra_libvirt/index.rst
index 355ff12..b2b7b35 100644
--- a/doc/source/roles/openstack_helm_infra_libvirt/index.rst
+++ b/doc/source/roles/openstack_helm_infra_libvirt/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/openstack_helm_infra_memcached/index.rst b/doc/source/roles/openstack_helm_infra_memcached/index.rst
index 8e39b8e..cf020cd 100644
--- a/doc/source/roles/openstack_helm_infra_memcached/index.rst
+++ b/doc/source/roles/openstack_helm_infra_memcached/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/openstack_helm_infra_openvswitch/index.rst b/doc/source/roles/openstack_helm_infra_openvswitch/index.rst
index 6f7e857..57ab7cf 100644
--- a/doc/source/roles/openstack_helm_infra_openvswitch/index.rst
+++ b/doc/source/roles/openstack_helm_infra_openvswitch/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/openstack_helm_ingress/index.rst b/doc/source/roles/openstack_helm_ingress/index.rst
index b068625..7a126e3 100644
--- a/doc/source/roles/openstack_helm_ingress/index.rst
+++ b/doc/source/roles/openstack_helm_ingress/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/openstack_helm_keystone/index.rst b/doc/source/roles/openstack_helm_keystone/index.rst
index bb37000..c928143 100644
--- a/doc/source/roles/openstack_helm_keystone/index.rst
+++ b/doc/source/roles/openstack_helm_keystone/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/openstack_helm_neutron/index.rst b/doc/source/roles/openstack_helm_neutron/index.rst
index a84eff8..4cc5106 100644
--- a/doc/source/roles/openstack_helm_neutron/index.rst
+++ b/doc/source/roles/openstack_helm_neutron/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/openstack_helm_nova/index.rst b/doc/source/roles/openstack_helm_nova/index.rst
index 5fbfafb..a4b3ff5 100644
--- a/doc/source/roles/openstack_helm_nova/index.rst
+++ b/doc/source/roles/openstack_helm_nova/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/openstack_helm_placement/index.rst b/doc/source/roles/openstack_helm_placement/index.rst
index f5f67e3..87157c4 100644
--- a/doc/source/roles/openstack_helm_placement/index.rst
+++ b/doc/source/roles/openstack_helm_placement/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/openstack_helm_senlin/index.rst b/doc/source/roles/openstack_helm_senlin/index.rst
index 982967e..085df9c 100644
--- a/doc/source/roles/openstack_helm_senlin/index.rst
+++ b/doc/source/roles/openstack_helm_senlin/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/roles/openstack_helm_tempest/index.rst b/doc/source/roles/openstack_helm_tempest/index.rst
index 5a084e5..f9cf349 100644
--- a/doc/source/roles/openstack_helm_tempest/index.rst
+++ b/doc/source/roles/openstack_helm_tempest/index.rst
@@ -7,4 +7,4 @@
.. toctree::
:maxdepth: 2
- defaults/main
\ No newline at end of file
+ defaults/main
diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst
index de66057..5be5b8e 100644
--- a/doc/source/user/index.rst
+++ b/doc/source/user/index.rst
@@ -4,4 +4,4 @@
.. toctree::
:maxdepth: 1
- quickstart
\ No newline at end of file
+ quickstart
diff --git a/docs/certificates.md b/docs/certificates.md
index baedc4d..8beefbe 100644
--- a/docs/certificates.md
+++ b/docs/certificates.md
@@ -85,4 +85,4 @@
tls.key: |
CA_PRIVATE_KEY_HERE
```
-NOTE: If your issuer represents an intermediate, ensure that tls.crt contains the issuer's full chain in the correct order: issuer -> intermediate(s) -> root.
\ No newline at end of file
+NOTE: If your issuer represents an intermediate, ensure that tls.crt contains the issuer's full chain in the correct order: issuer -> intermediate(s) -> root.
diff --git a/molecule/default/requirements.txt b/molecule/default/requirements.txt
index 1bcbc4b..862d238 100644
--- a/molecule/default/requirements.txt
+++ b/molecule/default/requirements.txt
@@ -1,3 +1,3 @@
molecule==3.5.2 # https://github.com/ansible-community/molecule/issues/3435
openstacksdk==0.61.0
-netaddr
\ No newline at end of file
+netaddr
diff --git a/playbooks/cleanup.yml b/playbooks/cleanup.yml
index 49031d4..47933af 100644
--- a/playbooks/cleanup.yml
+++ b/playbooks/cleanup.yml
@@ -32,4 +32,4 @@
kind: PersistentVolumeClaim
namespace: openstack
name: "rabbitmq-data-rabbitmq-rabbitmq-{{ item }}"
- loop: "{{ range(0, 3) | list }}"
\ No newline at end of file
+ loop: "{{ range(0, 3) | list }}"
diff --git a/playbooks/generate_workspace.yml b/playbooks/generate_workspace.yml
index c072d94..9214452 100644
--- a/playbooks/generate_workspace.yml
+++ b/playbooks/generate_workspace.yml
@@ -180,7 +180,7 @@
ansible.builtin.file:
path: "{{ _endpoints_path }}"
state: touch
-
+
- name: Load the current endpoints into a variable
ansible.builtin.include_vars:
file: "{{ _endpoints_path }}"
diff --git a/playbooks/site.yml b/playbooks/site.yml
index f1eaefb..d27b276 100644
--- a/playbooks/site.yml
+++ b/playbooks/site.yml
@@ -15,4 +15,4 @@
- import_playbook: vexxhost.atmosphere.ceph
- import_playbook: vexxhost.atmosphere.kubernetes
- import_playbook: vexxhost.atmosphere.openstack
-- import_playbook: vexxhost.atmosphere.cleanup
\ No newline at end of file
+- import_playbook: vexxhost.atmosphere.cleanup
diff --git a/plugins/module_utils/ca_common.py b/plugins/module_utils/ca_common.py
index 380463b..5336582 100644
--- a/plugins/module_utils/ca_common.py
+++ b/plugins/module_utils/ca_common.py
@@ -1,26 +1,28 @@
-import os
import datetime
+import os
-def generate_ceph_cmd(sub_cmd, args, user_key=None, cluster='ceph', user='client.admin', container_image=None, interactive=False):
- '''
+def generate_ceph_cmd(
+ sub_cmd,
+ args,
+ user_key=None,
+ cluster="ceph",
+ user="client.admin",
+ container_image=None,
+ interactive=False,
+):
+ """
Generate 'ceph' command line to execute
- '''
+ """
if not user_key:
- user_key = '/etc/ceph/{}.{}.keyring'.format(cluster, user)
+ user_key = "/etc/ceph/{}.{}.keyring".format(cluster, user)
cmd = pre_generate_ceph_cmd(
- container_image=container_image, interactive=interactive)
+ container_image=container_image, interactive=interactive
+ )
- base_cmd = [
- '-n',
- user,
- '-k',
- user_key,
- '--cluster',
- cluster
- ]
+ base_cmd = ["-n", user, "-k", user_key, "--cluster", cluster]
base_cmd.extend(sub_cmd)
cmd.extend(base_cmd + args)
@@ -28,32 +30,40 @@
def container_exec(binary, container_image, interactive=False):
- '''
+ """
Build the docker CLI to run a command inside a container
- '''
+ """
- container_binary = os.getenv('CEPH_CONTAINER_BINARY')
- command_exec = [container_binary, 'run']
+ container_binary = os.getenv("CEPH_CONTAINER_BINARY")
+ command_exec = [container_binary, "run"]
if interactive:
- command_exec.extend(['--interactive'])
+ command_exec.extend(["--interactive"])
- command_exec.extend(['--rm',
- '--net=host',
- '-v', '/etc/ceph:/etc/ceph:z',
- '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
- '-v', '/var/log/ceph/:/var/log/ceph/:z',
- '--entrypoint=' + binary, container_image])
+ command_exec.extend(
+ [
+ "--rm",
+ "--net=host",
+ "-v",
+ "/etc/ceph:/etc/ceph:z",
+ "-v",
+ "/var/lib/ceph/:/var/lib/ceph/:z",
+ "-v",
+ "/var/log/ceph/:/var/log/ceph/:z",
+ "--entrypoint=" + binary,
+ container_image,
+ ]
+ )
return command_exec
def is_containerized():
- '''
+ """
Check if we are running on a containerized cluster
- '''
+ """
- if 'CEPH_CONTAINER_IMAGE' in os.environ:
- container_image = os.getenv('CEPH_CONTAINER_IMAGE')
+ if "CEPH_CONTAINER_IMAGE" in os.environ:
+ container_image = os.getenv("CEPH_CONTAINER_IMAGE")
else:
container_image = None
@@ -61,21 +71,21 @@
def pre_generate_ceph_cmd(container_image=None, interactive=False):
- '''
+ """
    Generate ceph prefix command
- '''
+ """
if container_image:
- cmd = container_exec('ceph', container_image, interactive=interactive)
+ cmd = container_exec("ceph", container_image, interactive=interactive)
else:
- cmd = ['ceph']
+ cmd = ["ceph"]
return cmd
def exec_command(module, cmd, stdin=None):
- '''
+ """
Execute command(s)
- '''
+ """
binary_data = False
if stdin:
@@ -85,7 +95,9 @@
return rc, cmd, out, err
-def exit_module(module, out, rc, cmd, err, startd, changed=False, diff=dict(before="", after="")):
+def exit_module(
+ module, out, rc, cmd, err, startd, changed=False, diff=dict(before="", after="")
+):
endd = datetime.datetime.now()
delta = endd - startd
@@ -98,17 +110,17 @@
stdout=out.rstrip("\r\n"),
stderr=err.rstrip("\r\n"),
changed=changed,
- diff=diff
+ diff=diff,
)
module.exit_json(**result)
def fatal(message, module):
- '''
+ """
Report a fatal error and exit
- '''
+ """
if module:
module.fail_json(msg=message, rc=1)
else:
- raise(Exception(message))
+ raise (Exception(message))
diff --git a/plugins/modules/ceph_config.py b/plugins/modules/ceph_config.py
index 0c7a183..968ba8e 100644
--- a/plugins/modules/ceph_config.py
+++ b/plugins/modules/ceph_config.py
@@ -2,34 +2,30 @@
from ansible.module_utils.basic import AnsibleModule
+
def run_module():
module_args = dict(
- who=dict(type='str', required=True),
- name=dict(type='str', required=True),
- value=dict(type='str', required=True),
+ who=dict(type="str", required=True),
+ name=dict(type="str", required=True),
+ value=dict(type="str", required=True),
)
- module = AnsibleModule(
- argument_spec=module_args,
- supports_check_mode=True
- )
+ module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
- who = module.params['who']
- name = module.params['name']
- value = module.params['value']
+ who = module.params["who"]
+ name = module.params["name"]
+ value = module.params["value"]
changed = False
- _, out, _ = module.run_command(
- ['ceph', 'config', 'get', who, name], check_rc=True
- )
+ _, out, _ = module.run_command(["ceph", "config", "get", who, name], check_rc=True)
if out.strip() != value:
changed = True
if not module.check_mode:
_, _, _ = module.run_command(
- ['ceph', 'config', 'set', who, name, value], check_rc=True
+ ["ceph", "config", "set", who, name, value], check_rc=True
)
module.exit_json(changed=changed)
@@ -39,5 +35,5 @@
run_module()
-if __name__ == '__main__':
- main()
\ No newline at end of file
+if __name__ == "__main__":
+ main()
diff --git a/plugins/modules/ceph_key.py b/plugins/modules/ceph_key.py
index 437a0d3..33cb13f 100644
--- a/plugins/modules/ceph_key.py
+++ b/plugins/modules/ceph_key.py
@@ -15,30 +15,32 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.vexxhost.atmosphere.plugins.module_utils.ca_common import generate_ceph_cmd, \
- is_containerized, \
- container_exec, \
- fatal
-
+import base64
import datetime
import json
import os
+import socket
import struct
import time
-import base64
-import socket
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.vexxhost.atmosphere.plugins.module_utils.ca_common import (
+ container_exec,
+ fatal,
+ generate_ceph_cmd,
+ is_containerized,
+)
ANSIBLE_METADATA = {
- 'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
}
-DOCUMENTATION = '''
+DOCUMENTATION = """
---
module: ceph_key
@@ -124,9 +126,9 @@
entity.
required: false
default: json
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = """
keys_to_create:
- { name: client.key, key: "AQAin8tUUK84ExAA/QgBtI7gEMWdmnvKBzlXdQ==", caps: { mon: "allow rwx", mds: "allow *" } , mode: "0600" } # noqa: E501
@@ -198,13 +200,20 @@
- name: fetch cephx keys
ceph_key:
state: fetch_initial_keys
-'''
+"""
-RETURN = '''# '''
+RETURN = """# """
-CEPH_INITIAL_KEYS = ['client.admin', 'client.bootstrap-mds', 'client.bootstrap-mgr', # noqa: E501
- 'client.bootstrap-osd', 'client.bootstrap-rbd', 'client.bootstrap-rbd-mirror', 'client.bootstrap-rgw'] # noqa: E501
+CEPH_INITIAL_KEYS = [
+ "client.admin",
+ "client.bootstrap-mds",
+ "client.bootstrap-mgr", # noqa: E501
+ "client.bootstrap-osd",
+ "client.bootstrap-rbd",
+ "client.bootstrap-rbd-mirror",
+ "client.bootstrap-rgw",
+] # noqa: E501
def str_to_bool(val):
@@ -212,30 +221,30 @@
val = val.lower()
except AttributeError:
val = str(val).lower()
- if val == 'true':
+ if val == "true":
return True
- elif val == 'false':
+ elif val == "false":
return False
else:
raise ValueError("Invalid input value: %s" % val)
def generate_secret():
- '''
+ """
Generate a CephX secret
- '''
+ """
key = os.urandom(16)
- header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
+ header = struct.pack("<hiih", 1, int(time.time()), 0, len(key))
secret = base64.b64encode(header + key)
return secret
def generate_caps(_type, caps):
- '''
+ """
Generate CephX capabilities list
- '''
+ """
caps_cli = []
@@ -251,25 +260,26 @@
return caps_cli
-def generate_ceph_authtool_cmd(cluster, name, secret, caps, dest, container_image=None): # noqa: E501
- '''
+def generate_ceph_authtool_cmd(
+ cluster, name, secret, caps, dest, container_image=None
+): # noqa: E501
+ """
Generate 'ceph-authtool' command line to execute
- '''
+ """
if container_image:
- binary = 'ceph-authtool'
- cmd = container_exec(
- binary, container_image)
+ binary = "ceph-authtool"
+ cmd = container_exec(binary, container_image)
else:
- binary = ['ceph-authtool']
+ binary = ["ceph-authtool"]
cmd = binary
base_cmd = [
- '--create-keyring',
+ "--create-keyring",
dest,
- '--name',
+ "--name",
name,
- '--add-key',
+ "--add-key",
secret,
]
@@ -279,133 +289,168 @@
return cmd
-def create_key(module, result, cluster, user, user_key, name, secret, caps, import_key, dest, container_image=None): # noqa: E501
- '''
+def create_key(
+ module,
+ result,
+ cluster,
+ user,
+ user_key,
+ name,
+ secret,
+ caps,
+ import_key,
+ dest,
+ container_image=None,
+): # noqa: E501
+ """
Create a CephX key
- '''
+ """
cmd_list = []
if not secret:
secret = generate_secret()
- if user == 'client.admin':
- args = ['import', '-i', dest]
+ if user == "client.admin":
+ args = ["import", "-i", dest]
else:
- args = ['get-or-create', name]
+ args = ["get-or-create", name]
args.extend(generate_caps(None, caps))
- args.extend(['-o', dest])
+ args.extend(["-o", dest])
- cmd_list.append(generate_ceph_authtool_cmd(
- cluster, name, secret, caps, dest, container_image))
+ cmd_list.append(
+ generate_ceph_authtool_cmd(cluster, name, secret, caps, dest, container_image)
+ )
- if import_key or user != 'client.admin':
- cmd_list.append(generate_ceph_cmd(sub_cmd=['auth'],
- args=args,
- cluster=cluster,
- user=user,
- user_key=user_key,
- container_image=container_image))
+ if import_key or user != "client.admin":
+ cmd_list.append(
+ generate_ceph_cmd(
+ sub_cmd=["auth"],
+ args=args,
+ cluster=cluster,
+ user=user,
+ user_key=user_key,
+ container_image=container_image,
+ )
+ )
return cmd_list
def delete_key(cluster, user, user_key, name, container_image=None):
- '''
+ """
Delete a CephX key
- '''
+ """
cmd_list = []
args = [
- 'del',
+ "del",
name,
]
- cmd_list.append(generate_ceph_cmd(sub_cmd=['auth'],
- args=args,
- cluster=cluster,
- user=user,
- user_key=user_key,
- container_image=container_image))
+ cmd_list.append(
+ generate_ceph_cmd(
+ sub_cmd=["auth"],
+ args=args,
+ cluster=cluster,
+ user=user,
+ user_key=user_key,
+ container_image=container_image,
+ )
+ )
return cmd_list
def get_key(cluster, user, user_key, name, dest, container_image=None):
- '''
+ """
Get a CephX key (write on the filesystem)
- '''
+ """
cmd_list = []
args = [
- 'get',
+ "get",
name,
- '-o',
+ "-o",
dest,
]
- cmd_list.append(generate_ceph_cmd(sub_cmd=['auth'],
- args=args,
- cluster=cluster,
- user=user,
- user_key=user_key,
- container_image=container_image))
+ cmd_list.append(
+ generate_ceph_cmd(
+ sub_cmd=["auth"],
+ args=args,
+ cluster=cluster,
+ user=user,
+ user_key=user_key,
+ container_image=container_image,
+ )
+ )
return cmd_list
-def info_key(cluster, name, user, user_key, output_format, container_image=None): # noqa: E501
- '''
+def info_key(
+ cluster, name, user, user_key, output_format, container_image=None
+): # noqa: E501
+ """
Get information about a CephX key
- '''
+ """
cmd_list = []
args = [
- 'get',
+ "get",
name,
- '-f',
+ "-f",
output_format,
]
- cmd_list.append(generate_ceph_cmd(sub_cmd=['auth'],
- args=args,
- cluster=cluster,
- user=user,
- user_key=user_key,
- container_image=container_image))
+ cmd_list.append(
+ generate_ceph_cmd(
+ sub_cmd=["auth"],
+ args=args,
+ cluster=cluster,
+ user=user,
+ user_key=user_key,
+ container_image=container_image,
+ )
+ )
return cmd_list
def list_keys(cluster, user, user_key, container_image=None):
- '''
+ """
List all CephX keys
- '''
+ """
cmd_list = []
args = [
- 'ls',
- '-f',
- 'json',
+ "ls",
+ "-f",
+ "json",
]
- cmd_list.append(generate_ceph_cmd(sub_cmd=['auth'],
- args=args,
- cluster=cluster,
- user=user,
- user_key=user_key,
- container_image=container_image))
+ cmd_list.append(
+ generate_ceph_cmd(
+ sub_cmd=["auth"],
+ args=args,
+ cluster=cluster,
+ user=user,
+ user_key=user_key,
+ container_image=container_image,
+ )
+ )
return cmd_list
def exec_commands(module, cmd_list):
- '''
+ """
Execute command(s)
- '''
+ """
for cmd in cmd_list:
rc, out, err = module.run_command(cmd)
@@ -416,15 +461,17 @@
def lookup_ceph_initial_entities(module, out):
- '''
+ """
Lookup Ceph initial keys entries in the auth map
- '''
+ """
# convert out to json, ansible returns a string...
try:
out_dict = json.loads(out)
except ValueError as e:
- fatal("Could not decode 'ceph auth list' json output: {}".format(e), module) # noqa: E501
+ fatal(
+ "Could not decode 'ceph auth list' json output: {}".format(e), module
+ ) # noqa: E501
entities = []
if "auth_dump" in out_dict:
@@ -436,21 +483,25 @@
else:
fatal("'auth_dump' key not present in json output:", module) # noqa: E501
- if len(entities) != len(CEPH_INITIAL_KEYS) and not str_to_bool(os.environ.get('CEPH_ROLLING_UPDATE', False)): # noqa: E501
+ if len(entities) != len(CEPH_INITIAL_KEYS) and not str_to_bool(
+ os.environ.get("CEPH_ROLLING_UPDATE", False)
+ ): # noqa: E501
# must be missing in auth_dump, as if it were in CEPH_INITIAL_KEYS
# it'd be in entities from the above test. Report what's missing.
missing = []
for e in CEPH_INITIAL_KEYS:
if e not in entities:
missing.append(e)
- fatal("initial keyring does not contain keys: " + ' '.join(missing), module) # noqa: E501
+ fatal(
+ "initial keyring does not contain keys: " + " ".join(missing), module
+ ) # noqa: E501
return entities
def build_key_path(cluster, entity):
- '''
+ """
Build key path depending on the key type
- '''
+ """
if "admin" in entity:
path = "/etc/ceph"
@@ -461,7 +512,7 @@
        # bootstrap keys show up as 'client.bootstrap-osd'
# however the directory is called '/var/lib/ceph/bootstrap-osd'
# so we need to substring 'client.'
- entity_split = entity.split('.')[1]
+ entity_split = entity.split(".")[1]
keyring_filename = cluster + ".keyring"
key_path = os.path.join(path, entity_split, keyring_filename)
else:
@@ -472,17 +523,34 @@
def run_module():
module_args = dict(
- cluster=dict(type='str', required=False, default='ceph'),
- name=dict(type='str', required=False),
- state=dict(type='str', required=False, default='present', choices=['present', 'update', 'absent', # noqa: E501
- 'list', 'info', 'fetch_initial_keys', 'generate_secret']), # noqa: E501
- caps=dict(type='dict', required=False, default=None),
- secret=dict(type='str', required=False, default=None, no_log=True),
- import_key=dict(type='bool', required=False, default=True),
- dest=dict(type='str', required=False, default='/etc/ceph/'),
- user=dict(type='str', required=False, default='client.admin'),
- user_key=dict(type='str', required=False, default=None),
- output_format=dict(type='str', required=False, default='json', choices=['json', 'plain', 'xml', 'yaml']) # noqa: E501
+ cluster=dict(type="str", required=False, default="ceph"),
+ name=dict(type="str", required=False),
+ state=dict(
+ type="str",
+ required=False,
+ default="present",
+ choices=[
+ "present",
+ "update",
+ "absent", # noqa: E501
+ "list",
+ "info",
+ "fetch_initial_keys",
+ "generate_secret",
+ ],
+ ), # noqa: E501
+ caps=dict(type="dict", required=False, default=None),
+ secret=dict(type="str", required=False, default=None, no_log=True),
+ import_key=dict(type="bool", required=False, default=True),
+ dest=dict(type="str", required=False, default="/etc/ceph/"),
+ user=dict(type="str", required=False, default="client.admin"),
+ user_key=dict(type="str", required=False, default=None),
+ output_format=dict(
+ type="str",
+ required=False,
+ default="json",
+ choices=["json", "plain", "xml", "yaml"],
+ ), # noqa: E501
)
module = AnsibleModule(
@@ -494,27 +562,27 @@
file_args = module.load_file_common_arguments(module.params)
# Gather module parameters in variables
- state = module.params['state']
- name = module.params.get('name')
- cluster = module.params.get('cluster')
- caps = module.params.get('caps')
- secret = module.params.get('secret')
- import_key = module.params.get('import_key')
- dest = module.params.get('dest')
- user = module.params.get('user')
- user_key = module.params.get('user_key')
- output_format = module.params.get('output_format')
+ state = module.params["state"]
+ name = module.params.get("name")
+ cluster = module.params.get("cluster")
+ caps = module.params.get("caps")
+ secret = module.params.get("secret")
+ import_key = module.params.get("import_key")
+ dest = module.params.get("dest")
+ user = module.params.get("user")
+ user_key = module.params.get("user_key")
+ output_format = module.params.get("output_format")
changed = False
result = dict(
changed=changed,
- stdout='',
- stderr='',
+ stdout="",
+ stderr="",
rc=0,
- start='',
- end='',
- delta='',
+ start="",
+ end="",
+ delta="",
)
if module.check_mode and state != "info":
@@ -533,66 +601,115 @@
key_exist = 1
if not user_key:
- user_key_filename = '{}.{}.keyring'.format(cluster, user)
- user_key_dir = '/etc/ceph'
+ user_key_filename = "{}.{}.keyring".format(cluster, user)
+ user_key_dir = "/etc/ceph"
user_key_path = os.path.join(user_key_dir, user_key_filename)
else:
user_key_path = user_key
- if (state in ["present", "update"]):
+ if state in ["present", "update"]:
# if dest is not a directory, the user wants to change the file's name
# (e,g: /etc/ceph/ceph.mgr.ceph-mon2.keyring)
if not os.path.isdir(dest):
file_path = dest
else:
- if 'bootstrap' in dest:
+ if "bootstrap" in dest:
# Build a different path for bootstrap keys as there are stored
# as /var/lib/ceph/bootstrap-rbd/ceph.keyring
- keyring_filename = cluster + '.keyring'
+ keyring_filename = cluster + ".keyring"
else:
keyring_filename = cluster + "." + name + ".keyring"
file_path = os.path.join(dest, keyring_filename)
- file_args['path'] = file_path
+ file_args["path"] = file_path
if import_key:
_info_key = []
rc, cmd, out, err = exec_commands(
- module, info_key(cluster, name, user, user_key_path, output_format, container_image)) # noqa: E501
+ module,
+ info_key(
+ cluster, name, user, user_key_path, output_format, container_image
+ ),
+ ) # noqa: E501
key_exist = rc
if not caps and key_exist != 0:
- fatal("Capabilities must be provided when state is 'present'", module) # noqa: E501
+ fatal(
+ "Capabilities must be provided when state is 'present'", module
+ ) # noqa: E501
if key_exist != 0 and secret is None and caps is None:
- fatal("Keyring doesn't exist, you must provide 'secret' and 'caps'", module) # noqa: E501
+ fatal(
+ "Keyring doesn't exist, you must provide 'secret' and 'caps'",
+ module,
+ ) # noqa: E501
if key_exist == 0:
_info_key = json.loads(out)
if not secret:
- secret = _info_key[0]['key']
- _secret = _info_key[0]['key']
+ secret = _info_key[0]["key"]
+ _secret = _info_key[0]["key"]
if not caps:
- caps = _info_key[0]['caps']
- _caps = _info_key[0]['caps']
+ caps = _info_key[0]["caps"]
+ _caps = _info_key[0]["caps"]
if secret == _secret and caps == _caps:
if not os.path.isfile(file_path):
- rc, cmd, out, err = exec_commands(module, get_key(cluster, user, user_key_path, name, file_path, container_image)) # noqa: E501
+ rc, cmd, out, err = exec_commands(
+ module,
+ get_key(
+ cluster,
+ user,
+ user_key_path,
+ name,
+ file_path,
+ container_image,
+ ),
+ ) # noqa: E501
result["rc"] = rc
if rc != 0:
- result["stdout"] = "Couldn't fetch the key {0} at {1}.".format(name, file_path) # noqa: E501
+ result[
+ "stdout"
+ ] = "Couldn't fetch the key {0} at {1}.".format(
+ name, file_path
+ ) # noqa: E501
module.exit_json(**result)
- result["stdout"] = "fetched the key {0} at {1}.".format(name, file_path) # noqa: E501
+ result["stdout"] = "fetched the key {0} at {1}.".format(
+ name, file_path
+ ) # noqa: E501
- result["stdout"] = "{0} already exists and doesn't need to be updated.".format(name) # noqa: E501
+ result[
+ "stdout"
+ ] = "{0} already exists and doesn't need to be updated.".format(
+ name
+ ) # noqa: E501
result["rc"] = 0
module.set_fs_attributes_if_different(file_args, False)
module.exit_json(**result)
else:
if os.path.isfile(file_path) and not secret or not caps:
- result["stdout"] = "{0} already exists in {1} you must provide secret *and* caps when import_key is {2}".format(name, dest, import_key) # noqa: E501
+ result[
+ "stdout"
+ ] = "{0} already exists in {1} you must provide secret *and* caps when import_key is {2}".format(
+ name, dest, import_key
+ ) # noqa: E501
result["rc"] = 0
module.exit_json(**result)
- if (key_exist == 0 and (secret != _secret or caps != _caps)) or key_exist != 0: # noqa: E501
- rc, cmd, out, err = exec_commands(module, create_key(
- module, result, cluster, user, user_key_path, name, secret, caps, import_key, file_path, container_image)) # noqa: E501
+ if (
+ key_exist == 0 and (secret != _secret or caps != _caps)
+ ) or key_exist != 0: # noqa: E501
+ rc, cmd, out, err = exec_commands(
+ module,
+ create_key(
+ module,
+ result,
+ cluster,
+ user,
+ user_key_path,
+ name,
+ secret,
+ caps,
+ import_key,
+ file_path,
+ container_image,
+ ),
+ ) # noqa: E501
if rc != 0:
result["stdout"] = "Couldn't create or update {0}".format(name)
result["stderr"] = err
@@ -603,7 +720,8 @@
elif state == "absent":
if key_exist == 0:
rc, cmd, out, err = exec_commands(
- module, delete_key(cluster, user, user_key_path, name, container_image)) # noqa: E501
+ module, delete_key(cluster, user, user_key_path, name, container_image)
+ ) # noqa: E501
if rc == 0:
changed = True
else:
@@ -611,23 +729,29 @@
elif state == "info":
rc, cmd, out, err = exec_commands(
- module, info_key(cluster, name, user, user_key_path, output_format, container_image)) # noqa: E501
+ module,
+ info_key(
+ cluster, name, user, user_key_path, output_format, container_image
+ ),
+ ) # noqa: E501
elif state == "list":
rc, cmd, out, err = exec_commands(
- module, list_keys(cluster, user, user_key_path, container_image))
+ module, list_keys(cluster, user, user_key_path, container_image)
+ )
elif state == "fetch_initial_keys":
- hostname = socket.gethostname().split('.', 1)[0]
+ hostname = socket.gethostname().split(".", 1)[0]
user = "mon."
keyring_filename = cluster + "-" + hostname + "/keyring"
user_key_path = os.path.join("/var/lib/ceph/mon/", keyring_filename)
rc, cmd, out, err = exec_commands(
- module, list_keys(cluster, user, user_key_path, container_image))
+ module, list_keys(cluster, user, user_key_path, container_image)
+ )
if rc != 0:
result["stdout"] = "failed to retrieve ceph keys"
result["sdterr"] = err
- result['rc'] = 0
+ result["rc"] = 0
module.exit_json(**result)
entities = lookup_ceph_initial_entities(module, out)
@@ -643,25 +767,25 @@
continue
extra_args = [
- '-o',
+ "-o",
key_path,
]
- info_cmd = info_key(cluster, entity, user,
- user_key_path, output_format, container_image)
+ info_cmd = info_key(
+ cluster, entity, user, user_key_path, output_format, container_image
+ )
# we use info_cmd[0] because info_cmd is an array made of an array
info_cmd[0].extend(extra_args)
- rc, cmd, out, err = exec_commands(
- module, info_cmd) # noqa: E501
+ rc, cmd, out, err = exec_commands(module, info_cmd) # noqa: E501
file_args = module.load_file_common_arguments(module.params)
- file_args['path'] = key_path
+ file_args["path"] = key_path
module.set_fs_attributes_if_different(file_args, False)
elif state == "generate_secret":
out = generate_secret().decode()
- cmd = ''
+ cmd = ""
rc = 0
- err = ''
+ err = ""
changed = True
endd = datetime.datetime.now()
@@ -679,7 +803,7 @@
)
if rc != 0:
- module.fail_json(msg='non-zero return code', **result)
+ module.fail_json(msg="non-zero return code", **result)
module.exit_json(**result)
@@ -688,5 +812,5 @@
run_module()
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/plugins/modules/ceph_pool.py b/plugins/modules/ceph_pool.py
index ff91f76..1c125e0 100644
--- a/plugins/modules/ceph_pool.py
+++ b/plugins/modules/ceph_pool.py
@@ -15,27 +15,29 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.vexxhost.atmosphere.plugins.module_utils.ca_common import generate_ceph_cmd, \
- pre_generate_ceph_cmd, \
- is_containerized, \
- exec_command, \
- exit_module
+__metaclass__ = type
import datetime
import json
import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.vexxhost.atmosphere.plugins.module_utils.ca_common import (
+ exec_command,
+ exit_module,
+ generate_ceph_cmd,
+ is_containerized,
+ pre_generate_ceph_cmd,
+)
ANSIBLE_METADATA = {
- 'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
}
-DOCUMENTATION = '''
+DOCUMENTATION = """
---
module: ceph_pool
@@ -122,9 +124,9 @@
- Set the pool application on the pool.
required: false
default: None
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = """
pools:
- { name: foo, size: 3, application: rbd, pool_type: 'replicated',
@@ -142,153 +144,146 @@
pool_type: "{{ item.pool_type }}"
pg_autoscale_mode: "{{ item.pg_autoscale_mode }}"
with_items: "{{ pools }}"
-'''
+"""
-RETURN = '''# '''
+RETURN = """# """
-def check_pool_exist(cluster,
- name,
- user,
- user_key,
- output_format='json',
- container_image=None):
- '''
+def check_pool_exist(
+ cluster, name, user, user_key, output_format="json", container_image=None
+):
+ """
Check if a given pool exists
- '''
+ """
- args = ['stats', name, '-f', output_format]
+ args = ["stats", name, "-f", output_format]
- cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
- args=args,
- cluster=cluster,
- user=user,
- user_key=user_key,
- container_image=container_image)
+ cmd = generate_ceph_cmd(
+ sub_cmd=["osd", "pool"],
+ args=args,
+ cluster=cluster,
+ user=user,
+ user_key=user_key,
+ container_image=container_image,
+ )
return cmd
-def generate_get_config_cmd(param,
- cluster,
- user,
- user_key,
- container_image=None):
+def generate_get_config_cmd(param, cluster, user, user_key, container_image=None):
_cmd = pre_generate_ceph_cmd(container_image=container_image)
args = [
- '-n',
+ "-n",
user,
- '-k',
+ "-k",
user_key,
- '--cluster',
+ "--cluster",
cluster,
- 'config',
- 'get',
- 'mon.*',
- param
+ "config",
+ "get",
+ "mon.*",
+ param,
]
cmd = _cmd + args
return cmd
-def get_application_pool(cluster,
- name,
- user,
- user_key,
- output_format='json',
- container_image=None):
- '''
+def get_application_pool(
+ cluster, name, user, user_key, output_format="json", container_image=None
+):
+ """
Get application type enabled on a given pool
- '''
+ """
- args = ['application', 'get', name, '-f', output_format]
+ args = ["application", "get", name, "-f", output_format]
- cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
- args=args,
- cluster=cluster,
- user=user,
- user_key=user_key,
- container_image=container_image)
+ cmd = generate_ceph_cmd(
+ sub_cmd=["osd", "pool"],
+ args=args,
+ cluster=cluster,
+ user=user,
+ user_key=user_key,
+ container_image=container_image,
+ )
return cmd
-def enable_application_pool(cluster,
- name,
- application,
- user,
- user_key,
- container_image=None):
- '''
+def enable_application_pool(
+ cluster, name, application, user, user_key, container_image=None
+):
+ """
Enable application on a given pool
- '''
+ """
- args = ['application', 'enable', name, application]
+ args = ["application", "enable", name, application]
- cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
- args=args,
- cluster=cluster,
- user=user,
- user_key=user_key,
- container_image=container_image)
+ cmd = generate_ceph_cmd(
+ sub_cmd=["osd", "pool"],
+ args=args,
+ cluster=cluster,
+ user=user,
+ user_key=user_key,
+ container_image=container_image,
+ )
return cmd
-def disable_application_pool(cluster,
- name,
- application,
- user,
- user_key,
- container_image=None):
- '''
+def disable_application_pool(
+ cluster, name, application, user, user_key, container_image=None
+):
+ """
Disable application on a given pool
- '''
+ """
- args = ['application', 'disable', name,
- application, '--yes-i-really-mean-it']
+ args = ["application", "disable", name, application, "--yes-i-really-mean-it"]
- cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
- args=args,
- cluster=cluster,
- user=user,
- user_key=user_key,
- container_image=container_image)
+ cmd = generate_ceph_cmd(
+ sub_cmd=["osd", "pool"],
+ args=args,
+ cluster=cluster,
+ user=user,
+ user_key=user_key,
+ container_image=container_image,
+ )
return cmd
-def get_pool_details(module,
- cluster,
- name,
- user,
- user_key,
- output_format='json',
- container_image=None):
- '''
+def get_pool_details(
+ module, cluster, name, user, user_key, output_format="json", container_image=None
+):
+ """
Get details about a given pool
- '''
+ """
- args = ['ls', 'detail', '-f', output_format]
+ args = ["ls", "detail", "-f", output_format]
- cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
- args=args,
- cluster=cluster,
- user=user,
- user_key=user_key,
- container_image=container_image)
+ cmd = generate_ceph_cmd(
+ sub_cmd=["osd", "pool"],
+ args=args,
+ cluster=cluster,
+ user=user,
+ user_key=user_key,
+ container_image=container_image,
+ )
rc, cmd, out, err = exec_command(module, cmd)
if rc == 0:
- out = [p for p in json.loads(out.strip()) if p['pool_name'] == name][0]
+ out = [p for p in json.loads(out.strip()) if p["pool_name"] == name][0]
- _rc, _cmd, application_pool, _err = exec_command(module,
- get_application_pool(cluster, # noqa: E501
- name, # noqa: E501
- user, # noqa: E501
- user_key, # noqa: E501
- container_image=container_image)) # noqa: E501
+ _rc, _cmd, application_pool, _err = exec_command(
+ module,
+ get_application_pool(
+ cluster, # noqa: E501
+ name, # noqa: E501
+ user, # noqa: E501
+ user_key, # noqa: E501
+ container_image=container_image,
+ ),
+ ) # noqa: E501
# This is a trick because "target_size_ratio" isn't present at the same
# level in the dict
@@ -303,181 +298,231 @@
# }
# If 'target_size_ratio' is present in 'options', we set it, this way we
# end up with a dict containing all needed keys at the same level.
- if 'target_size_ratio' in out['options'].keys():
- out['target_size_ratio'] = out['options']['target_size_ratio']
+ if "target_size_ratio" in out["options"].keys():
+ out["target_size_ratio"] = out["options"]["target_size_ratio"]
else:
- out['target_size_ratio'] = None
+ out["target_size_ratio"] = None
application = list(json.loads(application_pool.strip()).keys())
if len(application) == 0:
- out['application'] = ''
+ out["application"] = ""
else:
- out['application'] = application[0]
+ out["application"] = application[0]
return rc, cmd, out, err
def compare_pool_config(user_pool_config, running_pool_details):
- '''
+ """
Compare user input config pool details with current running pool details
- '''
+ """
delta = {}
- filter_keys = ['pg_num', 'pg_placement_num', 'size',
- 'pg_autoscale_mode', 'target_size_ratio']
+ filter_keys = [
+ "pg_num",
+ "pg_placement_num",
+ "size",
+ "pg_autoscale_mode",
+ "target_size_ratio",
+ ]
for key in filter_keys:
- if (str(running_pool_details[key]) != user_pool_config[key]['value'] and # noqa: E501
- user_pool_config[key]['value']):
+ if (
+ str(running_pool_details[key]) != user_pool_config[key]["value"]
+ and user_pool_config[key]["value"] # noqa: E501
+ ):
delta[key] = user_pool_config[key]
- if (running_pool_details['application'] !=
- user_pool_config['application']['value'] and
- user_pool_config['application']['value']):
- delta['application'] = {}
- delta['application']['new_application'] = user_pool_config['application']['value'] # noqa: E501
+ if (
+ running_pool_details["application"] != user_pool_config["application"]["value"]
+ and user_pool_config["application"]["value"]
+ ):
+ delta["application"] = {}
+ delta["application"]["new_application"] = user_pool_config["application"][
+ "value"
+ ] # noqa: E501
# to be improved (for update_pools()...)
- delta['application']['value'] = delta['application']['new_application']
- delta['application']['old_application'] = running_pool_details['application'] # noqa: E501
+ delta["application"]["value"] = delta["application"]["new_application"]
+ delta["application"]["old_application"] = running_pool_details[
+ "application"
+ ] # noqa: E501
return delta
-def list_pools(cluster,
- user,
- user_key,
- details,
- output_format='json',
- container_image=None):
- '''
+def list_pools(
+ cluster, user, user_key, details, output_format="json", container_image=None
+):
+ """
List existing pools
- '''
+ """
- args = ['ls']
+ args = ["ls"]
if details:
- args.append('detail')
+ args.append("detail")
- args.extend(['-f', output_format])
+ args.extend(["-f", output_format])
- cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
- args=args,
- cluster=cluster,
- user=user,
- user_key=user_key,
- container_image=container_image)
+ cmd = generate_ceph_cmd(
+ sub_cmd=["osd", "pool"],
+ args=args,
+ cluster=cluster,
+ user=user,
+ user_key=user_key,
+ container_image=container_image,
+ )
return cmd
-def create_pool(cluster,
- name,
- user,
- user_key,
- user_pool_config,
- container_image=None):
- '''
+def create_pool(cluster, name, user, user_key, user_pool_config, container_image=None):
+ """
Create a new pool
- '''
+ """
- args = ['create', user_pool_config['pool_name']['value'],
- user_pool_config['type']['value']]
+ args = [
+ "create",
+ user_pool_config["pool_name"]["value"],
+ user_pool_config["type"]["value"],
+ ]
- if user_pool_config['pg_autoscale_mode']['value'] != 'on':
- args.extend(['--pg_num',
- user_pool_config['pg_num']['value'],
- '--pgp_num',
- user_pool_config['pgp_num']['value'] or
- user_pool_config['pg_num']['value']])
- elif user_pool_config['target_size_ratio']['value']:
- args.extend(['--target_size_ratio',
- user_pool_config['target_size_ratio']['value']])
+ if user_pool_config["pg_autoscale_mode"]["value"] != "on":
+ args.extend(
+ [
+ "--pg_num",
+ user_pool_config["pg_num"]["value"],
+ "--pgp_num",
+ user_pool_config["pgp_num"]["value"]
+ or user_pool_config["pg_num"]["value"],
+ ]
+ )
+ elif user_pool_config["target_size_ratio"]["value"]:
+ args.extend(
+ ["--target_size_ratio", user_pool_config["target_size_ratio"]["value"]]
+ )
- if user_pool_config['type']['value'] == 'replicated':
- args.extend([user_pool_config['crush_rule']['value'],
- '--expected_num_objects',
- user_pool_config['expected_num_objects']['value'],
- '--autoscale-mode',
- user_pool_config['pg_autoscale_mode']['value']])
+ if user_pool_config["type"]["value"] == "replicated":
+ args.extend(
+ [
+ user_pool_config["crush_rule"]["value"],
+ "--expected_num_objects",
+ user_pool_config["expected_num_objects"]["value"],
+ "--autoscale-mode",
+ user_pool_config["pg_autoscale_mode"]["value"],
+ ]
+ )
- if (user_pool_config['size']['value'] and
- user_pool_config['type']['value'] == "replicated"):
- args.extend(['--size', user_pool_config['size']['value']])
+ if (
+ user_pool_config["size"]["value"]
+ and user_pool_config["type"]["value"] == "replicated"
+ ):
+ args.extend(["--size", user_pool_config["size"]["value"]])
- elif user_pool_config['type']['value'] == 'erasure':
- args.extend([user_pool_config['erasure_profile']['value']])
+ elif user_pool_config["type"]["value"] == "erasure":
+ args.extend([user_pool_config["erasure_profile"]["value"]])
- if user_pool_config['crush_rule']['value']:
- args.extend([user_pool_config['crush_rule']['value']])
+ if user_pool_config["crush_rule"]["value"]:
+ args.extend([user_pool_config["crush_rule"]["value"]])
- args.extend(['--expected_num_objects',
- user_pool_config['expected_num_objects']['value'],
- '--autoscale-mode',
- user_pool_config['pg_autoscale_mode']['value']])
+ args.extend(
+ [
+ "--expected_num_objects",
+ user_pool_config["expected_num_objects"]["value"],
+ "--autoscale-mode",
+ user_pool_config["pg_autoscale_mode"]["value"],
+ ]
+ )
- cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
- args=args,
- cluster=cluster,
- user=user,
- user_key=user_key,
- container_image=container_image)
+ cmd = generate_ceph_cmd(
+ sub_cmd=["osd", "pool"],
+ args=args,
+ cluster=cluster,
+ user=user,
+ user_key=user_key,
+ container_image=container_image,
+ )
return cmd
def remove_pool(cluster, name, user, user_key, container_image=None):
- '''
+ """
Remove a pool
- '''
+ """
- args = ['rm', name, name, '--yes-i-really-really-mean-it']
+ args = ["rm", name, name, "--yes-i-really-really-mean-it"]
- cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
- args=args,
- cluster=cluster,
- user=user,
- user_key=user_key,
- container_image=container_image)
+ cmd = generate_ceph_cmd(
+ sub_cmd=["osd", "pool"],
+ args=args,
+ cluster=cluster,
+ user=user,
+ user_key=user_key,
+ container_image=container_image,
+ )
return cmd
-def update_pool(module, cluster, name,
- user, user_key, delta, container_image=None):
- '''
+def update_pool(module, cluster, name, user, user_key, delta, container_image=None):
+ """
Update an existing pool
- '''
+ """
report = ""
for key in delta.keys():
- if key != 'application':
- args = ['set',
- name,
- delta[key]['cli_set_opt'],
- delta[key]['value']]
+ if key != "application":
+ args = ["set", name, delta[key]["cli_set_opt"], delta[key]["value"]]
- cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
- args=args,
- cluster=cluster,
- user=user,
- user_key=user_key,
- container_image=container_image)
+ cmd = generate_ceph_cmd(
+ sub_cmd=["osd", "pool"],
+ args=args,
+ cluster=cluster,
+ user=user,
+ user_key=user_key,
+ container_image=container_image,
+ )
rc, cmd, out, err = exec_command(module, cmd)
if rc != 0:
return rc, cmd, out, err
else:
- rc, cmd, out, err = exec_command(module, disable_application_pool(cluster, name, delta['application']['old_application'], user, user_key, container_image=container_image)) # noqa: E501
+ rc, cmd, out, err = exec_command(
+ module,
+ disable_application_pool(
+ cluster,
+ name,
+ delta["application"]["old_application"],
+ user,
+ user_key,
+ container_image=container_image,
+ ),
+ ) # noqa: E501
if rc != 0:
return rc, cmd, out, err
- rc, cmd, out, err = exec_command(module, enable_application_pool(cluster, name, delta['application']['new_application'], user, user_key, container_image=container_image)) # noqa: E501
+ rc, cmd, out, err = exec_command(
+ module,
+ enable_application_pool(
+ cluster,
+ name,
+ delta["application"]["new_application"],
+ user,
+ user_key,
+ container_image=container_image,
+ ),
+ ) # noqa: E501
if rc != 0:
return rc, cmd, out, err
- report = report + "\n" + "{} has been updated: {} is now {}".format(name, key, delta[key]['value']) # noqa: E501
+ report = (
+ report
+ + "\n"
+ + "{} has been updated: {} is now {}".format(name, key, delta[key]["value"])
+ ) # noqa: E501
out = report
return rc, cmd, out, err
@@ -485,92 +530,99 @@
def run_module():
module_args = dict(
- cluster=dict(type='str', required=False, default='ceph'),
- name=dict(type='str', required=True),
- state=dict(type='str', required=False, default='present',
- choices=['present', 'absent', 'list']),
- details=dict(type='bool', required=False, default=False),
- size=dict(type='str', required=False),
- min_size=dict(type='str', required=False),
- pg_num=dict(type='str', required=False),
- pgp_num=dict(type='str', required=False),
- pg_autoscale_mode=dict(type='str', required=False, default='on'),
- target_size_ratio=dict(type='str', required=False, default=None),
- pool_type=dict(type='str', required=False, default='replicated',
- choices=['replicated', 'erasure', '1', '3']),
- erasure_profile=dict(type='str', required=False, default='default'),
- rule_name=dict(type='str', required=False, default=None),
- expected_num_objects=dict(type='str', required=False, default="0"),
- application=dict(type='str', required=False, default=None),
+ cluster=dict(type="str", required=False, default="ceph"),
+ name=dict(type="str", required=True),
+ state=dict(
+ type="str",
+ required=False,
+ default="present",
+ choices=["present", "absent", "list"],
+ ),
+ details=dict(type="bool", required=False, default=False),
+ size=dict(type="str", required=False),
+ min_size=dict(type="str", required=False),
+ pg_num=dict(type="str", required=False),
+ pgp_num=dict(type="str", required=False),
+ pg_autoscale_mode=dict(type="str", required=False, default="on"),
+ target_size_ratio=dict(type="str", required=False, default=None),
+ pool_type=dict(
+ type="str",
+ required=False,
+ default="replicated",
+ choices=["replicated", "erasure", "1", "3"],
+ ),
+ erasure_profile=dict(type="str", required=False, default="default"),
+ rule_name=dict(type="str", required=False, default=None),
+ expected_num_objects=dict(type="str", required=False, default="0"),
+ application=dict(type="str", required=False, default=None),
)
- module = AnsibleModule(
- argument_spec=module_args,
- supports_check_mode=True
- )
+ module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
# Gather module parameters in variables
- cluster = module.params.get('cluster')
- name = module.params.get('name')
- state = module.params.get('state')
- details = module.params.get('details')
- size = module.params.get('size')
- min_size = module.params.get('min_size')
- pg_num = module.params.get('pg_num')
- pgp_num = module.params.get('pgp_num')
- pg_autoscale_mode = module.params.get('pg_autoscale_mode')
- target_size_ratio = module.params.get('target_size_ratio')
- application = module.params.get('application')
+ cluster = module.params.get("cluster")
+ name = module.params.get("name")
+ state = module.params.get("state")
+ details = module.params.get("details")
+ size = module.params.get("size")
+ min_size = module.params.get("min_size")
+ pg_num = module.params.get("pg_num")
+ pgp_num = module.params.get("pgp_num")
+ pg_autoscale_mode = module.params.get("pg_autoscale_mode")
+ target_size_ratio = module.params.get("target_size_ratio")
+ application = module.params.get("application")
- if (module.params.get('pg_autoscale_mode').lower() in
- ['true', 'on', 'yes']):
- pg_autoscale_mode = 'on'
- elif (module.params.get('pg_autoscale_mode').lower() in
- ['false', 'off', 'no']):
- pg_autoscale_mode = 'off'
+ if module.params.get("pg_autoscale_mode").lower() in ["true", "on", "yes"]:
+ pg_autoscale_mode = "on"
+ elif module.params.get("pg_autoscale_mode").lower() in ["false", "off", "no"]:
+ pg_autoscale_mode = "off"
else:
- pg_autoscale_mode = 'warn'
+ pg_autoscale_mode = "warn"
- if module.params.get('pool_type') == '1':
- pool_type = 'replicated'
- elif module.params.get('pool_type') == '3':
- pool_type = 'erasure'
+ if module.params.get("pool_type") == "1":
+ pool_type = "replicated"
+ elif module.params.get("pool_type") == "3":
+ pool_type = "erasure"
else:
- pool_type = module.params.get('pool_type')
+ pool_type = module.params.get("pool_type")
- if not module.params.get('rule_name'):
- rule_name = 'replicated_rule' if pool_type == 'replicated' else None
+ if not module.params.get("rule_name"):
+ rule_name = "replicated_rule" if pool_type == "replicated" else None
else:
- rule_name = module.params.get('rule_name')
+ rule_name = module.params.get("rule_name")
- erasure_profile = module.params.get('erasure_profile')
- expected_num_objects = module.params.get('expected_num_objects')
+ erasure_profile = module.params.get("erasure_profile")
+ expected_num_objects = module.params.get("expected_num_objects")
user_pool_config = {
- 'pool_name': {'value': name},
- 'pg_num': {'value': pg_num, 'cli_set_opt': 'pg_num'},
- 'pgp_num': {'value': pgp_num, 'cli_set_opt': 'pgp_num'},
- 'pg_autoscale_mode': {'value': pg_autoscale_mode,
- 'cli_set_opt': 'pg_autoscale_mode'},
- 'target_size_ratio': {'value': target_size_ratio,
- 'cli_set_opt': 'target_size_ratio'},
- 'application': {'value': application},
- 'type': {'value': pool_type},
- 'erasure_profile': {'value': erasure_profile},
- 'crush_rule': {'value': rule_name, 'cli_set_opt': 'crush_rule'},
- 'expected_num_objects': {'value': expected_num_objects},
- 'size': {'value': size, 'cli_set_opt': 'size'},
- 'min_size': {'value': min_size}
+ "pool_name": {"value": name},
+ "pg_num": {"value": pg_num, "cli_set_opt": "pg_num"},
+ "pgp_num": {"value": pgp_num, "cli_set_opt": "pgp_num"},
+ "pg_autoscale_mode": {
+ "value": pg_autoscale_mode,
+ "cli_set_opt": "pg_autoscale_mode",
+ },
+ "target_size_ratio": {
+ "value": target_size_ratio,
+ "cli_set_opt": "target_size_ratio",
+ },
+ "application": {"value": application},
+ "type": {"value": pool_type},
+ "erasure_profile": {"value": erasure_profile},
+ "crush_rule": {"value": rule_name, "cli_set_opt": "crush_rule"},
+ "expected_num_objects": {"value": expected_num_objects},
+ "size": {"value": size, "cli_set_opt": "size"},
+ "min_size": {"value": min_size},
}
if module.check_mode:
module.exit_json(
changed=False,
- stdout='',
- stderr='',
+ stdout="",
+ stderr="",
rc=0,
- start='',
- end='',
- delta='',
+ start="",
+ end="",
+ delta="",
)
startd = datetime.datetime.now()
@@ -580,105 +632,120 @@
container_image = is_containerized()
user = "client.admin"
- keyring_filename = cluster + '.' + user + '.keyring'
+ keyring_filename = cluster + "." + user + ".keyring"
user_key = os.path.join("/etc/ceph/", keyring_filename)
if state == "present":
- rc, cmd, out, err = exec_command(module,
- check_pool_exist(cluster,
- name,
- user,
- user_key,
- container_image=container_image)) # noqa: E501
+ rc, cmd, out, err = exec_command(
+ module,
+ check_pool_exist(
+ cluster, name, user, user_key, container_image=container_image
+ ),
+ ) # noqa: E501
if rc == 0:
- running_pool_details = get_pool_details(module,
- cluster,
- name,
- user,
- user_key,
- container_image=container_image) # noqa: E501
- user_pool_config['pg_placement_num'] = {'value': str(running_pool_details[2]['pg_placement_num']), 'cli_set_opt': 'pgp_num'} # noqa: E501
- delta = compare_pool_config(user_pool_config,
- running_pool_details[2])
+ running_pool_details = get_pool_details(
+ module, cluster, name, user, user_key, container_image=container_image
+ ) # noqa: E501
+ user_pool_config["pg_placement_num"] = {
+ "value": str(running_pool_details[2]["pg_placement_num"]),
+ "cli_set_opt": "pgp_num",
+ } # noqa: E501
+ delta = compare_pool_config(user_pool_config, running_pool_details[2])
if len(delta) > 0:
keys = list(delta.keys())
details = running_pool_details[2]
- if details['erasure_code_profile'] and 'size' in keys:
- del delta['size']
- if details['pg_autoscale_mode'] == 'on':
- delta.pop('pg_num', None)
- delta.pop('pgp_num', None)
+ if details["erasure_code_profile"] and "size" in keys:
+ del delta["size"]
+ if details["pg_autoscale_mode"] == "on":
+ delta.pop("pg_num", None)
+ delta.pop("pgp_num", None)
if len(delta) == 0:
- out = "Skipping pool {}.\nUpdating either 'size' on an erasure-coded pool or 'pg_num'/'pgp_num' on a pg autoscaled pool is incompatible".format(name) # noqa: E501
+ out = "Skipping pool {}.\nUpdating either 'size' on an erasure-coded pool or 'pg_num'/'pgp_num' on a pg autoscaled pool is incompatible".format(
+ name
+ ) # noqa: E501
else:
- rc, cmd, out, err = update_pool(module,
- cluster,
- name,
- user,
- user_key,
- delta,
- container_image=container_image) # noqa: E501
+ rc, cmd, out, err = update_pool(
+ module,
+ cluster,
+ name,
+ user,
+ user_key,
+ delta,
+ container_image=container_image,
+ ) # noqa: E501
if rc == 0:
changed = True
else:
- out = "Pool {} already exists and there is nothing to update.".format(name) # noqa: E501
+ out = "Pool {} already exists and there is nothing to update.".format(
+ name
+ ) # noqa: E501
else:
- rc, cmd, out, err = exec_command(module,
- create_pool(cluster,
- name,
- user,
- user_key,
- user_pool_config=user_pool_config, # noqa: E501
- container_image=container_image)) # noqa: E501
- if user_pool_config['application']['value']:
- rc, _, _, _ = exec_command(module,
- enable_application_pool(cluster,
- name,
- user_pool_config['application']['value'], # noqa: E501
- user,
- user_key,
- container_image=container_image)) # noqa: E501
- if user_pool_config['min_size']['value']:
+ rc, cmd, out, err = exec_command(
+ module,
+ create_pool(
+ cluster,
+ name,
+ user,
+ user_key,
+ user_pool_config=user_pool_config, # noqa: E501
+ container_image=container_image,
+ ),
+ ) # noqa: E501
+ if user_pool_config["application"]["value"]:
+ rc, _, _, _ = exec_command(
+ module,
+ enable_application_pool(
+ cluster,
+ name,
+ user_pool_config["application"]["value"], # noqa: E501
+ user,
+ user_key,
+ container_image=container_image,
+ ),
+ ) # noqa: E501
+ if user_pool_config["min_size"]["value"]:
# not implemented yet
pass
changed = True
elif state == "list":
- rc, cmd, out, err = exec_command(module,
- list_pools(cluster,
- name, user,
- user_key,
- details,
- container_image=container_image)) # noqa: E501
+ rc, cmd, out, err = exec_command(
+ module,
+ list_pools(
+ cluster, name, user, user_key, details, container_image=container_image
+ ),
+ ) # noqa: E501
if rc != 0:
out = "Couldn't list pool(s) present on the cluster"
elif state == "absent":
- rc, cmd, out, err = exec_command(module,
- check_pool_exist(cluster,
- name, user,
- user_key,
- container_image=container_image)) # noqa: E501
+ rc, cmd, out, err = exec_command(
+ module,
+ check_pool_exist(
+ cluster, name, user, user_key, container_image=container_image
+ ),
+ ) # noqa: E501
if rc == 0:
- rc, cmd, out, err = exec_command(module,
- remove_pool(cluster,
- name,
- user,
- user_key,
- container_image=container_image)) # noqa: E501
+ rc, cmd, out, err = exec_command(
+ module,
+ remove_pool(
+ cluster, name, user, user_key, container_image=container_image
+ ),
+ ) # noqa: E501
changed = True
else:
rc = 0
out = "Skipped, since pool {} doesn't exist".format(name)
- exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd,
- changed=changed)
+ exit_module(
+ module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed
+ )
def main():
run_module()
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
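
Editor's note: the hunks above are black's mechanical reformatting of the Ceph pool Ansible module (quote normalization and call-site wrapping) and do not change behaviour. As a standalone sanity check, a minimal sketch of the pg_autoscale_mode and pool_type normalization that run_module() performs (the function name and sample values here are illustrative, not part of the patch):

def normalize_pool_params(pg_autoscale_mode, pool_type):
    # Mirrors the branching in run_module() shown in the hunk above.
    if pg_autoscale_mode.lower() in ["true", "on", "yes"]:
        mode = "on"
    elif pg_autoscale_mode.lower() in ["false", "off", "no"]:
        mode = "off"
    else:
        mode = "warn"

    if pool_type == "1":
        kind = "replicated"
    elif pool_type == "3":
        kind = "erasure"
    else:
        kind = pool_type

    return mode, kind


assert normalize_pool_params("True", "1") == ("on", "replicated")
assert normalize_pool_params("no", "3") == ("off", "erasure")
assert normalize_pool_params("maybe", "erasure") == ("warn", "erasure")
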
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..5d7bf33
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,2 @@
+[tool.isort]
+profile = "black"
diff --git a/releasenotes/notes/add-ansible-lint-c1e961c2fb88dbc7.yaml b/releasenotes/notes/add-ansible-lint-c1e961c2fb88dbc7.yaml
index f5de6cc..59b1793 100644
--- a/releasenotes/notes/add-ansible-lint-c1e961c2fb88dbc7.yaml
+++ b/releasenotes/notes/add-ansible-lint-c1e961c2fb88dbc7.yaml
@@ -1,3 +1,3 @@
---
features:
- - Added ``ansible-lint`` to all of the playbooks and roles.
\ No newline at end of file
+ - Added ``ansible-lint`` to all of the playbooks and roles.
diff --git a/releasenotes/notes/add-molecule-customization-9feb3a6a6e6d85f2.yaml b/releasenotes/notes/add-molecule-customization-9feb3a6a6e6d85f2.yaml
index 3f4d7af..1bb4118 100644
--- a/releasenotes/notes/add-molecule-customization-9feb3a6a6e6d85f2.yaml
+++ b/releasenotes/notes/add-molecule-customization-9feb3a6a6e6d85f2.yaml
@@ -2,4 +2,4 @@
features:
- Added the ability to customize the Heat stack properties
fixes:
- - Added notes on working around Molecule bug.
\ No newline at end of file
+ - Added notes on working around a Molecule bug.
diff --git a/releasenotes/notes/add-openstack-exporter-role-f87a6a6f90a0f236.yaml b/releasenotes/notes/add-openstack-exporter-role-f87a6a6f90a0f236.yaml
index 78c5ee9..4c08505 100644
--- a/releasenotes/notes/add-openstack-exporter-role-f87a6a6f90a0f236.yaml
+++ b/releasenotes/notes/add-openstack-exporter-role-f87a6a6f90a0f236.yaml
@@ -1,3 +1,3 @@
---
features:
- - Added ``openstack-exporter`` with alertings.
\ No newline at end of file
+ - Added ``openstack-exporter`` with alerting.
diff --git a/releasenotes/notes/add-promote-job-079c3c57f1b5e272.yaml b/releasenotes/notes/add-promote-job-079c3c57f1b5e272.yaml
index 626a321..0bdb8f0 100644
--- a/releasenotes/notes/add-promote-job-079c3c57f1b5e272.yaml
+++ b/releasenotes/notes/add-promote-job-079c3c57f1b5e272.yaml
@@ -1,4 +1,4 @@
---
features:
- Add jobs to promote the generated artifact to the tarballs server in order
- to make it easy to pull in latest version.
\ No newline at end of file
+ to make it easy to pull in the latest version.
diff --git a/releasenotes/notes/add-wheel-builds-e731c5a64f98964b.yaml b/releasenotes/notes/add-wheel-builds-e731c5a64f98964b.yaml
index 5d11c4f..4acf240 100644
--- a/releasenotes/notes/add-wheel-builds-e731c5a64f98964b.yaml
+++ b/releasenotes/notes/add-wheel-builds-e731c5a64f98964b.yaml
@@ -1,3 +1,3 @@
---
features:
- - Added Zuul jobs for building wheels and publishing them
\ No newline at end of file
+ - Added Zuul jobs for building wheels and publishing them
diff --git a/releasenotes/notes/add-workspace-generation-8ff28781216beccd.yaml b/releasenotes/notes/add-workspace-generation-8ff28781216beccd.yaml
index a4780e5..8f4ade7 100644
--- a/releasenotes/notes/add-workspace-generation-8ff28781216beccd.yaml
+++ b/releasenotes/notes/add-workspace-generation-8ff28781216beccd.yaml
@@ -1,4 +1,4 @@
---
features:
- Added playbook to allow for generating workspace for deployment and
- integrate it into Molecule in order to make sure we always test it.
\ No newline at end of file
+ integrating it into Molecule to make sure we always test it.
diff --git a/releasenotes/notes/added-role-docs-e7203e2b3db04f9f.yaml b/releasenotes/notes/added-role-docs-e7203e2b3db04f9f.yaml
index ecade5f..066acef 100644
--- a/releasenotes/notes/added-role-docs-e7203e2b3db04f9f.yaml
+++ b/releasenotes/notes/added-role-docs-e7203e2b3db04f9f.yaml
@@ -1,3 +1,3 @@
---
other:
- - Added basic documentation infrastructure.
\ No newline at end of file
+ - Added basic documentation infrastructure.
diff --git a/releasenotes/notes/correct_nova_timeout-111d1967cacf02dc.yaml b/releasenotes/notes/correct_nova_timeout-111d1967cacf02dc.yaml
index 360426e..b620b50 100644
--- a/releasenotes/notes/correct_nova_timeout-111d1967cacf02dc.yaml
+++ b/releasenotes/notes/correct_nova_timeout-111d1967cacf02dc.yaml
@@ -1,3 +1,2 @@
---
fixes: Correct the Nova timeout
-
diff --git a/releasenotes/notes/create-cloud-resources-dd6b1441b047fe98.yaml b/releasenotes/notes/create-cloud-resources-dd6b1441b047fe98.yaml
index e0bc21f..9f7e2fe 100644
--- a/releasenotes/notes/create-cloud-resources-dd6b1441b047fe98.yaml
+++ b/releasenotes/notes/create-cloud-resources-dd6b1441b047fe98.yaml
@@ -1,3 +1,3 @@
---
features:
- - Create cloud resources such as networks and flavors
\ No newline at end of file
+ - Create cloud resources such as networks and flavors
diff --git a/releasenotes/notes/fix-ceph-csi-monmap-89505192fb838958.yaml b/releasenotes/notes/fix-ceph-csi-monmap-89505192fb838958.yaml
index e3f87ea..d165dc0 100644
--- a/releasenotes/notes/fix-ceph-csi-monmap-89505192fb838958.yaml
+++ b/releasenotes/notes/fix-ceph-csi-monmap-89505192fb838958.yaml
@@ -2,8 +2,6 @@
feature:
- |
When we have ceph public network running on a
- separate network, we should dump the correct
- monitor ip addresses in order to ceph csi to be
+ separate network, we should dump the correct
+ monitor ip addresses so that ceph csi is
able to talk to ceph cluster and provision pvcs.
-
-
diff --git a/releasenotes/notes/fix-git-mirrors-af8cec9540a12842.yaml b/releasenotes/notes/fix-git-mirrors-af8cec9540a12842.yaml
index 6955ba8..9b285be 100644
--- a/releasenotes/notes/fix-git-mirrors-af8cec9540a12842.yaml
+++ b/releasenotes/notes/fix-git-mirrors-af8cec9540a12842.yaml
@@ -1,3 +1,3 @@
---
fixes:
- - The GitHub mirroring job was not included to run so this patch fixes that.
\ No newline at end of file
+ - The GitHub mirroring job was not included to run, so this patch fixes that.
diff --git a/releasenotes/notes/fix-ipmi-exporter-3099bb1397c884d4.yaml b/releasenotes/notes/fix-ipmi-exporter-3099bb1397c884d4.yaml
index b21a50f..a6556af 100644
--- a/releasenotes/notes/fix-ipmi-exporter-3099bb1397c884d4.yaml
+++ b/releasenotes/notes/fix-ipmi-exporter-3099bb1397c884d4.yaml
@@ -3,4 +3,4 @@
- The IPMI exporter depended on the `ipmi` module being loaded, however, it
is the case that the module could be loaded on a virtual machine. This
patch instead only runs it on systems that don't expose the ``HYPERVISOR``
- flag.
\ No newline at end of file
+ flag.
diff --git a/releasenotes/notes/fix-semver-0aa05baa8ecdb2b0.yaml b/releasenotes/notes/fix-semver-0aa05baa8ecdb2b0.yaml
index 3414941..812184f 100644
--- a/releasenotes/notes/fix-semver-0aa05baa8ecdb2b0.yaml
+++ b/releasenotes/notes/fix-semver-0aa05baa8ecdb2b0.yaml
@@ -1,4 +1,4 @@
---
fixes:
- Fix ``pbr`` version so that the ``.devN`` part is ``-N`` instead to have
- proper semantic versioning.
\ No newline at end of file
+ proper semantic versioning.
diff --git a/releasenotes/notes/fix-senlin-username-a8a238893e806d8d.yaml b/releasenotes/notes/fix-senlin-username-a8a238893e806d8d.yaml
index 1c2b7a8..16488bf 100644
--- a/releasenotes/notes/fix-senlin-username-a8a238893e806d8d.yaml
+++ b/releasenotes/notes/fix-senlin-username-a8a238893e806d8d.yaml
@@ -2,4 +2,4 @@
fixes:
- |
Fixes senlin username which was wrongly pointing to cinder, causing
- authentication issues to volume service.
\ No newline at end of file
+ authentication issues with the volume service.
diff --git a/releasenotes/notes/fix_osd_mon_hosts-aa7bd5fa08241131.yaml b/releasenotes/notes/fix_osd_mon_hosts-aa7bd5fa08241131.yaml
index 5b718d6..d6d3b20 100644
--- a/releasenotes/notes/fix_osd_mon_hosts-aa7bd5fa08241131.yaml
+++ b/releasenotes/notes/fix_osd_mon_hosts-aa7bd5fa08241131.yaml
@@ -3,4 +3,4 @@
- |
Since we define the monmap based on the
ceph public network, we should build ceph.conf
- for osd with the correct ip addresses.
\ No newline at end of file
+ for the OSDs with the correct IP addresses.
diff --git a/releasenotes/notes/ingress-add-variable-for-annotations-b824db994ead135b.yaml b/releasenotes/notes/ingress-add-variable-for-annotations-b824db994ead135b.yaml
index 9277962..4e39c12 100644
--- a/releasenotes/notes/ingress-add-variable-for-annotations-b824db994ead135b.yaml
+++ b/releasenotes/notes/ingress-add-variable-for-annotations-b824db994ead135b.yaml
@@ -1,8 +1,8 @@
---
features:
- |
- Add ansible variables for ingress annotations for roles consuming
- openstack_helm_ingress role
+ Add Ansible variables for ingress annotations for roles consuming
+ the openstack_helm_ingress role:
- openstack_helm_barbican
- openstack_helm_cinder
diff --git a/releasenotes/notes/lookup-ceph-public-iface-e9147f1615e8371b.yaml b/releasenotes/notes/lookup-ceph-public-iface-e9147f1615e8371b.yaml
index 636f265..ac8af8c 100644
--- a/releasenotes/notes/lookup-ceph-public-iface-e9147f1615e8371b.yaml
+++ b/releasenotes/notes/lookup-ceph-public-iface-e9147f1615e8371b.yaml
@@ -3,4 +3,4 @@
- |
Add the ability to lookup for the ip address of the
ceph public network. This is useful when the ceph public
- network is differnet from the default network on the system.
\ No newline at end of file
+ network is different from the default network on the system.
diff --git a/releasenotes/notes/use-symbolic-link-for-adm-cfg-94f57076773d7864.yaml b/releasenotes/notes/use-symbolic-link-for-adm-cfg-94f57076773d7864.yaml
index 2e3d646..69cbddd 100644
--- a/releasenotes/notes/use-symbolic-link-for-adm-cfg-94f57076773d7864.yaml
+++ b/releasenotes/notes/use-symbolic-link-for-adm-cfg-94f57076773d7864.yaml
@@ -4,4 +4,3 @@
Use symbolic link for kube admin config, instead of copy and maintain
two identical file. `/root/.kube/config` is now a symbolic link of file
`/etc/kubernetes/admin.conf`. This will prevent any unsync file issue.
-
diff --git a/roles/ceph_repository/templates/apt-preferences.j2 b/roles/ceph_repository/templates/apt-preferences.j2
index 91d6b11..c197476 100644
--- a/roles/ceph_repository/templates/apt-preferences.j2
+++ b/roles/ceph_repository/templates/apt-preferences.j2
@@ -8,4 +8,4 @@
Package: ceph-osd
Pin: version {{ ceph_repository_version }}-*
-Pin-Priority: 1000
\ No newline at end of file
+Pin-Priority: 1000
diff --git a/roles/containerd/templates/config.toml.j2 b/roles/containerd/templates/config.toml.j2
index 4ce5d20..84cad6d 100644
--- a/roles/containerd/templates/config.toml.j2
+++ b/roles/containerd/templates/config.toml.j2
@@ -2,4 +2,4 @@
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
- sandbox_image = "{{ containerd_pause_image }}"
\ No newline at end of file
+ sandbox_image = "{{ containerd_pause_image }}"
diff --git a/roles/kubernetes/templates/apt-preferences.j2 b/roles/kubernetes/templates/apt-preferences.j2
index 494a3a8..aad283b 100644
--- a/roles/kubernetes/templates/apt-preferences.j2
+++ b/roles/kubernetes/templates/apt-preferences.j2
@@ -8,4 +8,4 @@
Package: kubelet
Pin: version {{ kubernetes_version }}-00
-Pin-Priority: 1000
\ No newline at end of file
+Pin-Priority: 1000
diff --git a/roles/kubernetes/templates/check_apiserver.sh.j2 b/roles/kubernetes/templates/check_apiserver.sh.j2
index bc9aa95..5d81310 100644
--- a/roles/kubernetes/templates/check_apiserver.sh.j2
+++ b/roles/kubernetes/templates/check_apiserver.sh.j2
@@ -8,4 +8,4 @@
curl --silent --max-time 2 --insecure https://localhost:16443/ -o /dev/null || errorExit "Error GET https://localhost:16443/"
if ip addr | grep -q {{ kubernetes_keepalived_vip }}; then
curl --silent --max-time 2 --insecure https://{{ kubernetes_keepalived_vip }}:6443/ -o /dev/null || errorExit "Error GET https://{{ kubernetes_keepalived_vip }}:6443/"
-fi
\ No newline at end of file
+fi
diff --git a/roles/kubernetes/templates/keepalived.conf.j2 b/roles/kubernetes/templates/keepalived.conf.j2
index 58fde31..7efe465 100644
--- a/roles/kubernetes/templates/keepalived.conf.j2
+++ b/roles/kubernetes/templates/keepalived.conf.j2
@@ -22,4 +22,4 @@
track_script {
check_apiserver
}
-}
\ No newline at end of file
+}
diff --git a/roles/kubernetes/templates/kubeadm.yaml.j2 b/roles/kubernetes/templates/kubeadm.yaml.j2
index 5ef4829..e12280b 100644
--- a/roles/kubernetes/templates/kubeadm.yaml.j2
+++ b/roles/kubernetes/templates/kubeadm.yaml.j2
@@ -56,4 +56,4 @@
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
-metricsBindAddress: 0.0.0.0
\ No newline at end of file
+metricsBindAddress: 0.0.0.0
diff --git a/roles/kubernetes/templates/modules-load.conf.j2 b/roles/kubernetes/templates/modules-load.conf.j2
index 2c4d984..351a663 100644
--- a/roles/kubernetes/templates/modules-load.conf.j2
+++ b/roles/kubernetes/templates/modules-load.conf.j2
@@ -1,3 +1,3 @@
{% for kubernetes_kernel_module in kubernetes_kernel_modules %}
{{ kubernetes_kernel_module }}
-{% endfor %}
\ No newline at end of file
+{% endfor %}
diff --git a/roles/openstack_cli/templates/openrc.j2 b/roles/openstack_cli/templates/openrc.j2
index 7b56e0f..ba230f4 100644
--- a/roles/openstack_cli/templates/openrc.j2
+++ b/roles/openstack_cli/templates/openrc.j2
@@ -9,4 +9,4 @@
export OS_USERNAME="{{ openstack_helm_endpoints['identity']['auth']['admin']['username'] }}"
export OS_PASSWORD="{{ openstack_helm_endpoints['identity']['auth']['admin']['password'] }}"
export OS_PROJECT_DOMAIN_NAME=Default
-export OS_PROJECT_NAME=admin
\ No newline at end of file
+export OS_PROJECT_NAME=admin
diff --git a/roles/openstack_helm_horizon/files/50-monasca-ui-settings.py b/roles/openstack_helm_horizon/files/50-monasca-ui-settings.py
index 28b4a99..1171a8e 100644
--- a/roles/openstack_helm_horizon/files/50-monasca-ui-settings.py
+++ b/roles/openstack_helm_horizon/files/50-monasca-ui-settings.py
@@ -3,54 +3,49 @@
# Service group names (global across all projects):
MONITORING_SERVICES_GROUPS = [
- {'name': _('OpenStack Services'), 'groupBy': 'service'},
- {'name': _('Servers'), 'groupBy': 'hostname'}
+ {"name": _("OpenStack Services"), "groupBy": "service"},
+ {"name": _("Servers"), "groupBy": "hostname"},
]
# Services being monitored
MONITORING_SERVICES = getattr(
- settings,
- 'MONITORING_SERVICES_GROUPS',
- MONITORING_SERVICES_GROUPS
+ settings, "MONITORING_SERVICES_GROUPS", MONITORING_SERVICES_GROUPS
)
-MONITORING_SERVICE_VERSION = getattr(
- settings, 'MONITORING_SERVICE_VERSION', '2_0'
-)
-MONITORING_SERVICE_TYPE = getattr(
- settings, 'MONITORING_SERVICE_TYPE', 'monitoring'
-)
+MONITORING_SERVICE_VERSION = getattr(settings, "MONITORING_SERVICE_VERSION", "2_0")
+MONITORING_SERVICE_TYPE = getattr(settings, "MONITORING_SERVICE_TYPE", "monitoring")
MONITORING_ENDPOINT_TYPE = getattr(
# NOTE(trebskit) # will default to OPENSTACK_ENDPOINT_TYPE
- settings, 'MONITORING_ENDPOINT_TYPE', None
+ settings,
+ "MONITORING_ENDPOINT_TYPE",
+ None,
)
# Grafana button titles/file names (global across all projects):
# GRAFANA_LINKS = [{"raw": True, "path": "monasca-dashboard", "title": "Sub page1"}]
GRAFANA_LINKS = []
-DASHBOARDS = getattr(settings, 'GRAFANA_LINKS', GRAFANA_LINKS)
+DASHBOARDS = getattr(settings, "GRAFANA_LINKS", GRAFANA_LINKS)
GRAFANA_URL = {"regionOne": "/grafana"}
-SHOW_GRAFANA_HOME = getattr(settings, 'SHOW_GRAFANA_HOME', True)
+SHOW_GRAFANA_HOME = getattr(settings, "SHOW_GRAFANA_HOME", True)
-ENABLE_LOG_MANAGEMENT_BUTTON = getattr(
- settings, 'ENABLE_LOG_MANAGEMENT_BUTTON', False)
+ENABLE_LOG_MANAGEMENT_BUTTON = getattr(settings, "ENABLE_LOG_MANAGEMENT_BUTTON", False)
ENABLE_EVENT_MANAGEMENT_BUTTON = getattr(
- settings, 'ENABLE_EVENT_MANAGEMENT_BUTTON', False)
+ settings, "ENABLE_EVENT_MANAGEMENT_BUTTON", False
+)
-KIBANA_POLICY_RULE = getattr(settings, 'KIBANA_POLICY_RULE',
- 'monitoring:kibana_access')
-KIBANA_POLICY_SCOPE = getattr(settings, 'KIBANA_POLICY_SCOPE',
- 'monitoring')
-KIBANA_HOST = getattr(settings, 'KIBANA_HOST',
- 'http://192.168.10.6:5601/')
+KIBANA_POLICY_RULE = getattr(settings, "KIBANA_POLICY_RULE", "monitoring:kibana_access")
+KIBANA_POLICY_SCOPE = getattr(settings, "KIBANA_POLICY_SCOPE", "monitoring")
+KIBANA_HOST = getattr(settings, "KIBANA_HOST", "http://192.168.10.6:5601/")
-OPENSTACK_SSL_NO_VERIFY = getattr(
- settings, 'OPENSTACK_SSL_NO_VERIFY', False)
-OPENSTACK_SSL_CACERT = getattr(
- settings, 'OPENSTACK_SSL_CACERT', None)
+OPENSTACK_SSL_NO_VERIFY = getattr(settings, "OPENSTACK_SSL_NO_VERIFY", False)
+OPENSTACK_SSL_CACERT = getattr(settings, "OPENSTACK_SSL_CACERT", None)
-POLICY_FILES = getattr(settings, 'POLICY_FILES', {})
-POLICY_FILES.update({'monitoring': 'monitoring_policy.json', }) # noqa
-setattr(settings, 'POLICY_FILES', POLICY_FILES)
+POLICY_FILES = getattr(settings, "POLICY_FILES", {})
+POLICY_FILES.update(
+ {
+ "monitoring": "monitoring_policy.json",
+ }
+) # noqa
+setattr(settings, "POLICY_FILES", POLICY_FILES)
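
Editor's note: the Monasca UI settings above consistently read their defaults through getattr(settings, NAME, default), so any value already present on the Django settings object wins over the plugin's default. A minimal standalone sketch of that pattern (the fake settings object is illustrative, not part of the patch):

import types

# Pretend Django settings where a deployer overrode one value.
settings = types.SimpleNamespace(SHOW_GRAFANA_HOME=False)

SHOW_GRAFANA_HOME = getattr(settings, "SHOW_GRAFANA_HOME", True)
KIBANA_POLICY_SCOPE = getattr(settings, "KIBANA_POLICY_SCOPE", "monitoring")

assert SHOW_GRAFANA_HOME is False  # override from settings wins
assert KIBANA_POLICY_SCOPE == "monitoring"  # falls back to the plugin default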