Reformat code for black
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 7a12515..9055a03 100755
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -40,8 +40,8 @@
 # openstackdocstheme options
 openstackdocs_repo_name = "openstack/staffeln"
 openstackdocs_bug_project = (
-    "replace with the name of the project on "
-    "Launchpad or the ID from Storyboard")
+    "replace with the name of the project on " "Launchpad or the ID from Storyboard"
+)
 openstackdocs_bug_tag = ""
 
 # If true, '()' will be appended to :func: etc. cross-reference text.
diff --git a/staffeln/api/app.py b/staffeln/api/app.py
index b19b9b8..8b54dad 100755
--- a/staffeln/api/app.py
+++ b/staffeln/api/app.py
@@ -1,12 +1,10 @@
 from __future__ import annotations

 

-from flask import Flask

-from flask import request

-from flask import Response

+from flask import Flask, Response, request

 from oslo_log import log

 

-from staffeln.common import context

 from staffeln import objects

+from staffeln.common import context

 

 ctx = context.make_context()

 app = Flask(__name__)

diff --git a/staffeln/api/middleware/parsable_error.py b/staffeln/api/middleware/parsable_error.py
index 2443201..343c8c2 100755
--- a/staffeln/api/middleware/parsable_error.py
+++ b/staffeln/api/middleware/parsable_error.py
@@ -80,10 +80,7 @@
                 state["status_code"] = status_code

             except (ValueError, TypeError):  # pragma: nocover

                 raise Exception(

-                    _(

-                        "ErrorDocumentMiddleware received an invalid "

-                        "status %s"

-                    )

+                    _("ErrorDocumentMiddleware received an invalid " "status %s")

                     % status

                 )

             else:

diff --git a/staffeln/cmd/api.py b/staffeln/cmd/api.py
index ba2ce09..a175089 100755
--- a/staffeln/cmd/api.py
+++ b/staffeln/cmd/api.py
@@ -7,9 +7,9 @@
 

 from oslo_log import log as logging

 

+import staffeln.conf

 from staffeln.api import app as api_app

 from staffeln.common import service

-import staffeln.conf

 from staffeln.i18n import _

 

 CONF = staffeln.conf.CONF

diff --git a/staffeln/cmd/conductor.py b/staffeln/cmd/conductor.py
index 3b2b84e..eaec3cf 100755
--- a/staffeln/cmd/conductor.py
+++ b/staffeln/cmd/conductor.py
@@ -5,9 +5,9 @@
 import cotyledon

 from cotyledon import oslo_config_glue

 

+import staffeln.conf

 from staffeln.common import service

 from staffeln.conductor import manager

-import staffeln.conf

 

 CONF = staffeln.conf.CONF

 

diff --git a/staffeln/cmd/dbmanage.py b/staffeln/cmd/dbmanage.py
index 433b7d7..bd6f01d 100644
--- a/staffeln/cmd/dbmanage.py
+++ b/staffeln/cmd/dbmanage.py
@@ -8,8 +8,8 @@
 
 from oslo_config import cfg
 
-from staffeln.common import service
 from staffeln import conf
+from staffeln.common import service
 from staffeln.db import migration
 
 CONF = conf.CONF
@@ -27,14 +27,10 @@
 
 def add_command_parsers(subparsers):
 
-    parser = subparsers.add_parser(
-        "create_schema", help="Create the database schema."
-    )
+    parser = subparsers.add_parser("create_schema", help="Create the database schema.")
     parser.set_defaults(func=DBCommand.create_schema)
 
-    parser = subparsers.add_parser(
-        "upgrade", help="Upgrade the database schema."
-    )
+    parser = subparsers.add_parser("upgrade", help="Upgrade the database schema.")
     parser.add_argument("revision", nargs="?")
     parser.set_defaults(func=DBCommand.do_upgrade)
 
diff --git a/staffeln/common/email.py b/staffeln/common/email.py
index 79d7225..368028b 100644
--- a/staffeln/common/email.py
+++ b/staffeln/common/email.py
@@ -2,11 +2,11 @@
 

 from __future__ import annotations

 

+import smtplib

+from email import utils

 from email.header import Header

 from email.mime.multipart import MIMEMultipart

 from email.mime.text import MIMEText

-from email import utils

-import smtplib

 

 from oslo_log import log

 

diff --git a/staffeln/common/lock.py b/staffeln/common/lock.py
index 5f21bf6..9f7db41 100644
--- a/staffeln/common/lock.py
+++ b/staffeln/common/lock.py
@@ -5,15 +5,14 @@
 import os
 import re
 import sys
-from typing import Optional  # noqa: H301
 import uuid
+from typing import Optional  # noqa: H301
 
-from oslo_log import log
 import sherlock
+from oslo_log import log
 from tooz import coordination
 
-from staffeln import conf
-from staffeln import exception
+from staffeln import conf, exception
 
 CONF = conf.CONF
 LOG = log.getLogger(__name__)
diff --git a/staffeln/common/openstack.py b/staffeln/common/openstack.py
index e9f730c..710d112 100644
--- a/staffeln/common/openstack.py
+++ b/staffeln/common/openstack.py
@@ -1,7 +1,6 @@
 from __future__ import annotations

 

-from openstack import exceptions

-from openstack import proxy

+from openstack import exceptions, proxy

 from oslo_log import log

 

 from staffeln.common import auth

@@ -20,9 +19,7 @@
         project_id = project.get("id")

 

         if project_id not in self.conn_list:

-            LOG.debug(

-                _("Initiate connection for project %s" % project.get("name"))

-            )

+            LOG.debug(_("Initiate connection for project %s" % project.get("name")))

             conn = self.conn.connect_as_project(project)

             self.conn_list[project_id] = conn

         LOG.debug(_("Connect as project %s" % project.get("name")))

@@ -33,14 +30,10 @@
         user_name = self.conn.config.auth["username"]

         if "user_domain_id" in self.conn.config.auth:

             domain_id = self.conn.config.auth["user_domain_id"]

-            user = self.conn.get_user(

-                name_or_id=user_name, domain_id=domain_id

-            )

+            user = self.conn.get_user(name_or_id=user_name, domain_id=domain_id)

         elif "user_domain_name" in self.conn.config.auth:

             domain_name = self.conn.config.auth["user_domain_name"]

-            user = self.conn.get_user(

-                name_or_id=user_name, domain_id=domain_name

-            )

+            user = self.conn.get_user(name_or_id=user_name, domain_id=domain_name)

         else:

             user = self.conn.get_user(name_or_id=user_name)

         return user.id

@@ -81,9 +74,7 @@
                 project_id=project_id,

             )

         else:

-            return self.conn.compute.servers(

-                details=details, all_projects=all_projects

-            )

+            return self.conn.compute.servers(details=details, all_projects=all_projects)

 

     def get_volume(self, uuid, project_id):

         return self.conn.get_volume_by_id(uuid)

@@ -149,15 +140,11 @@
 

         if usage:

             resp = self.conn.block_storage.get(

-                "/os-quota-sets/{project_id}?usage=True".format(

-                    project_id=project_id

-                )

+                "/os-quota-sets/{project_id}?usage=True".format(project_id=project_id)

             )

         else:

             resp = self.conn.block_storage.get(

                 "/os-quota-sets/{project_id}".format(project_id=project_id)

             )

-        data = proxy._json_response(

-            resp, error_message="cinder client call failed"

-        )

+        data = proxy._json_response(resp, error_message="cinder client call failed")

         return self.conn._get_and_munchify("quota_set", data)

diff --git a/staffeln/common/service.py b/staffeln/common/service.py
index c657896..98399f6 100755
--- a/staffeln/common/service.py
+++ b/staffeln/common/service.py
@@ -15,9 +15,9 @@
 

 from oslo_log import log as logging

 

-from staffeln.common import config

 import staffeln.conf

 from staffeln import objects

+from staffeln.common import config

 

 CONF = staffeln.conf.CONF

 

diff --git a/staffeln/conductor/backup.py b/staffeln/conductor/backup.py
index 5319035..760132c 100755
--- a/staffeln/conductor/backup.py
+++ b/staffeln/conductor/backup.py
@@ -1,8 +1,7 @@
 from __future__ import annotations
 
 import collections
-from datetime import timedelta
-from datetime import timezone
+from datetime import timedelta, timezone
 
 from openstack.exceptions import HttpException as OpenstackHttpException
 from openstack.exceptions import ResourceNotFound as OpenstackResourceNotFound
@@ -10,14 +9,12 @@
 from oslo_log import log
 from oslo_utils import timeutils
 
-from staffeln.common import constants
-from staffeln.common import context
-from staffeln.common import openstack
+import staffeln.conf
+from staffeln import objects
+from staffeln.common import constants, context, openstack
 from staffeln.common import time as xtime
 from staffeln.conductor import result
-import staffeln.conf
 from staffeln.i18n import _
-from staffeln import objects
 
 CONF = staffeln.conf.CONF
 LOG = log.getLogger(__name__)
@@ -148,8 +145,8 @@
                 return False
 
             return (
-                metadata[CONF.conductor.backup_metadata_key].lower(
-                ) == constants.BACKUP_ENABLED_KEY
+                metadata[CONF.conductor.backup_metadata_key].lower()
+                == constants.BACKUP_ENABLED_KEY
             )
         else:
             return True
@@ -164,8 +161,8 @@
             if not res:
                 reason = _(
                     "Volume %s is not triger new backup task because "
-                    "it is in %s status" %
-                    (volume_id, volume["status"]))
+                    "it is in %s status" % (volume_id, volume["status"])
+                )
                 LOG.info(reason)
                 return reason
             return res
@@ -279,9 +276,7 @@
                 )
 
         except OpenstackSDKException as e:
-            LOG.warn(
-                f"Backup {backup_object.backup_id} deletion failed. {str(e)}"
-            )
+            LOG.warn(f"Backup {backup_object.backup_id} deletion failed. {str(e)}")
             # We don't delete backup object if any exception occured
             # backup_object.delete_backup()
             return False
@@ -296,7 +291,8 @@
                     f"{backup_object.backup_id} is not existing in "
                     "Openstack. Please check your access right to this "
                     "project. "
-                    "Skip this backup from remove now and will retry later.")
+                    "Skip this backup from remove now and will retry later."
+                )
                 # Don't remove backup object, keep it and retry on next
                 # periodic task backup_object.delete_backup()
                 return
@@ -309,17 +305,15 @@
                 LOG.info(
                     f"Backup {backup_object.backup_id} is removed from "
                     "Openstack or cinder-backup is not existing in the "
-                    "cloud. Start removing backup object from Staffeln.")
+                    "cloud. Start removing backup object from Staffeln."
+                )
                 return backup_object.delete_backup()
 
             self.openstacksdk.delete_backup(uuid=backup_object.backup_id)
             # Don't remove backup until it's officially removed from Cinder
             # backup_object.delete_backup()
         except Exception as e:
-            if (
-                skip_inc_err and (
-                    "Incremental backups exist for this backup" in str(e))
-            ):
+            if skip_inc_err and ("Incremental backups exist for this backup" in str(e)):
                 LOG.debug(str(e))
             else:
                 LOG.info(
@@ -355,15 +349,11 @@
                 # Ignore backup interval
                 return True
             interval = CONF.conductor.backup_min_interval
-            threshold_strtime = timeutils.utcnow() - timedelta(
-                seconds=interval
-            )
+            threshold_strtime = timeutils.utcnow() - timedelta(seconds=interval)
             backups = self.get_backups(
                 filters={
                     "volume_id__eq": volume_id,
-                    "created_at__gt": threshold_strtime.astimezone(
-                        timezone.utc
-                    ),
+                    "created_at__gt": threshold_strtime.astimezone(timezone.utc),
                 }
             )
             if backups:
@@ -407,8 +397,7 @@
                     return True
         except Exception as e:
             LOG.debug(
-                "Failed to get backup history to decide backup "
-                f"method. Reason: {e}"
+                "Failed to get backup history to decide backup " f"method. Reason: {e}"
             )
         return False
 
@@ -513,14 +502,16 @@
                     LOG.debug(
                         f"Found retention time ({server_retention_time}) "
                         f"defined for server {server.id}, "
-                        "Adding it retention reference map.")
+                        "Adding it retention reference map."
+                    )
                     retention_map[server.id] = server_retention_time
                 else:
                     LOG.info(
                         f"Server retention time for instance {server.id} is "
                         "incorrect. Please follow "
                         "'<YEARS>y<MONTHS>m<WEEKS>w<DAYS>d<HOURS>"
-                        "h<MINUTES>min<SECONDS>s' format.")
+                        "h<MINUTES>min<SECONDS>s' format."
+                    )
         return retention_map
 
     def _volume_queue(self, task):
@@ -575,10 +566,7 @@
                 # NOTE(Alex): no need to wait because we have a cycle time out
                 if project_id not in self.project_list:
                     LOG.warn(
-                        _(
-                            "Project ID %s is not existing in project list"
-                            % project_id
-                        )
+                        _("Project ID %s is not existing in project list" % project_id)
                     )
                     self.process_non_existing_backup(task)
                     return
@@ -586,10 +574,7 @@
                 backup_method = "Incremental" if task.incremental else "Full"
                 LOG.info(
                     _(
-                        (
-                            "%s Backup (name: %s) for volume %s creating "
-                            "in project %s"
-                        )
+                        ("%s Backup (name: %s) for volume %s creating " "in project %s")
                         % (
                             backup_method,
                             backup_name,
@@ -608,26 +593,23 @@
                 task.backup_status = constants.BACKUP_WIP
                 task.save()
             except OpenstackSDKException as error:
-                inc_err_msg = (
-                    "No backups available to do an incremental backup"
-                )
+                inc_err_msg = "No backups available to do an incremental backup"
                 if inc_err_msg in str(error):
                     LOG.info(
                         "Retry to create full backup for volume %s instead of "
-                        "incremental." %
-                        task.volume_id)
+                        "incremental." % task.volume_id
+                    )
                     task.incremental = False
                     task.save()
                 else:
                     reason = _(
                         "Backup (name: %s) creation for the volume %s "
-                        "failled. %s" %
-                        (backup_name, task.volume_id, str(error)[
-                            :64]))
+                        "failled. %s" % (backup_name, task.volume_id, str(error)[:64])
+                    )
                     LOG.warn(
                         "Backup (name: %s) creation for the volume %s "
-                        "failled. %s" %
-                        (backup_name, task.volume_id, str(error)))
+                        "failled. %s" % (backup_name, task.volume_id, str(error))
+                    )
                     task.reason = reason
                     task.backup_status = constants.BACKUP_FAILED
                     task.save()
@@ -654,8 +636,7 @@
     def process_pre_failed_backup(self, task):
         # 1.notify via email
         reason = _(
-            "The backup creation for the volume %s was prefailed."
-            % task.volume_id
+            "The backup creation for the volume %s was prefailed." % task.volume_id
         )
         LOG.warn(reason)
         task.reason = reason
@@ -664,9 +645,7 @@
 
     def process_failed_backup(self, task):
         # 1. notify via email
-        reason = (
-            f"The status of backup for the volume {task.volume_id} is error."
-        )
+        reason = f"The status of backup for the volume {task.volume_id} is error."
         LOG.warn(reason)
         # 2. delete backup generator
         try:
@@ -729,10 +708,7 @@
         if backup_gen is None:
             # TODO(Alex): need to check when it is none
             LOG.info(
-                _(
-                    "[Beta] Backup status of %s is returning none."
-                    % (queue.backup_id)
-                )
+                _("[Beta] Backup status of %s is returning none." % (queue.backup_id))
             )
             self.process_non_existing_backup(queue)
             return
@@ -741,9 +717,7 @@
         elif backup_gen.status == "available":
             self.process_available_backup(queue)
         elif backup_gen.status == "creating":
-            LOG.info(
-                "Waiting for backup of %s to be completed" % queue.volume_id
-            )
+            LOG.info("Waiting for backup of %s to be completed" % queue.volume_id)
         else:  # "deleting", "restoring", "error_restoring" status
             self.process_using_backup(queue)
 
diff --git a/staffeln/conductor/manager.py b/staffeln/conductor/manager.py
index 1d96d52..c43e13b 100755
--- a/staffeln/conductor/manager.py
+++ b/staffeln/conductor/manager.py
@@ -1,23 +1,20 @@
 from __future__ import annotations

 

-from datetime import timedelta

-from datetime import timezone

 import threading

 import time

+from datetime import timedelta, timezone

 

 import cotyledon

 from futurist import periodics

 from oslo_log import log

 from oslo_utils import timeutils

 

-from staffeln.common import constants

-from staffeln.common import context

-from staffeln.common import lock

+import staffeln.conf

+from staffeln import objects

+from staffeln.common import constants, context, lock

 from staffeln.common import time as xtime

 from staffeln.conductor import backup as backup_controller

-import staffeln.conf

 from staffeln.i18n import _

-from staffeln import objects

 

 LOG = log.getLogger(__name__)

 CONF = staffeln.conf.CONF

@@ -125,9 +122,7 @@
                 ) as t_lock:

                     if t_lock.acquired:

                         # Re-pulling status and make it's up-to-date

-                        task = self.controller.get_queue_task_by_id(

-                            task_id=task.id

-                        )

+                        task = self.controller.get_queue_task_by_id(task_id=task.id)

                         if task.backup_status == constants.BACKUP_PLANNED:

                             task.backup_status = constants.BACKUP_INIT

                             task.save()

@@ -146,13 +141,9 @@
 

     def _report_backup_result(self):

         report_period = CONF.conductor.report_period

-        threshold_strtime = timeutils.utcnow() - timedelta(

-            seconds=report_period

-        )

+        threshold_strtime = timeutils.utcnow() - timedelta(seconds=report_period)

 

-        filters = {

-            "created_at__gt": threshold_strtime.astimezone(timezone.utc)

-        }

+        filters = {"created_at__gt": threshold_strtime.astimezone(timezone.utc)}

         report_tss = objects.ReportTimestamp.list(  # pylint: disable=E1120

             context=self.ctx, filters=filters

         )

@@ -166,13 +157,9 @@
             threshold_strtime = timeutils.utcnow() - timedelta(

                 seconds=report_period * 10

             )

-            filters = {

-                "created_at__lt": threshold_strtime.astimezone(timezone.utc)

-            }

-            old_report_tss = (

-                objects.ReportTimestamp.list(  # pylint: disable=E1120

-                    context=self.ctx, filters=filters

-                )

+            filters = {"created_at__lt": threshold_strtime.astimezone(timezone.utc)}

+            old_report_tss = objects.ReportTimestamp.list(  # pylint: disable=E1120

+                context=self.ctx, filters=filters

             )

             for report_ts in old_report_tss:

                 report_ts.delete()

@@ -181,9 +168,7 @@
         LOG.info("Backup manager started %s" % str(time.time()))

         LOG.info("%s periodics" % self.name)

 

-        @periodics.periodic(

-            spacing=backup_service_period, run_immediately=True

-        )

+        @periodics.periodic(spacing=backup_service_period, run_immediately=True)

         def backup_tasks():

             with self.lock_mgt:

                 with lock.Lock(self.lock_mgt, constants.PULLER) as puller:

@@ -269,14 +254,10 @@
     def rotation_engine(self, retention_service_period):

         LOG.info(f"{self.name} rotation_engine")

 

-        @periodics.periodic(

-            spacing=retention_service_period, run_immediately=True

-        )

+        @periodics.periodic(spacing=retention_service_period, run_immediately=True)

         def rotation_tasks():

             with self.lock_mgt:

-                with lock.Lock(

-                    self.lock_mgt, constants.RETENTION

-                ) as retention:

+                with lock.Lock(self.lock_mgt, constants.RETENTION) as retention:

                     if not retention.acquired:

                         return

 

@@ -291,9 +272,8 @@
                     )

 

                     # No way to judge retention

-                    if (

-                        self.threshold_strtime is None and (

-                            not self.instance_retention_map)

+                    if self.threshold_strtime is None and (

+                        not self.instance_retention_map

                     ):

                         return

                     backup_instance_map = {}

@@ -309,9 +289,7 @@
                         # after we enable incremental backup.

                         # So we need to have information to judge on.

                         if backup.instance_id in backup_instance_map:

-                            backup_instance_map[backup.instance_id].append(

-                                backup

-                            )

+                            backup_instance_map[backup.instance_id].append(backup)

                         else:

                             backup_instance_map[backup.instance_id] = [backup]

 

diff --git a/staffeln/conductor/result.py b/staffeln/conductor/result.py
index c602ae3..6bc24f5 100644
--- a/staffeln/conductor/result.py
+++ b/staffeln/conductor/result.py
@@ -5,11 +5,10 @@
 from oslo_log import log

 from oslo_utils import timeutils

 

-from staffeln.common import constants

-from staffeln.common import email

-from staffeln.common import time as xtime

 import staffeln.conf

 from staffeln import objects

+from staffeln.common import constants, email

+from staffeln.common import time as xtime

 

 CONF = staffeln.conf.CONF

 LOG = log.getLogger(__name__)

@@ -40,23 +39,23 @@
             receiver = CONF.notification.receiver

         elif not CONF.notification.project_receiver_domain:

             try:

-                receiver = (

-                    self.backup_mgt.openstacksdk.get_project_member_emails(

-                        project_id

-                    )

+                receiver = self.backup_mgt.openstacksdk.get_project_member_emails(

+                    project_id

                 )

                 if not receiver:

                     LOG.warn(

                         "No email can be found from members of project "

                         f"{project_id}. "

-                        "Skip report now and will try to report later.")

+                        "Skip report now and will try to report later."

+                    )

                     return False

             except Exception as ex:

                 LOG.warn(

                     "Failed to fetch emails from project members with "

                     f"exception: {str(ex)} "

                     "As also no receiver email or project receiver domain are "

-                    "configured. Will try to report later.")

+                    "configured. Will try to report later."

+                )

                 return False

         else:

             receiver_domain = CONF.notification.project_receiver_domain

@@ -124,21 +123,31 @@
         if success_tasks:

             success_volumes = "<br>".join(

                 [

-                    (f"Volume ID: {str(e.volume_id)}, "

-                     f"Backup ID: {str(e.backup_id)}, "

-                     "Backup mode: "

-                     f"{'Incremental' if e.incremental else 'Full'}, "

-                     f"Created at: {str(e.created_at)}, Last updated at: "

-                     f"{str(e.updated_at)}") for e in success_tasks])

+                    (

+                        f"Volume ID: {str(e.volume_id)}, "

+                        f"Backup ID: {str(e.backup_id)}, "

+                        "Backup mode: "

+                        f"{'Incremental' if e.incremental else 'Full'}, "

+                        f"Created at: {str(e.created_at)}, Last updated at: "

+                        f"{str(e.updated_at)}"

+                    )

+                    for e in success_tasks

+                ]

+            )

         else:

             success_volumes = "<br>"

         if failed_tasks:

             failed_volumes = "<br>".join(

                 [

-                    (f"Volume ID: {str(e.volume_id)}, "

-                     f"Reason: {str(e.reason)}, "

-                     f"Created at: {str(e.created_at)}, Last updated at: "

-                     f"{str(e.updated_at)}") for e in failed_tasks])

+                    (

+                        f"Volume ID: {str(e.volume_id)}, "

+                        f"Reason: {str(e.reason)}, "

+                        f"Created at: {str(e.created_at)}, Last updated at: "

+                        f"{str(e.updated_at)}"

+                    )

+                    for e in failed_tasks

+                ]

+            )

         else:

             failed_volumes = "<br>"

         html += (

@@ -152,7 +161,8 @@
             "<h3>Success List</h3>"

             f"<FONT COLOR=GREEN><h4>{success_volumes}</h4></FONT><br>"

             "<h3>Failed List</h3>"

-            f"<FONT COLOR=RED><h4>{failed_volumes}</h4></FONT><br>")

+            f"<FONT COLOR=RED><h4>{failed_volumes}</h4></FONT><br>"

+        )

         self.content += html

         subject = f"Staffeln Backup result: {project_id}"

         reported = self.send_result_email(

diff --git a/staffeln/conf/__init__.py b/staffeln/conf/__init__.py
index 4da72a5..76c247e 100755
--- a/staffeln/conf/__init__.py
+++ b/staffeln/conf/__init__.py
@@ -2,11 +2,7 @@
 

 from oslo_config import cfg

 

-from staffeln.conf import api

-from staffeln.conf import conductor

-from staffeln.conf import database

-from staffeln.conf import notify

-from staffeln.conf import paths

+from staffeln.conf import api, conductor, database, notify, paths

 

 CONF = cfg.CONF

 

diff --git a/staffeln/conf/api.py b/staffeln/conf/api.py
index 16db057..4f848eb 100755
--- a/staffeln/conf/api.py
+++ b/staffeln/conf/api.py
@@ -19,9 +19,7 @@
     cfg.PortOpt(
         "port",
         default=8808,
-        help=_(
-            "Staffeln API listens on this port number for incoming requests."
-        ),
+        help=_("Staffeln API listens on this port number for incoming requests."),
     ),
     cfg.BoolOpt("enabled_ssl", default=False, help=_("ssl enabled")),
     cfg.StrOpt("ssl_key_file", default=False, help=_("ssl key file path")),
diff --git a/staffeln/conf/conductor.py b/staffeln/conf/conductor.py
index db0f840..86407cc 100755
--- a/staffeln/conf/conductor.py
+++ b/staffeln/conf/conductor.py
@@ -8,10 +8,7 @@
 conductor_group = cfg.OptGroup(
     "conductor",
     title="Conductor Options",
-    help=_(
-        "Options under this group are used "
-        "to define Conductor's configuration."
-    ),
+    help=_("Options under this group are used " "to define Conductor's configuration."),
 )
 
 backup_opts = [
@@ -60,9 +57,7 @@
     ),
     cfg.StrOpt(
         "backup_metadata_key",
-        help=_(
-            "The key string of metadata the VM, which requres back up, has"
-        ),
+        help=_("The key string of metadata the VM, which requres back up, has"),
     ),
     cfg.StrOpt(
         "retention_metadata_key",
@@ -122,8 +117,7 @@
     "coordination",
     title="Coordination Options",
     help=_(
-        "Options under this group are used to define Coordination's"
-        "configuration."
+        "Options under this group are used to define Coordination's" "configuration."
     ),
 )
 
diff --git a/staffeln/conf/database.py b/staffeln/conf/database.py
index f4fe98e..aa65873 100644
--- a/staffeln/conf/database.py
+++ b/staffeln/conf/database.py
@@ -17,9 +17,7 @@
 )
 
 SQL_OPTS = [
-    cfg.StrOpt(
-        "mysql_engine", default="InnoDB", help=_("MySQL engine to use.")
-    ),
+    cfg.StrOpt("mysql_engine", default="InnoDB", help=_("MySQL engine to use.")),
 ]
 
 
diff --git a/staffeln/conf/notify.py b/staffeln/conf/notify.py
index bc3f4bf..c0834b1 100644
--- a/staffeln/conf/notify.py
+++ b/staffeln/conf/notify.py
@@ -7,9 +7,7 @@
 notify_group = cfg.OptGroup(
     "notification",
     title="Notification options",
-    help=_(
-        "Options under this group are used to define notification settings."
-    ),
+    help=_("Options under this group are used to define notification settings."),
 )
 
 email_opts = [
diff --git a/staffeln/conf/paths.py b/staffeln/conf/paths.py
index 7341e48..08cf205 100644
--- a/staffeln/conf/paths.py
+++ b/staffeln/conf/paths.py
@@ -9,9 +9,7 @@
 PATH_OPTS = [
     cfg.StrOpt(
         "pybasedir",
-        default=os.path.abspath(
-            os.path.join(os.path.dirname(__file__), "../")
-        ),
+        default=os.path.abspath(os.path.join(os.path.dirname(__file__), "../")),
         help=_("Directory where the staffeln python module is installed."),
     ),
     cfg.StrOpt(
diff --git a/staffeln/db/api.py b/staffeln/db/api.py
index 3e22bde..5f6d2f8 100644
--- a/staffeln/db/api.py
+++ b/staffeln/db/api.py
@@ -6,9 +6,7 @@
 from oslo_db import api as db_api
 
 _BACKEND_MAPPING = {"sqlalchemy": "staffeln.db.sqlalchemy.api"}
-IMPL = db_api.DBAPI.from_config(
-    cfg.CONF, backend_mapping=_BACKEND_MAPPING, lazy=True
-)
+IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING, lazy=True)
 
 
 def get_instance():
diff --git a/staffeln/db/sqlalchemy/alembic/env.py b/staffeln/db/sqlalchemy/alembic/env.py
index 970dcc7..18b6ee4 100644
--- a/staffeln/db/sqlalchemy/alembic/env.py
+++ b/staffeln/db/sqlalchemy/alembic/env.py
@@ -46,9 +46,7 @@
     """
     engine = sqla_api.get_engine()
     with engine.connect() as connection:
-        context.configure(
-            connection=connection, target_metadata=target_metadata
-        )
+        context.configure(connection=connection, target_metadata=target_metadata)
         with context.begin_transaction():
             context.run_migrations()
 
diff --git a/staffeln/db/sqlalchemy/alembic/versions/041d9a0f1159_backup_add_names.py b/staffeln/db/sqlalchemy/alembic/versions/041d9a0f1159_backup_add_names.py
index 6d53f0e..492009c 100644
--- a/staffeln/db/sqlalchemy/alembic/versions/041d9a0f1159_backup_add_names.py
+++ b/staffeln/db/sqlalchemy/alembic/versions/041d9a0f1159_backup_add_names.py
@@ -12,8 +12,8 @@
 revision = "041d9a0f1159"
 down_revision = ""
 
-from alembic import op  # noqa: E402
 import sqlalchemy as sa  # noqa: E402
+from alembic import op  # noqa: E402
 
 
 def upgrade():
diff --git a/staffeln/db/sqlalchemy/alembic/versions/2b2b9df199bd_add_reason_column_to_queue_data_table.py b/staffeln/db/sqlalchemy/alembic/versions/2b2b9df199bd_add_reason_column_to_queue_data_table.py
index 4ebaf9f..5f87464 100644
--- a/staffeln/db/sqlalchemy/alembic/versions/2b2b9df199bd_add_reason_column_to_queue_data_table.py
+++ b/staffeln/db/sqlalchemy/alembic/versions/2b2b9df199bd_add_reason_column_to_queue_data_table.py
@@ -12,8 +12,8 @@
 revision = "2b2b9df199bd"
 down_revision = "ebdbed01e9a7"
 
-from alembic import op  # noqa: E402
 import sqlalchemy as sa  # noqa: E402
+from alembic import op  # noqa: E402
 
 
 def upgrade():
diff --git a/staffeln/db/sqlalchemy/alembic/versions/5b2e78435231_add_report_timestamp.py b/staffeln/db/sqlalchemy/alembic/versions/5b2e78435231_add_report_timestamp.py
index 5635fd9..20605ee 100644
--- a/staffeln/db/sqlalchemy/alembic/versions/5b2e78435231_add_report_timestamp.py
+++ b/staffeln/db/sqlalchemy/alembic/versions/5b2e78435231_add_report_timestamp.py
@@ -1,8 +1,8 @@
 from __future__ import annotations
 
+import sqlalchemy as sa
 from alembic import op
 from oslo_log import log
-import sqlalchemy as sa
 
 """add report timestamp
 
diff --git a/staffeln/db/sqlalchemy/alembic/versions/ebdbed01e9a7_added_incremental_field.py b/staffeln/db/sqlalchemy/alembic/versions/ebdbed01e9a7_added_incremental_field.py
index 45cc8a8..8dccd8b 100644
--- a/staffeln/db/sqlalchemy/alembic/versions/ebdbed01e9a7_added_incremental_field.py
+++ b/staffeln/db/sqlalchemy/alembic/versions/ebdbed01e9a7_added_incremental_field.py
@@ -12,16 +12,12 @@
 revision = "ebdbed01e9a7"
 down_revision = "041d9a0f1159"
 
-from alembic import op  # noqa: E402
 import sqlalchemy as sa  # noqa: E402
+from alembic import op  # noqa: E402
 
 
 def upgrade():
     # ### commands auto generated by Alembic - please adjust! ###
-    op.add_column(
-        "backup_data", sa.Column("incremental", sa.Boolean(), nullable=True)
-    )
-    op.add_column(
-        "queue_data", sa.Column("incremental", sa.Boolean(), nullable=True)
-    )
+    op.add_column("backup_data", sa.Column("incremental", sa.Boolean(), nullable=True))
+    op.add_column("queue_data", sa.Column("incremental", sa.Boolean(), nullable=True))
     # ### end Alembic commands ###
diff --git a/staffeln/db/sqlalchemy/api.py b/staffeln/db/sqlalchemy/api.py
index 4919f28..b27d0d1 100644
--- a/staffeln/db/sqlalchemy/api.py
+++ b/staffeln/db/sqlalchemy/api.py
@@ -10,9 +10,7 @@
 from oslo_db.sqlalchemy import session as db_session
 from oslo_db.sqlalchemy import utils as db_utils
 from oslo_log import log
-from oslo_utils import strutils
-from oslo_utils import timeutils
-from oslo_utils import uuidutils
+from oslo_utils import strutils, timeutils, uuidutils
 from sqlalchemy.inspection import inspect
 from sqlalchemy.orm import exc
 
@@ -185,8 +183,9 @@
         field = getattr(model, fieldname)
 
         if (
-            fieldname != "deleted" and value and (
-                field.type.python_type is datetime.datetime)
+            fieldname != "deleted"
+            and value
+            and (field.type.python_type is datetime.datetime)
         ):
             if not isinstance(value, datetime.datetime):
                 value = timeutils.parse_isotime(value)
@@ -347,9 +346,7 @@
         """Get the column from the backup_data with matching backup_id"""
 
         try:
-            return self._get_backup(
-                context, fieldname="backup_id", value=backup_id
-            )
+            return self._get_backup(context, fieldname="backup_id", value=backup_id)
         except Exception:  # noqa: E722
             LOG.error("Backup not found with backup_id %s." % backup_id)
 
@@ -379,9 +376,7 @@
 
     def create_report_timestamp(self, values):
         try:
-            report_timestamp_data = self._create(
-                models.Report_timestamp, values
-            )
+            report_timestamp_data = self._create(models.Report_timestamp, values)
         except db_exc.DBDuplicateEntry:
             LOG.error("Report Timestamp ID already exists.")
         return report_timestamp_data
diff --git a/staffeln/exception.py b/staffeln/exception.py
index 3f8a34e..e1caceb 100644
--- a/staffeln/exception.py
+++ b/staffeln/exception.py
@@ -16,8 +16,7 @@
 """Staffeln base exception handling."""
 from __future__ import annotations
 
-from typing import Optional
-from typing import Union
+from typing import Optional, Union
 
 from oslo_log import log as logging
 
diff --git a/staffeln/objects/base.py b/staffeln/objects/base.py
index 60a6fe2..7f3cb85 100755
--- a/staffeln/objects/base.py
+++ b/staffeln/objects/base.py
@@ -33,9 +33,7 @@
     OBJ_PROJECT_NAMESPACE = "staffeln"

 

     def as_dict(self):

-        return {

-            k: getattr(self, k) for k in self.fields if self.obj_attr_is_set(k)

-        }

+        return {k: getattr(self, k) for k in self.fields if self.obj_attr_is_set(k)}

 

 

 class StaffelnObjectSerializer(ovoo_base.VersionedObjectSerializer):

@@ -53,14 +51,9 @@
     object_fields = {}

 

     def obj_refresh(self, loaded_object):

-        fields = (

-            field for field in self.fields if field not in self.object_fields

-        )

+        fields = (field for field in self.fields if field not in self.object_fields)

         for field in fields:

-            if (

-                self.obj_attr_is_set(field) and (

-                    self[field] != loaded_object[field])

-            ):

+            if self.obj_attr_is_set(field) and (self[field] != loaded_object[field]):

                 self[field] = loaded_object[field]

 

     @staticmethod

diff --git a/staffeln/objects/report.py b/staffeln/objects/report.py
index 7bc1885..588be73 100644
--- a/staffeln/objects/report.py
+++ b/staffeln/objects/report.py
@@ -26,9 +26,7 @@
 
     @base.remotable_classmethod
     def list(cls, context, filters=None):  # pylint: disable=E0213
-        db_report = cls.dbapi.get_report_timestamp_list(
-            context, filters=filters
-        )
+        db_report = cls.dbapi.get_report_timestamp_list(context, filters=filters)
         return [cls._from_db_object(cls(context), obj) for obj in db_report]
 
     @base.remotable
diff --git a/staffeln/objects/volume.py b/staffeln/objects/volume.py
index 88f952a..f8b6e80 100644
--- a/staffeln/objects/volume.py
+++ b/staffeln/objects/volume.py
@@ -36,9 +36,7 @@
 
         :param filters: dict mapping the filter to a value.
         """
-        db_backups = cls.dbapi.get_backup_list(
-            context, filters=filters, **kwargs
-        )
+        db_backups = cls.dbapi.get_backup_list(context, filters=filters, **kwargs)
 
         return [cls._from_db_object(cls(context), obj) for obj in db_backups]
 
@@ -79,9 +77,7 @@
         self.dbapi.soft_delete_backup(self.id)
 
     @base.remotable_classmethod
-    def get_backup_by_backup_id(
-        cls, context, backup_id
-    ):  # pylint: disable=E0213
+    def get_backup_by_backup_id(cls, context, backup_id):  # pylint: disable=E0213
         """Find a backup based on backup_id
 
         :param context: Security context. NOTE: This should only
diff --git a/staffeln/tests/common/test_openstacksdk.py b/staffeln/tests/common/test_openstacksdk.py
index e516a30..ceeece9 100644
--- a/staffeln/tests/common/test_openstacksdk.py
+++ b/staffeln/tests/common/test_openstacksdk.py
@@ -3,11 +3,11 @@
 
 from unittest import mock
 
-from openstack import exceptions as openstack_exc
 import tenacity
+from openstack import exceptions as openstack_exc
 
-from staffeln.common import openstack as s_openstack
 from staffeln import conf
+from staffeln.common import openstack as s_openstack
 from staffeln.tests import base
 
 
@@ -46,20 +46,17 @@
         self.fake_role_assignment = mock.MagicMock(user="foo")
         self.fake_role_assignment2 = mock.MagicMock(user={"id": "bar"})
 
-    def _test_http_error(
-        self, m_func, retry_func, status_code, call_count=1, **kwargs
-    ):
-        m_func.side_effect = openstack_exc.HttpException(
-            http_status=status_code
-        )
+    def _test_http_error(self, m_func, retry_func, status_code, call_count=1, **kwargs):
+        m_func.side_effect = openstack_exc.HttpException(http_status=status_code)
         exc = self.assertRaises(
             openstack_exc.HttpException,
             getattr(self.openstack, retry_func),
             **kwargs,
         )
         self.assertEqual(status_code, exc.status_code)
-        skip_retry_codes = conf.CONF.openstack.skip_retry_codes.replace(
-            ' ', '').split(',')
+        skip_retry_codes = conf.CONF.openstack.skip_retry_codes.replace(" ", "").split(
+            ","
+        )
         if str(status_code) not in skip_retry_codes:
             if call_count == 1:
                 self.m_sleep.assert_called_once_with(1.0)
@@ -72,9 +69,7 @@
 
     def _test_non_http_error(self, m_func, retry_func, **kwargs):
         m_func.side_effect = KeyError
-        self.assertRaises(
-            KeyError, getattr(self.openstack, retry_func), **kwargs
-        )
+        self.assertRaises(KeyError, getattr(self.openstack, retry_func), **kwargs)
         self.m_sleep.assert_not_called()
 
     def test_get_servers(self):
@@ -88,28 +83,20 @@
         self._test_non_http_error(self.m_c.compute.servers, "get_servers")
 
     def test_get_servers_conf_skip_http_error(self):
-        conf.CONF.set_override('skip_retry_codes', '403,', 'openstack')
-        self._test_http_error(
-            self.m_c.compute.servers, "get_servers", status_code=403
-        )
-        self.assertEqual('403,', conf.CONF.openstack.skip_retry_codes)
+        conf.CONF.set_override("skip_retry_codes", "403,", "openstack")
+        self._test_http_error(self.m_c.compute.servers, "get_servers", status_code=403)
+        self.assertEqual("403,", conf.CONF.openstack.skip_retry_codes)
 
     def test_get_servers_conf_skip_http_error_not_hit(self):
-        conf.CONF.set_override('skip_retry_codes', '403,', 'openstack')
-        self._test_http_error(
-            self.m_c.compute.servers, "get_servers", status_code=404
-        )
-        self.assertEqual('403,', conf.CONF.openstack.skip_retry_codes)
+        conf.CONF.set_override("skip_retry_codes", "403,", "openstack")
+        self._test_http_error(self.m_c.compute.servers, "get_servers", status_code=404)
+        self.assertEqual("403,", conf.CONF.openstack.skip_retry_codes)
 
     def test_get_servers_404_http_error(self):
-        self._test_http_error(
-            self.m_c.compute.servers, "get_servers", status_code=404
-        )
+        self._test_http_error(self.m_c.compute.servers, "get_servers", status_code=404)
 
     def test_get_servers_500_http_error(self):
-        self._test_http_error(
-            self.m_c.compute.servers, "get_servers", status_code=500
-        )
+        self._test_http_error(self.m_c.compute.servers, "get_servers", status_code=500)
 
     def test_get_projects(self):
         self.m_c.list_projects = mock.MagicMock(return_value=[])
@@ -120,14 +107,10 @@
         self._test_non_http_error(self.m_c.list_projects, "get_projects")
 
     def test_get_projects_404_http_error(self):
-        self._test_http_error(
-            self.m_c.list_projects, "get_projects", status_code=404
-        )
+        self._test_http_error(self.m_c.list_projects, "get_projects", status_code=404)
 
     def test_get_projects_500_http_error(self):
-        self._test_http_error(
-            self.m_c.list_projects, "get_projects", status_code=500
-        )
+        self._test_http_error(self.m_c.list_projects, "get_projects", status_code=500)
 
     def test_get_user_id(self):
         self.m_c.get_user = mock.MagicMock(return_value=self.fake_user)
@@ -138,14 +121,10 @@
         self._test_non_http_error(self.m_c.get_user, "get_user_id")
 
     def test_get_user_id_404_http_error(self):
-        self._test_http_error(
-            self.m_c.get_user, "get_user_id", status_code=404
-        )
+        self._test_http_error(self.m_c.get_user, "get_user_id", status_code=404)
 
     def test_get_user_id_500_http_error(self):
-        self._test_http_error(
-            self.m_c.get_user, "get_user_id", status_code=500
-        )
+        self._test_http_error(self.m_c.get_user, "get_user_id", status_code=500)
 
     def test_get_user(self):
         self.m_c.get_user = mock.MagicMock(return_value=self.fake_user)
@@ -177,9 +156,7 @@
 
     def test_get_role_assignments(self):
         self.m_c.list_role_assignments = mock.MagicMock(return_value=[])
-        self.assertEqual(
-            self.openstack.get_role_assignments(project_id="foo"), []
-        )
+        self.assertEqual(self.openstack.get_role_assignments(project_id="foo"), [])
         self.m_c.list_role_assignments.assert_called_once_with(
             filters={"project": "foo"}
         )
@@ -226,9 +203,7 @@
         self.m_c.get_user.assert_has_calls(
             [
                 mock.call(name_or_id=self.fake_role_assignment.user),
-                mock.call(
-                    name_or_id=self.fake_role_assignment2.user.get("id")
-                ),
+                mock.call(name_or_id=self.fake_role_assignment2.user.get("id")),
             ]
         )
 
@@ -257,13 +232,9 @@
         )
 
     def test_get_volume(self):
-        self.m_c.get_volume_by_id = mock.MagicMock(
-            return_value=self.fake_volume
-        )
+        self.m_c.get_volume_by_id = mock.MagicMock(return_value=self.fake_volume)
         self.assertEqual(
-            self.openstack.get_volume(
-                uuid=self.fake_volume.id, project_id="bar"
-            ),
+            self.openstack.get_volume(uuid=self.fake_volume.id, project_id="bar"),
             self.fake_volume,
         )
         self.m_c.get_volume_by_id.assert_called_once_with(self.fake_volume.id)
@@ -295,13 +266,9 @@
         )
 
     def test_get_backup(self):
-        self.m_c.get_volume_backup = mock.MagicMock(
-            return_value=self.fake_backup
-        )
+        self.m_c.get_volume_backup = mock.MagicMock(return_value=self.fake_backup)
         self.assertEqual(
-            self.openstack.get_backup(
-                uuid=self.fake_backup.id, project_id="bar"
-            ),
+            self.openstack.get_backup(uuid=self.fake_backup.id, project_id="bar"),
             self.fake_backup,
         )
         self.m_c.get_volume_backup.assert_called_once_with(self.fake_backup.id)
@@ -311,9 +278,7 @@
             side_effect=openstack_exc.ResourceNotFound
         )
         self.assertEqual(
-            self.openstack.get_backup(
-                uuid=self.fake_backup.id, project_id="bar"
-            ),
+            self.openstack.get_backup(uuid=self.fake_backup.id, project_id="bar"),
             None,
         )
         self.m_c.get_volume_backup.assert_called_once_with(self.fake_backup.id)
@@ -345,13 +310,9 @@
         )
 
     def test_delete_backup(self):
-        self.m_c.delete_volume_backup = mock.MagicMock(
-            return_value=self.fake_backup
-        )
+        self.m_c.delete_volume_backup = mock.MagicMock(return_value=self.fake_backup)
         self.assertEqual(
-            self.openstack.delete_backup(
-                uuid=self.fake_backup.id, project_id="bar"
-            ),
+            self.openstack.delete_backup(uuid=self.fake_backup.id, project_id="bar"),
             None,
         )
         self.m_c.delete_volume_backup.assert_called_once_with(
@@ -363,9 +324,7 @@
             side_effect=openstack_exc.ResourceNotFound
         )
         self.assertEqual(
-            self.openstack.delete_backup(
-                uuid=self.fake_backup.id, project_id="bar"
-            ),
+            self.openstack.delete_backup(uuid=self.fake_backup.id, project_id="bar"),
             None,
         )
         self.m_c.delete_volume_backup.assert_called_once_with(
@@ -438,9 +397,7 @@
         self.m_c.block_storage.get = mock.MagicMock(status_code=200)
         self.m_gam = mock.MagicMock()
         self.m_c._get_and_munchify = self.m_gam
-        self.m_gam.return_value = mock.MagicMock(
-            backup_gigabytes=[self.fake_backup.id]
-        )
+        self.m_gam.return_value = mock.MagicMock(backup_gigabytes=[self.fake_backup.id])
         self.assertEqual(
             [self.fake_backup.id],
             self.openstack.get_backup_gigabytes_quota(project_id="bar"),
@@ -497,7 +454,5 @@
             self.m_gam_return,
             self.openstack._get_volume_quotas(project_id="bar", usage=False),
         )
-        self.m_c.block_storage.get.assert_called_once_with(
-            "/os-quota-sets/bar"
-        )
+        self.m_c.block_storage.get.assert_called_once_with("/os-quota-sets/bar")
         self.m_gam.assert_called_once_with("quota_set", m_j_r())