Add pre-commit
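
Introduce a pre-commit configuration with the end-of-file-fixer and
trailing-whitespace hooks plus flake8 (max line length 79), add
pre-commit to requirements.txt, and apply the resulting cleanups
across the tree: wrap lines longer than 79 characters, split grouped
imports into one import per line, and add
"from __future__ import annotations" to the touched modules.

A minimal sketch of running the hooks locally (hook ids come from the
.pre-commit-config.yaml added below; assumes pre-commit is installed):

    pre-commit install            # register the git hook once
    pre-commit run --all-files    # run every hook against the tree
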
diff --git a/.gitignore b/.gitignore
index 32e35b0..1ae05e4 100755
--- a/.gitignore
+++ b/.gitignore
@@ -68,4 +68,4 @@
 *.log
 
 # envvar openrc file
-*openrc.sh
\ No newline at end of file
+*openrc.sh
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..1dbe818
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,11 @@
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.3.0
+    hooks:
+      - id: end-of-file-fixer
+      - id: trailing-whitespace
+  - repo: https://github.com/pycqa/flake8
+    rev: 7.0.0
+    hooks:
+      - id: flake8
+        args: [--max-line-length=79]
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 1c590d6..7a12515 100755
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -11,6 +11,7 @@
 # implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from __future__ import annotations
 
 import os
 import sys
@@ -39,8 +40,8 @@
 # openstackdocstheme options
 openstackdocs_repo_name = "openstack/staffeln"
 openstackdocs_bug_project = (
-    "replace with the name of the project on Launchpad or the ID from Storyboard"
-)
+    "replace with the name of the project on "
+    "Launchpad or the ID from Storyboard")
 openstackdocs_bug_tag = ""
 
 # If true, '()' will be appended to :func: etc. cross-reference text.
diff --git a/hack/stack.sh b/hack/stack.sh
index 497b29a..4876c88 100755
--- a/hack/stack.sh
+++ b/hack/stack.sh
@@ -36,7 +36,7 @@
 SWIFT_REPLICAS=1
 enable_plugin neutron https://opendev.org/openstack/neutron
 #swift
-enable_service s-proxy s-object s-container s-account 
+enable_service s-proxy s-object s-container s-account
 # Cinder
 enable_service c-bak
 [[post-config|/etc/neutron/neutron.conf]]
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
index 65d2460..d4b373e 100755
--- a/releasenotes/source/conf.py
+++ b/releasenotes/source/conf.py
@@ -11,7 +11,6 @@
 # implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 # This file is execfile()d with the current directory set to its
 # containing dir.
 #
@@ -20,20 +19,18 @@
 #
 # All configuration values have a default; values that are commented out
 # serve to show the default.
-
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 # sys.path.insert(0, os.path.abspath('.'))
-
 # -- General configuration ------------------------------------------------
-
 # If your documentation needs a minimal Sphinx version, state it here.
 # needs_sphinx = '1.0'
-
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
+from __future__ import annotations
+
 extensions = [
     "openstackdocstheme",
     "reno.sphinxext",
diff --git a/requirements.txt b/requirements.txt
index 3789929..2372ce2 100755
--- a/requirements.txt
+++ b/requirements.txt
@@ -20,5 +20,4 @@
 tooz # Apache-2.0
 sherlock>=0.4.1 # MIT
 kubernetes # Apache-2.0
-# email
-# smtplib
+pre-commit
diff --git a/setup.py b/setup.py
index 0346ed3..673123b 100755
--- a/setup.py
+++ b/setup.py
@@ -12,8 +12,9 @@
 # implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
+from __future__ import annotations
+
 import setuptools
 
 setuptools.setup(setup_requires=["pbr"], pbr=True)
diff --git a/staffeln/__init__.py b/staffeln/__init__.py
index 5612b0d..78fc3f2 100755
--- a/staffeln/__init__.py
+++ b/staffeln/__init__.py
@@ -1,5 +1,4 @@
 # -*- coding: utf-8 -*-
-
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # a copy of the License at
@@ -11,6 +10,7 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+from __future__ import annotations
 
 import pbr.version
 
diff --git a/staffeln/api/app.py b/staffeln/api/app.py
index bf1234e..b19b9b8 100755
--- a/staffeln/api/app.py
+++ b/staffeln/api/app.py
@@ -1,7 +1,12 @@
-from flask import Flask, Response, request
+from __future__ import annotations
+
+from flask import Flask
+from flask import request
+from flask import Response
 from oslo_log import log
-from staffeln import objects
+
 from staffeln.common import context
+from staffeln import objects
 
 ctx = context.make_context()
 app = Flask(__name__)
@@ -22,8 +27,8 @@
     backup = objects.Volume.get_backup_by_backup_id(  # pylint: disable=E1120
         context=ctx, backup_id=request.args["backup_id"]
     )
-    # backup_info is None when there is no entry of the backup id in backup_table.
-    # So the backup should not be the automated backup.
+    # backup_info is None when there is no entry of the backup id in
+    # backup_table. So the backup should not be the automated backup.
     if backup is None:
         return Response(
             "True",
diff --git a/staffeln/api/middleware/parsable_error.py b/staffeln/api/middleware/parsable_error.py
index 2b49f83..2443201 100755
--- a/staffeln/api/middleware/parsable_error.py
+++ b/staffeln/api/middleware/parsable_error.py
@@ -17,8 +17,10 @@
 
 Based on pecan.middleware.errordocument
 """
+from __future__ import annotations
 
 from oslo_serialization import jsonutils
+
 from staffeln.i18n import _
 
 
@@ -78,7 +80,10 @@
                 state["status_code"] = status_code
             except (ValueError, TypeError):  # pragma: nocover
                 raise Exception(
-                    _("ErrorDocumentMiddleware received an invalid " "status %s")
+                    _(
+                        "ErrorDocumentMiddleware received an invalid "
+                        "status %s"
+                    )
                     % status
                 )
             else:
diff --git a/staffeln/api/wsgi.py b/staffeln/api/wsgi.py
index bef4092..6965beb 100755
--- a/staffeln/api/wsgi.py
+++ b/staffeln/api/wsgi.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import app
 
 if __name__ == "__main__":
diff --git a/staffeln/cmd/api.py b/staffeln/cmd/api.py
index a46656c..ba2ce09 100755
--- a/staffeln/cmd/api.py
+++ b/staffeln/cmd/api.py
@@ -1,11 +1,15 @@
 """Starter script for Staffeln API service"""
+
+from __future__ import annotations
+
 import os
 import sys
 
-import staffeln.conf
 from oslo_log import log as logging
+
 from staffeln.api import app as api_app
 from staffeln.common import service
+import staffeln.conf
 from staffeln.i18n import _
 
 CONF = staffeln.conf.CONF
diff --git a/staffeln/cmd/conductor.py b/staffeln/cmd/conductor.py
index f4c9579..3b2b84e 100755
--- a/staffeln/cmd/conductor.py
+++ b/staffeln/cmd/conductor.py
@@ -1,10 +1,13 @@
 """Starter script for the staffeln conductor service."""
 
+from __future__ import annotations
+
 import cotyledon
-import staffeln.conf
 from cotyledon import oslo_config_glue
+
 from staffeln.common import service
 from staffeln.conductor import manager
+import staffeln.conf
 
 CONF = staffeln.conf.CONF
 
@@ -13,9 +16,15 @@
     service.prepare_service()
 
     sm = cotyledon.ServiceManager()
-    sm.add(manager.BackupManager, workers=CONF.conductor.backup_workers, args=(CONF,))
     sm.add(
-        manager.RotationManager, workers=CONF.conductor.rotation_workers, args=(CONF,)
+        manager.BackupManager,
+        workers=CONF.conductor.backup_workers,
+        args=(CONF,),
+    )
+    sm.add(
+        manager.RotationManager,
+        workers=CONF.conductor.rotation_workers,
+        args=(CONF,),
     )
     oslo_config_glue.setup(sm, CONF)
     sm.run()
diff --git a/staffeln/cmd/dbmanage.py b/staffeln/cmd/dbmanage.py
index 5801a36..433b7d7 100644
--- a/staffeln/cmd/dbmanage.py
+++ b/staffeln/cmd/dbmanage.py
@@ -2,11 +2,14 @@
 Run storage database migration.
 """
 
+from __future__ import annotations
+
 import sys
 
 from oslo_config import cfg
-from staffeln import conf
+
 from staffeln.common import service
+from staffeln import conf
 from staffeln.db import migration
 
 CONF = conf.CONF
@@ -25,17 +28,22 @@
 def add_command_parsers(subparsers):
 
     parser = subparsers.add_parser(
-        "create_schema", help="Create the database schema.")
+        "create_schema", help="Create the database schema."
+    )
     parser.set_defaults(func=DBCommand.create_schema)
 
     parser = subparsers.add_parser(
-        "upgrade", help="Upgrade the database schema.")
+        "upgrade", help="Upgrade the database schema."
+    )
     parser.add_argument("revision", nargs="?")
     parser.set_defaults(func=DBCommand.do_upgrade)
 
 
 command_opt = cfg.SubCommandOpt(
-    "command", title="Command", help="Available commands", handler=add_command_parsers
+    "command",
+    title="Command",
+    help="Available commands",
+    handler=add_command_parsers,
 )
 
 
diff --git a/staffeln/common/auth.py b/staffeln/common/auth.py
index e23ef71..b64c515 100755
--- a/staffeln/common/auth.py
+++ b/staffeln/common/auth.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import openstack
 
 
diff --git a/staffeln/common/config.py b/staffeln/common/config.py
index f71a378..5b69fb4 100755
--- a/staffeln/common/config.py
+++ b/staffeln/common/config.py
@@ -1,4 +1,6 @@
 # from staffeln.common import rpc
+from __future__ import annotations
+
 import staffeln.conf
 from staffeln import version
 
diff --git a/staffeln/common/constants.py b/staffeln/common/constants.py
index b7d6d09..d065966 100644
--- a/staffeln/common/constants.py
+++ b/staffeln/common/constants.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 BACKUP_INIT = 4
 BACKUP_FAILED = 3
 BACKUP_COMPLETED = 2
diff --git a/staffeln/common/context.py b/staffeln/common/context.py
index c6046e1..d789a58 100644
--- a/staffeln/common/context.py
+++ b/staffeln/common/context.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 from oslo_context import context
 from oslo_log import log
 
@@ -5,7 +7,11 @@
 
 
 class RequestContext(context.RequestContext):
-    """Added security context with request parameters from openstack common library"""
+    """Added security context
+
+    Added security context with request
+    parameters from openstack common library
+    """
 
     def __init__(
         self,
@@ -14,7 +20,7 @@
         instance_id=None,
         executed_at=None,
         backup_status=None,
-        **kwargs
+        **kwargs,
     ):
         self.backup_id = backup_id
         self.volume_id = volume_id
diff --git a/staffeln/common/email.py b/staffeln/common/email.py
index cf6e937..79d7225 100644
--- a/staffeln/common/email.py
+++ b/staffeln/common/email.py
@@ -1,10 +1,12 @@
 """ Email module with SMTP"""
 
-import smtplib
-from email import utils
+from __future__ import annotations
+
 from email.header import Header
 from email.mime.multipart import MIMEMultipart
 from email.mime.text import MIMEText
+from email import utils
+import smtplib
 
 from oslo_log import log
 
@@ -32,10 +34,12 @@
 
     try:
         smtp_obj = smtplib.SMTP(
-            smtp_profile["smtp_server_domain"], smtp_profile["smtp_server_port"]
+            smtp_profile["smtp_server_domain"],
+            smtp_profile["smtp_server_port"],
         )
         smtp_obj.connect(
-            smtp_profile["smtp_server_domain"], smtp_profile["smtp_server_port"]
+            smtp_profile["smtp_server_domain"],
+            smtp_profile["smtp_server_port"],
         )
         smtp_obj.ehlo()
         smtp_obj.starttls()
@@ -43,7 +47,9 @@
         # SMTP Login
         smtp_obj.login(smtp_profile["src_email"], smtp_profile["src_pwd"])
         smtp_obj.sendmail(
-            smtp_profile["src_email"], smtp_profile["dest_email"], msg.as_string()
+            smtp_profile["src_email"],
+            smtp_profile["dest_email"],
+            msg.as_string(),
         )
         # Email Sent
     except smtplib.SMTPException as error:
diff --git a/staffeln/common/lock.py b/staffeln/common/lock.py
index 4c05626..5f21bf6 100644
--- a/staffeln/common/lock.py
+++ b/staffeln/common/lock.py
@@ -1,16 +1,20 @@
+from __future__ import annotations
+
 import errno
 import glob
 import os
 import re
 import sys
-import uuid
 from typing import Optional  # noqa: H301
+import uuid
 
-import sherlock
 from oslo_log import log
-from staffeln import conf, exception
+import sherlock
 from tooz import coordination
 
+from staffeln import conf
+from staffeln import exception
+
 CONF = conf.CONF
 LOG = log.getLogger(__name__)
 
@@ -146,7 +150,10 @@
     """
 
     def __init__(
-        self, expire: int = 3600, timeout: int = 10, namespace: str = "openstack"
+        self,
+        expire: int = 3600,
+        timeout: int = 10,
+        namespace: str = "openstack",
     ):
         self.timeout = timeout
         self.expire = expire
diff --git a/staffeln/common/openstack.py b/staffeln/common/openstack.py
index 9f412e8..ea566e3 100644
--- a/staffeln/common/openstack.py
+++ b/staffeln/common/openstack.py
@@ -1,5 +1,10 @@
-from openstack import exceptions, proxy
+from __future__ import annotations
+
+from openstack import exceptions
+from openstack import proxy
 from oslo_log import log
+import tenacity
+
 from staffeln.common import auth
 from staffeln.i18n import _
 
@@ -16,7 +21,9 @@
         project_id = project.get("id")
 
         if project_id not in self.conn_list:
-            LOG.debug(_("Initiate connection for project %s" % project.get("name")))
+            LOG.debug(
+                _("Initiate connection for project %s" % project.get("name"))
+            )
             conn = self.conn.connect_as_project(project)
             self.conn_list[project_id] = conn
         LOG.debug(_("Connect as project %s" % project.get("name")))
@@ -27,10 +34,14 @@
         user_name = self.conn.config.auth["username"]
         if "user_domain_id" in self.conn.config.auth:
             domain_id = self.conn.config.auth["user_domain_id"]
-            user = self.conn.get_user(name_or_id=user_name, domain_id=domain_id)
+            user = self.conn.get_user(
+                name_or_id=user_name, domain_id=domain_id
+            )
         elif "user_domain_name" in self.conn.config.auth:
             domain_name = self.conn.config.auth["user_domain_name"]
-            user = self.conn.get_user(name_or_id=user_name, domain_id=domain_name)
+            user = self.conn.get_user(
+                name_or_id=user_name, domain_id=domain_name
+            )
         else:
             user = self.conn.get_user(name_or_id=user_name)
         return user.id
@@ -66,19 +77,19 @@
     def get_servers(self, project_id=None, all_projects=True, details=True):
         if project_id is not None:
             return self.conn.compute.servers(
-                details=details, all_projects=all_projects, project_id=project_id
+                details=details,
+                all_projects=all_projects,
+                project_id=project_id,
             )
         else:
-            return self.conn.compute.servers(details=details, all_projects=all_projects)
+            return self.conn.compute.servers(
+                details=details, all_projects=all_projects
+            )
 
     def get_volume(self, uuid, project_id):
         return self.conn.get_volume_by_id(uuid)
 
     def get_backup(self, uuid, project_id=None):
-        # return conn.block_storage.get_backup(
-        #     project_id=project_id, backup_id=uuid,
-        # )
-        # conn.block_storage.backups(volume_id=uuid,project_id=project_id)
         try:
             return self.conn.get_volume_backup(uuid)
         except exceptions.ResourceNotFound:
@@ -93,9 +104,6 @@
         name=None,
         incremental=False,
     ):
-        # return conn.block_storage.create_backup(
-        #     volume_id=queue.volume_id, force=True, project_id=queue.project_id, name="name"
-        # )
         return self.conn.create_volume_backup(
             volume_id=volume_id,
             force=force,
@@ -112,7 +120,8 @@
         LOG.debug(f"Start deleting backup {uuid} in OpenStack.")
         try:
             self.conn.delete_volume_backup(uuid, force=force)
-            # TODO(Alex): After delete the backup generator, need to set the volume status again
+            # TODO(Alex): After delete the backup generator,
+            # need to set the volume status again
         except exceptions.ResourceNotFound:
             return None
 
@@ -128,7 +137,8 @@
 
     # rewrite openstasdk._block_storage.get_volume_quotas
     # added usage flag
-    # ref: https://docs.openstack.org/api-ref/block-storage/v3/?expanded=#show-quota-usage-for-a-project
+    # ref: https://docs.openstack.org/api-ref/block-storage/v3/?
+    # expanded=#show-quota-usage-for-a-project
     def _get_volume_quotas(self, project_id, usage=True):
         """Get volume quotas for a project
 
@@ -140,11 +150,15 @@
 
         if usage:
             resp = self.conn.block_storage.get(
-                "/os-quota-sets/{project_id}?usage=True".format(project_id=project_id)
+                "/os-quota-sets/{project_id}?usage=True".format(
+                    project_id=project_id
+                )
             )
         else:
             resp = self.conn.block_storage.get(
                 "/os-quota-sets/{project_id}".format(project_id=project_id)
             )
-        data = proxy._json_response(resp, error_message="cinder client call failed")
+        data = proxy._json_response(
+            resp, error_message="cinder client call failed"
+        )
         return self.conn._get_and_munchify("quota_set", data)
diff --git a/staffeln/common/service.py b/staffeln/common/service.py
index d2ad7a5..c657896 100755
--- a/staffeln/common/service.py
+++ b/staffeln/common/service.py
@@ -11,11 +11,13 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from __future__ import annotations
 
-import staffeln.conf
 from oslo_log import log as logging
-from staffeln import objects
+
 from staffeln.common import config
+import staffeln.conf
+from staffeln import objects
 
 CONF = staffeln.conf.CONF
 
diff --git a/staffeln/common/short_id.py b/staffeln/common/short_id.py
index 18be04c..e182ad1 100755
--- a/staffeln/common/short_id.py
+++ b/staffeln/common/short_id.py
@@ -2,10 +2,13 @@
 The IDs each comprise 12 (lower-case) alphanumeric characters.
 """
 
+from __future__ import annotations
+
 import base64
 import uuid
 
 import six
+
 from staffeln.i18n import _
 
 
diff --git a/staffeln/common/time.py b/staffeln/common/time.py
index 103096a..79ef345 100644
--- a/staffeln/common/time.py
+++ b/staffeln/common/time.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import re
 
 from dateutil.relativedelta import relativedelta
@@ -6,8 +8,10 @@
 DEFAULT_TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
 
 regex = re.compile(
-    r"((?P<years>\d+?)y)?((?P<months>\d+?)mon)?((?P<weeks>\d+?)w)?((?P<days>\d+?)d)?"
-    r"((?P<hours>\d+?)h)?((?P<minutes>\d+?)min)?((?P<seconds>\d+?)s)?"
+    r"((?P<years>\d+?)y)?((?P<months>\d+?)mon)?"
+    r"((?P<weeks>\d+?)w)?((?P<days>\d+?)d)?"
+    r"((?P<hours>\d+?)h)?((?P<minutes>\d+?)min)?"
+    r"((?P<seconds>\d+?)s)?"
 )
 
 
@@ -45,7 +49,14 @@
 
 
 def timeago(
-    years=0, months=0, weeks=0, days=0, hours=0, minutes=0, seconds=0, from_date=None
+    years=0,
+    months=0,
+    weeks=0,
+    days=0,
+    hours=0,
+    minutes=0,
+    seconds=0,
+    from_date=None,
 ):
     if from_date is None:
         from_date = timeutils.utcnow()
diff --git a/staffeln/conductor/backup.py b/staffeln/conductor/backup.py
index 8375bb5..559d782 100755
--- a/staffeln/conductor/backup.py
+++ b/staffeln/conductor/backup.py
@@ -1,17 +1,23 @@
-import collections
-from datetime import timedelta, timezone
+from __future__ import annotations
 
-import staffeln.conf
+import collections
+from datetime import timedelta
+from datetime import timezone
+
 from openstack.exceptions import HttpException as OpenstackHttpException
 from openstack.exceptions import ResourceNotFound as OpenstackResourceNotFound
 from openstack.exceptions import SDKException as OpenstackSDKException
 from oslo_log import log
 from oslo_utils import timeutils
-from staffeln import objects
-from staffeln.common import constants, context, openstack
+
+from staffeln.common import constants
+from staffeln.common import context
+from staffeln.common import openstack
 from staffeln.common import time as xtime
 from staffeln.conductor import result
+import staffeln.conf
 from staffeln.i18n import _
+from staffeln import objects
 
 CONF = staffeln.conf.CONF
 LOG = log.getLogger(__name__)
@@ -115,8 +121,7 @@
         return queue
 
     def create_queue(self, old_tasks):
-        """
-        Create the queue of all the volumes for backup
+        """Create the queue of all the volumes for backup
 
         :param old_tasks: Task list not completed in the previous cycle
         :type: List<Class objects.Queue>
@@ -129,7 +134,8 @@
         for old_task in old_tasks:
             old_task_volume_list.append(old_task.volume_id)
 
-        # 2. add new tasks in the queue which are not existing in the old task list
+        # 2. add new tasks in the queue which are not existing in the old task
+        # list
         task_list = self.check_instance_volumes()
         for task in task_list:
             if task.volume_id not in old_task_volume_list:
@@ -142,8 +148,8 @@
                 return False
 
             return (
-                metadata[CONF.conductor.backup_metadata_key].lower()
-                == constants.BACKUP_ENABLED_KEY
+                metadata[CONF.conductor.backup_metadata_key].lower(
+                ) == constants.BACKUP_ENABLED_KEY
             )
         else:
             return True
@@ -157,9 +163,9 @@
             res = volume["status"] in ("available", "in-use")
             if not res:
                 reason = _(
-                    "Volume %s is not triger new backup task because it is in %s status"
-                    % (volume_id, volume["status"])
-                )
+                    "Volume %s is not triger new backup task because "
+                    "it is in %s status" %
+                    (volume_id, volume["status"]))
                 LOG.info(reason)
                 return reason
             return res
@@ -169,7 +175,7 @@
 
     def purge_backups(self, project_id=None):
         LOG.info(f"Start pruge backup tasks for project {project_id}")
-        # TODO make all this in a single DB command
+        # We could consider doing all of this in a single DB command.
         success_tasks = self.get_queues(
             filters={
                 "backup_status": constants.BACKUP_COMPLETED,
@@ -273,7 +279,9 @@
                 )
 
         except OpenstackSDKException as e:
-            LOG.warn(f"Backup {backup_object.backup_id} deletion failed. {str(e)}")
+            LOG.warn(
+                f"Backup {backup_object.backup_id} deletion failed. {str(e)}"
+            )
             # We don't delete backup object if any exception occured
             # backup_object.delete_backup()
             return False
@@ -286,11 +294,11 @@
                 LOG.warn(
                     f"Project {project_id} for backup "
                     f"{backup_object.backup_id} is not existing in "
-                    "Openstack. Please check your access right to this project. "
-                    "Skip this backup from remove now and will retry later."
-                )
-                # Don't remove backup object, keep it and retry on next periodic task
-                # backup_object.delete_backup()
+                    "Openstack. Please check your access right to this "
+                    "project. "
+                    "Skip this backup from remove now and will retry later.")
+                # Don't remove backup object, keep it and retry on next
+                # periodic task backup_object.delete_backup()
                 return
 
             self.openstacksdk.set_project(self.project_list[project_id])
@@ -299,17 +307,19 @@
             )
             if backup is None:
                 LOG.info(
-                    f"Backup {backup_object.backup_id} is removed from Openstack "
-                    "or cinder-backup is not existing in the cloud. "
-                    "Start removing backup object from Staffeln."
-                )
+                    f"Backup {backup_object.backup_id} is removed from "
+                    "Openstack or cinder-backup is not existing in the "
+                    "cloud. Start removing backup object from Staffeln.")
                 return backup_object.delete_backup()
 
             self.openstacksdk.delete_backup(uuid=backup_object.backup_id)
             # Don't remove backup until it's officially removed from Cinder
             # backup_object.delete_backup()
         except Exception as e:
-            if skip_inc_err and "Incremental backups exist for this backup" in str(e):
+            if (
+                skip_inc_err and (
+                    "Incremental backups exist for this backup" in str(e))
+            ):
                 LOG.debug(str(e))
             else:
                 LOG.info(
@@ -318,8 +328,8 @@
                 )
                 LOG.debug(f"deletion failed {str(e)}")
 
-                # Don't remove backup object, keep it and retry on next periodic task
-                # backup_object.delete_backup()
+                # Don't remove the backup object; keep it and retry on the
+                # next periodic task (backup_object.delete_backup()).
 
     def update_project_list(self):
         projects = self.openstacksdk.get_projects()
@@ -327,8 +337,7 @@
             self.project_list[project.id] = project
 
     def _is_backup_required(self, volume_id):
-        """
-        Decide if the backup required based on the backup history
+        """Decide if the backup required based on the backup history
 
         If there is any backup created during certain time,
         will not trigger new backup request.
@@ -346,11 +355,15 @@
                 # Ignore backup interval
                 return True
             interval = CONF.conductor.backup_min_interval
-            threshold_strtime = timeutils.utcnow() - timedelta(seconds=interval)
+            threshold_strtime = timeutils.utcnow() - timedelta(
+                seconds=interval
+            )
             backups = self.get_backups(
                 filters={
                     "volume_id__eq": volume_id,
-                    "created_at__gt": threshold_strtime.astimezone(timezone.utc),
+                    "created_at__gt": threshold_strtime.astimezone(
+                        timezone.utc
+                    ),
                 }
             )
             if backups:
@@ -365,8 +378,7 @@
         return True
 
     def _is_incremental(self, volume_id):
-        """
-        Decide the backup method based on the backup history
+        """Decide the backup method based on the backup history
 
         It queries to select the last N backups from backup table and
         decide backup type as full if there is no full backup.
@@ -395,16 +407,13 @@
                     return True
         except Exception as e:
             LOG.debug(
-                _(
-                    "Failed to get backup history to decide backup method. Reason: %s"
-                    % str(e)
-                )
+                "Failed to get backup history to decide backup "
+                f"method. Reason: {e}"
             )
         return False
 
     def check_instance_volumes(self):
-        """
-        Retrieves volume list to backup
+        """Retrieves volume list to backup
 
         Get the list of all the volumes from the project using openstacksdk.
         Function first list all the servers in the project and get the volumes
@@ -422,10 +431,8 @@
                 servers = self.openstacksdk.get_servers(project_id=project.id)
             except OpenstackHttpException as ex:
                 LOG.warn(
-                    _(
-                        "Failed to list servers in project %s. %s"
-                        % (project.id, str(ex))
-                    )
+                    f"Failed to list servers in project {project.id}. "
+                    f"{str(ex)} (status code: {ex.status_code})."
                 )
                 continue
             for server in servers:
@@ -490,8 +497,12 @@
 
         try:
             servers = self.openstacksdk.get_servers(all_projects=True)
-        except OpenstackHttpException:
-            LOG.warn(_("Failed to list servers for all projects."))
+        except OpenstackHttpException as ex:
+            servers = []
+            LOG.warn(
+                f"Failed to list servers for all projects. "
+                f"{str(ex)} (status code: {ex.status_code})."
+            )
 
         for server in servers:
             if CONF.conductor.retention_metadata_key in server.metadata:
@@ -500,21 +511,20 @@
                 ].lower()
                 if xtime.regex.fullmatch(server_retention_time):
                     LOG.debug(
-                        f"Found retention time ({server_retention_time}) defined for "
-                        f"server {server.id}, Adding it retention reference map."
-                    )
+                        f"Found retention time ({server_retention_time}) "
+                        f"defined for server {server.id}, "
+                        "Adding it retention reference map.")
                     retention_map[server.id] = server_retention_time
                 else:
                     LOG.info(
-                        f"Server retention time for instance {server.id} is incorrect. "
-                        "Please follow '<YEARS>y<MONTHS>m<WEEKS>w<DAYS>d<HOURS>"
-                        "h<MINUTES>min<SECONDS>s' format."
-                    )
+                        f"Server retention time for instance {server.id} is "
+                        "incorrect. Please follow "
+                        "'<YEARS>y<MONTHS>m<WEEKS>w<DAYS>d<HOURS>"
+                        "h<MINUTES>min<SECONDS>s' format.")
         return retention_map
 
     def _volume_queue(self, task):
-        """
-        Commits one backup task to queue table
+        """Commits one backup task to queue table
 
         :param task: One backup task
         :type: QueueMapping
@@ -528,7 +538,8 @@
         volume_queue.instance_name = task.instance_name
         volume_queue.volume_name = task.volume_name
         # NOTE(Oleks): Backup mode is inherited from backup service.
-        # Need to keep and navigate backup mode history, to decide a different mode per volume
+        # Need to keep and navigate backup mode history, to decide a different
+        # mode per volume
         volume_queue.incremental = task.incremental
 
         backup_method = "Incremental" if task.incremental else "Full"
@@ -542,6 +553,7 @@
 
     def create_volume_backup(self, task):
         """Initiate the backup of the volume
+
         :param task: Provide the map of the volume that needs
                   backup.
         This function will call the backupup api and change the
@@ -563,7 +575,10 @@
                 # NOTE(Alex): no need to wait because we have a cycle time out
                 if project_id not in self.project_list:
                     LOG.warn(
-                        _("Project ID %s is not existing in project list" % project_id)
+                        _(
+                            "Project ID %s is not existing in project list"
+                            % project_id
+                        )
                     )
                     self.process_non_existing_backup(task)
                     return
@@ -571,8 +586,16 @@
                 backup_method = "Incremental" if task.incremental else "Full"
                 LOG.info(
                     _(
-                        ("%s Backup (name: %s) for volume %s creating in project %s")
-                        % (backup_method, backup_name, task.volume_id, project_id)
+                        (
+                            "%s Backup (name: %s) for volume %s creating "
+                            "in project %s"
+                        )
+                        % (
+                            backup_method,
+                            backup_name,
+                            task.volume_id,
+                            project_id,
+                        )
                     )
                 )
                 volume_backup = self.openstacksdk.create_backup(
@@ -585,27 +608,31 @@
                 task.backup_status = constants.BACKUP_WIP
                 task.save()
             except OpenstackSDKException as error:
-                inc_err_msg = "No backups available to do an incremental backup"
+                inc_err_msg = (
+                    "No backups available to do an incremental backup"
+                )
                 if inc_err_msg in str(error):
                     LOG.info(
-                        "Retry to create full backup for volume %s instead of incremental."
-                        % task.volume_id
-                    )
+                        "Retry to create full backup for volume %s instead of "
+                        "incremental." %
+                        task.volume_id)
                     task.incremental = False
                     task.save()
                 else:
                     reason = _(
-                        "Backup (name: %s) creation for the volume %s failled. %s"
-                        % (backup_name, task.volume_id, str(error)[:64])
-                    )
+                        "Backup (name: %s) creation for the volume %s "
+                        "failled. %s" %
+                        (backup_name, task.volume_id, str(error)[
+                            :64]))
                     LOG.warn(
-                        "Backup (name: %s) creation for the volume %s failled. %s"
-                        % (backup_name, task.volume_id, str(error))
-                    )
+                        "Backup (name: %s) creation for the volume %s "
+                        "failled. %s" %
+                        (backup_name, task.volume_id, str(error)))
                     task.reason = reason
                     task.backup_status = constants.BACKUP_FAILED
                     task.save()
-            # Added extra exception as OpenstackSDKException does not handle the keystone unauthourized issue.
+            # Added extra exception as OpenstackSDKException does not handle
+            # the keystone unauthorized issue.
             except Exception as error:
                 reason = _(
                     "Backup (name: %s) creation for the volume %s failled. %s"
@@ -627,7 +654,8 @@
     def process_pre_failed_backup(self, task):
         # 1.notify via email
         reason = _(
-            "The backup creation for the volume %s was prefailed." % task.volume_id
+            "The backup creation for the volume %s was prefailed."
+            % task.volume_id
         )
         LOG.warn(reason)
         task.reason = reason
@@ -636,7 +664,9 @@
 
     def process_failed_backup(self, task):
         # 1. notify via email
-        reason = _("The status of backup for the volume %s is error." % task.volume_id)
+        reason = (
+            f"The status of backup for the volume {task.volume_id} is error."
+        )
         LOG.warn(reason)
         # 2. delete backup generator
         try:
@@ -679,6 +709,7 @@
 
     def check_volume_backup_status(self, queue):
         """Checks the backup status of the volume
+
         :params: queue: Provide the map of the volume that needs backup
                  status checked.
         Call the backups api to see if the backup is successful.
@@ -698,7 +729,10 @@
         if backup_gen is None:
             # TODO(Alex): need to check when it is none
             LOG.info(
-                _("[Beta] Backup status of %s is returning none." % (queue.backup_id))
+                _(
+                    "[Beta] Backup status of %s is returning none."
+                    % (queue.backup_id)
+                )
             )
             self.process_non_existing_backup(queue)
             return
@@ -707,13 +741,16 @@
         elif backup_gen.status == "available":
             self.process_available_backup(queue)
         elif backup_gen.status == "creating":
-            LOG.info("Waiting for backup of %s to be completed" % queue.volume_id)
+            LOG.info(
+                "Waiting for backup of %s to be completed" % queue.volume_id
+            )
         else:  # "deleting", "restoring", "error_restoring" status
             self.process_using_backup(queue)
 
     def _volume_backup(self, task):
         # matching_backups = [
-        #     g for g in self.available_backups if g.backup_id == task.backup_id
+        #     g for g in self.available_backups
+        #          if g.backup_id == task.backup_id
         # ]
         # if not matching_backups:
         volume_backup = objects.Volume(self.ctx)
diff --git a/staffeln/conductor/manager.py b/staffeln/conductor/manager.py
index 380c2f7..1d96d52 100755
--- a/staffeln/conductor/manager.py
+++ b/staffeln/conductor/manager.py
@@ -1,17 +1,23 @@
+from __future__ import annotations
+
+from datetime import timedelta
+from datetime import timezone
 import threading
 import time
-from datetime import timedelta, timezone
 
 import cotyledon
-import staffeln.conf
 from futurist import periodics
 from oslo_log import log
 from oslo_utils import timeutils
-from staffeln import objects
-from staffeln.common import constants, context, lock
+
+from staffeln.common import constants
+from staffeln.common import context
+from staffeln.common import lock
 from staffeln.common import time as xtime
 from staffeln.conductor import backup as backup_controller
+import staffeln.conf
 from staffeln.i18n import _
+from staffeln import objects
 
 LOG = log.getLogger(__name__)
 CONF = staffeln.conf.CONF
@@ -58,7 +64,8 @@
                 LOG.info(_("cycle timein"))
                 for queue in queues_started:
                     LOG.debug(
-                        f"try to get lock and run task for volume: {queue.volume_id}."
+                        "try to get lock and run task for volume: "
+                        f"{queue.volume_id}."
                     )
                     with lock.Lock(
                         self.lock_mgt, queue.volume_id, remove_lock=True
@@ -82,7 +89,8 @@
             LOG.info(
                 _(
                     "Recycle timeout format is invalid. "
-                    "Follow <YEARS>y<MONTHS>m<WEEKS>w<DAYS>d<HOURS>h<MINUTES>min<SECONDS>s."
+                    "Follow <YEARS>y<MONTHS>m<WEEKS>w<DAYS>d<HOURS>h<MINUTES>"
+                    "min<SECONDS>s."
                 )
             )
             time_delta_dict = xtime.parse_timedelta_string(
@@ -117,7 +125,9 @@
                 ) as t_lock:
                     if t_lock.acquired:
                         # Re-pulling status and make it's up-to-date
-                        task = self.controller.get_queue_task_by_id(task_id=task.id)
+                        task = self.controller.get_queue_task_by_id(
+                            task_id=task.id
+                        )
                         if task.backup_status == constants.BACKUP_PLANNED:
                             task.backup_status = constants.BACKUP_INIT
                             task.save()
@@ -136,9 +146,13 @@
 
     def _report_backup_result(self):
         report_period = CONF.conductor.report_period
-        threshold_strtime = timeutils.utcnow() - timedelta(seconds=report_period)
+        threshold_strtime = timeutils.utcnow() - timedelta(
+            seconds=report_period
+        )
 
-        filters = {"created_at__gt": threshold_strtime.astimezone(timezone.utc)}
+        filters = {
+            "created_at__gt": threshold_strtime.astimezone(timezone.utc)
+        }
         report_tss = objects.ReportTimestamp.list(  # pylint: disable=E1120
             context=self.ctx, filters=filters
         )
@@ -152,9 +166,13 @@
             threshold_strtime = timeutils.utcnow() - timedelta(
                 seconds=report_period * 10
             )
-            filters = {"created_at__lt": threshold_strtime.astimezone(timezone.utc)}
-            old_report_tss = objects.ReportTimestamp.list(  # pylint: disable=E1120
-                context=self.ctx, filters=filters
+            filters = {
+                "created_at__lt": threshold_strtime.astimezone(timezone.utc)
+            }
+            old_report_tss = (
+                objects.ReportTimestamp.list(  # pylint: disable=E1120
+                    context=self.ctx, filters=filters
+                )
             )
             for report_ts in old_report_tss:
                 report_ts.delete()
@@ -163,7 +181,9 @@
         LOG.info("Backup manager started %s" % str(time.time()))
         LOG.info("%s periodics" % self.name)
 
-        @periodics.periodic(spacing=backup_service_period, run_immediately=True)
+        @periodics.periodic(
+            spacing=backup_service_period, run_immediately=True
+        )
        def backup_tasks():
             with self.lock_mgt:
                 with lock.Lock(self.lock_mgt, constants.PULLER) as puller:
@@ -230,14 +250,18 @@
             if backup_age > retention_time:
                 # Backup remain longer than retention, need to purge it.
                 LOG.debug(
-                    f"Found potential volume backup for retention: Backup ID: {backup.backup_id} "
-                    f"with backup age: {backup_age} (Target retention time: {retention_time})."
+                    "Found potential volume backup for retention: Backup "
+                    f"ID: {backup.backup_id} "
+                    f"with backup age: {backup_age} (Target retention "
+                    f"time: {retention_time})."
                 )
                 return True
         elif now - self.threshold_strtime < backup_age:
             LOG.debug(
-                f"Found potential volume backup for retention: Backup ID: {backup.backup_id} "
-                f"with backup age: {backup_age} (Default retention time: {self.threshold_strtime})."
+                "Found potential volume backup for retention: "
+                f"Backup ID: {backup.backup_id} "
+                f"with backup age: {backup_age} (Default retention "
+                f"time: {self.threshold_strtime})."
             )
             return True
         return False
@@ -245,10 +269,14 @@
     def rotation_engine(self, retention_service_period):
         LOG.info(f"{self.name} rotation_engine")
 
-        @periodics.periodic(spacing=retention_service_period, run_immediately=True)
+        @periodics.periodic(
+            spacing=retention_service_period, run_immediately=True
+        )
         def rotation_tasks():
             with self.lock_mgt:
-                with lock.Lock(self.lock_mgt, constants.RETENTION) as retention:
+                with lock.Lock(
+                    self.lock_mgt, constants.RETENTION
+                ) as retention:
                     if not retention.acquired:
                         return
 
@@ -264,8 +292,8 @@
 
                     # No way to judge retention
                     if (
-                        self.threshold_strtime is None
-                        and not self.instance_retention_map
+                        self.threshold_strtime is None and (
+                            not self.instance_retention_map)
                     ):
                         return
                     backup_instance_map = {}
@@ -274,17 +302,21 @@
                     self.controller.update_project_list()
 
                     for backup in self.get_backup_list():
-                        # Create backup instance map for later sorted by created_at.
-                        # This can be use as base of judgement on delete a backup.
-                        # The reason we need such list is because backup have
-                        # dependency with each other after we enable incremental backup.
+                        # Create backup instance map for later sorted by
+                        # created_at. This can be use as base of judgement
+                        # on delete a backup. The reason we need such list
+                        # is because backup have dependency with each other
+                        # after we enable incremental backup.
                         # So we need to have information to judge on.
                         if backup.instance_id in backup_instance_map:
-                            backup_instance_map[backup.instance_id].append(backup)
+                            backup_instance_map[backup.instance_id].append(
+                                backup
+                            )
                         else:
                             backup_instance_map[backup.instance_id] = [backup]
 
-                    # Sort backup instance map and use it to check backup create time and order.
+                    # Sort backup instance map and use it to check backup
+                    # create time and order.
                     for instance_id in backup_instance_map:
                         sorted_backup_list = sorted(
                             backup_instance_map[instance_id],
@@ -294,9 +326,11 @@
                         for backup in sorted_backup_list:
                             if self.is_retention(backup):
                                 LOG.debug(
-                                    f"Retention: Try to remove volume backup {backup.backup_id}"
+                                    "Retention: Try to remove volume backup "
+                                    f"{backup.backup_id}"
                                 )
-                                # Try to delete and skip any incremental exist error.
+                                # Try to delete and skip any incremental
+                                # exist error.
                                 self.controller.hard_remove_volume_backup(
                                     backup, skip_inc_err=True
                                 )
@@ -319,7 +353,8 @@
             LOG.info(
                 _(
                     "Retention time format is invalid. "
-                    "Follow <YEARS>y<MONTHS>m<WEEKS>w<DAYS>d<HOURS>h<MINUTES>min<SECONDS>s."
+                    "Follow <YEARS>y<MONTHS>m<WEEKS>w<DAYS>d<HOURS>h"
+                    "<MINUTES>min<SECONDS>s."
                 )
             )
             return None
diff --git a/staffeln/conductor/result.py b/staffeln/conductor/result.py
index be07aff..c602ae3 100644
--- a/staffeln/conductor/result.py
+++ b/staffeln/conductor/result.py
@@ -1,12 +1,15 @@
 # Email notification package
 # This should be upgraded by integrating with mail server to send batch
-import staffeln.conf
+from __future__ import annotations
+
 from oslo_log import log
 from oslo_utils import timeutils
-from staffeln import objects
-from staffeln.common import constants, email
+
+from staffeln.common import constants
+from staffeln.common import email
 from staffeln.common import time as xtime
-from staffeln.i18n import _
+import staffeln.conf
+from staffeln import objects
 
 CONF = staffeln.conf.CONF
 LOG = log.getLogger(__name__)
@@ -37,21 +40,23 @@
             receiver = CONF.notification.receiver
         elif not CONF.notification.project_receiver_domain:
             try:
-                receiver = self.backup_mgt.openstacksdk.get_project_member_emails(
-                    project_id
+                receiver = (
+                    self.backup_mgt.openstacksdk.get_project_member_emails(
+                        project_id
+                    )
                 )
                 if not receiver:
                     LOG.warn(
-                        f"No email can be found from members of project {project_id}. "
-                        "Skip report now and will try to report later."
-                    )
+                        "No email can be found from members of project "
+                        f"{project_id}. "
+                        "Skip report now and will try to report later.")
                     return False
             except Exception as ex:
                 LOG.warn(
-                    f"Failed to fetch emails from project members with exception: {str(ex)} "
+                    "Failed to fetch emails from project members with "
+                    f"exception: {str(ex)} "
                     "As also no receiver email or project receiver domain are "
-                    "configured. Will try to report later."
-                )
+                    "configured. Will try to report later.")
                 return False
         else:
             receiver_domain = CONF.notification.project_receiver_domain
@@ -119,41 +124,35 @@
         if success_tasks:
             success_volumes = "<br>".join(
                 [
-                    (
-                        f"Volume ID: {str(e.volume_id)}, Backup ID: {str(e.backup_id)}, "
-                        f"Backup mode: {'Incremental' if e.incremental else 'Full'}, "
-                        f"Created at: {str(e.created_at)}, Last updated at: "
-                        f"{str(e.updated_at)}"
-                    )
-                    for e in success_tasks
-                ]
-            )
+                    (f"Volume ID: {str(e.volume_id)}, "
+                     f"Backup ID: {str(e.backup_id)}, "
+                     "Backup mode: "
+                     f"{'Incremental' if e.incremental else 'Full'}, "
+                     f"Created at: {str(e.created_at)}, Last updated at: "
+                     f"{str(e.updated_at)}") for e in success_tasks])
         else:
             success_volumes = "<br>"
         if failed_tasks:
             failed_volumes = "<br>".join(
                 [
-                    (
-                        f"Volume ID: {str(e.volume_id)}, Reason: {str(e.reason)}, "
-                        f"Created at: {str(e.created_at)}, Last updated at: "
-                        f"{str(e.updated_at)}"
-                    )
-                    for e in failed_tasks
-                ]
-            )
+                    (f"Volume ID: {str(e.volume_id)}, "
+                     f"Reason: {str(e.reason)}, "
+                     f"Created at: {str(e.created_at)}, Last updated at: "
+                     f"{str(e.updated_at)}") for e in failed_tasks])
         else:
             failed_volumes = "<br>"
         html += (
             f"<h3>Project: {project_name} (ID: {project_id})</h3>"
             "<h3>Quota Usage (Backup Gigabytes)</h3>"
-            f"<FONT COLOR={quota_color}><h4>Limit: {str(quota['limit'])} GB, In Use: "
-            f"{str(quota['in_use'])} GB, Reserved: {str(quota['reserved'])} GB, Total "
+            f"<FONT COLOR={quota_color}><h4>Limit: {str(quota['limit'])} "
+            "GB, In Use: "
+            f"{str(quota['in_use'])} GB, Reserved: {str(quota['reserved'])} "
+            "GB, Total "
             f"rate: {str(quota_usage)}</h4></FONT>"
             "<h3>Success List</h3>"
             f"<FONT COLOR=GREEN><h4>{success_volumes}</h4></FONT><br>"
             "<h3>Failed List</h3>"
-            f"<FONT COLOR=RED><h4>{failed_volumes}</h4></FONT><br>"
-        )
+            f"<FONT COLOR=RED><h4>{failed_volumes}</h4></FONT><br>")
         self.content += html
         subject = f"Staffeln Backup result: {project_id}"
         reported = self.send_result_email(
@@ -163,5 +162,4 @@
             # Record success report
             self.create_report_record()
             return True
-        else:
-            return False
+        return False
diff --git a/staffeln/conf/__init__.py b/staffeln/conf/__init__.py
index 3289b63..4da72a5 100755
--- a/staffeln/conf/__init__.py
+++ b/staffeln/conf/__init__.py
@@ -1,5 +1,12 @@
+from __future__ import annotations
+
 from oslo_config import cfg
-from staffeln.conf import api, conductor, database, notify, paths
+
+from staffeln.conf import api
+from staffeln.conf import conductor
+from staffeln.conf import database
+from staffeln.conf import notify
+from staffeln.conf import paths
 
 CONF = cfg.CONF
 
diff --git a/staffeln/conf/api.py b/staffeln/conf/api.py
index e405d6a..16db057 100755
--- a/staffeln/conf/api.py
+++ b/staffeln/conf/api.py
@@ -1,4 +1,7 @@
+from __future__ import annotations
+
 from oslo_config import cfg
+
 from staffeln.i18n import _
 
 api_group = cfg.OptGroup(
@@ -16,7 +19,9 @@
     cfg.PortOpt(
         "port",
         default=8808,
-        help=_("Staffeln API listens on this port number for incoming requests."),
+        help=_(
+            "Staffeln API listens on this port number for incoming requests."
+        ),
     ),
     cfg.BoolOpt("enabled_ssl", default=False, help=_("ssl enabled")),
     cfg.StrOpt("ssl_key_file", default=False, help=_("ssl key file path")),
diff --git a/staffeln/conf/conductor.py b/staffeln/conf/conductor.py
index ab8e258..db0f840 100755
--- a/staffeln/conf/conductor.py
+++ b/staffeln/conf/conductor.py
@@ -1,11 +1,17 @@
+from __future__ import annotations
+
 from oslo_config import cfg
+
 from staffeln.common import constants
 from staffeln.i18n import _
 
 conductor_group = cfg.OptGroup(
     "conductor",
     title="Conductor Options",
-    help=_("Options under this group are used " "to define Conductor's configuration."),
+    help=_(
+        "Options under this group are used "
+        "to define Conductor's configuration."
+    ),
 )
 
 backup_opts = [
@@ -43,7 +49,8 @@
         "backup_cycle_timout",
         regex=(
             r"((?P<years>\d+?)y)?((?P<months>\d+?)mon)?((?P<weeks>\d+?)w)?"
-            r"((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)min)?((?P<seconds>\d+?)s)?"
+            r"((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)min)?"
+            r"((?P<seconds>\d+?)s)?"
         ),
         default=constants.DEFAULT_BACKUP_CYCLE_TIMEOUT,
         help=_(
@@ -53,12 +60,15 @@
     ),
     cfg.StrOpt(
         "backup_metadata_key",
-        help=_("The key string of metadata the VM, which requres back up, has"),
+        help=_(
+            "The key string of metadata the VM, which requres back up, has"
+        ),
     ),
     cfg.StrOpt(
         "retention_metadata_key",
         help=_(
-            "The key string of metadata the VM, which use as backup retention period."
+            "The key string of metadata the VM, which use as backup retention "
+            "period."
         ),
     ),
     cfg.IntOpt(
@@ -96,7 +106,8 @@
         "retention_time",
         regex=(
             r"((?P<years>\d+?)y)?((?P<months>\d+?)mon)?((?P<weeks>\d+?)w)?"
-            r"((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)min)?((?P<seconds>\d+?)s)?"
+            r"((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)min)?"
+            r"((?P<seconds>\d+?)s)?"
         ),
         default="2w3d",
         help=_(
@@ -110,13 +121,18 @@
 coordination_group = cfg.OptGroup(
     "coordination",
     title="Coordination Options",
-    help=_("Options under this group are used to define Coordination's configuration."),
+    help=_(
+        "Options under this group are used to define Coordination's"
+        "configuration."
+    ),
 )
 
 
 coordination_opts = [
     cfg.StrOpt(
-        "backend_url", default="", help=_("lock coordination connection backend URL.")
+        "backend_url",
+        default="",
+        help=_("lock coordination connection backend URL."),
     ),
 ]
 
diff --git a/staffeln/conf/database.py b/staffeln/conf/database.py
index 761aa15..f4fe98e 100644
--- a/staffeln/conf/database.py
+++ b/staffeln/conf/database.py
@@ -1,5 +1,8 @@
+from __future__ import annotations
+
 from oslo_config import cfg
 from oslo_db import options as oslo_db_options
+
 from staffeln.conf import paths
 from staffeln.i18n import _
 
@@ -14,7 +17,9 @@
 )
 
 SQL_OPTS = [
-    cfg.StrOpt("mysql_engine", default="InnoDB", help=_("MySQL engine to use.")),
+    cfg.StrOpt(
+        "mysql_engine", default="InnoDB", help=_("MySQL engine to use.")
+    ),
 ]
 
 
diff --git a/staffeln/conf/notify.py b/staffeln/conf/notify.py
index 21c67e8..bc3f4bf 100644
--- a/staffeln/conf/notify.py
+++ b/staffeln/conf/notify.py
@@ -1,10 +1,15 @@
+from __future__ import annotations
+
 from oslo_config import cfg
+
 from staffeln.i18n import _
 
 notify_group = cfg.OptGroup(
     "notification",
     title="Notification options",
-    help=_("Options under this group are used to define notification settings."),
+    help=_(
+        "Options under this group are used to define notification settings."
+    ),
 )
 
 email_opts = [
@@ -32,7 +37,8 @@
             "The user name to authenticate with."
         ),
     ),
-    # We can remove the sender password as we are using postfix to send mail and we won't be authenticating.
+    # We can remove the sender password as we are using postfix to send
+    # mail and we won't be authenticating.
     cfg.StrOpt(
         "sender_pwd",
         help=_(
diff --git a/staffeln/conf/paths.py b/staffeln/conf/paths.py
index 7dbd9a1..7341e48 100644
--- a/staffeln/conf/paths.py
+++ b/staffeln/conf/paths.py
@@ -1,12 +1,17 @@
+from __future__ import annotations
+
 import os
 
 from oslo_config import cfg
+
 from staffeln.i18n import _
 
 PATH_OPTS = [
     cfg.StrOpt(
         "pybasedir",
-        default=os.path.abspath(os.path.join(os.path.dirname(__file__), "../")),
+        default=os.path.abspath(
+            os.path.join(os.path.dirname(__file__), "../")
+        ),
         help=_("Directory where the staffeln python module is installed."),
     ),
     cfg.StrOpt(
diff --git a/staffeln/db/api.py b/staffeln/db/api.py
index 2d10a05..3e22bde 100644
--- a/staffeln/db/api.py
+++ b/staffeln/db/api.py
@@ -1,9 +1,14 @@
 """Base classes for storage engines"""
+
+from __future__ import annotations
+
 from oslo_config import cfg
 from oslo_db import api as db_api
 
 _BACKEND_MAPPING = {"sqlalchemy": "staffeln.db.sqlalchemy.api"}
-IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING, lazy=True)
+IMPL = db_api.DBAPI.from_config(
+    cfg.CONF, backend_mapping=_BACKEND_MAPPING, lazy=True
+)
 
 
 def get_instance():
diff --git a/staffeln/db/base.py b/staffeln/db/base.py
index de0d7c3..ad303d1 100755
--- a/staffeln/db/base.py
+++ b/staffeln/db/base.py
@@ -1,5 +1,7 @@
 """Database setup and migration commands."""
 
+from __future__ import annotations
+
 
 class base:
     def __init__(self):
diff --git a/staffeln/db/migration.py b/staffeln/db/migration.py
index 113116e..0d6eea9 100644
--- a/staffeln/db/migration.py
+++ b/staffeln/db/migration.py
@@ -1,7 +1,11 @@
 """Database setup command"""
-import staffeln.conf
+
+from __future__ import annotations
+
 from stevedore import driver
 
+import staffeln.conf
+
 CONF = staffeln.conf.CONF
 
 _IMPL = None
diff --git a/staffeln/db/sqlalchemy/alembic/env.py b/staffeln/db/sqlalchemy/alembic/env.py
index 71461fe..970dcc7 100644
--- a/staffeln/db/sqlalchemy/alembic/env.py
+++ b/staffeln/db/sqlalchemy/alembic/env.py
@@ -9,10 +9,12 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
+from __future__ import annotations
 
 from logging import config as log_config
 
 from alembic import context
+
 from staffeln.db.sqlalchemy import api as sqla_api
 from staffeln.db.sqlalchemy import models
 
@@ -44,7 +46,9 @@
     """
     engine = sqla_api.get_engine()
     with engine.connect() as connection:
-        context.configure(connection=connection, target_metadata=target_metadata)
+        context.configure(
+            connection=connection, target_metadata=target_metadata
+        )
         with context.begin_transaction():
             context.run_migrations()
 
diff --git a/staffeln/db/sqlalchemy/alembic/versions/041d9a0f1159_backup_add_names.py b/staffeln/db/sqlalchemy/alembic/versions/041d9a0f1159_backup_add_names.py
index a16f27c..6d53f0e 100644
--- a/staffeln/db/sqlalchemy/alembic/versions/041d9a0f1159_backup_add_names.py
+++ b/staffeln/db/sqlalchemy/alembic/versions/041d9a0f1159_backup_add_names.py
@@ -7,17 +7,21 @@
 """
 
 # revision identifiers, used by Alembic.
+from __future__ import annotations
+
 revision = "041d9a0f1159"
 down_revision = ""
 
-import sqlalchemy as sa  # noqa: E402
 from alembic import op  # noqa: E402
+import sqlalchemy as sa  # noqa: E402
 
 
 def upgrade():
     op.add_column(
-        "queue_data", sa.Column("volume_name", sa.String(length=100), nullable=True)
+        "queue_data",
+        sa.Column("volume_name", sa.String(length=100), nullable=True),
     )
     op.add_column(
-        "queue_data", sa.Column("instance_name", sa.String(length=100), nullable=True)
+        "queue_data",
+        sa.Column("instance_name", sa.String(length=100), nullable=True),
     )
diff --git a/staffeln/db/sqlalchemy/alembic/versions/2b2b9df199bd_add_reason_column_to_queue_data_table.py b/staffeln/db/sqlalchemy/alembic/versions/2b2b9df199bd_add_reason_column_to_queue_data_table.py
index f78c91d..4ebaf9f 100644
--- a/staffeln/db/sqlalchemy/alembic/versions/2b2b9df199bd_add_reason_column_to_queue_data_table.py
+++ b/staffeln/db/sqlalchemy/alembic/versions/2b2b9df199bd_add_reason_column_to_queue_data_table.py
@@ -7,11 +7,13 @@
 """
 
 # revision identifiers, used by Alembic.
+from __future__ import annotations
+
 revision = "2b2b9df199bd"
 down_revision = "ebdbed01e9a7"
 
-import sqlalchemy as sa  # noqa: E402
 from alembic import op  # noqa: E402
+import sqlalchemy as sa  # noqa: E402
 
 
 def upgrade():
diff --git a/staffeln/db/sqlalchemy/alembic/versions/5b2e78435231_add_report_timestamp.py b/staffeln/db/sqlalchemy/alembic/versions/5b2e78435231_add_report_timestamp.py
index 1abed60..5635fd9 100644
--- a/staffeln/db/sqlalchemy/alembic/versions/5b2e78435231_add_report_timestamp.py
+++ b/staffeln/db/sqlalchemy/alembic/versions/5b2e78435231_add_report_timestamp.py
@@ -1,6 +1,8 @@
-import sqlalchemy as sa
+from __future__ import annotations
+
 from alembic import op
 from oslo_log import log
+import sqlalchemy as sa
 
 """add report timestamp
 
@@ -21,7 +23,11 @@
     op.create_table(
         "report_timestamp",
         sa.Column(
-            "id", sa.String(36), primary_key=True, nullable=False, autoincrement=True
+            "id",
+            sa.String(36),
+            primary_key=True,
+            nullable=False,
+            autoincrement=True,
         ),
         sa.Column("created_at", sa.DateTime),
         sa.Column("updated_at", sa.DateTime),
diff --git a/staffeln/db/sqlalchemy/alembic/versions/ebdbed01e9a7_added_incremental_field.py b/staffeln/db/sqlalchemy/alembic/versions/ebdbed01e9a7_added_incremental_field.py
index b2ed161..45cc8a8 100644
--- a/staffeln/db/sqlalchemy/alembic/versions/ebdbed01e9a7_added_incremental_field.py
+++ b/staffeln/db/sqlalchemy/alembic/versions/ebdbed01e9a7_added_incremental_field.py
@@ -7,15 +7,21 @@
 """
 
 # revision identifiers, used by Alembic.
+from __future__ import annotations
+
 revision = "ebdbed01e9a7"
 down_revision = "041d9a0f1159"
 
-import sqlalchemy as sa  # noqa: E402
 from alembic import op  # noqa: E402
+import sqlalchemy as sa  # noqa: E402
 
 
 def upgrade():
     # ### commands auto generated by Alembic - please adjust! ###
-    op.add_column("backup_data", sa.Column("incremental", sa.Boolean(), nullable=True))
-    op.add_column("queue_data", sa.Column("incremental", sa.Boolean(), nullable=True))
+    op.add_column(
+        "backup_data", sa.Column("incremental", sa.Boolean(), nullable=True)
+    )
+    op.add_column(
+        "queue_data", sa.Column("incremental", sa.Boolean(), nullable=True)
+    )
     # ### end Alembic commands ###
diff --git a/staffeln/db/sqlalchemy/api.py b/staffeln/db/sqlalchemy/api.py
index 3cda5e7..335094e 100644
--- a/staffeln/db/sqlalchemy/api.py
+++ b/staffeln/db/sqlalchemy/api.py
@@ -1,5 +1,7 @@
 """SQLAlchemy storage backend."""
 
+from __future__ import annotations
+
 import datetime
 import operator
 
@@ -8,9 +10,12 @@
 from oslo_db.sqlalchemy import session as db_session
 from oslo_db.sqlalchemy import utils as db_utils
 from oslo_log import log
-from oslo_utils import strutils, timeutils, uuidutils
+from oslo_utils import strutils
+from oslo_utils import timeutils
+from oslo_utils import uuidutils
 from sqlalchemy.inspection import inspect
 from sqlalchemy.orm import exc
+
 from staffeln.common import short_id
 from staffeln.db.sqlalchemy import models
 
@@ -54,6 +59,7 @@
 
 def add_identity_filter(query, value):
     """Adds an identity filter to a query.
+
     Filters results by ID, if supplied value is a valid integer.
     Otherwise attempts to filter results by backup_id.
     :param query: Initial query to add filter to.
@@ -161,6 +167,7 @@
 
     def _add_filters(self, query, model, filters=None, plain_fields=None):
         """Add filters while listing the columns from database table"""
+
         # timestamp_mixin_fields = ["created_at", "updated_at"]
         filters = filters or {}
 
@@ -178,9 +185,8 @@
         field = getattr(model, fieldname)
 
         if (
-            fieldname != "deleted"
-            and value
-            and field.type.python_type is datetime.datetime
+            fieldname != "deleted" and value and (
+                field.type.python_type is datetime.datetime)
         ):
             if not isinstance(value, datetime.datetime):
                 value = timeutils.parse_isotime(value)
@@ -323,7 +329,10 @@
         try:
 
             return self._get(
-                context, model=models.Queue_data, fieldname=fieldname, value=value
+                context,
+                model=models.Queue_data,
+                fieldname=fieldname,
+                value=value,
             )
         except:  # noqa: E722
             LOG.error("Queue not found")
@@ -338,7 +347,9 @@
         """Get the column from the backup_data with matching backup_id"""
 
         try:
-            return self._get_backup(context, fieldname="backup_id", value=backup_id)
+            return self._get_backup(
+                context, fieldname="backup_id", value=backup_id
+            )
         except:  # noqa: E722
             LOG.error("Backup not found with backup_id %s." % backup_id)
 
@@ -347,7 +358,10 @@
 
         try:
             return self._get(
-                context, model=models.Backup_data, fieldname=fieldname, value=value
+                context,
+                model=models.Backup_data,
+                fieldname=fieldname,
+                value=value,
             )
         except:  # noqa: E722
             LOG.error("Backup resource not found.")
@@ -365,7 +379,9 @@
 
     def create_report_timestamp(self, values):
         try:
-            report_timestamp_data = self._create(models.Report_timestamp, values)
+            report_timestamp_data = self._create(
+                models.Report_timestamp, values
+            )
         except db_exc.DBDuplicateEntry:
             LOG.error("Report Timestamp ID already exists.")
         return report_timestamp_data
diff --git a/staffeln/db/sqlalchemy/migration.py b/staffeln/db/sqlalchemy/migration.py
index 3a34c2b..e50d757 100644
--- a/staffeln/db/sqlalchemy/migration.py
+++ b/staffeln/db/sqlalchemy/migration.py
@@ -1,7 +1,10 @@
+from __future__ import annotations
+
 import os
 
-import staffeln.conf
 from oslo_db.sqlalchemy.migration_cli import manager
+
+import staffeln.conf
 from staffeln.db.sqlalchemy import api as sqla_api
 from staffeln.db.sqlalchemy import models
 
diff --git a/staffeln/db/sqlalchemy/models.py b/staffeln/db/sqlalchemy/models.py
index c186ddc..a0e3815 100644
--- a/staffeln/db/sqlalchemy/models.py
+++ b/staffeln/db/sqlalchemy/models.py
@@ -1,11 +1,15 @@
 """
 SQLAlchemy models for staffeln service
 """
+
+from __future__ import annotations
+
 import urllib.parse as urlparse
 
 from oslo_db.sqlalchemy import models
 from sqlalchemy import Boolean, Column, Integer, String, UniqueConstraint
 from sqlalchemy.ext.declarative import declarative_base
+
 from staffeln import conf
 
 CONF = conf.CONF
@@ -14,7 +18,10 @@
 def table_args():
     engine_name = urlparse.urlparse(CONF.database.connection).scheme
     if engine_name == "mysql":
-        return {"mysql_engine": CONF.database.mysql_engine, "mysql_charset": "utf8"}
+        return {
+            "mysql_engine": CONF.database.mysql_engine,
+            "mysql_charset": "utf8",
+        }
     return None
 
 
diff --git a/staffeln/exception.py b/staffeln/exception.py
index e561506..3f8a34e 100644
--- a/staffeln/exception.py
+++ b/staffeln/exception.py
@@ -13,10 +13,11 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
-
 """Staffeln base exception handling."""
+from __future__ import annotations
 
-from typing import Optional, Union  # noqa: H301
+from typing import Optional
+from typing import Union
 
 from oslo_log import log as logging
 
diff --git a/staffeln/i18n.py b/staffeln/i18n.py
index 09fe8aa..cbef6e5 100755
--- a/staffeln/i18n.py
+++ b/staffeln/i18n.py
@@ -2,6 +2,8 @@
 See http://docs.openstack.org/developer/oslo.i18n/usage.html .
 """
 
+from __future__ import annotations
+
 import oslo_i18n
 
 DOMAIN = "staffeln"
diff --git a/staffeln/objects/__init__.py b/staffeln/objects/__init__.py
index 2af8df0..defa200 100755
--- a/staffeln/objects/__init__.py
+++ b/staffeln/objects/__init__.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 from .queue import Queue  # noqa: F401
 from .report import ReportTimestamp  # noqa: F401
 from .volume import Volume  # noqa: F401
diff --git a/staffeln/objects/base.py b/staffeln/objects/base.py
index 8dd6f94..60a6fe2 100755
--- a/staffeln/objects/base.py
+++ b/staffeln/objects/base.py
@@ -1,8 +1,11 @@
 """Staffeln common internal object model"""

 

+from __future__ import annotations

+

 from oslo_utils import versionutils

 from oslo_versionedobjects import base as ovoo_base

 from oslo_versionedobjects import fields as ovoo_fields

+

 from staffeln import objects

 

 remotable_classmethod = ovoo_base.remotable_classmethod

@@ -30,7 +33,9 @@
     OBJ_PROJECT_NAMESPACE = "staffeln"

 

     def as_dict(self):

-        return {k: getattr(self, k) for k in self.fields if self.obj_attr_is_set(k)}

+        return {

+            k: getattr(self, k) for k in self.fields if self.obj_attr_is_set(k)

+        }

 

 

 class StaffelnObjectSerializer(ovoo_base.VersionedObjectSerializer):

@@ -48,9 +53,14 @@
     object_fields = {}

 

     def obj_refresh(self, loaded_object):

-        fields = (field for field in self.fields if field not in self.object_fields)

+        fields = (

+            field for field in self.fields if field not in self.object_fields

+        )

         for field in fields:

-            if self.obj_attr_is_set(field) and self[field] != loaded_object[field]:

+            if (

+                self.obj_attr_is_set(field) and (

+                    self[field] != loaded_object[field])

+            ):

                 self[field] = loaded_object[field]

 

     @staticmethod

diff --git a/staffeln/objects/fields.py b/staffeln/objects/fields.py
index 3f6c2a7..95ed59c 100644
--- a/staffeln/objects/fields.py
+++ b/staffeln/objects/fields.py
@@ -1,4 +1,7 @@
 """Utility method for objects"""
+
+from __future__ import annotations
+
 from oslo_serialization import jsonutils
 from oslo_versionedobjects import fields
 
diff --git a/staffeln/objects/queue.py b/staffeln/objects/queue.py
index db49c21..b80c2d8 100644
--- a/staffeln/objects/queue.py
+++ b/staffeln/objects/queue.py
@@ -1,4 +1,7 @@
+from __future__ import annotations
+
 from oslo_versionedobjects import fields as ovoo_fields
+
 from staffeln.db import api as db_api
 from staffeln.objects import base
 from staffeln.objects import fields as sfeild
@@ -6,7 +9,9 @@
 
 @base.StaffelnObjectRegistry.register
 class Queue(
-    base.StaffelnPersistentObject, base.StaffelnObject, base.StaffelnObjectDictCompat
+    base.StaffelnPersistentObject,
+    base.StaffelnObject,
+    base.StaffelnObjectDictCompat,
 ):
     VERSION = "1.2"
     # Version 1.0: Initial version
@@ -37,6 +42,7 @@
     @base.remotable_classmethod
     def get_by_id(cls, context, id):  # pylint: disable=E0213
         """Find a queue task based on id
+
         :param context: Security context. NOTE: This should only
                         be used internally by the indirection_api.
                         Unfortunately, RPC requires context as the first
@@ -46,6 +52,7 @@
         :param backup_id: the backup id of volume in queue.
         :returns: a :class:`Queue` object.
         """
+
         db_queue = cls.dbapi.get_queue_by_id(context, id)
         queue = cls._from_db_object(cls(context), db_queue)
         return queue
@@ -53,6 +60,7 @@
     @base.remotable
     def create(self):
         """Create a :class:`Backup_data` record in the DB"""
+
         values = self.obj_get_changes()
         db_queue = self.dbapi.create_queue(values)
         return self._from_db_object(self, db_queue)
@@ -73,4 +81,5 @@
     @base.remotable
     def delete_queue(self):
         """Soft Delete the :class:`Queue_data` from the DB"""
+
         self.dbapi.soft_delete_queue(self.id)
diff --git a/staffeln/objects/report.py b/staffeln/objects/report.py
index e851a93..7bc1885 100644
--- a/staffeln/objects/report.py
+++ b/staffeln/objects/report.py
@@ -1,4 +1,7 @@
+from __future__ import annotations
+
 from oslo_versionedobjects import fields as ovoo_fields
+
 from staffeln.db import api as db_api
 from staffeln.objects import base
 from staffeln.objects import fields as sfeild
@@ -6,7 +9,9 @@
 
 @base.StaffelnObjectRegistry.register
 class ReportTimestamp(
-    base.StaffelnPersistentObject, base.StaffelnObject, base.StaffelnObjectDictCompat
+    base.StaffelnPersistentObject,
+    base.StaffelnObject,
+    base.StaffelnObjectDictCompat,
 ):
     VERSION = "1.0"
     # Version 1.0: Initial version
@@ -21,7 +26,9 @@
 
     @base.remotable_classmethod
     def list(cls, context, filters=None):  # pylint: disable=E0213
-        db_report = cls.dbapi.get_report_timestamp_list(context, filters=filters)
+        db_report = cls.dbapi.get_report_timestamp_list(
+            context, filters=filters
+        )
         return [cls._from_db_object(cls(context), obj) for obj in db_report]
 
     @base.remotable
diff --git a/staffeln/objects/volume.py b/staffeln/objects/volume.py
index a4f24f6..88f952a 100644
--- a/staffeln/objects/volume.py
+++ b/staffeln/objects/volume.py
@@ -1,4 +1,7 @@
+from __future__ import annotations
+
 from oslo_versionedobjects import fields as ovoo_fields
+
 from staffeln.db import api as db_api
 from staffeln.objects import base
 from staffeln.objects import fields as sfeild
@@ -6,7 +9,9 @@
 
 @base.StaffelnObjectRegistry.register
 class Volume(
-    base.StaffelnPersistentObject, base.StaffelnObject, base.StaffelnObjectDictCompat
+    base.StaffelnPersistentObject,
+    base.StaffelnObject,
+    base.StaffelnObjectDictCompat,
 ):
     VERSION = "1.1"
     # Version 1.0: Initial version
@@ -31,7 +36,9 @@
 
         :param filters: dict mapping the filter to a value.
         """
-        db_backups = cls.dbapi.get_backup_list(context, filters=filters, **kwargs)
+        db_backups = cls.dbapi.get_backup_list(
+            context, filters=filters, **kwargs
+        )
 
         return [cls._from_db_object(cls(context), obj) for obj in db_backups]
 
@@ -58,6 +65,7 @@
     @base.remotable
     def refresh(self):
         """Loads updates for this :class:`Backup_data`.
+
         Loads a backup with the same backup_id from the database and
         checks for updated attributes. Updates are applied from
         the loaded backup column by column, if there are any updates.
@@ -71,8 +79,11 @@
         self.dbapi.soft_delete_backup(self.id)
 
     @base.remotable_classmethod
-    def get_backup_by_backup_id(cls, context, backup_id):  # pylint: disable=E0213
+    def get_backup_by_backup_id(
+        cls, context, backup_id
+    ):  # pylint: disable=E0213
         """Find a backup based on backup_id
+
         :param context: Security context. NOTE: This should only
                         be used internally by the indirection_api.
                         Unfortunately, RPC requires context as the first
diff --git a/staffeln/tests/base.py b/staffeln/tests/base.py
index 1c30cdb..00059c0 100755
--- a/staffeln/tests/base.py
+++ b/staffeln/tests/base.py
@@ -1,5 +1,4 @@
 # -*- coding: utf-8 -*-
-
 # Copyright 2010-2011 OpenStack Foundation
 # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
 #
@@ -14,10 +13,10 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+from __future__ import annotations
 
 from oslotest import base
 
 
 class TestCase(base.BaseTestCase):
-
     """Test case base class for all unit tests."""
diff --git a/staffeln/tests/common/test_openstacksdk.py b/staffeln/tests/common/test_openstacksdk.py
new file mode 100644
index 0000000..e516a30
--- /dev/null
+++ b/staffeln/tests/common/test_openstacksdk.py
@@ -0,0 +1,503 @@
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+from unittest import mock
+
+from openstack import exceptions as openstack_exc
+import tenacity
+
+from staffeln.common import openstack as s_openstack
+from staffeln import conf
+from staffeln.tests import base
+
+
+class OpenstackSDKTest(base.TestCase):
+
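+    # setUp patches ``openstack.connect`` so no real cloud is contacted,
+    # then rewires each wrapped method's tenacity retry to use a mocked
+    # sleep and stop after two attempts, keeping the retry tests instant.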
+    def setUp(self):
+        super(OpenstackSDKTest, self).setUp()
+        self.m_c = mock.MagicMock()
+        with mock.patch("openstack.connect", return_value=self.m_c):
+            self.openstack = s_openstack.OpenstackSDK()
+        self.m_sleep = mock.Mock()
+        func_list = [
+            "get_user_id",
+            "get_projects",
+            "get_servers",
+            "get_role_assignments",
+            "get_user",
+            "get_project_member_emails",
+            "get_volume",
+            "get_backup",
+            "delete_backup",
+            "get_backup_quota",
+            "get_backup_gigabytes_quota",
+        ]
+        for i in func_list:
+            getattr(self.openstack, i).retry.sleep = (  # pylint: disable=E1101
+                self.m_sleep
+            )
+            getattr(self.openstack, i).retry.stop = (  # pylint: disable=E1101
+                tenacity.stop_after_attempt(2)
+            )
+
+        self.fake_user = mock.MagicMock(id="foo", email="foo@foo.com")
+        self.fake_volume = mock.MagicMock(id="fake_volume")
+        self.fake_backup = mock.MagicMock(id="fake_backup")
+        self.fake_role_assignment = mock.MagicMock(user="foo")
+        self.fake_role_assignment2 = mock.MagicMock(user={"id": "bar"})
+
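+    # Shared helper: make ``m_func`` raise an HttpException with the given
+    # status code, invoke ``retry_func`` on the wrapper, then verify the
+    # bubbled-up status code and how often the retry loop slept.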
+    def _test_http_error(
+        self, m_func, retry_func, status_code, call_count=1, **kwargs
+    ):
+        m_func.side_effect = openstack_exc.HttpException(
+            http_status=status_code
+        )
+        exc = self.assertRaises(
+            openstack_exc.HttpException,
+            getattr(self.openstack, retry_func),
+            **kwargs,
+        )
+        self.assertEqual(status_code, exc.status_code)
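+        # Status codes listed in [openstack] skip_retry_codes are expected
+        # to fail fast: no retry happens, so the mocked sleep never fires.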
+        skip_retry_codes = conf.CONF.openstack.skip_retry_codes.replace(
+            ' ', '').split(',')
+        if str(status_code) not in skip_retry_codes:
+            if call_count == 1:
+                self.m_sleep.assert_called_once_with(1.0)
+            else:
+                self.m_sleep.assert_has_calls(
+                    [mock.call(1.0) for _ in range(call_count)]
+                )
+        else:
+            self.m_sleep.assert_not_called()
+
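+    # Non-HTTP exceptions (e.g. KeyError) should propagate immediately,
+    # without any retry sleep.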
+    def _test_non_http_error(self, m_func, retry_func, **kwargs):
+        m_func.side_effect = KeyError
+        self.assertRaises(
+            KeyError, getattr(self.openstack, retry_func), **kwargs
+        )
+        self.m_sleep.assert_not_called()
+
+    def test_get_servers(self):
+        self.m_c.compute.servers = mock.MagicMock(return_value=[])
+        self.assertEqual(self.openstack.get_servers(), [])
+        self.m_c.compute.servers.assert_called_once_with(
+            details=True, all_projects=True
+        )
+
+    def test_get_servers_non_http_error(self):
+        self._test_non_http_error(self.m_c.compute.servers, "get_servers")
+
+    def test_get_servers_conf_skip_http_error(self):
+        conf.CONF.set_override('skip_retry_codes', '403,', 'openstack')
+        self._test_http_error(
+            self.m_c.compute.servers, "get_servers", status_code=403
+        )
+        self.assertEqual('403,', conf.CONF.openstack.skip_retry_codes)
+
+    def test_get_servers_conf_skip_http_error_not_hit(self):
+        conf.CONF.set_override('skip_retry_codes', '403,', 'openstack')
+        self._test_http_error(
+            self.m_c.compute.servers, "get_servers", status_code=404
+        )
+        self.assertEqual('403,', conf.CONF.openstack.skip_retry_codes)
+
+    def test_get_servers_404_http_error(self):
+        self._test_http_error(
+            self.m_c.compute.servers, "get_servers", status_code=404
+        )
+
+    def test_get_servers_500_http_error(self):
+        self._test_http_error(
+            self.m_c.compute.servers, "get_servers", status_code=500
+        )
+
+    def test_get_projects(self):
+        self.m_c.list_projects = mock.MagicMock(return_value=[])
+        self.assertEqual(self.openstack.get_projects(), [])
+        self.m_c.list_projects.assert_called_once_with()
+
+    def test_get_projects_non_http_error(self):
+        self._test_non_http_error(self.m_c.list_projects, "get_projects")
+
+    def test_get_projects_404_http_error(self):
+        self._test_http_error(
+            self.m_c.list_projects, "get_projects", status_code=404
+        )
+
+    def test_get_projects_500_http_error(self):
+        self._test_http_error(
+            self.m_c.list_projects, "get_projects", status_code=500
+        )
+
+    def test_get_user_id(self):
+        self.m_c.get_user = mock.MagicMock(return_value=self.fake_user)
+        self.assertEqual(self.openstack.get_user_id(), "foo")
+        self.m_c.get_user.assert_called_once_with(name_or_id=mock.ANY)
+
+    def test_get_user_id_non_http_error(self):
+        self._test_non_http_error(self.m_c.get_user, "get_user_id")
+
+    def test_get_user_id_404_http_error(self):
+        self._test_http_error(
+            self.m_c.get_user, "get_user_id", status_code=404
+        )
+
+    def test_get_user_id_500_http_error(self):
+        self._test_http_error(
+            self.m_c.get_user, "get_user_id", status_code=500
+        )
+
+    def test_get_user(self):
+        self.m_c.get_user = mock.MagicMock(return_value=self.fake_user)
+        self.assertEqual(
+            self.openstack.get_user(user_id=self.fake_user.id), self.fake_user
+        )
+        self.m_c.get_user.assert_called_once_with(name_or_id=self.fake_user.id)
+
+    def test_get_user_non_http_error(self):
+        self._test_non_http_error(
+            self.m_c.get_user, "get_user", user_id=self.fake_user.id
+        )
+
+    def test_get_user_404_http_error(self):
+        self._test_http_error(
+            self.m_c.get_user,
+            "get_user",
+            status_code=404,
+            user_id=self.fake_user.id,
+        )
+
+    def test_get_user_500_http_error(self):
+        self._test_http_error(
+            self.m_c.get_user,
+            "get_user",
+            status_code=500,
+            user_id=self.fake_user.id,
+        )
+
+    def test_get_role_assignments(self):
+        self.m_c.list_role_assignments = mock.MagicMock(return_value=[])
+        self.assertEqual(
+            self.openstack.get_role_assignments(project_id="foo"), []
+        )
+        self.m_c.list_role_assignments.assert_called_once_with(
+            filters={"project": "foo"}
+        )
+
+    def test_get_role_assignments_non_http_error(self):
+        self._test_non_http_error(
+            self.m_c.list_role_assignments,
+            "get_role_assignments",
+            project_id="foo",
+        )
+
+    def test_get_role_assignments_404_http_error(self):
+        self._test_http_error(
+            self.m_c.list_role_assignments,
+            "get_role_assignments",
+            status_code=404,
+            project_id="foo",
+        )
+
+    def test_get_role_assignments_500_http_error(self):
+        self._test_http_error(
+            self.m_c.list_role_assignments,
+            "get_role_assignments",
+            status_code=500,
+            project_id="foo",
+        )
+
+    def test_get_project_member_emails(self):
+        # Make sure we cover both get_user patterns: a plain user id
+        # and a dict carrying an "id" key.
+        self.m_c.list_role_assignments = mock.MagicMock(
+            return_value=[
+                self.fake_role_assignment,
+                self.fake_role_assignment2,
+            ]
+        )
+        self.m_c.get_user = mock.MagicMock(return_value=self.fake_user)
+        self.assertEqual(
+            self.openstack.get_project_member_emails(project_id="foo"),
+            [self.fake_user.email, self.fake_user.email],
+        )
+        self.m_c.list_role_assignments.assert_called_once_with(
+            filters={"project": "foo"}
+        )
+        self.m_c.get_user.assert_has_calls(
+            [
+                mock.call(name_or_id=self.fake_role_assignment.user),
+                mock.call(
+                    name_or_id=self.fake_role_assignment2.user.get("id")
+                ),
+            ]
+        )
+
+    def test_get_project_member_emails_non_http_error(self):
+        self._test_non_http_error(
+            self.m_c.list_role_assignments,
+            "get_project_member_emails",
+            project_id="foo",
+        )
+
+    def test_get_project_member_emails_404_http_error(self):
+        self._test_http_error(
+            self.m_c.list_role_assignments,
+            "get_project_member_emails",
+            status_code=404,
+            project_id="foo",
+        )
+
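+    # call_count=3: get_project_member_emails appears to call the retried
+    # get_role_assignments internally, so each outer attempt adds the
+    # inner helper's retry sleep on top of the outer one.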
+    def test_get_project_member_emails_500_http_error(self):
+        self._test_http_error(
+            self.m_c.list_role_assignments,
+            "get_project_member_emails",
+            status_code=500,
+            call_count=3,
+            project_id="foo",
+        )
+
+    def test_get_volume(self):
+        self.m_c.get_volume_by_id = mock.MagicMock(
+            return_value=self.fake_volume
+        )
+        self.assertEqual(
+            self.openstack.get_volume(
+                uuid=self.fake_volume.id, project_id="bar"
+            ),
+            self.fake_volume,
+        )
+        self.m_c.get_volume_by_id.assert_called_once_with(self.fake_volume.id)
+
+    def test_get_volume_non_http_error(self):
+        self._test_non_http_error(
+            self.m_c.get_volume_by_id,
+            "get_volume",
+            uuid="foo",
+            project_id="bar",
+        )
+
+    def test_get_volume_404_http_error(self):
+        self._test_http_error(
+            self.m_c.get_volume_by_id,
+            "get_volume",
+            status_code=404,
+            uuid="foo",
+            project_id="bar",
+        )
+
+    def test_get_volume_500_http_error(self):
+        self._test_http_error(
+            self.m_c.get_volume_by_id,
+            "get_volume",
+            status_code=500,
+            uuid="foo",
+            project_id="bar",
+        )
+
+    def test_get_backup(self):
+        self.m_c.get_volume_backup = mock.MagicMock(
+            return_value=self.fake_backup
+        )
+        self.assertEqual(
+            self.openstack.get_backup(
+                uuid=self.fake_backup.id, project_id="bar"
+            ),
+            self.fake_backup,
+        )
+        self.m_c.get_volume_backup.assert_called_once_with(self.fake_backup.id)
+
+    def test_get_backup_not_found(self):
+        self.m_c.get_volume_backup = mock.MagicMock(
+            side_effect=openstack_exc.ResourceNotFound
+        )
+        self.assertEqual(
+            self.openstack.get_backup(
+                uuid=self.fake_backup.id, project_id="bar"
+            ),
+            None,
+        )
+        self.m_c.get_volume_backup.assert_called_once_with(self.fake_backup.id)
+
+    def test_get_backup_non_http_error(self):
+        self._test_non_http_error(
+            self.m_c.get_volume_backup,
+            "get_backup",
+            uuid="foo",
+            project_id="bar",
+        )
+
+    def test_get_backup_404_http_error(self):
+        self._test_http_error(
+            self.m_c.get_volume_backup,
+            "get_backup",
+            status_code=404,
+            uuid="foo",
+            project_id="bar",
+        )
+
+    def test_get_backup_500_http_error(self):
+        self._test_http_error(
+            self.m_c.get_volume_backup,
+            "get_backup",
+            status_code=500,
+            uuid="foo",
+            project_id="bar",
+        )
+
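+    # delete_backup is expected to return None on success; asserting
+    # force=False pins the wrapper's default of a non-forced delete.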
+    def test_delete_backup(self):
+        self.m_c.delete_volume_backup = mock.MagicMock(
+            return_value=self.fake_backup
+        )
+        self.assertEqual(
+            self.openstack.delete_backup(
+                uuid=self.fake_backup.id, project_id="bar"
+            ),
+            None,
+        )
+        self.m_c.delete_volume_backup.assert_called_once_with(
+            self.fake_backup.id, force=False
+        )
+
+    def test_delete_backup_not_found(self):
+        self.m_c.delete_volume_backup = mock.MagicMock(
+            side_effect=openstack_exc.ResourceNotFound
+        )
+        self.assertEqual(
+            self.openstack.delete_backup(
+                uuid=self.fake_backup.id, project_id="bar"
+            ),
+            None,
+        )
+        self.m_c.delete_volume_backup.assert_called_once_with(
+            self.fake_backup.id, force=False
+        )
+
+    def test_delete_backup_non_http_error(self):
+        self._test_non_http_error(
+            self.m_c.delete_volume_backup,
+            "delete_backup",
+            uuid="foo",
+            project_id="bar",
+        )
+
+    def test_delete_backup_404_http_error(self):
+        self._test_http_error(
+            self.m_c.delete_volume_backup,
+            "delete_backup",
+            status_code=404,
+            uuid="foo",
+            project_id="bar",
+        )
+
+    def test_delete_backup_500_http_error(self):
+        self._test_http_error(
+            self.m_c.delete_volume_backup,
+            "delete_backup",
+            status_code=500,
+            uuid="foo",
+            project_id="bar",
+        )
+
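+    # The quota tests below stub the SDK-internal ``proxy._json_response``
+    # and ``_get_and_munchify`` plumbing, so only the request path and the
+    # field pulled from the quota payload are exercised.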
+    @mock.patch("openstack.proxy._json_response")
+    def test_get_backup_quota(self, m_j_r):
+        self.m_c.block_storage.get = mock.MagicMock(status_code=200)
+        self.m_gam = mock.MagicMock()
+        self.m_c._get_and_munchify = self.m_gam
+        self.m_gam.return_value = mock.MagicMock(backups=[self.fake_backup.id])
+        self.assertEqual(
+            [self.fake_backup.id],
+            self.openstack.get_backup_quota(project_id="bar"),
+        )
+        self.m_c.block_storage.get.assert_called_once_with(
+            "/os-quota-sets/bar?usage=True"
+        )
+
+    def test_get_backup_quota_non_http_error(self):
+        self._test_non_http_error(
+            self.m_c.block_storage.get, "get_backup_quota", project_id="bar"
+        )
+
+    def test_get_backup_quota_404_http_error(self):
+        self._test_http_error(
+            self.m_c.block_storage.get,
+            "get_backup_quota",
+            status_code=404,
+            project_id="bar",
+        )
+
+    def test_get_backup_quota_500_http_error(self):
+        self._test_http_error(
+            self.m_c.block_storage.get,
+            "get_backup_quota",
+            status_code=500,
+            project_id="bar",
+        )
+
+    @mock.patch("openstack.proxy._json_response")
+    def test_get_backup_gigabytes_quota(self, m_j_r):
+        self.m_c.block_storage.get = mock.MagicMock(status_code=200)
+        self.m_gam = mock.MagicMock()
+        self.m_c._get_and_munchify = self.m_gam
+        self.m_gam.return_value = mock.MagicMock(
+            backup_gigabytes=[self.fake_backup.id]
+        )
+        self.assertEqual(
+            [self.fake_backup.id],
+            self.openstack.get_backup_gigabytes_quota(project_id="bar"),
+        )
+        self.m_c.block_storage.get.assert_called_once_with(
+            "/os-quota-sets/bar?usage=True"
+        )
+
+    def test_get_backup_gigabytes_quota_non_http_error(self):
+        self._test_non_http_error(
+            self.m_c.block_storage.get,
+            "get_backup_gigabytes_quota",
+            project_id="bar",
+        )
+
+    def test_get_backup_gigabytes_quota_404_http_error(self):
+        self._test_http_error(
+            self.m_c.block_storage.get,
+            "get_backup_gigabytes_quota",
+            status_code=404,
+            project_id="bar",
+        )
+
+    def test_get_backup_gigabytes_quota_500_http_error(self):
+        self._test_http_error(
+            self.m_c.block_storage.get,
+            "get_backup_gigabytes_quota",
+            status_code=500,
+            project_id="bar",
+        )
+
+    @mock.patch("openstack.proxy._json_response")
+    def test_get_volume_quotas(self, m_j_r):
+        self.m_c.block_storage.get = mock.MagicMock(status_code=200)
+        self.m_gam_return = mock.MagicMock()
+        self.m_gam = mock.MagicMock(return_value=self.m_gam_return)
+        self.m_c._get_and_munchify = self.m_gam
+        self.assertEqual(
+            self.m_gam_return,
+            self.openstack._get_volume_quotas(project_id="bar"),
+        )
+        self.m_c.block_storage.get.assert_called_once_with(
+            "/os-quota-sets/bar?usage=True"
+        )
+        self.m_gam.assert_called_once_with("quota_set", m_j_r())
+
+    @mock.patch("openstack.proxy._json_response")
+    def test_get_volume_quotas_no_usage(self, m_j_r):
+        self.m_c.block_storage.get = mock.MagicMock(status_code=200)
+        self.m_gam_return = mock.MagicMock()
+        self.m_gam = mock.MagicMock(return_value=self.m_gam_return)
+        self.m_c._get_and_munchify = self.m_gam
+        self.assertEqual(
+            self.m_gam_return,
+            self.openstack._get_volume_quotas(project_id="bar", usage=False),
+        )
+        self.m_c.block_storage.get.assert_called_once_with(
+            "/os-quota-sets/bar"
+        )
+        self.m_gam.assert_called_once_with("quota_set", m_j_r())
diff --git a/staffeln/tests/test_staffeln.py b/staffeln/tests/test_staffeln.py
index 6c7c5f3..5ef2a92 100755
--- a/staffeln/tests/test_staffeln.py
+++ b/staffeln/tests/test_staffeln.py
@@ -1,5 +1,4 @@
 # -*- coding: utf-8 -*-
-
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # a copy of the License at
@@ -11,13 +10,13 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
-
 """
 test_staffeln
 ----------------------------------
 
 Tests for `staffeln` module.
 """
+from __future__ import annotations
 
 from staffeln.tests import base
 
diff --git a/staffeln/version.py b/staffeln/version.py
index efe79df..b943573 100755
--- a/staffeln/version.py
+++ b/staffeln/version.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import pbr.version
 
 version_info = pbr.version.VersionInfo("staffeln")
diff --git a/tox.ini b/tox.ini
index 4812539..bdbf5da 100755
--- a/tox.ini
+++ b/tox.ini
@@ -27,7 +27,7 @@
 commands = stestr run --slowest {posargs}
 
 [testenv:pep8]
-commands =   
+commands =
     flake8
 
 [testenv:cover]