Merge pull request #129 from ricolin/add-staffeln-UT-CI
Add staffeln ut ci
Relates to #110
Reviewed-by: Mohammed Naser <mnaser@vexxhost.com>
Reviewed-by: Rico Lin <ricolin@ricolky.com>
diff --git a/.flake8 b/.flake8
new file mode 100644
index 0000000..6deafc2
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,2 @@
+[flake8]
+max-line-length = 120
diff --git a/.github/workflows/linters.yaml b/.github/workflows/linters.yaml
deleted file mode 100644
index 6745ffe..0000000
--- a/.github/workflows/linters.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-name: linters
-on: push
-
-jobs:
- super-lint:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v2
- - uses: github/super-linter@v4
- env:
- DEFAULT_BRANCH: main
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- VALIDATE_ALL_CODEBASE: true
- VALIDATE_DOCKERFILE_HADOLINT: false
- VALIDATE_PYTHON_MYPY: false
- VALIDATE_JSCPD: false
diff --git a/.gitignore b/.gitignore
index 32e35b0..1ae05e4 100755
--- a/.gitignore
+++ b/.gitignore
@@ -68,4 +68,4 @@
*.log
# envvar openrc file
-*openrc.sh
\ No newline at end of file
+*openrc.sh
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..4975271
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,30 @@
+repos:
+ - repo: https://github.com/compilerla/conventional-pre-commit
+ rev: v2.0.0
+ hooks:
+ - id: conventional-pre-commit
+ stages:
+ - commit-msg
+
+ - repo: https://github.com/psf/black
+ rev: 24.8.0
+ hooks:
+ - id: black
+
+ - repo: https://github.com/pycqa/flake8
+ rev: 5.0.4
+ hooks:
+ - id: flake8
+
+ - repo: https://github.com/pycqa/isort
+ rev: 5.13.2
+ hooks:
+ - id: isort
+
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.3.0
+ hooks:
+ - id: check-yaml
+ args: [--allow-multiple-documents]
+ - id: end-of-file-fixer
+ - id: trailing-whitespace
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 1c590d6..9055a03 100755
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -11,6 +11,7 @@
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from __future__ import annotations
import os
import sys
@@ -39,7 +40,7 @@
# openstackdocstheme options
openstackdocs_repo_name = "openstack/staffeln"
openstackdocs_bug_project = (
- "replace with the name of the project on Launchpad or the ID from Storyboard"
+ "replace with the name of the project on " "Launchpad or the ID from Storyboard"
)
openstackdocs_bug_tag = ""
diff --git a/hack/stack.sh b/hack/stack.sh
index 497b29a..4876c88 100755
--- a/hack/stack.sh
+++ b/hack/stack.sh
@@ -36,7 +36,7 @@
SWIFT_REPLICAS=1
enable_plugin neutron https://opendev.org/openstack/neutron
#swift
-enable_service s-proxy s-object s-container s-account
+enable_service s-proxy s-object s-container s-account
# Cinder
enable_service c-bak
[[post-config|/etc/neutron/neutron.conf]]
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
index 65d2460..d4b373e 100755
--- a/releasenotes/source/conf.py
+++ b/releasenotes/source/conf.py
@@ -11,7 +11,6 @@
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
# This file is execfile()d with the current directory set to its
# containing dir.
#
@@ -20,20 +19,18 @@
#
# All configuration values have a default; values that are commented out
# serve to show the default.
-
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
-
# -- General configuration ------------------------------------------------
-
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
-
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
+from __future__ import annotations
+
extensions = [
"openstackdocstheme",
"reno.sphinxext",
diff --git a/requirements.txt b/requirements.txt
index 3789929..140798f 100755
--- a/requirements.txt
+++ b/requirements.txt
@@ -20,5 +20,3 @@
tooz # Apache-2.0
sherlock>=0.4.1 # MIT
kubernetes # Apache-2.0
-# email
-# smtplib
diff --git a/setup.cfg b/setup.cfg
index 5466daa..53df199 100755
--- a/setup.cfg
+++ b/setup.cfg
@@ -17,6 +17,8 @@
Programming Language :: Python :: 3
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
+ Programming Language :: Python :: 3.9
+ Programming Language :: Python :: 3.10
Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: Implementation :: CPython
@@ -35,4 +37,4 @@
wsgi_scripts =
staffeln-api-wsgi = staffeln.api:app
staffeln.database.migration_backend =
- sqlalchemy = staffeln.db.sqlalchemy.migration
\ No newline at end of file
+ sqlalchemy = staffeln.db.sqlalchemy.migration
diff --git a/setup.py b/setup.py
index 0346ed3..673123b 100755
--- a/setup.py
+++ b/setup.py
@@ -12,8 +12,9 @@
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
+from __future__ import annotations
+
import setuptools
setuptools.setup(setup_requires=["pbr"], pbr=True)
diff --git a/staffeln/__init__.py b/staffeln/__init__.py
index 5612b0d..78fc3f2 100755
--- a/staffeln/__init__.py
+++ b/staffeln/__init__.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -11,6 +10,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+from __future__ import annotations
import pbr.version
diff --git a/staffeln/api/app.py b/staffeln/api/app.py
index bf1234e..8b54dad 100755
--- a/staffeln/api/app.py
+++ b/staffeln/api/app.py
@@ -1,5 +1,8 @@
+from __future__ import annotations
+
from flask import Flask, Response, request
from oslo_log import log
+
from staffeln import objects
from staffeln.common import context
@@ -22,8 +25,8 @@
backup = objects.Volume.get_backup_by_backup_id( # pylint: disable=E1120
context=ctx, backup_id=request.args["backup_id"]
)
- # backup_info is None when there is no entry of the backup id in backup_table.
- # So the backup should not be the automated backup.
+ # backup_info is None when there is no entry of the backup id in
+ # backup_table. So the backup should not be the automated backup.
if backup is None:
return Response(
"True",
diff --git a/staffeln/api/middleware/parsable_error.py b/staffeln/api/middleware/parsable_error.py
index 2b49f83..343c8c2 100755
--- a/staffeln/api/middleware/parsable_error.py
+++ b/staffeln/api/middleware/parsable_error.py
@@ -17,8 +17,10 @@
Based on pecan.middleware.errordocument
"""
+from __future__ import annotations
from oslo_serialization import jsonutils
+
from staffeln.i18n import _
diff --git a/staffeln/api/wsgi.py b/staffeln/api/wsgi.py
index bef4092..6965beb 100755
--- a/staffeln/api/wsgi.py
+++ b/staffeln/api/wsgi.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import app
if __name__ == "__main__":
diff --git a/staffeln/cmd/api.py b/staffeln/cmd/api.py
index a46656c..a175089 100755
--- a/staffeln/cmd/api.py
+++ b/staffeln/cmd/api.py
@@ -1,9 +1,13 @@
"""Starter script for Staffeln API service"""
+
+from __future__ import annotations
+
import os
import sys
-import staffeln.conf
from oslo_log import log as logging
+
+import staffeln.conf
from staffeln.api import app as api_app
from staffeln.common import service
from staffeln.i18n import _
diff --git a/staffeln/cmd/conductor.py b/staffeln/cmd/conductor.py
index f4c9579..eaec3cf 100755
--- a/staffeln/cmd/conductor.py
+++ b/staffeln/cmd/conductor.py
@@ -1,8 +1,11 @@
"""Starter script for the staffeln conductor service."""
+from __future__ import annotations
+
import cotyledon
-import staffeln.conf
from cotyledon import oslo_config_glue
+
+import staffeln.conf
from staffeln.common import service
from staffeln.conductor import manager
@@ -13,9 +16,15 @@
service.prepare_service()
sm = cotyledon.ServiceManager()
- sm.add(manager.BackupManager, workers=CONF.conductor.backup_workers, args=(CONF,))
sm.add(
- manager.RotationManager, workers=CONF.conductor.rotation_workers, args=(CONF,)
+ manager.BackupManager,
+ workers=CONF.conductor.backup_workers,
+ args=(CONF,),
+ )
+ sm.add(
+ manager.RotationManager,
+ workers=CONF.conductor.rotation_workers,
+ args=(CONF,),
)
oslo_config_glue.setup(sm, CONF)
sm.run()
diff --git a/staffeln/cmd/dbmanage.py b/staffeln/cmd/dbmanage.py
index d4706cf..bd6f01d 100644
--- a/staffeln/cmd/dbmanage.py
+++ b/staffeln/cmd/dbmanage.py
@@ -2,9 +2,12 @@
Run storage database migration.
"""
+from __future__ import annotations
+
import sys
from oslo_config import cfg
+
from staffeln import conf
from staffeln.common import service
from staffeln.db import migration
@@ -33,7 +36,10 @@
command_opt = cfg.SubCommandOpt(
- "command", title="Command", help="Available commands", handler=add_command_parsers
+ "command",
+ title="Command",
+ help="Available commands",
+ handler=add_command_parsers,
)
diff --git a/staffeln/common/auth.py b/staffeln/common/auth.py
index e23ef71..b64c515 100755
--- a/staffeln/common/auth.py
+++ b/staffeln/common/auth.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import openstack
diff --git a/staffeln/common/config.py b/staffeln/common/config.py
index f71a378..5b69fb4 100755
--- a/staffeln/common/config.py
+++ b/staffeln/common/config.py
@@ -1,4 +1,6 @@
# from staffeln.common import rpc
+from __future__ import annotations
+
import staffeln.conf
from staffeln import version
diff --git a/staffeln/common/constants.py b/staffeln/common/constants.py
index b7d6d09..d065966 100644
--- a/staffeln/common/constants.py
+++ b/staffeln/common/constants.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
BACKUP_INIT = 4
BACKUP_FAILED = 3
BACKUP_COMPLETED = 2
diff --git a/staffeln/common/context.py b/staffeln/common/context.py
index c6046e1..d789a58 100644
--- a/staffeln/common/context.py
+++ b/staffeln/common/context.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from oslo_context import context
from oslo_log import log
@@ -5,7 +7,11 @@
class RequestContext(context.RequestContext):
- """Added security context with request parameters from openstack common library"""
+ """Added security context
+
+ Added security context with request
+ parameters from openstack common library
+ """
def __init__(
self,
@@ -14,7 +20,7 @@
instance_id=None,
executed_at=None,
backup_status=None,
- **kwargs
+ **kwargs,
):
self.backup_id = backup_id
self.volume_id = volume_id
diff --git a/staffeln/common/email.py b/staffeln/common/email.py
index cf6e937..368028b 100644
--- a/staffeln/common/email.py
+++ b/staffeln/common/email.py
@@ -1,5 +1,7 @@
""" Email module with SMTP"""
+from __future__ import annotations
+
import smtplib
from email import utils
from email.header import Header
@@ -32,10 +34,12 @@
try:
smtp_obj = smtplib.SMTP(
- smtp_profile["smtp_server_domain"], smtp_profile["smtp_server_port"]
+ smtp_profile["smtp_server_domain"],
+ smtp_profile["smtp_server_port"],
)
smtp_obj.connect(
- smtp_profile["smtp_server_domain"], smtp_profile["smtp_server_port"]
+ smtp_profile["smtp_server_domain"],
+ smtp_profile["smtp_server_port"],
)
smtp_obj.ehlo()
smtp_obj.starttls()
@@ -43,7 +47,9 @@
# SMTP Login
smtp_obj.login(smtp_profile["src_email"], smtp_profile["src_pwd"])
smtp_obj.sendmail(
- smtp_profile["src_email"], smtp_profile["dest_email"], msg.as_string()
+ smtp_profile["src_email"],
+ smtp_profile["dest_email"],
+ msg.as_string(),
)
# Email Sent
except smtplib.SMTPException as error:
diff --git a/staffeln/common/lock.py b/staffeln/common/lock.py
index 4c05626..9f7db41 100644
--- a/staffeln/common/lock.py
+++ b/staffeln/common/lock.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import errno
import glob
import os
@@ -8,9 +10,10 @@
import sherlock
from oslo_log import log
-from staffeln import conf, exception
from tooz import coordination
+from staffeln import conf, exception
+
CONF = conf.CONF
LOG = log.getLogger(__name__)
@@ -146,7 +149,10 @@
"""
def __init__(
- self, expire: int = 3600, timeout: int = 10, namespace: str = "openstack"
+ self,
+ expire: int = 3600,
+ timeout: int = 10,
+ namespace: str = "openstack",
):
self.timeout = timeout
self.expire = expire
diff --git a/staffeln/common/openstack.py b/staffeln/common/openstack.py
index 9f412e8..710d112 100644
--- a/staffeln/common/openstack.py
+++ b/staffeln/common/openstack.py
@@ -1,5 +1,8 @@
+from __future__ import annotations
+
from openstack import exceptions, proxy
from oslo_log import log
+
from staffeln.common import auth
from staffeln.i18n import _
@@ -66,7 +69,9 @@
def get_servers(self, project_id=None, all_projects=True, details=True):
if project_id is not None:
return self.conn.compute.servers(
- details=details, all_projects=all_projects, project_id=project_id
+ details=details,
+ all_projects=all_projects,
+ project_id=project_id,
)
else:
return self.conn.compute.servers(details=details, all_projects=all_projects)
@@ -75,10 +80,6 @@
return self.conn.get_volume_by_id(uuid)
def get_backup(self, uuid, project_id=None):
- # return conn.block_storage.get_backup(
- # project_id=project_id, backup_id=uuid,
- # )
- # conn.block_storage.backups(volume_id=uuid,project_id=project_id)
try:
return self.conn.get_volume_backup(uuid)
except exceptions.ResourceNotFound:
@@ -93,9 +94,6 @@
name=None,
incremental=False,
):
- # return conn.block_storage.create_backup(
- # volume_id=queue.volume_id, force=True, project_id=queue.project_id, name="name"
- # )
return self.conn.create_volume_backup(
volume_id=volume_id,
force=force,
@@ -112,7 +110,8 @@
LOG.debug(f"Start deleting backup {uuid} in OpenStack.")
try:
self.conn.delete_volume_backup(uuid, force=force)
- # TODO(Alex): After delete the backup generator, need to set the volume status again
+ # TODO(Alex): After delete the backup generator,
+ # need to set the volume status again
except exceptions.ResourceNotFound:
return None
@@ -128,7 +127,8 @@
# rewrite openstasdk._block_storage.get_volume_quotas
# added usage flag
- # ref: https://docs.openstack.org/api-ref/block-storage/v3/?expanded=#show-quota-usage-for-a-project
+ # ref: https://docs.openstack.org/api-ref/block-storage/v3/?
+ # expanded=#show-quota-usage-for-a-project
def _get_volume_quotas(self, project_id, usage=True):
"""Get volume quotas for a project
diff --git a/staffeln/common/service.py b/staffeln/common/service.py
index d2ad7a5..98399f6 100755
--- a/staffeln/common/service.py
+++ b/staffeln/common/service.py
@@ -11,9 +11,11 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from __future__ import annotations
+
+from oslo_log import log as logging
import staffeln.conf
-from oslo_log import log as logging
from staffeln import objects
from staffeln.common import config
diff --git a/staffeln/common/short_id.py b/staffeln/common/short_id.py
index 18be04c..e182ad1 100755
--- a/staffeln/common/short_id.py
+++ b/staffeln/common/short_id.py
@@ -2,10 +2,13 @@
The IDs each comprise 12 (lower-case) alphanumeric characters.
"""
+from __future__ import annotations
+
import base64
import uuid
import six
+
from staffeln.i18n import _
diff --git a/staffeln/common/time.py b/staffeln/common/time.py
index 103096a..08fd5d1 100644
--- a/staffeln/common/time.py
+++ b/staffeln/common/time.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import re
from dateutil.relativedelta import relativedelta
@@ -6,8 +8,10 @@
DEFAULT_TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
regex = re.compile(
- r"((?P<years>\d+?)y)?((?P<months>\d+?)mon)?((?P<weeks>\d+?)w)?((?P<days>\d+?)d)?"
- r"((?P<hours>\d+?)h)?((?P<minutes>\d+?)min)?((?P<seconds>\d+?)s)?"
+ r"((?P<years>\d+?)y)?((?P<months>\d+?)mon)?"
+ r"((?P<weeks>\d+?)w)?((?P<days>\d+?)d)?"
+ r"((?P<hours>\d+?)h)?((?P<minutes>\d+?)min)?"
+ r"((?P<seconds>\d+?)s)?"
)
@@ -31,7 +35,7 @@
if empty_flag:
return None
return time_params
- except: # noqa: E722
+ except Exception: # noqa: E722
return None
@@ -45,7 +49,14 @@
def timeago(
- years=0, months=0, weeks=0, days=0, hours=0, minutes=0, seconds=0, from_date=None
+ years=0,
+ months=0,
+ weeks=0,
+ days=0,
+ hours=0,
+ minutes=0,
+ seconds=0,
+ from_date=None,
):
if from_date is None:
from_date = timeutils.utcnow()
diff --git a/staffeln/conductor/backup.py b/staffeln/conductor/backup.py
index 05f5cfd..760132c 100755
--- a/staffeln/conductor/backup.py
+++ b/staffeln/conductor/backup.py
@@ -1,12 +1,15 @@
+from __future__ import annotations
+
import collections
from datetime import timedelta, timezone
-import staffeln.conf
from openstack.exceptions import HttpException as OpenstackHttpException
from openstack.exceptions import ResourceNotFound as OpenstackResourceNotFound
from openstack.exceptions import SDKException as OpenstackSDKException
from oslo_log import log
from oslo_utils import timeutils
+
+import staffeln.conf
from staffeln import objects
from staffeln.common import constants, context, openstack
from staffeln.common import time as xtime
@@ -115,8 +118,7 @@
return queue
def create_queue(self, old_tasks):
- """
- Create the queue of all the volumes for backup
+ """Create the queue of all the volumes for backup
:param old_tasks: Task list not completed in the previous cycle
:type: List<Class objects.Queue>
@@ -129,7 +131,8 @@
for old_task in old_tasks:
old_task_volume_list.append(old_task.volume_id)
- # 2. add new tasks in the queue which are not existing in the old task list
+ # 2. add new tasks in the queue which are not existing in the old task
+ # list
task_list = self.check_instance_volumes()
for task in task_list:
if task.volume_id not in old_task_volume_list:
@@ -157,8 +160,8 @@
res = volume["status"] in ("available", "in-use")
if not res:
reason = _(
- "Volume %s is not triger new backup task because it is in %s status"
- % (volume_id, volume["status"])
+ "Volume %s does not trigger a new backup task because "
+ "it is in %s status" % (volume_id, volume["status"])
)
LOG.info(reason)
return reason
@@ -169,7 +172,7 @@
def purge_backups(self, project_id=None):
LOG.info(f"Start pruge backup tasks for project {project_id}")
- # TODO make all this in a single DB command
+ # We could consider combining all of these into a single DB command
success_tasks = self.get_queues(
filters={
"backup_status": constants.BACKUP_COMPLETED,
@@ -252,10 +255,8 @@
backup = self.openstacksdk.get_backup(backup_object.backup_id)
if backup is None:
LOG.info(
- _(
- f"Backup {backup_object.backup_id} is removed from "
- "Openstack or cinder-backup is not existing in the cloud."
- )
+ f"Backup {backup_object.backup_id} is removed from "
+ "Openstack or cinder-backup is not existing in the cloud."
)
return backup_object.delete_backup()
if backup["status"] in ("available"):
@@ -269,15 +270,13 @@
# backup_object.delete_backup()
else: # "deleting", "restoring"
LOG.info(
- _(
- "Rotation for the backup %s is skipped in this cycle "
- "because it is in %s status"
- )
- % (backup_object.backup_id, backup["status"])
+ f"Rotation for the backup {backup_object.backup_id} "
+ "is skipped in this cycle "
+ f"because it is in {backup['status']} status"
)
except OpenstackSDKException as e:
- LOG.warn(_(f"Backup {backup_object.backup_id} deletion failed. {str(e)}"))
+ LOG.warn(f"Backup {backup_object.backup_id} deletion failed. {str(e)}")
# We don't delete backup object if any exception occured
# backup_object.delete_backup()
return False
@@ -288,15 +287,14 @@
project_id = backup_object.project_id
if project_id not in self.project_list:
LOG.warn(
- _(
- f"Project {project_id} for backup "
- f"{backup_object.backup_id} is not existing in "
- "Openstack. Please check your access right to this project. "
- "Skip this backup from remove now and will retry later."
- )
+ f"Project {project_id} for backup "
+ f"{backup_object.backup_id} is not existing in "
+ "Openstack. Please check your access right to this "
+ "project. "
+ "Skip this backup from remove now and will retry later."
)
- # Don't remove backup object, keep it and retry on next periodic task
- # backup_object.delete_backup()
+ # Don't remove backup object, keep it and retry on next
+ # periodic task. (backup_object.delete_backup() intentionally skipped)
return
self.openstacksdk.set_project(self.project_list[project_id])
@@ -305,12 +303,9 @@
)
if backup is None:
LOG.info(
- _(
- "Backup %s is removed from Openstack "
- "or cinder-backup is not existing in the cloud. "
- "Start removing backup object from Staffeln."
- % backup_object.backup_id
- )
+ f"Backup {backup_object.backup_id} is removed from "
+ "Openstack or cinder-backup is not existing in the "
+ "cloud. Start removing backup object from Staffeln."
)
return backup_object.delete_backup()
@@ -318,19 +313,17 @@
# Don't remove backup until it's officially removed from Cinder
# backup_object.delete_backup()
except Exception as e:
- if skip_inc_err and "Incremental backups exist for this backup" in str(e):
+ if skip_inc_err and ("Incremental backups exist for this backup" in str(e)):
LOG.debug(str(e))
else:
LOG.info(
- _(
- f"Backup {backup_object.backup_id} deletion failed. "
- "Skip this backup from remove now and will retry later."
- )
+ f"Backup {backup_object.backup_id} deletion failed. "
+ "Skip this backup from remove now and will retry later."
)
LOG.debug(f"deletion failed {str(e)}")
- # Don't remove backup object, keep it and retry on next periodic task
- # backup_object.delete_backup()
+ # Don't remove backup object, keep it and retry on next
+ # periodic task. (backup_object.delete_backup() intentionally skipped)
def update_project_list(self):
projects = self.openstacksdk.get_projects()
@@ -338,8 +331,7 @@
self.project_list[project.id] = project
def _is_backup_required(self, volume_id):
- """
- Decide if the backup required based on the backup history
+ """Decide if the backup required based on the backup history
If there is any backup created during certain time,
will not trigger new backup request.
@@ -376,8 +368,7 @@
return True
def _is_incremental(self, volume_id):
- """
- Decide the backup method based on the backup history
+ """Decide the backup method based on the backup history
It queries to select the last N backups from backup table and
decide backup type as full if there is no full backup.
@@ -406,16 +397,12 @@
return True
except Exception as e:
LOG.debug(
- _(
- "Failed to get backup history to decide backup method. Reason: %s"
- % str(e)
- )
+ "Failed to get backup history to decide backup " f"method. Reason: {e}"
)
return False
def check_instance_volumes(self):
- """
- Retrieves volume list to backup
+ """Retrieves volume list to backup
Get the list of all the volumes from the project using openstacksdk.
Function first list all the servers in the project and get the volumes
@@ -433,10 +420,8 @@
servers = self.openstacksdk.get_servers(project_id=project.id)
except OpenstackHttpException as ex:
LOG.warn(
- _(
- "Failed to list servers in project %s. %s"
- % (project.id, str(ex))
- )
+ f"Failed to list servers in project {project.id}. "
+ f"{str(ex)} (status code: {ex.status_code})."
)
continue
for server in servers:
@@ -501,8 +486,12 @@
try:
servers = self.openstacksdk.get_servers(all_projects=True)
- except OpenstackHttpException:
- LOG.warn(_("Failed to list servers for all projects."))
+ except OpenstackHttpException as ex:
+ servers = []
+ LOG.warn(
+ f"Failed to list servers for all projects. "
+ f"{str(ex)} (status code: {ex.status_code})."
+ )
for server in servers:
if CONF.conductor.retention_metadata_key in server.metadata:
@@ -511,21 +500,22 @@
].lower()
if xtime.regex.fullmatch(server_retention_time):
LOG.debug(
- f"Found retention time ({server_retention_time}) defined for "
- f"server {server.id}, Adding it retention reference map."
+ f"Found retention time ({server_retention_time}) "
+ f"defined for server {server.id}, "
+ "Adding it retention reference map."
)
retention_map[server.id] = server_retention_time
else:
LOG.info(
- f"Server retention time for instance {server.id} is incorrect. "
- "Please follow '<YEARS>y<MONTHS>m<WEEKS>w<DAYS>d<HOURS>"
+ f"Server retention time for instance {server.id} is "
+ "incorrect. Please follow "
+ "'<YEARS>y<MONTHS>m<WEEKS>w<DAYS>d<HOURS>"
"h<MINUTES>min<SECONDS>s' format."
)
return retention_map
def _volume_queue(self, task):
- """
- Commits one backup task to queue table
+ """Commits one backup task to queue table
:param task: One backup task
:type: QueueMapping
@@ -539,7 +529,8 @@
volume_queue.instance_name = task.instance_name
volume_queue.volume_name = task.volume_name
# NOTE(Oleks): Backup mode is inherited from backup service.
- # Need to keep and navigate backup mode history, to decide a different mode per volume
+ # Need to keep and navigate backup mode history, to decide a different
+ # mode per volume
volume_queue.incremental = task.incremental
backup_method = "Incremental" if task.incremental else "Full"
@@ -553,6 +544,7 @@
def create_volume_backup(self, task):
"""Initiate the backup of the volume
+
:param task: Provide the map of the volume that needs
backup.
This function will call the backupup api and change the
@@ -582,8 +574,13 @@
backup_method = "Incremental" if task.incremental else "Full"
LOG.info(
_(
- ("%s Backup (name: %s) for volume %s creating in project %s")
- % (backup_method, backup_name, task.volume_id, project_id)
+ ("%s Backup (name: %s) for volume %s creating " "in project %s")
+ % (
+ backup_method,
+ backup_name,
+ task.volume_id,
+ project_id,
+ )
)
)
volume_backup = self.openstacksdk.create_backup(
@@ -599,24 +596,25 @@
inc_err_msg = "No backups available to do an incremental backup"
if inc_err_msg in str(error):
LOG.info(
- "Retry to create full backup for volume %s instead of incremental."
- % task.volume_id
+ "Retry to create full backup for volume %s instead of "
+ "incremental." % task.volume_id
)
task.incremental = False
task.save()
else:
reason = _(
- "Backup (name: %s) creation for the volume %s failled. %s"
- % (backup_name, task.volume_id, str(error)[:64])
+ "Backup (name: %s) creation for the volume %s "
"failed. %s" % (backup_name, task.volume_id, str(error)[:64])
)
LOG.warn(
- "Backup (name: %s) creation for the volume %s failled. %s"
- % (backup_name, task.volume_id, str(error))
+ "Backup (name: %s) creation for the volume %s "
"failed. %s" % (backup_name, task.volume_id, str(error))
)
task.reason = reason
task.backup_status = constants.BACKUP_FAILED
task.save()
- # Added extra exception as OpenstackSDKException does not handle the keystone unauthourized issue.
+ # Added extra exception as OpenstackSDKException does not handle
+ # the keystone unauthorized issue.
except Exception as error:
reason = _(
"Backup (name: %s) creation for the volume %s failled. %s"
@@ -647,7 +645,7 @@
def process_failed_backup(self, task):
# 1. notify via email
- reason = _("The status of backup for the volume %s is error." % task.volume_id)
+ reason = f"The status of backup for the volume {task.volume_id} is error."
LOG.warn(reason)
# 2. delete backup generator
try:
@@ -656,8 +654,8 @@
except OpenstackHttpException as ex:
LOG.warn(
_(
- "Failed to delete volume backup %s. %s. Need to delete manually."
- % (task.backup_id, str(ex))
+ "Failed to delete volume backup %s. %s. Need "
+ "to delete manually." % (task.backup_id, str(ex))
)
)
task.reason = reason
@@ -690,6 +688,7 @@
def check_volume_backup_status(self, queue):
"""Checks the backup status of the volume
+
:params: queue: Provide the map of the volume that needs backup
status checked.
Call the backups api to see if the backup is successful.
@@ -724,7 +723,8 @@
def _volume_backup(self, task):
# matching_backups = [
- # g for g in self.available_backups if g.backup_id == task.backup_id
+ # g for g in self.available_backups
+ # if g.backup_id == task.backup_id
# ]
# if not matching_backups:
volume_backup = objects.Volume(self.ctx)
diff --git a/staffeln/conductor/manager.py b/staffeln/conductor/manager.py
index 380c2f7..c43e13b 100755
--- a/staffeln/conductor/manager.py
+++ b/staffeln/conductor/manager.py
@@ -1,12 +1,15 @@
+from __future__ import annotations
+
import threading
import time
from datetime import timedelta, timezone
import cotyledon
-import staffeln.conf
from futurist import periodics
from oslo_log import log
from oslo_utils import timeutils
+
+import staffeln.conf
from staffeln import objects
from staffeln.common import constants, context, lock
from staffeln.common import time as xtime
@@ -58,7 +61,8 @@
LOG.info(_("cycle timein"))
for queue in queues_started:
LOG.debug(
- f"try to get lock and run task for volume: {queue.volume_id}."
+ "try to get lock and run task for volume: "
+ f"{queue.volume_id}."
)
with lock.Lock(
self.lock_mgt, queue.volume_id, remove_lock=True
@@ -82,7 +86,8 @@
LOG.info(
_(
"Recycle timeout format is invalid. "
- "Follow <YEARS>y<MONTHS>m<WEEKS>w<DAYS>d<HOURS>h<MINUTES>min<SECONDS>s."
+ "Follow <YEARS>y<MONTHS>m<WEEKS>w<DAYS>d<HOURS>h<MINUTES>"
+ "min<SECONDS>s."
)
)
time_delta_dict = xtime.parse_timedelta_string(
@@ -230,14 +235,18 @@
if backup_age > retention_time:
# Backup remain longer than retention, need to purge it.
LOG.debug(
- f"Found potential volume backup for retention: Backup ID: {backup.backup_id} "
- f"with backup age: {backup_age} (Target retention time: {retention_time})."
+ "Found potential volume backup for retention: Backup "
+ f"ID: {backup.backup_id} "
+ f"with backup age: {backup_age} (Target retention "
+ f"time: {retention_time})."
)
return True
elif now - self.threshold_strtime < backup_age:
LOG.debug(
- f"Found potential volume backup for retention: Backup ID: {backup.backup_id} "
- f"with backup age: {backup_age} (Default retention time: {self.threshold_strtime})."
+ "Found potential volume backup for retention: "
+ f"Backup ID: {backup.backup_id} "
+ f"with backup age: {backup_age} (Default retention "
+ f"time: {self.threshold_strtime})."
)
return True
return False
@@ -263,9 +272,8 @@
)
# No way to judge retention
- if (
- self.threshold_strtime is None
- and not self.instance_retention_map
+ if self.threshold_strtime is None and (
+ not self.instance_retention_map
):
return
backup_instance_map = {}
@@ -274,17 +282,19 @@
self.controller.update_project_list()
for backup in self.get_backup_list():
- # Create backup instance map for later sorted by created_at.
- # This can be use as base of judgement on delete a backup.
- # The reason we need such list is because backup have
- # dependency with each other after we enable incremental backup.
+ # Create backup instance map for later sorted by
+ # created_at. This can be use as base of judgement
+ # on delete a backup. The reason we need such list
+ # is because backup have dependency with each other
+ # after we enable incremental backup.
# So we need to have information to judge on.
if backup.instance_id in backup_instance_map:
backup_instance_map[backup.instance_id].append(backup)
else:
backup_instance_map[backup.instance_id] = [backup]
- # Sort backup instance map and use it to check backup create time and order.
+ # Sort backup instance map and use it to check backup
+ # create time and order.
for instance_id in backup_instance_map:
sorted_backup_list = sorted(
backup_instance_map[instance_id],
@@ -294,9 +304,11 @@
for backup in sorted_backup_list:
if self.is_retention(backup):
LOG.debug(
- f"Retention: Try to remove volume backup {backup.backup_id}"
+ "Retention: Try to remove volume backup "
+ f"{backup.backup_id}"
)
- # Try to delete and skip any incremental exist error.
+ # Try to delete and skip any incremental
+ # exist error.
self.controller.hard_remove_volume_backup(
backup, skip_inc_err=True
)
@@ -319,7 +331,8 @@
LOG.info(
_(
"Retention time format is invalid. "
- "Follow <YEARS>y<MONTHS>m<WEEKS>w<DAYS>d<HOURS>h<MINUTES>min<SECONDS>s."
+ "Follow <YEARS>y<MONTHS>m<WEEKS>w<DAYS>d<HOURS>h"
+ "<MINUTES>min<SECONDS>s."
)
)
return None
diff --git a/staffeln/conductor/result.py b/staffeln/conductor/result.py
index 34d9660..6bc24f5 100644
--- a/staffeln/conductor/result.py
+++ b/staffeln/conductor/result.py
@@ -1,12 +1,14 @@
# Email notification package
# This should be upgraded by integrating with mail server to send batch
-import staffeln.conf
+from __future__ import annotations
+
from oslo_log import log
from oslo_utils import timeutils
+
+import staffeln.conf
from staffeln import objects
from staffeln.common import constants, email
from staffeln.common import time as xtime
-from staffeln.i18n import _
CONF = staffeln.conf.CONF
LOG = log.getLogger(__name__)
@@ -42,13 +44,15 @@
)
if not receiver:
LOG.warn(
- f"No email can be found from members of project {project_id}. "
+ "No email can be found from members of project "
+ f"{project_id}. "
"Skip report now and will try to report later."
)
return False
except Exception as ex:
LOG.warn(
- f"Failed to fetch emails from project members with exception: {str(ex)} "
+ "Failed to fetch emails from project members with "
+ f"exception: {str(ex)} "
"As also no receiver email or project receiver domain are "
"configured. Will try to report later."
)
@@ -68,14 +72,12 @@
"smtp_server_port": CONF.notification.smtp_server_port,
}
email.send(smtp_profile)
- LOG.info(_(f"Backup result email sent to {receiver}"))
+ LOG.info(f"Backup result email sent to {receiver}")
return True
except Exception as e:
LOG.warn(
- _(
- f"Backup result email send to {receiver} failed. "
- f"Please check email configuration. {str(e)}"
- )
+ f"Backup result email send to {receiver} failed. "
+ f"Please check email configuration. {str(e)}"
)
raise
@@ -122,8 +124,10 @@
success_volumes = "<br>".join(
[
(
- f"Volume ID: {str(e.volume_id)}, Backup ID: {str(e.backup_id)}, "
- f"Backup mode: {'Incremental' if e.incremental else 'Full'}, "
+ f"Volume ID: {str(e.volume_id)}, "
+ f"Backup ID: {str(e.backup_id)}, "
+ "Backup mode: "
+ f"{'Incremental' if e.incremental else 'Full'}, "
f"Created at: {str(e.created_at)}, Last updated at: "
f"{str(e.updated_at)}"
)
@@ -136,7 +140,8 @@
failed_volumes = "<br>".join(
[
(
- f"Volume ID: {str(e.volume_id)}, Reason: {str(e.reason)}, "
+ f"Volume ID: {str(e.volume_id)}, "
+ f"Reason: {str(e.reason)}, "
f"Created at: {str(e.created_at)}, Last updated at: "
f"{str(e.updated_at)}"
)
@@ -148,8 +153,10 @@
html += (
f"<h3>Project: {project_name} (ID: {project_id})</h3>"
"<h3>Quota Usage (Backup Gigabytes)</h3>"
- f"<FONT COLOR={quota_color}><h4>Limit: {str(quota['limit'])} GB, In Use: "
- f"{str(quota['in_use'])} GB, Reserved: {str(quota['reserved'])} GB, Total "
+ f"<FONT COLOR={quota_color}><h4>Limit: {str(quota['limit'])} "
+ "GB, In Use: "
+ f"{str(quota['in_use'])} GB, Reserved: {str(quota['reserved'])} "
+ "GB, Total "
f"rate: {str(quota_usage)}</h4></FONT>"
"<h3>Success List</h3>"
f"<FONT COLOR=GREEN><h4>{success_volumes}</h4></FONT><br>"
@@ -165,5 +172,4 @@
# Record success report
self.create_report_record()
return True
- else:
- return False
+ return False
diff --git a/staffeln/conf/__init__.py b/staffeln/conf/__init__.py
index 3289b63..76c247e 100755
--- a/staffeln/conf/__init__.py
+++ b/staffeln/conf/__init__.py
@@ -1,4 +1,7 @@
+from __future__ import annotations
+
from oslo_config import cfg
+
from staffeln.conf import api, conductor, database, notify, paths
CONF = cfg.CONF
diff --git a/staffeln/conf/api.py b/staffeln/conf/api.py
index e405d6a..4f848eb 100755
--- a/staffeln/conf/api.py
+++ b/staffeln/conf/api.py
@@ -1,4 +1,7 @@
+from __future__ import annotations
+
from oslo_config import cfg
+
from staffeln.i18n import _
api_group = cfg.OptGroup(
diff --git a/staffeln/conf/conductor.py b/staffeln/conf/conductor.py
index ab8e258..86407cc 100755
--- a/staffeln/conf/conductor.py
+++ b/staffeln/conf/conductor.py
@@ -1,4 +1,7 @@
+from __future__ import annotations
+
from oslo_config import cfg
+
from staffeln.common import constants
from staffeln.i18n import _
@@ -43,7 +46,8 @@
"backup_cycle_timout",
regex=(
r"((?P<years>\d+?)y)?((?P<months>\d+?)mon)?((?P<weeks>\d+?)w)?"
- r"((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)min)?((?P<seconds>\d+?)s)?"
+ r"((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)min)?"
+ r"((?P<seconds>\d+?)s)?"
),
default=constants.DEFAULT_BACKUP_CYCLE_TIMEOUT,
help=_(
@@ -58,7 +62,8 @@
cfg.StrOpt(
"retention_metadata_key",
help=_(
- "The key string of metadata the VM, which use as backup retention period."
+ "The key string of metadata the VM, which use as backup retention "
+ "period."
),
),
cfg.IntOpt(
@@ -96,7 +101,8 @@
"retention_time",
regex=(
r"((?P<years>\d+?)y)?((?P<months>\d+?)mon)?((?P<weeks>\d+?)w)?"
- r"((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)min)?((?P<seconds>\d+?)s)?"
+ r"((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)min)?"
+ r"((?P<seconds>\d+?)s)?"
),
default="2w3d",
help=_(
@@ -110,13 +116,17 @@
coordination_group = cfg.OptGroup(
"coordination",
title="Coordination Options",
- help=_("Options under this group are used to define Coordination's configuration."),
+ help=_(
+ "Options under this group are used to define Coordination's " "configuration."
+ ),
)
coordination_opts = [
cfg.StrOpt(
- "backend_url", default="", help=_("lock coordination connection backend URL.")
+ "backend_url",
+ default="",
+ help=_("lock coordination connection backend URL."),
),
]
diff --git a/staffeln/conf/database.py b/staffeln/conf/database.py
index 761aa15..aa65873 100644
--- a/staffeln/conf/database.py
+++ b/staffeln/conf/database.py
@@ -1,5 +1,8 @@
+from __future__ import annotations
+
from oslo_config import cfg
from oslo_db import options as oslo_db_options
+
from staffeln.conf import paths
from staffeln.i18n import _
diff --git a/staffeln/conf/notify.py b/staffeln/conf/notify.py
index 21c67e8..c0834b1 100644
--- a/staffeln/conf/notify.py
+++ b/staffeln/conf/notify.py
@@ -1,4 +1,7 @@
+from __future__ import annotations
+
from oslo_config import cfg
+
from staffeln.i18n import _
notify_group = cfg.OptGroup(
@@ -32,7 +35,8 @@
"The user name to authenticate with."
),
),
- # We can remove the sender password as we are using postfix to send mail and we won't be authenticating.
+ # We can remove the sender password as we are using postfix to send
+ # mail and we won't be authenticating.
cfg.StrOpt(
"sender_pwd",
help=_(
diff --git a/staffeln/conf/paths.py b/staffeln/conf/paths.py
index 7dbd9a1..08cf205 100644
--- a/staffeln/conf/paths.py
+++ b/staffeln/conf/paths.py
@@ -1,6 +1,9 @@
+from __future__ import annotations
+
import os
from oslo_config import cfg
+
from staffeln.i18n import _
PATH_OPTS = [
diff --git a/staffeln/db/api.py b/staffeln/db/api.py
index 2d10a05..5f6d2f8 100644
--- a/staffeln/db/api.py
+++ b/staffeln/db/api.py
@@ -1,4 +1,7 @@
"""Base classes for storage engines"""
+
+from __future__ import annotations
+
from oslo_config import cfg
from oslo_db import api as db_api
diff --git a/staffeln/db/base.py b/staffeln/db/base.py
index de0d7c3..ad303d1 100755
--- a/staffeln/db/base.py
+++ b/staffeln/db/base.py
@@ -1,5 +1,7 @@
"""Database setup and migration commands."""
+from __future__ import annotations
+
class base:
def __init__(self):
diff --git a/staffeln/db/migration.py b/staffeln/db/migration.py
index 113116e..0d6eea9 100644
--- a/staffeln/db/migration.py
+++ b/staffeln/db/migration.py
@@ -1,7 +1,11 @@
"""Database setup command"""
-import staffeln.conf
+
+from __future__ import annotations
+
from stevedore import driver
+import staffeln.conf
+
CONF = staffeln.conf.CONF
_IMPL = None
diff --git a/staffeln/db/sqlalchemy/alembic/env.py b/staffeln/db/sqlalchemy/alembic/env.py
index 71461fe..18b6ee4 100644
--- a/staffeln/db/sqlalchemy/alembic/env.py
+++ b/staffeln/db/sqlalchemy/alembic/env.py
@@ -9,10 +9,12 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+from __future__ import annotations
from logging import config as log_config
from alembic import context
+
from staffeln.db.sqlalchemy import api as sqla_api
from staffeln.db.sqlalchemy import models
diff --git a/staffeln/db/sqlalchemy/alembic/versions/041d9a0f1159_backup_add_names.py b/staffeln/db/sqlalchemy/alembic/versions/041d9a0f1159_backup_add_names.py
index a16f27c..492009c 100644
--- a/staffeln/db/sqlalchemy/alembic/versions/041d9a0f1159_backup_add_names.py
+++ b/staffeln/db/sqlalchemy/alembic/versions/041d9a0f1159_backup_add_names.py
@@ -7,6 +7,8 @@
"""
# revision identifiers, used by Alembic.
+from __future__ import annotations
+
revision = "041d9a0f1159"
down_revision = ""
@@ -16,8 +18,10 @@
def upgrade():
op.add_column(
- "queue_data", sa.Column("volume_name", sa.String(length=100), nullable=True)
+ "queue_data",
+ sa.Column("volume_name", sa.String(length=100), nullable=True),
)
op.add_column(
- "queue_data", sa.Column("instance_name", sa.String(length=100), nullable=True)
+ "queue_data",
+ sa.Column("instance_name", sa.String(length=100), nullable=True),
)
diff --git a/staffeln/db/sqlalchemy/alembic/versions/2b2b9df199bd_add_reason_column_to_queue_data_table.py b/staffeln/db/sqlalchemy/alembic/versions/2b2b9df199bd_add_reason_column_to_queue_data_table.py
index f78c91d..5f87464 100644
--- a/staffeln/db/sqlalchemy/alembic/versions/2b2b9df199bd_add_reason_column_to_queue_data_table.py
+++ b/staffeln/db/sqlalchemy/alembic/versions/2b2b9df199bd_add_reason_column_to_queue_data_table.py
@@ -7,6 +7,8 @@
"""
# revision identifiers, used by Alembic.
+from __future__ import annotations
+
revision = "2b2b9df199bd"
down_revision = "ebdbed01e9a7"
diff --git a/staffeln/db/sqlalchemy/alembic/versions/5b2e78435231_add_report_timestamp.py b/staffeln/db/sqlalchemy/alembic/versions/5b2e78435231_add_report_timestamp.py
index 1abed60..20605ee 100644
--- a/staffeln/db/sqlalchemy/alembic/versions/5b2e78435231_add_report_timestamp.py
+++ b/staffeln/db/sqlalchemy/alembic/versions/5b2e78435231_add_report_timestamp.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import sqlalchemy as sa
from alembic import op
from oslo_log import log
@@ -21,7 +23,11 @@
op.create_table(
"report_timestamp",
sa.Column(
- "id", sa.String(36), primary_key=True, nullable=False, autoincrement=True
+ "id",
+ sa.String(36),
+ primary_key=True,
+ nullable=False,
+ autoincrement=True,
),
sa.Column("created_at", sa.DateTime),
sa.Column("updated_at", sa.DateTime),
diff --git a/staffeln/db/sqlalchemy/alembic/versions/ebdbed01e9a7_added_incremental_field.py b/staffeln/db/sqlalchemy/alembic/versions/ebdbed01e9a7_added_incremental_field.py
index b2ed161..8dccd8b 100644
--- a/staffeln/db/sqlalchemy/alembic/versions/ebdbed01e9a7_added_incremental_field.py
+++ b/staffeln/db/sqlalchemy/alembic/versions/ebdbed01e9a7_added_incremental_field.py
@@ -7,6 +7,8 @@
"""
# revision identifiers, used by Alembic.
+from __future__ import annotations
+
revision = "ebdbed01e9a7"
down_revision = "041d9a0f1159"
diff --git a/staffeln/db/sqlalchemy/api.py b/staffeln/db/sqlalchemy/api.py
index 3cda5e7..b27d0d1 100644
--- a/staffeln/db/sqlalchemy/api.py
+++ b/staffeln/db/sqlalchemy/api.py
@@ -1,5 +1,7 @@
"""SQLAlchemy storage backend."""
+from __future__ import annotations
+
import datetime
import operator
@@ -11,6 +13,7 @@
from oslo_utils import strutils, timeutils, uuidutils
from sqlalchemy.inspection import inspect
from sqlalchemy.orm import exc
+
from staffeln.common import short_id
from staffeln.db.sqlalchemy import models
@@ -54,6 +57,7 @@
def add_identity_filter(query, value):
"""Adds an identity filter to a query.
+
Filters results by ID, if supplied value is a valid integer.
Otherwise attempts to filter results by backup_id.
:param query: Initial query to add filter to.
@@ -161,6 +165,7 @@
def _add_filters(self, query, model, filters=None, plain_fields=None):
"""Add filters while listing the columns from database table"""
+
# timestamp_mixin_fields = ["created_at", "updated_at"]
filters = filters or {}
@@ -180,7 +185,7 @@
if (
fieldname != "deleted"
and value
- and field.type.python_type is datetime.datetime
+ and (field.type.python_type is datetime.datetime)
):
if not isinstance(value, datetime.datetime):
value = timeutils.parse_isotime(value)
@@ -288,7 +293,7 @@
try:
return self._update(models.Backup_data, backup_id, values)
- except: # noqa: E722
+ except Exception:
LOG.error("backup resource not found.")
def create_queue(self, values):
@@ -310,7 +315,7 @@
try:
return self._update(models.Queue_data, id, values)
- except: # noqa: E722
+ except Exception:
LOG.error("Queue resource not found.")
def get_queue_by_id(self, context, id):
@@ -323,15 +328,18 @@
try:
return self._get(
- context, model=models.Queue_data, fieldname=fieldname, value=value
+ context,
+ model=models.Queue_data,
+ fieldname=fieldname,
+ value=value,
)
- except: # noqa: E722
+ except Exception:
LOG.error("Queue not found")
def soft_delete_queue(self, id):
try:
return self._soft_delete(models.Queue_data, id)
- except: # noqa: E722
+ except Exception:
LOG.error("Queue Not found.")
def get_backup_by_backup_id(self, context, backup_id):
@@ -339,7 +347,7 @@
try:
return self._get_backup(context, fieldname="backup_id", value=backup_id)
- except: # noqa: E722
+ except Exception:
LOG.error("Backup not found with backup_id %s." % backup_id)
def _get_backup(self, context, fieldname, value):
@@ -347,15 +355,18 @@
try:
return self._get(
- context, model=models.Backup_data, fieldname=fieldname, value=value
+ context,
+ model=models.Backup_data,
+ fieldname=fieldname,
+ value=value,
)
- except: # noqa: E722
+ except Exception:
LOG.error("Backup resource not found.")
def soft_delete_backup(self, id):
try:
return self._soft_delete(models.Backup_data, id)
- except: # noqa: E722
+ except Exception:
LOG.error("Backup Not found.")
def get_report_timestamp_list(self, *args, **kwargs):
@@ -374,11 +385,11 @@
try:
return self._update(models.Report_timestamp, id, values)
- except: # noqa: E722
+ except Exception:
LOG.error("Report Timestamp resource not found.")
def soft_delete_report_timestamp(self, id):
try:
return self._soft_delete(models.Report_timestamp, id)
- except: # noqa: E722
+ except Exception:
LOG.error("Report Timestamp Not found.")
diff --git a/staffeln/db/sqlalchemy/migration.py b/staffeln/db/sqlalchemy/migration.py
index 3a34c2b..e50d757 100644
--- a/staffeln/db/sqlalchemy/migration.py
+++ b/staffeln/db/sqlalchemy/migration.py
@@ -1,7 +1,10 @@
+from __future__ import annotations
+
import os
-import staffeln.conf
from oslo_db.sqlalchemy.migration_cli import manager
+
+import staffeln.conf
from staffeln.db.sqlalchemy import api as sqla_api
from staffeln.db.sqlalchemy import models
diff --git a/staffeln/db/sqlalchemy/models.py b/staffeln/db/sqlalchemy/models.py
index c186ddc..a0e3815 100644
--- a/staffeln/db/sqlalchemy/models.py
+++ b/staffeln/db/sqlalchemy/models.py
@@ -1,11 +1,15 @@
"""
SQLAlchemy models for staffeln service
"""
+
+from __future__ import annotations
+
import urllib.parse as urlparse
from oslo_db.sqlalchemy import models
from sqlalchemy import Boolean, Column, Integer, String, UniqueConstraint
from sqlalchemy.ext.declarative import declarative_base
+
from staffeln import conf
CONF = conf.CONF
@@ -14,7 +18,10 @@
def table_args():
engine_name = urlparse.urlparse(CONF.database.connection).scheme
if engine_name == "mysql":
- return {"mysql_engine": CONF.database.mysql_engine, "mysql_charset": "utf8"}
+ return {
+ "mysql_engine": CONF.database.mysql_engine,
+ "mysql_charset": "utf8",
+ }
return None
diff --git a/staffeln/exception.py b/staffeln/exception.py
index e561506..e1caceb 100644
--- a/staffeln/exception.py
+++ b/staffeln/exception.py
@@ -13,10 +13,10 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
"""Staffeln base exception handling."""
+from __future__ import annotations
-from typing import Optional, Union # noqa: H301
+from typing import Optional, Union
from oslo_log import log as logging
diff --git a/staffeln/i18n.py b/staffeln/i18n.py
index 09fe8aa..cbef6e5 100755
--- a/staffeln/i18n.py
+++ b/staffeln/i18n.py
@@ -2,6 +2,8 @@
See http://docs.openstack.org/developer/oslo.i18n/usage.html .
"""
+from __future__ import annotations
+
import oslo_i18n
DOMAIN = "staffeln"
diff --git a/staffeln/objects/__init__.py b/staffeln/objects/__init__.py
index 2af8df0..defa200 100755
--- a/staffeln/objects/__init__.py
+++ b/staffeln/objects/__init__.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from .queue import Queue # noqa: F401
from .report import ReportTimestamp # noqa: F401
from .volume import Volume # noqa: F401
diff --git a/staffeln/objects/base.py b/staffeln/objects/base.py
index 8dd6f94..7f3cb85 100755
--- a/staffeln/objects/base.py
+++ b/staffeln/objects/base.py
@@ -1,8 +1,11 @@
"""Staffeln common internal object model"""
+from __future__ import annotations
+
from oslo_utils import versionutils
from oslo_versionedobjects import base as ovoo_base
from oslo_versionedobjects import fields as ovoo_fields
+
from staffeln import objects
remotable_classmethod = ovoo_base.remotable_classmethod
@@ -50,7 +53,7 @@
def obj_refresh(self, loaded_object):
fields = (field for field in self.fields if field not in self.object_fields)
for field in fields:
- if self.obj_attr_is_set(field) and self[field] != loaded_object[field]:
+ if self.obj_attr_is_set(field) and (self[field] != loaded_object[field]):
self[field] = loaded_object[field]
@staticmethod
diff --git a/staffeln/objects/fields.py b/staffeln/objects/fields.py
index 3f6c2a7..95ed59c 100644
--- a/staffeln/objects/fields.py
+++ b/staffeln/objects/fields.py
@@ -1,4 +1,7 @@
"""Utility method for objects"""
+
+from __future__ import annotations
+
from oslo_serialization import jsonutils
from oslo_versionedobjects import fields
diff --git a/staffeln/objects/queue.py b/staffeln/objects/queue.py
index db49c21..b80c2d8 100644
--- a/staffeln/objects/queue.py
+++ b/staffeln/objects/queue.py
@@ -1,4 +1,7 @@
+from __future__ import annotations
+
from oslo_versionedobjects import fields as ovoo_fields
+
from staffeln.db import api as db_api
from staffeln.objects import base
from staffeln.objects import fields as sfeild
@@ -6,7 +9,9 @@
@base.StaffelnObjectRegistry.register
class Queue(
- base.StaffelnPersistentObject, base.StaffelnObject, base.StaffelnObjectDictCompat
+ base.StaffelnPersistentObject,
+ base.StaffelnObject,
+ base.StaffelnObjectDictCompat,
):
VERSION = "1.2"
# Version 1.0: Initial version
@@ -37,6 +42,7 @@
@base.remotable_classmethod
def get_by_id(cls, context, id): # pylint: disable=E0213
"""Find a queue task based on id
+
:param context: Security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
@@ -46,6 +52,7 @@
:param backup_id: the backup id of volume in queue.
:returns: a :class:`Queue` object.
"""
+
db_queue = cls.dbapi.get_queue_by_id(context, id)
queue = cls._from_db_object(cls(context), db_queue)
return queue
@@ -53,6 +60,7 @@
@base.remotable
def create(self):
"""Create a :class:`Backup_data` record in the DB"""
+
values = self.obj_get_changes()
db_queue = self.dbapi.create_queue(values)
return self._from_db_object(self, db_queue)
@@ -73,4 +81,5 @@
@base.remotable
def delete_queue(self):
"""Soft Delete the :class:`Queue_data` from the DB"""
+
self.dbapi.soft_delete_queue(self.id)
diff --git a/staffeln/objects/report.py b/staffeln/objects/report.py
index e851a93..588be73 100644
--- a/staffeln/objects/report.py
+++ b/staffeln/objects/report.py
@@ -1,4 +1,7 @@
+from __future__ import annotations
+
from oslo_versionedobjects import fields as ovoo_fields
+
from staffeln.db import api as db_api
from staffeln.objects import base
from staffeln.objects import fields as sfeild
@@ -6,7 +9,9 @@
@base.StaffelnObjectRegistry.register
class ReportTimestamp(
- base.StaffelnPersistentObject, base.StaffelnObject, base.StaffelnObjectDictCompat
+ base.StaffelnPersistentObject,
+ base.StaffelnObject,
+ base.StaffelnObjectDictCompat,
):
VERSION = "1.0"
# Version 1.0: Initial version
diff --git a/staffeln/objects/volume.py b/staffeln/objects/volume.py
index a4f24f6..f8b6e80 100644
--- a/staffeln/objects/volume.py
+++ b/staffeln/objects/volume.py
@@ -1,4 +1,7 @@
+from __future__ import annotations
+
from oslo_versionedobjects import fields as ovoo_fields
+
from staffeln.db import api as db_api
from staffeln.objects import base
from staffeln.objects import fields as sfeild
@@ -6,7 +9,9 @@
@base.StaffelnObjectRegistry.register
class Volume(
- base.StaffelnPersistentObject, base.StaffelnObject, base.StaffelnObjectDictCompat
+ base.StaffelnPersistentObject,
+ base.StaffelnObject,
+ base.StaffelnObjectDictCompat,
):
VERSION = "1.1"
# Version 1.0: Initial version
@@ -58,6 +63,7 @@
@base.remotable
def refresh(self):
"""Loads updates for this :class:`Backup_data`.
+
Loads a backup with the same backup_id from the database and
checks for updated attributes. Updates are applied from
the loaded backup column by column, if there are any updates.
@@ -73,6 +79,7 @@
@base.remotable_classmethod
def get_backup_by_backup_id(cls, context, backup_id): # pylint: disable=E0213
"""Find a backup based on backup_id
+
:param context: Security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
diff --git a/staffeln/tests/base.py b/staffeln/tests/base.py
index 1c30cdb..00059c0 100755
--- a/staffeln/tests/base.py
+++ b/staffeln/tests/base.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Copyright 2010-2011 OpenStack Foundation
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
@@ -14,10 +13,10 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+from __future__ import annotations
from oslotest import base
class TestCase(base.BaseTestCase):
-
"""Test case base class for all unit tests."""
diff --git a/staffeln/tests/common/test_openstacksdk.py b/staffeln/tests/common/test_openstacksdk.py
new file mode 100644
index 0000000..ceeece9
--- /dev/null
+++ b/staffeln/tests/common/test_openstacksdk.py
@@ -0,0 +1,458 @@
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+from unittest import mock
+
+import tenacity
+from openstack import exceptions as openstack_exc
+
+from staffeln import conf
+from staffeln.common import openstack as s_openstack
+from staffeln.tests import base
+
+
+class OpenstackSDKTest(base.TestCase):
+
+ def setUp(self):
+ super(OpenstackSDKTest, self).setUp()
+ self.m_c = mock.MagicMock()
+ with mock.patch("openstack.connect", return_value=self.m_c):
+ self.openstack = s_openstack.OpenstackSDK()
+ self.m_sleep = mock.Mock()
+ func_list = [
+ "get_user_id",
+ "get_projects",
+ "get_servers",
+ "get_role_assignments",
+ "get_user",
+ "get_project_member_emails",
+ "get_volume",
+ "get_backup",
+ "delete_backup",
+ "get_backup_quota",
+ "get_backup_gigabytes_quota",
+ ]
+ for i in func_list:
+ getattr(self.openstack, i).retry.sleep = ( # pylint: disable=E1101
+ self.m_sleep
+ )
+ getattr(self.openstack, i).retry.stop = ( # pylint: disable=E1101
+ tenacity.stop_after_attempt(2)
+ )
+
+ self.fake_user = mock.MagicMock(id="foo", email="foo@foo.com")
+ self.fake_volume = mock.MagicMock(id="fake_volume")
+ self.fake_backup = mock.MagicMock(id="fake_backup")
+ self.fake_role_assignment = mock.MagicMock(user="foo")
+ self.fake_role_assignment2 = mock.MagicMock(user={"id": "bar"})
+
+ def _test_http_error(self, m_func, retry_func, status_code, call_count=1, **kwargs):
+ m_func.side_effect = openstack_exc.HttpException(http_status=status_code)
+ exc = self.assertRaises(
+ openstack_exc.HttpException,
+ getattr(self.openstack, retry_func),
+ **kwargs,
+ )
+ self.assertEqual(status_code, exc.status_code)
+ skip_retry_codes = conf.CONF.openstack.skip_retry_codes.replace(" ", "").split(
+ ","
+ )
+ if str(status_code) not in skip_retry_codes:
+ if call_count == 1:
+ self.m_sleep.assert_called_once_with(1.0)
+ else:
+ self.m_sleep.assert_has_calls(
+ [mock.call(1.0) for _ in range(call_count)]
+ )
+ else:
+ self.m_sleep.assert_not_called()
+
+ def _test_non_http_error(self, m_func, retry_func, **kwargs):
+ m_func.side_effect = KeyError
+ self.assertRaises(KeyError, getattr(self.openstack, retry_func), **kwargs)
+ self.m_sleep.assert_not_called()
+
+ def test_get_servers(self):
+ self.m_c.compute.servers = mock.MagicMock(return_value=[])
+ self.assertEqual(self.openstack.get_servers(), [])
+ self.m_c.compute.servers.assert_called_once_with(
+ details=True, all_projects=True
+ )
+
+ def test_get_servers_non_http_error(self):
+ self._test_non_http_error(self.m_c.compute.servers, "get_servers")
+
+ def test_get_servers_conf_skip_http_error(self):
+ conf.CONF.set_override("skip_retry_codes", "403,", "openstack")
+ self._test_http_error(self.m_c.compute.servers, "get_servers", status_code=403)
+ self.assertEqual("403,", conf.CONF.openstack.skip_retry_codes)
+
+ def test_get_servers_conf_skip_http_error_not_hit(self):
+ conf.CONF.set_override("skip_retry_codes", "403,", "openstack")
+ self._test_http_error(self.m_c.compute.servers, "get_servers", status_code=404)
+ self.assertEqual("403,", conf.CONF.openstack.skip_retry_codes)
+
+ def test_get_servers_404_http_error(self):
+ self._test_http_error(self.m_c.compute.servers, "get_servers", status_code=404)
+
+ def test_get_servers_500_http_error(self):
+ self._test_http_error(self.m_c.compute.servers, "get_servers", status_code=500)
+
+ def test_get_projects(self):
+ self.m_c.list_projects = mock.MagicMock(return_value=[])
+ self.assertEqual(self.openstack.get_projects(), [])
+ self.m_c.list_projects.assert_called_once_with()
+
+ def test_get_projects_non_http_error(self):
+ self._test_non_http_error(self.m_c.list_projects, "get_projects")
+
+ def test_get_projects_404_http_error(self):
+ self._test_http_error(self.m_c.list_projects, "get_projects", status_code=404)
+
+ def test_get_projects_500_http_error(self):
+ self._test_http_error(self.m_c.list_projects, "get_projects", status_code=500)
+
+ def test_get_user_id(self):
+ self.m_c.get_user = mock.MagicMock(return_value=self.fake_user)
+ self.assertEqual(self.openstack.get_user_id(), "foo")
+ self.m_c.get_user.assert_called_once_with(name_or_id=mock.ANY)
+
+ def test_get_user_id_non_http_error(self):
+ self._test_non_http_error(self.m_c.get_user, "get_user_id")
+
+ def test_get_user_id_404_http_error(self):
+ self._test_http_error(self.m_c.get_user, "get_user_id", status_code=404)
+
+ def test_get_user_id_500_http_error(self):
+ self._test_http_error(self.m_c.get_user, "get_user_id", status_code=500)
+
+ def test_get_user(self):
+ self.m_c.get_user = mock.MagicMock(return_value=self.fake_user)
+ self.assertEqual(
+ self.openstack.get_user(user_id=self.fake_user.id), self.fake_user
+ )
+ self.m_c.get_user.assert_called_once_with(name_or_id=self.fake_user.id)
+
+ def test_get_user_non_http_error(self):
+ self._test_non_http_error(
+ self.m_c.get_user, "get_user", user_id=self.fake_user.id
+ )
+
+ def test_get_user_404_http_error(self):
+ self._test_http_error(
+ self.m_c.get_user,
+ "get_user",
+ status_code=404,
+ user_id=self.fake_user.id,
+ )
+
+ def test_get_user_500_http_error(self):
+ self._test_http_error(
+ self.m_c.get_user,
+ "get_user",
+ status_code=500,
+ user_id=self.fake_user.id,
+ )
+
+ def test_get_role_assignments(self):
+ self.m_c.list_role_assignments = mock.MagicMock(return_value=[])
+ self.assertEqual(self.openstack.get_role_assignments(project_id="foo"), [])
+ self.m_c.list_role_assignments.assert_called_once_with(
+ filters={"project": "foo"}
+ )
+
+ def test_get_role_assignments_non_http_error(self):
+ self._test_non_http_error(
+ self.m_c.list_role_assignments,
+ "get_role_assignments",
+ project_id="foo",
+ )
+
+ def test_get_role_assignments_404_http_error(self):
+ self._test_http_error(
+ self.m_c.list_role_assignments,
+ "get_role_assignments",
+ status_code=404,
+ project_id="foo",
+ )
+
+ def test_get_role_assignments_500_http_error(self):
+ self._test_http_error(
+ self.m_c.list_role_assignments,
+ "get_role_assignments",
+ status_code=500,
+ project_id="foo",
+ )
+
+ def test_get_project_member_emails(self):
+ # Make sure we cover both get_user pattern
+ self.m_c.list_role_assignments = mock.MagicMock(
+ return_value=[
+ self.fake_role_assignment,
+ self.fake_role_assignment2,
+ ]
+ )
+ self.m_c.get_user = mock.MagicMock(return_value=self.fake_user)
+ self.assertEqual(
+ self.openstack.get_project_member_emails(project_id="foo"),
+ [self.fake_user.email, self.fake_user.email],
+ )
+ self.m_c.list_role_assignments.assert_called_once_with(
+ filters={"project": "foo"}
+ )
+ self.m_c.get_user.assert_has_calls(
+ [
+ mock.call(name_or_id=self.fake_role_assignment.user),
+ mock.call(name_or_id=self.fake_role_assignment2.user.get("id")),
+ ]
+ )
+
+ def test_get_project_member_emails_non_http_error(self):
+ self._test_non_http_error(
+ self.m_c.list_role_assignments,
+ "get_project_member_emails",
+ project_id="foo",
+ )
+
+ def test_get_project_member_emails_404_http_error(self):
+ self._test_http_error(
+ self.m_c.list_role_assignments,
+ "get_project_member_emails",
+ status_code=404,
+ project_id="foo",
+ )
+
+ def test_get_project_member_emails_500_http_error(self):
+ self._test_http_error(
+ self.m_c.list_role_assignments,
+ "get_project_member_emails",
+ status_code=500,
+ call_count=3,
+ project_id="foo",
+ )
+
+ def test_get_volume(self):
+ self.m_c.get_volume_by_id = mock.MagicMock(return_value=self.fake_volume)
+ self.assertEqual(
+ self.openstack.get_volume(uuid=self.fake_volume.id, project_id="bar"),
+ self.fake_volume,
+ )
+ self.m_c.get_volume_by_id.assert_called_once_with(self.fake_volume.id)
+
+ def test_get_volume_non_http_error(self):
+ self._test_non_http_error(
+ self.m_c.get_volume_by_id,
+ "get_volume",
+ uuid="foo",
+ project_id="bar",
+ )
+
+ def test_get_volume_404_http_error(self):
+ self._test_http_error(
+ self.m_c.get_volume_by_id,
+ "get_volume",
+ status_code=404,
+ uuid="foo",
+ project_id="bar",
+ )
+
+ def test_get_volume_500_http_error(self):
+ self._test_http_error(
+ self.m_c.get_volume_by_id,
+ "get_volume",
+ status_code=500,
+ uuid="foo",
+ project_id="bar",
+ )
+
+ def test_get_backup(self):
+ self.m_c.get_volume_backup = mock.MagicMock(return_value=self.fake_backup)
+ self.assertEqual(
+ self.openstack.get_backup(uuid=self.fake_backup.id, project_id="bar"),
+ self.fake_backup,
+ )
+ self.m_c.get_volume_backup.assert_called_once_with(self.fake_backup.id)
+
+ def test_get_backup_not_found(self):
+ self.m_c.get_volume_backup = mock.MagicMock(
+ side_effect=openstack_exc.ResourceNotFound
+ )
+ self.assertEqual(
+ self.openstack.get_backup(uuid=self.fake_backup.id, project_id="bar"),
+ None,
+ )
+ self.m_c.get_volume_backup.assert_called_once_with(self.fake_backup.id)
+
+ def test_get_backup_non_http_error(self):
+ self._test_non_http_error(
+ self.m_c.get_volume_backup,
+ "get_backup",
+ uuid="foo",
+ project_id="bar",
+ )
+
+ def test_get_backup_404_http_error(self):
+ self._test_http_error(
+ self.m_c.get_volume_backup,
+ "get_backup",
+ status_code=404,
+ uuid="foo",
+ project_id="bar",
+ )
+
+ def test_get_backup_500_http_error(self):
+ self._test_http_error(
+ self.m_c.get_volume_backup,
+ "get_backup",
+ status_code=500,
+ uuid="foo",
+ project_id="bar",
+ )
+
+ def test_delete_backup(self):
+ self.m_c.delete_volume_backup = mock.MagicMock(return_value=self.fake_backup)
+ self.assertEqual(
+ self.openstack.delete_backup(uuid=self.fake_backup.id, project_id="bar"),
+ None,
+ )
+ self.m_c.delete_volume_backup.assert_called_once_with(
+ self.fake_backup.id, force=False
+ )
+
+ def test_delete_backup_not_found(self):
+ self.m_c.delete_volume_backup = mock.MagicMock(
+ side_effect=openstack_exc.ResourceNotFound
+ )
+ self.assertEqual(
+ self.openstack.delete_backup(uuid=self.fake_backup.id, project_id="bar"),
+ None,
+ )
+ self.m_c.delete_volume_backup.assert_called_once_with(
+ self.fake_backup.id, force=False
+ )
+
+ def test_delete_backup_non_http_error(self):
+ self._test_non_http_error(
+ self.m_c.delete_volume_backup,
+ "delete_backup",
+ uuid="foo",
+ project_id="bar",
+ )
+
+ def test_delete_backup_404_http_error(self):
+ self._test_http_error(
+ self.m_c.delete_volume_backup,
+ "delete_backup",
+ status_code=404,
+ uuid="foo",
+ project_id="bar",
+ )
+
+ def test_delete_backup_500_http_error(self):
+ self._test_http_error(
+ self.m_c.delete_volume_backup,
+ "delete_backup",
+ status_code=500,
+ uuid="foo",
+ project_id="bar",
+ )
+
+ @mock.patch("openstack.proxy._json_response")
+ def test_get_backup_quota(self, m_j_r):
+ self.m_c.block_storage.get = mock.MagicMock(status_code=200)
+ self.m_gam = mock.MagicMock()
+ self.m_c._get_and_munchify = self.m_gam
+ self.m_gam.return_value = mock.MagicMock(backups=[self.fake_backup.id])
+ self.assertEqual(
+ [self.fake_backup.id],
+ self.openstack.get_backup_quota(project_id="bar"),
+ )
+ self.m_c.block_storage.get.assert_called_once_with(
+ "/os-quota-sets/bar?usage=True"
+ )
+
+ def test_get_backup_quota_non_http_error(self):
+ self._test_non_http_error(
+ self.m_c.block_storage.get, "get_backup_quota", project_id="bar"
+ )
+
+ def test_get_backup_quota_404_http_error(self):
+ self._test_http_error(
+ self.m_c.block_storage.get,
+ "get_backup_quota",
+ status_code=404,
+ project_id="bar",
+ )
+
+ def test_get_backup_quota_500_http_error(self):
+ self._test_http_error(
+ self.m_c.block_storage.get,
+ "get_backup_quota",
+ status_code=500,
+ project_id="bar",
+ )
+
+ @mock.patch("openstack.proxy._json_response")
+ def test_get_backup_gigabytes_quota(self, m_j_r):
+ self.m_c.block_storage.get = mock.MagicMock(status_code=200)
+ self.m_gam = mock.MagicMock()
+ self.m_c._get_and_munchify = self.m_gam
+ self.m_gam.return_value = mock.MagicMock(backup_gigabytes=[self.fake_backup.id])
+ self.assertEqual(
+ [self.fake_backup.id],
+ self.openstack.get_backup_gigabytes_quota(project_id="bar"),
+ )
+ self.m_c.block_storage.get.assert_called_once_with(
+ "/os-quota-sets/bar?usage=True"
+ )
+
+ def test_get_backup_gigabytes_quota_non_http_error(self):
+ self._test_non_http_error(
+ self.m_c.block_storage.get,
+ "get_backup_gigabytes_quota",
+ project_id="bar",
+ )
+
+ def test_get_backup_gigabytes_quota_404_http_error(self):
+ self._test_http_error(
+ self.m_c.block_storage.get,
+ "get_backup_gigabytes_quota",
+ status_code=404,
+ project_id="bar",
+ )
+
+ def test_get_backup_gigabytes_quota_500_http_error(self):
+ self._test_http_error(
+ self.m_c.block_storage.get,
+ "get_backup_gigabytes_quota",
+ status_code=500,
+ project_id="bar",
+ )
+
+ @mock.patch("openstack.proxy._json_response")
+ def test_get_volume_quotas(self, m_j_r):
+ self.m_c.block_storage.get = mock.MagicMock(status_code=200)
+ self.m_gam_return = mock.MagicMock()
+ self.m_gam = mock.MagicMock(return_value=self.m_gam_return)
+ self.m_c._get_and_munchify = self.m_gam
+ self.assertEqual(
+ self.m_gam_return,
+ self.openstack._get_volume_quotas(project_id="bar"),
+ )
+ self.m_c.block_storage.get.assert_called_once_with(
+ "/os-quota-sets/bar?usage=True"
+ )
+ self.m_gam.assert_called_once_with("quota_set", m_j_r())
+
+ @mock.patch("openstack.proxy._json_response")
+ def test_get_volume_quotas_no_usage(self, m_j_r):
+ self.m_c.block_storage.get = mock.MagicMock(status_code=200)
+ self.m_gam_return = mock.MagicMock()
+ self.m_gam = mock.MagicMock(return_value=self.m_gam_return)
+ self.m_c._get_and_munchify = self.m_gam
+ self.assertEqual(
+ self.m_gam_return,
+ self.openstack._get_volume_quotas(project_id="bar", usage=False),
+ )
+ self.m_c.block_storage.get.assert_called_once_with("/os-quota-sets/bar")
+ self.m_gam.assert_called_once_with("quota_set", m_j_r())
diff --git a/staffeln/tests/test_staffeln.py b/staffeln/tests/test_staffeln.py
index 6c7c5f3..5ef2a92 100755
--- a/staffeln/tests/test_staffeln.py
+++ b/staffeln/tests/test_staffeln.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -11,13 +10,13 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
"""
test_staffeln
----------------------------------
Tests for `staffeln` module.
"""
+from __future__ import annotations
from staffeln.tests import base
diff --git a/staffeln/version.py b/staffeln/version.py
index efe79df..b943573 100755
--- a/staffeln/version.py
+++ b/staffeln/version.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import pbr.version
version_info = pbr.version.VersionInfo("staffeln")
diff --git a/test-requirements.txt b/test-requirements.txt
index f09cf7f..2c28785 100755
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -9,3 +9,4 @@
oslotest>=1.10.0 # Apache-2.0
stestr>=1.0.0 # Apache-2.0
testtools>=1.4.0 # MIT
+pre-commit
diff --git a/tox.ini b/tox.ini
index 4812539..2de6fda 100755
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = py37,pep8
+envlist = py3,linters
skipsdist = True
sitepackages = False
skip_missing_interpreters = True
@@ -12,7 +12,6 @@
TERM=linux
deps =
- flake8
-r{toxinidir}/test-requirements.txt
-r{toxinidir}/requirements.txt
-c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
@@ -21,15 +20,11 @@
pip install {opts} {packages}
-[testenv:py3]
+[testenv:{py3,py38,py39,py310}]
basepython = python3
deps = -r{toxinidir}/test-requirements.txt
commands = stestr run --slowest {posargs}
-[testenv:pep8]
-commands =
- flake8
-
[testenv:cover]
basepython = python3
deps = -r{toxinidir}/requirements.txt
@@ -45,14 +40,12 @@
coverage xml -o cover/coverage.xml
coverage report
+[testenv:linters]
+skip_install = True
+deps =
+ pre-commit
+commands =
+ pre-commit run --all-files --show-diff-on-failure
[testenv:venv]
commands = {posargs}
-
-[flake8]
-# E123, E125 skipped as they are invalid PEP-8.
-
-show-source = True
-ignore = E123,E125
-builtins = _
-exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build
diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml
new file mode 100644
index 0000000..d7e620d
--- /dev/null
+++ b/zuul.d/jobs.yaml
@@ -0,0 +1,9 @@
+- job:
+ name: staffeln-linters
+ parent: tox-linters
+
+- job:
+ name: staffeln-unit
+ parent: tox
+ vars:
+ tox_envlist: py3
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
new file mode 100644
index 0000000..a62642a
--- /dev/null
+++ b/zuul.d/project.yaml
@@ -0,0 +1,9 @@
+- project:
+ check:
+ jobs:
+ - staffeln-linters
+ - staffeln-unit
+ gate:
+ jobs:
+ - staffeln-linters
+ - staffeln-unit