Fix black linter errors
diff --git a/README.md b/README.md
index 0146874..d5cdcac 100755
--- a/README.md
+++ b/README.md
@@ -12,7 +12,7 @@
### Function Overview
-The solution backs up all volumes attached to VMs which have a pre-defined metadata set, for
+The solution backs up all volumes attached to VMs which have a predefined metadata set, for
example, `backup=yes`.
First, it gets the list of VMs which have backup metadata and the list of volumes attached to the
VMs in the given project by consuming the Openstack API (nova-api and cinder-api). Once the
diff --git a/doc/source/conf.py b/doc/source/conf.py
index e41b2c9..3588b3d 100755
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -15,14 +15,14 @@
import os
import sys
-sys.path.insert(0, os.path.abspath('../..'))
+sys.path.insert(0, os.path.abspath("../.."))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
- 'sphinx.ext.autodoc',
- 'openstackdocstheme',
+ "sphinx.ext.autodoc",
+ "openstackdocstheme",
#'sphinx.ext.intersphinx',
]
@@ -31,19 +31,21 @@
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
-source_suffix = '.rst'
+source_suffix = ".rst"
# The master toctree document.
-master_doc = 'index'
+master_doc = "index"
# General information about the project.
-project = u'staffeln'
-copyright = u'2017, OpenStack Developers'
+project = "staffeln"
+copyright = "2017, OpenStack Developers"
# openstackdocstheme options
-openstackdocs_repo_name = 'openstack/staffeln'
-openstackdocs_bug_project = 'replace with the name of the project on Launchpad or the ID from Storyboard'
-openstackdocs_bug_tag = ''
+openstackdocs_repo_name = "openstack/staffeln"
+openstackdocs_bug_project = (
+ "replace with the name of the project on Launchpad or the ID from Storyboard"
+)
+openstackdocs_bug_tag = ""
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
@@ -53,7 +55,7 @@
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'native'
+pygments_style = "native"
# -- Options for HTML output --------------------------------------------------
@@ -62,20 +64,23 @@
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
-html_theme = 'openstackdocs'
+html_theme = "openstackdocs"
# Output file base name for HTML help builder.
-htmlhelp_basename = '%sdoc' % project
+htmlhelp_basename = "%sdoc" % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
- ('index',
- '%s.tex' % project,
- u'%s Documentation' % project,
- u'OpenStack Developers', 'manual'),
+ (
+ "index",
+ "%s.tex" % project,
+ "%s Documentation" % project,
+ "OpenStack Developers",
+ "manual",
+ ),
]
# Example configuration for intersphinx: refer to the Python standard library.
-#intersphinx_mapping = {'http://docs.python.org/': None}
+# intersphinx_mapping = {'http://docs.python.org/': None}
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
index b3a878b..65d2460 100755
--- a/releasenotes/source/conf.py
+++ b/releasenotes/source/conf.py
@@ -35,32 +35,32 @@
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
- 'openstackdocstheme',
- 'reno.sphinxext',
+ "openstackdocstheme",
+ "reno.sphinxext",
]
# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
# The suffix of source filenames.
-source_suffix = '.rst'
+source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
-master_doc = 'index'
+master_doc = "index"
# General information about the project.
-project = u'staffeln Release Notes'
-copyright = u'2017, OpenStack Developers'
+project = "staffeln Release Notes"
+copyright = "2017, OpenStack Developers"
# openstackdocstheme options
-openstackdocs_repo_name = 'openstack/staffeln'
+openstackdocs_repo_name = "openstack/staffeln"
openstackdocs_bug_project = """replace with the name of the
project on Launchpad or the ID from Storyboard"""
-openstackdocs_bug_tag = ''
-openstackdocs_auto_name = 'False'
+openstackdocs_bug_tag = ""
+openstackdocs_auto_name = "False"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@@ -68,9 +68,9 @@
#
# The short X.Y version.
# The full version, including alpha/beta/rc tags.
-release = ''
+release = ""
# The short X.Y version.
-version = ''
+version = ""
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -102,7 +102,7 @@
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'native'
+pygments_style = "native"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
@@ -115,7 +115,7 @@
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
-html_theme = 'openstackdocs'
+html_theme = "openstackdocs"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
@@ -144,7 +144,7 @@
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
@@ -189,7 +189,7 @@
# html_file_suffix = None
# Output file base name for HTML help builder.
-htmlhelp_basename = 'staffelnReleaseNotesdoc'
+htmlhelp_basename = "staffelnReleaseNotesdoc"
# -- Options for LaTeX output ---------------------------------------------
@@ -197,10 +197,8 @@
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
-
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
-
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
@@ -209,9 +207,13 @@
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
- ('index', 'staffelnReleaseNotes.tex',
- u'staffeln Release Notes Documentation',
- u'OpenStack Foundation', 'manual'),
+ (
+ "index",
+ "staffelnReleaseNotes.tex",
+ "staffeln Release Notes Documentation",
+ "OpenStack Foundation",
+ "manual",
+ ),
]
# The name of an image file (relative to this directory) to place at the top of
@@ -240,9 +242,13 @@
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
- ('index', 'staffelnrereleasenotes',
- u'staffeln Release Notes Documentation',
- [u'OpenStack Foundation'], 1)
+ (
+ "index",
+ "staffelnrereleasenotes",
+ "staffeln Release Notes Documentation",
+ ["OpenStack Foundation"],
+ 1,
+ )
]
# If true, show URL addresses after external links.
@@ -255,11 +261,15 @@
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
- ('index', 'staffeln ReleaseNotes',
- u'staffeln Release Notes Documentation',
- u'OpenStack Foundation', 'staffelnReleaseNotes',
- 'One line description of project.',
- 'Miscellaneous'),
+ (
+ "index",
+ "staffeln ReleaseNotes",
+ "staffeln Release Notes Documentation",
+ "OpenStack Foundation",
+ "staffelnReleaseNotes",
+ "One line description of project.",
+ "Miscellaneous",
+ ),
]
# Documents to append as an appendix to all manuals.
@@ -275,4 +285,4 @@
# texinfo_no_detailmenu = False
# -- Options for Internationalization output ------------------------------
-locale_dirs = ['locale/']
+locale_dirs = ["locale/"]
diff --git a/setup.py b/setup.py
index 1f988cd..0346ed3 100755
--- a/setup.py
+++ b/setup.py
@@ -16,6 +16,4 @@
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
-setuptools.setup(
- setup_requires=['pbr'],
- pbr=True)
+setuptools.setup(setup_requires=["pbr"], pbr=True)
diff --git a/staffeln/__init__.py b/staffeln/__init__.py
index 686f74c..553afa9 100755
--- a/staffeln/__init__.py
+++ b/staffeln/__init__.py
@@ -15,5 +15,4 @@
import pbr.version
-__version__ = pbr.version.VersionInfo(
- 'staffeln').version_string()
+__version__ = pbr.version.VersionInfo("staffeln").version_string()
diff --git a/staffeln/api/app.py b/staffeln/api/app.py
index 9552746..e506dd6 100755
--- a/staffeln/api/app.py
+++ b/staffeln/api/app.py
@@ -14,7 +14,7 @@
@app.route("/v1/backup", methods=["POST"])
def backup_id():
-
+
if "backup_id" not in request.args:
# Return error if the backup_id argument is not provided.
return Response(
diff --git a/staffeln/api/middleware/parsable_error.py b/staffeln/api/middleware/parsable_error.py
index d7ce289..05297b2 100755
--- a/staffeln/api/middleware/parsable_error.py
+++ b/staffeln/api/middleware/parsable_error.py
@@ -25,6 +25,7 @@
class ParsableErrorMiddleware(object):
"""Replace error body with something the client can parse."""
+
def __init__(self, app):
self.app = app
@@ -33,33 +34,36 @@
for err_str in app_iter:
err = {}
try:
- err = jsonutils.loads(err_str.decode('utf-8'))
+ err = jsonutils.loads(err_str.decode("utf-8"))
except ValueError:
pass
- if 'title' in err and 'description' in err:
- title = err['title']
- desc = err['description']
- elif 'faultstring' in err:
- title = err['faultstring'].split('.', 1)[0]
- desc = err['faultstring']
+ if "title" in err and "description" in err:
+ title = err["title"]
+ desc = err["description"]
+ elif "faultstring" in err:
+ title = err["faultstring"].split(".", 1)[0]
+ desc = err["faultstring"]
else:
- title = ''
- desc = ''
+ title = ""
+ desc = ""
- code = err['faultcode'].lower() if 'faultcode' in err else ''
+ code = err["faultcode"].lower() if "faultcode" in err else ""
# if already formatted by custom exception, don't update
- if 'min_version' in err:
+ if "min_version" in err:
errs.append(err)
else:
- errs.append({
- 'request_id': '',
- 'code': code,
- 'status': status_code,
- 'title': title,
- 'detail': desc,
- 'links': []})
+ errs.append(
+ {
+ "request_id": "",
+ "code": code,
+ "status": status_code,
+ "title": title,
+ "detail": desc,
+ "links": [],
+ }
+ )
return errs
@@ -71,33 +75,35 @@
def replacement_start_response(status, headers, exc_info=None):
"""Overrides the default response to make errors parsable."""
try:
- status_code = int(status.split(' ')[0])
- state['status_code'] = status_code
+ status_code = int(status.split(" ")[0])
+ state["status_code"] = status_code
except (ValueError, TypeError): # pragma: nocover
- raise Exception(_(
- 'ErrorDocumentMiddleware received an invalid '
- 'status %s') % status)
+ raise Exception(
+ _("ErrorDocumentMiddleware received an invalid " "status %s")
+ % status
+ )
else:
- if (state['status_code'] // 100) not in (2, 3):
+ if (state["status_code"] // 100) not in (2, 3):
# Remove some headers so we can replace them later
# when we have the full error message and can
# compute the length.
- headers = [(h, v)
- for (h, v) in headers
- if h not in ('Content-Length', 'Content-Type')
- ]
+ headers = [
+ (h, v)
+ for (h, v) in headers
+ if h not in ("Content-Length", "Content-Type")
+ ]
# Save the headers in case we need to modify them.
- state['headers'] = headers
+ state["headers"] = headers
return start_response(status, headers, exc_info)
app_iter = self.app(environ, replacement_start_response)
- if (state['status_code'] // 100) not in (2, 3):
- errs = self._update_errors(app_iter, state['status_code'])
- body = [jsonutils.dump_as_bytes({'errors': errs})]
- state['headers'].append(('Content-Type', 'application/json'))
- state['headers'].append(('Content-Length', str(len(body[0]))))
+ if (state["status_code"] // 100) not in (2, 3):
+ errs = self._update_errors(app_iter, state["status_code"])
+ body = [jsonutils.dump_as_bytes({"errors": errs})]
+ state["headers"].append(("Content-Type", "application/json"))
+ state["headers"].append(("Content-Length", str(len(body[0]))))
else:
body = app_iter
diff --git a/staffeln/cmd/api.py b/staffeln/cmd/api.py
index f554354..0280a66 100755
--- a/staffeln/cmd/api.py
+++ b/staffeln/cmd/api.py
@@ -21,12 +21,10 @@
key_file = CONF.api.ssl_key_file
if cert_file and not os.path.exists(cert_file):
- raise RuntimeError(
- _("Unable to find cert_file : %s") % cert_file)
+ raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)
if key_file and not os.path.exists(key_file):
- raise RuntimeError(
- _("Unable to find key_file : %s") % key_file)
+ raise RuntimeError(_("Unable to find key_file : %s") % key_file)
return cert_file, key_file
else:
@@ -42,11 +40,13 @@
# Create the WSGI server and start it
host, port = CONF.api.host, CONF.api.port
- LOG.info('Starting server in PID %s', os.getpid())
+ LOG.info("Starting server in PID %s", os.getpid())
LOG.debug("Configuration:")
CONF.log_opt_values(LOG, logging.DEBUG)
- LOG.info('Serving on %(proto)s://%(host)s:%(port)s',
- dict(proto="https" if use_ssl else "http", host=host, port=port))
+ LOG.info(
+ "Serving on %(proto)s://%(host)s:%(port)s",
+ dict(proto="https" if use_ssl else "http", host=host, port=port),
+ )
api_app.run(host=host, port=port, ssl_context=_get_ssl_configs(use_ssl))
diff --git a/staffeln/cmd/conductor.py b/staffeln/cmd/conductor.py
index 2550d88..24ba590 100755
--- a/staffeln/cmd/conductor.py
+++ b/staffeln/cmd/conductor.py
@@ -14,9 +14,9 @@
service.prepare_service()
sm = cotyledon.ServiceManager()
- sm.add(manager.BackupManager,
- workers=CONF.conductor.backup_workers, args=(CONF,))
- sm.add(manager.RotationManager,
- workers=CONF.conductor.rotation_workers, args=(CONF,))
+ sm.add(manager.BackupManager, workers=CONF.conductor.backup_workers, args=(CONF,))
+ sm.add(
+ manager.RotationManager, workers=CONF.conductor.rotation_workers, args=(CONF,)
+ )
oslo_config_glue.setup(sm, CONF)
sm.run()
diff --git a/staffeln/cmd/dbmanage.py b/staffeln/cmd/dbmanage.py
index 89b69ba..467b52a 100644
--- a/staffeln/cmd/dbmanage.py
+++ b/staffeln/cmd/dbmanage.py
@@ -15,7 +15,6 @@
class DBCommand(object):
-
@staticmethod
def create_schema():
migration.create_schema()
@@ -23,16 +22,13 @@
def add_command_parsers(subparsers):
- parser = subparsers.add_parser(
- 'create_schema',
- help="Create the database schema.")
+ parser = subparsers.add_parser("create_schema", help="Create the database schema.")
parser.set_defaults(func=DBCommand.create_schema)
-command_opt = cfg.SubCommandOpt('command',
- title='Command',
- help='Available commands',
- handler=add_command_parsers)
+command_opt = cfg.SubCommandOpt(
+ "command", title="Command", help="Available commands", handler=add_command_parsers
+)
def register_sub_command_opts():
@@ -42,11 +38,13 @@
def main():
register_sub_command_opts()
- valid_commands = set([
- 'create_schema',
- ])
+ valid_commands = set(
+ [
+ "create_schema",
+ ]
+ )
if not set(sys.argv).intersection(valid_commands):
- sys.argv.append('create_schema')
+ sys.argv.append("create_schema")
service.prepare_service(sys.argv)
CONF.command.func()
diff --git a/staffeln/common/config.py b/staffeln/common/config.py
index dd28201..f71a378 100755
--- a/staffeln/common/config.py
+++ b/staffeln/common/config.py
@@ -7,10 +7,12 @@
def parse_args(argv, default_config_files=None):
# rpc.set_defaults(control_exchange='staffeln')
- CONF(argv[1:],
- project='staffeln',
- version=version.version_info.release_string(),
- default_config_files=default_config_files)
+ CONF(
+ argv[1:],
+ project="staffeln",
+ version=version.version_info.release_string(),
+ default_config_files=default_config_files,
+ )
# rpc.init(CONF)
diff --git a/staffeln/common/constants.py b/staffeln/common/constants.py
index 91b2a95..6b60ef5 100644
--- a/staffeln/common/constants.py
+++ b/staffeln/common/constants.py
@@ -1,9 +1,9 @@
-BACKUP_COMPLETED=2
-BACKUP_WIP=1
-BACKUP_PLANNED=0
+BACKUP_COMPLETED = 2
+BACKUP_WIP = 1
+BACKUP_PLANNED = 0
-BACKUP_ENABLED_KEY = 'true'
-BACKUP_RESULT_CHECK_INTERVAL = 60 # second
+BACKUP_ENABLED_KEY = "true"
+BACKUP_RESULT_CHECK_INTERVAL = 60 # second
# default config values
-DEFAULT_BACKUP_CYCLE_TIMEOUT="5min"
\ No newline at end of file
+DEFAULT_BACKUP_CYCLE_TIMEOUT = "5min"
diff --git a/staffeln/common/email.py b/staffeln/common/email.py
index d88a96d..c5c4b70 100644
--- a/staffeln/common/email.py
+++ b/staffeln/common/email.py
@@ -8,13 +8,13 @@
def send(
- src_email,
- src_pwd,
- dest_email,
- subject,
- content,
- smtp_server_domain,
- smtp_server_port,
+ src_email,
+ src_pwd,
+ dest_email,
+ subject,
+ content,
+ smtp_server_domain,
+ smtp_server_port,
):
message = MIMEMultipart("alternative")
message["Subject"] = subject
diff --git a/staffeln/common/openstack.py b/staffeln/common/openstack.py
index 7aaf1a0..c6d2769 100644
--- a/staffeln/common/openstack.py
+++ b/staffeln/common/openstack.py
@@ -7,25 +7,22 @@
LOG = log.getLogger(__name__)
-class OpenstackSDK():
-
+class OpenstackSDK:
def __init__(self):
self.conn_list = {}
self.conn = auth.create_connection()
-
def set_project(self, project):
- LOG.debug(_("Connect as project %s" % project.get('name')))
- project_id = project.get('id')
+ LOG.debug(_("Connect as project %s" % project.get("name")))
+ project_id = project.get("id")
if project_id not in self.conn_list:
- LOG.debug(_("Initiate connection for project %s" % project.get('name')))
+ LOG.debug(_("Initiate connection for project %s" % project.get("name")))
conn = self.conn.connect_as_project(project)
self.conn_list[project_id] = conn
- LOG.debug(_("Connect as project %s" % project.get('name')))
+ LOG.debug(_("Connect as project %s" % project.get("name")))
self.conn = self.conn_list[project_id]
-
# user
def get_user_id(self):
user_name = self.conn.config.auth["username"]
@@ -43,19 +40,16 @@
def get_projects(self):
return self.conn.list_projects()
-
############## server
def get_servers(self, project_id, all_projects=True, details=True):
return self.conn.compute.servers(
details=details, all_projects=all_projects, project_id=project_id
)
-
############## volume
def get_volume(self, uuid, project_id):
return self.conn.get_volume_by_id(uuid)
-
############## backup
def get_backup(self, uuid, project_id=None):
# return conn.block_storage.get_backup(
@@ -67,16 +61,16 @@
except exceptions.ResourceNotFound:
return None
-
def create_backup(self, volume_id, project_id, force=True, wait=False):
# return conn.block_storage.create_backup(
# volume_id=queue.volume_id, force=True, project_id=queue.project_id,
# )
return self.conn.create_volume_backup(
- volume_id=volume_id, force=force, wait=wait,
+ volume_id=volume_id,
+ force=force,
+ wait=wait,
)
-
def delete_backup(self, uuid, project_id=None, force=False):
# Note(Alex): v3 is not supporting force delete?
# conn.block_storage.delete_backup(
@@ -88,18 +82,16 @@
except exceptions.ResourceNotFound:
return None
-
def get_backup_quota(self, project_id):
# quota = conn.get_volume_quotas(project_id)
quota = self._get_volume_quotas(project_id)
return quota.backups
-
# rewrite openstasdk._block_storage.get_volume_quotas
# added usage flag
# ref: https://docs.openstack.org/api-ref/block-storage/v3/?expanded=#show-quota-usage-for-a-project
def _get_volume_quotas(self, project_id, usage=True):
- """ Get volume quotas for a project
+ """Get volume quotas for a project
:param name_or_id: project name or id
:raises: OpenStackCloudException if it's not a valid project
@@ -109,12 +101,11 @@
if usage:
resp = self.conn.block_storage.get(
- '/os-quota-sets/{project_id}?usage=True'.format(project_id=project_id))
+ "/os-quota-sets/{project_id}?usage=True".format(project_id=project_id)
+ )
else:
resp = self.conn.block_storage.get(
- '/os-quota-sets/{project_id}'.format(project_id=project_id))
- data = proxy._json_response(
- resp,
- error_message="cinder client call failed")
- return self.conn._get_and_munchify('quota_set', data)
-
+ "/os-quota-sets/{project_id}".format(project_id=project_id)
+ )
+ data = proxy._json_response(resp, error_message="cinder client call failed")
+ return self.conn._get_and_munchify("quota_set", data)
diff --git a/staffeln/common/service.py b/staffeln/common/service.py
index 791955b..e35da2a 100755
--- a/staffeln/common/service.py
+++ b/staffeln/common/service.py
@@ -28,4 +28,4 @@
config.parse_args(argv)
config.set_config_defaults()
objects.register_all()
- logging.setup(CONF, 'staffeln')
+ logging.setup(CONF, "staffeln")
diff --git a/staffeln/common/short_id.py b/staffeln/common/short_id.py
index be719c9..9350258 100755
--- a/staffeln/common/short_id.py
+++ b/staffeln/common/short_id.py
@@ -17,9 +17,10 @@
required.
"""
shifts = six.moves.xrange(num_bits - 8, -8, -8)
- byte_at = lambda off: ((value >> off # noqa: E731
- if off >= 0 else value << -off) & 0xff)
- return ''.join(chr(byte_at(offset)) for offset in shifts)
+ byte_at = lambda off: (
+ (value >> off if off >= 0 else value << -off) & 0xFF # noqa: E731
+ )
+ return "".join(chr(byte_at(offset)) for offset in shifts)
def get_id(source_uuid):
@@ -30,7 +31,7 @@
if isinstance(source_uuid, six.string_types):
source_uuid = uuid.UUID(source_uuid)
if source_uuid.version != 4:
- raise ValueError(_('Invalid UUID version (%d)') % source_uuid.version)
+ raise ValueError(_("Invalid UUID version (%d)") % source_uuid.version)
# The "time" field of a v4 UUID contains 60 random bits
# (see RFC4122, Section 4.4)
@@ -39,7 +40,7 @@
encoded = base64.b32encode(six.b(random_bytes))[:12]
if six.PY3:
- return encoded.lower().decode('utf-8')
+ return encoded.lower().decode("utf-8")
else:
return encoded.lower()
diff --git a/staffeln/common/time.py b/staffeln/common/time.py
index 02e40e1..fd045dd 100644
--- a/staffeln/common/time.py
+++ b/staffeln/common/time.py
@@ -5,7 +5,7 @@
DEFAULT_TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
regex = re.compile(
- r'((?P<years>\d+?)y)?((?P<months>\d+?)mon)?((?P<weeks>\d+?)w)?((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)min)?((?P<seconds>\d+?)s)?'
+ r"((?P<years>\d+?)y)?((?P<months>\d+?)mon)?((?P<weeks>\d+?)w)?((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)min)?((?P<seconds>\d+?)s)?"
)
@@ -26,25 +26,37 @@
empty_flag = False
else:
time_params[key] = 0
- if empty_flag: return None
+ if empty_flag:
+ return None
return time_params
except:
return None
+
def get_current_time():
return datetime.now()
+
def get_current_strtime():
now = datetime.now()
return now.strftime(DEFAULT_TIME_FORMAT)
-def timeago(years=0, months=0, weeks=0, days=0, hours=0, minutes=0, seconds=0, from_date=None):
+def timeago(
+ years=0, months=0, weeks=0, days=0, hours=0, minutes=0, seconds=0, from_date=None
+):
if from_date is None:
from_date = datetime.now()
- return from_date - relativedelta(years=years, months=months,
- weeks=weeks, days=days, hours=hours,
- minutes=minutes, seconds=seconds)
+ return from_date - relativedelta(
+ years=years,
+ months=months,
+ weeks=weeks,
+ days=days,
+ hours=hours,
+ minutes=minutes,
+ seconds=seconds,
+ )
+
## yearsago using Standard library
# def yearsago(years, from_date=None):
diff --git a/staffeln/conductor/backup.py b/staffeln/conductor/backup.py
index c79c5d9..07b8946 100755
--- a/staffeln/conductor/backup.py
+++ b/staffeln/conductor/backup.py
@@ -16,16 +16,19 @@
LOG = log.getLogger(__name__)
BackupMapping = collections.namedtuple(
- "BackupMapping", ["volume_id", "backup_id", "project_id", "instance_id", "backup_completed"]
+ "BackupMapping",
+ ["volume_id", "backup_id", "project_id", "instance_id", "backup_completed"],
)
QueueMapping = collections.namedtuple(
- "QueueMapping", ["volume_id", "backup_id", "project_id", "instance_id", "backup_status"]
+ "QueueMapping",
+ ["volume_id", "backup_id", "project_id", "instance_id", "backup_status"],
)
def retry_auth(func):
"""Decorator to reconnect openstack and avoid token rotation"""
+
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
@@ -34,6 +37,7 @@
LOG.warn(_("Token has been expired or rotated!"))
self.refresh_openstacksdk()
return func(self, *args, **kwargs)
+
return wrapper
@@ -86,7 +90,10 @@
if not CONF.conductor.backup_metadata_key in metadata:
return False
- return metadata[CONF.conductor.backup_metadata_key].lower() == constants.BACKUP_ENABLED_KEY
+ return (
+ metadata[CONF.conductor.backup_metadata_key].lower()
+ == constants.BACKUP_ENABLED_KEY
+ )
else:
return True
@@ -94,10 +101,14 @@
def filter_by_volume_status(self, volume_id, project_id):
try:
volume = self.openstacksdk.get_volume(volume_id, project_id)
- if volume == None: return False
- res = volume['status'] in ("available", "in-use")
+ if volume == None:
+ return False
+ res = volume["status"] in ("available", "in-use")
if not res:
- reason = _("Volume %s is not backed because it is in %s status" % (volume_id, volume['status']))
+ reason = _(
+ "Volume %s is not backed because it is in %s status"
+ % (volume_id, volume["status"])
+ )
LOG.info(reason)
self.result.add_failed_backup(project_id, volume_id, reason)
return res
@@ -112,17 +123,18 @@
reason = _("Cancel backup %s because of timeout." % task.backup_id)
LOG.info(reason)
- if project_id not in self.project_list: self.process_non_existing_backup(task)
+ if project_id not in self.project_list:
+ self.process_non_existing_backup(task)
self.openstacksdk.set_project(self.project_list[project_id])
backup = self.openstacksdk.get_backup(task.backup_id)
- if backup == None: return task.delete_queue()
+ if backup == None:
+ return task.delete_queue()
self.openstacksdk.delete_backup(task.backup_id, force=True)
task.delete_queue()
self.result.add_failed_backup(task.project_id, task.volume_id, reason)
except OpenstackSDKException as e:
- reason = _("Backup %s deletion failed."
- "%s" % (task.backup_id, str(e)))
+ reason = _("Backup %s deletion failed." "%s" % (task.backup_id, str(e)))
LOG.info(reason)
# remove from the queue table
task.delete_queue()
@@ -133,8 +145,13 @@
try:
backup = self.openstacksdk.get_backup(backup_object.backup_id)
if backup == None:
- LOG.info(_("Backup %s is not existing in Openstack."
- "Or cinder-backup is not existing in the cloud." % backup_object.backup_id))
+ LOG.info(
+ _(
+ "Backup %s is not existing in Openstack."
+ "Or cinder-backup is not existing in the cloud."
+ % backup_object.backup_id
+ )
+ )
return backup_object.delete_backup()
if backup["status"] in ("available"):
self.openstacksdk.delete_backup(backup_object.backup_id)
@@ -146,13 +163,17 @@
# backup table so user can delete it on Horizon.
backup_object.delete_backup()
else: # "deleting", "restoring"
- LOG.info(_("Rotation for the backup %s is skipped in this cycle "
- "because it is in %s status") % (backup_object.backup_id, backup["status"]))
+ LOG.info(
+ _(
+ "Rotation for the backup %s is skipped in this cycle "
+ "because it is in %s status"
+ )
+ % (backup_object.backup_id, backup["status"])
+ )
except OpenstackSDKException as e:
LOG.info(
- _("Backup %s deletion failed." "%s" % (backup_object.backup_id,
- str(e)))
+ _("Backup %s deletion failed." "%s" % (backup_object.backup_id, str(e)))
)
# TODO(Alex): Add it into the notification queue
# remove from the backup table
@@ -167,11 +188,17 @@
backup_object.delete_backup()
self.openstacksdk.set_project(self.project_list[project_id])
- backup = self.openstacksdk.get_backup(uuid=backup_object.backup_id,
- project_id=project_id)
+ backup = self.openstacksdk.get_backup(
+ uuid=backup_object.backup_id, project_id=project_id
+ )
if backup == None:
- LOG.info(_("Backup %s is not existing in Openstack."
- "Or cinder-backup is not existing in the cloud." % backup_object.backup_id))
+ LOG.info(
+ _(
+ "Backup %s is not existing in Openstack."
+ "Or cinder-backup is not existing in the cloud."
+ % backup_object.backup_id
+ )
+ )
return backup_object.delete_backup()
self.openstacksdk.delete_backup(uuid=backup_object.backup_id)
@@ -179,8 +206,7 @@
except OpenstackSDKException as e:
LOG.info(
- _("Backup %s deletion failed." "%s" % (backup_object.backup_id,
- str(e)))
+ _("Backup %s deletion failed." "%s" % (backup_object.backup_id, str(e)))
)
# TODO(Alex): Add it into the notification queue
@@ -206,16 +232,22 @@
try:
servers = self.openstacksdk.get_servers(project_id=project.id)
except OpenstackHttpException as ex:
- LOG.warn(_("Failed to list servers in project %s. %s"
- % (project.id, str(ex))))
+ LOG.warn(
+ _(
+ "Failed to list servers in project %s. %s"
+ % (project.id, str(ex))
+ )
+ )
continue
for server in servers:
- if not self.filter_by_server_metadata(server.metadata): continue
+ if not self.filter_by_server_metadata(server.metadata):
+ continue
if empty_project:
empty_project = False
self.result.add_project(project.id, project.name)
for volume in server.attached_volumes:
- if not self.filter_by_volume_status(volume["id"], project.id): continue
+ if not self.filter_by_volume_status(volume["id"], project.id):
+ continue
queues_map.append(
QueueMapping(
project_id=project.id,
@@ -250,21 +282,29 @@
try:
# NOTE(Alex): no need to wait because we have a cycle time out
if project_id not in self.project_list:
- LOG.warn(_("Project ID %s is not existing in project list"
- % project_id))
+ LOG.warn(
+ _("Project ID %s is not existing in project list" % project_id)
+ )
self.process_non_existing_backup(queue)
return
self.openstacksdk.set_project(self.project_list[project_id])
- LOG.info(_("Backup for volume %s creating in project %s"
- % (queue.volume_id, project_id)))
- volume_backup = self.openstacksdk.create_backup(volume_id=queue.volume_id,
- project_id=project_id)
+ LOG.info(
+ _(
+ "Backup for volume %s creating in project %s"
+ % (queue.volume_id, project_id)
+ )
+ )
+ volume_backup = self.openstacksdk.create_backup(
+ volume_id=queue.volume_id, project_id=project_id
+ )
queue.backup_id = volume_backup.id
queue.backup_status = constants.BACKUP_WIP
queue.save()
except OpenstackSDKException as error:
- reason = _("Backup creation for the volume %s failled. %s"
- % (queue.volume_id, str(error)))
+ reason = _(
+ "Backup creation for the volume %s failled. %s"
+ % (queue.volume_id, str(error))
+ )
LOG.info(reason)
self.result.add_failed_backup(project_id, queue.volume_id, reason)
parsed = parse.parse("Error in creating volume backup {id}", str(error))
@@ -274,8 +314,10 @@
queue.save()
# Added extra exception as OpenstackSDKException does not handle the keystone unauthourized issue.
except Exception as error:
- reason = _("Backup creation for the volume %s failled. %s"
- % (queue.volume_id, str(error)))
+ reason = _(
+ "Backup creation for the volume %s failled. %s"
+ % (queue.volume_id, str(error))
+ )
LOG.error(reason)
self.result.add_failed_backup(project_id, queue.volume_id, reason)
parsed = parse.parse("Error in creating volume backup {id}", str(error))
@@ -291,8 +333,9 @@
# backup gen was not created
def process_pre_failed_backup(self, task):
# 1.notify via email
- reason = _("The backup creation for the volume %s was prefailed."
- % task.volume_id)
+ reason = _(
+ "The backup creation for the volume %s was prefailed." % task.volume_id
+ )
self.result.add_failed_backup(task.project_id, task.volume_id, reason)
# LOG.error(reason)
# 2. remove failed task from the task queue
@@ -344,13 +387,16 @@
if queue.backup_id == "NULL":
self.process_pre_failed_backup(queue)
return
- if project_id not in self.project_list: self.process_non_existing_backup(queue)
+ if project_id not in self.project_list:
+ self.process_non_existing_backup(queue)
self.openstacksdk.set_project(self.project_list[project_id])
backup_gen = self.openstacksdk.get_backup(queue.backup_id)
if backup_gen == None:
# TODO(Alex): need to check when it is none
- LOG.info(_("[Beta] Backup status of %s is returning none." % (queue.backup_id)))
+ LOG.info(
+ _("[Beta] Backup status of %s is returning none." % (queue.backup_id))
+ )
self.process_non_existing_backup(queue)
return
if backup_gen.status == "error":
@@ -362,7 +408,6 @@
else: # "deleting", "restoring", "error_restoring" status
self.process_using_backup(queue)
-
def _volume_backup(self, task):
# matching_backups = [
# g for g in self.available_backups if g.backup_id == task.backup_id
diff --git a/staffeln/conductor/manager.py b/staffeln/conductor/manager.py
index 5388df5..e885043 100755
--- a/staffeln/conductor/manager.py
+++ b/staffeln/conductor/manager.py
@@ -44,7 +44,7 @@
self.cycle_start_time = xtime.get_current_time()
# loop - take care of backup result while timeout
- while (1):
+ while 1:
queues_started = self.controller.get_queues(
filters={"backup_status": constants.BACKUP_WIP}
)
@@ -53,21 +53,31 @@
break
if not self._backup_cycle_timeout(): # time in
LOG.info(_("cycle timein"))
- for queue in queues_started: self.controller.check_volume_backup_status(queue)
+ for queue in queues_started:
+ self.controller.check_volume_backup_status(queue)
else: # time out
LOG.info(_("cycle timeout"))
- for queue in queues_started: self.controller.hard_cancel_backup_task(queue)
+ for queue in queues_started:
+ self.controller.hard_cancel_backup_task(queue)
break
time.sleep(constants.BACKUP_RESULT_CHECK_INTERVAL)
# if the backup cycle timeout, then return True
def _backup_cycle_timeout(self):
- time_delta_dict = xtime.parse_timedelta_string(CONF.conductor.backup_cycle_timout)
+ time_delta_dict = xtime.parse_timedelta_string(
+ CONF.conductor.backup_cycle_timout
+ )
if time_delta_dict == None:
- LOG.info(_("Recycle timeout format is invalid. "
- "Follow <YEARS>y<MONTHS>m<WEEKS>w<DAYS>d<HOURS>h<MINUTES>min<SECONDS>s."))
- time_delta_dict = xtime.parse_timedelta_string(constants.DEFAULT_BACKUP_CYCLE_TIMEOUT)
+ LOG.info(
+ _(
+ "Recycle timeout format is invalid. "
+ "Follow <YEARS>y<MONTHS>m<WEEKS>w<DAYS>d<HOURS>h<MINUTES>min<SECONDS>s."
+ )
+ )
+ time_delta_dict = xtime.parse_timedelta_string(
+ constants.DEFAULT_BACKUP_CYCLE_TIMEOUT
+ )
rto = xtime.timeago(
years=time_delta_dict["years"],
months=time_delta_dict["months"],
@@ -119,11 +129,14 @@
periodic_callables = [
(backup_tasks, (), {}),
]
- periodic_worker = periodics.PeriodicWorker(periodic_callables, schedule_strategy="last_finished")
+ periodic_worker = periodics.PeriodicWorker(
+ periodic_callables, schedule_strategy="last_finished"
+ )
periodic_thread = threading.Thread(target=periodic_worker.start)
periodic_thread.daemon = True
periodic_thread.start()
+
class RotationManager(cotyledon.Service):
name = "Staffeln conductor rotation controller"
@@ -147,8 +160,11 @@
def get_backup_list(self):
threshold_strtime = self.get_threshold_strtime()
- if threshold_strtime == None: return False
- self.backup_list = self.controller.get_backups(filters={"created_at__lt": threshold_strtime})
+ if threshold_strtime == None:
+ return False
+ self.backup_list = self.controller.get_backups(
+ filters={"created_at__lt": threshold_strtime}
+ )
return True
def remove_backups(self):
@@ -163,15 +179,19 @@
def rotation_tasks():
self.controller.refresh_openstacksdk()
# 1. get the list of backups to remove based on the retention time
- if not self.get_backup_list(): return
+ if not self.get_backup_list():
+ return
# 2. get project list
self.controller.update_project_list()
# 3. remove the backups
self.remove_backups()
+
periodic_callables = [
(rotation_tasks, (), {}),
]
- periodic_worker = periodics.PeriodicWorker(periodic_callables, schedule_strategy="last_finished")
+ periodic_worker = periodics.PeriodicWorker(
+ periodic_callables, schedule_strategy="last_finished"
+ )
periodic_thread = threading.Thread(target=periodic_worker.start)
periodic_thread.daemon = True
periodic_thread.start()
@@ -180,8 +200,12 @@
def get_threshold_strtime(self):
time_delta_dict = xtime.parse_timedelta_string(CONF.conductor.retention_time)
if time_delta_dict == None:
- LOG.info(_("Retention time format is invalid. "
- "Follow <YEARS>y<MONTHS>m<WEEKS>w<DAYS>d<HOURS>h<MINUTES>min<SECONDS>s."))
+ LOG.info(
+ _(
+ "Retention time format is invalid. "
+ "Follow <YEARS>y<MONTHS>m<WEEKS>w<DAYS>d<HOURS>h<MINUTES>min<SECONDS>s."
+ )
+ )
return None
res = xtime.timeago(
diff --git a/staffeln/conductor/result.py b/staffeln/conductor/result.py
index b16bcc6..3a6d640 100644
--- a/staffeln/conductor/result.py
+++ b/staffeln/conductor/result.py
@@ -16,7 +16,6 @@
class BackupResult(object):
-
def __init__(self):
pass
@@ -27,11 +26,9 @@
self.failed_backup_list = {}
def add_project(self, id, name):
- if id in self.success_backup_list: return
- self.project_list.append({
- "name": name,
- "id": id
- })
+ if id in self.success_backup_list:
+ return
+ self.project_list.append({"name": name, "id": id})
self.success_backup_list[id] = []
self.failed_backup_list[id] = []
@@ -39,24 +36,29 @@
if not project_id in self.success_backup_list:
LOG.error(_("Not registered project is reported for backup result."))
return
- self.success_backup_list[project_id].append({
- "volume_id": volume_id,
- "backup_id": backup_id,
- })
+ self.success_backup_list[project_id].append(
+ {
+ "volume_id": volume_id,
+ "backup_id": backup_id,
+ }
+ )
def add_failed_backup(self, project_id, volume_id, reason):
if not project_id in self.failed_backup_list:
LOG.error(_("Not registered project is reported for backup result."))
return
- self.failed_backup_list[project_id].append({
- "volume_id": volume_id,
- "reason": reason,
- })
+ self.failed_backup_list[project_id].append(
+ {
+ "volume_id": volume_id,
+ "reason": reason,
+ }
+ )
def send_result_email(self):
subject = "Backup result"
try:
- if len(CONF.notification.receiver) == 0: return
+ if len(CONF.notification.receiver) == 0:
+ return
email.send(
src_email=CONF.notification.sender_email,
src_pwd=CONF.notification.sender_pwd,
@@ -68,7 +70,12 @@
)
LOG.info(_("Backup result email sent"))
except Exception as e:
- LOG.error(_("Backup result email send failed. Please check email configuration. %s" % (str(e))))
+ LOG.error(
+ _(
+ "Backup result email send failed. Please check email configuration. %s"
+ % (str(e))
+ )
+ )
def publish(self):
# 1. get quota
@@ -78,16 +85,17 @@
for project in self.project_list:
quota = backup.Backup().get_backup_quota(project["id"])
- html += "<h3>Project: ${PROJECT}</h3><br>" \
- "<h3>Quota Usage</h3><br>" \
- "<h4>Limit: ${QUOTA_LIMIT}, In Use: ${QUOTA_IN_USE}, Reserved: ${QUOTA_RESERVED}</h4><br>" \
- "<h3>Success List</h3><br>" \
- "<h4>${SUCCESS_VOLUME_LIST}</h4><br>" \
- "<h3>Failed List</h3><br>" \
- "<h4>${FAILED_VOLUME_LIST}</h4><br>"
+ html += (
+ "<h3>Project: ${PROJECT}</h3><br>"
+ "<h3>Quota Usage</h3><br>"
+ "<h4>Limit: ${QUOTA_LIMIT}, In Use: ${QUOTA_IN_USE}, Reserved: ${QUOTA_RESERVED}</h4><br>"
+ "<h3>Success List</h3><br>"
+ "<h4>${SUCCESS_VOLUME_LIST}</h4><br>"
+ "<h3>Failed List</h3><br>"
+ "<h4>${FAILED_VOLUME_LIST}</h4><br>"
+ )
success_volumes = "<br>".join(
-
[
"Volume ID: %s, Backup ID: %s"
% (str(e["volume_id"]), str(e["backup_id"]))
@@ -108,6 +116,7 @@
html = html.replace("${SUCCESS_VOLUME_LIST}", success_volumes)
html = html.replace("${FAILED_VOLUME_LIST}", failed_volumes)
html = html.replace("${PROJECT}", project["name"])
- if html == "": return
+ if html == "":
+ return
self.content += html
self.send_result_email()
diff --git a/staffeln/conf/conductor.py b/staffeln/conf/conductor.py
index 69f7fe2..cb665d5 100755
--- a/staffeln/conf/conductor.py
+++ b/staffeln/conf/conductor.py
@@ -12,8 +12,10 @@
cfg.IntOpt(
"backup_workers",
default=1,
- help=_("The maximum number of backup processes to "
- "fork and run. Default to number of CPUs on the host."),
+ help=_(
+ "The maximum number of backup processes to "
+ "fork and run. Default to number of CPUs on the host."
+ ),
),
cfg.IntOpt(
"backup_service_period",
@@ -23,10 +25,12 @@
),
cfg.StrOpt(
"backup_cycle_timout",
- regex=r'((?P<years>\d+?)y)?((?P<months>\d+?)mon)?((?P<weeks>\d+?)w)?((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)min)?((?P<seconds>\d+?)s)?',
+ regex=r"((?P<years>\d+?)y)?((?P<months>\d+?)mon)?((?P<weeks>\d+?)w)?((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)min)?((?P<seconds>\d+?)s)?",
default=constants.DEFAULT_BACKUP_CYCLE_TIMEOUT,
- help=_("The duration while the backup cycle waits backups."
- "<YEARS>y<MONTHS>mon<WEEKS>w<DAYS>d<HOURS>h<MINUTES>min<SECONDS>s."),
+ help=_(
+ "The duration while the backup cycle waits for backups."
+ "<YEARS>y<MONTHS>mon<WEEKS>w<DAYS>d<HOURS>h<MINUTES>min<SECONDS>s."
+ ),
),
cfg.StrOpt(
"backup_metadata_key",
@@ -38,8 +42,10 @@
cfg.IntOpt(
"rotation_workers",
default=1,
- help=_("The maximum number of rotation processes to "
- "fork and run. Default to number of CPUs on the host."),
+ help=_(
+ "The maximum number of rotation processes to "
+ "fork and run. Default to number of CPUs on the host."
+ ),
),
cfg.IntOpt(
"retention_service_period",
@@ -50,15 +56,19 @@
cfg.IntOpt(
"rotation_workers",
default=1,
- help=_("The maximum number of rotation processes to "
- "fork and run. Default to number of CPUs on the host."),
+ help=_(
+ "The maximum number of rotation processes to "
+ "fork and run. Default to number of CPUs on the host."
+ ),
),
cfg.StrOpt(
"retention_time",
- regex=r'((?P<years>\d+?)y)?((?P<months>\d+?)mon)?((?P<weeks>\d+?)w)?((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)min)?((?P<seconds>\d+?)s)?',
+ regex=r"((?P<years>\d+?)y)?((?P<months>\d+?)mon)?((?P<weeks>\d+?)w)?((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)min)?((?P<seconds>\d+?)s)?",
default="2w3d",
- help=_("The time of retention period, the for mat is "
- "<YEARS>y<MONTHS>mon<WEEKS>w<DAYS>d<HOURS>h<MINUTES>min<SECONDS>s."),
+ help=_(
+ "The time of retention period, the format is "
+ "<YEARS>y<MONTHS>mon<WEEKS>w<DAYS>d<HOURS>h<MINUTES>min<SECONDS>s."
+ ),
),
]
diff --git a/staffeln/db/api.py b/staffeln/db/api.py
index db0255f..029ad11 100644
--- a/staffeln/db/api.py
+++ b/staffeln/db/api.py
@@ -12,6 +12,5 @@
"""Return a DB API instance."""
return IMPL
-
-# class BaseConnection(object, metaclass=abc.ABCMeta):
+ # class BaseConnection(object, metaclass=abc.ABCMeta):
"""Base class for storage system connections."""
diff --git a/staffeln/db/migration.py b/staffeln/db/migration.py
index 176d66a..24e03ee 100644
--- a/staffeln/db/migration.py
+++ b/staffeln/db/migration.py
@@ -12,8 +12,9 @@
def get_backend():
global _IMPL
if not _IMPL:
- _IMPL = driver.DriverManager("staffeln.database.migration_backend",
- CONF.database.backend).driver
+ _IMPL = driver.DriverManager(
+ "staffeln.database.migration_backend", CONF.database.backend
+ ).driver
return _IMPL
diff --git a/staffeln/db/sqlalchemy/api.py b/staffeln/db/sqlalchemy/api.py
index 0148c3d..c777c19 100644
--- a/staffeln/db/sqlalchemy/api.py
+++ b/staffeln/db/sqlalchemy/api.py
@@ -109,14 +109,13 @@
if filters is None:
filters = {}
-
plain_fields = [
"volume_id",
"backup_id",
"project_id",
"backup_completed",
"instance_id",
- "created_at"
+ "created_at",
]
return self._add_filters(
@@ -317,7 +316,6 @@
except:
LOG.error("Queue Not found.")
-
def get_backup_by_backup_id(self, context, backup_id):
"""Get the column from the backup_data with matching backup_id"""
@@ -336,7 +334,6 @@
except:
LOG.error("Backup resource not found.")
-
def soft_delete_backup(self, id):
try:
return self._soft_delete(models.Backup_data, id)
diff --git a/staffeln/i18n.py b/staffeln/i18n.py
index 462c91c..09fe8aa 100755
--- a/staffeln/i18n.py
+++ b/staffeln/i18n.py
@@ -4,7 +4,7 @@
import oslo_i18n
-DOMAIN = 'staffeln'
+DOMAIN = "staffeln"
_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
diff --git a/staffeln/objects/__init__.py b/staffeln/objects/__init__.py
index 4f7ca56..4a5adfd 100755
--- a/staffeln/objects/__init__.py
+++ b/staffeln/objects/__init__.py
@@ -1,6 +1,7 @@
from .queue import Queue
from .volume import Volume
+
# from volume import Volume
def register_all():
- __import__('staffeln.objects.volume')
- __import__('staffeln.objects.queue')
+ __import__("staffeln.objects.volume")
+ __import__("staffeln.objects.queue")
diff --git a/staffeln/objects/volume.py b/staffeln/objects/volume.py
index ce300af..1216c64 100644
--- a/staffeln/objects/volume.py
+++ b/staffeln/objects/volume.py
@@ -83,4 +83,3 @@
else:
backup = cls._from_db_object(cls(context), db_backup)
return backup
-
\ No newline at end of file
diff --git a/staffeln/tests/test_staffeln.py b/staffeln/tests/test_staffeln.py
index a8b6e32..6c7c5f3 100755
--- a/staffeln/tests/test_staffeln.py
+++ b/staffeln/tests/test_staffeln.py
@@ -23,6 +23,5 @@
class TestStaffeln(base.TestCase):
-
def test_something(self):
pass
diff --git a/staffeln/version.py b/staffeln/version.py
index 0f9e8d2..1ef8d77 100755
--- a/staffeln/version.py
+++ b/staffeln/version.py
@@ -1,5 +1,5 @@
import pbr.version
-version_info = pbr.version.VersionInfo('staffeln')
+version_info = pbr.version.VersionInfo("staffeln")
version_string = version_info.version_string