Added descriptions for some functions and formatted with black.
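
Docstrings are added to the conductor queue/backup classes, the
SQLAlchemy DB API helpers, and the versioned objects, and the touched
modules are reformatted with black. Leftover debug prints and a
commented-out import are also removed, and get_queue_by_uuid is renamed
to get_queue_by_backup_id, since queue entries are looked up by their
backup_id rather than by UUID. A caller-side sketch of the renamed
lookup (assuming a request context and the DB API instance):

    from staffeln.common import context
    from staffeln.db import api as db_api

    ctx = context.make_context()
    dbapi = db_api.get_instance()
    # backup_id is the generated short id stored in queue_data, not a UUID
    queue = dbapi.get_queue_by_backup_id(ctx, backup_id)
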
diff --git a/staffeln/common/context.py b/staffeln/common/context.py
index d916b32..a223558 100644
--- a/staffeln/common/context.py
+++ b/staffeln/common/context.py
@@ -6,8 +6,17 @@
 
 
 class RequestContext(context.RequestContext):
+    """Added security context with request parameters from openstack common library"""
 
-    def __init__(self, backup_id=None, volume_id=None, instance_id=None, executed_at=None, backup_status=None, **kwargs):
+    def __init__(
+        self,
+        backup_id=None,
+        volume_id=None,
+        instance_id=None,
+        executed_at=None,
+        backup_status=None,
+        **kwargs
+    ):
         self.backup_id = backup_id
         self.volume_id = volume_id
         self.instance_id = instance_id
diff --git a/staffeln/conductor/backup.py b/staffeln/conductor/backup.py
index ea3c20a..18e5347 100755
--- a/staffeln/conductor/backup.py
+++ b/staffeln/conductor/backup.py
@@ -4,7 +4,6 @@
 from oslo_log import log
 from staffeln.common import auth
 from staffeln.common import context
-# from staffeln.objects import backup as backup_api
 from staffeln import objects
 from staffeln.common import short_id
 
@@ -13,11 +12,11 @@
 
 
 BackupMapping = collections.namedtuple(
-    'BackupMapping', ['volume_id', 'backup_id', 'instance_id', 'backup_completed'])
+    "BackupMapping", ["volume_id", "backup_id", "instance_id", "backup_completed"]
+)
 
 QueueMapping = collections.namedtuple(
-    'QueueMapping', ['volume_id', 'backup_id',
-                     'instance_id', 'backup_status']
+    "QueueMapping", ["volume_id", "backup_id", "instance_id", "backup_status"]
 )
 
 conn = auth.create_connection()
@@ -26,7 +25,7 @@
 def check_vm_backup_metadata(metadata):
     if not CONF.conductor.backup_metadata_key in metadata:
         return False
-    return metadata[CONF.conductor.backup_metadata_key].lower() in ['true']
+    return metadata[CONF.conductor.backup_metadata_key].lower() in ["true"]
 
 
 def backup_volumes_in_project(conn, project_name):
@@ -36,10 +35,12 @@
 
 def get_projects_list():
     projects = conn.list_projects()
-    return(projects)
+    return projects
 
 
 class Queue(object):
+    """Implementation of the backup queue, backed by SQL."""
+
     def __init__(self):
         self.ctx = context.make_context()
         self.discovered_map = None
@@ -51,64 +52,70 @@
     def available_queues(self):
         """Queues loaded from DB"""
         if self._available_queues is None:
-            self._available_queues = objects.Queue.list(
-                self.ctx)
+            self._available_queues = objects.Queue.list(self.ctx)
         return self._available_queues
 
     @property
     def available_queues_map(self):
-        """Mapping of backup loaded from DB"""
+        """Mapping of backup queue loaded from DB"""
         if self._available_queues_map is None:
             self._available_queues_map = {
                 QueueMapping(
                     backup_id=g.backup_id,
                     volume_id=g.volume_id,
                     instance_id=g.instance_id,
-                    backup_status=g.backup_status): g
+                    backup_status=g.backup_status,
+                ): g
                 for g in self.available_queues
             }
         return self._available_queues_map
 
     def get_queues(self, filters=None):
+        """Get the list of queue records from the queue_data table"""
         queues = objects.Queue.list(self.ctx, filters=filters)
         return queues
 
     def create_queue(self):
+        """Create the queue of all the volumes for backup"""
         self.discovered_map = self.check_instance_volumes()
         queues_map = self.discovered_map["queues"]
         for queue_name, queue_map in queues_map.items():
             self._volume_queue(queue_map)
 
     def check_instance_volumes(self):
+        """Get the list of all volumes in the project using openstacksdk.
+        The function first lists all the servers in the project and then
+        collects the volumes that are attached to each instance.
+        """
         queues_map = {}
-        discovered_map = {
-            "queues": queues_map
-        }
+        discovered_map = {"queues": queues_map}
         projects = get_projects_list()
         for project in projects:
             servers = conn.compute.servers(
-                details=True, all_projects=True, project_id=project.id)
+                details=True, all_projects=True, project_id=project.id
+            )
             for server in servers:
                 server_id = server.host_id
                 volumes = server.attached_volumes
                 for volume in volumes:
-                    queues_map['queues'] = QueueMapping(
-                        volume_id=volume['id'],
+                    queues_map["queues"] = QueueMapping(
+                        volume_id=volume["id"],
                         backup_id=short_id.generate_id(),
                         instance_id=server_id,
-                        backup_status=1
+                        backup_status=1,
                     )
         return discovered_map
 
     def _volume_queue(self, queue_map):
-        # print(queue_map)
+        """Save the queue data to the database."""
        volume_id = queue_map.volume_id
         backup_id = queue_map.backup_id
         instance_id = queue_map.instance_id
         backup_status = queue_map.backup_status
         backup_mapping = dict()
-        matching_backups = [g for g in self.available_queues
-                            if g.backup_id == backup_id]
+        matching_backups = [
+            g for g in self.available_queues if g.backup_id == backup_id
+        ]
         if not matching_backups:
             volume_queue = objects.Queue(self.ctx)
             volume_queue.backup_id = backup_id
@@ -119,6 +126,7 @@
 
 
 class Backup_data(object):
+    """Implementation of volume backups."""
 
     def __init__(self):
         self.ctx = context.make_context()
@@ -143,7 +151,8 @@
                     backup_id=g.backup_id,
                     volume_id=g.volume_id,
                     instance_id=g.instance_id,
-                    backup_completed=g.backup_completed): g
+                    backup_completed=g.backup_completed,
+                ): g
                 for g in self.available_backups
             }
         return self._available_backups_map
@@ -152,15 +161,14 @@
         pass
 
     def _volume_backup(self, backup_map):
+        """Save the backup data to the database."""
         volume_id = backup_map.volume_id
         backup_id = backup_map.backup_id
         instance_id = backup_map.instance_id
         backup_mapping = dict()
-        for g in self.available_backups:
-            print(g)
-            print(g.volume_id)
-        matching_backups = [g for g in self.available_backups
-                            if g.backup_id == backup_id]
+        matching_backups = [
+            g for g in self.available_backups if g.backup_id == backup_id
+        ]
         if not matching_backups:
             volume = objects.Volume(self.ctx)
             volume.backup_id = backup_id
diff --git a/staffeln/conf/api.py b/staffeln/conf/api.py
index 56dab61..7402740 100755
--- a/staffeln/conf/api.py
+++ b/staffeln/conf/api.py
@@ -2,40 +2,28 @@
 
 
 api_group = cfg.OptGroup(
-    'api',
-    title='API options',
-    help='Options under this group are used to define staffeln API.'
+    "api",
+    title="API options",
+    help="Options under this group are used to define staffeln API.",
 )
 
 connection_opts = [
     cfg.StrOpt(
-        'host',
+        "host",
         default="0.0.0.0",
-        help='IP address on which the staffeln API will listen.'
+        help="IP address on which the staffeln API will listen.",
     ),
     cfg.PortOpt(
-        'port',
+        "port",
         default=8808,
-        help='Staffeln API listens on this port number for incoming requests.'
+        help="Staffeln API listens on this port number for incoming requests.",
     ),
-    cfg.BoolOpt(
-        'enabled_ssl',
-        default=False,
-        help='ssl enabled'
-    ),
-    cfg.StrOpt(
-        'ssl_key_file',
-        default=False,
-        help='ssl key file path'
-    ),
-    cfg.StrOpt(
-        'ssl_cert_file',
-        default=False,
-        help='ssl cert file path'
-    ),
+    cfg.BoolOpt("enabled_ssl", default=False, help="ssl enabled"),
+    cfg.StrOpt("ssl_key_file", default=False, help="ssl key file path"),
+    cfg.StrOpt("ssl_cert_file", default=False, help="ssl cert file path"),
 ]
 
-API_OPTS = (connection_opts)
+API_OPTS = connection_opts
 
 
 def register_opts(conf):
diff --git a/staffeln/conf/conductor.py b/staffeln/conf/conductor.py
index 57f9f6f..b83737d 100755
--- a/staffeln/conf/conductor.py
+++ b/staffeln/conf/conductor.py
@@ -1,40 +1,44 @@
 from oslo_config import cfg
 
 conductor_group = cfg.OptGroup(
-    'conductor',
-    title='Conductor Options',
-    help='Options under this group are used '
-         'to define Conductor\'s configuration.',
+    "conductor",
+    title="Conductor Options",
+    help="Options under this group are used " "to define Conductor's configuration.",
 )
 
 backup_opts = [
     cfg.IntOpt(
-        'backup_workers',
+        "backup_workers",
         default=1,
-        help='The maximum number of backup processes to '
-             'fork and run. Default to number of CPUs on the host.'),
+        help="The maximum number of backup processes to "
+        "fork and run. Default to number of CPUs on the host.",
+    ),
     cfg.IntOpt(
-        'backup_period',
+        "backup_period",
         default=10,
         min=1,
-        help='The time of bakup period, the unit is one minute.'),
+        help="The time of bakup period, the unit is one minute.",
+    ),
     cfg.StrOpt(
-        'backup_metadata_key',
+        "backup_metadata_key",
         default="test",
-        help='The key string of metadata the VM, which requres back up, has'),
+        help="The key string of metadata the VM, which requres back up, has",
+    ),
 ]
 
 rotation_opts = [
     cfg.IntOpt(
-        'rotation_workers',
+        "rotation_workers",
         default=1,
-        help='The maximum number of rotation processes to '
-             'fork and run. Default to number of CPUs on the host.'),
+        help="The maximum number of rotation processes to "
+        "fork and run. Default to number of CPUs on the host.",
+    ),
     cfg.IntOpt(
-        'rotation_period',
+        "rotation_period",
         default=1,
         min=1,
-        help='The time of rotation period, the unit is one day.'),
+        help="The time of rotation period, the unit is one day.",
+    ),
 ]
 
 CONDUCTOR_OPTS = (backup_opts, rotation_opts)
@@ -47,5 +51,4 @@
 
 
 def list_opts():
-    return {"DEFAULT": rotation_opts,
-            conductor_group: backup_opts}
+    return {"DEFAULT": rotation_opts, conductor_group: backup_opts}
diff --git a/staffeln/conf/database.py b/staffeln/conf/database.py
index bd8a6df..5f636f2 100644
--- a/staffeln/conf/database.py
+++ b/staffeln/conf/database.py
@@ -1,19 +1,16 @@
 from oslo_config import cfg
 from oslo_db import options as oslo_db_options
 
-_DEFAULT_SQL_CONNECTION = 'mysql+pymysql://root:password@localhost:3306/staffeln'
+_DEFAULT_SQL_CONNECTION = "mysql+pymysql://root:password@localhost:3306/staffeln"
 
 database = cfg.OptGroup(
-    'database',
-    title='Database options',
-    help='Options under this group are used for defining database.'
+    "database",
+    title="Database options",
+    help="Options under this group are used for defining database.",
 )
 
 SQL_OPTS = [
-    cfg.StrOpt('mysql_engine',
-               default='InnoDB',
-               help='MySQL engine to use.'
-               ),
+    cfg.StrOpt("mysql_engine", default="InnoDB", help="MySQL engine to use."),
 ]
 
 
diff --git a/staffeln/db/api.py b/staffeln/db/api.py
index 7caa0a4..a623f7a 100644
--- a/staffeln/db/api.py
+++ b/staffeln/db/api.py
@@ -4,9 +4,8 @@
 from oslo_config import cfg
 from oslo_db import api as db_api
 
-_BACKEND_MAPPING = {'sqlalchemy': 'staffeln.db.sqlalchemy.api'}
-IMPL = db_api.DBAPI.from_config(
-    cfg.CONF, backend_mapping=_BACKEND_MAPPING, lazy=True)
+_BACKEND_MAPPING = {"sqlalchemy": "staffeln.db.sqlalchemy.api"}
+IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING, lazy=True)
 
 
 def get_instance():
@@ -63,7 +62,7 @@
     @abc.abstractmethod
     def create_queue(self, values):
         """Create entry in queue_data.
-        :param values: A dict containing several items used to add 
+        :param values: A dict containing several items used to add
                         the volume information for backup
 
                         ::
diff --git a/staffeln/db/sqlalchemy/api.py b/staffeln/db/sqlalchemy/api.py
index 52d9853..f6c3ea4 100644
--- a/staffeln/db/sqlalchemy/api.py
+++ b/staffeln/db/sqlalchemy/api.py
@@ -56,7 +56,7 @@
 
 
 def model_query(model, *args, **kwargs):
-    session = kwargs.get('session') or get_session()
+    session = kwargs.get("session") or get_session()
     query = session.query(model, *args)
     return query
 
@@ -64,7 +64,7 @@
 def add_identity_filter(query, value):
     """Adds an identity filter to a query.
     Filters results by ID, if supplied value is a valid integer.
-    Otherwise attempts to filter results by UUID.
+    Otherwise attempts to filter results by backup_id.
     :param query: Initial query to add filter to.
     :param value: Value for filtering results by.
     :return: Modified query.
@@ -77,15 +77,17 @@
         LOG.error("Invalid Identity")
 
 
-def _paginate_query(model, limit=None, marker=None, sort_key=None,
-                    sort_dir=None, query=None):
+def _paginate_query(
+    model, limit=None, marker=None, sort_key=None, sort_dir=None, query=None
+):
     if not query:
         query = model_query(model)
-    sort_keys = ['id']
+    sort_keys = ["id"]
     if sort_key and sort_key not in sort_keys:
         sort_keys.insert(0, sort_key)
-    query = db_utils.paginate_query(query, model, limit, sort_keys,
-                                    marker=marker, sort_dir=sort_dir)
+    query = db_utils.paginate_query(
+        query, model, limit, sort_keys, marker=marker, sort_dir=sort_dir
+    )
     return query.all()
 
 
@@ -110,44 +112,60 @@
         return inspect(model).relationships
 
     def _add_backup_filters(self, query, filters):
+        """Add filters while listing the columns from the backup_data table"""
         if filters is None:
             filters = {}
 
-        plain_fields = ['volume_id', 'backup_id',
-                        'backup_completed', 'instance_id']
+        plain_fields = ["volume_id", "backup_id", "backup_completed", "instance_id"]
 
         return self._add_filters(
-            query=query, model=models.Backup_data, filters=filters, plain_fields=plain_fields
+            query=query,
+            model=models.Backup_data,
+            filters=filters,
+            plain_fields=plain_fields,
         )
 
     def _add_queues_filters(self, query, filters):
+        """Add filters while listing the columns from the queue_data table"""
         if filters is None:
             filters = {}
 
-        plain_fields = ['backup_id', 'volume_id',
-                        'instance_id', 'executed_at', 'backup_status']
+        plain_fields = [
+            "backup_id",
+            "volume_id",
+            "instance_id",
+            "backup_status",
+        ]
 
         return self._add_filters(
-            query=query, model=models.Queue_data, filters=filters,
-            plain_fields=plain_fields)
+            query=query,
+            model=models.Queue_data,
+            filters=filters,
+            plain_fields=plain_fields,
+        )
 
     def _add_filters(self, query, model, filters=None, plain_fields=None):
-        timestamp_mixin_fields = ['created_at', 'updated_at']
+        """Add filters while listing the columns from database table"""
+        timestamp_mixin_fields = ["created_at", "updated_at"]
         filters = filters or {}
 
         for raw_fieldname, value in filters.items():
             fieldname, operator_ = self.__decompose_filter(raw_fieldname)
             if fieldname in plain_fields:
                 query = self.__add_simple_filter(
-                    query, model, fieldname, value, operator_)
+                    query, model, fieldname, value, operator_
+                )
 
         return query
 
     def __add_simple_filter(self, query, model, fieldname, value, operator_):
         field = getattr(model, fieldname)
 
-        if (fieldname != 'deleted' and value and
-                field.type.python_type is datetime.datetime):
+        if (
+            fieldname != "deleted"
+            and value
+            and field.type.python_type is datetime.datetime
+        ):
             if not isinstance(value, datetime.datetime):
                 value = timeutils.parse_isotime(value)
 
@@ -156,11 +174,11 @@
     def __decompose_filter(self, raw_fieldname):
         """Decompose a filter name into it's two subparts"""
 
-        seperator = '__'
+        seperator = "__"
         fieldname, seperator, operator_ = raw_fieldname.partition(seperator)
 
         if operator_ and operator_ not in self.valid_operators:
-            LOG.error('Invalid operator %s' % operator_)
+            LOG.error("Invalid operator %s" % operator_)
 
         return fieldname, operator_
 
@@ -180,49 +198,59 @@
 
     def _create(self, model, values):
         obj = model()
-        cleaned_values = {k: v for k, v in values.items()
-                          if k not in self._get_relationships(model)}
+        cleaned_values = {
+            k: v for k, v in values.items() if k not in self._get_relationships(model)
+        }
         print(cleaned_values)
         obj.update(cleaned_values)
         obj.save()
         return obj
 
-    @ staticmethod
+    @staticmethod
     def _update(model, id_, values):
         session = get_session()
         with session.begin():
             query = model_query(model, session=session)
             query = add_identity_filter(query, id_)
             try:
-                ref = query.with_lockmode('update').one()
+                ref = query.with_lockmode("update").one()
             except exc.NoResultFound:
                 LOG.error("Update backup failed. No result found.")
 
-    def _get_model_list(self, model, add_filter_func, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None, eager=False):
+    def _get_model_list(
+        self,
+        model,
+        add_filter_func,
+        context,
+        filters=None,
+        limit=None,
+        marker=None,
+        sort_key=None,
+        sort_dir=None,
+    ):
         query = model_query(model)
 
         query = add_filter_func(query, filters)
-        return _paginate_query(model, limit, marker,
-                               sort_key, sort_dir, query)
+        return _paginate_query(model, limit, marker, sort_key, sort_dir, query)
 
     def create_backup(self, values):
-        if not values.get('backup_id'):
-            values['backup_id'] = short_id.generate_id()
+        if not values.get("backup_id"):
+            values["backup_id"] = short_id.generate_id()
 
         try:
             backup_data = self._create(models.Backup_data, values)
         except db_exc.DBDuplicateEntry:
-            LOG.error("Backup UUID already exists.")
+            LOG.error("Backup ID already exists.")
         return backup_data
 
     def get_backup_list(self, *args, **kwargs):
-        return self._get_model_list(models.Backup_data,
-                                    self._add_backup_filters,
-                                    *args, **kwargs)
+        return self._get_model_list(
+            models.Backup_data, self._add_backup_filters, *args, **kwargs
+        )
 
     def update_backup(self, backup_id, values):
-        if 'backup_id' in values:
-            LOG.error("Cannot override UUID for existing backup")
+        if "backup_id" in values:
+            LOG.error("Cannot override ID for existing backup")
 
         try:
             return self._update(models.Backup_data, backup_id, values)
@@ -230,36 +258,38 @@
             LOG.error("backup resource not found.")
 
     def create_queue(self, values):
-        if not values.get('backup_id'):
-            values['backup_id'] = short_id.generate_id()
+        if not values.get("backup_id"):
+            values["backup_id"] = short_id.generate_id()
 
         try:
             queue_data = self._create(models.Queue_data, values)
         except db_exc.DBDuplicateEntry:
-            LOG.error("Backup UUID already exists.")
+            LOG.error("Backup ID already exists.")
         return queue_data
 
     def get_queue_list(self, *args, **kwargs):
-        return self._get_model_list(models.Queue_data,
-                                    self._add_queues_filters,
-                                    *args, **kwargs)
+        return self._get_model_list(
+            models.Queue_data, self._add_queues_filters, *args, **kwargs
+        )
 
     def update_queue(self, backup_id, values):
-        if 'backup_id' in values:
-            LOG.error("Cannot override UUID for existing backup")
+        if "backup_id" in values:
+            LOG.error("Cannot override backup_id for existing backup queue.")
 
         try:
             return self._update(models.Queue_data, backup_id, values)
         except:
             LOG.error("backup resource not found.")
 
-    def get_queue_by_uuid(self, context, backup_id):
-        return self._get_queue(
-            context, fieldname="uuid", value=backup_id)
+    def get_queue_by_backup_id(self, context, backup_id):
+        """Get the column from queue_data with matching backup_id"""
+        return self._get_queue(context, fieldname="backup_id", value=backup_id)
 
     def _get_queue(self, context, fieldname, value):
+        """Get the columns from queue_data table"""
         try:
-            return self._get(context, model=models.Queue_data,
-                             fieldname=fieldname, value=value)
+            return self._get(
+                context, model=models.Queue_data, fieldname=fieldname, value=value
+            )
         except:
             LOG.error("Queue not found")
diff --git a/staffeln/db/sqlalchemy/migration.py b/staffeln/db/sqlalchemy/migration.py
index e0d0375..546cf37 100644
--- a/staffeln/db/sqlalchemy/migration.py
+++ b/staffeln/db/sqlalchemy/migration.py
@@ -11,7 +11,7 @@
 
 
 def _alembic_config():
-    path = os.path.join(os.path.dirname(__file__), 'alembic.ini')
+    path = os.path.join(os.path.dirname(__file__), "alembic.ini")
     config = alembic_config.Config(path)
     return config
 
diff --git a/staffeln/db/sqlalchemy/models.py b/staffeln/db/sqlalchemy/models.py
index 9c78c43..715d747 100644
--- a/staffeln/db/sqlalchemy/models.py
+++ b/staffeln/db/sqlalchemy/models.py
@@ -26,9 +26,8 @@
 
 def table_args():
     engine_name = urlparse.urlparse(CONF.database.connection).scheme
-    if engine_name == 'mysql':
-        return {'mysql_engine': CONF.database.mysql_engine,
-                'mysql_charset': "utf8"}
+    if engine_name == "mysql":
+        return {"mysql_engine": CONF.database.mysql_engine, "mysql_charset": "utf8"}
     return None
 
 
@@ -56,10 +55,10 @@
 class Backup_data(Base):
     """Represent the backup_data"""
 
-    __tablename__ = 'backup_data'
+    __tablename__ = "backup_data"
     __table_args__ = (
-        UniqueConstraint('backup_id', name='unique_backup0uuid'),
-        table_args()
+        UniqueConstraint("backup_id", name="unique_backup0uuid"),
+        table_args(),
     )
     id = Column(Integer, primary_key=True, autoincrement=True)
     backup_id = Column(String(100))
@@ -71,10 +70,8 @@
 class Queue_data(Base):
     """Represent the queue of the database"""
 
-    __tablename__ = 'queue_data'
-    __table_args__ = (
-        table_args()
-    )
+    __tablename__ = "queue_data"
+    __table_args__ = table_args()
     id = Column(Integer, primary_key=True, autoincrement=True)
     backup_id = Column(String(100))
     volume_id = Column(String(100))
diff --git a/staffeln/objects/base.py b/staffeln/objects/base.py
index e5a9f06..d300500 100755
--- a/staffeln/objects/base.py
+++ b/staffeln/objects/base.py
@@ -15,7 +15,7 @@
     """Return the mangled name of the attribute's underlying storage."""
     # FIXME(danms): This is just until we use o.vo's class properties
     # and object base.
-    return '_obj_' + name
+    return "_obj_" + name
 
 
 class StaffelnObject(ovoo_base.VersionedObject):
@@ -27,13 +27,12 @@
     necessary "get" classmethod routines as well as "save" object methods
     as appropriate.
     """
-    OBJ_SERIAL_NAMESPACE = 'staffeln_object'
-    OBJ_PROJECT_NAMESPACE = 'staffeln'
+
+    OBJ_SERIAL_NAMESPACE = "staffeln_object"
+    OBJ_PROJECT_NAMESPACE = "staffeln"
 
     def as_dict(self):
-        return {k: getattr(self, k)
-                for k in self.fields
-                if self.obj_attr_is_set(k)}
+        return {k: getattr(self, k) for k in self.fields if self.obj_attr_is_set(k)}
 
 
 class StaffelnObjectSerializer(ovoo_base.VersionedObjectSerializer):
@@ -43,9 +42,9 @@
 
 class StaffelnPersistentObject(object):
     feilds = {
-        'created_at': ovoo_fields.DateTimeField(nullable=True),
-        'updated_at': ovoo_fields.DateTimeField(nullable=True),
-        'deleted_at': ovoo_fields.DateTimeField(nullable=True),
+        "created_at": ovoo_fields.DateTimeField(nullable=True),
+        "updated_at": ovoo_fields.DateTimeField(nullable=True),
+        "deleted_at": ovoo_fields.DateTimeField(nullable=True),
     }
 
     object_fields = {}
@@ -53,7 +52,7 @@
     def obj_refresh(self, loaded_object):
         fields = (field for field in self.feilds if field not in self.object_fields)
         for field in fields:
-            if (self.obj_attr_is_set(field) and self[field] != loaded_object[field]):
+            if self.obj_attr_is_set(field) and self[field] != loaded_object[field]:
                 self[field] = loaded_object[field]
 
     @staticmethod
@@ -76,7 +75,8 @@
             setattr(objects, cls.obj_name(), cls)
         else:
             cur_version = versionutils.convert_version_to_tuple(
-                getattr(objects, cls.obj_name()).VERSION)
+                getattr(objects, cls.obj_name()).VERSION
+            )
             if version >= cur_version:
                 setattr(objects, cls.obj_name(), cls)
 
diff --git a/staffeln/objects/queue.py b/staffeln/objects/queue.py
index b6f9b76..a010e27 100644
--- a/staffeln/objects/queue.py
+++ b/staffeln/objects/queue.py
@@ -5,17 +5,19 @@
 
 
 @base.StaffelnObjectRegistry.register
-class Queue(base.StaffelnPersistentObject, base.StaffelnObject, base.StaffelnObjectDictCompat):
-    VERSION = '1.0'
+class Queue(
+    base.StaffelnPersistentObject, base.StaffelnObject, base.StaffelnObjectDictCompat
+):
+    VERSION = "1.0"
 
     dbapi = db_api.get_instance()
 
     fields = {
-        'id': sfeild.IntegerField(),
-        'backup_id': sfeild.StringField(),
-        'volume_id': sfeild.UUIDField(),
-        'instance_id': sfeild.StringField(),
-        'backup_status': sfeild.IntegerField()
+        "id": sfeild.IntegerField(),
+        "backup_id": sfeild.StringField(),
+        "volume_id": sfeild.UUIDField(),
+        "instance_id": sfeild.StringField(),
+        "backup_status": sfeild.IntegerField(),
     }
 
     @base.remotable_classmethod
@@ -41,6 +43,7 @@
 
     @base.remotable
     def create(self):
+        """Create a :class:`Backup_data` record in the DB"""
         values = self.obj_get_changes()
         db_queue = self.dbapi.create_queue(values)
         self._from_db_object(self, db_queue)
diff --git a/staffeln/objects/volume.py b/staffeln/objects/volume.py
index 8952164..640e144 100644
--- a/staffeln/objects/volume.py
+++ b/staffeln/objects/volume.py
@@ -5,17 +5,19 @@
 
 
 @base.StaffelnObjectRegistry.register
-class Volume(base.StaffelnPersistentObject, base.StaffelnObject, base.StaffelnObjectDictCompat):
-    VERSION = '1.0'
+class Volume(
+    base.StaffelnPersistentObject, base.StaffelnObject, base.StaffelnObjectDictCompat
+):
+    VERSION = "1.0"
 
     dbapi = db_api.get_instance()
 
     fields = {
-        'id': sfeild.IntegerField(),
-        'backup_id': sfeild.StringField(),
-        'instance_id': sfeild.StringField(),
-        'volume_id': sfeild.UUIDField(),
-        'backup_completed': sfeild.IntegerField()
+        "id": sfeild.IntegerField(),
+        "backup_id": sfeild.StringField(),
+        "instance_id": sfeild.StringField(),
+        "volume_id": sfeild.UUIDField(),
+        "backup_completed": sfeild.IntegerField(),
     }
 
     @base.remotable_classmethod