Add the backup process to the service.
diff --git a/staffeln/conductor/backup.py b/staffeln/conductor/backup.py
index 7a83df9..0f0522a 100755
--- a/staffeln/conductor/backup.py
+++ b/staffeln/conductor/backup.py
@@ -44,8 +44,12 @@
 

     def __init__(self):

         self.ctx = context.make_context()

-        self.discovered_map = None

+        self.discovered_queue_map = None

+        self.discovered_backup_map = None

         self.queue_mapping = dict()

+        self.volume_mapping = dict()

+        self._available_backups = None

+        self._available_backups_map = None

         self._available_queues = None

         self._available_queues_map = None

 

@@ -71,6 +75,28 @@
             }

         return self._available_queues_map

 

+    @property

+    def available_backups(self):

+        """Backups loaded from DB"""

+        if self._available_backups is None:

+            self._available_backups = objects.Volume.list(self.ctx)

+        return self._available_backups

+

+    @property

+    def available_backups_map(self):

+        """Mapping of backup loaded from DB"""

+        if self._available_backups_map is None:

+            self._available_backups_map = {

+                BackupMapping(

+                    backup_id=g.backup_id,

+                    volume_id=g.volume_id,

+                    instance_id=g.instance_id,

+                    backup_completed=g.backup_completed,

+                ): g

+                for g in self.available_backups

+            }

+        return self._available_backups_map

+

     def get_queues(self, filters=None):

         """Get the list of volume queue columns from the queue_data table"""

         queues = objects.Queue.list(self.ctx, filters=filters)

@@ -78,8 +104,8 @@
 

     def create_queue(self):

         """Create the queue of all the volumes for backup"""

-        self.discovered_map = self.check_instance_volumes()

-        queues_map = self.discovered_map["queues"]

+        self.discovered_queue_map = self.check_instance_volumes()

+        queues_map = self.discovered_queue_map["queues"]

         for queue_name, queue_map in queues_map.items():

             self._volume_queue(queue_map)

 

@@ -89,7 +115,7 @@
         that are attached to the instance.

         """

         queues_map = {}

-        discovered_map = {"queues": queues_map}

+        discovered_queue_map = {"queues": queues_map}

         projects = get_projects_list()

         for project in projects:

             servers = conn.compute.servers(

@@ -105,7 +131,7 @@
                         instance_id=server_id,

                         backup_status=0,

                     )

-        return discovered_map

+        return discovered_queue_map

 

     def _volume_queue(self, queue_map):

         """Saves the queue data to the database."""

@@ -150,15 +176,46 @@
         Call the backups api to see if the backup is successful.

         """

         for raw in conn.block_storage.backups(volume_id=queue.volume_id):

-            backup_info = raw.to_dict()

-            if backup_info.id == queue.id:

-                if backup_info.status == error:

-                    Log.error("Backup of the volume %s failed." % queue.id)

-                    ## Call db api to remove the queue object.

+            backup_info = raw

+            if backup_info.id == queue.backup_id:

+                if backup_info.status == "error":

+                    LOG.error("Backup of the volume %s failed." % queue.volume_id)

+                    queue_delete = objects.Queue.get_by_id(self.ctx, queue.id)

+                    queue_delete.delete_queue()

                 elif backup_info.status == "success":

+                    backups_map = {}

+                    discovered_backup_map = {"backups": backups_map}

                     LOG.info("Backup of the volume %s is successful." % queue.volume_id)

+                    backups_map["backups"] = BackupMapping(

+                        volume_id=queue.volume_id,

+                        backup_id=queue.backup_id,

+                        instance_id=queue.instance_id,

+                        backup_completed=1,

+                    )

+                    # Save volume backup success to backup_data table.

+                    self._volume_backup(discovered_backup_map)

                     ## call db api to remove the queue object.

-                    ## Call db api to add the backup status in volume_backup table.

+                    queue_delete = objects.Queue.get_by_id(self.ctx, queue.id)

+                    queue_delete.delete_queue()

                 else:

                     pass

                     ## Wait for the backup to be completed.

+

+    def _volume_backup(self, discovered_backup_map):

+        volumes_map = discovered_backup_map["backups"]

+        for volume_name, volume_map in volumes_map.items():

+            volume_id = volume_map.volume_id

+            backup_id = volume_map.backup_id

+            instance_id = volume_map.instance_id

+            backup_completed = volume_map.backup_completed

+            backup_mapping = dict()

+            matching_backups = [

+                g for g in self.available_backups if g.backup_id == backup_id

+            ]

+            if not matching_backups:

+                volume_backup = objects.Volume(self.ctx)

+                volume_backup.backup_id = backup_id

+                volume_backup.volume_id = volume_id

+                volume_backup.instance_id = instance_id

+                volume_backup.backup_completed = backup_completed

+                volume_backup.create()

diff --git a/staffeln/conductor/manager.py b/staffeln/conductor/manager.py
index 8bf5ca0..7f12246 100755
--- a/staffeln/conductor/manager.py
+++ b/staffeln/conductor/manager.py
@@ -53,9 +53,6 @@
         queues_started = backup.Backup().get_queues(
             filters={"backup_status": constants.BACKUP_WIP}
         )
-        queue_completed = backup.Backup().get_queues(
-            filters={"backup_status": constants.BACKUP_COMPLETED}
-        )
         if len(queue) == 0:
             create_queue = backup.Backup().create_queue()
         elif len(queues_started) != 0:
@@ -66,8 +63,6 @@
             for queue in queues_to_start:
                 LOG.info("Started backup process for %s" % queue.volume_id)
                 backup_volume = backup.Backup().volume_backup_initiate(queue)
-        elif len(queue_completed) == len(queue):
-            pass
 
 
 class RotationManager(cotyledon.Service):
diff --git a/staffeln/conf/paths.py b/staffeln/conf/paths.py
index bb2420a..3007619 100644
--- a/staffeln/conf/paths.py
+++ b/staffeln/conf/paths.py
@@ -4,6 +4,16 @@
 
 PATH_OPTS = [
     cfg.StrOpt(
+        "pybasedir",
+        default=os.path.abspath(os.path.join(os.path.dirname(__file__), "../")),
+        help="Directory where the staffeln python module is installed.",
+    ),
+    cfg.StrOpt(
+        "bindir",
+        default="$pybasedir/bin",
+        help="Directory where staffeln binaries are installed.",
+    ),
+    cfg.StrOpt(
         "state_path",
         default="$pybasedir",
         help="Top-level directory for maintaining staffeln's state.",
@@ -11,6 +21,16 @@
 ]
 
 
+def basedir_def(*args):
+    """Return an uninterpolated path relative to $pybasedir."""
+    return os.path.join("$pybasedir", *args)
+
+
+def bindir_def(*args):
+    """Return an uninterpolated path relative to $bindir."""
+    return os.path.join("$bindir", *args)
+
+
 def state_path_def(*args):
     """Return an uninterpolated path relative to $state_path."""
     return os.path.join("$state_path", *args)
diff --git a/staffeln/db/sqlalchemy/api.py b/staffeln/db/sqlalchemy/api.py
index 400fc7a..83bb484 100644
--- a/staffeln/db/sqlalchemy/api.py
+++ b/staffeln/db/sqlalchemy/api.py
@@ -209,6 +209,21 @@
             ref.update(values)
         return ref
 
+    @staticmethod
+    def _soft_delete(model, id_):
+        session = get_session()
+        with session.begin():
+            query = model_query(model, session=session)
+            query = add_identity_filter(query, id_)
+            try:
+                row = query.one()
+            except exc.NoResultFound:
+                LOG.error("Resource Not found.")
+                return None
+
+            session.delete(row)
+            return row
+
     def _get_model_list(
         self,
         model,
@@ -285,3 +300,9 @@
             )
         except:
             LOG.error("Queue not found")
+
+    def soft_delete_queue(self, id):
+        try:
+            return self._soft_delete(models.Queue_data, id)
+        except Exception:
+            LOG.error("Queue Not found.")
diff --git a/staffeln/objects/queue.py b/staffeln/objects/queue.py
index ac41bab..ac8c9e0 100644
--- a/staffeln/objects/queue.py
+++ b/staffeln/objects/queue.py
@@ -60,3 +60,8 @@
     def refresh(self):
         current = self.get_by_backup_id(backup_id=self.backup_id)
         self.obj_refresh(current)
+
+    @base.remotable
+    def delete_queue(self):
+        """Soft Delete the :class:`Queue_data` from the DB"""
+        db_obj = self.dbapi.soft_delete_queue(self.id)
diff --git a/staffeln/objects/volume.py b/staffeln/objects/volume.py
index 498d65a..3933bdf 100644
--- a/staffeln/objects/volume.py
+++ b/staffeln/objects/volume.py
@@ -20,12 +20,12 @@
     }
 
     @base.remotable_classmethod
-    def list(cls, filters=None):
+    def list(cls, context, filters=None):
         """Return a list of :class:`Backup` objects.
 
         :param filters: dict mapping the filter to a value.
         """
-        db_backups = cls.dbapi.get_backup_list(filters=filters)
+        db_backups = cls.dbapi.get_backup_list(context, filters=filters)
 
         return [cls._from_db_object(cls(), obj) for obj in db_backups]