Merge pull request #31 from vexxhost/set-project

Use the same project for backups with volumes
diff --git a/staffeln/api/app.py b/staffeln/api/app.py
index 041b2fd..80c1bb2 100755
--- a/staffeln/api/app.py
+++ b/staffeln/api/app.py
@@ -15,8 +15,8 @@
 

 @app.route("/v1/backup", methods=["POST"])

 def backup_id():

-

-    retention_user_id = openstack.get_user_id()

+    openstacksdk = openstack.OpenstackSDK()

+    retention_user_id = openstacksdk.get_user_id()

     

     if not "user_id" in request.args or not "backup_id" in request.args:

         # Return error if the backup_id argument is not provided.

diff --git a/staffeln/common/openstack.py b/staffeln/common/openstack.py
index c19f28e..1a7c10e 100644
--- a/staffeln/common/openstack.py
+++ b/staffeln/common/openstack.py
@@ -2,92 +2,110 @@
 from openstack import proxy

+from openstack import exceptions

 from staffeln.common import auth

 

-conn = auth.create_connection()

+

+class OpenstackSDK():

+

+    def __init__(self):

+        self.conn_list = {}

+        self.conn = auth.create_connection()

 

 

-# user

-def get_user_id():

-    user_name = conn.config.auth["username"]

-    if "user_domain_id" in conn.config.auth:

-        domain_id = conn.config.auth["user_domain_id"]

-        user = conn.get_user(name_or_id=user_name, domain_id=domain_id)

-    elif "user_domain_name" in conn.config.auth:

-        domain_name = conn.config.auth["user_domain_name"]

-        user = conn.get_user(name_or_id=user_name, domain_id=domain_name)

-    else:

-        user = conn.get_user(name_or_id=user_name)

-    return user.id

+    def set_project(self, project):

+        project_id = project.get('id')

 

-############## project

-def get_projects():

-    conn.block_storage

-    return conn.list_projects()

+        if project_id in self.conn_list:

+            self.conn = self.conn_list[project_id]

+        else:

+            conn = self.conn.connect_as_project(project)

+            self.conn = conn

+

+    # user

+    def get_user_id(self):

+        user_name = self.conn.config.auth["username"]

+        if "user_domain_id" in self.conn.config.auth:

+            domain_id = self.conn.config.auth["user_domain_id"]

+            user = self.conn.get_user(name_or_id=user_name, domain_id=domain_id)

+        elif "user_domain_name" in self.conn.config.auth:

+            domain_name = self.conn.config.auth["user_domain_name"]

+            user = self.conn.get_user(name_or_id=user_name, domain_id=domain_name)

+        else:

+            user = self.conn.get_user(name_or_id=user_name)

+        return user.id

+

+    ############## project

+    def get_projects(self):

+        return self.conn.list_projects()

 

 

-############## server

-def get_servers(project_id, all_projects=True, details=True):

-    return conn.compute.servers(details=details, all_projects=all_projects, project_id=project_id)

+    ############## server

+    def get_servers(self, project_id, all_projects=True, details=True):

+        return self.conn.compute.servers(

+            details=details, all_projects=all_projects, project_id=project_id

+        )

 

 

-############## volume

-def get_volume(uuid, project_id):

-    # volume = conn.block_storage.get_volume(volume_id)

-    return conn.get_volume_by_id(uuid)

+    ############## volume

+    def get_volume(self, uuid, project_id):

+        return self.conn.get_volume_by_id(uuid)

 

 

-############## backup

-def get_backup(uuid, project_id=None):

-    # return conn.block_storage.get_backup(

-    #     project_id=project_id, backup_id=uuid,

-    # )

-    # conn.block_storage.backups(volume_id=uuid,project_id=project_id)

-    return conn.get_volume_backup(uuid)

+    ############## backup

+    def get_backup(self, uuid, project_id=None):

+        # return conn.block_storage.get_backup(

+        #     project_id=project_id, backup_id=uuid,

+        # )

+        # conn.block_storage.backups(volume_id=uuid,project_id=project_id)

+        return self.conn.get_volume_backup(uuid)

 

 

-def create_backup(volume_id, project_id, force=True, wait=False):

-    # return conn.block_storage.create_backup(

-    #     volume_id=queue.volume_id, force=True, project_id=queue.project_id,

-    # )

-    return conn.create_volume_backup(

-        volume_id=volume_id, force=force, wait=wait,

-    )

+    def create_backup(self, volume_id, project_id, force=True, wait=False):

+        # return conn.block_storage.create_backup(

+        #     volume_id=queue.volume_id, force=True, project_id=queue.project_id,

+        # )

+        return self.conn.create_volume_backup(

+            volume_id=volume_id, force=force, wait=wait,

+        )

 

 

-def delete_backup(uuid, project_id=None, force=True):

-    # TODO(Alex): v3 is not supporting force delete?

-    # conn.block_storage.delete_backup(

-    #     project_id=project_id, backup_id=uuid,

-    # )

-    conn.delete_volume_backup(uuid, force=force)

-    # TODO(Alex): After delete the backup generator, need to set the volume status again

+    def delete_backup(self, uuid, project_id=None, force=True):

+        # Note(Alex): v3 does not support force delete?

+        # conn.block_storage.delete_backup(

+        #     project_id=project_id, backup_id=uuid,

+        # )

+        try:

+            self.conn.delete_volume_backup(uuid, force=force)

+            # TODO(Alex): After delete the backup generator, need to set the volume status again

+        except exceptions.ResourceNotFound:

+            return

 

 

-def get_backup_quota(project_id):

-    # quota = conn.get_volume_quotas(project_id)

-    quota = _get_volume_quotas(project_id)

-    return quota.backups

+    def get_backup_quota(self, project_id):

+        # quota = conn.get_volume_quotas(project_id)

+        quota = self._get_volume_quotas(project_id)

+        return quota.backups

 

 

-# rewrite openstasdk._block_storage.get_volume_quotas

-# added usage flag

-# ref: https://docs.openstack.org/api-ref/block-storage/v3/?expanded=#show-quota-usage-for-a-project

-def _get_volume_quotas(project_id, usage=True):

-    """ Get volume quotas for a project

+    # rewrite openstacksdk._block_storage.get_volume_quotas

+    # added usage flag

+    # ref: https://docs.openstack.org/api-ref/block-storage/v3/?expanded=#show-quota-usage-for-a-project

+    def _get_volume_quotas(self, project_id, usage=True):

+        """ Get volume quotas for a project

 

-    :param name_or_id: project name or id

-    :raises: OpenStackCloudException if it's not a valid project

+        :param name_or_id: project name or id

+        :raises: OpenStackCloudException if it's not a valid project

 

-    :returns: Munch object with the quotas

-    """

+        :returns: Munch object with the quotas

+        """

 

-    if usage:

-        resp = conn.block_storage.get(

-            '/os-quota-sets/{project_id}?usage=True'.format(project_id=project_id))

-    else:

-        resp = conn.block_storage.get(

-            '/os-quota-sets/{project_id}'.format(project_id=project_id))

-    data = proxy._json_response(

-        resp,

-        error_message="cinder client call failed")

-    return conn._get_and_munchify('quota_set', data)

+        if usage:

+            resp = self.conn.block_storage.get(

+                '/os-quota-sets/{project_id}?usage=True'.format(project_id=project_id))

+        else:

+            resp = self.conn.block_storage.get(

+                '/os-quota-sets/{project_id}'.format(project_id=project_id))

+        data = proxy._json_response(

+            resp,

+            error_message="cinder client call failed")

+        return self.conn._get_and_munchify('quota_set', data)

 

diff --git a/staffeln/conductor/backup.py b/staffeln/conductor/backup.py
index d09dad3..5a489b3 100755
--- a/staffeln/conductor/backup.py
+++ b/staffeln/conductor/backup.py
@@ -6,11 +6,10 @@
 from openstack.exceptions import ResourceNotFound as OpenstackResourceNotFound
 from openstack.exceptions import SDKException as OpenstackSDKException
 from oslo_log import log
-from staffeln.common import auth
 from staffeln.common import context
 from staffeln import objects
 from staffeln.i18n import _
-from staffeln.common import openstack as openstacksdk
+from staffeln.common import openstack
 
 CONF = staffeln.conf.CONF
 LOG = log.getLogger(__name__)
@@ -36,6 +35,8 @@
     def __init__(self):
         self.ctx = context.make_context()
         self.result = result.BackupResult()
+        self.openstacksdk = openstack.OpenstackSDK()
+        self.project_list = {}
 
     def publish_backup_result(self):
         self.result.publish()
@@ -44,7 +45,7 @@
         return objects.Volume.list(self.ctx, filters=filters)
 
     def get_backup_quota(self, project_id):
-        return openstacksdk.get_backup_quota(project_id)
+        return self.openstacksdk.get_backup_quota(project_id)
 
     def get_queues(self, filters=None):
         """Get the list of volume queue columns from the queue_data table"""
@@ -76,7 +77,7 @@
     # Backup the volumes in in-use and available status
     def filter_by_volume_status(self, volume_id, project_id):
         try:
-            volume = openstacksdk.get_volume(volume_id, project_id)
+            volume = self.openstacksdk.get_volume(volume_id, project_id)
             if volume == None: return False
             res = volume['status'] in ("available", "in-use")
             if not res:
@@ -88,14 +89,19 @@
         except OpenstackResourceNotFound:
             return False
 
+
     #  delete all backups forcily regardless of the status
     def hard_cancel_backup_task(self, task):
         try:
+            project_id = task.project_id
             reason = _("Cancel backup %s because of timeout." % task.backup_id)
             LOG.info(reason)
-            backup = openstacksdk.get_backup(task.backup_id)
+
+            if project_id not in self.project_list: return self.process_non_existing_backup(task)
+            self.openstacksdk.set_project(self.project_list[project_id])
+            backup = self.openstacksdk.get_backup(task.backup_id)
             if backup == None: return task.delete_queue()
-            openstacksdk.delete_backup(task.backup_id)
+            self.openstacksdk.delete_backup(task.backup_id)
             task.delete_queue()
             self.result.add_failed_backup(task.project_id, task.volume_id, reason)
         except OpenstackResourceNotFound:
@@ -113,13 +119,13 @@
             task.delete_queue()
             self.result.add_failed_backup(task.project_id, task.volume_id, reason)
 
-    #  delete only available backups
+    #  delete only available backups: reserved
     def soft_remove_backup_task(self, backup_object):
         try:
-            backup = openstacksdk.get_backup(backup_object.backup_id)
+            backup = self.openstacksdk.get_backup(backup_object.backup_id)
             if backup == None: return backup_object.delete_backup()
             if backup["status"] in ("available"):
-                openstacksdk.delete_backup(backup_object.backup_id)
+                self.openstacksdk.delete_backup(backup_object.backup_id)
                 backup_object.delete_backup()
             elif backup["status"] in ("error", "error_restoring"):
                 # TODO(Alex): need to discuss
@@ -151,11 +157,11 @@
     #  delete all backups forcily regardless of the status
     def hard_remove_volume_backup(self, backup_object):
         try:
-            backup = openstacksdk.get_backup(uuid=backup_object.backup_id,
+            backup = self.openstacksdk.get_backup(uuid=backup_object.backup_id,
                                              project_id=backup_object.project_id)
             if backup == None: return backup_object.delete_backup()
 
-            openstacksdk.delete_backup(uuid=backup_object.backup_id)
+            self.openstacksdk.delete_backup(uuid=backup_object.backup_id)
             backup_object.delete_backup()
 
         except OpenstackResourceNotFound:
@@ -180,10 +186,11 @@
         that are attached to the instance.
         """
         queues_map = []
-        projects = openstacksdk.get_projects()
+        projects = self.openstacksdk.get_projects()
         for project in projects:
             empty_project = True
-            servers = openstacksdk.get_servers(project_id=project.id)
+            self.project_list[project.id] = project
+            servers = self.openstacksdk.get_servers(project_id=project.id)
             for server in servers:
                 if not self.filter_by_server_metadata(server.metadata): continue
                 if empty_project:
@@ -224,14 +231,16 @@
         This function will call the backupup api and change the
         backup_status and backup_id in the queue table.
         """
-        backup_id = queue.backup_id
-        if backup_id == "NULL":
+        project_id = queue.project_id
+        if queue.backup_id == "NULL":
             try:
                 LOG.info(_("Backup for volume %s creating in project %s"
-                           % (queue.volume_id, queue.project_id)))
+                           % (queue.volume_id, project_id)))
                 # NOTE(Alex): no need to wait because we have a cycle time out
-                volume_backup = openstacksdk.create_backup(volume_id=queue.volume_id,
-                                                           project_id=queue.project_id)
+                if project_id not in self.project_list: return self.process_non_existing_backup(queue)
+                self.openstacksdk.set_project(self.project_list[project_id])
+                volume_backup = self.openstacksdk.create_backup(volume_id=queue.volume_id,
+                                                           project_id=project_id)
                 queue.backup_id = volume_backup.id
                 queue.backup_status = constants.BACKUP_WIP
                 queue.save()
@@ -239,7 +248,7 @@
                 reason = _("Backup creation for the volume %s failled. %s"
                            % (queue.volume_id, str(error)))
                 LOG.info(reason)
-                self.result.add_failed_backup(queue.project_id, queue.volume_id, reason)
+                self.result.add_failed_backup(project_id, queue.volume_id, reason)
                 parsed = parse.parse("Error in creating volume backup {id}", str(error))
                 if parsed is not None:
                     queue.backup_id = parsed["id"]
@@ -264,12 +273,12 @@
         task.delete_queue()
 
     def process_failed_backup(self, task):
-        # 1.notify via email
+        # 1. notify via email
         reason = _("The status of backup for the volume %s is error." % task.volume_id)
         self.result.add_failed_backup(task.project_id, task.volume_id, reason)
         LOG.error(reason)
-        # 2. cancel volume backup
-        self.hard_cancel_backup_task(task)
+        # 2. delete backup generator
+        self.openstacksdk.delete_backup(uuid=task.backup_id)
         # 3. remove failed task from the task queue
         task.delete_queue()
 
@@ -304,11 +313,17 @@
         Call the backups api to see if the backup is successful.
         """
         try:
+
+            project_id = queue.project_id
+
             # The case in which the error produced before backup gen created.
             if queue.backup_id == "NULL":
                 self.process_pre_failed_backup(queue)
                 return
-            backup_gen = openstacksdk.get_backup(queue.backup_id)
+            if project_id not in self.project_list: return self.process_non_existing_backup(queue)
+            self.openstacksdk.set_project(self.project_list[project_id])
+            backup_gen = self.openstacksdk.get_backup(queue.backup_id)
+
             if backup_gen == None:
                 # TODO(Alex): need to check when it is none
                 LOG.info(_("[Beta] Backup status of %s is returning none." % (queue.backup_id)))