Add project_id to data models
diff --git a/staffeln/common/time.py b/staffeln/common/time.py
index a4f3c80..39a6fb9 100644
--- a/staffeln/common/time.py
+++ b/staffeln/common/time.py
@@ -31,6 +31,8 @@
except:
return None
+def get_current_time():
+ return datetime.now()
def get_current_strtime():
now = datetime.now()
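The new `get_current_time()` helper simply wraps `datetime.now()`; the conductor (see manager.py below) stores its return value as the start of the WIP backup cycle and compares against it on every poll. A minimal sketch of how such a cycle-timeout check can be built on top of it — `CYCLE_TIMEOUT` and `is_cycle_timed_out` are illustrative names, not part of the patch:

```python
from datetime import datetime, timedelta

# Illustrative value; the real budget comes from staffeln configuration.
CYCLE_TIMEOUT = timedelta(minutes=30)

def get_current_time():
    return datetime.now()

def is_cycle_timed_out(cycle_start_time, timeout=CYCLE_TIMEOUT):
    """Return True once the current WIP cycle has run longer than `timeout`."""
    return get_current_time() - cycle_start_time > timeout
```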
diff --git a/staffeln/conductor/backup.py b/staffeln/conductor/backup.py
index 3cd48fe..bf3f63e 100755
--- a/staffeln/conductor/backup.py
+++ b/staffeln/conductor/backup.py
@@ -1,3 +1,4 @@
+import parse
import staffeln.conf
import collections
from staffeln.common import constants
@@ -14,11 +15,11 @@
LOG = log.getLogger(__name__)
BackupMapping = collections.namedtuple(
- "BackupMapping", ["volume_id", "backup_id", "instance_id", "backup_completed"]
+ "BackupMapping", ["volume_id", "backup_id", "project_id", "instance_id", "backup_completed"]
)
QueueMapping = collections.namedtuple(
- "QueueMapping", ["volume_id", "backup_id", "instance_id", "backup_status"]
+ "QueueMapping", ["volume_id", "backup_id", "project_id", "instance_id", "backup_status"]
)
conn = auth.create_connection()
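Both mappings gain a `project_id` member, so every queue entry and every recorded backup is tied to the project that owns the volume. A sketch of how the widened `QueueMapping` is filled in (all values below are placeholders; in the conductor they come from the Nova server and Cinder volume listings):

```python
import collections

QueueMapping = collections.namedtuple(
    "QueueMapping",
    ["volume_id", "backup_id", "project_id", "instance_id", "backup_status"],
)

# Placeholder values for illustration only.
entry = QueueMapping(
    project_id="PROJECT_UUID",   # owning project, taken from project.id
    volume_id="VOLUME_UUID",
    backup_id="NULL",            # no backup created yet
    instance_id="SERVER_UUID",
    backup_status=0,             # assumed to be the "planned" status constant
)
```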
@@ -77,6 +78,7 @@
# Backup the volumes in in-use and available status
def filter_volume(self, volume_id):
try:
+ # volume = conn.block_storage.get_volume(volume_id)
volume = conn.get_volume_by_id(volume_id)
if volume == None: return False
res = volume['status'] in ("available", "in-use")
@@ -90,9 +92,17 @@
# delete all backups forcibly regardless of the status
def hard_cancel_volume_backup(self, task):
try:
+ LOG.info(_("Cancel backup %s" % task.backup_id))
+ # backup = conn.block_storage.get_backup(
+ # project_id=task.project_id, backup_id=task.backup_id,
+ # )
backup = conn.get_volume_backup(task.backup_id)
if backup == None: return task.delete_queue()
+ # TODO(Alex): check whether the v3 API supports force delete
+ # conn.block_storage.delete_backup(
+ # project_id=task.project_id, backup_id=task.backup_id,
+ # )
conn.delete_volume_backup(task.backup_id, force=True)
task.delete_queue()
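The proxy-layer calls (`conn.block_storage.get_backup` / `delete_backup` with an explicit `project_id`) stay commented out until project-scoped lookups and force delete are settled; the cloud-layer calls are used in the meantime. Pulled out of the diff, the hard-cancel flow is roughly the following sketch (not the literal method body; the exception handling is assumed):

```python
from openstack.exceptions import SDKException as OpenstackSDKException

def hard_cancel_volume_backup(conn, task):
    """Force-delete a backup regardless of its status and drop its queue entry."""
    try:
        backup = conn.get_volume_backup(task.backup_id)
        if backup is None:
            # Nothing left to cancel; only the queue entry has to go.
            return task.delete_queue()
        conn.delete_volume_backup(task.backup_id, force=True)
        task.delete_queue()
    except OpenstackSDKException as error:
        # Assumption: failures are only logged and retried on the next cycle.
        print("Failed to cancel backup %s: %s" % (task.backup_id, error))
```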
@@ -111,6 +121,10 @@
# delete only available backups
def soft_remove_volume_backup(self, backup_object):
try:
+
+ # backup = conn.block_storage.get_backup(
+ # project_id=backup_object.project_id, backup_id=backup_object.backup_id,
+ # )
backup = conn.get_volume_backup(backup_object.backup_id)
if backup == None: return backup_object.delete_backup()
if backup["status"] in ("available"):
@@ -141,10 +155,13 @@
backup_object.delete_backup()
return False
-
# delete all backups forcibly regardless of the status
def hard_remove_volume_backup(self, backup_object):
try:
+
+ # backup = conn.block_storage.get_backup(
+ # project_id=backup_object.project_id, backup_id=backup_object.backup_id,
+ # )
backup = conn.get_volume_backup(backup_object.backup_id)
if backup == None: return backup_object.delete_backup()
@@ -181,6 +198,7 @@
if not self.filter_volume(volume["id"]): continue
queues_map.append(
QueueMapping(
+ project_id=project.id,
volume_id=volume["id"],
backup_id="NULL",
instance_id=server.id,
@@ -213,16 +231,25 @@
backup_id = queue.backup_id
if backup_id == "NULL":
try:
+ LOG.info(_("Backup for volume %s creating" % queue.volume_id))
+ # volume_backup = conn.block_storage.create_backup(
+ # volume_id=queue.volume_id, force=True, project_id=queue.project_id,
+ # )
+ # NOTE(Alex): no need to wait because we have a cycle timeout
volume_backup = conn.create_volume_backup(
- volume_id=queue.volume_id, force=True
+ volume_id=queue.volume_id, force=True, wait=False,
)
+ queue.backup_id = volume_backup.id
+ queue.backup_status = constants.BACKUP_WIP
+ queue.save()
except OpenstackSDKException as error:
LOG.info(_("Backup creation for the volume %s failled. %s"
% (queue.volume_id, str(error))))
-
- queue.backup_id = volume_backup.id
- queue.backup_status = constants.BACKUP_WIP
- queue.save()
+ parsed = parse.parse("Error in creating volume backup {id}", str(error))
+ if parsed == None: return
+ queue.backup_id = parsed["id"]
+ queue.backup_status = constants.BACKUP_WIP
+ queue.save()
else:
pass
# TODO(Alex): remove this task from the task list
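With `wait=False` the SDK call returns immediately and the queue entry is switched to `BACKUP_WIP` with the new backup id. The `parse` fallback covers the case where the SDK raises even though Cinder did create a backup: the id is recovered from the error text. A standalone sketch of that recovery step (the error format string is the one this patch assumes the SDK emits):

```python
import parse

def recover_backup_id(error):
    """Pull a backup id out of an openstacksdk error message, if possible.

    Returns the id string, or None when the message does not match the
    "Error in creating volume backup {id}" pattern the patch relies on.
    """
    parsed = parse.parse("Error in creating volume backup {id}", str(error))
    return None if parsed is None else parsed["id"]
```

On a successful match the queue entry is then updated exactly as in the success path: `backup_id` from the parsed id, status `BACKUP_WIP`, then `save()`.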
@@ -235,15 +262,20 @@
LOG.error("Backup of the volume %s failed." % task.volume_id)
# 2. TODO(Alex): remove failed backup instance from the openstack
# then set the volume status in-use
+ self.hard_cancel_volume_backup(task)
# 3. remove failed task from the task queue
task.delete_queue()
+ def process_non_existing_backup(self, task):
+ task.delete_queue()
+
def process_available_backup(self, task):
LOG.info("Backup of the volume %s is successful." % task.volume_id)
# 1. save success backup in the backup table
self._volume_backup(
BackupMapping(
volume_id=task.volume_id,
+ project_id=task.project_id,
backup_id=task.backup_id,
instance_id=task.instance_id,
backup_completed=1,
@@ -265,10 +297,13 @@
"""
# for backup_gen in conn.block_storage.backups(volume_id=queue.volume_id):
try:
+ # backup_gen = conn.block_storage.get_backup(
+ # project_id=queue.project_id, backup_id=queue.backup_id,
+ # )
backup_gen = conn.get_volume_backup(queue.backup_id)
if backup_gen == None:
# TODO(Alex): need to check when it is none
- LOG.info(_("Backup status of %s is returning none." % (queue.backup_id)))
+ LOG.info(_("[Beta] Backup status of %s is returning none." % (queue.backup_id)))
return
if backup_gen.status == "error":
self.process_failed_backup(queue)
@@ -283,7 +318,7 @@
else: # "deleting", "restoring", "error_restoring" status
self.process_using_backup(queue)
except OpenstackResourceNotFound as e:
- self.process_failed_backup(queue)
+ self.process_non_existing_backup(queue)
def _volume_backup(self, task):
# matching_backups = [
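The status check now separates a backup that no longer exists (`OpenstackResourceNotFound`) from one that failed: the former only needs its queue entry dropped via the new `process_non_existing_backup`, while the latter is hard-cancelled first. Reduced to its dispatch logic, the check looks roughly like this sketch (handlers are passed in; their bodies are elided):

```python
from openstack.exceptions import ResourceNotFound as OpenstackResourceNotFound

def route_backup_status(conn, queue, handlers):
    """Dispatch a WIP queue entry to the handler matching its backup status."""
    try:
        backup = conn.get_volume_backup(queue.backup_id)
        if backup is None:
            return  # status unknown; try again on the next poll interval
        if backup.status == "error":
            handlers.process_failed_backup(queue)      # cancel it, drop queue entry
        elif backup.status == "available":
            handlers.process_available_backup(queue)   # record success, drop entry
        else:
            handlers.process_using_backup(queue)       # still in progress: keep waiting
    except OpenstackResourceNotFound:
        handlers.process_non_existing_backup(queue)    # backup gone: drop entry only
```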
diff --git a/staffeln/conductor/manager.py b/staffeln/conductor/manager.py
index aaa8773..a08b3db 100755
--- a/staffeln/conductor/manager.py
+++ b/staffeln/conductor/manager.py
@@ -58,24 +58,26 @@
return False
# Manage active backup generators
- # TODO(Alex): need to discuss
- # Need to wait until all backups are finished?
- # That is required to make the backup report
def _process_wip_tasks(self):
LOG.info(_("Processing WIP backup generators..."))
# TODO(Alex): Replace this infinite loop with finite time
self.cycle_start_time = xtime.get_current_time()
- # loop - take care of backup result
+ # loop - take care of backup result while timeout
while(1):
queues_started = backup.Backup().get_queues(
filters={"backup_status": constants.BACKUP_WIP}
)
- if len(queues_started) == 0: break
+ if len(queues_started) == 0:
+ LOG.info(_("task queue empty"))
+ break
if not self._backup_cycle_timeout():# time in
+ LOG.info(_("cycle timein"))
for queue in queues_started: backup.Backup().check_volume_backup_status(queue)
else: # time out
+ LOG.info(_("cycle timeout"))
for queue in queues_started: backup.Backup().hard_cancel_volume_backup(queue)
+ break
time.sleep(constants.BACKUP_RESULT_CHECK_INTERVAL)
# if the backup cycle timeout, then return True
@@ -133,7 +135,7 @@
self._update_task_queue()
self._process_todo_tasks()
self._process_wip_tasks()
- self._report_backup_result()
+ # self._report_backup_result()
class RotationManager(cotyledon.Service):
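The WIP loop now has an explicit exit condition: it keeps polling while `BACKUP_WIP` entries remain and the cycle is within its time budget, and hard-cancels whatever is left once the budget is exceeded. Stripped of logging, the control flow is roughly the following sketch (`cycle_timed_out` stands in for `_backup_cycle_timeout`, and the callables are injected for clarity):

```python
import time

CHECK_INTERVAL = 30  # seconds; stands in for constants.BACKUP_RESULT_CHECK_INTERVAL

def process_wip_tasks(get_wip_queues, check_status, hard_cancel, cycle_timed_out):
    """Poll WIP backups until the queue drains or the cycle times out."""
    while True:
        queues = get_wip_queues()
        if not queues:
            break                    # every backup finished or was cleaned up
        if not cycle_timed_out():
            for queue in queues:
                check_status(queue)  # may complete, fail, or keep the entry waiting
        else:
            for queue in queues:
                hard_cancel(queue)   # force-delete leftovers, drop their entries
            break                    # stop polling once the budget is spent
        time.sleep(CHECK_INTERVAL)
```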
diff --git a/staffeln/db/sqlalchemy/api.py b/staffeln/db/sqlalchemy/api.py
index 4b4fc9f..0148c3d 100644
--- a/staffeln/db/sqlalchemy/api.py
+++ b/staffeln/db/sqlalchemy/api.py
@@ -110,7 +110,14 @@
filters = {}
- plain_fields = ["volume_id", "backup_id", "backup_completed", "instance_id", "created_at"]
+ plain_fields = [
+ "volume_id",
+ "backup_id",
+ "project_id",
+ "backup_completed",
+ "instance_id",
+ "created_at"
+ ]
return self._add_filters(
query=query,
@@ -126,6 +133,7 @@
plain_fields = [
"backup_id",
+ "project_id",
"volume_id",
"instance_id",
"backup_status",
diff --git a/staffeln/db/sqlalchemy/models.py b/staffeln/db/sqlalchemy/models.py
index 715d747..93dae9f 100644
--- a/staffeln/db/sqlalchemy/models.py
+++ b/staffeln/db/sqlalchemy/models.py
@@ -62,6 +62,7 @@
)
id = Column(Integer, primary_key=True, autoincrement=True)
backup_id = Column(String(100))
+ project_id = Column(String(100))
volume_id = Column(String(100))
instance_id = Column(String(100))
backup_completed = Column(Integer())
@@ -74,6 +75,7 @@
__table_args__ = table_args()
id = Column(Integer, primary_key=True, autoincrement=True)
backup_id = Column(String(100))
+ project_id = Column(String(100))
volume_id = Column(String(100))
backup_status = Column(Integer())
instance_id = Column(String(100))
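The new columns use `String(100)`, matching the other id columns on both tables. The patch ships no schema migration, so an existing deployment would need the column added separately; a rough sketch of what that could look like is below (the table names `backup_data` and `queue_data` are assumptions based on the model classes, and the DDL approach is illustrative only):

```python
# Illustrative only: not part of the patch. Table names are assumed.
from sqlalchemy import create_engine, text

def add_project_id_columns(db_url):
    """Add the project_id column the updated models expect."""
    engine = create_engine(db_url)
    with engine.connect() as connection:
        for table in ("backup_data", "queue_data"):
            connection.execute(
                text("ALTER TABLE %s ADD COLUMN project_id VARCHAR(100)" % table)
            )
```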
diff --git a/staffeln/objects/queue.py b/staffeln/objects/queue.py
index ac8c9e0..0137d4e 100644
--- a/staffeln/objects/queue.py
+++ b/staffeln/objects/queue.py
@@ -15,6 +15,7 @@
fields = {
"id": sfeild.IntegerField(),
"backup_id": sfeild.StringField(),
+ "project_id": sfeild.UUIDField(),
"volume_id": sfeild.UUIDField(),
"instance_id": sfeild.StringField(),
"backup_status": sfeild.IntegerField(),
diff --git a/staffeln/objects/volume.py b/staffeln/objects/volume.py
index 6655b7b..ce300af 100644
--- a/staffeln/objects/volume.py
+++ b/staffeln/objects/volume.py
@@ -15,6 +15,7 @@
"id": sfeild.IntegerField(),
"backup_id": sfeild.StringField(),
"instance_id": sfeild.StringField(),
+ "project_id": sfeild.UUIDField(),
"volume_id": sfeild.UUIDField(),
"backup_completed": sfeild.IntegerField(),
}
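Both versioned objects now expose `project_id` as a `UUIDField`, mirroring `volume_id` (the database stores it as `String(100)`, like the other ids). A hedged sketch of how a queue object might be populated from one of the new `QueueMapping` entries — the attribute-assignment-plus-`create()` pattern is assumed from the existing conductor code:

```python
# Sketch only: assumes the Queue object follows the usual
# oslo.versionedobjects pattern of attribute assignment plus create().
from staffeln import objects

def queue_from_mapping(context, mapping):
    task = objects.Queue(context)
    task.project_id = mapping.project_id   # new field introduced by this patch
    task.volume_id = mapping.volume_id
    task.instance_id = mapping.instance_id
    task.backup_id = mapping.backup_id
    task.backup_status = mapping.backup_status
    task.create()                          # assumed persistence call
    return task
```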