Refactor retention backup delete

This makes the incremental backup delete logic simpler and
easier to read.
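At a high level, the old code pre-computed a retention_backups list while
tracking full/incremental dependency chains by index; the new code walks
each instance's backups newest-first and deletes expired ones on the spot,
skipping the dependency error raised by the volume service. A condensed
sketch of the new retention pass (names follow the manager.py diff below;
building backup_instance_map is omitted):

    # Delete expired backups newest-first, so incrementals go before
    # the full backup they were built on.
    for instance_id in backup_instance_map:
        sorted_backup_list = sorted(
            backup_instance_map[instance_id],
            key=lambda backup: backup.created_at.timestamp(),
            reverse=True,
        )
        for backup in sorted_backup_list:
            if self.is_retention(backup):
                # Best-effort delete: a full backup that still has
                # dependent incrementals is skipped and retried on
                # the next retention cycle.
                self.controller.hard_remove_volume_backup(
                    backup, skip_inc_err=True
                )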
diff --git a/staffeln/conductor/backup.py b/staffeln/conductor/backup.py
index 8394187..9f3f54d 100755
--- a/staffeln/conductor/backup.py
+++ b/staffeln/conductor/backup.py
@@ -227,7 +227,7 @@
return False
# delete all backups forcibly regardless of the status
- def hard_remove_volume_backup(self, backup_object):
+ def hard_remove_volume_backup(self, backup_object, skip_inc_err=False):
try:
project_id = backup_object.project_id
if project_id not in self.project_list:
@@ -251,16 +251,19 @@
backup_object.delete_backup()
except Exception as e:
- LOG.warn(
- _(
- "Backup %s deletion failed. Need to delete manually."
- "%s" % (backup_object.backup_id, str(e))
+ if skip_inc_err and "Incremental backups exist for this backup" in str(e):
+ pass
+ else:
+ LOG.warn(
+ _(
+ "Backup %s deletion failed. Need to delete manually."
+ "%s" % (backup_object.backup_id, str(e))
+ )
)
- )
- # TODO(Alex): Add it into the notification queue
- # remove from the backup table
- backup_object.delete_backup()
+ # TODO(Alex): Add it into the notification queue
+ # remove from the backup table
+ backup_object.delete_backup()
def update_project_list(self):
projects = self.openstacksdk.get_projects()
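For context on the matched message: the block storage service refuses to
delete a full backup while newer incremental backups still depend on it,
and the SDK surfaces that as an exception whose text contains the phrase
above. A hedged illustration of the same match in isolation (conn and
backup_id are placeholder names, not part of this change):

    # Cinder rejects deleting a full backup that incrementals were
    # built on; backup.py matches the message by substring.
    try:
        conn.block_storage.delete_backup(backup_id, ignore_missing=False)
    except Exception as e:
        if "Incremental backups exist for this backup" in str(e):
            pass  # dependents still exist; retry on a later cycle
        else:
            raise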
diff --git a/staffeln/conductor/manager.py b/staffeln/conductor/manager.py
index a105fc1..33ab139 100755
--- a/staffeln/conductor/manager.py
+++ b/staffeln/conductor/manager.py
@@ -238,8 +238,11 @@
and not self.instance_retention_map
):
return
- retention_backups = []
backup_instance_map = {}
+
+ # get project list
+ self.controller.update_project_list()
+
for backup in self.get_backup_list():
# Create backup instance map, later sorted by created_at.
# This can be used as the basis for deciding whether to delete a backup.
@@ -252,67 +255,18 @@
backup_instance_map[backup.instance_id] = [backup]
# Sort backup instance map and use it to check backup create time and order.
- # Generate retention_backups base on it.
for instance_id in backup_instance_map:
sorted_backup_list = sorted(
backup_instance_map[instance_id],
key=lambda backup: backup.created_at.timestamp(),
reverse=True,
)
- idx = 0
- list_len = len(sorted_backup_list)
- find_earlier_full = False
- purge_incremental = True
-
- while idx < list_len:
- backup = sorted_backup_list[idx]
- if find_earlier_full and backup.incremental is True:
- # Skip on incrementals when try to find earlier
- # created full backup.
- idx += 1
- continue
- # If we should consider delete this backup
+ for backup in sorted_backup_list:
if self.is_retention(backup):
- # If is full backup
- if not backup.incremental:
- # For full backup should be deleted, purge
- # all backup include itself, otherwise, purge
- # only all earlier one if other backup depends on it.
- if not purge_incremental:
- # Still got incremental dependency,
- # but add all backups older than this one if any.
- idx += 1
- retention_backups += sorted_backup_list[idx:]
- break
- # If is incremental backup
- else:
- # This means there still have incremental
- # backup denepds on this one. So we will go to the
- # latest incremental backup for earlier full backup,
- # or to the earlier full backup itself if it had no
- # incremental backup rely on.
- if not purge_incremental:
- find_earlier_full = True
- idx += 1
- else:
- # The later backup is full backup, fine for us to
- # purge this and all older backup.
- retention_backups += sorted_backup_list[idx:]
- break
- else:
- # If it's incremental and not to be delete, make sure
- # we keep all it's dependency.
- purge_incremental = (
- False if backup.incremental else True
+ # Try to delete, skipping any "incremental backups exist" error.
+ self.controller.hard_remove_volume_backup(
+ backup, skip_inc_err=True
)
- idx += 1
-
- if not retention_backups:
- return
- # 2. get project list
- self.controller.update_project_list()
- # 3. remove the backups
- self.remove_backups(retention_backups)
except coordination.LockAcquireFailed:
LOG.debug("Failed to lock for retention")