Merge pull request #90 from vexxhost/refect-report

Refect report
diff --git a/requirements.txt b/requirements.txt
index 103f767..8a52b2b 100755
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,6 +3,8 @@
 # process, which may cause wedges in the gate later.
 
 pbr>=2.0 # Apache-2.0
+
+alembic>=1.4.2 # MIT
 flask
 cotyledon>=1.3.0 #Apache-2.0
 futurist>=1.8.0 # Apache-2.0
diff --git a/staffeln/common/openstack.py b/staffeln/common/openstack.py
index 13dc05d..0e639e1 100644
--- a/staffeln/common/openstack.py
+++ b/staffeln/common/openstack.py
@@ -102,6 +102,7 @@
         # conn.block_storage.delete_backup(

         #     project_id=project_id, backup_id=uuid,

         # )

+        LOG.debug(f"Start deleting backup {uuid} in OpenStack.")

         try:

             self.conn.delete_volume_backup(uuid, force=force)

             # TODO(Alex): After delete the backup generator, need to set the volume status again

@@ -113,6 +114,11 @@
         quota = self._get_volume_quotas(project_id)

         return quota.backups

 

+    def get_backup_gigabytes_quota(self, project_id):

+        # quota = conn.get_volume_quotas(project_id)

+        quota = self._get_volume_quotas(project_id)

+        return quota.backup_gigabytes

+

     # rewrite openstasdk._block_storage.get_volume_quotas

     # added usage flag

     # ref: https://docs.openstack.org/api-ref/block-storage/v3/?expanded=#show-quota-usage-for-a-project

diff --git a/staffeln/common/time.py b/staffeln/common/time.py
index 45e6ffe..3c0b57b 100644
--- a/staffeln/common/time.py
+++ b/staffeln/common/time.py
@@ -1,7 +1,7 @@
 import re
-from datetime import datetime
 
 from dateutil.relativedelta import relativedelta
+from oslo_utils import timeutils
 
 DEFAULT_TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
 
@@ -36,11 +36,11 @@
 
 
 def get_current_time():
-    return datetime.now()
+    return timeutils.utcnow()
 
 
 def get_current_strtime():
-    now = datetime.now()
+    now = timeutils.utcnow()
     return now.strftime(DEFAULT_TIME_FORMAT)
 
 
@@ -48,7 +48,7 @@
     years=0, months=0, weeks=0, days=0, hours=0, minutes=0, seconds=0, from_date=None
 ):
     if from_date is None:
-        from_date = datetime.now()
+        from_date = timeutils.utcnow()
     return from_date - relativedelta(
         years=years,
         months=months,
diff --git a/staffeln/conductor/backup.py b/staffeln/conductor/backup.py
index 84c4d49..0a330a7 100755
--- a/staffeln/conductor/backup.py
+++ b/staffeln/conductor/backup.py
@@ -1,11 +1,12 @@
 import collections
-from datetime import datetime, timedelta
+from datetime import timedelta, timezone
 
 import staffeln.conf
 from openstack.exceptions import HttpException as OpenstackHttpException
 from openstack.exceptions import ResourceNotFound as OpenstackResourceNotFound
 from openstack.exceptions import SDKException as OpenstackSDKException
 from oslo_log import log
+from oslo_utils import timeutils
 from staffeln import objects
 from staffeln.common import constants, context, openstack
 from staffeln.conductor import result
@@ -95,6 +96,9 @@
     def get_backup_quota(self, project_id):
         return self.openstacksdk.get_backup_quota(project_id)
 
+    def get_backup_gigabytes_quota(self, project_id):
+        return self.openstacksdk.get_backup_gigabytes_quota(project_id)
+
     def get_queues(self, filters=None):
         """Get the list of volume queue columns from the queue_data table"""
         queues = objects.Queue.list(  # pylint: disable=E1120
@@ -187,7 +191,7 @@
         # but the process to check the remove will eventually starts.
         # Note: 315360000 = 10 years. The create time of an backup object will
         # not affect report.
-        threshold_strtime = datetime.now() - timedelta(seconds=315360000)
+        threshold_strtime = timeutils.utcnow() - timedelta(seconds=315360000)
         self._volume_backup(
             BackupMapping(
                 volume_id=task.volume_id,
@@ -241,8 +245,8 @@
             if backup is None:
                 LOG.info(
                     _(
-                        "Backup %s is not existing in Openstack."
-                        "Or cinder-backup is not existing in the cloud."
+                        "Backup %s is not existing in Openstack "
+                        "or cinder-backup is not existing in the cloud."
                         % backup_object.backup_id
                     )
                 )
@@ -295,7 +299,7 @@
             if backup is None:
                 LOG.info(
                     _(
-                        "Backup %s is not existing in Openstack."
+                        "Backup %s is not existing in Openstack. "
                         "Or cinder-backup is not existing in the cloud."
                         "Start removing backup object from Staffeln."
                         % backup_object.backup_id
@@ -346,11 +350,11 @@
                 # Ignore backup interval
                 return True
             interval = CONF.conductor.backup_min_interval
-            threshold_strtime = datetime.now() - timedelta(seconds=interval)
+            threshold_strtime = timeutils.utcnow() - timedelta(seconds=interval)
             backups = self.get_backups(
                 filters={
                     "volume_id__eq": volume_id,
-                    "created_at__gt": threshold_strtime.astimezone(),
+                    "created_at__gt": threshold_strtime.astimezone(timezone.utc),
                 }
             )
             if backups:
@@ -536,7 +540,7 @@
         backup_status and backup_id in the task queue table.
         """
         project_id = task.project_id
-        timestamp = int(datetime.now().timestamp())
+        timestamp = int(timeutils.utcnow().timestamp())
         # Backup name allows max 255 chars of string
         backup_name = ("%(instance_name)s_%(volume_name)s_%(timestamp)s") % {
             "instance_name": task.instance_name,
@@ -655,7 +659,7 @@
                 instance_id=task.instance_id,
                 backup_completed=1,
                 incremental=task.incremental,
-                created_at=datetime.now(),
+                created_at=timeutils.utcnow(),
             )
         )
         task.backup_status = constants.BACKUP_COMPLETED
diff --git a/staffeln/conductor/manager.py b/staffeln/conductor/manager.py
index d8ead77..a8419cc 100755
--- a/staffeln/conductor/manager.py
+++ b/staffeln/conductor/manager.py
@@ -1,11 +1,13 @@
 import threading

 import time

-from datetime import datetime, timedelta

+from datetime import timedelta, timezone

 

 import cotyledon

 import staffeln.conf

 from futurist import periodics

 from oslo_log import log

+from oslo_utils import timeutils

+from staffeln import objects

 from staffeln.common import constants, context, lock

 from staffeln.common import time as xtime

 from staffeln.conductor import backup as backup_controller

@@ -129,17 +131,28 @@
 

     def _report_backup_result(self):

         report_period = CONF.conductor.report_period

-        threshold_strtime = datetime.now() - timedelta(seconds=report_period)

-        filters = {"created_at__lt": threshold_strtime.astimezone()}

-        old_tasks = self.controller.get_queues(filters=filters)

-        for task in old_tasks:

-            if task.backup_status in (

-                constants.BACKUP_COMPLETED,

-                constants.BACKUP_FAILED,

-            ):

-                LOG.info(_("Reporting finished backup tasks..."))

-                self.controller.publish_backup_result(purge_on_success=True)

-                return

+        threshold_strtime = timeutils.utcnow() - timedelta(seconds=report_period)

+

+        filters = {"created_at__gt": threshold_strtime.astimezone(timezone.utc)}

+        report_tss = objects.ReportTimestamp.list(  # pylint: disable=E1120

+            context=self.ctx, filters=filters

+        )

+        # If there are no reports that were generated within report_period seconds,

+        # generate and publish one.

+        if not report_tss:

+            LOG.info(_("Reporting finished backup tasks..."))

+            self.controller.publish_backup_result(purge_on_success=True)

+

+            # Purge records that live longer than 10 report cycles

+            threshold_strtime = timeutils.utcnow() - timedelta(

+                seconds=report_period * 10

+            )

+            filters = {"created_at__lt": threshold_strtime.astimezone(timezone.utc)}

+            old_report_tss = objects.ReportTimestamp.list(  # pylint: disable=E1120

+                context=self.ctx, filters=filters

+            )

+            for report_ts in old_report_tss:

+                report_ts.delete()

 

     def backup_engine(self, backup_service_period):

         LOG.info("Backup manager started %s" % str(time.time()))

@@ -202,8 +215,8 @@
             self.controller.hard_remove_volume_backup(retention_backup)

 

     def is_retention(self, backup):

-        now = datetime.now()

-        backup_age = now.astimezone() - backup.created_at

+        now = timeutils.utcnow().astimezone(timezone.utc)

+        backup_age = now - backup.created_at.astimezone(timezone.utc)

         # see if need to be delete.

         if backup.instance_id in self.instance_retention_map:

             retention_time = now - self.get_time_from_str(

@@ -229,7 +242,7 @@
                     # get the threshold time

                     self.threshold_strtime = self.get_time_from_str(

                         CONF.conductor.retention_time

-                    )

+                    ).astimezone(timezone.utc)

                     self.instance_retention_map = (

                         self.controller.collect_instance_retention_map()

                     )

diff --git a/staffeln/conductor/result.py b/staffeln/conductor/result.py
index b107172..98e3c05 100644
--- a/staffeln/conductor/result.py
+++ b/staffeln/conductor/result.py
@@ -2,6 +2,8 @@
 # This should be upgraded by integrating with mail server to send batch

 import staffeln.conf

 from oslo_log import log

+from oslo_utils import timeutils

+from staffeln import objects

 from staffeln.common import constants, email

 from staffeln.common import time as xtime

 from staffeln.i18n import _

@@ -27,7 +29,7 @@
                 "Directly record report in log as sender email "

                 f"are not configed. Report: {self.content}"

             )

-            return

+            return True

         if not subject:

             subject = "Staffeln Backup result"

         if len(CONF.notification.receiver) != 0:

@@ -67,6 +69,7 @@
             }

             email.send(smtp_profile)

             LOG.info(_(f"Backup result email sent to {receiver}"))

+            return True

         except Exception as e:

             LOG.warn(

                 _(

@@ -76,10 +79,20 @@
             )

             raise

 

+    def create_report_record(self):

+        sender = (

+            CONF.notification.sender_email

+            if CONF.notification.sender_email

+            else "RecordInLog"

+        )

+        report_ts = objects.ReportTimestamp(self.backup_mgt.ctx)

+        report_ts.sender = sender

+        report_ts.created_at = timeutils.utcnow()

+        return report_ts.create()

+

     def publish(self, project_id=None, project_name=None):

         # 1. get quota

-        self.content = "<h3>${TIME}</h3><br>"

-        self.content = self.content.replace("${TIME}", xtime.get_current_strtime())

+        self.content = f"<h3>{xtime.get_current_strtime()}</h3><br>"

         success_tasks = self.backup_mgt.get_queues(

             filters={

                 "backup_status": constants.BACKUP_COMPLETED,

@@ -95,20 +108,16 @@
         if not success_tasks and not failed_tasks:

             return False

 

+        # Generate HTML Content

         html = ""

-        quota = self.backup_mgt.get_backup_quota(project_id)

-

-        html += (

-            "<h3>Project: ${PROJECT} (ID: ${PROJECT_ID})</h3><h3>Quota Usage</h3>"

-            "<FONT COLOR=${QUOTA_COLLOR}><h4>Limit: ${QUOTA_LIMIT}, In Use: "

-            "${QUOTA_IN_USE}, Reserved: ${QUOTA_RESERVED}, Total "

-            "rate: ${QUOTA_USAGE}</h4></FONT>"

-            "<h3>Success List</h3>"

-            "<FONT COLOR=GREEN><h4>${SUCCESS_VOLUME_LIST}</h4></FONT><br>"

-            "<h3>Failed List</h3>"

-            "<FONT COLOR=RED><h4>${FAILED_VOLUME_LIST}</h4></FONT><br>"

-        )

-

+        quota = self.backup_mgt.get_backup_gigabytes_quota(project_id)

+        quota_usage = (quota["in_use"] + quota["reserved"]) / quota["limit"]

+        if quota_usage > 0.8:

+            quota_color = "RED"

+        elif quota_usage > 0.5:

+            quota_color = "YELLOW"

+        else:

+            quota_color = "GREEN"

         if success_tasks:

             success_volumes = "<br>".join(

                 [

@@ -136,23 +145,23 @@
             )

         else:

             failed_volumes = "<br>"

-        quota_usage = (quota["in_use"] + quota["reserved"]) / quota["limit"]

-        if quota_usage > 0.8:

-            quota_color = "RED"

-        elif quota_usage > 0.5:

-            quota_color = "YALLOW"

-        else:

-            quota_color = "GREEN"

-        html = html.replace("${QUOTA_USAGE}", str(quota_usage))

-        html = html.replace("${QUOTA_COLLOR}", quota_color)

-        html = html.replace("${QUOTA_LIMIT}", str(quota["limit"]))

-        html = html.replace("${QUOTA_IN_USE}", str(quota["in_use"]))

-        html = html.replace("${QUOTA_RESERVED}", str(quota["reserved"]))

-        html = html.replace("${SUCCESS_VOLUME_LIST}", success_volumes)

-        html = html.replace("${FAILED_VOLUME_LIST}", failed_volumes)

-        html = html.replace("${PROJECT}", project_name)

-        html = html.replace("${PROJECT_ID}", project_id)

+        html += (

+            f"<h3>Project: {project_name} (ID: {project_id})</h3>"

+            "<h3>Quota Usage (Backup Gigabytes)</h3>"

+            f"<FONT COLOR={quota_color}><h4>Limit: {str(quota['limit'])} GB, In Use: "

+            f"{str(quota['in_use'])} GB, Reserved: {str(quota['reserved'])} GB, Total "

+            f"rate: {str(quota_usage)}</h4></FONT>"

+            "<h3>Success List</h3>"

+            f"<FONT COLOR=GREEN><h4>{success_volumes}</h4></FONT><br>"

+            "<h3>Failed List</h3>"

+            f"<FONT COLOR=RED><h4>{failed_volumes}</h4></FONT><br>"

+        )

         self.content += html

         subject = f"Staffeln Backup result: {project_id}"

-        self.send_result_email(project_id, subject=subject, project_name=project_name)

+        reported = self.send_result_email(

+            project_id, subject=subject, project_name=project_name

+        )

+        if reported:

+            # Record success report

+            self.create_report_record()

         return True

diff --git a/staffeln/db/sqlalchemy/alembic/versions/5b2e78435231_add_report_timestamp.py b/staffeln/db/sqlalchemy/alembic/versions/5b2e78435231_add_report_timestamp.py
new file mode 100644
index 0000000..293de1a
--- /dev/null
+++ b/staffeln/db/sqlalchemy/alembic/versions/5b2e78435231_add_report_timestamp.py
@@ -0,0 +1,36 @@
+import sqlalchemy as sa
+from alembic import op
+from oslo_log import log
+
+"""add report timestamp
+
+Revision ID: 5b2e78435231
+Revises: 2b2b9df199bd
+Create Date: 2023-03-20 12:24:58.084135
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = "5b2e78435231"
+down_revision = "2b2b9df199bd"
+
+LOG = log.getLogger(__name__)
+
+
+def upgrade():
+    op.create_table(
+        "report_timestamp",
+        sa.Column("id", sa.String(36), primary_key=True, nullable=False),
+        sa.Column("created_at", sa.DateTime),
+        sa.Column("updated_at", sa.DateTime),
+        sa.Column("sender", sa.String(length=255), nullable=True),
+        mysql_engine="InnoDB",
+        mysql_charset="utf8",
+    )
+
+
+def downgrade():
+    try:
+        op.drop_table("report_timestamp")
+    except Exception:
+        LOG.exception("Error Dropping 'report_timestamp' table.")
diff --git a/staffeln/db/sqlalchemy/api.py b/staffeln/db/sqlalchemy/api.py
index 3526b93..6d9325f 100644
--- a/staffeln/db/sqlalchemy/api.py
+++ b/staffeln/db/sqlalchemy/api.py
@@ -100,6 +100,23 @@
     def _get_relationships(model):
         return inspect(model).relationships
 
+    def _add_report_filters(self, query, filters):
+        """Add filters while listing report_timestamp table"""
+        if filters is None:
+            filters = {}
+
+        plain_fields = [
+            "sender",
+            "created_at",
+        ]
+
+        return self._add_filters(
+            query=query,
+            model=models.Report_timestamp,
+            filters=filters,
+            plain_fields=plain_fields,
+        )
+
     def _add_backup_filters(self, query, filters):
         """Add filters while listing the columns from the backup_data table"""
         if filters is None:
@@ -336,3 +353,28 @@
             return self._soft_delete(models.Backup_data, id)
         except:  # noqa: E722
             LOG.error("Backup Not found.")
+
+    def get_report_timestamp_list(self, *args, **kwargs):
+        return self._get_model_list(
+            models.Report_timestamp, self._add_report_filters, *args, **kwargs
+        )
+
+    def create_report_timestamp(self, values):
+        try:
+            report_timestamp_data = self._create(models.Report_timestamp, values)
+        except db_exc.DBDuplicateEntry:
+            LOG.error("Report Timestamp ID already exists.")
+        return report_timestamp_data
+
+    def update_report_timestamp(self, id, values):
+
+        try:
+            return self._update(models.Report_timestamp, id, values)
+        except:  # noqa: E722
+            LOG.error("Report Timestamp resource not found.")
+
+    def soft_delete_report_timestamp(self, id):
+        try:
+            return self._soft_delete(models.Report_timestamp, id)
+        except:  # noqa: E722
+            LOG.error("Report Timestamp Not found.")
diff --git a/staffeln/db/sqlalchemy/models.py b/staffeln/db/sqlalchemy/models.py
index 99b7ea3..c186ddc 100644
--- a/staffeln/db/sqlalchemy/models.py
+++ b/staffeln/db/sqlalchemy/models.py
@@ -71,3 +71,12 @@
     instance_name = Column(String(100))
     incremental = Column(Boolean, default=False)
     reason = Column(String(255), nullable=True)
+
+
+class Report_timestamp(Base):
+    """Represent the report_timestamp"""
+
+    __tablename__ = "report_timestamp"
+    __table_args__ = table_args()
+    id = Column(Integer, primary_key=True, autoincrement=True)
+    sender = Column(String(255), nullable=True)
diff --git a/staffeln/objects/__init__.py b/staffeln/objects/__init__.py
index b79e47b..2af8df0 100755
--- a/staffeln/objects/__init__.py
+++ b/staffeln/objects/__init__.py
@@ -1,8 +1,9 @@
 from .queue import Queue  # noqa: F401
+from .report import ReportTimestamp  # noqa: F401
 from .volume import Volume  # noqa: F401
 
 
-# from volume import Volume
 def register_all():
     __import__("staffeln.objects.volume")
     __import__("staffeln.objects.queue")
+    __import__("staffeln.objects.report")
diff --git a/staffeln/objects/queue.py b/staffeln/objects/queue.py
index e709ce1..c6b1177 100644
--- a/staffeln/objects/queue.py
+++ b/staffeln/objects/queue.py
@@ -1,3 +1,4 @@
+from oslo_versionedobjects import fields as ovoo_fields
 from staffeln.db import api as db_api
 from staffeln.objects import base
 from staffeln.objects import fields as sfeild
@@ -7,9 +8,10 @@
 class Queue(
     base.StaffelnPersistentObject, base.StaffelnObject, base.StaffelnObjectDictCompat
 ):
-    VERSION = "1.1"
+    VERSION = "1.2"
     # Version 1.0: Initial version
     # Version 1.1: Add 'incremental' and 'reason' field
+    # Version 1.2: Add 'created_at' field
 
     dbapi = db_api.get_instance()
 
@@ -24,6 +26,7 @@
         "instance_name": sfeild.StringField(),
         "incremental": sfeild.BooleanField(),
         "reason": sfeild.StringField(nullable=True),
+        "created_at": ovoo_fields.DateTimeField(),
     }
 
     @base.remotable_classmethod
diff --git a/staffeln/objects/report.py b/staffeln/objects/report.py
new file mode 100644
index 0000000..e851a93
--- /dev/null
+++ b/staffeln/objects/report.py
@@ -0,0 +1,45 @@
+from oslo_versionedobjects import fields as ovoo_fields
+from staffeln.db import api as db_api
+from staffeln.objects import base
+from staffeln.objects import fields as sfeild
+
+
+@base.StaffelnObjectRegistry.register
+class ReportTimestamp(
+    base.StaffelnPersistentObject, base.StaffelnObject, base.StaffelnObjectDictCompat
+):
+    VERSION = "1.0"
+    # Version 1.0: Initial version
+
+    dbapi = db_api.get_instance()
+
+    fields = {
+        "id": sfeild.IntegerField(),
+        "sender": sfeild.StringField(nullable=True),
+        "created_at": ovoo_fields.DateTimeField(),
+    }
+
+    @base.remotable_classmethod
+    def list(cls, context, filters=None):  # pylint: disable=E0213
+        db_report = cls.dbapi.get_report_timestamp_list(context, filters=filters)
+        return [cls._from_db_object(cls(context), obj) for obj in db_report]
+
+    @base.remotable
+    def create(self):
+        """Create a :class:`report_timestamp` record in the DB"""
+        values = self.obj_get_changes()
+        db_report_timestamp = self.dbapi.create_report_timestamp(values)
+        return self._from_db_object(self, db_report_timestamp)
+
+    @base.remotable
+    def save(self):
+        updates = self.obj_get_changes()
+        db_obj = self.dbapi.update_report_timestamp(self.id, updates)
+        obj = self._from_db_object(self, db_obj, eager=False)
+        self.obj_refresh(obj)
+        self.obj_reset_changes()
+
+    @base.remotable
+    def delete(self):
+        """Soft Delete the :class:`report_timestamp` from the DB"""
+        self.dbapi.soft_delete_report_timestamp(self.id)