from typing import List, Optional
from datetime import datetime, timedelta
from os import statvfs

from selfprivacy_api.models.backup.snapshot import Snapshot
from selfprivacy_api.utils import ReadUserData, WriteUserData

from selfprivacy_api.services import get_service_by_id
from selfprivacy_api.services.service import Service

from selfprivacy_api.graphql.queries.providers import BackupProvider

from selfprivacy_api.backup.providers.provider import AbstractBackupProvider
from selfprivacy_api.backup.providers import get_provider
from selfprivacy_api.backup.storage import Storage
from selfprivacy_api.backup.jobs import (
    get_backup_job,
    add_backup_job,
    get_restore_job,
    add_restore_job,
)
from selfprivacy_api.jobs import Jobs, JobStatus

DEFAULT_JSON_PROVIDER = {
    "provider": "BACKBLAZE",
    "accountId": "",
    "accountKey": "",
    "bucket": "",
}


class Backups:
    """A singleton controller for backups"""

    provider: AbstractBackupProvider

    @staticmethod
    def set_localfile_repo(file_path: str):
        ProviderClass = get_provider(BackupProvider.FILE)
        provider = ProviderClass(login="", key="", location=file_path, repo_id="")
        Storage.store_provider(provider)

    # NOTE: this overload is shadowed by the set_provider(kind, ...) defined
    # below; the later definition wins when the class body is evaluated.
    @staticmethod
    def set_provider(provider: AbstractBackupProvider):
        Storage.store_provider(provider)

    @staticmethod
    def get_last_backed_up(service: Service) -> Optional[datetime]:
        """Get a timezone-aware time of the last backup of a service"""
        return Storage.get_last_backup_time(service.get_id())

    @staticmethod
    def get_cached_snapshots_service(service_id: str) -> List[Snapshot]:
        snapshots = Storage.get_cached_snapshots()
        return [snap for snap in snapshots if snap.service_name == service_id]

    @staticmethod
    def sync_service_snapshots(service_id: str, snapshots: List[Snapshot]):
        # cache any snapshots of this service that we do not know about yet
        for snapshot in snapshots:
            if snapshot.service_name == service_id:
                Storage.cache_snapshot(snapshot)
        # evict cached snapshots that are no longer present upstream
        for snapshot in Backups.get_cached_snapshots_service(service_id):
            if snapshot.id not in [snap.id for snap in snapshots]:
                Storage.delete_cached_snapshot(snapshot)

    @staticmethod
    def enable_autobackup(service: Service):
        Storage.set_autobackup(service)

    @staticmethod
    def _service_ids_to_back_up(time: datetime) -> List[str]:
        services = Storage.services_with_autobackup()
        return [id for id in services if Backups.is_time_to_backup_service(id, time)]

    @staticmethod
    def services_to_back_up(time: datetime) -> List[Service]:
        result = []
        for id in Backups._service_ids_to_back_up(time):
            service = get_service_by_id(id)
            if service is None:
                raise ValueError("Cannot look up a service scheduled for backup!")
            result.append(service)
        return result

    @staticmethod
    def is_time_to_backup(time: datetime) -> bool:
        """
        Intended as a time validator for the huey cron scheduler
        of automatic backups.
        """
        return Backups._service_ids_to_back_up(time) != []

    @staticmethod
    def is_time_to_backup_service(service_id: str, time: datetime) -> bool:
        period = Backups.autobackup_period_minutes()
        if period is None:
            return False
        if not Storage.is_autobackup_set(service_id):
            return False

        last_backup = Storage.get_last_backup_time(service_id)
        if last_backup is None:
            return True  # queue a backup immediately if there are no previous backups

        if time > last_backup + timedelta(minutes=period):
            return True
        return False
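
    # Usage sketch (hypothetical, not part of this module): the docstring of
    # is_time_to_backup() names huey as the intended caller, so a periodic
    # task could poll it like this. The task name and schedule are assumptions.
    #
    #     from huey import crontab
    #
    #     @huey.periodic_task(crontab(minute="*"))
    #     def automatic_backup():
    #         time = datetime.utcnow()
    #         if Backups.is_time_to_backup(time):
    #             for service in Backups.services_to_back_up(time):
    #                 Backups.back_up(service)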

    @staticmethod
    def disable_autobackup(service: Service):
        """also see disable_all_autobackup()"""
        Storage.unset_autobackup(service)

    @staticmethod
    def is_autobackup_enabled(service: Service) -> bool:
        return Storage.is_autobackup_set(service.get_id())

    @staticmethod
    def autobackup_period_minutes() -> Optional[int]:
        """None means autobackup is disabled"""
        return Storage.autobackup_period_minutes()

    @staticmethod
    def set_autobackup_period_minutes(minutes: int):
        """
        Zero and negative numbers disable autobackup.
        Setting a positive number may trigger a backup very soon
        if some services have not been backed up yet.
        """
        if minutes <= 0:
            Backups.disable_all_autobackup()
            return
        Storage.store_autobackup_period_minutes(minutes)
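
    # Example (the service id is a placeholder): enable daily automatic
    # backups for one service, then turn them all off again.
    #
    #     service = get_service_by_id("nextcloud")
    #     Backups.enable_autobackup(service)
    #     Backups.set_autobackup_period_minutes(24 * 60)  # daily
    #     Backups.set_autobackup_period_minutes(0)  # same as disable_all_autobackup()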

    @staticmethod
    def disable_all_autobackup():
        """disables all automatic backing up, but does not change per-service settings"""
        Storage.delete_backup_period()

    @staticmethod
    def provider():
        return Backups.lookup_provider()

    @staticmethod
    def set_provider(kind: str, login: str, key: str, location: str, repo_id: str = ""):
        provider = Backups.construct_provider(kind, login, key, location, repo_id)
        Storage.store_provider(provider)

    @staticmethod
    def construct_provider(
        kind: str, login: str, key: str, location: str, repo_id: str = ""
    ) -> AbstractBackupProvider:
        provider_class = get_provider(BackupProvider[kind])
        return provider_class(login=login, key=key, location=location, repo_id=repo_id)
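
    # Example (credential values are placeholders): construct and persist a
    # Backblaze provider. `kind` must be the name of a BackupProvider enum
    # member, as in BackupProvider[kind] above.
    #
    #     provider = Backups.construct_provider(
    #         kind="BACKBLAZE",
    #         login="account_id",
    #         key="account_key",
    #         location="bucket_name",
    #     )
    #     Storage.store_provider(provider)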

    @staticmethod
    def reset(reset_json=True):
        Storage.reset()
        if reset_json:
            try:
                Backups.reset_provider_json()
            except FileNotFoundError:
                # if there is no userdata file, we do not need to reset it
                pass

    @staticmethod
    def lookup_provider() -> AbstractBackupProvider:
        redis_provider = Backups.load_provider_redis()
        if redis_provider is not None:
            return redis_provider

        json_provider = Backups.load_provider_json()
        if json_provider is not None:
            Storage.store_provider(json_provider)
            return json_provider

        # fall back to an in-memory provider; construct_provider() requires
        # a location, so pass an empty one
        memory_provider = Backups.construct_provider(
            "MEMORY", login="", key="", location=""
        )
        Storage.store_provider(memory_provider)
        return memory_provider

    @staticmethod
    def load_provider_json() -> Optional[AbstractBackupProvider]:
        with ReadUserData() as user_data:
            account = ""
            key = ""

            if "backup" not in user_data.keys():
                if "backblaze" in user_data.keys():
                    account = user_data["backblaze"]["accountId"]
                    key = user_data["backblaze"]["accountKey"]
                    location = user_data["backblaze"]["bucket"]
                    provider_string = "BACKBLAZE"
                    return Backups.construct_provider(
                        kind=provider_string, login=account, key=key, location=location
                    )
                return None

            account = user_data["backup"]["accountId"]
            key = user_data["backup"]["accountKey"]
            provider_string = user_data["backup"]["provider"]
            location = user_data["backup"]["bucket"]
            return Backups.construct_provider(
                kind=provider_string, login=account, key=key, location=location
            )

    @staticmethod
    def reset_provider_json() -> None:
        with WriteUserData() as user_data:
            if "backblaze" in user_data.keys():
                del user_data["backblaze"]

            user_data["backup"] = DEFAULT_JSON_PROVIDER

    @staticmethod
    def load_provider_redis() -> Optional[AbstractBackupProvider]:
        provider_model = Storage.load_provider()
        if provider_model is None:
            return None
        return Backups.construct_provider(
            provider_model.kind,
            provider_model.login,
            provider_model.key,
            provider_model.location,
            provider_model.repo_id,
        )

    @staticmethod
    def back_up(service: Service):
        """The top-level function to back up a service"""
        folders = service.get_folders()
        repo_name = service.get_id()

        job = get_backup_job(service)
        if job is None:
            job = add_backup_job(service)
        Jobs.update(job, status=JobStatus.RUNNING)

        try:
            service.pre_backup()
            snapshot = Backups.provider().backuper.start_backup(folders, repo_name)
            Backups._store_last_snapshot(repo_name, snapshot)
            service.post_restore()
        except Exception as e:
            Jobs.update(job, status=JobStatus.ERROR)
            raise e

        Jobs.update(job, status=JobStatus.FINISHED)
        return snapshot
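
    # Usage sketch: back up one service and inspect the result. Assumes a
    # provider is configured and the repo is initialized (see init_repo());
    # "nextcloud" is a placeholder id.
    #
    #     service = get_service_by_id("nextcloud")
    #     snapshot = Backups.back_up(service)
    #     print(snapshot.id)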

    @staticmethod
    def init_repo(service: Optional[Service] = None):
        if service is not None:
            repo_name = service.get_id()

        Backups.provider().backuper.init()
        Storage.mark_as_init()

    @staticmethod
    def is_initted() -> bool:
        if Storage.has_init_mark():
            return True

        initted = Backups.provider().backuper.is_initted()
        if initted:
            Storage.mark_as_init()
            return True

        return False

    @staticmethod
    def get_snapshots(service: Service) -> List[Snapshot]:
        snapshots = Backups.get_all_snapshots()
        return [snap for snap in snapshots if snap.service_name == service.get_id()]

    @staticmethod
    def get_all_snapshots() -> List[Snapshot]:
        cached_snapshots = Storage.get_cached_snapshots()
        if cached_snapshots != []:
            return cached_snapshots
        # TODO: the oldest snapshots will get expired faster than the new ones.
        # How to detect that the end is missing?

        upstream_snapshots = Backups.provider().backuper.get_snapshots()
        Backups.sync_all_snapshots()
        return upstream_snapshots

    @staticmethod
    def get_snapshot_by_id(id: str) -> Optional[Snapshot]:
        snap = Storage.get_cached_snapshot_by_id(id)
        if snap is not None:
            return snap

        # Possibly our cache entry got invalidated, let's try one more time
        Backups.sync_all_snapshots()
        snap = Storage.get_cached_snapshot_by_id(id)

        return snap

    @staticmethod
    def force_snapshot_reload():
        Backups.sync_all_snapshots()

    @staticmethod
    def sync_all_snapshots():
        upstream_snapshots = Backups.provider().backuper.get_snapshots()
        Storage.invalidate_snapshot_storage()
        for snapshot in upstream_snapshots:
            Storage.cache_snapshot(snapshot)

    # to be deprecated/internalized in favor of restore_snapshot()
    @staticmethod
    def restore_service_from_snapshot(service: Service, snapshot_id: str):
        repo_name = service.get_id()
        folders = service.get_folders()

        Backups.provider().backuper.restore_from_backup(repo_name, snapshot_id, folders)

    @staticmethod
    def assert_restorable(snapshot: Snapshot):
        service = get_service_by_id(snapshot.service_name)
        if service is None:
            raise ValueError(
                f"snapshot has a nonexistent service: {snapshot.service_name}"
            )

        needed_space = Backups.snapshot_restored_size(snapshot)
        available_space = Backups.space_usable_for_service(service)
        if needed_space > available_space:
            raise ValueError(
                f"we only have {available_space} bytes but snapshot needs {needed_space}"
            )

    @staticmethod
    def restore_snapshot(snapshot: Snapshot):
        service = get_service_by_id(snapshot.service_name)

        job = get_restore_job(service)
        if job is None:
            job = add_restore_job(snapshot)

        Jobs.update(job, status=JobStatus.RUNNING)
        try:
            Backups.assert_restorable(snapshot)
            Backups.restore_service_from_snapshot(service, snapshot.id)
            service.post_restore()
        except Exception as e:
            Jobs.update(job, status=JobStatus.ERROR)
            raise e

        Jobs.update(job, status=JobStatus.FINISHED)
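
    # Usage sketch: restore a service from a known snapshot id (the id value
    # is a placeholder).
    #
    #     snap = Backups.get_snapshot_by_id("abc123")
    #     if snap is not None:
    #         Backups.restore_snapshot(snap)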

    @staticmethod
    def service_snapshot_size(service: Service, snapshot_id: str) -> float:
        repo_name = service.get_id()
        return Backups.provider().backuper.restored_size(repo_name, snapshot_id)

    @staticmethod
    def snapshot_restored_size(snapshot: Snapshot) -> float:
        return Backups.service_snapshot_size(
            get_service_by_id(snapshot.service_name), snapshot.id
        )

    @staticmethod
    def space_usable_for_service(service: Service) -> int:
        folders = service.get_folders()
        if folders == []:
            raise ValueError("unallocated service", service.get_id())

        fs_info = statvfs(folders[0])
        usable_bytes = fs_info.f_frsize * fs_info.f_bavail
        return usable_bytes
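
    # statvfs math, for reference: f_frsize is the filesystem fragment size in
    # bytes and f_bavail is the count of free blocks available to unprivileged
    # users, so a 4096-byte fragment size with 1_000_000 free blocks gives
    # 4096 * 1_000_000 bytes, i.e. roughly 3.8 GiB usable.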

    @staticmethod
    def _store_last_snapshot(service_id: str, snapshot: Snapshot):
        """Bookkeeping for a snapshot that was just made"""
        # non-expiring timestamp of the last backup
        Storage.store_last_timestamp(service_id, snapshot)
        # expiring cache entry
        Storage.cache_snapshot(snapshot)