fix(services): proper backup progress reporting

This commit is contained in:
Houkime 2023-06-28 11:45:07 +00:00 committed by Inex Code
parent 1fb5e3af97
commit 0e13e61b73
2 changed files with 36 additions and 6 deletions

View file

@ -127,19 +127,21 @@ class ResticBackupper(AbstractBackupper):
return ResticBackupper._snapshot_from_fresh_summary(message, repo_name)
raise ValueError("no summary message in restic json output")
def parse_message(self, raw_message_line: str, job=None) -> dict:
    """Parse one line of restic's JSON output into a dict.

    If the message is a restic "status" message and a job is supplied,
    the job's progress is updated from the reported fraction.

    Raises:
        ValueError: if the line does not parse to a single JSON object.
    """
    message = ResticBackupper.parse_json_output(raw_message_line)
    if not isinstance(message, dict):
        raise ValueError("we have too many messages on one line?")
    if message["message_type"] == "status":
        if job is not None:  # only update status if we run under some job
            Jobs.update(
                job,
                JobStatus.RUNNING,
                # restic reports percent_done as a 0..1 fraction;
                # scale to an integer percentage for the job system.
                progress=int(message["percent_done"] * 100),
            )
    return message
@staticmethod
def _snapshot_from_fresh_summary(message: object, repo_name) -> Snapshot:
def _snapshot_from_fresh_summary(message: dict, repo_name) -> Snapshot:
return Snapshot(
id=message["snapshot_id"],
created_at=datetime.datetime.now(datetime.timezone.utc),

View file

@ -3,6 +3,7 @@ import os.path as path
from os import makedirs
from os import remove
from os import listdir
from os import urandom
from datetime import datetime, timedelta, timezone
import selfprivacy_api.services as services
@ -259,9 +260,18 @@ def assert_job_has_run(job_type):
assert JobStatus.RUNNING in Jobs.status_updates(job)
def job_progress_updates(job_type):
    """Return the recorded progress updates of the finished job of *job_type*.

    Assumes at least one finished job of that type exists; raises
    IndexError otherwise.
    """
    job = [job for job in finished_jobs() if job.type_id == job_type][0]
    return Jobs.progress_updates(job)
def assert_job_had_progress(job_type):
    """Fail unless the job of *job_type* recorded at least one progress update."""
    updates = job_progress_updates(job_type)
    assert len(updates) > 0
def make_large_file(path: str, bytes: int):
    """Create a file at *path* filled with *bytes* random bytes.

    NOTE(review): the parameter names shadow the builtin ``bytes`` and the
    module-level ``os.path`` alias; kept unchanged for caller compatibility.
    """
    random_blob = urandom(bytes)
    with open(path, "wb") as target:
        target.write(random_blob)
def test_snapshots_by_id(backups, dummy_service):
@ -290,6 +300,24 @@ def test_backup_service_task(backups, dummy_service):
assert_job_had_progress(job_type_id)
def test_backup_larger_file(backups, dummy_service):
    """Back up a service containing a 10 MiB file and check that the backup
    job reported intermediate progress values, not just a final jump.
    """
    # Renamed locals: `dir` and `id` shadowed the builtins of the same name.
    large_file_path = path.join(dummy_service.get_folders()[0], "LARGEFILE")
    mega = 2**20
    make_large_file(large_file_path, 10 * mega)

    handle = start_backup(dummy_service)
    handle(blocking=True)

    # Results will be slightly different on different machines.
    # If someone has trouble with it on their machine,
    # consider dropping this test.
    service_id = dummy_service.get_id()
    job_type_id = f"services.{service_id}.backup"
    assert_job_finished(job_type_id, count=1)
    assert_job_has_run(job_type_id)
    updates = job_progress_updates(job_type_id)
    # A large file should produce several status messages, and by the
    # second update the reported percentage should already be nontrivial.
    assert len(updates) > 3
    assert updates[1] > 10
def test_restore_snapshot_task(backups, dummy_service):
Backups.back_up(dummy_service)
snaps = Backups.get_snapshots(dummy_service)