import pytest

from typing import List

import os
import os.path as path
from os import remove
from os import listdir
from os import urandom
from datetime import datetime, timedelta, timezone
import tempfile

from selfprivacy_api.utils.huey import huey

from selfprivacy_api.services.service import ServiceStatus
from selfprivacy_api.services import ServiceManager

from selfprivacy_api.graphql.queries.providers import BackupProvider as ProviderEnum
from selfprivacy_api.graphql.common_types.backup import (
    RestoreStrategy,
    BackupReason,
)
from selfprivacy_api.graphql.queries.providers import BackupProvider

from selfprivacy_api.jobs import Job, Jobs, JobStatus

from selfprivacy_api.models.backup.snapshot import Snapshot

from selfprivacy_api.backup import Backups, BACKUP_PROVIDER_ENVS
import selfprivacy_api.backup.providers as providers
from selfprivacy_api.backup.providers import AbstractBackupProvider
from selfprivacy_api.backup.providers.backblaze import Backblaze
from selfprivacy_api.backup.providers.none import NoBackups
from selfprivacy_api.backup.providers import get_kind
from selfprivacy_api.backup.util import sync

from selfprivacy_api.backup.tasks import (
    start_backup,
    restore_snapshot,
    reload_snapshot_cache,
    total_backup,
    do_full_restore,
    which_snapshots_to_full_restore,
)
from selfprivacy_api.backup.storage import Storage
from selfprivacy_api.backup.local_secret import LocalBackupSecret
from selfprivacy_api.backup.jobs import (
    get_backup_fail,
    add_total_backup_job,
    add_total_restore_job,
)

from tests.common import assert_job_errored

from tests.conftest import (
    write_testfile_bodies,
    get_testfile_bodies,
    assert_original_files,
    assert_rebuild_was_made,
)
from tests.test_dkim import dkim_file

from tests.test_graphql.test_services import (
    only_dummy_service_and_api,
    only_dummy_service,
)


REPO_NAME = "test_backup"

REPOFILE_NAME = "totallyunrelated"


def prepare_localfile_backups(temp_dir):
    test_repo_path = path.join(temp_dir, REPOFILE_NAME)
    assert not path.exists(test_repo_path)
    Backups.set_localfile_repo(test_repo_path)


@pytest.fixture(scope="function")
def backups_local(tmpdir):
    Backups.reset()
    prepare_localfile_backups(tmpdir)
    Jobs.reset()
    Backups.init_repo()


@pytest.fixture(scope="function")
def backups(tmpdir):
    """
    For those tests that are supposed to pass with
    both local and cloud repos
    """

    # Sometimes this is False by the time the fixture runs, cause unknown;
    # force it so that huey executes tasks synchronously.
    huey.immediate = True
    assert huey.immediate is True

    Backups.reset()
    if BACKUP_PROVIDER_ENVS["kind"] in os.environ.keys():
        Backups.set_provider_from_envs()
    else:
        prepare_localfile_backups(tmpdir)
    Jobs.reset()

    Backups.init_repo()
    assert Backups.provider().location == str(tmpdir) + "/" + REPOFILE_NAME
    yield
    Backups.erase_repo()


@pytest.fixture()
def memory_backup() -> AbstractBackupProvider:
    ProviderClass = providers.get_provider(BackupProvider.MEMORY)
    assert ProviderClass is not None
    memory_provider = ProviderClass(login="", key="")
    assert memory_provider is not None
    return memory_provider


@pytest.fixture()
def file_backup(tmpdir) -> AbstractBackupProvider:
    test_repo_path = path.join(tmpdir, "test_repo")
    ProviderClass = providers.get_provider(BackupProvider.FILE)
    assert ProviderClass is not None
    provider = ProviderClass(location=test_repo_path)
    assert provider is not None
    return provider


def ids(snapshots: List[Snapshot]) -> List[str]:
    return [snapshot.id for snapshot in snapshots]


def assert_job_ok(job: Job):
    try:
        assert job.status == JobStatus.FINISHED
    # For easier debug
    except AssertionError:
        raise ValueError("Job errored out when it was not supposed to:", job.error)


def test_reset_sets_to_none1():
    Backups.reset()
    provider = Backups.provider()
    assert provider is not None
    assert isinstance(provider, NoBackups)


def test_reset_sets_to_none2(backups):
    # Same as above, but now with a provider already set up by the fixture
    Backups.reset()
    provider = Backups.provider()
    assert provider is not None
    assert isinstance(provider, NoBackups)


def test_setting_from_envs(tmpdir):
    Backups.reset()
    environment_stash = {}
    if BACKUP_PROVIDER_ENVS["kind"] in os.environ.keys():
        # we are running under special envs, stash them before rewriting them
        for key in BACKUP_PROVIDER_ENVS.values():
            environment_stash[key] = os.environ[key]

    os.environ[BACKUP_PROVIDER_ENVS["kind"]] = "BACKBLAZE"
    os.environ[BACKUP_PROVIDER_ENVS["login"]] = "ID"
    os.environ[BACKUP_PROVIDER_ENVS["key"]] = "KEY"
    os.environ[BACKUP_PROVIDER_ENVS["location"]] = "selfprivacy"
    Backups.set_provider_from_envs()
    provider = Backups.provider()

    assert provider is not None
    assert isinstance(provider, Backblaze)
    assert provider.login == "ID"
    assert provider.key == "KEY"
    assert provider.location == "selfprivacy"

    assert provider.backupper.account == "ID"
    assert provider.backupper.key == "KEY"

    if environment_stash != {}:
        for key in BACKUP_PROVIDER_ENVS.values():
            os.environ[key] = environment_stash[key]
    else:
        for key in BACKUP_PROVIDER_ENVS.values():
            del os.environ[key]
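

# The stash/restore dance above could be factored into a reusable helper.
# A minimal sketch of a hypothetical context manager (not used by these tests),
# assuming the values of BACKUP_PROVIDER_ENVS are plain environment variable
# names, as in the test above:
import contextlib


@contextlib.contextmanager
def stashed_envs(overrides: dict):
    # Remember the previous values (None if unset), apply the overrides,
    # then restore the environment on exit even if the body raises.
    stash = {key: os.environ.get(key) for key in overrides}
    os.environ.update(overrides)
    try:
        yield
    finally:
        for key, value in stash.items():
            if value is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = value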


def test_select_backend():
    provider = providers.get_provider(BackupProvider.BACKBLAZE)
    assert provider is not None
    assert provider == Backblaze


def test_file_backend_init(file_backup):
    file_backup.backupper.init()


def test_reinit_after_purge(backups):
    assert Backups.is_initted() is True

    Backups.erase_repo()
    assert Backups.is_initted() is False
    with pytest.raises(ValueError):
        Backups.force_snapshot_cache_reload()

    Backups.init_repo()
    assert Backups.is_initted() is True
    assert len(Backups.get_all_snapshots()) == 0


def test_backup_service(dummy_service, backups):
    id = dummy_service.get_id()
    assert_job_finished(f"services.{id}.backup", count=0)
    assert Backups.get_last_backed_up(dummy_service) is None

    Backups.back_up(dummy_service)

    now = datetime.now(timezone.utc)
    date = Backups.get_last_backed_up(dummy_service)
    assert date is not None
    assert now > date
    assert now - date < timedelta(minutes=1)

    assert_job_finished(f"services.{id}.backup", count=1)


def all_job_text(job: Job) -> str:
    # Use when we update to pydantic 2.xxx
    # return Job.model_dump_json()
    result = ""
    if job.status_text is not None:
        result += job.status_text
    if job.description is not None:
        result += job.description
    if job.error is not None:
        result += job.error

    return result


def test_error_censoring_encryptionkey(dummy_service, backups):
    # Discard our key to inject a failure
    old_key = LocalBackupSecret.get()
    LocalBackupSecret.reset()
    new_key = LocalBackupSecret.get()

    with pytest.raises(ValueError):
        # Should fail without correct key
        Backups.back_up(dummy_service)

    job = get_backup_fail(dummy_service)
    assert_job_errored(job)

    job_text = all_job_text(job)

    assert old_key not in job_text
    assert new_key not in job_text
    # local backups do not have login key
    # assert Backups.provider().key not in job_text

    assert "CENSORED" in job_text


def test_error_censoring_loginkey(dummy_service, backups, fp):
    # We do not want to screw up our teardown
    old_provider = Backups.provider()

    secret = "aSecretNYA"

    Backups.set_provider(
        ProviderEnum.BACKBLAZE, login="meow", key=secret, location="moon"
    )
    assert Backups.provider().key == secret

    # We could call real Backblaze here, but that would hardly be private, so we fake it.
    fp.allow_unregistered(True)
    fp.register(
        ["restic", fp.any()],
        returncode=1,
        stdout="only real cats are allowed",
        # We do not want to suddenly call real Backblaze even if code changes
        occurrences=100,
    )

    with pytest.raises(ValueError):
        Backups.back_up(dummy_service)

    job = get_backup_fail(dummy_service)
    assert_job_errored(job)

    job_text = all_job_text(job)
    assert secret not in job_text
    assert job_text.count("CENSORED") == 2

    # We do not want to screw up our teardown
    Storage.store_provider(old_provider)


def test_no_repo(memory_backup):
    with pytest.raises(ValueError):
        assert memory_backup.backupper.get_snapshots() == []


def test_one_snapshot(backups, dummy_service):
    Backups.back_up(dummy_service)

    snaps = Backups.get_snapshots(dummy_service)
    assert len(snaps) == 1
    snap = snaps[0]
    assert snap.service_name == dummy_service.get_id()


def test_backup_returns_snapshot(backups, dummy_service):
    service_folders = dummy_service.get_folders()
    provider = Backups.provider()
    name = dummy_service.get_id()
    snapshot = provider.backupper.start_backup(service_folders, name)

    assert snapshot.id is not None

    snapshots = provider.backupper.get_snapshots()
    assert snapshots != []

    assert len(snapshot.id) == len(snapshots[0].id)
    assert Backups.get_snapshot_by_id(snapshot.id) is not None
    assert snapshot.service_name == name
    assert snapshot.created_at is not None
    assert snapshot.reason == BackupReason.EXPLICIT


def test_backup_reasons(backups, dummy_service):
    snap = Backups.back_up(dummy_service, BackupReason.AUTO)
    assert snap.reason == BackupReason.AUTO

    Backups.force_snapshot_cache_reload()
    snaps = Backups.get_snapshots(dummy_service)
    assert snaps[0].reason == BackupReason.AUTO


def folder_files(folder):
    return [
        path.join(folder, filename)
        for filename in listdir(folder)
        if filename is not None
    ]


def service_files(service):
    result = []
    for service_folder in service.get_folders():
        result.extend(folder_files(service_folder))
    return result
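

# For reference: for a service exposing folders like ["/a", "/b"],
# service_files returns the flat list of paths directly inside them,
# e.g. ["/a/x.txt", "/b/y.txt"] (hypothetical paths; the actual names
# depend on the dummy service fixture).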


def test_restore(backups, dummy_service):
    paths_to_nuke = service_files(dummy_service)
    contents = []

    for service_file in paths_to_nuke:
        with open(service_file, "r") as file:
            contents.append(file.read())

    Backups.back_up(dummy_service)
    snap = Backups.get_snapshots(dummy_service)[0]
    assert snap is not None

    for p in paths_to_nuke:
        assert path.exists(p)
        remove(p)
        assert not path.exists(p)

    Backups._restore_service_from_snapshot(dummy_service, snap.id)
    for p, content in zip(paths_to_nuke, contents):
        assert path.exists(p)
        with open(p, "r") as file:
            assert file.read() == content


def test_sizing(backups, dummy_service):
    Backups.back_up(dummy_service)
    snap = Backups.get_snapshots(dummy_service)[0]
    size = Backups.snapshot_restored_size(snap.id)
    assert size is not None
    assert size > 0


def test_init_tracking(backups, tmpdir):
    assert Backups.is_initted() is True
    Backups.reset()
    assert Backups.is_initted() is False
    separate_dir = tmpdir / "out_of_the_way"
    prepare_localfile_backups(separate_dir)
    Backups.init_repo()

    assert Backups.is_initted() is True


def finished_jobs():
    return [job for job in Jobs.get_jobs() if job.status is JobStatus.FINISHED]


def assert_job_finished(job_type, count):
    finished_types = [job.type_id for job in finished_jobs()]
    assert finished_types.count(job_type) == count


def assert_job_has_run(job_type):
    job = [job for job in finished_jobs() if job.type_id == job_type][0]
    assert JobStatus.RUNNING in Jobs.status_updates(job)


def job_progress_updates(job_type):
    job = [job for job in finished_jobs() if job.type_id == job_type][0]
    return Jobs.progress_updates(job)


def assert_job_had_progress(job_type):
    assert len(job_progress_updates(job_type)) > 0


def make_large_file(file_path: str, size_bytes: int):
    # Fill the file with incompressible random data
    with open(file_path, "wb") as file:
        file.write(urandom(size_bytes))


def test_snapshots_by_id(backups, dummy_service):
    snap1 = Backups.back_up(dummy_service)
    snap2 = Backups.back_up(dummy_service)
    snap3 = Backups.back_up(dummy_service)

    assert snap2.id is not None
    assert snap2.id != ""

    assert len(Backups.get_snapshots(dummy_service)) == 3
    assert Backups.get_snapshot_by_id(snap2.id).id == snap2.id


@pytest.fixture(params=["instant_server_stop", "delayed_server_stop"])
def simulated_service_stopping_delay(request) -> float:
    if request.param == "instant_server_stop":
        return 0.0
    else:
        return 0.3


def test_backup_service_task(backups, dummy_service, simulated_service_stopping_delay):
    dummy_service.set_delay(simulated_service_stopping_delay)

    handle = start_backup(dummy_service.get_id())
    handle(blocking=True)

    snaps = Backups.get_snapshots(dummy_service)
    assert len(snaps) == 1

    id = dummy_service.get_id()
    job_type_id = f"services.{id}.backup"
    assert_job_finished(job_type_id, count=1)
    assert_job_has_run(job_type_id)
    assert_job_had_progress(job_type_id)


def test_forget_snapshot(backups, dummy_service):
    snap1 = Backups.back_up(dummy_service)
    snap2 = Backups.back_up(dummy_service)
    assert len(Backups.get_snapshots(dummy_service)) == 2

    Backups.forget_snapshot(snap2)
    assert len(Backups.get_snapshots(dummy_service)) == 1
    Backups.force_snapshot_cache_reload()
    assert len(Backups.get_snapshots(dummy_service)) == 1

    assert Backups.get_snapshots(dummy_service)[0].id == snap1.id

    Backups.forget_snapshot(snap1)
    assert len(Backups.get_snapshots(dummy_service)) == 0


def test_forget_nonexistent_snapshot(backups, dummy_service):
    bogus = Snapshot(
        id="gibberjibber",
        service_name="nohoho",
        created_at=datetime.now(timezone.utc),
        reason=BackupReason.EXPLICIT,
    )
    with pytest.raises(ValueError):
        Backups.forget_snapshot(bogus)


def test_backup_larger_file(backups, dummy_service):
    large_file_path = path.join(dummy_service.get_folders()[0], "LARGEFILE")
    mega = 2**20
    make_large_file(large_file_path, 100 * mega)

    handle = start_backup(dummy_service.get_id())
    handle(blocking=True)

    # Results will be slightly different on different machines.
    # If someone has trouble with this on their machine, consider dropping this test.
    id = dummy_service.get_id()
    job_type_id = f"services.{id}.backup"
    assert_job_finished(job_type_id, count=1)
    assert_job_has_run(job_type_id)
    updates = job_progress_updates(job_type_id)
    assert len(updates) > 3
    assert updates[int((len(updates) - 1) / 2.0)] > 10
    # clean up a bit
    remove(large_file_path)


@pytest.fixture(params=["verify", "inplace"])
def restore_strategy(request) -> RestoreStrategy:
    if request.param == "verify":
        return RestoreStrategy.DOWNLOAD_VERIFY_OVERWRITE
    else:
        return RestoreStrategy.INPLACE


@pytest.fixture(params=["failed", "healthy", "fail_to_stop"])
def failed(request) -> str:
    return request.param


def test_restore_snapshot_task(
    backups, dummy_service, restore_strategy, simulated_service_stopping_delay, failed
):
    dummy_service.set_delay(simulated_service_stopping_delay)
    if failed == "failed":
        dummy_service.set_status(ServiceStatus.FAILED)

    if failed == "fail_to_stop":
        dummy_service.simulate_fail_to_stop(True)

    Backups.back_up(dummy_service)
    snaps = Backups.get_snapshots(dummy_service)
    assert len(snaps) == 1

    paths_to_nuke = service_files(dummy_service)
    contents = []

    for service_file in paths_to_nuke:
        with open(service_file, "r") as file:
            contents.append(file.read())

    for p in paths_to_nuke:
        remove(p)

    handle = restore_snapshot(snaps[0], restore_strategy)
    handle(blocking=True)

    for p, content in zip(paths_to_nuke, contents):
        assert path.exists(p)
        with open(p, "r") as file:
            assert file.read() == content

    snaps = Backups.get_snapshots(dummy_service)
    if restore_strategy == RestoreStrategy.INPLACE:
        assert len(snaps) == 2
        reasons = [snap.reason for snap in snaps]
        assert BackupReason.PRE_RESTORE in reasons
    else:
        assert len(snaps) == 1


def test_backup_unbackuppable(backups, dummy_service):
    dummy_service.set_backuppable(False)
    assert dummy_service.can_be_backed_up() is False
    with pytest.raises(ValueError):
        Backups.back_up(dummy_service)


# Storage
def test_snapshots_caching(backups, dummy_service):
    Backups.back_up(dummy_service)

    # We indirectly test that we make redis calls instead of shell calls
    start = datetime.now()
    for i in range(10):
        snapshots = Backups.get_snapshots(dummy_service)
        assert len(snapshots) == 1
    assert datetime.now() - start < timedelta(seconds=0.5)

    cached_snapshots = Storage.get_cached_snapshots()
    assert len(cached_snapshots) == 1

    snap_to_uncache = cached_snapshots[0]
    Storage.delete_cached_snapshot(snap_to_uncache)
    cached_snapshots = Storage.get_cached_snapshots()
    assert len(cached_snapshots) == 0

    # We do not assume that no snapshots means we need to reload the cache
    snapshots = Backups.get_snapshots(dummy_service)
    assert len(snapshots) == 0
    # No cache reload happened
    cached_snapshots = Storage.get_cached_snapshots()
    assert len(cached_snapshots) == 0


# Storage
def test_snapshot_cache_autoreloads(backups, dummy_service):
    Backups.back_up(dummy_service)

    cached_snapshots = Storage.get_cached_snapshots()
    assert len(cached_snapshots) == 1
    snap_to_uncache = cached_snapshots[0]

    Storage.delete_cached_snapshot(snap_to_uncache)
    cached_snapshots = Storage.get_cached_snapshots()
    assert len(cached_snapshots) == 0

    # When we create a snapshot, we do reload the cache
    Backups.back_up(dummy_service)
    cached_snapshots = Storage.get_cached_snapshots()
    assert len(cached_snapshots) == 2
    assert snap_to_uncache in cached_snapshots

    Storage.delete_cached_snapshot(snap_to_uncache)
    cached_snapshots = Storage.get_cached_snapshots()
    assert len(cached_snapshots) == 1

    # Deleting a snapshot that is not in the cache is ok, and it also reloads the cache
    Backups.forget_snapshot(snap_to_uncache)
    cached_snapshots = Storage.get_cached_snapshots()
    assert len(cached_snapshots) == 1
    assert snap_to_uncache not in cached_snapshots


def lowlevel_forget(snapshot_id):
    Backups.provider().backupper.forget_snapshot(snapshot_id)


# Storage
def test_snapshots_cache_invalidation(backups, dummy_service):
    Backups.back_up(dummy_service)
    cached_snapshots = Storage.get_cached_snapshots()
    assert len(cached_snapshots) == 1

    Storage.invalidate_snapshot_storage()
    cached_snapshots = Storage.get_cached_snapshots()
    assert len(cached_snapshots) == 0

    Backups.force_snapshot_cache_reload()
    cached_snapshots = Storage.get_cached_snapshots()
    assert len(cached_snapshots) == 1
    snap = cached_snapshots[0]

    lowlevel_forget(snap.id)
    cached_snapshots = Storage.get_cached_snapshots()
    assert len(cached_snapshots) == 1

    Backups.force_snapshot_cache_reload()
    cached_snapshots = Storage.get_cached_snapshots()
    assert len(cached_snapshots) == 0


# Storage
def test_init_tracking_caching(backups, raw_dummy_service):
    assert Storage.has_init_mark() is True
    Backups.reset()
    assert Storage.has_init_mark() is False

    Storage.mark_as_init()

    assert Storage.has_init_mark() is True
    assert Backups.is_initted() is True


# Storage
def test_init_tracking_caching2(backups, tmpdir):
    assert Storage.has_init_mark() is True
    Backups.reset()
    assert Storage.has_init_mark() is False
    separate_dir = tmpdir / "out_of_the_way"
    prepare_localfile_backups(separate_dir)
    assert Storage.has_init_mark() is False

    Backups.init_repo()

    assert Storage.has_init_mark() is True


# Storage
def test_provider_storage(backups):
    test_login = "ID"
    test_key = "KEY"
    test_location = "selprivacy_bin"

    old_provider = Backups.provider()
    assert old_provider is not None

    assert not isinstance(old_provider, Backblaze)
    assert old_provider.login != test_login
    assert old_provider.key != test_key
    assert old_provider.location != test_location

    test_provider = Backups._construct_provider(
        kind=BackupProvider.BACKBLAZE,
        login=test_login,
        key=test_key,
        location=test_location,
    )

    assert isinstance(test_provider, Backblaze)
    assert get_kind(test_provider) == "BACKBLAZE"
    assert test_provider.login == test_login
    assert test_provider.key == test_key
    assert test_provider.location == test_location

    Storage.store_provider(test_provider)

    restored_provider_model = Storage.load_provider()
    assert restored_provider_model.kind == "BACKBLAZE"
    assert restored_provider_model.login == test_login
    assert restored_provider_model.key == test_key
    assert restored_provider_model.location == test_location

    restored_provider = Backups._load_provider_redis()
    assert isinstance(restored_provider, Backblaze)
    assert restored_provider.login == test_login
    assert restored_provider.key == test_key
    assert restored_provider.location == test_location

    # Revert our mess so we can teardown ok
    Storage.store_provider(old_provider)


def test_sync(dummy_service):
    src = dummy_service.get_folders()[0]
    dst = dummy_service.get_folders()[1]
    old_files_src = set(listdir(src))
    old_files_dst = set(listdir(dst))
    assert old_files_src != old_files_dst

    sync(src, dst)
    new_files_src = set(listdir(src))
    new_files_dst = set(listdir(dst))
    assert new_files_src == old_files_src
    assert new_files_dst == new_files_src


def test_sync_nonexistent_src(dummy_service):
    src = "/var/lib/nonexistentFluffyBunniesOfUnix"
    dst = dummy_service.get_folders()[1]

    with pytest.raises(ValueError):
        sync(src, dst)


def test_move_blocks_backups(backups, dummy_service, restore_strategy):
    snap = Backups.back_up(dummy_service)
    job = Jobs.add(
        type_id=f"services.{dummy_service.get_id()}.move",
        name="Move Dummy",
        description="Moving Dummy data to the Rainbow Land",
        status=JobStatus.RUNNING,
    )

    with pytest.raises(ValueError):
        Backups.back_up(dummy_service)

    with pytest.raises(ValueError):
        Backups.restore_snapshot(snap, restore_strategy)


def test_double_lock_unlock(backups, dummy_service):
    # Note that introducing stale locks is only safe for other tests if we
    # erase the repo in between, which we do at the time of writing this test.

    Backups.provider().backupper.lock()
    with pytest.raises(ValueError):
        Backups.provider().backupper.lock()

    Backups.provider().backupper.unlock()
    Backups.provider().backupper.lock()

    Backups.provider().backupper.unlock()
    Backups.provider().backupper.unlock()


def test_operations_while_locked(backups, dummy_service):
    # Stale lock prevention test

    # Consider making this fully at the level of the backupper,
    # because that is where the prevention lives.
    # The Backups singleton is here only so that we can run this against B2, S3 and whatever,
    # but maybe that is not necessary (if restic treats them uniformly enough).

    Backups.provider().backupper.lock()
    snap = Backups.back_up(dummy_service)
    assert snap is not None

    Backups.provider().backupper.lock()
    # using lowlevel to make sure no caching interferes
    assert Backups.provider().backupper.is_initted() is True

    Backups.provider().backupper.lock()
    assert Backups.snapshot_restored_size(snap.id) > 0

    Backups.provider().backupper.lock()
    Backups.restore_snapshot(snap)

    Backups.provider().backupper.lock()
    Backups.forget_snapshot(snap)

    Backups.provider().backupper.lock()
    assert Backups.provider().backupper.get_snapshots() == []

    # check that no locks were left
    Backups.provider().backupper.lock()
    Backups.provider().backupper.unlock()
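

# If the lock/unlock pairs above ever spread further, a small context manager
# could keep them balanced. A minimal sketch of a hypothetical helper (not used
# by the tests above), assuming only the backupper lock()/unlock() API they exercise:
import contextlib


@contextlib.contextmanager
def locked_repo():
    backupper = Backups.provider().backupper
    backupper.lock()
    try:
        yield backupper
    finally:
        # Release the lock even if the body raises
        backupper.unlock()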


# a paranoid check to weed out problems with tempdirs that are not dependent on us
def test_tempfile():
    with tempfile.TemporaryDirectory() as temp:
        assert path.exists(temp)
    assert not path.exists(temp)


# Storage
def test_cache_invalidation_task(backups, dummy_service):
    Backups.back_up(dummy_service)
    assert len(Storage.get_cached_snapshots()) == 1

    # Does not trigger resync
    Storage.invalidate_snapshot_storage()
    assert Storage.get_cached_snapshots() == []

    reload_snapshot_cache()
    assert len(Storage.get_cached_snapshots()) == 1


def test_service_manager_backup_snapshot_persists(backups, generic_userdata, dkim_file):
    # There was a bug with snapshot disappearance due to post_restore hooks, checking for that
    manager = ServiceManager.get_service_by_id("api")
    assert manager is not None

    snapshot = Backups.back_up(manager)

    Backups.force_snapshot_cache_reload()
    snapshot_ids = [snap.id for snap in Backups.get_all_snapshots()]
    assert snapshot.id in snapshot_ids


def test_service_manager_backs_up_without_crashing(
    backups, generic_userdata, dkim_file, dummy_service
):
    """
    Service manager is special and needs testing.
    """
    manager = ServiceManager.get_service_by_id("api")
    assert manager is not None

    snapshot = Backups.back_up(manager)
    Backups.restore_snapshot(snapshot)


def test_backup_all_restore_all(
    backups,
    generic_userdata,
    dkim_file,
    only_dummy_service_and_api,
    catch_nixos_rebuild_calls,
):
    dummy_service = only_dummy_service_and_api
    fp = catch_nixos_rebuild_calls
    fp.pass_command(["restic", fp.any()])
    fp.keep_last_process(True)
    fp.pass_command(["rclone", fp.any()])
    fp.keep_last_process(True)
    fp.pass_command(["lsblk", fp.any()])
    fp.keep_last_process(True)

    assert len(Backups.get_all_snapshots()) == 0

    backup_job = add_total_backup_job()
    total_backup(backup_job)
    assert len(Backups.get_all_snapshots()) == 2

    assert set(ids(which_snapshots_to_full_restore())) == set(
        ids(Backups.get_all_snapshots())
    )

    write_testfile_bodies(dummy_service, ["bogus", "bleeegh corruption ><"])

    restore_job = add_total_restore_job()

    do_full_restore(restore_job)
    assert_job_ok(restore_job)

    assert_rebuild_was_made(fp)
    assert_original_files(dummy_service)