mirror of https://git.selfprivacy.org/SelfPrivacy/selfprivacy-rest-api.git
synced 2024-11-29 07:21:27 +00:00

fix: do a redis reset()
commit be8d249a04

@@ -5,8 +5,11 @@ name: default

steps:
- name: Run Tests and Generate Coverage Report
  commands:
  - kill $(ps aux | grep '[r]edis-server 127.0.0.1:6389' | awk '{print $2}')
  - kill $(ps aux | grep 'redis-server 127.0.0.1:6389' | awk '{print $2}') || true
  - redis-server --bind 127.0.0.1 --port 6389 >/dev/null &
  # We do not care about persistence on CI
  - sleep 10
  - redis-cli -h 127.0.0.1 -p 6389 config set stop-writes-on-bgsave-error no
  - coverage run -m pytest -q
  - coverage xml
  - sonar-scanner -Dsonar.projectKey=SelfPrivacy-REST-API -Dsonar.sources=. -Dsonar.host.url=http://analyzer.lan:9000 -Dsonar.login="$SONARQUBE_TOKEN"

@@ -26,3 +29,7 @@ steps:

node:
  server: builder

trigger:
  event:
  - push
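
The `|| true` added to the kill command keeps the step from failing the pipeline when no leftover redis-server is running, and `stop-writes-on-bgsave-error no` stops the throwaway CI instance from rejecting writes if a background save fails. A minimal sketch of how a test could talk to this instance (assuming the suite uses the redis-py client; the key name is made up):

import redis

# Port 6389 matches the redis-server started in the CI step above.
connection = redis.Redis(host="127.0.0.1", port=6389, decode_responses=True)
connection.set("ci:smoke", "ok")
assert connection.get("ci:smoke") == "ok"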

4  .flake8  Normal file

@@ -0,0 +1,4 @@

[flake8]
max-line-length = 80
select = C,E,F,W,B,B950
extend-ignore = E203, E501

2  .gitignore  vendored

@@ -147,6 +147,4 @@ cython_debug/

# End of https://www.toptal.com/developers/gitignore/api/flask

*.db

# Redis db
*.rdb

@@ -1,3 +1,6 @@

[MASTER]
init-hook="from pylint.config import find_pylintrc; import os, sys; sys.path.append(os.path.dirname(find_pylintrc()))"
extension-pkg-whitelist=pydantic

[FORMAT]
max-line-length=88

64  api.nix

@@ -1,64 +0,0 @@

{ lib, python39Packages }:
with python39Packages;
buildPythonApplication {
  pname = "selfprivacy-api";
  version = "2.0.0";

  propagatedBuildInputs = [
    setuptools
    portalocker
    pytz
    pytest
    pytest-mock
    pytest-datadir
    huey
    gevent
    mnemonic
    pydantic
    typing-extensions
    psutil
    fastapi
    uvicorn
    (buildPythonPackage rec {
      pname = "strawberry-graphql";
      version = "0.123.0";
      format = "pyproject";
      patches = [
        ./strawberry-graphql.patch
      ];
      propagatedBuildInputs = [
        typing-extensions
        python-multipart
        python-dateutil
        # flask
        pydantic
        pygments
        poetry
        # flask-cors
        (buildPythonPackage rec {
          pname = "graphql-core";
          version = "3.2.0";
          format = "setuptools";
          src = fetchPypi {
            inherit pname version;
            sha256 = "sha256-huKgvgCL/eGe94OI3opyWh2UKpGQykMcJKYIN5c4A84=";
          };
          checkInputs = [
            pytest-asyncio
            pytest-benchmark
            pytestCheckHook
          ];
          pythonImportsCheck = [
            "graphql"
          ];
        })
      ];
      src = fetchPypi {
        inherit pname version;
        sha256 = "KsmZ5Xv8tUg6yBxieAEtvoKoRG60VS+iVGV0X6oCExo=";
      };
    })
  ];

  src = ./.;
}

@@ -1,2 +0,0 @@

{ pkgs ? import <nixpkgs> {} }:
pkgs.callPackage ./api.nix {}

@@ -9,7 +9,6 @@ import uvicorn

from selfprivacy_api.dependencies import get_api_version
from selfprivacy_api.graphql.schema import schema
from selfprivacy_api.migrations import run_migrations
from selfprivacy_api.restic_controller.tasks import init_restic

from selfprivacy_api.rest import (
    system,

@@ -49,7 +48,6 @@ async def get_version():

@app.on_event("startup")
async def startup():
    run_migrations()
    init_restic()


if __name__ == "__main__":

548  selfprivacy_api/backup/__init__.py  Normal file

@@ -0,0 +1,548 @@

"""
This module contains the controller class for backups.
"""
from datetime import datetime, timedelta
from os import statvfs
from typing import List, Optional

from selfprivacy_api.utils import ReadUserData, WriteUserData

from selfprivacy_api.services import (
    get_service_by_id,
    get_all_services,
)
from selfprivacy_api.services.service import (
    Service,
    ServiceStatus,
    StoppedService,
)

from selfprivacy_api.jobs import Jobs, JobStatus, Job

from selfprivacy_api.graphql.queries.providers import (
    BackupProvider as BackupProviderEnum,
)
from selfprivacy_api.graphql.common_types.backup import RestoreStrategy

from selfprivacy_api.models.backup.snapshot import Snapshot

from selfprivacy_api.backup.providers.provider import AbstractBackupProvider
from selfprivacy_api.backup.providers import get_provider
from selfprivacy_api.backup.storage import Storage
from selfprivacy_api.backup.jobs import (
    get_backup_job,
    add_backup_job,
    get_restore_job,
    add_restore_job,
)

DEFAULT_JSON_PROVIDER = {
    "provider": "BACKBLAZE",
    "accountId": "",
    "accountKey": "",
    "bucket": "",
}


class NotDeadError(AssertionError):
    """
    This error is raised when we try to back up a service that is not dead yet.
    """

    def __init__(self, service: Service):
        self.service_name = service.get_id()
        super().__init__()

    def __str__(self):
        return f"""
        Service {self.service_name} should be either stopped or dead from
        an error before we back up.
        Normally, this error is unreachable because we do try to ensure this.
        Apparently, not this time.
        """


class Backups:
    """A stateless controller class for backups"""

    # Providers

    @staticmethod
    def provider() -> AbstractBackupProvider:
        """
        Returns the current backup storage provider.
        """
        return Backups._lookup_provider()

    @staticmethod
    def set_provider(
        kind: BackupProviderEnum,
        login: str,
        key: str,
        location: str,
        repo_id: str = "",
    ) -> None:
        """
        Sets the new configuration of the backup storage provider.

        In case of `BackupProviderEnum.BACKBLAZE`, the `login` is the key ID,
        the `key` is the key itself, the `location` is the bucket name and
        the `repo_id` is the bucket ID.
        """
        provider: AbstractBackupProvider = Backups._construct_provider(
            kind,
            login,
            key,
            location,
            repo_id,
        )
        Storage.store_provider(provider)

    @staticmethod
    def reset(reset_json=True) -> None:
        """
        Deletes all the data about the backup storage provider.
        """
        Storage.reset()
        if reset_json:
            try:
                Backups._reset_provider_json()
            except FileNotFoundError:
                # if there is no userdata file, we do not need to reset it
                pass

    @staticmethod
    def _lookup_provider() -> AbstractBackupProvider:
        redis_provider = Backups._load_provider_redis()
        if redis_provider is not None:
            return redis_provider

        try:
            json_provider = Backups._load_provider_json()
        except FileNotFoundError:
            json_provider = None

        if json_provider is not None:
            Storage.store_provider(json_provider)
            return json_provider

        none_provider = Backups._construct_provider(
            BackupProviderEnum.NONE, login="", key="", location=""
        )
        Storage.store_provider(none_provider)
        return none_provider

    @staticmethod
    def _construct_provider(
        kind: BackupProviderEnum,
        login: str,
        key: str,
        location: str,
        repo_id: str = "",
    ) -> AbstractBackupProvider:
        provider_class = get_provider(kind)

        return provider_class(
            login=login,
            key=key,
            location=location,
            repo_id=repo_id,
        )

    @staticmethod
    def _load_provider_redis() -> Optional[AbstractBackupProvider]:
        provider_model = Storage.load_provider()
        if provider_model is None:
            return None
        return Backups._construct_provider(
            BackupProviderEnum[provider_model.kind],
            provider_model.login,
            provider_model.key,
            provider_model.location,
            provider_model.repo_id,
        )

    @staticmethod
    def _load_provider_json() -> Optional[AbstractBackupProvider]:
        with ReadUserData() as user_data:
            provider_dict = {
                "provider": "",
                "accountId": "",
                "accountKey": "",
                "bucket": "",
            }

            if "backup" not in user_data.keys():
                if "backblaze" in user_data.keys():
                    provider_dict.update(user_data["backblaze"])
                    provider_dict["provider"] = "BACKBLAZE"
                return None
            else:
                provider_dict.update(user_data["backup"])

            if provider_dict == DEFAULT_JSON_PROVIDER:
                return None
            try:
                return Backups._construct_provider(
                    kind=BackupProviderEnum[provider_dict["provider"]],
                    login=provider_dict["accountId"],
                    key=provider_dict["accountKey"],
                    location=provider_dict["bucket"],
                )
            except KeyError:
                return None

    @staticmethod
    def _reset_provider_json() -> None:
        with WriteUserData() as user_data:
            if "backblaze" in user_data.keys():
                del user_data["backblaze"]

            user_data["backup"] = DEFAULT_JSON_PROVIDER

    # Init

    @staticmethod
    def init_repo() -> None:
        """
        Initializes the backup repository. This is required once per repo.
        """
        Backups.provider().backupper.init()
        Storage.mark_as_init()

    @staticmethod
    def is_initted() -> bool:
        """
        Returns whether the backup repository is initialized or not.
        If it is not initialized, we cannot back up and probably should
        call `init_repo` first.
        """
        if Storage.has_init_mark():
            return True

        initted = Backups.provider().backupper.is_initted()
        if initted:
            Storage.mark_as_init()
            return True

        return False

    # Backup

    @staticmethod
    def back_up(service: Service) -> Snapshot:
        """The top-level function to back up a service"""
        folders = service.get_folders()
        tag = service.get_id()

        job = get_backup_job(service)
        if job is None:
            job = add_backup_job(service)
        Jobs.update(job, status=JobStatus.RUNNING)

        try:
            service.pre_backup()
            snapshot = Backups.provider().backupper.start_backup(
                folders,
                tag,
            )
            Backups._store_last_snapshot(tag, snapshot)
            service.post_restore()
        except Exception as error:
            Jobs.update(job, status=JobStatus.ERROR)
            raise error

        Jobs.update(job, status=JobStatus.FINISHED)
        return snapshot

    # Restoring

    @staticmethod
    def _ensure_queued_restore_job(service, snapshot) -> Job:
        job = get_restore_job(service)
        if job is None:
            job = add_restore_job(snapshot)

        Jobs.update(job, status=JobStatus.CREATED)
        return job

    @staticmethod
    def _inplace_restore(
        service: Service,
        snapshot: Snapshot,
        job: Job,
    ) -> None:
        failsafe_snapshot = Backups.back_up(service)

        Jobs.update(job, status=JobStatus.RUNNING)
        try:
            Backups._restore_service_from_snapshot(
                service,
                snapshot.id,
                verify=False,
            )
        except Exception as error:
            Backups._restore_service_from_snapshot(
                service, failsafe_snapshot.id, verify=False
            )
            raise error

    @staticmethod
    def restore_snapshot(
        snapshot: Snapshot, strategy=RestoreStrategy.DOWNLOAD_VERIFY_OVERWRITE
    ) -> None:
        """Restores a snapshot to its original service using the given strategy"""
        service = get_service_by_id(snapshot.service_name)
        if service is None:
            raise ValueError(
                f"snapshot has a nonexistent service: {snapshot.service_name}"
            )
        job = Backups._ensure_queued_restore_job(service, snapshot)

        try:
            Backups._assert_restorable(snapshot)
            with StoppedService(service):
                Backups.assert_dead(service)
                if strategy == RestoreStrategy.INPLACE:
                    Backups._inplace_restore(service, snapshot, job)
                else:  # verify_before_download is our default
                    Jobs.update(job, status=JobStatus.RUNNING)
                    Backups._restore_service_from_snapshot(
                        service, snapshot.id, verify=True
                    )

                service.post_restore()

        except Exception as error:
            Jobs.update(job, status=JobStatus.ERROR)
            raise error

        Jobs.update(job, status=JobStatus.FINISHED)

    @staticmethod
    def _assert_restorable(
        snapshot: Snapshot, strategy=RestoreStrategy.DOWNLOAD_VERIFY_OVERWRITE
    ) -> None:
        service = get_service_by_id(snapshot.service_name)
        if service is None:
            raise ValueError(
                f"snapshot has a nonexistent service: {snapshot.service_name}"
            )

        restored_snap_size = Backups.snapshot_restored_size(snapshot.id)

        if strategy == RestoreStrategy.DOWNLOAD_VERIFY_OVERWRITE:
            needed_space = restored_snap_size
        elif strategy == RestoreStrategy.INPLACE:
            needed_space = restored_snap_size - service.get_storage_usage()
        else:
            raise NotImplementedError(
                """
                We do not know if there is enough space for restoration because
                there is some novel restore strategy used!
                This is a developer's fault, open an issue please
                """
            )
        available_space = Backups.space_usable_for_service(service)
        if needed_space > available_space:
            raise ValueError(
                f"we only have {available_space} bytes "
                f"but snapshot needs {needed_space}"
            )

    @staticmethod
    def _restore_service_from_snapshot(
        service: Service,
        snapshot_id: str,
        verify=True,
    ) -> None:
        folders = service.get_folders()

        Backups.provider().backupper.restore_from_backup(
            snapshot_id,
            folders,
            verify=verify,
        )

    # Snapshots

    @staticmethod
    def get_snapshots(service: Service) -> List[Snapshot]:
        """Returns all snapshots for a given service"""
        snapshots = Backups.get_all_snapshots()
        service_id = service.get_id()
        return list(
            filter(
                lambda snap: snap.service_name == service_id,
                snapshots,
            )
        )

    @staticmethod
    def get_all_snapshots() -> List[Snapshot]:
        """Returns all snapshots"""
        cached_snapshots = Storage.get_cached_snapshots()
        if cached_snapshots:
            return cached_snapshots
        # TODO: the oldest snapshots will get expired faster than the new ones.
        # How to detect that the end is missing?

        Backups.force_snapshot_cache_reload()
        return Storage.get_cached_snapshots()

    @staticmethod
    def get_snapshot_by_id(snapshot_id: str) -> Optional[Snapshot]:
        """Returns a backup snapshot by its id"""
        snap = Storage.get_cached_snapshot_by_id(snapshot_id)
        if snap is not None:
            return snap

        # Possibly our cache entry got invalidated, let's try one more time
        Backups.force_snapshot_cache_reload()
        snap = Storage.get_cached_snapshot_by_id(snapshot_id)

        return snap

    @staticmethod
    def forget_snapshot(snapshot: Snapshot) -> None:
        """Deletes a snapshot from the storage"""
        Backups.provider().backupper.forget_snapshot(snapshot.id)
        Storage.delete_cached_snapshot(snapshot)

    @staticmethod
    def force_snapshot_cache_reload() -> None:
        """
        Forces a reload of the snapshot cache.

        This may be an expensive operation, so use it wisely.
        User pays for the API calls.
        """
        upstream_snapshots = Backups.provider().backupper.get_snapshots()
        Storage.invalidate_snapshot_storage()
        for snapshot in upstream_snapshots:
            Storage.cache_snapshot(snapshot)

    @staticmethod
    def snapshot_restored_size(snapshot_id: str) -> int:
        """Returns the size of the snapshot"""
        return Backups.provider().backupper.restored_size(
            snapshot_id,
        )

    @staticmethod
    def _store_last_snapshot(service_id: str, snapshot: Snapshot) -> None:
        """What do we do with a snapshot that is just made?"""
        # non-expiring timestamp of the last backup
        Storage.store_last_timestamp(service_id, snapshot)
        # expiring cache entry
        Storage.cache_snapshot(snapshot)

    # Autobackup

    @staticmethod
    def autobackup_period_minutes() -> Optional[int]:
        """None means autobackup is disabled"""
        return Storage.autobackup_period_minutes()

    @staticmethod
    def set_autobackup_period_minutes(minutes: int) -> None:
        """
        0 and negative numbers are equivalent to disable.
        Setting to a positive number may result in a backup very soon
        if some services are not backed up.
        """
        if minutes <= 0:
            Backups.disable_all_autobackup()
            return
        Storage.store_autobackup_period_minutes(minutes)

    @staticmethod
    def disable_all_autobackup() -> None:
        """
        Disables all automatic backing up,
        but does not change per-service settings
        """
        Storage.delete_backup_period()

    @staticmethod
    def is_time_to_backup(time: datetime) -> bool:
        """
        Intended as a time validator for huey cron scheduler
        of automatic backups
        """

        return Backups.services_to_back_up(time) != []

    @staticmethod
    def services_to_back_up(time: datetime) -> List[Service]:
        """Returns a list of services that should be backed up at a given time"""
        return [
            service
            for service in get_all_services()
            if Backups.is_time_to_backup_service(service, time)
        ]

    @staticmethod
    def get_last_backed_up(service: Service) -> Optional[datetime]:
        """Get a timezone-aware time of the last backup of a service"""
        return Storage.get_last_backup_time(service.get_id())

    @staticmethod
    def is_time_to_backup_service(service: Service, time: datetime):
        """Returns True if it is time to back up a service"""
        period = Backups.autobackup_period_minutes()
        service_id = service.get_id()
        if not service.can_be_backed_up():
            return False
        if period is None:
            return False

        last_backup = Storage.get_last_backup_time(service_id)
        if last_backup is None:
            # queue a backup immediately if there are no previous backups
            return True

        if time > last_backup + timedelta(minutes=period):
            return True
        return False

    # Helpers

    @staticmethod
    def space_usable_for_service(service: Service) -> int:
        """
        Returns the amount of space available on the volume the given
        service is located on.
        """
        folders = service.get_folders()
        if folders == []:
            raise ValueError("unallocated service", service.get_id())

        # We assume all folders of one service live at the same volume
        fs_info = statvfs(folders[0])
        usable_bytes = fs_info.f_frsize * fs_info.f_bavail
        return usable_bytes

    @staticmethod
    def set_localfile_repo(file_path: str):
        """Used by tests to set a local folder as a backup repo"""
        # pylint: disable-next=invalid-name
        ProviderClass = get_provider(BackupProviderEnum.FILE)
        provider = ProviderClass(
            login="",
            key="",
            location=file_path,
            repo_id="",
        )
        Storage.store_provider(provider)

    @staticmethod
    def assert_dead(service: Service):
        """
        Checks if a service is dead and can be safely restored from a snapshot.
        """
        if service.get_status() not in [
            ServiceStatus.INACTIVE,
            ServiceStatus.FAILED,
        ]:
            raise NotDeadError(service)
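
A minimal sketch of how this controller is driven end to end, roughly as the tests would do it with a local repository (assuming `service` is a Service instance obtained elsewhere; the repo path is hypothetical):

from selfprivacy_api.backup import Backups

Backups.set_localfile_repo("/tmp/test_repo")  # FILE provider, restic over a local dir
if not Backups.is_initted():
    Backups.init_repo()

snapshot = Backups.back_up(service)  # queues a job, returns the fresh snapshot
assert snapshot in Backups.get_snapshots(service)  # served from the redis cache
Backups.restore_snapshot(snapshot)  # default strategy: download, verify, overwrite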

57  selfprivacy_api/backup/backuppers/__init__.py  Normal file

@@ -0,0 +1,57 @@

from abc import ABC, abstractmethod
from typing import List

from selfprivacy_api.models.backup.snapshot import Snapshot


class AbstractBackupper(ABC):
    """Abstract class for backuppers"""

    # flake8: noqa: B027
    def __init__(self) -> None:
        pass

    @abstractmethod
    def is_initted(self) -> bool:
        """Returns true if the repository is initted"""
        raise NotImplementedError

    @abstractmethod
    def set_creds(self, account: str, key: str, repo: str) -> None:
        """Set the credentials for the backupper"""
        raise NotImplementedError

    @abstractmethod
    def start_backup(self, folders: List[str], tag: str) -> Snapshot:
        """Start a backup of the given folders"""
        raise NotImplementedError

    @abstractmethod
    def get_snapshots(self) -> List[Snapshot]:
        """Get all snapshots from the repo"""
        raise NotImplementedError

    @abstractmethod
    def init(self) -> None:
        """Initialize the repository"""
        raise NotImplementedError

    @abstractmethod
    def restore_from_backup(
        self,
        snapshot_id: str,
        folders: List[str],
        verify=True,
    ) -> None:
        """Restore a target folder using a snapshot"""
        raise NotImplementedError

    @abstractmethod
    def restored_size(self, snapshot_id: str) -> int:
        """Get the size of the restored snapshot"""
        raise NotImplementedError

    @abstractmethod
    def forget_snapshot(self, snapshot_id) -> None:
        """Forget a snapshot"""
        raise NotImplementedError

34  selfprivacy_api/backup/backuppers/none_backupper.py  Normal file

@@ -0,0 +1,34 @@

from typing import List

from selfprivacy_api.models.backup.snapshot import Snapshot
from selfprivacy_api.backup.backuppers import AbstractBackupper


class NoneBackupper(AbstractBackupper):
    """A backupper that does nothing"""

    def is_initted(self, repo_name: str = "") -> bool:
        return False

    def set_creds(self, account: str, key: str, repo: str):
        pass

    def start_backup(self, folders: List[str], tag: str):
        raise NotImplementedError

    def get_snapshots(self) -> List[Snapshot]:
        """Get all snapshots from the repo"""
        return []

    def init(self):
        raise NotImplementedError

    def restore_from_backup(self, snapshot_id: str, folders: List[str], verify=True):
        """Restore a target folder using a snapshot"""
        raise NotImplementedError

    def restored_size(self, snapshot_id: str) -> int:
        raise NotImplementedError

    def forget_snapshot(self, snapshot_id):
        raise NotImplementedError

417  selfprivacy_api/backup/backuppers/restic_backupper.py  Normal file

@@ -0,0 +1,417 @@

import subprocess
import json
import datetime
import tempfile

from typing import List
from collections.abc import Iterable
from json.decoder import JSONDecodeError
from os.path import exists, join
from os import listdir
from time import sleep

from selfprivacy_api.backup.util import output_yielder, sync
from selfprivacy_api.backup.backuppers import AbstractBackupper
from selfprivacy_api.models.backup.snapshot import Snapshot
from selfprivacy_api.backup.jobs import get_backup_job
from selfprivacy_api.services import get_service_by_id
from selfprivacy_api.jobs import Jobs, JobStatus

from selfprivacy_api.backup.local_secret import LocalBackupSecret

SHORT_ID_LEN = 8


class ResticBackupper(AbstractBackupper):
    def __init__(self, login_flag: str, key_flag: str, storage_type: str) -> None:
        self.login_flag = login_flag
        self.key_flag = key_flag
        self.storage_type = storage_type
        self.account = ""
        self.key = ""
        self.repo = ""
        super().__init__()

    def set_creds(self, account: str, key: str, repo: str) -> None:
        self.account = account
        self.key = key
        self.repo = repo

    def restic_repo(self) -> str:
        # https://restic.readthedocs.io/en/latest/030_preparing_a_new_repo.html#other-services-via-rclone
        # https://forum.rclone.org/t/can-rclone-be-run-solely-with-command-line-options-no-config-no-env-vars/6314/5
        return f"rclone:{self.storage_type}{self.repo}"

    def rclone_args(self):
        return "rclone.args=serve restic --stdio " + self.backend_rclone_args()

    def backend_rclone_args(self) -> str:
        acc_arg = ""
        key_arg = ""
        if self.account != "":
            acc_arg = f"{self.login_flag} {self.account}"
        if self.key != "":
            key_arg = f"{self.key_flag} {self.key}"

        return f"{acc_arg} {key_arg}"

    def _password_command(self):
        return f"echo {LocalBackupSecret.get()}"

    def restic_command(self, *args, tag: str = "") -> List[str]:
        command = [
            "restic",
            "-o",
            self.rclone_args(),
            "-r",
            self.restic_repo(),
            "--password-command",
            self._password_command(),
        ]
        if tag != "":
            command.extend(
                [
                    "--tag",
                    tag,
                ]
            )
        if args:
            command.extend(ResticBackupper.__flatten_list(args))
        return command

    def mount_repo(self, mount_directory):
        mount_command = self.restic_command("mount", mount_directory)
        mount_command.insert(0, "nohup")
        handle = subprocess.Popen(
            mount_command,
            stdout=subprocess.DEVNULL,
            shell=False,
        )
        sleep(2)
        if "ids" not in listdir(mount_directory):
            raise IOError("failed to mount dir ", mount_directory)
        return handle

    def unmount_repo(self, mount_directory):
        mount_command = ["umount", "-l", mount_directory]
        with subprocess.Popen(
            mount_command, stdout=subprocess.PIPE, shell=False
        ) as handle:
            output = handle.communicate()[0].decode("utf-8")
            # TODO: check for exit code?
            if "error" in output.lower():
                return IOError("failed to unmount dir ", mount_directory, ": ", output)

        if not listdir(mount_directory) == []:
            return IOError("failed to unmount dir ", mount_directory)

    @staticmethod
    def __flatten_list(list_to_flatten):
        """string-aware list flattener"""
        result = []
        for item in list_to_flatten:
            if isinstance(item, Iterable) and not isinstance(item, str):
                result.extend(ResticBackupper.__flatten_list(item))
                continue
            result.append(item)
        return result

    def start_backup(self, folders: List[str], tag: str) -> Snapshot:
        """
        Start backup with restic
        """

        # but maybe it is ok to accept a union
        # of a string and an array of strings
        assert not isinstance(folders, str)

        backup_command = self.restic_command(
            "backup",
            "--json",
            folders,
            tag=tag,
        )

        messages = []

        service = get_service_by_id(tag)
        if service is None:
            raise ValueError("No service with id ", tag)

        job = get_backup_job(service)
        try:
            for raw_message in output_yielder(backup_command):
                message = self.parse_message(
                    raw_message,
                    job,
                )
                messages.append(message)
            return ResticBackupper._snapshot_from_backup_messages(
                messages,
                tag,
            )
        except ValueError as error:
            raise ValueError("Could not create a snapshot: ", messages) from error

    @staticmethod
    def _snapshot_from_backup_messages(messages, repo_name) -> Snapshot:
        for message in messages:
            if message["message_type"] == "summary":
                return ResticBackupper._snapshot_from_fresh_summary(
                    message,
                    repo_name,
                )
        raise ValueError("no summary message in restic json output")

    def parse_message(self, raw_message_line: str, job=None) -> dict:
        message = ResticBackupper.parse_json_output(raw_message_line)
        if not isinstance(message, dict):
            raise ValueError("we have too many messages on one line?")
        if message["message_type"] == "status":
            if job is not None:  # only update status if we run under some job
                Jobs.update(
                    job,
                    JobStatus.RUNNING,
                    progress=int(message["percent_done"] * 100),
                )
        return message

    @staticmethod
    def _snapshot_from_fresh_summary(message: dict, repo_name) -> Snapshot:
        return Snapshot(
            # There is a discrepancy between versions of restic/rclone
            # Some report short_id in this field and some full
            id=message["snapshot_id"][0:SHORT_ID_LEN],
            created_at=datetime.datetime.now(datetime.timezone.utc),
            service_name=repo_name,
        )

    def init(self) -> None:
        init_command = self.restic_command(
            "init",
        )
        with subprocess.Popen(
            init_command,
            shell=False,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        ) as process_handle:
            output = process_handle.communicate()[0].decode("utf-8")
            if "created restic repository" not in output:
                raise ValueError("cannot init a repo: " + output)

    def is_initted(self) -> bool:
        command = self.restic_command(
            "check",
            "--json",
        )

        with subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            shell=False,
        ) as handle:
            output = handle.communicate()[0].decode("utf-8")
            if not ResticBackupper.has_json(output):
                return False
            # raise NotImplementedError("error(big): " + output)
            return True

    def restored_size(self, snapshot_id: str) -> int:
        """
        Size of a snapshot
        """
        command = self.restic_command(
            "stats",
            snapshot_id,
            "--json",
        )

        with subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            shell=False,
        ) as handle:
            output = handle.communicate()[0].decode("utf-8")
            try:
                parsed_output = ResticBackupper.parse_json_output(output)
                return parsed_output["total_size"]
            except ValueError as error:
                raise ValueError("cannot restore a snapshot: " + output) from error

    def restore_from_backup(
        self,
        snapshot_id,
        folders: List[str],
        verify=True,
    ) -> None:
        """
        Restore from backup with restic
        """
        if folders is None or folders == []:
            raise ValueError("cannot restore without knowing where to!")

        with tempfile.TemporaryDirectory() as temp_dir:
            if verify:
                self._raw_verified_restore(snapshot_id, target=temp_dir)
                snapshot_root = temp_dir
            else:  # attempting inplace restore via mount + sync
                self.mount_repo(temp_dir)
                snapshot_root = join(temp_dir, "ids", snapshot_id)

            assert snapshot_root is not None
            for folder in folders:
                src = join(snapshot_root, folder.strip("/"))
                if not exists(src):
                    raise ValueError(f"No such path: {src}. We tried to find {folder}")
                dst = folder
                sync(src, dst)

            if not verify:
                self.unmount_repo(temp_dir)

    def _raw_verified_restore(self, snapshot_id, target="/"):
        """barebones restic restore"""
        restore_command = self.restic_command(
            "restore", snapshot_id, "--target", target, "--verify"
        )

        with subprocess.Popen(
            restore_command, stdout=subprocess.PIPE, shell=False
        ) as handle:

            # for some reason restore does not support
            # nice reporting of progress via json
            output = handle.communicate()[0].decode("utf-8")
            if "restoring" not in output:
                raise ValueError("cannot restore a snapshot: " + output)

            assert (
                handle.returncode is not None
            )  # none should be impossible after communicate
            if handle.returncode != 0:
                raise ValueError(
                    "restore exited with errorcode",
                    handle.returncode,
                    ":",
                    output,
                )

    def forget_snapshot(self, snapshot_id) -> None:
        """
        Either removes snapshot or marks it for deletion later,
        depending on server settings
        """
        forget_command = self.restic_command(
            "forget",
            snapshot_id,
        )

        with subprocess.Popen(
            forget_command,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=False,
        ) as handle:
            # for some reason restore does not support
            # nice reporting of progress via json
            output, err = [
                string.decode(
                    "utf-8",
                )
                for string in handle.communicate()
            ]

            if "no matching ID found" in err:
                raise ValueError(
                    "trying to delete, but no such snapshot: ", snapshot_id
                )

            assert (
                handle.returncode is not None
            )  # none should be impossible after communicate
            if handle.returncode != 0:
                raise ValueError(
                    "forget exited with errorcode",
                    handle.returncode,
                    ":",
                    output,
                )

    def _load_snapshots(self) -> object:
        """
        Load list of snapshots from repository.
        Raises ValueError if the repo does not exist.
        """
        listing_command = self.restic_command(
            "snapshots",
            "--json",
        )

        with subprocess.Popen(
            listing_command,
            shell=False,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        ) as backup_listing_process_descriptor:
            output = backup_listing_process_descriptor.communicate()[0].decode("utf-8")

        if "Is there a repository at the following location?" in output:
            raise ValueError("No repository! : " + output)
        try:
            return ResticBackupper.parse_json_output(output)
        except ValueError as error:
            raise ValueError("Cannot load snapshots: ") from error

    def get_snapshots(self) -> List[Snapshot]:
        """Get all snapshots from the repo"""
        snapshots = []
        for restic_snapshot in self._load_snapshots():
            snapshot = Snapshot(
                id=restic_snapshot["short_id"],
                created_at=restic_snapshot["time"],
                service_name=restic_snapshot["tags"][0],
            )

            snapshots.append(snapshot)
        return snapshots

    @staticmethod
    def parse_json_output(output: str) -> object:
        starting_index = ResticBackupper.json_start(output)

        if starting_index == -1:
            raise ValueError("There is no json in the restic output: " + output)

        truncated_output = output[starting_index:]
        json_messages = truncated_output.splitlines()
        if len(json_messages) == 1:
            try:
                return json.loads(truncated_output)
            except JSONDecodeError as error:
                raise ValueError(
                    "There is no json in the restic output : " + output
                ) from error

        result_array = []
        for message in json_messages:
            result_array.append(json.loads(message))
        return result_array

    @staticmethod
    def json_start(output: str) -> int:
        indices = [
            output.find("["),
            output.find("{"),
        ]
        indices = [x for x in indices if x != -1]

        if indices == []:
            return -1
        return min(indices)

    @staticmethod
    def has_json(output: str) -> bool:
        if ResticBackupper.json_start(output) == -1:
            return False
        return True
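
restic's `--json` mode emits one JSON object per line, so `parse_json_output` returns a single dict for a one-line input and a list for a multi-line one; this is why `parse_message` guards against receiving a list. A small illustration (the sample messages are made up):

from selfprivacy_api.backup.backuppers.restic_backupper import ResticBackupper

single = ResticBackupper.parse_json_output('{"message_type": "summary", "snapshot_id": "deadbeef"}')
assert single["message_type"] == "summary"

multi = ResticBackupper.parse_json_output(
    '{"message_type": "status", "percent_done": 0.5}\n'
    '{"message_type": "summary", "snapshot_id": "deadbeef"}'
)
assert isinstance(multi, list) and len(multi) == 2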

88  selfprivacy_api/backup/jobs.py  Normal file

@@ -0,0 +1,88 @@

from typing import Optional, List

from selfprivacy_api.models.backup.snapshot import Snapshot
from selfprivacy_api.jobs import Jobs, Job, JobStatus
from selfprivacy_api.services.service import Service
from selfprivacy_api.services import get_service_by_id


def job_type_prefix(service: Service) -> str:
    return f"services.{service.get_id()}"


def backup_job_type(service: Service) -> str:
    return f"{job_type_prefix(service)}.backup"


def restore_job_type(service: Service) -> str:
    return f"{job_type_prefix(service)}.restore"


def get_jobs_by_service(service: Service) -> List[Job]:
    result = []
    for job in Jobs.get_jobs():
        if job.type_id.startswith(job_type_prefix(service)) and job.status in [
            JobStatus.CREATED,
            JobStatus.RUNNING,
        ]:
            result.append(job)
    return result


def is_something_running_for(service: Service) -> bool:
    running_jobs = [
        job for job in get_jobs_by_service(service) if job.status == JobStatus.RUNNING
    ]
    return len(running_jobs) != 0


def add_backup_job(service: Service) -> Job:
    if is_something_running_for(service):
        message = (
            f"Cannot start a backup of {service.get_id()}, another operation is running: "
            + get_jobs_by_service(service)[0].type_id
        )
        raise ValueError(message)
    display_name = service.get_display_name()
    job = Jobs.add(
        type_id=backup_job_type(service),
        name=f"Backup {display_name}",
        description=f"Backing up {display_name}",
    )
    return job


def add_restore_job(snapshot: Snapshot) -> Job:
    service = get_service_by_id(snapshot.service_name)
    if service is None:
        raise ValueError(f"no such service: {snapshot.service_name}")
    if is_something_running_for(service):
        message = (
            f"Cannot start a restore of {service.get_id()}, another operation is running: "
            + get_jobs_by_service(service)[0].type_id
        )
        raise ValueError(message)
    display_name = service.get_display_name()
    job = Jobs.add(
        type_id=restore_job_type(service),
        name=f"Restore {display_name}",
        description=f"restoring {display_name} from {snapshot.id}",
    )
    return job


def get_job_by_type(type_id: str) -> Optional[Job]:
    for job in Jobs.get_jobs():
        if job.type_id == type_id and job.status in [
            JobStatus.CREATED,
            JobStatus.RUNNING,
        ]:
            return job


def get_backup_job(service: Service) -> Optional[Job]:
    return get_job_by_type(backup_job_type(service))


def get_restore_job(service: Service) -> Optional[Job]:
    return get_job_by_type(restore_job_type(service))
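
The module enforces at most one active backup or restore operation per service: `add_backup_job` refuses to queue while any job under the `services.<id>.` prefix is RUNNING, and `get_backup_job` finds the job again by its type id. A sketch (assuming `service` is a Service instance):

from selfprivacy_api.backup.jobs import add_backup_job, get_backup_job
from selfprivacy_api.jobs import Jobs, JobStatus

job = add_backup_job(service)  # type_id == f"services.{service.get_id()}.backup"
assert get_backup_job(service) == job  # found again while CREATED or RUNNING

Jobs.update(job, status=JobStatus.RUNNING)
try:
    add_backup_job(service)  # rejected while another operation is RUNNING
except ValueError as error:
    print(error)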

45  selfprivacy_api/backup/local_secret.py  Normal file

@@ -0,0 +1,45 @@

"""Handling of local secret used for encrypted backups.
Separated out for circular dependency reasons
"""

from __future__ import annotations
import secrets

from selfprivacy_api.utils.redis_pool import RedisPool

REDIS_KEY = "backup:local_secret"

redis = RedisPool().get_connection()


class LocalBackupSecret:
    @staticmethod
    def get() -> str:
        """A secret string which backblaze/other clouds do not know.
        Serves as encryption key.
        """
        if not LocalBackupSecret.exists():
            LocalBackupSecret.reset()
        return redis.get(REDIS_KEY)  # type: ignore

    @staticmethod
    def set(secret: str):
        redis.set(REDIS_KEY, secret)

    @staticmethod
    def reset():
        new_secret = LocalBackupSecret._generate()
        LocalBackupSecret.set(new_secret)

    @staticmethod
    def _full_reset():
        redis.delete(REDIS_KEY)

    @staticmethod
    def exists() -> bool:
        return redis.exists(REDIS_KEY) == 1

    @staticmethod
    def _generate() -> str:
        return secrets.token_urlsafe(256)

29  selfprivacy_api/backup/providers/__init__.py  Normal file

@@ -0,0 +1,29 @@

from typing import Type

from selfprivacy_api.graphql.queries.providers import (
    BackupProvider as BackupProviderEnum,
)
from selfprivacy_api.backup.providers.provider import AbstractBackupProvider

from selfprivacy_api.backup.providers.backblaze import Backblaze
from selfprivacy_api.backup.providers.memory import InMemoryBackup
from selfprivacy_api.backup.providers.local_file import LocalFileBackup
from selfprivacy_api.backup.providers.none import NoBackups

PROVIDER_MAPPING: dict[BackupProviderEnum, Type[AbstractBackupProvider]] = {
    BackupProviderEnum.BACKBLAZE: Backblaze,
    BackupProviderEnum.MEMORY: InMemoryBackup,
    BackupProviderEnum.FILE: LocalFileBackup,
    BackupProviderEnum.NONE: NoBackups,
}


def get_provider(
    provider_type: BackupProviderEnum,
) -> Type[AbstractBackupProvider]:
    return PROVIDER_MAPPING[provider_type]


def get_kind(provider: AbstractBackupProvider) -> str:
    """Get the kind of the provider in the form of a string"""
    return provider.name.value
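
Note that `get_provider` returns a class, not an instance; the caller constructs it with credentials, which is what `Backups._construct_provider` does. For example (placeholder credentials; this assumes the enum's value equals its name, which is how `store_provider` and `_load_provider_redis` round-trip the kind):

from selfprivacy_api.backup.providers import get_provider, get_kind
from selfprivacy_api.graphql.queries.providers import BackupProvider as BackupProviderEnum

provider_class = get_provider(BackupProviderEnum.BACKBLAZE)
provider = provider_class(
    login="keyId",  # placeholder Backblaze key ID
    key="applicationKey",  # placeholder application key
    location="bucket-name",
)
assert get_kind(provider) == "BACKBLAZE"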

11  selfprivacy_api/backup/providers/backblaze.py  Normal file

@@ -0,0 +1,11 @@

from .provider import AbstractBackupProvider
from selfprivacy_api.backup.backuppers.restic_backupper import ResticBackupper
from selfprivacy_api.graphql.queries.providers import (
    BackupProvider as BackupProviderEnum,
)


class Backblaze(AbstractBackupProvider):
    backupper = ResticBackupper("--b2-account", "--b2-key", ":b2:")

    name = BackupProviderEnum.BACKBLAZE

11  selfprivacy_api/backup/providers/local_file.py  Normal file

@@ -0,0 +1,11 @@

from .provider import AbstractBackupProvider
from selfprivacy_api.backup.backuppers.restic_backupper import ResticBackupper
from selfprivacy_api.graphql.queries.providers import (
    BackupProvider as BackupProviderEnum,
)


class LocalFileBackup(AbstractBackupProvider):
    backupper = ResticBackupper("", "", ":local:")

    name = BackupProviderEnum.FILE

11  selfprivacy_api/backup/providers/memory.py  Normal file

@@ -0,0 +1,11 @@

from .provider import AbstractBackupProvider
from selfprivacy_api.backup.backuppers.restic_backupper import ResticBackupper
from selfprivacy_api.graphql.queries.providers import (
    BackupProvider as BackupProviderEnum,
)


class InMemoryBackup(AbstractBackupProvider):
    backupper = ResticBackupper("", "", ":memory:")

    name = BackupProviderEnum.MEMORY

11  selfprivacy_api/backup/providers/none.py  Normal file

@@ -0,0 +1,11 @@

from selfprivacy_api.backup.providers.provider import AbstractBackupProvider
from selfprivacy_api.backup.backuppers.none_backupper import NoneBackupper
from selfprivacy_api.graphql.queries.providers import (
    BackupProvider as BackupProviderEnum,
)


class NoBackups(AbstractBackupProvider):
    backupper = NoneBackupper()

    name = BackupProviderEnum.NONE

25  selfprivacy_api/backup/providers/provider.py  Normal file

@@ -0,0 +1,25 @@

"""
An abstract class for BackBlaze, S3 etc.
It assumes that while some providers are supported via restic/rclone, others
may require different backends
"""
from abc import ABC, abstractmethod
from selfprivacy_api.backup.backuppers import AbstractBackupper
from selfprivacy_api.graphql.queries.providers import (
    BackupProvider as BackupProviderEnum,
)


class AbstractBackupProvider(ABC):
    backupper: AbstractBackupper

    name: BackupProviderEnum

    def __init__(self, login="", key="", location="", repo_id=""):
        self.backupper.set_creds(login, key, location)
        self.login = login
        self.key = key
        self.location = location
        # We do not need to do anything with this one
        # Just remember in case the app forgets
        self.repo_id = repo_id

172  selfprivacy_api/backup/storage.py  Normal file

@@ -0,0 +1,172 @@

"""
Module for storing backup related data in redis.
"""
from typing import List, Optional
from datetime import datetime

from selfprivacy_api.models.backup.snapshot import Snapshot
from selfprivacy_api.models.backup.provider import BackupProviderModel

from selfprivacy_api.utils.redis_pool import RedisPool
from selfprivacy_api.utils.redis_model_storage import (
    store_model_as_hash,
    hash_as_model,
)

from selfprivacy_api.backup.providers.provider import AbstractBackupProvider
from selfprivacy_api.backup.providers import get_kind

# a hack to store file path.
REDIS_SNAPSHOT_CACHE_EXPIRE_SECONDS = 24 * 60 * 60  # one day

REDIS_SNAPSHOTS_PREFIX = "backups:snapshots:"
REDIS_LAST_BACKUP_PREFIX = "backups:last-backed-up:"
REDIS_INITTED_CACHE_PREFIX = "backups:initted_services:"

REDIS_PROVIDER_KEY = "backups:provider"
REDIS_AUTOBACKUP_PERIOD_KEY = "backups:autobackup_period"


redis = RedisPool().get_connection()


class Storage:
    """Static class for storing backup related data in redis"""

    @staticmethod
    def reset() -> None:
        """Deletes all backup related data from redis"""
        redis.delete(REDIS_PROVIDER_KEY)
        redis.delete(REDIS_AUTOBACKUP_PERIOD_KEY)

        prefixes_to_clean = [
            REDIS_INITTED_CACHE_PREFIX,
            REDIS_SNAPSHOTS_PREFIX,
            REDIS_LAST_BACKUP_PREFIX,
        ]

        for prefix in prefixes_to_clean:
            for key in redis.keys(prefix + "*"):
                redis.delete(key)

    @staticmethod
    def invalidate_snapshot_storage() -> None:
        """Deletes all cached snapshots from redis"""
        for key in redis.keys(REDIS_SNAPSHOTS_PREFIX + "*"):
            redis.delete(key)

    @staticmethod
    def __last_backup_key(service_id: str) -> str:
        return REDIS_LAST_BACKUP_PREFIX + service_id

    @staticmethod
    def __snapshot_key(snapshot: Snapshot) -> str:
        return REDIS_SNAPSHOTS_PREFIX + snapshot.id

    @staticmethod
    def get_last_backup_time(service_id: str) -> Optional[datetime]:
        """Returns last backup time for a service or None if it was never backed up"""
        key = Storage.__last_backup_key(service_id)
        if not redis.exists(key):
            return None

        snapshot = hash_as_model(redis, key, Snapshot)
        if not snapshot:
            return None
        return snapshot.created_at

    @staticmethod
    def store_last_timestamp(service_id: str, snapshot: Snapshot) -> None:
        """Stores last backup time for a service"""
        store_model_as_hash(
            redis,
            Storage.__last_backup_key(service_id),
            snapshot,
        )

    @staticmethod
    def cache_snapshot(snapshot: Snapshot) -> None:
        """Stores snapshot metadata in redis for caching purposes"""
        snapshot_key = Storage.__snapshot_key(snapshot)
        store_model_as_hash(redis, snapshot_key, snapshot)
        redis.expire(snapshot_key, REDIS_SNAPSHOT_CACHE_EXPIRE_SECONDS)

    @staticmethod
    def delete_cached_snapshot(snapshot: Snapshot) -> None:
        """Deletes snapshot metadata from redis"""
        snapshot_key = Storage.__snapshot_key(snapshot)
        redis.delete(snapshot_key)

    @staticmethod
    def get_cached_snapshot_by_id(snapshot_id: str) -> Optional[Snapshot]:
        """Returns cached snapshot by id or None if it doesn't exist"""
        key = REDIS_SNAPSHOTS_PREFIX + snapshot_id
        if not redis.exists(key):
            return None
        return hash_as_model(redis, key, Snapshot)

    @staticmethod
    def get_cached_snapshots() -> List[Snapshot]:
        """Returns all cached snapshots stored in redis"""
        keys: list[str] = redis.keys(REDIS_SNAPSHOTS_PREFIX + "*")  # type: ignore
        result: list[Snapshot] = []

        for key in keys:
            snapshot = hash_as_model(redis, key, Snapshot)
            if snapshot:
                result.append(snapshot)
        return result

    @staticmethod
    def autobackup_period_minutes() -> Optional[int]:
        """None means autobackup is disabled"""
        if not redis.exists(REDIS_AUTOBACKUP_PERIOD_KEY):
            return None
        return int(redis.get(REDIS_AUTOBACKUP_PERIOD_KEY))  # type: ignore

    @staticmethod
    def store_autobackup_period_minutes(minutes: int) -> None:
        """Set the new autobackup period in minutes"""
        redis.set(REDIS_AUTOBACKUP_PERIOD_KEY, minutes)

    @staticmethod
    def delete_backup_period() -> None:
        """Set the autobackup period to none, effectively disabling autobackup"""
        redis.delete(REDIS_AUTOBACKUP_PERIOD_KEY)

    @staticmethod
    def store_provider(provider: AbstractBackupProvider) -> None:
        """Stores backup storage provider auth data in redis"""
        store_model_as_hash(
            redis,
            REDIS_PROVIDER_KEY,
            BackupProviderModel(
                kind=get_kind(provider),
                login=provider.login,
                key=provider.key,
                location=provider.location,
                repo_id=provider.repo_id,
            ),
        )

    @staticmethod
    def load_provider() -> Optional[BackupProviderModel]:
        """Loads backup storage provider auth data from redis"""
        provider_model = hash_as_model(
            redis,
            REDIS_PROVIDER_KEY,
            BackupProviderModel,
        )
        return provider_model

    @staticmethod
    def has_init_mark() -> bool:
        """Returns True if the repository was initialized"""
        if redis.exists(REDIS_INITTED_CACHE_PREFIX):
            return True
        return False

    @staticmethod
    def mark_as_init():
        """Marks the repository as initialized"""
        redis.set(REDIS_INITTED_CACHE_PREFIX, 1)
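
A sketch of the caching round trip (field values are made up; the Snapshot fields match how ResticBackupper constructs them):

from datetime import datetime, timezone
from selfprivacy_api.models.backup.snapshot import Snapshot
from selfprivacy_api.backup.storage import Storage

snap = Snapshot(
    id="deadbeef",
    service_name="nextcloud",
    created_at=datetime.now(timezone.utc),
)
Storage.cache_snapshot(snap)  # stored as a redis hash, expires after one day
assert Storage.get_cached_snapshot_by_id("deadbeef") == snap
Storage.delete_cached_snapshot(snap)
assert Storage.get_cached_snapshot_by_id("deadbeef") is None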

52  selfprivacy_api/backup/tasks.py  Normal file

@@ -0,0 +1,52 @@

"""
The tasks module contains the worker tasks that are used to back up and restore
"""
from datetime import datetime, timezone

from selfprivacy_api.graphql.common_types.backup import RestoreStrategy

from selfprivacy_api.models.backup.snapshot import Snapshot
from selfprivacy_api.utils.huey import huey
from selfprivacy_api.services.service import Service
from selfprivacy_api.backup import Backups


def validate_datetime(dt: datetime) -> bool:
    """
    Validates that the datetime passed in is timezone-aware.
    """
    if dt.tzinfo is None:
        return Backups.is_time_to_backup(dt.replace(tzinfo=timezone.utc))
    return Backups.is_time_to_backup(dt)


# huey tasks need to return something
@huey.task()
def start_backup(service: Service) -> bool:
    """
    The worker task that starts the backup process.
    """
    Backups.back_up(service)
    return True


@huey.task()
def restore_snapshot(
    snapshot: Snapshot,
    strategy: RestoreStrategy = RestoreStrategy.DOWNLOAD_VERIFY_OVERWRITE,
) -> bool:
    """
    The worker task that starts the restore process.
    """
    Backups.restore_snapshot(snapshot, strategy)
    return True


@huey.periodic_task(validate_datetime=validate_datetime)
def automatic_backup():
    """
    The worker periodic task that starts the automatic backup process.
    """
    time = datetime.utcnow().replace(tzinfo=timezone.utc)
    for service in Backups.services_to_back_up(time):
        start_backup(service)
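
huey calls `validate_datetime` with the current time to decide whether `automatic_backup` should fire; the function only normalizes naive datetimes to UTC before asking `Backups.is_time_to_backup`. The equivalence, spelled out (the date is arbitrary):

from datetime import datetime, timezone
from selfprivacy_api.backup import Backups
from selfprivacy_api.backup.tasks import validate_datetime

naive = datetime(2023, 1, 1, 12, 0)  # what huey may pass in
aware = naive.replace(tzinfo=timezone.utc)  # what actually gets checked

assert validate_datetime(naive) == Backups.is_time_to_backup(aware)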

27  selfprivacy_api/backup/util.py  Normal file

@@ -0,0 +1,27 @@

import subprocess
from os.path import exists


def output_yielder(command):
    with subprocess.Popen(
        command,
        shell=False,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
    ) as handle:
        for line in iter(handle.stdout.readline, ""):
            if "NOTICE:" not in line:
                yield line


def sync(src_path: str, dest_path: str):
    """a wrapper around rclone sync"""

    if not exists(src_path):
        raise ValueError("source dir for rclone sync must exist")

    rclone_command = ["rclone", "sync", "-P", src_path, dest_path]
    for raw_message in output_yielder(rclone_command):
        if "ERROR" in raw_message:
            raise ValueError(raw_message)
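
`output_yielder` streams a subprocess's combined stdout/stderr line by line while it runs (dropping rclone's `NOTICE:` chatter), which is what lets `start_backup` report progress as restic works. A trivial usage sketch:

from selfprivacy_api.backup.util import output_yielder

for line in output_yielder(["echo", "hello world"]):
    print(line, end="")  # lines keep their trailing newline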

@@ -27,4 +27,4 @@ async def get_token_header(

def get_api_version() -> str:
    """Get API version"""
    return "2.1.2"
    return "2.2.1"

10  selfprivacy_api/graphql/common_types/backup.py  Normal file

@@ -0,0 +1,10 @@

"""Backup"""
# pylint: disable=too-few-public-methods
import strawberry
from enum import Enum


@strawberry.enum
class RestoreStrategy(Enum):
    INPLACE = "INPLACE"
    DOWNLOAD_VERIFY_OVERWRITE = "DOWNLOAD_VERIFY_OVERWRITE"

@@ -12,6 +12,7 @@ class ApiJob:

    """Job type for GraphQL."""

    uid: str
    type_id: str
    name: str
    description: str
    status: str

@@ -28,6 +29,7 @@ def job_to_api_job(job: Job) -> ApiJob:

    """Convert a Job from jobs controller to a GraphQL ApiJob."""
    return ApiJob(
        uid=str(job.uid),
        type_id=job.type_id,
        name=job.name,
        description=job.description,
        status=job.status.name,
@ -1,6 +1,7 @@
|
|||
from enum import Enum
|
||||
import typing
|
||||
import strawberry
|
||||
import datetime
|
||||
from selfprivacy_api.graphql.common_types.dns import DnsRecord
|
||||
|
||||
from selfprivacy_api.services import get_service_by_id, get_services_by_location
|
||||
|
@ -15,7 +16,7 @@ def get_usages(root: "StorageVolume") -> list["StorageUsageInterface"]:
|
|||
service=service_to_graphql_service(service),
|
||||
title=service.get_display_name(),
|
||||
used_space=str(service.get_storage_usage()),
|
||||
volume=get_volume_by_id(service.get_location()),
|
||||
volume=get_volume_by_id(service.get_drive()),
|
||||
)
|
||||
for service in get_services_by_location(root.name)
|
||||
]
|
||||
|
@@ -79,7 +80,7 @@ def get_storage_usage(root: "Service") -> ServiceStorageUsage:
        service=service_to_graphql_service(service),
        title=service.get_display_name(),
        used_space=str(service.get_storage_usage()),
-       volume=get_volume_by_id(service.get_location()),
+       volume=get_volume_by_id(service.get_drive()),
    )
@@ -92,6 +93,8 @@ class Service:
    is_movable: bool
    is_required: bool
    is_enabled: bool
+   can_be_backed_up: bool
+   backup_description: str
    status: ServiceStatusEnum
    url: typing.Optional[str]
    dns_records: typing.Optional[typing.List[DnsRecord]]
@@ -101,6 +104,17 @@ class Service:
        """Get storage usage for a service"""
        return get_storage_usage(self)

+   @strawberry.field
+   def backup_snapshots(self) -> typing.Optional[typing.List["SnapshotInfo"]]:
+       return None
+
+
+@strawberry.type
+class SnapshotInfo:
+   id: str
+   service: Service
+   created_at: datetime.datetime


def service_to_graphql_service(service: ServiceInterface) -> Service:
    """Convert service to graphql service"""
@@ -112,6 +126,8 @@ def service_to_graphql_service(service: ServiceInterface) -> Service:
        is_movable=service.is_movable(),
        is_required=service.is_required(),
        is_enabled=service.is_enabled(),
+       can_be_backed_up=service.can_be_backed_up(),
+       backup_description=service.get_backup_description(),
        status=ServiceStatusEnum(service.get_status().value),
        url=service.get_url(),
        dns_records=[
168  selfprivacy_api/graphql/mutations/backup_mutations.py  Normal file
@@ -0,0 +1,168 @@
import typing
import strawberry

from selfprivacy_api.graphql import IsAuthenticated
from selfprivacy_api.graphql.mutations.mutation_interface import (
    GenericMutationReturn,
    GenericJobMutationReturn,
    MutationReturnInterface,
)
from selfprivacy_api.graphql.queries.backup import BackupConfiguration
from selfprivacy_api.graphql.queries.backup import Backup
from selfprivacy_api.graphql.queries.providers import BackupProvider
from selfprivacy_api.graphql.common_types.jobs import job_to_api_job
from selfprivacy_api.graphql.common_types.backup import RestoreStrategy

from selfprivacy_api.backup import Backups
from selfprivacy_api.services import get_service_by_id
from selfprivacy_api.backup.tasks import start_backup, restore_snapshot
from selfprivacy_api.backup.jobs import add_backup_job, add_restore_job


@strawberry.input
class InitializeRepositoryInput:
    """Initialize repository input"""

    provider: BackupProvider
    # The following field may become optional for other providers?
    # Backblaze takes bucket id and name
    location_id: str
    location_name: str
    # Key ID and key for Backblaze
    login: str
    password: str


@strawberry.type
class GenericBackupConfigReturn(MutationReturnInterface):
    """Generic backup config return"""

    configuration: typing.Optional[BackupConfiguration]


@strawberry.type
class BackupMutations:
    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def initialize_repository(
        self, repository: InitializeRepositoryInput
    ) -> GenericBackupConfigReturn:
        """Initialize a new repository"""
        Backups.set_provider(
            kind=repository.provider,
            login=repository.login,
            key=repository.password,
            location=repository.location_name,
            repo_id=repository.location_id,
        )
        Backups.init_repo()
        return GenericBackupConfigReturn(
            success=True,
            message="",
            code=200,
            configuration=Backup().configuration(),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def remove_repository(self) -> GenericBackupConfigReturn:
        """Remove repository"""
        Backups.reset()
        return GenericBackupConfigReturn(
            success=True,
            message="",
            code=200,
            configuration=Backup().configuration(),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def set_autobackup_period(
        self, period: typing.Optional[int] = None
    ) -> GenericBackupConfigReturn:
        """Set autobackup period. None is to disable autobackup"""
        if period is not None:
            Backups.set_autobackup_period_minutes(period)
        else:
            Backups.set_autobackup_period_minutes(0)

        return GenericBackupConfigReturn(
            success=True,
            message="",
            code=200,
            configuration=Backup().configuration(),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def start_backup(self, service_id: str) -> GenericJobMutationReturn:
        """Start backup"""

        service = get_service_by_id(service_id)
        if service is None:
            return GenericJobMutationReturn(
                success=False,
                code=300,
                message=f"nonexistent service: {service_id}",
                job=None,
            )

        job = add_backup_job(service)
        start_backup(service)

        return GenericJobMutationReturn(
            success=True,
            code=200,
            message="Backup job queued",
            job=job_to_api_job(job),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def restore_backup(
        self,
        snapshot_id: str,
        strategy: RestoreStrategy = RestoreStrategy.DOWNLOAD_VERIFY_OVERWRITE,
    ) -> GenericJobMutationReturn:
        """Restore backup"""
        snap = Backups.get_snapshot_by_id(snapshot_id)
        if snap is None:
            return GenericJobMutationReturn(
                success=False,
                code=404,
                message=f"No such snapshot: {snapshot_id}",
                job=None,
            )

        service = get_service_by_id(snap.service_name)
        if service is None:
            return GenericJobMutationReturn(
                success=False,
                code=404,
                message=f"nonexistent service: {snap.service_name}",
                job=None,
            )

        try:
            job = add_restore_job(snap)
        except ValueError as error:
            return GenericJobMutationReturn(
                success=False,
                code=400,
                message=str(error),
                job=None,
            )

        restore_snapshot(snap, strategy)

        return GenericJobMutationReturn(
            success=True,
            code=200,
            message="restore job created",
            job=job_to_api_job(job),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def force_snapshots_reload(self) -> GenericMutationReturn:
        """Force snapshots reload"""
        Backups.force_snapshot_cache_reload()
        return GenericMutationReturn(
            success=True,
            code=200,
            message="",
        )
215  selfprivacy_api/graphql/mutations/deprecated_mutations.py  Normal file
@@ -0,0 +1,215 @@
"""Deprecated mutations

A mistake was made: mutations were not grouped, and were instead placed in
the root of the mutations schema. In this file, we import all the mutations
and provide them to the root for backwards compatibility.
"""

import strawberry
from selfprivacy_api.graphql import IsAuthenticated
from selfprivacy_api.graphql.common_types.user import UserMutationReturn
from selfprivacy_api.graphql.mutations.api_mutations import (
    ApiKeyMutationReturn,
    ApiMutations,
    DeviceApiTokenMutationReturn,
)
from selfprivacy_api.graphql.mutations.backup_mutations import BackupMutations
from selfprivacy_api.graphql.mutations.job_mutations import JobMutations
from selfprivacy_api.graphql.mutations.mutation_interface import (
    GenericJobMutationReturn,
    GenericMutationReturn,
)
from selfprivacy_api.graphql.mutations.services_mutations import (
    ServiceMutationReturn,
    ServicesMutations,
)
from selfprivacy_api.graphql.mutations.storage_mutations import StorageMutations
from selfprivacy_api.graphql.mutations.system_mutations import (
    AutoUpgradeSettingsMutationReturn,
    SystemMutations,
    TimezoneMutationReturn,
)
from selfprivacy_api.graphql.mutations.backup_mutations import BackupMutations
from selfprivacy_api.graphql.mutations.users_mutations import UsersMutations


def deprecated_mutation(func, group, auth=True):
    return strawberry.mutation(
        resolver=func,
        permission_classes=[IsAuthenticated] if auth else [],
        deprecation_reason=f"Use `{group}.{func.__name__}` instead",
    )


@strawberry.type
class DeprecatedApiMutations:
    get_new_recovery_api_key: ApiKeyMutationReturn = deprecated_mutation(
        ApiMutations.get_new_recovery_api_key,
        "api",
    )

    use_recovery_api_key: DeviceApiTokenMutationReturn = deprecated_mutation(
        ApiMutations.use_recovery_api_key,
        "api",
        auth=False,
    )

    refresh_device_api_token: DeviceApiTokenMutationReturn = deprecated_mutation(
        ApiMutations.refresh_device_api_token,
        "api",
    )

    delete_device_api_token: GenericMutationReturn = deprecated_mutation(
        ApiMutations.delete_device_api_token,
        "api",
    )

    get_new_device_api_key: ApiKeyMutationReturn = deprecated_mutation(
        ApiMutations.get_new_device_api_key,
        "api",
    )

    invalidate_new_device_api_key: GenericMutationReturn = deprecated_mutation(
        ApiMutations.invalidate_new_device_api_key,
        "api",
    )

    authorize_with_new_device_api_key: DeviceApiTokenMutationReturn = (
        deprecated_mutation(
            ApiMutations.authorize_with_new_device_api_key,
            "api",
            auth=False,
        )
    )


@strawberry.type
class DeprecatedSystemMutations:
    change_timezone: TimezoneMutationReturn = deprecated_mutation(
        SystemMutations.change_timezone,
        "system",
    )

    change_auto_upgrade_settings: AutoUpgradeSettingsMutationReturn = (
        deprecated_mutation(
            SystemMutations.change_auto_upgrade_settings,
            "system",
        )
    )

    run_system_rebuild: GenericMutationReturn = deprecated_mutation(
        SystemMutations.run_system_rebuild,
        "system",
    )

    run_system_rollback: GenericMutationReturn = deprecated_mutation(
        SystemMutations.run_system_rollback,
        "system",
    )

    run_system_upgrade: GenericMutationReturn = deprecated_mutation(
        SystemMutations.run_system_upgrade,
        "system",
    )

    reboot_system: GenericMutationReturn = deprecated_mutation(
        SystemMutations.reboot_system,
        "system",
    )

    pull_repository_changes: GenericMutationReturn = deprecated_mutation(
        SystemMutations.pull_repository_changes,
        "system",
    )


@strawberry.type
class DeprecatedUsersMutations:
    create_user: UserMutationReturn = deprecated_mutation(
        UsersMutations.create_user,
        "users",
    )

    delete_user: GenericMutationReturn = deprecated_mutation(
        UsersMutations.delete_user,
        "users",
    )

    update_user: UserMutationReturn = deprecated_mutation(
        UsersMutations.update_user,
        "users",
    )

    add_ssh_key: UserMutationReturn = deprecated_mutation(
        UsersMutations.add_ssh_key,
        "users",
    )

    remove_ssh_key: UserMutationReturn = deprecated_mutation(
        UsersMutations.remove_ssh_key,
        "users",
    )


@strawberry.type
class DeprecatedStorageMutations:
    resize_volume: GenericMutationReturn = deprecated_mutation(
        StorageMutations.resize_volume,
        "storage",
    )

    mount_volume: GenericMutationReturn = deprecated_mutation(
        StorageMutations.mount_volume,
        "storage",
    )

    unmount_volume: GenericMutationReturn = deprecated_mutation(
        StorageMutations.unmount_volume,
        "storage",
    )

    migrate_to_binds: GenericJobMutationReturn = deprecated_mutation(
        StorageMutations.migrate_to_binds,
        "storage",
    )


@strawberry.type
class DeprecatedServicesMutations:
    enable_service: ServiceMutationReturn = deprecated_mutation(
        ServicesMutations.enable_service,
        "services",
    )

    disable_service: ServiceMutationReturn = deprecated_mutation(
        ServicesMutations.disable_service,
        "services",
    )

    stop_service: ServiceMutationReturn = deprecated_mutation(
        ServicesMutations.stop_service,
        "services",
    )

    start_service: ServiceMutationReturn = deprecated_mutation(
        ServicesMutations.start_service,
        "services",
    )

    restart_service: ServiceMutationReturn = deprecated_mutation(
        ServicesMutations.restart_service,
        "services",
    )

    move_service: ServiceMutationReturn = deprecated_mutation(
        ServicesMutations.move_service,
        "services",
    )


@strawberry.type
class DeprecatedJobMutations:
    remove_job: GenericMutationReturn = deprecated_mutation(
        JobMutations.remove_job,
        "jobs",
    )
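To make the deprecated_mutation helper concrete: each field in the classes above is just a pre-wrapped strawberry mutation. The first field of DeprecatedApiMutations, written out by hand, would read:

    get_new_recovery_api_key: ApiKeyMutationReturn = strawberry.mutation(
        resolver=ApiMutations.get_new_recovery_api_key,
        permission_classes=[IsAuthenticated],
        deprecation_reason="Use `api.get_new_recovery_api_key` instead",
    )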
@@ -17,5 +17,5 @@ class GenericMutationReturn(MutationReturnInterface):


@strawberry.type
-class GenericJobButationReturn(MutationReturnInterface):
+class GenericJobMutationReturn(MutationReturnInterface):
    job: typing.Optional[ApiJob] = None
@@ -10,7 +10,7 @@ from selfprivacy_api.graphql.common_types.service import (
    service_to_graphql_service,
)
from selfprivacy_api.graphql.mutations.mutation_interface import (
-   GenericJobButationReturn,
+   GenericJobMutationReturn,
    GenericMutationReturn,
)

@@ -34,7 +34,7 @@ class MoveServiceInput:


@strawberry.type
-class ServiceJobMutationReturn(GenericJobButationReturn):
+class ServiceJobMutationReturn(GenericJobMutationReturn):
    """Service job mutation return type."""

    service: typing.Optional[Service] = None
@@ -1,102 +0,0 @@
#!/usr/bin/env python3
"""Users management module"""
# pylint: disable=too-few-public-methods

import strawberry
from selfprivacy_api.actions.users import UserNotFound

from selfprivacy_api.graphql import IsAuthenticated
from selfprivacy_api.actions.ssh import (
    InvalidPublicKey,
    KeyAlreadyExists,
    KeyNotFound,
    create_ssh_key,
    remove_ssh_key,
)
from selfprivacy_api.graphql.common_types.user import (
    UserMutationReturn,
    get_user_by_username,
)


@strawberry.input
class SshMutationInput:
    """Input type for ssh mutation"""

    username: str
    ssh_key: str


@strawberry.type
class SshMutations:
    """Mutations ssh"""

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def add_ssh_key(self, ssh_input: SshMutationInput) -> UserMutationReturn:
        """Add a new ssh key"""

        try:
            create_ssh_key(ssh_input.username, ssh_input.ssh_key)
        except KeyAlreadyExists:
            return UserMutationReturn(
                success=False,
                message="Key already exists",
                code=409,
            )
        except InvalidPublicKey:
            return UserMutationReturn(
                success=False,
                message="Invalid key type. Only ssh-ed25519 and ssh-rsa are supported",
                code=400,
            )
        except UserNotFound:
            return UserMutationReturn(
                success=False,
                message="User not found",
                code=404,
            )
        except Exception as e:
            return UserMutationReturn(
                success=False,
                message=str(e),
                code=500,
            )

        return UserMutationReturn(
            success=True,
            message="New SSH key successfully written",
            code=201,
            user=get_user_by_username(ssh_input.username),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def remove_ssh_key(self, ssh_input: SshMutationInput) -> UserMutationReturn:
        """Remove ssh key from user"""

        try:
            remove_ssh_key(ssh_input.username, ssh_input.ssh_key)
        except KeyNotFound:
            return UserMutationReturn(
                success=False,
                message="Key not found",
                code=404,
            )
        except UserNotFound:
            return UserMutationReturn(
                success=False,
                message="User not found",
                code=404,
            )
        except Exception as e:
            return UserMutationReturn(
                success=False,
                message=str(e),
                code=500,
            )

        return UserMutationReturn(
            success=True,
            message="SSH key successfully removed",
            code=200,
            user=get_user_by_username(ssh_input.username),
        )
@@ -4,7 +4,7 @@ from selfprivacy_api.graphql import IsAuthenticated
from selfprivacy_api.graphql.common_types.jobs import job_to_api_job
from selfprivacy_api.utils.block_devices import BlockDevices
from selfprivacy_api.graphql.mutations.mutation_interface import (
-   GenericJobButationReturn,
+   GenericJobMutationReturn,
    GenericMutationReturn,
)
from selfprivacy_api.jobs.migrate_to_binds import (

@@ -79,10 +79,10 @@ class StorageMutations:
    )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
-   def migrate_to_binds(self, input: MigrateToBindsInput) -> GenericJobButationReturn:
+   def migrate_to_binds(self, input: MigrateToBindsInput) -> GenericJobMutationReturn:
        """Migrate to binds"""
        if is_bind_migrated():
-           return GenericJobButationReturn(
+           return GenericJobMutationReturn(
                success=False, code=409, message="Already migrated to binds"
            )
        job = start_bind_migration(

@@ -94,7 +94,7 @@ class StorageMutations:
                pleroma_block_device=input.pleroma_block_device,
            )
        )
-       return GenericJobButationReturn(
+       return GenericJobMutationReturn(
            success=True,
            code=200,
            message="Migration to binds started, rebuild the system to apply changes",
@@ -3,10 +3,18 @@
# pylint: disable=too-few-public-methods
import strawberry
from selfprivacy_api.graphql import IsAuthenticated
from selfprivacy_api.actions.users import UserNotFound
from selfprivacy_api.graphql.common_types.user import (
    UserMutationReturn,
    get_user_by_username,
)
from selfprivacy_api.actions.ssh import (
    InvalidPublicKey,
    KeyAlreadyExists,
    KeyNotFound,
    create_ssh_key,
    remove_ssh_key,
)
from selfprivacy_api.graphql.mutations.mutation_interface import (
    GenericMutationReturn,
)
@@ -21,8 +29,16 @@ class UserMutationInput:
    password: str


@strawberry.input
class SshMutationInput:
    """Input type for ssh mutation"""

    username: str
    ssh_key: str


@strawberry.type
-class UserMutations:
+class UsersMutations:
    """Mutations change user settings"""

    @strawberry.mutation(permission_classes=[IsAuthenticated])
@@ -115,3 +131,73 @@ class UsersMutations:
            code=200,
            user=get_user_by_username(user.username),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def add_ssh_key(self, ssh_input: SshMutationInput) -> UserMutationReturn:
        """Add a new ssh key"""

        try:
            create_ssh_key(ssh_input.username, ssh_input.ssh_key)
        except KeyAlreadyExists:
            return UserMutationReturn(
                success=False,
                message="Key already exists",
                code=409,
            )
        except InvalidPublicKey:
            return UserMutationReturn(
                success=False,
                message="Invalid key type. Only ssh-ed25519 and ssh-rsa are supported",
                code=400,
            )
        except UserNotFound:
            return UserMutationReturn(
                success=False,
                message="User not found",
                code=404,
            )
        except Exception as e:
            return UserMutationReturn(
                success=False,
                message=str(e),
                code=500,
            )

        return UserMutationReturn(
            success=True,
            message="New SSH key successfully written",
            code=201,
            user=get_user_by_username(ssh_input.username),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def remove_ssh_key(self, ssh_input: SshMutationInput) -> UserMutationReturn:
        """Remove ssh key from user"""

        try:
            remove_ssh_key(ssh_input.username, ssh_input.ssh_key)
        except KeyNotFound:
            return UserMutationReturn(
                success=False,
                message="Key not found",
                code=404,
            )
        except UserNotFound:
            return UserMutationReturn(
                success=False,
                message="User not found",
                code=404,
            )
        except Exception as e:
            return UserMutationReturn(
                success=False,
                message=str(e),
                code=500,
            )

        return UserMutationReturn(
            success=True,
            message="SSH key successfully removed",
            code=200,
            user=get_user_by_username(ssh_input.username),
        )
78  selfprivacy_api/graphql/queries/backup.py  Normal file
@@ -0,0 +1,78 @@
"""Backup"""
# pylint: disable=too-few-public-methods
import typing
import strawberry


from selfprivacy_api.backup import Backups
from selfprivacy_api.backup.local_secret import LocalBackupSecret
from selfprivacy_api.graphql.queries.providers import BackupProvider
from selfprivacy_api.graphql.common_types.service import (
    Service,
    ServiceStatusEnum,
    SnapshotInfo,
    service_to_graphql_service,
)
from selfprivacy_api.services import get_service_by_id


@strawberry.type
class BackupConfiguration:
    provider: BackupProvider
    # When server is lost, the app should have the key to decrypt backups
    # on a new server
    encryption_key: str
    # False when repo is not initialized and not ready to be used
    is_initialized: bool
    # If none, autobackups are disabled
    autobackup_period: typing.Optional[int]
    # Bucket name for Backblaze, path for some other providers
    location_name: typing.Optional[str]
    location_id: typing.Optional[str]


@strawberry.type
class Backup:
    @strawberry.field
    def configuration(self) -> BackupConfiguration:
        return BackupConfiguration(
            provider=Backups.provider().name,
            encryption_key=LocalBackupSecret.get(),
            is_initialized=Backups.is_initted(),
            autobackup_period=Backups.autobackup_period_minutes(),
            location_name=Backups.provider().location,
            location_id=Backups.provider().repo_id,
        )

    @strawberry.field
    def all_snapshots(self) -> typing.List[SnapshotInfo]:
        if not Backups.is_initted():
            return []
        result = []
        snapshots = Backups.get_all_snapshots()
        for snap in snapshots:
            service = get_service_by_id(snap.service_name)
            if service is None:
                service = Service(
                    id=snap.service_name,
                    display_name=f"{snap.service_name} (Orphaned)",
                    description="",
                    svg_icon="",
                    is_movable=False,
                    is_required=False,
                    is_enabled=False,
                    status=ServiceStatusEnum.OFF,
                    url=None,
                    dns_records=None,
                    can_be_backed_up=False,
                    backup_description="",
                )
            else:
                service = service_to_graphql_service(service)
            graphql_snap = SnapshotInfo(
                id=snap.id,
                service=service,
                created_at=snap.created_at,
            )
            result.append(graphql_snap)
        return result
@@ -7,6 +7,7 @@ import strawberry
class DnsProvider(Enum):
    CLOUDFLARE = "CLOUDFLARE"
    DIGITALOCEAN = "DIGITALOCEAN"
+   DESEC = "DESEC"


@strawberry.enum

@@ -18,3 +19,7 @@ class ServerProvider(Enum):
@strawberry.enum
class BackupProvider(Enum):
    BACKBLAZE = "BACKBLAZE"
+   NONE = "NONE"
+   # for testing purposes, make sure not selectable in prod.
+   MEMORY = "MEMORY"
+   FILE = "FILE"
@@ -23,7 +23,7 @@ class Storage:
            else str(volume.size),
            free_space=str(volume.fsavail),
            used_space=str(volume.fsused),
-           root=volume.name == "sda1",
+           root=volume.is_root(),
            name=volume.name,
            model=volume.model,
            serial=volume.serial,
@@ -5,21 +5,30 @@ import asyncio
from typing import AsyncGenerator
import strawberry
from selfprivacy_api.graphql import IsAuthenticated
+from selfprivacy_api.graphql.mutations.deprecated_mutations import (
+    DeprecatedApiMutations,
+    DeprecatedJobMutations,
+    DeprecatedServicesMutations,
+    DeprecatedStorageMutations,
+    DeprecatedSystemMutations,
+    DeprecatedUsersMutations,
+)
from selfprivacy_api.graphql.mutations.api_mutations import ApiMutations
from selfprivacy_api.graphql.mutations.job_mutations import JobMutations
from selfprivacy_api.graphql.mutations.mutation_interface import GenericMutationReturn
from selfprivacy_api.graphql.mutations.services_mutations import ServicesMutations
-from selfprivacy_api.graphql.mutations.ssh_mutations import SshMutations
from selfprivacy_api.graphql.mutations.storage_mutations import StorageMutations
from selfprivacy_api.graphql.mutations.system_mutations import SystemMutations
+from selfprivacy_api.graphql.mutations.backup_mutations import BackupMutations

from selfprivacy_api.graphql.queries.api_queries import Api
+from selfprivacy_api.graphql.queries.backup import Backup
from selfprivacy_api.graphql.queries.jobs import Job
from selfprivacy_api.graphql.queries.services import Services
from selfprivacy_api.graphql.queries.storage import Storage
from selfprivacy_api.graphql.queries.system import System

-from selfprivacy_api.graphql.mutations.users_mutations import UserMutations
+from selfprivacy_api.graphql.mutations.users_mutations import UsersMutations
from selfprivacy_api.graphql.queries.users import Users
from selfprivacy_api.jobs.test import test_job
@@ -28,16 +37,16 @@ from selfprivacy_api.jobs.test import test_job
class Query:
    """Root schema for queries"""

-   @strawberry.field(permission_classes=[IsAuthenticated])
-   def system(self) -> System:
-       """System queries"""
-       return System()
-
    @strawberry.field
    def api(self) -> Api:
        """API access status"""
        return Api()

+   @strawberry.field(permission_classes=[IsAuthenticated])
+   def system(self) -> System:
+       """System queries"""
+       return System()
+
    @strawberry.field(permission_classes=[IsAuthenticated])
    def users(self) -> Users:
        """Users queries"""
@@ -58,19 +67,58 @@ class Query:
        """Services queries"""
        return Services()

+   @strawberry.field(permission_classes=[IsAuthenticated])
+   def backup(self) -> Backup:
+       """Backup queries"""
+       return Backup()
+

@strawberry.type
class Mutation(
-   ApiMutations,
-   SystemMutations,
-   UserMutations,
-   SshMutations,
-   StorageMutations,
-   ServicesMutations,
-   JobMutations,
+   DeprecatedApiMutations,
+   DeprecatedSystemMutations,
+   DeprecatedUsersMutations,
+   DeprecatedStorageMutations,
+   DeprecatedServicesMutations,
+   DeprecatedJobMutations,
):
    """Root schema for mutations"""

+   @strawberry.field
+   def api(self) -> ApiMutations:
+       """API mutations"""
+       return ApiMutations()
+
+   @strawberry.field(permission_classes=[IsAuthenticated])
+   def system(self) -> SystemMutations:
+       """System mutations"""
+       return SystemMutations()
+
+   @strawberry.field(permission_classes=[IsAuthenticated])
+   def users(self) -> UsersMutations:
+       """Users mutations"""
+       return UsersMutations()
+
+   @strawberry.field(permission_classes=[IsAuthenticated])
+   def storage(self) -> StorageMutations:
+       """Storage mutations"""
+       return StorageMutations()
+
+   @strawberry.field(permission_classes=[IsAuthenticated])
+   def services(self) -> ServicesMutations:
+       """Services mutations"""
+       return ServicesMutations()
+
+   @strawberry.field(permission_classes=[IsAuthenticated])
+   def jobs(self) -> JobMutations:
+       """Jobs mutations"""
+       return JobMutations()
+
+   @strawberry.field(permission_classes=[IsAuthenticated])
+   def backup(self) -> BackupMutations:
+       """Backup mutations"""
+       return BackupMutations()
+
    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def test_mutation(self) -> GenericMutationReturn:
        """Test mutation"""

@@ -95,4 +143,8 @@ class Subscription:
        await asyncio.sleep(0.5)


-schema = strawberry.Schema(query=Query, mutation=Mutation, subscription=Subscription)
+schema = strawberry.Schema(
+    query=Query,
+    mutation=Mutation,
+    subscription=Subscription,
+)
@@ -26,8 +26,11 @@ from selfprivacy_api.utils.redis_pool import RedisPool

JOB_EXPIRATION_SECONDS = 10 * 24 * 60 * 60  # ten days

+STATUS_LOGS_PREFIX = "jobs_logs:status:"
+PROGRESS_LOGS_PREFIX = "jobs_logs:progress:"
+

-class JobStatus(Enum):
+class JobStatus(str, Enum):
    """
    Status of a job.
    """

@@ -70,6 +73,7 @@ class Jobs:
        jobs = Jobs.get_jobs()
        for job in jobs:
            Jobs.remove(job)
+       Jobs.reset_logs()

    @staticmethod
    def add(
@@ -120,6 +124,60 @@ class Jobs:
            return True
        return False

    @staticmethod
    def reset_logs() -> None:
        redis = RedisPool().get_connection()
        for key in redis.keys(STATUS_LOGS_PREFIX + "*"):
            redis.delete(key)

    @staticmethod
    def log_status_update(job: Job, status: JobStatus) -> None:
        redis = RedisPool().get_connection()
        key = _status_log_key_from_uuid(job.uid)
        redis.lpush(key, status.value)
        redis.expire(key, 10)

    @staticmethod
    def log_progress_update(job: Job, progress: int) -> None:
        redis = RedisPool().get_connection()
        key = _progress_log_key_from_uuid(job.uid)
        redis.lpush(key, progress)
        redis.expire(key, 10)

    @staticmethod
    def status_updates(job: Job) -> list[JobStatus]:
        result: list[JobStatus] = []

        redis = RedisPool().get_connection()
        key = _status_log_key_from_uuid(job.uid)
        if not redis.exists(key):
            return []

        status_strings: list[str] = redis.lrange(key, 0, -1)  # type: ignore
        for status in status_strings:
            try:
                result.append(JobStatus[status])
            except KeyError as error:
                raise ValueError("impossible job status: " + status) from error
        return result

    @staticmethod
    def progress_updates(job: Job) -> list[int]:
        result: list[int] = []

        redis = RedisPool().get_connection()
        key = _progress_log_key_from_uuid(job.uid)
        if not redis.exists(key):
            return []

        progress_strings: list[str] = redis.lrange(key, 0, -1)  # type: ignore
        for progress in progress_strings:
            try:
                result.append(int(progress))
            except KeyError as error:
                raise ValueError("impossible job progress: " + progress) from error
        return result

    @staticmethod
    def update(
        job: Job,
@@ -140,9 +198,17 @@ class Jobs:
            job.description = description
        if status_text is not None:
            job.status_text = status_text
-       if progress is not None:
+
+       # if it is finished it is 100
+       # unless user says otherwise
+       if status == JobStatus.FINISHED and progress is None:
+           progress = 100
+       if progress is not None and job.progress != progress:
            job.progress = progress
+           Jobs.log_progress_update(job, progress)
+
        job.status = status
+       Jobs.log_status_update(job, status)
        job.updated_at = datetime.datetime.now()
        job.error = error
        job.result = result
@@ -194,11 +260,19 @@ class Jobs:
        return False


-def _redis_key_from_uuid(uuid_string):
+def _redis_key_from_uuid(uuid_string) -> str:
    return "jobs:" + str(uuid_string)


-def _store_job_as_hash(redis, redis_key, model):
+def _status_log_key_from_uuid(uuid_string) -> str:
+    return STATUS_LOGS_PREFIX + str(uuid_string)
+
+
+def _progress_log_key_from_uuid(uuid_string) -> str:
+    return PROGRESS_LOGS_PREFIX + str(uuid_string)
+
+
+def _store_job_as_hash(redis, redis_key, model) -> None:
    for key, value in model.dict().items():
        if isinstance(value, uuid.UUID):
            value = str(value)

@@ -209,7 +283,7 @@ def _store_job_as_hash(redis, redis_key, model):
        redis.hset(redis_key, key, str(value))


-def _job_from_hash(redis, redis_key):
+def _job_from_hash(redis, redis_key) -> typing.Optional[Job]:
    if redis.exists(redis_key):
        job_dict = redis.hgetall(redis_key)
        for date in [
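Taken together, the logging helpers added above make a job's recent history observable; a minimal usage sketch, assuming a reachable Redis (the exact keyword arguments of Jobs.add may differ from this illustration):

    from selfprivacy_api.jobs import Jobs, JobStatus

    job = Jobs.add(
        type_id="demo.task",
        name="Demo job",
        description="illustrative job",
    )
    Jobs.update(job, status=JobStatus.RUNNING, progress=50)

    # Each update is lpush'ed into a short-lived Redis list (10-second TTL),
    # so a test can assert on the exact sequence of transitions:
    assert JobStatus.RUNNING in Jobs.status_updates(job)
    assert 50 in Jobs.progress_updates(job)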
@@ -22,6 +22,9 @@ from selfprivacy_api.migrations.providers import CreateProviderFields
from selfprivacy_api.migrations.prepare_for_nixos_2211 import (
    MigrateToSelfprivacyChannelFrom2205,
)
+from selfprivacy_api.migrations.prepare_for_nixos_2305 import (
+    MigrateToSelfprivacyChannelFrom2211,
+)

migrations = [
    FixNixosConfigBranch(),

@@ -31,6 +34,7 @@ migrations = [
    CheckForFailedBindsMigration(),
    CreateProviderFields(),
    MigrateToSelfprivacyChannelFrom2205(),
+   MigrateToSelfprivacyChannelFrom2211(),
]
58  selfprivacy_api/migrations/prepare_for_nixos_2305.py  Normal file
@@ -0,0 +1,58 @@
import os
import subprocess

from selfprivacy_api.migrations.migration import Migration


class MigrateToSelfprivacyChannelFrom2211(Migration):
    """Migrate to selfprivacy Nix channel.
    For some reason NixOS 22.11 servers initialized with the nixos channel instead of selfprivacy.
    This stops us from upgrading to NixOS 23.05
    """

    def get_migration_name(self):
        return "migrate_to_selfprivacy_channel_from_2211"

    def get_migration_description(self):
        return "Migrate to selfprivacy Nix channel from NixOS 22.11."

    def is_migration_needed(self):
        try:
            output = subprocess.check_output(
                ["nix-channel", "--list"], start_new_session=True
            )
            output = output.decode("utf-8")
            first_line = output.split("\n", maxsplit=1)[0]
            return first_line.startswith("nixos") and (
                first_line.endswith("nixos-22.11")
            )
        except subprocess.CalledProcessError:
            return False

    def migrate(self):
        # Change the channel and update them.
        # Also, go to /etc/nixos directory and make a git pull
        current_working_directory = os.getcwd()
        try:
            print("Changing channel")
            os.chdir("/etc/nixos")
            subprocess.check_output(
                [
                    "nix-channel",
                    "--add",
                    "https://channel.selfprivacy.org/nixos-selfpricacy",
                    "nixos",
                ]
            )
            subprocess.check_output(["nix-channel", "--update"])
            nixos_config_branch = subprocess.check_output(
                ["git", "rev-parse", "--abbrev-ref", "HEAD"], start_new_session=True
            )
            if nixos_config_branch.decode("utf-8").strip() == "api-redis":
                print("Also changing nixos-config branch from api-redis to master")
                subprocess.check_output(["git", "checkout", "master"])
            subprocess.check_output(["git", "pull"])
            os.chdir(current_working_directory)
        except subprocess.CalledProcessError:
            os.chdir(current_working_directory)
            print("Error")
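For reference, is_migration_needed() keys off only the first line of nix-channel --list; a small sketch of that check against sample output (the channel URL is illustrative):

    sample_output = "nixos https://nixos.org/channels/nixos-22.11\n"
    first_line = sample_output.split("\n", maxsplit=1)[0]
    # True exactly for a channel named "nixos*" that points at 22.11,
    # which is the misconfiguration this migration targets.
    assert first_line.startswith("nixos") and first_line.endswith("nixos-22.11")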
0  selfprivacy_api/models/backup/__init__.py  Normal file

11  selfprivacy_api/models/backup/provider.py  Normal file
@@ -0,0 +1,11 @@
from pydantic import BaseModel

"""for storage in Redis"""


class BackupProviderModel(BaseModel):
    kind: str
    login: str
    key: str
    location: str
    repo_id: str  # for app usage, not for us

8  selfprivacy_api/models/backup/snapshot.py  Normal file
@@ -0,0 +1,8 @@
import datetime
from pydantic import BaseModel


class Snapshot(BaseModel):
    id: str
    service_name: str
    created_at: datetime.datetime
@@ -1,8 +1,9 @@
"""
Token repository using Redis as backend.
"""
-from typing import Optional
+from typing import Any, Optional
from datetime import datetime
+from hashlib import md5

from selfprivacy_api.repositories.tokens.abstract_tokens_repository import (
    AbstractTokensRepository,
@@ -28,12 +29,15 @@ class RedisTokensRepository(AbstractTokensRepository):

    @staticmethod
    def token_key_for_device(device_name: str):
-       return TOKENS_PREFIX + str(hash(device_name))
+       md5_hash = md5()
+       md5_hash.update(bytes(device_name, "utf-8"))
+       digest = md5_hash.hexdigest()
+       return TOKENS_PREFIX + digest

    def get_tokens(self) -> list[Token]:
        """Get the tokens"""
        redis = self.connection
-       token_keys = redis.keys(TOKENS_PREFIX + "*")
+       token_keys: list[str] = redis.keys(TOKENS_PREFIX + "*")  # type: ignore
        tokens = []
        for key in token_keys:
            token = self._token_from_hash(key)
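A plausible reading of the md5 switch above, offered as a sketch rather than taken from the commit message: Python's built-in hash() is randomized per process for strings, so str(hash(device_name)) yields a different Redis key after every interpreter restart, while an md5 hex digest is stable:

    from hashlib import md5

    TOKENS_PREFIX = "tokens:"  # illustrative; the real prefix is defined in the module

    def token_key_for_device(device_name: str) -> str:
        # Deterministic across restarts, unlike str(hash(device_name)),
        # which varies with per-process hash randomization (PYTHONHASHSEED).
        return TOKENS_PREFIX + md5(device_name.encode("utf-8")).hexdigest()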
@@ -41,11 +45,20 @@ class RedisTokensRepository(AbstractTokensRepository):
            tokens.append(token)
        return tokens

+   def _discover_token_key(self, input_token: Token) -> Optional[str]:
+       """brute-force searching for tokens, for robust deletion"""
+       redis = self.connection
+       token_keys: list[str] = redis.keys(TOKENS_PREFIX + "*")  # type: ignore
+       for key in token_keys:
+           token = self._token_from_hash(key)
+           if token == input_token:
+               return key
+
    def delete_token(self, input_token: Token) -> None:
        """Delete the token"""
        redis = self.connection
-       key = RedisTokensRepository._token_redis_key(input_token)
-       if input_token not in self.get_tokens():
+       key = self._discover_token_key(input_token)
+       if key is None:
            raise TokenNotFound
        redis.delete(key)
@@ -107,26 +120,28 @@ class RedisTokensRepository(AbstractTokensRepository):
        return self._new_device_key_from_hash(NEW_DEVICE_KEY_REDIS_KEY)

    @staticmethod
-   def _is_date_key(key: str):
+   def _is_date_key(key: str) -> bool:
        return key in [
            "created_at",
            "expires_at",
        ]

    @staticmethod
-   def _prepare_model_dict(d: dict):
-       date_keys = [key for key in d.keys() if RedisTokensRepository._is_date_key(key)]
+   def _prepare_model_dict(model_dict: dict[str, Any]) -> None:
+       date_keys = [
+           key for key in model_dict.keys() if RedisTokensRepository._is_date_key(key)
+       ]
        for date in date_keys:
-           if d[date] != "None":
-               d[date] = datetime.fromisoformat(d[date])
-       for key in d.keys():
-           if d[key] == "None":
-               d[key] = None
+           if model_dict[date] != "None":
+               model_dict[date] = datetime.fromisoformat(model_dict[date])
+       for key in model_dict.keys():
+           if model_dict[key] == "None":
+               model_dict[key] = None

-   def _model_dict_from_hash(self, redis_key: str) -> Optional[dict]:
+   def _model_dict_from_hash(self, redis_key: str) -> Optional[dict[str, Any]]:
        redis = self.connection
        if redis.exists(redis_key):
-           token_dict = redis.hgetall(redis_key)
+           token_dict: dict[str, Any] = redis.hgetall(redis_key)  # type: ignore
            RedisTokensRepository._prepare_model_dict(token_dict)
            return token_dict
        return None

@@ -138,7 +153,10 @@ class RedisTokensRepository(AbstractTokensRepository):
        return None

    def _token_from_hash(self, redis_key: str) -> Optional[Token]:
-       return self._hash_as_model(redis_key, Token)
+       token = self._hash_as_model(redis_key, Token)
+       if token is not None:
+           token.created_at = token.created_at.replace(tzinfo=None)
+       return token

    def _recovery_key_from_hash(self, redis_key: str) -> Optional[RecoveryKey]:
        return self._hash_as_model(redis_key, RecoveryKey)
@@ -16,8 +16,6 @@ from selfprivacy_api.actions.ssh import (
from selfprivacy_api.actions.users import UserNotFound, get_user_by_username

from selfprivacy_api.dependencies import get_token_header
-from selfprivacy_api.restic_controller import ResticController, ResticStates
-from selfprivacy_api.restic_controller import tasks as restic_tasks
from selfprivacy_api.services.bitwarden import Bitwarden
from selfprivacy_api.services.gitea import Gitea
from selfprivacy_api.services.mailserver import MailServer

@@ -25,7 +23,7 @@ from selfprivacy_api.services.nextcloud import Nextcloud
from selfprivacy_api.services.ocserv import Ocserv
from selfprivacy_api.services.pleroma import Pleroma
from selfprivacy_api.services.service import ServiceStatus
-from selfprivacy_api.utils import WriteUserData, get_dkim_key, get_domain
+from selfprivacy_api.utils import get_dkim_key, get_domain

router = APIRouter(
    prefix="/services",
@@ -186,44 +184,34 @@ async def disable_pleroma():

@router.get("/restic/backup/list")
async def get_restic_backup_list():
-   restic = ResticController()
-   return restic.snapshot_list
+   raise HTTPException(
+       status_code=410,
+       detail="This endpoint is deprecated, please use GraphQL API",
+   )


@router.put("/restic/backup/create")
async def create_restic_backup():
-   restic = ResticController()
-   if restic.state is ResticStates.NO_KEY:
-       raise HTTPException(status_code=400, detail="Backup key not provided")
-   if restic.state is ResticStates.INITIALIZING:
-       raise HTTPException(status_code=400, detail="Backup is initializing")
-   if restic.state is ResticStates.BACKING_UP:
-       raise HTTPException(status_code=409, detail="Backup is already running")
-   restic_tasks.start_backup()
-   return {
-       "status": 0,
-       "message": "Backup creation has started",
-   }
+   raise HTTPException(
+       status_code=410,
+       detail="This endpoint is deprecated, please use GraphQL API",
+   )


@router.get("/restic/backup/status")
async def get_restic_backup_status():
-   restic = ResticController()
-
-   return {
-       "status": restic.state.name,
-       "progress": restic.progress,
-       "error_message": restic.error_message,
-   }
+   raise HTTPException(
+       status_code=410,
+       detail="This endpoint is deprecated, please use GraphQL API",
+   )


@router.get("/restic/backup/reload")
async def reload_restic_backup():
-   restic_tasks.load_snapshots()
-   return {
-       "status": 0,
-       "message": "Snapshots reload started",
-   }
+   raise HTTPException(
+       status_code=410,
+       detail="This endpoint is deprecated, please use GraphQL API",
+   )


class BackupRestoreInput(BaseModel):

@@ -232,29 +220,10 @@ class BackupRestoreInput(BaseModel):

@router.put("/restic/backup/restore")
async def restore_restic_backup(backup: BackupRestoreInput):
-   restic = ResticController()
-   if restic.state is ResticStates.NO_KEY:
-       raise HTTPException(status_code=400, detail="Backup key not provided")
-   if restic.state is ResticStates.NOT_INITIALIZED:
-       raise HTTPException(
-           status_code=400, detail="Backups repository is not initialized"
-       )
-   if restic.state is ResticStates.BACKING_UP:
-       raise HTTPException(status_code=409, detail="Backup is already running")
-   if restic.state is ResticStates.INITIALIZING:
-       raise HTTPException(status_code=400, detail="Repository is initializing")
-   if restic.state is ResticStates.RESTORING:
-       raise HTTPException(status_code=409, detail="Restore is already running")
-
-   for backup_item in restic.snapshot_list:
-       if backup_item["short_id"] == backup.backupId:
-           restic_tasks.restore_from_backup(backup.backupId)
-           return {
-               "status": 0,
-               "message": "Backup restoration procedure started",
-           }
-
-   raise HTTPException(status_code=404, detail="Backup not found")
+   raise HTTPException(
+       status_code=410,
+       detail="This endpoint is deprecated, please use GraphQL API",
+   )


class BackupConfigInput(BaseModel):

@@ -265,17 +234,10 @@ class BackupConfigInput(BaseModel):

@router.put("/restic/backblaze/config")
async def set_backblaze_config(backup_config: BackupConfigInput):
-   with WriteUserData() as data:
-       if "backup" not in data:
-           data["backup"] = {}
-       data["backup"]["provider"] = "BACKBLAZE"
-       data["backup"]["accountId"] = backup_config.accountId
-       data["backup"]["accountKey"] = backup_config.accountKey
-       data["backup"]["bucket"] = backup_config.bucket
-
-   restic_tasks.update_keys_from_userdata()
-
-   return "New backup settings saved"
+   raise HTTPException(
+       status_code=410,
+       detail="This endpoint is deprecated, please use GraphQL API",
+   )


@router.post("/ssh/enable")
@@ -1,244 +0,0 @@
"""Restic singleton controller."""
from datetime import datetime
import json
import subprocess
import os
from threading import Lock
from enum import Enum
import portalocker
from selfprivacy_api.utils import ReadUserData
from selfprivacy_api.utils.singleton_metaclass import SingletonMetaclass


class ResticStates(Enum):
    """Restic states enum."""

    NO_KEY = 0
    NOT_INITIALIZED = 1
    INITIALIZED = 2
    BACKING_UP = 3
    RESTORING = 4
    ERROR = 5
    INITIALIZING = 6


class ResticController(metaclass=SingletonMetaclass):
    """
    States in wich the restic_controller may be
    - no backblaze key
    - backblaze key is provided, but repository is not initialized
    - backblaze key is provided, repository is initialized
    - fetching list of snapshots
    - creating snapshot, current progress can be retrieved
    - recovering from snapshot

    Any ongoing operation acquires the lock
    Current state can be fetched with get_state()
    """

    _initialized = False

    def __init__(self):
        if self._initialized:
            return
        self.state = ResticStates.NO_KEY
        self.lock = False
        self.progress = 0
        self._backblaze_account = None
        self._backblaze_key = None
        self._repository_name = None
        self.snapshot_list = []
        self.error_message = None
        self._initialized = True
        self.load_configuration()
        self.write_rclone_config()
        self.load_snapshots()

    def load_configuration(self):
        """Load current configuration from user data to singleton."""
        with ReadUserData() as user_data:
            self._backblaze_account = user_data["backblaze"]["accountId"]
            self._backblaze_key = user_data["backblaze"]["accountKey"]
            self._repository_name = user_data["backblaze"]["bucket"]
        if self._backblaze_account and self._backblaze_key and self._repository_name:
            self.state = ResticStates.INITIALIZING
        else:
            self.state = ResticStates.NO_KEY

    def write_rclone_config(self):
        """
        Open /root/.config/rclone/rclone.conf with portalocker
        and write configuration in the following format:
        [backblaze]
        type = b2
        account = {self.backblaze_account}
        key = {self.backblaze_key}
        """
        with portalocker.Lock(
            "/root/.config/rclone/rclone.conf", "w", timeout=None
        ) as rclone_config:
            rclone_config.write(
                f"[backblaze]\n"
                f"type = b2\n"
                f"account = {self._backblaze_account}\n"
                f"key = {self._backblaze_key}\n"
            )

    def load_snapshots(self):
        """
        Load list of snapshots from repository
        """
        backup_listing_command = [
            "restic",
            "-o",
            "rclone.args=serve restic --stdio",
            "-r",
            f"rclone:backblaze:{self._repository_name}/sfbackup",
            "snapshots",
            "--json",
        ]

        if self.state in (ResticStates.BACKING_UP, ResticStates.RESTORING):
            return
        with subprocess.Popen(
            backup_listing_command,
            shell=False,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        ) as backup_listing_process_descriptor:
            snapshots_list = backup_listing_process_descriptor.communicate()[0].decode(
                "utf-8"
            )
        try:
            starting_index = snapshots_list.find("[")
            json.loads(snapshots_list[starting_index:])
            self.snapshot_list = json.loads(snapshots_list[starting_index:])
            self.state = ResticStates.INITIALIZED
            print(snapshots_list)
        except ValueError:
            if "Is there a repository at the following location?" in snapshots_list:
                self.state = ResticStates.NOT_INITIALIZED
                return
            self.state = ResticStates.ERROR
            self.error_message = snapshots_list
            return

    def initialize_repository(self):
        """
        Initialize repository with restic
        """
        initialize_repository_command = [
            "restic",
            "-o",
            "rclone.args=serve restic --stdio",
            "-r",
            f"rclone:backblaze:{self._repository_name}/sfbackup",
            "init",
        ]
        with subprocess.Popen(
            initialize_repository_command,
            shell=False,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        ) as initialize_repository_process_descriptor:
            msg = initialize_repository_process_descriptor.communicate()[0].decode(
                "utf-8"
            )
            if initialize_repository_process_descriptor.returncode == 0:
                self.state = ResticStates.INITIALIZED
            else:
                self.state = ResticStates.ERROR
                self.error_message = msg

        self.state = ResticStates.INITIALIZED

    def start_backup(self):
        """
        Start backup with restic
        """
        backup_command = [
            "restic",
            "-o",
            "rclone.args=serve restic --stdio",
            "-r",
            f"rclone:backblaze:{self._repository_name}/sfbackup",
            "--verbose",
            "--json",
            "backup",
            "/var",
        ]
        with open("/var/backup.log", "w", encoding="utf-8") as log_file:
            subprocess.Popen(
                backup_command,
                shell=False,
                stdout=log_file,
                stderr=subprocess.STDOUT,
            )

        self.state = ResticStates.BACKING_UP
        self.progress = 0

    def check_progress(self):
        """
        Check progress of ongoing backup operation
        """
        backup_status_check_command = ["tail", "-1", "/var/backup.log"]

        if self.state in (ResticStates.NO_KEY, ResticStates.NOT_INITIALIZED):
            return

        # If the log file does not exists
        if os.path.exists("/var/backup.log") is False:
            self.state = ResticStates.INITIALIZED

        with subprocess.Popen(
            backup_status_check_command,
            shell=False,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        ) as backup_status_check_process_descriptor:
            backup_process_status = (
                backup_status_check_process_descriptor.communicate()[0].decode("utf-8")
            )

        try:
            status = json.loads(backup_process_status)
        except ValueError:
            print(backup_process_status)
            self.error_message = backup_process_status
            return
        if status["message_type"] == "status":
            self.progress = status["percent_done"]
            self.state = ResticStates.BACKING_UP
        elif status["message_type"] == "summary":
            self.state = ResticStates.INITIALIZED
            self.progress = 0
            self.snapshot_list.append(
                {
                    "short_id": status["snapshot_id"],
                    # Current time in format 2021-12-02T00:02:51.086452543+03:00
                    "time": datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
                }
            )

    def restore_from_backup(self, snapshot_id):
        """
        Restore from backup with restic
        """
        backup_restoration_command = [
            "restic",
            "-o",
            "rclone.args=serve restic --stdio",
            "-r",
            f"rclone:backblaze:{self._repository_name}/sfbackup",
            "restore",
            snapshot_id,
            "--target",
            "/",
        ]

        self.state = ResticStates.RESTORING

        subprocess.run(backup_restoration_command, shell=False)

        self.state = ResticStates.INITIALIZED
@@ -1,70 +0,0 @@
"""Tasks for the restic controller."""
from huey import crontab
from selfprivacy_api.utils.huey import huey
from . import ResticController, ResticStates


@huey.task()
def init_restic():
    controller = ResticController()
    if controller.state == ResticStates.NOT_INITIALIZED:
        initialize_repository()


@huey.task()
def update_keys_from_userdata():
    controller = ResticController()
    controller.load_configuration()
    controller.write_rclone_config()
    initialize_repository()


# Check every morning at 5:00 AM
@huey.task(crontab(hour=5, minute=0))
def cron_load_snapshots():
    controller = ResticController()
    controller.load_snapshots()


# Check every morning at 5:00 AM
@huey.task()
def load_snapshots():
    controller = ResticController()
    controller.load_snapshots()
    if controller.state == ResticStates.NOT_INITIALIZED:
        load_snapshots.schedule(delay=120)


@huey.task()
def initialize_repository():
    controller = ResticController()
    if controller.state is not ResticStates.NO_KEY:
        controller.initialize_repository()
        load_snapshots()


@huey.task()
def fetch_backup_status():
    controller = ResticController()
    if controller.state is ResticStates.BACKING_UP:
        controller.check_progress()
        if controller.state is ResticStates.BACKING_UP:
            fetch_backup_status.schedule(delay=2)
    else:
        load_snapshots.schedule(delay=240)


@huey.task()
def start_backup():
    controller = ResticController()
    if controller.state is ResticStates.NOT_INITIALIZED:
        resp = initialize_repository()
        resp.get()
    controller.start_backup()
    fetch_backup_status.schedule(delay=3)


@huey.task()
def restore_from_backup(snapshot):
    controller = ResticController()
    controller.restore_from_backup(snapshot)

@@ -42,7 +42,7 @@ def get_disabled_services() -> list[Service]:


def get_services_by_location(location: str) -> list[Service]:
    return [service for service in services if service.get_location() == location]
    return [service for service in services if service.get_drive() == location]


def get_all_required_dns_records() -> list[ServiceDnsRecord]:

@@ -3,14 +3,12 @@ import base64
import subprocess
import typing

from selfprivacy_api.jobs import Job, JobStatus, Jobs
from selfprivacy_api.jobs import Job, Jobs
from selfprivacy_api.services.generic_service_mover import FolderMoveNames, move_service
from selfprivacy_api.services.generic_size_counter import get_storage_usage
from selfprivacy_api.services.generic_status_getter import get_service_status
from selfprivacy_api.services.service import Service, ServiceDnsRecord, ServiceStatus
from selfprivacy_api.utils import ReadUserData, WriteUserData, get_domain
from selfprivacy_api.utils.block_devices import BlockDevice
from selfprivacy_api.utils.huey import huey
import selfprivacy_api.utils.network as network_utils
from selfprivacy_api.services.bitwarden.icon import BITWARDEN_ICON

@@ -38,6 +36,10 @@ class Bitwarden(Service):
        """Read SVG icon from file and return it as base64 encoded string."""
        return base64.b64encode(BITWARDEN_ICON.encode("utf-8")).decode("utf-8")

    @staticmethod
    def get_user() -> str:
        return "vaultwarden"

    @staticmethod
    def get_url() -> typing.Optional[str]:
        """Return service url."""

@@ -52,6 +54,10 @@ class Bitwarden(Service):
    def is_required() -> bool:
        return False

    @staticmethod
    def get_backup_description() -> str:
        return "Password database, encryption certificate and attachments."

    @staticmethod
    def is_enabled() -> bool:
        with ReadUserData() as user_data:

@@ -111,19 +117,8 @@ class Bitwarden(Service):
        return ""

    @staticmethod
    def get_storage_usage() -> int:
        storage_usage = 0
        storage_usage += get_storage_usage("/var/lib/bitwarden")
        storage_usage += get_storage_usage("/var/lib/bitwarden_rs")
        return storage_usage

    @staticmethod
    def get_location() -> str:
        with ReadUserData() as user_data:
            if user_data.get("useBinds", False):
                return user_data.get("bitwarden", {}).get("location", "sda1")
            else:
                return "sda1"
    def get_folders() -> typing.List[str]:
        return ["/var/lib/bitwarden", "/var/lib/bitwarden_rs"]

    @staticmethod
    def get_dns_records() -> typing.List[ServiceDnsRecord]:

@@ -154,20 +149,7 @@ class Bitwarden(Service):
            self,
            volume,
            job,
            [
                FolderMoveNames(
                    name="bitwarden",
                    bind_location="/var/lib/bitwarden",
                    group="vaultwarden",
                    owner="vaultwarden",
                ),
                FolderMoveNames(
                    name="bitwarden_rs",
                    bind_location="/var/lib/bitwarden_rs",
                    group="vaultwarden",
                    owner="vaultwarden",
                ),
            ],
            FolderMoveNames.default_foldermoves(self),
            "bitwarden",
        )

@@ -1,5 +1,6 @@
"""Generic handler for moving services"""

from __future__ import annotations
import subprocess
import time
import pathlib

@@ -11,6 +12,7 @@ from selfprivacy_api.utils.huey import huey
from selfprivacy_api.utils.block_devices import BlockDevice
from selfprivacy_api.utils import ReadUserData, WriteUserData
from selfprivacy_api.services.service import Service, ServiceStatus
from selfprivacy_api.services.owned_path import OwnedPath


class FolderMoveNames(BaseModel):

@@ -19,6 +21,26 @@ class FolderMoveNames(BaseModel):
    owner: str
    group: str

    @staticmethod
    def from_owned_path(path: OwnedPath) -> FolderMoveNames:
        return FolderMoveNames(
            name=FolderMoveNames.get_foldername(path.path),
            bind_location=path.path,
            owner=path.owner,
            group=path.group,
        )

    @staticmethod
    def get_foldername(path: str) -> str:
        return path.split("/")[-1]

    @staticmethod
    def default_foldermoves(service: Service) -> list[FolderMoveNames]:
        return [
            FolderMoveNames.from_owned_path(folder)
            for folder in service.get_owned_folders()
        ]


@huey.task()
def move_service(

@@ -44,7 +66,7 @@ def move_service(
        )
        return
    # Check if we are on the same volume
    old_volume = service.get_location()
    old_volume = service.get_drive()
    if old_volume == volume.name:
        Jobs.update(
            job=job,

@@ -61,7 +83,7 @@ def move_service(
        )
        return
    # Make sure the volume is mounted
    if volume.name != "sda1" and f"/volumes/{volume.name}" not in volume.mountpoints:
    if not volume.is_root() and f"/volumes/{volume.name}" not in volume.mountpoints:
        Jobs.update(
            job=job,
            status=JobStatus.ERROR,
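
Editor note: the new `FolderMoveNames` helpers convert the ownership metadata carried by `OwnedPath` (added below in `selfprivacy_api/services/owned_path.py`) into the move descriptors used by `move_service`. A small sketch, assuming only the classes shown in this diff; the `/var/vmail` values are illustrative:

```python
from selfprivacy_api.services.owned_path import OwnedPath
from selfprivacy_api.services.generic_service_mover import FolderMoveNames

mail_folder = OwnedPath(path="/var/vmail", owner="virtualMail", group="virtualMail")
move = FolderMoveNames.from_owned_path(mail_folder)

assert move.name == "vmail"                # last path component
assert move.bind_location == "/var/vmail"  # full path kept for the bind mount
assert move.owner == "virtualMail" and move.group == "virtualMail"
```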

@@ -5,12 +5,10 @@ import typing

from selfprivacy_api.jobs import Job, Jobs
from selfprivacy_api.services.generic_service_mover import FolderMoveNames, move_service
from selfprivacy_api.services.generic_size_counter import get_storage_usage
from selfprivacy_api.services.generic_status_getter import get_service_status
from selfprivacy_api.services.service import Service, ServiceDnsRecord, ServiceStatus
from selfprivacy_api.utils import ReadUserData, WriteUserData, get_domain
from selfprivacy_api.utils.block_devices import BlockDevice
from selfprivacy_api.utils.huey import huey
import selfprivacy_api.utils.network as network_utils
from selfprivacy_api.services.gitea.icon import GITEA_ICON

@@ -52,6 +50,10 @@ class Gitea(Service):
    def is_required() -> bool:
        return False

    @staticmethod
    def get_backup_description() -> str:
        return "Git repositories, database and user data."

    @staticmethod
    def is_enabled() -> bool:
        with ReadUserData() as user_data:

@@ -110,18 +112,8 @@ class Gitea(Service):
        return ""

    @staticmethod
    def get_storage_usage() -> int:
        storage_usage = 0
        storage_usage += get_storage_usage("/var/lib/gitea")
        return storage_usage

    @staticmethod
    def get_location() -> str:
        with ReadUserData() as user_data:
            if user_data.get("useBinds", False):
                return user_data.get("gitea", {}).get("location", "sda1")
            else:
                return "sda1"
    def get_folders() -> typing.List[str]:
        return ["/var/lib/gitea"]

    @staticmethod
    def get_dns_records() -> typing.List[ServiceDnsRecord]:

@@ -151,14 +143,7 @@ class Gitea(Service):
            self,
            volume,
            job,
            [
                FolderMoveNames(
                    name="gitea",
                    bind_location="/var/lib/gitea",
                    group="gitea",
                    owner="gitea",
                ),
            ],
            FolderMoveNames.default_foldermoves(self),
            "gitea",
        )

@@ -3,17 +3,13 @@ import base64
import subprocess
import typing

from selfprivacy_api.jobs import Job, Jobs
from selfprivacy_api.services.generic_service_mover import FolderMoveNames, move_service
from selfprivacy_api.services.generic_size_counter import get_storage_usage
from selfprivacy_api.jobs import Job
from selfprivacy_api.services.generic_status_getter import (
    get_service_status,
    get_service_status_from_several_units,
)
from selfprivacy_api.services.service import Service, ServiceDnsRecord, ServiceStatus
from selfprivacy_api.utils import ReadUserData, WriteUserData, get_domain
from selfprivacy_api.utils.block_devices import BlockDevice
from selfprivacy_api.utils.huey import huey
import selfprivacy_api.utils.network as network_utils
from selfprivacy_api.services.jitsi.icon import JITSI_ICON

@@ -55,6 +51,10 @@ class Jitsi(Service):
    def is_required() -> bool:
        return False

    @staticmethod
    def get_backup_description() -> str:
        return "Secrets that are used to encrypt the communication."

    @staticmethod
    def is_enabled() -> bool:
        with ReadUserData() as user_data:

@@ -84,18 +84,27 @@ class Jitsi(Service):

    @staticmethod
    def stop():
        subprocess.run(["systemctl", "stop", "jitsi-videobridge.service"])
        subprocess.run(["systemctl", "stop", "jicofo.service"])
        subprocess.run(
            ["systemctl", "stop", "jitsi-videobridge.service"],
            check=False,
        )
        subprocess.run(["systemctl", "stop", "jicofo.service"], check=False)

    @staticmethod
    def start():
        subprocess.run(["systemctl", "start", "jitsi-videobridge.service"])
        subprocess.run(["systemctl", "start", "jicofo.service"])
        subprocess.run(
            ["systemctl", "start", "jitsi-videobridge.service"],
            check=False,
        )
        subprocess.run(["systemctl", "start", "jicofo.service"], check=False)

    @staticmethod
    def restart():
        subprocess.run(["systemctl", "restart", "jitsi-videobridge.service"])
        subprocess.run(["systemctl", "restart", "jicofo.service"])
        subprocess.run(
            ["systemctl", "restart", "jitsi-videobridge.service"],
            check=False,
        )
        subprocess.run(["systemctl", "restart", "jicofo.service"], check=False)

    @staticmethod
    def get_configuration():

@@ -110,14 +119,8 @@ class Jitsi(Service):
        return ""

    @staticmethod
    def get_storage_usage() -> int:
        storage_usage = 0
        storage_usage += get_storage_usage("/var/lib/jitsi-meet")
        return storage_usage

    @staticmethod
    def get_location() -> str:
        return "sda1"
    def get_folders() -> typing.List[str]:
        return ["/var/lib/jitsi-meet"]

    @staticmethod
    def get_dns_records() -> typing.List[ServiceDnsRecord]:
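
Editor note: the service-control hunks above (and the mailserver and ocserv ones below) switch every `systemctl` call to an explicit `check=False`. `subprocess.run` already defaults to `check=False`, so this is a readability and linting fix (flake8-bugbear, added to shell.nix later in this commit, flags an implicit `check`), not a behaviour change. A minimal stdlib illustration of the difference:

```python
import subprocess

# check=False: a non-zero exit status is reported via the returned
# CompletedProcess and never raises.
result = subprocess.run(["false"], check=False)
print(result.returncode)  # 1

# check=True: the same non-zero exit status raises CalledProcessError.
try:
    subprocess.run(["false"], check=True)
except subprocess.CalledProcessError as error:
    print(error.returncode)  # 1
```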

@@ -4,17 +4,14 @@ import base64
import subprocess
import typing

from selfprivacy_api.jobs import Job, JobStatus, Jobs
from selfprivacy_api.jobs import Job, Jobs
from selfprivacy_api.services.generic_service_mover import FolderMoveNames, move_service
from selfprivacy_api.services.generic_size_counter import get_storage_usage
from selfprivacy_api.services.generic_status_getter import (
    get_service_status,
    get_service_status_from_several_units,
)
from selfprivacy_api.services.service import Service, ServiceDnsRecord, ServiceStatus
import selfprivacy_api.utils as utils
from selfprivacy_api import utils
from selfprivacy_api.utils.block_devices import BlockDevice
from selfprivacy_api.utils.huey import huey
import selfprivacy_api.utils.network as network_utils
from selfprivacy_api.services.mailserver.icon import MAILSERVER_ICON

@@ -24,7 +21,7 @@ class MailServer(Service):

    @staticmethod
    def get_id() -> str:
        return "mailserver"
        return "email"

    @staticmethod
    def get_display_name() -> str:

@@ -38,6 +35,10 @@ class MailServer(Service):
    def get_svg_icon() -> str:
        return base64.b64encode(MAILSERVER_ICON.encode("utf-8")).decode("utf-8")

    @staticmethod
    def get_user() -> str:
        return "virtualMail"

    @staticmethod
    def get_url() -> typing.Optional[str]:
        """Return service url."""

@@ -51,6 +52,10 @@ class MailServer(Service):
    def is_required() -> bool:
        return True

    @staticmethod
    def get_backup_description() -> str:
        return "Mail boxes and filters."

    @staticmethod
    def is_enabled() -> bool:
        return True

@@ -71,18 +76,18 @@ class MailServer(Service):

    @staticmethod
    def stop():
        subprocess.run(["systemctl", "stop", "dovecot2.service"])
        subprocess.run(["systemctl", "stop", "postfix.service"])
        subprocess.run(["systemctl", "stop", "dovecot2.service"], check=False)
        subprocess.run(["systemctl", "stop", "postfix.service"], check=False)

    @staticmethod
    def start():
        subprocess.run(["systemctl", "start", "dovecot2.service"])
        subprocess.run(["systemctl", "start", "postfix.service"])
        subprocess.run(["systemctl", "start", "dovecot2.service"], check=False)
        subprocess.run(["systemctl", "start", "postfix.service"], check=False)

    @staticmethod
    def restart():
        subprocess.run(["systemctl", "restart", "dovecot2.service"])
        subprocess.run(["systemctl", "restart", "postfix.service"])
        subprocess.run(["systemctl", "restart", "dovecot2.service"], check=False)
        subprocess.run(["systemctl", "restart", "postfix.service"], check=False)

    @staticmethod
    def get_configuration():

@@ -97,16 +102,8 @@ class MailServer(Service):
        return ""

    @staticmethod
    def get_storage_usage() -> int:
        return get_storage_usage("/var/vmail")

    @staticmethod
    def get_location() -> str:
        with utils.ReadUserData() as user_data:
            if user_data.get("useBinds", False):
                return user_data.get("mailserver", {}).get("location", "sda1")
            else:
                return "sda1"
    def get_folders() -> typing.List[str]:
        return ["/var/vmail", "/var/sieve"]

    @staticmethod
    def get_dns_records() -> typing.List[ServiceDnsRecord]:

@@ -135,7 +132,7 @@ class MailServer(Service):
                type="MX", name=domain, content=domain, ttl=3600, priority=10
            ),
            ServiceDnsRecord(
                type="TXT", name="_dmarc", content=f"v=DMARC1; p=none", ttl=18000
                type="TXT", name="_dmarc", content="v=DMARC1; p=none", ttl=18000
            ),
            ServiceDnsRecord(
                type="TXT",

@@ -150,7 +147,7 @@ class MailServer(Service):

    def move_to_volume(self, volume: BlockDevice) -> Job:
        job = Jobs.add(
            type_id="services.mailserver.move",
            type_id="services.email.move",
            name="Move Mail Server",
            description=f"Moving mailserver data to {volume.name}",
        )

@@ -159,21 +156,8 @@ class MailServer(Service):
            self,
            volume,
            job,
            [
                FolderMoveNames(
                    name="vmail",
                    bind_location="/var/vmail",
                    group="virtualMail",
                    owner="virtualMail",
                ),
                FolderMoveNames(
                    name="sieve",
                    bind_location="/var/sieve",
                    group="virtualMail",
                    owner="virtualMail",
                ),
            ],
            "mailserver",
            FolderMoveNames.default_foldermoves(self),
            "email",
        )

        return job

@@ -4,7 +4,6 @@ import subprocess
import typing
from selfprivacy_api.jobs import Job, Jobs
from selfprivacy_api.services.generic_service_mover import FolderMoveNames, move_service
from selfprivacy_api.services.generic_size_counter import get_storage_usage
from selfprivacy_api.services.generic_status_getter import get_service_status
from selfprivacy_api.services.service import Service, ServiceDnsRecord, ServiceStatus
from selfprivacy_api.utils import ReadUserData, WriteUserData, get_domain

@@ -50,6 +49,10 @@ class Nextcloud(Service):
    def is_required() -> bool:
        return False

    @staticmethod
    def get_backup_description() -> str:
        return "All the files and other data stored in Nextcloud."

    @staticmethod
    def is_enabled() -> bool:
        with ReadUserData() as user_data:

@@ -114,22 +117,8 @@ class Nextcloud(Service):
        return ""

    @staticmethod
    def get_storage_usage() -> int:
        """
        Calculate the real storage usage of /var/lib/nextcloud and all subdirectories.
        Calculate using pathlib.
        Do not follow symlinks.
        """
        return get_storage_usage("/var/lib/nextcloud")

    @staticmethod
    def get_location() -> str:
        """Get the name of disk where Nextcloud is installed."""
        with ReadUserData() as user_data:
            if user_data.get("useBinds", False):
                return user_data.get("nextcloud", {}).get("location", "sda1")
            else:
                return "sda1"
    def get_folders() -> typing.List[str]:
        return ["/var/lib/nextcloud"]

    @staticmethod
    def get_dns_records() -> typing.List[ServiceDnsRecord]:

@@ -158,14 +147,7 @@ class Nextcloud(Service):
            self,
            volume,
            job,
            [
                FolderMoveNames(
                    name="nextcloud",
                    bind_location="/var/lib/nextcloud",
                    owner="nextcloud",
                    group="nextcloud",
                ),
            ],
            FolderMoveNames.default_foldermoves(self),
            "nextcloud",
        )
        return job

@@ -2,9 +2,7 @@
import base64
import subprocess
import typing
from selfprivacy_api.jobs import Job, Jobs
from selfprivacy_api.services.generic_service_mover import FolderMoveNames, move_service
from selfprivacy_api.services.generic_size_counter import get_storage_usage
from selfprivacy_api.jobs import Job
from selfprivacy_api.services.generic_status_getter import get_service_status
from selfprivacy_api.services.service import Service, ServiceDnsRecord, ServiceStatus
from selfprivacy_api.utils import ReadUserData, WriteUserData

@@ -45,6 +43,14 @@ class Ocserv(Service):
    def is_required() -> bool:
        return False

    @staticmethod
    def can_be_backed_up() -> bool:
        return False

    @staticmethod
    def get_backup_description() -> str:
        return "Nothing to backup."

    @staticmethod
    def is_enabled() -> bool:
        with ReadUserData() as user_data:

@@ -70,15 +76,15 @@ class Ocserv(Service):

    @staticmethod
    def stop():
        subprocess.run(["systemctl", "stop", "ocserv.service"])
        subprocess.run(["systemctl", "stop", "ocserv.service"], check=False)

    @staticmethod
    def start():
        subprocess.run(["systemctl", "start", "ocserv.service"])
        subprocess.run(["systemctl", "start", "ocserv.service"], check=False)

    @staticmethod
    def restart():
        subprocess.run(["systemctl", "restart", "ocserv.service"])
        subprocess.run(["systemctl", "restart", "ocserv.service"], check=False)

    @staticmethod
    def get_configuration():

@@ -92,10 +98,6 @@ class Ocserv(Service):
    def get_logs():
        return ""

    @staticmethod
    def get_location() -> str:
        return "sda1"

    @staticmethod
    def get_dns_records() -> typing.List[ServiceDnsRecord]:
        return [

@@ -114,8 +116,8 @@ class Ocserv(Service):
        ]

    @staticmethod
    def get_storage_usage() -> int:
        return 0
    def get_folders() -> typing.List[str]:
        return []

    def move_to_volume(self, volume: BlockDevice) -> Job:
        raise NotImplementedError("ocserv service is not movable")

selfprivacy_api/services/owned_path.py (new file, 7 lines)
@@ -0,0 +1,7 @@
from pydantic import BaseModel


class OwnedPath(BaseModel):
    path: str
    owner: str
    group: str

@@ -4,9 +4,9 @@ import subprocess
import typing
from selfprivacy_api.jobs import Job, Jobs
from selfprivacy_api.services.generic_service_mover import FolderMoveNames, move_service
from selfprivacy_api.services.generic_size_counter import get_storage_usage
from selfprivacy_api.services.generic_status_getter import get_service_status
from selfprivacy_api.services.service import Service, ServiceDnsRecord, ServiceStatus
from selfprivacy_api.services.owned_path import OwnedPath
from selfprivacy_api.utils import ReadUserData, WriteUserData, get_domain
from selfprivacy_api.utils.block_devices import BlockDevice
import selfprivacy_api.utils.network as network_utils

@@ -46,6 +46,10 @@ class Pleroma(Service):
    def is_required() -> bool:
        return False

    @staticmethod
    def get_backup_description() -> str:
        return "Your Pleroma accounts, posts and media."

    @staticmethod
    def is_enabled() -> bool:
        with ReadUserData() as user_data:

@@ -97,19 +101,23 @@ class Pleroma(Service):
        return ""

    @staticmethod
    def get_storage_usage() -> int:
        storage_usage = 0
        storage_usage += get_storage_usage("/var/lib/pleroma")
        storage_usage += get_storage_usage("/var/lib/postgresql")
        return storage_usage

    @staticmethod
    def get_location() -> str:
        with ReadUserData() as user_data:
            if user_data.get("useBinds", False):
                return user_data.get("pleroma", {}).get("location", "sda1")
            else:
                return "sda1"
    def get_owned_folders() -> typing.List[OwnedPath]:
        """
        Get a list of occupied directories with ownership info.
        Pleroma has folders that are owned by different users.
        """
        return [
            OwnedPath(
                path="/var/lib/pleroma",
                owner="pleroma",
                group="pleroma",
            ),
            OwnedPath(
                path="/var/lib/postgresql",
                owner="postgres",
                group="postgres",
            ),
        ]

    @staticmethod
    def get_dns_records() -> typing.List[ServiceDnsRecord]:

@@ -138,20 +146,7 @@ class Pleroma(Service):
            self,
            volume,
            job,
            [
                FolderMoveNames(
                    name="pleroma",
                    bind_location="/var/lib/pleroma",
                    owner="pleroma",
                    group="pleroma",
                ),
                FolderMoveNames(
                    name="postgresql",
                    bind_location="/var/lib/postgresql",
                    owner="postgres",
                    group="postgres",
                ),
            ],
            FolderMoveNames.default_foldermoves(self),
            "pleroma",
        )
        return job
|
@ -6,7 +6,14 @@ import typing
|
|||
from pydantic import BaseModel
|
||||
from selfprivacy_api.jobs import Job
|
||||
|
||||
from selfprivacy_api.utils.block_devices import BlockDevice
|
||||
from selfprivacy_api.utils.block_devices import BlockDevice, BlockDevices
|
||||
|
||||
from selfprivacy_api.services.generic_size_counter import get_storage_usage
|
||||
from selfprivacy_api.services.owned_path import OwnedPath
|
||||
from selfprivacy_api import utils
|
||||
from selfprivacy_api.utils.waitloop import wait_until_true
|
||||
|
||||
DEFAULT_START_STOP_TIMEOUT = 10 * 60
|
||||
|
||||
|
||||
class ServiceStatus(Enum):
|
||||
|
@ -38,71 +45,125 @@ class Service(ABC):
|
|||
@staticmethod
|
||||
@abstractmethod
|
||||
def get_id() -> str:
|
||||
"""
|
||||
The unique id of the service.
|
||||
"""
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
@abstractmethod
|
||||
def get_display_name() -> str:
|
||||
"""
|
||||
The name of the service that is shown to the user.
|
||||
"""
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
@abstractmethod
|
||||
def get_description() -> str:
|
||||
"""
|
||||
The description of the service that is shown to the user.
|
||||
"""
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
@abstractmethod
|
||||
def get_svg_icon() -> str:
|
||||
"""
|
||||
The monochrome svg icon of the service.
|
||||
"""
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
@abstractmethod
|
||||
def get_url() -> typing.Optional[str]:
|
||||
"""
|
||||
The url of the service if it is accessible from the internet browser.
|
||||
"""
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
def get_user(cls) -> typing.Optional[str]:
|
||||
"""
|
||||
The user that owns the service's files.
|
||||
Defaults to the service's id.
|
||||
"""
|
||||
return cls.get_id()
|
||||
|
||||
@classmethod
|
||||
def get_group(cls) -> typing.Optional[str]:
|
||||
"""
|
||||
The group that owns the service's files.
|
||||
Defaults to the service's user.
|
||||
"""
|
||||
return cls.get_user()
|
||||
|
||||
@staticmethod
|
||||
@abstractmethod
|
||||
def is_movable() -> bool:
|
||||
"""`True` if the service can be moved to the non-system volume."""
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
@abstractmethod
|
||||
def is_required() -> bool:
|
||||
"""`True` if the service is required for the server to function."""
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
def can_be_backed_up() -> bool:
|
||||
"""`True` if the service can be backed up."""
|
||||
return True
|
||||
|
||||
@staticmethod
|
||||
@abstractmethod
|
||||
def get_backup_description() -> str:
|
||||
"""
|
||||
The text shown to the user that exlplains what data will be
|
||||
backed up.
|
||||
"""
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
@abstractmethod
|
||||
def is_enabled() -> bool:
|
||||
"""`True` if the service is enabled."""
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
@abstractmethod
|
||||
def get_status() -> ServiceStatus:
|
||||
"""The status of the service, reported by systemd."""
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
@abstractmethod
|
||||
def enable():
|
||||
"""Enable the service. Usually this means enabling systemd unit."""
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
@abstractmethod
|
||||
def disable():
|
||||
"""Disable the service. Usually this means disabling systemd unit."""
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
@abstractmethod
|
||||
def stop():
|
||||
"""Stop the service. Usually this means stopping systemd unit."""
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
@abstractmethod
|
||||
def start():
|
||||
"""Start the service. Usually this means starting systemd unit."""
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
@abstractmethod
|
||||
def restart():
|
||||
"""Restart the service. Usually this means restarting systemd unit."""
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
|
@ -120,21 +181,120 @@ class Service(ABC):
|
|||
def get_logs():
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
@abstractmethod
|
||||
def get_storage_usage() -> int:
|
||||
pass
|
||||
@classmethod
|
||||
def get_storage_usage(cls) -> int:
|
||||
"""
|
||||
Calculate the real storage usage of folders occupied by service
|
||||
Calculate using pathlib.
|
||||
Do not follow symlinks.
|
||||
"""
|
||||
storage_used = 0
|
||||
for folder in cls.get_folders():
|
||||
storage_used += get_storage_usage(folder)
|
||||
return storage_used
|
||||
|
||||
@staticmethod
|
||||
@abstractmethod
|
||||
def get_dns_records() -> typing.List[ServiceDnsRecord]:
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
def get_drive(cls) -> str:
|
||||
"""
|
||||
Get the name of the drive/volume where the service is located.
|
||||
Example values are `sda1`, `vda`, `sdb`.
|
||||
"""
|
||||
root_device: str = BlockDevices().get_root_block_device().name
|
||||
if not cls.is_movable():
|
||||
return root_device
|
||||
with utils.ReadUserData() as userdata:
|
||||
if userdata.get("useBinds", False):
|
||||
return userdata.get(cls.get_id(), {}).get(
|
||||
"location",
|
||||
root_device,
|
||||
)
|
||||
else:
|
||||
return root_device
|
||||
|
||||
@classmethod
|
||||
def get_folders(cls) -> typing.List[str]:
|
||||
"""
|
||||
get a plain list of occupied directories
|
||||
Default extracts info from overriden get_owned_folders()
|
||||
"""
|
||||
if cls.get_owned_folders == Service.get_owned_folders:
|
||||
raise NotImplementedError(
|
||||
"you need to implement at least one of get_folders() or get_owned_folders()"
|
||||
)
|
||||
return [owned_folder.path for owned_folder in cls.get_owned_folders()]
|
||||
|
||||
@classmethod
|
||||
def get_owned_folders(cls) -> typing.List[OwnedPath]:
|
||||
"""
|
||||
Get a list of occupied directories with ownership info
|
||||
Default extracts info from overriden get_folders()
|
||||
"""
|
||||
if cls.get_folders == Service.get_folders:
|
||||
raise NotImplementedError(
|
||||
"you need to implement at least one of get_folders() or get_owned_folders()"
|
||||
)
|
||||
return [cls.owned_path(path) for path in cls.get_folders()]
|
||||
|
||||
@staticmethod
|
||||
@abstractmethod
|
||||
def get_location() -> str:
|
||||
pass
|
||||
def get_foldername(path: str) -> str:
|
||||
return path.split("/")[-1]
|
||||
|
||||
@abstractmethod
|
||||
def move_to_volume(self, volume: BlockDevice) -> Job:
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
def owned_path(cls, path: str):
|
||||
"""A default guess on folder ownership"""
|
||||
return OwnedPath(
|
||||
path=path,
|
||||
owner=cls.get_user(),
|
||||
group=cls.get_group(),
|
||||
)
|
||||
|
||||
def pre_backup(self):
|
||||
pass
|
||||
|
||||
def post_restore(self):
|
||||
pass
|
||||
|
||||
|
||||
class StoppedService:
|
||||
"""
|
||||
A context manager that stops the service if needed and reactivates it
|
||||
after you are done if it was active
|
||||
|
||||
Example:
|
||||
```
|
||||
assert service.get_status() == ServiceStatus.ACTIVE
|
||||
with StoppedService(service) [as stopped_service]:
|
||||
assert service.get_status() == ServiceStatus.INACTIVE
|
||||
```
|
||||
"""
|
||||
|
||||
def __init__(self, service: Service):
|
||||
self.service = service
|
||||
self.original_status = service.get_status()
|
||||
|
||||
def __enter__(self) -> Service:
|
||||
self.original_status = self.service.get_status()
|
||||
if self.original_status != ServiceStatus.INACTIVE:
|
||||
self.service.stop()
|
||||
wait_until_true(
|
||||
lambda: self.service.get_status() == ServiceStatus.INACTIVE,
|
||||
timeout_sec=DEFAULT_START_STOP_TIMEOUT,
|
||||
)
|
||||
return self.service
|
||||
|
||||
def __exit__(self, type, value, traceback):
|
||||
if self.original_status in [ServiceStatus.ACTIVATING, ServiceStatus.ACTIVE]:
|
||||
self.service.start()
|
||||
wait_until_true(
|
||||
lambda: self.service.get_status() == ServiceStatus.ACTIVE,
|
||||
timeout_sec=DEFAULT_START_STOP_TIMEOUT,
|
||||
)
|
||||
|
|
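
Editor note: with the refactor above, a concrete service only has to describe its folders; ownership, storage accounting and drive lookup fall out of the defaults. A hypothetical sketch (not part of this commit) showing the default chain; `SketchService` and `/var/lib/sketch` are made up:

```python
import typing

from selfprivacy_api.services.service import Service


class SketchService(Service):
    """Hypothetical service overriding only what the defaults need."""

    @staticmethod
    def get_id() -> str:
        return "sketch"

    @classmethod
    def get_folders(cls) -> typing.List[str]:
        return ["/var/lib/sketch"]


# get_owned_folders() derives ownership from get_user()/get_group(), which
# both fall back to get_id(), so (without instantiating) this yields
# [OwnedPath(path="/var/lib/sketch", owner="sketch", group="sketch")]:
print(SketchService.get_owned_folders())
```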

selfprivacy_api/services/test_service/__init__.py (new file, 195 lines)
@@ -0,0 +1,195 @@
"""Class representing a test dummy service (modelled on Bitwarden)."""
import base64
import typing
import subprocess

from typing import List
from os import path

# from enum import Enum

from selfprivacy_api.jobs import Job
from selfprivacy_api.services.service import Service, ServiceDnsRecord, ServiceStatus
from selfprivacy_api.utils.block_devices import BlockDevice
import selfprivacy_api.utils.network as network_utils

from selfprivacy_api.services.test_service.icon import BITWARDEN_ICON

DEFAULT_DELAY = 0


class DummyService(Service):
    """A test service"""

    folders: List[str] = []
    startstop_delay = 0
    backuppable = True

    def __init_subclass__(cls, folders: List[str]):
        cls.folders = folders

    def __init__(self):
        super().__init__()
        status_file = self.status_file()
        with open(status_file, "w") as file:
            file.write(ServiceStatus.ACTIVE.value)

    @staticmethod
    def get_id() -> str:
        """Return service id."""
        return "testservice"

    @staticmethod
    def get_display_name() -> str:
        """Return service display name."""
        return "Test Service"

    @staticmethod
    def get_description() -> str:
        """Return service description."""
        return "A small service used for test purposes. Does nothing."

    @staticmethod
    def get_svg_icon() -> str:
        """Read SVG icon from file and return it as base64 encoded string."""
        # return ""
        return base64.b64encode(BITWARDEN_ICON.encode("utf-8")).decode("utf-8")

    @staticmethod
    def get_url() -> typing.Optional[str]:
        """Return service url."""
        domain = "test.com"
        return f"https://password.{domain}"

    @staticmethod
    def is_movable() -> bool:
        return True

    @staticmethod
    def is_required() -> bool:
        return False

    @staticmethod
    def get_backup_description() -> str:
        return "How did we get here?"

    @staticmethod
    def is_enabled() -> bool:
        return True

    @classmethod
    def status_file(cls) -> str:
        dir = cls.folders[0]
        # we do not REALLY want to store our state in our declared folders
        return path.join(dir, "..", "service_status")

    @classmethod
    def set_status(cls, status: ServiceStatus):
        with open(cls.status_file(), "w") as file:
            status_string = file.write(status.value)

    @classmethod
    def get_status(cls) -> ServiceStatus:
        with open(cls.status_file(), "r") as file:
            status_string = file.read().strip()
        return ServiceStatus[status_string]

    @classmethod
    def change_status_with_async_delay(
        cls, new_status: ServiceStatus, delay_sec: float
    ):
        """Simulate a delay on the systemd side."""
        status_file = cls.status_file()

        command = [
            "bash",
            "-c",
            f" sleep {delay_sec} && echo {new_status.value} > {status_file}",
        ]
        handle = subprocess.Popen(command)
        if delay_sec == 0:
            handle.communicate()

    @classmethod
    def set_backuppable(cls, new_value: bool) -> None:
        """For tests: because can_be_backed_up is static,
        we can only set it up dynamically for tests via a classmethod"""
        cls.backuppable = new_value

    @classmethod
    def can_be_backed_up(cls) -> bool:
        """`True` if the service can be backed up."""
        return cls.backuppable

    @classmethod
    def enable(cls):
        pass

    @classmethod
    def disable(cls, delay):
        pass

    @classmethod
    def set_delay(cls, new_delay):
        cls.startstop_delay = new_delay

    @classmethod
    def stop(cls):
        cls.set_status(ServiceStatus.DEACTIVATING)
        cls.change_status_with_async_delay(ServiceStatus.INACTIVE, cls.startstop_delay)

    @classmethod
    def start(cls):
        cls.set_status(ServiceStatus.ACTIVATING)
        cls.change_status_with_async_delay(ServiceStatus.ACTIVE, cls.startstop_delay)

    @classmethod
    def restart(cls):
        cls.set_status(ServiceStatus.RELOADING)  # is this the correct one?
        cls.change_status_with_async_delay(ServiceStatus.ACTIVE, cls.startstop_delay)

    @staticmethod
    def get_configuration():
        return {}

    @staticmethod
    def set_configuration(config_items):
        return super().set_configuration(config_items)

    @staticmethod
    def get_logs():
        return ""

    @staticmethod
    def get_storage_usage() -> int:
        storage_usage = 0
        return storage_usage

    @staticmethod
    def get_drive() -> str:
        return "sda1"

    @classmethod
    def get_folders(cls) -> List[str]:
        return cls.folders

    @staticmethod
    def get_dns_records() -> typing.List[ServiceDnsRecord]:
        """Return list of DNS records for the test service."""
        return [
            ServiceDnsRecord(
                type="A",
                name="password",
                content=network_utils.get_ip4(),
                ttl=3600,
            ),
            ServiceDnsRecord(
                type="AAAA",
                name="password",
                content=network_utils.get_ip6(),
                ttl=3600,
            ),
        ]

    def move_to_volume(self, volume: BlockDevice) -> Job:
        pass
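
Editor note: `DummyService` binds its folder list at subclass creation time through `__init_subclass__`. A hypothetical usage sketch (the temporary directory layout is an assumption; `__init__` writes the status file next to the first declared folder):

```python
import os
import tempfile

from selfprivacy_api.services.test_service import DummyService

workdir = tempfile.mkdtemp()
folder = os.path.join(workdir, "opt")
os.makedirs(folder)


class FolderedDummy(DummyService, folders=[folder]):
    pass


service = FolderedDummy()  # writes ACTIVE into <workdir>/service_status
assert service.get_folders() == [folder]
```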

selfprivacy_api/services/test_service/bitwarden.svg (new file, 3 lines)
@@ -0,0 +1,3 @@
<svg width="24" height="24" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M5.125 2C4.2962 2 3.50134 2.32924 2.91529 2.91529C2.32924 3.50134 2 4.2962 2 5.125L2 18.875C2 19.7038 2.32924 20.4987 2.91529 21.0847C3.50134 21.6708 4.2962 22 5.125 22H18.875C19.7038 22 20.4987 21.6708 21.0847 21.0847C21.6708 20.4987 22 19.7038 22 18.875V5.125C22 4.2962 21.6708 3.50134 21.0847 2.91529C20.4987 2.32924 19.7038 2 18.875 2H5.125ZM6.25833 4.43333H17.7583C17.9317 4.43333 18.0817 4.49667 18.2083 4.62333C18.2688 4.68133 18.3168 4.7511 18.3494 4.82835C18.3819 4.9056 18.3983 4.98869 18.3975 5.0725V12.7392C18.3975 13.3117 18.2858 13.8783 18.0633 14.4408C17.8558 14.9751 17.5769 15.4789 17.2342 15.9383C16.8824 16.3987 16.4882 16.825 16.0567 17.2117C15.6008 17.6242 15.18 17.9667 14.7942 18.24C14.4075 18.5125 14.005 18.77 13.5858 19.0133C13.1667 19.2558 12.8692 19.4208 12.6925 19.5075C12.5158 19.5942 12.375 19.6608 12.2675 19.7075C12.1872 19.7472 12.0987 19.7674 12.0092 19.7667C11.919 19.7674 11.8299 19.7468 11.7492 19.7067C11.6062 19.6429 11.4645 19.5762 11.3242 19.5067C11.0218 19.3511 10.7242 19.1866 10.4317 19.0133C10.0175 18.7738 9.6143 18.5158 9.22333 18.24C8.7825 17.9225 8.36093 17.5791 7.96083 17.2117C7.52907 16.825 7.13456 16.3987 6.7825 15.9383C6.44006 15.4788 6.16141 14.9751 5.95417 14.4408C5.73555 13.9 5.62213 13.3225 5.62 12.7392V5.0725C5.62 4.89917 5.68333 4.75 5.80917 4.6225C5.86726 4.56188 5.93717 4.51382 6.01457 4.48129C6.09196 4.44875 6.17521 4.43243 6.25917 4.43333H6.25833ZM12.0083 6.35V17.7C12.8 17.2817 13.5092 16.825 14.135 16.3333C15.6992 15.1083 16.4808 13.9108 16.4808 12.7392V6.35H12.0083Z" fill="black"/>
</svg>

selfprivacy_api/services/test_service/icon.py (new file, 5 lines)
@@ -0,0 +1,5 @@
BITWARDEN_ICON = """
<svg width="24" height="24" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M5.125 2C4.2962 2 3.50134 2.32924 2.91529 2.91529C2.32924 3.50134 2 4.2962 2 5.125L2 18.875C2 19.7038 2.32924 20.4987 2.91529 21.0847C3.50134 21.6708 4.2962 22 5.125 22H18.875C19.7038 22 20.4987 21.6708 21.0847 21.0847C21.6708 20.4987 22 19.7038 22 18.875V5.125C22 4.2962 21.6708 3.50134 21.0847 2.91529C20.4987 2.32924 19.7038 2 18.875 2H5.125ZM6.25833 4.43333H17.7583C17.9317 4.43333 18.0817 4.49667 18.2083 4.62333C18.2688 4.68133 18.3168 4.7511 18.3494 4.82835C18.3819 4.9056 18.3983 4.98869 18.3975 5.0725V12.7392C18.3975 13.3117 18.2858 13.8783 18.0633 14.4408C17.8558 14.9751 17.5769 15.4789 17.2342 15.9383C16.8824 16.3987 16.4882 16.825 16.0567 17.2117C15.6008 17.6242 15.18 17.9667 14.7942 18.24C14.4075 18.5125 14.005 18.77 13.5858 19.0133C13.1667 19.2558 12.8692 19.4208 12.6925 19.5075C12.5158 19.5942 12.375 19.6608 12.2675 19.7075C12.1872 19.7472 12.0987 19.7674 12.0092 19.7667C11.919 19.7674 11.8299 19.7468 11.7492 19.7067C11.6062 19.6429 11.4645 19.5762 11.3242 19.5067C11.0218 19.3511 10.7242 19.1866 10.4317 19.0133C10.0175 18.7738 9.6143 18.5158 9.22333 18.24C8.7825 17.9225 8.36093 17.5791 7.96083 17.2117C7.52907 16.825 7.13456 16.3987 6.7825 15.9383C6.44006 15.4788 6.16141 14.9751 5.95417 14.4408C5.73555 13.9 5.62213 13.3225 5.62 12.7392V5.0725C5.62 4.89917 5.68333 4.75 5.80917 4.6225C5.86726 4.56188 5.93717 4.51382 6.01457 4.48129C6.09196 4.44875 6.17521 4.43243 6.25917 4.43333H6.25833ZM12.0083 6.35V17.7C12.8 17.2817 13.5092 16.825 14.135 16.3333C15.6992 15.1083 16.4808 13.9108 16.4808 12.7392V6.35H12.0083Z" fill="black"/>
</svg>
"""

@@ -1,5 +1,5 @@
from selfprivacy_api.utils.huey import huey
from selfprivacy_api.jobs.test import test_job
from selfprivacy_api.restic_controller.tasks import *
from selfprivacy_api.backup.tasks import *
from selfprivacy_api.services.generic_service_mover import move_service
from selfprivacy_api.jobs.nix_collect_garbage import calculate_and_clear_dead_packages

@@ -71,6 +71,12 @@ class BlockDevice:
    def __hash__(self):
        return hash(self.name)

    def is_root(self) -> bool:
        """
        Return True if the block device is the root device.
        """
        return "/" in self.mountpoints

    def stats(self) -> typing.Dict[str, typing.Any]:
        """
        Update current data and return a dictionary of stats.

@@ -175,6 +181,9 @@ class BlockDevices(metaclass=SingletonMetaclass):
            # Ignore devices with type "rom"
            if device["type"] == "rom":
                continue
            # Ignore iso9660 devices
            if device["fstype"] == "iso9660":
                continue
            if device["fstype"] is None:
                if "children" in device:
                    for child in device["children"]:

@@ -218,3 +227,12 @@ class BlockDevices(metaclass=SingletonMetaclass):
            if mountpoint in block_device.mountpoints:
                block_devices.append(block_device)
        return block_devices

    def get_root_block_device(self) -> BlockDevice:
        """
        Return the root block device.
        """
        for block_device in self.block_devices:
            if "/" in block_device.mountpoints:
                return block_device
        raise RuntimeError("No root block device found")
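
Editor note: `is_root()` and `get_root_block_device()` are what the new `Service.get_drive()` above uses to replace the hard-coded `"sda1"`. A usage sketch, assuming a live system where lsblk data has been loaded:

```python
from selfprivacy_api.utils.block_devices import BlockDevices

root = BlockDevices().get_root_block_device()
assert root.is_root()  # "/" is among its mountpoints
print(root.name)       # e.g. "sda1" or "vda1", depending on the machine
```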

@@ -11,4 +11,5 @@ test_mode = os.environ.get("TEST_MODE")
huey = SqliteHuey(
    HUEY_DATABASE,
    immediate=test_mode == "true",
    utc=True,
)

selfprivacy_api/utils/redis_model_storage.py (new file, 30 lines)
@@ -0,0 +1,30 @@
from datetime import datetime
from typing import Optional


def store_model_as_hash(redis, redis_key, model):
    for key, value in model.dict().items():
        if isinstance(value, datetime):
            value = value.isoformat()
        redis.hset(redis_key, key, str(value))


def hash_as_model(redis, redis_key: str, model_class):
    token_dict = _model_dict_from_hash(redis, redis_key)
    if token_dict is not None:
        return model_class(**token_dict)
    return None


def _prepare_model_dict(d: dict):
    for key in d.keys():
        if d[key] == "None":
            d[key] = None


def _model_dict_from_hash(redis, redis_key: str) -> Optional[dict]:
    if redis.exists(redis_key):
        token_dict = redis.hgetall(redis_key)
        _prepare_model_dict(token_dict)
        return token_dict
    return None
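
Editor note: a hypothetical round trip through the new hash-storage helpers. It assumes a pydantic model and a redis client created with `decode_responses=True`, since `hgetall` must return strings for the `"None"` sentinel check to work:

```python
from datetime import datetime
from typing import Optional

import redis
from pydantic import BaseModel

from selfprivacy_api.utils.redis_model_storage import hash_as_model, store_model_as_hash


class Token(BaseModel):
    name: str
    created_at: datetime
    last_used: Optional[datetime]


connection = redis.Redis(decode_responses=True)
token = Token(name="test", created_at=datetime.now(), last_used=None)

store_model_as_hash(connection, "token:test", token)       # datetimes become ISO strings
restored = hash_as_model(connection, "token:test", Token)  # "None" becomes None again
assert restored == token
```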

@@ -1,9 +1,9 @@
"""
Redis pool module for selfprivacy_api
"""
from os import environ
import redis
from selfprivacy_api.utils.singleton_metaclass import SingletonMetaclass
from os import environ

REDIS_SOCKET = "/run/redis-sp-api/redis.sock"

@@ -14,7 +14,7 @@ class RedisPool(metaclass=SingletonMetaclass):
    """

    def __init__(self):
        if "USE_REDIS_PORT" in environ.keys():
        if "USE_REDIS_PORT" in environ:
            self._pool = redis.ConnectionPool(
                host="127.0.0.1",
                port=int(environ["USE_REDIS_PORT"]),

selfprivacy_api/utils/waitloop.py (new file, 20 lines)
@@ -0,0 +1,20 @@
from time import sleep
from typing import Callable
from typing import Optional


def wait_until_true(
    readiness_checker: Callable[[], bool],
    *,
    interval: float = 0.1,
    timeout_sec: Optional[float] = None
):
    elapsed = 0.0
    if timeout_sec is None:
        timeout_sec = 10e16

    while (not readiness_checker()) and elapsed < timeout_sec:
        sleep(interval)
        elapsed += interval
    if elapsed > timeout_sec:
        raise TimeoutError()
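
Editor note: `wait_until_true` is the polling loop behind `StoppedService` above. A usage sketch:

```python
from selfprivacy_api.utils.waitloop import wait_until_true

box = {"ready": False}


def make_ready():
    box["ready"] = True


make_ready()
# Polls every 0.05 s until the lambda is true, raising TimeoutError only
# when elapsed time strictly exceeds timeout_sec without success.
wait_until_true(lambda: box["ready"], interval=0.05, timeout_sec=5.0)
```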

setup.py (2 lines changed)
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages

setup(
    name="selfprivacy_api",
    version="2.1.2",
    version="2.2.1",
    packages=find_packages(),
    scripts=[
        "selfprivacy_api/app.py",

@@ -13,6 +13,9 @@ let
      mnemonic
      coverage
      pylint
      rope
      mypy
      pylsp-mypy
      pydantic
      typing-extensions
      psutil

@@ -21,6 +24,8 @@ let
      uvicorn
      redis
      strawberry-graphql
      flake8-bugbear
      flake8
    ]);
in
pkgs.mkShell {

@@ -29,6 +34,7 @@ pkgs.mkShell {
    pkgs.black
    pkgs.redis
    pkgs.restic
    pkgs.rclone
  ];
  shellHook = ''
    PYTHONPATH=${sp-python}/${sp-python.sitePackages}

@@ -36,7 +42,8 @@ pkgs.mkShell {
    # for example. printenv <Name> will not fetch the value of an attribute.
    export USE_REDIS_PORT=6379
    pkill redis-server
    redis-server --bind 127.0.0.1 --port $USE_REDIS_PORT >/dev/null &
    sleep 2
    setsid redis-server --bind 127.0.0.1 --port $USE_REDIS_PORT >/dev/null 2>/dev/null &
    # maybe set more env-vars
  '';
}

@@ -1,96 +0,0 @@
diff --git a/pyproject.toml b/pyproject.toml
index 0cbf2ef..7736e92 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -51,7 +51,6 @@ python-multipart = "^0.0.5"
 sanic = {version = ">=20.12.2,<22.0.0", optional = true}
 aiohttp = {version = "^3.7.4.post0", optional = true}
 fastapi = {version = ">=0.65.2", optional = true}
-"backports.cached-property" = "^1.0.1"

 [tool.poetry.dev-dependencies]
 pytest = "^7.1"
diff --git a/strawberry/directive.py b/strawberry/directive.py
index 491e390..26ba345 100644
--- a/strawberry/directive.py
+++ b/strawberry/directive.py
@@ -1,10 +1,10 @@
 from __future__ import annotations

 import dataclasses
+from functools import cached_property
 import inspect
 from typing import Any, Callable, List, Optional, TypeVar

-from backports.cached_property import cached_property
 from typing_extensions import Annotated

 from graphql import DirectiveLocation
diff --git a/strawberry/extensions/tracing/datadog.py b/strawberry/extensions/tracing/datadog.py
index 01fba20..7c06950 100644
--- a/strawberry/extensions/tracing/datadog.py
+++ b/strawberry/extensions/tracing/datadog.py
@@ -1,8 +1,8 @@
 import hashlib
+from functools import cached_property
 from inspect import isawaitable
 from typing import Optional

-from backports.cached_property import cached_property
 from ddtrace import tracer

 from strawberry.extensions import Extension
diff --git a/strawberry/field.py b/strawberry/field.py
index 80ed12a..f1bf2e9 100644
--- a/strawberry/field.py
+++ b/strawberry/field.py
@@ -1,5 +1,6 @@
 import builtins
 import dataclasses
+from functools import cached_property
 import inspect
 import sys
 from typing import (
@@ -18,7 +19,6 @@ from typing import (
     overload,
 )

-from backports.cached_property import cached_property
 from typing_extensions import Literal

 from strawberry.annotation import StrawberryAnnotation
diff --git a/strawberry/types/fields/resolver.py b/strawberry/types/fields/resolver.py
index c5b3edd..f4112ce 100644
--- a/strawberry/types/fields/resolver.py
+++ b/strawberry/types/fields/resolver.py
@@ -1,6 +1,7 @@
 from __future__ import annotations as _

 import builtins
+from functools import cached_property
 import inspect
 import sys
 import warnings
@@ -22,7 +23,6 @@ from typing import (  # type: ignore[attr-defined]
     _eval_type,
 )

-from backports.cached_property import cached_property
 from typing_extensions import Annotated, Protocol, get_args, get_origin

 from strawberry.annotation import StrawberryAnnotation
diff --git a/strawberry/types/info.py b/strawberry/types/info.py
index a172c04..475a3ee 100644
--- a/strawberry/types/info.py
+++ b/strawberry/types/info.py
@@ -1,9 +1,8 @@
 import dataclasses
+from functools import cached_property
 import warnings
 from typing import TYPE_CHECKING, Any, Dict, Generic, List, Optional, TypeVar, Union

-from backports.cached_property import cached_property
-
 from graphql import GraphQLResolveInfo, OperationDefinitionNode
 from graphql.language import FieldNode
 from graphql.pyutils.path import Path

@@ -24,5 +24,9 @@ def generate_users_query(query_array):
    return "query TestUsers {\n users {" + "\n".join(query_array) + "}\n}"


def generate_backup_query(query_array):
    return "query TestBackup {\n backup {" + "\n".join(query_array) + "}\n}"


def mnemonic_to_hex(mnemonic):
    return Mnemonic(language="english").to_entropy(mnemonic).hex()

@@ -3,6 +3,8 @@
# pylint: disable=unused-argument
import os
import pytest
from os import path

from fastapi.testclient import TestClient


@@ -10,6 +12,10 @@ def pytest_generate_tests(metafunc):
    os.environ["TEST_MODE"] = "true"


def global_data_dir():
    return path.join(path.dirname(__file__), "data")


@pytest.fixture
def tokens_file(mocker, shared_datadir):
    """Mock tokens file."""

@@ -26,6 +32,20 @@ def jobs_file(mocker, shared_datadir):
    return mock


@pytest.fixture
def generic_userdata(mocker, tmpdir):
    filename = "turned_on.json"
    source_path = path.join(global_data_dir(), filename)
    userdata_path = path.join(tmpdir, filename)

    with open(userdata_path, "w") as file:
        with open(source_path, "r") as source:
            file.write(source.read())

    mock = mocker.patch("selfprivacy_api.utils.USERDATA_FILE", new=userdata_path)
    return mock


@pytest.fixture
def huey_database(mocker, shared_datadir):
    """Mock huey database."""

tests/data/turned_on.json (new file, 60 lines)
@@ -0,0 +1,60 @@
{
    "api": {
        "token": "TEST_TOKEN",
        "enableSwagger": false
    },
    "bitwarden": {
        "enable": true
    },
    "databasePassword": "PASSWORD",
    "domain": "test.tld",
    "hashedMasterPassword": "HASHED_PASSWORD",
    "hostname": "test-instance",
    "nextcloud": {
        "adminPassword": "ADMIN",
        "databasePassword": "ADMIN",
        "enable": true
    },
    "resticPassword": "PASS",
    "ssh": {
        "enable": true,
        "passwordAuthentication": true,
        "rootKeys": [
            "ssh-ed25519 KEY test@pc"
        ]
    },
    "username": "tester",
    "gitea": {
        "enable": true
    },
    "ocserv": {
        "enable": true
    },
    "pleroma": {
        "enable": true
    },
    "jitsi": {
        "enable": true
    },
    "autoUpgrade": {
        "enable": true,
        "allowReboot": true
    },
    "timezone": "Europe/Moscow",
    "sshKeys": [
        "ssh-rsa KEY test@pc"
    ],
    "dns": {
        "provider": "CLOUDFLARE",
        "apiKey": "TOKEN"
    },
    "server": {
        "provider": "HETZNER"
    },
    "backup": {
        "provider": "BACKBLAZE",
        "accountId": "ID",
        "accountKey": "KEY",
        "bucket": "selfprivacy"
    }
}

@@ -488,3 +488,21 @@ def test_get_block_devices_by_mountpoint(lsblk_full_mock, authorized_client):
def test_get_block_devices_by_mountpoint_no_match(lsblk_full_mock, authorized_client):
    block_devices = BlockDevices().get_block_devices_by_mountpoint("/foo")
    assert len(block_devices) == 0


def test_get_root_block_device(lsblk_full_mock, authorized_client):
    block_device = BlockDevices().get_root_block_device()
    assert block_device is not None
    assert block_device.name == "sda1"
    assert block_device.path == "/dev/sda1"
    assert block_device.fsavail == "4605702144"
    assert block_device.fssize == "19814920192"
    assert block_device.fstype == "ext4"
    assert block_device.fsused == "14353719296"
    assert block_device.mountpoints == ["/nix/store", "/"]
    assert block_device.label is None
    assert block_device.uuid == "ec80c004-baec-4a2c-851d-0e1807135511"
    assert block_device.size == "20210236928"
    assert block_device.model is None
    assert block_device.serial is None
    assert block_device.type == "part"
372
tests/test_graphql/test_api_backup.py
Normal file
372
tests/test_graphql/test_api_backup.py
Normal file
|
@ -0,0 +1,372 @@
|
|||
from os import path
|
||||
from tests.test_graphql.test_backup import dummy_service, backups, raw_dummy_service
|
||||
from tests.common import generate_backup_query
|
||||
|
||||
|
||||
from selfprivacy_api.graphql.common_types.service import service_to_graphql_service
|
||||
from selfprivacy_api.jobs import Jobs, JobStatus
|
||||
|
||||
API_RELOAD_SNAPSHOTS = """
|
||||
mutation TestSnapshotsReload {
|
||||
backup {
|
||||
forceSnapshotsReload {
|
||||
success
|
||||
message
|
||||
code
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
API_SET_AUTOBACKUP_PERIOD_MUTATION = """
|
||||
mutation TestAutobackupPeriod($period: Int) {
|
||||
backup {
|
||||
setAutobackupPeriod(period: $period) {
|
||||
success
|
||||
message
|
||||
code
|
||||
configuration {
|
||||
provider
|
||||
encryptionKey
|
||||
isInitialized
|
||||
autobackupPeriod
|
||||
locationName
|
||||
locationId
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
API_REMOVE_REPOSITORY_MUTATION = """
|
||||
mutation TestRemoveRepo {
|
||||
backup {
|
||||
removeRepository {
|
||||
success
|
||||
message
|
||||
code
|
||||
configuration {
|
||||
provider
|
||||
encryptionKey
|
||||
isInitialized
|
||||
autobackupPeriod
|
||||
locationName
|
||||
locationId
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
API_INIT_MUTATION = """
|
||||
mutation TestInitRepo($input: InitializeRepositoryInput!) {
|
||||
backup {
|
||||
initializeRepository(repository: $input) {
|
||||
success
|
||||
message
|
||||
code
|
||||
configuration {
|
||||
provider
|
||||
encryptionKey
|
||||
isInitialized
|
||||
autobackupPeriod
|
||||
locationName
|
||||
locationId
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
API_RESTORE_MUTATION = """
|
||||
mutation TestRestoreService($snapshot_id: String!) {
|
||||
backup {
|
||||
restoreBackup(snapshotId: $snapshot_id) {
|
||||
success
|
||||
message
|
||||
code
|
||||
job {
|
||||
uid
|
||||
status
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
API_SNAPSHOTS_QUERY = """
|
||||
allSnapshots {
|
||||
id
|
||||
service {
|
||||
id
|
||||
}
|
||||
createdAt
|
||||
}
|
||||
"""
|
||||
|
||||
API_BACK_UP_MUTATION = """
|
||||
mutation TestBackupService($service_id: String!) {
|
||||
backup {
|
||||
startBackup(serviceId: $service_id) {
|
||||
success
|
||||
message
|
||||
code
|
||||
job {
|
||||
uid
|
||||
status
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
|
||||
def api_restore(authorized_client, snapshot_id):
    response = authorized_client.post(
        "/graphql",
        json={
            "query": API_RESTORE_MUTATION,
            "variables": {"snapshot_id": snapshot_id},
        },
    )
    return response


def api_backup(authorized_client, service):
    response = authorized_client.post(
        "/graphql",
        json={
            "query": API_BACK_UP_MUTATION,
            "variables": {"service_id": service.get_id()},
        },
    )
    return response


def api_set_period(authorized_client, period):
    response = authorized_client.post(
        "/graphql",
        json={
            "query": API_SET_AUTOBACKUP_PERIOD_MUTATION,
            "variables": {"period": period},
        },
    )
    return response


def api_remove(authorized_client):
    response = authorized_client.post(
        "/graphql",
        json={
            "query": API_REMOVE_REPOSITORY_MUTATION,
            "variables": {},
        },
    )
    return response


def api_reload_snapshots(authorized_client):
    response = authorized_client.post(
        "/graphql",
        json={
            "query": API_RELOAD_SNAPSHOTS,
            "variables": {},
        },
    )
    return response


def api_init_without_key(
    authorized_client, kind, login, password, location_name, location_id
):
    response = authorized_client.post(
        "/graphql",
        json={
            "query": API_INIT_MUTATION,
            "variables": {
                "input": {
                    "provider": kind,
                    "locationId": location_id,
                    "locationName": location_name,
                    "login": login,
                    "password": password,
                }
            },
        },
    )
    return response


def assert_ok(data):
    assert data["code"] == 200
    assert data["success"] is True


def get_data(response):
    assert response.status_code == 200
    response = response.json()
    if (
        "errors" in response.keys()
    ):  # convenience for debugging, this will display the error
        assert response["errors"] == []
    assert response["data"] is not None
    data = response["data"]
    return data


def api_snapshots(authorized_client):
    response = authorized_client.post(
        "/graphql",
        json={"query": generate_backup_query([API_SNAPSHOTS_QUERY])},
    )
    data = get_data(response)
    result = data["backup"]["allSnapshots"]
    assert result is not None
    return result

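# generate_backup_query comes from tests.common and is not part of this commit;
# judging by its use in api_snapshots above, it presumably wraps bare field
# selections such as API_SNAPSHOTS_QUERY into a complete query document,
# roughly (a sketch, not the actual helper):
#
#     def generate_backup_query(fields):
#         return "query {\n    backup {\n" + "\n".join(fields) + "\n    }\n}"
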
def test_dummy_service_convertible_to_gql(dummy_service):
    gql_service = service_to_graphql_service(dummy_service)
    assert gql_service is not None


def test_snapshots_empty(authorized_client, dummy_service):
    snaps = api_snapshots(authorized_client)
    assert snaps == []


def test_start_backup(authorized_client, dummy_service):
    response = api_backup(authorized_client, dummy_service)
    data = get_data(response)["backup"]["startBackup"]
    assert data["success"] is True
    job = data["job"]

    assert Jobs.get_job(job["uid"]).status == JobStatus.FINISHED
    snaps = api_snapshots(authorized_client)
    assert len(snaps) == 1
    snap = snaps[0]

    assert snap["id"] is not None
    assert snap["id"] != ""
    assert snap["service"]["id"] == "testservice"


def test_restore(authorized_client, dummy_service):
    api_backup(authorized_client, dummy_service)
    snap = api_snapshots(authorized_client)[0]
    assert snap["id"] is not None

    response = api_restore(authorized_client, snap["id"])
    data = get_data(response)["backup"]["restoreBackup"]
    assert data["success"] is True
    job = data["job"]

    assert Jobs.get_job(job["uid"]).status == JobStatus.FINISHED


def test_reinit(authorized_client, dummy_service, tmpdir):
    test_repo_path = path.join(tmpdir, "not_at_all_sus")
    response = api_init_without_key(
        authorized_client, "FILE", "", "", test_repo_path, ""
    )
    data = get_data(response)["backup"]["initializeRepository"]
    assert_ok(data)
    configuration = data["configuration"]
    assert configuration["provider"] == "FILE"
    assert configuration["locationId"] == ""
    assert configuration["locationName"] == test_repo_path
    assert len(configuration["encryptionKey"]) > 1
    assert configuration["isInitialized"] is True

    response = api_backup(authorized_client, dummy_service)
    data = get_data(response)["backup"]["startBackup"]
    assert data["success"] is True
    job = data["job"]

    assert Jobs.get_job(job["uid"]).status == JobStatus.FINISHED


def test_remove(authorized_client, generic_userdata):
    response = api_remove(authorized_client)
    data = get_data(response)["backup"]["removeRepository"]
    assert_ok(data)

    configuration = data["configuration"]
    assert configuration["provider"] == "NONE"
    assert configuration["locationId"] == ""
    assert configuration["locationName"] == ""
    # still generated every time it is missing
    assert len(configuration["encryptionKey"]) > 1
    assert configuration["isInitialized"] is False


def test_autobackup_period_nonzero(authorized_client):
    new_period = 11
    response = api_set_period(authorized_client, new_period)
    data = get_data(response)["backup"]["setAutobackupPeriod"]
    assert_ok(data)

    configuration = data["configuration"]
    assert configuration["autobackupPeriod"] == new_period


def test_autobackup_period_zero(authorized_client):
    new_period = 0
    # since it is none by default, we'd better first set it to something non-negative
    response = api_set_period(authorized_client, 11)
    # and now we nullify it
    response = api_set_period(authorized_client, new_period)
    data = get_data(response)["backup"]["setAutobackupPeriod"]
    assert_ok(data)

    configuration = data["configuration"]
    assert configuration["autobackupPeriod"] is None


def test_autobackup_period_none(authorized_client):
    # since it is none by default, we'd better first set it to something non-negative
    response = api_set_period(authorized_client, 11)
    # and now we nullify it
    response = api_set_period(authorized_client, None)
    data = get_data(response)["backup"]["setAutobackupPeriod"]
    assert_ok(data)

    configuration = data["configuration"]
    assert configuration["autobackupPeriod"] is None


def test_autobackup_period_negative(authorized_client):
    # since it is none by default, we'd better first set it to something non-negative
    response = api_set_period(authorized_client, 11)
    # and now we set it negative, which should also disable it
    response = api_set_period(authorized_client, -12)
    data = get_data(response)["backup"]["setAutobackupPeriod"]
    assert_ok(data)

    configuration = data["configuration"]
    assert configuration["autobackupPeriod"] is None

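# Taken together, the three tests above pin down the normalization rule:
# zero, negative, and null periods are all treated as "autobackup disabled"
# and read back as a null autobackupPeriod.
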
# We cannot really check the effect at this level; we leave that to backend tests.
# But we still make it run in both empty and full scenarios and ask for snaps afterwards.
def test_reload_snapshots_bare_bare_bare(authorized_client, dummy_service):
    api_remove(authorized_client)

    response = api_reload_snapshots(authorized_client)
    data = get_data(response)["backup"]["forceSnapshotsReload"]
    assert_ok(data)

    snaps = api_snapshots(authorized_client)
    assert snaps == []


def test_reload_snapshots(authorized_client, dummy_service):
    response = api_backup(authorized_client, dummy_service)
    data = get_data(response)["backup"]["startBackup"]

    response = api_reload_snapshots(authorized_client)
    data = get_data(response)["backup"]["forceSnapshotsReload"]
    assert_ok(data)

    snaps = api_snapshots(authorized_client)
    assert len(snaps) == 1
@@ -75,12 +75,14 @@ def test_graphql_tokens_info_unauthorized(client, tokens_file):

DELETE_TOKEN_MUTATION = """
mutation DeleteToken($device: String!) {
    api {
        deleteDeviceApiToken(device: $device) {
            success
            message
            code
        }
    }
}
"""


@@ -110,9 +112,9 @@ def test_graphql_delete_token(authorized_client, tokens_file):
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["deleteDeviceApiToken"]["success"] is True
    assert response.json()["data"]["deleteDeviceApiToken"]["message"] is not None
    assert response.json()["data"]["deleteDeviceApiToken"]["code"] == 200
    assert response.json()["data"]["api"]["deleteDeviceApiToken"]["success"] is True
    assert response.json()["data"]["api"]["deleteDeviceApiToken"]["message"] is not None
    assert response.json()["data"]["api"]["deleteDeviceApiToken"]["code"] == 200
    assert read_json(tokens_file) == {
        "tokens": [
            {
@@ -136,13 +138,16 @@ def test_graphql_delete_self_token(authorized_client, tokens_file):
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["deleteDeviceApiToken"]["success"] is False
    assert response.json()["data"]["deleteDeviceApiToken"]["message"] is not None
    assert response.json()["data"]["deleteDeviceApiToken"]["code"] == 400
    assert response.json()["data"]["api"]["deleteDeviceApiToken"]["success"] is False
    assert response.json()["data"]["api"]["deleteDeviceApiToken"]["message"] is not None
    assert response.json()["data"]["api"]["deleteDeviceApiToken"]["code"] == 400
    assert read_json(tokens_file) == TOKENS_FILE_CONTETS


def test_graphql_delete_nonexistent_token(authorized_client, tokens_file):
def test_graphql_delete_nonexistent_token(
    authorized_client,
    tokens_file,
):
    response = authorized_client.post(
        "/graphql",
        json={
@@ -154,14 +159,15 @@ def test_graphql_delete_nonexistent_token(authorized_client, tokens_file):
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["deleteDeviceApiToken"]["success"] is False
    assert response.json()["data"]["deleteDeviceApiToken"]["message"] is not None
    assert response.json()["data"]["deleteDeviceApiToken"]["code"] == 404
    assert response.json()["data"]["api"]["deleteDeviceApiToken"]["success"] is False
    assert response.json()["data"]["api"]["deleteDeviceApiToken"]["message"] is not None
    assert response.json()["data"]["api"]["deleteDeviceApiToken"]["code"] == 404
    assert read_json(tokens_file) == TOKENS_FILE_CONTETS


REFRESH_TOKEN_MUTATION = """
mutation RefreshToken {
    api {
        refreshDeviceApiToken {
            success
            message
@@ -169,6 +175,7 @@ mutation RefreshToken {
            token
        }
    }
}
"""


@@ -181,19 +188,25 @@ def test_graphql_refresh_token_unauthorized(client, tokens_file):
    assert response.json()["data"] is None


def test_graphql_refresh_token(authorized_client, tokens_file, token_repo):
def test_graphql_refresh_token(
    authorized_client,
    tokens_file,
    token_repo,
):
    response = authorized_client.post(
        "/graphql",
        json={"query": REFRESH_TOKEN_MUTATION},
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["refreshDeviceApiToken"]["success"] is True
    assert response.json()["data"]["refreshDeviceApiToken"]["message"] is not None
    assert response.json()["data"]["refreshDeviceApiToken"]["code"] == 200
    assert response.json()["data"]["api"]["refreshDeviceApiToken"]["success"] is True
    assert (
        response.json()["data"]["api"]["refreshDeviceApiToken"]["message"] is not None
    )
    assert response.json()["data"]["api"]["refreshDeviceApiToken"]["code"] == 200
    token = token_repo.get_token_by_name("test_token")
    assert token == Token(
        token=response.json()["data"]["refreshDeviceApiToken"]["token"],
        token=response.json()["data"]["api"]["refreshDeviceApiToken"]["token"],
        device_name="test_token",
        created_at=datetime.datetime(2022, 1, 14, 8, 31, 10, 789314),
    )
@@ -201,6 +214,7 @@ def test_graphql_refresh_token(authorized_client, tokens_file, token_repo):

NEW_DEVICE_KEY_MUTATION = """
mutation NewDeviceKey {
    api {
        getNewDeviceApiKey {
            success
            message
@@ -208,10 +222,14 @@ mutation NewDeviceKey {
            key
        }
    }
}
"""


def test_graphql_get_new_device_auth_key_unauthorized(client, tokens_file):
def test_graphql_get_new_device_auth_key_unauthorized(
    client,
    tokens_file,
):
    response = client.post(
        "/graphql",
        json={"query": NEW_DEVICE_KEY_MUTATION},
@@ -220,22 +238,26 @@ def test_graphql_get_new_device_auth_key_unauthorized(client, tokens_file):
    assert response.json()["data"] is None


def test_graphql_get_new_device_auth_key(authorized_client, tokens_file):
def test_graphql_get_new_device_auth_key(
    authorized_client,
    tokens_file,
):
    response = authorized_client.post(
        "/graphql",
        json={"query": NEW_DEVICE_KEY_MUTATION},
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["getNewDeviceApiKey"]["success"] is True
    assert response.json()["data"]["getNewDeviceApiKey"]["message"] is not None
    assert response.json()["data"]["getNewDeviceApiKey"]["code"] == 200
    assert response.json()["data"]["api"]["getNewDeviceApiKey"]["success"] is True
    assert response.json()["data"]["api"]["getNewDeviceApiKey"]["message"] is not None
    assert response.json()["data"]["api"]["getNewDeviceApiKey"]["code"] == 200
    assert (
        response.json()["data"]["getNewDeviceApiKey"]["key"].split(" ").__len__() == 12
        response.json()["data"]["api"]["getNewDeviceApiKey"]["key"].split(" ").__len__()
        == 12
    )
    token = (
        Mnemonic(language="english")
        .to_entropy(response.json()["data"]["getNewDeviceApiKey"]["key"])
        .to_entropy(response.json()["data"]["api"]["getNewDeviceApiKey"]["key"])
        .hex()
    )
    assert read_json(tokens_file)["new_device"]["token"] == token
@@ -243,20 +265,25 @@ def test_graphql_get_new_device_auth_key(authorized_client, tokens_file):

INVALIDATE_NEW_DEVICE_KEY_MUTATION = """
mutation InvalidateNewDeviceKey {
    api {
        invalidateNewDeviceApiKey {
            success
            message
            code
        }
    }
}
"""


def test_graphql_invalidate_new_device_token_unauthorized(client, tokens_file):
def test_graphql_invalidate_new_device_token_unauthorized(
    client,
    tokens_file,
):
    response = client.post(
        "/graphql",
        json={
            "query": DELETE_TOKEN_MUTATION,
            "query": INVALIDATE_NEW_DEVICE_KEY_MUTATION,
            "variables": {
                "device": "test_token",
            },
@@ -266,22 +293,26 @@ def test_graphql_invalidate_new_device_token_unauthorized(client, tokens_file):
    assert response.json()["data"] is None


def test_graphql_get_and_delete_new_device_key(authorized_client, tokens_file):
def test_graphql_get_and_delete_new_device_key(
    authorized_client,
    tokens_file,
):
    response = authorized_client.post(
        "/graphql",
        json={"query": NEW_DEVICE_KEY_MUTATION},
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["getNewDeviceApiKey"]["success"] is True
    assert response.json()["data"]["getNewDeviceApiKey"]["message"] is not None
    assert response.json()["data"]["getNewDeviceApiKey"]["code"] == 200
    assert response.json()["data"]["api"]["getNewDeviceApiKey"]["success"] is True
    assert response.json()["data"]["api"]["getNewDeviceApiKey"]["message"] is not None
    assert response.json()["data"]["api"]["getNewDeviceApiKey"]["code"] == 200
    assert (
        response.json()["data"]["getNewDeviceApiKey"]["key"].split(" ").__len__() == 12
        response.json()["data"]["api"]["getNewDeviceApiKey"]["key"].split(" ").__len__()
        == 12
    )
    token = (
        Mnemonic(language="english")
        .to_entropy(response.json()["data"]["getNewDeviceApiKey"]["key"])
        .to_entropy(response.json()["data"]["api"]["getNewDeviceApiKey"]["key"])
        .hex()
    )
    assert read_json(tokens_file)["new_device"]["token"] == token
@@ -291,14 +322,20 @@ def test_graphql_get_and_delete_new_device_key(authorized_client, tokens_file):
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["invalidateNewDeviceApiKey"]["success"] is True
    assert response.json()["data"]["invalidateNewDeviceApiKey"]["message"] is not None
    assert response.json()["data"]["invalidateNewDeviceApiKey"]["code"] == 200
    assert (
        response.json()["data"]["api"]["invalidateNewDeviceApiKey"]["success"] is True
    )
    assert (
        response.json()["data"]["api"]["invalidateNewDeviceApiKey"]["message"]
        is not None
    )
    assert response.json()["data"]["api"]["invalidateNewDeviceApiKey"]["code"] == 200
    assert read_json(tokens_file) == TOKENS_FILE_CONTETS


AUTHORIZE_WITH_NEW_DEVICE_KEY_MUTATION = """
mutation AuthorizeWithNewDeviceKey($input: UseNewDeviceKeyInput!) {
    api {
        authorizeWithNewDeviceApiKey(input: $input) {
            success
            message
@@ -306,20 +343,25 @@ mutation AuthorizeWithNewDeviceKey($input: UseNewDeviceKeyInput!) {
            token
        }
    }
}
"""


def test_graphql_get_and_authorize_new_device(client, authorized_client, tokens_file):
def test_graphql_get_and_authorize_new_device(
    client,
    authorized_client,
    tokens_file,
):
    response = authorized_client.post(
        "/graphql",
        json={"query": NEW_DEVICE_KEY_MUTATION},
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["getNewDeviceApiKey"]["success"] is True
    assert response.json()["data"]["getNewDeviceApiKey"]["message"] is not None
    assert response.json()["data"]["getNewDeviceApiKey"]["code"] == 200
    mnemonic_key = response.json()["data"]["getNewDeviceApiKey"]["key"]
    assert response.json()["data"]["api"]["getNewDeviceApiKey"]["success"] is True
    assert response.json()["data"]["api"]["getNewDeviceApiKey"]["message"] is not None
    assert response.json()["data"]["api"]["getNewDeviceApiKey"]["code"] == 200
    mnemonic_key = response.json()["data"]["api"]["getNewDeviceApiKey"]["key"]
    assert mnemonic_key.split(" ").__len__() == 12
    key = Mnemonic(language="english").to_entropy(mnemonic_key).hex()
    assert read_json(tokens_file)["new_device"]["token"] == key
@@ -337,17 +379,24 @@ def test_graphql_get_and_authorize_new_device(client, authorized_client, tokens_
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["authorizeWithNewDeviceApiKey"]["success"] is True
    assert (
        response.json()["data"]["authorizeWithNewDeviceApiKey"]["message"] is not None
        response.json()["data"]["api"]["authorizeWithNewDeviceApiKey"]["success"]
        is True
    )
    assert response.json()["data"]["authorizeWithNewDeviceApiKey"]["code"] == 200
    token = response.json()["data"]["authorizeWithNewDeviceApiKey"]["token"]
    assert (
        response.json()["data"]["api"]["authorizeWithNewDeviceApiKey"]["message"]
        is not None
    )
    assert response.json()["data"]["api"]["authorizeWithNewDeviceApiKey"]["code"] == 200
    token = response.json()["data"]["api"]["authorizeWithNewDeviceApiKey"]["token"]
    assert read_json(tokens_file)["tokens"][2]["token"] == token
    assert read_json(tokens_file)["tokens"][2]["name"] == "new_device"


def test_graphql_authorize_new_device_with_invalid_key(client, tokens_file):
def test_graphql_authorize_new_device_with_invalid_key(
    client,
    tokens_file,
):
    response = client.post(
        "/graphql",
        json={
@@ -362,25 +411,33 @@ def test_graphql_authorize_new_device_with_invalid_key(client, tokens_file):
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["authorizeWithNewDeviceApiKey"]["success"] is False
    assert (
        response.json()["data"]["authorizeWithNewDeviceApiKey"]["message"] is not None
        response.json()["data"]["api"]["authorizeWithNewDeviceApiKey"]["success"]
        is False
    )
    assert response.json()["data"]["authorizeWithNewDeviceApiKey"]["code"] == 404
    assert (
        response.json()["data"]["api"]["authorizeWithNewDeviceApiKey"]["message"]
        is not None
    )
    assert response.json()["data"]["api"]["authorizeWithNewDeviceApiKey"]["code"] == 404
    assert read_json(tokens_file) == TOKENS_FILE_CONTETS


def test_graphql_get_and_authorize_used_key(client, authorized_client, tokens_file):
def test_graphql_get_and_authorize_used_key(
    client,
    authorized_client,
    tokens_file,
):
    response = authorized_client.post(
        "/graphql",
        json={"query": NEW_DEVICE_KEY_MUTATION},
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["getNewDeviceApiKey"]["success"] is True
    assert response.json()["data"]["getNewDeviceApiKey"]["message"] is not None
    assert response.json()["data"]["getNewDeviceApiKey"]["code"] == 200
    mnemonic_key = response.json()["data"]["getNewDeviceApiKey"]["key"]
    assert response.json()["data"]["api"]["getNewDeviceApiKey"]["success"] is True
    assert response.json()["data"]["api"]["getNewDeviceApiKey"]["message"] is not None
    assert response.json()["data"]["api"]["getNewDeviceApiKey"]["code"] == 200
    mnemonic_key = response.json()["data"]["api"]["getNewDeviceApiKey"]["key"]
    assert mnemonic_key.split(" ").__len__() == 12
    key = Mnemonic(language="english").to_entropy(mnemonic_key).hex()
    assert read_json(tokens_file)["new_device"]["token"] == key
@@ -398,14 +455,18 @@ def test_graphql_get_and_authorize_used_key(client, authorized_client, tokens_fi
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["authorizeWithNewDeviceApiKey"]["success"] is True
    assert (
        response.json()["data"]["authorizeWithNewDeviceApiKey"]["message"] is not None
        response.json()["data"]["api"]["authorizeWithNewDeviceApiKey"]["success"]
        is True
    )
    assert response.json()["data"]["authorizeWithNewDeviceApiKey"]["code"] == 200
    assert (
        response.json()["data"]["api"]["authorizeWithNewDeviceApiKey"]["message"]
        is not None
    )
    assert response.json()["data"]["api"]["authorizeWithNewDeviceApiKey"]["code"] == 200
    assert (
        read_json(tokens_file)["tokens"][2]["token"]
        == response.json()["data"]["authorizeWithNewDeviceApiKey"]["token"]
        == response.json()["data"]["api"]["authorizeWithNewDeviceApiKey"]["token"]
    )
    assert read_json(tokens_file)["tokens"][2]["name"] == "new_token"

@@ -415,7 +476,7 @@ def test_graphql_get_and_authorize_used_key(client, authorized_client, tokens_fi
            "query": AUTHORIZE_WITH_NEW_DEVICE_KEY_MUTATION,
            "variables": {
                "input": {
                    "key": mnemonic_key,
                    "key": NEW_DEVICE_KEY_MUTATION,
                    "deviceName": "test_token2",
                }
            },
@@ -423,16 +484,22 @@ def test_graphql_get_and_authorize_used_key(client, authorized_client, tokens_fi
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["authorizeWithNewDeviceApiKey"]["success"] is False
    assert (
        response.json()["data"]["authorizeWithNewDeviceApiKey"]["message"] is not None
        response.json()["data"]["api"]["authorizeWithNewDeviceApiKey"]["success"]
        is False
    )
    assert response.json()["data"]["authorizeWithNewDeviceApiKey"]["code"] == 404
    assert (
        response.json()["data"]["api"]["authorizeWithNewDeviceApiKey"]["message"]
        is not None
    )
    assert response.json()["data"]["api"]["authorizeWithNewDeviceApiKey"]["code"] == 404
    assert read_json(tokens_file)["tokens"].__len__() == 3


def test_graphql_get_and_authorize_key_after_12_minutes(
    client, authorized_client, tokens_file
    client,
    authorized_client,
    tokens_file,
):
    response = authorized_client.post(
        "/graphql",
@@ -440,15 +507,16 @@ def test_graphql_get_and_authorize_key_after_12_minutes(
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["getNewDeviceApiKey"]["success"] is True
    assert response.json()["data"]["getNewDeviceApiKey"]["message"] is not None
    assert response.json()["data"]["getNewDeviceApiKey"]["code"] == 200
    assert response.json()["data"]["api"]["getNewDeviceApiKey"]["success"] is True
    assert response.json()["data"]["api"]["getNewDeviceApiKey"]["message"] is not None
    assert response.json()["data"]["api"]["getNewDeviceApiKey"]["code"] == 200
    assert (
        response.json()["data"]["getNewDeviceApiKey"]["key"].split(" ").__len__() == 12
        response.json()["data"]["api"]["getNewDeviceApiKey"]["key"].split(" ").__len__()
        == 12
    )
    key = (
        Mnemonic(language="english")
        .to_entropy(response.json()["data"]["getNewDeviceApiKey"]["key"])
        .to_entropy(response.json()["data"]["api"]["getNewDeviceApiKey"]["key"])
        .hex()
    )
    assert read_json(tokens_file)["new_device"]["token"] == key
@@ -473,14 +541,21 @@ def test_graphql_get_and_authorize_key_after_12_minutes(
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["authorizeWithNewDeviceApiKey"]["success"] is False
    assert (
        response.json()["data"]["authorizeWithNewDeviceApiKey"]["message"] is not None
        response.json()["data"]["api"]["authorizeWithNewDeviceApiKey"]["success"]
        is False
    )
    assert response.json()["data"]["authorizeWithNewDeviceApiKey"]["code"] == 404
    assert (
        response.json()["data"]["api"]["authorizeWithNewDeviceApiKey"]["message"]
        is not None
    )
    assert response.json()["data"]["api"]["authorizeWithNewDeviceApiKey"]["code"] == 404


def test_graphql_authorize_without_token(client, tokens_file):
def test_graphql_authorize_without_token(
    client,
    tokens_file,
):
    response = client.post(
        "/graphql",
        json={
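The common thread in the hunks above: device and token mutations moved under an
"api" namespace in the GraphQL schema, so every response path gains one level.
A minimal sketch of the access change, in terms of the same test client:

    data = response.json()["data"]
    result = data["refreshDeviceApiToken"]         # before this commit
    result = data["api"]["refreshDeviceApiToken"]  # after this commit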
@@ -57,6 +57,7 @@ def test_graphql_recovery_key_status_when_none_exists(authorized_client, tokens_

API_RECOVERY_KEY_GENERATE_MUTATION = """
mutation TestGenerateRecoveryKey($limits: RecoveryKeyLimitsInput) {
    api {
        getNewRecoveryApiKey(limits: $limits) {
            success
            message
@@ -64,10 +65,12 @@ mutation TestGenerateRecoveryKey($limits: RecoveryKeyLimitsInput) {
            key
        }
    }
}
"""

API_RECOVERY_KEY_USE_MUTATION = """
mutation TestUseRecoveryKey($input: UseRecoveryKeyInput!) {
    api {
        useRecoveryApiKey(input: $input) {
            success
            message
@@ -75,6 +78,7 @@ mutation TestUseRecoveryKey($input: UseRecoveryKeyInput!) {
            token
        }
    }
}
"""


@@ -87,18 +91,20 @@ def test_graphql_generate_recovery_key(client, authorized_client, tokens_file):
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["getNewRecoveryApiKey"]["success"] is True
    assert response.json()["data"]["getNewRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["getNewRecoveryApiKey"]["code"] == 200
    assert response.json()["data"]["getNewRecoveryApiKey"]["key"] is not None
    assert response.json()["data"]["api"]["getNewRecoveryApiKey"]["success"] is True
    assert response.json()["data"]["api"]["getNewRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["api"]["getNewRecoveryApiKey"]["code"] == 200
    assert response.json()["data"]["api"]["getNewRecoveryApiKey"]["key"] is not None
    assert (
        response.json()["data"]["getNewRecoveryApiKey"]["key"].split(" ").__len__()
        response.json()["data"]["api"]["getNewRecoveryApiKey"]["key"]
        .split(" ")
        .__len__()
        == 18
    )
    assert read_json(tokens_file)["recovery_token"] is not None
    time_generated = read_json(tokens_file)["recovery_token"]["date"]
    assert time_generated is not None
    key = response.json()["data"]["getNewRecoveryApiKey"]["key"]
    key = response.json()["data"]["api"]["getNewRecoveryApiKey"]["key"]
    assert (
        datetime.datetime.strptime(time_generated, "%Y-%m-%dT%H:%M:%S.%f")
        - datetime.timedelta(seconds=5)
@@ -136,12 +142,12 @@ def test_graphql_generate_recovery_key(client, authorized_client, tokens_file):
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["useRecoveryApiKey"]["success"] is True
    assert response.json()["data"]["useRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["useRecoveryApiKey"]["code"] == 200
    assert response.json()["data"]["useRecoveryApiKey"]["token"] is not None
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["success"] is True
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["code"] == 200
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["token"] is not None
    assert (
        response.json()["data"]["useRecoveryApiKey"]["token"]
        response.json()["data"]["api"]["useRecoveryApiKey"]["token"]
        == read_json(tokens_file)["tokens"][2]["token"]
    )
    assert read_json(tokens_file)["tokens"][2]["name"] == "new_test_token"
@@ -161,12 +167,12 @@ def test_graphql_generate_recovery_key(client, authorized_client, tokens_file):
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["useRecoveryApiKey"]["success"] is True
    assert response.json()["data"]["useRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["useRecoveryApiKey"]["code"] == 200
    assert response.json()["data"]["useRecoveryApiKey"]["token"] is not None
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["success"] is True
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["code"] == 200
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["token"] is not None
    assert (
        response.json()["data"]["useRecoveryApiKey"]["token"]
        response.json()["data"]["api"]["useRecoveryApiKey"]["token"]
        == read_json(tokens_file)["tokens"][3]["token"]
    )
    assert read_json(tokens_file)["tokens"][3]["name"] == "new_test_token2"
@@ -190,17 +196,19 @@ def test_graphql_generate_recovery_key_with_expiration_date(
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["getNewRecoveryApiKey"]["success"] is True
    assert response.json()["data"]["getNewRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["getNewRecoveryApiKey"]["code"] == 200
    assert response.json()["data"]["getNewRecoveryApiKey"]["key"] is not None
    assert response.json()["data"]["api"]["getNewRecoveryApiKey"]["success"] is True
    assert response.json()["data"]["api"]["getNewRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["api"]["getNewRecoveryApiKey"]["code"] == 200
    assert response.json()["data"]["api"]["getNewRecoveryApiKey"]["key"] is not None
    assert (
        response.json()["data"]["getNewRecoveryApiKey"]["key"].split(" ").__len__()
        response.json()["data"]["api"]["getNewRecoveryApiKey"]["key"]
        .split(" ")
        .__len__()
        == 18
    )
    assert read_json(tokens_file)["recovery_token"] is not None

    key = response.json()["data"]["getNewRecoveryApiKey"]["key"]
    key = response.json()["data"]["api"]["getNewRecoveryApiKey"]["key"]
    assert read_json(tokens_file)["recovery_token"]["expiration"] == expiration_date_str
    assert read_json(tokens_file)["recovery_token"]["token"] == mnemonic_to_hex(key)

@@ -246,12 +254,12 @@ def test_graphql_generate_recovery_key_with_expiration_date(
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["useRecoveryApiKey"]["success"] is True
    assert response.json()["data"]["useRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["useRecoveryApiKey"]["code"] == 200
    assert response.json()["data"]["useRecoveryApiKey"]["token"] is not None
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["success"] is True
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["code"] == 200
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["token"] is not None
    assert (
        response.json()["data"]["useRecoveryApiKey"]["token"]
        response.json()["data"]["api"]["useRecoveryApiKey"]["token"]
        == read_json(tokens_file)["tokens"][2]["token"]
    )

@@ -270,12 +278,12 @@ def test_graphql_generate_recovery_key_with_expiration_date(
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["useRecoveryApiKey"]["success"] is True
    assert response.json()["data"]["useRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["useRecoveryApiKey"]["code"] == 200
    assert response.json()["data"]["useRecoveryApiKey"]["token"] is not None
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["success"] is True
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["code"] == 200
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["token"] is not None
    assert (
        response.json()["data"]["useRecoveryApiKey"]["token"]
        response.json()["data"]["api"]["useRecoveryApiKey"]["token"]
        == read_json(tokens_file)["tokens"][3]["token"]
    )

@@ -299,10 +307,10 @@ def test_graphql_generate_recovery_key_with_expiration_date(
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["useRecoveryApiKey"]["success"] is False
    assert response.json()["data"]["useRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["useRecoveryApiKey"]["code"] == 404
    assert response.json()["data"]["useRecoveryApiKey"]["token"] is None
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["success"] is False
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["code"] == 404
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["token"] is None

    assert read_json(tokens_file)["tokens"] == new_data["tokens"]

@@ -345,10 +353,10 @@ def test_graphql_generate_recovery_key_with_expiration_in_the_past(
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["getNewRecoveryApiKey"]["success"] is False
    assert response.json()["data"]["getNewRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["getNewRecoveryApiKey"]["code"] == 400
    assert response.json()["data"]["getNewRecoveryApiKey"]["key"] is None
    assert response.json()["data"]["api"]["getNewRecoveryApiKey"]["success"] is False
    assert response.json()["data"]["api"]["getNewRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["api"]["getNewRecoveryApiKey"]["code"] == 400
    assert response.json()["data"]["api"]["getNewRecoveryApiKey"]["key"] is None
    assert "recovery_token" not in read_json(tokens_file)

@@ -393,12 +401,12 @@ def test_graphql_generate_recovery_key_with_limited_uses(
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["getNewRecoveryApiKey"]["success"] is True
    assert response.json()["data"]["getNewRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["getNewRecoveryApiKey"]["code"] == 200
    assert response.json()["data"]["getNewRecoveryApiKey"]["key"] is not None
    assert response.json()["data"]["api"]["getNewRecoveryApiKey"]["success"] is True
    assert response.json()["data"]["api"]["getNewRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["api"]["getNewRecoveryApiKey"]["code"] == 200
    assert response.json()["data"]["api"]["getNewRecoveryApiKey"]["key"] is not None

    mnemonic_key = response.json()["data"]["getNewRecoveryApiKey"]["key"]
    mnemonic_key = response.json()["data"]["api"]["getNewRecoveryApiKey"]["key"]
    key = mnemonic_to_hex(mnemonic_key)

    assert read_json(tokens_file)["recovery_token"]["token"] == key
@@ -433,10 +441,10 @@ def test_graphql_generate_recovery_key_with_limited_uses(
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["useRecoveryApiKey"]["success"] is True
    assert response.json()["data"]["useRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["useRecoveryApiKey"]["code"] == 200
    assert response.json()["data"]["useRecoveryApiKey"]["token"] is not None
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["success"] is True
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["code"] == 200
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["token"] is not None

    # Try to get token status
    response = authorized_client.post(
@@ -467,10 +475,10 @@ def test_graphql_generate_recovery_key_with_limited_uses(
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["useRecoveryApiKey"]["success"] is True
    assert response.json()["data"]["useRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["useRecoveryApiKey"]["code"] == 200
    assert response.json()["data"]["useRecoveryApiKey"]["token"] is not None
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["success"] is True
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["code"] == 200
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["token"] is not None

    # Try to get token status
    response = authorized_client.post(
@@ -501,10 +509,10 @@ def test_graphql_generate_recovery_key_with_limited_uses(
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["useRecoveryApiKey"]["success"] is False
    assert response.json()["data"]["useRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["useRecoveryApiKey"]["code"] == 404
    assert response.json()["data"]["useRecoveryApiKey"]["token"] is None
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["success"] is False
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["code"] == 404
    assert response.json()["data"]["api"]["useRecoveryApiKey"]["token"] is None


def test_graphql_generate_recovery_key_with_negative_uses(
@@ -524,10 +532,10 @@ def test_graphql_generate_recovery_key_with_negative_uses(
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["getNewRecoveryApiKey"]["success"] is False
    assert response.json()["data"]["getNewRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["getNewRecoveryApiKey"]["code"] == 400
    assert response.json()["data"]["getNewRecoveryApiKey"]["key"] is None
    assert response.json()["data"]["api"]["getNewRecoveryApiKey"]["success"] is False
    assert response.json()["data"]["api"]["getNewRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["api"]["getNewRecoveryApiKey"]["code"] == 400
    assert response.json()["data"]["api"]["getNewRecoveryApiKey"]["key"] is None


def test_graphql_generate_recovery_key_with_zero_uses(authorized_client, tokens_file):
@@ -545,7 +553,7 @@ def test_graphql_generate_recovery_key_with_zero_uses(authorized_client, tokens_
    )
    assert response.status_code == 200
    assert response.json().get("data") is not None
    assert response.json()["data"]["getNewRecoveryApiKey"]["success"] is False
    assert response.json()["data"]["getNewRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["getNewRecoveryApiKey"]["code"] == 400
    assert response.json()["data"]["getNewRecoveryApiKey"]["key"] is None
    assert response.json()["data"]["api"]["getNewRecoveryApiKey"]["success"] is False
    assert response.json()["data"]["api"]["getNewRecoveryApiKey"]["message"] is not None
    assert response.json()["data"]["api"]["getNewRecoveryApiKey"]["code"] == 400
    assert response.json()["data"]["api"]["getNewRecoveryApiKey"]["key"] is None
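The recovery-key hunks above rely on mnemonic_to_hex, a helper that is not
shown in this commit; given how the device tests convert keys, it presumably
round-trips a BIP-39 phrase through the mnemonic package, roughly:

    from mnemonic import Mnemonic

    def mnemonic_to_hex(phrase: str) -> str:  # sketch of the assumed helper
        return Mnemonic(language="english").to_entropy(phrase).hex()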
645
tests/test_graphql/test_backup.py
Normal file
@@ -0,0 +1,645 @@
import pytest
import os.path as path
from os import makedirs
from os import remove
from os import listdir
from os import urandom
from datetime import datetime, timedelta, timezone
from subprocess import Popen

import selfprivacy_api.services as services
from selfprivacy_api.services import Service, get_all_services

from selfprivacy_api.services import get_service_by_id
from selfprivacy_api.services.test_service import DummyService
from selfprivacy_api.graphql.queries.providers import BackupProvider
from selfprivacy_api.graphql.common_types.backup import RestoreStrategy
from selfprivacy_api.jobs import Jobs, JobStatus

from selfprivacy_api.models.backup.snapshot import Snapshot

from selfprivacy_api.backup import Backups
import selfprivacy_api.backup.providers as providers
from selfprivacy_api.backup.providers import AbstractBackupProvider
from selfprivacy_api.backup.providers.backblaze import Backblaze
from selfprivacy_api.backup.util import sync
from selfprivacy_api.backup.backuppers.restic_backupper import ResticBackupper
from selfprivacy_api.backup.jobs import add_backup_job, add_restore_job


from selfprivacy_api.backup.tasks import start_backup, restore_snapshot
from selfprivacy_api.backup.storage import Storage
from selfprivacy_api.backup.jobs import get_backup_job


TESTFILE_BODY = "testytest!"
TESTFILE_2_BODY = "testissimo!"
REPO_NAME = "test_backup"


@pytest.fixture(scope="function")
def backups(tmpdir):
    Backups.reset()

    test_repo_path = path.join(tmpdir, "totallyunrelated")
    Backups.set_localfile_repo(test_repo_path)

    Jobs.reset()


@pytest.fixture()
def backups_backblaze(generic_userdata):
    Backups.reset(reset_json=False)


@pytest.fixture()
def raw_dummy_service(tmpdir):
    dirnames = ["test_service", "also_test_service"]
    service_dirs = []
    for d in dirnames:
        service_dir = path.join(tmpdir, d)
        makedirs(service_dir)
        service_dirs.append(service_dir)

    testfile_path_1 = path.join(service_dirs[0], "testfile.txt")
    with open(testfile_path_1, "w") as file:
        file.write(TESTFILE_BODY)

    testfile_path_2 = path.join(service_dirs[1], "testfile2.txt")
    with open(testfile_path_2, "w") as file:
        file.write(TESTFILE_2_BODY)

    # we need this to not change get_folders() much
    class TestDummyService(DummyService, folders=service_dirs):
        pass

    service = TestDummyService()
    return service


@pytest.fixture()
def dummy_service(tmpdir, backups, raw_dummy_service) -> Service:
    service = raw_dummy_service
    repo_path = path.join(tmpdir, "test_repo")
    assert not path.exists(repo_path)
    # assert not repo_path

    Backups.init_repo()

    # register our service
    services.services.append(service)

    assert get_service_by_id(service.get_id()) is not None
    yield service

    # cleanup because apparently it matters wrt tasks
    services.services.remove(service)


@pytest.fixture()
def memory_backup() -> AbstractBackupProvider:
    ProviderClass = providers.get_provider(BackupProvider.MEMORY)
    assert ProviderClass is not None
    memory_provider = ProviderClass(login="", key="")
    assert memory_provider is not None
    return memory_provider


@pytest.fixture()
def file_backup(tmpdir) -> AbstractBackupProvider:
    test_repo_path = path.join(tmpdir, "test_repo")
    ProviderClass = providers.get_provider(BackupProvider.FILE)
    assert ProviderClass is not None
    provider = ProviderClass(location=test_repo_path)
    assert provider is not None
    return provider

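# providers.get_provider maps a BackupProvider enum member to a provider
# class, judging by its use in the fixtures above; conceptually a lookup
# table along these lines (the class names for MEMORY and FILE are assumed):
#
#     PROVIDER_MAPPING = {
#         BackupProvider.BACKBLAZE: Backblaze,
#         BackupProvider.MEMORY: InMemoryBackup,
#         BackupProvider.FILE: LocalFileBackup,
#     }
#
#     def get_provider(provider_type: BackupProvider):
#         return PROVIDER_MAPPING[provider_type]
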
def test_config_load(generic_userdata):
    Backups.reset(reset_json=False)
    provider = Backups.provider()

    assert provider is not None
    assert isinstance(provider, Backblaze)
    assert provider.login == "ID"
    assert provider.key == "KEY"
    assert provider.location == "selfprivacy"

    assert provider.backupper.account == "ID"
    assert provider.backupper.key == "KEY"


def test_json_reset(generic_userdata):
    Backups.reset(reset_json=False)
    provider = Backups.provider()
    assert provider is not None
    assert isinstance(provider, Backblaze)
    assert provider.login == "ID"
    assert provider.key == "KEY"
    assert provider.location == "selfprivacy"

    Backups.reset()
    provider = Backups.provider()
    assert provider is not None
    assert isinstance(provider, AbstractBackupProvider)
    assert provider.login == ""
    assert provider.key == ""
    assert provider.location == ""
    assert provider.repo_id == ""


def test_select_backend():
    provider = providers.get_provider(BackupProvider.BACKBLAZE)
    assert provider is not None
    assert provider == Backblaze


def test_file_backend_init(file_backup):
    file_backup.backupper.init()


def test_backup_simple_file(raw_dummy_service, file_backup):
    # temporarily incomplete
    service = raw_dummy_service
    assert service is not None
    assert file_backup is not None

    name = service.get_id()
    file_backup.backupper.init()


def test_backup_service(dummy_service, backups):
    id = dummy_service.get_id()
    assert_job_finished(f"services.{id}.backup", count=0)
    assert Backups.get_last_backed_up(dummy_service) is None

    Backups.back_up(dummy_service)

    now = datetime.now(timezone.utc)
    date = Backups.get_last_backed_up(dummy_service)
    assert date is not None
    assert now > date
    assert now - date < timedelta(minutes=1)

    assert_job_finished(f"services.{id}.backup", count=1)


def test_no_repo(memory_backup):
    with pytest.raises(ValueError):
        assert memory_backup.backupper.get_snapshots() == []


def test_one_snapshot(backups, dummy_service):
    Backups.back_up(dummy_service)

    snaps = Backups.get_snapshots(dummy_service)
    assert len(snaps) == 1
    snap = snaps[0]
    assert snap.service_name == dummy_service.get_id()


def test_backup_returns_snapshot(backups, dummy_service):
    service_folders = dummy_service.get_folders()
    provider = Backups.provider()
    name = dummy_service.get_id()
    snapshot = provider.backupper.start_backup(service_folders, name)

    assert snapshot.id is not None
    assert len(snapshot.id) == len(Backups.get_all_snapshots()[0].id)
    assert Backups.get_snapshot_by_id(snapshot.id) is not None
    assert snapshot.service_name == name
    assert snapshot.created_at is not None


def folder_files(folder):
    return [
        path.join(folder, filename)
        for filename in listdir(folder)
        if filename is not None
    ]


def service_files(service):
    result = []
    for service_folder in service.get_folders():
        result.extend(folder_files(service_folder))
    return result


def test_restore(backups, dummy_service):
    paths_to_nuke = service_files(dummy_service)
    contents = []

    for service_file in paths_to_nuke:
        with open(service_file, "r") as file:
            contents.append(file.read())

    Backups.back_up(dummy_service)
    snap = Backups.get_snapshots(dummy_service)[0]
    assert snap is not None

    for p in paths_to_nuke:
        assert path.exists(p)
        remove(p)
        assert not path.exists(p)

    Backups._restore_service_from_snapshot(dummy_service, snap.id)
    for p, content in zip(paths_to_nuke, contents):
        assert path.exists(p)
        with open(p, "r") as file:
            assert file.read() == content


def test_sizing(backups, dummy_service):
    Backups.back_up(dummy_service)
    snap = Backups.get_snapshots(dummy_service)[0]
    size = Backups.snapshot_restored_size(snap.id)
    assert size is not None
    assert size > 0


def test_init_tracking(backups, raw_dummy_service):
    assert Backups.is_initted() is False

    Backups.init_repo()

    assert Backups.is_initted() is True


def finished_jobs():
    return [job for job in Jobs.get_jobs() if job.status is JobStatus.FINISHED]


def assert_job_finished(job_type, count):
    finished_types = [job.type_id for job in finished_jobs()]
    assert finished_types.count(job_type) == count


def assert_job_has_run(job_type):
    job = [job for job in finished_jobs() if job.type_id == job_type][0]
    assert JobStatus.RUNNING in Jobs.status_updates(job)


def job_progress_updates(job_type):
    job = [job for job in finished_jobs() if job.type_id == job_type][0]
    return Jobs.progress_updates(job)


def assert_job_had_progress(job_type):
    assert len(job_progress_updates(job_type)) > 0


def make_large_file(path: str, bytes: int):
    with open(path, "wb") as file:
        file.write(urandom(bytes))


def test_snapshots_by_id(backups, dummy_service):
    snap1 = Backups.back_up(dummy_service)
    snap2 = Backups.back_up(dummy_service)
    snap3 = Backups.back_up(dummy_service)

    assert snap2.id is not None
    assert snap2.id != ""

    assert len(Backups.get_snapshots(dummy_service)) == 3
    assert Backups.get_snapshot_by_id(snap2.id).id == snap2.id


@pytest.fixture(params=["instant_server_stop", "delayed_server_stop"])
def simulated_service_stopping_delay(request) -> float:
    if request.param == "instant_server_stop":
        return 0.0
    else:
        return 0.3


def test_backup_service_task(backups, dummy_service, simulated_service_stopping_delay):
    dummy_service.set_delay(simulated_service_stopping_delay)

    handle = start_backup(dummy_service)
    handle(blocking=True)

    snaps = Backups.get_snapshots(dummy_service)
    assert len(snaps) == 1

    id = dummy_service.get_id()
    job_type_id = f"services.{id}.backup"
    assert_job_finished(job_type_id, count=1)
    assert_job_has_run(job_type_id)
    assert_job_had_progress(job_type_id)

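# A note on the handle(blocking=True) pattern above: start_backup is imported
# from selfprivacy_api.backup.tasks and appears to be a huey task (huey is a
# dependency of this API), so calling it enqueues work and returns a result
# handle; invoking that handle with blocking=True waits for the worker to
# finish before the test asserts on the outcome.
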
def test_forget_snapshot(backups, dummy_service):
|
||||
snap1 = Backups.back_up(dummy_service)
|
||||
snap2 = Backups.back_up(dummy_service)
|
||||
assert len(Backups.get_snapshots(dummy_service)) == 2
|
||||
|
||||
Backups.forget_snapshot(snap2)
|
||||
assert len(Backups.get_snapshots(dummy_service)) == 1
|
||||
Backups.force_snapshot_cache_reload()
|
||||
assert len(Backups.get_snapshots(dummy_service)) == 1
|
||||
|
||||
assert Backups.get_snapshots(dummy_service)[0].id == snap1.id
|
||||
|
||||
Backups.forget_snapshot(snap1)
|
||||
assert len(Backups.get_snapshots(dummy_service)) == 0
|
||||
|
||||
|
||||
def test_forget_nonexistent_snapshot(backups, dummy_service):
|
||||
bogus = Snapshot(
|
||||
id="gibberjibber", service_name="nohoho", created_at=datetime.now(timezone.utc)
|
||||
)
|
||||
with pytest.raises(ValueError):
|
||||
Backups.forget_snapshot(bogus)
|
||||
|
||||
|
||||
def test_backup_larger_file(backups, dummy_service):
|
||||
dir = path.join(dummy_service.get_folders()[0], "LARGEFILE")
|
||||
mega = 2**20
|
||||
make_large_file(dir, 100 * mega)
|
||||
|
||||
handle = start_backup(dummy_service)
|
||||
handle(blocking=True)
|
||||
|
||||
# results will be slightly different on different machines. if someone has troubles with it on their machine, consider dropping this test.
|
||||
id = dummy_service.get_id()
|
||||
job_type_id = f"services.{id}.backup"
|
||||
assert_job_finished(job_type_id, count=1)
|
||||
assert_job_has_run(job_type_id)
|
||||
updates = job_progress_updates(job_type_id)
|
||||
assert len(updates) > 3
|
||||
assert updates[int((len(updates) - 1) / 2.0)] > 10
|
||||
# clean up a bit
|
||||
remove(dir)
|
||||
|
||||
|
||||
@pytest.fixture(params=["verify", "inplace"])
|
||||
def restore_strategy(request) -> RestoreStrategy:
|
||||
if request.param == "verify":
|
||||
return RestoreStrategy.DOWNLOAD_VERIFY_OVERWRITE
|
||||
else:
|
||||
return RestoreStrategy.INPLACE
|
||||
|
||||
|
||||
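# DOWNLOAD_VERIFY_OVERWRITE fetches and verifies a snapshot before touching
# the live folders; INPLACE restores straight into them. Judging by the
# snapshot counts asserted in test_restore_snapshot_task below, the INPLACE
# path appears to take an extra safety snapshot before restoring.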
def test_restore_snapshot_task(
    backups, dummy_service, restore_strategy, simulated_service_stopping_delay
):
    dummy_service.set_delay(simulated_service_stopping_delay)

    Backups.back_up(dummy_service)
    snaps = Backups.get_snapshots(dummy_service)
    assert len(snaps) == 1

    paths_to_nuke = service_files(dummy_service)
    contents = []

    for service_file in paths_to_nuke:
        with open(service_file, "r") as file:
            contents.append(file.read())

    for p in paths_to_nuke:
        remove(p)

    handle = restore_snapshot(snaps[0], restore_strategy)
    handle(blocking=True)

    for p, content in zip(paths_to_nuke, contents):
        assert path.exists(p)
        with open(p, "r") as file:
            assert file.read() == content

    snaps = Backups.get_snapshots(dummy_service)
    if restore_strategy == RestoreStrategy.INPLACE:
        assert len(snaps) == 2
    else:
        assert len(snaps) == 1
def test_set_autobackup_period(backups):
    assert Backups.autobackup_period_minutes() is None

    Backups.set_autobackup_period_minutes(2)
    assert Backups.autobackup_period_minutes() == 2

    Backups.disable_all_autobackup()
    assert Backups.autobackup_period_minutes() is None

    Backups.set_autobackup_period_minutes(3)
    assert Backups.autobackup_period_minutes() == 3

    Backups.set_autobackup_period_minutes(0)
    assert Backups.autobackup_period_minutes() is None

    Backups.set_autobackup_period_minutes(3)
    assert Backups.autobackup_period_minutes() == 3

    Backups.set_autobackup_period_minutes(-1)
    assert Backups.autobackup_period_minutes() is None
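# As the assertions above pin down, a period of 0 or a negative number acts
# exactly like disable_all_autobackup(): autobackup_period_minutes() reverts
# to None.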
def test_no_default_autobackup(backups, dummy_service):
    now = datetime.now(timezone.utc)
    assert not Backups.is_time_to_backup_service(dummy_service, now)
    assert not Backups.is_time_to_backup(now)


def backuppable_services() -> list[Service]:
    return [service for service in get_all_services() if service.can_be_backed_up()]
def test_services_to_back_up(backups, dummy_service):
    backup_period = 13  # minutes
    now = datetime.now(timezone.utc)

    dummy_service.set_backuppable(False)
    services = Backups.services_to_back_up(now)
    assert len(services) == 0

    dummy_service.set_backuppable(True)

    services = Backups.services_to_back_up(now)
    assert len(services) == 0

    Backups.set_autobackup_period_minutes(backup_period)

    services = Backups.services_to_back_up(now)
    assert len(services) == len(backuppable_services())
    assert dummy_service.get_id() in [
        service.get_id() for service in backuppable_services()
    ]
def test_autobackup_timer_periods(backups, dummy_service):
    now = datetime.now(timezone.utc)
    backup_period = 13  # minutes

    assert not Backups.is_time_to_backup_service(dummy_service, now)
    assert not Backups.is_time_to_backup(now)

    Backups.set_autobackup_period_minutes(backup_period)
    assert Backups.is_time_to_backup_service(dummy_service, now)
    assert Backups.is_time_to_backup(now)

    Backups.set_autobackup_period_minutes(0)
    assert not Backups.is_time_to_backup_service(dummy_service, now)
    assert not Backups.is_time_to_backup(now)
def test_autobackup_timer_enabling(backups, dummy_service):
    now = datetime.now(timezone.utc)
    backup_period = 13  # minutes
    dummy_service.set_backuppable(False)

    Backups.set_autobackup_period_minutes(backup_period)
    assert Backups.is_time_to_backup(
        now
    )  # there are other services too, not just our dummy

    # a non-backuppable service stays non-backuppable even when a period is set
    assert not Backups.is_time_to_backup_service(dummy_service, now)

    dummy_service.set_backuppable(True)
    assert dummy_service.can_be_backed_up()
    assert Backups.is_time_to_backup_service(dummy_service, now)

    Backups.disable_all_autobackup()
    assert not Backups.is_time_to_backup_service(dummy_service, now)
    assert not Backups.is_time_to_backup(now)
def test_autobackup_timing(backups, dummy_service):
    backup_period = 13  # minutes
    now = datetime.now(timezone.utc)

    Backups.set_autobackup_period_minutes(backup_period)
    assert Backups.is_time_to_backup_service(dummy_service, now)
    assert Backups.is_time_to_backup(now)

    Backups.back_up(dummy_service)

    now = datetime.now(timezone.utc)
    assert not Backups.is_time_to_backup_service(dummy_service, now)

    past = datetime.now(timezone.utc) - timedelta(minutes=1)
    assert not Backups.is_time_to_backup_service(dummy_service, past)

    future = datetime.now(timezone.utc) + timedelta(minutes=backup_period + 2)
    assert Backups.is_time_to_backup_service(dummy_service, future)


# Storage
def test_snapshots_caching(backups, dummy_service):
    Backups.back_up(dummy_service)

    # we test indirectly that we do redis calls instead of shell calls
    start = datetime.now()
    for i in range(10):
        snapshots = Backups.get_snapshots(dummy_service)
        assert len(snapshots) == 1
    assert datetime.now() - start < timedelta(seconds=0.5)

    cached_snapshots = Storage.get_cached_snapshots()
    assert len(cached_snapshots) == 1

    Storage.delete_cached_snapshot(cached_snapshots[0])
    cached_snapshots = Storage.get_cached_snapshots()
    assert len(cached_snapshots) == 0

    snapshots = Backups.get_snapshots(dummy_service)
    assert len(snapshots) == 1
    cached_snapshots = Storage.get_cached_snapshots()
    assert len(cached_snapshots) == 1
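# The timing bound above is the actual caching check: ten get_snapshots()
# calls in under half a second are only plausible if reads come from the
# redis-backed Storage cache rather than a restic shell-out, and the
# delete/reload sequence shows the cache is repopulated after a miss.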

# Storage
def test_init_tracking_caching(backups, raw_dummy_service):
    assert Storage.has_init_mark() is False

    Storage.mark_as_init()

    assert Storage.has_init_mark() is True
    assert Backups.is_initted() is True


# Storage
def test_init_tracking_caching2(backups, raw_dummy_service):
    assert Storage.has_init_mark() is False

    Backups.init_repo()

    assert Storage.has_init_mark() is True


# Storage
def test_provider_storage(backups_backblaze):
    provider = Backups.provider()

    assert provider is not None

    assert isinstance(provider, Backblaze)
    assert provider.login == "ID"
    assert provider.key == "KEY"

    Storage.store_provider(provider)
    restored_provider = Backups._load_provider_redis()
    assert isinstance(restored_provider, Backblaze)
    assert restored_provider.login == "ID"
    assert restored_provider.key == "KEY"
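# store_provider/_load_provider_redis round-trip the provider class together
# with its credentials through redis, which suggests provider configuration
# is meant to survive API restarts without re-entering keys.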

def test_sync(dummy_service):
    src = dummy_service.get_folders()[0]
    dst = dummy_service.get_folders()[1]
    old_files_src = set(listdir(src))
    old_files_dst = set(listdir(dst))
    assert old_files_src != old_files_dst

    sync(src, dst)
    new_files_src = set(listdir(src))
    new_files_dst = set(listdir(dst))
    assert new_files_src == old_files_src
    assert new_files_dst == new_files_src


def test_sync_nonexistent_src(dummy_service):
    src = "/var/lib/nonexistentFluffyBunniesOfUnix"
    dst = dummy_service.get_folders()[1]

    with pytest.raises(ValueError):
        sync(src, dst)
# Restic lowlevel
def test_mount_umount(backups, dummy_service, tmpdir):
    Backups.back_up(dummy_service)
    backupper = Backups.provider().backupper
    assert isinstance(backupper, ResticBackupper)

    mountpoint = tmpdir / "mount"
    makedirs(mountpoint)
    assert path.exists(mountpoint)
    assert len(listdir(mountpoint)) == 0

    handle = backupper.mount_repo(mountpoint)
    assert len(listdir(mountpoint)) != 0

    backupper.unmount_repo(mountpoint)
    # handle.terminate()
    assert len(listdir(mountpoint)) == 0
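# mount_repo presumably exposes the repository via restic's FUSE mount; the
# assertions only rely on the mountpoint being non-empty while mounted and
# empty again after unmount_repo.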

def test_move_blocks_backups(backups, dummy_service, restore_strategy):
    snap = Backups.back_up(dummy_service)
    job = Jobs.add(
        type_id=f"services.{dummy_service.get_id()}.move",
        name="Move Dummy",
        description="Moving Dummy data to the Rainbow Land",
        status=JobStatus.RUNNING,
    )

    with pytest.raises(ValueError):
        Backups.back_up(dummy_service)

    with pytest.raises(ValueError):
        Backups.restore_snapshot(snap, restore_strategy)
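# A running "services.<id>.move" job is enough to make both back_up and
# restore_snapshot refuse with ValueError, so backups cannot race a data move.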
tests/test_graphql/test_localsecret.py (new file, 38 lines)

@@ -0,0 +1,38 @@
from selfprivacy_api.backup.local_secret import LocalBackupSecret
from pytest import fixture


@fixture()
def localsecret():
    LocalBackupSecret._full_reset()
    return LocalBackupSecret


def test_local_secret_firstget(localsecret):
    assert not LocalBackupSecret.exists()
    secret = LocalBackupSecret.get()
    assert LocalBackupSecret.exists()
    assert secret is not None

    # making sure it does not reset again
    secret2 = LocalBackupSecret.get()
    assert LocalBackupSecret.exists()
    assert secret2 == secret


def test_local_secret_reset(localsecret):
    secret1 = LocalBackupSecret.get()

    LocalBackupSecret.reset()
    secret2 = LocalBackupSecret.get()
    assert secret2 is not None
    assert secret2 != secret1


def test_local_secret_set(localsecret):
    newsecret = "great and totally safe secret"
    oldsecret = LocalBackupSecret.get()
    assert oldsecret != newsecret

    LocalBackupSecret.set(newsecret)
    assert LocalBackupSecret.get() == newsecret
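# Usage pattern pinned down by these tests: LocalBackupSecret.get() lazily
# generates and persists the secret on first use, so callers never need to
# check exists() themselves; reset() and set() replace it explicitly.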
@@ -44,6 +44,7 @@ def some_users(mocker, datadir):
 
 API_CREATE_SSH_KEY_MUTATION = """
 mutation addSshKey($sshInput: SshMutationInput!) {
+    users {
         addSshKey(sshInput: $sshInput) {
             success
             message
@@ -54,6 +55,7 @@ mutation addSshKey($sshInput: SshMutationInput!) {
             }
         }
+    }
 }
 """
 
 
@@ -90,12 +92,12 @@ def test_graphql_add_ssh_key(authorized_client, some_users, mock_subprocess_pope
     assert response.status_code == 200
     assert response.json().get("data") is not None
 
-    assert response.json()["data"]["addSshKey"]["code"] == 201
-    assert response.json()["data"]["addSshKey"]["message"] is not None
-    assert response.json()["data"]["addSshKey"]["success"] is True
+    assert response.json()["data"]["users"]["addSshKey"]["code"] == 201
+    assert response.json()["data"]["users"]["addSshKey"]["message"] is not None
+    assert response.json()["data"]["users"]["addSshKey"]["success"] is True
 
-    assert response.json()["data"]["addSshKey"]["user"]["username"] == "user1"
-    assert response.json()["data"]["addSshKey"]["user"]["sshKeys"] == [
+    assert response.json()["data"]["users"]["addSshKey"]["user"]["username"] == "user1"
+    assert response.json()["data"]["users"]["addSshKey"]["user"]["sshKeys"] == [
         "ssh-rsa KEY user1@pc",
         "ssh-rsa KEY test_key@pc",
     ]
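The same change repeats through the rest of this file: the SSH key mutations
moved under the users namespace, so every response assertion gains one more
["users"] level, and assertions that outgrow the line-length limit are
rewrapped in assert (...) blocks.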
@@ -117,12 +119,12 @@ def test_graphql_add_root_ssh_key(authorized_client, some_users, mock_subprocess
     assert response.status_code == 200
     assert response.json().get("data") is not None
 
-    assert response.json()["data"]["addSshKey"]["code"] == 201
-    assert response.json()["data"]["addSshKey"]["message"] is not None
-    assert response.json()["data"]["addSshKey"]["success"] is True
+    assert response.json()["data"]["users"]["addSshKey"]["code"] == 201
+    assert response.json()["data"]["users"]["addSshKey"]["message"] is not None
+    assert response.json()["data"]["users"]["addSshKey"]["success"] is True
 
-    assert response.json()["data"]["addSshKey"]["user"]["username"] == "root"
-    assert response.json()["data"]["addSshKey"]["user"]["sshKeys"] == [
+    assert response.json()["data"]["users"]["addSshKey"]["user"]["username"] == "root"
+    assert response.json()["data"]["users"]["addSshKey"]["user"]["sshKeys"] == [
         "ssh-ed25519 KEY test@pc",
         "ssh-rsa KEY test_key@pc",
     ]
@@ -144,12 +146,12 @@ def test_graphql_add_main_ssh_key(authorized_client, some_users, mock_subprocess
     assert response.status_code == 200
     assert response.json().get("data") is not None
 
-    assert response.json()["data"]["addSshKey"]["code"] == 201
-    assert response.json()["data"]["addSshKey"]["message"] is not None
-    assert response.json()["data"]["addSshKey"]["success"] is True
+    assert response.json()["data"]["users"]["addSshKey"]["code"] == 201
+    assert response.json()["data"]["users"]["addSshKey"]["message"] is not None
+    assert response.json()["data"]["users"]["addSshKey"]["success"] is True
 
-    assert response.json()["data"]["addSshKey"]["user"]["username"] == "tester"
-    assert response.json()["data"]["addSshKey"]["user"]["sshKeys"] == [
+    assert response.json()["data"]["users"]["addSshKey"]["user"]["username"] == "tester"
+    assert response.json()["data"]["users"]["addSshKey"]["user"]["sshKeys"] == [
         "ssh-rsa KEY test@pc",
         "ssh-rsa KEY test_key@pc",
     ]
@@ -171,9 +173,9 @@ def test_graphql_add_bad_ssh_key(authorized_client, some_users, mock_subprocess_
     assert response.status_code == 200
     assert response.json().get("data") is not None
 
-    assert response.json()["data"]["addSshKey"]["code"] == 400
-    assert response.json()["data"]["addSshKey"]["message"] is not None
-    assert response.json()["data"]["addSshKey"]["success"] is False
+    assert response.json()["data"]["users"]["addSshKey"]["code"] == 400
+    assert response.json()["data"]["users"]["addSshKey"]["message"] is not None
+    assert response.json()["data"]["users"]["addSshKey"]["success"] is False
 
 
 def test_graphql_add_ssh_key_nonexistent_user(
@@ -194,13 +196,14 @@ def test_graphql_add_ssh_key_nonexistent_user(
     assert response.status_code == 200
     assert response.json().get("data") is not None
 
-    assert response.json()["data"]["addSshKey"]["code"] == 404
-    assert response.json()["data"]["addSshKey"]["message"] is not None
-    assert response.json()["data"]["addSshKey"]["success"] is False
+    assert response.json()["data"]["users"]["addSshKey"]["code"] == 404
+    assert response.json()["data"]["users"]["addSshKey"]["message"] is not None
+    assert response.json()["data"]["users"]["addSshKey"]["success"] is False
 
 
 API_REMOVE_SSH_KEY_MUTATION = """
 mutation removeSshKey($sshInput: SshMutationInput!) {
+    users {
         removeSshKey(sshInput: $sshInput) {
             success
             message
@@ -211,6 +214,7 @@ mutation removeSshKey($sshInput: SshMutationInput!) {
             }
         }
+    }
 }
 """
 
 
@@ -247,12 +251,14 @@ def test_graphql_remove_ssh_key(authorized_client, some_users, mock_subprocess_p
     assert response.status_code == 200
     assert response.json().get("data") is not None
 
-    assert response.json()["data"]["removeSshKey"]["code"] == 200
-    assert response.json()["data"]["removeSshKey"]["message"] is not None
-    assert response.json()["data"]["removeSshKey"]["success"] is True
+    assert response.json()["data"]["users"]["removeSshKey"]["code"] == 200
+    assert response.json()["data"]["users"]["removeSshKey"]["message"] is not None
+    assert response.json()["data"]["users"]["removeSshKey"]["success"] is True
 
-    assert response.json()["data"]["removeSshKey"]["user"]["username"] == "user1"
-    assert response.json()["data"]["removeSshKey"]["user"]["sshKeys"] == []
+    assert (
+        response.json()["data"]["users"]["removeSshKey"]["user"]["username"] == "user1"
+    )
+    assert response.json()["data"]["users"]["removeSshKey"]["user"]["sshKeys"] == []
 
 
 def test_graphql_remove_root_ssh_key(
@@ -273,12 +279,14 @@ def test_graphql_remove_root_ssh_key(
     assert response.status_code == 200
     assert response.json().get("data") is not None
 
-    assert response.json()["data"]["removeSshKey"]["code"] == 200
-    assert response.json()["data"]["removeSshKey"]["message"] is not None
-    assert response.json()["data"]["removeSshKey"]["success"] is True
+    assert response.json()["data"]["users"]["removeSshKey"]["code"] == 200
+    assert response.json()["data"]["users"]["removeSshKey"]["message"] is not None
+    assert response.json()["data"]["users"]["removeSshKey"]["success"] is True
 
-    assert response.json()["data"]["removeSshKey"]["user"]["username"] == "root"
-    assert response.json()["data"]["removeSshKey"]["user"]["sshKeys"] == []
+    assert (
+        response.json()["data"]["users"]["removeSshKey"]["user"]["username"] == "root"
+    )
+    assert response.json()["data"]["users"]["removeSshKey"]["user"]["sshKeys"] == []
 
 
 def test_graphql_remove_main_ssh_key(
@@ -299,12 +307,14 @@ def test_graphql_remove_main_ssh_key(
     assert response.status_code == 200
     assert response.json().get("data") is not None
 
-    assert response.json()["data"]["removeSshKey"]["code"] == 200
-    assert response.json()["data"]["removeSshKey"]["message"] is not None
-    assert response.json()["data"]["removeSshKey"]["success"] is True
+    assert response.json()["data"]["users"]["removeSshKey"]["code"] == 200
+    assert response.json()["data"]["users"]["removeSshKey"]["message"] is not None
+    assert response.json()["data"]["users"]["removeSshKey"]["success"] is True
 
-    assert response.json()["data"]["removeSshKey"]["user"]["username"] == "tester"
-    assert response.json()["data"]["removeSshKey"]["user"]["sshKeys"] == []
+    assert (
+        response.json()["data"]["users"]["removeSshKey"]["user"]["username"] == "tester"
+    )
+    assert response.json()["data"]["users"]["removeSshKey"]["user"]["sshKeys"] == []
 
 
 def test_graphql_remove_nonexistent_ssh_key(
@@ -325,9 +335,9 @@ def test_graphql_remove_nonexistent_ssh_key(
     assert response.status_code == 200
     assert response.json().get("data") is not None
 
-    assert response.json()["data"]["removeSshKey"]["code"] == 404
-    assert response.json()["data"]["removeSshKey"]["message"] is not None
-    assert response.json()["data"]["removeSshKey"]["success"] is False
+    assert response.json()["data"]["users"]["removeSshKey"]["code"] == 404
+    assert response.json()["data"]["users"]["removeSshKey"]["message"] is not None
+    assert response.json()["data"]["users"]["removeSshKey"]["success"] is False
 
 
 def test_graphql_remove_ssh_key_nonexistent_user(
@@ -348,6 +358,6 @@ def test_graphql_remove_ssh_key_nonexistent_user(
     assert response.status_code == 200
     assert response.json().get("data") is not None
 
-    assert response.json()["data"]["removeSshKey"]["code"] == 404
-    assert response.json()["data"]["removeSshKey"]["message"] is not None
-    assert response.json()["data"]["removeSshKey"]["success"] is False
+    assert response.json()["data"]["users"]["removeSshKey"]["code"] == 404
+    assert response.json()["data"]["users"]["removeSshKey"]["message"] is not None
+    assert response.json()["data"]["users"]["removeSshKey"]["success"] is False
@@ -382,6 +382,7 @@ def test_graphql_get_timezone_on_undefined(authorized_client, undefined_config):
 
 API_CHANGE_TIMEZONE_MUTATION = """
 mutation changeTimezone($timezone: String!) {
+    system {
         changeTimezone(timezone: $timezone) {
             success
             message
@@ -389,6 +390,7 @@ mutation changeTimezone($timezone: String!) {
             timezone
         }
+    }
 }
 """
 
 
@@ -420,10 +422,13 @@ def test_graphql_change_timezone(authorized_client, turned_on):
     )
     assert response.status_code == 200
     assert response.json().get("data") is not None
-    assert response.json()["data"]["changeTimezone"]["success"] is True
-    assert response.json()["data"]["changeTimezone"]["message"] is not None
-    assert response.json()["data"]["changeTimezone"]["code"] == 200
-    assert response.json()["data"]["changeTimezone"]["timezone"] == "Europe/Helsinki"
+    assert response.json()["data"]["system"]["changeTimezone"]["success"] is True
+    assert response.json()["data"]["system"]["changeTimezone"]["message"] is not None
+    assert response.json()["data"]["system"]["changeTimezone"]["code"] == 200
+    assert (
+        response.json()["data"]["system"]["changeTimezone"]["timezone"]
+        == "Europe/Helsinki"
+    )
     assert read_json(turned_on / "turned_on.json")["timezone"] == "Europe/Helsinki"
 
 
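The system tests follow the same refactor: changeTimezone and the mutations
after it are nested under system { ... }, so each assertion now reads
response.json()["data"]["system"][...], with long lines wrapped the same way.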
@@ -440,10 +445,13 @@ def test_graphql_change_timezone_on_undefined(authorized_client, undefined_confi
     )
     assert response.status_code == 200
     assert response.json().get("data") is not None
-    assert response.json()["data"]["changeTimezone"]["success"] is True
-    assert response.json()["data"]["changeTimezone"]["message"] is not None
-    assert response.json()["data"]["changeTimezone"]["code"] == 200
-    assert response.json()["data"]["changeTimezone"]["timezone"] == "Europe/Helsinki"
+    assert response.json()["data"]["system"]["changeTimezone"]["success"] is True
+    assert response.json()["data"]["system"]["changeTimezone"]["message"] is not None
+    assert response.json()["data"]["system"]["changeTimezone"]["code"] == 200
+    assert (
+        response.json()["data"]["system"]["changeTimezone"]["timezone"]
+        == "Europe/Helsinki"
+    )
     assert (
         read_json(undefined_config / "undefined.json")["timezone"] == "Europe/Helsinki"
     )
@@ -462,10 +470,10 @@ def test_graphql_change_timezone_without_timezone(authorized_client, turned_on):
     )
     assert response.status_code == 200
     assert response.json().get("data") is not None
-    assert response.json()["data"]["changeTimezone"]["success"] is False
-    assert response.json()["data"]["changeTimezone"]["message"] is not None
-    assert response.json()["data"]["changeTimezone"]["code"] == 400
-    assert response.json()["data"]["changeTimezone"]["timezone"] is None
+    assert response.json()["data"]["system"]["changeTimezone"]["success"] is False
+    assert response.json()["data"]["system"]["changeTimezone"]["message"] is not None
+    assert response.json()["data"]["system"]["changeTimezone"]["code"] == 400
+    assert response.json()["data"]["system"]["changeTimezone"]["timezone"] is None
     assert read_json(turned_on / "turned_on.json")["timezone"] == "Europe/Moscow"
 
 
@@ -482,10 +490,10 @@ def test_graphql_change_timezone_with_invalid_timezone(authorized_client, turned
     )
     assert response.status_code == 200
     assert response.json().get("data") is not None
-    assert response.json()["data"]["changeTimezone"]["success"] is False
-    assert response.json()["data"]["changeTimezone"]["message"] is not None
-    assert response.json()["data"]["changeTimezone"]["code"] == 400
-    assert response.json()["data"]["changeTimezone"]["timezone"] is None
+    assert response.json()["data"]["system"]["changeTimezone"]["success"] is False
+    assert response.json()["data"]["system"]["changeTimezone"]["message"] is not None
+    assert response.json()["data"]["system"]["changeTimezone"]["code"] == 400
+    assert response.json()["data"]["system"]["changeTimezone"]["timezone"] is None
     assert read_json(turned_on / "turned_on.json")["timezone"] == "Europe/Moscow"
 
 
@@ -589,6 +597,7 @@ def test_graphql_get_auto_upgrade_turned_off(authorized_client, turned_off):
 
 API_CHANGE_AUTO_UPGRADE_SETTINGS = """
 mutation changeServerSettings($settings: AutoUpgradeSettingsInput!) {
+    system {
         changeAutoUpgradeSettings(settings: $settings) {
             success
             message
@@ -597,6 +606,7 @@ mutation changeServerSettings($settings: AutoUpgradeSettingsInput!) {
             allowReboot
         }
+    }
 }
 """
 
 
@@ -634,14 +644,25 @@ def test_graphql_change_auto_upgrade(authorized_client, turned_on):
     )
     assert response.status_code == 200
     assert response.json().get("data") is not None
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["success"] is True
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["message"] is not None
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["code"] == 200
-    assert (
-        response.json()["data"]["changeAutoUpgradeSettings"]["enableAutoUpgrade"]
-        is False
-    )
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["allowReboot"] is True
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"]["success"]
+        is True
+    )
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"]["message"]
+        is not None
+    )
+    assert response.json()["data"]["system"]["changeAutoUpgradeSettings"]["code"] == 200
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"][
+            "enableAutoUpgrade"
+        ]
+        is False
+    )
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"]["allowReboot"]
+        is True
+    )
     assert read_json(turned_on / "turned_on.json")["autoUpgrade"]["enable"] is False
     assert read_json(turned_on / "turned_on.json")["autoUpgrade"]["allowReboot"] is True
 
@@ -662,14 +683,25 @@ def test_graphql_change_auto_upgrade_on_undefined(authorized_client, undefined_c
     )
     assert response.status_code == 200
     assert response.json().get("data") is not None
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["success"] is True
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["message"] is not None
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["code"] == 200
-    assert (
-        response.json()["data"]["changeAutoUpgradeSettings"]["enableAutoUpgrade"]
-        is False
-    )
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["allowReboot"] is True
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"]["success"]
+        is True
+    )
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"]["message"]
+        is not None
+    )
+    assert response.json()["data"]["system"]["changeAutoUpgradeSettings"]["code"] == 200
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"][
+            "enableAutoUpgrade"
+        ]
+        is False
+    )
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"]["allowReboot"]
+        is True
+    )
     assert (
         read_json(undefined_config / "undefined.json")["autoUpgrade"]["enable"] is False
     )
@@ -695,14 +727,25 @@ def test_graphql_change_auto_upgrade_without_vlaues(authorized_client, no_values
     )
     assert response.status_code == 200
     assert response.json().get("data") is not None
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["success"] is True
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["message"] is not None
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["code"] == 200
-    assert (
-        response.json()["data"]["changeAutoUpgradeSettings"]["enableAutoUpgrade"]
-        is True
-    )
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["allowReboot"] is True
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"]["success"]
+        is True
+    )
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"]["message"]
+        is not None
+    )
+    assert response.json()["data"]["system"]["changeAutoUpgradeSettings"]["code"] == 200
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"][
+            "enableAutoUpgrade"
+        ]
+        is True
+    )
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"]["allowReboot"]
+        is True
+    )
     assert read_json(no_values / "no_values.json")["autoUpgrade"]["enable"] is True
     assert read_json(no_values / "no_values.json")["autoUpgrade"]["allowReboot"] is True
 
@@ -723,14 +766,25 @@ def test_graphql_change_auto_upgrade_turned_off(authorized_client, turned_off):
     )
    assert response.status_code == 200
     assert response.json().get("data") is not None
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["success"] is True
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["message"] is not None
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["code"] == 200
-    assert (
-        response.json()["data"]["changeAutoUpgradeSettings"]["enableAutoUpgrade"]
-        is True
-    )
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["allowReboot"] is True
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"]["success"]
+        is True
+    )
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"]["message"]
+        is not None
+    )
+    assert response.json()["data"]["system"]["changeAutoUpgradeSettings"]["code"] == 200
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"][
+            "enableAutoUpgrade"
+        ]
+        is True
+    )
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"]["allowReboot"]
+        is True
+    )
     assert read_json(turned_off / "turned_off.json")["autoUpgrade"]["enable"] is True
     assert (
         read_json(turned_off / "turned_off.json")["autoUpgrade"]["allowReboot"] is True
@@ -752,14 +806,25 @@ def test_grphql_change_auto_upgrade_without_enable(authorized_client, turned_off
     )
     assert response.status_code == 200
     assert response.json().get("data") is not None
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["success"] is True
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["message"] is not None
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["code"] == 200
-    assert (
-        response.json()["data"]["changeAutoUpgradeSettings"]["enableAutoUpgrade"]
-        is False
-    )
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["allowReboot"] is True
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"]["success"]
+        is True
+    )
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"]["message"]
+        is not None
+    )
+    assert response.json()["data"]["system"]["changeAutoUpgradeSettings"]["code"] == 200
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"][
+            "enableAutoUpgrade"
+        ]
+        is False
+    )
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"]["allowReboot"]
+        is True
+    )
     assert read_json(turned_off / "turned_off.json")["autoUpgrade"]["enable"] is False
     assert (
         read_json(turned_off / "turned_off.json")["autoUpgrade"]["allowReboot"] is True
@@ -783,14 +848,25 @@ def test_graphql_change_auto_upgrade_without_allow_reboot(
     )
     assert response.status_code == 200
     assert response.json().get("data") is not None
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["success"] is True
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["message"] is not None
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["code"] == 200
-    assert (
-        response.json()["data"]["changeAutoUpgradeSettings"]["enableAutoUpgrade"]
-        is True
-    )
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["allowReboot"] is False
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"]["success"]
+        is True
+    )
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"]["message"]
+        is not None
+    )
+    assert response.json()["data"]["system"]["changeAutoUpgradeSettings"]["code"] == 200
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"][
+            "enableAutoUpgrade"
+        ]
+        is True
+    )
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"]["allowReboot"]
+        is False
+    )
     assert read_json(turned_off / "turned_off.json")["autoUpgrade"]["enable"] is True
     assert (
         read_json(turned_off / "turned_off.json")["autoUpgrade"]["allowReboot"] is False
@@ -810,14 +886,25 @@ def test_graphql_change_auto_upgrade_with_empty_input(authorized_client, turned_
     )
     assert response.status_code == 200
     assert response.json().get("data") is not None
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["success"] is True
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["message"] is not None
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["code"] == 200
-    assert (
-        response.json()["data"]["changeAutoUpgradeSettings"]["enableAutoUpgrade"]
-        is False
-    )
-    assert response.json()["data"]["changeAutoUpgradeSettings"]["allowReboot"] is False
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"]["success"]
+        is True
+    )
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"]["message"]
+        is not None
+    )
+    assert response.json()["data"]["system"]["changeAutoUpgradeSettings"]["code"] == 200
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"][
+            "enableAutoUpgrade"
+        ]
+        is False
+    )
+    assert (
+        response.json()["data"]["system"]["changeAutoUpgradeSettings"]["allowReboot"]
+        is False
+    )
     assert read_json(turned_off / "turned_off.json")["autoUpgrade"]["enable"] is False
     assert (
         read_json(turned_off / "turned_off.json")["autoUpgrade"]["allowReboot"] is False
@@ -826,12 +913,14 @@ def test_graphql_change_auto_upgrade_with_empty_input(authorized_client, turned_
 
 API_PULL_SYSTEM_CONFIGURATION_MUTATION = """
 mutation testPullSystemConfiguration {
+    system {
         pullRepositoryChanges {
             success
             message
             code
         }
+    }
 }
 """
 
 
@@ -861,9 +950,12 @@ def test_graphql_pull_system_configuration(
 
     assert response.status_code == 200
     assert response.json().get("data") is not None
-    assert response.json()["data"]["pullRepositoryChanges"]["success"] is True
-    assert response.json()["data"]["pullRepositoryChanges"]["message"] is not None
-    assert response.json()["data"]["pullRepositoryChanges"]["code"] == 200
+    assert response.json()["data"]["system"]["pullRepositoryChanges"]["success"] is True
+    assert (
+        response.json()["data"]["system"]["pullRepositoryChanges"]["message"]
+        is not None
+    )
+    assert response.json()["data"]["system"]["pullRepositoryChanges"]["code"] == 200
 
     assert mock_subprocess_popen.call_count == 1
     assert mock_subprocess_popen.call_args[0][0] == ["git", "pull"]
@@ -886,9 +978,14 @@ def test_graphql_pull_system_broken_repo(
 
     assert response.status_code == 200
     assert response.json().get("data") is not None
-    assert response.json()["data"]["pullRepositoryChanges"]["success"] is False
-    assert response.json()["data"]["pullRepositoryChanges"]["message"] is not None
-    assert response.json()["data"]["pullRepositoryChanges"]["code"] == 500
+    assert (
+        response.json()["data"]["system"]["pullRepositoryChanges"]["success"] is False
+    )
+    assert (
+        response.json()["data"]["system"]["pullRepositoryChanges"]["message"]
+        is not None
+    )
+    assert response.json()["data"]["system"]["pullRepositoryChanges"]["code"] == 500
 
     assert mock_broken_service.call_count == 1
     assert mock_os_chdir.call_count == 2
@@ -54,12 +54,14 @@ def mock_subprocess_check_output(mocker):
 
 API_REBUILD_SYSTEM_MUTATION = """
 mutation rebuildSystem {
+    system {
         runSystemRebuild {
             success
             message
             code
         }
+    }
 }
 """
 
 
@@ -86,9 +88,9 @@ def test_graphql_system_rebuild(authorized_client, mock_subprocess_popen):
     )
     assert response.status_code == 200
     assert response.json().get("data") is not None
-    assert response.json()["data"]["runSystemRebuild"]["success"] is True
-    assert response.json()["data"]["runSystemRebuild"]["message"] is not None
-    assert response.json()["data"]["runSystemRebuild"]["code"] == 200
+    assert response.json()["data"]["system"]["runSystemRebuild"]["success"] is True
+    assert response.json()["data"]["system"]["runSystemRebuild"]["message"] is not None
+    assert response.json()["data"]["system"]["runSystemRebuild"]["code"] == 200
     assert mock_subprocess_popen.call_count == 1
     assert mock_subprocess_popen.call_args[0][0] == [
         "systemctl",
@@ -99,12 +101,14 @@ def test_graphql_system_rebuild(authorized_client, mock_subprocess_popen):
 
 API_UPGRADE_SYSTEM_MUTATION = """
 mutation upgradeSystem {
+    system {
         runSystemUpgrade {
             success
             message
             code
         }
+    }
 }
 """
 
 
@@ -131,9 +135,9 @@ def test_graphql_system_upgrade(authorized_client, mock_subprocess_popen):
     )
     assert response.status_code == 200
     assert response.json().get("data") is not None
-    assert response.json()["data"]["runSystemUpgrade"]["success"] is True
-    assert response.json()["data"]["runSystemUpgrade"]["message"] is not None
-    assert response.json()["data"]["runSystemUpgrade"]["code"] == 200
+    assert response.json()["data"]["system"]["runSystemUpgrade"]["success"] is True
+    assert response.json()["data"]["system"]["runSystemUpgrade"]["message"] is not None
+    assert response.json()["data"]["system"]["runSystemUpgrade"]["code"] == 200
     assert mock_subprocess_popen.call_count == 1
     assert mock_subprocess_popen.call_args[0][0] == [
         "systemctl",
@@ -144,12 +148,14 @@ def test_graphql_system_upgrade(authorized_client, mock_subprocess_popen):
 
 API_ROLLBACK_SYSTEM_MUTATION = """
 mutation rollbackSystem {
+    system {
         runSystemRollback {
             success
             message
             code
         }
+    }
 }
 """
 
 
@@ -176,9 +182,9 @@ def test_graphql_system_rollback(authorized_client, mock_subprocess_popen):
     )
     assert response.status_code == 200
     assert response.json().get("data") is not None
-    assert response.json()["data"]["runSystemRollback"]["success"] is True
-    assert response.json()["data"]["runSystemRollback"]["message"] is not None
-    assert response.json()["data"]["runSystemRollback"]["code"] == 200
+    assert response.json()["data"]["system"]["runSystemRollback"]["success"] is True
+    assert response.json()["data"]["system"]["runSystemRollback"]["message"] is not None
+    assert response.json()["data"]["system"]["runSystemRollback"]["code"] == 200
     assert mock_subprocess_popen.call_count == 1
     assert mock_subprocess_popen.call_args[0][0] == [
         "systemctl",
@@ -189,12 +195,14 @@ def test_graphql_system_rollback(authorized_client, mock_subprocess_popen):
 
 API_REBOOT_SYSTEM_MUTATION = """
 mutation system {
+    system {
         rebootSystem {
             success
             message
             code
         }
+    }
 }
 """
 
 
@@ -223,9 +231,9 @@ def test_graphql_reboot_system(authorized_client, mock_subprocess_popen):
     assert response.status_code == 200
     assert response.json().get("data") is not None
 
-    assert response.json()["data"]["rebootSystem"]["success"] is True
-    assert response.json()["data"]["rebootSystem"]["message"] is not None
-    assert response.json()["data"]["rebootSystem"]["code"] == 200
+    assert response.json()["data"]["system"]["rebootSystem"]["success"] is True
+    assert response.json()["data"]["system"]["rebootSystem"]["message"] is not None
+    assert response.json()["data"]["system"]["rebootSystem"]["code"] == 200
 
     assert mock_subprocess_popen.call_count == 1
     assert mock_subprocess_popen.call_args[0][0] == ["reboot"]
@@ -295,6 +295,7 @@ def test_graphql_get_nonexistent_user(
 
 API_CREATE_USERS_MUTATION = """
 mutation createUser($user: UserMutationInput!) {
+    users {
         createUser(user: $user) {
             success
             message
@@ -305,6 +306,7 @@ mutation createUser($user: UserMutationInput!) {
             }
         }
+    }
 }
 """
 
 
@@ -341,12 +343,12 @@ def test_graphql_add_user(authorized_client, one_user, mock_subprocess_popen):
     assert response.status_code == 200
     assert response.json().get("data") is not None
 
-    assert response.json()["data"]["createUser"]["message"] is not None
-    assert response.json()["data"]["createUser"]["code"] == 201
-    assert response.json()["data"]["createUser"]["success"] is True
+    assert response.json()["data"]["users"]["createUser"]["message"] is not None
+    assert response.json()["data"]["users"]["createUser"]["code"] == 201
+    assert response.json()["data"]["users"]["createUser"]["success"] is True
 
-    assert response.json()["data"]["createUser"]["user"]["username"] == "user2"
-    assert response.json()["data"]["createUser"]["user"]["sshKeys"] == []
+    assert response.json()["data"]["users"]["createUser"]["user"]["username"] == "user2"
+    assert response.json()["data"]["users"]["createUser"]["user"]["sshKeys"] == []
 
 
 def test_graphql_add_undefined_settings(
@@ -367,12 +369,12 @@ def test_graphql_add_undefined_settings(
     assert response.status_code == 200
     assert response.json().get("data") is not None
 
-    assert response.json()["data"]["createUser"]["message"] is not None
-    assert response.json()["data"]["createUser"]["code"] == 201
-    assert response.json()["data"]["createUser"]["success"] is True
+    assert response.json()["data"]["users"]["createUser"]["message"] is not None
+    assert response.json()["data"]["users"]["createUser"]["code"] == 201
+    assert response.json()["data"]["users"]["createUser"]["success"] is True
 
-    assert response.json()["data"]["createUser"]["user"]["username"] == "user2"
-    assert response.json()["data"]["createUser"]["user"]["sshKeys"] == []
+    assert response.json()["data"]["users"]["createUser"]["user"]["username"] == "user2"
+    assert response.json()["data"]["users"]["createUser"]["user"]["sshKeys"] == []
 
 
 def test_graphql_add_without_password(
@@ -393,11 +395,11 @@ def test_graphql_add_without_password(
     assert response.status_code == 200
     assert response.json().get("data") is not None
 
-    assert response.json()["data"]["createUser"]["message"] is not None
-    assert response.json()["data"]["createUser"]["code"] == 400
-    assert response.json()["data"]["createUser"]["success"] is False
+    assert response.json()["data"]["users"]["createUser"]["message"] is not None
+    assert response.json()["data"]["users"]["createUser"]["code"] == 400
+    assert response.json()["data"]["users"]["createUser"]["success"] is False
 
-    assert response.json()["data"]["createUser"]["user"] is None
+    assert response.json()["data"]["users"]["createUser"]["user"] is None
 
 
 def test_graphql_add_without_both(authorized_client, one_user, mock_subprocess_popen):
@@ -416,11 +418,11 @@ def test_graphql_add_without_both(authorized_client, one_user, mock_subprocess_p
     assert response.status_code == 200
     assert response.json().get("data") is not None
 
-    assert response.json()["data"]["createUser"]["message"] is not None
-    assert response.json()["data"]["createUser"]["code"] == 400
-    assert response.json()["data"]["createUser"]["success"] is False
+    assert response.json()["data"]["users"]["createUser"]["message"] is not None
+    assert response.json()["data"]["users"]["createUser"]["code"] == 400
+    assert response.json()["data"]["users"]["createUser"]["success"] is False
 
-    assert response.json()["data"]["createUser"]["user"] is None
+    assert response.json()["data"]["users"]["createUser"]["user"] is None
 
 
 @pytest.mark.parametrize("username", invalid_usernames)
@@ -442,11 +444,11 @@ def test_graphql_add_system_username(
     assert response.status_code == 200
     assert response.json().get("data") is not None
 
-    assert response.json()["data"]["createUser"]["message"] is not None
-    assert response.json()["data"]["createUser"]["code"] == 409
-    assert response.json()["data"]["createUser"]["success"] is False
+    assert response.json()["data"]["users"]["createUser"]["message"] is not None
+    assert response.json()["data"]["users"]["createUser"]["code"] == 409
+    assert response.json()["data"]["users"]["createUser"]["success"] is False
 
-    assert response.json()["data"]["createUser"]["user"] is None
+    assert response.json()["data"]["users"]["createUser"]["user"] is None
 
 
 def test_graphql_add_existing_user(authorized_client, one_user, mock_subprocess_popen):
@@ -465,13 +467,13 @@ def test_graphql_add_existing_user(authorized_client, one_user, mock_subprocess_
     assert response.status_code == 200
     assert response.json().get("data") is not None
 
-    assert response.json()["data"]["createUser"]["message"] is not None
-    assert response.json()["data"]["createUser"]["code"] == 409
-    assert response.json()["data"]["createUser"]["success"] is False
+    assert response.json()["data"]["users"]["createUser"]["message"] is not None
+    assert response.json()["data"]["users"]["createUser"]["code"] == 409
+    assert response.json()["data"]["users"]["createUser"]["success"] is False
 
-    assert response.json()["data"]["createUser"]["user"]["username"] == "user1"
+    assert response.json()["data"]["users"]["createUser"]["user"]["username"] == "user1"
     assert (
-        response.json()["data"]["createUser"]["user"]["sshKeys"][0]
+        response.json()["data"]["users"]["createUser"]["user"]["sshKeys"][0]
         == "ssh-rsa KEY user1@pc"
     )
 
@@ -492,13 +494,15 @@ def test_graphql_add_main_user(authorized_client, one_user, mock_subprocess_pope
     assert response.status_code == 200
     assert response.json().get("data") is not None
 
-    assert response.json()["data"]["createUser"]["message"] is not None
-    assert response.json()["data"]["createUser"]["code"] == 409
-    assert response.json()["data"]["createUser"]["success"] is False
+    assert response.json()["data"]["users"]["createUser"]["message"] is not None
+    assert response.json()["data"]["users"]["createUser"]["code"] == 409
+    assert response.json()["data"]["users"]["createUser"]["success"] is False
 
-    assert response.json()["data"]["createUser"]["user"]["username"] == "tester"
+    assert (
+        response.json()["data"]["users"]["createUser"]["user"]["username"] == "tester"
+    )
     assert (
-        response.json()["data"]["createUser"]["user"]["sshKeys"][0]
+        response.json()["data"]["users"]["createUser"]["user"]["sshKeys"][0]
         == "ssh-rsa KEY test@pc"
     )
 
@@ -518,11 +522,11 @@ def test_graphql_add_long_username(authorized_client, one_user, mock_subprocess_
     )
     assert response.json().get("data") is not None
 
-    assert response.json()["data"]["createUser"]["message"] is not None
-    assert response.json()["data"]["createUser"]["code"] == 400
-    assert response.json()["data"]["createUser"]["success"] is False
+    assert response.json()["data"]["users"]["createUser"]["message"] is not None
+    assert response.json()["data"]["users"]["createUser"]["code"] == 400
+    assert response.json()["data"]["users"]["createUser"]["success"] is False
 
-    assert response.json()["data"]["createUser"]["user"] is None
+    assert response.json()["data"]["users"]["createUser"]["user"] is None
 
 
 @pytest.mark.parametrize("username", ["", "1", "фыр", "user1@", "^-^"])
@@ -544,21 +548,23 @@ def test_graphql_add_invalid_username(
     assert response.status_code == 200
     assert response.json().get("data") is not None
 
-    assert response.json()["data"]["createUser"]["message"] is not None
-    assert response.json()["data"]["createUser"]["code"] == 400
-    assert response.json()["data"]["createUser"]["success"] is False
+    assert response.json()["data"]["users"]["createUser"]["message"] is not None
+    assert response.json()["data"]["users"]["createUser"]["code"] == 400
+    assert response.json()["data"]["users"]["createUser"]["success"] is False
 
-    assert response.json()["data"]["createUser"]["user"] is None
+    assert response.json()["data"]["users"]["createUser"]["user"] is None
 
 
 API_DELETE_USER_MUTATION = """
 mutation deleteUser($username: String!) {
+    users {
         deleteUser(username: $username) {
             success
             message
             code
         }
+    }
 }
 """
 
 
@@ -585,9 +591,9 @@ def test_graphql_delete_user(authorized_client, some_users, mock_subprocess_pope
     assert response.status_code == 200
     assert response.json().get("data") is not None
 
-    assert response.json()["data"]["deleteUser"]["code"] == 200
-    assert response.json()["data"]["deleteUser"]["message"] is not None
-    assert response.json()["data"]["deleteUser"]["success"] is True
+    assert response.json()["data"]["users"]["deleteUser"]["code"] == 200
+    assert response.json()["data"]["users"]["deleteUser"]["message"] is not None
+    assert response.json()["data"]["users"]["deleteUser"]["success"] is True
 
 
 @pytest.mark.parametrize("username", ["", "def"])
@@ -604,9 +610,9 @@ def test_graphql_delete_nonexistent_users(
     assert response.status_code == 200
     assert response.json().get("data") is not None
 
-    assert response.json()["data"]["deleteUser"]["code"] == 404
-    assert response.json()["data"]["deleteUser"]["message"] is not None
-    assert response.json()["data"]["deleteUser"]["success"] is False
+    assert response.json()["data"]["users"]["deleteUser"]["code"] == 404
+    assert response.json()["data"]["users"]["deleteUser"]["message"] is not None
+    assert response.json()["data"]["users"]["deleteUser"]["success"] is False
 
 
 @pytest.mark.parametrize("username", invalid_usernames)
@@ -624,11 +630,11 @@ def test_graphql_delete_system_users(
     assert response.json().get("data") is not None
 
     assert (
-        response.json()["data"]["deleteUser"]["code"] == 404
-        or response.json()["data"]["deleteUser"]["code"] == 400
+        response.json()["data"]["users"]["deleteUser"]["code"] == 404
+        or response.json()["data"]["users"]["deleteUser"]["code"] == 400
     )
-    assert response.json()["data"]["deleteUser"]["message"] is not None
-    assert response.json()["data"]["deleteUser"]["success"] is False
+    assert response.json()["data"]["users"]["deleteUser"]["message"] is not None
+    assert response.json()["data"]["users"]["deleteUser"]["success"] is False
 
 
 def test_graphql_delete_main_user(authorized_client, some_users, mock_subprocess_popen):
@@ -642,13 +648,14 @@ def test_graphql_delete_main_user(authorized_client, some_users, mock_subprocess
     assert response.status_code == 200
     assert response.json().get("data") is not None
 
-    assert response.json()["data"]["deleteUser"]["code"] == 400
-    assert response.json()["data"]["deleteUser"]["message"] is not None
-    assert response.json()["data"]["deleteUser"]["success"] is False
+    assert response.json()["data"]["users"]["deleteUser"]["code"] == 400
+    assert response.json()["data"]["users"]["deleteUser"]["message"] is not None
+    assert response.json()["data"]["users"]["deleteUser"]["success"] is False
 
 
 API_UPDATE_USER_MUTATION = """
 mutation updateUser($user: UserMutationInput!) {
+    users {
         updateUser(user: $user) {
             success
             message
@@ -659,6 +666,7 @@ mutation updateUser($user: UserMutationInput!) {
             }
         }
+    }
 }
 """
 
 
@@ -695,12 +703,12 @@ def test_graphql_update_user(authorized_client, some_users, mock_subprocess_pope
     assert response.status_code == 200
     assert response.json().get("data") is not None
 
-    assert response.json()["data"]["updateUser"]["code"] == 200
-    assert response.json()["data"]["updateUser"]["message"] is not None
-    assert response.json()["data"]["updateUser"]["success"] is True
+    assert response.json()["data"]["users"]["updateUser"]["code"] == 200
+    assert response.json()["data"]["users"]["updateUser"]["message"] is not None
+    assert response.json()["data"]["users"]["updateUser"]["success"] is True
 
-    assert response.json()["data"]["updateUser"]["user"]["username"] == "user1"
-    assert response.json()["data"]["updateUser"]["user"]["sshKeys"] == [
+    assert response.json()["data"]["users"]["updateUser"]["user"]["username"] == "user1"
+    assert response.json()["data"]["users"]["updateUser"]["user"]["sshKeys"] == [
         "ssh-rsa KEY user1@pc"
     ]
     assert mock_subprocess_popen.call_count == 1
@@ -724,9 +732,9 @@ def test_graphql_update_nonexistent_user(
     assert response.status_code == 200
     assert response.json().get("data") is not None
 
-    assert response.json()["data"]["updateUser"]["code"] == 404
-    assert response.json()["data"]["updateUser"]["message"] is not None
-    assert response.json()["data"]["updateUser"]["success"] is False
+    assert response.json()["data"]["users"]["updateUser"]["code"] == 404
+    assert response.json()["data"]["users"]["updateUser"]["message"] is not None
+    assert response.json()["data"]["users"]["updateUser"]["success"] is False
 
-    assert response.json()["data"]["updateUser"]["user"] is None
+    assert response.json()["data"]["users"]["updateUser"]["user"] is None
     assert mock_subprocess_popen.call_count == 1
@@ -80,6 +80,29 @@ def test_jobs(jobs_with_one_job):
     jobsmodule.JOB_EXPIRATION_SECONDS = backup
 
 
+def test_finishing_equals_100(jobs_with_one_job):
+    jobs = jobs_with_one_job
+    test_job = jobs.get_jobs()[0]
+    assert not jobs.is_busy()
+    assert test_job.progress != 100
+
+    jobs.update(job=test_job, status=JobStatus.FINISHED)
+
+    assert test_job.progress == 100
+
+
+def test_finishing_equals_100_unless_stated_otherwise(jobs_with_one_job):
+    jobs = jobs_with_one_job
+    test_job = jobs.get_jobs()[0]
+    assert not jobs.is_busy()
+    assert test_job.progress != 100
+    assert test_job.progress != 23
+
+    jobs.update(job=test_job, status=JobStatus.FINISHED, progress=23)
+
+    assert test_job.progress == 23
+
+
||||
@pytest.fixture
|
||||
def jobs():
|
||||
j = Jobs()
|
||||
|
|
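
The two new tests pin down a defaulting rule in Jobs.update(): finishing a job implies 100% progress unless a progress value is passed explicitly. A minimal sketch of that rule, assuming the Job and JobStatus types from selfprivacy_api.jobs; the real update() also persists the job:

def update(job, status, progress=None):
    # Finishing implies 100% progress unless the caller states otherwise.
    if progress is None and status == JobStatus.FINISHED:
        progress = 100
    if progress is not None:
        job.progress = progress
    job.status = status
    return job
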
33
tests/test_model_storage.py
Normal file

@ -0,0 +1,33 @@
import pytest

from pydantic import BaseModel
from datetime import datetime
from typing import Optional

from selfprivacy_api.utils.redis_model_storage import store_model_as_hash, hash_as_model
from selfprivacy_api.utils.redis_pool import RedisPool

TEST_KEY = "model_storage"
redis = RedisPool().get_connection()


@pytest.fixture()
def clean_redis():
    redis.delete(TEST_KEY)


class DummyModel(BaseModel):
    name: str
    date: Optional[datetime]


def test_store_retrieve():
    model = DummyModel(name="test", date=datetime.now())
    store_model_as_hash(redis, TEST_KEY, model)
    assert hash_as_model(redis, TEST_KEY, DummyModel) == model


def test_store_retrieve_none():
    model = DummyModel(name="test", date=None)
    store_model_as_hash(redis, TEST_KEY, model)
    assert hash_as_model(redis, TEST_KEY, DummyModel) == model
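
The helpers under test round-trip a pydantic model through a Redis hash. A minimal sketch of plausible implementations, assuming pydantic v1 and a redis-py connection created with decode_responses=True; the real code lives in selfprivacy_api.utils.redis_model_storage:

from datetime import datetime

def store_model_as_hash(redis, key, model):
    # Store each field as a string; skip Nones so optional fields
    # stay absent from the hash.
    for field, value in model.dict().items():
        if value is None:
            continue
        if isinstance(value, datetime):
            value = value.isoformat()
        redis.hset(key, field, str(value))

def hash_as_model(redis, key, model_class):
    # hgetall returns {} for a missing key; pydantic coerces the
    # string fields (including ISO dates) back into typed attributes.
    fields = redis.hgetall(key)
    if not fields:
        return None
    return model_class.parse_obj(fields)
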
@ -1,506 +0,0 @@
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
import json
import pytest
from selfprivacy_api.restic_controller import ResticStates


def read_json(file_path):
    with open(file_path, "r") as f:
        return json.load(f)


MOCKED_SNAPSHOTS = [
    {
        "time": "2021-12-06T09:05:04.224685677+03:00",
        "tree": "b76152d1e716d86d420407ead05d9911f2b6d971fe1589c12b63e4de65b14d4e",
        "paths": ["/var"],
        "hostname": "test-host",
        "username": "root",
        "id": "f96b428f1ca1252089ea3e25cd8ee33e63fb24615f1cc07559ba907d990d81c5",
        "short_id": "f96b428f",
    },
    {
        "time": "2021-12-08T07:42:06.998894055+03:00",
        "parent": "f96b428f1ca1252089ea3e25cd8ee33e63fb24615f1cc07559ba907d990d81c5",
        "tree": "8379b4fdc9ee3e9bb7c322f632a7bed9fc334b0258abbf4e7134f8fe5b3d61b0",
        "paths": ["/var"],
        "hostname": "test-host",
        "username": "root",
        "id": "db96b36efec97e5ba385099b43f9062d214c7312c20138aee7b8bd2c6cd8995a",
        "short_id": "db96b36e",
    },
]


class ResticControllerMock:
    snapshot_list = MOCKED_SNAPSHOTS
    state = ResticStates.INITIALIZED
    progress = 0
    error_message = None


@pytest.fixture
def mock_restic_controller(mocker):
    mock = mocker.patch(
        "selfprivacy_api.rest.services.ResticController",
        autospec=True,
        return_value=ResticControllerMock,
    )
    return mock


class ResticControllerMockNoKey:
    snapshot_list = []
    state = ResticStates.NO_KEY
    progress = 0
    error_message = None


@pytest.fixture
def mock_restic_controller_no_key(mocker):
    mock = mocker.patch(
        "selfprivacy_api.rest.services.ResticController",
        autospec=True,
        return_value=ResticControllerMockNoKey,
    )
    return mock


class ResticControllerNotInitialized:
    snapshot_list = []
    state = ResticStates.NOT_INITIALIZED
    progress = 0
    error_message = None


@pytest.fixture
def mock_restic_controller_not_initialized(mocker):
    mock = mocker.patch(
        "selfprivacy_api.rest.services.ResticController",
        autospec=True,
        return_value=ResticControllerNotInitialized,
    )
    return mock


class ResticControllerInitializing:
    snapshot_list = []
    state = ResticStates.INITIALIZING
    progress = 0
    error_message = None


@pytest.fixture
def mock_restic_controller_initializing(mocker):
    mock = mocker.patch(
        "selfprivacy_api.rest.services.ResticController",
        autospec=True,
        return_value=ResticControllerInitializing,
    )
    return mock


class ResticControllerBackingUp:
    snapshot_list = MOCKED_SNAPSHOTS
    state = ResticStates.BACKING_UP
    progress = 0.42
    error_message = None


@pytest.fixture
def mock_restic_controller_backing_up(mocker):
    mock = mocker.patch(
        "selfprivacy_api.rest.services.ResticController",
        autospec=True,
        return_value=ResticControllerBackingUp,
    )
    return mock


class ResticControllerError:
    snapshot_list = MOCKED_SNAPSHOTS
    state = ResticStates.ERROR
    progress = 0
    error_message = "Error message"


@pytest.fixture
def mock_restic_controller_error(mocker):
    mock = mocker.patch(
        "selfprivacy_api.rest.services.ResticController",
        autospec=True,
        return_value=ResticControllerError,
    )
    return mock


class ResticControllerRestoring:
    snapshot_list = MOCKED_SNAPSHOTS
    state = ResticStates.RESTORING
    progress = 0
    error_message = None


@pytest.fixture
def mock_restic_controller_restoring(mocker):
    mock = mocker.patch(
        "selfprivacy_api.rest.services.ResticController",
        autospec=True,
        return_value=ResticControllerRestoring,
    )
    return mock


@pytest.fixture
def mock_restic_tasks(mocker):
    mock = mocker.patch("selfprivacy_api.rest.services.restic_tasks", autospec=True)
    return mock


@pytest.fixture
def undefined_settings(mocker, datadir):
    mocker.patch("selfprivacy_api.utils.USERDATA_FILE", new=datadir / "undefined.json")
    assert "backup" not in read_json(datadir / "undefined.json")
    return datadir


@pytest.fixture
def some_settings(mocker, datadir):
    mocker.patch(
        "selfprivacy_api.utils.USERDATA_FILE", new=datadir / "some_values.json"
    )
    assert "backup" in read_json(datadir / "some_values.json")
    assert read_json(datadir / "some_values.json")["backup"]["provider"] == "BACKBLAZE"
    assert read_json(datadir / "some_values.json")["backup"]["accountId"] == "ID"
    assert read_json(datadir / "some_values.json")["backup"]["accountKey"] == "KEY"
    assert read_json(datadir / "some_values.json")["backup"]["bucket"] == "BUCKET"
    return datadir


@pytest.fixture
def no_values(mocker, datadir):
    mocker.patch("selfprivacy_api.utils.USERDATA_FILE", new=datadir / "no_values.json")
    assert "backup" in read_json(datadir / "no_values.json")
    assert "provider" not in read_json(datadir / "no_values.json")["backup"]
    assert "accountId" not in read_json(datadir / "no_values.json")["backup"]
    assert "accountKey" not in read_json(datadir / "no_values.json")["backup"]
    assert "bucket" not in read_json(datadir / "no_values.json")["backup"]
    return datadir


def test_get_snapshots_unauthorized(client, mock_restic_controller, mock_restic_tasks):
    response = client.get("/services/restic/backup/list")
    assert response.status_code == 401


def test_get_snapshots(authorized_client, mock_restic_controller, mock_restic_tasks):
    response = authorized_client.get("/services/restic/backup/list")
    assert response.status_code == 200
    assert response.json() == MOCKED_SNAPSHOTS


def test_create_backup_unauthorized(client, mock_restic_controller, mock_restic_tasks):
    response = client.put("/services/restic/backup/create")
    assert response.status_code == 401


def test_create_backup(authorized_client, mock_restic_controller, mock_restic_tasks):
    response = authorized_client.put("/services/restic/backup/create")
    assert response.status_code == 200
    assert mock_restic_tasks.start_backup.call_count == 1


def test_create_backup_without_key(
    authorized_client, mock_restic_controller_no_key, mock_restic_tasks
):
    response = authorized_client.put("/services/restic/backup/create")
    assert response.status_code == 400
    assert mock_restic_tasks.start_backup.call_count == 0


def test_create_backup_initializing(
    authorized_client, mock_restic_controller_initializing, mock_restic_tasks
):
    response = authorized_client.put("/services/restic/backup/create")
    assert response.status_code == 400
    assert mock_restic_tasks.start_backup.call_count == 0


def test_create_backup_backing_up(
    authorized_client, mock_restic_controller_backing_up, mock_restic_tasks
):
    response = authorized_client.put("/services/restic/backup/create")
    assert response.status_code == 409
    assert mock_restic_tasks.start_backup.call_count == 0


def test_check_backup_status_unauthorized(
    client, mock_restic_controller, mock_restic_tasks
):
    response = client.get("/services/restic/backup/status")
    assert response.status_code == 401


def test_check_backup_status(
    authorized_client, mock_restic_controller, mock_restic_tasks
):
    response = authorized_client.get("/services/restic/backup/status")
    assert response.status_code == 200
    assert response.json() == {
        "status": "INITIALIZED",
        "progress": 0,
        "error_message": None,
    }


def test_check_backup_status_no_key(
    authorized_client, mock_restic_controller_no_key, mock_restic_tasks
):
    response = authorized_client.get("/services/restic/backup/status")
    assert response.status_code == 200
    assert response.json() == {
        "status": "NO_KEY",
        "progress": 0,
        "error_message": None,
    }


def test_check_backup_status_not_initialized(
    authorized_client, mock_restic_controller_not_initialized, mock_restic_tasks
):
    response = authorized_client.get("/services/restic/backup/status")
    assert response.status_code == 200
    assert response.json() == {
        "status": "NOT_INITIALIZED",
        "progress": 0,
        "error_message": None,
    }


def test_check_backup_status_initializing(
    authorized_client, mock_restic_controller_initializing, mock_restic_tasks
):
    response = authorized_client.get("/services/restic/backup/status")
    assert response.status_code == 200
    assert response.json() == {
        "status": "INITIALIZING",
        "progress": 0,
        "error_message": None,
    }


def test_check_backup_status_backing_up(
    authorized_client, mock_restic_controller_backing_up
):
    response = authorized_client.get("/services/restic/backup/status")
    assert response.status_code == 200
    assert response.json() == {
        "status": "BACKING_UP",
        "progress": 0.42,
        "error_message": None,
    }


def test_check_backup_status_error(
    authorized_client, mock_restic_controller_error, mock_restic_tasks
):
    response = authorized_client.get("/services/restic/backup/status")
    assert response.status_code == 200
    assert response.json() == {
        "status": "ERROR",
        "progress": 0,
        "error_message": "Error message",
    }


def test_check_backup_status_restoring(
    authorized_client, mock_restic_controller_restoring, mock_restic_tasks
):
    response = authorized_client.get("/services/restic/backup/status")
    assert response.status_code == 200
    assert response.json() == {
        "status": "RESTORING",
        "progress": 0,
        "error_message": None,
    }


def test_reload_unauthenticated(client, mock_restic_controller, mock_restic_tasks):
    response = client.get("/services/restic/backup/reload")
    assert response.status_code == 401


def test_backup_reload(authorized_client, mock_restic_controller, mock_restic_tasks):
    response = authorized_client.get("/services/restic/backup/reload")
    assert response.status_code == 200
    assert mock_restic_tasks.load_snapshots.call_count == 1


def test_backup_restore_unauthorized(client, mock_restic_controller, mock_restic_tasks):
    response = client.put("/services/restic/backup/restore")
    assert response.status_code == 401


def test_backup_restore_without_backup_id(
    authorized_client, mock_restic_controller, mock_restic_tasks
):
    response = authorized_client.put("/services/restic/backup/restore", json={})
    assert response.status_code == 422
    assert mock_restic_tasks.restore_from_backup.call_count == 0


def test_backup_restore_with_nonexistent_backup_id(
    authorized_client, mock_restic_controller, mock_restic_tasks
):
    response = authorized_client.put(
        "/services/restic/backup/restore", json={"backupId": "nonexistent"}
    )
    assert response.status_code == 404
    assert mock_restic_tasks.restore_from_backup.call_count == 0


def test_backup_restore_when_no_key(
    authorized_client, mock_restic_controller_no_key, mock_restic_tasks
):
    response = authorized_client.put(
        "/services/restic/backup/restore", json={"backupId": "f96b428f"}
    )
    assert response.status_code == 400
    assert mock_restic_tasks.restore_from_backup.call_count == 0


def test_backup_restore_when_not_initialized(
    authorized_client, mock_restic_controller_not_initialized, mock_restic_tasks
):
    response = authorized_client.put(
        "/services/restic/backup/restore", json={"backupId": "f96b428f"}
    )
    assert response.status_code == 400
    assert mock_restic_tasks.restore_from_backup.call_count == 0


def test_backup_restore_when_initializing(
    authorized_client, mock_restic_controller_initializing, mock_restic_tasks
):
    response = authorized_client.put(
        "/services/restic/backup/restore", json={"backupId": "f96b428f"}
    )
    assert response.status_code == 400
    assert mock_restic_tasks.restore_from_backup.call_count == 0


def test_backup_restore_when_backing_up(
    authorized_client, mock_restic_controller_backing_up, mock_restic_tasks
):
    response = authorized_client.put(
        "/services/restic/backup/restore", json={"backupId": "f96b428f"}
    )
    assert response.status_code == 409
    assert mock_restic_tasks.restore_from_backup.call_count == 0


def test_backup_restore_when_restoring(
    authorized_client, mock_restic_controller_restoring, mock_restic_tasks
):
    response = authorized_client.put(
        "/services/restic/backup/restore", json={"backupId": "f96b428f"}
    )
    assert response.status_code == 409
    assert mock_restic_tasks.restore_from_backup.call_count == 0


def test_backup_restore_when_error(
    authorized_client, mock_restic_controller_error, mock_restic_tasks
):
    response = authorized_client.put(
        "/services/restic/backup/restore", json={"backupId": "f96b428f"}
    )
    assert response.status_code == 200
    assert mock_restic_tasks.restore_from_backup.call_count == 1


def test_backup_restore(authorized_client, mock_restic_controller, mock_restic_tasks):
    response = authorized_client.put(
        "/services/restic/backup/restore", json={"backupId": "f96b428f"}
    )
    assert response.status_code == 200
    assert mock_restic_tasks.restore_from_backup.call_count == 1


def test_set_backblaze_config_unauthorized(
    client, mock_restic_controller, mock_restic_tasks, some_settings
):
    response = client.put("/services/restic/backblaze/config")
    assert response.status_code == 401
    assert mock_restic_tasks.update_keys_from_userdata.call_count == 0


def test_set_backblaze_config_without_arguments(
    authorized_client, mock_restic_controller, mock_restic_tasks, some_settings
):
    response = authorized_client.put("/services/restic/backblaze/config")
    assert response.status_code == 422
    assert mock_restic_tasks.update_keys_from_userdata.call_count == 0


def test_set_backblaze_config_without_all_values(
    authorized_client, mock_restic_controller, mock_restic_tasks, some_settings
):
    response = authorized_client.put(
        "/services/restic/backblaze/config",
        json={"accountId": "123", "applicationKey": "456"},
    )
    assert response.status_code == 422
    assert mock_restic_tasks.update_keys_from_userdata.call_count == 0


def test_set_backblaze_config(
    authorized_client, mock_restic_controller, mock_restic_tasks, some_settings
):
    response = authorized_client.put(
        "/services/restic/backblaze/config",
        json={"accountId": "123", "accountKey": "456", "bucket": "789"},
    )
    assert response.status_code == 200
    assert mock_restic_tasks.update_keys_from_userdata.call_count == 1
    assert read_json(some_settings / "some_values.json")["backup"] == {
        "provider": "BACKBLAZE",
        "accountId": "123",
        "accountKey": "456",
        "bucket": "789",
    }


def test_set_backblaze_config_on_undefined(
    authorized_client, mock_restic_controller, mock_restic_tasks, undefined_settings
):
    response = authorized_client.put(
        "/services/restic/backblaze/config",
        json={"accountId": "123", "accountKey": "456", "bucket": "789"},
    )
    assert response.status_code == 200
    assert mock_restic_tasks.update_keys_from_userdata.call_count == 1
    assert read_json(undefined_settings / "undefined.json")["backup"] == {
        "provider": "BACKBLAZE",
        "accountId": "123",
        "accountKey": "456",
        "bucket": "789",
    }


def test_set_backblaze_config_on_no_values(
    authorized_client, mock_restic_controller, mock_restic_tasks, no_values
):
    response = authorized_client.put(
        "/services/restic/backblaze/config",
        json={"accountId": "123", "accountKey": "456", "bucket": "789"},
    )
    assert response.status_code == 200
    assert mock_restic_tasks.update_keys_from_userdata.call_count == 1
    assert read_json(no_values / "no_values.json")["backup"] == {
        "provider": "BACKBLAZE",
        "accountId": "123",
        "accountKey": "456",
        "bucket": "789",
    }
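
The deleted file above repeated one stub class and one fixture per controller state. For reference only, a parametrized factory, hypothetical and never part of the repo, would express the same mocking pattern more compactly:

import pytest

def make_controller_stub(state, snapshots, progress=0, error_message=None):
    # Build a stand-in exposing the four attributes the REST endpoints read.
    return type(
        "ResticControllerStub",
        (),
        {
            "snapshot_list": snapshots,
            "state": state,
            "progress": progress,
            "error_message": error_message,
        },
    )

@pytest.fixture
def restic_controller_factory(mocker):
    def patch_controller(state, snapshots=None, progress=0, error_message=None):
        return mocker.patch(
            "selfprivacy_api.rest.services.ResticController",
            autospec=True,
            return_value=make_controller_stub(
                state, snapshots or [], progress, error_message
            ),
        )

    return patch_controller
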
89
tests/test_services.py
Normal file

@ -0,0 +1,89 @@
"""
|
||||
Tests for generic service methods
|
||||
"""
|
||||
from pytest import raises
|
||||
|
||||
from selfprivacy_api.services.bitwarden import Bitwarden
|
||||
from selfprivacy_api.services.pleroma import Pleroma
|
||||
from selfprivacy_api.services.owned_path import OwnedPath
|
||||
from selfprivacy_api.services.generic_service_mover import FolderMoveNames
|
||||
|
||||
from selfprivacy_api.services.test_service import DummyService
|
||||
from selfprivacy_api.services.service import Service, ServiceStatus, StoppedService
|
||||
from selfprivacy_api.utils.waitloop import wait_until_true
|
||||
|
||||
from tests.test_graphql.test_backup import raw_dummy_service
|
||||
|
||||
|
||||
def test_unimplemented_folders_raises():
|
||||
with raises(NotImplementedError):
|
||||
Service.get_folders()
|
||||
with raises(NotImplementedError):
|
||||
Service.get_owned_folders()
|
||||
|
||||
class OurDummy(DummyService, folders=["testydir", "dirtessimo"]):
|
||||
pass
|
||||
|
||||
owned_folders = OurDummy.get_owned_folders()
|
||||
assert owned_folders is not None
|
||||
|
||||
|
||||
def test_service_stopper(raw_dummy_service):
|
||||
dummy: Service = raw_dummy_service
|
||||
dummy.set_delay(0.3)
|
||||
|
||||
assert dummy.get_status() == ServiceStatus.ACTIVE
|
||||
|
||||
with StoppedService(dummy) as stopped_dummy:
|
||||
assert stopped_dummy.get_status() == ServiceStatus.INACTIVE
|
||||
assert dummy.get_status() == ServiceStatus.INACTIVE
|
||||
|
||||
assert dummy.get_status() == ServiceStatus.ACTIVE
|
||||
|
||||
|
||||
def test_delayed_start_stop(raw_dummy_service):
|
||||
dummy = raw_dummy_service
|
||||
dummy.set_delay(0.3)
|
||||
|
||||
dummy.stop()
|
||||
assert dummy.get_status() == ServiceStatus.DEACTIVATING
|
||||
wait_until_true(lambda: dummy.get_status() == ServiceStatus.INACTIVE)
|
||||
assert dummy.get_status() == ServiceStatus.INACTIVE
|
||||
|
||||
dummy.start()
|
||||
assert dummy.get_status() == ServiceStatus.ACTIVATING
|
||||
wait_until_true(lambda: dummy.get_status() == ServiceStatus.ACTIVE)
|
||||
assert dummy.get_status() == ServiceStatus.ACTIVE
|
||||
|
||||
|
||||
def test_owned_folders_from_not_owned():
|
||||
assert Bitwarden.get_owned_folders() == [
|
||||
OwnedPath(
|
||||
path=folder,
|
||||
group="vaultwarden",
|
||||
owner="vaultwarden",
|
||||
)
|
||||
for folder in Bitwarden.get_folders()
|
||||
]
|
||||
|
||||
|
||||
def test_paths_from_owned_paths():
|
||||
assert len(Pleroma.get_folders()) == 2
|
||||
assert Pleroma.get_folders() == [
|
||||
ownedpath.path for ownedpath in Pleroma.get_owned_folders()
|
||||
]
|
||||
|
||||
|
||||
def test_foldermoves_from_ownedpaths():
|
||||
owned = OwnedPath(
|
||||
path="var/lib/bitwarden",
|
||||
group="vaultwarden",
|
||||
owner="vaultwarden",
|
||||
)
|
||||
|
||||
assert FolderMoveNames.from_owned_path(owned) == FolderMoveNames(
|
||||
name="bitwarden",
|
||||
bind_location="var/lib/bitwarden",
|
||||
group="vaultwarden",
|
||||
owner="vaultwarden",
|
||||
)
|
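
test_service_stopper relies on StoppedService acting as a context manager: stop the service on entry, restart it on exit. A sketch of that behaviour, assuming the Service API used in this file; the real class lives in selfprivacy_api.services.service:

class StoppedService:
    def __init__(self, service: Service):
        self.service = service

    def __enter__(self) -> Service:
        self.service.stop()
        # Block until the service reports INACTIVE, as the test expects.
        wait_until_true(lambda: self.service.get_status() == ServiceStatus.INACTIVE)
        return self.service

    def __exit__(self, exc_type, exc_value, traceback):
        self.service.start()
        wait_until_true(lambda: self.service.get_status() == ServiceStatus.ACTIVE)
        return False  # never swallow exceptions
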