refactor: Use singleton metaclass for all singleton classes
parent 0a09a338b8
commit 8cdacb73dd
@@ -43,7 +43,7 @@ def job_to_api_job(job: Job) -> ApiJob:
 
 def get_api_job_by_id(job_id: str) -> typing.Optional[ApiJob]:
     """Get a job for GraphQL by its ID."""
-    job = Jobs.get_instance().get_job(job_id)
+    job = Jobs.get_job(job_id)
     if job is None:
         return None
     return job_to_api_job(job)
@@ -14,7 +14,7 @@ class JobMutations:
     @strawberry.mutation(permission_classes=[IsAuthenticated])
     def remove_job(self, job_id: str) -> GenericMutationReturn:
         """Remove a job from the queue"""
-        result = Jobs.get_instance().remove_by_uid(job_id)
+        result = Jobs.remove_by_uid(job_id)
         if result:
             return GenericMutationReturn(
                 success=True,
@@ -16,9 +16,9 @@ class Job:
     @strawberry.field
     def get_jobs(self) -> typing.List[ApiJob]:
 
-        Jobs.get_instance().get_jobs()
+        Jobs.get_jobs()
 
-        return [job_to_api_job(job) for job in Jobs.get_instance().get_jobs()]
+        return [job_to_api_job(job) for job in Jobs.get_jobs()]
 
     @strawberry.field
     def get_job(self, job_id: str) -> typing.Optional[ApiJob]:
@@ -17,10 +17,7 @@ A job is a dictionary with the following keys:
 import typing
 import datetime
 from uuid import UUID
-import asyncio
 import json
-import os
-import time
 import uuid
 from enum import Enum
 
@@ -64,29 +61,6 @@ class Jobs:
     Jobs class.
     """
 
-    __instance = None
-
-    @staticmethod
-    def get_instance():
-        """
-        Singleton method.
-        """
-        if Jobs.__instance is None:
-            Jobs()
-            if Jobs.__instance is None:
-                raise Exception("Couldn't init Jobs singleton!")
-            return Jobs.__instance
-        return Jobs.__instance
-
-    def __init__(self):
-        """
-        Initialize the jobs list.
-        """
-        if Jobs.__instance is not None:
-            raise Exception("This class is a singleton!")
-        else:
-            Jobs.__instance = self
-
     @staticmethod
     def reset() -> None:
         """
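
Note that Jobs does not adopt the new metaclass: its singleton plumbing is deleted outright and, as the next hunk shows, its methods become class-level staticmethods. A minimal sketch of the resulting calling convention, reusing only signatures visible in this commit (the argument values are invented for illustration):

from selfprivacy_api.jobs import JobStatus, Jobs

# Create a job and push a progress update; no get_instance() anywhere.
job = Jobs.add(
    type_id="test",
    name="Test job",
    description="This is a test job.",
)
Jobs.update(job=job, status=JobStatus.RUNNING, progress=50)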
@@ -130,13 +104,15 @@ class Jobs:
             user_data["jobs"] = [json.loads(job.json())]
         return job
 
-    def remove(self, job: Job) -> None:
+    @staticmethod
+    def remove(job: Job) -> None:
         """
         Remove a job from the jobs list.
         """
-        self.remove_by_uid(str(job.uid))
+        Jobs.remove_by_uid(str(job.uid))
 
-    def remove_by_uid(self, job_uuid: str) -> bool:
+    @staticmethod
+    def remove_by_uid(job_uuid: str) -> bool:
         """
         Remove a job from the jobs list.
         """
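
With both methods static, remove() can no longer go through self and instead delegates through the class name. A short sketch of the now-equivalent calls (assuming job is an existing Job; the boolean return is inferred from the -> bool annotation above):

Jobs.remove(job)                        # delegates to Jobs.remove_by_uid internally
ok = Jobs.remove_by_uid(str(job.uid))   # bool, presumably True when a job was removed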
@@ -5,7 +5,7 @@ from selfprivacy_api.jobs import JobStatus, Jobs
 
 @huey.task()
 def test_job():
-    job = Jobs.get_instance().add(
+    job = Jobs.add(
         type_id="test",
         name="Test job",
         description="This is a test job.",
@@ -14,42 +14,42 @@ def test_job():
         progress=0,
     )
     time.sleep(5)
-    Jobs.get_instance().update(
+    Jobs.update(
         job=job,
         status=JobStatus.RUNNING,
         status_text="Performing pre-move checks...",
         progress=5,
     )
     time.sleep(5)
-    Jobs.get_instance().update(
+    Jobs.update(
         job=job,
         status=JobStatus.RUNNING,
         status_text="Performing pre-move checks...",
         progress=10,
     )
     time.sleep(5)
-    Jobs.get_instance().update(
+    Jobs.update(
         job=job,
         status=JobStatus.RUNNING,
         status_text="Performing pre-move checks...",
         progress=15,
     )
     time.sleep(5)
-    Jobs.get_instance().update(
+    Jobs.update(
         job=job,
         status=JobStatus.RUNNING,
         status_text="Performing pre-move checks...",
         progress=20,
     )
     time.sleep(5)
-    Jobs.get_instance().update(
+    Jobs.update(
         job=job,
         status=JobStatus.RUNNING,
         status_text="Performing pre-move checks...",
         progress=25,
     )
     time.sleep(5)
-    Jobs.get_instance().update(
+    Jobs.update(
         job=job,
         status=JobStatus.FINISHED,
         status_text="Job finished.",
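
The six update calls above all share one shape, which makes the Jobs.update contract easy to read off; an equivalent compressed form, as an illustration only (the commit keeps the unrolled version):

for progress in (5, 10, 15, 20, 25):
    time.sleep(5)
    Jobs.update(
        job=job,
        status=JobStatus.RUNNING,
        status_text="Performing pre-move checks...",
        progress=progress,
    )
time.sleep(5)
Jobs.update(job=job, status=JobStatus.FINISHED, status_text="Job finished.")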
@@ -8,7 +8,9 @@ at api.skippedMigrations in userdata.json and populating it
 with IDs of the migrations to skip.
 Adding DISABLE_ALL to that array disables the migrations module entirely.
 """
-from selfprivacy_api.migrations.check_for_failed_binds_migration import CheckForFailedBindsMigration
+from selfprivacy_api.migrations.check_for_failed_binds_migration import (
+    CheckForFailedBindsMigration,
+)
 from selfprivacy_api.utils import ReadUserData
 from selfprivacy_api.migrations.fix_nixos_config_branch import FixNixosConfigBranch
 from selfprivacy_api.migrations.create_tokens_json import CreateTokensJson
@@ -15,7 +15,7 @@ class CheckForFailedBindsMigration(Migration):
 
     def is_migration_needed(self):
         try:
-            jobs = Jobs.get_instance().get_jobs()
+            jobs = Jobs.get_jobs()
             # If there is a job with type_id "migrations.migrate_to_binds" and status is not "FINISHED",
             # then migration is needed and job is deleted
             for job in jobs:
@@ -33,13 +33,13 @@ class CheckForFailedBindsMigration(Migration):
         # Get info about existing volumes
         # Write info about volumes to userdata.json
         try:
-            jobs = Jobs.get_instance().get_jobs()
+            jobs = Jobs.get_jobs()
             for job in jobs:
                 if (
                     job.type_id == "migrations.migrate_to_binds"
                     and job.status != JobStatus.FINISHED
                 ):
-                    Jobs.get_instance().remove(job)
+                    Jobs.remove(job)
             with WriteUserData() as userdata:
                 userdata["useBinds"] = False
             print("Done")
@@ -7,6 +7,7 @@ from threading import Lock
 from enum import Enum
 import portalocker
 from selfprivacy_api.utils import ReadUserData
+from selfprivacy_api.utils.singleton_metaclass import SingletonMetaclass
 
 
 class ResticStates(Enum):
@@ -21,7 +22,7 @@ class ResticStates(Enum):
     INITIALIZING = 6
 
 
-class ResticController:
+class ResticController(metaclass=SingletonMetaclass):
     """
     States in wich the restic_controller may be
     - no backblaze key
@@ -35,16 +36,8 @@ class ResticController:
     Current state can be fetched with get_state()
     """
 
-    _instance = None
-    _lock = Lock()
     _initialized = False
 
-    def __new__(cls):
-        if not cls._instance:
-            with cls._lock:
-                cls._instance = super(ResticController, cls).__new__(cls)
-        return cls._instance
-
     def __init__(self):
         if self._initialized:
             return
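
One reading of why the metaclass is safer here (not stated in the commit message): the deleted __new__ tested cls._instance before taking the lock, so two threads could both see None and construct the controller twice, whereas SingletonMetaclass acquires the lock before checking. A standalone sketch of the two orderings:

from threading import Lock

_lock = Lock()
_instances = {}


def get_singleton_unsafe(cls):
    # Old-style ordering (as in the deleted __new__): the membership test
    # runs outside the lock, so two threads can both see "missing" and
    # both construct, one instance silently overwriting the other.
    if cls not in _instances:
        with _lock:
            _instances[cls] = cls()
    return _instances[cls]


def get_singleton_safe(cls):
    # Metaclass-style ordering: take the lock first, then test, so at most
    # one thread ever constructs the instance.
    with _lock:
        if cls not in _instances:
            _instances[cls] = cls()
    return _instances[cls]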
@@ -144,7 +144,7 @@ class Bitwarden(Service):
     ]
 
     def move_to_volume(self, volume: BlockDevice) -> Job:
-        job = Jobs.get_instance().add(
+        job = Jobs.add(
             type_id="services.bitwarden.move",
             name="Move Bitwarden",
             description=f"Moving Bitwarden data to {volume.name}",
@@ -29,7 +29,7 @@ def move_service(
     userdata_location: str,
 ):
     """Move a service to another volume."""
-    job = Jobs.get_instance().update(
+    job = Jobs.update(
         job=job,
         status_text="Performing pre-move checks...",
         status=JobStatus.RUNNING,
@@ -37,7 +37,7 @@ def move_service(
     service_name = service.get_display_name()
     with ReadUserData() as user_data:
         if not user_data.get("useBinds", False):
-            Jobs.get_instance().update(
+            Jobs.update(
                 job=job,
                 status=JobStatus.ERROR,
                 error="Server is not using binds.",
@@ -46,7 +46,7 @@ def move_service(
     # Check if we are on the same volume
     old_volume = service.get_location()
     if old_volume == volume.name:
-        Jobs.get_instance().update(
+        Jobs.update(
             job=job,
             status=JobStatus.ERROR,
             error=f"{service_name} is already on this volume.",
@@ -54,7 +54,7 @@ def move_service(
         return
     # Check if there is enough space on the new volume
     if int(volume.fsavail) < service.get_storage_usage():
-        Jobs.get_instance().update(
+        Jobs.update(
             job=job,
             status=JobStatus.ERROR,
             error="Not enough space on the new volume.",
@@ -62,7 +62,7 @@ def move_service(
         return
     # Make sure the volume is mounted
     if volume.name != "sda1" and f"/volumes/{volume.name}" not in volume.mountpoints:
-        Jobs.get_instance().update(
+        Jobs.update(
             job=job,
             status=JobStatus.ERROR,
             error="Volume is not mounted.",
@@ -71,14 +71,14 @@ def move_service(
     # Make sure current actual directory exists and if its user and group are correct
     for folder in folder_names:
         if not pathlib.Path(f"/volumes/{old_volume}/{folder.name}").exists():
-            Jobs.get_instance().update(
+            Jobs.update(
                 job=job,
                 status=JobStatus.ERROR,
                 error=f"{service_name} is not found.",
             )
             return
         if not pathlib.Path(f"/volumes/{old_volume}/{folder.name}").is_dir():
-            Jobs.get_instance().update(
+            Jobs.update(
                 job=job,
                 status=JobStatus.ERROR,
                 error=f"{service_name} is not a directory.",
@@ -88,7 +88,7 @@ def move_service(
             not pathlib.Path(f"/volumes/{old_volume}/{folder.name}").owner()
             == folder.owner
         ):
-            Jobs.get_instance().update(
+            Jobs.update(
                 job=job,
                 status=JobStatus.ERROR,
                 error=f"{service_name} owner is not {folder.owner}.",
@@ -96,7 +96,7 @@ def move_service(
             return
 
     # Stop service
-    Jobs.get_instance().update(
+    Jobs.update(
         job=job,
         status=JobStatus.RUNNING,
         status_text=f"Stopping {service_name}...",
@@ -113,7 +113,7 @@ def move_service(
             break
         time.sleep(1)
     else:
-        Jobs.get_instance().update(
+        Jobs.update(
             job=job,
             status=JobStatus.ERROR,
             error=f"{service_name} did not stop in 30 seconds.",
@@ -121,7 +121,7 @@ def move_service(
         return
 
     # Unmount old volume
-    Jobs.get_instance().update(
+    Jobs.update(
         job=job,
         status_text="Unmounting old folder...",
         status=JobStatus.RUNNING,
@@ -134,14 +134,14 @@ def move_service(
             check=True,
         )
     except subprocess.CalledProcessError:
-        Jobs.get_instance().update(
+        Jobs.update(
             job=job,
             status=JobStatus.ERROR,
             error="Unable to unmount old volume.",
         )
         return
     # Move data to new volume and set correct permissions
-    Jobs.get_instance().update(
+    Jobs.update(
         job=job,
         status_text="Moving data to new volume...",
         status=JobStatus.RUNNING,
@@ -154,14 +154,14 @@ def move_service(
             f"/volumes/{old_volume}/{folder.name}",
             f"/volumes/{volume.name}/{folder.name}",
         )
-        Jobs.get_instance().update(
+        Jobs.update(
             job=job,
             status_text="Moving data to new volume...",
             status=JobStatus.RUNNING,
             progress=current_progress + folder_percentage,
         )
 
-    Jobs.get_instance().update(
+    Jobs.update(
         job=job,
         status_text=f"Making sure {service_name} owns its files...",
         status=JobStatus.RUNNING,
@@ -180,14 +180,14 @@ def move_service(
         )
     except subprocess.CalledProcessError as error:
         print(error.output)
-        Jobs.get_instance().update(
+        Jobs.update(
             job=job,
             status=JobStatus.RUNNING,
             error=f"Unable to set ownership of new volume. {service_name} may not be able to access its files. Continuing anyway.",
         )
 
     # Mount new volume
-    Jobs.get_instance().update(
+    Jobs.update(
         job=job,
         status_text=f"Mounting {service_name} data...",
         status=JobStatus.RUNNING,
@@ -207,7 +207,7 @@ def move_service(
         )
     except subprocess.CalledProcessError as error:
         print(error.output)
-        Jobs.get_instance().update(
+        Jobs.update(
             job=job,
             status=JobStatus.ERROR,
             error="Unable to mount new volume.",
@@ -215,7 +215,7 @@ def move_service(
         return
 
     # Update userdata
-    Jobs.get_instance().update(
+    Jobs.update(
         job=job,
         status_text="Finishing move...",
         status=JobStatus.RUNNING,
@@ -227,7 +227,7 @@ def move_service(
         user_data[userdata_location]["location"] = volume.name
     # Start service
     service.start()
-    Jobs.get_instance().update(
+    Jobs.update(
         job=job,
         status=JobStatus.FINISHED,
         result=f"{service_name} moved successfully.",
@@ -141,7 +141,7 @@ class Gitea(Service):
     ]
 
    def move_to_volume(self, volume: BlockDevice) -> Job:
-        job = Jobs.get_instance().add(
+        job = Jobs.add(
             type_id="services.gitea.move",
             name="Move Gitea",
             description=f"Moving Gitea data to {volume.name}",
@@ -149,7 +149,7 @@ class MailServer(Service):
     ]
 
     def move_to_volume(self, volume: BlockDevice) -> Job:
-        job = Jobs.get_instance().add(
+        job = Jobs.add(
             type_id="services.mailserver.move",
             name="Move Mail Server",
             description=f"Moving mailserver data to {volume.name}",
@@ -149,7 +149,7 @@ class Nextcloud(Service):
     ]
 
     def move_to_volume(self, volume: BlockDevice) -> Job:
-        job = Jobs.get_instance().add(
+        job = Jobs.add(
             type_id="services.nextcloud.move",
             name="Move Nextcloud",
             description=f"Moving Nextcloud to volume {volume.name}",
@@ -129,7 +129,7 @@ class Pleroma(Service):
     ]
 
     def move_to_volume(self, volume: BlockDevice) -> Job:
-        job = Jobs.get_instance().add(
+        job = Jobs.add(
             type_id="services.pleroma.move",
             name="Move Pleroma",
             description=f"Moving Pleroma to volume {volume.name}",
@@ -4,6 +4,7 @@ import json
 import typing
 
 from selfprivacy_api.utils import WriteUserData
+from selfprivacy_api.utils.singleton_metaclass import SingletonMetaclass
 
 
 def get_block_device(device_name):
@@ -147,16 +148,9 @@ class BlockDevice:
         return False
 
 
-class BlockDevices:
+class BlockDevices(metaclass=SingletonMetaclass):
     """Singleton holding all Block devices"""
 
-    _instance = None
-
-    def __new__(cls, *args, **kwargs):
-        if not cls._instance:
-            cls._instance = super().__new__(cls)
-        return cls._instance
-
     def __init__(self):
         self.block_devices = []
         self.update()
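
A subtlety the metaclass also fixes here (general Python behavior, not spelled out in the commit): a __new__-based singleton returns the shared object, but Python still runs __init__ on every constructor call, so the old BlockDevices() re-emptied block_devices and re-ran update() each time it was "constructed". SingletonMetaclass intercepts type.__call__, so __init__ runs only once. A standalone demonstration of the old pitfall:

class WithNew:
    _instance = None

    def __new__(cls):
        if not cls._instance:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        print("init runs")  # executes on EVERY WithNew() call


WithNew()
WithNew()  # prints "init runs" again, re-initializing the same shared instance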
selfprivacy_api/utils/singleton_metaclass.py (new file, 23 lines)
@@ -0,0 +1,23 @@
+"""
+Singleton is a creational design pattern, which ensures that only
+one object of its kind exists and provides a single point of access
+to it for any other code.
+"""
+from threading import Lock
+
+
+class SingletonMetaclass(type):
+    """
+    This is a thread-safe implementation of Singleton.
+    """
+
+    _instances = {}
+    _lock: Lock = Lock()
+
+    def __call__(cls, *args, **kwargs):
+        with cls._lock:
+            if cls not in cls._instances:
+                cls._instances[cls] = super(SingletonMetaclass, cls).__call__(
+                    *args, **kwargs
+                )
+        return cls._instances[cls]