feat: add Prometheus monitoring (#120)

Co-authored-by: nhnn <nhnn@disroot.org>
Co-authored-by: Inex Code <inex.code@selfprivacy.org>
Reviewed-on: https://git.selfprivacy.org/SelfPrivacy/selfprivacy-rest-api/pulls/120
Co-authored-by: dettlaff <dettlaff@riseup.net>
Co-committed-by: dettlaff <dettlaff@riseup.net>
Authored by dettlaff on 2024-07-30 16:55:57 +03:00, committed by Inex Code
parent 1259c081ef
commit 4cd90d0c93
15 changed files with 1416 additions and 4 deletions


@@ -19,6 +19,7 @@ pythonPackages.buildPythonPackage rec {
     strawberry-graphql
     typing-extensions
     uvicorn
+    requests
     websockets
     httpx
   ];


@@ -0,0 +1,120 @@
import strawberry
from typing import Optional
from datetime import datetime
from selfprivacy_api.models.services import ServiceStatus
from selfprivacy_api.services.prometheus import Prometheus
from selfprivacy_api.utils.monitoring import (
MonitoringQueries,
MonitoringQueryError,
MonitoringValuesResult,
MonitoringMetricsResult,
)
@strawberry.type
class CpuMonitoring:
start: Optional[datetime]
end: Optional[datetime]
step: int
@strawberry.field
def overall_usage(self) -> MonitoringValuesResult:
if Prometheus().get_status() != ServiceStatus.ACTIVE:
return MonitoringQueryError(error="Prometheus is not running")
return MonitoringQueries.cpu_usage_overall(self.start, self.end, self.step)
@strawberry.type
class MemoryMonitoring:
start: Optional[datetime]
end: Optional[datetime]
step: int
@strawberry.field
def overall_usage(self) -> MonitoringValuesResult:
if Prometheus().get_status() != ServiceStatus.ACTIVE:
return MonitoringQueryError(error="Prometheus is not running")
return MonitoringQueries.memory_usage_overall(self.start, self.end, self.step)
@strawberry.field
def average_usage_by_service(self) -> MonitoringMetricsResult:
if Prometheus().get_status() != ServiceStatus.ACTIVE:
return MonitoringQueryError(error="Prometheus is not running")
return MonitoringQueries.memory_usage_average_by_slice(self.start, self.end)
@strawberry.field
def max_usage_by_service(self) -> MonitoringMetricsResult:
if Prometheus().get_status() != ServiceStatus.ACTIVE:
return MonitoringQueryError(error="Prometheus is not running")
return MonitoringQueries.memory_usage_max_by_slice(self.start, self.end)
@strawberry.type
class DiskMonitoring:
start: Optional[datetime]
end: Optional[datetime]
step: int
@strawberry.field
def overall_usage(self) -> MonitoringMetricsResult:
if Prometheus().get_status() != ServiceStatus.ACTIVE:
return MonitoringQueryError(error="Prometheus is not running")
return MonitoringQueries.disk_usage_overall(self.start, self.end, self.step)
@strawberry.type
class NetworkMonitoring:
start: Optional[datetime]
end: Optional[datetime]
step: int
@strawberry.field
def overall_usage(self) -> MonitoringMetricsResult:
if Prometheus().get_status() != ServiceStatus.ACTIVE:
return MonitoringQueryError(error="Prometheus is not running")
return MonitoringQueries.network_usage_overall(self.start, self.end, self.step)
@strawberry.type
class Monitoring:
@strawberry.field
def cpu_usage(
self,
start: Optional[datetime] = None,
end: Optional[datetime] = None,
step: int = 60,
) -> CpuMonitoring:
return CpuMonitoring(start=start, end=end, step=step)
@strawberry.field
def memory_usage(
self,
start: Optional[datetime] = None,
end: Optional[datetime] = None,
step: int = 60,
) -> MemoryMonitoring:
return MemoryMonitoring(start=start, end=end, step=step)
@strawberry.field
def disk_usage(
self,
start: Optional[datetime] = None,
end: Optional[datetime] = None,
step: int = 60,
) -> DiskMonitoring:
return DiskMonitoring(start=start, end=end, step=step)
@strawberry.field
def network_usage(
self,
start: Optional[datetime] = None,
end: Optional[datetime] = None,
step: int = 60,
) -> NetworkMonitoring:
return NetworkMonitoring(start=start, end=end, step=step)
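For reference, these resolvers become reachable through the API's /graphql endpoint once the Query type below exposes them. A minimal client sketch, assuming a locally reachable API instance and a valid API token; the URL, port, and Bearer auth header are illustrative assumptions, not defined by this PR:

import requests

API_URL = "http://localhost:5050/graphql"  # assumption: adjust to your deployment
API_TOKEN = "YOUR_API_TOKEN"  # assumption: a SelfPrivacy API token

# Field names follow strawberry's camelCase conversion of the types above.
CPU_USAGE_QUERY = """
query {
  monitoring {
    cpuUsage {
      overallUsage {
        ... on MonitoringValues { values { timestamp value } }
        ... on MonitoringQueryError { error }
      }
    }
  }
}
"""

response = requests.post(
    API_URL,
    json={"query": CPU_USAGE_QUERY},
    headers={"Authorization": f"Bearer {API_TOKEN}"},  # assumption: Bearer token auth
)
print(response.json())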


@@ -31,6 +31,7 @@ from selfprivacy_api.graphql.queries.logs import LogEntry, Logs
 from selfprivacy_api.graphql.queries.services import Services
 from selfprivacy_api.graphql.queries.storage import Storage
 from selfprivacy_api.graphql.queries.system import System
+from selfprivacy_api.graphql.queries.monitoring import Monitoring
 from selfprivacy_api.graphql.subscriptions.jobs import ApiJob
 from selfprivacy_api.graphql.subscriptions.jobs import (
@@ -93,6 +94,11 @@ class Query:
         """Backup queries"""
         return Backup()
+
+    @strawberry.field(permission_classes=[IsAuthenticated])
+    def monitoring(self) -> Monitoring:
+        """Monitoring queries"""
+        return Monitoring()
 @strawberry.type
 class Mutation(


@@ -15,10 +15,12 @@ from selfprivacy_api.migrations.check_for_system_rebuild_jobs import (
     CheckForSystemRebuildJobs,
 )
 from selfprivacy_api.migrations.add_roundcube import AddRoundcube
+from selfprivacy_api.migrations.add_monitoring import AddMonitoring

 migrations = [
     WriteTokenToRedis(),
     CheckForSystemRebuildJobs(),
+    AddMonitoring(),
     AddRoundcube(),
 ]


@@ -0,0 +1,37 @@
from selfprivacy_api.migrations.migration import Migration
from selfprivacy_api.services.flake_service_manager import FlakeServiceManager
from selfprivacy_api.utils import ReadUserData, WriteUserData
from selfprivacy_api.utils.block_devices import BlockDevices
class AddMonitoring(Migration):
"""Adds monitoring service if it is not present."""
def get_migration_name(self) -> str:
return "add_monitoring"
def get_migration_description(self) -> str:
return "Adds the Monitoring if it is not present."
def is_migration_needed(self) -> bool:
with FlakeServiceManager() as manager:
if "monitoring" not in manager.services:
return True
with ReadUserData() as data:
if "monitoring" not in data["modules"]:
return True
return False
def migrate(self) -> None:
with FlakeServiceManager() as manager:
if "monitoring" not in manager.services:
manager.services["monitoring"] = (
"git+https://git.selfprivacy.org/SelfPrivacy/selfprivacy-nixos-config.git?ref=flakes&dir=sp-modules/monitoring"
)
with WriteUserData() as data:
if "monitoring" not in data["modules"]:
data["modules"]["monitoring"] = {
"enable": True,
"location": BlockDevices().get_root_block_device().name,
}
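For orientation, this is the state the migration converges to. An illustrative sketch: the flake URL is the one written above, and the "sdb" location mirrors the test userdata further down in this diff; in practice the value comes from BlockDevices().get_root_block_device().name.

# Illustrative post-migration state, not code the migration itself executes.
expected_flake_services = {
    "monitoring": "git+https://git.selfprivacy.org/SelfPrivacy/selfprivacy-nixos-config.git?ref=flakes&dir=sp-modules/monitoring",
}
expected_userdata_module = {
    "monitoring": {
        "enable": True,
        "location": "sdb",  # assumption: the root block device name on this host
    },
}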


@@ -4,6 +4,7 @@ import typing
 from selfprivacy_api.services.bitwarden import Bitwarden
 from selfprivacy_api.services.forgejo import Forgejo
 from selfprivacy_api.services.jitsimeet import JitsiMeet
+from selfprivacy_api.services.prometheus import Prometheus
 from selfprivacy_api.services.roundcube import Roundcube
 from selfprivacy_api.services.mailserver import MailServer
 from selfprivacy_api.services.nextcloud import Nextcloud
@@ -21,6 +22,7 @@ services: list[Service] = [
     Ocserv(),
     JitsiMeet(),
     Roundcube(),
+    Prometheus(),
 ]


@@ -65,9 +65,9 @@ class Forgejo(Service):
                 "forgejo-auto",
                 "forgejo-light",
                 "forgejo-dark",
-                "auto",
-                "gitea",
-                "arc-green",
+                "gitea-auto",
+                "gitea-light",
+                "gitea-dark",
             ],
         ),
     }


@@ -0,0 +1,86 @@
"""Class representing Nextcloud service."""
import base64
import subprocess
from typing import Optional, List
from selfprivacy_api.services.owned_path import OwnedPath
from selfprivacy_api.utils.systemd import get_service_status
from selfprivacy_api.services.service import Service, ServiceStatus
from selfprivacy_api.services.prometheus.icon import PROMETHEUS_ICON
class Prometheus(Service):
"""Class representing Prometheus service."""
@staticmethod
def get_id() -> str:
return "monitoring"
@staticmethod
def get_display_name() -> str:
return "Prometheus"
@staticmethod
def get_description() -> str:
return "Prometheus is used for resource monitoring and alerts."
@staticmethod
def get_svg_icon() -> str:
return base64.b64encode(PROMETHEUS_ICON.encode("utf-8")).decode("utf-8")
@staticmethod
def get_url() -> Optional[str]:
"""Return service url."""
return None
@staticmethod
def get_subdomain() -> Optional[str]:
return None
@staticmethod
def is_movable() -> bool:
return False
@staticmethod
def is_required() -> bool:
return True
@staticmethod
def can_be_backed_up() -> bool:
return False
@staticmethod
def get_backup_description() -> str:
return "Backups are not available for Prometheus."
@staticmethod
def get_status() -> ServiceStatus:
return get_service_status("prometheus.service")
@staticmethod
def stop():
subprocess.run(["systemctl", "stop", "prometheus.service"])
@staticmethod
def start():
subprocess.run(["systemctl", "start", "prometheus.service"])
@staticmethod
def restart():
subprocess.run(["systemctl", "restart", "prometheus.service"])
@staticmethod
def get_logs():
return ""
@staticmethod
def get_owned_folders() -> List[OwnedPath]:
return [
OwnedPath(
path="/var/lib/prometheus",
owner="prometheus",
group="prometheus",
),
]
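A minimal sketch of how this service class is consumed by the monitoring resolvers above: the GraphQL layer checks the unit status before issuing any Prometheus query. Module paths are the ones introduced in this PR; running the snippet requires a host where prometheus.service exists, since get_status() asks systemd via get_service_status().

from selfprivacy_api.models.services import ServiceStatus
from selfprivacy_api.services.prometheus import Prometheus

# Same gate the resolvers use before touching the Prometheus HTTP API.
if Prometheus.get_status() == ServiceStatus.ACTIVE:
    print("prometheus.service is active; monitoring queries will be served")
else:
    print("prometheus.service is not running; resolvers return MonitoringQueryError")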


@@ -0,0 +1,5 @@
PROMETHEUS_ICON = """
<svg width="128" height="128" viewBox="0 0 128 128" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M64.125 0.51C99.229 0.517 128.045 29.133 128 63.951C127.955 99.293 99.258 127.515 63.392 127.49C28.325 127.466 -0.0249987 98.818 1.26289e-06 63.434C0.0230013 28.834 28.898 0.503 64.125 0.51ZM44.72 22.793C45.523 26.753 44.745 30.448 43.553 34.082C42.73 36.597 41.591 39.022 40.911 41.574C39.789 45.777 38.52 50.004 38.052 54.3C37.381 60.481 39.81 65.925 43.966 71.34L24.86 67.318C24.893 67.92 24.86 68.148 24.925 68.342C26.736 73.662 29.923 78.144 33.495 82.372C33.872 82.818 34.732 83.046 35.372 83.046C54.422 83.084 73.473 83.08 92.524 83.055C93.114 83.055 93.905 82.945 94.265 82.565C98.349 78.271 101.47 73.38 103.425 67.223L83.197 71.185C84.533 68.567 86.052 66.269 86.93 63.742C89.924 55.099 88.682 46.744 84.385 38.862C80.936 32.538 77.754 26.242 79.475 18.619C75.833 22.219 74.432 26.798 73.543 31.517C72.671 36.167 72.154 40.881 71.478 45.6C71.38 45.457 71.258 45.35 71.236 45.227C71.1507 44.7338 71.0919 44.2365 71.06 43.737C70.647 36.011 69.14 28.567 65.954 21.457C64.081 17.275 62.013 12.995 63.946 8.001C62.639 8.694 61.456 9.378 60.608 10.357C58.081 13.277 57.035 16.785 56.766 20.626C56.535 23.908 56.22 27.205 55.61 30.432C54.97 33.824 53.96 37.146 51.678 40.263C50.76 33.607 50.658 27.019 44.722 22.793H44.72ZM93.842 88.88H34.088V99.26H93.842V88.88ZM45.938 104.626C45.889 113.268 54.691 119.707 65.571 119.24C74.591 118.851 82.57 111.756 81.886 104.626H45.938Z" fill="black"/>
</svg>
"""


@@ -145,7 +145,9 @@ def check_if_subdomain_is_taken(subdomain: str) -> bool:
     with ReadUserData() as data:
         for module in data["modules"]:
             if (
-                data["modules"][module].get("subdomain", DEFAULT_SUBDOMAINS[module])
+                data["modules"][module].get(
+                    "subdomain", DEFAULT_SUBDOMAINS.get(module, "")
+                )
                 == subdomain
             ):
                 return True
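The switch from DEFAULT_SUBDOMAINS[module] to DEFAULT_SUBDOMAINS.get(module, "") is what keeps this check from raising for modules that have no entry in DEFAULT_SUBDOMAINS. A minimal sketch of the difference, using a deliberately incomplete mapping for illustration:

DEFAULT_SUBDOMAINS = {"gitea": "git"}  # illustrative subset, not the real mapping

module = "monitoring"

# Old behaviour: a plain index raises as soon as an unknown module shows up.
try:
    DEFAULT_SUBDOMAINS[module]
except KeyError:
    print("KeyError: 'monitoring'")

# New behaviour: missing modules fall back to "", so the comparison simply fails.
assert DEFAULT_SUBDOMAINS.get(module, "") == ""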


@@ -8,6 +8,7 @@ DEFAULT_SUBDOMAINS = {
     "pleroma": "social",
     "roundcube": "roundcube",
     "testservice": "test",
+    "monitoring": "",
 }

 RESERVED_SUBDOMAINS = [


@@ -0,0 +1,422 @@
"""Prometheus monitoring queries."""
# pylint: disable=too-few-public-methods
import requests
import strawberry
from dataclasses import dataclass
from typing import Optional, Annotated, Union, List, Tuple
from datetime import datetime, timedelta
PROMETHEUS_URL = "http://localhost:9001"
@strawberry.type
@dataclass
class MonitoringValue:
timestamp: datetime
value: str
@strawberry.type
@dataclass
class MonitoringMetric:
metric_id: str
values: List[MonitoringValue]
@strawberry.type
class MonitoringQueryError:
error: str
@strawberry.type
class MonitoringValues:
values: List[MonitoringValue]
@strawberry.type
class MonitoringMetrics:
metrics: List[MonitoringMetric]
MonitoringValuesResult = Annotated[
Union[MonitoringValues, MonitoringQueryError],
strawberry.union("MonitoringValuesResult"),
]
MonitoringMetricsResult = Annotated[
Union[MonitoringMetrics, MonitoringQueryError],
strawberry.union("MonitoringMetricsResult"),
]
class MonitoringQueries:
@staticmethod
def _send_range_query(
query: str, start: int, end: int, step: int, result_type: Optional[str] = None
) -> Union[dict, MonitoringQueryError]:
try:
response = requests.get(
f"{PROMETHEUS_URL}/api/v1/query_range",
params={
"query": query,
"start": start,
"end": end,
"step": step,
},
)
if response.status_code != 200:
return MonitoringQueryError(
error=f"Prometheus returned unexpected HTTP status code. Error: {response.text}. The query was {query}"
)
json = response.json()
if result_type and json["data"]["resultType"] != result_type:
return MonitoringQueryError(
error="Unexpected resultType returned from Prometheus, request failed"
)
return json["data"]
except Exception as error:
return MonitoringQueryError(
error=f"Prometheus request failed! Error: {str(error)}"
)
@staticmethod
def _send_query(
query: str, result_type: Optional[str] = None
) -> Union[dict, MonitoringQueryError]:
try:
response = requests.get(
f"{PROMETHEUS_URL}/api/v1/query",
params={
"query": query,
},
)
if response.status_code != 200:
return MonitoringQueryError(
error=f"Prometheus returned unexpected HTTP status code. Error: {response.text}. The query was {query}"
)
json = response.json()
if result_type and json["data"]["resultType"] != result_type:
return MonitoringQueryError(
error="Unexpected resultType returned from Prometheus, request failed"
)
return json["data"]
except Exception as error:
return MonitoringQueryError(
error=f"Prometheus request failed! Error: {str(error)}"
)
@staticmethod
def _prometheus_value_to_monitoring_value(x: Tuple[int, str]):
return MonitoringValue(timestamp=datetime.fromtimestamp(x[0]), value=x[1])
@staticmethod
def _clean_slice_id(slice_id: str, clean_id: bool) -> str:
"""Slices come in form of `/slice_name.slice`, we need to remove the `.slice` and `/` part."""
if clean_id:
return slice_id.split(".")[0].split("/")[1]
return slice_id
@staticmethod
def _prometheus_response_to_monitoring_metrics(
response: dict, id_key: str, clean_id: bool = False
) -> List[MonitoringMetric]:
if response["resultType"] == "vector":
return list(
map(
lambda x: MonitoringMetric(
metric_id=MonitoringQueries._clean_slice_id(
x["metric"][id_key], clean_id=clean_id
),
values=[
MonitoringQueries._prometheus_value_to_monitoring_value(
x["value"]
)
],
),
response["result"],
)
)
else:
return list(
map(
lambda x: MonitoringMetric(
metric_id=MonitoringQueries._clean_slice_id(
x["metric"][id_key], clean_id=clean_id
),
values=list(
map(
MonitoringQueries._prometheus_value_to_monitoring_value,
x["values"],
)
),
),
response["result"],
)
)
@staticmethod
def _calculate_offset_and_duration(
start: datetime, end: datetime
) -> Tuple[int, int]:
"""Calculate the offset and duration for Prometheus queries.
They must be in seconds.
"""
offset = int((datetime.now() - end).total_seconds())
duration = int((end - start).total_seconds())
return offset, duration
@staticmethod
def cpu_usage_overall(
start: Optional[datetime] = None,
end: Optional[datetime] = None,
step: int = 60, # seconds
) -> MonitoringValuesResult:
"""
Get CPU information.
Args:
start (datetime, optional): The start time.
Defaults to 20 minutes ago if not provided.
end (datetime, optional): The end time.
Defaults to current time if not provided.
step (int): Interval in seconds for querying CPU usage data.
"""
if start is None:
start = datetime.now() - timedelta(minutes=20)
if end is None:
end = datetime.now()
start_timestamp = int(start.timestamp())
end_timestamp = int(end.timestamp())
query = '100 - (avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[5m])) * 100)'
data = MonitoringQueries._send_range_query(
query, start_timestamp, end_timestamp, step, result_type="matrix"
)
if isinstance(data, MonitoringQueryError):
return data
return MonitoringValues(
values=list(
map(
MonitoringQueries._prometheus_value_to_monitoring_value,
data["result"][0]["values"],
)
)
)
@staticmethod
def memory_usage_overall(
start: Optional[datetime] = None,
end: Optional[datetime] = None,
step: int = 60, # seconds
) -> MonitoringValuesResult:
"""
Get memory usage.
Args:
start (datetime, optional): The start time.
Defaults to 20 minutes ago if not provided.
end (datetime, optional): The end time.
Defaults to current time if not provided.
step (int): Interval in seconds for querying memory usage data.
"""
if start is None:
start = datetime.now() - timedelta(minutes=20)
if end is None:
end = datetime.now()
start_timestamp = int(start.timestamp())
end_timestamp = int(end.timestamp())
query = "100 - (100 * (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes))"
data = MonitoringQueries._send_range_query(
query, start_timestamp, end_timestamp, step, result_type="matrix"
)
if isinstance(data, MonitoringQueryError):
return data
return MonitoringValues(
values=list(
map(
MonitoringQueries._prometheus_value_to_monitoring_value,
data["result"][0]["values"],
)
)
)
@staticmethod
def memory_usage_max_by_slice(
start: Optional[datetime] = None,
end: Optional[datetime] = None,
) -> MonitoringMetricsResult:
"""
Get maximum memory usage for each service (i.e. systemd slice).
Args:
start (datetime, optional): The start time.
Defaults to 20 minutes ago if not provided.
end (datetime, optional): The end time.
Defaults to current time if not provided.
"""
if start is None:
start = datetime.now() - timedelta(minutes=20)
if end is None:
end = datetime.now()
offset, duration = MonitoringQueries._calculate_offset_and_duration(start, end)
if offset == 0:
query = f'max_over_time((container_memory_rss{{id!~".*slice.*slice", id=~".*slice"}}+container_memory_swap{{id!~".*slice.*slice", id=~".*slice"}})[{duration}s:])'
else:
query = f'max_over_time((container_memory_rss{{id!~".*slice.*slice", id=~".*slice"}}+container_memory_swap{{id!~".*slice.*slice", id=~".*slice"}})[{duration}s:] offset {offset}s)'
data = MonitoringQueries._send_query(query, result_type="vector")
if isinstance(data, MonitoringQueryError):
return data
return MonitoringMetrics(
metrics=MonitoringQueries._prometheus_response_to_monitoring_metrics(
data, "id", clean_id=True
)
)
@staticmethod
def memory_usage_average_by_slice(
start: Optional[datetime] = None,
end: Optional[datetime] = None,
) -> MonitoringMetricsResult:
"""
Get average memory usage for each service (i.e. systemd slice).
Args:
start (datetime, optional): The start time.
Defaults to 20 minutes ago if not provided.
end (datetime, optional): The end time.
Defaults to current time if not provided.
"""
if start is None:
start = datetime.now() - timedelta(minutes=20)
if end is None:
end = datetime.now()
offset, duration = MonitoringQueries._calculate_offset_and_duration(start, end)
if offset == 0:
query = f'avg_over_time((container_memory_rss{{id!~".*slice.*slice", id=~".*slice"}}+container_memory_swap{{id!~".*slice.*slice", id=~".*slice"}})[{duration}s:])'
else:
query = f'avg_over_time((container_memory_rss{{id!~".*slice.*slice", id=~".*slice"}}+container_memory_swap{{id!~".*slice.*slice", id=~".*slice"}})[{duration}s:] offset {offset}s)'
data = MonitoringQueries._send_query(query, result_type="vector")
if isinstance(data, MonitoringQueryError):
return data
return MonitoringMetrics(
metrics=MonitoringQueries._prometheus_response_to_monitoring_metrics(
data, "id", clean_id=True
)
)
@staticmethod
def disk_usage_overall(
start: Optional[datetime] = None,
end: Optional[datetime] = None,
step: int = 60, # seconds
) -> MonitoringMetricsResult:
"""
Get disk usage information.
Args:
start (datetime, optional): The start time.
Defaults to 20 minutes ago if not provided.
end (datetime, optional): The end time.
Defaults to current time if not provided.
step (int): Interval in seconds for querying disk usage data.
"""
if start is None:
start = datetime.now() - timedelta(minutes=20)
if end is None:
end = datetime.now()
start_timestamp = int(start.timestamp())
end_timestamp = int(end.timestamp())
query = """100 - (100 * sum by (device) (node_filesystem_avail_bytes{fstype!="rootfs",fstype!="ramfs",fstype!="tmpfs",mountpoint!="/efi"}) / sum by (device) (node_filesystem_size_bytes{fstype!="rootfs",fstype!="ramfs",fstype!="tmpfs",mountpoint!="/efi"}))"""
data = MonitoringQueries._send_range_query(
query, start_timestamp, end_timestamp, step, result_type="matrix"
)
if isinstance(data, MonitoringQueryError):
return data
return MonitoringMetrics(
metrics=MonitoringQueries._prometheus_response_to_monitoring_metrics(
data, "device"
)
)
@staticmethod
def network_usage_overall(
start: Optional[datetime] = None,
end: Optional[datetime] = None,
step: int = 60, # seconds
) -> MonitoringMetricsResult:
"""
Get network usage information for both download and upload.
Args:
start (datetime, optional): The start time.
Defaults to 20 minutes ago if not provided.
end (datetime, optional): The end time.
Defaults to current time if not provided.
step (int): Interval in seconds for querying network data.
"""
if start is None:
start = datetime.now() - timedelta(minutes=20)
if end is None:
end = datetime.now()
start_timestamp = int(start.timestamp())
end_timestamp = int(end.timestamp())
query = """
label_replace(rate(node_network_receive_bytes_total{device!="lo"}[5m]), "direction", "receive", "device", ".*")
or
label_replace(rate(node_network_transmit_bytes_total{device!="lo"}[5m]), "direction", "transmit", "device", ".*")
"""
data = MonitoringQueries._send_range_query(
query, start_timestamp, end_timestamp, step, result_type="matrix"
)
if isinstance(data, MonitoringQueryError):
return data
return MonitoringMetrics(
metrics=MonitoringQueries._prometheus_response_to_monitoring_metrics(
data, "direction"
)
)
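To make the helper contract concrete, here is a minimal sketch that feeds a hand-built Prometheus "vector" payload (the shape /api/v1/query returns inside its "data" field, which is what _send_query passes along) into the response parser above. The slice ids are illustrative values matching the id=~".*slice" selectors used in the queries.

from selfprivacy_api.utils.monitoring import MonitoringQueries

# Illustrative payload in the shape _send_query(..., result_type="vector") returns.
fake_vector_data = {
    "resultType": "vector",
    "result": [
        {"metric": {"id": "/nextcloud.slice"}, "value": [1720136108, "123456789"]},
        {"metric": {"id": "/gitea.slice"}, "value": [1720136108, "23456789"]},
    ],
}

metrics = MonitoringQueries._prometheus_response_to_monitoring_metrics(
    fake_vector_data, id_key="id", clean_id=True
)
for metric in metrics:
    # _clean_slice_id() strips the leading "/" and the ".slice" suffix,
    # so metric_id comes out as "nextcloud" and "gitea".
    print(metric.metric_id, metric.values[0].value)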


@@ -65,6 +65,10 @@
         },
         "roundcube": {
             "enable": true
+        },
+        "monitoring": {
+            "enable": true,
+            "location": "sdb"
         }
     },
     "volumes": [


@@ -0,0 +1,253 @@
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
# pylint: disable=missing-function-docstring
# from dataclasses import dataclass
# from datetime import datetime
# from typing import List, Dict
# import pytest
# from tests.test_graphql.common import (
# assert_empty,
# get_data,
# )
# MOCK_VALUES = [
# [1720135748, "3.75"],
# [1720135808, "4.525000000139698"],
# [1720135868, "4.541666666433841"],
# [1720135928, "4.574999999798209"],
# [1720135988, "4.579166666759804"],
# [1720136048, "3.8791666664959195"],
# [1720136108, "4.5458333333954215"],
# [1720136168, "4.566666666651145"],
# [1720136228, "4.791666666666671"],
# [1720136288, "4.720833333364382"],
# [1720136348, "3.9624999999068677"],
# [1720136408, "4.6875"],
# [1720136468, "4.404166666790843"],
# [1720136528, "4.31666666680637"],
# [1720136588, "4.358333333317816"],
# [1720136648, "3.7083333334885538"],
# [1720136708, "4.558333333116025"],
# [1720136768, "4.729166666511446"],
# [1720136828, "4.75416666672875"],
# [1720136888, "4.624999999844775"],
# [1720136948, "3.9041666667132375"],
# ]
# @dataclass
# class DumbResponse:
# status_code: int
# json_data: dict
# def json(self):
# return self.json_data
# def generate_prometheus_response(result_type: str, result: List[Dict]):
# return DumbResponse(
# status_code=200,
# json_data={"data": {"resultType": result_type, "result": result}},
# )
# MOCK_SINGLE_METRIC_PROMETHEUS_RESPONSE = generate_prometheus_response(
# "matrix", [{"values": MOCK_VALUES}]
# )
# MOCK_MULTIPLE_METRIC_DEVICE_PROMETHEUS_RESPONSE = generate_prometheus_response(
# "matrix",
# [
# {"metric": {"device": "a"}, "values": MOCK_VALUES},
# {"metric": {"device": "b"}, "values": MOCK_VALUES},
# {"metric": {"device": "c"}, "values": MOCK_VALUES},
# ],
# )
# # def generate_mock_metrics(name: str):
# # return {
# # "data": {
# # "monitoring": {
# # f"{name}": {
# # "resultType": "matrix",
# # "result": [
# # {
# # "metric": {"instance": "127.0.0.1:9002"},
# # "values": ,
# # }
# # ],
# # }
# # }
# # }
# # }
# # MOCK_CPU_USAGE_RESPONSE = generate_mock_metrics("cpuUsage")
# # MOCK_DISK_USAGE_RESPONSE = generate_mock_metrics("diskUsage")
# # MOCK_MEMORY_USAGE_RESPONSE = generate_mock_metrics("memoryUsage")
# def generate_mock_query(name):
# return f"""
# query Query {{
# monitoring {{
# {name} {{ resultType, result }}
# }}
# }}
# """
# def generate_mock_query_with_options(name):
# return f"""
# query Query($start: DateTime, $end: DateTime, $step: Int) {{
# monitoring {{
# {name}(start: $start, end: $end, step: $step) {{ resultType, result }}
# }}
# }}
# """
# def prometheus_result_from_dict(dict):
# # return MonitoringQueryResult(result_type=dict["resultType"], result=dict["result"])
# return dict
# @pytest.fixture
# def mock_cpu_usage(mocker):
# mock = mocker.patch(
# "selfprivacy_api.utils.prometheus.PrometheusQueries._send_query",
# return_value=MOCK_CPU_USAGE_RESPONSE["data"]["monitoring"]["cpuUsage"],
# )
# return mock
# @pytest.fixture
# def mock_memory_usage(mocker):
# mock = mocker.patch(
# "selfprivacy_api.utils.prometheus.PrometheusQueries._send_query",
# return_value=prometheus_result_from_dict(
# MOCK_MEMORY_USAGE_RESPONSE["data"]["monitoring"]["memoryUsage"]
# ),
# )
# return mock
# @pytest.fixture
# def mock_disk_usage(mocker):
# mock = mocker.patch(
# "selfprivacy_api.utils.prometheus.PrometheusQueries._send_query",
# return_value=prometheus_result_from_dict(
# MOCK_DISK_USAGE_RESPONSE["data"]["monitoring"]["diskUsage"]
# ),
# )
# return mock
# def test_graphql_get_disk_usage(client, authorized_client, mock_disk_usage):
# response = authorized_client.post(
# "/graphql",
# json={"query": generate_mock_query("diskUsage")},
# )
# data = get_data(response)
# assert data == MOCK_DISK_USAGE_RESPONSE["data"]
# def test_graphql_get_disk_usage_with_options(
# client, authorized_client, mock_disk_usage
# ):
# response = authorized_client.post(
# "/graphql",
# json={
# "query": generate_mock_query_with_options("diskUsage"),
# "variables": {
# "start": datetime.fromtimestamp(1720136108).isoformat(),
# "end": datetime.fromtimestamp(1720137319).isoformat(),
# "step": 90,
# },
# },
# )
# data = get_data(response)
# assert data == MOCK_DISK_USAGE_RESPONSE["data"]
# def test_graphql_get_disk_usage_unauthorized(client):
# response = client.post(
# "/graphql",
# json={"query": generate_mock_query("diskUsage")},
# )
# assert_empty(response)
# def test_graphql_get_memory_usage(client, authorized_client, mock_memory_usage):
# response = authorized_client.post(
# "/graphql",
# json={"query": generate_mock_query("memoryUsage")},
# )
# data = get_data(response)
# assert data == MOCK_MEMORY_USAGE_RESPONSE["data"]
# def test_graphql_get_memory_usage_with_options(
# client, authorized_client, mock_memory_usage
# ):
# response = authorized_client.post(
# "/graphql",
# json={
# "query": generate_mock_query_with_options("memoryUsage"),
# "variables": {
# "start": datetime.fromtimestamp(1720136108).isoformat(),
# "end": datetime.fromtimestamp(1720137319).isoformat(),
# "step": 90,
# },
# },
# )
# data = get_data(response)
# assert data == MOCK_MEMORY_USAGE_RESPONSE["data"]
# def test_graphql_get_memory_usage_unauthorized(client):
# response = client.post(
# "/graphql",
# json={"query": generate_mock_query("memoryUsage")},
# )
# assert_empty(response)
# def test_graphql_get_cpu_usage(client, authorized_client, mock_cpu_usage):
# response = authorized_client.post(
# "/graphql",
# json={"query": generate_mock_query("cpuUsage")},
# )
# data = get_data(response)
# assert data == MOCK_CPU_USAGE_RESPONSE["data"]
# def test_graphql_get_cpu_usage_with_options(client, authorized_client, mock_cpu_usage):
# response = authorized_client.post(
# "/graphql",
# json={
# "query": generate_mock_query_with_options("cpuUsage"),
# "variables": {
# "start": datetime.fromtimestamp(1720136108).isoformat(),
# "end": datetime.fromtimestamp(1720137319).isoformat(),
# "step": 90,
# },
# },
# )
# data = get_data(response)
# assert data == MOCK_CPU_USAGE_RESPONSE["data"]
# def test_graphql_get_cpu_usage_unauthorized(client):
# response = client.post(
# "/graphql",
# json={"query": generate_mock_query("cpuUsage")},
# )
# assert_empty(response)


@@ -0,0 +1,471 @@
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
# pylint: disable=missing-function-docstring
# TODO(def): Finish this please.
# from datetime import datetime
# import pytest
# from selfprivacy_api.models.services import ServiceStatus
# from tests.test_graphql.common import (
# assert_empty,
# get_data,
# )
# @pytest.fixture
# def mock_get_status_active(mocker):
# mock = mocker.patch(
# "selfprivacy_api.graphql.queries.monitoring.Prometheus.get_status",
# return_value=ServiceStatus.ACTIVE,
# )
# return mock
# @pytest.fixture
# def mock_send_query(mocker):
# mock = mocker.patch(
# "selfprivacy_api.utils.monitoring.MonitoringQueries._send_range_query",
# # "selfprivacy_api.graphql.queries.monitoring._send_query",
# return_value=["test result"],
# )
# return mock
# # ....
# CPU_USAGE_QUERY = """
# query {
# monitoring {
# cpuUsage {
# start
# end
# step
# overallUsage {
# ... on MonitoringValues {
# values {
# timestamp
# value
# }
# }
# ... on MonitoringQueryError {
# error
# }
# }
# }
# }
# }
# """
# CPU_USAGE_QUERY_WITH_OPTIONS = """
# query Query($end: String!, $start: String!, $step: String!) {
# monitoring {
# cpuUsage(end: $end, start: $start, step: $step) {
# end
# overallUsage {
# ... on MonitoringValues {
# values {
# timestamp
# value
# }
# }
# ... on MonitoringQueryError {
# error
# }
# }
# start
# step
# }
# }
# }
# """
# MEMORY_USAGE_QUERY = """
# query Query {
# monitoring {
# memoryUsage {
# averageUsageByService {
# ... on MonitoringMetrics {
# metrics {
# metricId
# values {
# timestamp
# value
# }
# }
# }
# ... on MonitoringQueryError {
# error
# }
# }
# end
# maxUsageByService {
# ... on MonitoringMetrics {
# metrics {
# metricId
# values {
# timestamp
# value
# }
# }
# }
# ... on MonitoringQueryError {
# error
# }
# }
# overallUsage {
# ... on MonitoringValues {
# values {
# timestamp
# value
# }
# }
# ... on MonitoringQueryError {
# error
# }
# }
# start
# step
# }
# }
# }
# """
# MEMORY_USAGE_QUERY_WITH_OPTIONS = """
# query Query($end: String!, $start: String!, $step: String!) {
# monitoring {
# memoryUsage(end: $end, start: $start, step: $step) {
# averageUsageByService {
# ... on MonitoringMetrics {
# metrics {
# metricId
# values {
# timestamp
# value
# }
# }
# }
# ... on MonitoringQueryError {
# error
# }
# }
# end
# maxUsageByService {
# ... on MonitoringMetrics {
# metrics {
# metricId
# values {
# timestamp
# value
# }
# }
# }
# ... on MonitoringQueryError {
# error
# }
# }
# overallUsage {
# ... on MonitoringValues {
# values {
# timestamp
# value
# }
# }
# ... on MonitoringQueryError {
# error
# }
# }
# start
# step
# }
# }
# }
# """
# NETWORK_USAGE_QUERY = """
# query Query {
# monitoring {
# networkUsage {
# end
# start
# step
# overallUsage {
# ... on MonitoringMetrics {
# metrics {
# metricId
# values {
# timestamp
# value
# }
# }
# }
# ... on MonitoringQueryError {
# error
# }
# }
# }
# }
# }
# """
# NETWORK_USAGE_QUERY_WITH_OPTIONS = """
# query Query($end: String!, $start: String!, $step: String!) {
# monitoring {
# networkUsage(end: $end, start: $start, step: $step) {
# end
# overallUsage {
# ... on MonitoringMetrics {
# metrics {
# metricId
# values {
# timestamp
# value
# }
# }
# }
# ... on MonitoringQueryError {
# error
# }
# }
# start
# step
# }
# }
# }
# """
# DISK_USAGE_QUERY = """
# query Query {
# monitoring {
# diskUsage {
# __typename
# start
# end
# step
# overallUsage {
# ... on MonitoringMetrics {
# metrics {
# metricId
# values {
# timestamp
# value
# }
# }
# }
# ... on MonitoringQueryError {
# error
# }
# }
# }
# }
# }
# """
# DISK_USAGE_QUERY_WITH_OPTIONS = """
# query Query($end: String!, $start: String!, $step: String!) {
# monitoring {
# diskUsage(end: $end, start: $start, step: $step) {
# end
# overallUsage {
# ... on MonitoringMetrics {
# metrics {
# metricId
# values {
# timestamp
# value
# }
# }
# }
# ... on MonitoringQueryError {
# error
# }
# }
# start
# step
# }
# }
# }
# """
# def test_graphql_get_disk_usage(
# client,
# authorized_client,
# mock_send_query,
# mock_get_status_active,
# ):
# response = authorized_client.post(
# "/graphql",
# json={"query": DISK_USAGE_QUERY},
# )
# data = get_data(response)
# assert data == ["test result"]
# def test_graphql_get_disk_usage_with_options(
# client,
# authorized_client,
# mock_send_query,
# mock_get_status_active,
# ):
# response = authorized_client.post(
# "/graphql",
# json={
# "query": DISK_USAGE_QUERY,
# "variables": {
# "start": datetime.fromtimestamp(1720136108).isoformat(),
# "end": datetime.fromtimestamp(1720137319).isoformat(),
# "step": 90,
# },
# },
# )
# data = get_data(response)
# assert data == ["test result"]
# def test_graphql_get_disk_usage_unauthorized(client):
# response = client.post(
# "/graphql",
# json={"query": DISK_USAGE_QUERY},
# )
# assert_empty(response)
# def test_graphql_get_memory_usage(
# client,
# authorized_client,
# mock_send_query,
# mock_get_status_active,
# ):
# response = authorized_client.post(
# "/graphql",
# json={"query": MEMORY_USAGE_QUERY},
# )
# data = get_data(response)
# assert data == ["test result"]
# def test_graphql_get_memory_usage_with_options(
# client,
# authorized_client,
# mock_send_query,
# mock_get_status_active,
# ):
# response = authorized_client.post(
# "/graphql",
# json={
# "query": MEMORY_USAGE_QUERY_WITH_OPTIONS,
# "variables": {
# "start": datetime.fromtimestamp(1720136108).isoformat(),
# "end": datetime.fromtimestamp(1720137319).isoformat(),
# "step": 90,
# },
# },
# )
# data = get_data(response)
# assert data == ["test result"]
# def test_graphql_get_memory_usage_unauthorized(client):
# response = client.post(
# "/graphql",
# json={"query": MEMORY_USAGE_QUERY},
# )
# assert_empty(response)
# def test_graphql_get_cpu_usage(
# client,
# authorized_client,
# mock_send_query,
# mock_get_status_active,
# ):
# response = authorized_client.post(
# "/graphql",
# json={"query": CPU_USAGE_QUERY},
# )
# data = get_data(response)
# assert data == ["test result"]
# def test_graphql_get_cpu_usage_with_options(
# client,
# authorized_client,
# mock_send_query,
# mock_get_status_active,
# ):
# response = authorized_client.post(
# "/graphql",
# json={
# "query": CPU_USAGE_QUERY_WITH_OPTIONS,
# "variables": {
# "start": datetime.fromtimestamp(1720136108).isoformat(),
# "end": datetime.fromtimestamp(1720137319).isoformat(),
# "step": 90,
# },
# },
# )
# data = get_data(response)
# assert data == ["test result"]
# def test_graphql_get_cpu_usage_unauthorized(client):
# response = client.post(
# "/graphql",
# json={"query": CPU_USAGE_QUERY},
# )
# assert_empty(response)
# def test_graphql_get_network_usage(
# client,
# authorized_client,
# mock_send_query,
# mock_get_status_active,
# ):
# response = authorized_client.post(
# "/graphql",
# json={"query": NETWORK_USAGE_QUERY},
# )
# data = get_data(response)
# assert data == ["test result"]
# def test_graphql_get_network_usage_with_options(
# client,
# authorized_client,
# mock_send_query,
# mock_get_status_active,
# ):
# response = authorized_client.post(
# "/graphql",
# json={
# "query": NETWORK_USAGE_QUERY_WITH_OPTIONS,
# "variables": {
# "start": datetime.fromtimestamp(1720136108).isoformat(),
# "end": datetime.fromtimestamp(1720137319).isoformat(),
# "step": 90,
# },
# },
# )
# data = get_data(response)
# assert data == ["test result"]
# def test_graphql_get_network_usage_unauthorized(client):
# response = client.post(
# "/graphql",
# json={"query": NETWORK_USAGE_QUERY},
# )
# assert_empty(response)