diff --git a/CHANGELOG.md b/CHANGELOG.md index c69d215..dfb7252 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,15 @@ This project adheres to [Semantic Versioning](https://semver.org/). *Unreleased* versions radiate potential—-and dread. Once you merge an infernal PR, move its bullet under a new version heading with the actual release date.* --> +## [Unreleased] - YYYY-MM-DD +### Added +- 🔥 Moved alembic migration code into openSAMPL along with Docker image information +- 🔥 Moved backend api code into openSAMPL along with Docker image information +- 🔥 Docker-compose for developers which installs openSAMPL as editable on backend image + +### Fixed +- 🩹 Bug which caused random data duration to always be 1 hour + ## [1.1.5] - 2025-09-22 ### Fixed - 🩹 More durable timestamp extrapolation in time data insertion diff --git a/opensampl/server/__init__.py b/opensampl/server/__init__.py index 9afc2c0..8568dcd 100644 --- a/opensampl/server/__init__.py +++ b/opensampl/server/__init__.py @@ -12,10 +12,14 @@ def check_command(command: list[str]) -> bool: return False -if not check_command(["docker", "--version"]): - raise ImportError("Docker is not installed or not found in PATH. Please install Docker.") +def ensure_docker(): + """Ensure Docker and Docker Compose are installed, error if not""" + if not check_command(["docker", "--version"]): + raise RuntimeError("Docker is not installed or not found in PATH. Please install Docker.") -compose_installed = check_command(["docker", "compose", "version"]) or check_command(["docker-compose", "--version"]) + compose_installed = check_command(["docker", "compose", "version"]) or check_command( + ["docker-compose", "--version"] + ) -if not compose_installed: - raise ImportError("Neither 'docker compose' nor 'docker-compose' is installed. Please install Docker Compose.") + if not compose_installed: + raise RuntimeError("Neither 'docker compose' nor 'docker-compose' is installed. 
Please install Docker Compose.") diff --git a/opensampl/server/backend/Dockerfile b/opensampl/server/backend/Dockerfile new file mode 100755 index 0000000..31b6875 --- /dev/null +++ b/opensampl/server/backend/Dockerfile @@ -0,0 +1,15 @@ +FROM python:3.12 AS base +ARG OPENSAMPL_VERSION=1.1.5 + +WORKDIR /tmp +ENV ROUTE_TO_BACKEND=false + +FROM base AS prod + +RUN pip install --no-cache-dir "opensampl[backend]==${OPENSAMPL_VERSION}" + +CMD ["uvicorn", "opensampl.server.backend.main:app", "--proxy-headers", "--host", "0.0.0.0", "--port", "8000"] + +FROM base AS dev + +CMD ["sh", "-c", "pip install -e \"./opensampl[backend]\" && uvicorn opensampl.server.backend.main:app --proxy-headers --host 0.0.0.0 --port 8000 --log-level debug --reload"] \ No newline at end of file diff --git a/opensampl/server/backend/__init__.py b/opensampl/server/backend/__init__.py new file mode 100644 index 0000000..478ae3a --- /dev/null +++ b/opensampl/server/backend/__init__.py @@ -0,0 +1 @@ +"""Backend API tooling""" diff --git a/opensampl/server/backend/main.py b/opensampl/server/backend/main.py new file mode 100644 index 0000000..a53c5a4 --- /dev/null +++ b/opensampl/server/backend/main.py @@ -0,0 +1,372 @@ +"""API Configuration to Indirectly interact with the database""" + +import io +import json +import os +import sys +import time +from datetime import UTC, datetime, timedelta +from typing import Any, Callable, Optional + +import pandas as pd +import psycopg2 +from fastapi import Depends, FastAPI, File, Form, HTTPException, Request, Response, Security, UploadFile +from fastapi.responses import JSONResponse, RedirectResponse +from fastapi.security.api_key import APIKeyHeader +from loguru import logger +from prometheus_client import CONTENT_TYPE_LATEST, Counter, Histogram, generate_latest +from pydantic import BaseModel +from sqlalchemy import create_engine, or_, select, text +from sqlalchemy.exc import IntegrityError, SQLAlchemyError +from sqlalchemy.orm import Session, sessionmaker + 
+from opensampl import load_data +from opensampl.db.access_orm import APIAccessKey +from opensampl.db.orm import ProbeMetadata +from opensampl.metrics import METRICS, MetricType +from opensampl.references import REF_TYPES, CompoundReferenceType, ReferenceType +from opensampl.vendors.constants import ProbeKey, VendorType + + +class TimeDataPoint(BaseModel): + """Time Data Model""" + + time: str + value: float + + +class WriteTablePayload(BaseModel): + """Write Table Payload Model""" + + table: str + data: dict[str, Any] + if_exists: load_data.conflict_actions = "update" + + +class ProbeMetadataPayload(BaseModel): + """Probe Metadata Payload Model""" + + vendor: VendorType + probe_key: ProbeKey + data: dict[str, Any] + + +DATABASE_URI = os.getenv("DATABASE_URL") +engine = create_engine(DATABASE_URI) + +loglevel = os.getenv("BACKEND_LOG_LEVEL", "INFO") +app = FastAPI( + title="openSAMPL Backend", + description=""" + The backend for interacting with openSAMPL server + + Provides additional security and durability for loading data and interacting with database. 
+ """, +) + + +REQUEST_COUNT = Counter("http_requests_total", "Total number of HTTP requests", ["method", "endpoint", "http_status"]) + +REQUEST_LATENCY = Histogram( + "http_request_duration_seconds", "Duration of HTTP requests in seconds", ["method", "endpoint"] +) + +EXCLUDED_PATHS = {"/metrics", "/healthcheck", "/healthcheck_database", "/healthcheck_metadata"} + +logger.configure(handlers=[{"sink": sys.stderr, "level": loglevel}]) + +USE_API_KEY = os.getenv("USE_API_KEY", "false").lower() == "true" +API_KEY_NAME = "access-key" + +api_key_header = APIKeyHeader(name=API_KEY_NAME, auto_error=False) + + +def get_keys(): + """Get active API keys""" + env_keys = os.getenv("API_KEYS", "").strip() + keys = [k.strip() for k in env_keys.split(",") if k.strip()] + if keys: + logger.debug("api access keys loaded from env") + return keys + try: + Session = sessionmaker(bind=engine) # noqa: N806 + with Session() as session: + now = datetime.now(tz=UTC) + stmt = select(APIAccessKey.key).where(or_(APIAccessKey.expires_at is None, APIAccessKey.expires_at > now)) + result = session.execute(stmt) + keys = [row[0] for row in result.all()] + logger.debug("api access keys loaded from db") + return keys + except Exception as e: + logger.debug(f"exception attempting to load api access keys from db: {e}") + return [] + + +def validate_api_key(api_key: str = Security(api_key_header)): + """Validate provided API key""" + if not USE_API_KEY: + return None # Security is disabled + if api_key not in get_keys(): + raise HTTPException(status_code=403, detail="Invalid or missing API key") + return api_key + + +def get_db(): + """Get database session""" + Session = sessionmaker(bind=engine) # noqa: N806 + try: + session = Session() + yield session + finally: + session.close() + + +@app.middleware("http") +async def metrics_middleware(request: Request, call_next: Callable) -> Response: + """Middleware to track request metrics.""" + if request.url.path in EXCLUDED_PATHS: + return await 
call_next(request) + start_time = time.time() + response: Response = await call_next(request) + duration = time.time() - start_time + + REQUEST_COUNT.labels(method=request.method, endpoint=request.url.path, http_status=response.status_code).inc() + + REQUEST_LATENCY.labels(method=request.method, endpoint=request.url.path).observe(duration) + + return response + + +# add route to docs from / to /docs +@app.get("/", include_in_schema=False) +async def docs_redirect(): + """Redirect bare url to docs""" + return RedirectResponse(url="/docs") + + +@app.get("/setloglevel") +def set_log_level(newloglevel: str, api_key: str = Depends(validate_api_key)): + """Change visible log level in backend container""" + newloglevel = newloglevel.upper() + logger.configure(handlers=[{"sink": sys.stderr, "level": newloglevel}]) + return {"loglevel": newloglevel} + + +@app.get("/checkloglevel") +def check_log_level(api_key: str = Depends(validate_api_key)): + """Check which log levels are visible in backend container""" + logger.debug("Debug test") + logger.info("Info test") + logger.warning("Warning test") + logger.error("Error test") + current_level = next(iter(logger._core.handlers.values()))["level"].name # noqa: SLF001 + return {"loglevel": current_level} + + +@app.post("/write_to_table") +def write_to_table( + payload: WriteTablePayload, api_key: str = Depends(validate_api_key), session: Session = Depends(get_db) +): + """Write given data to specified table""" + try: + load_data.write_to_table(table=payload.table, data=payload.data, if_exists=payload.if_exists, session=session) + logger.debug(f"Successfully wrote to {payload.table} using: {payload.data}") + return JSONResponse(content={"message": f"Succeeded loading data into {payload.table}"}, status_code=200) + except IntegrityError as e: + if isinstance(e.orig, psycopg2.errors.UniqueViolation): + return JSONResponse(content={"message": f"Unique violation error: {e}"}, status_code=409) + return JSONResponse(content={"message": 
f"Integrity error: {e}"}, status_code=500) + except SQLAlchemyError as e: + logger.error(f"SQLAlchemy error: {e}") + return JSONResponse(content={"message": f"Database error: {e}"}, status_code=500) + except json.JSONDecodeError as e: + logger.error(f"JSON decode error: {e}") + return JSONResponse(content={"message": f"Invalid JSON data: {e}"}, status_code=400) + except Exception as e: + logger.error(f"Unexpected error: {e}") + return JSONResponse(content={"message": f"Failed to load JSON into database: {e}"}, status_code=500) + + +@app.post("/load_time_data") +async def load_time_data( # noqa: PLR0912, C901 + probe_key_str: str = Form(...), + metric_type_str: Optional[str] = Form(None), + reference_type_str: Optional[str] = Form(None), + compound_key_str: Optional[str] = Form(None), + file: UploadFile = File(...), + api_key: str = Depends(validate_api_key), + session: Session = Depends(get_db), +): + """Load provided data for given probe""" + try: + probe_key = ProbeKey(**json.loads(probe_key_str)) + + if metric_type_str is not None: + metric_type_dict = json.loads(metric_type_str) + metric_type = MetricType(**metric_type_dict) + else: + metric_type = METRICS.UNKNOWN + + if reference_type_str is not None: + reference_type_dict = json.loads(reference_type_str) + if "reference_table" in reference_type_dict: + reference_type = CompoundReferenceType(**reference_type_dict) + else: + reference_type = ReferenceType(**reference_type_dict) + else: + reference_type = REF_TYPES.UNKNOWN + + compound_key = None if compound_key_str is None else json.loads(compound_key_str) + + content = await file.read() + df = pd.read_csv(io.BytesIO(content)) + logger.info(df.head()) + # Convert time strings back to datetime + df["time"] = pd.to_datetime(df["time"]) + + # Use the same load_time_data function as before + load_data.load_time_data( + probe_key=probe_key, + metric_type=metric_type, + reference_type=reference_type, + compound_key=compound_key, + data=df, + session=session, + ) + + 
return JSONResponse(content={"message": f"Successfully loaded {len(df)} data points"}, status_code=200) + except IntegrityError as e: + if session: + session.rollback() + session.close() + if isinstance(e.orig, psycopg2.errors.UniqueViolation): + return JSONResponse(content={"message": f"Unique violation error: {e}"}, status_code=409) + return JSONResponse(content={"message": f"Integrity error: {e}"}, status_code=500) + except SQLAlchemyError as e: + logger.error(f"Database error: {e}") + if session: + session.rollback() + session.close() + raise HTTPException(status_code=500, detail=f"Database error: {e!s}") from e + except Exception as e: + logger.error(f"Unexpected error: {e}") + if session: + session.rollback() + session.close() + raise HTTPException(status_code=500, detail=f"Error processing time series data: {e!s}") from e + + +@app.post("/load_probe_metadata") +def load_probe_metadata( + payload: ProbeMetadataPayload, api_key: str = Depends(validate_api_key), session: Session = Depends(get_db) +): + """Load metadata for given probe""" + logger.debug(f"Received payload: {payload.model_dump()}") + + try: + load_data.load_probe_metadata( + vendor=payload.vendor, probe_key=payload.probe_key, data=payload.data, session=session + ) + logger.debug( + f"Successfully wrote to {ProbeMetadata.__tablename__} and {payload.vendor.metadata_table}: {payload.data}" + ) + return JSONResponse(content={"message": f"Succeeded loaded metadata for {payload.probe_key}"}, status_code=200) + except IntegrityError as e: + session.rollback() + if isinstance(e.orig, psycopg2.errors.UniqueViolation): + return JSONResponse(content={"message": f"Unique violation error: {e}"}, status_code=409) + return JSONResponse(content={"message": f"Integrity error: {e}"}, status_code=500) + except SQLAlchemyError as e: + logger.error(f"SQLAlchemy error: {e}") + return JSONResponse(content={"message": f"Database error: {e}"}, status_code=500) + except json.JSONDecodeError as e: + logger.error(f"JSON 
decode error: {e}") + return JSONResponse(content={"message": f"Invalid JSON data: {e}"}, status_code=400) + except Exception as e: + logger.exception(f"Unexpected error: {e}") + return JSONResponse(content={"message": f"Failed to load JSON into database: {e}"}, status_code=500) + + +@app.get("/create_new_tables") +def create_new_tables( + create_schema: bool = True, api_key: str = Depends(validate_api_key), session: Session = Depends(get_db) +): + """Update DB based on ORM Tables""" + try: + load_data.create_new_tables(create_schema=create_schema, session=session) + return JSONResponse(content={"message": "Succeeded in creating any new tables"}, status_code=200) + except SQLAlchemyError as e: + logger.error(f"SQLAlchemy error: {e}") + return JSONResponse(content={"message": f"Database error: {e}"}, status_code=500) + except json.JSONDecodeError as e: + logger.error(f"JSON decode error: {e}") + return JSONResponse(content={"message": f"Invalid JSON data: {e}"}, status_code=400) + except Exception as e: + logger.error(f"Unexpected error: {e}") + return JSONResponse(content={"message": f"Failed to load JSON into database: {e}"}, status_code=500) + + +@app.get("/gen_api_key") +def generate_api_key(expire_after: Optional[int] = None, session: Session = Depends(get_db)): + """Generate new API key in the database""" + try: + new_key = APIAccessKey() + new_key.generate_key() + if expire_after: + new_key.expires_at = datetime.now(tz=UTC) + timedelta(days=expire_after) + + session.add(new_key) + + session.commit() + return JSONResponse(content={"message": "Succeeded in creating new access key"}, status_code=200) + except SQLAlchemyError as e: + logger.error(f"SQLAlchemy error: {e}") + return JSONResponse(content={"message": f"Database error: {e}"}, status_code=500) + except Exception as e: + logger.error(f"Unexpected error: {e}") + return JSONResponse(content={"message": f"Failed to create new access key: {e}"}, status_code=500) + + +@app.get("/healthcheck") +def 
healthcheck(): + """Ensure the api is accepting queries""" + return {"status": "OK"} + + +@app.get("/healthcheck_database") +def healthcheck_db(): + """Ensure the db is accepting connections""" + try: + with engine.connect() as connection: + connection.execute(text("SELECT 1")) + except SQLAlchemyError as e: + return JSONResponse(content={"message": f"Database connection error: {e!s}"}, status_code=503) + else: + return {"status": "OK"} + + +@app.get("/healthcheck_metadata") +def healthcheck_metadata(): + """Ensure that the database exists AND the expected format is present""" + # eventually, we want to make the schema configurable through environment variables + # for now, we have it hard coded too many places. So this is a small step towards that goal + SCHEMA = "castdb" # noqa: N806 + + try: + with engine.connect() as connection: + result = connection.execute( + text("SELECT schema_name FROM information_schema.schemata WHERE schema_name = :schema;"), + {"schema": SCHEMA}, + ) + schema_exists = result.fetchone() is not None + if schema_exists: + return {"status": "OK"} + return JSONResponse(status_code=500, content={"message": f"Expected schema '{SCHEMA}' does not exist"}) + except SQLAlchemyError as e: + return JSONResponse(content={"message": f"Database connection error: {e!s}"}, status_code=503) + + +@app.get("/metrics", include_in_schema=False) +def metrics(): + """Expose Prometheus metrics.""" + return Response(content=generate_latest(), media_type=CONTENT_TYPE_LATEST) diff --git a/opensampl/server/cli.py b/opensampl/server/cli.py index d78ceeb..9815149 100644 --- a/opensampl/server/cli.py +++ b/opensampl/server/cli.py @@ -13,6 +13,9 @@ from loguru import logger from opensampl.config.server import ServerConfig +from opensampl.server import ensure_docker + +ensure_docker() def load_config(env_file: str | None = None) -> ServerConfig: diff --git a/opensampl/server/cli2.py b/opensampl/server/cli2.py index 14d9d54..6b15011 100644 --- a/opensampl/server/cli2.py 
+++ b/opensampl/server/cli2.py @@ -19,6 +19,9 @@ from loguru import logger from opensampl.config.server import ServerConfig +from opensampl.server import ensure_docker + +ensure_docker() def load_config(env_file: str | None = None) -> ServerConfig: diff --git a/opensampl/server/docker-compose.dev.yaml b/opensampl/server/docker-compose.dev.yaml new file mode 100644 index 0000000..c1cc204 --- /dev/null +++ b/opensampl/server/docker-compose.dev.yaml @@ -0,0 +1,75 @@ +services: + db: + image: savannah.ornl.gov/opensampl/db:latest + ports: + - "5415:5432" + volumes: + - castdb:/home/postgres/pgdata/data + environment: + - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} + - POSTGRES_USER=${POSTGRES_USER} + - POSTGRES_DB=${POSTGRES_DB} + - GF_SECURITY_ADMIN_PASSWORD=${GF_SECURITY_ADMIN_PASSWORD} + restart: unless-stopped + healthcheck: + test: [ "CMD", "pg_isready", "-U", "${POSTGRES_USER}", "-d", "${POSTGRES_DB}" ] + interval: 5s + retries: 5 + start_period: 10s + timeout: 3s + command: > + postgres + -c shared_preload_libraries=timescaledb,pg_cron + -c cron.database_name=${POSTGRES_DB} + + grafana: + image: savannah.ornl.gov/opensampl/grafana:latest + build: + context: ./grafana + restart: unless-stopped + ports: + - "3015:3000" + env_file: + - grafana/grafana.env + environment: + - POSTGRES_DB=${POSTGRES_DB} + - GF_SECURITY_ADMIN_PASSWORD=${GF_SECURITY_ADMIN_PASSWORD} + volumes: + - grafana-data:/var/lib/grafana + + + migrations: + image: savannah.ornl.gov/opensampl/migrations:latest + build: + context: migrations + restart: "no" + environment: + - DB_URI=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB} + depends_on: + db: + condition: service_healthy + + + backend: + image: savannah.ornl.gov/opensampl/backend:latest + build: + context: backend + target: dev + ports: + - "8015:8000" + restart: unless-stopped + environment: + - DATABASE_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB} + - ROUTE_TO_BACKEND=false + - 
BACKEND_LOG_LEVEL=${BACKEND_LOG_LEVEL} + - USE_API_KEY=${USE_API_KEY} + - API_KEYS=${API_KEYS} + volumes: + - ../..:/tmp/opensampl + depends_on: + db: + condition: service_healthy + +volumes: + castdb: + grafana-data: \ No newline at end of file diff --git a/opensampl/server/docker-compose.yaml b/opensampl/server/docker-compose.yaml index 931bf8a..48fa367 100644 --- a/opensampl/server/docker-compose.yaml +++ b/opensampl/server/docker-compose.yaml @@ -26,6 +26,7 @@ services: image: savannah.ornl.gov/opensampl/grafana:latest build: context: ./grafana + restart: "always" ports: - "3015:3000" environment: @@ -35,6 +36,8 @@ services: migrations: image: savannah.ornl.gov/opensampl/migrations:latest + build: + context: ./migrations restart: "no" environment: - DB_URI=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB} @@ -46,6 +49,9 @@ services: backend: image: savannah.ornl.gov/opensampl/backend:latest + build: + context: ./backend + target: prod ports: - "8015:8000" restart: always diff --git a/opensampl/server/migrations/Dockerfile b/opensampl/server/migrations/Dockerfile new file mode 100644 index 0000000..d3c7ae1 --- /dev/null +++ b/opensampl/server/migrations/Dockerfile @@ -0,0 +1,19 @@ +FROM python:3.12 + +ARG OPENSAMPL_VERSION=1.1.5 + +WORKDIR / +RUN pip install --no-cache-dir "opensampl[migrations]==${OPENSAMPL_VERSION}" alembic + +RUN useradd -m alembic +USER alembic +WORKDIR /app +COPY _migrations/ /app/_migrations/ +COPY alembic.ini . 
+ +# needs 15 seconds for the db to finish initializing when run locally +CMD ["sh", "-c", "sleep 15 && alembic upgrade head"] +# uncomment below to reset database before any migration +#CMD ["alembic", "downgrade", "base"] +# or uncomment below to allow container to just stay awake +#CMD ["tail", "-f", "/dev/null"] diff --git a/opensampl/server/migrations/_migrations/README b/opensampl/server/migrations/_migrations/README new file mode 100644 index 0000000..98e4f9c --- /dev/null +++ b/opensampl/server/migrations/_migrations/README @@ -0,0 +1 @@ +Generic single-database configuration. \ No newline at end of file diff --git a/opensampl/server/migrations/_migrations/env.py b/opensampl/server/migrations/_migrations/env.py new file mode 100644 index 0000000..a44ba86 --- /dev/null +++ b/opensampl/server/migrations/_migrations/env.py @@ -0,0 +1,81 @@ +import os +from logging.config import fileConfig + +from sqlalchemy import engine_from_config +from sqlalchemy import pool + +from alembic import context + +sqlalchemy_url = os.environ.get('DB_URI') +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. +config = context.config + +# Interpret the config file for Python logging. +# This line sets up loggers basically. +if config.config_file_name is not None: + fileConfig(config.config_file_name) + +# add your model's MetaData object here +# for 'autogenerate' support +# from myapp import mymodel +# target_metadata = mymodel.Base.metadata +target_metadata = None + +# other values from the config, defined by the needs of env.py, +# can be acquired: +# my_important_option = config.get_main_option("my_important_option") +# ... etc. +config.set_main_option('sqlalchemy.url', sqlalchemy_url) + + +def run_migrations_offline() -> None: +    """Run migrations in 'offline' mode. + +    This configures the context with just a URL +    and not an Engine, though an Engine is acceptable +    here as well. 
By skipping the Engine creation + we don't even need a DBAPI to be available. + + Calls to context.execute() here emit the given string to the + script output. + + """ + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + ) + + with context.begin_transaction(): + context.run_migrations() + + +def run_migrations_online() -> None: + """Run migrations in 'online' mode. + + In this scenario we need to create an Engine + and associate a connection with the context. + + """ + connectable = engine_from_config( + config.get_section(config.config_ini_section, {}), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + with connectable.connect() as connection: + context.configure( + connection=connection, target_metadata=target_metadata + ) + + with context.begin_transaction(): + context.run_migrations() + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/opensampl/server/migrations/_migrations/script.py.mako b/opensampl/server/migrations/_migrations/script.py.mako new file mode 100644 index 0000000..fbc4b07 --- /dev/null +++ b/opensampl/server/migrations/_migrations/script.py.mako @@ -0,0 +1,26 @@ +"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision | comma,n} +Create Date: ${create_date} + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +# revision identifiers, used by Alembic. 
+revision: str = ${repr(up_revision)} +down_revision: Union[str, None] = ${repr(down_revision)} +branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)} +depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)} + + +def upgrade() -> None: + ${upgrades if upgrades else "pass"} + + +def downgrade() -> None: + ${downgrades if downgrades else "pass"} diff --git a/opensampl/server/migrations/_migrations/versions/2024_03_26_1145_create_schema_initialize_orm.py b/opensampl/server/migrations/_migrations/versions/2024_03_26_1145_create_schema_initialize_orm.py new file mode 100644 index 0000000..f3f97c6 --- /dev/null +++ b/opensampl/server/migrations/_migrations/versions/2024_03_26_1145_create_schema_initialize_orm.py @@ -0,0 +1,101 @@ +"""create schema & initialize orm + +Revision ID: fe18404ea614 +Revises: +Create Date: 2024-03-26 11:45:04.612673 + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa +import geoalchemy2 + + +# revision identifiers, used by Alembic. 
+revision: str = 'fe18404ea614' +down_revision: Union[str, None] = None +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + +SCHEMA = 'castdb' +def upgrade() -> None: + # starting postgis + op.execute("CREATE EXTENSION IF NOT EXISTS postgis;") + + # Create our schema + op.execute(f"CREATE SCHEMA IF NOT EXISTS {SCHEMA};") + op.execute(f"CREATE SCHEMA IF NOT EXISTS access;") + + # Create locations table + op.create_table('locations', + sa.Column('uuid', sa.String(36), primary_key=True), + sa.Column('name', sa.Text(), nullable=False, unique=True), + sa.Column('geom', geoalchemy2.Geometry(geometry_type='GEOMETRY', srid=4326)), + sa.Column('public', sa.Boolean(), nullable=True), + schema=SCHEMA, + if_not_exists=True + ) + + # Create test_metadata table + op.create_table('test_metadata', + sa.Column('uuid', sa.String(36), primary_key=True), + sa.Column('name', sa.Text(), unique=True, nullable=False), + sa.Column('start_date', sa.TIMESTAMP()), + sa.Column('end_date', sa.TIMESTAMP()), + schema=SCHEMA, + if_not_exists=True + ) + + # Create probe_metadata table + op.create_table('probe_metadata', + sa.Column('uuid', sa.String(36), primary_key=True), + sa.Column('probe_id', sa.Text()), + sa.Column('ip_address', sa.Text()), + sa.Column('vendor', sa.Text()), + sa.Column('model', sa.Text()), + sa.Column('name', sa.Text(), unique=True), + sa.Column('public', sa.Boolean(), nullable=True), + sa.Column('location_uuid', sa.String(36), sa.ForeignKey('castdb.locations.uuid')), + sa.Column('test_uuid', sa.String(36), sa.ForeignKey('castdb.test_metadata.uuid')), + sa.UniqueConstraint('probe_id', 'ip_address', name='uq_probe_metadata_ipaddress_probeid'), + schema=SCHEMA, + if_not_exists=True + ) + + # Create probe_data table + op.create_table('probe_data', + sa.Column('time', sa.TIMESTAMP(), primary_key=True), + sa.Column('probe_uuid', sa.String(36), sa.ForeignKey('castdb.probe_metadata.uuid'), + primary_key=True), + 
sa.Column('value', sa.NUMERIC()), + schema=SCHEMA, + if_not_exists=True + ) + + # Create adva_metadata table + op.create_table('adva_metadata', + sa.Column('probe_uuid', sa.String(36), sa.ForeignKey('castdb.probe_metadata.uuid'), + primary_key=True), + sa.Column('type', sa.Text()), + sa.Column('start', sa.TIMESTAMP()), + sa.Column('frequency', sa.Integer()), + sa.Column('timemultiplier', sa.Integer()), + sa.Column('multiplier', sa.Integer()), + sa.Column('title', sa.Text()), + sa.Column('adva_probe', sa.Text()), + sa.Column('adva_reference', sa.Text()), + sa.Column('adva_reference_expected_ql', sa.Text()), + sa.Column('adva_source', sa.Text()), + sa.Column('adva_direction', sa.Text()), + sa.Column('adva_version', sa.Float()), + sa.Column('adva_status', sa.Text()), + sa.Column('adva_mtie_mask', sa.Text()), + sa.Column('adva_mask_margin', sa.Integer()), + schema=SCHEMA, + if_not_exists=True + ) + + +def downgrade() -> None: + op.execute(f"DROP SCHEMA IF EXISTS {SCHEMA} CASCADE;") diff --git a/opensampl/server/migrations/_migrations/versions/2024_12_04_1155_update_db_tables.py b/opensampl/server/migrations/_migrations/versions/2024_12_04_1155_update_db_tables.py new file mode 100644 index 0000000..a6023ea --- /dev/null +++ b/opensampl/server/migrations/_migrations/versions/2024_12_04_1155_update_db_tables.py @@ -0,0 +1,195 @@ +"""update db tables + +Revision ID: 7f8adc06bb6b +Revises: fe18404ea614 +Create Date: 2024-12-04 11:55:12.955284 + +""" +from typing import Sequence, Union, Dict + +from alembic import op +import sqlalchemy as sa +from sqlalchemy.engine import reflection +from loguru import logger +import uuid + +# revision identifiers, used by Alembic. 
+revision: str = '7f8adc06bb6b' +down_revision: Union[str, None] = 'fe18404ea614' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + +SCHEMA = 'castdb' + +def create_uuid_mapping(connection, table_name: str, id_columns: list) -> Dict[tuple, str]: + """Create mapping of old composite keys to new UUIDs""" + # Query all existing records + select_stmt = sa.text(f""" + SELECT {', '.join(id_columns)} + FROM {SCHEMA}.{table_name} + """) + records = connection.execute(select_stmt).fetchall() + + # Create mapping + return {tuple(record): str(uuid.uuid4()) for record in records} + + +def upgrade(): + # Create connection for executing raw SQL + connection = op.get_bind() + inspector = reflection.Inspector.from_engine(connection) + existing_columns = [col['name'] for col in inspector.get_columns('probe_metadata', schema=SCHEMA)] + + # Step 1: Add new UUID column to probe_metadata (nullable initially) + if 'uuid' not in existing_columns: + op.add_column('probe_metadata', sa.Column('uuid', sa.String(36), nullable=True), schema=SCHEMA) + # Generate and store UUIDs for existing probe_metadata records + probe_uuid_map = create_uuid_mapping( + connection, + 'probe_metadata', + ['probe_id', 'ip_address'] + ) + + # Update probe_metadata with UUIDs + for (probe_id, ip_address), new_uuid in probe_uuid_map.items(): + op.execute(f""" + UPDATE {SCHEMA}.probe_metadata + SET uuid = '{new_uuid}', vendor = 'ADVA' + WHERE probe_id = '{probe_id}' + AND ip_address = '{ip_address}' + """) + + # Make UUID column non-nullable and make it the primary key + op.alter_column('probe_metadata', 'uuid', + existing_type=sa.String(36), + nullable=False, + schema=SCHEMA + ) + + if 'public' not in existing_columns: + op.add_column('probe_metadata', sa.Column('public', sa.Boolean, nullable=True), schema=SCHEMA) + + + + def safe_drop_constraint(constraint, table): + if table in inspector.get_table_names(schema=SCHEMA): + constraints = [fk['name'] for fk 
in inspector.get_foreign_keys(table, schema=SCHEMA) if fk['name']] + if constraint in constraints: + op.drop_constraint(constraint, table, type_='foreignkey', schema=SCHEMA) + + # First drop foreign key constraints from dependent tables + safe_drop_constraint('ad_data_probe_id_ip_address_fkey', 'ad_data') + safe_drop_constraint('mtie_data_probe_id_ip_address_fkey', 'mtie_data') + safe_drop_constraint('avg_phase_err_data_probe_id_ip_address_fkey', 'avg_phase_err_data') + safe_drop_constraint('raw_data_probe_id_ip_address_fkey', 'raw_data') + safe_drop_constraint('headers_probe_id_ip_address_fkey', 'headers') + + # Drop old primary key and create new one with UUID + pk_info = inspector.get_pk_constraint('probe_metadata', schema=SCHEMA) + existing_pk_name = pk_info.get('name') + existing_pk_cols = pk_info.get('constrained_columns', []) + + # Replace only if it's not already set to 'uuid' as the sole primary key + if existing_pk_cols != ['uuid']: + if existing_pk_name: + op.drop_constraint(existing_pk_name, 'probe_metadata', type_='primary', schema=SCHEMA) + + op.create_primary_key( + 'probe_metadata_pkey', + 'probe_metadata', + ['uuid'], + schema=SCHEMA + ) + def safe_create_unique_constraint(name, table, columns): + existing = [uc['name'] for uc in inspector.get_unique_constraints(table, schema=SCHEMA)] + if name not in existing: + op.create_unique_constraint(name, table, columns, schema=SCHEMA) + + safe_create_unique_constraint('uq_probe_metadata_uuid', 'probe_metadata', ['uuid']) + safe_create_unique_constraint('uq_probe_metadata_name', 'probe_metadata', ['name']) + safe_create_unique_constraint('uq_probe_metadata_ipaddress_probeid', 'probe_metadata', ['ip_address', 'probe_id']) + + + # Now create adva_metadata table (after uuid is unique) + op.create_table('adva_metadata', + sa.Column('probe_uuid', sa.String(36), + sa.ForeignKey(f'{SCHEMA}.probe_metadata.uuid'), + primary_key=True), + sa.Column('type', sa.Text), + sa.Column('start', sa.TIMESTAMP), + 
sa.Column('frequency', sa.Integer), + sa.Column('timemultiplier', sa.Integer), + sa.Column('multiplier', sa.Integer), + sa.Column('title', sa.Text), + sa.Column('adva_probe', sa.Text), + sa.Column('adva_reference', sa.Text), + sa.Column('adva_reference_expected_ql', sa.Text), + sa.Column('adva_source', sa.Text), + sa.Column('adva_direction', sa.Text), + sa.Column('adva_version', sa.Float), + sa.Column('adva_status', sa.Text), + sa.Column('adva_mtie_mask', sa.Text), + sa.Column('adva_mask_margin', sa.Integer), + schema=SCHEMA, + if_not_exists=True + ) + + # Migrate data from adva_headers to adva_metadata + if 'adva_headers' in inspector.get_table_names(schema=SCHEMA): + op.execute(f""" + INSERT INTO {SCHEMA}.adva_metadata ( + probe_uuid, type, start, frequency, multiplier, + adva_probe, adva_reference, + adva_source, adva_direction, adva_version, adva_status, + adva_mtie_mask, adva_mask_margin + ) + SELECT + pm.uuid, + ah.type, + ah.start, + ah.frequency, + CAST(ah.multiplier as INTEGER), + ah.adva_probe, + ah.adva_ref as adva_reference, + ah.adva_src as adva_source, + ah.adva_direction, + CAST(ah.adva_version as FLOAT), + ah.adva_status, + ah.adva_mtie_mask, + CAST(ah.adva_mask_margin as INTEGER) + FROM {SCHEMA}.adva_headers ah + JOIN {SCHEMA}.headers h ON h.adva_id = ah.id + JOIN {SCHEMA}.probe_metadata pm + ON pm.probe_id = h.probe_id + AND pm.ip_address = h.ip_address + """) + + # Create new probe_data table + op.create_table('probe_data', + sa.Column('time', sa.TIMESTAMP, primary_key=True), + sa.Column('probe_uuid', sa.String(36), + sa.ForeignKey(f'{SCHEMA}.probe_metadata.uuid'), + primary_key=True), + sa.Column('value', sa.NUMERIC), + schema=SCHEMA, + if_not_exists=True + ) + + # Convert probe_data to hypertable + op.execute(""" + SELECT create_hypertable('castdb.probe_data', 'time', + chunk_time_interval => INTERVAL '1 hour', + if_not_exists => TRUE, + migrate_data => TRUE); + """) + + # Drop old header tables + op.drop_table('adva_headers', schema=SCHEMA, 
if_exists=True) + op.drop_table('headers', schema=SCHEMA, if_exists=True) + + + +def downgrade(): + # This migration is not reversible due to potential data loss + # and the complexity of regenerating composite keys + logger.info("Downgrade is not supported for this migration.") diff --git a/opensampl/server/migrations/_migrations/versions/2025_01_28_2212_create_time_buckets.py b/opensampl/server/migrations/_migrations/versions/2025_01_28_2212_create_time_buckets.py new file mode 100644 index 0000000..a6a7c80 --- /dev/null +++ b/opensampl/server/migrations/_migrations/versions/2025_01_28_2212_create_time_buckets.py @@ -0,0 +1,109 @@ +"""create time buckets + +Revision ID: c464878dac7b +Revises: 7f8adc06bb6b +Create Date: 2025-01-28 22:12:48.387383 + +""" +from typing import Sequence, Union, Tuple + +from alembic import op +import sqlalchemy as sa +from loguru import logger + + +# revision identifiers, used by Alembic. +revision: str = 'c464878dac7b' +down_revision: Union[str, None] = '7f8adc06bb6b' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + +time_buckets = [ + #suffix interval cron schedule (min hr * * *) + ('1min', '1 minute', '*/5 * * * *'), # Every 5 minutes + ('5min', '5 minutes', '*/5 * * * *'), # Every 5 minutes + ('15min', '15 minutes', '*/15 * * * *'), # Every 15 minutes + ('1hour', '1 hour', '0 * * * *'), # Start of every hour + ('6hour', '6 hours', '0 */6 * * *'), # Every 6 hours + ('1day', '1 day', '0 0 * * *') # Midnight every day +] + +# def execute_out_of_transaction(statement: str): +# """Execute a statement outside of a transaction block""" +# # Get connection from alembic +# connection = op.get_bind() +# +# # Close existing transaction +# connection.execution_options(isolation_level="AUTOCOMMIT") +# +# # Execute statement +# connection.execute(statement) + + +def upgrade(): + # Install pg_cron extension if not exists + op.execute(""" + CREATE EXTENSION IF NOT EXISTS pg_cron; + """) + 
+ for suffix, interval, schedule in time_buckets: + # Create materialized views with indexes + try: + op.execute(f""" + CREATE MATERIALIZED VIEW castdb.avg_phase_err_{suffix} AS + SELECT + time_bucket('{interval}', pd.time) as "time", + pd.probe_uuid as uuid, + AVG(pd.value) * 1e9 as value + FROM castdb.probe_data pd + GROUP BY + time_bucket('{interval}', pd.time), + pd.probe_uuid; + + CREATE INDEX ON castdb.avg_phase_err_{suffix} ("time" DESC); + CREATE INDEX ON castdb.avg_phase_err_{suffix} (uuid); + + CREATE UNIQUE INDEX IF NOT EXISTS avg_phase_err_{suffix}_unique_idx ON castdb.avg_phase_err_{suffix} ("time", uuid); + + CREATE MATERIALIZED VIEW castdb.mtie_{suffix} AS + SELECT + time_bucket('{interval}', pd.time) as "time", + pd.probe_uuid as uuid, + (MAX(pd.value) - MIN(pd.value)) * 1e9 as value + FROM castdb.probe_data pd + GROUP BY + time_bucket('{interval}', pd.time), + pd.probe_uuid; + + CREATE INDEX ON castdb.mtie_{suffix} ("time" DESC); + CREATE INDEX ON castdb.mtie_{suffix} (uuid); + + CREATE UNIQUE INDEX IF NOT EXISTS mtie_{suffix}_unique_idx ON castdb.mtie_{suffix} ("time", uuid); + -- Schedule refresh using pg_cron + SELECT cron.schedule( + 'refresh_{suffix}', + '{schedule}', + $$ + REFRESH MATERIALIZED VIEW CONCURRENTLY castdb.avg_phase_err_{suffix}; + REFRESH MATERIALIZED VIEW CONCURRENTLY castdb.mtie_{suffix}; + $$ + ); + """) + except Exception as e: + logger.warning(f"Error creating materialized view for {suffix}: {e}") + + +def downgrade(): + for suffix, _, _ in time_buckets: + # Remove cron jobs first + op.execute(f""" + SELECT cron.unschedule('refresh_{suffix}'); + """) + + op.execute(f"DROP MATERIALIZED VIEW IF EXISTS castdb.avg_phase_err_{suffix} CASCADE;") + op.execute(f"DROP MATERIALIZED VIEW IF EXISTS castdb.mtie_{suffix} CASCADE;") + + # Remove hypertable (this will keep the table but remove timescale functionality) + # op.execute(""" + # SELECT drop_chunks('castdb.probe_data', older_than => '-infinity'::timestamp); + # """) \ No newline 
at end of file diff --git a/opensampl/server/migrations/_migrations/versions/2025_01_29_0909_updating_location_and_test_tables.py b/opensampl/server/migrations/_migrations/versions/2025_01_29_0909_updating_location_and_test_tables.py new file mode 100644 index 0000000..7d57f84 --- /dev/null +++ b/opensampl/server/migrations/_migrations/versions/2025_01_29_0909_updating_location_and_test_tables.py @@ -0,0 +1,234 @@ +"""updating location and test tables + +Revision ID: bd1322d0b00f +Revises: c464878dac7b +Create Date: 2025-01-29 09:09:01.383919 + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa +from sqlalchemy.engine import reflection + +from loguru import logger + +import uuid +from typing import Dict + + +# revision identifiers, used by Alembic. +revision: str = 'bd1322d0b00f' +down_revision: Union[str, None] = 'c464878dac7b' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + +SCHEMA = 'castdb' + + +def create_uuid_mapping(connection, table_name: str, id_columns: list) -> Dict[tuple, str]: + """Create mapping of old composite keys to new UUIDs""" + # Query all existing records + select_stmt = sa.text(f""" + SELECT {', '.join(id_columns)} + FROM {SCHEMA}.{table_name} + """) + records = connection.execute(select_stmt).fetchall() + + # Create mapping + return {tuple(record): str(uuid.uuid4()) for record in records} + + +def upgrade(): + connection = op.get_bind() + inspector = reflection.Inspector.from_engine(connection) + + def safe_create_unique_constraint(name, table, columns): + existing = [uc['name'] for uc in inspector.get_unique_constraints(table, schema=SCHEMA)] + if name not in existing: + op.create_unique_constraint(name, table, columns, schema=SCHEMA) + + def safe_drop_constraint(constraint, table, type_='foreignkey'): + constraints = [fk['name'] for fk in inspector.get_foreign_keys(table, schema=SCHEMA) if fk['name']] + if constraint in constraints: + 
op.drop_constraint(constraint, table, type_=type_, schema=SCHEMA) + + def safe_create_foreign_key( + constraint: str, + source_table: str, + referent_table: str, + local_cols: list[str], + remote_cols: list[str] + ): + existing_fks = [fk["name"] for fk in inspector.get_foreign_keys(source_table, schema=SCHEMA)] + if constraint not in existing_fks: + op.create_foreign_key( + constraint, + source_table, + referent_table, + local_cols, + remote_cols, + source_schema=SCHEMA, + referent_schema=SCHEMA + ) + + location_columns = [col["name"] for col in inspector.get_columns("locations", schema=SCHEMA)] + probe_md_columns = [col['name'] for col in inspector.get_columns('probe_metadata', schema=SCHEMA)] + if "uuid" not in location_columns: + op.add_column("locations", sa.Column("uuid", sa.String(36), nullable=True), schema=SCHEMA) + location_uuid_map = create_uuid_mapping( + connection, + 'locations', + ['location_id'] + ) + + # Update locations with UUIDs + for (location_id,), new_uuid in location_uuid_map.items(): + op.execute(f""" + UPDATE {SCHEMA}.locations + SET uuid = '{new_uuid}' + WHERE location_id = '{location_id}' + """) + + # Make locations UUID and name columns non-nullable + op.alter_column('locations', 'uuid', + existing_type=sa.String(36), + nullable=False, + schema=SCHEMA + ) + + safe_create_unique_constraint( + 'uq_locations_uuid', + 'locations', + ['uuid'] + ) + safe_drop_constraint('probe_metadata_location_id_fkey', 'probe_metadata') + if 'location_uuid' not in probe_md_columns: + op.add_column('probe_metadata', + sa.Column('location_uuid', sa.String(36), nullable=True), + schema=SCHEMA + ) + if 'location_id' in probe_md_columns: + op.execute(f""" + UPDATE {SCHEMA}.probe_metadata pm + SET location_uuid = l.uuid + FROM {SCHEMA}.locations l + WHERE pm.location_id = l.location_id + """) + safe_drop_constraint('locations_pkey', 'locations', type_='primary') + op.create_primary_key( + 'locations_pkey', + 'locations', + ['uuid'], + schema=SCHEMA + ) + 
safe_create_foreign_key( + 'probe_metadata_location_uuid_fkey', + 'probe_metadata', + 'locations', + ['location_uuid'], + ['uuid'], + ) + + if "public" not in location_columns: + op.add_column("locations", sa.Column("public", sa.Boolean, nullable=True), schema=SCHEMA) + + op.alter_column('locations', 'name', + existing_type=sa.Text, + nullable=False, + schema=SCHEMA + ) + + safe_create_unique_constraint( + 'uq_locations_name', + 'locations', + ['name'] + ) + + # Step 2: Handle test_metadata table + test_columns = [col["name"] for col in inspector.get_columns("test_metadata", schema=SCHEMA)] + + if 'uuid' not in test_columns: + op.add_column('test_metadata', + sa.Column('uuid', sa.String(36), nullable=True), + schema=SCHEMA + ) + + # Generate UUIDs for test_metadata + test_uuid_map = create_uuid_mapping( + connection, + 'test_metadata', + ['test_id'] + ) + + # Update test_metadata with UUIDs + for (test_id,), new_uuid in test_uuid_map.items(): + op.execute(f""" + UPDATE {SCHEMA}.test_metadata + SET uuid = '{new_uuid}' + WHERE test_id = '{test_id}' + """) + + # Make test_metadata UUID and name columns non-nullable + op.alter_column('test_metadata', 'uuid', + existing_type=sa.String(36), + nullable=False, + schema=SCHEMA + ) + + safe_drop_constraint('probe_metadata_test_id_fkey', 'probe_metadata') + + if 'test_uuid' not in probe_md_columns: + op.add_column('probe_metadata', + sa.Column('test_uuid', sa.String(36), nullable=True), + schema=SCHEMA + ) + + if 'test_id' in probe_md_columns: + op.execute(f""" + UPDATE {SCHEMA}.probe_metadata pm + SET test_uuid = t.uuid + FROM {SCHEMA}.test_metadata t + WHERE pm.test_id = t.test_id + """) + + safe_drop_constraint('test_metadata_pkey', 'test_metadata', type_='primary') + op.create_primary_key( + 'test_metadata_pkey', + 'test_metadata', + ['uuid'], + schema=SCHEMA + ) + safe_create_foreign_key( + 'probe_metadata_test_uuid_fkey', + 'probe_metadata', + 'test_metadata', + ['test_uuid'], + ['uuid'] + ) + + 
op.alter_column('test_metadata', 'name', + existing_type=sa.Text, + nullable=False, + schema=SCHEMA + ) + + safe_create_unique_constraint( + 'uq_test_metadata_uuid', + 'test_metadata', + ['uuid'] + ) + safe_create_unique_constraint( + 'uq_test_metadata_name', + 'test_metadata', + ['name'] + ) + + + op.drop_column('locations', 'location_id', schema=SCHEMA, if_exists=True) + op.drop_column('test_metadata', 'test_id', schema=SCHEMA, if_exists=True) + op.drop_column('probe_metadata', 'location_id', schema=SCHEMA, if_exists=True) + op.drop_column('probe_metadata', 'test_id', schema=SCHEMA, if_exists=True) + +def downgrade(): + logger.info("Downgrade is not supported for this migration.") \ No newline at end of file diff --git a/opensampl/server/migrations/_migrations/versions/2025_03_05_0958_add_grafana_user_access.py b/opensampl/server/migrations/_migrations/versions/2025_03_05_0958_add_grafana_user_access.py new file mode 100644 index 0000000..8850392 --- /dev/null +++ b/opensampl/server/migrations/_migrations/versions/2025_03_05_0958_add_grafana_user_access.py @@ -0,0 +1,28 @@ +"""add grafana user access + +Revision ID: ba4a99e5f745 +Revises: bd1322d0b00f +Create Date: 2025-03-05 09:58:44.110655 + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision: str = 'ba4a99e5f745' +down_revision: Union[str, None] = 'bd1322d0b00f' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + op.execute("GRANT ALL ON SCHEMA castdb TO grafana;") + op.execute("GRANT SELECT ON ALL TABLES IN SCHEMA castdb TO grafana;") + op.execute("ALTER DEFAULT PRIVILEGES IN SCHEMA castdb GRANT SELECT ON TABLES TO grafana;") + + +def downgrade() -> None: + pass diff --git a/opensampl/server/migrations/_migrations/versions/2025_03_26_0743_create_campus_view.py b/opensampl/server/migrations/_migrations/versions/2025_03_26_0743_create_campus_view.py new file mode 100644 index 0000000..eafb2f6 --- /dev/null +++ b/opensampl/server/migrations/_migrations/versions/2025_03_26_0743_create_campus_view.py @@ -0,0 +1,61 @@ +"""create campus view + +Revision ID: e881512e7a10 +Revises: ba4a99e5f745 +Create Date: 2025-03-26 07:43:04.981724 + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision: str = 'e881512e7a10' +down_revision: Union[str, None] = 'ba4a99e5f745' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + +""" +This particular migration is not needed in our public version, just for the ORNL cast system +""" + +def upgrade() -> None: + op.execute(""" +CREATE VIEW castdb.campus_locations AS +WITH ornl AS ( + SELECT * FROM castdb.locations l + WHERE l.name = 'Oak Ridge National Laboratory' +), + hvc AS ( + SELECT * FROM castdb.locations l + WHERE l.name = 'Hardin Valley Campus' + ) +SELECT + l.uuid, + l.name, + l.public, + CASE + WHEN l.name = hvc.name THEN ST_Y(ornl.geom :: geometry) + ELSE ST_Y(l.geom :: geometry) + END AS latitude, + CASE + WHEN l.name = hvc.name THEN ST_X(ornl.geom :: geometry) + ELSE ST_X(l.geom :: geometry) + END AS longitude, + CASE + WHEN l.name = hvc.name THEN ornl.name + ELSE l.name + END AS campus, + CASE + WHEN l.name = hvc.name THEN ornl.geom + ELSE l.geom + END AS geom +FROM castdb.locations l, hvc, ornl; + """) + op.execute('GRANT SELECT ON ALL TABLES IN SCHEMA castdb TO "grafana";') + + +def downgrade() -> None: + op.execute(sa.text("DROP VIEW IF EXISTS castdb.campus_locations CASCADE;")) diff --git a/opensampl/server/migrations/_migrations/versions/2025_04_14_1231_update_retention_policy.py b/opensampl/server/migrations/_migrations/versions/2025_04_14_1231_update_retention_policy.py new file mode 100644 index 0000000..d381f5c --- /dev/null +++ b/opensampl/server/migrations/_migrations/versions/2025_04_14_1231_update_retention_policy.py @@ -0,0 +1,43 @@ +"""update retention policy + +Revision ID: 89ca5e16c662 +Revises: e881512e7a10 +Create Date: 2025-04-14 12:31:26.335799 + +""" +from typing import Sequence, Union +import os +from alembic import op +import sqlalchemy as sa +import re + +# revision identifiers, used by Alembic. 
+revision: str = '89ca5e16c662' +down_revision: Union[str, None] = 'e881512e7a10' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + +def sanitize_interval(value: str, fallback: str) -> str: + """ + Validate that the input string is a safe Postgres INTERVAL. + Fallback to a default if not valid. + """ + # Very basic pattern: number + space + unit (e.g., '7 days', '1 hour', etc.) + pattern = r"^\s*\d+\s+(second|minute|hour|day|week|month|year)s?\s*$" + if re.match(pattern, value.strip(), re.IGNORECASE): + return value.strip() + return fallback + +def upgrade() -> None: + chunk_interval = sanitize_interval(os.getenv("CHUNK_INTERVAL", ""), "1 day") + + # set chunks interval (1 hour was too small). Can be configured in ENV + op.execute(f"SELECT set_chunk_time_interval('castdb.probe_data', INTERVAL '{chunk_interval}');") + + + + +def downgrade(): + # Reset chunk interval to 1 hour + op.execute("SELECT set_chunk_time_interval('castdb.probe_data', INTERVAL '1 hour');") + diff --git a/opensampl/server/migrations/_migrations/versions/2025_04_22_1531_add_access_tokens.py b/opensampl/server/migrations/_migrations/versions/2025_04_22_1531_add_access_tokens.py new file mode 100644 index 0000000..41c3795 --- /dev/null +++ b/opensampl/server/migrations/_migrations/versions/2025_04_22_1531_add_access_tokens.py @@ -0,0 +1,33 @@ +"""add access tokens + +Revision ID: 4435cf3ed8eb +Revises: 89ca5e16c662 +Create Date: 2025-04-22 15:31:20.546256 + +""" +from typing import Sequence, Union +from datetime import datetime +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision: str = '4435cf3ed8eb' +down_revision: Union[str, None] = '89ca5e16c662' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade(): + op.create_table( + 'api_access_keys', + sa.Column('id', sa.Integer, primary_key=True), + sa.Column('key', sa.String(length=64), unique=True, nullable=False), + sa.Column('created_at', sa.DateTime, nullable=False, default=datetime.utcnow), + sa.Column('expires_at', sa.DateTime, nullable=True), + schema='access', + if_not_exists=True, + ) + +def downgrade(): + op.drop_table('api_access_keys', schema='access', if_exists=True) diff --git a/opensampl/server/migrations/_migrations/versions/2025_04_22_1650_turn_off_matviews.py b/opensampl/server/migrations/_migrations/versions/2025_04_22_1650_turn_off_matviews.py new file mode 100644 index 0000000..15f683f --- /dev/null +++ b/opensampl/server/migrations/_migrations/versions/2025_04_22_1650_turn_off_matviews.py @@ -0,0 +1,45 @@ +"""turn off matviews + +Revision ID: 74df2bd60bb8 +Revises: 4435cf3ed8eb +Create Date: 2025-04-22 16:50:04.899162 + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa + +from loguru import logger + +# revision identifiers, used by Alembic. +revision: str = '74df2bd60bb8' +down_revision: Union[str, None] = '4435cf3ed8eb' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + +def has_update(): + conn = op.get_bind() + has_privilege = conn.execute(sa.text(""" + SELECT has_table_privilege(current_user, 'cron.job', 'UPDATE') AS has_update + """)).scalar() + return has_privilege + +def upgrade() -> None: + """ + It ends up being more than adequate to simply run the queries via grafana to generate time buckets. Their caching is good enough. 
+ """ + + if not has_update(): + logger.warning("current user cannot update cron.job table to turn off.") + return + + op.execute("UPDATE cron.job SET active = false;") + + +def downgrade() -> None: + if not has_update(): + logger.warning("current user cannot update cron.job table to turn back on.") + return + + op.execute("UPDATE cron.job SET active = true;") diff --git a/opensampl/server/migrations/_migrations/versions/2025_06_03_1223_add_column_comments.py b/opensampl/server/migrations/_migrations/versions/2025_06_03_1223_add_column_comments.py new file mode 100644 index 0000000..040dc8b --- /dev/null +++ b/opensampl/server/migrations/_migrations/versions/2025_06_03_1223_add_column_comments.py @@ -0,0 +1,147 @@ +"""add column comments + +Revision ID: 07cf92bf4aa0 +Revises: 74df2bd60bb8 +Create Date: 2025-06-03 12:23:43.611971 + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa + +from loguru import logger + +# revision identifiers, used by Alembic. 
+revision: str = '07cf92bf4aa0' +down_revision: Union[str, None] = '74df2bd60bb8' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + +SCHEMA = 'castdb' + +def upgrade(): + """Add comments to all existing columns""" + + # Add comments to locations table + op.alter_column('locations', 'uuid', + comment="Auto generated primary key UUID for the location", + schema=SCHEMA) + op.alter_column('locations', 'name', + comment="Unique name identifying the location", + schema=SCHEMA) + op.alter_column('locations', 'geom', + comment="Geospatial point geometry (lat, lon, z)", + schema=SCHEMA) + op.alter_column('locations', 'public', + comment="Whether this location is publicly visible", + schema=SCHEMA) + + # Add comments to test_metadata table + op.alter_column('test_metadata', 'uuid', + comment="Auto generated primary key UUID for the test", + schema=SCHEMA) + op.alter_column('test_metadata', 'name', + comment="Unique name of the test", + schema=SCHEMA) + op.alter_column('test_metadata', 'start_date', + comment="Start timestamp of the test", + schema=SCHEMA) + op.alter_column('test_metadata', 'end_date', + comment="End timestamp of the test", + schema=SCHEMA) + + # Add comments to probe_metadata table + op.alter_column('probe_metadata', 'uuid', + comment="Auto generated primary key UUID for the probe metadata entry", + schema=SCHEMA) + op.alter_column('probe_metadata', 'probe_id', + comment="Interface ID of the probe device; can be multiple probes from the same ip_address", + schema=SCHEMA) + op.alter_column('probe_metadata', 'ip_address', + comment="IP address of the probe", + schema=SCHEMA) + op.alter_column('probe_metadata', 'vendor', + comment="Manufacturer/vendor of the probe", + schema=SCHEMA) + op.alter_column('probe_metadata', 'model', + comment="Model name/number of the probe", + schema=SCHEMA) + op.alter_column('probe_metadata', 'name', + comment="Human-readable name for the probe", + schema=SCHEMA) + 
op.alter_column('probe_metadata', 'public', + comment="Whether this probe is publicly visible", + schema=SCHEMA) + op.alter_column('probe_metadata', 'location_uuid', + comment="Foreign key to the associated location", + schema=SCHEMA) + op.alter_column('probe_metadata', 'test_uuid', + comment="Foreign key to the associated test", + schema=SCHEMA) + + # Add comments to probe_data table + op.alter_column('probe_data', 'time', + comment="Timestamp of the measurement", + schema=SCHEMA) + op.alter_column('probe_data', 'probe_uuid', + comment="Foreign key to the probe that collected the data", + schema=SCHEMA) + + # Add comments to adva_metadata table + op.alter_column('adva_metadata', 'probe_uuid', + comment="Foreign key to the associated probe", + schema=SCHEMA) + op.alter_column('adva_metadata', 'type', + comment="ADVA measurement type (eg Phase)", + schema=SCHEMA) + op.alter_column('adva_metadata', 'start', + comment="Start time for the current measurement series", + schema=SCHEMA) + op.alter_column('adva_metadata', 'frequency', + comment="Sampling frequency of the ADVA probe, in rate per second", + schema=SCHEMA) + op.alter_column('adva_metadata', 'timemultiplier', + comment="Time multiplier used by the ADVA tool", + schema=SCHEMA) + op.alter_column('adva_metadata', 'multiplier', + comment="Data scaling multiplier", + schema=SCHEMA) + + +def downgrade(): + """Remove comments from all columns""" + # Remove comments from locations table + op.alter_column('locations', 'uuid', comment=None, schema=SCHEMA) + op.alter_column('locations', 'name', comment=None, schema=SCHEMA) + op.alter_column('locations', 'geom', comment=None, schema=SCHEMA) + op.alter_column('locations', 'public', comment=None, schema=SCHEMA) + + # Remove comments from test_metadata table + op.alter_column('test_metadata', 'uuid', comment=None, schema=SCHEMA) + op.alter_column('test_metadata', 'name', comment=None, schema=SCHEMA) + op.alter_column('test_metadata', 'start_date', comment=None, 
schema=SCHEMA) + op.alter_column('test_metadata', 'end_date', comment=None, schema=SCHEMA) + + # Remove comments from probe_metadata table + op.alter_column('probe_metadata', 'uuid', comment=None, schema=SCHEMA) + op.alter_column('probe_metadata', 'probe_id', comment=None, schema=SCHEMA) + op.alter_column('probe_metadata', 'ip_address', comment=None, schema=SCHEMA) + op.alter_column('probe_metadata', 'vendor', comment=None, schema=SCHEMA) + op.alter_column('probe_metadata', 'model', comment=None, schema=SCHEMA) + op.alter_column('probe_metadata', 'name', comment=None, schema=SCHEMA) + op.alter_column('probe_metadata', 'public', comment=None, schema=SCHEMA) + op.alter_column('probe_metadata', 'location_uuid', comment=None, schema=SCHEMA) + op.alter_column('probe_metadata', 'test_uuid', comment=None, schema=SCHEMA) + + # Remove comments from probe_data table + op.alter_column('probe_data', 'time', comment=None, schema=SCHEMA) + op.alter_column('probe_data', 'probe_uuid', comment=None, schema=SCHEMA) + + # Remove comments from adva_metadata table + op.alter_column('adva_metadata', 'probe_uuid', comment=None, schema=SCHEMA) + op.alter_column('adva_metadata', 'type', comment=None, schema=SCHEMA) + op.alter_column('adva_metadata', 'start', comment=None, schema=SCHEMA) + op.alter_column('adva_metadata', 'frequency', comment=None, schema=SCHEMA) + op.alter_column('adva_metadata', 'timemultiplier', comment=None, schema=SCHEMA) + op.alter_column('adva_metadata', 'multiplier', comment=None, schema=SCHEMA) \ No newline at end of file diff --git a/opensampl/server/migrations/_migrations/versions/2025_06_03_1235_create_reference_and_metric_tables.py b/opensampl/server/migrations/_migrations/versions/2025_06_03_1235_create_reference_and_metric_tables.py new file mode 100644 index 0000000..dce1cad --- /dev/null +++ b/opensampl/server/migrations/_migrations/versions/2025_06_03_1235_create_reference_and_metric_tables.py @@ -0,0 +1,162 @@ +"""create reference and metric tables + 
+Revision ID: d1546c1ecf9b +Revises: 07cf92bf4aa0 +Create Date: 2025-06-03 12:35:20.987981 + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa +import uuid + +from loguru import logger + +# revision identifiers, used by Alembic. +revision: str = 'd1546c1ecf9b' +down_revision: Union[str, None] = '07cf92bf4aa0' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + +SCHEMA = 'castdb' + +def upgrade(): + """Create reference system tables and populate with initial data""" + + # Create reference_type table + op.create_table('reference_type', + sa.Column('uuid', sa.String(length=36), nullable=False, primary_key=True, + comment="Auto generated primary key UUID for the reference type"), + sa.Column('name', sa.String(), nullable=False, unique=True, + comment="Unique name of the reference type (e.g., GPS, GNSS, Unknown)"), + sa.Column('description', sa.Text(), nullable=True, + comment="Optional human-readable description of the reference type"), + sa.Column('reference_table', sa.String(), nullable=True, + comment="Optional table name if the reference type is a compound type"), + schema=SCHEMA, + if_not_exists=True + ) + + # Create metric_type table + op.create_table('metric_type', + sa.Column('uuid', sa.String(length=36), nullable=False, primary_key=True, + comment="Auto generated primary key UUID for the metric type"), + sa.Column('name', sa.String(), nullable=True, unique=True, + comment="Unique name for the metric type (e.g., phase offset, delay, quality)"), + sa.Column('description', sa.Text(), nullable=True, + comment="Optional human-readable description of the metric"), + sa.Column('unit', sa.String(), nullable=False, + comment="Measurement unit (e.g., ns, s, ppm)"), + sa.Column('value_type', sa.String(), nullable=False, server_default='string', + comment="Data type of the value (e.g., float, int, string)"), + schema=SCHEMA, + if_not_exists=True + ) + + # Create reference 
table + op.create_table('reference', + sa.Column('uuid', sa.String(length=36), nullable=False, primary_key=True, + comment="Auto generated primary key UUID for the reference entry"), + sa.Column('reference_type_uuid', sa.String(length=36), sa.ForeignKey(f'{SCHEMA}.reference_type.uuid'), + comment="Foreign key to the reference type (e.g., GPS, GNSS, Probe)"), + sa.Column('compound_reference_uuid', sa.String(length=36), sa.ForeignKey(f'{SCHEMA}.probe_metadata.uuid'), nullable=True, + comment="Optional foreign key if the reference type is Compound. Which table it references is determined via reference_table field in reference_type table"), + schema=SCHEMA, + if_not_exists=True + ) + + # Populate reference_type table with initial values + reference_type_table = sa.table('reference_type', + sa.column('uuid', sa.String), + sa.column('name', sa.String), + sa.column('description', sa.Text), + sa.column('reference_table', sa.Text), + schema=SCHEMA + ) + + # Generate UUIDs for reference types + gps_ref_type_uuid = str(uuid.uuid4()) + gnss_ref_type_uuid = str(uuid.uuid4()) + probe_ref_type_uuid = str(uuid.uuid4()) + unknown_ref_type_uuid = str(uuid.uuid4()) + + op.bulk_insert(reference_type_table, [ + { + 'uuid': gps_ref_type_uuid, + 'name': 'GPS', + 'description': 'Global Positioning System time reference' + }, + { + 'uuid': gnss_ref_type_uuid, + 'name': 'GNSS', + 'description': 'Global Navigation Satellite System time reference' + }, + { + 'uuid': probe_ref_type_uuid, + 'name': 'PROBE', + 'description': 'Another probe device used as time reference', + 'reference_table': 'probe_metadata' + }, + { + 'uuid': unknown_ref_type_uuid, + 'name': 'UNKNOWN', + 'description': 'Unknown or unspecified reference type' + } + ]) + + reference_table = sa.table('reference', + sa.column('uuid', sa.String), + sa.column('reference_type_uuid', sa.String), + sa.column('compound_reference_uuid', sa.String), + schema=SCHEMA + ) + + unknown_ref_uuid = str(uuid.uuid4()) + + 
op.bulk_insert(reference_table, [ + { + 'uuid': unknown_ref_uuid, + 'reference_type_uuid': unknown_ref_type_uuid, + 'compound_reference_uuid': None + } + ]) + + # Populate metric_type table with initial values + metric_type_table = sa.table('metric_type', + sa.column('uuid', sa.String), + sa.column('name', sa.String), + sa.column('description', sa.Text), + sa.column('unit', sa.String), + sa.column('value_type', sa.String), + schema=SCHEMA + ) + + # Generate UUIDs for metric types + phase_offset_uuid = str(uuid.uuid4()) + unknown_metric_uuid = str(uuid.uuid4()) + + op.bulk_insert(metric_type_table, [ + { + 'uuid': phase_offset_uuid, + 'name': 'Phase Offset', + 'description': 'Difference in seconds between the probe\'s time reading and the reference time reading', + 'unit': 's', + 'value_type': 'float' + }, + { + 'uuid': unknown_metric_uuid, + 'name': 'UNKNOWN', + 'description': 'Unknown or unspecified metric type, with value_type of jsonb due to flexibility', + 'unit': 'unknown', + 'value_type': 'jsonb' + } + ]) + + +def downgrade(): + """Drop reference system tables""" + # Drop tables in reverse order due to foreign key constraints + op.drop_table('reference', schema=SCHEMA, if_exists=True) + op.drop_table('metric_type', schema=SCHEMA, if_exists=True) + op.drop_table('reference_type', schema=SCHEMA, if_exists=True) \ No newline at end of file diff --git a/opensampl/server/migrations/_migrations/versions/2025_06_03_1254_update_probe_data_to_take_reference_and_.py b/opensampl/server/migrations/_migrations/versions/2025_06_03_1254_update_probe_data_to_take_reference_and_.py new file mode 100644 index 0000000..4c370b0 --- /dev/null +++ b/opensampl/server/migrations/_migrations/versions/2025_06_03_1254_update_probe_data_to_take_reference_and_.py @@ -0,0 +1,167 @@ +"""update probe data to take reference and metric + +Revision ID: 519588f63e5c +Revises: d1546c1ecf9b +Create Date: 2025-06-03 12:54:47.183309 + +""" +from typing import Sequence, Union + +from alembic import op
+import sqlalchemy as sa +from loguru import logger + +# revision identifiers, used by Alembic. +revision: str = '519588f63e5c' +down_revision: Union[str, None] = 'd1546c1ecf9b' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + +SCHEMA = 'castdb' + +time_buckets = [ + #suffix interval cron schedule (min hr * * *) + ('1min', '1 minute', '*/5 * * * *'), # Every 5 minutes + ('5min', '5 minutes', '*/5 * * * *'), # Every 5 minutes + ('15min', '15 minutes', '*/15 * * * *'), # Every 15 minutes + ('1hour', '1 hour', '0 * * * *'), # Start of every hour + ('6hour', '6 hours', '0 */6 * * *'), # Every 6 hours + ('1day', '1 day', '0 0 * * *') # Midnight every day +] + +def upgrade(): + """Update probe_data table structure""" + # Add new columns to probe_data table + op.add_column('probe_data', + sa.Column('reference_uuid', sa.String(length=36), nullable=True, + comment="Foreign key to the reference point for the reading"), + schema=SCHEMA, if_not_exists=True) + + op.add_column('probe_data', + sa.Column('metric_type_uuid', sa.String(length=36), nullable=True, + comment="Foreign key to the metric type being measured"), + schema=SCHEMA, if_not_exists=True) + + connection = op.get_bind() + + unknown_reference_uuid = connection.execute( + sa.text(""" + SELECT r.uuid FROM castdb.reference r + JOIN castdb.reference_type rt ON r.reference_type_uuid = rt.uuid + WHERE lower(rt.name) = lower('UNKNOWN') + """) + ).scalar() + + phase_offset_metric_uuid = connection.execute( + sa.text("SELECT uuid FROM castdb.metric_type WHERE lower(name) = lower('Phase Offset')") + ).scalar() + + # Populate new columns with UNKNOWN reference and phase offset metric for existing records + op.execute( + sa.text(""" + UPDATE castdb.probe_data + SET reference_uuid = :ref_uuid, + metric_type_uuid = :metric_uuid + WHERE reference_uuid IS NULL + OR metric_type_uuid IS NULL + """).bindparams(ref_uuid=unknown_reference_uuid, 
metric_uuid=phase_offset_metric_uuid) + ) + + # Make the new columns non-nullable now that they have values + op.alter_column('probe_data', 'reference_uuid', nullable=False, schema=SCHEMA) + op.alter_column('probe_data', 'metric_type_uuid', nullable=False, schema=SCHEMA) + + # Add foreign key constraints + op.create_foreign_key( + 'fk_probe_data_reference_uuid', 'probe_data', 'reference', + ['reference_uuid'], ['uuid'], source_schema=SCHEMA, referent_schema=SCHEMA + ) + + op.create_foreign_key( + 'fk_probe_data_metric_type_uuid', 'probe_data', 'metric_type', + ['metric_type_uuid'], ['uuid'], source_schema=SCHEMA, referent_schema=SCHEMA + ) + + # Before we can change the type of "value" we need to drop the mat views + for suffix, _, _ in time_buckets: + op.execute(f"DROP MATERIALIZED VIEW IF EXISTS castdb.avg_phase_err_{suffix} CASCADE;") + op.execute(f"DROP MATERIALIZED VIEW IF EXISTS castdb.mtie_{suffix} CASCADE;") + + pk_name = connection.execute( + sa.text(""" + SELECT constraint_name + FROM information_schema.table_constraints + WHERE table_schema = :schema_name + AND table_name = 'probe_data' + AND constraint_type = 'PRIMARY KEY' + """).bindparams(schema_name=SCHEMA) + ).scalar() + + # Drop the old primary key constraint (whatever it's named) + if pk_name: + op.drop_constraint(pk_name, 'probe_data', type_='primary', schema=SCHEMA) + + # Create new primary key with all required columns + op.create_primary_key( + 'probe_data_pkey', 'probe_data', + ['time', 'probe_uuid', 'reference_uuid', 'metric_type_uuid'], + schema=SCHEMA + ) + + # Change value column from NUMERIC to JSONB (containing numeric value) + op.alter_column('probe_data', 'value', + type_=sa.dialects.postgresql.JSONB(), + postgresql_using='to_jsonb(value)', + comment="Measurement value stored as JSON; value's expected type defined via metric", + schema=SCHEMA) + +def downgrade(): + """Revert probe_data table structure changes""" + connection = op.get_bind() + + conflict_count = 
connection.execute(sa.text(""" + SELECT COUNT(*) + FROM (SELECT TIME, probe_uuid + FROM castdb.probe_data + GROUP BY TIME, probe_uuid + HAVING COUNT (*) > 1) dupes + """)).scalar() + + if conflict_count > 0: + raise Exception("Unsafe downgrade: would violate original primary key due to duplicated time/probe_uuid") + + # Add back the old NUMERIC value column + op.add_column('probe_data', + sa.Column('value_old', sa.NUMERIC(), nullable=True), + schema=SCHEMA) + + # Copy JSONB values back to NUMERIC (this will lose non-numeric data!) + op.execute(""" + UPDATE castdb.probe_data + SET value_old = (value::text)::numeric + WHERE value IS NOT NULL AND jsonb_typeof(value) = 'number' + """) + + # Drop the JSONB value column + op.drop_column('probe_data', 'value', schema=SCHEMA) + + # Rename the old column back to 'value' + op.alter_column('probe_data', 'value_old', new_column_name='value', schema=SCHEMA) + + # Drop the new primary key + op.drop_constraint('probe_data_pkey', 'probe_data', type_='primary', schema=SCHEMA) + + # Recreate the old primary key + op.create_primary_key( + 'probe_data_pkey', 'probe_data', + ['time', 'probe_uuid'], + schema=SCHEMA + ) + + # Drop foreign key constraints + op.drop_constraint('fk_probe_data_reference_uuid', 'probe_data', type_='foreignkey', schema=SCHEMA) + op.drop_constraint('fk_probe_data_metric_type_uuid', 'probe_data', type_='foreignkey', schema=SCHEMA) + + # Drop the new columns + op.drop_column('probe_data', 'reference_uuid', schema=SCHEMA) + op.drop_column('probe_data', 'metric_type_uuid', schema=SCHEMA) \ No newline at end of file diff --git a/opensampl/server/migrations/_migrations/versions/2025_06_03_1318_adding_freeform_metadata_to_adva.py b/opensampl/server/migrations/_migrations/versions/2025_06_03_1318_adding_freeform_metadata_to_adva.py new file mode 100644 index 0000000..6b19e32 --- /dev/null +++ b/opensampl/server/migrations/_migrations/versions/2025_06_03_1318_adding_freeform_metadata_to_adva.py @@ -0,0 +1,30 @@ 
+"""adding freeform metadata to adva + +Revision ID: 4b47485da562 +Revises: 519588f63e5c +Create Date: 2025-06-03 13:18:34.256294 + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa +from loguru import logger + +# revision identifiers, used by Alembic. +revision: str = '4b47485da562' +down_revision: Union[str, None] = '519588f63e5c' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + +SCHEMA='castdb' + +def upgrade() -> None: + op.add_column('adva_metadata', + sa.Column('additional_metadata', sa.dialects.postgresql.JSONB(), nullable=True, + comment="Additional metadata found in the file headers that did not match existing columns"), + schema=SCHEMA) + + +def downgrade() -> None: + op.drop_column('adva_metadata', 'additional_metadata', schema=SCHEMA, if_exists=True) diff --git a/opensampl/server/migrations/_migrations/versions/2025_06_03_1358_setting_default_reference_and_metric.py b/opensampl/server/migrations/_migrations/versions/2025_06_03_1358_setting_default_reference_and_metric.py new file mode 100644 index 0000000..6b63230 --- /dev/null +++ b/opensampl/server/migrations/_migrations/versions/2025_06_03_1358_setting_default_reference_and_metric.py @@ -0,0 +1,97 @@ +"""setting default reference and metric + +Revision ID: 90e87a6293f7 +Revises: 4b47485da562 +Create Date: 2025-06-03 13:58:13.297062 + +""" +from typing import Sequence, Union +import os +from alembic import op +import sqlalchemy as sa + +from loguru import logger + +# revision identifiers, used by Alembic. 
+revision: str = '90e87a6293f7' +down_revision: Union[str, None] = '4b47485da562' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + +SCHEMA = 'castdb' + +def upgrade() -> None: + # Create the default_table + op.create_table( + 'defaults', + sa.Column('table_name', sa.Text, primary_key=True, comment='Name of the table/category this entry belongs to'), + sa.Column('uuid', sa.String(36), nullable=False, comment='UUID reference resolved from name_value'), + schema=SCHEMA, + if_not_exists=True, + ) + + # 2. Function to get default UUID + op.execute(sa.text(f""" + CREATE OR REPLACE FUNCTION get_default_uuid_for(table_arg TEXT) + RETURNS UUID AS $$ + DECLARE + result UUID; + BEGIN + SELECT uuid INTO result + FROM {SCHEMA}.defaults + WHERE table_name = table_arg; + + IF result IS NULL THEN + RAISE EXCEPTION 'No default UUID found for table: %', table_arg; + END IF; + + RETURN result; + END; + $$ LANGUAGE plpgsql; + """)) + + # 3. Function to set default UUID by name + op.execute(sa.text(f""" + CREATE OR REPLACE FUNCTION set_default_by_name( + table_arg TEXT, + name_value TEXT + ) + RETURNS VOID AS $$ + DECLARE + id UUID; + schema_name TEXT := '{SCHEMA}'; + sql TEXT; + BEGIN + -- Use format with two %I to quote both schema and table names + sql := format('SELECT uuid FROM %I.%I WHERE lower(name) = lower($1) LIMIT 1', + schema_name, table_arg); + EXECUTE sql INTO id USING name_value; + + IF id IS NULL THEN + RAISE EXCEPTION 'No row found in %.% with name = %', schema_name, table_arg, name_value; + END IF; + + INSERT INTO "{SCHEMA}"."defaults" (table_name, uuid) + VALUES (table_arg, id) + ON CONFLICT (table_name) DO UPDATE + SET uuid = EXCLUDED.uuid; + END; + $$ LANGUAGE plpgsql; + """)) + + # Set the defaults + op.execute(sa.text(f"""SELECT set_default_by_name('metric_type', 'Phase Offset')""")) + op.execute(sa.text(f"""SELECT set_default_by_name('reference_type', 'UNKNOWN')""")) + + op.execute(sa.text(f""" + INSERT 
INTO "{SCHEMA}"."defaults" (table_name, uuid) + SELECT 'reference', r.uuid FROM {SCHEMA}.reference r WHERE r.reference_type_uuid = get_default_uuid_for('reference_type')::text LIMIT 1 + """)) + + +def downgrade() -> None: + op.execute(sa.text(""" + DROP FUNCTION IF EXISTS set_default_by_name CASCADE; + DROP FUNCTION IF EXISTS get_default_uuid_for CASCADE; + """)) + op.drop_table('defaults', schema=SCHEMA, if_exists=True) diff --git a/opensampl/server/migrations/_migrations/versions/2025_06_03_1539_making_default_trigger_functions.py b/opensampl/server/migrations/_migrations/versions/2025_06_03_1539_making_default_trigger_functions.py new file mode 100644 index 0000000..4578b58 --- /dev/null +++ b/opensampl/server/migrations/_migrations/versions/2025_06_03_1539_making_default_trigger_functions.py @@ -0,0 +1,55 @@ +"""making default trigger functions + +Revision ID: 94f32a76726e +Revises: 90e87a6293f7 +Create Date: 2025-06-03 15:39:41.048401 + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision: str = '94f32a76726e' +down_revision: Union[str, None] = '90e87a6293f7' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + op.execute(sa.text(""" + -- Trigger function to set default values for probe_data table + CREATE OR REPLACE FUNCTION set_probe_data_defaults() + RETURNS TRIGGER AS $$ + BEGIN + -- Set default reference_uuid if not provided + IF NEW.reference_uuid IS NULL THEN + NEW.reference_uuid := get_default_uuid_for('reference'); + END IF; + + -- Set default metric_type_uuid if not provided + IF NEW.metric_type_uuid IS NULL THEN + NEW.metric_type_uuid := get_default_uuid_for('metric_type'); + END IF; + + RETURN NEW; + END; + $$ LANGUAGE plpgsql; + + -- Create the trigger that fires before INSERT + CREATE TRIGGER probe_data_set_defaults + BEFORE INSERT ON castdb.probe_data + FOR EACH ROW + EXECUTE FUNCTION set_probe_data_defaults(); + """)) + + +def downgrade() -> None: + op.execute(sa.text( + """ + DROP TRIGGER IF EXISTS probe_data_set_defaults ON castdb.probe_data; + DROP FUNCTION IF EXISTS set_probe_data_defaults; + """ + )) diff --git a/opensampl/server/migrations/_migrations/versions/2025_06_23_0825_data_filtering_functions.py b/opensampl/server/migrations/_migrations/versions/2025_06_23_0825_data_filtering_functions.py new file mode 100644 index 0000000..6aa70a9 --- /dev/null +++ b/opensampl/server/migrations/_migrations/versions/2025_06_23_0825_data_filtering_functions.py @@ -0,0 +1,103 @@ +"""data filtering functions + +Revision ID: c73212f2c0dd +Revises: 94f32a76726e +Create Date: 2025-06-23 08:25:44.638142 + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision: str = 'c73212f2c0dd' +down_revision: Union[str, None] = '94f32a76726e' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + op.execute(sa.text(""" + -- Function to filter probe data by probe UUID + CREATE OR REPLACE FUNCTION get_probe_data_by_probe(probe_uuid_param TEXT) + RETURNS TABLE( + "time" TIMESTAMP, + probe_uuid VARCHAR(36), + reference_uuid VARCHAR(36), + metric_type_uuid VARCHAR(36), + value JSONB + ) AS $$ + BEGIN + RETURN QUERY + SELECT + pd.time, + pd.probe_uuid, + pd.reference_uuid, + pd.metric_type_uuid, + pd.value + FROM castdb.probe_data pd + WHERE pd.probe_uuid = probe_uuid_param + ORDER BY pd.time; + END; + $$ LANGUAGE plpgsql; + + -- Function to filter probe data by metric type UUID + CREATE OR REPLACE FUNCTION get_probe_data_by_metric(metric_type_uuid_param TEXT) + RETURNS TABLE( + "time" TIMESTAMP, + probe_uuid VARCHAR(36), + reference_uuid VARCHAR(36), + metric_type_uuid VARCHAR(36), + value JSONB + ) AS $$ + BEGIN + RETURN QUERY + SELECT + pd.time, + pd.probe_uuid, + pd.reference_uuid, + pd.metric_type_uuid, + pd.value + FROM castdb.probe_data pd + WHERE pd.metric_type_uuid = metric_type_uuid_param + ORDER BY pd.time; + END; + $$ LANGUAGE plpgsql; + + -- Function to filter probe data by both probe UUID and metric type UUID + CREATE OR REPLACE FUNCTION get_probe_data_by_probe_and_metric( + probe_uuid_param TEXT, + metric_type_uuid_param TEXT + ) + RETURNS TABLE( + "time" TIMESTAMP, + probe_uuid VARCHAR(36), + reference_uuid VARCHAR(36), + metric_type_uuid VARCHAR(36), + value JSONB + ) AS $$ + BEGIN + RETURN QUERY + SELECT + pd.time, + pd.probe_uuid, + pd.reference_uuid, + pd.metric_type_uuid, + pd.value + FROM castdb.probe_data pd + WHERE pd.probe_uuid = probe_uuid_param + AND pd.metric_type_uuid = metric_type_uuid_param + ORDER BY pd.time; + END; + $$ LANGUAGE plpgsql; + """)) + + +def downgrade() -> None: + op.execute(sa.text(""" + DROP FUNCTION 
IF EXISTS get_probe_data_by_probe; + DROP FUNCTION IF EXISTS get_probe_data_by_metric; + DROP FUNCTION IF EXISTS get_probe_data_by_probe_and_metric; + """)) diff --git a/opensampl/server/migrations/_migrations/versions/2025_06_23_1654_making_microsemi_table.py b/opensampl/server/migrations/_migrations/versions/2025_06_23_1654_making_microsemi_table.py new file mode 100644 index 0000000..169c404 --- /dev/null +++ b/opensampl/server/migrations/_migrations/versions/2025_06_23_1654_making_microsemi_table.py @@ -0,0 +1,73 @@ +"""making microsemi table + +Revision ID: c45e2dbdf900 +Revises: c73212f2c0dd +Create Date: 2025-06-23 16:54:41.381184 + +""" +from typing import Sequence, Union +from sqlalchemy.dialects import postgresql + +from alembic import op +import sqlalchemy as sa +import uuid + +# revision identifiers, used by Alembic. +revision: str = 'c45e2dbdf900' +down_revision: Union[str, None] = 'c73212f2c0dd' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + +SCHEMA='castdb' + +def upgrade(): + op.create_table( + 'microchip_twst_metadata', + sa.Column( + 'probe_uuid', + sa.String(), + sa.ForeignKey('castdb.probe_metadata.uuid', ondelete='CASCADE'), + primary_key=True, + comment='Foreign key to the associated probe' + ), + sa.Column( + 'additional_metadata', + postgresql.JSONB(astext_type=sa.Text()), + nullable=True, + comment='Additional metadata found in the file headers that did not match existing columns' + ), + comment='Microchip TWST Clock Probe specific metadata provided by probe text file exports.', + schema='castdb', + if_not_exists=True, + ) + + metric_type_table = sa.table('metric_type', + sa.column('uuid', sa.String), + sa.column('name', sa.String), + sa.column('description', sa.Text), + sa.column('unit', sa.String), + sa.column('value_type', sa.String), + schema=SCHEMA + ) + + # Generate UUIDs for metric types + ebno_uuid = str(uuid.uuid4()) + + op.bulk_insert(metric_type_table, [ + { + 'uuid': 
ebno_uuid, + 'name': 'Eb/No', + 'description': ( + "Energy per bit to noise power spectral density ratio measured at the clock probe. " + "Indicates the quality of the received signal relative to noise."), + 'unit': 'dB', + 'value_type': 'float' + } + ]) + + +def downgrade(): + op.drop_table('microchip_twst_metadata', schema='castdb', if_exists=True) + + op.execute(sa.text("DELETE FROM castdb.metric_type WHERE name = 'Eb/No'")) + diff --git a/opensampl/server/migrations/_migrations/versions/2025_08_15_0840_create_microchip_tp400_metadata.py b/opensampl/server/migrations/_migrations/versions/2025_08_15_0840_create_microchip_tp400_metadata.py new file mode 100644 index 0000000..2b16d7d --- /dev/null +++ b/opensampl/server/migrations/_migrations/versions/2025_08_15_0840_create_microchip_tp400_metadata.py @@ -0,0 +1,44 @@ +"""create microchip tp400 metadata + +Revision ID: 2e2b5c419a9b +Revises: c45e2dbdf900 +Create Date: 2025-08-15 08:40:34.520515 + +""" +from typing import Sequence, Union +from sqlalchemy.dialects import postgresql + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. 
+revision: str = '2e2b5c419a9b' +down_revision: Union[str, None] = 'c45e2dbdf900' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + op.create_table( + 'microchip_tp4100_metadata', + sa.Column( + 'probe_uuid', + sa.String(), + sa.ForeignKey('castdb.probe_metadata.uuid', ondelete='CASCADE'), + primary_key=True, + comment='Foreign key to the associated probe' + ), + sa.Column( + 'additional_metadata', + postgresql.JSONB(astext_type=sa.Text()), + nullable=True, + comment='Additional metadata found in the file headers that did not match existing columns' + ), + comment='Microchip TP4100 Clock Probe specific metadata provided by probe text file exports.', + schema='castdb', + if_not_exists=True, + ) + +def downgrade() -> None: + op.drop_table('microchip_tp4100_metadata', schema='castdb', if_exists=True) + diff --git a/opensampl/server/migrations/_migrations/versions/2025_09_22_0915_campus_view_not_forced.py b/opensampl/server/migrations/_migrations/versions/2025_09_22_0915_campus_view_not_forced.py new file mode 100644 index 0000000..13d5d8e --- /dev/null +++ b/opensampl/server/migrations/_migrations/versions/2025_09_22_0915_campus_view_not_forced.py @@ -0,0 +1,65 @@ +"""campus view not forced + +Revision ID: d419cac01df2 +Revises: 2e2b5c419a9b +Create Date: 2025-09-22 09:15:53.973961 + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision: str = 'd419cac01df2' +down_revision: Union[str, None] = '2e2b5c419a9b' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + op.execute(""" +CREATE OR REPLACE VIEW castdb.campus_locations AS +WITH ornl AS ( + SELECT l_1.uuid, l_1.name, l_1.geom, l_1.public + FROM castdb.locations l_1 + WHERE l_1.name = 'Oak Ridge National Laboratory' +), +hvc AS ( + SELECT l_1.uuid, l_1.name, l_1.geom, l_1.public + FROM castdb.locations l_1 + WHERE l_1.name = 'Hardin Valley Campus' +) +SELECT + l.uuid, + l.name, + l.public, + CASE + WHEN l.name = hvc.name AND ornl.geom IS NOT NULL + THEN ST_Y(ornl.geom::geometry) + ELSE ST_Y(l.geom::geometry) + END AS latitude, + CASE + WHEN l.name = hvc.name AND ornl.geom IS NOT NULL + THEN ST_X(ornl.geom::geometry) + ELSE ST_X(l.geom::geometry) + END AS longitude, + CASE + WHEN l.name = hvc.name AND ornl.name IS NOT NULL + THEN ornl.name + ELSE l.name + END AS campus, + CASE + WHEN l.name = hvc.name AND ornl.geom IS NOT NULL + THEN ornl.geom + ELSE l.geom + END AS geom +FROM castdb.locations l +LEFT JOIN hvc ON TRUE +LEFT JOIN ornl ON TRUE; +""") + + +def downgrade() -> None: + pass diff --git a/opensampl/server/migrations/alembic.ini b/opensampl/server/migrations/alembic.ini new file mode 100644 index 0000000..891ca1d --- /dev/null +++ b/opensampl/server/migrations/alembic.ini @@ -0,0 +1,114 @@ +# A generic, single database configuration. + +[alembic] +# path to migration scripts +script_location = _migrations + +# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s +# Uncomment the line below if you want the files to be prepended with date and time +# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file +# for all available tokens +file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d_%%(slug)s + +# sys.path path, will be prepended to sys.path if present. 
+# defaults to the current working directory. +prepend_sys_path = . + +# timezone to use when rendering the date within the migration file +# as well as the filename. +# If specified, requires the python>=3.9 or backports.zoneinfo library. +# Any required deps can installed by adding `alembic[tz]` to the pip requirements +# string value is passed to ZoneInfo() +# leave blank for localtime +# timezone = + +# max length of characters to apply to the +# "slug" field +# truncate_slug_length = 40 + +# set to 'true' to run the environment during +# the 'revision' command, regardless of autogenerate +# revision_environment = false + +# set to 'true' to allow .pyc and .pyo files without +# a source .py file to be detected as revisions in the +# versions/ directory +# sourceless = false + +# version location specification; This defaults +# to migrations/versions. When using multiple version +# directories, initial revisions must be specified with --version-path. +# The path separator used here should be the separator specified by "version_path_separator" below. +# version_locations = %(here)s/bar:%(here)s/bat:migrations/versions + +# version path separator; As mentioned above, this is the character used to split +# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep. +# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas. +# Valid values for version_path_separator are: +# +# version_path_separator = : +# version_path_separator = ; +# version_path_separator = space +version_path_separator = os # Use os.pathsep. Default configuration used for new projects. 
+ +# set to 'true' to search source files recursively +# in each "version_locations" directory +# new in Alembic version 1.10 +# recursive_version_locations = false + +# the output encoding used when revision files +# are written from script.py.mako +# output_encoding = utf-8 + + +[post_write_hooks] +# post_write_hooks defines scripts or Python functions that are run +# on newly generated revision scripts. See the documentation for further +# detail and examples + +# format using "black" - use the console_scripts runner, against the "black" entrypoint +# hooks = black +# black.type = console_scripts +# black.entrypoint = black +# black.options = -l 79 REVISION_SCRIPT_FILENAME + +# lint with attempts to fix using "ruff" - use the exec runner, execute a binary +# hooks = ruff +# ruff.type = exec +# ruff.executable = %(here)s/.venv/bin/ruff +# ruff.options = --fix REVISION_SCRIPT_FILENAME + +# Logging configuration +[loggers] +keys = root,sqlalchemy,alembic + +[handlers] +keys = console + +[formatters] +keys = generic + +[logger_root] +level = WARN +handlers = console +qualname = + +[logger_sqlalchemy] +level = WARN +handlers = +qualname = sqlalchemy.engine + +[logger_alembic] +level = INFO +handlers = +qualname = alembic + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatter_generic] +format = %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %H:%M:%S diff --git a/opensampl/vendors/base_probe.py b/opensampl/vendors/base_probe.py index eaa8948..8001bc4 100644 --- a/opensampl/vendors/base_probe.py +++ b/opensampl/vendors/base_probe.py @@ -18,7 +18,7 @@ import requests.exceptions import yaml from loguru import logger -from pydantic import BaseModel, ValidationInfo, field_serializer, field_validator, model_validator +from pydantic import BaseModel, Field, ValidationInfo, field_serializer, field_validator, model_validator from sqlalchemy.exc import IntegrityError from tqdm import tqdm @@ -148,7 +148,7 @@ 
class RandomDataConfig(BaseModel): # General configuration num_probes: int = 1 - duration_hours: float = 1.0 + duration_hours: float = Field(1.0, alias="duration") seed: Optional[int] = None # Time series parameters diff --git a/pyproject.toml b/pyproject.toml index 109602b..b9500f6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "opensampl" -version = "1.2.0" +version = "1.1.5" description = "Python tools for adding clock data to a timescale db." license = {file = "LICENSE"} authors = [ @@ -68,7 +68,14 @@ Documentation = "https://ornl.github.io/OpenSAMPL" Changelog = "https://github.com/ORNL/OpenSAMPL/blob/main/CHANGELOG.md" [project.optional-dependencies] -server = [] +migrations = [ + "alembic", +] +backend = [ + "fastapi", + "uvicorn", + "prometheus-client", +] collect = ["telnetlib3==2.0.4"] [project.scripts] @@ -115,7 +122,7 @@ build-backend = "hatchling.build" [tool.ruff] line-length = 120 -exclude = [".git", "__pycache__", "venv", "env", ".venv", ".env", "build", "dist", "docs"] +exclude = [".git", "__pycache__", "venv", "env", ".venv", ".env", "build", "dist", "docs", "opensampl/server/migrations/**/*.py",] include = ["opensampl/**/*.py"] [tool.ruff.lint] @@ -129,7 +136,7 @@ ignore = ["D203", "D212", "D400", "D415", "ANN401", "S101", "PLR2004", "COM812", [tool.ruff.lint.per-file-ignores] "opensampl/vendors/**/*.py" = ['S311'] # we want to ignore the errors about random - +"opensampl/server/backend/main.py" = ['B008', 'ARG001'] #ignore complaints about calling functions in args [tool.ruff.lint.pylint] max-args = 10